From 0b476781b010fef90870738695c08fb75322ed5f Mon Sep 17 00:00:00 2001
From: bubbliiiing <3323290568@qq.com>
Date: Tue, 16 Sep 2025 20:07:21 +0800
Subject: [PATCH 1/8] Update flux readme

---
 scripts/flux/README_TRAIN.md      | 165 ++++++++++++++++++++++++++++++
 scripts/flux/README_TRAIN_LORA.md | 153 +++++++++++++++++++++++++++
 scripts/flux/train.py             |   1 -
 scripts/flux/train_lora.py        |   1 -
 4 files changed, 318 insertions(+), 2 deletions(-)
 create mode 100755 scripts/flux/README_TRAIN.md
 create mode 100755 scripts/flux/README_TRAIN_LORA.md

diff --git a/scripts/flux/README_TRAIN.md b/scripts/flux/README_TRAIN.md
new file mode 100755
index 00000000..b091fffe
--- /dev/null
+++ b/scripts/flux/README_TRAIN.md
@@ -0,0 +1,165 @@
+## Training Code
+
+We can choose whether to use DeepSpeed or FSDP when training Flux; both save a lot of GPU memory.
+
+Some parameters in the sh file can be confusing; they are explained in this document:
+
+- `enable_bucket` is used to enable bucket training. When enabled, the model does not center-crop the images; instead, it trains on the whole images after grouping them into buckets by resolution.
+- `random_hw_adapt` is used to enable automatic height and width scaling for images. When `random_hw_adapt` is enabled, the training images use `image_sample_size` as the maximum height/width and `512` as the minimum.
+  - For example, when `random_hw_adapt` is enabled and `image_sample_size=1024`, the resolution of the image inputs for training ranges from `512x512` to `1024x1024`, as illustrated by the sketch below.
+- `resume_from_checkpoint` is used to specify whether training should be resumed from a previous checkpoint. Use a path, or `"latest"` to automatically select the last available checkpoint.
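+
+The following is a minimal, illustrative sketch of the bucketing idea described above. It is not the actual implementation in `scripts/flux/train.py`; the `pick_bucket` helper, the step size of 64, and the snapping rule are assumptions made for the example:
+
+```python
+# Illustrative only: keep the aspect ratio and pick a training resolution
+# between 512 and `image_sample_size`, instead of center-cropping to a square.
+import random
+
+def pick_bucket(width, height, image_sample_size=1024, min_size=512, step=64):
+    base = random.randrange(min_size, image_sample_size + 1, step)  # random target size
+    aspect = width / height
+    if aspect >= 1.0:
+        bucket_w, bucket_h = base, int(base / aspect)
+    else:
+        bucket_w, bucket_h = int(base * aspect), base
+    # Snap both sides to a multiple of `step`, but never below `min_size`.
+    snap = lambda x: max(min_size, (x // step) * step)
+    return snap(bucket_w), snap(bucket_h)
+
+print(pick_bucket(1920, 1080))  # e.g. (832, 512) when the random base is 832
+```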
+
+Without DeepSpeed:
+
+Training Flux without DeepSpeed may result in insufficient GPU memory.
+```sh
+export MODEL_NAME="models/Diffusion_Transformer/FLUX.1-dev"
+export DATASET_NAME="datasets/internal_datasets/"
+export DATASET_META_NAME="datasets/internal_datasets/metadata.json"
+# NCCL_IB_DISABLE=1 and NCCL_P2P_DISABLE=1 are used in multi nodes without RDMA.
+# export NCCL_IB_DISABLE=1
+# export NCCL_P2P_DISABLE=1
+NCCL_DEBUG=INFO
+
+accelerate launch --mixed_precision="bf16" scripts/flux/train.py \
+  --pretrained_model_name_or_path=$MODEL_NAME \
+  --train_data_dir=$DATASET_NAME \
+  --train_data_meta=$DATASET_META_NAME \
+  --train_batch_size=1 \
+  --image_sample_size=1024 \
+  --gradient_accumulation_steps=1 \
+  --dataloader_num_workers=8 \
+  --num_train_epochs=100 \
+  --checkpointing_steps=50 \
+  --learning_rate=2e-05 \
+  --lr_scheduler="constant_with_warmup" \
+  --lr_warmup_steps=100 \
+  --seed=42 \
+  --output_dir="output_dir" \
+  --gradient_checkpointing \
+  --mixed_precision="bf16" \
+  --adam_weight_decay=3e-2 \
+  --adam_epsilon=1e-10 \
+  --vae_mini_batch=1 \
+  --max_grad_norm=0.05 \
+  --enable_bucket \
+  --uniform_sampling \
+  --trainable_modules "."
+```
+
+With DeepSpeed zero-2:
+
+```sh
+export MODEL_NAME="models/Diffusion_Transformer/FLUX.1-dev"
+export DATASET_NAME="datasets/internal_datasets/"
+export DATASET_META_NAME="datasets/internal_datasets/metadata.json"
+# NCCL_IB_DISABLE=1 and NCCL_P2P_DISABLE=1 are used in multi nodes without RDMA.
+# export NCCL_IB_DISABLE=1
+# export NCCL_P2P_DISABLE=1
+NCCL_DEBUG=INFO
+
+accelerate launch --use_deepspeed --deepspeed_config_file config/zero_stage2_config.json --deepspeed_multinode_launcher standard scripts/flux/train.py \
+  --pretrained_model_name_or_path=$MODEL_NAME \
+  --train_data_dir=$DATASET_NAME \
+  --train_data_meta=$DATASET_META_NAME \
+  --train_batch_size=1 \
+  --image_sample_size=1024 \
+  --gradient_accumulation_steps=1 \
+  --dataloader_num_workers=8 \
+  --num_train_epochs=100 \
+  --checkpointing_steps=50 \
+  --learning_rate=2e-05 \
+  --lr_scheduler="constant_with_warmup" \
+  --lr_warmup_steps=100 \
+  --seed=42 \
+  --output_dir="output_dir" \
+  --gradient_checkpointing \
+  --mixed_precision="bf16" \
+  --adam_weight_decay=3e-2 \
+  --adam_epsilon=1e-10 \
+  --vae_mini_batch=1 \
+  --max_grad_norm=0.05 \
+  --enable_bucket \
+  --uniform_sampling \
+  --trainable_modules "."
+```
+
+With DeepSpeed zero-3:
+
+After training, you can use the following command to get the final model:
+```sh
+python scripts/zero_to_bf16.py output_dir/checkpoint-{your-num-steps} output_dir/checkpoint-{your-num-steps}-outputs --max_shard_size 80GB --safe_serialization
+```
+
+The training shell command is as follows:
+```sh
+export MODEL_NAME="models/Diffusion_Transformer/FLUX.1-dev"
+export DATASET_NAME="datasets/internal_datasets/"
+export DATASET_META_NAME="datasets/internal_datasets/metadata.json"
+# NCCL_IB_DISABLE=1 and NCCL_P2P_DISABLE=1 are used in multi nodes without RDMA.
+# export NCCL_IB_DISABLE=1
+# export NCCL_P2P_DISABLE=1
+NCCL_DEBUG=INFO
+
+accelerate launch --zero_stage 3 --zero3_save_16bit_model true --zero3_init_flag true --use_deepspeed --deepspeed_config_file config/zero_stage3_config.json --deepspeed_multinode_launcher standard scripts/flux/train.py \
+  --pretrained_model_name_or_path=$MODEL_NAME \
+  --train_data_dir=$DATASET_NAME \
+  --train_data_meta=$DATASET_META_NAME \
+  --train_batch_size=1 \
+  --image_sample_size=1024 \
+  --gradient_accumulation_steps=1 \
+  --dataloader_num_workers=8 \
+  --num_train_epochs=100 \
+  --checkpointing_steps=50 \
+  --learning_rate=2e-05 \
+  --lr_scheduler="constant_with_warmup" \
+  --lr_warmup_steps=100 \
+  --seed=42 \
+  --output_dir="output_dir" \
+  --gradient_checkpointing \
+  --mixed_precision="bf16" \
+  --adam_weight_decay=3e-2 \
+  --adam_epsilon=1e-10 \
+  --vae_mini_batch=1 \
+  --max_grad_norm=0.05 \
+  --enable_bucket \
+  --uniform_sampling \
+  --trainable_modules "."
+```
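+
+For reference, the conversion step above is conceptually similar to the following sketch, which gathers the ZeRO-3 shards into a single state dict and stores it in bf16. The checkpoint path and output file name are placeholders; `scripts/zero_to_bf16.py` remains the supported way to do this:
+
+```python
+# Conceptual sketch of a ZeRO-3 -> bf16 consolidation, not a replacement for
+# scripts/zero_to_bf16.py.
+import os
+
+import torch
+from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+from safetensors.torch import save_file
+
+checkpoint_dir = "output_dir/checkpoint-50"      # a ZeRO-3 checkpoint saved during training
+output_dir = "output_dir/checkpoint-50-outputs"  # where the consolidated weights go
+os.makedirs(output_dir, exist_ok=True)
+
+# Gather the parameter shards from all ranks into one fp32 state dict,
+# then cast to bf16 and save it as a single safetensors file.
+state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir)
+state_dict = {k: v.to(torch.bfloat16) for k, v in state_dict.items()}
+save_file(state_dict, os.path.join(output_dir, "diffusion_pytorch_model.safetensors"))
+```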
+
+With FSDP:
+
+```sh
+export MODEL_NAME="models/Diffusion_Transformer/FLUX.1-dev"
+export DATASET_NAME="datasets/internal_datasets/"
+export DATASET_META_NAME="datasets/internal_datasets/metadata.json"
+# NCCL_IB_DISABLE=1 and NCCL_P2P_DISABLE=1 are used in multi nodes without RDMA.
+# export NCCL_IB_DISABLE=1
+# export NCCL_P2P_DISABLE=1
+NCCL_DEBUG=INFO
+
+accelerate launch --mixed_precision="bf16" --use_fsdp --fsdp_auto_wrap_policy TRANSFORMER_BASED_WRAP --fsdp_transformer_layer_cls_to_wrap FluxSingleTransformerBlock,FluxTransformerBlock --fsdp_sharding_strategy "FULL_SHARD" --fsdp_state_dict_type=SHARDED_STATE_DICT --fsdp_backward_prefetch "BACKWARD_PRE" --fsdp_cpu_ram_efficient_loading False scripts/flux/train.py \
+  --pretrained_model_name_or_path=$MODEL_NAME \
+  --train_data_dir=$DATASET_NAME \
+  --train_data_meta=$DATASET_META_NAME \
+  --train_batch_size=1 \
+  --image_sample_size=1024 \
+  --gradient_accumulation_steps=1 \
+  --dataloader_num_workers=8 \
+  --num_train_epochs=100 \
+  --checkpointing_steps=50 \
+  --learning_rate=2e-05 \
+  --lr_scheduler="constant_with_warmup" \
+  --lr_warmup_steps=100 \
+  --seed=42 \
+  --output_dir="output_dir" \
+  --gradient_checkpointing \
+  --mixed_precision="bf16" \
+  --adam_weight_decay=3e-2 \
+  --adam_epsilon=1e-10 \
+  --vae_mini_batch=1 \
+  --max_grad_norm=0.05 \
+  --enable_bucket \
+  --uniform_sampling \
+  --trainable_modules "."
+```
\ No newline at end of file
diff --git a/scripts/flux/README_TRAIN_LORA.md b/scripts/flux/README_TRAIN_LORA.md
new file mode 100755
index 00000000..021c4918
--- /dev/null
+++ b/scripts/flux/README_TRAIN_LORA.md
@@ -0,0 +1,153 @@
+## LoRA Training Code
+
+We can choose whether to use DeepSpeed or FSDP when training Flux; both save a lot of GPU memory.
+
+Some parameters in the sh file can be confusing; they are explained in this document:
+
+- `enable_bucket` is used to enable bucket training. When enabled, the model does not center-crop the images; instead, it trains on the whole images after grouping them into buckets by resolution.
+- `random_hw_adapt` is used to enable automatic height and width scaling for images. When `random_hw_adapt` is enabled, the training images use `image_sample_size` as the maximum height/width and `512` as the minimum.
+  - For example, when `random_hw_adapt` is enabled and `image_sample_size=1024`, the resolution of the image inputs for training ranges from `512x512` to `1024x1024`.
+- `resume_from_checkpoint` is used to specify whether training should be resumed from a previous checkpoint. Use a path, or `"latest"` to automatically select the last available checkpoint.
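+
+Once a LoRA has been trained with the commands below, it can be merged into the inference pipeline in the same way `examples/flux/predict_t2i.py` does. Here is a minimal sketch: the pipeline construction is omitted, the prompt and sampling arguments are placeholders, and only the `merge_lora`/`unmerge_lora` calls mirror the example script:
+
+```python
+# Minimal sketch of using a trained LoRA at inference time, following the
+# merge_lora / unmerge_lora pattern from examples/flux/predict_t2i.py.
+import torch
+
+from videox_fun.utils.lora_utils import merge_lora, unmerge_lora
+
+def generate_with_lora(pipeline, prompt, lora_path, lora_weight=0.55,
+                       device="cuda", weight_dtype=torch.bfloat16):
+    # Temporarily fold the LoRA weights into the transformer, run the pipeline,
+    # then restore the original weights.
+    pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype)
+    images = pipeline(prompt, num_inference_steps=28).images
+    pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype)
+    return images
+```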
+
+Without DeepSpeed:
+
+Training Flux without DeepSpeed may result in insufficient GPU memory.
+```sh
+export MODEL_NAME="models/Diffusion_Transformer/FLUX.1-dev"
+export DATASET_NAME="datasets/internal_datasets/"
+export DATASET_META_NAME="datasets/internal_datasets/metadata.json"
+# NCCL_IB_DISABLE=1 and NCCL_P2P_DISABLE=1 are used in multi nodes without RDMA.
+# export NCCL_IB_DISABLE=1
+# export NCCL_P2P_DISABLE=1
+NCCL_DEBUG=INFO
+
+accelerate launch --mixed_precision="bf16" scripts/flux/train_lora.py \
+  --pretrained_model_name_or_path=$MODEL_NAME \
+  --train_data_dir=$DATASET_NAME \
+  --train_data_meta=$DATASET_META_NAME \
+  --train_batch_size=1 \
+  --image_sample_size=1024 \
+  --gradient_accumulation_steps=1 \
+  --dataloader_num_workers=8 \
+  --num_train_epochs=100 \
+  --checkpointing_steps=50 \
+  --learning_rate=1e-04 \
+  --seed=42 \
+  --output_dir="output_dir" \
+  --gradient_checkpointing \
+  --mixed_precision="bf16" \
+  --adam_weight_decay=3e-2 \
+  --adam_epsilon=1e-10 \
+  --vae_mini_batch=1 \
+  --max_grad_norm=0.05 \
+  --enable_bucket \
+  --uniform_sampling
+```
+
+With DeepSpeed zero-2:
+
+```sh
+export MODEL_NAME="models/Diffusion_Transformer/FLUX.1-dev"
+export DATASET_NAME="datasets/internal_datasets/"
+export DATASET_META_NAME="datasets/internal_datasets/metadata.json"
+# NCCL_IB_DISABLE=1 and NCCL_P2P_DISABLE=1 are used in multi nodes without RDMA.
+# export NCCL_IB_DISABLE=1
+# export NCCL_P2P_DISABLE=1
+NCCL_DEBUG=INFO
+
+accelerate launch --use_deepspeed --deepspeed_config_file config/zero_stage2_config.json --deepspeed_multinode_launcher standard scripts/flux/train_lora.py \
+  --pretrained_model_name_or_path=$MODEL_NAME \
+  --train_data_dir=$DATASET_NAME \
+  --train_data_meta=$DATASET_META_NAME \
+  --train_batch_size=1 \
+  --image_sample_size=1024 \
+  --gradient_accumulation_steps=1 \
+  --dataloader_num_workers=8 \
+  --num_train_epochs=100 \
+  --checkpointing_steps=50 \
+  --learning_rate=1e-04 \
+  --seed=42 \
+  --output_dir="output_dir" \
+  --gradient_checkpointing \
+  --mixed_precision="bf16" \
+  --adam_weight_decay=3e-2 \
+  --adam_epsilon=1e-10 \
+  --vae_mini_batch=1 \
+  --max_grad_norm=0.05 \
+  --enable_bucket \
+  --uniform_sampling
+```
+
+With DeepSpeed zero-3:
+
+After training, you can use the following command to get the final model:
+```sh
+python scripts/zero_to_bf16.py output_dir/checkpoint-{your-num-steps} output_dir/checkpoint-{your-num-steps}-outputs --max_shard_size 80GB --safe_serialization
+```
+
+The training shell command is as follows:
+```sh
+export MODEL_NAME="models/Diffusion_Transformer/FLUX.1-dev"
+export DATASET_NAME="datasets/internal_datasets/"
+export DATASET_META_NAME="datasets/internal_datasets/metadata.json"
+# NCCL_IB_DISABLE=1 and NCCL_P2P_DISABLE=1 are used in multi nodes without RDMA.
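+# Note: the bare `NCCL_DEBUG=INFO` assignment below only sets an unexported shell
+# variable; if the NCCL logs should reach the launched training process, use
+# `export NCCL_DEBUG=INFO` instead.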
+# export NCCL_IB_DISABLE=1 +# export NCCL_P2P_DISABLE=1 +NCCL_DEBUG=INFO + +accelerate launch --zero_stage 3 --zero3_save_16bit_model true --zero3_init_flag true --use_deepspeed --deepspeed_config_file config/zero_stage3_config.json --deepspeed_multinode_launcher standard scripts/flux/train_lora.py \ + --pretrained_model_name_or_path=$MODEL_NAME \ + --train_data_dir=$DATASET_NAME \ + --train_data_meta=$DATASET_META_NAME \ + --train_batch_size=1 \ + --image_sample_size=1024 \ + --gradient_accumulation_steps=1 \ + --dataloader_num_workers=8 \ + --num_train_epochs=100 \ + --checkpointing_steps=50 \ + --learning_rate=1e-04 \ + --seed=42 \ + --output_dir="output_dir" \ + --gradient_checkpointing \ + --mixed_precision="bf16" \ + --adam_weight_decay=3e-2 \ + --adam_epsilon=1e-10 \ + --vae_mini_batch=1 \ + --max_grad_norm=0.05 \ + --enable_bucket \ + --uniform_sampling +``` + +With FSDP: + +```sh +export MODEL_NAME="models/Diffusion_Transformer/Wan2.2-Fun-A14B-InP" +export DATASET_NAME="datasets/internal_datasets/" +export DATASET_META_NAME="datasets/internal_datasets/metadata.json" +# NCCL_IB_DISABLE=1 and NCCL_P2P_DISABLE=1 are used in multi nodes without RDMA. +# export NCCL_IB_DISABLE=1 +# export NCCL_P2P_DISABLE=1 +NCCL_DEBUG=INFO + +accelerate launch --mixed_precision="bf16" --use_fsdp --fsdp_auto_wrap_policy TRANSFORMER_BASED_WRAP --fsdp_transformer_layer_cls_to_wrap FluxSingleTransformerBlock,FluxTransformerBlock --fsdp_sharding_strategy "FULL_SHARD" --fsdp_state_dict_type=SHARDED_STATE_DICT --fsdp_backward_prefetch "BACKWARD_PRE" --fsdp_cpu_ram_efficient_loading False scripts/flux/train_lora.py \ + --pretrained_model_name_or_path=$MODEL_NAME \ + --train_data_dir=$DATASET_NAME \ + --train_data_meta=$DATASET_META_NAME \ + --train_batch_size=1 \ + --image_sample_size=1024 \ + --gradient_accumulation_steps=1 \ + --dataloader_num_workers=8 \ + --num_train_epochs=100 \ + --checkpointing_steps=50 \ + --learning_rate=1e-04 \ + --seed=42 \ + --output_dir="output_dir" \ + --gradient_checkpointing \ + --mixed_precision="bf16" \ + --adam_weight_decay=3e-2 \ + --adam_epsilon=1e-10 \ + --vae_mini_batch=1 \ + --max_grad_norm=0.05 \ + --enable_bucket \ + --uniform_sampling +``` \ No newline at end of file diff --git a/scripts/flux/train.py b/scripts/flux/train.py index 20e78d4e..6a840b63 100644 --- a/scripts/flux/train.py +++ b/scripts/flux/train.py @@ -1576,7 +1576,6 @@ def get_sigmas(timesteps, n_dim=4, dtype=torch.float32): # Predict the noise residual with torch.cuda.amp.autocast(dtype=weight_dtype), torch.cuda.device(device=accelerator.device): - print(noisy_latents.size(), prompt_embeds.size(), pooled_prompt_embeds.size(), text_ids.size(), latent_image_ids.size()) noise_pred = transformer3d( hidden_states=noisy_latents, timestep=timesteps / 1000, diff --git a/scripts/flux/train_lora.py b/scripts/flux/train_lora.py index 9274e11d..1b036736 100644 --- a/scripts/flux/train_lora.py +++ b/scripts/flux/train_lora.py @@ -1571,7 +1571,6 @@ def get_sigmas(timesteps, n_dim=4, dtype=torch.float32): # Predict the noise residual with torch.cuda.amp.autocast(dtype=weight_dtype), torch.cuda.device(device=accelerator.device): - print(noisy_latents.size(), prompt_embeds.size(), pooled_prompt_embeds.size(), text_ids.size(), latent_image_ids.size()) noise_pred = transformer3d( hidden_states=noisy_latents, timestep=timesteps / 1000, From a394dda232c711f531aa5f2511cf259872210102 Mon Sep 17 00:00:00 2001 From: bubbliiiing <3323290568@qq.com> Date: Wed, 17 Sep 2025 16:01:11 +0800 Subject: [PATCH 2/8] Update Lora load && 
Vace training code --- comfyui/comfyui_nodes.py | 28 + examples/cogvideox_fun/predict_i2v.py | 4 +- examples/cogvideox_fun/predict_t2v.py | 4 +- examples/cogvideox_fun/predict_v2v.py | 4 +- examples/cogvideox_fun/predict_v2v_control.py | 4 +- examples/flux/predict_t2i.py | 4 +- examples/phantom/predict_s2v.py | 4 +- examples/qwenimage/predict_t2i.py | 4 +- examples/wan2.1/predict_i2v.py | 4 +- examples/wan2.1/predict_t2v.py | 4 +- examples/wan2.1_fun/predict_i2v.py | 4 +- examples/wan2.1_fun/predict_t2v.py | 4 +- examples/wan2.1_fun/predict_v2v_control.py | 4 +- .../wan2.1_fun/predict_v2v_control_camera.py | 4 +- .../wan2.1_fun/predict_v2v_control_ref.py | 4 +- examples/wan2.1_vace/predict_i2v.py | 4 +- examples/wan2.1_vace/predict_s2v.py | 4 +- examples/wan2.1_vace/predict_v2v_control.py | 4 +- examples/wan2.2/predict_i2v.py | 8 +- examples/wan2.2/predict_s2v.py | 8 +- examples/wan2.2/predict_t2v.py | 8 +- examples/wan2.2/predict_ti2v.py | 8 +- examples/wan2.2_fun/predict_i2v.py | 8 +- examples/wan2.2_fun/predict_i2v_5b.py | 8 +- examples/wan2.2_fun/predict_t2v.py | 8 +- examples/wan2.2_fun/predict_t2v_5b.py | 8 +- examples/wan2.2_fun/predict_v2v_control.py | 8 +- examples/wan2.2_fun/predict_v2v_control_5b.py | 8 +- .../wan2.2_fun/predict_v2v_control_camera.py | 8 +- .../predict_v2v_control_camera_5b.py | 8 +- .../wan2.2_fun/predict_v2v_control_ref.py | 8 +- .../wan2.2_fun/predict_v2v_control_ref_5b.py | 8 +- examples/wan2.2_vace_fun/predict_i2v.py | 8 +- examples/wan2.2_vace_fun/predict_s2v.py | 8 +- .../wan2.2_vace_fun/predict_v2v_control.py | 8 +- .../predict_v2v_control_ref.py | 8 +- scripts/wan2.2_vace_fun/README_TRAIN.md | 257 ++ scripts/wan2.2_vace_fun/train.py | 2078 +++++++++++++++++ scripts/wan2.2_vace_fun/train.sh | 43 + videox_fun/models/wan_transformer3d_vace.py | 11 + videox_fun/utils/lora_utils.py | 16 + 41 files changed, 2539 insertions(+), 106 deletions(-) create mode 100755 scripts/wan2.2_vace_fun/README_TRAIN.md create mode 100644 scripts/wan2.2_vace_fun/train.py create mode 100644 scripts/wan2.2_vace_fun/train.sh diff --git a/comfyui/comfyui_nodes.py b/comfyui/comfyui_nodes.py index 25c7e69b..a40efe0f 100755 --- a/comfyui/comfyui_nodes.py +++ b/comfyui/comfyui_nodes.py @@ -1,4 +1,5 @@ import json +import os import cv2 import numpy as np @@ -106,6 +107,31 @@ def compile(self, cache_size_limit, funmodels): print("Add Compile") return (funmodels,) + +class FunAttention: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "attention_type": ( + ["flash", "sage", "torch"], + {"default": "flash"}, + ), + "funmodels": ("FunModels",) + } + } + RETURN_TYPES = ("FunModels",) + RETURN_NAMES = ("funmodels",) + FUNCTION = "funattention" + CATEGORY = "CogVideoXFUNWrapper" + + def funattention(self, attention_type, funmodels): + os.environ['VIDEOX_ATTENTION_TYPE'] = { + "flash": "FLASH_ATTENTION", + "sage": "SAGE_ATTENTION", + "torch": "TORCH_SCALED_DOT" + }[attention_type] + return (funmodels,) class LoadConfig: @classmethod @@ -376,6 +402,7 @@ def run(self,camera_pose,fx,fy,cx,cy): "FunTextBox": FunTextBox, "FunRiflex": FunRiflex, "FunCompile": FunCompile, + "FunAttention": FunAttention, "LoadCogVideoXFunModel": LoadCogVideoXFunModel, "LoadCogVideoXFunLora": LoadCogVideoXFunLora, @@ -436,6 +463,7 @@ def run(self,camera_pose,fx,fy,cx,cy): "FunTextBox": "FunTextBox", "FunRiflex": "FunRiflex", "FunCompile": "FunCompile", + "FunAttention": "FunAttention", "LoadWanClipEncoderModel": "Load Wan ClipEncoder Model", "LoadWanTextEncoderModel": "Load Wan TextEncoder Model", diff 
--git a/examples/cogvideox_fun/predict_i2v.py b/examples/cogvideox_fun/predict_i2v.py index 112fe025..0475af23 100755 --- a/examples/cogvideox_fun/predict_i2v.py +++ b/examples/cogvideox_fun/predict_i2v.py @@ -202,7 +202,7 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) if partial_video_length is not None: partial_video_length = int((partial_video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 @@ -292,7 +292,7 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) def save_results(): if not os.path.exists(save_path): diff --git a/examples/cogvideox_fun/predict_t2v.py b/examples/cogvideox_fun/predict_t2v.py index 5133955e..376615b0 100755 --- a/examples/cogvideox_fun/predict_t2v.py +++ b/examples/cogvideox_fun/predict_t2v.py @@ -194,7 +194,7 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) with torch.no_grad(): video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 @@ -232,7 +232,7 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) def save_results(): if not os.path.exists(save_path): diff --git a/examples/cogvideox_fun/predict_v2v.py b/examples/cogvideox_fun/predict_v2v.py index b1d7a435..84d10192 100755 --- a/examples/cogvideox_fun/predict_v2v.py +++ b/examples/cogvideox_fun/predict_v2v.py @@ -201,7 +201,7 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 latent_frames = (video_length - 1) // vae.config.temporal_compression_ratio + 1 @@ -227,7 +227,7 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) def save_results(): if not os.path.exists(save_path): diff --git a/examples/cogvideox_fun/predict_v2v_control.py b/examples/cogvideox_fun/predict_v2v_control.py index 3668d3bc..1193d33c 100755 --- a/examples/cogvideox_fun/predict_v2v_control.py +++ b/examples/cogvideox_fun/predict_v2v_control.py @@ -188,7 +188,7 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 
1 else 1 latent_frames = (video_length - 1) // vae.config.temporal_compression_ratio + 1 @@ -212,7 +212,7 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) def save_results(): if not os.path.exists(save_path): diff --git a/examples/flux/predict_t2i.py b/examples/flux/predict_t2i.py index bfd6bc77..c8518bf0 100644 --- a/examples/flux/predict_t2i.py +++ b/examples/flux/predict_t2i.py @@ -184,7 +184,7 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) with torch.no_grad(): sample = pipeline( @@ -198,7 +198,7 @@ ).images if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) def save_results(): if not os.path.exists(save_path): diff --git a/examples/phantom/predict_s2v.py b/examples/phantom/predict_s2v.py index 89e3b824..0d7e8f4e 100644 --- a/examples/phantom/predict_s2v.py +++ b/examples/phantom/predict_s2v.py @@ -244,7 +244,7 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) with torch.no_grad(): video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 @@ -272,7 +272,7 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) def save_results(): if not os.path.exists(save_path): diff --git a/examples/qwenimage/predict_t2i.py b/examples/qwenimage/predict_t2i.py index 27697dfd..51c5a4c3 100644 --- a/examples/qwenimage/predict_t2i.py +++ b/examples/qwenimage/predict_t2i.py @@ -176,7 +176,7 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) with torch.no_grad(): sample = pipeline( @@ -190,7 +190,7 @@ ).images if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) def save_results(): if not os.path.exists(save_path): diff --git a/examples/wan2.1/predict_i2v.py b/examples/wan2.1/predict_i2v.py index 53459801..c2de0184 100755 --- a/examples/wan2.1/predict_i2v.py +++ b/examples/wan2.1/predict_i2v.py @@ -245,7 +245,7 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) with torch.no_grad(): video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 @@ -273,7 +273,7 @@ ).videos if lora_path is not None: - pipeline = 
unmerge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) def save_results(): if not os.path.exists(save_path): diff --git a/examples/wan2.1/predict_t2v.py b/examples/wan2.1/predict_t2v.py index 26bd668c..2e96d13b 100755 --- a/examples/wan2.1/predict_t2v.py +++ b/examples/wan2.1/predict_t2v.py @@ -232,7 +232,7 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) with torch.no_grad(): video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 @@ -254,7 +254,7 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) def save_results(): if not os.path.exists(save_path): diff --git a/examples/wan2.1_fun/predict_i2v.py b/examples/wan2.1_fun/predict_i2v.py index 52baeb59..dc86a68b 100755 --- a/examples/wan2.1_fun/predict_i2v.py +++ b/examples/wan2.1_fun/predict_i2v.py @@ -246,7 +246,7 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) with torch.no_grad(): video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 @@ -274,7 +274,7 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) def save_results(): if not os.path.exists(save_path): diff --git a/examples/wan2.1_fun/predict_t2v.py b/examples/wan2.1_fun/predict_t2v.py index fb085fba..bfb27598 100755 --- a/examples/wan2.1_fun/predict_t2v.py +++ b/examples/wan2.1_fun/predict_t2v.py @@ -253,7 +253,7 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) with torch.no_grad(): video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 @@ -293,7 +293,7 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) def save_results(): if not os.path.exists(save_path): diff --git a/examples/wan2.1_fun/predict_v2v_control.py b/examples/wan2.1_fun/predict_v2v_control.py index 6810cd61..85a74340 100755 --- a/examples/wan2.1_fun/predict_v2v_control.py +++ b/examples/wan2.1_fun/predict_v2v_control.py @@ -256,7 +256,7 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) with torch.no_grad(): video_length = int((video_length - 1) // 
vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 @@ -305,7 +305,7 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) def save_results(): if not os.path.exists(save_path): diff --git a/examples/wan2.1_fun/predict_v2v_control_camera.py b/examples/wan2.1_fun/predict_v2v_control_camera.py index d66077b4..40adc130 100755 --- a/examples/wan2.1_fun/predict_v2v_control_camera.py +++ b/examples/wan2.1_fun/predict_v2v_control_camera.py @@ -256,7 +256,7 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) with torch.no_grad(): video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 @@ -305,7 +305,7 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) def save_results(): if not os.path.exists(save_path): diff --git a/examples/wan2.1_fun/predict_v2v_control_ref.py b/examples/wan2.1_fun/predict_v2v_control_ref.py index 5f01abc6..b7115443 100755 --- a/examples/wan2.1_fun/predict_v2v_control_ref.py +++ b/examples/wan2.1_fun/predict_v2v_control_ref.py @@ -256,7 +256,7 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) with torch.no_grad(): video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 @@ -305,7 +305,7 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) def save_results(): if not os.path.exists(save_path): diff --git a/examples/wan2.1_vace/predict_i2v.py b/examples/wan2.1_vace/predict_i2v.py index 47667b30..050d4010 100644 --- a/examples/wan2.1_vace/predict_i2v.py +++ b/examples/wan2.1_vace/predict_i2v.py @@ -247,7 +247,7 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) with torch.no_grad(): video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 @@ -283,7 +283,7 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) def save_results(): if not os.path.exists(save_path): diff --git a/examples/wan2.1_vace/predict_s2v.py b/examples/wan2.1_vace/predict_s2v.py index e78a0348..c44c937a 100644 --- a/examples/wan2.1_vace/predict_s2v.py +++ b/examples/wan2.1_vace/predict_s2v.py @@ -247,7 +247,7 @@ generator = 
torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) with torch.no_grad(): video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 @@ -283,7 +283,7 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) def save_results(): if not os.path.exists(save_path): diff --git a/examples/wan2.1_vace/predict_v2v_control.py b/examples/wan2.1_vace/predict_v2v_control.py index 3f58b9a0..2c09c189 100644 --- a/examples/wan2.1_vace/predict_v2v_control.py +++ b/examples/wan2.1_vace/predict_v2v_control.py @@ -247,7 +247,7 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) with torch.no_grad(): video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 @@ -283,7 +283,7 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) def save_results(): if not os.path.exists(save_path): diff --git a/examples/wan2.2/predict_i2v.py b/examples/wan2.2/predict_i2v.py index 7369e99c..52c93c2d 100644 --- a/examples/wan2.2/predict_i2v.py +++ b/examples/wan2.2/predict_i2v.py @@ -278,8 +278,8 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) - pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) + pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") with torch.no_grad(): video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 @@ -308,8 +308,8 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) - pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) + pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") def save_results(): if not os.path.exists(save_path): diff --git a/examples/wan2.2/predict_s2v.py b/examples/wan2.2/predict_s2v.py index 98c0a1da..82876ebb 100644 --- a/examples/wan2.2/predict_s2v.py +++ b/examples/wan2.2/predict_s2v.py @@ -302,9 +302,9 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) if 
transformer_2 is not None: - pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") with torch.no_grad(): video_length = video_length // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio if video_length != 1 else 1 @@ -339,8 +339,8 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) - pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) + pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") def save_results(): if not os.path.exists(save_path): diff --git a/examples/wan2.2/predict_t2v.py b/examples/wan2.2/predict_t2v.py index a68c6e4a..28d56cef 100755 --- a/examples/wan2.2/predict_t2v.py +++ b/examples/wan2.2/predict_t2v.py @@ -273,8 +273,8 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) - pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) + pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") with torch.no_grad(): video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 @@ -298,8 +298,8 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) - pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) + pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") def save_results(): if not os.path.exists(save_path): diff --git a/examples/wan2.2/predict_ti2v.py b/examples/wan2.2/predict_ti2v.py index 548f599c..d039210c 100755 --- a/examples/wan2.2/predict_ti2v.py +++ b/examples/wan2.2/predict_ti2v.py @@ -290,9 +290,9 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) if transformer_2 is not None: - pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") with torch.no_grad(): video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 @@ -325,9 +325,9 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, 
device=device, dtype=weight_dtype) if transformer_2 is not None: - pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") def save_results(): if not os.path.exists(save_path): diff --git a/examples/wan2.2_fun/predict_i2v.py b/examples/wan2.2_fun/predict_i2v.py index 0b1d0379..79181b4d 100644 --- a/examples/wan2.2_fun/predict_i2v.py +++ b/examples/wan2.2_fun/predict_i2v.py @@ -292,9 +292,9 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) if transformer_2 is not None: - pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") with torch.no_grad(): video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 @@ -324,9 +324,9 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) if transformer_2 is not None: - pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") def save_results(): if not os.path.exists(save_path): diff --git a/examples/wan2.2_fun/predict_i2v_5b.py b/examples/wan2.2_fun/predict_i2v_5b.py index 3c5468f8..1bbccaa0 100644 --- a/examples/wan2.2_fun/predict_i2v_5b.py +++ b/examples/wan2.2_fun/predict_i2v_5b.py @@ -294,9 +294,9 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) if transformer_2 is not None: - pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") with torch.no_grad(): video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 @@ -326,9 +326,9 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) if transformer_2 is not None: - pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") def save_results(): if not os.path.exists(save_path): diff --git a/examples/wan2.2_fun/predict_t2v.py b/examples/wan2.2_fun/predict_t2v.py index 0f7c5bfc..347128ac 100644 --- 
a/examples/wan2.2_fun/predict_t2v.py +++ b/examples/wan2.2_fun/predict_t2v.py @@ -277,8 +277,8 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) - pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) + pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") with torch.no_grad(): video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 @@ -307,8 +307,8 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) - pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) + pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") def save_results(): if not os.path.exists(save_path): diff --git a/examples/wan2.2_fun/predict_t2v_5b.py b/examples/wan2.2_fun/predict_t2v_5b.py index 9f501b39..49c5bb23 100644 --- a/examples/wan2.2_fun/predict_t2v_5b.py +++ b/examples/wan2.2_fun/predict_t2v_5b.py @@ -288,9 +288,9 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) if transformer_2 is not None: - pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") with torch.no_grad(): video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 @@ -320,9 +320,9 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) if transformer_2 is not None: - pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") def save_results(): if not os.path.exists(save_path): diff --git a/examples/wan2.2_fun/predict_v2v_control.py b/examples/wan2.2_fun/predict_v2v_control.py index 39ed056f..b707b936 100644 --- a/examples/wan2.2_fun/predict_v2v_control.py +++ b/examples/wan2.2_fun/predict_v2v_control.py @@ -305,9 +305,9 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) if transformer_2 is not None: - pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = merge_lora(pipeline, 
lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") with torch.no_grad(): video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 @@ -351,9 +351,9 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) if transformer_2 is not None: - pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") def save_results(): if not os.path.exists(save_path): diff --git a/examples/wan2.2_fun/predict_v2v_control_5b.py b/examples/wan2.2_fun/predict_v2v_control_5b.py index a7ebb5e3..16dab5d6 100644 --- a/examples/wan2.2_fun/predict_v2v_control_5b.py +++ b/examples/wan2.2_fun/predict_v2v_control_5b.py @@ -305,9 +305,9 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) if transformer_2 is not None: - pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") with torch.no_grad(): video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 @@ -351,9 +351,9 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) if transformer_2 is not None: - pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") def save_results(): if not os.path.exists(save_path): diff --git a/examples/wan2.2_fun/predict_v2v_control_camera.py b/examples/wan2.2_fun/predict_v2v_control_camera.py index eeb6ee15..f112394a 100644 --- a/examples/wan2.2_fun/predict_v2v_control_camera.py +++ b/examples/wan2.2_fun/predict_v2v_control_camera.py @@ -305,9 +305,9 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) if transformer_2 is not None: - pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") with torch.no_grad(): video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 @@ -351,9 +351,9 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = 
unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) if transformer_2 is not None: - pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") def save_results(): if not os.path.exists(save_path): diff --git a/examples/wan2.2_fun/predict_v2v_control_camera_5b.py b/examples/wan2.2_fun/predict_v2v_control_camera_5b.py index d0b9fbba..bc7e4944 100644 --- a/examples/wan2.2_fun/predict_v2v_control_camera_5b.py +++ b/examples/wan2.2_fun/predict_v2v_control_camera_5b.py @@ -305,9 +305,9 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) if transformer_2 is not None: - pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") with torch.no_grad(): video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 @@ -351,9 +351,9 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) if transformer_2 is not None: - pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") def save_results(): if not os.path.exists(save_path): diff --git a/examples/wan2.2_fun/predict_v2v_control_ref.py b/examples/wan2.2_fun/predict_v2v_control_ref.py index f5bb16ca..e842870e 100644 --- a/examples/wan2.2_fun/predict_v2v_control_ref.py +++ b/examples/wan2.2_fun/predict_v2v_control_ref.py @@ -305,9 +305,9 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) if transformer_2 is not None: - pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") with torch.no_grad(): video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 @@ -351,9 +351,9 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) if transformer_2 is not None: - pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") def save_results(): if not 
os.path.exists(save_path): diff --git a/examples/wan2.2_fun/predict_v2v_control_ref_5b.py b/examples/wan2.2_fun/predict_v2v_control_ref_5b.py index c4c2b704..bf9bd04e 100644 --- a/examples/wan2.2_fun/predict_v2v_control_ref_5b.py +++ b/examples/wan2.2_fun/predict_v2v_control_ref_5b.py @@ -305,9 +305,9 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) if transformer_2 is not None: - pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") with torch.no_grad(): video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 @@ -351,9 +351,9 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) if transformer_2 is not None: - pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") def save_results(): if not os.path.exists(save_path): diff --git a/examples/wan2.2_vace_fun/predict_i2v.py b/examples/wan2.2_vace_fun/predict_i2v.py index 276d7790..a1543c3a 100644 --- a/examples/wan2.2_vace_fun/predict_i2v.py +++ b/examples/wan2.2_vace_fun/predict_i2v.py @@ -306,9 +306,9 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) if transformer_2 is not None: - pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") with torch.no_grad(): video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 @@ -347,9 +347,9 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) if transformer_2 is not None: - pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") def save_results(): if not os.path.exists(save_path): diff --git a/examples/wan2.2_vace_fun/predict_s2v.py b/examples/wan2.2_vace_fun/predict_s2v.py index ec0d2103..9ea61bc7 100644 --- a/examples/wan2.2_vace_fun/predict_s2v.py +++ b/examples/wan2.2_vace_fun/predict_s2v.py @@ -306,9 +306,9 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = 
merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) if transformer_2 is not None: - pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") with torch.no_grad(): video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 @@ -347,9 +347,9 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) if transformer_2 is not None: - pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") def save_results(): if not os.path.exists(save_path): diff --git a/examples/wan2.2_vace_fun/predict_v2v_control.py b/examples/wan2.2_vace_fun/predict_v2v_control.py index e679206b..e29f2f2c 100644 --- a/examples/wan2.2_vace_fun/predict_v2v_control.py +++ b/examples/wan2.2_vace_fun/predict_v2v_control.py @@ -306,9 +306,9 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) if transformer_2 is not None: - pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") with torch.no_grad(): video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 @@ -347,9 +347,9 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) if transformer_2 is not None: - pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") def save_results(): if not os.path.exists(save_path): diff --git a/examples/wan2.2_vace_fun/predict_v2v_control_ref.py b/examples/wan2.2_vace_fun/predict_v2v_control_ref.py index f846f71d..c5e51d62 100644 --- a/examples/wan2.2_vace_fun/predict_v2v_control_ref.py +++ b/examples/wan2.2_vace_fun/predict_v2v_control_ref.py @@ -306,9 +306,9 @@ generator = torch.Generator(device=device).manual_seed(seed) if lora_path is not None: - pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) if transformer_2 is not None: - pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") with torch.no_grad(): video_length = 
int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 @@ -347,9 +347,9 @@ ).videos if lora_path is not None: - pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device) + pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device, dtype=weight_dtype) if transformer_2 is not None: - pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2") + pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, dtype=weight_dtype, sub_transformer_name="transformer_2") def save_results(): if not os.path.exists(save_path): diff --git a/scripts/wan2.2_vace_fun/README_TRAIN.md b/scripts/wan2.2_vace_fun/README_TRAIN.md new file mode 100755 index 00000000..bad187b5 --- /dev/null +++ b/scripts/wan2.2_vace_fun/README_TRAIN.md @@ -0,0 +1,257 @@ +## Training Code + +We can choose whether to use deepspeed or fsdp in Wan-Fun, which can save a lot of video memory. + +The metadata_control.json is a little different from the normal json in Wan-Fun: you need to add a control_file_path, and [DWPose](https://github.com/IDEA-Research/DWPose) is suggested as a tool to generate the control files. + +```json +[ + { + "file_path": "train/00000001.mp4", + "control_file_path": "control/00000001.mp4", + "object_file_path": ["object/1.jpg", "object/2.jpg"], + "text": "A group of young men in suits and sunglasses are walking down a city street.", + "type": "video" + }, + { + "file_path": "train/00000002.jpg", + "control_file_path": "control/00000002.jpg", + "object_file_path": ["object/1.jpg", "object/2.jpg"], + "text": "Ba Da Ba Ba Ba Ba.", + "type": "image" + }, + ..... +] +``` + +Some parameters in the sh file can be confusing, and they are explained in this document: + +- `enable_bucket` is used to enable bucket training. When enabled, the model does not crop the images and videos at the center, but instead, it trains the entire images and videos after grouping them into buckets based on resolution. +- `random_frame_crop` is used for random cropping on video frames to simulate videos with different frame counts. +- `random_hw_adapt` is used to enable automatic height and width scaling for images and videos. When `random_hw_adapt` is enabled, the training images will have their height and width set to `image_sample_size` as the maximum and `min(video_sample_size, 512)` as the minimum. For training videos, the height and width will be set to `video_sample_size` as the maximum and `min(video_sample_size, 512)` as the minimum. + - For example, when `random_hw_adapt` is enabled, with `video_sample_n_frames=49`, `video_sample_size=1024`, and `image_sample_size=1024`, the resolution of image inputs for training is `512x512` to `1024x1024`, and the resolution of video inputs for training is `512x512x49` to `1024x1024x49`. + - For example, when `random_hw_adapt` is enabled, with `video_sample_n_frames=49`, `video_sample_size=256`, and `image_sample_size=1024`, the resolution of image inputs for training is `256x256` to `1024x1024`, and the resolution of video inputs for training is `256x256x49`. +- `training_with_video_token_length` specifies training the model according to token length. For training images and videos, the height and width will be set to `image_sample_size` as the maximum and `video_sample_size` as the minimum. 
+ - For example, when `training_with_video_token_length` is enabled, with `video_sample_n_frames=49`, `token_sample_size=1024`, `video_sample_size=256`, and `image_sample_size=1024`, the resolution of image inputs for training is `256x256` to `1024x1024`, and the resolution of video inputs for training is `256x256x49` to `1024x1024x49`. + - For example, when `training_with_video_token_length` is enabled, with `video_sample_n_frames=49`, `token_sample_size=512`, `video_sample_size=256`, and `image_sample_size=1024`, the resolution of image inputs for training is `256x256` to `1024x1024`, and the resolution of video inputs for training is `256x256x49` to `1024x1024x9`. + - The token length for a video with dimensions 512x512 and 49 frames is 13,312. We need to set `token_sample_size = 512`. + - At 512x512 resolution, the number of video frames is 49 (~= 512 * 512 * 49 / 512 / 512). + - At 768x768 resolution, the number of video frames is 21 (~= 512 * 512 * 49 / 768 / 768). + - At 1024x1024 resolution, the number of video frames is 9 (~= 512 * 512 * 49 / 1024 / 1024). + - These resolutions combined with their corresponding lengths allow the model to generate videos of different sizes. +- `resume_from_checkpoint` is used to specify whether training should be resumed from a previous checkpoint. Use a path or `"latest"` to automatically select the last available checkpoint. +- `train_mode` is used to set the training mode. + - The models named `Wan2.1-Fun-*-Control` are trained in the `control_ref` mode. + - The models named `Wan2.1-Fun-*-Control-Camera` are trained in the `control_ref_camera` mode. +- `control_ref_image` is used to specify the type of control image. The available options are `first_frame` and `random`. + - `first_frame` is used in V1.0 because V1.0 supports using a specified start frame as the control image. The Control-Camera models use the first frame as the control image. + - `random` is used in V1.1 because V1.1 supports both using a specified start frame and a reference image as the control image. +- `boundary_type`: The Wan2.2 series includes two distinct models that handle different noise levels, specified via the `boundary_type` parameter. `low`: Corresponds to the **low noise model** (low_noise_model). `high`: Corresponds to the **high noise model** (high_noise_model). `full`: Corresponds to the ti2v 5B model (single mode). + +When training the model on multiple machines, please set the parameters as follows: +```sh +export MASTER_ADDR="your master address" +export MASTER_PORT=10086 +export WORLD_SIZE=1 # The number of machines +export NUM_PROCESS=8 # The number of processes, such as WORLD_SIZE * 8 +export RANK=0 # The rank of this machine + +accelerate launch --mixed_precision="bf16" --main_process_ip=$MASTER_ADDR --main_process_port=$MASTER_PORT --num_machines=$WORLD_SIZE --num_processes=$NUM_PROCESS --machine_rank=$RANK scripts/xxx/xxx.py +``` + +Wan-Fun-Control without deepspeed: +```sh +export MODEL_NAME="models/Diffusion_Transformer/Wan2.2-VACE-Fun-A14B" +export DATASET_NAME="datasets/internal_datasets/" +export DATASET_META_NAME="datasets/internal_datasets/metadata.json" +# NCCL_IB_DISABLE=1 and NCCL_P2P_DISABLE=1 are used in multi nodes without RDMA. 
+# export NCCL_IB_DISABLE=1 +# export NCCL_P2P_DISABLE=1 +NCCL_DEBUG=INFO + +accelerate launch --mixed_precision="bf16" scripts/wan2.2_vace_fun/train.py \ + --config_path="config/wan2.2/wan_civitai_t2v.yaml" \ + --pretrained_model_name_or_path=$MODEL_NAME \ + --train_data_dir=$DATASET_NAME \ + --train_data_meta=$DATASET_META_NAME \ + --image_sample_size=1024 \ + --video_sample_size=256 \ + --token_sample_size=512 \ + --video_sample_stride=2 \ + --video_sample_n_frames=81 \ + --train_batch_size=1 \ + --video_repeat=1 \ + --gradient_accumulation_steps=1 \ + --dataloader_num_workers=8 \ + --num_train_epochs=100 \ + --checkpointing_steps=50 \ + --learning_rate=2e-05 \ + --lr_scheduler="constant_with_warmup" \ + --lr_warmup_steps=100 \ + --seed=42 \ + --output_dir="output_dir" \ + --gradient_checkpointing \ + --mixed_precision="bf16" \ + --adam_weight_decay=3e-2 \ + --adam_epsilon=1e-10 \ + --vae_mini_batch=1 \ + --max_grad_norm=0.05 \ + --random_hw_adapt \ + --training_with_video_token_length \ + --enable_bucket \ + --uniform_sampling \ + --low_vram \ + --control_ref_image="random" \ + --boundary_type="low" \ + --trainable_modules "vace" +``` + +Wan-Fun-Control with deepspeed: +```sh +export MODEL_NAME="models/Diffusion_Transformer/Wan2.2-VACE-Fun-A14B" +export DATASET_NAME="datasets/internal_datasets/" +export DATASET_META_NAME="datasets/internal_datasets/metadata.json" +# NCCL_IB_DISABLE=1 and NCCL_P2P_DISABLE=1 are used in multi nodes without RDMA. +# export NCCL_IB_DISABLE=1 +# export NCCL_P2P_DISABLE=1 +NCCL_DEBUG=INFO + +accelerate launch --use_deepspeed --deepspeed_config_file config/zero_stage2_config.json --deepspeed_multinode_launcher standard scripts/wan2.2_vace_fun/train.py \ + --config_path="config/wan2.2/wan_civitai_t2v.yaml" \ + --pretrained_model_name_or_path=$MODEL_NAME \ + --train_data_dir=$DATASET_NAME \ + --train_data_meta=$DATASET_META_NAME \ + --image_sample_size=1024 \ + --video_sample_size=256 \ + --token_sample_size=512 \ + --video_sample_stride=2 \ + --video_sample_n_frames=81 \ + --train_batch_size=1 \ + --video_repeat=1 \ + --gradient_accumulation_steps=1 \ + --dataloader_num_workers=8 \ + --num_train_epochs=100 \ + --checkpointing_steps=50 \ + --learning_rate=2e-05 \ + --lr_scheduler="constant_with_warmup" \ + --lr_warmup_steps=100 \ + --seed=42 \ + --output_dir="output_dir" \ + --gradient_checkpointing \ + --mixed_precision="bf16" \ + --adam_weight_decay=3e-2 \ + --adam_epsilon=1e-10 \ + --vae_mini_batch=1 \ + --max_grad_norm=0.05 \ + --random_hw_adapt \ + --training_with_video_token_length \ + --enable_bucket \ + --uniform_sampling \ + --low_vram \ + --control_ref_image="random" \ + --boundary_type="low" \ + --trainable_modules "vace" +``` + +Wan-Fun-Control with deepspeed zero-3: + +Wan with DeepSpeed Zero-3 is suitable for 14B Wan at high resolutions. After training, you can use the following command to get the final model: +```sh +python scripts/zero_to_bf16.py output_dir/checkpoint-{your-num-steps} output_dir/checkpoint-{your-num-steps}-outputs --max_shard_size 80GB --safe_serialization +``` + +The training shell command is as follows: +```sh +export MODEL_NAME="models/Diffusion_Transformer/Wan2.2-VACE-Fun-A14B" +export DATASET_NAME="datasets/internal_datasets/" +export DATASET_META_NAME="datasets/internal_datasets/metadata.json" +# NCCL_IB_DISABLE=1 and NCCL_P2P_DISABLE=1 are used in multi nodes without RDMA. 
+# export NCCL_IB_DISABLE=1 +# export NCCL_P2P_DISABLE=1 +NCCL_DEBUG=INFO + +accelerate launch --zero_stage 3 --zero3_save_16bit_model true --zero3_init_flag true --use_deepspeed --deepspeed_config_file config/zero_stage3_config.json --deepspeed_multinode_launcher standard scripts/wan2.2_vace_fun/train.py \ + --config_path="config/wan2.2/wan_civitai_t2v.yaml" \ + --pretrained_model_name_or_path=$MODEL_NAME \ + --train_data_dir=$DATASET_NAME \ + --train_data_meta=$DATASET_META_NAME \ + --image_sample_size=1024 \ + --video_sample_size=256 \ + --token_sample_size=512 \ + --video_sample_stride=2 \ + --video_sample_n_frames=81 \ + --train_batch_size=1 \ + --video_repeat=1 \ + --gradient_accumulation_steps=1 \ + --dataloader_num_workers=8 \ + --num_train_epochs=100 \ + --checkpointing_steps=50 \ + --learning_rate=2e-05 \ + --lr_scheduler="constant_with_warmup" \ + --lr_warmup_steps=100 \ + --seed=42 \ + --output_dir="output_dir" \ + --gradient_checkpointing \ + --mixed_precision="bf16" \ + --adam_weight_decay=3e-2 \ + --adam_epsilon=1e-10 \ + --vae_mini_batch=1 \ + --max_grad_norm=0.05 \ + --random_hw_adapt \ + --training_with_video_token_length \ + --enable_bucket \ + --uniform_sampling \ + --low_vram \ + --control_ref_image="random" \ + --boundary_type="low" \ + --trainable_modules "vace" +``` + +Wan-Fun-Control with FSDP: + +Wan with FSDP is suitable for 14B Wan at high resolutions. Training shell command is as follows: +```sh +export MODEL_NAME="models/Diffusion_Transformer/Wan2.2-VACE-Fun-A14B" +export DATASET_NAME="datasets/internal_datasets/" +export DATASET_META_NAME="datasets/internal_datasets/metadata.json" +# NCCL_IB_DISABLE=1 and NCCL_P2P_DISABLE=1 are used in multi nodes without RDMA. +# export NCCL_IB_DISABLE=1 +# export NCCL_P2P_DISABLE=1 +NCCL_DEBUG=INFO + +accelerate launch --mixed_precision="bf16" --use_fsdp --fsdp_auto_wrap_policy TRANSFORMER_BASED_WRAP --fsdp_transformer_layer_cls_to_wrap=VaceWanAttentionBlock,BaseWanAttentionBlock --fsdp_sharding_strategy "FULL_SHARD" --fsdp_state_dict_type=SHARDED_STATE_DICT --fsdp_backward_prefetch "BACKWARD_PRE" --fsdp_cpu_ram_efficient_loading False scripts/wan2.2_vace_fun/train.py \ + --config_path="config/wan2.2/wan_civitai_t2v.yaml" \ + --pretrained_model_name_or_path=$MODEL_NAME \ + --train_data_dir=$DATASET_NAME \ + --train_data_meta=$DATASET_META_NAME \ + --image_sample_size=1024 \ + --video_sample_size=256 \ + --token_sample_size=512 \ + --video_sample_stride=2 \ + --video_sample_n_frames=81 \ + --train_batch_size=1 \ + --video_repeat=1 \ + --gradient_accumulation_steps=1 \ + --dataloader_num_workers=8 \ + --num_train_epochs=100 \ + --checkpointing_steps=50 \ + --learning_rate=2e-05 \ + --lr_scheduler="constant_with_warmup" \ + --lr_warmup_steps=100 \ + --seed=42 \ + --output_dir="output_dir" \ + --gradient_checkpointing \ + --mixed_precision="bf16" \ + --adam_weight_decay=3e-2 \ + --adam_epsilon=1e-10 \ + --vae_mini_batch=1 \ + --max_grad_norm=0.05 \ + --random_hw_adapt \ + --training_with_video_token_length \ + --enable_bucket \ + --uniform_sampling \ + --low_vram \ + --control_ref_image="random" \ + --boundary_type="low" \ + --trainable_modules "vace" +``` \ No newline at end of file diff --git a/scripts/wan2.2_vace_fun/train.py b/scripts/wan2.2_vace_fun/train.py new file mode 100644 index 00000000..9af58b82 --- /dev/null +++ b/scripts/wan2.2_vace_fun/train.py @@ -0,0 +1,2078 @@ +"""Modified from https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py +""" +#!/usr/bin/env python +# 
coding=utf-8 +# Copyright 2024 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and + +import argparse +import gc +import logging +import math +import os +import pickle +import random +import shutil +import sys + +import accelerate +import diffusers +import numpy as np +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +import torchvision.transforms.functional as TF +import transformers +from accelerate import Accelerator, FullyShardedDataParallelPlugin +from accelerate.logging import get_logger +from accelerate.state import AcceleratorState +from accelerate.utils import ProjectConfiguration, set_seed +from diffusers import DDIMScheduler, FlowMatchEulerDiscreteScheduler +from diffusers.optimization import get_scheduler +from diffusers.training_utils import (EMAModel, + compute_density_for_timestep_sampling, + compute_loss_weighting_for_sd3) +from diffusers.utils import check_min_version, deprecate, is_wandb_available +from diffusers.utils.torch_utils import is_compiled_module +from einops import rearrange +from omegaconf import OmegaConf +from packaging import version +from PIL import Image +from torch.utils.data import RandomSampler +from torch.utils.tensorboard import SummaryWriter +from torchvision import transforms +from tqdm.auto import tqdm +from transformers import AutoTokenizer +from transformers.utils import ContextManagers + +import datasets + +current_file_path = os.path.abspath(__file__) +project_roots = [os.path.dirname(current_file_path), os.path.dirname(os.path.dirname(current_file_path)), os.path.dirname(os.path.dirname(os.path.dirname(current_file_path)))] +for project_root in project_roots: + sys.path.insert(0, project_root) if project_root not in sys.path else None + +from videox_fun.data.bucket_sampler import (ASPECT_RATIO_512, + ASPECT_RATIO_RANDOM_CROP_512, + ASPECT_RATIO_RANDOM_CROP_PROB, + AspectRatioBatchImageVideoSampler, + RandomSampler, get_closest_ratio) +from videox_fun.data.dataset_image_video import (ImageVideoControlDataset, + ImageVideoDataset, + ImageVideoSampler, + get_random_mask, + padding_image, + process_pose_file, + process_pose_params) +from videox_fun.models import (AutoencoderKLWan, CLIPModel, + VaceWanTransformer3DModel, WanT5EncoderModel) +from videox_fun.pipeline import Wan2_2VaceFunPipeline +from videox_fun.utils.discrete_sampler import DiscreteSampling +from videox_fun.utils.lora_utils import (create_network, merge_lora, + unmerge_lora) +from videox_fun.utils.utils import (get_image_to_video_latent, + get_video_to_video_latent, + save_videos_grid) + +if is_wandb_available(): + import wandb + + +def filter_kwargs(cls, kwargs): + import inspect + sig = inspect.signature(cls.__init__) + valid_params = set(sig.parameters.keys()) - {'self', 'cls'} + filtered_kwargs = {k: v for k, v in kwargs.items() if k in valid_params} + return filtered_kwargs + +def linear_decay(initial_value, final_value, total_steps, current_step): + if current_step >= total_steps: + return final_value + current_step = max(0, current_step) + 
step_size = (final_value - initial_value) / total_steps + current_value = initial_value + step_size * current_step + return current_value + +def generate_timestep_with_lognorm(low, high, shape, device="cpu", generator=None): + u = torch.normal(mean=0.0, std=1.0, size=shape, device=device, generator=generator) + t = 1 / (1 + torch.exp(-u)) * (high - low) + low + return torch.clip(t.to(torch.int32), low, high - 1) + +# Will error if the minimal version of diffusers is not installed. Remove at your own risks. +check_min_version("0.18.0.dev0") + +logger = get_logger(__name__, log_level="INFO") + +def log_validation(vae, text_encoder, tokenizer, clip_image_encoder, transformer3d, args, config, accelerator, weight_dtype, global_step): + try: + logger.info("Running validation... ") + + transformer3d_val = VaceWanTransformer3DModel.from_pretrained( + os.path.join(args.pretrained_model_name_or_path, config['transformer_additional_kwargs'].get('transformer_subpath', 'transformer')), + transformer_additional_kwargs=OmegaConf.to_container(config['transformer_additional_kwargs']), + ).to(weight_dtype) + transformer3d_val.load_state_dict(accelerator.unwrap_model(transformer3d).state_dict()) + scheduler = FlowMatchEulerDiscreteScheduler( + **filter_kwargs(FlowMatchEulerDiscreteScheduler, OmegaConf.to_container(config['scheduler_kwargs'])) + ) + + pipeline = Wan2_2VaceFunPipeline( + vae=accelerator.unwrap_model(vae).to(weight_dtype), + text_encoder=accelerator.unwrap_model(text_encoder), + tokenizer=tokenizer, + transformer=transformer3d_val, + scheduler=scheduler, + clip_image_encoder=clip_image_encoder, + ) + pipeline = pipeline.to(accelerator.device) + + if args.seed is None: + generator = None + else: + generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) + + images = [] + for i in range(len(args.validation_prompts)): + with torch.no_grad(): + with torch.autocast("cuda", dtype=weight_dtype): + video_length = int(args.video_sample_n_frames // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if args.video_sample_n_frames != 1 else 1 + input_video, input_video_mask, ref_image, clip_image = get_video_to_video_latent(args.validation_paths[i], video_length=video_length, sample_size=[args.video_sample_size, args.video_sample_size]) + sample = pipeline( + args.validation_prompts[i], + num_frames = video_length, + negative_prompt = "bad detailed", + height = args.video_sample_size, + width = args.video_sample_size, + generator = generator, + + control_video = input_video, + ).videos + os.makedirs(os.path.join(args.output_dir, "sample"), exist_ok=True) + save_videos_grid(sample, os.path.join(args.output_dir, f"sample/sample-{global_step}-{i}.gif")) + + del pipeline + del transformer3d_val + gc.collect() + torch.cuda.empty_cache() + torch.cuda.ipc_collect() + + return images + except Exception as e: + gc.collect() + torch.cuda.empty_cache() + torch.cuda.ipc_collect() + print(f"Eval error with info {e}") + return None + +def parse_args(): + parser = argparse.ArgumentParser(description="Simple example of a training script.") + parser.add_argument( + "--input_perturbation", type=float, default=0, help="The scale of input perturbation. Recommended 0.1." 
+ ) + parser.add_argument( + "--pretrained_model_name_or_path", + type=str, + default=None, + required=True, + help="Path to pretrained model or model identifier from huggingface.co/models.", + ) + parser.add_argument( + "--revision", + type=str, + default=None, + required=False, + help="Revision of pretrained model identifier from huggingface.co/models.", + ) + parser.add_argument( + "--variant", + type=str, + default=None, + help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16", + ) + parser.add_argument( + "--train_data_dir", + type=str, + default=None, + help=( + "A folder containing the training data. " + ), + ) + parser.add_argument( + "--train_data_meta", + type=str, + default=None, + help=( + "A csv containing the training data. " + ), + ) + parser.add_argument( + "--max_train_samples", + type=int, + default=None, + help=( + "For debugging purposes or quicker training, truncate the number of training examples to this " + "value if set." + ), + ) + parser.add_argument( + "--validation_prompts", + type=str, + default=None, + nargs="+", + help=("A set of prompts evaluated every `--validation_epochs` and logged to `--report_to`."), + ) + parser.add_argument( + "--output_dir", + type=str, + default="sd-model-finetuned", + help="The output directory where the model predictions and checkpoints will be written.", + ) + parser.add_argument( + "--cache_dir", + type=str, + default=None, + help="The directory where the downloaded models and datasets will be stored.", + ) + parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") + parser.add_argument( + "--random_flip", + action="store_true", + help="whether to randomly flip images horizontally", + ) + parser.add_argument( + "--use_came", + action="store_true", + help="whether to use came", + ) + parser.add_argument( + "--multi_stream", + action="store_true", + help="whether to use cuda multi-stream", + ) + parser.add_argument( + "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." + ) + parser.add_argument( + "--vae_mini_batch", type=int, default=32, help="mini batch size for vae." + ) + parser.add_argument("--num_train_epochs", type=int, default=100) + parser.add_argument( + "--max_train_steps", + type=int, + default=None, + help="Total number of training steps to perform. If provided, overrides num_train_epochs.", + ) + parser.add_argument( + "--gradient_accumulation_steps", + type=int, + default=1, + help="Number of updates steps to accumulate before performing a backward/update pass.", + ) + parser.add_argument( + "--gradient_checkpointing", + action="store_true", + help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", + ) + parser.add_argument( + "--learning_rate", + type=float, + default=1e-4, + help="Initial learning rate (after the potential warmup period) to use.", + ) + parser.add_argument( + "--scale_lr", + action="store_true", + default=False, + help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", + ) + parser.add_argument( + "--lr_scheduler", + type=str, + default="constant", + help=( + 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' + ' "constant", "constant_with_warmup"]' + ), + ) + parser.add_argument( + "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." 
+ ) + parser.add_argument( + "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." + ) + parser.add_argument( + "--allow_tf32", + action="store_true", + help=( + "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" + " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" + ), + ) + parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.") + parser.add_argument( + "--non_ema_revision", + type=str, + default=None, + required=False, + help=( + "Revision of pretrained non-ema model identifier. Must be a branch, tag or git identifier of the local or" + " remote repository specified with --pretrained_model_name_or_path." + ), + ) + parser.add_argument( + "--dataloader_num_workers", + type=int, + default=0, + help=( + "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." + ), + ) + parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") + parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") + parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") + parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") + parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") + parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") + parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") + parser.add_argument( + "--prediction_type", + type=str, + default=None, + help="The prediction_type that shall be used for training. Choose between 'epsilon' or 'v_prediction' or leave `None`. If left to `None` the default prediction type of the scheduler: `noise_scheduler.config.prediciton_type` is chosen.", + ) + parser.add_argument( + "--hub_model_id", + type=str, + default=None, + help="The name of the repository to keep in sync with the local `output_dir`.", + ) + parser.add_argument( + "--logging_dir", + type=str, + default="logs", + help=( + "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" + " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." + ), + ) + parser.add_argument( + "--report_model_info", action="store_true", help="Whether or not to report more info about model (such as norm, grad)." + ) + parser.add_argument( + "--mixed_precision", + type=str, + default=None, + choices=["no", "fp16", "bf16"], + help=( + "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" + " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" + " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." + ), + ) + parser.add_argument( + "--report_to", + type=str, + default="tensorboard", + help=( + 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' + ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' 
+ ), + ) + parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") + parser.add_argument( + "--checkpointing_steps", + type=int, + default=500, + help=( + "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming" + " training using `--resume_from_checkpoint`." + ), + ) + parser.add_argument( + "--checkpoints_total_limit", + type=int, + default=None, + help=("Max number of checkpoints to store."), + ) + parser.add_argument( + "--resume_from_checkpoint", + type=str, + default=None, + help=( + "Whether training should be resumed from a previous checkpoint. Use a path saved by" + ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' + ), + ) + parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.") + parser.add_argument( + "--validation_epochs", + type=int, + default=5, + help="Run validation every X epochs.", + ) + parser.add_argument( + "--validation_steps", + type=int, + default=2000, + help="Run validation every X steps.", + ) + parser.add_argument( + "--tracker_project_name", + type=str, + default="text2image-fine-tune", + help=( + "The `project_name` argument passed to Accelerator.init_trackers for" + " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator" + ), + ) + + parser.add_argument( + "--snr_loss", action="store_true", help="Whether or not to use snr_loss." + ) + parser.add_argument( + "--uniform_sampling", action="store_true", help="Whether or not to use uniform_sampling." + ) + parser.add_argument( + "--enable_text_encoder_in_dataloader", action="store_true", help="Whether or not to use text encoder in dataloader." + ) + parser.add_argument( + "--enable_bucket", action="store_true", help="Whether enable bucket sample in datasets." + ) + parser.add_argument( + "--random_ratio_crop", action="store_true", help="Whether enable random ratio crop sample in datasets." + ) + parser.add_argument( + "--random_frame_crop", action="store_true", help="Whether enable random frame crop sample in datasets." + ) + parser.add_argument( + "--random_hw_adapt", action="store_true", help="Whether enable random adapt height and width in datasets." + ) + parser.add_argument( + "--training_with_video_token_length", action="store_true", help="The training stage of the model in training.", + ) + parser.add_argument( + "--auto_tile_batch_size", action="store_true", help="Whether to auto tile batch size.", + ) + parser.add_argument( + "--motion_sub_loss", action="store_true", help="Whether enable motion sub loss." + ) + parser.add_argument( + "--motion_sub_loss_ratio", type=float, default=0.25, help="The ratio of motion sub loss." 
+ ) + parser.add_argument( + "--train_sampling_steps", + type=int, + default=1000, + help="Run train_sampling_steps.", + ) + parser.add_argument( + "--keep_all_node_same_token_length", + action="store_true", + help="Reference of the length token.", + ) + parser.add_argument( + "--token_sample_size", + type=int, + default=512, + help="Sample size of the token.", + ) + parser.add_argument( + "--video_sample_size", + type=int, + default=512, + help="Sample size of the video.", + ) + parser.add_argument( + "--image_sample_size", + type=int, + default=512, + help="Sample size of the image.", + ) + parser.add_argument( + "--fix_sample_size", + nargs=2, type=int, default=None, + help="Fix Sample size [height, width] when using bucket and collate_fn." + ) + parser.add_argument( + "--video_sample_stride", + type=int, + default=4, + help="Sample stride of the video.", + ) + parser.add_argument( + "--video_sample_n_frames", + type=int, + default=17, + help="Num frame of video.", + ) + parser.add_argument( + "--video_repeat", + type=int, + default=0, + help="Num of repeat video.", + ) + parser.add_argument( + "--config_path", + type=str, + default=None, + help=( + "The config of the model in training." + ), + ) + parser.add_argument( + "--transformer_path", + type=str, + default=None, + help=("If you want to load the weight from other transformers, input its path."), + ) + parser.add_argument( + "--vae_path", + type=str, + default=None, + help=("If you want to load the weight from other vaes, input its path."), + ) + + parser.add_argument( + '--trainable_modules', + nargs='+', + help='Enter a list of trainable modules' + ) + parser.add_argument( + '--trainable_modules_low_learning_rate', + nargs='+', + default=[], + help='Enter a list of trainable modules with lower learning rate' + ) + parser.add_argument( + '--tokenizer_max_length', + type=int, + default=512, + help='Max length of tokenizer' + ) + parser.add_argument( + "--use_deepspeed", action="store_true", help="Whether or not to use deepspeed." + ) + parser.add_argument( + "--use_fsdp", action="store_true", help="Whether or not to use fsdp." + ) + parser.add_argument( + "--low_vram", action="store_true", help="Whether enable low_vram mode." + ) + parser.add_argument( + "--boundary_type", + type=str, + default="low", + help=( + 'The format of training data. Support `"low"` and `"high"`' + ), + ) + parser.add_argument( + "--abnormal_norm_clip_start", + type=int, + default=1000, + help=( + 'When do we start doing additional processing on abnormal gradients. ' + ), + ) + parser.add_argument( + "--initial_grad_norm_ratio", + type=int, + default=5, + help=( + 'The initial gradient is relative to the multiple of the max_grad_norm. ' + ), + ) + parser.add_argument( + "--control_ref_image", + type=str, + default="first_frame", + help=( + 'The format of training data. Support `"first_frame"`' + ' (default), `"random"`.' + ), + ) + parser.add_argument( + "--weighting_scheme", + type=str, + default="none", + choices=["sigma_sqrt", "logit_normal", "mode", "cosmap", "none"], + help=('We default to the "none" weighting scheme for uniform sampling and uniform loss'), + ) + parser.add_argument( + "--logit_mean", type=float, default=0.0, help="mean to use when using the `'logit_normal'` weighting scheme." + ) + parser.add_argument( + "--logit_std", type=float, default=1.0, help="std to use when using the `'logit_normal'` weighting scheme." + ) + parser.add_argument( + "--mode_scale", + type=float, + default=1.29, + help="Scale of mode weighting scheme. 
Only effective when using the `'mode'` as the `weighting_scheme`.", + ) + + args = parser.parse_args() + env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) + if env_local_rank != -1 and env_local_rank != args.local_rank: + args.local_rank = env_local_rank + + # default to using the same revision for the non-ema model if not specified + if args.non_ema_revision is None: + args.non_ema_revision = args.revision + + return args + + +def main(): + args = parse_args() + + if args.report_to == "wandb" and args.hub_token is not None: + raise ValueError( + "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." + " Please use `huggingface-cli login` to authenticate with the Hub." + ) + + if args.non_ema_revision is not None: + deprecate( + "non_ema_revision!=None", + "0.15.0", + message=( + "Downloading 'non_ema' weights from revision branches of the Hub is deprecated. Please make sure to" + " use `--variant=non_ema` instead." + ), + ) + logging_dir = os.path.join(args.output_dir, args.logging_dir) + + config = OmegaConf.load(args.config_path) + accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) + + accelerator = Accelerator( + gradient_accumulation_steps=args.gradient_accumulation_steps, + mixed_precision=args.mixed_precision, + log_with=args.report_to, + project_config=accelerator_project_config, + ) + + deepspeed_plugin = accelerator.state.deepspeed_plugin if hasattr(accelerator.state, "deepspeed_plugin") else None + fsdp_plugin = accelerator.state.fsdp_plugin if hasattr(accelerator.state, "fsdp_plugin") else None + if deepspeed_plugin is not None: + zero_stage = int(deepspeed_plugin.zero_stage) + fsdp_stage = 0 + print(f"Using DeepSpeed Zero stage: {zero_stage}") + + args.use_deepspeed = True + if zero_stage == 3: + print(f"Auto set save_state to True because zero_stage == 3") + args.save_state = True + elif fsdp_plugin is not None: + from torch.distributed.fsdp import ShardingStrategy + zero_stage = 0 + if fsdp_plugin.sharding_strategy is ShardingStrategy.FULL_SHARD: + fsdp_stage = 3 + elif fsdp_plugin.sharding_strategy is None: # The fsdp_plugin.sharding_strategy is None in FSDP 2. + fsdp_stage = 3 + elif fsdp_plugin.sharding_strategy is ShardingStrategy.SHARD_GRAD_OP: + fsdp_stage = 2 + else: + fsdp_stage = 0 + print(f"Using FSDP stage: {fsdp_stage}") + + args.use_fsdp = True + if fsdp_stage == 3: + print(f"Auto set save_state to True because fsdp_stage == 3") + args.save_state = True + else: + zero_stage = 0 + fsdp_stage = 0 + print("DeepSpeed is not enabled.") + + if accelerator.is_main_process: + writer = SummaryWriter(log_dir=logging_dir) + + # Make one log on every process with the configuration for debugging. + logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%m/%d/%Y %H:%M:%S", + level=logging.INFO, + ) + logger.info(accelerator.state, main_process_only=False) + if accelerator.is_local_main_process: + datasets.utils.logging.set_verbosity_warning() + transformers.utils.logging.set_verbosity_warning() + diffusers.utils.logging.set_verbosity_info() + else: + datasets.utils.logging.set_verbosity_error() + transformers.utils.logging.set_verbosity_error() + diffusers.utils.logging.set_verbosity_error() + + # If passed along, set the training seed now. 
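+    # Each process derives its own generator seed (args.seed + process rank), so data sampling and augmentation differ across ranks while staying reproducible.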
+ if args.seed is not None: + set_seed(args.seed) + rng = np.random.default_rng(np.random.PCG64(args.seed + accelerator.process_index)) + torch_rng = torch.Generator(accelerator.device).manual_seed(args.seed + accelerator.process_index) + else: + rng = None + torch_rng = None + index_rng = np.random.default_rng(np.random.PCG64(43)) + print(f"Init rng with seed {args.seed + accelerator.process_index}. Process_index is {accelerator.process_index}") + + # Handle the repository creation + if accelerator.is_main_process: + if args.output_dir is not None: + os.makedirs(args.output_dir, exist_ok=True) + + # For mixed precision training we cast all non-trainable weigths (vae, non-lora text_encoder and non-lora transformer3d) to half-precision + # as these weights are only used for inference, keeping weights in full precision is not required. + weight_dtype = torch.float32 + if accelerator.mixed_precision == "fp16": + weight_dtype = torch.float16 + args.mixed_precision = accelerator.mixed_precision + elif accelerator.mixed_precision == "bf16": + weight_dtype = torch.bfloat16 + args.mixed_precision = accelerator.mixed_precision + + # Load scheduler, tokenizer and models. + noise_scheduler = FlowMatchEulerDiscreteScheduler( + **filter_kwargs(FlowMatchEulerDiscreteScheduler, OmegaConf.to_container(config['scheduler_kwargs'])) + ) + + # Get Tokenizer + tokenizer = AutoTokenizer.from_pretrained( + os.path.join(args.pretrained_model_name_or_path, config['text_encoder_kwargs'].get('tokenizer_subpath', 'tokenizer')), + ) + + def deepspeed_zero_init_disabled_context_manager(): + """ + returns either a context list that includes one that will disable zero.Init or an empty context list + """ + deepspeed_plugin = AcceleratorState().deepspeed_plugin if accelerate.state.is_initialized() else None + if deepspeed_plugin is None: + return [] + + return [deepspeed_plugin.zero3_init_context_manager(enable=False)] + + # Currently Accelerate doesn't know how to handle multiple models under Deepspeed ZeRO stage 3. + # For this to work properly all models must be run through `accelerate.prepare`. But accelerate + # will try to assign the same optimizer with the same weights to all models during + # `deepspeed.initialize`, which of course doesn't work. + # + # For now the following workaround will partially support Deepspeed ZeRO-3, by excluding the 2 + # frozen models from being partitioned during `zero.Init` which gets called during + # `from_pretrained` So CLIPTextModel and AutoencoderKL will not enjoy the parameter sharding + # across multiple gpus and only UNet2DConditionModel will get ZeRO sharded. 
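+    # (In this script the frozen models are the WanT5EncoderModel text encoder and the AutoencoderKLWan VAE; only the Wan transformer is trained and ZeRO-sharded.)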
+ with ContextManagers(deepspeed_zero_init_disabled_context_manager()): + # Get Text encoder + text_encoder = WanT5EncoderModel.from_pretrained( + os.path.join(args.pretrained_model_name_or_path, config['text_encoder_kwargs'].get('text_encoder_subpath', 'text_encoder')), + additional_kwargs=OmegaConf.to_container(config['text_encoder_kwargs']), + low_cpu_mem_usage=True, + torch_dtype=weight_dtype, + ) + text_encoder = text_encoder.eval() + # Get Vae + vae = AutoencoderKLWan.from_pretrained( + os.path.join(args.pretrained_model_name_or_path, config['vae_kwargs'].get('vae_subpath', 'vae')), + additional_kwargs=OmegaConf.to_container(config['vae_kwargs']), + ) + vae.eval() + + # Get Transformer + if args.boundary_type == "low" or args.boundary_type == "full": + sub_path = config['transformer_additional_kwargs'].get('transformer_low_noise_model_subpath', 'transformer') + else: + sub_path = config['transformer_additional_kwargs'].get('transformer_high_noise_model_subpath', 'transformer') + transformer3d = VaceWanTransformer3DModel.from_pretrained( + os.path.join(args.pretrained_model_name_or_path, sub_path), + transformer_additional_kwargs=OmegaConf.to_container(config['transformer_additional_kwargs']), + ).to(weight_dtype) + + # Freeze vae and text_encoder and set transformer3d to trainable + vae.requires_grad_(False) + text_encoder.requires_grad_(False) + transformer3d.requires_grad_(False) + + if args.transformer_path is not None: + print(f"From checkpoint: {args.transformer_path}") + if args.transformer_path.endswith("safetensors"): + from safetensors.torch import load_file, safe_open + state_dict = load_file(args.transformer_path) + else: + state_dict = torch.load(args.transformer_path, map_location="cpu") + state_dict = state_dict["state_dict"] if "state_dict" in state_dict else state_dict + + m, u = transformer3d.load_state_dict(state_dict, strict=False) + print(f"missing keys: {len(m)}, unexpected keys: {len(u)}") + + if args.vae_path is not None: + print(f"From checkpoint: {args.vae_path}") + if args.vae_path.endswith("safetensors"): + from safetensors.torch import load_file, safe_open + state_dict = load_file(args.vae_path) + else: + state_dict = torch.load(args.vae_path, map_location="cpu") + state_dict = state_dict["state_dict"] if "state_dict" in state_dict else state_dict + + m, u = vae.load_state_dict(state_dict, strict=False) + print(f"missing keys: {len(m)}, unexpected keys: {len(u)}") + + # A good trainable modules is showed below now. + # For 3D Patch: trainable_modules = ['ff.net', 'pos_embed', 'attn2', 'proj_out', 'timepositionalencoding', 'h_position', 'w_position'] + # For 2D Patch: trainable_modules = ['ff.net', 'attn2', 'timepositionalencoding', 'h_position', 'w_position'] + transformer3d.train() + if accelerator.is_main_process: + accelerator.print( + f"Trainable modules '{args.trainable_modules}'." + ) + for name, param in transformer3d.named_parameters(): + for trainable_module_name in args.trainable_modules + args.trainable_modules_low_learning_rate: + if trainable_module_name in name: + param.requires_grad = True + break + + # Create EMA for the transformer3d. 
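+    # EMA keeps a moving-average copy of the transformer weights; it is not supported together with stage-3 parameter sharding (see the check below).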
+ if args.use_ema: + if zero_stage == 3: + raise NotImplementedError("FSDP does not support EMA.") + + ema_transformer3d = VaceWanTransformer3DModel.from_pretrained( + os.path.join(args.pretrained_model_name_or_path, config['transformer_additional_kwargs'].get('transformer_subpath', 'transformer')), + transformer_additional_kwargs=OmegaConf.to_container(config['transformer_additional_kwargs']), + ).to(weight_dtype) + + ema_transformer3d = EMAModel(ema_transformer3d.parameters(), model_cls=VaceWanTransformer3DModel, model_config=ema_transformer3d.config) + + # `accelerate` 0.16.0 will have better support for customized saving + if version.parse(accelerate.__version__) >= version.parse("0.16.0"): + # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format + if fsdp_stage != 0: + def save_model_hook(models, weights, output_dir): + accelerate_state_dict = accelerator.get_state_dict(models[-1], unwrap=True) + if accelerator.is_main_process: + from safetensors.torch import save_file + + safetensor_save_path = os.path.join(output_dir, f"diffusion_pytorch_model.safetensors") + accelerate_state_dict = {k: v.to(dtype=weight_dtype) for k, v in accelerate_state_dict.items()} + save_file(accelerate_state_dict, safetensor_save_path, metadata={"format": "pt"}) + + with open(os.path.join(output_dir, "sampler_pos_start.pkl"), 'wb') as file: + pickle.dump([batch_sampler.sampler._pos_start, first_epoch], file) + + def load_model_hook(models, input_dir): + pkl_path = os.path.join(input_dir, "sampler_pos_start.pkl") + if os.path.exists(pkl_path): + with open(pkl_path, 'rb') as file: + loaded_number, _ = pickle.load(file) + batch_sampler.sampler._pos_start = max(loaded_number - args.dataloader_num_workers * accelerator.num_processes * 2, 0) + print(f"Load pkl from {pkl_path}. Get loaded_number = {loaded_number}.") + + elif zero_stage == 3: + # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format + def save_model_hook(models, weights, output_dir): + if accelerator.is_main_process: + with open(os.path.join(output_dir, "sampler_pos_start.pkl"), 'wb') as file: + pickle.dump([batch_sampler.sampler._pos_start, first_epoch], file) + + def load_model_hook(models, input_dir): + pkl_path = os.path.join(input_dir, "sampler_pos_start.pkl") + if os.path.exists(pkl_path): + with open(pkl_path, 'rb') as file: + loaded_number, _ = pickle.load(file) + batch_sampler.sampler._pos_start = max(loaded_number - args.dataloader_num_workers * accelerator.num_processes * 2, 0) + print(f"Load pkl from {pkl_path}. 
Get loaded_number = {loaded_number}.") + else: + # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format + def save_model_hook(models, weights, output_dir): + if accelerator.is_main_process: + if args.use_ema: + ema_transformer3d.save_pretrained(os.path.join(output_dir, "transformer_ema")) + + models[0].save_pretrained(os.path.join(output_dir, "transformer")) + if not args.use_deepspeed: + weights.pop() + + with open(os.path.join(output_dir, "sampler_pos_start.pkl"), 'wb') as file: + pickle.dump([batch_sampler.sampler._pos_start, first_epoch], file) + + def load_model_hook(models, input_dir): + if args.use_ema: + ema_path = os.path.join(input_dir, "transformer_ema") + _, ema_kwargs = VaceWanTransformer3DModel.load_config(ema_path, return_unused_kwargs=True) + load_model = VaceWanTransformer3DModel.from_pretrained( + input_dir, subfolder="transformer_ema", + transformer_additional_kwargs=OmegaConf.to_container(config['transformer_additional_kwargs']) + ) + load_model = EMAModel(load_model.parameters(), model_cls=VaceWanTransformer3DModel, model_config=load_model.config) + load_model.load_state_dict(ema_kwargs) + + ema_transformer3d.load_state_dict(load_model.state_dict()) + ema_transformer3d.to(accelerator.device) + del load_model + + for i in range(len(models)): + # pop models so that they are not loaded again + model = models.pop() + + # load diffusers style into model + load_model = VaceWanTransformer3DModel.from_pretrained( + input_dir, subfolder="transformer" + ) + model.register_to_config(**load_model.config) + + model.load_state_dict(load_model.state_dict()) + del load_model + + pkl_path = os.path.join(input_dir, "sampler_pos_start.pkl") + if os.path.exists(pkl_path): + with open(pkl_path, 'rb') as file: + loaded_number, _ = pickle.load(file) + batch_sampler.sampler._pos_start = max(loaded_number - args.dataloader_num_workers * accelerator.num_processes * 2, 0) + print(f"Load pkl from {pkl_path}. Get loaded_number = {loaded_number}.") + + accelerator.register_save_state_pre_hook(save_model_hook) + accelerator.register_load_state_pre_hook(load_model_hook) + + if args.gradient_checkpointing: + transformer3d.enable_gradient_checkpointing() + + # Enable TF32 for faster training on Ampere GPUs, + # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices + if args.allow_tf32: + torch.backends.cuda.matmul.allow_tf32 = True + + if args.scale_lr: + args.learning_rate = ( + args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes + ) + + # Initialize the optimizer + if args.use_8bit_adam: + try: + import bitsandbytes as bnb + except ImportError: + raise ImportError( + "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`" + ) + + optimizer_cls = bnb.optim.AdamW8bit + elif args.use_came: + try: + from came_pytorch import CAME + except: + raise ImportError( + "Please install came_pytorch to use CAME. 
You can do so by running `pip install came_pytorch`" + ) + + optimizer_cls = CAME + else: + optimizer_cls = torch.optim.AdamW + + trainable_params = list(filter(lambda p: p.requires_grad, transformer3d.parameters())) + trainable_params_optim = [ + {'params': [], 'lr': args.learning_rate}, + {'params': [], 'lr': args.learning_rate / 2}, + ] + in_already = [] + for name, param in transformer3d.named_parameters(): + high_lr_flag = False + if name in in_already: + continue + for trainable_module_name in args.trainable_modules: + if trainable_module_name in name: + in_already.append(name) + high_lr_flag = True + trainable_params_optim[0]['params'].append(param) + if accelerator.is_main_process: + print(f"Set {name} to lr : {args.learning_rate}") + break + if high_lr_flag: + continue + for trainable_module_name in args.trainable_modules_low_learning_rate: + if trainable_module_name in name: + in_already.append(name) + trainable_params_optim[1]['params'].append(param) + if accelerator.is_main_process: + print(f"Set {name} to lr : {args.learning_rate / 2}") + break + + if args.use_came: + optimizer = optimizer_cls( + trainable_params_optim, + lr=args.learning_rate, + # weight_decay=args.adam_weight_decay, + betas=(0.9, 0.999, 0.9999), + eps=(1e-30, 1e-16) + ) + else: + optimizer = optimizer_cls( + trainable_params_optim, + lr=args.learning_rate, + betas=(args.adam_beta1, args.adam_beta2), + weight_decay=args.adam_weight_decay, + eps=args.adam_epsilon, + ) + + # Get the training dataset + sample_n_frames_bucket_interval = vae.config.temporal_compression_ratio + + if args.fix_sample_size is not None and args.enable_bucket: + args.video_sample_size = max(max(args.fix_sample_size), args.video_sample_size) + args.image_sample_size = max(max(args.fix_sample_size), args.image_sample_size) + args.training_with_video_token_length = False + args.random_hw_adapt = False + + # Get the dataset + train_dataset = ImageVideoControlDataset( + args.train_data_meta, args.train_data_dir, + video_sample_size=args.video_sample_size, video_sample_stride=args.video_sample_stride, video_sample_n_frames=args.video_sample_n_frames, + video_repeat=args.video_repeat, + image_sample_size=args.image_sample_size, + enable_bucket=args.enable_bucket, + enable_inpaint=False, + enable_camera_info=False, + enable_subject_info=True + ) + + def worker_init_fn(_seed): + _seed = _seed * 256 + def _worker_init_fn(worker_id): + print(f"worker_init_fn with {_seed + worker_id}") + np.random.seed(_seed + worker_id) + random.seed(_seed + worker_id) + return _worker_init_fn + + if args.enable_bucket: + aspect_ratio_sample_size = {key : [x / 512 * args.video_sample_size for x in ASPECT_RATIO_512[key]] for key in ASPECT_RATIO_512.keys()} + batch_sampler_generator = torch.Generator().manual_seed(args.seed) + batch_sampler = AspectRatioBatchImageVideoSampler( + sampler=RandomSampler(train_dataset, generator=batch_sampler_generator), dataset=train_dataset.dataset, + batch_size=args.train_batch_size, train_folder = args.train_data_dir, drop_last=True, + aspect_ratios=aspect_ratio_sample_size, + ) + + def collate_fn(examples): + def get_length_to_frame_num(token_length): + if args.image_sample_size > args.video_sample_size: + sample_sizes = list(range(args.video_sample_size, args.image_sample_size + 1, 128)) + + if sample_sizes[-1] != args.image_sample_size: + sample_sizes.append(args.image_sample_size) + else: + sample_sizes = [args.image_sample_size] + + length_to_frame_num = { + sample_size: min(token_length / sample_size / sample_size, 
args.video_sample_n_frames) // sample_n_frames_bucket_interval * sample_n_frames_bucket_interval + 1 for sample_size in sample_sizes + } + + return length_to_frame_num + + def get_random_downsample_ratio(sample_size, image_ratio=[], + all_choices=False, rng=None): + def _create_special_list(length): + if length == 1: + return [1.0] + if length >= 2: + first_element = 0.90 + remaining_sum = 1.0 - first_element + other_elements_value = remaining_sum / (length - 1) + special_list = [first_element] + [other_elements_value] * (length - 1) + return special_list + + if sample_size >= 1536: + number_list = [1, 1.25, 1.5, 2, 2.5, 3] + image_ratio + elif sample_size >= 1024: + number_list = [1, 1.25, 1.5, 2] + image_ratio + elif sample_size >= 768: + number_list = [1, 1.25, 1.5] + image_ratio + elif sample_size >= 512: + number_list = [1] + image_ratio + else: + number_list = [1] + + if all_choices: + return number_list + + number_list_prob = np.array(_create_special_list(len(number_list))) + if rng is None: + return np.random.choice(number_list, p = number_list_prob) + else: + return rng.choice(number_list, p = number_list_prob) + + # Get token length + target_token_length = args.video_sample_n_frames * args.token_sample_size * args.token_sample_size + length_to_frame_num = get_length_to_frame_num(target_token_length) + + # Create new output + new_examples = {} + new_examples["target_token_length"] = target_token_length + new_examples["pixel_values"] = [] + new_examples["text"] = [] + # Used in Control Mode + new_examples["control_pixel_values"] = [] + # Used in Control Ref Mode + new_examples["ref_pixel_values"] = [] + new_examples["clip_pixel_values"] = [] + new_examples["clip_idx"] = [] + + # Used in Inpaint mode + new_examples["mask_pixel_values"] = [] + new_examples["mask"] = [] + + new_examples["subject_images"] = [] + new_examples["subject_flags"] = [] + + # Get downsample ratio in image and videos + pixel_value = examples[0]["pixel_values"] + data_type = examples[0]["data_type"] + f, h, w, c = np.shape(pixel_value) + if data_type == 'image': + random_downsample_ratio = 1 if not args.random_hw_adapt else get_random_downsample_ratio(args.image_sample_size, image_ratio=[args.image_sample_size / args.video_sample_size]) + + aspect_ratio_sample_size = {key : [x / 512 * args.image_sample_size / random_downsample_ratio for x in ASPECT_RATIO_512[key]] for key in ASPECT_RATIO_512.keys()} + aspect_ratio_random_crop_sample_size = {key : [x / 512 * args.image_sample_size / random_downsample_ratio for x in ASPECT_RATIO_RANDOM_CROP_512[key]] for key in ASPECT_RATIO_RANDOM_CROP_512.keys()} + + batch_video_length = args.video_sample_n_frames + sample_n_frames_bucket_interval + else: + if args.random_hw_adapt: + if args.training_with_video_token_length: + local_min_size = np.min(np.array([np.mean(np.array([np.shape(example["pixel_values"])[1], np.shape(example["pixel_values"])[2]])) for example in examples])) + + def get_random_downsample_probability(choice_list, token_sample_size): + length = len(choice_list) + if length == 1: + return [1.0] # If there's only one element, it gets all the probability + + # Find the index of the closest value to token_sample_size + closest_index = min(range(length), key=lambda i: abs(choice_list[i] - token_sample_size)) + + # Assign 50% to the closest index + first_element = 0.50 + remaining_sum = 1.0 - first_element + + # Distribute the remaining 50% evenly among the other elements + other_elements_value = remaining_sum / (length - 1) if length > 1 else 0.0 + + # Construct 
the probability distribution + probability_list = [other_elements_value] * length + probability_list[closest_index] = first_element + + return probability_list + + choice_list = [length for length in list(length_to_frame_num.keys()) if length < local_min_size * 1.25] + if len(choice_list) == 0: + choice_list = list(length_to_frame_num.keys()) + probabilities = get_random_downsample_probability(choice_list, args.token_sample_size) + local_video_sample_size = np.random.choice(choice_list, p=probabilities) + + random_downsample_ratio = args.video_sample_size / local_video_sample_size + batch_video_length = length_to_frame_num[local_video_sample_size] + else: + random_downsample_ratio = get_random_downsample_ratio(args.video_sample_size) + batch_video_length = args.video_sample_n_frames + sample_n_frames_bucket_interval + else: + random_downsample_ratio = 1 + batch_video_length = args.video_sample_n_frames + sample_n_frames_bucket_interval + + aspect_ratio_sample_size = {key : [x / 512 * args.video_sample_size / random_downsample_ratio for x in ASPECT_RATIO_512[key]] for key in ASPECT_RATIO_512.keys()} + aspect_ratio_random_crop_sample_size = {key : [x / 512 * args.video_sample_size / random_downsample_ratio for x in ASPECT_RATIO_RANDOM_CROP_512[key]] for key in ASPECT_RATIO_RANDOM_CROP_512.keys()} + + if args.fix_sample_size is not None: + fix_sample_size = [int(x / 16) * 16 for x in args.fix_sample_size] + elif args.random_ratio_crop: + if rng is None: + random_sample_size = aspect_ratio_random_crop_sample_size[ + np.random.choice(list(aspect_ratio_random_crop_sample_size.keys()), p = ASPECT_RATIO_RANDOM_CROP_PROB) + ] + else: + random_sample_size = aspect_ratio_random_crop_sample_size[ + rng.choice(list(aspect_ratio_random_crop_sample_size.keys()), p = ASPECT_RATIO_RANDOM_CROP_PROB) + ] + random_sample_size = [int(x / 16) * 16 for x in random_sample_size] + else: + closest_size, closest_ratio = get_closest_ratio(h, w, ratios=aspect_ratio_sample_size) + closest_size = [int(x / 16) * 16 for x in closest_size] + + for example in examples: + # To 0~1 + pixel_values = torch.from_numpy(example["pixel_values"]).permute(0, 3, 1, 2).contiguous() + pixel_values = pixel_values / 255. + + if args.control_ref_image == "first_frame": + clip_index = 0 + else: + def _create_special_list(length): + if length == 1: + return [1.0] + if length >= 2: + first_element = 0.40 + remaining_sum = 1.0 - first_element + other_elements_value = remaining_sum / (length - 1) + special_list = [first_element] + [other_elements_value] * (length - 1) + return special_list + number_list_prob = np.array(_create_special_list(len(pixel_values))) + clip_index = np.random.choice(list(range(len(pixel_values))), p = number_list_prob) + new_examples["clip_idx"].append(clip_index) + + ref_pixel_values = pixel_values[clip_index].permute(1, 2, 0).contiguous() + ref_pixel_values = Image.fromarray(np.uint8(ref_pixel_values * 255)) + ref_pixel_values = padding_image(ref_pixel_values, closest_size[1], closest_size[0]) + ref_pixel_values = (torch.tensor(np.array(ref_pixel_values)).unsqueeze(0).permute(0, 3, 1, 2).contiguous() / 255 - 0.5)/0.5 + new_examples["ref_pixel_values"].append(ref_pixel_values) + + control_pixel_values = torch.from_numpy(example["control_pixel_values"]).permute(0, 3, 1, 2).contiguous() + control_pixel_values = control_pixel_values / 255. 
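+ # Frames are first scaled to [0, 1] here; the Resize/CenterCrop/Normalize(mean=0.5, std=0.5)
+ # transforms built below then map them to [-1, 1] before the pixels are VAE-encoded.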
+ + _, channel, h, w = pixel_values.size() + new_subject_image = torch.zeros(4, channel, h, w) + num_subject = len(example["subject_image"]) + if num_subject != 0: + subject_image = torch.from_numpy(example["subject_image"]).permute(0, 3, 1, 2).contiguous() + new_subject_image[:num_subject] = subject_image + subject_image = new_subject_image / 255. + subject_flag = torch.from_numpy(np.array([1] * num_subject + [0] * (4 - num_subject))) + + if args.fix_sample_size is not None: + # Get adapt hw for resize + fix_sample_size = list(map(lambda x: int(x), fix_sample_size)) + transform = transforms.Compose([ + transforms.Resize(fix_sample_size, interpolation=transforms.InterpolationMode.BILINEAR), # Image.BICUBIC + transforms.CenterCrop(fix_sample_size), + transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True), + ]) + + transform_no_normalize = transforms.Compose([ + transforms.Resize(fix_sample_size, interpolation=transforms.InterpolationMode.BILINEAR), # Image.BICUBIC + transforms.CenterCrop(fix_sample_size), + ]) + elif args.random_ratio_crop: + # Get adapt hw for resize + b, c, h, w = pixel_values.size() + th, tw = random_sample_size + if th / tw > h / w: + nh = int(th) + nw = int(w / h * nh) + else: + nw = int(tw) + nh = int(h / w * nw) + + transform = transforms.Compose([ + transforms.Resize([nh, nw]), + transforms.CenterCrop([int(x) for x in random_sample_size]), + transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True), + ]) + + transform_no_normalize = transforms.Compose([ + transforms.Resize([nh, nw]), + transforms.CenterCrop([int(x) for x in random_sample_size]), + ]) + else: + # Get adapt hw for resize + closest_size = list(map(lambda x: int(x), closest_size)) + if closest_size[0] / h > closest_size[1] / w: + resize_size = closest_size[0], int(w * closest_size[0] / h) + else: + resize_size = int(h * closest_size[1] / w), closest_size[1] + + transform = transforms.Compose([ + transforms.Resize(resize_size, interpolation=transforms.InterpolationMode.BILINEAR), # Image.BICUBIC + transforms.CenterCrop(closest_size), + transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True), + ]) + + transform_no_normalize = transforms.Compose([ + transforms.Resize(resize_size, interpolation=transforms.InterpolationMode.BILINEAR), # Image.BICUBIC + transforms.CenterCrop(closest_size), + ]) + + new_examples["pixel_values"].append(transform(pixel_values)) + new_examples["control_pixel_values"].append(transform(control_pixel_values)) + + new_examples["text"].append(example["text"]) + # Magvae needs the number of frames to be 4n + 1. 
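+ # For example, assuming sample_n_frames_bucket_interval is 4: an 81-frame clip stays at
+ # 81 frames ((81 - 1) // 4 * 4 + 1 == 81), while a 79-frame clip is clamped to 77.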
+ batch_video_length = int( + min( + batch_video_length, + (len(pixel_values) - 1) // sample_n_frames_bucket_interval * sample_n_frames_bucket_interval + 1, + ) + ) + if batch_video_length == 0: + batch_video_length = 1 + + clip_pixel_values = new_examples["pixel_values"][-1][clip_index].permute(1, 2, 0).contiguous() + clip_pixel_values = (clip_pixel_values * 0.5 + 0.5) * 255 + new_examples["clip_pixel_values"].append(clip_pixel_values) + + mask = get_random_mask(new_examples["pixel_values"][-1].size()) + mask_pixel_values = new_examples["pixel_values"][-1] * (1 - mask) + + # Wan 2.1 use 0 for masked pixels + # + torch.ones_like(new_examples["pixel_values"][-1]) * -1 * mask + new_examples["mask_pixel_values"].append(mask_pixel_values) + new_examples["mask"].append(mask) + + new_examples["subject_images"].append(transform(subject_image)) + new_examples["subject_flags"].append(subject_flag) + + # Limit the number of frames to the same + new_examples["pixel_values"] = torch.stack([example[:batch_video_length] for example in new_examples["pixel_values"]]) + new_examples["control_pixel_values"] = torch.stack([example[:batch_video_length] for example in new_examples["control_pixel_values"]]) + new_examples["ref_pixel_values"] = torch.stack([example[:batch_video_length] for example in new_examples["ref_pixel_values"]]) + new_examples["clip_pixel_values"] = torch.stack([example for example in new_examples["clip_pixel_values"]]) + new_examples["clip_idx"] = torch.tensor(new_examples["clip_idx"]) + new_examples["mask_pixel_values"] = torch.stack([example[:batch_video_length] for example in new_examples["mask_pixel_values"]]) + new_examples["mask"] = torch.stack([example[:batch_video_length] for example in new_examples["mask"]]) + new_examples["subject_images"] = torch.stack([example for example in new_examples["subject_images"]]) + new_examples["subject_flags"] = torch.stack([example for example in new_examples["subject_flags"]]) + # Encode prompts when enable_text_encoder_in_dataloader=True + if args.enable_text_encoder_in_dataloader: + prompt_ids = tokenizer( + new_examples['text'], + max_length=args.tokenizer_max_length, + padding="max_length", + add_special_tokens=True, + truncation=True, + return_tensors="pt" + ) + encoder_hidden_states = text_encoder( + prompt_ids.input_ids + )[0] + new_examples['encoder_attention_mask'] = prompt_ids.attention_mask + new_examples['encoder_hidden_states'] = encoder_hidden_states + + return new_examples + + # DataLoaders creation: + train_dataloader = torch.utils.data.DataLoader( + train_dataset, + batch_sampler=batch_sampler, + collate_fn=collate_fn, + persistent_workers=True if args.dataloader_num_workers != 0 else False, + num_workers=args.dataloader_num_workers, + worker_init_fn=worker_init_fn(args.seed + accelerator.process_index) + ) + else: + # DataLoaders creation: + batch_sampler_generator = torch.Generator().manual_seed(args.seed) + batch_sampler = ImageVideoSampler(RandomSampler(train_dataset, generator=batch_sampler_generator), train_dataset, args.train_batch_size) + train_dataloader = torch.utils.data.DataLoader( + train_dataset, + batch_sampler=batch_sampler, + persistent_workers=True if args.dataloader_num_workers != 0 else False, + num_workers=args.dataloader_num_workers, + worker_init_fn=worker_init_fn(args.seed + accelerator.process_index) + ) + + # Scheduler and math around the number of training steps. 
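+ # When --max_train_steps is not set it is derived from the epoch count, e.g. a dataloader of
+ # 1000 batches with gradient_accumulation_steps=2 gives ceil(1000 / 2) = 500 update steps per
+ # epoch, so num_train_epochs=100 results in max_train_steps=50000.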
+ overrode_max_train_steps = False + num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) + if args.max_train_steps is None: + args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch + overrode_max_train_steps = True + + lr_scheduler = get_scheduler( + args.lr_scheduler, + optimizer=optimizer, + num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, + num_training_steps=args.max_train_steps * accelerator.num_processes, + ) + + # Prepare everything with our `accelerator`. + transformer3d, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( + transformer3d, optimizer, train_dataloader, lr_scheduler + ) + + if fsdp_stage != 0: + from functools import partial + + from videox_fun.dist import set_multi_gpus_devices, shard_model + shard_fn = partial(shard_model, device_id=accelerator.device, param_dtype=weight_dtype) + text_encoder = shard_fn(text_encoder) + + # shard_fn = partial(shard_model, device_id=accelerator.device, param_dtype=weight_dtype) + # transformer3d = shard_fn(transformer3d) + + if args.use_ema: + ema_transformer3d.to(accelerator.device) + + # Move text_encode and vae to gpu and cast to weight_dtype + vae.to(accelerator.device if not args.low_vram else "cpu", dtype=weight_dtype) + if not args.enable_text_encoder_in_dataloader: + text_encoder.to(accelerator.device if not args.low_vram else "cpu") + + # We need to recalculate our total training steps as the size of the training dataloader may have changed. + num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) + if overrode_max_train_steps: + args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch + # Afterwards we recalculate our number of training epochs + args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) + + # We need to initialize the trackers we use, and also store our configuration. + # The trackers initializes automatically on the main process. + if accelerator.is_main_process: + tracker_config = dict(vars(args)) + tracker_config.pop("validation_prompts") + tracker_config.pop("trainable_modules") + tracker_config.pop("trainable_modules_low_learning_rate") + tracker_config.pop("fix_sample_size") + accelerator.init_trackers(args.tracker_project_name, tracker_config) + + # Function for unwrapping if model was compiled with `torch.compile`. + def unwrap_model(model): + model = accelerator.unwrap_model(model) + model = model._orig_mod if is_compiled_module(model) else model + return model + + # Train! + total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps + + logger.info("***** Running training *****") + logger.info(f" Num examples = {len(train_dataset)}") + logger.info(f" Num Epochs = {args.num_train_epochs}") + logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") + logger.info(f" Total train batch size (w. 
parallel, distributed & accumulation) = {total_batch_size}") + logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") + logger.info(f" Total optimization steps = {args.max_train_steps}") + global_step = 0 + first_epoch = 0 + + # Potentially load in the weights and states from a previous save + if args.resume_from_checkpoint: + if args.resume_from_checkpoint != "latest": + path = os.path.basename(args.resume_from_checkpoint) + else: + # Get the most recent checkpoint + dirs = os.listdir(args.output_dir) + dirs = [d for d in dirs if d.startswith("checkpoint")] + dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) + path = dirs[-1] if len(dirs) > 0 else None + + if path is None: + accelerator.print( + f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." + ) + args.resume_from_checkpoint = None + initial_global_step = 0 + else: + global_step = int(path.split("-")[1]) + + initial_global_step = global_step + + pkl_path = os.path.join(os.path.join(args.output_dir, path), "sampler_pos_start.pkl") + if os.path.exists(pkl_path): + with open(pkl_path, 'rb') as file: + _, first_epoch = pickle.load(file) + else: + first_epoch = global_step // num_update_steps_per_epoch + print(f"Load pkl from {pkl_path}. Get first_epoch = {first_epoch}.") + + accelerator.print(f"Resuming from checkpoint {path}") + accelerator.load_state(os.path.join(args.output_dir, path)) + else: + initial_global_step = 0 + + progress_bar = tqdm( + range(0, args.max_train_steps), + initial=initial_global_step, + desc="Steps", + # Only show the progress bar once on each machine. + disable=not accelerator.is_local_main_process, + ) + + if args.multi_stream: + # create extra cuda streams to speedup inpaint vae computation + vae_stream_1 = torch.cuda.Stream() + vae_stream_2 = torch.cuda.Stream() + else: + vae_stream_1 = None + vae_stream_2 = None + + # Calculate the index we need + boundary = config['transformer_additional_kwargs'].get('boundary', 0.900) + split_timesteps = args.train_sampling_steps * boundary + differences = torch.abs(noise_scheduler.timesteps - split_timesteps) + closest_index = torch.argmin(differences).item() + print(f"The boundary is {boundary} and the boundary_type is {args.boundary_type}. The closest_index we calculate is {closest_index}") + if args.boundary_type == "high": + start_num_idx = 0 + train_sampling_steps = closest_index + elif args.boundary_type == "low": + start_num_idx = closest_index + train_sampling_steps = args.train_sampling_steps - closest_index + else: + start_num_idx = 0 + train_sampling_steps = args.train_sampling_steps + idx_sampling = DiscreteSampling(train_sampling_steps, start_num_idx=start_num_idx, uniform_sampling=args.uniform_sampling) + + for epoch in range(first_epoch, args.num_train_epochs): + train_loss = 0.0 + batch_sampler.sampler.generator = torch.Generator().manual_seed(args.seed + epoch) + for step, batch in enumerate(train_dataloader): + # Data batch sanity check + if epoch == first_epoch and step == 0: + pixel_values, texts = batch['pixel_values'].cpu(), batch['text'] + control_pixel_values = batch["control_pixel_values"].cpu() + pixel_values = rearrange(pixel_values, "b f c h w -> b c f h w") + control_pixel_values = rearrange(control_pixel_values, "b f c h w -> b c f h w") + os.makedirs(os.path.join(args.output_dir, "sanity_check"), exist_ok=True) + for idx, (pixel_value, control_pixel_value, text) in enumerate(zip(pixel_values, control_pixel_values, texts)): + pixel_value = pixel_value[None, ...] 
+ control_pixel_value = control_pixel_value[None, ...] + gif_name = '-'.join(text.replace('/', '').split()[:10]) if not text == '' else f'{global_step}-{idx}' + save_videos_grid(pixel_value, f"{args.output_dir}/sanity_check/{gif_name[:10]}.gif", rescale=True) + save_videos_grid(control_pixel_value, f"{args.output_dir}/sanity_check/{gif_name[:10]}_control.gif", rescale=True) + + ref_pixel_values = batch["ref_pixel_values"].cpu() + ref_pixel_values = rearrange(ref_pixel_values, "b f c h w -> b c f h w") + for idx, (ref_pixel_value, text) in enumerate(zip(ref_pixel_values, texts)): + ref_pixel_value = ref_pixel_value[None, ...] + gif_name = '-'.join(text.replace('/', '').split()[:10]) if not text == '' else f'{global_step}-{idx}' + save_videos_grid(ref_pixel_value, f"{args.output_dir}/sanity_check/{gif_name[:10]}_ref.gif", rescale=True) + + subject_images = batch["subject_images"].cpu() + subject_images = rearrange(subject_images, "b f c h w -> b c f h w") + for idx, (subject_image, text) in enumerate(zip(subject_images, texts)): + subject_image = subject_image[None, ...] + gif_name = '-'.join(text.replace('/', '').split()[:10]) if not text == '' else f'{global_step}-{idx}' + save_videos_grid(subject_image, f"{args.output_dir}/sanity_check/{gif_name[:10]}_subject.gif", rescale=True) + + clip_pixel_values, mask_pixel_values, texts = batch['clip_pixel_values'].cpu(), batch['mask_pixel_values'].cpu(), batch['text'] + mask_pixel_values = rearrange(mask_pixel_values, "b f c h w -> b c f h w") + for idx, (clip_pixel_value, pixel_value, text) in enumerate(zip(clip_pixel_values, mask_pixel_values, texts)): + pixel_value = pixel_value[None, ...] + Image.fromarray(np.uint8(clip_pixel_value)).save(f"{args.output_dir}/sanity_check/clip_{gif_name[:10] if not text == '' else f'{global_step}-{idx}'}.png") + save_videos_grid(pixel_value, f"{args.output_dir}/sanity_check/mask_{gif_name[:10] if not text == '' else f'{global_step}-{idx}'}.gif", rescale=True) + + with accelerator.accumulate(transformer3d): + # Convert images to latent space + pixel_values = batch["pixel_values"].to(weight_dtype) + control_pixel_values = batch["control_pixel_values"].to(weight_dtype) + + # Increase the batch size when the length of the latent sequence of the current sample is small + if args.auto_tile_batch_size and args.training_with_video_token_length and zero_stage != 3: + if args.video_sample_n_frames * args.token_sample_size * args.token_sample_size // 16 >= pixel_values.size()[1] * pixel_values.size()[3] * pixel_values.size()[4]: + pixel_values = torch.tile(pixel_values, (4, 1, 1, 1, 1)) + control_pixel_values = torch.tile(control_pixel_values, (4, 1, 1, 1, 1)) + if args.enable_text_encoder_in_dataloader: + batch['encoder_hidden_states'] = torch.tile(batch['encoder_hidden_states'], (4, 1, 1)) + batch['encoder_attention_mask'] = torch.tile(batch['encoder_attention_mask'], (4, 1)) + else: + batch['text'] = batch['text'] * 4 + elif args.video_sample_n_frames * args.token_sample_size * args.token_sample_size // 4 >= pixel_values.size()[1] * pixel_values.size()[3] * pixel_values.size()[4]: + pixel_values = torch.tile(pixel_values, (2, 1, 1, 1, 1)) + control_pixel_values = torch.tile(control_pixel_values, (2, 1, 1, 1, 1)) + if args.enable_text_encoder_in_dataloader: + batch['encoder_hidden_states'] = torch.tile(batch['encoder_hidden_states'], (2, 1, 1)) + batch['encoder_attention_mask'] = torch.tile(batch['encoder_attention_mask'], (2, 1)) + else: + batch['text'] = batch['text'] * 2 + + ref_pixel_values = 
batch["ref_pixel_values"].to(weight_dtype) + clip_pixel_values = batch["clip_pixel_values"] + subject_images = batch["subject_images"].to(weight_dtype) + subject_flags = batch["subject_flags"].to(weight_dtype) + clip_idx = batch["clip_idx"] + mask_pixel_values = batch["mask_pixel_values"].to(weight_dtype) + mask = batch["mask"].to(weight_dtype) + + # Increase the batch size when the length of the latent sequence of the current sample is small + if args.auto_tile_batch_size and args.training_with_video_token_length and zero_stage != 3: + if args.video_sample_n_frames * args.token_sample_size * args.token_sample_size // 16 >= pixel_values.size()[1] * pixel_values.size()[3] * pixel_values.size()[4]: + clip_pixel_values = torch.tile(clip_pixel_values, (4, 1, 1, 1)) + subject_images = torch.tile(subject_images, (4, 1, 1, 1, 1)) + ref_pixel_values = torch.tile(ref_pixel_values, (4, 1, 1, 1, 1)) + subject_flags = torch.tile(subject_flags, (4, 1)) + clip_idx = torch.tile(clip_idx, (4,)) + mask_pixel_values = torch.tile(mask_pixel_values, (4, 1, 1, 1, 1)) + mask = torch.tile(mask, (4, 1, 1, 1, 1)) + elif args.video_sample_n_frames * args.token_sample_size * args.token_sample_size // 4 >= pixel_values.size()[1] * pixel_values.size()[3] * pixel_values.size()[4]: + clip_pixel_values = torch.tile(clip_pixel_values, (2, 1, 1, 1)) + subject_images = torch.tile(subject_images, (2, 1, 1, 1, 1)) + ref_pixel_values = torch.tile(ref_pixel_values, (2, 1, 1, 1, 1)) + subject_flags = torch.tile(subject_flags, (2, 1)) + clip_idx = torch.tile(clip_idx, (2,)) + mask_pixel_values = torch.tile(mask_pixel_values, (2, 1, 1, 1, 1)) + mask = torch.tile(mask, (2, 1, 1, 1, 1)) + + if args.random_frame_crop: + def _create_special_list(length): + if length == 1: + return [1.0] + if length >= 2: + last_element = 0.90 + remaining_sum = 1.0 - last_element + other_elements_value = remaining_sum / (length - 1) + special_list = [other_elements_value] * (length - 1) + [last_element] + return special_list + select_frames = [_tmp for _tmp in list(range(sample_n_frames_bucket_interval + 1, args.video_sample_n_frames + sample_n_frames_bucket_interval, sample_n_frames_bucket_interval))] + select_frames_prob = np.array(_create_special_list(len(select_frames))) + + if len(select_frames) != 0: + if rng is None: + temp_n_frames = np.random.choice(select_frames, p = select_frames_prob) + else: + temp_n_frames = rng.choice(select_frames, p = select_frames_prob) + else: + temp_n_frames = 1 + + # Magvae needs the number of frames to be 4n + 1. + temp_n_frames = (temp_n_frames - 1) // sample_n_frames_bucket_interval + 1 + + pixel_values = pixel_values[:, :temp_n_frames, :, :] + control_pixel_values = control_pixel_values[:, :temp_n_frames, :, :] + mask_pixel_values = mask_pixel_values[:, :temp_n_frames, :, :] + mask = mask[:, :temp_n_frames, :, :] + + # Keep all node same token length to accelerate the traning when resolution grows. 
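+ # Each step samples a token budget of side * side * video_sample_n_frames, with side drawn from
+ # {256, 384, ..., token_sample_size}, then trims the clip so frames * height * width stays close
+ # to that budget, keeping the per-rank sequence length roughly uniform across nodes.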
+ if args.keep_all_node_same_token_length: + if args.token_sample_size > 256: + numbers_list = list(range(256, args.token_sample_size + 1, 128)) + + if numbers_list[-1] != args.token_sample_size: + numbers_list.append(args.token_sample_size) + else: + numbers_list = [256] + numbers_list = [_number * _number * args.video_sample_n_frames for _number in numbers_list] + + actual_token_length = index_rng.choice(numbers_list) + actual_video_length = (min( + actual_token_length / pixel_values.size()[-1] / pixel_values.size()[-2], args.video_sample_n_frames + ) - 1) // sample_n_frames_bucket_interval * sample_n_frames_bucket_interval + 1 + actual_video_length = int(max(actual_video_length, 1)) + + # Magvae needs the number of frames to be 4n + 1. + actual_video_length = (actual_video_length - 1) // sample_n_frames_bucket_interval + 1 + + pixel_values = pixel_values[:, :actual_video_length, :, :] + control_pixel_values = control_pixel_values[:, :actual_video_length, :, :] + mask_pixel_values = mask_pixel_values[:, :actual_video_length, :, :] + mask = mask[:, :actual_video_length, :, :] + + if args.low_vram: + torch.cuda.empty_cache() + vae.to(accelerator.device) + if not args.enable_text_encoder_in_dataloader: + text_encoder.to("cpu") + + def vace_encode_frames(frames, ref_images, masks=None): + weight_dtype = frames.dtype + if ref_images is None: + ref_images = [None] * len(frames) + else: + assert len(frames) == len(ref_images) + + if masks is None: + latents = vae.encode(frames)[0].mode() + else: + masks = [torch.where(m > 0.5, 1.0, 0.0).to(weight_dtype) for m in masks] + inactive = [i * (1 - m) + 0 * m for i, m in zip(frames, masks)] + reactive = [i * m + 0 * (1 - m) for i, m in zip(frames, masks)] + inactive = vae.encode(inactive)[0].mode() + reactive = vae.encode(reactive)[0].mode() + latents = [torch.cat((u, c), dim=0) for u, c in zip(inactive, reactive)] + + cat_latents = [] + for latent, refs in zip(latents, ref_images): + if refs is not None: + if masks is None: + ref_latent = vae.encode(refs)[0].mode() + else: + ref_latent = vae.encode(refs)[0].mode() + ref_latent = [torch.cat((u, torch.zeros_like(u)), dim=0) for u in ref_latent] + assert all([x.shape[1] == 1 for x in ref_latent]) + latent = torch.cat([*ref_latent, latent], dim=1) + cat_latents.append(latent) + return cat_latents + + def vace_encode_masks(masks, ref_images=None, vae_stride=[4, 8, 8]): + if ref_images is None: + ref_images = [None] * len(masks) + else: + assert len(masks) == len(ref_images) + + result_masks = [] + for mask, refs in zip(masks, ref_images): + c, depth, height, width = mask.shape + new_depth = int((depth + 3) // vae_stride[0]) + height = 2 * (int(height) // (vae_stride[1] * 2)) + width = 2 * (int(width) // (vae_stride[2] * 2)) + + # reshape + mask = mask[0, :, :, :] + mask = mask.view( + depth, height, vae_stride[1], width, vae_stride[1] + ) # depth, height, 8, width, 8 + mask = mask.permute(2, 4, 0, 1, 3) # 8, 8, depth, height, width + mask = mask.reshape( + vae_stride[1] * vae_stride[2], depth, height, width + ) # 8*8, depth, height, width + + # interpolation + mask = F.interpolate(mask.unsqueeze(0), size=(new_depth, height, width), mode='nearest-exact').squeeze(0) + + if refs is not None: + length = len(refs) + c, depth, height, width = mask.shape + mask_pad = mask.new_zeros(c, length, height, width) + mask = torch.cat((mask_pad, mask), dim=1) + result_masks.append(mask) + return result_masks + + def vace_latent(z, m): + return [torch.cat([zz, mm], dim=0) for zz, mm in zip(z, m)] + + with torch.no_grad(): 
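+ # All VAE encoding below runs without gradients; frames are encoded in slices of
+ # args.vae_mini_batch (optionally on a separate CUDA stream) to bound peak GPU memory.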
+ # This way is quicker when batch grows up + def _batch_encode_vae(pixel_values): + pixel_values = rearrange(pixel_values, "b f c h w -> b c f h w") + bs = args.vae_mini_batch + new_pixel_values = [] + for i in range(0, pixel_values.shape[0], bs): + pixel_values_bs = pixel_values[i : i + bs] + pixel_values_bs = vae.encode(pixel_values_bs)[0] + pixel_values_bs = pixel_values_bs.sample() + new_pixel_values.append(pixel_values_bs) + return torch.cat(new_pixel_values, dim = 0) + if vae_stream_1 is not None: + vae_stream_1.wait_stream(torch.cuda.current_stream()) + with torch.cuda.stream(vae_stream_1): + latents = _batch_encode_vae(pixel_values) + else: + latents = _batch_encode_vae(pixel_values) + + if rng is None: + subject_images_num = np.random.choice([0, 1, 2, 3, 4]) + else: + subject_images_num = rng.choice([0, 1, 2, 3, 4]) + + if rng is None: + use_full_photo_ref_flag = np.random.choice([True, False], p=[0.25, 0.75]) + else: + use_full_photo_ref_flag = rng.choice([True, False], p=[0.25, 0.75]) + + if not use_full_photo_ref_flag: + if subject_images_num == 0: + subject_ref_images = None + else: + subject_ref_images = rearrange(subject_images, "b f c h w -> b c f h w") + subject_ref_images = subject_ref_images[:, :, :subject_images_num] + + bs, c, f, h, w = subject_ref_images.size() + new_subject_ref_images = [] + for i in range(bs): + act_subject_images_num = min(subject_images_num, int(torch.sum(subject_flags[i]))) + + if act_subject_images_num == 0: + new_subject_ref_images.append(None) + else: + new_subject_ref_images.append([]) + for j in range(act_subject_images_num): + new_subject_ref_images[i].append(subject_ref_images[i, :, j:j+1]) + subject_ref_images = new_subject_ref_images + else: + ref_pixel_values = rearrange(ref_pixel_values, "b f c h w -> b c f h w") + + bs, c, f, h, w = ref_pixel_values.size() + new_ref_pixel_values = [] + for i in range(bs): + new_ref_pixel_values.append([]) + for j in range(1): + new_ref_pixel_values[i].append(ref_pixel_values[i, :, j:j+1]) + subject_ref_images = new_ref_pixel_values + + if rng is None: + inpaint_flag = np.random.choice([True, False], p=[0.75, 0.25]) + else: + inpaint_flag = rng.choice([True, False], p=[0.75, 0.25]) + mask = rearrange(mask, "b f c h w -> b c f h w") + mask = torch.tile(mask, [1, 3, 1, 1, 1]) + if inpaint_flag or (control_pixel_values == -1).all(): + if rng is None: + do_not_use_ref_images = np.random.choice([True, False], p=[0.50, 0.50]) + else: + do_not_use_ref_images = rng.choice([True, False], p=[0.50, 0.50]) + if do_not_use_ref_images: + subject_ref_images = None + mask_pixel_values = rearrange(mask_pixel_values, "b f c h w -> b c f h w") + vace_latents = vace_encode_frames(mask_pixel_values, subject_ref_images, mask) + else: + control_pixel_values = rearrange(control_pixel_values, "b f c h w -> b c f h w") + vace_latents = vace_encode_frames(control_pixel_values, subject_ref_images, mask) + mask = torch.ones_like(mask) + + mask_latents = vace_encode_masks(mask, subject_ref_images) + vace_context = torch.stack(vace_latent(vace_latents, mask_latents)) + + if subject_ref_images is not None: + for i in range(len(subject_ref_images)): + if subject_ref_images[i] is not None: + + subject_ref_images[i] = torch.cat( + [subject_ref_image.unsqueeze(0) for subject_ref_image in subject_ref_images[i]], 2 + ) + subject_ref_images[i] = torch.cat( + [vae.encode(subject_ref_images[i][:, :, j:j+1])[0].sample() for j in range(subject_ref_images[i].size(2))], 2 + ) + + if subject_ref_images[0] is not None: + subject_ref_images = 
torch.cat(subject_ref_images) + latents = torch.cat( + [subject_ref_images, latents], dim=2 + ) + + # wait for latents = vae.encode(pixel_values) to complete + if vae_stream_1 is not None: + torch.cuda.current_stream().wait_stream(vae_stream_1) + + if args.low_vram: + vae.to('cpu') + torch.cuda.empty_cache() + if not args.enable_text_encoder_in_dataloader: + text_encoder.to(accelerator.device) + + if args.enable_text_encoder_in_dataloader: + prompt_embeds = batch['encoder_hidden_states'].to(device=latents.device) + else: + with torch.no_grad(): + prompt_ids = tokenizer( + batch['text'], + padding="max_length", + max_length=args.tokenizer_max_length, + truncation=True, + add_special_tokens=True, + return_tensors="pt" + ) + text_input_ids = prompt_ids.input_ids + prompt_attention_mask = prompt_ids.attention_mask + + seq_lens = prompt_attention_mask.gt(0).sum(dim=1).long() + prompt_embeds = text_encoder(text_input_ids.to(latents.device), attention_mask=prompt_attention_mask.to(latents.device))[0] + prompt_embeds = [u[:v] for u, v in zip(prompt_embeds, seq_lens)] + + if args.low_vram and not args.enable_text_encoder_in_dataloader: + text_encoder.to('cpu') + torch.cuda.empty_cache() + + bsz, channel, num_frames, height, width = latents.size() + noise = torch.randn( + (bsz, channel, num_frames, height, width), device=latents.device, generator=torch_rng, dtype=weight_dtype) + + if not args.uniform_sampling: + u = compute_density_for_timestep_sampling( + weighting_scheme=args.weighting_scheme, + batch_size=bsz, + logit_mean=args.logit_mean, + logit_std=args.logit_std, + mode_scale=args.mode_scale, + ) + indices = (u * noise_scheduler.config.num_train_timesteps).long() + else: + # Sample a random timestep for each image + # timesteps = generate_timestep_with_lognorm(0, args.train_sampling_steps, (bsz,), device=latents.device, generator=torch_rng) + # timesteps = torch.randint(0, args.train_sampling_steps, (bsz,), device=latents.device, generator=torch_rng) + indices = idx_sampling(bsz, generator=torch_rng, device=latents.device) + indices = indices.long().cpu() + timesteps = noise_scheduler.timesteps[indices].to(device=latents.device) + + def get_sigmas(timesteps, n_dim=4, dtype=torch.float32): + sigmas = noise_scheduler.sigmas.to(device=accelerator.device, dtype=dtype) + schedule_timesteps = noise_scheduler.timesteps.to(accelerator.device) + timesteps = timesteps.to(accelerator.device) + step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < n_dim: + sigma = sigma.unsqueeze(-1) + return sigma + + # Add noise according to flow matching. 
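+ # The scheduler sigma interpolates linearly between data and noise, e.g. sigma = 0.25 gives
+ # zt = 0.75 * latents + 0.25 * noise, and the model is trained to predict the velocity
+ # target (noise - latents).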
+ # zt = (1 - texp) * x + texp * z1 + sigmas = get_sigmas(timesteps, n_dim=latents.ndim, dtype=latents.dtype) + noisy_latents = (1.0 - sigmas) * latents + sigmas * noise + + # Add noise + target = noise - latents + + target_shape = (vae.latent_channels, vace_latents[0].size(1), width, height) + seq_len = math.ceil( + (target_shape[2] * target_shape[3]) / + (accelerator.unwrap_model(transformer3d).config.patch_size[1] * accelerator.unwrap_model(transformer3d).config.patch_size[2]) * + target_shape[1] + ) + + # Predict the noise residual + with torch.cuda.amp.autocast(dtype=weight_dtype), torch.cuda.device(device=accelerator.device): + noise_pred = transformer3d( + x=noisy_latents, + context=prompt_embeds, + t=timesteps, + seq_len=seq_len, + vace_context=vace_context, + ) + + def custom_mse_loss(noise_pred, target, weighting=None, threshold=50): + noise_pred = noise_pred.float() + target = target.float() + diff = noise_pred - target + mse_loss = F.mse_loss(noise_pred, target, reduction='none') + mask = (diff.abs() <= threshold).float() + + masked_loss = mse_loss * mask + if weighting is not None: + masked_loss = masked_loss * weighting + final_loss = masked_loss.mean() + return final_loss + + weighting = compute_loss_weighting_for_sd3(weighting_scheme=args.weighting_scheme, sigmas=sigmas) + loss = custom_mse_loss(noise_pred.float(), target.float(), weighting.float()) + loss = loss.mean() + + if args.motion_sub_loss and noise_pred.size()[1] > 2: + gt_sub_noise = noise_pred[:, :, 1:].float() - noise_pred[:, :, :-1].float() + pre_sub_noise = target[:, :, 1:].float() - target[:, :, :-1].float() + sub_loss = F.mse_loss(gt_sub_noise, pre_sub_noise, reduction="mean") + loss = loss * (1 - args.motion_sub_loss_ratio) + sub_loss * args.motion_sub_loss_ratio + + # Gather the losses across all processes for logging (if we use distributed training). 
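+ # The scalar loss is replicated to the per-device batch size, gathered over all ranks and
+ # averaged, then scaled by 1 / gradient_accumulation_steps so train_loss reflects one full
+ # optimizer step.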
+ avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean() + train_loss += avg_loss.item() / args.gradient_accumulation_steps + + # Backpropagate + accelerator.backward(loss) + if accelerator.sync_gradients: + if not args.use_deepspeed and not args.use_fsdp: + trainable_params_grads = [p.grad for p in trainable_params if p.grad is not None] + trainable_params_total_norm = torch.norm(torch.stack([torch.norm(g.detach(), 2) for g in trainable_params_grads]), 2) + max_grad_norm = linear_decay(args.max_grad_norm * args.initial_grad_norm_ratio, args.max_grad_norm, args.abnormal_norm_clip_start, global_step) + if trainable_params_total_norm / max_grad_norm > 5 and global_step > args.abnormal_norm_clip_start: + actual_max_grad_norm = max_grad_norm / min((trainable_params_total_norm / max_grad_norm), 10) + else: + actual_max_grad_norm = max_grad_norm + else: + actual_max_grad_norm = args.max_grad_norm + + if not args.use_deepspeed and not args.use_fsdp and args.report_model_info and accelerator.is_main_process: + if trainable_params_total_norm > 1 and global_step > args.abnormal_norm_clip_start: + for name, param in transformer3d.named_parameters(): + if param.requires_grad: + writer.add_scalar(f'gradients/before_clip_norm/{name}', param.grad.norm(), global_step=global_step) + + norm_sum = accelerator.clip_grad_norm_(trainable_params, actual_max_grad_norm) + if not args.use_deepspeed and not args.use_fsdp and args.report_model_info and accelerator.is_main_process: + writer.add_scalar(f'gradients/norm_sum', norm_sum, global_step=global_step) + writer.add_scalar(f'gradients/actual_max_grad_norm', actual_max_grad_norm, global_step=global_step) + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + + # Checks if the accelerator has performed an optimization step behind the scenes + if accelerator.sync_gradients: + + if args.use_ema: + ema_transformer3d.step(transformer3d.parameters()) + progress_bar.update(1) + global_step += 1 + accelerator.log({"train_loss": train_loss}, step=global_step) + train_loss = 0.0 + + if global_step % args.checkpointing_steps == 0: + if args.use_deepspeed or args.use_fsdp or accelerator.is_main_process: + # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` + if args.checkpoints_total_limit is not None: + checkpoints = os.listdir(args.output_dir) + checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] + checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) + + # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints + if len(checkpoints) >= args.checkpoints_total_limit: + num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 + removing_checkpoints = checkpoints[0:num_to_remove] + + logger.info( + f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" + ) + logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") + + for removing_checkpoint in removing_checkpoints: + removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) + shutil.rmtree(removing_checkpoint) + + save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") + accelerator.save_state(save_path) + logger.info(f"Saved state to {save_path}") + + if accelerator.is_main_process: + if args.validation_prompts is not None and global_step % args.validation_steps == 0: + if args.use_ema: + # Store the UNet parameters temporarily and load the EMA parameters to perform inference. 
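+ # store() keeps a copy of the online weights, copy_to() swaps the EMA weights into
+ # transformer3d for validation, and restore() below switches the online weights back
+ # before training resumes.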
+ ema_transformer3d.store(transformer3d.parameters()) + ema_transformer3d.copy_to(transformer3d.parameters()) + log_validation( + vae, + text_encoder, + tokenizer, + clip_image_encoder, + transformer3d, + args, + config, + accelerator, + weight_dtype, + global_step, + ) + if args.use_ema: + # Switch back to the original transformer3d parameters. + ema_transformer3d.restore(transformer3d.parameters()) + + logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} + progress_bar.set_postfix(**logs) + + if global_step >= args.max_train_steps: + break + + if accelerator.is_main_process: + if args.validation_prompts is not None and epoch % args.validation_epochs == 0: + if args.use_ema: + # Store the UNet parameters temporarily and load the EMA parameters to perform inference. + ema_transformer3d.store(transformer3d.parameters()) + ema_transformer3d.copy_to(transformer3d.parameters()) + log_validation( + vae, + text_encoder, + tokenizer, + clip_image_encoder, + transformer3d, + args, + config, + accelerator, + weight_dtype, + global_step, + ) + if args.use_ema: + # Switch back to the original transformer3d parameters. + ema_transformer3d.restore(transformer3d.parameters()) + + # Create the pipeline using the trained modules and save it. + accelerator.wait_for_everyone() + if accelerator.is_main_process: + transformer3d = unwrap_model(transformer3d) + if args.use_ema: + ema_transformer3d.copy_to(transformer3d.parameters()) + + if args.use_deepspeed or args.use_fsdp or accelerator.is_main_process: + save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") + accelerator.save_state(save_path) + logger.info(f"Saved state to {save_path}") + + accelerator.end_training() + + +if __name__ == "__main__": + main() diff --git a/scripts/wan2.2_vace_fun/train.sh b/scripts/wan2.2_vace_fun/train.sh new file mode 100644 index 00000000..c207afb6 --- /dev/null +++ b/scripts/wan2.2_vace_fun/train.sh @@ -0,0 +1,43 @@ +export MODEL_NAME="models/Diffusion_Transformer/Wan2.2-VACE-Fun-A14B" +export DATASET_NAME="datasets/internal_datasets/" +export DATASET_META_NAME="datasets/internal_datasets/metadata.json" +# NCCL_IB_DISABLE=1 and NCCL_P2P_DISABLE=1 are used in multi nodes without RDMA. 
+# export NCCL_IB_DISABLE=1 +# export NCCL_P2P_DISABLE=1 +NCCL_DEBUG=INFO + +accelerate launch --mixed_precision="bf16" scripts/wan2.2_vace_fun/train.py \ + --config_path="config/wan2.2/wan_civitai_t2v.yaml" \ + --pretrained_model_name_or_path=$MODEL_NAME \ + --train_data_dir=$DATASET_NAME \ + --train_data_meta=$DATASET_META_NAME \ + --image_sample_size=1024 \ + --video_sample_size=256 \ + --token_sample_size=512 \ + --video_sample_stride=2 \ + --video_sample_n_frames=81 \ + --train_batch_size=1 \ + --video_repeat=1 \ + --gradient_accumulation_steps=1 \ + --dataloader_num_workers=8 \ + --num_train_epochs=100 \ + --checkpointing_steps=50 \ + --learning_rate=2e-05 \ + --lr_scheduler="constant_with_warmup" \ + --lr_warmup_steps=100 \ + --seed=42 \ + --output_dir="output_dir" \ + --gradient_checkpointing \ + --mixed_precision="bf16" \ + --adam_weight_decay=3e-2 \ + --adam_epsilon=1e-10 \ + --vae_mini_batch=1 \ + --max_grad_norm=0.05 \ + --random_hw_adapt \ + --training_with_video_token_length \ + --enable_bucket \ + --uniform_sampling \ + --low_vram \ + --control_ref_image="random" \ + --boundary_type="low" \ + --trainable_modules "vace" \ No newline at end of file diff --git a/videox_fun/models/wan_transformer3d_vace.py b/videox_fun/models/wan_transformer3d_vace.py index e5b619ca..f344bd6a 100644 --- a/videox_fun/models/wan_transformer3d_vace.py +++ b/videox_fun/models/wan_transformer3d_vace.py @@ -13,6 +13,7 @@ from .wan_transformer3d import (WanAttentionBlock, WanTransformer3DModel, sinusoidal_embedding_1d) +VIDEOX_OFFLOAD_VACE_LATENTS=os.environ.get("VIDEOX_OFFLOAD_VACE_LATENTS", False) class VaceWanAttentionBlock(WanAttentionBlock): def __init__( @@ -45,8 +46,16 @@ def forward(self, c, x, **kwargs): all_c = list(torch.unbind(c)) c = all_c.pop(-1) + if VIDEOX_OFFLOAD_VACE_LATENTS: + c = c.to(x.device) + c = super().forward(c, **kwargs) c_skip = self.after_proj(c) + + if VIDEOX_OFFLOAD_VACE_LATENTS: + c_skip = c_skip.to("cpu") + c = c.to("cpu") + all_c += [c_skip, c] c = torch.stack(all_c) return c @@ -71,6 +80,8 @@ def __init__( def forward(self, x, hints, context_scale=1.0, **kwargs): x = super().forward(x, **kwargs) if self.block_id is not None: + if VIDEOX_OFFLOAD_VACE_LATENTS: + hints[self.block_id].to(x.device) x = x + hints[self.block_id] * context_scale return x diff --git a/videox_fun/utils/lora_utils.py b/videox_fun/utils/lora_utils.py index b0766259..9b683c1c 100755 --- a/videox_fun/utils/lora_utils.py +++ b/videox_fun/utils/lora_utils.py @@ -383,6 +383,14 @@ def merge_lora(pipeline, lora_path, multiplier, device='cpu', dtype=torch.float3 key = key.replace(".self_attn.", "_self_attn_") key = key.replace(".cross_attn.", "_cross_attn_") key = key.replace(".ffn.", "_ffn_") + if "lora_A" in key or "lora_B" in key: + key = "lora_unet__" + key + key = key.replace("blocks.", "blocks_") + key = key.replace(".self_attn.", "_self_attn_") + key = key.replace(".cross_attn.", "_cross_attn_") + key = key.replace(".ffn.", "_ffn_") + key = key.replace(".lora_A.default.", ".lora_down.") + key = key.replace(".lora_B.default.", ".lora_up.") layer, elem = key.split('.', 1) updates[layer][elem] = value @@ -496,6 +504,14 @@ def unmerge_lora(pipeline, lora_path, multiplier=1, device="cpu", dtype=torch.fl key = key.replace(".self_attn.", "_self_attn_") key = key.replace(".cross_attn.", "_cross_attn_") key = key.replace(".ffn.", "_ffn_") + if "lora_A" in key or "lora_B" in key: + key = "lora_unet__" + key + key = key.replace("blocks.", "blocks_") + key = key.replace(".self_attn.", "_self_attn_") + key = 
key.replace(".cross_attn.", "_cross_attn_") + key = key.replace(".ffn.", "_ffn_") + key = key.replace(".lora_A.default.", ".lora_down.") + key = key.replace(".lora_B.default.", ".lora_up.") layer, elem = key.split('.', 1) updates[layer][elem] = value From 2bc9678b5394e3b6d75d62a8d5693b7288fad796 Mon Sep 17 00:00:00 2001 From: bubbliiiing <3323290568@qq.com> Date: Wed, 17 Sep 2025 16:09:30 +0800 Subject: [PATCH 3/8] Update transformer3d --- videox_fun/models/wan_transformer3d_vace.py | 1 + 1 file changed, 1 insertion(+) diff --git a/videox_fun/models/wan_transformer3d_vace.py b/videox_fun/models/wan_transformer3d_vace.py index f344bd6a..fabbfa85 100644 --- a/videox_fun/models/wan_transformer3d_vace.py +++ b/videox_fun/models/wan_transformer3d_vace.py @@ -3,6 +3,7 @@ # Copyright (c) Alibaba, Inc. and its affiliates. from typing import Any, Dict +import os import math import torch import torch.cuda.amp as amp From 3802750bfa45648d427669a03ca4a6526a38f147 Mon Sep 17 00:00:00 2001 From: bubbliiiing <3323290568@qq.com> Date: Wed, 17 Sep 2025 17:32:42 +0800 Subject: [PATCH 4/8] Update recompile --- comfyui/cogvideox_fun/nodes.py | 17 +++++++++++------ comfyui/comfyui_nodes.py | 26 +++++++++++++++++--------- comfyui/wan2_1/nodes.py | 16 ++++++++++------ comfyui/wan2_1_fun/nodes.py | 15 +++++++++------ comfyui/wan2_2/nodes.py | 16 +++++++++------- comfyui/wan2_2_fun/nodes.py | 16 +++++++++------- 6 files changed, 65 insertions(+), 41 deletions(-) diff --git a/comfyui/cogvideox_fun/nodes.py b/comfyui/cogvideox_fun/nodes.py index a9fff32c..22a4ac93 100755 --- a/comfyui/cogvideox_fun/nodes.py +++ b/comfyui/cogvideox_fun/nodes.py @@ -198,13 +198,18 @@ def INPUT_TYPES(s): CATEGORY = "CogVideoXFUNWrapper" def load_lora(self, cogvideoxfun_model, lora_name, strength_model, lora_cache): + new_funmodels = dict(cogvideoxfun_model) + if lora_name is not None: - cogvideoxfun_model['lora_cache'] = lora_cache - cogvideoxfun_model['loras'] = cogvideoxfun_model.get("loras", []) + [folder_paths.get_full_path("loras", lora_name)] - cogvideoxfun_model['strength_model'] = cogvideoxfun_model.get("strength_model", []) + [strength_model] - return (cogvideoxfun_model,) - else: - return (cogvideoxfun_model,) + lora_path = folder_paths.get_full_path("loras", lora_name) + if lora_path is None: + raise FileNotFoundError(f"LoRA 文件未找到: {lora_name}") + + new_funmodels['lora_cache'] = lora_cache + new_funmodels['loras'] = cogvideoxfun_model.get("loras", []) + [lora_path] + new_funmodels['strength_model'] = cogvideoxfun_model.get("strength_model", []) + [strength_model] + + return (new_funmodels,) class CogVideoXFunT2VSampler: @classmethod diff --git a/comfyui/comfyui_nodes.py b/comfyui/comfyui_nodes.py index a40efe0f..fe1d0b42 100755 --- a/comfyui/comfyui_nodes.py +++ b/comfyui/comfyui_nodes.py @@ -84,20 +84,28 @@ def INPUT_TYPES(s): def compile(self, cache_size_limit, funmodels): torch._dynamo.config.cache_size_limit = cache_size_limit if hasattr(funmodels["pipeline"].transformer, "blocks"): - for i in range(len(funmodels["pipeline"].transformer.blocks)): - funmodels["pipeline"].transformer.blocks[i] = torch.compile(funmodels["pipeline"].transformer.blocks[i]) + for i, block in enumerate(funmodels["pipeline"].transformer.blocks): + if hasattr(block, "_orig_mod"): + block = block._orig_mod + funmodels["pipeline"].transformer.blocks[i] = torch.compile(block) if hasattr(funmodels["pipeline"], "transformer_2") and funmodels["pipeline"].transformer_2 is not None: - for i in range(len(funmodels["pipeline"].transformer_2.blocks)): - 
funmodels["pipeline"].transformer_2.blocks[i] = torch.compile(funmodels["pipeline"].transformer_2.blocks[i]) + for i, block in enumerate(funmodels["pipeline"].transformer_2.blocks): + if hasattr(block, "_orig_mod"): + block = block._orig_mod + funmodels["pipeline"].transformer.blocks[i] = torch.compile(block) elif hasattr(funmodels["pipeline"].transformer, "transformer_blocks"): - for i in range(len(funmodels["pipeline"].transformer.transformer_blocks)): - funmodels["pipeline"].transformer.transformer_blocks[i] = torch.compile(funmodels["pipeline"].transformer.transformer_blocks[i]) - + for i, block in enumerate(funmodels["pipeline"].transformer.transformer_blocks): + if hasattr(block, "_orig_mod"): + block = block._orig_mod + funmodels["pipeline"].transformer.transformer_blocks[i] = torch.compile(block) + if hasattr(funmodels["pipeline"], "transformer_2") and funmodels["pipeline"].transformer_2 is not None: - for i in range(len(funmodels["pipeline"].transformer_2.transformer_blocks)): - funmodels["pipeline"].transformer_2.transformer_blocks[i] = torch.compile(funmodels["pipeline"].transformer_2.transformer_blocks[i]) + for i, block in enumerate(funmodels["pipeline"].transformer_2.transformer_blocks): + if hasattr(block, "_orig_mod"): + block = block._orig_mod + funmodels["pipeline"].transformer.transformer_blocks[i] = torch.compile(block) else: funmodels["pipeline"].transformer.forward = torch.compile(funmodels["pipeline"].transformer.forward) diff --git a/comfyui/wan2_1/nodes.py b/comfyui/wan2_1/nodes.py index a31c4a09..32c59454 100755 --- a/comfyui/wan2_1/nodes.py +++ b/comfyui/wan2_1/nodes.py @@ -612,13 +612,17 @@ def INPUT_TYPES(s): CATEGORY = "CogVideoXFUNWrapper" def load_lora(self, funmodels, lora_name, strength_model, lora_cache): + new_funmodels = dict(funmodels) + if lora_name is not None: - funmodels['lora_cache'] = lora_cache - funmodels['loras'] = funmodels.get("loras", []) + [folder_paths.get_full_path("loras", lora_name)] - funmodels['strength_model'] = funmodels.get("strength_model", []) + [strength_model] - return (funmodels,) - else: - return (funmodels,) + lora_path = folder_paths.get_full_path("loras", lora_name) + + new_funmodels['lora_cache'] = lora_cache + new_funmodels['loras'] = funmodels.get("loras", []) + [lora_path] + new_funmodels['strength_model'] = funmodels.get("strength_model", []) + [strength_model] + + return (new_funmodels,) + class WanT2VSampler: @classmethod diff --git a/comfyui/wan2_1_fun/nodes.py b/comfyui/wan2_1_fun/nodes.py index e934aa8e..91bb64f8 100755 --- a/comfyui/wan2_1_fun/nodes.py +++ b/comfyui/wan2_1_fun/nodes.py @@ -238,13 +238,16 @@ def INPUT_TYPES(s): CATEGORY = "CogVideoXFUNWrapper" def load_lora(self, funmodels, lora_name, strength_model, lora_cache): + new_funmodels = dict(funmodels) + if lora_name is not None: - funmodels['lora_cache'] = lora_cache - funmodels['loras'] = funmodels.get("loras", []) + [folder_paths.get_full_path("loras", lora_name)] - funmodels['strength_model'] = funmodels.get("strength_model", []) + [strength_model] - return (funmodels,) - else: - return (funmodels,) + lora_path = folder_paths.get_full_path("loras", lora_name) + + new_funmodels['lora_cache'] = lora_cache + new_funmodels['loras'] = funmodels.get("loras", []) + [lora_path] + new_funmodels['strength_model'] = funmodels.get("strength_model", []) + [strength_model] + + return (new_funmodels,) class WanFunT2VSampler: @classmethod diff --git a/comfyui/wan2_2/nodes.py b/comfyui/wan2_2/nodes.py index 7a2a0cc2..8d86fdb3 100755 --- a/comfyui/wan2_2/nodes.py 
+++ b/comfyui/wan2_2/nodes.py @@ -463,14 +463,16 @@ def INPUT_TYPES(s): CATEGORY = "CogVideoXFUNWrapper" def load_lora(self, funmodels, lora_name, lora_high_name, strength_model, lora_cache): + new_funmodels = dict(funmodels) if lora_name is not None: - funmodels['lora_cache'] = lora_cache - funmodels['loras'] = funmodels.get("loras", []) + [folder_paths.get_full_path("loras", lora_name)] - funmodels['loras_high'] = funmodels.get("loras_high", []) + [folder_paths.get_full_path("loras", lora_high_name)] - funmodels['strength_model'] = funmodels.get("strength_model", []) + [strength_model] - return (funmodels,) - else: - return (funmodels,) + loras = list(new_funmodels.get("loras", [])) + [folder_paths.get_full_path("loras", lora_name)] + loras_high = list(new_funmodels.get("loras_high", [])) + [folder_paths.get_full_path("loras", lora_high_name)] + strength_models = list(new_funmodels.get("strength_model", [])) + [strength_model] + new_funmodels['loras'] = loras + new_funmodels['loras_high'] = loras_high + new_funmodels['strength_model'] = strength_models + new_funmodels['lora_cache'] = lora_cache + return (new_funmodels,) class Wan2_2T2VSampler: @classmethod diff --git a/comfyui/wan2_2_fun/nodes.py b/comfyui/wan2_2_fun/nodes.py index a084c507..a862705f 100755 --- a/comfyui/wan2_2_fun/nodes.py +++ b/comfyui/wan2_2_fun/nodes.py @@ -258,14 +258,16 @@ def INPUT_TYPES(s): CATEGORY = "CogVideoXFUNWrapper" def load_lora(self, funmodels, lora_name, lora_high_name, strength_model, lora_cache): + new_funmodels = dict(funmodels) if lora_name is not None: - funmodels['lora_cache'] = lora_cache - funmodels['loras'] = funmodels.get("loras", []) + [folder_paths.get_full_path("loras", lora_name)] - funmodels['loras_high'] = funmodels.get("loras_high", []) + [folder_paths.get_full_path("loras", lora_high_name)] - funmodels['strength_model'] = funmodels.get("strength_model", []) + [strength_model] - return (funmodels,) - else: - return (funmodels,) + loras = list(new_funmodels.get("loras", [])) + [folder_paths.get_full_path("loras", lora_name)] + loras_high = list(new_funmodels.get("loras_high", [])) + [folder_paths.get_full_path("loras", lora_high_name)] + strength_models = list(new_funmodels.get("strength_model", [])) + [strength_model] + new_funmodels['loras'] = loras + new_funmodels['loras_high'] = loras_high + new_funmodels['strength_model'] = strength_models + new_funmodels['lora_cache'] = lora_cache + return (new_funmodels,) class Wan2_2FunT2VSampler: @classmethod From 397c5ca60bf58dc8753bb1cb2b9c4545acfb26b9 Mon Sep 17 00:00:00 2001 From: bubbliiiing <3323290568@qq.com> Date: Wed, 17 Sep 2025 20:16:34 +0800 Subject: [PATCH 5/8] Update recompile --- comfyui/comfyui_nodes.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfyui/comfyui_nodes.py b/comfyui/comfyui_nodes.py index fe1d0b42..83ffb51b 100755 --- a/comfyui/comfyui_nodes.py +++ b/comfyui/comfyui_nodes.py @@ -93,7 +93,7 @@ def compile(self, cache_size_limit, funmodels): for i, block in enumerate(funmodels["pipeline"].transformer_2.blocks): if hasattr(block, "_orig_mod"): block = block._orig_mod - funmodels["pipeline"].transformer.blocks[i] = torch.compile(block) + funmodels["pipeline"].transformer_2.blocks[i] = torch.compile(block) elif hasattr(funmodels["pipeline"].transformer, "transformer_blocks"): for i, block in enumerate(funmodels["pipeline"].transformer.transformer_blocks): @@ -105,7 +105,7 @@ def compile(self, cache_size_limit, funmodels): for i, block in 
enumerate(funmodels["pipeline"].transformer_2.transformer_blocks): if hasattr(block, "_orig_mod"): block = block._orig_mod - funmodels["pipeline"].transformer.transformer_blocks[i] = torch.compile(block) + funmodels["pipeline"].transformer_2.transformer_blocks[i] = torch.compile(block) else: funmodels["pipeline"].transformer.forward = torch.compile(funmodels["pipeline"].transformer.forward) From 0fa3ccf280b2196050c287db9efb62fbe9c6d805 Mon Sep 17 00:00:00 2001 From: bubbliiiing <3323290568@qq.com> Date: Thu, 18 Sep 2025 15:18:07 +0800 Subject: [PATCH 6/8] Update mask inference --- asset/inpaint_video.mp4 | Bin 0 -> 195038 bytes asset/inpaint_video_mask.mp4 | Bin 0 -> 4768 bytes examples/wan2.2_vace_fun/predict_i2v.py | 24 +- examples/wan2.2_vace_fun/predict_s2v.py | 24 +- .../wan2.2_vace_fun/predict_v2v_control.py | 24 +- .../predict_v2v_control_ref.py | 24 +- examples/wan2.2_vace_fun/predict_v2v_mask.py | 387 ++++++++++++++++++ 7 files changed, 455 insertions(+), 28 deletions(-) create mode 100644 asset/inpaint_video.mp4 create mode 100644 asset/inpaint_video_mask.mp4 create mode 100644 examples/wan2.2_vace_fun/predict_v2v_mask.py diff --git a/asset/inpaint_video.mp4 b/asset/inpaint_video.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..7880f0e58cd681226ee374ef45ef4575f986e0af GIT binary patch literal 195038 zcmX_nV{~Rs&}}%u#I|kQwrxGJZQHhOdt%#mCbl)v&HH`#-XEuXch#=ys_I^=d#!U0 z5D*Z7sk4W@g_E5P5D*B^fBnzHWaw(lXk*X92m}NKY3c|70D;{1+n5+S|8VMlfq#Ej zZHb0Sg-=v!OBbk3xd(hd?i@ zASzDFLLjIn{KGT>82>1Q?d(0Q0jACbObiUnv`h?4|G6=DcDCoDr+0I6qjR+|0oYj^ z+S1uMn$iC^3!S;Mjr9-5&feL=&en;Gz}V2p(3qEzz!6}|%S>PbFtWBYw&G>vV&GyR zFtjza_HY95GPpByF}O1_G85PUc+COs1WqnSKZu>c-pS*~^rzK#G~s2WWB4)pX$WjA z+yN%~|9NEmvCwxkv^4|pGO`gEn>*Us80!C+G7>mD0<5hqoPH38JBNv}^A9j~u;FF+ z3Bl0B)6N#a%fv{-$Vgyn=;W+#?__0R{~zOj9XQzQ+nJg=0i1bhnFyTC9e*r-9I+Bu z+u2zen*WUS|DTYFz{%Re_$QeECtx73b^M=6j4f;oo&O8O!qyq!Xl?jI`jH!1yEqzp z=o{PF*c&?kjE#S4 zOw4o)1P=ct!^=R&_5K0YY=Hlk z6UZM3h?fx%9t`?@)z>e08#Y2`;Fd12dzr#Rx{RU&kMAJ)aZI%8M5bd1?!k!g3`X>?Z~_ImJ=od2NEP z2oTK~9z_|%H?G@iBcaqSohrO2%UD6gn4YbPu`Sq zh}@ajF)BEB_RrtfChRYI4&vF(^T0dk*wH31_dvf|ivY~#M;FKSb^R`M-fpqKHsVgz z=E>d)`&J^GjyQ?h%;`S8-`FJHtTbWBJ%=amnbcfi?AjVaG|_B!!P7)l{~+}x(#qs; z(bcr*TTxnXqN7!{fcY~v>W^H$keI51LF@B|#b|)}o#n7t*erL&FAqcL{9R|=BPXmI z__D!_U1CIztig6ZkEGfBMBQ7b`%6^L#sMHnaBHQd(w0v5LB?I(F1M`-7l3cCQ6n|* zaItyox)Kr&+unlmm}3texIIc#nuy+~TvBplYiL8S7NLl5S|a=Zy9J{RG7h<1wP5PvuMwD8Q3nX-Pmx^pW06}4K@ zfpP+D5m>XYM(&YxYyUm$k9~^=Zo<2VJAR^IVvT=o$hvdgwqbM!%~tKtt86$F+_=BX zS5~-pQIX-y$HoM84R(@z|issbg4?`uyMHfru_+W9s4QO8C#+fn4r7l~uy~fj3{@YF*1x}BBansT}l(M|#R#+BBt)ID|-fV^{?6pQm zt{hrNlaAjyqZnu~Ry zNE4L0^t4!?3+lbEax@G`Z=gV06(+#SSp()!QZihQdcCvk4)Tevji>e0PGX-MkL;)= zsMXq0iN7xQ?w6BBSe5w8Z_0ICw2X$w2)QM%tP&_ta<)3t3vwzxz5_5(?9$K%(pqwb zBtv26oN0+zm0#szF@n?D>qmZ@HCYWV&7WmRO7?YiE@&9yki!ZxvzplW;HS??lRNUx zCk1Zrp<=NlHvlb1HqP7mMIwAn?Kxt-V9u5Tqjyi~Xo*7N4}WM*9z^4=*@}WGH1cIy zlW}(Hgl&YP=S&kXq<(T9Ph1<;nv*}g5EtICXrP{T@UyIWzwrO7GsHu}luk1!yz{K-g5Wl?!w3ZIBoh(l<7ap2@bYY3X)Ma|eMyWsIyl`_IP1f9kQ z5=e30Op)QibXhj2;-f*jEytL#^?`Izy~k){d*nVp$wta(Xp}7X#F9;MS*y+so{|vS zVtaa@UPMP%fHSr2dt8CK=wj}f5njAhR`IQfbrs0X8+CP=;BuMgXgib5@V^L-O4LX` z=dvie--lqNmT+PWAopr$L)Ml*);707h82)EC`)~F;vkwmXQO;_Ij!8zg0a(ci4I#1 z`|0Uq&ovz0hZK*qA7pz@TXVNu`=6qmGyC4Kpkz33oyf&AF5tgcpo0N3X;s)mOadn6 z8j2u$Uri!PedI>@o8>gXZ_{WBoV^o<+SH0fYhU$6mT8Id%~CFZ?u4Re>2y6#(c1J_ zpXk>q|7OJFu;7juV8>{_?J*#LPAf+p(S^M`ZRyRnN9aF?P^`j5&&1@}#i>VfDU+u| zoUp!y%%PL@;Co){MVdFU+b9x}s~ag-OyI%3zmgjvY*c3Sy8{{WW8Z(3Gty8aBeCCT 
z$G9tkAp^H=L1gCgFsM24eX`5u{4*sTsNr^5LGJ9QOSiucZ$UPBYrp^i1S>(GQbiRl zf92+ig~ey4INvPstOHCt>pcG|7#=i}+ZqQ?WeNMW*j)5L6v|YAvAM{K!70zzTPsOc z4AOr`2SYB7XR=AteT0AJ+uvQVZeoN6 zJrG1wil+4wVOlEI)?!)UgNM3$9^KvsWaheIyiHDSQPG*N*&qi%BVb9^%KCRmhgO$K z3-{wfn9Br#goW4TeOz^2_l9JO$|TjO0RNmBwC88?BB6U_P|sfJ)8n4kso9Or~(tqhOG9)H8xjIWM^p zC7#P@%QZZ2e6)wA$T(>hVvB|gJ_`%5hva<_fEcl~*B)+Xi6I1PUq?SP)Pq2c>a7{T zZrQw0aiLeMnFTeWt9$Y99^*T6*aC5%Un-bh-JzSkSvJRRapzByG@|ap*!s22{!;Cf zOTwl$$r1{HimsQzX)V%XfD@B=fz;$K9;`T53j-YXRy+d5mA0Z39zP{@1f$j(sFw7| znLk~OIv=smsclc{&8;70NP^5Q|+q5<$yUJQNJ~Z#hk>={la5VQJ;AV z$ovjgLBw0X(AF^65)KtX!ndf3P|w)hzH_Z``*5+2u(0qwdT}mjj9@;M5sHt#DN|~H zWLomBO+3J0sm|x6-C?W@g~@T)95bzdv99>!RS7sZh6_NG?$O(qsDnML1Eo{_ zL%_HFo|N^}y}N><6~6mwQ5n#X!X0z41n<@2m@zsv3T@vDJH<3srR}~8s9;Z~a-s`? zVs3p^Xo-WIVZ}RTIHGMihj;p>$2}q2>0A>fC~L$uayH*&rIjIOmWPV`+WUR*o8YeS zSrd?QScxze-bm07CejzP;XB-scK`qbJ^`O`bWQ&|5yCG8;$s4<)KDcMR82tK>2a@T zEo`11)ME80?y>M40By~lw7MqSDxixn7!@I_)GPs3N36l+FlVZ#H(_1S7{uNWfG7)szO`V@0e z3M~nO#bOnf#9A=;*$<)gqU`q~LWPm^LQ2l`HQveIfz@FC_rTvtWB6W}>{@<#=@QOP zScxu{lft+HETn>B5*SNA{8EC~e;nWef8o;eO|X{M1nkG(nawyr5R${X3Dzc-eJ*SU zzJ_N^yzdEUO>CoxZ+*POI!EG=xcKb=tOe09pv+(XJT=#b4rXZ>eFSpIX0#LT7$PBT z2)WPInkO~VSEhTk56oo1Ujhlc74zCmjQ(j;S2R!gzN-i00bdTBJ@@OS_&IVEu8SX| zz3@-U3<-4Be)q~p0FM`R&`UIcpz1US_a9^_)6mYKkS1v-kfnE`S%}+^Gi!(Epl>J_ zFzYuZU5bcg63eGU3}7+S9XKC7%)!VPS^L>5z{wg?ziBcd6}P80W!4iH-(_qkpV`la zv?m+AxE7XRSI-f45XgI`B0(1;%e9+A1Rj}{^`h1VcSFsXgB>Ok%G-kaBaM^*BhtF! zzJ|dC&^o$V;G~B< zHbv~a9g5g|j^W7m)4OLD9y8i#ObKl$CaD4g@n#GCG&Z*&-tscol&OOU=(Hz|Vf!8* zkbn4tw69giucU6UJTLt-J+eoBTB!j8R;J)-0(cs|y08gW#1AP?H5VSr6h2OweqZew zZ;c1Vl5KDp;rQCbR#cY|%b0-JOXJp^ECu(q=lYZpAHkYv@fs+|wP8^)fbNtvsrB5# zbsvuSFq6uek|}@7*KVOrUy}5N6$eNMudYw!oDU;uIx%jWeBTmGy>^4u(QgYvK_xqH zw_hBr*>fian~52fG#8M!ZJ27XlesEa|LTDz_ZjIy`frQDHHjdtx!O7Wr7i#$dbHmt zMJl8OE~v@SLm!?F4~`iag*8HUiD#~!aHKv zB(aM7%a+h|h(L^u$&9tBkK)<`FDcP2a--p(lmQG;O*jen)L610C84XYgFFlufKS?a z_3IOaw~Q%;>d7Ne^BcZ9pOaMdg~Bo#{f*KgIrI^d@1Yu2vfCSDM_NdL`fQrv8873% zGpz0r(iUAi-krG~f;yEb)aghM8)8T1WXt8ytN(5Nd;FawaGiFH4SU4`Ai!EL;~iKw z4BFJI+Y8FtzB}eZJI!jNC;y9eudBl-#`;xPMXv|h_U|rdDDkfts@U|SNp^lP2X`|l z+O?qDVvWgO%KVU@IbO?yq~z3w6&lByg6w1tQ~w-g1=oBOX}_=W4y;@=uQ z-1afv5|yo_+XKr}jhmB;#NBiAT0G8hSGm>g0tEM958uv}l@@*DTZ*&qo6lNn(=(X> zUpwzUVOqE=|HRGz6M=0-24iGo@$5WN5SxU#pY~Zu4{&Qkpyj4Kww(Mvk{orykDeF& ziFuc&$12jXHR)ORsB_qeRz-9u+DV*fvRRZ9GD5UTaunK>ba9r<#Tsafr|vPmE8$74 zYjD!gEWruE`E)Sy z*{oP5BGf6RTnA7~c{U=VAa+W)qaegxRz}O0hou?56HxQOCWo%oALM5TT1QfK>Pllq z)E#2HWv-$Gimkh8oFFCnyxj&~DsNWVBub{m<0>~YhOLn101p|f=S)K9oQpUHZ&tjQ zzqnY0(4HoqM3pYB2&Ic3$Qtm*>$H+Qe4F7W*ja?eqaOjtiRaybP8-MV!*46BSf}$kHD?fVG1<}27W{T7|^?}iCgUqz=QRT!2PyZgZC8)Py zS+w4=2u_Qp6`O*~J^#wsKbi8E4sUNsSP*Nu*($1)^q6NcH<;=~^7T8&4qZucb1 zC{-c<{UtbA(t7yRgLKeY;0#o15&Q!iKE}0=^^G_w85nQd59r#^qX>!1O0Fg{=WEAB zj&~ulx9%DtKMht{Kd2_p(ZZpJD3=yX<>FyNK|tqmNChWZ!9tF7Cn>@?qhS?Se_Uyyer%)f z4iE+dTKpAU4z_O9$iUAWpUJ*Qs@nA1Qf=abtfzGQE3946TPrzDAasFG*(x!Smt7^f zX(gLN2@L?T+5(HX3p}x*_SIg?QH@i!;i!##93cqwwQ&GZFyQg;aLT<0`F)6{{U%HT ze$QR2>m(xz=fqJ`bX3p4lp6=%04Y0J2*5AmHoHbn`#J|5$IWs5ivvMRz3{X}Wjbu9 z>p>>UQ-U2?m|3*m^(ncMU8XaZU|i~dpk%n(kxF?P5WRN?a0PC1V{*ImbXX1MOIo%8 zg~tf93HwN&bu5Wn7cJMM@tU*Nq%{TggPp)DL7e~r3v{{#i%tiS4?hk2LK$MB{s1+| zR$570*U(UiVL3YJ@fJraIB$9PKJ8z@g^|udYZrLzCCw7m7W*IG2pH!@2Hs1Wd&I_D z6m_>&45@E~MT%QnkXM)Y9{ZidN=wm0r$8ilV{4j03j2(92O{)a)>|*8iMEsR&RxTQ zAlZV0!9X;VLjGLPXcvk;$45B*={&ECv{Vp*Z+%(trggYE)zIH+YXnJ%uxMJ<_$Tq6Dc7w zy!&ct)GRkpw#~mLFM6tLX~CY*_U|($%E5B5s$I&?lz}#F$ioiqPDHPbTszz05DeWq zv^fObFkHBn%Y1=vENsi3N2me3GjHf}UbS(DH+iF3*@>C?(ym9>qaoinURNSWr)kKF 
zwWS!Qu{JxHY{9#R*^=rGMId)Tp0?cUmb!lY(r~#Iz_|u`pppPC)ipJ{t_D(A%{^&Q zoRM}V=9CO`^Go`hYYKoHGU{|xkKkaSk!TbX8LCPqr9>fSUb7hL>CeU%I`#FDc~5oMXr0k3 z2y`QmorC78NhGMb0jh7&FIh;MfwalIxW(=5#5L9GunKJ3Sv8#%VMim7uo+?_aho9` z)@}x$;LVJ99d&DobCn7LHiQ89pnVW6&G$hQd)uO3uD{1yFwW!}1B>|qYBWQ@6AxKB z@g3I&?zl<1v1!$L9CboxroBPh{9@u^l5a~7Cw()5X!XPn=X3^XA*~#WUd4rqN^MdOO z+0*lXk?<6O0Qz*0mgLaX;2r0wDRR@;XMv)_PWZY*h!YFTE_Jg!+1Xct>Ko;D4Jcd` z*gooMeFH)AR#r9~;{A}k%x>ucU>SMFPil}uWD=8wfzq{0cH}a5=Q#Z`9uajG=jiY) z;Jplc_YLvC3UoQS!?(*<2oc_DQ?q(((#{>sEBE?pa+yT4b<}$HuCX2yAM|otTYvW! z*pEm}XLogxLPg=A@zE?E(%eC3qWOP;`pG~9nO(WUth4>nI5F)h%CsidnKRxD%!{n{$llnKAmYh*(2#r)Xmx;ry)C< zC`m}B!cgyA)Ku>Chu=qN*}Dz%XM)JI<=8*h)nP=FAIVuqy>w0`UE>ZDjMwNp@lXgf z91tyIwJ5>D>y|}hKa&7OuY(k8w%I9)V8Ongb>S%5aKU*@m25kvFZihf-* z3fn#9S8!GVd5S%34`k~AB1;_#mVRglZH`yybf-N8VM9E1h!1lv#OHy}zg$oCocp^y z7Cyl8Tk|0|0K;)k+_jB4c^W9UM#D~1jvv?G@~A`3HkL}Ip6Tlwca>4jWap5Ejh}hj z0nXnQ>a~&;iY!Q~3>#|%FM5V}CG6|h&s{UGJk(spCD?99&SVpFdX3r4>Q_*jF}kdp z;*p-&aJlfq$`E~Zt)B{yZ2==$HI|=9+Vi)easYkU?mI>2-7DJSqstZ8fUmNruwZ~H ztPD@hbsnF`qS6>P;pifRA#h#Qw^2*7OE3FP>IRP*N(K%fz@ypoNR>Bcqf`G9kQFP@ zYMtU7tq%4?Zvy&)uE0MK^r;grjMCQp&0>|cDSYHUV3wu82YU~nWOJT}HcqN#^`=1C80So0yU~JI_ZUj~(B0iri^`VvL;ObZKeR1&u({0r6bZ^$&JK2rZ9HJSTY%qe{>kMpG=} zW*m@Yhy)RV#XP+Ur#;I?jjz4cx$;Z+q-ZES6GygbT@7-_CA|=Uu%a5M6bF|c*z?~G zEN#QraDNuxRCZF)*{G@ZJ&nHftox+OHooFX&cl+V%&NV31Y* zTxFzc{Q8Zh;RdMUKc82?JyaXudnaAnCEI(z3UAS4xSGLF~mKIqyg?c7k_ z_*IOU&8-4n{fD56ck2mSil2-moDHT3Z7}e2f9+G8H=ilr{l7F%@@8EDx{>Z@!Z%}p z$}UIF)#F;hS2(h+L7nfKAlZ23!tRgu6sfm!*xPWtQ{NP#nkFPZ7S@5k*0$C`X9w{u zL*>;DM3YB*#)axC5_t-1MPM{;Cvb^+>6_nB`bAZ6?CR=ah>XN1sS5XLE?|TQ1ZipR zLtou<`gA8oU1yMoNG3s~4=5D_=y86n;(dZV!E_Kz!R)%>}eEUVVlGn(%~%_S&^m5)gH zRSsjKDuU|o%6`9Jo>0%nI}ECra{8%IAt#p!+Ys-s{WecWA2kGZ+^`$Ycxuf?b^%+) z)_~g7I;Pydcttu;D>r3 zr@ftJqw}7bkR1nkpuW=fP5cyi?IB8k16P3mOIS+n{44}!q~OO9jO^r)$fx1jgj?52 z_+}Ld^%oYmEv#GtKgvg>t?nETW^wlCs;yot(+j?CVjuP`AbOo)+0D;FupqF(sj0ng zM%Ov#(n$}@$Xe=8Pm+rr0=JiK)2jqN7RKCP@QYm+wCu)k(Iaqa=P076`ThDsFYg=w zt<5S+nr4Bzpvq138Og*_;Z8>b`<30pD&K0{nru?WNoy!CiU>+XI|+r z8M8%gE^lw`$+CrtGk!&*{M?D2AdZ&^BRT3g|2d~}rZJunA7UZ+{kZcrTiH&~1F4E? 
zMSZrE8${Zj)C6(Uk&Qw1*KvPSB9SZkWq9(BV31yo_;G#_&3*WL?l}SZRy}8voaNIc zL71Zwg3Fnatb<1}o>HT>VoZ<5?W^mJj#S!)&n@*oN4S53q&N5sBBQ3d2Ht2r`n^BJ zqZ?YKg1!0w?PagCulKwzo=R6DbI?_J;~CSbxHp5BuEZnq7FX-CBxSHPU{=dd`!f5* z32e=ay{MpPAz4+gEH%WW*i9BV+qw#Y%`N(e`zl@@rLS4As||-7>m)MBpJw`{`3f3V z4|wFZVznq=O=jsOx@h@*{kx5_n}fj+N!Ig4*dW?+kD;1#ImU;{*h?+XE6qS6kSJ( zqLCPLPyM3+RSZtCWWfiu293Z^ILGcFM=J_SZ_#fl8q06fYobw<_bsR8=HTVT13PYW z3@lvHHwtM6Ro0i@frp+xfDHvD<3WCGJ?1nQ4ksY{11|nq`Ip5@!>wEGMKW!3)hsRa zl&A;4-2QLCsNL`ZYOR-`ytg4vHtrowKqn7-@64XIif4b<6et?Uz|m-08+57s)IL45 z=22oAlCml~qJFMsJH0OJrwQTeu>0@?HxR%0#JtR#joZ+iPNe6KLV5UFx>RvVs;S~3 zPC*C(-Ss!N3F0u&m45|LNVpq>icB<$TnPscRl*s1yU$OA0GAFSYdJ2-LvE2n8&5_x z?D7_ehpA2D4a(YLL1Gk+>IN;RrLD4>Ph}4GiBwm+yt+YZt`@}vHXRzE+RiZVN$4A; zM#HBx4AxL2$hT9apc|)xQ^Y>@iZuud3X_*u8Ja@;aLXv2BvWDrK@}SaFbV!tYlyE7 zPik;BOdga^LRB2FPQKGf^TRp`O(w81*I-Z59Cl$Wk~>+xoYoyDiISl+(2G3bNbT1rJgYxtbO%19qE~t@a+Lr+YAf4I8x3bY+0sYj1?fryBSr`H@w)WO!YnIqdR1Gcs=PB1(f4!@Txq6A3~ zVl3|RM%qLwmcT)Bs^%j}N-fZy!smC0fOZ^}reLro%XUS8r81hLA!h=fUyNo30e&wg zgfO5&C6!IRn`%LCx1p7n9R4UU(XSt0YduD z*Dz&aUV-uao-3J%F$_@X=e8|J^5{uKPrN2av-D<7`K4L6QqvhlV6*b0bP@ZB^B<$S$cQs{asTNkP{K~L7t^PX}CO7 zerTp~mDKDRuxM(^x6T^pB}Q`L>PGI7#n4=?vv9>%-4$Q$t2A2xYds4ekIQNgg@lvD zM=@T61_(QJemt?<;|&LP4L#X^gh>8MO5_@Y7^)#3fG$sF41FS7Rmmc51_g!nFO0JC zshl3J!6Ok4Hh2z6-!fVY2)b=k3X=rJJ9kRh7Nc~!U zx`w|Pmy0#CL`bFpyqpLb_%Kp^1!j57E0eK&*%snd+Cd4%V4Uue`!>|GCS+L3zgda9 zRvAQ3V|s158NAJWuD7czVAwpGLk{5cj9)#oBf#2iKl|-`vKUSZA?&!-YIc^-PWR-@OnQ#hUjWAo2OuH!024)Jju3?9eX~i#knM68TSz23FpFi9bZ)8_~7#>fr(F zBuC@>ZOs`2LzO&U3l)&eM8G$0J{fmJe#T=e*=8Y$PeD&|$QW;RHLrtbyw7hxSBRM< zC~0=g%0+zl#*kCw+i}7(rlP3^9`iu@11{6$cBvpK{Yz#|EgR1do-hHm=zjV9L8XSr z3?8WoMU?;m1T;aPmPHjTpXdb`6f$f`3zx1uk3@QyaC8D1NSFsWvNgR&@y`J5+zq?( zS+KV91=P}f04T~*h)zIYy+5KB;N%NKBNm~uUKQOEMRjXf}>g_Q4qzsyrmLGK>RQqAO*PcBNzt14E@IHtUi0FemSs%Qy zW=UpUk4i4Lmr%b;GU)D>WS+X;;JM<8aBs($w8_knf)_t~<@pE>JXd-GO!5^^02jpf zwz8kOTk4|Dq{VsOrk*bg7FZ|NCK|a}hvgSw!dNZbseYmFJf(pEjpmR+*Jo=V7yT|EA&1Kr)@>+pT_|vwocKH8i0@>)cdIZ7WL=TT zGBP6CT&w%39$kYWQ<(sjBR&i=Olp5tIPqUy(j!|&{kw&%eN#P4NQKEg$L?9R`QRn_ ze#sW8!B0Wv*OpMeV=(&Kgikx=STOmC&Lk&eqleNy6Gn&56a9p^Y1U*5>uRvrKg^}X zC>5kf?XwAuJQ&OM&}>a(zYq9=5UUp_g&m}ELI&9-P-(`f=v+s2Opn=__-~P2A~0;` z-h%S71>@G00bFI<$!7+YpqNvhVP(xCxB6-8w|+9M7sEUjis4dhg@^yCXRj#?MgE7f zNA|(~KGf>4rF&$@r$$@Ku3oODP?39QAgnCTX{yZ!PlTMdPL)MdBhqjBd?*fjl1(h@ zYGhQ(H7-0wJgn;G|2uqp;M735L7Af8$50C<96`-XXG@`JDz?OoD>w0|z2u%Rl{Ljh zieHNu*+5%SpBC7fs0QMwsZUQ^-PMhntLCfbb-Xy+R+BqNNcH25lS z1~Xy@svK|E(5fV~C~wF@f=%1>k2{hJMB*#kDFto*<^DC^=ZkQ&@hZXnX(C<@2Tv*4 z`s^W{;UR*S@KuC+md0Yks#GY`g z2bzQ4vQ;;Z8N@J3@@;b@f{dqB*EI~fc6hA)>t$LA$sJAV9p~3_n*jV$W{3a)0)PRZ zwRBDY>r5B=8R*`3z9rwFMGIC_GmSFK6EJNL{zPnSs=lrhvN2e5Y;7bx;)6Fs9?&-TEDm{!r0Ni| z-Eaq2OsV+BYtPowXZ&tmPR4(B#BYv$`W`2O2_<0-G~`LG{p`!9XQG#brIw7X2!G^r z>bf{<^k+o-F#nqym`$E99$9(jBYS9MyFuw8#qGghfzymNNjvhVPUSWvUj1#BdrCFI z>Iq(aB0rTXdN-!`_>f2>q>zjso^f?8A+iZs9^N0vWrx8Bio9utb795J+5Bx(R-?{7 zAYFf7rx@)VE&v=;RNpMLYMbLLC+U_xQ-u~7S>=Z`K7@B9am(yR%0=L{(vMz@ULzj6 zGK(z|8VOw=FcH^mEzK!<| z<;qO5^!G3_B;0ED0y}SEX7UDnrVCXLrM*+>Ny{DUOzMqA@zi&H62blY&jp4fLR14})zA8!ersXiL?dSjXryhSQ9fM7u_H?>LUjv1U8u{^ zmjU$;h4M;8qyq$;C)^KpYEAuq(EK2o)_=SswP8jOblZ7BeKP%lK>b8ZcBfxBCn@{l z_O#!XTs;_)>4`c(GI0O^0-6Dzw`xuQd&PYE#h={1ABemaQplwY*6XTaRK(VlB@C`g z(=&KHLz0dH_NPfMMWde5#@mAX*%Cd}{_>VDbx&+h&pL=1jtBxtP^eAsh(9^D9x_nY zB;5TlekTrf<9-=pkfi61G_@)sAHaGV;S;v#9~^d#f>&3;fspxnvnWY3XjCJOt$DJC z>idAVg5=#|)oZHEX^~l}R4KIcWeG-mxsJnzn)gXUW9!iWPV~ z$ERWzty%84iw?DfZ&Jh;JRQB%DQOhW;aD~7Iz~^UL-PEmFprD#yqAmtyW3s#e4N6p z3F2ke`SJnE1R5fy!9CMu?QoM~#D)#*aB1Vp>>*lfhV0=5VYgY{{s70X#rmM*_3e)? 
z!94&ezmK#*z~TRDwS=El=D%skWmp*4SaAbqY>Eg0P8kMMlnmnnIjAtcLDpL~U|tdo z3G-qOlvW$p4uT-PL{jIaH}s`QGDQ#bm1$GwZQaEfmP$*0XUP{c0ckBx@LjP0I~TT& zVEW!cHxd}T8ANjWrI5g|ad-=?gsots$q!%fcwUde3a=ONS**RNdPA`gJtMJi%HwY!CdXzAkV1wU*qniUm3X_ND@jL)Xc0Huo}r2ajcp3B z>yQJ)Y$T~}TSkEeacvQ~|Ep=GMO6X0oPSDP*VnxM!Yc+hEa~|FZ6dmz7Q8%ct z);RGr?1G6w000yWL7Tius6lL*ObP!`cLC~;{}`P-Tda$7)MBv>*R(~Je+VZm`Qf&T zSCRoH2g!)x9vQH_PJ&?v7MYgPU47ju)rqByiO@v6y^Ly$uTp6a0*MH2x0!9Be#sxw zPB4ihfYk|UeajkSEg5>5bqd5df|~Z&=>*fAi<0xQLSIDspX@qC_BT9NJqsjtHAe4AfE;> z1;HC0@G>py@2$*+)v66@(fGfGCmq{rYbE)*Qt~c-Zt)fFFIudn-DH6Ri-^%l`ZmVv zqf0w#_fG3n5$vEJ(&%}kE!mxh%nPfcMwMt@h z_l*Mf$vdpcm4#qAepCqFQ~4&+HEifB|MbopfN8=c1+c(>bB3>jRGv>e78{fcFV3mM zjuL>+X9etCSWuS9Ju-?DOyudbM#oUtwF3)yZkAcZiIq)0>=Qyu3T%(zTd;m_%8wh} zt|Z%EtX03tq$4ZS9PY$Np1Nsl-n+4-@)K?Bw|t^=ArN$Rh2)qCPuFXy;8BodSJie) zzEngkH%eEYU{>@aSlY`(oKK#Htca}HP zhnR}roj}mp-%PU#?_YOi3R<*;U;Q^}Tdk(7Z5shZ*5I(c%|MMa)Y)>uF0OMrcPO~K zcBKTDFHI%`6m%lQNXu@voECqU2sxip*q6 zn=DbiOf+s_+kI!6{eH@<<3e4GtiRpiBLjMrPF}8k#<%kC99Z%ovAa>z44qiiA)b$7 zQ_YJ9D(Q~dPm&s+7-!Bi!q@iJl0*So=KB-hjz6Jv5Y&TxkZfj@9)bZwkyt5oVLj`? z+wE?X5f1|K%tDWC{CFx`px3HzMzNSaZ`e2{yIPj4-IcRMO<9hS4O*8B<(%Q|Zz7|6 z&+^&qJW)HPK|);a1A}z_wO%*)v3z*lVLJsBW#0;6-DSbKu;>wGB{DN(66(L$e9D59P~V@s#{c8=;STH3-U8Px3A*k$Jcl1d!BB%)u!KY-(WNzdsJVw9}}dc-Z0 z{58R$IxgsH$>1#h3;W*)Y(FN%(;Gw*+%A5-B%7X(7O8iD^*_>sS;jpmy>EmJ{mzaU zb%zR;+;Uiup)goJJw%;3le>hkFNLc(DJcX?h>G^X4X`SwQ8L~s0cYXEZnVo)Wi;Jx z4wH$Mcr|_LF(xn0KjeeGasJ>V%8TAbJVzXeQidE6KUvQ?6MWTmiX&5}3 z+?o2?+PF7hT#keR0_Q^ahYgT3oq$Aay-c%YM`G9EF2Z5Q(uR}8`i+8!>dkH|(ItEH8{a$nkC-fYDwVp~)aF)8U0c_rll1mHk7e({VIh)aym z{6WE;0y^A=sKHH1dt+%L^Ge23xIu9#Pc5Z$<7Dr~qq1LL&i-b&g$OSTlE}6O7am@W zVrJrn*33h486;C#?t8}2+R__7!e0kXrrbYH29pvU5V2{K#=BDwD|*R zvf%FSU5WWl(&jvPES4=z{|W%0hG(t&d>e>X9;<025n$n~q)5Ivaq|`8a-5Ci{boW2 zywifvk*xvhW*n?huqlNr-5_iwdf#`UKoPj$c1TM986l1>c~h^rg+2I~?aeigu6gA9 zz<-F8&=EWlvUqesc87Yqo?$|Py@$eZC>YXjW`tRFeNwEgIyuXN25Ct3DDx)>vY!k* z>O2$IODW8_?ZQwNV~@VN)Cxq6W8(jY|GeSnITjF^W_Vj;D<=6(2ni#knI`9?0s|RG z#=nq)GS4yU<5C2kKO5to!lnOZU`0$`i2u%1j?Uw3CH{{`E9jXu=-!IsN=mc!5yS|E zY{*}G!6ciW(hV(8Ay#m7g(WmjOURa1*z{p?n^O0Cj#_R5TzG^iA9M_NBhi`swfOBgaRMK;_tW>g{c`JgoAvm1t_YK zmC-RKKt+(7pBq%M&Mm%)Sm@06qD%pLU@jrOXj5ofWW zEdFvLHc|F_;8({LFlVGt4buUwHiiHo$W+kbWI-|z%5E%Y9aYesvlm$~km%LrK-g^* z+SvcGU4at{L_~X*8qCrnQqqDBM|Mx#d~1Bh^v|=Psc8jxQ3t+~F(_1Z9H4%`r=KI7 zlUm)|`|s7jq!$AO4=DGRp?`8GwTz8!Qe8bOJx#gt(FG5<9o6@jvu2+|$qHTXhUE0;%+V#!^GEt*|c+EhKH( z9}IRqpUMlah$Yo-qRRP9Lhry zxttMMVM1zoZKu9a=ew(z5NZqc7y}V#E#-Lm@Do1%e+8l9oECI-ex?g zz0B+?ptjbQwAJM69*sbc2PL7P&R^(Dle{zkMKHk=D1IrI8Ds|_@B4IDk8Y)fkkRhs zgR&*^&qKkN$b2j=#$1q~BMPOv65lmuDl8IN%qJQh9NZY1J}jnk+SNO%e|+rx zk6LU{&!gBlIx==#jMw(L#2|sX9jIWfE&(6Z%%hJ@Uu$nS*hq6}GSUxVHDFwWVo0Ac z)65;AyDq;&L`wW#u%kL6vQFl=`-HL1CqpQ^j+XwxJ|e5O`<=|H>9RMnND}T~^_0x_ zD#>w6b!OTTa{oZ(JG{-#=h9T^j%f}7mRd)Z0qfCFn0A@CYnR=OZG$0;t$6M9om}B} z{PK9Nz>=*FA#3As%@28lsm|qut7Ahd`^l<~18T%vA5U`(uv*6nEgkJ!{Waa&@r+S} z<$o$z*>EqxFC1a0ze_<#z)^eO-++*-(p6>d_oLfwh7r53%=mY$9z zZD|+du;L(S(*mV$a2A()1x~CnWBEJPB5M6I9$#V*cUeCG#>O~<9ejA})>CQtskS*# z*fEXn!M4o$@Rnqf`K68M=k{aL0$Co)E^GW%EISBxjYe2QFa)m%hX?JG(oA#Z5e1d) zR&2OTX@90tvrA@#Yod7JPlI|L0A?lEqZR95mK#xa)y@<%_Ou}oqB=wlUYU_n927QI zF$pHa_y)^rS{H}GkzMlDDl~~o(zEs_YRVcHNIX*uj-p7YSz%nz-3^Dkm8iHH$&u0F z_F0*tB7VistTup9P?yd)*Ry2)Q3-xYRDa&hqvMt;__PL#q9xAFTp+yJ^yNw}n#=+ElIxray_BAG|o|+(o>M@7?{~DSR zm};LyKn|zmDumo=VFDYLMN*IU@fk%ZgrJyHY~R_5tynGKJ48nM0_x-5YCF z3V~sL+=TepW6uc31)#kMS@*Gi2ker2wqdW@kS7#?>RB?&W*B0f&AVWizH4_n^+dM} z6yTK&S%l6yO=V7q?e{y$Y>Np?QR%j3h-rlAt@80x9bW_s4c3vZl8&r~oz4vuq(kbq z9CC?*(Vad)|5Yc!4=zru8BG`b4+-@M3pL+S1g_4=)-*=R;=EseBO_Gg-bW3X2{^@O 
zgF+kU8ASuBg$RORvb0t~?`%BwU<)<%Jw)GT1qcH;e8yivF{C?ksh9zj?EZq}lWo-5 zK>V*0JFIiqelzB*$)vLQCU>~#pJ<>-ft}bXk$3n~srhM17z8an!sz)(Kz6IbU&IVz zu0pEF6lq7m_-Cpy`C#2sfXOJI{=8w@Ccw<@nDs?axdt0 z->&5twRubdwrST|;;I9rnr;hMXg&=YxriniL=Kg;D7pjp3SQ*)XUUe$0L1^FekE%}ex9-;+iIJn)jIumIHu zRl)MX)?2IEhrQX*@~k@X)F*+yhxbC{A;+naU}%3T!;!G3-LR&`s~r{OdZz5=s3b_j9&AlB~MD2OmfoG~f$v!n+K-=~)P} zfXv|&HHk8(G)uDYppR{BYb9#? z?W7jM$A7D=1Q}_D$hS2k7`eRIyi>2?Cu)o#PKHpPwT^rPaC)s03W9r610sUZk=Fh>C9o^xQ5ha1t49|TQ;os{Kfc=R%!N%#PMshUy#n*%+nXhX z4dn6(&JN!ZZ1p_=@-h_naz80nJ@SIgZ-I{*_RGq-@`J4JeC4-@|E=nd#60Um&<}cD zhh7AnEOYaJP{25OAy%)zaiAExuL{J zsGex3ZeuK|ieZvoCe3K~*i*IIynPG$3)X^36p1(R2rq~(NJ}XUUXlD)S8L7jMyJzZ z|L8P)3Jfj6I#WX2Kwu%7Jc|wRUUW@hyUdJ3*zJUe+`X-}*oSW$% zca@oevM)hwW=^&mVG!|2kskate40Qu1+^=_K!?$M$OMursgK7IP`^B&+T5wvsr+pU zd!&OTs_foE`}ep%Q2Mv*`-lJf#ZbG!fB|B1o`!{(l%!&*P5}Jk$)acLr#^TR3t$!( z=N`MWx_7~hzM!VLENl@CGG%21J0D#t6LUF&o=jlJ4=m990*>$jZ*^PGm`7%`r*TXq z3fzS49&j0WPU zl*78=)J37s_H=gSs&fFlK4@HRBmoc!Wo#LKy4$Bk)gFQJV_qniuXZ$#_|N^d{7U{t zX#iFsrCv3{cx9_vM)A#VL1;yVfbja)=X({-Sxv}tsqPG2%+)+sTH z!`(Mcz5Ov$)VO=vw=)m?-_p5x2cvtsgZXAQ5;fLr-8(wW*ke|!JN%Jb(MZ#g>l3k7;VsT;IJlML&{gh8;Dmp51T0yb^MKSz(#KU zfpVLS7Q`ov^7Nb5^=t@VCtl2UJmt8vSE#LVSbnI#$7Lr<@2X0+i@IZh)@_UupAOs4 zp~Nv+QZJX1mL~X}F~cYhciR}`j_QXl@n5X$7u}u{;R+XW7M&|M>5TnJ4$4l5^0Ic) zeG>q-bTh0HG^w5jeUQr;YeZ=AmPh|8>{EN-9tfE{83Q1A!o;&iu8|!inZMYF+Co7f zX2wG#)9W+V6s#MZtX>M%-&9FVmNCvdXS}#hA%65Zi$`%viB&X$4Nz>b!y#sRolcs0s)+$gVTv;Q2Uw;KpGNAwmQu6HqDW}YBb+`PelKaTi-@nJ^ zXLQVe!>3kgT-2IX{53K%ArTYsJ9537Or1of^s_$5qk=~YF$Cc``BWs1rEeT5l5iMy zyVmex+0pRiqI6SX-&WS9pap1<*bOTT#B5TSn7-B*DsqauqP5tF{YnJC^3jiUL`W|G zoiMnivdO0xa4WJIW;!J&dNG!W`01=Q13lOekNS`fVmb;?ak5#bW$|^)@Z_LNsXygC zn!@Ulz^ae}LQHHO7lRmvc=!+RjH0xhUK}NF&Q%avf?fNF;RUGlqHQRKfTqxfzb{P% z;ditpDtm7ec#bf1%C2})hTU*e8m^DK4n+*3y0Ar8w7JsHV!cnvcr{PfEIbmxLX|`z z1yzcNZ7gP&N3#0CEOzO7lBP_v^gYaW44ih`W+-18@B4KY!=EZ4+H3g73u_0fEB2YH z)fsRg4Vm1z(j{NfJbM>l8?b_mNub(lS+u~&%ildOA96kn{xqYJK!6I(Qe=5s<(^4; z`=ca?x;%!*~kw9tsYo&XMaNc-a*NTkP@U%Ubd7g^*wkQsalO0Z29?~ z^;q{){a^j(u@J3Co^V0n5Zy2SCgm8;w_5B^pKZtQ3I5}26bWw@XID(3Tc@AJ7E;bS ziHamO8JQQ!aMschcF{&}Ooq&Y?C#^1z5T0LW| z(+@x8m?v*<4cxj0UVf~`k#J5$*m(f73&@WBkhpMxyd&=m$cO`z(C>GUoHz=M>3Fz) zKSCrLXt&-sP$h<7ZZE&gy5TAPJLr?yxwg%`WIsA#W1r_spL9uRTO+%R)!alzpdvAx z;UTkYv#|X>^0pD4UXI34A;`aw2reD}TX`UO;rc~LnURC?LMc0QEQSLt#xJ@FJDdNG zsDs>tr~n@~4(XWG7c0*Awp~_RL9bV!o}sZ=4GB+}!jz>-?W{)Q{egY zAzd=jroD02)&e+^Ehr0|4AkjRp@#qp23GMM7FnUa$kq}_J(fb_ez?quus^BvI3>wd zx8{oFwz;~XL=VX(B~1fvE!|=}O~yeNatbiGGYIk%-y^2UCb#@^UuSO0 z6_j{x_X4Gyn&H;9mGP`w%h*jS%o~f zL3c%B7*JmvLJg*hn3tN_vptgSa<4PWl6_KL0Nk=3SXbaZ+92DNY}DZ(LpwqR#QVe= zU9<4%xg0gwcZb$hrmX%k>3vN&B)~(CR!f90p_%#B6MvrK3KOaGqvIN2)|e!>WXyml zv-UE>I$Fg#Z7-F8*Ft+qk?D55w3$XeTxupBQz%PnTZ1V^u}5>elq|77feB$;7O=KY?(|5pKN!MGmLRaaP*tI zTYUe)N^MGWQro!o(z*Li=>xd;Rkti$snHHO$lhDD95K!vZ&*n=`vE|fv97TLbM7Y- z4Ygk+n2u)T%VLKtXipb7P9qWWgE~>ZR{O*`M(uV=png zHwYfp<}4WUt+5hH^p~fXaIfo)8uEb=nU1>zLvZMYT!w>+Q2mao^Nv*2MmDj&2=<~X z`E{kSs@_P}#Io$uk=HuMQrB!uThq@e{X#?PNi|WC>^&QT}z!fB>wPRnp3v4qCwN$I5Atm8XGb*@Y zoG?TnGz57T`C`AC)qk9ev!G5gNLIX|&{&zu^;A8~oQTvHY2nFjyce)Og6Y&D8{g~n}+@ZU^k9?PD294r%T9;p} zH}QOduV6+#*WYz(%1T$z2;+_lVNqFTqUkV2`mDnl$`k?6eIy|}Db2KJ-wZwLIJhU2 z_BSg^K~-Nk0;u`p(9S`2N0+kaR_qIYfMIn=@Nj!bJmH=v)Q_{B5*1TKdwYDAn9`X6 zs0Vqdj@B&lHh=w_{d@66qyk0Vz7ey!jdGCXbhFH}6uOU&RtH~EnLnHQpzZ(@Pyx`L ztQP%txrA$-zY5Gq5#XMtoUeI~j^DO=t7wdI6Vtj)Y=-kS!(U|)a|ZRo$mS=9Vy*vDcbL}ZJlvLbB9aIQ5w|buEVs>+l8Bd#G>y!x!&S2~A*z$1-QtZISI7oZr4nd85EoVt%p0v>a*< zD*u9BxQ5My8dw%c`TO4>Q{KAaEM~8$P$9De;Viy`G9%r5QXBvnG8wy8{;mEY=QC}* zfN=#_VMLU>W7eZY49(=qH 
zOh9kbRpkCze<8Eg|2YyDVE2(4;po4*{gaP^#(=FmXou+5rXvAxk!ksZdQ$|1p^b3J zAy=>Bxiej;p-F%$GAGzB<`IEv2Stq7XdnZVE%@u3rH4;@5aC4#iiuU<{+xxfty!{aC^pP1R=@|5j(=ZL5jNR6mc|Qa*J^>v@+!caCPoBS8zrKN`eX{*X8iCw(+vN zxc=ei>U_slanZ$RrJscjc}_tk|A;cSJ#`_84dwE= z1Xf~vUESN^lmAboU;NjknE(p(9?%(gAFXB7ENTF=A#&jHfYa`gb{34*S#q224--7` zIIj>YRCZ)fKVF5Jg_?r3nM0M|vA#QGb z44P>k96OL|q&u$O*Eh{(-@RU1OA%S68s5U9^)$BQOs$(&w0WT3AZINNaGg3wHK--|M@m zO_C(c4*pP)u+XM`WG`lEV=1UJL_ciX|JFuO(6N075d ziXO`Qy)QMySie)(TITAk7r17$o-TAT&&M>;4Js1;t9wc} zC7Q!J_Lr>hb@<5uEni!McC7XuhSfu3??eqHBDlQm=Y<3JXEt;g1SPLCy`bI-gn-DD zk#3_BKW_84-IN@IyD=Z>W`F|>(nezuL`ZW*Rl0C0C74C-oyEamNnT|Cv^5-7^_c&1 zsX5W4F5|}sV%YlV%n80CM^+yDneLTqKE+&kPcg9-fxzZC!qKEz^=O_QPFV4Va zx_BTP8PG5D!P7J3USi@=G&bMSqkmX5oLD4u!g4{;N_%mUjoV0m>)4`xalIx&GqF`Z zbi@i~tb0so^iw%;rC49-a>Tq(=-QnCWS(M?SszU%@X=B=9+>N%G1+F{aXL{PSYS+( zMH1{_M1N6a+^8VwB@6}0=`5=ie_gX;{K4<&bu=b0a&pO-2tgzVg-#{o@%kp`7W>A8TcO#HkvvUbugPk`W1vBo3aH} z(sHe3O%;AM1HS2ish}9{8T}a?a|l1doSq-)`OQXV|Vu zhPCJomWwtcy2!k@ZBgi5jrO#QUAf=W1gjjCbK|&vIb*w$KbrmR5x&!LdkNDJoai6s zOHPmjL_s4hx2WFza;yHt6vqRrR00}Irg&t@aj%>{k_69c%8W6|;0Sj!wUwAbX?|8PZd-GOw;1^p<=5wPQ9SP)wE8E9ynI7g zw<*@)cIHpYg!5VijES>Uh0iwlO=_Ds-_OqP@{V|l$=G_<^kRleR@1>4?V8nchM(7c z>*8`leqR$TV5(}Ez6r&F!-ecsKr7u{G_W;9NJV?x&+>Pt!r&PWp|6TPRgE}!K+Y7mU{SAobGme5xF?Av>W9Ole>-P$ z#JylLPNPu}AiUIxBtmZ*;e$RrZen~eTxq>J+~0qC$LJ&z~U zOcj($iD}ZSTEz8RnT$b}M4bO813Efh6=HvkWTqHaERwEac|l}rigO3?u~lBRh=Xc5eQaw5sP;xXlAvXy&pB9R9>!LXR4slhk!2u{CGH)|b7 zG@W>)iRy)BdW0fr(Ao2V;d|CMBKZ9K8?l~Swq@D8teY?Rk-JC;rcb$f0$}F~>HJPM zRTUeXV%&JrgAm`Tn?kZTB1|Ef1JO$IQDM>#dr3RJAPnW33|}>MyaV zvg+7U|EE!FIYeD5PQy~0_ym4G&+r;l*=6Wnoav~Qfpo86c_e4RGkH&4*rv@rsC?pO z_AkVp#nb>!(3GSuTDJpUkABH6umu%srVE4X>zT$xK)1ebkYr(LzO+dY91gdc@biJZ zT1-&)ZyX;p(pKV}_|nY)NVHmIdYM!JZc5c!4FKR{ykFS1O9S3Ma{5E(l3z? zJ!yeI+XVWC2acTN)>;&;0TR{Ypi#;jK7DKZ|H?v+RCCxBECk@^0J}qevbHR)_}@IG zxc1R;;eGl&`)Iwd0Y1sP{DZ4R1=JmeIOv+164@y=A&s7*+J*OnBs9#=DayAI@Hba$ zT2k$@1>tYO)lM%h41f#{?K4>+4`Wvd;oM31bJW?J-{KZeGTKSfD;Me5lsPTcJb^r8 z{YF81(TUpH23SxbFPWhYVF{EcrWuiMYr>O)Tcs0tAR{n|bDb*|U>-)tkUxY@3A@F3 zj3H@zv}+D*tW0Kg&WYNXzS($anf`2@I-61MDX-6_yv4?h#GRPU*pZeIU3nS(0 z;n1HjmXT3@>x9_Kh+>!Obm_lj4s)MZ$eofP$--7+CGT2v${-MAlXxrfp6eh3R#Wl1 z8iqDKn@<}F4y#-hH_^}N1l;pvmIET}o7N=)srRH-sKdz{snOw%-xEp7o1S;VaSN`t zzBw-jN%jpe=BXfQ+ANBpKi*BO~|h=HygK*(t)*PwG_ef9OgfKs(MaO%WY25sE+O7u(1 zdRP_rHfa#r+((^4ErtQ#+|IVuKYqM6}LEGbG{B|lh%a;{gFG>w#oJWZ@bvtT4cPyrz*pa(=#?Vnl0{LG%HEE^CdJp_K+~WNigbxA z>zCm-3#5%w|IJcQRDU;Nz)#riiDi9Z=C&6nNnP5kNKjictQ?U{3&1$YVCjYfByD`t zYk`GYjWw%LzUU0e=T#HA)y(ySFn~-L#7+pyx>(>5TOgctLTL8ijS#r5Ur}IQ0vD_Xd zgQvC!JED$Sv8gMk~9fcy?n9e@W?xR0|8Ks#+nJBm#M?!f-~3--p1-c(#2T(~$8 z&^;KAwYP8Vm0yC%j)H5z-%pby;R&INHTetzx%!Fi!p;Q#2-~b#8A4(8zzJ>>l-z#qi1+$yiwiS&9{cuMJZTTpBu_O zLLuJoyfF-?l@ya>kg&r!DH3iFtCEyTP_IQ>zVvZ3^>*G6^;ovR7yf{!QR!H-x7Xx=)a_{sl6T@R8~NA@8RnVRGA=rPC}+Db zAQR#%FP!eBAjY-%D@wiqDfD7(&N)|pW3vYa zFpWQ-J9neNS`hRfC(Gi&(1JM8=Z^1aH4f1LL?Avv5Y+0~-~lFiIoh&Mk^?Xi zVD$&Fw+n@RFe&6OB+tRVtUnO-?QkHi%~o)tDLW$Yy>J4P8*u=m_6)FVwK(q#E5y(y zqhEiPXNNDR>v=9)(SEm8bbShPcd)dyADB_2Y4kVIz1-p|P(Spj$kY1d7x3O$tx5;n z*MV{m95s4(8-8GhkOIi-nxZSgQB60xFeIYwJ$gWN$)6!ljCPL{fEgmSPKe6qWFSU& z&NiXJtPmw~Q{>l}Ko+q8N+CG@B1b+j9hO$#1-)vlm?A{W72&D9D3UASB^87mG+<9o z*QWH+kb_-`;N%48nwgC1+i}PZ%h-~3>qM?+N(>el=q-14cU*H%ao32XnqS_lK9|~u6~4fs6#nr+nx33Ny(Ylc z9&+t|uf#aiipcYBn9%?L1Dipf9z_)`f8(yUXi?L`s^l8=8!*(&zjD=8BVdL#jR zmcwzw3tC4|91fO0C}W^)`Lds~0Mi`jN@w{O_>)(447-Z(^9_}?FP@E@3Rf$TomWEB zu*eBk%D zzJ9Yx3fK@$R5Vq3uN{5&EkBuDe*K37L+aImU+BQMmEeQvNH_#QN#bAMoq~3hz1y9} z%0_LZq~D}S*bFRIgSpsUZyq$am`5R|NT!U*`8pC`_~RX$r?phOE}elH**{&mz)rS3 
zak2Lu^3gitAIg98?obMFPo`&>G5-`oW(T?IfsHjNg^Dm6qjH2=&J7XmG{+T%GMHU0 z&IFla(H=b{g1*Qa;smzdmK7j}vL0`^&F$d)Voc77iLqzXX6YHapnfL}d*VglaGPT} z3BFpk_>}z>bX&w|ZxK=czb`r7UgFCpOPR7pbEc&a@^i|hC0Q>053D-ZbM#3slWCZA z6!!1iE09Rs0xu_n>fyj-=v_00uj|v8gJraPa7Wa=%|KIH^;PhC_nGF_nCR~{I%&NZ zHoeoGZHkFGX-gbaA175DA;zt2;=5D#=h_DS{O7a%fl#AjDMcna)R-WpNaK?mH>U1y zWBtwt3BAu9pDY0lgUMLr#?d2#`HxTW`mZ$@V}*+wM~NLMS0qrOS}h`}m&VZc(ue|T9fAx|w?=an}?4gDCb3GX-{bjf{(Wh4A*?9+%U zMaI;VFk(iT>MDlmLjmUqd8z1xi(YpvAaTOn@B5LgL_i_oy1nr&Jry$K!IS5ax+H*cpV87AX>GoQI0uKeizBcRnU%_v424gxRMMBv!G)DT zuuL9GgWgfMl3CZBg^j9oUbWQ(G?T)oDr5W}7u`|;(0vQ5Qvt|X+Jbc-#n4!hGJ(LIg|5}(P z7`0YLv>ca?*41ggb)>HBwSGI#-mff~C(s4UW>&)>tBrDAHb;kU%N^KVZV(WZn)+Tw^7XJo?M=zY1RlwBk z4fk$Xr0NTm==U&^4zh`%cdAa0B3;yT-*QuSDt`rkx^bF$+BY%Uf)x`&PBq<%pQ)fv zH9Wv=fEcmddFcQXSs^3f+Fqna!-sh21vOB$bKGN!=GKfgie?`|B608W2HK6VFe5{S zHkAdbl#=)=X!e_JkS?$aqGQ-UPU_e$I@ob^R7D-G+hZZW0R#Gp63xD`p6;iW-l_i= zH3NL6%M9v%TYRCCr33QpqCMFLa3FDuV&=qg=PcKdSrpg2!&e=|D7a4En*jkP>gDHI zYdh>Yvxn`61qmq{r+}`=-l4K*I_uHCuMgr&9)3BPGhWRzh9kD~;RV-({Wzy_-FWky z(zdzlqD|+Mv+>NR%E}^gig*0#l_LCi7z|S(IW`H}$lY`7 z?>;~l4}(0W=cVgaI5Y0Y@_OcrU% zxp&rm0>fvEH-8mOxDxLTUR5XlwM{Yer;TyK`77NegBW0Yyx7f)QdwY-sQ+;AC@qKR zS=5g4jvFA+mt)c`?5%uYt=abpcsy65|FAsi$Tr*_G0p*-%43a_AM5jy=+{@dwy3t> z;e~#<=K_C{z9R^JPWflNeT}lhj8qo4nY(#=dZ7*0Q^>btPd8NlOgK{G~vFr^We-RlTzhAdU zj$G+_PJdG84H_E;R^v&zfY>zG*5PmjOZkXugu_4Dya}Y|+i&b$lKtGERBfwT= zT42O<{)%_^{oyB9jG9(}7ivC*^WCvuyl4xF^goqrU3?P`x^!~H+-MRk2LFaU+=G1t343@nC=MIbE!DG%;u)eKZc> z&WB(tO7z%)wdR~NVO`2;CoZu@k6+yDR_VmcK*-v$cmX?4)lVYOnDAvMOGa>9p_ycX z)BFJEUKdF+tDq6ojY{LV_A$wIjA^!XCxlX`VPO7n1!{FXpCckyVBf@c`{9u%sp{5* z01a}sPOSzP=@D1ZOByk_RfPawo|*a#SQRlj!Dku@Kk(!|BDR1=PyXXBwFJ$<%X@@C zo2IhHtca&I;!+Q*!&MQB5$=W3Jk$^sd-qa{85h?X<$6gn*#MMC*aEPU8z-`Y*9MP; z1_NHl0lag zGY*97*!;_FN10@RP)_^gw)U3v2uQJlMgbtVh3XNizm$JDBN_DSb-3v}=I?M&)A97K z)@ZhkvS~PdtNQHVAcv+(0P<$eg4Y8ZZF@ImyKb0kXxg(WpX`l&9_hAf$KfSZTFsB? z=&6Gg&!wxZU|amJ%-%S4Klop)5||+=9U0EvemAOV`jgMAQo;0<`a>P15%VAQ&R@zm zIx43>``5bJI3<*+k}7Z-BTo9c>~KF7v7afL!sK!0$xJnxuFP*l*;pUJ)gzmBPK2MF zY610_?7r1KHJU-h_5}Ib%t#h^Wk7Du1Abc(mzj8ep1SjkBYpjZPf$krg9)7Y`8y1* z?y2rGi_comG1tuDKfYQ$)oFJu2}Noi!ThC@cunWsW7B^ql+m|Pp2p<5S3pGuY?>qw z__f|7ZOlZ7#aF7FX(?>Gt=KOe3*3I0hn1Ozc)4c3^fE??SQM&}uHzu~TPu}&UKQ73 zA)VLKCxa$pzTY%s0w_h=L`~6EGc8k-)$~p==WB7MFP@9!rC}c0#d&ce0zhdGuZ)-h zJ((j2#mV zl5K8nWq=bHBU9P8!ly5Sai?TB(l)#F0wO+sh76>`VU^*ys#T-0Pw09hLi^I@rj$lr zH@urS1Sb-UnJDU;)cL>Iw(%e~r{^(!5BN%->g(dxlgmYPyuoq-c;7X4bjtS!U~|Ty z)n~&={uY92KH-E#wP!hR&v^!T8zG^x<*1QH(aKw~JzJpm1PJaVKk7(f`o=COKJLP! 
z5cu_XxGkY2gVB&Mh0D1|T4H*=IGV*=WGi!2h5kjLkNDM^IH0%&)l^9HM~aWf<8~Nx zh?wBj9l^cbq4CMjboRZ|S|zk&yPE)~T@y%S{@TJIVohF8-T-_PUiOu`$OONlKJn;4 zV+sCk+X1^XA_$XHMzK;P*@(o}gz8^uL+pB8ggK=zBZQ)GIaQFEDcU;o|N7GMYb_L&il&&cMIO_mFbc zMlqH~EhP6GU76lDkFw8UkqOk?c(bFVV2Y2;kbXdINeN6`(H~HGUn3^Lx4xgMt1R4B z@d%NE>39|5`+TxuZaNq__=~icz5H@+v zfxSYGfMvVyS(NIfh-3F^v=f)mXvZ6=#eR{C45cak-VV&|mlrxM-F6LL`R3YzzCH5G zOJ>%vj?JBf!K1wL#u%qlyY6~|h_Mh7RzX!j`9^zEr8;etyhy&xUKY`Yq{3bWC7Kyy z%uJ_>6iN*pKnGpt@ikO0C-b03VhYVaeVszWr-Y5{4J;p~Ik%Bc%*Il#djkn&6#?e2 zS!isZ8Niih6^yVm0JWQ%@(Ox&TLv*FPWO-0<|DNK1RMfFT7G!F-#Za$kd9+PPfVEw{@B*I#Ojl`BdHM#9%hgPB13mVO*ILf8FT7W_&20BsP+8vUe|CZRu^(Mwz9s1es7mxh9$gqlzB#kEvB zB^SB2UQs1_|Aw&`gkZDjD}(yV(%m+cwka-T9e|Z$VW7oWT=Ny4#KcyeE6%Bm1Y?cs zaA08GEFmwnWP38uRQh4_^?S>WKIN*y8ikor3+l}8dT4RX+J~gN)@4MxyDx@K9ViVS zi}7?dAroaVbMjlOwGGap_z~@t=yjTZE88h#JsLQh=0be7+on1A%0<1wYSJ}|p{!NT zWHKr;0AKy~joJ$vpMHdGrz3SdBOCm3j;w?XTetzKGs6IOYKermu4i$50t7-1e$BEG z%>8s8HVPZza#cW`%q77^(@+#R^uf=X9(z7=5h-v0jluwro|d_ed$E~z!$3jDba4JG z6nHneMor(LD2yNUKUDMO$*v;gpOnf>tFp=7dTQavoWy3MWg0>=_i3TakAEV8!zyHO z4}6XIk|rAj9bzsMVS|omt&GUXKnO080uZb3|5j~&1M^Z&IbFE=;Z}ouW=o)LPQ6Vi z?2qa+xS>m58dxx?{KT3#s3ru!sfqG$r%;=Naxaq+n2|s_lB#&~{i@Lbry|~3t*haC z6Fcp89`pFn4MJLNxonVY?Xkau{!K;A#fRHK^ym{;QaE~TH`Mzb3%KuT2H`Dn%{p?O zaW};qJ1frf36@DLprRdjj;R|>^aDo+Z&b;uX+n{8-VQPeriaIlcbAaKzd2`9<38-z zh+?M0%NAGZi*>?S&aSx}EU6k=F_F*#5sF(|l@iAC638>xjjj?b`v7yE2mB{MOP}63 zfM5b>cc@H@iBK-fe|o{EH+xokkK$RfS_9=7tK!y(HW)&N{8QDd-7b#MC@Ua2NLQw% zdCOfrY-~0a9lwKqg$h}zA4vD-GlS^8$SDZ;zqwvVpezhnUcnTueMV7|rnbEJT$vb; z=c$sBUBy`OswCPBj|YB<7<`hh8@3_;%enhrl&-%+^hxk6ExHs!>yOq)A^(^GfZCPWsU+_T9 zEJ>|9=@w@t&s0*`3jj;~@6_D!H*YNPR-bSR4_#!m<;+vIf8y{r6{x(?>?XLVYFeR}Z4qp0@FQnxddnpAdmN{mEFQ9%kJ=-285bT}N zL>)H0y=lDbgJ<%w%(EhFuc4;=N%^4|vMlhc08gYdCoS5D9o{_8W)V#+Rb%8>_sy9w zX!j@+7wC86Z2Z!C6j!7KMl*hhyGCvO8@1ZsRxXr+(cvq^&Ypp#iLr7p)Zv~D@Wzv`RG0tL#$$Pvi)VQRm21nJ+YISA@?HM$wJu z^L+?Hh}2AentyP9{#iESFiH=7t^_58H!(BNv;zc<$Nd0LCOD3<6I`WSw1|xex?2vj z3s;3JG%B@tP7VN>-(T?JoE>*Cx?aUf7LnW78C&k=ACG6*Y+iDVOsU8?=YT1)WBX5x zfcdU$q8{HX`&2QQF11=%^q%{%Y5dm)Q%~pHZu$0cV)U_9Awh<-JyxHEl+ugL7k#8K z;0hrnhRXIp1V&P-WrJ``2MTD)xPY7p4*yWM`R9nl{%KLHhT&{>Yfy#;1`kFQ_Y|L_ z1$O!1(gG?B1^U_DI~F`Ib~bd_!n%Kh4GZF%h85_;Y#c+T$6q>B;K!Oa8@KddC@Q2S z-sTHEc1p(GH{vOt-nk$p1x7;@oXk`So!6_o|3Fszzi3zv72GG$9GI22_SJ%DvAqBi zo_%CE-S7kza4lV-?bb|Hq*ddl z-*KBIj}6wyHxY47Fs_K4U_YYrJEwYIWR&juRj8xA0Pky1B)`)j*xnWYQ$+bw)F0%i zT=!Rbgdj%tYBa;$7`MXks)F5&Ua(%-N>{JPKPil|=-!uQH-@C6yV#ZRDppx75F~g0 ze)i3lDAOev6U!8NAxZcll+UARn$OUm(Jv;CIuD1zUry`Jh_c;5v1*Pgy-#vKxmWvx{}yx4{yIwUPWhLX_d?T~eC6 z)W$dgB`(NDDGc1Rz1Zse*=zlnl8B)B3_2!8j%Cl2-m7DV0r(h@+lpk|u2tT}^*IOs zQ5q|~q&?u7HrVkJYLXi|wNCS2+-w=4`P~Zi)@D=WuuJ$hkN5<=`G2*rjo5cHQCeQC zsw|#*qrvWzyFv1Kb2Jjrp|-K)V9eYsqPJnD+6zN}xp2IfN+Hi+ThCbTC75|@yl&&I zjg5L+)yTu260r1Qu;;&9Mova|uG(0lp%5@l-oMEmrABZqnhC1!398tO@5SN(&5|VY zq6rZ-ofq*D8ai;qaZtRsUVR~8nx1Wf@4|!%Zo&c~$o3f171K%4ft+bUMvzs`ZFGgW zn9w%8$!B%}rb%ozaohAf?a-2UY65L(GJhggEt~i&(X2CD&+dR(Q=jSSdQ~A==oJ8z zRNMrUk#p>h<5}9}ql86m^kSn$%tEg7Ifc{&PwefsNPNINHB}4jpi{574H%{l? 
z2(E}QwXJoH@~k@FuG==IUUoS^-Wtmw@-;&2>qK0_-uKZcgFIVZ`9Ea{G|6#e#yQQA z!RCKRp#$P1Pc-`q8MLK1I>Y4vXNt|sx!LA+!-^tkdeukb421!1i1zvajHy57HzTIM z)yKGJ4|X~Nz?m*_i!nX-m!3o);6t5vt9PYc-?a0Kg*T0A%#xel{w zh7$=U$_ZAlG=ukfK}L~nt&DR*jWff@kfy+rU_zt2>p;av!BW@M1CTUek$!^wje~Es z5e|YXE{u=I^lMb6@nh8+Hwj!9g(aJm(NvTN(3i=7+xq^=HY89Q?FQNer84$vXYT>W|00W^xo@J+MkyR2@^6m+lMgB*i2{3Foo1x>TD>ruf(o5(nDW}Osqd2UQX_#WiC>ZdJvHYG zwbFaQ`v8m)p_{x2%ZG#+FBD;t$jMo9WuFC=DP3E8KWn>d3$&mPNXWPHTDA)jV*8|v zzGFV@#D<#^#?A>SGEt@eV>sP+*{TX>K%u1O*f&!@Yc!)@<%CCT{m6^Mk9I&gLVoYD zgZn2w3&YWJ#oR>$%p=j@|7~X#rdiZACwyj2M$i~mJ1tL3x5-E9Bi^>Wl0{B#=|ADRC#R(m87rds zhoLZ7WNw6+5UT4YY&z_JxIYsCRIinp@@(Lhn6?(D zqBxO}&0IH8IAp?moF5#2@v$Psow-{}cX!y_JSSU81~EW0Pj&%$qmH-;7cx_cG6#o+ zqh^82Ae1A`>dRqLR{UzyTP|ifV=)Jmy*o5LBAnFp#C7|-9ER<#4uY~u|927Vy`H2a zU<+%5`mweCk)p= z#Cek4iKFZ)5y*%{1&9I@P4xtu}g!=Y7T{m_#bc=0syMdWRpXssJ zMfIMY+=A1Epfdml7Rk3;vADqj>hb`)-Hnc`osgV_r0008x0iI!WP5(z?(sRGq zMS_LFULgLVotD$)L{J34lmX!`1z>ZpQZ`gx*G* zm(LIo@9t!N^B^Le4`2qZ0iEv$4T!@?$IM;~G6L2xLo5dY5Y-~9uP!{T=jHTg+cg@ z;M&k$p^}<mx(D>LS5d0v1UsD9NZ#Z3Tli$GmHf-nkpo^dW^tok5u zx%RV!jpLF{?++~3x$|sQwn>4tqy~ZWi5`1+BpSh}8P-96{o$stbVmbX09&8xfQ8Ho za{7W;C+NKo@5#^rZ$Oa0Cj}zIVkD>7+pYs3)b;TsF$r({pe3|}+w$IOyxTNjTf=Qd z{~}P*Lx3?KA0vZyfy`2sAj7 zzhB}+LGbLPo2XVLfxj)T_jN=38*@T4Yqq@0Kea3Q#?8KK8DYEO@7JKx^9elyArBm= z6~V?EG^`9wSK&X4H0)6#+R}R&@8ZPvgAVl7Abc zto%noyn`%{=D6~aO}ZThn&2tLHoCVkdwm2D<*-C)x$H^N{UOMuEyJl-F#!fW@W^Q9 z{J^@f0006(0iI)OP5-wLwYF&q4awCo@X=?MU>>`(#pS#Fvl9Z8&WQQqKc&nvPFt@Y zHfOZ5RK_rD%oqd_K`;W87oc@?EMuZm$Mkb+cE%e?Jo?)@CBX!$x_0ROQ=Z@2TRh}u z@#lQzxL~-MG#MTQ{D(Vw27epm;z^z3R10Yh4gs`xT0eyo9b4X=ow+3fm;niF&D@sQ z;*Z|qDVTA>LE!2k>;Lo9-@=&W&D1;B!bOj#OYJiD-*jH?@KIf^MDwZEsoMq^%?<-c zpU;CPZG!wtln{*8PiWat)CW@fO!-cIHFaCW?*!9ex=j|F5rzuFYr4)p4BHp|-dyI1 zEX}F9#@%t}7M17aOagr}Jm5vbN|c%@aj{AdLh}P+_C3GJAvxG+gyU4;`}><#87k}x zn>&Xs`)?rMQO2g{y`KqRhJ#V4Oo7uLAX-~_!H)Dr7pVs&oxws~}b9+chY`W{f)&Ft~O z%L8&cc`gQK&_Z!J$zv61s)FZgygHv{+QRlgZ5tJLVd|p zLPbq%S~#zQswYwG;{X5=G(nnZNvJ_=nM?@3Vqfev1Jggb5ijaZEm+{0V;>+>P_Z+f z(mxv{jh+{*LmCnI;?Z4L5cvS%f3Qa)W&2w5G}0)iV1xJQ^Mwb!(Yr~BEorh}?l1PQ zRZP(xBFiJuzE^8Lac!$~$St~Ia z%~p0a9Yi~WXaPS?Rj!lP=Z$sw#|Cf2iO&Hc%Y^arL z(aju<(LS=fV1Ui6bWeo|z7Qug!MV((4WEKg40TZ@39bF;46{z4Js|Oti4~DmK4m06 zh7kbHQau4%}CvcY5P3YZ+1F|`|hP6tFV1YUItS%&k z#!6>Df-VIIc})U%%P~R9p;PUDtBRG5``E2qvw38m4PYJ zYSwpouyb6+bh&fWzpE2JgEoMRer#952?*FvsR8r8m((jbJ3r4fjsRwwpk_kC+Qz7; z;5LMA_$RM#Dao`HIv<>=C+XD_15GRagVE5_t^UtbDX|_PzR%M*v-EhjYvkuGFt%44 zR<7-gjWPT-?;d&zeX&IZ0$i3Z*60rxWVn5t?9pfw@BDFK2J32^PAB}*Y?NmFHjIuM z5|JGT+M`%^peJgv3-p^p7el;;o}>g~=}pqg|r){7%|2QiV*OJkZcIr%mYI9K8#Mq|yiM6!A|J4n5iND{`!{-o&2kr&G|rwL4AHYbK`j*(gRK*qF?s8AHyjd=q+CI*~N z*}T4OyX6pyQ62&t8Hn2CjVUzx!0T!Q{-`GFZk#l;z>e~VOB>f-aDSKdulIJFXJo_#lAVUJFy!z0W}vQgWEE$ ziYJ~}yYwSHDv8;>O_Yof#@8YXsWPLzqZ}v9{zZ>d!q%-`tu8Y>#J9N&x7=IK)10!` z^f8|0!zB-zqw&zJ@MC7_KG89DduG9N<&EkLBK@7_p7C`39>h84dDO5H=*MyS&(keP zrg;R>pk{)RA_)SKKraHl#$Ej9&-B4u@B$&=UKSByy9_{L#OUHV2jufh3d+X`OWKbw z);L&T_WS=PbTxt@WF5 z;}H*?EQIv{`(qV{;$rorrQZ}=&;B?_>pSs^@V)8U{V_YA2ac{8zhTnVf_kZDx?QRW z3_i$ef$ZtSG<+B|d5_jJ;b@9z@K$U3-w(Gowu*9mQGvTr;&1Y&e%N8+O>(k@5O%Zt z+<0H>JPBys!|`>^(4lXAl*f*_4Q!&TuGdB-epmKW^wNJow5GUd>|M^xJEjr1Y>IxN zNn*ahq?;h=0bmetX(3ujN(h9;Mpn-{(bQ$;wQkOTN!(ea7CjIi%yaH$QtT)ffKj;v z#~Z7o%khRaqa=4F6-5RM{d;-m(*A*R^?;No5m|jAzUL>2T&om}D`fLnq?u1H zdC<>-TURUYp;0jWl+3>G;a?pFmwAHW9Li=AtKUXo5bSig>D>|-`ks^-#(YtO(rr&b z&8M14BjS0+t!9I9;h4?~8-=J6@mWuBEp~lKlpd|`t??0Wx=GDeNUq;`Vg6b;+k9@Z zGId%Jgf_d(E!0mgSi;dVoOXux1UCMlHAg76myX#|p@h`meW;0? 
znF$Nz{sTwy6d#d?44cYb3mYE;Z`3#b|CqC3GWRVuDRCcs?&=Y#e8v=G1s&ch8<7r% z2Lr6VXCE>NJh7}UH<=ldaWXxD3?u2xwTAdja5W8syTW1qK@S>!EmX}{ofo}fIG#sC zK|i7NzN1Qp4lzo~lJTk>ElS4^GQXD$ngxpV;b@(TvR?_E4vf`MxIq>a!pwG$%)qV@W zOs`2#^g(ORwN2bMDD2rl@o6H$tnuz*KEb{Ahqs}CD_dYLs8ccCt(G~T%R~WF7~$=s zL0`nRjcAWN5eu9tp&VD?QzI{!Vj^e5Kd?d}RiT-qWwXlJ?U@X;uC31smdIfeXTxzR zL9xK>%lH!Y{`q!ZBX>&!+yf|umajs~Mv9*6FL>T0wFYb5!_Z@LK7kqxfB%N5QbWgo zzp#HrJ}6x%R1IE@t0^gzDN7%s=bE3Srxj@VE8VI?P=k9Dv^z9x1zYlQ#Y_X(;s+5N z!7)g`U{chmCGE&$JFC2{db?7dyMKeCBp!}3N-9EKKBVw{axCTsm(oXtml(SU)cOVU zxkQo(v<;(v?4){S3l?x)*3^9d~ybV$pdn~feeJ)21^=ax}YQW6fXO|8gpYB`=p z{%HyDtLNsMFcwIAMeb7QLTsH-{b@bFDN8U^LY|b(!_AsJ#W@i-BJV!@t&ZRW<*^{D z`FMHd;iVxrHnvNB=K59Uk>Zs2n6MJ?Js??;+3X;x^(UkYQM%r`1&@RYWL*4|R*(-! zk=dI4fASRl8MoYc9Iy-M&#r7bfwj~H0b{A@Q=tOM8on;hiJFelgR@hj==?Xdu;-8i z+&JLL|H>(Ej>GLQTrQ+LH$4rC0mH?$B6zO83naki>ns8-i$L&W86lEJ8o3-@nRo$j z=7b%pS$gRrEI-qHlg(WY+A$?lt>xnc;~RVn3>jKDlM&!TG&a8#0eWCD_h8e%k~?ex zk-+O1?%LLz0lAEwRu1tS#N*p-tzHO{ls0+vhe(+Rh@w>LP^ml7C9Y043rgnoEKHTe z&byI{M23x=p&_!e}KWx(jD%!HCZyT+)vgt}tOSh64szGFTsF+^@Aw zde~-qjm#Jjg-F`=a(8{jYDx-;0sIS}pf8%8) zC1Pe8_0AAvm}`qs{ETycVI2cahV|erxRL4rB-YJs+IjQHnmpGKEr_>=8|i-K{JCDq zUbzs|JP2{$<(!wWI%k1v#DS!YYG|TGcKt=KZx`RZObk@KiN73hChFQT6p&*}ho5GJ zF_%#NZ=N)+GIrjOnd0MK`_KYrZsUeh-I!cu;Ryu@4`;jUrC4IP!}%dMd+_Q>GTk)) z!B|s33Z(W?AGs^?xeV-QA}!CmzXWK>DyUUuvuCAnR!lbOw`)n*^({6o#_663TO6Ek ztBudO8sH?C+OwxhHiy8HD9gc_PZ!sgVJ_K@F1Tw>p%lXbuMqyQCf&@$ZX`vb+s7Ii;2O#9($Y$P|3Xn26sZFD`XX7hF!ekXC<; zC(iWAp^6%LVU}0+c0(@f2>L-F^3U(WmdAQhz$PM0t%F(`g&-43lXl^+=sAkqji+Zby@(yMoL-wjKSZ>Wul2l6rJO;ZL>suiE1Xee| z4_8L%bx^?KFPJT%H0($`NL(%2;YMtk3~m;S_~|A}HQ%e?$fw#cU$VK?WOufD@g0cN z!Ju|+uIsrloN#^)FUdDtrG?!@5Tir0JH|sE?nFCgrDu&LxTn#Ecp54K1BWSmJ{up~>=5Zk9)kn#MxxGPfb!J^Bgp z8SieuLVe2>bimZeVx{YN$!i?s)j;$&T2hCz%-mHyKY>-s4R6P(g#p~aIt4@qK z&fzvyc2VMu)(Q&&3J%~hT~fGA2v z%>V!d3qhWSMHMZ7Va}!QVrs^DDF)}!LIt0X_T$2oSORUj_?P32@|E@=5ZA4ZraEq| z<+^~2t4Zn?`kgF1T6|7cBd@az0~!wG{u!2}idmV@S76#XKq-u(XM51L#cb*MS>pna zBu1iKjD$2GURw{mk>G$lNn&+r?)(bf41gj{fC5JTyZNSfm8rR|OQZFU+PV5(H=3(bh+tVvZgu!lr?lNS>42PuDgsefvUj=Xt$Ou^<6z z&wR{LAbT8sqO_f1rk*EMIv^=w}UNxFg}loAort_%=}-Y7mH0W9JMgY?T| zyAeC^7yRu%LsfP@`)?BN#;yTFP1V)8MFrGK@>9>$@I zHm6-vAcEQ{lYMlCxrO$yC%MQTGqkB$ni_}|Z5bh>jU0Lc4N-IgBuxHC(M%o81>IE8 z@acpX)05Sl>nl&JttQ56Fz^Y!^naNzzW9Wj1#eGXFs}6`Exsv*#VzGJ4JA(!fXA}W zIE1$6_pf#>XPjXXt1q5G3rza#y7SMoJn1A^JAs66dzC_Bh&MT03|h}ID-k2oP@igs z(aVgJ@y&?K#;kinQ7kFy}5r>##7jG(b$N!TqQ2 z$~-wMvKhWbT$j7)S1hk`-vT4G8V_f*X4|?$ZEnNAq|;hP4`f$+Fu!6rhrqIW|;;P;=yr*mD#8=?UZv zfi#k$?Xc z?`}a%(z|oExQz@BclpE25LWo;s4Q2;jfv37trEY?b@pT~?h1$|;zIb!>{5|tp$NNy z0006-0iLCFP5+E;Yb4lKW@9qjODM*SZwmNNnw+?ni@ARjWMRIR&RqlfUIQp(><+t6 zLPE+Q&JZFNQ`_?`WnUlJ8uAbM1-4;AJUC>Ho$!A%Ble4MI<%`joedvS^NIHlaln3C z*96Pf&6b{$kIahowZ`PA5P7x)Tz7jINK(T9~NRQsM$Xtq;BVNZECLJ|SVmYdC? 
z_a7fT#M$G$u>`y=InEE^rlW*L!a6Ep$9}63MG|rsiO;cC2Y*#vNNK)93^#LfK5+81 zr*JNJSHVAmI^HUQby&w-=KmRwK7Dh^f67`xx_PLX#y?+#5g>bafYz}BAI_ZCpJ-Z| z7!S8Gxs9WkM$rXeW`)!)NX;>S?o#NcsLy%Ru_)&RcPWffJzV6k_OCNo1^y#ZCP%ZT zx!_YnC)PR~aHe=}u9&`{M4Y-0}(1;*|Bh^K5E_!5dwqSCY-N0GV1x84}(} z+C$HZx1bNq+-(Z3%wB&s+8xs7ciI|k{by|<(~3~loMzWFd#do2YrVNrj&ju$L!V{N zx&nyT8pyXD2*vYPyipGZ0i)I7JiEV-CDAU&7|n+eYwdw1!pa~kg=0Z!FZSPaYE`fX zq+9!798QrtQH&6VRR912Cjp+PYEA!|@$#scvPv6_wH4GirjbHKq~zTy`cWNLj5oMP z$S-GFo_{K;(jI7^Q{9KYU_qMa2=J^@G6i4u@dt$-tK3@C!!5# zcUYYS6o_8jfA&Foi4S+je1fsNs+ItR`}E;wXvh~b`!eyUXBH$Qgby}55?k3<>XkF+ zUyZ(cAj) z%8K~;8UbQu(MX@S%iY&lozId%b{PBT50+}rICBIYO;4fw;0DpBueVcWS%<~`rAQsB ze0uHBVu>7hsAmVPo}1z;IW!uem`EakF~rhPYJov+&>L$C$yQpNeB;xh50yBwK~Y6g zF!DgV=IxgucjdsC;WHt-$URRXEr{E;<03kDTYbMIjsd6%FLbI|ACd2%1pP$rtFc#H zT1yYIl+;D{#dME)XF|s|{xZ!IA+BX05D(Y-BMO|x?9{J>-or)K2brlgEzEGS51PnY zUqQd>ZqPY0q%dNK#Z{p{%wL6}Awfu!?B=#XGfvnD}((2}#4Y!B1qNpSd{(G%Q zUweu^DV!=BJCqqo82pFvds6lL*ObEY))aF7ZWvWkVyY;@IjD?8&^Jv_-3as~*N9Eq(cWJD^-X9sb0hj|v3UVoF+p`1pn(awRyC6ep7O6M}#P4D*F(x1B!flB=pAc-P6 z7><*CyW*Y9?P~nqgzEE#fkM5_GC|PQFuM#=RG~g;9jr(&Xh(u0=!CCIeDb$ti2WY( zo_?H{ON}ANLHesdjW5 zv^(L>S^FK=l=;PeYT`Xva0A(5<2oO_8*D)?qt-O|n%iJ!Q31re0*T^q8s?usw!QGx zLDM?~>&W&Q%OHV9m9lHbGL0u*LL;e~UAUlPbA9I)a%ZR$9YO*VVVUs>iHPBsZn^C9 z!-i^4z3HzmF;5#u;Y553tds%~hf!Z4%cv|I_Vz~&dHFF?!EtYD)hf8VQ-jOLo|6Rj zEzzPnI>YJ*M~*Yps;Aq{ju-fx$v(sT37&XFyY0eZe~0aQGU&*uqQYf=A7T*V7BD3- zuXE0qrDB1Inx$#$Y~FkdmC&mf$p`0U4OYgFmRpbabvir76sUi&P%S;9pyPuIK4sO+ z*Xa3lYi=NTrv5j{E-KGvp1wkzzI_cDVFZ4So%6>igN%M?1Gpz`qv@jYR-*vX^ZD(* z=9;LF&>#q^OQ5n!5$IwdnwGiES@$xgBH|(GsKlnzDC@FqD55z>s5c^kD_N!A$TH{w zH$`Ro(b}@eZN`DTiZyFom9^sae*^LJBTDR^_JyQWuqk`g-j!57-_Llp=qBWhyxtJT zRGl$sFrd8keM6A{L8j%H>b~tBAL}od-5_vM&ZTfnhJmQAq<1{DOHI2IT=fn^7-66s zRMHDWf@>L2X^%XlVxuvnne6aA&*=1b_HgpCz~I2EUl7#bN&tR%C}a146Q3x?N<+;m zg->vHZN8-5>|q3o)!!$wr@%Ljvdv#TNg-tpuO-2s+VDFAXY{jS2^@TdJ_L))I(d({ zJ7(BaPc7g@V7}|x^b6)4i$wVTODgIko+1KP9fSl`3c-`T{4$>-FWdQxk#!4WR&KEJ z(YWoScJL1VViO2;L9JTliZ`HeKfPdLe^I^ud(KR`<2f1D{@mp_^)45<=Cnf(&Ci90 zV1hBT_JtH^8GJ!EN3H^>fPCsyd|uoeegIURn~;bocYOixq5U|99O6vKxjJ?S-eW$} zL~9#a&1@FVW&E)xl`Xc}$(vi?CO6msjQ|#|Fq=!`3e0%4h^;H8C-pP@yap-#uDgYL z`7rH8vQZi{O-m(>x*h5rrUdA|8hUwMmHY%Feq$3vw5Pwygtvnq-h@J33v8TGSP z6NrbC?IiQFTgH8V> z1suvf_LnO)ZuGYJcS;iU$5nd$I-)q)BT|I0EHxMY!cYIhHGM4Jk;RY@Mmzq&H`e>X zEqr$b+a6_@bc1J^6kC?i6Y|f^j!q9I`ytNHb&~iEf?V_kzvZG@IkbmMqoFaAnG-IQ zlbBkWo!iJ4um=Z8#YUHbGqsk?9U!??jesg%gP&Z^sEJd&iK17YJ28VC1UYYk%h_Me z`dlT!>`h(sbL}8@Ilg?`zRnK9ZUvgX+(NR0DdY@)B|9a{4UZqZ5f7DKyRoXMe3|x? 
zU_40zWA*9wlIg&wM|tP!R^J9sDO$^DVWbxNZ7A8hP*eFvAr6%{GC8@p-Xi#7ge|C9 zWhos^;xw88s9Wpyz2*%>m-H?s$$}aHb5mw+2)zpRe1pN5BpeSYK8ny@FB)JuV8X=n z_lsyD6DWE3M(V}x6!X1Ab%$7%ZlwVV0_Gv{{SroCE{MryRwtAoRqd7HtVR6_wO8yp z6n!hk8|;M;kF7YFj8EDx#R+l}fU-3#{0id?8sVy2**UK3sjlks z_L#b_41}I(&C3uH|6Awbx3@#3wF|6%m;*b8ob4RLi1pP=96azsdaiWADXHmq^P@25 z=g^ZV-Tc8`2!^*Ke1?+yb=JEbt!WL+ki3V^)-^&|2%xfCsw}rnf6!z8ZAbs+dj}w+ z9`eS$>&n#zws%_I98*EWqk<)N8iENfkV`bRI6&S#2H=-U)MuK2Nr>32&*t!AE;qow za{OF|n?Ry1GFCNR{fMpa2+JxDvCaYq1`W2O?i%=7rewoWY`PLI)eRAazg`;3m>(#K z#VEIN0n{Zb`l6nCFuJJubvIq`pk8_ zYL|L4b;R^}?&m4MfA=_I#(1R)>w{)uRP9yt0kHt|_D^&}z$R(e{@^9&@ruHvl7r!6( z^e?!cPgN{o1Zyd0iZCcVhe)J%GkD3=+qv@lFq=b*RC(CnJKy0_XZK)jKJE-a*BFmF zlkxp6lCIzND9oD$ny2D9j#4>^gU}mtdhthVR^Y8KUptU*dc&I#FHzPvduTf#vh~Ab zV;`-7k{k*7rJ4Uzsa-l4ZzsmO7o5yibe3IzPA2|}~4N5*y5~TxBeVY|Tl^===+0r_az%&7Lvj5GxYIO#S zg{x>vLqN+L;S}Qk)=Lp0(~y^4H{}njgSA=w(j3a3@|0ZtX>Xb8??zN52aWZvViKG0 zI6>xAsTf7E)o?xjgch4Q4Ab&x206f;NoDQkm1hN|(Q!@Q38diCb}?kTXwKc!2Qdx= zF576NS?z0|;{skXuUnAvk{(}GNJlkZ5X%#*iPJSe$2Y}lMUvuqC8adJ50~mgZ&0mx z`PhLVn33Be`1n?}c}0rEDAKwN&1ybBvkkUc1O~_j@z6zuHt|r`I0mU$hf~^}+u-oIy|ozV{Wy&e zG>3%%m;!Gw)!pVg9t}_wF?qZkZZ*_j4&Dmc{OfgU+c4~ z2BN!MpagrbV-@pHE1U{3n(vfVY7HBZ126aM9B2F=Ma@WbqzJ+0mBW!9l%nY2oj>jy zZN5*kkn|wFx_ifm8G*Q~;jq;auKq_% zM=#Z(;3HbqB9YVO=yX*Vs-ylh{C|7k!CTYIpoJT0H;$fLVh;+waBZI(c05c{T)44zIyb29Vk&YrsKTEw}6c4bdwdyLE8z>v`&N)neCftcPT z$6au+Zbgn>iKOBAJp)y!xw-O2D)^t;0j`LYSQPFIL4w(kNM-~VtD|4j7HlB71uMUy z!WH!8olx)zUXW=JH7DnMb`B8QAG7)S>FzL#*a6Xf*1)Act#DbgmSxkaWF^vF!mBRp zx*W5V4cnEjO|FrkQIjr#HOIi3VhydqPFHG^3T=Ku;&^_UV69B#M%x?b7D{m<&#Ak3 z?b|@aW(qx7GN!OFXczXH1?&c{l%sAVuTk`ov#pQz5_~BP@NC>_mCq2;_+Tx%Qt`T2 z>|%C7mCxS})^c?~@>7nNFPCZKcG?MzQSPxB5Twgnx>k#khWwFbnRgC&?8fOR^y?)( z(3@xU(k#v8!gKGT7nMJqnVA~jn3W3+%=&29mqvXDX_Lk~?`Yz){ZplM_>ml$B`GlQ zv43OzBR!Eh8p(_x8EV$}*OpbSeJDhtLFk%sri{FZyz2-Cu z?PJ8ecWd^k(etW*VDxg6-Bp0HDQL*|y~A!WJ({x?S|KbD^3lue~U9qizNy$l8 z?0U$=v zuscXfup9=7SGY&#L1=cg=-)!a8yk4(Dl(S+c2atSx!<>Jlq=m_)RJ7;er>Xyb(Mw+0J*RgMo9XN@ zA{y~Yx|rw=XO=q3rhXO@y8j%eJH8@B1L1r3G+|e(8_p5Pa+gi3&x=brM^@_z#+mGb zZ_{d&hm%)itk2!5`F`#yvsosH_KuF7bZu=om@|z|6D9$%JL{(Sv`B#L4rIGT?$)`t z33aYFcO4`NOPRm`00W~zp2|fPET6I|I5xdp0BWSsr0}n`!W6=u)ZAk6l_~rMgTX#x z7LkDH=hZN~>=U+_S_GZu-g4Wo=A3SAF(H91(hAIm0yKZwE*;#ua%UJ+YB(F6eNG7U zI!IC$M-aj`hRJZA>yb;RFLLkLO$#VR^A;+PlT=XPHKCQbqbEQG1QKdLhr zMSJbxXx9B`?K{v5R8UljQ1u=E^`ODz5M{+CgW%a_j zS6+M&9DQ6@G*nltZx|rvp0}-}e{N@*l<%>7v!&-GI^6VPm@PLJ*MI(!B@eD5#bQQT zBOey=R+#`+gl?&J(rM|ML#g$H@G&Fl_3<6nmX~{2>2kFdAH@zZ1Sm$7`6Zq5LZNoz z54-ig@Wef=M!iEEbN?}KDd?16wh62Yk#(H-?u$iip4$#wmM?X)*Bf39qel%HA*5O!gPMs~t?b3izp^eVoPPc~ ztMW$7;RgDuXq5AXiR5z6%d}nMWKw|W_fCt$$Ye6@;|;GUVu?w%+@$DS6mWE5lv{@a zp?8d9z`!u8Iy~B+O%b=Abww8MreA}mDdCXOQ(K1gosL%2v$T7|TveQJfaN48C5E2a z7o#JmBv{}6fm8*s+{^9IXl4xhL7$N)INnfgkKYj6URva|vGkk`;nkXzW$H0=jf@V2 zaOptrxpOeVcbM?Fx$1(E{uX<!PjW zyHU^VzB{D7QFxsgZadOuUsp>PhR)$f!Ukgqp$_$J!M*c@aZxgCBVxHi0S#nd$Ld!b z)^MRuD_HCPzbxFiBaKf`KVE`n(O(|Fgq8vUM}uq+Ie^A0jJdKnu|%VL8hTHoIG!a5 zm-e7pD?So70t=lvU$qZm{L?N~{+pfR^V33S;^2Q``=3;kl?&ZY5_UyqbXgkOjgBhI zt#7BpVL4*W%^m~xYO||y7>P#$yAe!q?GvACx&(~dYsRW4ed@%q30!sm&>2yv1*$kig&oRPNb*RW-(dixwX|m&{5~s61j<06AtSuor zKKfJKkRzwCpb!K6(YpEa0N`u)M|Lj;IB%@v3Ho`BeJy5HLi=F> zPMwZP#*F}RbX2G#yd_kT1^meqEf;M7Wtht+5FxkoRkb-^#M8qW>Lf|4%2eQ9_Vv)^ z0($8#DqtfKiWQwjFL64Z#TdXGHt|}g*?(*I`5&%7xav%2)YROs0}3h}o>|(YhM88z zdb!hSu2BNcG=M8pNMR4?Mt*vs#8k1?1~RQ$u_}&>Ux0Pls*k zoSl^lu*AUMvIM3{(wz<*6yWRP2zmbz`F2cIEpT7ww1avmShe@{mzkFC!#+do;*6Hx z;Y6Z1*iMmrXYr}@xL3w>Pjc8^ojB7ozsULb85DvD@8Q3lz!>T)B#-)HydBC$s>Iuw z>X!i81W92ZN($eAav2&C95ICJUgz!vJy!$g>R=)9PQ_nIoYUl8V#Aq#08X5(V^#MH 
zjbwL(UTbh0pNh~PLsEl9^vW2MsQ>^0<^i7TYEAzp%?}?1CKt8a1>Xu1 z!H}o7Ly4Bcw#G)Bc((=QYo^*$ZQ2|ekNGQV~JX)w^myfn~4J1sE@2%0>XY%?jvwlx8Ma}eP)#d!Mw2+=D^|tCvZJjw=;F!aXrteAxBrT4yn~bSc z*l@W5@;3P6WFxnl&A2Y?UF8w(H)u?6V43Z?Gr}D)SyI*J0dm8Ofq}Yp>S2aBz$3X? z<&XgIu{$AbyT&jwf8Ex4!{8E@8LiI z01>u9n(#@eL2Q{!2)}3zjO?Bj^nGdKeQt#NHUe`pq;q3s9XGK#s{iFHZ^H}=Pc@3% z%f#l)58fd+7jPx<=T^B(+YHKgvYZpL7!(VOcTxL5+(PHz+;bz)R=kk4Cv=%^lm7Y6 z-<%Gi7-uC=BH|*~g4sLbP%W5L?Xfuv%0AV}elhrUfGmNOz=m4l^MFpvQ1l_LO1QbO zZ>Lue1E2fh0UT^H?>7C`FDs!6I&=zV{Q>|HPL`g zJQ{cQC=3wVJWQn~Wf=5NRxp2zz1={>>YHy?Nj;3>1z_vv&Hoid@4zbc_}X z_v-mK%Ssba9SBX8p#O-}B2K*g$O8*yrl~>JJ8GraMk<`-eHMCGOlBjEvOqoNm_5E_ z>yb4g5e5XL8>K_NSLQLqzC~<#%GQ;eN8m3jxrR%fk9QOQAe^8)Zm&xid&t*X?MCtX zPau_8NTgp;gvSJfG{JCLUb;b|X}2HxIb$RMs|qs8pCfEa+jqHh%HYt>)Pc(WXGo9u z)|W>W{xKETCR9q`cKiUhOp{j7G%_Ope)a!cf<3r`zkR8Q;eTSENv?yadxi3_UU0_O zddT>kVag|)4~&AcmIMCRqjkN%vOScgYhVGtvz(_UJzh3EWPcHmqu1$Mp`B@2&nSkq z?WlC~y#8Gj!nq0H*wOfRW7W6UG!#@K+z0GZ;(Q`VjqK6m;vASaoEEmfwb==R(-s1M zx4q}DbDQ{CU)GXd;2s5txg|{-&r&f&U@@2Dt+LUbwB-vb8wby>LMi1Jvw?}M5eeB? zPTS_t$Q@9lqidBkwCVy*9&Iy-jl($?BNCFD$>^$OJ_Ot92JI7eU)1;eb%AJO@d{uC zTj^wBk+@!o+o|c?6ULN6EOpgt>^=3JfVeaz`=TphPamFTkOUT73n_FahulROTk}dL zUM*xLovdA>OEd~~&V|ac*cV_s>ia9_4y`~1=$Z>{(M@csk_0AI_v%4=4^~&YGJC8t z>Jat#MYXCbyC#R!f&qj7*k}l`rpOT4P2O=LlfqlyJU6RoQoa*cZ!$ z?Sg1k2VeVwGz+ITR0Z)XTT!`yqKHu!l!r>R1*1E)VX4}v%3N@Lj@hs2XMxg2?Yb_- zT>h~M1j>fvrIt!UiRLoAnhrb+4_2|*fuMMn_ovyRWPA30a8jz>k+pDEd8o+%q7Q-d ziXXKKDKuO?A{jW>h5Pdn;Of{R7U%$tF1OrTf|2 z{XW1tT$PUB>~f*TvJSW6cMCiL%c`&?@BC$>58CpWG3}5+K@m>6W1<${Bq2ZJq6&O{ zdrl<|*JVirNJv3u)l%N&sTjSgbrPD$a#U?}>OGsUFa40q9hW(dGz8a*g`NC@&n}LS z8QuFX#Ccka^t|xln+Tmv_gbEtND5_)Xlj9FsBLd9qMt^NU_D7G>u_qpO5iuDIl6RH z(**9h+VBlA+5zntse$U>1kOneDR;p%n|UFC zpCXf{R()+;N`YE+&=6(|`r}>2C=~VW3w^5}2lU(W!@u;-G_^^6-1E_jK43QA*%!GX zXh$g84Rbaa_J1z}rc;GyIt}zyKuAr>H;n&5$rNRce$NV;C-4b)`V9TH=9{o-z@%cI zxt9V^<2z0gqCb@bisbYtt5jbio#HhxLFS92u4JXAGEXHx5cai`c9#5pL(K^BW~c%o zy9PvLef$HokyN_&k6x`Ux|u9T%XD@OD@B9gx>e9LNry&~LKGL2ZJIN1cqWTYTaDFQh%oqdPt;<^GOn~j%A?LI zOn0*MH=del46RYqaaCs4Hji&)CK9y0B?HB!6~UJ@$90eP0ExUFCI~YMIG?g;Q!hMo zo3Gpd{eylYLus>GoU$7g@+Vwz+LOkcv(Xy5YbW3z_X)6PU>wpGZ|~uAA9JL^4p?Za z4+(sHMh>KBqCXl;dDT9}K+jAGqTkqQU95H%tQ;)grKIBS8}DCtmLvR#zyF72@jfC&$4Vd`J8{(NxU7H~=>z^=pj)N?>6 zQ1LQ*sEGe)`yaBMRAzr)J{4G&r&rIt&Df!U&2}%Dz}r#_htQ4|*94fE-I93ppv`5s=fM!w?voG8W{{Ny-Oz zL_Kh}owBBGNo&R%P2bs&bnuZyT*ih?-$;?jXVFYy8_nyy`K;_CdB=vZ0a`u5Ndm<#0wNLa8NWky z*RByR^}l+#ZG%r^AjeK&d@gLt*N6wuY}!BVAyih|mh&+(3O2lr@XlaIQbm=lyHw_h z7cMtVL`Cp&^p|pV-_V7%muh%njs`=Xquc^P_jHEu4VYfpY--6pfsE&wPJq7Sx|a4Y zKm(~|topN&hi{OIb7IVWLHmr}wyS_4d*&7Ow&tPolSXU|j!5nQx)?(XTX9smn|Agm z7!^=Gx{UifcZOVR<>yeOW9aZpkt~Pu$)fTLMoKtOB?ekr;4uym(tH=J(o1bo`F7{r zm8BuNX9uzd@3Zq~#=N>OxpACbF85T2^86%1zP8_A93#_plQh(xxuE}WkkCTsfX&C$ z11m)%ahwMlrmwdi$y+OW26|lcfgb5$IUDc=n%SFSLIeIP$Ykg<$Oc#av24A0M(tq? z8G?@FcZC#~rU$YjH%z-pr>2b4Efa3!aE$8hxpB_)NiYtdEkq6u@v+X0BiR6);3d~7 zfw#v7r{vXU-h;r%{NH@a^M>hB&wtg3Bl-5l-Ka%XvJ}8a+>~cV zr})0JZJabI^cB=i+9LhBT%-(hpvijYs)jHCWI&t0jC++^La-33c!iNTH?IcNsu)uB zC;L)C4q>gq`4@=u)s3~_&eq|aSOPcwkY9+u&G!Re#0P&RXkMow%{^?Bw%o0-#6fb;VG$|N! 
zW^}PlZvKk-Lav>Lq-7`l1E0ZtsMNFFR(WRxC*FGWF8*Qd)=-Un zIf4)H7FP(j-hGc5W$=2ZNIts7kU5w6%myYehRyXBfJ|*3?*F_H&hxdccM-);wx8_f zaJFGx4>>)tC2cqv%Ix0Kd~xk-eoH0_-;U#J!yUIsE~M^T>EQRUEai>{sS88rbeAft zf4FeOB>4PA3Ykh(UYLy7f)kQ1&b`)TgD-Z4tb8)?+Mr zlpQ(54t%2xz0lI#6oIf-??MWrNIn>Hh@}MB*mkSC8DDNks*;izfB&`-1!${iRUzcu zQ`iHj6KSx2A->=Cl%0y(R}fZ<uGgk1>rs+nYuyuh>{OC&m_$D-j6GBs*7E-~Lr7Vuv&-lj`$9Vf#(ZD!G zz*#QYQYi8v>B(bx?6X;~D2$u|&mLE_9w6c#6DuuQR2~y#EuWz~4*gwV3erj8vq0RN zsK+3VxNbF}$u|Y6C%hmGJsp^ZiYaRi+AYME@I8^D@B->S6*yw*paj%M1OC!HzuFs_ z_30TcD!_o1d!CLbCyoH>R?TpeE7&No#Kl?;`+O-fm67&q=Vc?d`LSIf0G@<%s@0J9 z`9%`$J-(%S**(#t1FsNUjRVN2D}ih4pzk5(%$b^OU{XYhF(W0z<<`HJb;nQw!(Yk( z(!!(l`di^6dQRQ5R7cU0nvYE7dSgM-o<%J-Lo{FQig-dK|5-FaF#F0XGLE7lTd$^x z$YNmn1=PS9O-w)^4I+Za?+)Ag_e%9K)A2S5T9L{!HCkq6F zMj1Rn(eV;ciEVm`117*tqbwHoseX~YRY6@f(Gj|KNwpK^k-mjwMoy0A#X;Z{@b4WT z9+93C0k1NC7ows6J0&@E`4D`%vf9|1y)zU$nb6U?6!hZxN|g^KB;0TkBIF}hJu&pN z+;V(##(*JB>ES{{PHb0}H7Q!Ez?KCuCWMZKP-lKjctgP~WQY>7*a!x~(AUi;xQ@@ym|ANBpb?ECziQR8LBh?YZuG6Vi zPCL~fb26`yWk0-3Z0yA9iz<(MTYQztEtU?g4a`l-vh@F(LIPZ|dihtzsm$B}#Ob3%A%s z@?=qUzXv^gnK?@R=-OKw$VOP&8`6g(!zih4t5zwg%wDPje$UVMM}S3n=DdDj{giWy zFiyzfBCO0oi2Y3oY`#uH zxtG&0C|W9}%(v~?`_@4&cx{$z2?tXr!i@NYv{yE1re2DC-II? zub$*0;t_0@-QlPe&}O}cRT8j-kiWiLYsoo^KUVj~Y7ICQ7@G!+m-aXXDk@^;b3{kl9**P)DESZxE^}ySq1E{0Wg6Ql{Mv+!Qk5M6jL6pLpDKu%`}+sc2NI* z8QSr>40W?S%u+8t{CunERJYaWl-vA!)k4Sk?vM#$CM!s;nskJI>m zb8K>q6FoE*4)U`SZ71>ah*1ke83#!+0tYy`fZVh@4I(crnm7?NF~;e z{3B%qvT;c-i@lN6I45JBje3{w~<>kP|jt&N92YHfvx{?mDOqe9XOdFyFy#)D8%7q|`2_#y5 zK%fw@BqXvD;$6@6%9Nv-b=gDO~!`Q+i#{r$tPdvK=qhUerZP&};smu#9 zii@9aLz&}Eu8COGYRKaQqKYyldrSfi7hN z=J*z$o-Braf%vLFUpk_~p3d{w_j@!Pj!yakz9?tHl^gC|^c^!rU*NZkq)M{)f)n$? z;^g;ty!g_o0tN5!4VD=clLgCJfcve^Kl}oA$|~#X%9*YHhSXqPZp9(o(pz7LOL}i) zuT()z^t97iKM~dxbu4uSo(yIKGIT9>!Fh6$Ph9y5zWe`y%sKv?u0X`UQWICO zTIr_MK76?a81w5#RM8=AK1lrtMKT%*LrvZ70ph~bD=$>|bg^Na`|S;kN04e2pu4P* zx8J4D&UwYt@cI$Sdf0Y>0007C0iP{&P5)#%oeuPS;oMWOhE(W^#Fo+zvdN@ctsi7{ z9m^OHG08?TXhRGOvJfu3?~l9!E~BoW9VYa3z)}S@EzJKbJKN9L&%R)BnvV2Q zi+KH`&~oseBeRPP->gx^B}Ec8k|+L?p~AW5p=N1jC;PX*_ywn!;mu~AjP+RjhMtVz zj|q-zc7Ulx#d1+{P0cPCmO2}vXDrR03p8W4r4NbrtOBm3V7s%Mvu-HXazn$kPoTzV zuc~6-@Go5*;oLR_!-s5>&!J`KpVh6}`VqmQ_BeWyy?Z<3845cHcsV|lgC6TGFU~LF zQPcBG0d)32$6k-zQPa5SIXmmwJeFHou!7B9An(YNLSRm_nZ{~nWz_eI!tW&dS6YlaX z8KE*ZSUcY(QWYBh+9fx{9mto+QhCIS^SEY)=B8qmta^wfV3fL;o1@3bf%9qV8A=^o z@+11I=zaFI#f<&n&WS9x>sM7pf8R%DPBbt8&W|Q6Do>P@rw#ElFJfSY*`VOP5pd;e ziXS7qxX%rdW+YdD!t_B5wPJ3rJW zdi8NJM{ypOg@M!pqUpZ_Sp-b#jl<#kv^&P6*bK|I(gS3XS%VX;x^zwDf~FqnJVa%e%}ms?NILv5vbs_MZX^WI%6EI`(u)2|?*hs@5)aA-sT>7WRCeMjk5 zVguk8AN_M(l)HuLJ1;7ML-VaA(3{l52(nK?f|<(Ef1_+{Caf7rsatHyH5`hAJG~Nl z!iY;BA@(3E8lYk-d9dj{9&K_s8^n%v3VA{wocd1>x8uwR{Mx2{(eh!6f~rMMt6Wj6=ea)-V&`Re+w3zt(rF^cQr4p7RuTlT>EJ$ZQWvgTq1*3 zomf0?54g+QB4yL0_KQ2mJTXvS4Q;`@ELb7E(s0zNL2Q{!2%m(yZ7pR}K+9d?%kC6NY#R?r$u^R!o+`*K63xihaU{JJO^w)+ zd-X*|wPAZr%QB&_uM@}W!pZUsUBQHgVY>y){KjZ4f6CBTGvaii#iUabTc<@c`UL>q z=xM}-qk;e&v3T>bzq4Wp%5=AiQ9j2`{@(z>%iN0YCF=Ql860?Jd~J z8u!j3#FXhN?Txb;KHE*?-zPY->>}|FXX@}`G%PzN5%X!Z@^{YC)+Ra$otZ=Zv3%_C z<5`4vy#k9h4&sMuIH4-1hop4Bd<&E4_gC@^-tkC1nUoYHD zseFgZ7=?IL)?#Kyg7HFM=^3@UhQGt5>$aeeMIO(MAIEw8TeAkT=Ki_!;Gn^p=cT`m z3}an<8ffY}i_A>TBiKW9@Zv&c*J1u^0N@Y+)mT;rtvr{ol>`;FWMa%Ns5>z!zno>` zt)K7gnT{)@@;Z8Sgs<(^wyd%7tnA=&v-GD*Vhq+sR8G?*fS)PrUPz_eE4d~;;E_hH zvDug?Ali^o9CJxv#452qfIo$zZmrfOkZ7Qy=28tCxym5SkV@lDNRb0-$ zOG2rSDsvCmtV05%6$!xY;rhj^tC@WZ3aIQ&HCWPElF?*LotQm$C=Vqt#kk3fQ>w!{ z>4xqHI*zeHe#kd-R`9k}h0zX++%}0F^bccX%Ec+btm4h}jBG|kHl=-vt@9kV93BPd z+Tadh*yWOFWeN#co})UV8h4)?fd(@YdnXu(dI?2v!p^ 
z`0d*jh6iey7H5=lFX; zI&aPam_d;5PEiQ!8FhN5Au<+UBp7;zd`gcoUU)3&c7|aLHOz!hmuslEe9DN=evAGN zd@CiV-TAtCyPkt}ZHoH!SZmEI2BR0Xr-L&K6Zt6xnqB*u3;m)KHB6mPc?X?_;eGy^ zEt*n_@~W!ylBZO7{O)n6_~99&%Vr|{Y5Olp5DD9$_!h|ENk6pbq;po=s7~T23mwC> z+5E-|qXJXsCG#|e)yk&XkJu^>R4^ysl>3p0p^UJCcLCL=rmm^=mUC09E5-p<3@U8Rp*}ua-jqkBu{mOAtLF~ zo%)gCjZQ}B)4F>zGaeMLO&B6HIS?D(nZ6)TC(wqW2eYm`O-^DFa~rLg?2$wcLxX(7 zRloXm#c7)IN>VJI%#*p^;HOcYm>!N5R18V`l>>Vox>uYBPM_HNU3?5V=DqDGSFtJv zAb1S%zxO=Kcg86-3m7UpW{Yd2&SDDF>KB(#jdJ5|w(RAbjInYSVNzsm6c7eDz6<2u zCK3M$O+(%315OiLgXGYEP7`07rs7IjY>y&rM%d;-bo|0HSj-|Fk#Jw+sjH*&%b=gB zAg^v@|6BG&koyRXiA7P)wT8gf&Z^gq$yTgAFiwuDmv$1b4>PKd49>*>`W`zcSzX;O zd$iJ}krEc#Ua@p65h8_!Y!RFh(yrFi2k|(uPb46G@PVqhT(h=3TNKZmRO|(*p%Mlm zf!xD*AP?+5x~n9|bVsK;c{Q5znTk_UEVmCg6D|JN@1Q9A0M?rT_gSgpzx>&Bhj2%+ zE2$-jLxoYJ{H(6iHjmCn1eIx-H~}_mP+iLp7n~X*?a%CT5lY;4=f)(V8cvzLlaLGv zCw`3vpsDdN6+iyYdy&58HbcOh%4r$wzd-eNX%TkEkY=S=AXq!4g^W4&ZMfALjtJIL zQ8~@PIce0)Vw&bDXpw6L<}FV&3!$+On@+7eBCbDreBUmGDfIP8PZpFlXA>NAjkvwD zj}&ZPRw?&l!0{rM+gOLh(*2m%cGH110&X>I+Q^p55-G%0qzOXl)e4tYC8-Bd&g*}@ zG$v!Q-!4Hk;tL@S?lU@BG2?6N0{3oABh;eZM5PO?hQL%%Kh%8yZn3`#4Vn z?3e*~EK)nMEIeAGV3%ORe_8IrY8+>OK;*d?pI9BbaF?leCn|g)Y?if)3|LZyCSKQw znOv42r63)BpNCGK85YGz4sX3pi<3Wj*RWypvgf^Yn~1F(M{^68%Th?sV~TqS}X+mqD^z^~@HN zT*VsXto695a!JiwQ^8xb$F*SeiNuk1_DO{YTJ>+#iLFBBuPMk5HpDayr(Hnlt|i7T z{C59IX_HC@aJK}0rQ3ke$j!WyUbW0A>VUJ9Ah&)6_(-HTP~ zTrk6%NXJ(2ki+3L?7l01<2r1iC{dGv-O(52xQnsMHO{Ni1a#5CnUb3uX}K94_000H z!e)k+U?|Bs+wcmrZ~r6X4Af?ux>8`>L)!3pGF!noCXg0>|f47l0iw)jqgMrmUjNQJ7L~yz)A5v zN%GR?Uu|zb)%Z(wCo0lSuV+@OnwFp;dc+F|6jXFOV|2aU3V4H;3oh!$)xa>Ku)(qr zb&>?<1?K+cWcp_AF?6}RD$SN2lZ6ZX($;pE_x_vLJ7Ar#4wfBX*an4;!aYdMd6@%)cQ8BIj&7P&>OrgQBr(Meo$eGS{=v7@wc0s| z#qw}5t?T90aOU@w2|-ky^SSveLXVj6=7Id(c^T5kFIfRpS`1j07rH?eooGCY_=*ML z@dLtjz?k}z2uA#!)m zOKSoj7VuJqDzSS6^exO;wea|h!!4=<2L>ENZ@vafUA)O)B~53Yw)tslS@FDRyzG%w zNV-+Wlfa>HgLXig{(tdCas|kehwlb-3c@A;&`RSy)+f}@g zku>(Nb4-=}xZ42Fc>!JZVC(>S`3w3sqEq_SndJ_szBaWLf^*7%J@?ULd4qK#so;9xmf>W^goMDX7n!^o{@v`r_N{il*2hY zSP}h;h5&PPGOz;DHI7b>|MfsVb#nAw44HxWGyJE`eV3umpUFQM?ceyn_)qpz{*Nsn{{QlSwa5Q&pMSN(XZQGj)<5uo zebw+Ei~rdCwENkQKQV|pQ0D$`*(Ur8JHFvXctrx2(-Wrg#kc$ z36Pct*5L#wZwKA1t4;x6{r!Nv zJrJaT?FjS_hCtar;j_J&fS~Ve$MeCE@?aXdl+J=!`u%RTFpb3N+$>i?L-R{_#o!Ob` zc9+CZ4XHoj$;2YX?1l$|AQ3}UjPa)=8c`nPNhvV~f1)CZlK5Z?Y)E-9{BaQLTs=fN3f)u6!Qba*o+Mi^O)bbkxvZ_fMP5P27Qn2 z9NiY_$I0DW!BVCYD7NZGQKe}c$2cw?iE;ce2s5^AjP&(QO-)7ficEEtWTUz{-p5d) z8Cz4qMmKCl&srl`A}JyXJT_@sh+~!Hzh$b(^yNv#M`}?mhLzrXMz$dtZ3t-HQl?z2B-EXWshm*o=7QJUE71vPeF( zY5jq(Dr>?!zg&Oj-0$1=o?Q6lC1cfP6khpa=AF|!&$l78Cqbq^34Iv8zH)lQd_FYO zc4;YKEpilfg&SUeS=}4Bg6lV6L*v;cF zPrJ&{6qAQa#NJXtqp3d&p3gmoE0>;Pa_iNaJO}dbQI{>tKs*`+&qXf3p7XR_R+b^@Sk zJ2eI>HL07{!c)oXrU0NOH4^}_hZAe20HB$`uKh>71VGhi(^SK2)T~Dn097ANVrB8* zn@h#ee%LLB<~*6&zR`3&4-r)@pOJyqjquM1-SsI7A@*Auf17~VH%}~CHsV?kYHNpg zxd~7{X-_zq@UBoBUNpe<4z3h7mN~;<`r9W>6}$Y3YnGi-fj?x~R`r4~o9EbP6;L+nV>*Ix0@5A>mK|&?vv9?A z=dMZxW;{gQ8j{hv%D1R%9O$Wq#6{sKD>n!cMtF5+WHyA_GS|MpaQ{3wbXm5uuPhP+ zc?_WyjPHQuvi)X*$>C^uMOv~2J0^p@cW8%4t8+GGdM6gpTY7gtZtah|53D?zRv+C^ ztCADZiOJ%!I%YJq2Xy;99U~wQ?*V!*99?w>_R0S1^Yy-bnmvd6gQNon8*INhI8u|c zO@Kb3QHKJHHq_s1;r?tA!(g|=+K4Fb4gJ9rw$1|H2mdUwAS@`_LGtN=p=fM4%5&Ub D%BOAH literal 0 HcmV?d00001 diff --git a/examples/wan2.2_vace_fun/predict_i2v.py b/examples/wan2.2_vace_fun/predict_i2v.py index a1543c3a..45f6065c 100644 --- a/examples/wan2.2_vace_fun/predict_i2v.py +++ b/examples/wan2.2_vace_fun/predict_i2v.py @@ -108,12 +108,15 @@ # Use torch.float16 if GPU does not support torch.bfloat16 # ome graphics cards, such as v100, 2080ti, do not support torch.bfloat16 -weight_dtype = 
torch.bfloat16 -control_video = None -start_image = "asset/1.png" -end_image = None -subject_ref_images = None -vace_context_scale = 1.00 +weight_dtype = torch.bfloat16 +control_video = None +start_image = "asset/1.png" +end_image = None +# Use inpaint video instead of start image and end image. +inpaint_video = None +inpaint_video_mask = None +subject_ref_images = None +vace_context_scale = 1.00 # Sometimes, when generating a video from a reference image, white borders appear. # Because the padding is mistakenly treated as part of the image. # If the aspect ratio of the reference image is close to the final output, you can omit the white padding. @@ -323,7 +326,14 @@ subject_ref_images = [get_image_latent(_subject_ref_image, sample_size=sample_size, padding=padding_in_subject_ref_images) for _subject_ref_image in subject_ref_images] subject_ref_images = torch.cat(subject_ref_images, dim=2) - inpaint_video, inpaint_video_mask, clip_image = get_image_to_video_latent(start_image, end_image, video_length=video_length, sample_size=sample_size) + if inpaint_video is not None: + if inpaint_video_mask is None: + raise ValueError("inpaint_video_mask is required when inpaint_video is provided") + inpaint_video, _, _, _ = get_video_to_video_latent(inpaint_video, video_length=video_length, sample_size=sample_size, fps=fps, ref_image=None) + inpaint_video_mask, _, _, _ = get_video_to_video_latent(inpaint_video_mask, video_length=video_length, sample_size=sample_size, fps=fps, ref_image=None) + inpaint_video_mask = inpaint_video_mask[:, :1] + else: + inpaint_video, inpaint_video_mask, clip_image = get_image_to_video_latent(start_image, end_image, video_length=video_length, sample_size=sample_size) control_video, _, _, _ = get_video_to_video_latent(control_video, video_length=video_length, sample_size=sample_size, fps=fps, ref_image=None) diff --git a/examples/wan2.2_vace_fun/predict_s2v.py b/examples/wan2.2_vace_fun/predict_s2v.py index 9ea61bc7..7d53f2cb 100644 --- a/examples/wan2.2_vace_fun/predict_s2v.py +++ b/examples/wan2.2_vace_fun/predict_s2v.py @@ -108,12 +108,15 @@ # Use torch.float16 if GPU does not support torch.bfloat16 # ome graphics cards, such as v100, 2080ti, do not support torch.bfloat16 -weight_dtype = torch.bfloat16 -control_video = None -start_image = None -end_image = None -subject_ref_images = ["asset/8.png", "asset/ref_1.png"] -vace_context_scale = 1.00 +weight_dtype = torch.bfloat16 +control_video = None +start_image = None +end_image = None +# Use inpaint video instead of start image and end image. +inpaint_video = None +inpaint_video_mask = None +subject_ref_images = ["asset/8.png", "asset/ref_1.png"] +vace_context_scale = 1.00 # Sometimes, when generating a video from a reference image, white borders appear. # Because the padding is mistakenly treated as part of the image. # If the aspect ratio of the reference image is close to the final output, you can omit the white padding. 
@@ -323,7 +326,14 @@ subject_ref_images = [get_image_latent(_subject_ref_image, sample_size=sample_size, padding=padding_in_subject_ref_images) for _subject_ref_image in subject_ref_images] subject_ref_images = torch.cat(subject_ref_images, dim=2) - inpaint_video, inpaint_video_mask, clip_image = get_image_to_video_latent(start_image, end_image, video_length=video_length, sample_size=sample_size) + if inpaint_video is not None: + if inpaint_video_mask is None: + raise ValueError("inpaint_video_mask is required when inpaint_video is provided") + inpaint_video, _, _, _ = get_video_to_video_latent(inpaint_video, video_length=video_length, sample_size=sample_size, fps=fps, ref_image=None) + inpaint_video_mask, _, _, _ = get_video_to_video_latent(inpaint_video_mask, video_length=video_length, sample_size=sample_size, fps=fps, ref_image=None) + inpaint_video_mask = inpaint_video_mask[:, :1] + else: + inpaint_video, inpaint_video_mask, clip_image = get_image_to_video_latent(start_image, end_image, video_length=video_length, sample_size=sample_size) control_video, _, _, _ = get_video_to_video_latent(control_video, video_length=video_length, sample_size=sample_size, fps=fps, ref_image=None) diff --git a/examples/wan2.2_vace_fun/predict_v2v_control.py b/examples/wan2.2_vace_fun/predict_v2v_control.py index e29f2f2c..a23f35c5 100644 --- a/examples/wan2.2_vace_fun/predict_v2v_control.py +++ b/examples/wan2.2_vace_fun/predict_v2v_control.py @@ -108,12 +108,15 @@ # Use torch.float16 if GPU does not support torch.bfloat16 # ome graphics cards, such as v100, 2080ti, do not support torch.bfloat16 -weight_dtype = torch.bfloat16 -control_video = "asset/pose.mp4" -start_image = None -end_image = None -subject_ref_images = None -vace_context_scale = 1.00 +weight_dtype = torch.bfloat16 +control_video = "asset/pose.mp4" +start_image = None +end_image = None +# Use inpaint video instead of start image and end image. +inpaint_video = None +inpaint_video_mask = None +subject_ref_images = None +vace_context_scale = 1.00 # Sometimes, when generating a video from a reference image, white borders appear. # Because the padding is mistakenly treated as part of the image. # If the aspect ratio of the reference image is close to the final output, you can omit the white padding. 
@@ -323,7 +326,14 @@ subject_ref_images = [get_image_latent(_subject_ref_image, sample_size=sample_size, padding=padding_in_subject_ref_images) for _subject_ref_image in subject_ref_images] subject_ref_images = torch.cat(subject_ref_images, dim=2) - inpaint_video, inpaint_video_mask, clip_image = get_image_to_video_latent(start_image, end_image, video_length=video_length, sample_size=sample_size) + if inpaint_video is not None: + if inpaint_video_mask is None: + raise ValueError("inpaint_video_mask is required when inpaint_video is provided") + inpaint_video, _, _, _ = get_video_to_video_latent(inpaint_video, video_length=video_length, sample_size=sample_size, fps=fps, ref_image=None) + inpaint_video_mask, _, _, _ = get_video_to_video_latent(inpaint_video_mask, video_length=video_length, sample_size=sample_size, fps=fps, ref_image=None) + inpaint_video_mask = inpaint_video_mask[:, :1] + else: + inpaint_video, inpaint_video_mask, clip_image = get_image_to_video_latent(start_image, end_image, video_length=video_length, sample_size=sample_size) control_video, _, _, _ = get_video_to_video_latent(control_video, video_length=video_length, sample_size=sample_size, fps=fps, ref_image=None) diff --git a/examples/wan2.2_vace_fun/predict_v2v_control_ref.py b/examples/wan2.2_vace_fun/predict_v2v_control_ref.py index c5e51d62..e0538361 100644 --- a/examples/wan2.2_vace_fun/predict_v2v_control_ref.py +++ b/examples/wan2.2_vace_fun/predict_v2v_control_ref.py @@ -108,12 +108,15 @@ # Use torch.float16 if GPU does not support torch.bfloat16 # ome graphics cards, such as v100, 2080ti, do not support torch.bfloat16 -weight_dtype = torch.bfloat16 -control_video = "asset/pose.mp4" -start_image = None -end_image = None -subject_ref_images = ["asset/8.png"] -vace_context_scale = 1.00 +weight_dtype = torch.bfloat16 +control_video = "asset/pose.mp4" +start_image = None +end_image = None +# Use inpaint video instead of start image and end image. +inpaint_video = None +inpaint_video_mask = None +subject_ref_images = ["asset/8.png"] +vace_context_scale = 1.00 # Sometimes, when generating a video from a reference image, white borders appear. # Because the padding is mistakenly treated as part of the image. # If the aspect ratio of the reference image is close to the final output, you can omit the white padding. 
@@ -323,7 +326,14 @@ subject_ref_images = [get_image_latent(_subject_ref_image, sample_size=sample_size, padding=padding_in_subject_ref_images) for _subject_ref_image in subject_ref_images] subject_ref_images = torch.cat(subject_ref_images, dim=2) - inpaint_video, inpaint_video_mask, clip_image = get_image_to_video_latent(start_image, end_image, video_length=video_length, sample_size=sample_size) + if inpaint_video is not None: + if inpaint_video_mask is None: + raise ValueError("inpaint_video_mask is required when inpaint_video is provided") + inpaint_video, _, _, _ = get_video_to_video_latent(inpaint_video, video_length=video_length, sample_size=sample_size, fps=fps, ref_image=None) + inpaint_video_mask, _, _, _ = get_video_to_video_latent(inpaint_video_mask, video_length=video_length, sample_size=sample_size, fps=fps, ref_image=None) + inpaint_video_mask = inpaint_video_mask[:, :1] + else: + inpaint_video, inpaint_video_mask, clip_image = get_image_to_video_latent(start_image, end_image, video_length=video_length, sample_size=sample_size) control_video, _, _, _ = get_video_to_video_latent(control_video, video_length=video_length, sample_size=sample_size, fps=fps, ref_image=None) diff --git a/examples/wan2.2_vace_fun/predict_v2v_mask.py b/examples/wan2.2_vace_fun/predict_v2v_mask.py new file mode 100644 index 00000000..ec346f9a --- /dev/null +++ b/examples/wan2.2_vace_fun/predict_v2v_mask.py @@ -0,0 +1,387 @@ +import os +import sys + +import numpy as np +import torch +from diffusers import FlowMatchEulerDiscreteScheduler +from omegaconf import OmegaConf +from PIL import Image +from transformers import AutoTokenizer + +current_file_path = os.path.abspath(__file__) +project_roots = [os.path.dirname(current_file_path), os.path.dirname(os.path.dirname(current_file_path)), os.path.dirname(os.path.dirname(os.path.dirname(current_file_path)))] +for project_root in project_roots: + sys.path.insert(0, project_root) if project_root not in sys.path else None + +from videox_fun.dist import set_multi_gpus_devices, shard_model +from videox_fun.models import (AutoencoderKLWan, AutoencoderKLWan3_8, AutoTokenizer, CLIPModel, + WanT5EncoderModel, VaceWanTransformer3DModel) +from videox_fun.data.dataset_image_video import process_pose_file +from videox_fun.models.cache_utils import get_teacache_coefficients +from videox_fun.pipeline import Wan2_2VaceFunPipeline, WanPipeline +from videox_fun.utils.fp8_optimization import (convert_model_weight_to_float8, + convert_weight_dtype_wrapper, + replace_parameters_by_name) +from videox_fun.utils.lora_utils import merge_lora, unmerge_lora +from videox_fun.utils.utils import (filter_kwargs, get_image_to_video_latent, get_image_latent, + get_video_to_video_latent, + save_videos_grid) +from videox_fun.utils.fm_solvers import FlowDPMSolverMultistepScheduler +from videox_fun.utils.fm_solvers_unipc import FlowUniPCMultistepScheduler + +# GPU memory mode, which can be chosen in [model_full_load, model_cpu_offload_and_qfloat8, model_cpu_offload, model_cpu_offload_and_qfloat8, sequential_cpu_offload]. +# model_full_load means that the entire model will be moved to the GPU. +# +# model_full_load_and_qfloat8 means that the entire model will be moved to the GPU, +# and the transformer model has been quantized to float8, which can save more GPU memory. +# +# model_cpu_offload means that the entire model will be moved to the CPU after use, which can save some GPU memory. 
+# +# model_cpu_offload_and_qfloat8 indicates that the entire model will be moved to the CPU after use, +# and the transformer model has been quantized to float8, which can save more GPU memory. +# +# sequential_cpu_offload means that each layer of the model will be moved to the CPU after use, +# resulting in slower speeds but saving a large amount of GPU memory. +GPU_memory_mode = "sequential_cpu_offload" +# Multi GPUs config +# Please ensure that the product of ulysses_degree and ring_degree equals the number of GPUs used. +# For example, if you are using 8 GPUs, you can set ulysses_degree = 2 and ring_degree = 4. +# If you are using 1 GPU, you can set ulysses_degree = 1 and ring_degree = 1. +ulysses_degree = 1 +ring_degree = 1 +# Use FSDP to save more GPU memory in multi gpus. +fsdp_dit = False +fsdp_text_encoder = True +# Compile will give a speedup in fixed resolution and need a little GPU memory. +# The compile_dit is not compatible with the fsdp_dit and sequential_cpu_offload. +compile_dit = False + +# Support TeaCache. +enable_teacache = True +# Recommended to be set between 0.05 and 0.30. A larger threshold can cache more steps, speeding up the inference process, +# but it may cause slight differences between the generated content and the original content. +# # --------------------------------------------------------------------------------------------------- # +# | Model Name | threshold | Model Name | threshold | +# | Wan2.1-VACE-1.3B | 0.05~0.10 | Wan2.1-VACE-14B | 0.10~0.15 | +# # --------------------------------------------------------------------------------------------------- # +teacache_threshold = 0.10 +# The number of steps to skip TeaCache at the beginning of the inference process, which can +# reduce the impact of TeaCache on generated video quality. +num_skip_start_steps = 5 +# Whether to offload TeaCache tensors to cpu to save a little bit of GPU memory. +teacache_offload = False + +# Skip some cfg steps in inference for acceleration +# Recommended to be set between 0.00 and 0.25 +cfg_skip_ratio = 0 + +# Riflex config +enable_riflex = False +# Index of intrinsic frequency +riflex_k = 6 + +# Config and model path +config_path = "config/wan2.2/wan_civitai_t2v.yaml" +# model path +model_name = "models/Diffusion_Transformer/Wan2.2-VACE-Fun-A14B" + +# Choose the sampler in "Flow", "Flow_Unipc", "Flow_DPM++" +sampler_name = "Flow" +# [NOTE]: Noise schedule shift parameter. Affects temporal dynamics. +# Used when the sampler is in "Flow_Unipc", "Flow_DPM++". +shift = 12.0 + +# Load pretrained model if need +# The transformer_path is used for low noise model, the transformer_high_path is used for high noise model. +transformer_path = None +transformer_high_path = None +vae_path = None +# Load lora model if need +# The lora_path is used for low noise model, the lora_high_path is used for high noise model. +lora_path = None +lora_high_path = None + +# Other params +sample_size = [480, 832] +video_length = 81 +fps = 16 + +# Use torch.float16 if GPU does not support torch.bfloat16 +# ome graphics cards, such as v100, 2080ti, do not support torch.bfloat16 +weight_dtype = torch.bfloat16 +control_video = None +start_image = None +end_image = None +# Use inpaint video instead of start image and end image. +inpaint_video = "asset/inpaint_video.mp4" +inpaint_video_mask = "asset/inpaint_video_mask.mp4" +subject_ref_images = None +vace_context_scale = 1.00 +# Sometimes, when generating a video from a reference image, white borders appear. 
+# Because the padding is mistakenly treated as part of the image. +# If the aspect ratio of the reference image is close to the final output, you can omit the white padding. +padding_in_subject_ref_images = True + +# 使用更长的neg prompt如"模糊,突变,变形,失真,画面暗,文本字幕,画面固定,连环画,漫画,线稿,没有主体。",可以增加稳定性 +# 在neg prompt中添加"安静,固定"等词语可以增加动态性。 +prompt = "一只棕色的兔子舔了一下它的舌头,坐在舒适房间里的浅色沙发上。在兔子的后面,架子上有一幅镶框的画,周围是粉红色的花朵。房间里柔和温暖的灯光营造出舒适的氛围。" +negative_prompt = "色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走" + +# Using longer neg prompt such as "Blurring, mutation, deformation, distortion, dark and solid, comics, text subtitles, line art." can increase stability +# Adding words such as "quiet, solid" to the neg prompt can increase dynamism. +# prompt = "A young woman with beautiful, clear eyes and blonde hair stands in the forest, wearing a white dress and a crown. Her expression is serene, reminiscent of a movie star, with fair and youthful skin. Her brown long hair flows in the wind. The video quality is very high, with a clear view. High quality, masterpiece, best quality, high resolution, ultra-fine, fantastical." +# negative_prompt = "Twisted body, limb deformities, text captions, comic, static, ugly, error, messy code." +guidance_scale = 5.0 +seed = 43 +num_inference_steps = 50 +# The lora_weight is used for low noise model, the lora_high_weight is used for high noise model. +lora_weight = 0.55 +lora_high_weight = 0.55 +save_path = "samples/vace-videos-fun" + +device = set_multi_gpus_devices(ulysses_degree, ring_degree) +config = OmegaConf.load(config_path) +boundary = config['transformer_additional_kwargs'].get('boundary', 0.875) + +transformer = VaceWanTransformer3DModel.from_pretrained( + os.path.join(model_name, config['transformer_additional_kwargs'].get('transformer_low_noise_model_subpath', 'transformer')), + transformer_additional_kwargs=OmegaConf.to_container(config['transformer_additional_kwargs']), + low_cpu_mem_usage=True, + torch_dtype=weight_dtype, +) +if config['transformer_additional_kwargs'].get('transformer_combination_type', 'single') == "moe": + transformer_2 = VaceWanTransformer3DModel.from_pretrained( + os.path.join(model_name, config['transformer_additional_kwargs'].get('transformer_high_noise_model_subpath', 'transformer')), + transformer_additional_kwargs=OmegaConf.to_container(config['transformer_additional_kwargs']), + low_cpu_mem_usage=True, + torch_dtype=weight_dtype, + ) +else: + transformer_2 = None + +if transformer_path is not None: + print(f"From checkpoint: {transformer_path}") + if transformer_path.endswith("safetensors"): + from safetensors.torch import load_file, safe_open + state_dict = load_file(transformer_path) + else: + state_dict = torch.load(transformer_path, map_location="cpu") + state_dict = state_dict["state_dict"] if "state_dict" in state_dict else state_dict + + m, u = transformer.load_state_dict(state_dict, strict=False) + print(f"missing keys: {len(m)}, unexpected keys: {len(u)}") + +if transformer_2 is not None: + if transformer_high_path is not None: + print(f"From checkpoint: {transformer_high_path}") + if transformer_high_path.endswith("safetensors"): + from safetensors.torch import load_file, safe_open + state_dict = load_file(transformer_high_path) + else: + state_dict = torch.load(transformer_high_path, map_location="cpu") + state_dict = state_dict["state_dict"] if "state_dict" in state_dict else state_dict + + m, u = transformer_2.load_state_dict(state_dict, 
strict=False) + print(f"missing keys: {len(m)}, unexpected keys: {len(u)}") + +# Get Vae +Chosen_AutoencoderKL = { + "AutoencoderKLWan": AutoencoderKLWan, + "AutoencoderKLWan3_8": AutoencoderKLWan3_8 +}[config['vae_kwargs'].get('vae_type', 'AutoencoderKLWan')] +vae = Chosen_AutoencoderKL.from_pretrained( + os.path.join(model_name, config['vae_kwargs'].get('vae_subpath', 'vae')), + additional_kwargs=OmegaConf.to_container(config['vae_kwargs']), +).to(weight_dtype) + +if vae_path is not None: + print(f"From checkpoint: {vae_path}") + if vae_path.endswith("safetensors"): + from safetensors.torch import load_file, safe_open + state_dict = load_file(vae_path) + else: + state_dict = torch.load(vae_path, map_location="cpu") + state_dict = state_dict["state_dict"] if "state_dict" in state_dict else state_dict + + m, u = vae.load_state_dict(state_dict, strict=False) + print(f"missing keys: {len(m)}, unexpected keys: {len(u)}") + +# Get Tokenizer +tokenizer = AutoTokenizer.from_pretrained( + os.path.join(model_name, config['text_encoder_kwargs'].get('tokenizer_subpath', 'tokenizer')), +) + +# Get Text encoder +text_encoder = WanT5EncoderModel.from_pretrained( + os.path.join(model_name, config['text_encoder_kwargs'].get('text_encoder_subpath', 'text_encoder')), + additional_kwargs=OmegaConf.to_container(config['text_encoder_kwargs']), + low_cpu_mem_usage=True, + torch_dtype=weight_dtype, +) +text_encoder = text_encoder.eval() + +# Get Scheduler +Chosen_Scheduler = scheduler_dict = { + "Flow": FlowMatchEulerDiscreteScheduler, + "Flow_Unipc": FlowUniPCMultistepScheduler, + "Flow_DPM++": FlowDPMSolverMultistepScheduler, +}[sampler_name] +if sampler_name == "Flow_Unipc" or sampler_name == "Flow_DPM++": + config['scheduler_kwargs']['shift'] = 1 +scheduler = Chosen_Scheduler( + **filter_kwargs(Chosen_Scheduler, OmegaConf.to_container(config['scheduler_kwargs'])) +) + +# Get Pipeline +pipeline = Wan2_2VaceFunPipeline( + transformer=transformer, + transformer_2=transformer_2, + vae=vae, + tokenizer=tokenizer, + text_encoder=text_encoder, + scheduler=scheduler, +) +if ulysses_degree > 1 or ring_degree > 1: + from functools import partial + transformer.enable_multi_gpus_inference() + if transformer_2 is not None: + transformer_2.enable_multi_gpus_inference() + if fsdp_dit: + shard_fn = partial(shard_model, device_id=device, param_dtype=weight_dtype) + pipeline.transformer = shard_fn(pipeline.transformer) + if transformer_2 is not None: + pipeline.transformer_2 = shard_fn(pipeline.transformer_2) + print("Add FSDP DIT") + if fsdp_text_encoder: + shard_fn = partial(shard_model, device_id=device, param_dtype=weight_dtype) + pipeline.text_encoder = shard_fn(pipeline.text_encoder) + print("Add FSDP TEXT ENCODER") + +if compile_dit: + for i in range(len(pipeline.transformer.blocks)): + pipeline.transformer.blocks[i] = torch.compile(pipeline.transformer.blocks[i]) + if transformer_2 is not None: + for i in range(len(pipeline.transformer_2.blocks)): + pipeline.transformer_2.blocks[i] = torch.compile(pipeline.transformer_2.blocks[i]) + print("Add Compile") + +if GPU_memory_mode == "sequential_cpu_offload": + replace_parameters_by_name(transformer, ["modulation",], device=device) + transformer.freqs = transformer.freqs.to(device=device) + if transformer_2 is not None: + replace_parameters_by_name(transformer_2, ["modulation",], device=device) + transformer_2.freqs = transformer_2.freqs.to(device=device) + pipeline.enable_sequential_cpu_offload(device=device) +elif GPU_memory_mode == "model_cpu_offload_and_qfloat8": + 
+    convert_model_weight_to_float8(transformer, exclude_module_name=["modulation",], device=device)
+    convert_weight_dtype_wrapper(transformer, weight_dtype)
+    if transformer_2 is not None:
+        convert_model_weight_to_float8(transformer_2, exclude_module_name=["modulation",], device=device)
+        convert_weight_dtype_wrapper(transformer_2, weight_dtype)
+    pipeline.enable_model_cpu_offload(device=device)
+elif GPU_memory_mode == "model_cpu_offload":
+    pipeline.enable_model_cpu_offload(device=device)
+elif GPU_memory_mode == "model_full_load_and_qfloat8":
+    convert_model_weight_to_float8(transformer, exclude_module_name=["modulation",], device=device)
+    convert_weight_dtype_wrapper(transformer, weight_dtype)
+    if transformer_2 is not None:
+        convert_model_weight_to_float8(transformer_2, exclude_module_name=["modulation",], device=device)
+        convert_weight_dtype_wrapper(transformer_2, weight_dtype)
+    pipeline.to(device=device)
+else:
+    pipeline.to(device=device)
+
+coefficients = get_teacache_coefficients(model_name) if enable_teacache else None
+if coefficients is not None:
+    print(f"Enable TeaCache with threshold {teacache_threshold} and skip the first {num_skip_start_steps} steps.")
+    pipeline.transformer.enable_teacache(
+        coefficients, num_inference_steps, teacache_threshold, num_skip_start_steps=num_skip_start_steps, offload=teacache_offload
+    )
+    if transformer_2 is not None:
+        pipeline.transformer_2.share_teacache(transformer=pipeline.transformer)
+
+if cfg_skip_ratio is not None:
+    print(f"Enable cfg_skip_ratio {cfg_skip_ratio}.")
+    pipeline.transformer.enable_cfg_skip(cfg_skip_ratio, num_inference_steps)
+    if transformer_2 is not None:
+        pipeline.transformer_2.share_cfg_skip(transformer=pipeline.transformer)
+
+generator = torch.Generator(device=device).manual_seed(seed)
+
+if lora_path is not None:
+    pipeline = merge_lora(pipeline, lora_path, lora_weight, device=device)
+    if transformer_2 is not None:
+        pipeline = merge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2")
+
+with torch.no_grad():
+    video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1
+    latent_frames = (video_length - 1) // vae.config.temporal_compression_ratio + 1
+
+    if enable_riflex:
+        pipeline.transformer.enable_riflex(k = riflex_k, L_test = latent_frames)
+        if transformer_2 is not None:
+            pipeline.transformer_2.enable_riflex(k = riflex_k, L_test = latent_frames)
+
+    if subject_ref_images is not None:
+        subject_ref_images = [get_image_latent(_subject_ref_image, sample_size=sample_size, padding=padding_in_subject_ref_images) for _subject_ref_image in subject_ref_images]
+        subject_ref_images = torch.cat(subject_ref_images, dim=2)
+
+    if inpaint_video is not None:
+        if inpaint_video_mask is None:
+            raise ValueError("inpaint_video_mask is required when inpaint_video is provided")
+        inpaint_video, _, _, _ = get_video_to_video_latent(inpaint_video, video_length=video_length, sample_size=sample_size, fps=fps, ref_image=None)
+        inpaint_video_mask, _, _, _ = get_video_to_video_latent(inpaint_video_mask, video_length=video_length, sample_size=sample_size, fps=fps, ref_image=None)
+        inpaint_video_mask = inpaint_video_mask[:, :1]
+    else:
+        inpaint_video, inpaint_video_mask, clip_image = get_image_to_video_latent(start_image, end_image, video_length=video_length, sample_size=sample_size)
+
+    control_video, _, _, _ = get_video_to_video_latent(control_video, video_length=video_length, sample_size=sample_size, fps=fps, ref_image=None)
+
+    sample = pipeline(
+        prompt,
+        num_frames = video_length,
+        negative_prompt = negative_prompt,
+        height = sample_size[0],
+        width = sample_size[1],
+        generator = generator,
+        guidance_scale = guidance_scale,
+        num_inference_steps = num_inference_steps,
+
+        video = inpaint_video,
+        mask_video = inpaint_video_mask,
+        control_video = control_video,
+        subject_ref_images = subject_ref_images,
+        boundary = boundary,
+        shift = shift,
+        vace_context_scale = vace_context_scale
+    ).videos
+
+if lora_path is not None:
+    pipeline = unmerge_lora(pipeline, lora_path, lora_weight, device=device)
+    if transformer_2 is not None:
+        pipeline = unmerge_lora(pipeline, lora_high_path, lora_high_weight, device=device, sub_transformer_name="transformer_2")
+
+def save_results():
+    if not os.path.exists(save_path):
+        os.makedirs(save_path, exist_ok=True)
+
+    index = len([path for path in os.listdir(save_path)]) + 1
+    prefix = str(index).zfill(8)
+    if video_length == 1:
+        video_path = os.path.join(save_path, prefix + ".png")
+
+        image = sample[0, :, 0]
+        image = image.transpose(0, 1).transpose(1, 2)
+        image = (image * 255).numpy().astype(np.uint8)
+        image = Image.fromarray(image)
+        image.save(video_path)
+    else:
+        video_path = os.path.join(save_path, prefix + ".mp4")
+        save_videos_grid(sample, video_path, fps=fps)
+
+if ulysses_degree * ring_degree > 1:
+    import torch.distributed as dist
+    if dist.get_rank() == 0:
+        save_results()
+else:
+    save_results()
\ No newline at end of file

From d9d4db0996c7340a5be0a163a79667a0c3231cc0 Mon Sep 17 00:00:00 2001
From: bubbliiiing <3323290568@qq.com>
Date: Thu, 18 Sep 2025 15:22:37 +0800
Subject: [PATCH 7/8] Update vace transformer3d

---
 videox_fun/models/wan_transformer3d_vace.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/videox_fun/models/wan_transformer3d_vace.py b/videox_fun/models/wan_transformer3d_vace.py
index fabbfa85..7ca57f10 100644
--- a/videox_fun/models/wan_transformer3d_vace.py
+++ b/videox_fun/models/wan_transformer3d_vace.py
@@ -14,7 +14,8 @@
 from .wan_transformer3d import (WanAttentionBlock, WanTransformer3DModel,
                                 sinusoidal_embedding_1d)
 
-VIDEOX_OFFLOAD_VACE_LATENTS=os.environ.get("VIDEOX_OFFLOAD_VACE_LATENTS", False)
+
+VIDEOX_OFFLOAD_VACE_LATENTS = os.environ.get("VIDEOX_OFFLOAD_VACE_LATENTS", False)
 
 class VaceWanAttentionBlock(WanAttentionBlock):
     def __init__(
@@ -82,8 +83,9 @@ def forward(self, x, hints, context_scale=1.0, **kwargs):
         x = super().forward(x, **kwargs)
         if self.block_id is not None:
             if VIDEOX_OFFLOAD_VACE_LATENTS:
-                hints[self.block_id].to(x.device)
-                x = x + hints[self.block_id] * context_scale
+                x = x + hints[self.block_id].to(x.device) * context_scale
+            else:
+                x = x + hints[self.block_id] * context_scale
         return x
 

From 89518c6034d9e33c72dc7d38224efb86c7af9512 Mon Sep 17 00:00:00 2001
From: bubbliiiing <3323290568@qq.com>
Date: Thu, 18 Sep 2025 15:49:45 +0800
Subject: [PATCH 8/8] Update Readme

---
 scripts/wan2.2_vace_fun/README_TRAIN.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/wan2.2_vace_fun/README_TRAIN.md b/scripts/wan2.2_vace_fun/README_TRAIN.md
index bad187b5..ca63aea9 100755
--- a/scripts/wan2.2_vace_fun/README_TRAIN.md
+++ b/scripts/wan2.2_vace_fun/README_TRAIN.md
@@ -16,7 +16,7 @@ The metadata_control.json is a little different from normal json in Wan-Fun, you
 {
     "file_path": "train/00000002.jpg",
     "control_file_path": "control/00000002.jpg",
-    "object_file_path": ["object/1.jpg", "object/2.jpg"],
+    "object_file_path": ["object/3.jpg", "object/4.jpg"],
     "text": "Ba Da Ba Ba Ba Ba.",
     "type": "image"
 },