From 5cb1f588525679c4f02b0d58208fe4aa2349d535 Mon Sep 17 00:00:00 2001 From: areszz <1031614818@qq.com> Date: Sat, 22 Feb 2025 14:21:54 +0800 Subject: [PATCH] first commit --- README.md | 142 + data/__init__.py | 98 + data/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 4139 bytes data/__pycache__/base_dataset.cpython-36.pyc | Bin 0 -> 8090 bytes data/__pycache__/image_folder.cpython-36.pyc | Bin 0 -> 2469 bytes .../unaligned_dataset.cpython-36.pyc | Bin 0 -> 3052 bytes .../unaligned_double_dataset.cpython-36.pyc | Bin 0 -> 3634 bytes data/base_dataset.py | 230 ++ data/image_folder.py | 66 + data/single_dataset.py | 40 + data/singleimage_dataset.py | 108 + data/template_dataset.py | 75 + data/unaligned_dataset.py | 79 + data/unaligned_double_dataset.py | 100 + datasets/bibtex/cityscapes.tex | 6 + datasets/bibtex/facades.tex | 7 + datasets/bibtex/handbags.tex | 13 + datasets/bibtex/shoes.tex | 14 + datasets/bibtex/transattr.tex | 8 + datasets/combine_A_and_B.py | 48 + datasets/detect_cat_face.py | 64 + datasets/download_cut_dataset.sh | 23 + datasets/download_pix2pix_dataset.sh | 24 + datasets/make_dataset_aligned.py | 63 + datasets/prepare_cityscapes_dataset.py | 90 + .../trainA/monet.jpg | Bin 0 -> 296150 bytes .../trainB/etretat-normandy-france.jpg | Bin 0 -> 620384 bytes images/method_final.jpg | Bin 0 -> 284293 bytes models/__init__.py | 67 + models/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 3220 bytes models/__pycache__/base_model.cpython-36.pyc | Bin 0 -> 11154 bytes models/__pycache__/cut_model.cpython-36.pyc | Bin 0 -> 8069 bytes models/__pycache__/mae.cpython-36.pyc | Bin 0 -> 8422 bytes models/__pycache__/models_mae.cpython-36.pyc | Bin 0 -> 8344 bytes .../mutilvitgloballocal_model.cpython-36.pyc | Bin 0 -> 11463 bytes models/__pycache__/networks.cpython-36.pyc | Bin 0 -> 52658 bytes models/__pycache__/patchnce.cpython-36.pyc | Bin 0 -> 1568 bytes .../__pycache__/region0_model.cpython-36.pyc | Bin 0 -> 12992 bytes .../__pycache__/region_model.cpython-36.pyc | Bin 0 -> 12018 bytes .../stylegan_networks.cpython-36.pyc | Bin 0 -> 23294 bytes .../vit2Gmask_model.cpython-36.pyc | Bin 0 -> 9239 bytes models/__pycache__/vit2_model.cpython-36.pyc | Bin 0 -> 9372 bytes .../vit2patchmask_model.cpython-36.pyc | Bin 0 -> 9713 bytes .../vit2tokenmask_model.cpython-36.pyc | Bin 0 -> 9189 bytes models/__pycache__/vitD_model.cpython-36.pyc | Bin 0 -> 9777 bytes models/__pycache__/vit_model.cpython-36.pyc | Bin 0 -> 8707 bytes .../vitdonly2_model.cpython-36.pyc | Bin 0 -> 9528 bytes .../__pycache__/vitdonly_model.cpython-36.pyc | Bin 0 -> 9373 bytes .../vitgloballocal_model.cpython-36.pyc | Bin 0 -> 11152 bytes .../vitlocalgloballocal_model.cpython-36.pyc | Bin 0 -> 10505 bytes models/base_model.py | 258 ++ models/cut_model.py | 214 ++ models/cycle_gan_model.py | 222 ++ models/networks.py | 1530 +++++++++++ models/patchnce.py | 55 + models/roma_model.py | 363 +++ models/roma_single_model.py | 272 ++ models/self_build.py | 655 +++++ models/stylegan_networks.py | 914 +++++++ models/template_model.py | 99 + .../util/__pycache__/pos_embed.cpython-36.pyc | Bin 0 -> 2424 bytes models/util/crop.py | 42 + models/util/datasets.py | 65 + models/util/lars.py | 47 + models/util/lr_decay.py | 76 + models/util/lr_sched.py | 21 + models/util/misc.py | 340 +++ models/util/pos_embed.py | 96 + options/__init__.py | 1 + options/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 292 bytes .../__pycache__/base_options.cpython-36.pyc | Bin 0 -> 7539 bytes .../__pycache__/test_options.cpython-36.pyc | Bin 0 -> 1039 
bytes .../__pycache__/train_options.cpython-36.pyc | Bin 0 -> 3174 bytes options/base_options.py | 167 ++ options/test_options.py | 21 + options/train_options.py | 47 + scripts/test.sh | 1 + scripts/train.sh | 5 + test.py | 70 + timm/__init__.py | 4 + timm/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 550 bytes timm/__pycache__/version.cpython-36.pyc | Bin 0 -> 168 bytes timm/data/__init__.py | 12 + timm/data/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 861 bytes .../__pycache__/auto_augment.cpython-36.pyc | Bin 0 -> 27337 bytes timm/data/__pycache__/config.cpython-36.pyc | Bin 0 -> 1609 bytes .../data/__pycache__/constants.cpython-36.pyc | Bin 0 -> 598 bytes timm/data/__pycache__/dataset.cpython-36.pyc | Bin 0 -> 5013 bytes .../dataset_factory.cpython-36.pyc | Bin 0 -> 4141 bytes .../distributed_sampler.cpython-36.pyc | Bin 0 -> 4229 bytes timm/data/__pycache__/loader.cpython-36.pyc | Bin 0 -> 7910 bytes timm/data/__pycache__/mixup.cpython-36.pyc | Bin 0 -> 11571 bytes .../__pycache__/random_erasing.cpython-36.pyc | Bin 0 -> 3940 bytes .../__pycache__/real_labels.cpython-36.pyc | Bin 0 -> 2390 bytes .../__pycache__/transforms.cpython-36.pyc | Bin 0 -> 6434 bytes .../transforms_factory.cpython-36.pyc | Bin 0 -> 5119 bytes timm/data/auto_augment.py | 865 +++++++ timm/data/config.py | 78 + timm/data/constants.py | 7 + timm/data/dataset.py | 152 ++ timm/data/dataset_factory.py | 139 + timm/data/distributed_sampler.py | 128 + timm/data/loader.py | 289 +++ timm/data/mixup.py | 316 +++ timm/data/parsers/__init__.py | 1 + .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 213 bytes .../__pycache__/class_map.cpython-36.pyc | Bin 0 -> 934 bytes .../__pycache__/constants.cpython-36.pyc | Bin 0 -> 215 bytes .../parsers/__pycache__/parser.cpython-36.pyc | Bin 0 -> 1134 bytes .../__pycache__/parser_factory.cpython-36.pyc | Bin 0 -> 874 bytes .../parser_image_folder.cpython-36.pyc | Bin 0 -> 3026 bytes .../parser_image_in_tar.cpython-36.pyc | Bin 0 -> 7672 bytes .../parser_image_tar.cpython-36.pyc | Bin 0 -> 3104 bytes timm/data/parsers/class_map.py | 19 + timm/data/parsers/constants.py | 1 + timm/data/parsers/parser.py | 17 + timm/data/parsers/parser_factory.py | 29 + timm/data/parsers/parser_image_folder.py | 69 + timm/data/parsers/parser_image_in_tar.py | 222 ++ timm/data/parsers/parser_image_tar.py | 72 + timm/data/parsers/parser_tfds.py | 297 +++ timm/data/random_erasing.py | 103 + timm/data/real_labels.py | 42 + timm/data/tf_preprocessing.py | 232 ++ timm/data/transforms.py | 185 ++ timm/data/transforms_factory.py | 236 ++ timm/loss/__init__.py | 4 + timm/loss/asymmetric_loss.py | 97 + timm/loss/binary_cross_entropy.py | 47 + timm/loss/cross_entropy.py | 36 + timm/loss/jsd.py | 39 + timm/models/__init__.py | 58 + .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 1929 bytes timm/models/__pycache__/beit.cpython-36.pyc | Bin 0 -> 13767 bytes .../models/__pycache__/byoanet.cpython-36.pyc | Bin 0 -> 11525 bytes .../models/__pycache__/byobnet.cpython-36.pyc | Bin 0 -> 43550 bytes timm/models/__pycache__/cait.cpython-36.pyc | Bin 0 -> 13252 bytes timm/models/__pycache__/coat.cpython-36.pyc | Bin 0 -> 20181 bytes timm/models/__pycache__/convit.cpython-36.pyc | Bin 0 -> 12239 bytes .../__pycache__/convmixer.cpython-36.pyc | Bin 0 -> 4159 bytes .../__pycache__/crossvit.cpython-36.pyc | Bin 0 -> 19691 bytes timm/models/__pycache__/cspnet.cpython-36.pyc | Bin 0 -> 14814 bytes .../__pycache__/densenet.cpython-36.pyc | Bin 0 -> 13075 bytes timm/models/__pycache__/dla.cpython-36.pyc | Bin 0 -> 14562 bytes 
timm/models/__pycache__/dpn.cpython-36.pyc | Bin 0 -> 10582 bytes .../__pycache__/efficientnet.cpython-36.pyc | Bin 0 -> 75391 bytes .../efficientnet_blocks.cpython-36.pyc | Bin 0 -> 9769 bytes .../efficientnet_builder.cpython-36.pyc | Bin 0 -> 12660 bytes .../models/__pycache__/factory.cpython-36.pyc | Bin 0 -> 3023 bytes .../__pycache__/features.cpython-36.pyc | Bin 0 -> 12545 bytes .../__pycache__/fx_features.cpython-36.pyc | Bin 0 -> 2660 bytes .../__pycache__/ghostnet.cpython-36.pyc | Bin 0 -> 7532 bytes .../__pycache__/gluon_resnet.cpython-36.pyc | Bin 0 -> 10128 bytes .../__pycache__/gluon_xception.cpython-36.pyc | Bin 0 -> 6881 bytes .../__pycache__/hardcorenas.cpython-36.pyc | Bin 0 -> 6386 bytes .../models/__pycache__/helpers.cpython-36.pyc | Bin 0 -> 15038 bytes timm/models/__pycache__/hrnet.cpython-36.pyc | Bin 0 -> 19111 bytes timm/models/__pycache__/hub.cpython-36.pyc | Bin 0 -> 5289 bytes .../inception_resnet_v2.cpython-36.pyc | Bin 0 -> 10718 bytes .../__pycache__/inception_v3.cpython-36.pyc | Bin 0 -> 14117 bytes .../__pycache__/inception_v4.cpython-36.pyc | Bin 0 -> 10692 bytes timm/models/__pycache__/levit.cpython-36.pyc | Bin 0 -> 17526 bytes .../__pycache__/mlp_mixer.cpython-36.pyc | Bin 0 -> 23914 bytes .../__pycache__/mobilenetv3.cpython-36.pyc | Bin 0 -> 16609 bytes timm/models/__pycache__/nasnet.cpython-36.pyc | Bin 0 -> 15831 bytes timm/models/__pycache__/nest.cpython-36.pyc | Bin 0 -> 18311 bytes timm/models/__pycache__/nfnet.cpython-36.pyc | Bin 0 -> 33335 bytes timm/models/__pycache__/pit.cpython-36.pyc | Bin 0 -> 12341 bytes .../models/__pycache__/pnasnet.cpython-36.pyc | Bin 0 -> 10870 bytes .../__pycache__/registry.cpython-36.pyc | Bin 0 -> 4729 bytes timm/models/__pycache__/regnet.cpython-36.pyc | Bin 0 -> 18516 bytes .../models/__pycache__/res2net.cpython-36.pyc | Bin 0 -> 6980 bytes .../models/__pycache__/resnest.cpython-36.pyc | Bin 0 -> 8441 bytes timm/models/__pycache__/resnet.cpython-36.pyc | Bin 0 -> 54345 bytes .../__pycache__/resnetv2.cpython-36.pyc | Bin 0 -> 23075 bytes timm/models/__pycache__/rexnet.cpython-36.pyc | Bin 0 -> 8779 bytes .../__pycache__/selecsls.cpython-36.pyc | Bin 0 -> 10933 bytes timm/models/__pycache__/senet.cpython-36.pyc | Bin 0 -> 15147 bytes timm/models/__pycache__/sknet.cpython-36.pyc | Bin 0 -> 7739 bytes .../swin_transformer.cpython-36.pyc | Bin 0 -> 23074 bytes timm/models/__pycache__/tnt.cpython-36.pyc | Bin 0 -> 9905 bytes .../models/__pycache__/tresnet.cpython-36.pyc | Bin 0 -> 9691 bytes timm/models/__pycache__/twins.cpython-36.pyc | Bin 0 -> 15148 bytes timm/models/__pycache__/vgg.cpython-36.pyc | Bin 0 -> 9627 bytes .../__pycache__/visformer.cpython-36.pyc | Bin 0 -> 11099 bytes .../vision_transformer.cpython-36.pyc | Bin 0 -> 38076 bytes .../vision_transformer_hybrid.cpython-36.pyc | Bin 0 -> 13017 bytes timm/models/__pycache__/vovnet.cpython-36.pyc | Bin 0 -> 10478 bytes .../__pycache__/xception.cpython-36.pyc | Bin 0 -> 6618 bytes .../xception_aligned.cpython-36.pyc | Bin 0 -> 7647 bytes timm/models/__pycache__/xcit.cpython-36.pyc | Bin 0 -> 31091 bytes timm/models/beit.py | 416 +++ timm/models/byoanet.py | 443 ++++ timm/models/byobnet.py | 1531 +++++++++++ timm/models/cait.py | 394 +++ timm/models/coat.py | 661 +++++ timm/models/convit.py | 351 +++ timm/models/convmixer.py | 101 + timm/models/crossvit.py | 517 ++++ timm/models/cspnet.py | 457 ++++ timm/models/densenet.py | 387 +++ timm/models/dla.py | 443 ++++ timm/models/dpn.py | 317 +++ timm/models/efficientnet.py | 2286 +++++++++++++++++ 
timm/models/efficientnet_blocks.py | 323 +++ timm/models/efficientnet_builder.py | 463 ++++ timm/models/factory.py | 86 + timm/models/features.py | 284 ++ timm/models/fx_features.py | 73 + timm/models/ghostnet.py | 276 ++ timm/models/gluon_resnet.py | 248 ++ timm/models/gluon_xception.py | 246 ++ timm/models/hardcorenas.py | 152 ++ timm/models/helpers.py | 518 ++++ timm/models/hrnet.py | 836 ++++++ timm/models/hub.py | 171 ++ timm/models/inception_resnet_v2.py | 358 +++ timm/models/inception_v3.py | 470 ++++ timm/models/inception_v4.py | 316 +++ timm/models/layers/__init__.py | 40 + .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 3100 bytes .../__pycache__/activations.cpython-36.pyc | Bin 0 -> 6831 bytes .../activations_jit.cpython-36.pyc | Bin 0 -> 4303 bytes .../__pycache__/activations_me.cpython-36.pyc | Bin 0 -> 9158 bytes .../adaptive_avgmax_pool.cpython-36.pyc | Bin 0 -> 4793 bytes .../__pycache__/blur_pool.cpython-36.pyc | Bin 0 -> 2072 bytes .../bottleneck_attn.cpython-36.pyc | Bin 0 -> 6437 bytes .../layers/__pycache__/cbam.cpython-36.pyc | Bin 0 -> 5400 bytes .../__pycache__/classifier.cpython-36.pyc | Bin 0 -> 2200 bytes .../__pycache__/cond_conv2d.cpython-36.pyc | Bin 0 -> 3761 bytes .../layers/__pycache__/config.cpython-36.pyc | Bin 0 -> 3457 bytes .../__pycache__/conv2d_same.cpython-36.pyc | Bin 0 -> 1899 bytes .../__pycache__/conv_bn_act.cpython-36.pyc | Bin 0 -> 1588 bytes .../__pycache__/create_act.cpython-36.pyc | Bin 0 -> 3650 bytes .../__pycache__/create_attn.cpython-36.pyc | Bin 0 -> 1968 bytes .../__pycache__/create_conv2d.cpython-36.pyc | Bin 0 -> 1065 bytes .../create_norm_act.cpython-36.pyc | Bin 0 -> 2310 bytes .../layers/__pycache__/drop.cpython-36.pyc | Bin 0 -> 5677 bytes .../layers/__pycache__/eca.cpython-36.pyc | Bin 0 -> 6178 bytes .../__pycache__/evo_norm.cpython-36.pyc | Bin 0 -> 3610 bytes .../__pycache__/gather_excite.cpython-36.pyc | Bin 0 -> 3076 bytes .../__pycache__/global_context.cpython-36.pyc | Bin 0 -> 2402 bytes .../__pycache__/halo_attn.cpython-36.pyc | Bin 0 -> 7544 bytes .../layers/__pycache__/helpers.cpython-36.pyc | Bin 0 -> 998 bytes .../__pycache__/inplace_abn.cpython-36.pyc | Bin 0 -> 3124 bytes .../__pycache__/lambda_layer.cpython-36.pyc | Bin 0 -> 5497 bytes .../layers/__pycache__/linear.cpython-36.pyc | Bin 0 -> 1050 bytes .../__pycache__/mixed_conv2d.cpython-36.pyc | Bin 0 -> 2226 bytes .../layers/__pycache__/mlp.cpython-36.pyc | Bin 0 -> 4076 bytes .../__pycache__/non_local_attn.cpython-36.pyc | Bin 0 -> 5634 bytes .../layers/__pycache__/norm.cpython-36.pyc | Bin 0 -> 1539 bytes .../__pycache__/norm_act.cpython-36.pyc | Bin 0 -> 3010 bytes .../layers/__pycache__/padding.cpython-36.pyc | Bin 0 -> 1767 bytes .../__pycache__/patch_embed.cpython-36.pyc | Bin 0 -> 1711 bytes .../__pycache__/pool2d_same.cpython-36.pyc | Bin 0 -> 3168 bytes .../selective_kernel.cpython-36.pyc | Bin 0 -> 5532 bytes .../__pycache__/separable_conv.cpython-36.pyc | Bin 0 -> 2910 bytes .../__pycache__/space_to_depth.cpython-36.pyc | Bin 0 -> 2532 bytes .../__pycache__/split_attn.cpython-36.pyc | Bin 0 -> 2956 bytes .../split_batchnorm.cpython-36.pyc | Bin 0 -> 3356 bytes .../__pycache__/squeeze_excite.cpython-36.pyc | Bin 0 -> 3251 bytes .../__pycache__/std_conv.cpython-36.pyc | Bin 0 -> 6033 bytes .../__pycache__/test_time_pool.cpython-36.pyc | Bin 0 -> 2091 bytes .../__pycache__/trace_utils.cpython-36.pyc | Bin 0 -> 684 bytes .../__pycache__/weight_init.cpython-36.pyc | Bin 0 -> 2669 bytes timm/models/layers/activations.py | 145 ++ 
timm/models/layers/activations_jit.py | 90 + timm/models/layers/activations_me.py | 218 ++ timm/models/layers/adaptive_avgmax_pool.py | 118 + timm/models/layers/attention_pool2d.py | 182 ++ timm/models/layers/blur_pool.py | 42 + timm/models/layers/bottleneck_attn.py | 157 ++ timm/models/layers/cbam.py | 112 + timm/models/layers/classifier.py | 56 + timm/models/layers/cond_conv2d.py | 122 + timm/models/layers/config.py | 115 + timm/models/layers/conv2d_same.py | 42 + timm/models/layers/conv_bn_act.py | 40 + timm/models/layers/create_act.py | 153 ++ timm/models/layers/create_attn.py | 89 + timm/models/layers/create_conv2d.py | 31 + timm/models/layers/create_norm_act.py | 83 + timm/models/layers/drop.py | 168 ++ timm/models/layers/eca.py | 145 ++ timm/models/layers/evo_norm.py | 81 + timm/models/layers/gather_excite.py | 90 + timm/models/layers/global_context.py | 67 + timm/models/layers/halo_attn.py | 233 ++ timm/models/layers/helpers.py | 31 + timm/models/layers/inplace_abn.py | 87 + timm/models/layers/lambda_layer.py | 133 + timm/models/layers/linear.py | 19 + timm/models/layers/median_pool.py | 49 + timm/models/layers/mixed_conv2d.py | 51 + timm/models/layers/mlp.py | 119 + timm/models/layers/non_local_attn.py | 145 ++ timm/models/layers/norm.py | 24 + timm/models/layers/norm_act.py | 85 + timm/models/layers/padding.py | 56 + timm/models/layers/patch_embed.py | 39 + timm/models/layers/pool2d_same.py | 73 + timm/models/layers/selective_kernel.py | 120 + timm/models/layers/separable_conv.py | 73 + timm/models/layers/space_to_depth.py | 53 + timm/models/layers/split_attn.py | 85 + timm/models/layers/split_batchnorm.py | 75 + timm/models/layers/squeeze_excite.py | 74 + timm/models/layers/std_conv.py | 133 + timm/models/layers/test_time_pool.py | 52 + timm/models/layers/trace_utils.py | 13 + timm/models/layers/weight_init.py | 89 + timm/models/levit.py | 563 ++++ timm/models/mlp_mixer.py | 659 +++++ timm/models/mobilenetv3.py | 562 ++++ timm/models/nasnet.py | 567 ++++ timm/models/nest.py | 465 ++++ timm/models/nfnet.py | 968 +++++++ timm/models/pit.py | 384 +++ timm/models/pnasnet.py | 350 +++ timm/models/pruned/ecaresnet101d_pruned.txt | 1 + timm/models/pruned/ecaresnet50d_pruned.txt | 1 + timm/models/pruned/efficientnet_b1_pruned.txt | 1 + timm/models/pruned/efficientnet_b2_pruned.txt | 1 + timm/models/pruned/efficientnet_b3_pruned.txt | 1 + timm/models/registry.py | 149 ++ timm/models/regnet.py | 494 ++++ timm/models/res2net.py | 216 ++ timm/models/resnest.py | 237 ++ timm/models/resnet.py | 1472 +++++++++++ timm/models/resnetv2.py | 672 +++++ timm/models/rexnet.py | 239 ++ timm/models/selecsls.py | 362 +++ timm/models/senet.py | 467 ++++ timm/models/sknet.py | 215 ++ timm/models/swin_transformer.py | 656 +++++ timm/models/tnt.py | 272 ++ timm/models/tresnet.py | 297 +++ timm/models/twins.py | 424 +++ timm/models/vgg.py | 263 ++ timm/models/visformer.py | 412 +++ timm/models/vision_transformer.py | 989 +++++++ timm/models/vision_transformer_hybrid.py | 363 +++ timm/models/vovnet.py | 406 +++ timm/models/xception.py | 232 ++ timm/models/xception_aligned.py | 238 ++ timm/models/xcit.py | 812 ++++++ timm/optim/__init__.py | 15 + timm/optim/adabelief.py | 201 ++ timm/optim/adafactor.py | 167 ++ timm/optim/adahessian.py | 156 ++ timm/optim/adamp.py | 105 + timm/optim/adamw.py | 122 + timm/optim/lamb.py | 192 ++ timm/optim/lars.py | 135 + timm/optim/lookahead.py | 61 + timm/optim/madgrad.py | 184 ++ timm/optim/nadam.py | 92 + timm/optim/nvnovograd.py | 120 + timm/optim/optim_factory.py | 217 ++ 
timm/optim/radam.py | 89 + timm/optim/rmsprop_tf.py | 139 + timm/optim/sgdp.py | 70 + timm/scheduler/__init__.py | 8 + timm/scheduler/cosine_lr.py | 119 + timm/scheduler/multistep_lr.py | 65 + timm/scheduler/plateau_lr.py | 113 + timm/scheduler/poly_lr.py | 116 + timm/scheduler/scheduler.py | 105 + timm/scheduler/scheduler_factory.py | 107 + timm/scheduler/step_lr.py | 63 + timm/scheduler/tanh_lr.py | 117 + timm/utils/__init__.py | 13 + .../utils/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 961 bytes timm/utils/__pycache__/agc.cpython-36.pyc | Bin 0 -> 1526 bytes .../checkpoint_saver.cpython-36.pyc | Bin 0 -> 4431 bytes .../__pycache__/clip_grad.cpython-36.pyc | Bin 0 -> 960 bytes timm/utils/__pycache__/cuda.cpython-36.pyc | Bin 0 -> 2215 bytes .../__pycache__/distributed.cpython-36.pyc | Bin 0 -> 939 bytes timm/utils/__pycache__/jit.cpython-36.pyc | Bin 0 -> 810 bytes timm/utils/__pycache__/log.cpython-36.pyc | Bin 0 -> 1369 bytes timm/utils/__pycache__/metrics.cpython-36.pyc | Bin 0 -> 1610 bytes timm/utils/__pycache__/misc.cpython-36.pyc | Bin 0 -> 1006 bytes timm/utils/__pycache__/model.cpython-36.pyc | Bin 0 -> 11239 bytes .../__pycache__/model_ema.cpython-36.pyc | Bin 0 -> 5801 bytes timm/utils/__pycache__/random.cpython-36.pyc | Bin 0 -> 416 bytes timm/utils/__pycache__/summary.cpython-36.pyc | Bin 0 -> 1487 bytes timm/utils/agc.py | 42 + timm/utils/checkpoint_saver.py | 150 ++ timm/utils/clip_grad.py | 23 + timm/utils/cuda.py | 55 + timm/utils/distributed.py | 28 + timm/utils/jit.py | 18 + timm/utils/log.py | 28 + timm/utils/metrics.py | 32 + timm/utils/misc.py | 18 + timm/utils/model.py | 273 ++ timm/utils/model_ema.py | 126 + timm/utils/random.py | 9 + timm/utils/summary.py | 39 + timm/version.py | 1 + train.py | 77 + util/__init__.py | 2 + util/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 262 bytes util/__pycache__/html.cpython-36.pyc | Bin 0 -> 3567 bytes util/__pycache__/util.cpython-36.pyc | Bin 0 -> 5239 bytes util/__pycache__/visualizer.cpython-36.pyc | Bin 0 -> 8660 bytes util/get_data.py | 110 + util/html.py | 86 + util/image_pool.py | 54 + util/util.py | 166 ++ util/visualizer.py | 242 ++ 411 files changed, 47043 insertions(+) create mode 100644 README.md create mode 100644 data/__init__.py create mode 100644 data/__pycache__/__init__.cpython-36.pyc create mode 100644 data/__pycache__/base_dataset.cpython-36.pyc create mode 100644 data/__pycache__/image_folder.cpython-36.pyc create mode 100644 data/__pycache__/unaligned_dataset.cpython-36.pyc create mode 100644 data/__pycache__/unaligned_double_dataset.cpython-36.pyc create mode 100644 data/base_dataset.py create mode 100644 data/image_folder.py create mode 100644 data/single_dataset.py create mode 100644 data/singleimage_dataset.py create mode 100644 data/template_dataset.py create mode 100644 data/unaligned_dataset.py create mode 100644 data/unaligned_double_dataset.py create mode 100644 datasets/bibtex/cityscapes.tex create mode 100644 datasets/bibtex/facades.tex create mode 100644 datasets/bibtex/handbags.tex create mode 100644 datasets/bibtex/shoes.tex create mode 100644 datasets/bibtex/transattr.tex create mode 100644 datasets/combine_A_and_B.py create mode 100644 datasets/detect_cat_face.py create mode 100644 datasets/download_cut_dataset.sh create mode 100644 datasets/download_pix2pix_dataset.sh create mode 100644 datasets/make_dataset_aligned.py create mode 100644 datasets/prepare_cityscapes_dataset.py create mode 100644 datasets/single_image_monet_etretat/trainA/monet.jpg create mode 100644 
datasets/single_image_monet_etretat/trainB/etretat-normandy-france.jpg create mode 100644 images/method_final.jpg create mode 100644 models/__init__.py create mode 100644 models/__pycache__/__init__.cpython-36.pyc create mode 100644 models/__pycache__/base_model.cpython-36.pyc create mode 100644 models/__pycache__/cut_model.cpython-36.pyc create mode 100644 models/__pycache__/mae.cpython-36.pyc create mode 100644 models/__pycache__/models_mae.cpython-36.pyc create mode 100644 models/__pycache__/mutilvitgloballocal_model.cpython-36.pyc create mode 100644 models/__pycache__/networks.cpython-36.pyc create mode 100644 models/__pycache__/patchnce.cpython-36.pyc create mode 100644 models/__pycache__/region0_model.cpython-36.pyc create mode 100644 models/__pycache__/region_model.cpython-36.pyc create mode 100644 models/__pycache__/stylegan_networks.cpython-36.pyc create mode 100644 models/__pycache__/vit2Gmask_model.cpython-36.pyc create mode 100644 models/__pycache__/vit2_model.cpython-36.pyc create mode 100644 models/__pycache__/vit2patchmask_model.cpython-36.pyc create mode 100644 models/__pycache__/vit2tokenmask_model.cpython-36.pyc create mode 100644 models/__pycache__/vitD_model.cpython-36.pyc create mode 100644 models/__pycache__/vit_model.cpython-36.pyc create mode 100644 models/__pycache__/vitdonly2_model.cpython-36.pyc create mode 100644 models/__pycache__/vitdonly_model.cpython-36.pyc create mode 100644 models/__pycache__/vitgloballocal_model.cpython-36.pyc create mode 100644 models/__pycache__/vitlocalgloballocal_model.cpython-36.pyc create mode 100644 models/base_model.py create mode 100644 models/cut_model.py create mode 100644 models/cycle_gan_model.py create mode 100644 models/networks.py create mode 100644 models/patchnce.py create mode 100644 models/roma_model.py create mode 100644 models/roma_single_model.py create mode 100644 models/self_build.py create mode 100644 models/stylegan_networks.py create mode 100644 models/template_model.py create mode 100644 models/util/__pycache__/pos_embed.cpython-36.pyc create mode 100644 models/util/crop.py create mode 100644 models/util/datasets.py create mode 100644 models/util/lars.py create mode 100644 models/util/lr_decay.py create mode 100644 models/util/lr_sched.py create mode 100644 models/util/misc.py create mode 100644 models/util/pos_embed.py create mode 100644 options/__init__.py create mode 100644 options/__pycache__/__init__.cpython-36.pyc create mode 100644 options/__pycache__/base_options.cpython-36.pyc create mode 100644 options/__pycache__/test_options.cpython-36.pyc create mode 100644 options/__pycache__/train_options.cpython-36.pyc create mode 100644 options/base_options.py create mode 100644 options/test_options.py create mode 100644 options/train_options.py create mode 100644 scripts/test.sh create mode 100644 scripts/train.sh create mode 100644 test.py create mode 100644 timm/__init__.py create mode 100644 timm/__pycache__/__init__.cpython-36.pyc create mode 100644 timm/__pycache__/version.cpython-36.pyc create mode 100644 timm/data/__init__.py create mode 100644 timm/data/__pycache__/__init__.cpython-36.pyc create mode 100644 timm/data/__pycache__/auto_augment.cpython-36.pyc create mode 100644 timm/data/__pycache__/config.cpython-36.pyc create mode 100644 timm/data/__pycache__/constants.cpython-36.pyc create mode 100644 timm/data/__pycache__/dataset.cpython-36.pyc create mode 100644 timm/data/__pycache__/dataset_factory.cpython-36.pyc create mode 100644 timm/data/__pycache__/distributed_sampler.cpython-36.pyc create 
mode 100644 timm/data/__pycache__/loader.cpython-36.pyc create mode 100644 timm/data/__pycache__/mixup.cpython-36.pyc create mode 100644 timm/data/__pycache__/random_erasing.cpython-36.pyc create mode 100644 timm/data/__pycache__/real_labels.cpython-36.pyc create mode 100644 timm/data/__pycache__/transforms.cpython-36.pyc create mode 100644 timm/data/__pycache__/transforms_factory.cpython-36.pyc create mode 100644 timm/data/auto_augment.py create mode 100644 timm/data/config.py create mode 100644 timm/data/constants.py create mode 100644 timm/data/dataset.py create mode 100644 timm/data/dataset_factory.py create mode 100644 timm/data/distributed_sampler.py create mode 100644 timm/data/loader.py create mode 100644 timm/data/mixup.py create mode 100644 timm/data/parsers/__init__.py create mode 100644 timm/data/parsers/__pycache__/__init__.cpython-36.pyc create mode 100644 timm/data/parsers/__pycache__/class_map.cpython-36.pyc create mode 100644 timm/data/parsers/__pycache__/constants.cpython-36.pyc create mode 100644 timm/data/parsers/__pycache__/parser.cpython-36.pyc create mode 100644 timm/data/parsers/__pycache__/parser_factory.cpython-36.pyc create mode 100644 timm/data/parsers/__pycache__/parser_image_folder.cpython-36.pyc create mode 100644 timm/data/parsers/__pycache__/parser_image_in_tar.cpython-36.pyc create mode 100644 timm/data/parsers/__pycache__/parser_image_tar.cpython-36.pyc create mode 100644 timm/data/parsers/class_map.py create mode 100644 timm/data/parsers/constants.py create mode 100644 timm/data/parsers/parser.py create mode 100644 timm/data/parsers/parser_factory.py create mode 100644 timm/data/parsers/parser_image_folder.py create mode 100644 timm/data/parsers/parser_image_in_tar.py create mode 100644 timm/data/parsers/parser_image_tar.py create mode 100644 timm/data/parsers/parser_tfds.py create mode 100644 timm/data/random_erasing.py create mode 100644 timm/data/real_labels.py create mode 100644 timm/data/tf_preprocessing.py create mode 100644 timm/data/transforms.py create mode 100644 timm/data/transforms_factory.py create mode 100644 timm/loss/__init__.py create mode 100644 timm/loss/asymmetric_loss.py create mode 100644 timm/loss/binary_cross_entropy.py create mode 100644 timm/loss/cross_entropy.py create mode 100644 timm/loss/jsd.py create mode 100644 timm/models/__init__.py create mode 100644 timm/models/__pycache__/__init__.cpython-36.pyc create mode 100644 timm/models/__pycache__/beit.cpython-36.pyc create mode 100644 timm/models/__pycache__/byoanet.cpython-36.pyc create mode 100644 timm/models/__pycache__/byobnet.cpython-36.pyc create mode 100644 timm/models/__pycache__/cait.cpython-36.pyc create mode 100644 timm/models/__pycache__/coat.cpython-36.pyc create mode 100644 timm/models/__pycache__/convit.cpython-36.pyc create mode 100644 timm/models/__pycache__/convmixer.cpython-36.pyc create mode 100644 timm/models/__pycache__/crossvit.cpython-36.pyc create mode 100644 timm/models/__pycache__/cspnet.cpython-36.pyc create mode 100644 timm/models/__pycache__/densenet.cpython-36.pyc create mode 100644 timm/models/__pycache__/dla.cpython-36.pyc create mode 100644 timm/models/__pycache__/dpn.cpython-36.pyc create mode 100644 timm/models/__pycache__/efficientnet.cpython-36.pyc create mode 100644 timm/models/__pycache__/efficientnet_blocks.cpython-36.pyc create mode 100644 timm/models/__pycache__/efficientnet_builder.cpython-36.pyc create mode 100644 timm/models/__pycache__/factory.cpython-36.pyc create mode 100644 timm/models/__pycache__/features.cpython-36.pyc create 
mode 100644 timm/models/__pycache__/fx_features.cpython-36.pyc create mode 100644 timm/models/__pycache__/ghostnet.cpython-36.pyc create mode 100644 timm/models/__pycache__/gluon_resnet.cpython-36.pyc create mode 100644 timm/models/__pycache__/gluon_xception.cpython-36.pyc create mode 100644 timm/models/__pycache__/hardcorenas.cpython-36.pyc create mode 100644 timm/models/__pycache__/helpers.cpython-36.pyc create mode 100644 timm/models/__pycache__/hrnet.cpython-36.pyc create mode 100644 timm/models/__pycache__/hub.cpython-36.pyc create mode 100644 timm/models/__pycache__/inception_resnet_v2.cpython-36.pyc create mode 100644 timm/models/__pycache__/inception_v3.cpython-36.pyc create mode 100644 timm/models/__pycache__/inception_v4.cpython-36.pyc create mode 100644 timm/models/__pycache__/levit.cpython-36.pyc create mode 100644 timm/models/__pycache__/mlp_mixer.cpython-36.pyc create mode 100644 timm/models/__pycache__/mobilenetv3.cpython-36.pyc create mode 100644 timm/models/__pycache__/nasnet.cpython-36.pyc create mode 100644 timm/models/__pycache__/nest.cpython-36.pyc create mode 100644 timm/models/__pycache__/nfnet.cpython-36.pyc create mode 100644 timm/models/__pycache__/pit.cpython-36.pyc create mode 100644 timm/models/__pycache__/pnasnet.cpython-36.pyc create mode 100644 timm/models/__pycache__/registry.cpython-36.pyc create mode 100644 timm/models/__pycache__/regnet.cpython-36.pyc create mode 100644 timm/models/__pycache__/res2net.cpython-36.pyc create mode 100644 timm/models/__pycache__/resnest.cpython-36.pyc create mode 100644 timm/models/__pycache__/resnet.cpython-36.pyc create mode 100644 timm/models/__pycache__/resnetv2.cpython-36.pyc create mode 100644 timm/models/__pycache__/rexnet.cpython-36.pyc create mode 100644 timm/models/__pycache__/selecsls.cpython-36.pyc create mode 100644 timm/models/__pycache__/senet.cpython-36.pyc create mode 100644 timm/models/__pycache__/sknet.cpython-36.pyc create mode 100644 timm/models/__pycache__/swin_transformer.cpython-36.pyc create mode 100644 timm/models/__pycache__/tnt.cpython-36.pyc create mode 100644 timm/models/__pycache__/tresnet.cpython-36.pyc create mode 100644 timm/models/__pycache__/twins.cpython-36.pyc create mode 100644 timm/models/__pycache__/vgg.cpython-36.pyc create mode 100644 timm/models/__pycache__/visformer.cpython-36.pyc create mode 100644 timm/models/__pycache__/vision_transformer.cpython-36.pyc create mode 100644 timm/models/__pycache__/vision_transformer_hybrid.cpython-36.pyc create mode 100644 timm/models/__pycache__/vovnet.cpython-36.pyc create mode 100644 timm/models/__pycache__/xception.cpython-36.pyc create mode 100644 timm/models/__pycache__/xception_aligned.cpython-36.pyc create mode 100644 timm/models/__pycache__/xcit.cpython-36.pyc create mode 100644 timm/models/beit.py create mode 100644 timm/models/byoanet.py create mode 100644 timm/models/byobnet.py create mode 100644 timm/models/cait.py create mode 100644 timm/models/coat.py create mode 100644 timm/models/convit.py create mode 100644 timm/models/convmixer.py create mode 100644 timm/models/crossvit.py create mode 100644 timm/models/cspnet.py create mode 100644 timm/models/densenet.py create mode 100644 timm/models/dla.py create mode 100644 timm/models/dpn.py create mode 100644 timm/models/efficientnet.py create mode 100644 timm/models/efficientnet_blocks.py create mode 100644 timm/models/efficientnet_builder.py create mode 100644 timm/models/factory.py create mode 100644 timm/models/features.py create mode 100644 timm/models/fx_features.py create mode 
100644 timm/models/ghostnet.py create mode 100644 timm/models/gluon_resnet.py create mode 100644 timm/models/gluon_xception.py create mode 100644 timm/models/hardcorenas.py create mode 100644 timm/models/helpers.py create mode 100644 timm/models/hrnet.py create mode 100644 timm/models/hub.py create mode 100644 timm/models/inception_resnet_v2.py create mode 100644 timm/models/inception_v3.py create mode 100644 timm/models/inception_v4.py create mode 100644 timm/models/layers/__init__.py create mode 100644 timm/models/layers/__pycache__/__init__.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/activations.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/activations_jit.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/activations_me.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/adaptive_avgmax_pool.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/blur_pool.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/bottleneck_attn.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/cbam.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/classifier.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/cond_conv2d.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/config.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/conv2d_same.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/conv_bn_act.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/create_act.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/create_attn.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/create_conv2d.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/create_norm_act.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/drop.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/eca.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/evo_norm.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/gather_excite.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/global_context.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/halo_attn.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/helpers.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/inplace_abn.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/lambda_layer.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/linear.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/mixed_conv2d.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/mlp.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/non_local_attn.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/norm.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/norm_act.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/padding.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/patch_embed.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/pool2d_same.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/selective_kernel.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/separable_conv.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/space_to_depth.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/split_attn.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/split_batchnorm.cpython-36.pyc 
create mode 100644 timm/models/layers/__pycache__/squeeze_excite.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/std_conv.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/test_time_pool.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/trace_utils.cpython-36.pyc create mode 100644 timm/models/layers/__pycache__/weight_init.cpython-36.pyc create mode 100644 timm/models/layers/activations.py create mode 100644 timm/models/layers/activations_jit.py create mode 100644 timm/models/layers/activations_me.py create mode 100644 timm/models/layers/adaptive_avgmax_pool.py create mode 100644 timm/models/layers/attention_pool2d.py create mode 100644 timm/models/layers/blur_pool.py create mode 100644 timm/models/layers/bottleneck_attn.py create mode 100644 timm/models/layers/cbam.py create mode 100644 timm/models/layers/classifier.py create mode 100644 timm/models/layers/cond_conv2d.py create mode 100644 timm/models/layers/config.py create mode 100644 timm/models/layers/conv2d_same.py create mode 100644 timm/models/layers/conv_bn_act.py create mode 100644 timm/models/layers/create_act.py create mode 100644 timm/models/layers/create_attn.py create mode 100644 timm/models/layers/create_conv2d.py create mode 100644 timm/models/layers/create_norm_act.py create mode 100644 timm/models/layers/drop.py create mode 100644 timm/models/layers/eca.py create mode 100644 timm/models/layers/evo_norm.py create mode 100644 timm/models/layers/gather_excite.py create mode 100644 timm/models/layers/global_context.py create mode 100644 timm/models/layers/halo_attn.py create mode 100644 timm/models/layers/helpers.py create mode 100644 timm/models/layers/inplace_abn.py create mode 100644 timm/models/layers/lambda_layer.py create mode 100644 timm/models/layers/linear.py create mode 100644 timm/models/layers/median_pool.py create mode 100644 timm/models/layers/mixed_conv2d.py create mode 100644 timm/models/layers/mlp.py create mode 100644 timm/models/layers/non_local_attn.py create mode 100644 timm/models/layers/norm.py create mode 100644 timm/models/layers/norm_act.py create mode 100644 timm/models/layers/padding.py create mode 100644 timm/models/layers/patch_embed.py create mode 100644 timm/models/layers/pool2d_same.py create mode 100644 timm/models/layers/selective_kernel.py create mode 100644 timm/models/layers/separable_conv.py create mode 100644 timm/models/layers/space_to_depth.py create mode 100644 timm/models/layers/split_attn.py create mode 100644 timm/models/layers/split_batchnorm.py create mode 100644 timm/models/layers/squeeze_excite.py create mode 100644 timm/models/layers/std_conv.py create mode 100644 timm/models/layers/test_time_pool.py create mode 100644 timm/models/layers/trace_utils.py create mode 100644 timm/models/layers/weight_init.py create mode 100644 timm/models/levit.py create mode 100644 timm/models/mlp_mixer.py create mode 100644 timm/models/mobilenetv3.py create mode 100644 timm/models/nasnet.py create mode 100644 timm/models/nest.py create mode 100644 timm/models/nfnet.py create mode 100644 timm/models/pit.py create mode 100644 timm/models/pnasnet.py create mode 100644 timm/models/pruned/ecaresnet101d_pruned.txt create mode 100644 timm/models/pruned/ecaresnet50d_pruned.txt create mode 100644 timm/models/pruned/efficientnet_b1_pruned.txt create mode 100644 timm/models/pruned/efficientnet_b2_pruned.txt create mode 100644 timm/models/pruned/efficientnet_b3_pruned.txt create mode 100644 timm/models/registry.py create mode 100644 timm/models/regnet.py 
create mode 100644 timm/models/res2net.py create mode 100644 timm/models/resnest.py create mode 100644 timm/models/resnet.py create mode 100644 timm/models/resnetv2.py create mode 100644 timm/models/rexnet.py create mode 100644 timm/models/selecsls.py create mode 100644 timm/models/senet.py create mode 100644 timm/models/sknet.py create mode 100644 timm/models/swin_transformer.py create mode 100644 timm/models/tnt.py create mode 100644 timm/models/tresnet.py create mode 100644 timm/models/twins.py create mode 100644 timm/models/vgg.py create mode 100644 timm/models/visformer.py create mode 100644 timm/models/vision_transformer.py create mode 100644 timm/models/vision_transformer_hybrid.py create mode 100644 timm/models/vovnet.py create mode 100644 timm/models/xception.py create mode 100644 timm/models/xception_aligned.py create mode 100644 timm/models/xcit.py create mode 100644 timm/optim/__init__.py create mode 100644 timm/optim/adabelief.py create mode 100644 timm/optim/adafactor.py create mode 100644 timm/optim/adahessian.py create mode 100644 timm/optim/adamp.py create mode 100644 timm/optim/adamw.py create mode 100644 timm/optim/lamb.py create mode 100644 timm/optim/lars.py create mode 100644 timm/optim/lookahead.py create mode 100644 timm/optim/madgrad.py create mode 100644 timm/optim/nadam.py create mode 100644 timm/optim/nvnovograd.py create mode 100644 timm/optim/optim_factory.py create mode 100644 timm/optim/radam.py create mode 100644 timm/optim/rmsprop_tf.py create mode 100644 timm/optim/sgdp.py create mode 100644 timm/scheduler/__init__.py create mode 100644 timm/scheduler/cosine_lr.py create mode 100644 timm/scheduler/multistep_lr.py create mode 100644 timm/scheduler/plateau_lr.py create mode 100644 timm/scheduler/poly_lr.py create mode 100644 timm/scheduler/scheduler.py create mode 100644 timm/scheduler/scheduler_factory.py create mode 100644 timm/scheduler/step_lr.py create mode 100644 timm/scheduler/tanh_lr.py create mode 100644 timm/utils/__init__.py create mode 100644 timm/utils/__pycache__/__init__.cpython-36.pyc create mode 100644 timm/utils/__pycache__/agc.cpython-36.pyc create mode 100644 timm/utils/__pycache__/checkpoint_saver.cpython-36.pyc create mode 100644 timm/utils/__pycache__/clip_grad.cpython-36.pyc create mode 100644 timm/utils/__pycache__/cuda.cpython-36.pyc create mode 100644 timm/utils/__pycache__/distributed.cpython-36.pyc create mode 100644 timm/utils/__pycache__/jit.cpython-36.pyc create mode 100644 timm/utils/__pycache__/log.cpython-36.pyc create mode 100644 timm/utils/__pycache__/metrics.cpython-36.pyc create mode 100644 timm/utils/__pycache__/misc.cpython-36.pyc create mode 100644 timm/utils/__pycache__/model.cpython-36.pyc create mode 100644 timm/utils/__pycache__/model_ema.cpython-36.pyc create mode 100644 timm/utils/__pycache__/random.cpython-36.pyc create mode 100644 timm/utils/__pycache__/summary.cpython-36.pyc create mode 100644 timm/utils/agc.py create mode 100644 timm/utils/checkpoint_saver.py create mode 100644 timm/utils/clip_grad.py create mode 100644 timm/utils/cuda.py create mode 100644 timm/utils/distributed.py create mode 100644 timm/utils/jit.py create mode 100644 timm/utils/log.py create mode 100644 timm/utils/metrics.py create mode 100644 timm/utils/misc.py create mode 100644 timm/utils/model.py create mode 100644 timm/utils/model_ema.py create mode 100644 timm/utils/random.py create mode 100644 timm/utils/summary.py create mode 100644 timm/version.py create mode 100644 train.py create mode 100644 util/__init__.py create mode 
100644 util/__pycache__/__init__.cpython-36.pyc
create mode 100644 util/__pycache__/html.cpython-36.pyc
create mode 100644 util/__pycache__/util.cpython-36.pyc
create mode 100644 util/__pycache__/visualizer.cpython-36.pyc
create mode 100644 util/get_data.py
create mode 100644 util/html.py
create mode 100644 util/image_pool.py
create mode 100644 util/util.py
create mode 100644 util/visualizer.py

diff --git a/README.md b/README.md
new file mode 100644
index 0000000..4e8168c
--- /dev/null
+++ b/README.md
@@ -0,0 +1,142 @@
+# ROMA
+This repository is the official PyTorch implementation of the ACM MM'22 paper
+"ROMA: Cross-Domain Region Similarity Matching for Unpaired Nighttime Infrared to Daytime Visible Video Translation". [[Arxiv]](https://arxiv.org/abs/2204.12367)
+
+**Examples of Object Detection:**
+
+![detection1](./images/detection1.gif)
+
+![](./images/detection2.gif)
+
+**Examples of Video Fusion:**
+
+![fusion](./images/fusion.gif)
+
+More experimental results can be obtained by contacting us.
+
+# Introduction
+
+## Method
+![Method](images/method_final.jpg)
+
+- The domain gaps between unpaired nighttime infrared and daytime visible videos are even larger than those between paired videos captured at the same time, so establishing an effective translation mapping will greatly benefit various fields.
+- Our proposed cross-similarity, calculated across domains, makes the generative process focus on learning the structural correspondence between real and synthesized frames, free of the negative effects of the differing styles.
+
+
+
+## Training
+The following is the required dataset structure. In video mode, a single input is the concatenation of **two adjacent frames**; in image mode, a single input is **a single image**.
+```
+Video/Image mode:
+    trainA: \Path\of\trainA
+    trainB: \Path\of\trainB
+
+```
+Concrete examples of training and testing commands are given in the scripts `./scripts/train.sh` and `./scripts/test.sh`, respectively; a minimal sketch of the video-mode input pairing is shown below.
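+For illustration, a single video-mode sample could be assembled as in the following sketch. The file names and the channel-wise concatenation are assumptions for illustration only; the dataset classes under `data/` (e.g. `data/unaligned_double_dataset.py`) define the actual behavior.
+
+```python
+# Sketch: build one video-mode input from two adjacent frames.
+import torch
+from PIL import Image
+import torchvision.transforms as T
+
+to_tensor = T.ToTensor()
+frame_t = to_tensor(Image.open('frame_0001.jpg').convert('RGB'))   # 3 x H x W
+frame_t1 = to_tensor(Image.open('frame_0002.jpg').convert('RGB'))  # 3 x H x W
+sample = torch.cat([frame_t, frame_t1], dim=0)                     # 6 x H x W
+```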
+## InfraredCity and InfraredCity-Lite Dataset
+
+| InfraredCity       | Total Frame |
+| ------------------ | ----------- |
+| Nighttime Infrared | 201,856     |
+| Nighttime Visible  | 178,698     |
+| Daytime Visible    | 199,430     |
+
+| InfraredCity-Lite | Weather  | Infrared Train | Infrared Test | Visible Train | Total  |
+| ----------------- | -------- | -------------- | ------------- | ------------- | ------ |
+| City              | clearday | 5,538          | 1,000         | 5,360         | 15,180 |
+| City              | overcast | 2,282          | 1,000         |               |        |
+| Highway           | clearday | 4,412          | 1,000         | 6,463         | 15,853 |
+| Highway           | overcast | 2,978          | 1,000         |               |        |
+| Monitor           |          | 5,612          | 500           | 4,194         | 10,306 |
+
+The datasets and more details about them are available from [InfiRay](http://openai.raytrontek.com/apply/Infrared_city.html/).
+
+
+### Citation
+If you find our work useful in your research or publication, please cite our work:
+```
+@inproceedings{ROMA2022,
+  title     = {ROMA: Cross-Domain Region Similarity Matching for Unpaired Nighttime Infrared to Daytime Visible Video Translation},
+  author    = {Zhenjie Yu and Kai Chen and Shuang Li and Bingfeng Han and Chi Harold Liu and Shuigen Wang},
+  booktitle = {ACM MM},
+  pages     = {5294--5302},
+  year      = {2022}
+}
+```
+
+### Acknowledgements
+This code borrows heavily from the PyTorch implementations of [CycleGAN and Pix2Pix](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix) and [CUT](https://github.com/taesungp/contrastive-unpaired-translation).
+A huge thanks to them!
+```
+@inproceedings{CycleGAN2017,
+  title     = {Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networks},
+  author    = {Zhu, Jun-Yan and Park, Taesung and Isola, Phillip and Efros, Alexei A},
+  booktitle = {ICCV},
+  year      = {2017}
+}
+
+@inproceedings{CUT2020,
+  author    = {Taesung Park and Alexei A. Efros and Richard Zhang and Jun{-}Yan Zhu},
+  title     = {Contrastive Learning for Unpaired Image-to-Image Translation},
+  booktitle = {ECCV},
+  pages     = {319--345},
+  year      = {2020}
+}
+```
\ No newline at end of file
diff --git a/data/__init__.py b/data/__init__.py
new file mode 100644
index 0000000..a7dd29b
--- /dev/null
+++ b/data/__init__.py
@@ -0,0 +1,98 @@
+"""This package includes all the modules related to data loading and preprocessing.
+
+ To add a custom dataset class called 'dummy', add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset.
+ You need to implement four functions:
+    -- <__init__>: initialize the class; first call BaseDataset.__init__(self, opt).
+    -- <__len__>: return the size of the dataset.
+    -- <__getitem__>: get a data point from the data loader.
+    -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
+
+Now you can use the dataset class by specifying the flag '--dataset_mode dummy'.
+See our template dataset class 'template_dataset.py' for more details.
+"""
+import importlib
+import torch.utils.data
+from data.base_dataset import BaseDataset
+
+
+def find_dataset_using_name(dataset_name):
+    """Import the module "data/[dataset_name]_dataset.py".
+
+    In the file, the class called DatasetNameDataset() will
+    be instantiated. It has to be a subclass of BaseDataset,
+    and it is matched case-insensitively.
+    """
+    dataset_filename = "data." + dataset_name + "_dataset"
+    datasetlib = importlib.import_module(dataset_filename)
+
+    dataset = None
+    target_dataset_name = dataset_name.replace('_', '') + 'dataset'
+    for name, cls in datasetlib.__dict__.items():
+        if name.lower() == target_dataset_name.lower() \
+           and issubclass(cls, BaseDataset):
+            dataset = cls
+
+    if dataset is None:
+        raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name))
+
+    return dataset
+
+
+def get_option_setter(dataset_name):
+    """Return the static method <modify_commandline_options> of the dataset class."""
+    dataset_class = find_dataset_using_name(dataset_name)
+    return dataset_class.modify_commandline_options
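+# For example, '--dataset_mode unaligned' leads find_dataset_using_name('unaligned')
+# to import data/unaligned_dataset.py and return its UnalignedDataset class:
+# 'unaligned'.replace('_', '') + 'dataset' == 'unaligneddataset', which equals
+# 'UnalignedDataset'.lower().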
+
+
+def create_dataset(opt):
+    """Create a dataset given the option.
+
+    This function wraps the class CustomDatasetDataLoader.
+    This is the main interface between this package and 'train.py'/'test.py'.
+
+    Example:
+        >>> from data import create_dataset
+        >>> dataset = create_dataset(opt)
+    """
+    data_loader = CustomDatasetDataLoader(opt)
+    dataset = data_loader.load_data()
+    return dataset
+
+
+class CustomDatasetDataLoader():
+    """Wrapper class of Dataset class that performs multi-threaded data loading."""
+
+    def __init__(self, opt):
+        """Initialize this class.
+
+        Step 1: create a dataset instance given the name [dataset_mode]
+        Step 2: create a multi-threaded data loader.
+        """
+        self.opt = opt
+        dataset_class = find_dataset_using_name(opt.dataset_mode)
+        self.dataset = dataset_class(opt)
+        print("dataset [%s] was created" % type(self.dataset).__name__)
+        self.dataloader = torch.utils.data.DataLoader(
+            self.dataset,
+            batch_size=opt.batch_size,
+            shuffle=not opt.serial_batches,
+            num_workers=int(opt.num_threads),
+            drop_last=True if opt.isTrain else False,
+        )
+
+    def set_epoch(self, epoch):
+        self.dataset.current_epoch = epoch
+
+    def load_data(self):
+        return self
+
+    def __len__(self):
+        """Return the number of data points in the dataset."""
+        return min(len(self.dataset), self.opt.max_dataset_size)
+
+    def __iter__(self):
+        """Yield batches of data, stopping after max_dataset_size samples."""
+        for i, data in enumerate(self.dataloader):
+            if i * self.opt.batch_size >= self.opt.max_dataset_size:
+                break
+            yield data
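+# Usage sketch (illustrative; the option name 'n_epochs' and model.set_input
+# are assumed training-loop conventions, not guaranteed by this file):
+#
+#   dataset = create_dataset(opt)      # wraps CustomDatasetDataLoader
+#   for epoch in range(opt.n_epochs):
+#       dataset.set_epoch(epoch)       # exposed to datasets as current_epoch
+#       for data in dataset:           # dict batches built by __getitem__
+#           model.set_input(data)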
diff --git a/data/__pycache__/__init__.cpython-36.pyc b/data/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b5af05d6e78bc9bc32b6632d131087cbe28bbb87
GIT binary patch
literal 4139
[base85 binary payload omitted]
literal 0
HcmV?d00001

diff --git a/data/__pycache__/base_dataset.cpython-36.pyc b/data/__pycache__/base_dataset.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6904eb2e06bd4966f1e910ea44fcce93366901ed
GIT binary patch
literal 8090
zYfvkyQws5T^qjoB`gu@{qL)$3~Yyd&i(>7{aPQ!y>mT`fM#~gHHD!_pv&y}Wuu0#kKsa?~HW$>s8 zP)SU98fm!CkJTVZCGH~Bq1UX>Xq|d=iJqq7D$LD+jk$m!GhxE*=uQmEz+z<p#O(2|f zDO6!Nn_1kBY-TeDs-~&6P&Hkwx6nfsh7i{Df+-jqrapEydM;?d4{?VHZZ5SAj51j| zaTP7xDJ1&7C0fd2RzWWiWY5T^CZk-Wx*@n4i7Y@YN?j>^+^lFv@+2}c;!BkN3JH2uG?8FB zzlRnt2;wYOL4bSZX&}V%XoFhxqE(^gZ+!MW2e*y|<~fc&o`Yu<28F4Immt%DqnK|K z@Ekyi?AOE%XNzyat5yV$USaRR(_7X~r?k~rJ-;pQ~8$Gzh)}0g10W0pypz3iYh=$1ExdBb$$g>>UXn1uq+dQSgCYJX=_p*88TO$=GvV|2#yN1_}8Q9xfk4&D6 zQ=mmxbJ;?am1s0Y-i-ins>CjurmH`c{}jwoAG1t*cgw4-Vt$$j*RdK_1L~1yBjOs4 J=PWlo=YKqMVDkU~ literal 0 HcmV?d00001 diff --git a/data/__pycache__/unaligned_dataset.cpython-36.pyc b/data/__pycache__/unaligned_dataset.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..499082c2392f2a0ce341c76664c16fdca5ee755a GIT binary patch literal 3052 zcma)8&2Hqz5$4~JoIhDBFVu)4dtySlpSt15mt9QKBP z_{I5uhK}$^l zn?ad97b$P2G47m|*@c+V=I!I5^ASFM3?i(miO1c<=N|XJc9MVxyn_}>@Ca?lV<^yV z_Wx96MSf8UzBxVn#&w(?!}G-=*KBhrn-!VXY?f85kQryI?XK~vs=J@20Yl(+wGwrv#G4oYmDS+QdkJkOM~tp?#|-Yk zSBh+8u#TC$6z$0~@;E)z5?G($)Bgl9&eB~wYxl-;9pf$iWw3Ua9eg2o-*IldwUb6~ z@VyN`wyXT-p8A_z(Oo;s=*As5+wqV4cyQz0xEsDbfjGBi^N!`jrf2#j3DO6&3#*13 z9-X-5?U}c*YG~*8ZGX@9@9~g=Ee7D*@2 zqL?>6tn!_s-i5G#|K5h$MxeNU$ram@!+rCb!-mSF{E|Z-E{|T z4{eCHzwWL3Yj2I@`}Y7K4zur(UOy9Nr7B>F{LN~(-q`rhjb<=$X4!c~**_z&{vXiJ zD=uEKV@Mhi2pIu8W2hlWrwm+iAryna_DH)LNu^EIw7gXbQGi5^d?~9;T_PMZ!)CHF zNXVK^PT2H>ZNO#q-Qa5neG)Wb$3|2@=td8#NY!R|C}Kk_uj-ZAB>G#Mr*~{7qs*kD zOfD#8QA7XGq(6#>5qWR38R z;D;ANnI_8h7gRT!;cNw~R3;U*oGqHTYCA|NE0Y&ZJd^b$HqD#vOv&1+sNN$je2~#V z9g^^lg+hYlKt@R-J4~kuvV>)HCTU(?B%P^khPh7Xc?I}V*-i#1o%uW$JVg#^x+tmb zFUxCoptNFhi~1QI*H2S;D>tG{Q~FcUH17=d-Pj#?2X4>ZclW~wF2fkFclGl-62wlx zjQl_kwREW``1Br#p)5i1@7;KaI``_^O~_HbH_~JUt5)SXLIO#A^YgL6CdD3p!d?rx ziDoYaTVyXb^}JmGCjBwg5@@HQR9qr*=5s+F`HAq89oQhr;AakThP<7Zs}dUQCP#kb zgRLx92o@vPdZ{&_HKny;l%~k&X?jNYwf_O()!`kMvWuRLA}To*Dk4fwFHPaJRYCnh zntrv)ip>m!ayd&=o4C~pM5|LGKYDPdfC@3urNas9aX0Yd`|&R=cQkAwsZR+zs*lO$ z9vSrX95&q+2U`U#2$OcwIv}0Pf(z9aHUHDIKU(lB1{q100DVeC9bs$_x=XJn{!M$i S-x4A;m5vyQ8sLrHp8sDI$4j&T literal 0 HcmV?d00001 diff --git a/data/__pycache__/unaligned_double_dataset.cpython-36.pyc b/data/__pycache__/unaligned_double_dataset.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..288527d029d7ff3d4bb09e32a4b4d0ee7280513c GIT binary patch literal 3634 zcma)8OOM>f5$0=1&TC&&&`Q*YtX`{%kASHvML_mG2b&XxFyxTdAK0Us`Fv-AgyxOYP6CLHH7DTMq}0?v4`4 zKTQ;<(*0AeVkMGNPAX9hY~3r8r#w!ZVf?oq6v-(^r?ZigV^Xz#9CSXxCvSmpv$4oY z?Z{1?)cw+mywpouXj_d@k)MVzr>zhEP$qeHTJrSMYH^aUElU1wTULkRc{0r;TN%Q} zc_JknCnd|PBxQ^B+~Y+#Pcp&NwWK`inAr~$6a0$>h9pzZDwd)*R+Y%;zhcu$Dux|p zWmcZDjP@wmL{tS!s{&j$WQWJ|L`{!XbxcMa8{y%hQJ$3PYr>-|g-3@lLC&#kcE!cV&u_DH zC@wh;0=t_FK2HRHbK}1>7QDGHF4gdm(Uz_l!5-3upfggCV=60nv+|hkogQil;P>#! 
ze}gD%X4h72Upls>oS8fGYJ1kg=co2t)}>QhaqtG;S@FXfGvl_Ab)P!oZ$_?DTeINO z?pf>qZ)T_7rE_Vo_+}mAyuL!(GMrF#RgW}5d!J@OYHP*Ax*OiM+JV%T8Q(SiZPVXN zec(2@`1(PasSMD)!1;a)qAT}8mAP93-4o6dCKo4-OH>m^{@m!3=7MPvZ4b3Q(jMJfJSn6Q3>bN;CwLffz{gG)AlRU}ui9p=6{$Yt9p!x6b zjn~~hzL=dKkKrNm_&4|N{qgPxckdp{h2Swxc3P$KSnyLE>#0S7`gDzxC8xcGdQbfbG;>@lV6uEq&7h z*e{%APhFesH~$TtIBD?0UiRyL?L+I9x_)Ubw;^>fd#m1RG&B^F;S1-_-g0NTyWCq| zt9R<%iBoTrU;|UfTJJoNYV# z7moEYR|`=B^Mp$_2Sk;LR+*9vC`b%DD=B{^38fk>UCb5usV-ma-X$8@}Did)Iw@nlqSEWMwkZgFHjc&6Q{*ApW7udKc zDH^g{ikEPrl^w>AvdKp<#Ds#ZoG;WWExk5*bj4&klyM~}?^I>F$!%+0NF_-4S1DUt zdP}Ot7#xX7i_CcN>kBu|%)U=n;P$MSM6~<({^+HJ?n_D-C@0#K#~b&z;r@sgV0Np8 z!e*Q35ROGPkLQ&f_}UqMfbT=?jL?og6y$q?e0kuRMH=!hafyIy`n0rQuW${+knfYd z*{ngK!o(%LB@vL5JU&U(c*>=?i4pBLn9%{X0FJRqv}nky<_L8G0CC2J(jM(b2bp|A zMThQ>7r3NS#e80kr#dW~4kGbFWw{Q=)%+Y&C%SD;fzC&~M`~R)LD+rFO?>qTe!BD6 z0QVj((4l$`xj!kSa3MdqF5aeTEd$~Rk(aJB3e0ajiV%Vgqcx1P;#B+?vf?KoQEOxv zhs2=DF6w78o@6DQkxHhhhq5J`WIT;cJ^PLi zQusk7{YumsO7@JiX>ztIz#0KCsl=OCaCoX8@^b`_$%GRMzNP&3#^0bxp(019L3YiG zMF9&og9BH)U<-p%0_4Cp*Zt-h2XTzN8^?d4{Zjh3{EK(K^Sq4c!$CkbLWvRvH9{wj zao>xa`n@>*Vv*#l5vWe9aU7fMDn?K(J|#j){1rl1Eht-35|$xI*!G<8diasy4zB5- zl1BtZ(ZitLrDHuh!O5;OxG7r: initialize the class, first call BaseDataset.__init__(self, opt). + -- <__len__>: return the size of dataset. + -- <__getitem__>: get a data point. + -- : (optionally) add dataset-specific options and set default options. + """ + + def __init__(self, opt): + """Initialize the class; save the options in the class + + Parameters: + opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions + """ + self.opt = opt + self.root = opt.dataroot + self.current_epoch = 0 + + @staticmethod + def modify_commandline_options(parser, is_train): + """Add new dataset-specific options, and rewrite default values for existing options. + + Parameters: + parser -- original option parser + is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. + + Returns: + the modified parser. + """ + return parser + + @abstractmethod + def __len__(self): + """Return the total number of images in the dataset.""" + return 0 + + @abstractmethod + def __getitem__(self, index): + """Return a data point and its metadata information. + + Parameters: + index - - a random integer for data indexing + + Returns: + a dictionary of data with their names. It ususally contains the data itself and its metadata information. 
+ """ + pass + + +def get_params(opt, size): + w, h = size + new_h = h + new_w = w + if opt.preprocess == 'resize_and_crop': + new_h = new_w = opt.load_size + elif opt.preprocess == 'scale_width_and_crop': + new_w = opt.load_size + new_h = opt.load_size * h // w + + x = random.randint(0, np.maximum(0, new_w - opt.crop_size)) + y = random.randint(0, np.maximum(0, new_h - opt.crop_size)) + + flip = random.random() > 0.5 + + return {'crop_pos': (x, y), 'flip': flip} + + +def get_transform(opt, params=None, grayscale=False, method=Image.BICUBIC, convert=True): + transform_list = [] + if grayscale: + transform_list.append(transforms.Grayscale(1)) + if 'fixsize' in opt.preprocess: + transform_list.append(transforms.Resize(params["size"], method)) + if 'resize' in opt.preprocess: + osize = [opt.load_size, opt.load_size] + if "gta2cityscapes" in opt.dataroot: + osize[0] = opt.load_size // 2 + transform_list.append(transforms.Resize(osize, method)) + elif 'scale_width' in opt.preprocess: + transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, opt.crop_size, method))) + elif 'scale_shortside' in opt.preprocess: + transform_list.append(transforms.Lambda(lambda img: __scale_shortside(img, opt.load_size, opt.crop_size, method))) + + if 'zoom' in opt.preprocess: + if params is None: + transform_list.append(transforms.Lambda(lambda img: __random_zoom(img, opt.load_size, opt.crop_size, method))) + else: + transform_list.append(transforms.Lambda(lambda img: __random_zoom(img, opt.load_size, opt.crop_size, method, factor=params["scale_factor"]))) + + if 'crop' in opt.preprocess: + if params is None or 'crop_pos' not in params: + transform_list.append(transforms.RandomCrop(opt.crop_size)) + else: + transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size))) + + if 'patch' in opt.preprocess: + transform_list.append(transforms.Lambda(lambda img: __patch(img, params['patch_index'], opt.crop_size))) + + if 'trim' in opt.preprocess: + transform_list.append(transforms.Lambda(lambda img: __trim(img, opt.crop_size))) + + # if opt.preprocess == 'none': + transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method))) + + if not opt.no_flip: + if params is None or 'flip' not in params: + transform_list.append(transforms.RandomHorizontalFlip()) + elif 'flip' in params: + transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip']))) + + if convert: + transform_list += [transforms.ToTensor()] + if grayscale: + transform_list += [transforms.Normalize((0.5,), (0.5,))] + else: + transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] + return transforms.Compose(transform_list) + + +def __make_power_2(img, base, method=Image.BICUBIC): + ow, oh = img.size + h = int(round(oh / base) * base) + w = int(round(ow / base) * base) + if h == oh and w == ow: + return img + + return img.resize((w, h), method) + + +def __random_zoom(img, target_width, crop_width, method=Image.BICUBIC, factor=None): + if factor is None: + zoom_level = np.random.uniform(0.8, 1.0, size=[2]) + else: + zoom_level = (factor[0], factor[1]) + iw, ih = img.size + zoomw = max(crop_width, iw * zoom_level[0]) + zoomh = max(crop_width, ih * zoom_level[1]) + img = img.resize((int(round(zoomw)), int(round(zoomh))), method) + return img + + +def __scale_shortside(img, target_width, crop_width, method=Image.BICUBIC): + ow, oh = img.size + shortside = min(ow, oh) + if shortside >= target_width: + return img + else: + scale = 
target_width / shortside + return img.resize((round(ow * scale), round(oh * scale)), method) + + +def __trim(img, trim_width): + ow, oh = img.size + if ow > trim_width: + xstart = np.random.randint(ow - trim_width) + xend = xstart + trim_width + else: + xstart = 0 + xend = ow + if oh > trim_width: + ystart = np.random.randint(oh - trim_width) + yend = ystart + trim_width + else: + ystart = 0 + yend = oh + return img.crop((xstart, ystart, xend, yend)) + + +def __scale_width(img, target_width, crop_width, method=Image.BICUBIC): + ow, oh = img.size + if ow == target_width and oh >= crop_width: + return img + w = target_width + h = int(max(target_width * oh / ow, crop_width)) + return img.resize((w, h), method) + + +def __crop(img, pos, size): + ow, oh = img.size + x1, y1 = pos + tw = th = size + if (ow > tw or oh > th): + return img.crop((x1, y1, x1 + tw, y1 + th)) + return img + + +def __patch(img, index, size): + ow, oh = img.size + nw, nh = ow // size, oh // size + roomx = ow - nw * size + roomy = oh - nh * size + startx = np.random.randint(int(roomx) + 1) + starty = np.random.randint(int(roomy) + 1) + + index = index % (nw * nh) + ix = index // nh + iy = index % nh + gridx = startx + ix * size + gridy = starty + iy * size + return img.crop((gridx, gridy, gridx + size, gridy + size)) + + +def __flip(img, flip): + if flip: + return img.transpose(Image.FLIP_LEFT_RIGHT) + return img + + +def __print_size_warning(ow, oh, w, h): + """Print warning information about image size(only print once)""" + if not hasattr(__print_size_warning, 'has_printed'): + print("The image size needs to be a multiple of 4. " + "The loaded image size was (%d, %d), so it was adjusted to " + "(%d, %d). This adjustment will be done to all images " + "whose sizes are not multiples of 4" % (ow, oh, w, h)) + __print_size_warning.has_printed = True diff --git a/data/image_folder.py b/data/image_folder.py new file mode 100644 index 0000000..2a137d3 --- /dev/null +++ b/data/image_folder.py @@ -0,0 +1,66 @@ +"""A modified image folder class + +We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py) +so that this class can load images from both current directory and its subdirectories. 
+""" + +import torch.utils.data as data + +from PIL import Image +import os +import os.path + +IMG_EXTENSIONS = [ + '.jpg', '.JPG', '.jpeg', '.JPEG', + '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', + '.tif', '.TIF', '.tiff', '.TIFF', +] + + +def is_image_file(filename): + return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) + + +def make_dataset(dir, max_dataset_size=float("inf")): + images = [] + assert os.path.isdir(dir) or os.path.islink(dir), '%s is not a valid directory' % dir + + for root, _, fnames in sorted(os.walk(dir, followlinks=True)): + for fname in fnames: + if is_image_file(fname): + path = os.path.join(root, fname) + images.append(path) + return images[:min(max_dataset_size, len(images))] + + +def default_loader(path): + return Image.open(path).convert('RGB') + + +class ImageFolder(data.Dataset): + + def __init__(self, root, transform=None, return_paths=False, + loader=default_loader): + imgs = make_dataset(root) + if len(imgs) == 0: + raise(RuntimeError("Found 0 images in: " + root + "\n" + "Supported image extensions are: " + ",".join(IMG_EXTENSIONS))) + + self.root = root + self.imgs = imgs + self.transform = transform + self.return_paths = return_paths + self.loader = loader + + def __getitem__(self, index): + path = self.imgs[index] + img = self.loader(path) + if self.transform is not None: + img = self.transform(img) + if self.return_paths: + return img, path + else: + return img + + def __len__(self): + return len(self.imgs) diff --git a/data/single_dataset.py b/data/single_dataset.py new file mode 100644 index 0000000..9a5c323 --- /dev/null +++ b/data/single_dataset.py @@ -0,0 +1,40 @@ +from data.base_dataset import BaseDataset, get_transform +from data.image_folder import make_dataset +from PIL import Image + + +class SingleDataset(BaseDataset): + """This dataset class can load a set of images specified by the path --dataroot /path/to/data. + + It can be used for generating CycleGAN results only for one side with the model option '-model test'. + """ + + def __init__(self, opt): + """Initialize this dataset class. + + Parameters: + opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions + """ + BaseDataset.__init__(self, opt) + self.A_paths = sorted(make_dataset(opt.dataroot, opt.max_dataset_size)) + input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc + self.transform = get_transform(opt, grayscale=(input_nc == 1)) + + def __getitem__(self, index): + """Return a data point and its metadata information. + + Parameters: + index - - a random integer for data indexing + + Returns a dictionary that contains A and A_paths + A(tensor) - - an image in one domain + A_paths(str) - - the path of the image + """ + A_path = self.A_paths[index] + A_img = Image.open(A_path).convert('RGB') + A = self.transform(A_img) + return {'A': A, 'A_paths': A_path} + + def __len__(self): + """Return the total number of images in the dataset.""" + return len(self.A_paths) diff --git a/data/singleimage_dataset.py b/data/singleimage_dataset.py new file mode 100644 index 0000000..0a9f1b5 --- /dev/null +++ b/data/singleimage_dataset.py @@ -0,0 +1,108 @@ +import numpy as np +import os.path +from data.base_dataset import BaseDataset, get_transform +from data.image_folder import make_dataset +from PIL import Image +import random +import util.util as util + + +class SingleImageDataset(BaseDataset): + """ + This dataset class can load unaligned/unpaired datasets. 
+ + It requires two directories to host training images from domain A '/path/to/data/trainA' + and from domain B '/path/to/data/trainB' respectively. + You can train the model with the dataset flag '--dataroot /path/to/data'. + Similarly, you need to prepare two directories: + '/path/to/data/testA' and '/path/to/data/testB' during test time. + """ + + def __init__(self, opt): + """Initialize this dataset class. + + Parameters: + opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions + """ + BaseDataset.__init__(self, opt) + + self.dir_A = os.path.join(opt.dataroot, 'trainA') # create a path '/path/to/data/trainA' + self.dir_B = os.path.join(opt.dataroot, 'trainB') # create a path '/path/to/data/trainB' + + if os.path.exists(self.dir_A) and os.path.exists(self.dir_B): + self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size)) # load images from '/path/to/data/trainA' + self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size)) # load images from '/path/to/data/trainB' + self.A_size = len(self.A_paths) # get the size of dataset A + self.B_size = len(self.B_paths) # get the size of dataset B + + assert len(self.A_paths) == 1 and len(self.B_paths) == 1,\ + "SingleImageDataset class should be used with one image in each domain" + A_img = Image.open(self.A_paths[0]).convert('RGB') + B_img = Image.open(self.B_paths[0]).convert('RGB') + print("Image sizes %s and %s" % (str(A_img.size), str(B_img.size))) + + self.A_img = A_img + self.B_img = B_img + + # In single-image translation, we augment the data loader by applying + # random scaling. Still, we design the data loader such that the + # amount of scaling is the same within a minibatch. To do this, + # we precompute the random scaling values, and repeat them by |batch_size|. + A_zoom = 1 / self.opt.random_scale_max + zoom_levels_A = np.random.uniform(A_zoom, 1.0, size=(len(self) // opt.batch_size + 1, 1, 2)) + self.zoom_levels_A = np.reshape(np.tile(zoom_levels_A, (1, opt.batch_size, 1)), [-1, 2]) + + B_zoom = 1 / self.opt.random_scale_max + zoom_levels_B = np.random.uniform(B_zoom, 1.0, size=(len(self) // opt.batch_size + 1, 1, 2)) + self.zoom_levels_B = np.reshape(np.tile(zoom_levels_B, (1, opt.batch_size, 1)), [-1, 2]) + + # While the crop locations are randomized, the negative samples should + # not come from the same location. To do this, we precompute the + # crop locations with no repetition. + self.patch_indices_A = list(range(len(self))) + random.shuffle(self.patch_indices_A) + self.patch_indices_B = list(range(len(self))) + random.shuffle(self.patch_indices_B) + + def __getitem__(self, index): + """Return a data point and its metadata information. 
+
+        Parameters:
+            index (int) -- a random integer for data indexing
+
+        Returns a dictionary that contains A, B, A_paths and B_paths
+            A (tensor) -- an image in the input domain
+            B (tensor) -- its corresponding image in the target domain
+            A_paths (str) -- image paths
+            B_paths (str) -- image paths
+        """
+        A_path = self.A_paths[0]
+        B_path = self.B_paths[0]
+        A_img = self.A_img
+        B_img = self.B_img
+
+        # apply image transformation
+        if self.opt.phase == "train":
+            param = {'scale_factor': self.zoom_levels_A[index],
+                     'patch_index': self.patch_indices_A[index],
+                     'flip': random.random() > 0.5}
+
+            transform_A = get_transform(self.opt, params=param, method=Image.BILINEAR)
+            A = transform_A(A_img)
+
+            param = {'scale_factor': self.zoom_levels_B[index],
+                     'patch_index': self.patch_indices_B[index],
+                     'flip': random.random() > 0.5}
+            transform_B = get_transform(self.opt, params=param, method=Image.BILINEAR)
+            B = transform_B(B_img)
+        else:
+            transform = get_transform(self.opt, method=Image.BILINEAR)
+            A = transform(A_img)
+            B = transform(B_img)
+
+        return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path}
+
+    def __len__(self):
+        """Let's pretend the single image contains 100,000 crops for convenience.
+        """
+        return 100000
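The precomputation scheme described in the comments of SingleImageDataset above is easier to see in isolation. Below is a minimal sketch, outside the patch, of how one parameter dict drives get_transform() so that every sample of a minibatch shares the same zoom while each gets its own patch location; the Namespace fields are just the options get_transform() reads, and the preprocess string, sizes, and batch size are illustrative assumptions rather than the project's defaults.

# Minimal sketch (assumptions noted above), not part of the commit.
from argparse import Namespace

import numpy as np
from PIL import Image

from data.base_dataset import get_transform

opt = Namespace(preprocess='zoom_and_patch', load_size=1024, crop_size=64,
                no_flip=False, dataroot='./datasets/single_image_monet_etretat')
img = Image.open('datasets/single_image_monet_etretat/trainA/monet.jpg').convert('RGB')

batch_size = 4
# One random zoom per minibatch, tiled so that all batch_size samples share it
# (the same np.tile/np.reshape trick as in SingleImageDataset.__init__ above).
zoom_levels = np.reshape(np.tile(np.random.uniform(0.25, 1.0, size=(2, 1, 2)),
                                 (1, batch_size, 1)), [-1, 2])

for index in range(batch_size):
    param = {'scale_factor': zoom_levels[index],  # shared within the minibatch
             'patch_index': index,                # distinct crop cell per sample
             'flip': False}                       # deterministic, since 'flip' is given
    A = get_transform(opt, params=param, method=Image.BILINEAR)(img)
    print(A.shape)  # torch.Size([3, 64, 64])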
+ """ + # save the option and dataset root + BaseDataset.__init__(self, opt) + # get the image paths of your dataset; + self.image_paths = [] # You can call sorted(make_dataset(self.root, opt.max_dataset_size)) to get all the image paths under the directory self.root + # define the default transform function. You can use ; You can also define your custom transform function + self.transform = get_transform(opt) + + def __getitem__(self, index): + """Return a data point and its metadata information. + + Parameters: + index -- a random integer for data indexing + + Returns: + a dictionary of data with their names. It usually contains the data itself and its metadata information. + + Step 1: get a random image path: e.g., path = self.image_paths[index] + Step 2: load your data from the disk: e.g., image = Image.open(path).convert('RGB'). + Step 3: convert your data to a PyTorch tensor. You can use helpder functions such as self.transform. e.g., data = self.transform(image) + Step 4: return a data point as a dictionary. + """ + path = 'temp' # needs to be a string + data_A = None # needs to be a tensor + data_B = None # needs to be a tensor + return {'data_A': data_A, 'data_B': data_B, 'path': path} + + def __len__(self): + """Return the total number of images.""" + return len(self.image_paths) diff --git a/data/unaligned_dataset.py b/data/unaligned_dataset.py new file mode 100644 index 0000000..b8df773 --- /dev/null +++ b/data/unaligned_dataset.py @@ -0,0 +1,79 @@ +import os.path +from data.base_dataset import BaseDataset, get_transform +from data.image_folder import make_dataset +from PIL import Image +import random +import util.util as util + + +class UnalignedDataset(BaseDataset): + """ + This dataset class can load unaligned/unpaired datasets. + + It requires two directories to host training images from domain A '/path/to/data/trainA' + and from domain B '/path/to/data/trainB' respectively. + You can train the model with the dataset flag '--dataroot /path/to/data'. + Similarly, you need to prepare two directories: + '/path/to/data/testA' and '/path/to/data/testB' during test time. + """ + + def __init__(self, opt): + """Initialize this dataset class. + + Parameters: + opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions + """ + BaseDataset.__init__(self, opt) + self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A') # create a path '/path/to/data/trainA' + self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B') # create a path '/path/to/data/trainB' + + if opt.phase == "test" and not os.path.exists(self.dir_A) \ + and os.path.exists(os.path.join(opt.dataroot, "valA")): + self.dir_A = os.path.join(opt.dataroot, "valA") + self.dir_B = os.path.join(opt.dataroot, "valB") + + self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size)) # load images from '/path/to/data/trainA' + self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size)) # load images from '/path/to/data/trainB' + self.A_size = len(self.A_paths) # get the size of dataset A + self.B_size = len(self.B_paths) # get the size of dataset B + + def __getitem__(self, index): + """Return a data point and its metadata information. 
+
+        Parameters:
+            index (int) -- a random integer for data indexing
+
+        Returns a dictionary that contains A, B, A_paths and B_paths
+            A (tensor) -- an image in the input domain
+            B (tensor) -- its corresponding image in the target domain
+            A_paths (str) -- image paths
+            B_paths (str) -- image paths
+        """
+        A_path = self.A_paths[index % self.A_size]  # make sure index is within the range
+        if self.opt.serial_batches:   # make sure index is within the range
+            index_B = index % self.B_size
+        else:   # randomize the index for domain B to avoid fixed pairs.
+            index_B = random.randint(0, self.B_size - 1)
+        B_path = self.B_paths[index_B]
+        A_img = Image.open(A_path).convert('RGB')
+        B_img = Image.open(B_path).convert('RGB')
+
+        # Apply image transformation
+        # For FastCUT mode, if in finetuning phase (learning rate is decaying),
+        # do not perform resize-crop data augmentation of CycleGAN.
+#        print('current_epoch', self.current_epoch)
+        is_finetuning = self.opt.isTrain and self.current_epoch > self.opt.n_epochs
+        modified_opt = util.copyconf(self.opt, load_size=self.opt.crop_size if is_finetuning else self.opt.load_size)
+        transform = get_transform(modified_opt)
+        A = transform(A_img)
+        B = transform(B_img)
+
+        return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path}
+
+    def __len__(self):
+        """Return the total number of images in the dataset.
+
+        As we have two datasets with potentially different number of images,
+        we take the maximum of the two.
+        """
+        return max(self.A_size, self.B_size)
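For readers skimming the patch, the sketch below (not part of the commit) shows UnalignedDataset driven by a plain PyTorch DataLoader. The hand-built Namespace only mimics the option object; in the project the flags come from the options/ package, and all values here are illustrative assumptions.

# Minimal sketch (assumptions noted above), not part of the commit.
from argparse import Namespace

import torch.utils.data

from data.unaligned_dataset import UnalignedDataset

opt = Namespace(dataroot='./datasets/horse2zebra', phase='train',
                max_dataset_size=float('inf'), serial_batches=False,
                preprocess='resize_and_crop', load_size=286, crop_size=256,
                no_flip=False, isTrain=True, n_epochs=200)

dataset = UnalignedDataset(opt)  # pairs a trainA image with a random trainB image
loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True)
batch = next(iter(loader))
print(batch['A'].shape, batch['B'].shape)  # torch.Size([1, 3, 256, 256]) each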
diff --git a/data/unaligned_double_dataset.py b/data/unaligned_double_dataset.py
new file mode 100644
index 0000000..245984a
--- /dev/null
+++ b/data/unaligned_double_dataset.py
@@ -0,0 +1,100 @@
+import os.path
+from data.base_dataset import BaseDataset, get_transform
+from data.image_folder import make_dataset
+from PIL import Image
+import random
+import util.util as util
+import torchvision.transforms.functional as TF
+from torchvision.transforms import transforms as tfs
+
+class UnalignedDoubleDataset(BaseDataset):
+    """
+    This dataset class can load unaligned/unpaired datasets.
+
+    It requires two directories to host training images from domain A '/path/to/data/trainA'
+    and from domain B '/path/to/data/trainB' respectively.
+    You can train the model with the dataset flag '--dataroot /path/to/data'.
+    Similarly, you need to prepare two directories:
+    '/path/to/data/testA' and '/path/to/data/testB' during test time.
+    """
+
+    def __init__(self, opt):
+        """Initialize this dataset class.
+
+        Parameters:
+            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
+        """
+        # self.use_resize_crop = opt.use_resize_crop
+        BaseDataset.__init__(self, opt)
+        self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A')  # create a path '/path/to/data/trainA'
+        self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B')  # create a path '/path/to/data/trainB'
+        self.opt = opt
+        if opt.phase == "test" and not os.path.exists(self.dir_A) \
+           and os.path.exists(os.path.join(opt.dataroot, "valA")):
+            self.dir_A = os.path.join(opt.dataroot, "valA")
+            self.dir_B = os.path.join(opt.dataroot, "valB")
+
+        self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size))   # load images from '/path/to/data/trainA'
+        self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size))   # load images from '/path/to/data/trainB'
+        self.A_size = len(self.A_paths)  # get the size of dataset A
+        self.B_size = len(self.B_paths)  # get the size of dataset B
+
+    def __getitem__(self, index):
+        """Return a data point and its metadata information.
+
+        Parameters:
+            index (int) -- a random integer for data indexing
+
+        Returns a dictionary that contains A0, A1, B0, B1, A_paths and B_paths
+            A0, A1 (tensor) -- the left and right halves of an image in the input domain
+            B0, B1 (tensor) -- the left and right halves of an image in the target domain
+            A_paths (str) -- image paths
+            B_paths (str) -- image paths
+        """
+        A_path = self.A_paths[index % self.A_size]  # make sure index is within the range
+        if self.opt.serial_batches:   # make sure index is within the range
+            index_B = index % self.B_size
+        else:   # randomize the index for domain B to avoid fixed pairs.
+            index_B = random.randint(0, self.B_size - 1)
+        B_path = self.B_paths[index_B]
+        A_img = Image.open(A_path).convert('RGB')
+        A0 = A_img.crop((0, 0, 256, 256))    # left half of a 512x256 image
+        A1 = A_img.crop((256, 0, 512, 256))  # right half
+        B_img = Image.open(B_path).convert('RGB')
+        B0 = B_img.crop((0, 0, 256, 256))
+        B1 = B_img.crop((256, 0, 512, 256))
+
+        # Apply image transformation
+        # For FastCUT mode, if in finetuning phase (learning rate is decaying),
+        # do not perform resize-crop data augmentation of CycleGAN.
+#        print('current_epoch', self.current_epoch)
+        is_finetuning = self.opt.isTrain and self.current_epoch > self.opt.n_epochs
+        modified_opt = util.copyconf(self.opt, load_size=self.opt.crop_size if is_finetuning else self.opt.load_size)
+
+        # Draw one crop position and pass it to get_transform, so that all four
+        # crops below are taken at the same location.
+        resize = tfs.Resize(size=(self.opt.load_size, self.opt.load_size))
+        imgA = resize(A0)
+        param = dict()
+        i, j, h, w = tfs.RandomCrop.get_params(
+            imgA, output_size=(self.opt.crop_size, self.opt.crop_size))
+        param['crop_pos'] = (i, j)
+        transform = get_transform(modified_opt, param)
+        # print(transform)
+        # sys.exit(0)
+        # A = transform(A_img)
+        # B = transform(B_img)
+
+        A0 = transform(A0)
+        B0 = transform(B0)
+        A1 = transform(A1)
+        B1 = transform(B1)
+
+        return {'A0': A0, 'A1': A1, 'B0': B0, 'B1': B1, 'A_paths': A_path, 'B_paths': B_path}
+
+    def __len__(self):
+        """Return the total number of images in the dataset.
+
+        As we have two datasets with potentially different number of images,
+        we take the maximum of the two.
+        """
+        return max(self.A_size, self.B_size)
diff --git a/datasets/bibtex/cityscapes.tex b/datasets/bibtex/cityscapes.tex
new file mode 100644
index 0000000..a87bdbf
--- /dev/null
+++ b/datasets/bibtex/cityscapes.tex
@@ -0,0 +1,6 @@
+@inproceedings{Cordts2016Cityscapes,
+  title={The Cityscapes Dataset for Semantic Urban Scene Understanding},
+  author={Cordts, Marius and Omran, Mohamed and Ramos, Sebastian and Rehfeld, Timo and Enzweiler, Markus and Benenson, Rodrigo and Franke, Uwe and Roth, Stefan and Schiele, Bernt},
+  booktitle={Proc. of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
+  year={2016}
+}
diff --git a/datasets/bibtex/facades.tex b/datasets/bibtex/facades.tex
new file mode 100644
index 0000000..08b773e
--- /dev/null
+++ b/datasets/bibtex/facades.tex
@@ -0,0 +1,7 @@
+@inproceedings{Tylecek13,
+  author = {Radim Tyle{\v c}ek and Radim {\v S}{\' a}ra},
+  title = {Spatial Pattern Templates for Recognition of Objects with Regular Structure},
+  booktitle = {Proc. GCPR},
+  year = {2013},
+  address = {Saarbrucken, Germany},
+}
diff --git a/datasets/bibtex/handbags.tex b/datasets/bibtex/handbags.tex
new file mode 100644
index 0000000..b79710c
--- /dev/null
+++ b/datasets/bibtex/handbags.tex
@@ -0,0 +1,13 @@
+@inproceedings{zhu2016generative,
+  title={Generative Visual Manipulation on the Natural Image Manifold},
+  author={Zhu, Jun-Yan and Kr{\"a}henb{\"u}hl, Philipp and Shechtman, Eli and Efros, Alexei A.},
+  booktitle={Proceedings of European Conference on Computer Vision (ECCV)},
+  year={2016}
+}
+
+@inproceedings{xie15hed,
+  author = {Xie, Saining and Tu, Zhuowen},
+  title = {Holistically-Nested Edge Detection},
+  booktitle = {Proceedings of IEEE International Conference on Computer Vision},
+  year = {2015},
+}
diff --git a/datasets/bibtex/shoes.tex b/datasets/bibtex/shoes.tex
new file mode 100644
index 0000000..e67e158
--- /dev/null
+++ b/datasets/bibtex/shoes.tex
@@ -0,0 +1,14 @@
+@inproceedings{fine-grained,
+  author = {A. Yu and K. Grauman},
+  title = {{F}ine-{G}rained {V}isual {C}omparisons with {L}ocal {L}earning},
+  booktitle = {Computer Vision and Pattern Recognition (CVPR)},
+  month = {June},
+  year = {2014}
+}
+
+@inproceedings{xie15hed,
+  author = {Xie, Saining and Tu, Zhuowen},
+  title = {Holistically-Nested Edge Detection},
+  booktitle = {Proceedings of IEEE International Conference on Computer Vision},
+  year = {2015},
+}
diff --git a/datasets/bibtex/transattr.tex b/datasets/bibtex/transattr.tex
new file mode 100644
index 0000000..0585849
--- /dev/null
+++ b/datasets/bibtex/transattr.tex
@@ -0,0 +1,8 @@
+@article{Laffont14,
+  title = {Transient Attributes for High-Level Understanding and Editing of Outdoor Scenes},
+  author = {Pierre-Yves Laffont and Zhile Ren and Xiaofeng Tao and Chao Qian and James Hays},
+  journal = {ACM Transactions on Graphics (proceedings of SIGGRAPH)},
+  volume = {33},
+  number = {4},
+  year = {2014}
+}
diff --git a/datasets/combine_A_and_B.py b/datasets/combine_A_and_B.py
new file mode 100644
index 0000000..329b1ec
--- /dev/null
+++ b/datasets/combine_A_and_B.py
@@ -0,0 +1,48 @@
+import os
+import numpy as np
+import cv2
+import argparse
+
+parser = argparse.ArgumentParser('create image pairs')
+parser.add_argument('--fold_A', dest='fold_A', help='input directory for image A', type=str, default='../dataset/50kshoes_edges')
+parser.add_argument('--fold_B', dest='fold_B', help='input directory for image B', type=str, default='../dataset/50kshoes_jpg')
+parser.add_argument('--fold_AB', dest='fold_AB', help='output directory', type=str, default='../dataset/test_AB')
+parser.add_argument('--num_imgs', dest='num_imgs', help='number of images', type=int, default=1000000)
+parser.add_argument('--use_AB', dest='use_AB', help='if true: (0001_A, 0001_B) to (0001_AB)', action='store_true')
+args = parser.parse_args()
+
+for arg in vars(args):
+    print('[%s] = ' % arg, getattr(args, arg))
+
+splits = os.listdir(args.fold_A)
+
+for sp in splits:
+    img_fold_A = os.path.join(args.fold_A, sp)
+    img_fold_B = os.path.join(args.fold_B, sp)
+    img_list = os.listdir(img_fold_A)
+    if args.use_AB:
+        img_list = [img_path for img_path in img_list if '_A.' in img_path]
+
+    num_imgs = min(args.num_imgs, len(img_list))
+    print('split = %s, use %d/%d images' % (sp, num_imgs, len(img_list)))
+    img_fold_AB = os.path.join(args.fold_AB, sp)
+    if not os.path.isdir(img_fold_AB):
+        os.makedirs(img_fold_AB)
+    print('split = %s, number of images = %d' % (sp, num_imgs))
+    for n in range(num_imgs):
+        name_A = img_list[n]
+        path_A = os.path.join(img_fold_A, name_A)
+        if args.use_AB:
+            name_B = name_A.replace('_A.', '_B.')
+        else:
+            name_B = name_A
+        path_B = os.path.join(img_fold_B, name_B)
+        if os.path.isfile(path_A) and os.path.isfile(path_B):
+            name_AB = name_A
+            if args.use_AB:
+                name_AB = name_AB.replace('_A.', '.')  # remove _A
+            path_AB = os.path.join(img_fold_AB, name_AB)
+            im_A = cv2.imread(path_A, 1)  # python2: cv2.CV_LOAD_IMAGE_COLOR; python3: cv2.IMREAD_COLOR
+            im_B = cv2.imread(path_B, 1)  # python2: cv2.CV_LOAD_IMAGE_COLOR; python3: cv2.IMREAD_COLOR
+            im_AB = np.concatenate([im_A, im_B], 1)
+            cv2.imwrite(path_AB, im_AB)
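A typical invocation of the script above might look as follows (all three folder paths are placeholders; --fold_A and --fold_B must contain the same split subfolders, e.g. train/ and test/):

python datasets/combine_A_and_B.py --fold_A ./edges/A --fold_B ./photos/B --fold_AB ./datasets/edges2photos

Without --use_AB, images are matched by identical filenames across fold_A and fold_B; with --use_AB, 0001_A.jpg in fold_A is paired with 0001_B.jpg in fold_B and the concatenated result is written as 0001.jpg.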
diff --git a/datasets/detect_cat_face.py b/datasets/detect_cat_face.py
new file mode 100644
index 0000000..13cfd61
--- /dev/null
+++ b/datasets/detect_cat_face.py
@@ -0,0 +1,64 @@
+import cv2
+import os
+import glob
+import argparse
+
+
+def get_file_paths(folder):
+    image_file_paths = []
+    for root, dirs, filenames in os.walk(folder):
+        filenames = sorted(filenames)
+        for filename in filenames:
+            input_path = os.path.abspath(root)
+            file_path = os.path.join(input_path, filename)
+            if filename.endswith('.png') or filename.endswith('.jpg'):
+                image_file_paths.append(file_path)
+
+        break  # prevent descending into subfolders
+    return image_file_paths
+
+
+SF = 1.05
+N = 3
+
+
+def detect_cat(img_path, cat_cascade, output_dir, ratio=0.05, border_ratio=0.25):
+    print('processing {}'.format(img_path))
+    output_width = 286
+    img = cv2.imread(img_path)
+    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+    H, W = img.shape[0], img.shape[1]
+    minH = int(H * ratio)
+    minW = int(W * ratio)
+    cats = cat_cascade.detectMultiScale(gray, scaleFactor=SF, minNeighbors=N, minSize=(minH, minW))
+
+    for cat_id, (x, y, w, h) in enumerate(cats):
+        x1 = max(0, x - w * border_ratio)
+        x2 = min(W, x + w * (1 + border_ratio))
+        y1 = max(0, y - h * border_ratio)
+        y2 = min(H, y + h * (1 + border_ratio))
+        img_crop = img[int(y1):int(y2), int(x1):int(x2)]
+        img_name = os.path.basename(img_path)
+        out_path = os.path.join(output_dir, img_name.replace('.jpg', '_cat%d.jpg' % cat_id))
+        print('write', out_path)
+        img_crop = cv2.resize(img_crop, (output_width, output_width), interpolation=cv2.INTER_CUBIC)
+        cv2.imwrite(out_path, img_crop, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description='detecting cat faces using opencv detector')
+    parser.add_argument('--input_dir', type=str, help='input image directory')
+    parser.add_argument('--output_dir', type=str, help='which directory to store cropped cat faces')
+    parser.add_argument('--use_ext', action='store_true', help='if use haarcascade_frontalcatface_extended or not')
+    args = parser.parse_args()
+
+    if args.use_ext:
+        cat_cascade = cv2.CascadeClassifier('haarcascade_frontalcatface_extended.xml')
+    else:
+        cat_cascade = cv2.CascadeClassifier('haarcascade_frontalcatface.xml')
+    img_paths = get_file_paths(args.input_dir)
+    print('total number of images {} from {}'.format(len(img_paths), args.input_dir))
+    if not os.path.exists(args.output_dir):
+        os.makedirs(args.output_dir)
+    for img_path in img_paths:
+        detect_cat(img_path, cat_cascade, args.output_dir)
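Example invocation of the detector above (directories are placeholders). The script loads the cascade files by bare filename, so the two haarcascade_frontalcatface*.xml files, which ship with OpenCV (e.g. under cv2.data.haarcascades in opencv-python), must be copied into the working directory first:

python datasets/detect_cat_face.py --input_dir ./raw_cat_photos --output_dir ./cat_faces --use_ext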
diff --git a/datasets/download_cut_dataset.sh b/datasets/download_cut_dataset.sh
new file mode 100644
index 0000000..d1ff919
--- /dev/null
+++ b/datasets/download_cut_dataset.sh
@@ -0,0 +1,23 @@
+set -ex
+
+FILE=$1
+
+if [[ $FILE != "ae_photos" && $FILE != "apple2orange" && $FILE != "summer2winter_yosemite" && $FILE != "horse2zebra" && $FILE != "monet2photo" && $FILE != "cezanne2photo" && $FILE != "ukiyoe2photo" && $FILE != "vangogh2photo" && $FILE != "maps" && $FILE != "cityscapes" && $FILE != "facades" && $FILE != "iphone2dslr_flower" && $FILE != "mini" && $FILE != "mini_pix2pix" && $FILE != "mini_colorization" && $FILE != "grumpifycat" ]]; then
+    echo "Available datasets are: apple2orange, summer2winter_yosemite, horse2zebra, monet2photo, cezanne2photo, ukiyoe2photo, vangogh2photo, maps, cityscapes, facades, iphone2dslr_flower, ae_photos, mini, mini_pix2pix, mini_colorization, grumpifycat"
+    exit 1
+fi
+
+if [[ $FILE == "cityscapes" ]]; then
+    echo "Due to licensing issues, we cannot provide the Cityscapes dataset from our repository. Please download the Cityscapes dataset from https://cityscapes-dataset.com, and use the script ./datasets/prepare_cityscapes_dataset.py."
+    echo "You need to download gtFine_trainvaltest.zip and leftImg8bit_trainvaltest.zip. For further instruction, please read ./datasets/prepare_cityscapes_dataset.py"
+    exit 1
+fi
+
+echo "Specified [$FILE]"
+URL=https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/$FILE.zip
+ZIP_FILE=./datasets/$FILE.zip
+TARGET_DIR=./datasets/$FILE/
+wget --no-check-certificate -N $URL -O $ZIP_FILE
+mkdir -p $TARGET_DIR
+unzip $ZIP_FILE -d ./datasets/
+rm $ZIP_FILE
diff --git a/datasets/download_pix2pix_dataset.sh b/datasets/download_pix2pix_dataset.sh
new file mode 100644
index 0000000..a7d09da
--- /dev/null
+++ b/datasets/download_pix2pix_dataset.sh
@@ -0,0 +1,24 @@
+set -ex
+
+FILE=$1
+
+if [[ $FILE != "cityscapes" && $FILE != "night2day" && $FILE != "edges2handbags" && $FILE != "edges2shoes" && $FILE != "facades" && $FILE != "maps" ]]; then
+    echo "Available datasets are cityscapes, night2day, edges2handbags, edges2shoes, facades, maps"
+    exit 1
+fi
+
+if [[ $FILE == "cityscapes" ]]; then
+    echo "Due to licensing issues, we cannot provide the Cityscapes dataset from our repository. Please download the Cityscapes dataset from https://cityscapes-dataset.com, and use the script ./datasets/prepare_cityscapes_dataset.py."
+    echo "You need to download gtFine_trainvaltest.zip and leftImg8bit_trainvaltest.zip. For further instruction, please read ./datasets/prepare_cityscapes_dataset.py"
+    exit 1
+fi
+
+echo "Specified [$FILE]"
+
+URL=http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/$FILE.tar.gz
+TAR_FILE=./datasets/$FILE.tar.gz
+TARGET_DIR=./datasets/$FILE/
+wget -N $URL -O $TAR_FILE
+mkdir -p $TARGET_DIR
+tar -zxvf $TAR_FILE -C ./datasets/
+rm $TAR_FILE
diff --git a/datasets/make_dataset_aligned.py b/datasets/make_dataset_aligned.py
new file mode 100644
index 0000000..739c767
--- /dev/null
+++ b/datasets/make_dataset_aligned.py
@@ -0,0 +1,63 @@
+import os
+
+from PIL import Image
+
+
+def get_file_paths(folder):
+    image_file_paths = []
+    for root, dirs, filenames in os.walk(folder):
+        filenames = sorted(filenames)
+        for filename in filenames:
+            input_path = os.path.abspath(root)
+            file_path = os.path.join(input_path, filename)
+            if filename.endswith('.png') or filename.endswith('.jpg'):
+                image_file_paths.append(file_path)
+
+        break  # prevent descending into subfolders
+    return image_file_paths
+
+
+def align_images(a_file_paths, b_file_paths, target_path):
+    if not os.path.exists(target_path):
+        os.makedirs(target_path)
+
+    for i in range(len(a_file_paths)):
+        img_a = Image.open(a_file_paths[i])
+        img_b = Image.open(b_file_paths[i])
+        assert(img_a.size == img_b.size)
+
+        aligned_image = Image.new("RGB", (img_a.size[0] * 2, img_a.size[1]))
+        aligned_image.paste(img_a, (0, 0))
+        aligned_image.paste(img_b, (img_a.size[0], 0))
+        aligned_image.save(os.path.join(target_path, '{:04d}.jpg'.format(i)))
+
+
+if __name__ == '__main__':
+    import argparse
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        '--dataset-path',
+        dest='dataset_path',
+        help='Which folder to process (it should have subfolders testA, testB, trainA and trainB)'
+    )
+    args = parser.parse_args()
+
+    dataset_folder = args.dataset_path
+    print(dataset_folder)
+
+    test_a_path = os.path.join(dataset_folder, 'testA')
+    test_b_path = os.path.join(dataset_folder, 'testB')
+    test_a_file_paths = get_file_paths(test_a_path)
+    test_b_file_paths = get_file_paths(test_b_path)
+    assert(len(test_a_file_paths) == len(test_b_file_paths))
+    test_path = os.path.join(dataset_folder, 'test')
+
+    train_a_path = os.path.join(dataset_folder, 'trainA')
+    train_b_path = os.path.join(dataset_folder, 'trainB')
+    train_a_file_paths = get_file_paths(train_a_path)
+    train_b_file_paths = get_file_paths(train_b_path)
+    assert(len(train_a_file_paths) == len(train_b_file_paths))
+    train_path = os.path.join(dataset_folder, 'train')
+
+    align_images(test_a_file_paths, test_b_file_paths, test_path)
+    align_images(train_a_file_paths, train_b_file_paths, train_path)
diff --git a/datasets/prepare_cityscapes_dataset.py b/datasets/prepare_cityscapes_dataset.py
new file mode 100644
index 0000000..2ff21af
--- /dev/null
+++ b/datasets/prepare_cityscapes_dataset.py
@@ -0,0 +1,90 @@
+import os
+import glob
+from PIL import Image
+
+help_msg = """
+The dataset can be downloaded from https://cityscapes-dataset.com.
+Please download the datasets [gtFine_trainvaltest.zip] and [leftImg8bit_trainvaltest.zip] and unzip them.
+gtFine contains the semantic segmentations. Use --gtFine_dir to specify the path to the unzipped gtFine_trainvaltest directory.
+leftImg8bit contains the dashcam photographs. Use --leftImg8bit_dir to specify the path to the unzipped leftImg8bit_trainvaltest directory.
+The processed images will be placed at --output_dir.
+
+Example usage:
+
+python prepare_cityscapes_dataset.py --gtFine_dir ./gtFine/ --leftImg8bit_dir ./leftImg8bit --output_dir ./datasets/cityscapes/
+"""
+
+
+def load_resized_img(path):
+    return Image.open(path).convert('RGB').resize((256, 256))
+
+
+def check_matching_pair(segmap_path, photo_path):
+    segmap_identifier = os.path.basename(segmap_path).replace('_gtFine_color', '')
+    photo_identifier = os.path.basename(photo_path).replace('_leftImg8bit', '')
+
+    assert segmap_identifier == photo_identifier, \
+        "[%s] and [%s] don't seem to be matching. Aborting." % (segmap_path, photo_path)
+
+
+def process_cityscapes(gtFine_dir, leftImg8bit_dir, output_dir, phase):
+    save_phase = 'test' if phase == 'val' else 'train'
+    savedir = os.path.join(output_dir, save_phase)
+    os.makedirs(savedir, exist_ok=True)
+    os.makedirs(savedir + 'A', exist_ok=True)
+    os.makedirs(savedir + 'B', exist_ok=True)
+    print("Directory structure prepared at %s" % output_dir)
+
+    segmap_expr = os.path.join(gtFine_dir, phase) + "/*/*_color.png"
+    segmap_paths = glob.glob(segmap_expr)
+    segmap_paths = sorted(segmap_paths)
+
+    photo_expr = os.path.join(leftImg8bit_dir, phase) + "/*/*_leftImg8bit.png"
+    photo_paths = glob.glob(photo_expr)
+    photo_paths = sorted(photo_paths)
+
+    assert len(segmap_paths) == len(photo_paths), \
+        "%d images that match [%s], and %d images that match [%s]. Aborting." % (len(segmap_paths), segmap_expr, len(photo_paths), photo_expr)
+
+    for i, (segmap_path, photo_path) in enumerate(zip(segmap_paths, photo_paths)):
+        check_matching_pair(segmap_path, photo_path)
+        segmap = load_resized_img(segmap_path)
+        photo = load_resized_img(photo_path)
+
+        # data for pix2pix where the two images are placed side-by-side
+        sidebyside = Image.new('RGB', (512, 256))
+        sidebyside.paste(segmap, (256, 0))
+        sidebyside.paste(photo, (0, 0))
+        savepath = os.path.join(savedir, "%d.jpg" % i)
+        sidebyside.save(savepath, format='JPEG', subsampling=0, quality=100)
+
+        # data for cyclegan where the two images are stored at two distinct directories
+        savepath = os.path.join(savedir + 'A', "%d_A.jpg" % i)
+        photo.save(savepath, format='JPEG', subsampling=0, quality=100)
+        savepath = os.path.join(savedir + 'B', "%d_B.jpg" % i)
+        segmap.save(savepath, format='JPEG', subsampling=0, quality=100)
+
+        if i % (len(segmap_paths) // 10) == 0:
+            print("%d / %d: last image saved at %s, " % (i, len(segmap_paths), savepath))
+
+
+if __name__ == '__main__':
+    import argparse
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--gtFine_dir', type=str, required=True,
+                        help='Path to the Cityscapes gtFine directory.')
+    parser.add_argument('--leftImg8bit_dir', type=str, required=True,
+                        help='Path to the Cityscapes leftImg8bit_trainvaltest directory.')
+    parser.add_argument('--output_dir', type=str, required=True,
+                        default='./datasets/cityscapes',
+                        help='Directory the output images will be written to.')
+    opt = parser.parse_args()
+
+    print(help_msg)
+
+    print('Preparing Cityscapes Dataset for val phase')
+    process_cityscapes(opt.gtFine_dir, opt.leftImg8bit_dir, opt.output_dir, "val")
+    print('Preparing Cityscapes Dataset for train phase')
+    process_cityscapes(opt.gtFine_dir, opt.leftImg8bit_dir, opt.output_dir, "train")
+
+    print('Done')
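For orientation, after both process_cityscapes calls in the script above complete, --output_dir contains six folders (an illustrative tree; the names follow directly from the code, with the Cityscapes 'val' split saved under 'test'):

datasets/cityscapes/
    train/   # 512x256 side-by-side pairs (photo | segmap) for pix2pix, from the train split
    trainA/  # 256x256 photos for unpaired training
    trainB/  # 256x256 segmentation maps
    test/  testA/  testB/   # the same three views, built from the val split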
diff --git a/datasets/single_image_monet_etretat/trainA/monet.jpg b/datasets/single_image_monet_etretat/trainA/monet.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..738c1cd889db1c5eb8d7f08ab8d02045f83bcb8a
Binary files /dev/null and b/datasets/single_image_monet_etretat/trainA/monet.jpg differ
ztj!U2p$IFKl`YB!=*N<-ExlrA%64*QY&y<)2QaiRlkttdfuq;-Sl!^*OkGXG)0;KN#v!R6`bw8*7zA*k{H2)W)_&?D{(d2#|1#kv7=GMR&SfkdjM?b~(9DW6M=9_?SwaKe(o ze(A=y9KXI!_kYRz-x`L!?Z5W}{~wV1za7WFU#ONW_gnq>+r{Mn%)r0Rf`6H~zu1AL z$efpamgSOX{==PtHBXc;`y3~IHht%0@*hqB#6PHH;3dFPZ2tLlOKF^t!o^_U99oKr z!@(CPl2X#3)@ua}e8>WqfP=1!ONhZC1-O`!IHU)YP_}jnj+0bL->0ZwR;Su|b#Tr4 z(U(#N2=r^`ppca|v1xlt4dVw^?W{d_#r4OhfRzRee4YWWrBlzvJ9w|Sge0i5P+Y7L zeJ}$Phl8(Pq#(GGn6jR=ICu$6Wlg%GK4QJI#6DHDYgyd_50VoD-BJ`&gdCywJq5ck z{zgh0OzxU`4>E$^?#{UXa}(~?U?OcYPa)dAcmZ0^uj|}W_F=`TTct`zr#^%`pE_Ky zrcinm@yU3YdatO!g5H`f(?x7DHCwhaG2?ERD@#WIHv6|Jk=XI0k8f5=dX zZMt4q^`^XH-}80$17X@#T%x`aq7ffPnI3$`v*aE=@+nO50&<|VzLcMKtUM*WaG;d> zEI+NiH!i7O-_zr0d229>BD5N^UbEFq&l)Qj891e}rn7$>>1wD?^o zD68?Nt>HuT?Z-aYuWlsYpTbI>{DC@2vFu7Rz_*>u8tL(m)S(EhhGNW=q+So_7`s=U z>b;iGsPsOJKvrMA(k8eGzBV(P!SlUZFx7aIZ0B;aPV1H#Q8=RIc$@Z1MU(oB2F>&n zPou;&ts~u-a`xKhVE9j7$cL>NRdUCbEF9-*{EOE+ljAirbs8oMjzvV=!L;_@U|*^A zU*{UgC+%%GiWqGRX0rS|o{e^wm%YVvpFOTDMa7Ba{vNgFdb&gF#@g zR9n$$mO0c0L@Ic2YkczFjZ?3=?gMQ&Lt+zi=oz8LOrw$(pLky|%P1bqGskVO7q;;E zwN+AP6&?ta+eb%xdiy`|w^U3@@6xvN+F~K>3D2bbsDIPQ(X>c8#Gt$*WqPDoYED7( zATcA1c-P(il#bbV+NUJkSc%iq1EtT@W3@|<>Un`rpO#}bvCge*d5gc*bhy3FWbjcd zJw)HRSHpm})_g2XK-bV-9dM&X;>~mGD5G=t)mGa1>6WwbHwUHF;#zLlI+VW}IPRxE z5TeiayqmEH(_GpxV$;=y>lkh-!*`B1A80Wt533j@a(qsaDprgYZks%>aym)yk&%Fx zxe%Iei2tbg9)F9mZ+xJyWboZIQuwi}-tDewkz3+$P2$6@3)=3T8!%R?lU{5=*RYZM z$z7oe=~3P_$i_RBH{-OoD7IKdlur#*;5~OG>@mDps{Ak!BT%pSpcJ8)nuAulB)I=- zNZHdOQpq-`;W?)-!X|n(QZY!4hQrO6OAaQPgrXrsUK^pVb{tdqm*nil4 zw4whW3lNrW>l4;6c4N-At@^;bVWqC(0DTRE?1r#c)7bP0_Mw+Z=^Oq6N$c3X^AZ8t z!hXuW;u39XlSY-(nUR!AeYZP1+IPshg)#O9lP_raI}e|5GeS^pF`KbXJ`G0@l7ZsK zc{Ji9?88dGHtX@KUHTHY6PY5T(7*AWJSU(K?d}$wIu>tzq%(z1zcxL9c9|hO>Z-W6V{E-rF7%)it<$QV>!wIkalL=DTgp z8Zo53)Y3fxlzbr9xJVrlQNM?F3}A?+)+vCiz?QGto&+(yX*ZNa5)&DA%T9U#S2 z5T=+X2{MO@VxDq{oTEpI^|d#xj49PR`S7-A%5qfK>}aC2KERv9FQfsjTXz0FC$dT2 z$Uw*PLsxV|G5OG7*33*=K;n%YlWK`s`%^;9`-q8~$DTcjfmoS%`=o=+w~nOzBdL|4 z369N9vdP;TH{R!YeZs0YR2aw`wIyaMQVBh!*Vw$mch57thE}b+fx}vN+d8lx8>%Wi zO}kp@zfs3qIP^g#`2OwXreQ9mq{NAVAp6u*yGd;xZ8rnYOZdJ=9ST&O>BzjtVA~9A zZS$|aSwb`R4Cf5JnKTXi$AE>;_YK$6=HooBXXc=49C7AJIy1+KZ1QQ{n%5(by|_wS zH?BP^NeKI~_u#H_yeJXL9LMxIAS}I=9NK07ycC zl}W=Y%{i*3Opb_$;;P<#V#D(ac-6z)y2#vWXzkh?tvs>3LyeJ6%WFo<+;qITKgnh1 zAYXAuayPazB+-{*?u4~>?xVD}9}_d4+2s{OI=h0+N}nZ*KihFAjhV01u>fgo7DARk zJxmhNCG~%6ef2g;3{_Lqin?>a;K}*c<1Ak#p)p}Z+~7F7YwJc%#wnwHouj{nm0$Ef zIeGQ{JJOpPbJaDQ3diY5AwtxaV{ZO4h6H_K7oio$-cT|R(KPi1joWr=hTdRKHrm7& z!>%=5s;4d|<}i_idu8VWQih4JGV=rXN%@CDU*i7KTXHBp1#rc$P%9iEAHK zcuJiOXdv)e{5HJ1Ll=KGskSO%AOhPnIdY2}?Wtcmn8fiVt}#-a61KcXNfy{Y-V4=L z(kT0e)vVlI_?(*)7N>?bNdyqy<1NaJDUSo1o&2qenL@7mK*`a2WW3?)%bg{ka+=+b zoI3Dw@W2TVO8(u;AIu?#hnpDJT{x}fWJz*8TQEz8aZRSZc_GFTAN_j#^Y$PtZbQ(^Dq0Aom?<^1RweBy!P2BK^7T1fW-6OpTfiy4NEUAxW3uInY=L5H z#5|#Q0nIH*6YwWdaQBg}iu_X@62aLF$vcOqY)4?Sd=|?|)<;#qyoBZ6z1-oAg=j;p z2r-|RZ=SoX;e3dt09{)PnrGF**QMI|a@FC|?HJ;$s*=WgeQfQpn-Bl$YhVAB! 
ztj}!D6pj?U*MC5acb+iH43DJVd^MXmBB=Kbc*#n~!V_|(_C2gPR~Pxpfi^_dtC$|A z>e9!EF!l2Wd97^j0MW*iKgMzjnD2VG014ZI9Q&>bUvrhZG0*Yx^9~$_(Fpq3$bko@ z&kc;p*(H-B2kh3zYnZ*6fZd$hors*xxIj9#-crBmmd6VR0WSo{2(mrL{NVwb7RGpx zgOUsm)`mJ%>H0YdKhao%#+&$#KCI^2EWVSo=Xui!8TSQfl4{#v8ZCAKBVgY-ph}No zb4xWdEr+PaL+|nO$D`O)g}IW=z$^wi^j1mc0yJLh8YB?JaGVk=H5roHs$s?iu42q6 zhfYNrD+~ryblHxzZp8e6NIOEm$#*%@a%qG(&cU7x)+deCIhhr@bNo*g_C9Pq`dCXv z(Kl0t!^zCVYGE4Nx=2Qiq=P}jQ) zFDVmnyw!=VOf$b@C8K$L>D+taYD3f0T#1duhaSU?q(hrlou7!?xF=l5y_W376vAjV zy=g;|Th{Pv&pyBmADeznCnIF3iCdyy%#o3$H2sGa1=gKgBIbD$GPznD;+qNi6Q$FF zNn@7xy`ru1VZm=PY_*c7I9yyeR^#WQQ9mMZ@F&7sgwc<+U}fS30=9aSNvpT<*&H); z9h5TIlLWbhM0)mP&am9bJ!|;X5DT_g=?KTNvM*kXM~@(8E7p}qOYXd+vQ_@*N#sm!qmZkt zf4h}uqvSD2ZgL~G@~QfUSt@0(xq}aTi{Fuq8Tb2K21B0C#g-gq)F(C)NACUV6`FcX zmBPpP2K2tt&*-Lk!cP|h8x1h5jeU}(M2pli7k|;&Wt@@SRUpg3ydcu)X11@Q1OhfZ zJ8M_2P*MgtT3ku<#i#`+vG82aqUy%TA?mH;!IWuK;EB8^m1)d<%=5V``Lkp-$Lm?s zL*iqXPRL$*!HHPrx7{?mo3{%-v?Ft75*@@ZE;D$pVfS=K=T67dF;eZwDoI$c=rniq}84^PDwc`Oww?0r8B#xxssoH zNatEqF!_%;eNM6!%zVRj;Qu;_$Imh(EmSv$an-OL%~RaE?Kk-;{FVbOxPSi97k!~# z`hv0AwynS{q|NX$HNc&QGkId!tjutiy>}cHIZhs#@KP{~ueumUKQ&(f{>XP2+wA1# z9)47~eZOn2hK=~gjM{R~`>YU|pwy8GgRI@0jJ*yG6VheC|AoGK&DEHb;n%vGypQaU zZ0L1*87;IYSkj#0q)KQ`PDLj&EoobPW5xLwa0+=$d(BTnX&Q1J%=yRuZ z*Q2vFeSXwp)mA3o$)jn8Mmo-)9At!OjCx_-yvNI&yw&>2%zeLs>Sjkuf#iYm>5u-W z@3kE<=M&%Kx$@y%fT6^-R~VmZUPf)jR`1qQFw8TW;pxo%hB^vYC`l0KJeUbbslHFx zZP!x_^1{gC;mR{aSN;%H%wRtupk;F8&OpR=g|ybOdVRk$t?wo@JbUAkOW#d!ReEO( znSFw`fs?T%henxwDU@Q#*)$BZV`^|h`p_uL$MoI1iKMiB`Yk4(d?J)uItU*b2+PYO z9GZ}OFTuDZJ%)x}QTt?1dg7;A&IPS)NiUC3Z4FmeTR|w|mR@{BKapwdwA&Br-TD^3KE1h7t4Hd*}LyhaF;b67x? zGEBm8TpCrjN7g5iuxmSGDu3+N9=Pv%I|F^jtf+ISsjyrsYyt8gZ@W3@x^iQt7U?7+ z!BxKE5pPnX_qt^`&G~pM8&#Z;!ZE#3$OKF^&Z<1XSBBA?wYA^p>>f$CUiaCti#qw%GQI z{x62xRt)7)LW zcGG4V;Sl#DV5qr|P;vF1H!H<23pLk-bH2z11l>^wU^+z6}>|+FZ zDfLiJ+Ggb@ZzlIA+p4FmPat7Z#a@8=BafZxMD^N*38=`F-eIcs$8HpSyZ0wl zNUZ@oxzcxvG*H^0!v9!KPQartmh-7HGv<8eETgtFTyz< zr0MfDVAXv>RzrtGnaGkh=4`IEaym^C)RiYZ3^mLInn)d9WtL8~c25-Yr|t(hp4gqU z4rBn|Cwbl++Zq$3)y>9=wc7|WBgmSP(M-IY*~{01CnFoWnOykk;;iuV9sY&#?Y6e( ze4NlP*`+h(`y%gqYsNTOqDO0*QZqM^E@L@>bgjz>MuYsG+x)A5z{_Ao6+XHwfow<2Hg1QGvd599oEtJ`jU{nZTn`OL(QH_{*D}{R=2dd zq-Y@_0<1_~1t&^o=sf7$t9!i1qgnJmCxW-Pa2PI=C2TLS8{;KSawRq;Cbe|Yssr_L z53|9R{attFW9(@g$;f_Efz-?g%c0sf!OVn=j=3CeihDpD^k#S>sVQ&IGy9Kcr@d3JxQRdtA4)SR@nKbTG?>W5Hr$# zsxV_?HaWpI>*k}Ok7S#HNZR{M0@@&^%%|N6p>20tx@k(l`9F$?6T78;?E);Q%_Z`Uc1YtGo3QbA?u( zt)H-gmSY^wdwkL=ut2~!VqQ#gS0JPM)411$v$R_9()8vOp|La8b~ICGlB>9JPttqB zk#bg;p=jCK>^RBAR-(Ra*V%z&XqdGs4M| zCvD|4YKszZgQASbDs~U4SaWR%WtcTNIN9b#=@_EgoVU>I!JL$&3u=#S3PuW|@Pt(6 z)VvVZX{!N(BYoi~-sMu`gPEjR1}w+nG0mI&f_d#ZeAQNe20QUu^&^zzu@?_}PW#>{ zkr#Ar9;eD?W|Qf0Px&$9+=OtI3LzpQudg5M5on2@P;guB6@7P}#>2z!Ce~wo_|&uW z%U*(n%Xc4vw;j+pBZSGACr<9)L?lWs+u2c^N#ZJ9IJY?(g{iy~lh2$~Xl3`_@{L{z zBVlZrG{?1Xbh`B0^n$G^TPW|7;EIgafD~HfF~q~nnVIJr*DpUHg3x>u8F3tQCX(T^ zMZ<&^Nh-4WE=$|t1}{)AA_`zUg>=$@ctwjB z_@H}4c&Qr7lRrBFdGs~Mg_ZH?vvIB;>Gb4t@|==sI-@Be>7^Fi#Bdz@ZbE*VAH^0k zVGfagFa~>dXoGoFGll7t*bMm9YNk0-^0#X>_fsm4@R<9kJy*N9*GiU;*U}ro49b;0 zp|!#19ZQbpmkhDc!H&SI#5*q6uBRFiQ*^o#i92TSaWeBeh_qJK&QD$3guAkQQqzFr zbE-anz8a|sURTQy@2`5sdK_j^`8s`bi2-CiFo!ER+gusBS0W~IN~&A8**m|_Xy=+>km4%*j6 zN$5!Pq1ySRq{U!{+PLUf&8ONK*2rky35Iz2dD1!TWnNyv@JIjs*8M~3KzGV>f;yUb zVz?qA=8cdWs`9vCsD6z}*8E21M7`RkfRs0rVvf&wVPqS}K2GK|*tYA@-hIHsZ`huB zP9SRvrbi0O>CehDY7aCDg2{GXC%^O3vM9P;_3pXLvJ1Pr+1#rN%U=lUowYbgU3j_n zXUqw>w(7$?Qod5mQJ!!}^)!79Sl<~lZ?(4YqXoK!wt^=4npY_3fN;8VuuC}{p2vlH>#A8yL?8tk&{BeHU%22y z(@C=oHT~;po>ljq;+{?lYN_L?o4jm!-R!UgIs2AFDmlZBG~Z&QQdt3eaMkhRfrmx& 
z)mGk)uaJ%`Cj>9KY;f-$H|BGV>P*vly4i~3#he~e6*k)8r*PKP2-i-Y#+hfMCG1ou znJ4Kk4Wqw#4LITWSmEF)C5K2aF2+)ImNH>=Xzvp`XNs$C&@_y)_NKDZFqk1R#g_&` zET$GQ=h#1>YayVe4Herakp$bgU2C_P_kocJXNbaR0gQezNkCnrFa2q=(58rL@N|5R zr*^=$)p3|!YqUnwE{ErS*qeTKLOnK;Q<48b@}XU%^GUx;HI#jgV}dq0V*%A5X_~v8 zIALDFQsR{0Zima%tGY1=x8SH<(R&kr~BHR$bH<0b~2Cd8XB)sU7uLb z!rI)umPw&Y47p8*P0wbW(jblS=`uqprQC#W=Dn+Ca~V!@#o9xhEywmH6?S&zptkLG zJVfYxPbji6BkmzHBs!mfU3lCmjVi%8z~KaOFZW70wI`hIm|6XY;-+jNFZB(;+{<#}H4@U649?DO$nUjh8 zSGBS)@6|av%)&|-zucIevA3u1CD-Oe9+4qwTHVjO2jYUhR}aaOxu#&|uA4l`B)*T_ za%}G?k=PXp@_vXkM-oFfd=v!45-AzXX{?yi);x|Ao=>Dq$e#v#JXx>}NpLL4->aEP zI9Z;l&jCpq;mmGEZOh7GV1M%6;Fx1YSM)G(yGvBqGxCK*hX9>lhj2&O9V8HSv^%f^ zuaz^Wxr(*zLRgFKOtnpZiH=@&Mw|Bn#Mx5uLx=Vd6_F;uh14Din0)g*9h@tkrpybr zt(U!3NOv+?IBy)&rl%*KO4qTIX^S+aN-*5`Z7iYY-eLVyXVy1au zL#5T_L&+~)fK<9sf#n->qJ>}`iLvbURaY}+y)}oK_Z$_P?0CXbZjx@7azdwwy~j%o zt=-+-#ifvkw1x%aEyE^0QopC&<_G7}F~PO(-tT7NJsa+vKzeET+i~WuUJ|OyjP=i4 z8W+4J;QOVYGw)Iw?j?+71Iruow7gI!@3k4^O%UyJ!p^sN!kOl$Asot&dS{W_+Zc%R zGr7c4lq04@!0tIaDR<<12P5`i9g~)4n_u~>rPiftShZ@?ko7vFy8a91W|3h}sYt`T z%hOl`C@X5(CzHUe-ghJE&7_7ob6D#UShlbu-8|SX`thKCyPKVXxK4g^bPv~LP7Oypi|-bILMBKyAq=<<}bxF4O(xzTdH;iccfaQ}-rr4q7{2exOo0T(4_$ z+>nIgejgj|WaaUc&u@^QMsn3kCuhmX7WeOK`$|Yg*7^T?rE39Bdo?xtkFt-ZJ{z#(9Ci#2#TU5@~IL5*NI40 z#i=I5Q$bu<0Kc)->en{m^q}OeIfY6Am3q_8f;V}x_s97cz{9sW?~0fO`-qz+_Z7Tn zFJ`c%J$iG;V_3c+X|FJDmiz|lEyGRnm?QZe!|sjvpMAI0mHTEhs3BMTBSr=`+2N%P zav~W}nM32Qq=;y*N|{8=W73&~rji-uhI+#s3%{nJ_Ni)h4VeTIKcwjv`M}-b@ut$c z*Qs;aWg{o9;gml$@ovk-j3#k}sKDhXwIPS$1K&P(C)12lps^QuzAa zbG4eWnVAW>jtlt2v<2ws31ikFicr!Sa|tD@QZx3Z+R8a^??}Ne^X^BDwMSw`_keVm zR$X_$07S7CvJEw~2&HJ0M(;cdF1;m^lrJFd$y3mRuw|zHC#-w+PNUS69Kq6wNZsVe zK+4|x2)3TUKW$im;9#Nf+srF}OK?%bGL7fRR~X@qxf*Z}fKYb4QkLD8=YP`LjU}K$ z-&6PkB3>#}Dr2bCf!Et;tlG<>JQ^ey-7a~0PVHH#_-$7{C%|#N#qN#=1cQU|m|_02 zj9}Q{R{wiJ4qdMYYAbpqqKD`*qkX~j$RyJPr;6v-s`&L_nvXMTnjCn^KYHF^>D41a ze6luqDv3{2hjWM01P$uRZ{D@iVRdc45o5IQD_+g5N|_u)N?bRL1c!6irh{IyGMnA8 z!-F3=G^+9LX>eK*aJDjW;J6ikUuP>9L+bb)FRfJQ)8~67Kq90u(eX`ZYX{BAv(v52 zod-8Lvvzkl&G(wprp-fRbW@MLf=E(yHBJpbx=HI|MWx@WPr?y(2@w2ia!UC0I5O%+ zt+Ddu`_9J%oCW9w-BfKZ$oqEx)KG}Q)zR)A|6{*(f@6T=@d*sc9wkvqyhjUGQ^XDI zfd{`$ayoR6*d~YRXnx30KU4(vm$S?XGfU!4kWyK>-*BegXt-Sn*$n4kxdV`k>owDrB$Q7!` z#}thvPS0XaiXZI zE1$*e8w%Wixjqc{u%bxG@ECHgXPEla%Jbd*Q+%qt=3M5P7s_#Yl@TCpy7`Yz#n9O@ z?1P%XI2&eNkLL)if#t(SzndOBRoc&==^zyyGdNQ|-gpfDl#`Ou9BLMGa1St+HFRRk ztHDz;j+MS^&(7!=#+YlE98?|`G$IJ5^n{Ampx7rwew3jMLPxB(k8*6XB0HS&;~JH_ zCBNhQ$x7g(o}sa(TZvT{MmPFDrkpmE-M^2Od9RC2jpMVF8?3A?>f7A=hlwe-g1q=Q zpQGiL6?J-#4Ahn;EGtQzWvDCkQl9IiQ1Ty$pLz7yT*7li@Yw%^(fQ-p_d1{6s8@7= zt#mXEoR3&~G4nOt95|IMXDm2mnX|c4rW(aIkkC!aILeGrO=y}tW=G8aWjiAyIvNS- zHrJ6H?}4((^E1=am}rydCX@QOaj=A5`(R1TXp7Z3l`vz?bczM{yVYR*xTona#jo@J9#--2G6;V}2bh*pdcPe61{yc_>l282)Yo z?!mfX89+JLDfd&V(1-H4InvtZ=AG{|x9CrEVO4hL-xGfQ&UQnX!y_Tvd(%1l|5lO*pT}3?ZRzdX;|5PN&6gGSanJX z%gMb{>lWBXS=7y5s#cf&DR-z2tsoISX#-OA~6F5Q&l`rP|=u7qh>MnU2&?ZHa|7-zR%cQgtDx!6&b3Ynq2 zRaOYHHWr1QjI!KnD*~BQOrP?I{7uo{q$Klq{7>Tj{13Q1iz5YV>5bQj$xQWn8%IhNy0bBn>qr`iII78gV?~IOU;M@ z{Ca03ze_12U&T?Ah>R9)-kG@uv0r*JH+@_I){Dj(Ns`$)eJjs%D`IzDI&W{?+-<&qYLs-N zC49DPuX#)Ob%JkEt;uO1J@o2bUaZWz4gSm$1nvc7|LCe>3S@ zv>UD!()$TztvEI9%}zd0qB+gtQ^l(}#1Q%|W$>|X!C2IGwC(i606ujkQ`nzE^H(l8 zlCS{LJboZ~`2~C^e;e*|>_A4xhwS?_KWE45q`A%{0XT!Djq9P}>Y5(W3=S+nwnr-c zEG$~gLO&SR-^-R*cD&2{Q8h17{y`+WnZ@EbAJF~flmUz7L*)gzC(?-8_cVfMTPs=a z=(Jlr=BIvlm%Kus++NN&oo#l`vGqS^iK9Um<-X$d*@DWW>2S}|uWtCm@&p%Y?_!D(K z9%GDM@!XWi=eS3l8jBXluj<;BM>+SM(BQrkA+0N*k;EPG>LY^B(74xvU-PM}<`g80 ziodmujg>;`tSC*L(Ju&3Ve$;M_ZdfZKN670mA75*-_l`4Y-<>OelAzI 
z*@s%>!+6ojm0)>cIpL*)ry4JUL-=83>MB|DqSh{6SCg07qhYgq;cuSH-5ksro}PB( zQ|m1@vbKPf&WX;t69js#N3Uuf}J> zf!bXA=zSu+4d&HHc%SurOH;+P z7C152YHBgtg|`7oqE5^wRN(O4#4vE4aW~DwDXycuM+cLyat3^deMP^QNUp86&IT!c zqPSnCruij03`x>z+@iw*M~X9AAdrT8^^tmUAUCpy7{${7-)V~{oa5u(OdQ*5I{Z4C zZgsOsbv(+VATa2-Acir?xLlro>%-))-41{H+wnTx!DK>(`#b^HcgXfSa zwL+QQ42W&Ef0P(Ss@jyQV>wvyqcc*mod^25Z`6ONsb6u<;Q!(1+{2mv|2Ix&r4y-; zl>O zlD3N^zcBa~EO*$H-UfK^UqTd<+Ixk!_j--FG3qaRS~-LD>lwUD1rEkOKPbXMlOqq0 zVIYCi{5nbH`2wPP8$bK1Kd9*C-iZ4flr@k z({zYH@I6b1{QiS#c{}+9g$9K_t}e|);>GOgOcKU3CgKJzv^qAxd-XoQq1UOgsg<8f z#X9Dj@5{h=3u@HBu|*a9gf+-W2pNom(M?4DMx!k-(oSEpa|1wn}PoRj7g@kk+i=F^4iVtdTe`7;osd%U9d z*1ct5rMj*^NJxlROQ8ptnLku2rL^>vp5nJXYJNkTgiHdoq_F~ZzE4OVxkVlL%x~Ts zrqI?Ha~Gp{192Qt3j=+)#3K_xTVPT6&@dPy(qm}M5PserXsi}Gth=0!(o*Gv^)Iedb##-kPhJx_Z zBOL;*0?UZVwM1`QXp~q0IM2N+W0W2EES(F5Js>&GCeKg)XjP`DWsQDY>u3U%&U}H2 zKAc?~Qp`j$U3c1iovuZ@i{$*Ik359BBbgBpx%l!pVS3n@rFSF?vQyQeJ2mWP*+ddX z(6IlcHVYu)0Hny&+Xhg8^IJ@Ty{IZvtmAWeios>tM#P`ARtGkl#ZABM9$x<=1%aKj zb1i&7xNROW_;%8g^#ivHzc)(oC^WxivlS2z^VNZE?#SIS7UAjcQ-MFZkDtVqV>bOp zjQ$e&?q6L~l)0vJZhq=iS0df~i z)x$oQ`ic1s13p2%slMLS#V;xY3e9 zHR-*!bcj);>ELyZvJ@dLJ(k26sTDIAc83=%smuWu)DF4PpzY<@63vGk49|i*7w+CmQ3^hcE1{DN=*i7i4N;vmTGCrf5*@H6Y z#7qM=E<6O5|CQzPsN_pNuY`lyFO`g???B9j-z zTq9X484w8I^H%iFlUv#J4%YkFTP@zPfT02462LZHlfV420&5#_iw)D(6(_#yWhGhrlD@(Q)hLT-NFBa>z`$E781#W0&EZzD(WVERDpFN#t zhARey>|d)fzb#b*0F^0;4z}O6OhLxymOep1N-goZ^nXhXWb%QcNO*R~{$jih}*h zv+rdZaOw*#p|E0R?TnPc{B9B-;1#atNNRj$xn7vezY!rEp0)jc*Ik?+CxjI^7CR{I zH;8=g$NmhDo6UxSL%%S}6_$lDdNN0u=G*wf1@_?KDF1C6?_0O4OcX)7y?L}tklL(| zTNPRbR%Rk3PM%gwuC*81p>kCjS}A_p+rk!Ylqrdz86IA>F+{3*ttGQfUr47Mv-NkAD551i+! z&+G?lDRrk-$ewEL&AR9{D&(xp`qlxO_8c5@S$V-$G}dkkSYv0iG~-#0j(PrWvV!1Q zooK%>^&12;B=Gn{$E-ZqbdUyLNnSElhr@COuxH?b2lN}a%~6*^mYqwS-bW=$eC7=>CnP*U0A94Na64t%ov4m`rF|ra2)7n# zGtAMQsW+@5<3WF`p>4WSQ@Gsf&yFBm>yOvR52%?10X! z1#5uGR!>%)V5K2$s%W-$-EENOw|3V19e#1rzA7#|{e^7LUy18m6()VSTjePf&q)?E zPbN0yyJ)1i#*`aZJUcR-@lQ%Si*8}{Z>bO%o&5YC0AS1!Z|_JV`tH?9C3Mk^U3s2| zSiwNh&)|U>4=?0|GpZ+VcimBV~^ej5m z07JJp^9^ZHjM29xUpe@XK**jtd*c3WIy9*Pk$tD=8%Za%;^dhOKTDtg`8c2S%Z-t} zqIKL0vj??IVp2XQWXJPjf&Ad{C-`{b!k~P@iU9~CU!55TowF=

$K3!rTs}%(TL#4|WFgmfV*Ft8;h501Qau@=P9naz)Hv@^-X4i$JC!vU5zH z`;h=*{c_IC2W)l;xb*%8G@16otYB`rz+)&cs~8(-y2uHkf{g+gz=3X(9j%i9V4RL+ z1zrttRU}l4$;H!wb}nP_22q@oDA=QpYu%9GMy^-8Uyhzmt{a{`J*ImjjFko>zPGN2 zKUeBgP5aFq`+9LT$6F{8al9!S`Gxo9uwyBH2;%Vo#xhq)QcY=ZfMpUP$H=xY3yjh6 zGQ4tZ*ZX`Y2Jdp@0np-9Z+alyGS;HbFzwzjlo>a-Z8xJ9F#vO9TkZFl1$r^GJ#dmheR0-$sE)?hx2- zSHFA29hnRx8SQ=Fu1)QPRoY!zcq~wbi!4s>-=9DwDPq^?MfPkFVu3C3t)z=&@=Nms z%}mVXkZ3-kR&o$v86SR-_7V&3W4+~hs&F=xF*KfbY;uIqTE}JfpR^s8tylRhAKBTr zW(6!Va3zgsuIPA1lJq2`&aKsgKz}7X+Hh~cCk@ZFVZ@HS^AEb8mwhY)mb61DNJ`B` z)k+a5iAHt2maaFJT2mcztUZ^zhWk;gzK;?-Yy(YWK~3DF#IWD^`uH^(EqFH|ZQ*Ve z9P9$D&VvV!c>Ueo~&_w74?ktAWq_ zihx)bV&u(kwcV-Ba!4Eso)w6H#M{SD3zGluPtF0e?AT;v9xfzonRu!P+_66}P2Q z<)x8~k=ECBH@q4OdQ0*d%#VGf*BEP*^Y4`)YnO-}G z@L3A*rqVStne{!`ds2HD<{xV!1bkxn(0i>|UIhK5M(exYy`m416liHL%+$W2lgxW* zUKr&PNf7DV_TbUfHd-CzJ%|Sd?B_SHWM>BtwM@(bc_dl?^Ugq=K*D`hJ*f>=ZrgS% z029cYvu&=z3PL3(MPwHiBqT)fz-I$QrbM5ah$6DKsO^I^+CHR} zQDb{7a(_Kmzo&`y9 z;^D4;?Xi{^(Wc)KrxrdW2q8ihIDf+A>;2=I)0gBKkHOVkOz!zIOAP`7t{`Q(w{&_k zO|nD|=`|-k92t)($<0#O7OE)^oAzXJSx9q*3(g|CbHQ)2uq6QWd_dTQLEJRk+V?_M z%$7el+eTeN*)l9yse=83MsP(irsF&h6;`;h`!Js;Y*Bj=qK;B?2jLDb~rye$4gKs=>Vc%)@SU{Xk-xM7ljlI?iGDpSEX~k0{O#;o#Q&sk=MtB?c;)a$n#yr4@3`CL z_{^rTT1AphEO>CIu!0{qXEIEoBOs#}viidQ`^h06)@u)-0DSY~2Kd*2k>4{rLmteF z1!6{J$g%uYxw^grBZW_I+sQ0ki0Tuj+{w>>CLftPyh6sNmDnC|dP)psvRrqoc8!u; zF!trC)YC^H3yyYMR0(^1*iTA)+*4COMeG=D7uymFUQ(xEBulc>TUiA^&ew|>MIcb# z@>=fAOv0HY@8!AZoz}(7UP|2Dc3DIHz01t{xS}(CFVgsHFs6#sxkgd%@+Zv59n6ZT zMJ#wOyqQl+%D&$tWZ(H#tcu1EON|WMYe)LIjf`3n*ho(?#d&$A?`A8~j#Er3|9Yhp z@l55}EAJj4mQl0G-=zG-n;9bHb7I|7hTb)D$J!^li=%b>h> z7rB_pee_x;2~EFyyzq5B6ObiKw)bpw(`FI92X0)b{Y|KLA3dyyenp;**|*vwk~mVSeG8Z9oag5Jc*v{ zhYAAEAOpSAN1ieHwcGVAp{!(DsafetRS!TCDe(BXUEmkmGt%(tW#KJU?i`lm{LiF4 z8kp#$dd#?9`F{Xo>qtRg56Y6oMAg}Pl?*WX)70S-6|gjcH|t$1th;9~bl#uDSi_+m z-A_eY`^V!39Y>qcbk?G9vk8+c zN5;+Q@hD!2e^-`9k4v$f@59&U=D%qtGliL^ACQ=t!m?OK+C*ewVEN#L-DXFu!g|tIa$B&8S(YTS%8dJDK)Rt0x-VSYErL zRscQve==Y}n^~D?jEM>X#kgo!MJio}$vn_f<}^IYvZM$CByddRI6`dkM)Q>OpEOvJp`sU$&e;X- z`lIZ&X;L?oC^Nxg2nTAR=V~H+mZ`gx=kXX!q{qlEXZZ4MAj7kP@vDoY(i_w3(q0ZX zR#5~*dN7|(ohC~4*v^g7y4fzb z7A;dNwmV~*?Q13R2*v-ym@P=9 zhDWFxM|f3HkQx$Q?#)1AjCXpbJFwh&jnc4ew5RLrL0JKHxjI8VBeN-<$+GWdAG)g& zV9c*ul|IvWbfNKPZ~sAC(-Xy-aX+s!>OD;E_?l{vc*RI-k{aW6GeZlZ+Z z^hCk(Fd{$5E$*qAyEWMRLRl#t!%wm3%gdO&d@Xe@0>akZh{rC0sQZf&;0k4{08QTc zHFQ$!XQXJ9sv>(ODYrx=zuz5s4Z!}598&0($yDkdM|}N@L?zVS*~2`r4jcDETT0PO zY;o=0WW)ffkx;9$@^c}Og33;xToE}A*ca=U0lsbt;Iux=r2ul^ZHtJkF*g7eKK&IT zG6lv_mq&*8W;-hM({W1)bK4OJJ}<@Za%LQ0Bm?Q7Ak^SCfA5{Ns#rk&8_+0H(e(V+ z@pIe*(;V=4bnACK&azdkuh1K9AQq%_I}q7()Dd433_o!JcG15-)< zRs2qt^R}`ZyF)OMe+2cty0!UiOOa-Gd_?q> z=r=MsVUACHzW0%lK!b%#slp=y%U__t;W@N1l_xXbnx#%uDlHc&I zV-Yn*R~Eyje>Tii(3?e^FrVyC!fua0RED)5hz=q|39sJ*X`6~k7N_#O}{Xsn!dyAjX48Y&pqpNh(b*6Bu1C*RjKQET-)1IK!KwU zheMetTy*K7agPRw&H8Mi@2Ndb2LyfLBRN{fK#V`$&V#j(9@>J4hZC`*mc-ZMFjRb8 z`S3v(5KS!h%fm!1P)p(|7q`>zBi1SPlH3&d5-Q!%?M<$T;+xmIC<_eLj<)qJT*$bH z*S|};=5DT_gGi#^^C)2aR0@woQIp!w^R~$tcdJ8~i!;(BFrxK(Z{Gya76B}r(Z>{c z*5e{K@Pms?nut%d#I-RPKeZ@svq^Zl%xUs;8rdv0hzFCgB6ew~$GvWOj-3YR_mg>ofj09r6(3M#1K z&-zKms_t9tE(jRIz4YKNmsP=OHI9$V7nIhMk`RR2SI!)l-pbDkz+9?=?;3qpF9g!k zk4yk7+*>^SzAi*jl%uvRtUX41sLp3{JnQzW*m%dLPSy-Rem5mpy|eB{ftv64jp9!> z1Rty$DoBoa{6t)z6$ng%PbkwtZhPl&hd0g-{1(zGeOFF3)D$q-T3V%PTiSr@B7HxW zldr~0&=*)W?;lfw6mltmZ*CX$qjc+{X5al+n=ua{t-qX)O=9pifn8%s-f zVotB&-e?_eeM=s=D8EuM-h{oqi$zYu-n6#TQcJ1l*OKL7&L!Si>Y@c^?z0POmn`$0 zxQ{;Vhf(2^e)1f=4D^+n++5w z6f_5eW9UHXL!!Ehx=|F|U7a#n9j>|9a$FBRzl3Buw%>eq@z^2frlnM>E?2(bpVo?Q zWL(ZoYa~~Jlek^&O9F<}lmmplb@VWHnM1P*{0LkJmA5?t@G-XLBJ5=MiWh=d_V*5X 
zfDE`b7bw_WPntG=@0k3m(Cld6QE^#8fo0!$n%|PKPAi`KA)kvK`uAMfv!<8Cp`4@LR-E~EUi^LO}7DT10H&G`D_1wv+J=o>Sc+w z*VVrQ8BDxd40NLfq^E$j&$a294-~WpNY^#b#u0zd3=ZJr8U>dJJnYxQEH|0Ha-ni$Er07~+uFtGrM zBHmFOnEjxYb#58aFKA7E1`bnIb%gShbLa&o9==5KX7r?Bt@j6wa8OCVpJVIN*8 zFFrYb8EUmoO9aT%iyu#?8c$idD|e|marvYVsql5JNs^&v*Tb6oUxL3KF2^wANK*w< zjaR#^$md!kwDdWbatW<{&mbp$-ZxL_)c4hF78_Api{tS#9Fz2w?}zp|){3Ypyr4hV z{k=K(Z$ZaCXgN>X2mXA&ycYXrba831VL(_Jgfe^`>-XbZ$wiqcLkw5*lY$-Cd;1ly zDi}>g%|FR4F8#K;b0Zf30JTdqS}cGzi}$$+*hb#+9PU#{aUDCZqGyeM@9N}AL9h!q z^ie~e+Df7Umg(wJSEoTr%FPMcRzoJAZ^cL$sX>QV6P2zFp3TU*$E!`sfwi<6g>;?C z*TD~r$6C~ws)r!>-u3o`Je64KIL7#_kanp>YBz?qh>KbyfGzNlA*e;dN3I^F$75TM z()u+pdqz`v`|AM=?4C+WZ3Uk(6Vl?cy-jIX=)R*-Vr!AYm8eEG6l`swsCCpcOBUdVeL>T`|RHD(oyKvxYv%P@e=W`762i$?Cq7*33s|C4XquM7! z_=&}TeAn}JD`t8P&;5B?aj`uTNezLJRd{w2%>!PKpH^ zRrTAFlXkaQz#jk&$2dW1pV`+}CPRXOhx&2pwH`TU+Nf}dEPv&mX7FT9-$h`{MOen8>L2wbHvjOd z=rCiD&hJXOw55Eywiv=EN7M|M7zHaIr-x*^L?Z;m#=9pC+576X1S}VLO=y0P?{On% zw^-N&z<2jXUq9?8Si``LwPGW@YJqTD=()va){P8-h&@uC*O|3gZ{X4-@8Ej`SLS)7 z>08j@=o5jV2TmfO{?)I%(Kez8&HCBMV~vD!8$C7#mruPrH*hK19X|Wc%+2Y_pMB?8=`lSMuz(|K%JpG43v13oZ(F{wpy{ zA%OrasKr=40CmMW%P)q^X)Y#u|A^eLYnZAs)mf<6 zmt#>@MRA+LR~$1Tt=!%^btPCU>ghZ5{dSC?5mr@V>>HS-IO=BlZEP_Rl{Z}+s|W@) z+0+NA77u`q^xPWbc2M;$eSi0&921Y$`Q{VN^joL5KAtQp5}uq37aZ#`$}CxMTzu!o zbbLbp&oSdBmdFz@_;n+t4+~4h|9Ek|<7(=rmv`$;)VwwIivXHSEDDa9{0AzXWHhSK4V(IBUuVMRr<$|z`+sBkf z#4RJc#98qBET+#$En|x1oqN-JK2(mg$UTldtHBBONTLu)VRx+%uc550C6r_7N?%ebe z%2nvXF-Rjmh)|YON%;8=4lqZL0b$x z)PLx4!!t5R-rXJpZi!O;vK)|kF05)8C*M}T0`eL8tP>wzFnFo&R5RRq4n+=e{)XY_ z=j#hvEB&%#w)A`A4=VAl<#ibyV;DhzC70vs(RbxvWSa+j>LoX-5lTJ=E_z6Q z+XHMXvZlena8s0lod3$Bg|xc1?n|3(?{S3PDLlzf;ThS=LV*&WYj;$lj{#v4PF)Rg z^psye(0Em&VzX8#aH!ozD%n0p5&6AKUxoUtw`+QpN|olHDxxB>?5Q%Cc`bnzQC+WQ z+o$CLs4D=Bea_Q0zbOhCS7FubI)4h$%a27oH703pM;D8S@`@7ohyxpexUalM;oaio zHd<)Wk(fRj&+q97-@~i`h}a27B%{xDcX@pRy@(I5kf_nTIdRLW2g^uz|2?-= zaoG-~Mt6O(>4Y@eE1J}D`E@LR3H7PQz5h8cVIm*IU-^WNwIS7&{@RywBmZqjIcBJU z69#=EZmLHXG(+to_Es}o6NUP|j&iEB<|b5mq2 zwFY!_w&uw-(d913gN6%0~F!SfEBuOeFvb z|32F)Ibf%FYO1-XkDRZ)#zfteO~a!}BfV(Fvx4e&-&&5(E9dGQS%FR(nU?{(AM=g+ zUHN#j!GQKN^IMils!}>=xz9QuCOsP6R+{EJPAK`D8X$b*Ut<2k49~T3()jMW_?1rs z+O7Uf{j6cI64zRwyY%hz&NSQTpcSI>@uv6DLI0@>GR{xuv5APZmiyQG8O z-=(-)0{s4T_FDO?pq37|mF_}Z&8uANSut3d;<@;0L(&PTEPSR0FWd#zZqvrB(JK?S zpkH?XmH2%1tZ3`FyACUBfxY@yqHA1g>sI-e*~UdCvHxkVc$UcY{3{W&rSVq+vppE- ze0lecQj!i6^#Z+TfLj=E1N?!}_^<)&A0=6B3!{bbT;_zuW32K$t*dF}j)LaF;;ufC zSrC`H==Gv4&d+A0W`QpIE#X|RtUy+X-Q-!(t-Igh<#GFerJTk!L^h9piiSCi`X z81CuY>pw7Al^Jaa(Kx3>p)d1+o|wj(vmW(5E8i!6Sl!7m@;Lnb4#cwEw6Wwv73OH- z(n$2oQ$Td8x2W~}aE~AuhOZGw(}RmwXT961wzTyNd>9eoQ-QeQ_9F+kTW8BOdTZ~~=ndh);(QC8hJz{&eT5AU~w9b#XI8{QQw%AOj zO6x41`QEamlLjNOJ}g=lZpxK!Ss6Tp-wqoeq!kX2!*G}XO7t9mUVu33TCVrlH>EYr zDpR;~bR)9JjeCxv!w-M}ajs0JbkG7vGv-`$@dWIr(#fEOlPK@M66^EtDr4m=Fc{DQ z!O7&U>Ko7$&0-+3^@5UKpohF z^3l&qt2`1TU0hQmY|ix9B?}FS=kEp7ETz%w8mqWcWwo7|zs?2Q>7zUMHUer4#eRvj zB=^BVxqf1SnatH(lbNjAJW%1#xK|y1?vFSLDZV=03BiBIH>di z`%CDajxa45#Ay)e_eR8bmk(3jng`9lo$GZ>Ec&;b=hdQRN~W*0s3j(= zuXr1z6BFCvOXe8I%sM?tb}5=*xNe@KcplM2R4xU!d=m~>b0Hh4i5J7uO`91%4HTAV zDvnZCYc;-kK0<>TPDdJDWMVp-=lu5u8 z>RISPVfesg(L@C8C>taE;#S$B4cGX8drJ?YV;MG4~h>j-70;&z`M95C}FZ(JTx9O zl;(PX&$p%cg+0wul*tm%Mj<2bS+Jk+K&zF1zIN_s4@KM#!>|5s9xb5I7bx;QhclIm zrDnf{I1Mn=gK>u{7IBH`J>G}HMb?%CLV85bUiISOX`)I3&wO`$6lb*86yFWI%&TmVjpe!nxni&`rN7H7|}#6ubqAh9A3+r#OWgrCYvm>saKDR*l@go(!ZBvNVrQ17{R1&smD zG}S_GBLEG1kiEgqKfOhjl<8D{Eg0uMlCpD%2qK|iOjaZMt|s79kvc4!@V1 z0QBoi^`#6vztM(iFwDL*^_Yrgddn< z4z#A4J$Yv2Vr*Oz9C`v3bIKS*3mot&t5aUZ5c0$p0nF2fI^%&_`s}=wvOlQWW0&d( 
z0_)78n3gM3D+digI&efCcl0c*3>Q5OcavOsHd&eW&b+N(_eGRp|EIlh(s{;cF{hn- zJ}|j1)HZ6h>de32VADRcfxoq7hJSh0H{kDY>j8t0a(Eux)nD~z1D~zs9Nd2XeeiFQ zX_Js^GyCxO7Qy$;0sl)i(+g#o@OHqvNv+s#FP|y|23F?3KCC43k_*qdkUahJ&b1exs4z5L$ zQ?m`eu1+P7xaal+@7ZkU-StZSuj2+u-~P~nJ2E8Q21wArlMk+Iy^Vg$bu}7SIUWyY zDKAj<74D|SvkxDZI%1C8L&jCJXx`d||3yr7SlQ?n7WCFk(4Ku(+hnxoLbVgVRK6gS zNH!u=+cYpNt&ziKqFkZ^sja9vE!+m$z%_im8f2g?92+;coqrqsNydpF!nWW4T2`=n z{K0!4F`#FROVmt9X`=Sxxftv-tq^W z(->7#Jp&is8Z2hI4Abbu#9viY_+I-Mi|?H*mdB!B@ybv>UYf?bmcpz3O?| z`T|_cMkMc{(qDXxC8;i_$% z@8`LLX`&_q(Z7zANGaWQq4?w*8G5S7fH`aPW5$77Sy$S zFF3$RSAUt~Ce)~IIk4HeI`XgC466te59@%knb0>FwD<4X zSaw~kYN8Y5y~#fthvBAF2DY($0N4Lj=C%44GGY#0=#>LW(_Q&9D(0qS&T8Gf&LfCC zLT$Xf)b$0sdN(@nhL*%a@$vQc^sDmSMR2`wbt;1H`tZY`%SnCE@nU8oD#iw&DoJU@ z8}1u)_$=G2?}bopkJd32i>m%-+GDTFE-sMt`TXu9;86-r8rB8;K?01kLP(6xbr4lv0Sxm*V#GrQ%?Lh4fceKW zW5f3MI-(_8_uYI_?wqo|NVlRDk@}d-$0#gGmR^Zof{1>QMI2-3*zZhq&x<2TgyK5& z-aUm#?|xt$PUtEkg3fITkjgFCv!InkbGLVh5;uLpqlaJJ za5=#$idw~3(gUE!KE5?~R>Z;=&DLo}i88iT*H}qpDDIi76a?AGcB?Gc;G{U&*Eq#Z zHtbX0rFOh zyK+@?vg-h4lW_B2?Q(qey%Eg?_!XDA!u2STQCq%<)rW~=VDHugk z_Yc2fSmohoyt#k>zD$$a!YJ$AE}o9;7@S2`I{5tiRFUos!sWf){u@zTEHn*9ve1@$ z`*oZeI))p6Np;#Cp*K!7^gXNGEz;(F+V)1ZW(QED>jEvR^;l4(`0*nNK;l^?${vY< z)63+~WMqQ8IkiJA1_PCnF6P1lOeA=CuDUd;ZEkXh@ecpT$|d)4&k+6NWqfGWOd9vY zugI*HXA|TS2KOp@3*$D>AF{QNs9io`UTtR1zVeZg#}C4m48Q~H7Tj!V%}2FfeY0wq z2E7CIWs-)BelP?)y6f+Yya4Zw^DA!?%)?*(#|TvS%Ifdy>Yy6nSIsK^=zaIcmkqGr zRQ-ZbJ=p8c4eXHoA4#a_R|h$m}wy)E(w3#V@(C?^?^ z8ZvIv!hET$q3p$QuaDKc;1{mn6HOiBmFojcH_gqLnhJCtnvn_PqGHi-=dyd-Ms1J5 z%5CXxS0+dg^S%2<1hpc0DDe-zI{#buhBM?s;fZ!4_Vf&?;s)va($zCX46uh{f{^lX zxMu5HUAcLp)tPrkO+-929Tro1u?a%Zh#p@hb2l)%#W^(l zB5E%W(pVG7t%)H&05-dpe|Cs3#LbSq5Ple4L`QsL_ek$jGl0hCo4If(A6(z21$)iX zBEQSNzNBCJ2hquL7mm!vz;Zoh7i+#9Uq{!I?qe{tSvgZ1$Omv!aiqb2eK}t8AWM8b z*t9Suwfa4{^5eTJOZwhGIaE;PiGUj(J5Lln{T_1ruLMpfF^NKXZ{GPMAbLin(x+(F z4t_0}mhc^3lc*h26#P}*{`7P;4+0<783`~hd(?7bJ~gz~U2eJ&RuS>t&81{d)3x=Z zzWJuBb{In#Z2BL63>pACP5B#orfH>v{NJ4HtK~J2Oh-5$&zwIrB`; z`|p1xK581g%%)7r&9NWcjQ;`dFS6D|DUX zesiN#znU&^xMLkgy6so!fpat8tgvVxuZ|?HKK}Pk#EI&Cq~?N0;PGRa7*U+Krhm{xfbD&n3GZI7-k-il|QHBy;p)c0FIV&SXpm z>Dj=di~bN1c2}cw`Aj`fawd7?iBAGp-H9e`n zGs%q@adrAY>rm`)coAc^tVkPWEWf8-ds)nJ51CS%?-mY^2-b>kw1zI5Tx&B`#n&aZ zQvyp&ERF=2m*Kn7e><=>SHKK z`LKt7Ch~>e&OsSSB&7EAY^X#FDdwx?ysw>a6N9nU@X8!mvxjoSCtPP#{(=0^|x*oj2PjWU@KsSs^S!Yunz z@I(T+f-C0i8a)EY_ogGEttX}ius{WwcaQI6ex0+2$FRtnx&wB;TSq|{{{HgV0O>=Gw38A3H?HAM<7FlQhdU;$Jm?@swC;v&#u zJ)eZ@k^aOtyF~QZC$H$%*mRqPpzVb;Tc{?--2bP=Gy`L0qv~bKUhm|{62K4 zegXc}ZwBx~KxX;AGGi3PIPl{NAof{DWGou9MtkENyLThL)|=^@|0p#?0cbgg=GUmF zniX`id`9p)exwmv_x8SO)&XiOxjSYOlsbaeQ3q?C{gd~5tiv9l)T9Kwjv68dchAcN zNeL7St_^BaB7BHgb_7|jAgY-MUdK7>GHDlWpg11Zvrxn8tW4bwBe+Mg7EO7CP^lBl#$qb&V49?jJ;;j+oB9#SKX^E_2#dz?#jw!p3dS`3)%lVP_rjWwoXSkxMei= z-#1Habufo}g{C$NVsJvgk(7WFtFY9fv4IKFmd1U3m1E8xBhkYK+{VgSQyYC)VcA;D z)A#4H`c>UB0p-D-E$0W1IUo}X33fqS{vSo>;g{t4|KZNJI@LLCwXz)L-sUXH(Qp9@ zIl!&u7E^QJ9_O^og%by^QV{{s<3JKu<;Il)QYv*)(ZH~rxzF$U{RhB{=YH<{bHCr$ zRr}we$jZ+IhoxKI-qAt1IVj-JN+9)Ee9T z84N%+j8)6)vwaIH@6)7DZ6a8)!|XUv3?jVTN#j+1fOTn5tNbi_@ZY+Eg7{yUP<&F) zA7)BVCLnjF7%E+nbQHPNXECB&?b{2fTSEwK`PraZG}CxYpYU-n8p1dx4jl^Wio|u8 zuHBP8{PXxE_+nV~)IJ@Spi6TX(}uu2Kt`4)zh&C2u+b(YDV6;KiLw)}aFU|nIWLG{ zJ%m_~UDR3CH&smEl905k8kAXt|v!k0PUp>1tK z&XEmvM~E0)t%%onCk+&HbtVUu8Px`!>_~9@maPE_c~`|nVki7xb3e$OSF6rY(KcP5`Xc9X@E!rCc6SpDS7@X>*) z#8Ek8`NaNu53h6G8R<&q6SdgpPihNRJ`cR&=|dLHn^JRp)=t8ZP9|rG>FG-gC!>GqZGngpc$xkA=)cS_PtQyRtfbfy)zQF3)9;$9`fq1HW4K>fR2Z6Q-n!8E zPyc)In_<0drDrzow<41EH2kJY0gAdPZXI{#Yj&Z*fbF|$X|#Y9D2hV_iW$8{o10Nx 
z25WXhPg}lyI1d&sBRR0}^$>z~jJ*@erbs2#_B$$QZo0<7p#HVC$Lu55k`l*;t10%u z6pTO+!=l(h++Bte{|v=!+x9kUIj8T6bY;_ubxZ6vGNIe zIO#KgI3^=BRX&n~HUg>xzr6}qCX`-MZCwTi>`4I=5laXsyK;jf^|XOCt;W~3j8vBl zlgP}YiToHH_vET>UF`!ma`Q6M*_o%f%K2VD{BLL|FDLrM1Kg81%JK>;u+)lZH0Plb zkMYKv%l>wl$(t5T#01WNEU!8_KJXIDf$|PA=d$y)$$P7oqz&gOBmX!Ey}qRHo=2PWm<4 z>0l=pCp~$}4OWV1CZ-(BmS5BQshtQ+)pjm{C5`n`~DN-oB z(F_<$gEX^sei|ZQ$X^rt0;y_0tud_KWK~(*+}n5-053 zhB+a$>5ae-v*iyT9!t@72||y<9#ONBfQUnixTX}Ft5EdBd?K?;*pfGxs6#*~{8$+O z@6ges3hLgS#g6>qsyU3iTrgyaF*Tx#IHge#w)`tlm*IW&OZDiN{iTYGS2{9j8Dy6B z4rY#0dVyH;Y0gDBZBHTD7np;JQ`FTztJ)1PLsuFh)8wG3cdxt$GN}=pzhVNzSi{-t zH!ll;>wWRy=i2XjCaV`w7EU-IK$v-Gl83M_04QO)=~0jJY>a9qTlVP_IKewH%v`V# zu4g_G@a}FVplm^=e_%DjwALwW+6hCO-}YZTX@vlKELgGpgkM9R_jFrZ$SVKTtLnmD zdx-urH-yg|{^qZPdn00-*(xOr{yo=_G3^#4nA}RvQf^F=q?*$Qvn%$!0!7*w(`c)L zdlwLq?Ei<}y}$N;&@yNqAm5+gv=oys$cT&hUE=Q`yu(aH(dA3S(MM`isqp~CS#(6i z&6q;rPb_5UmfnuA+yo-NC(fvM*|No}6EFAye9_n7x0MGTTtv10)4k)+)f=(`A@id7 zg&n*^C=Q53SNX$O88D#nlvZW0M|Ep&DZQ+1@#4dyPSVoqu zP~iyO5*bTU>a7+axc9JXUhp`;ae3lU=7~S#rPZm`dmB!y!+Ag4~UIeDn09e z{u=)sVh7!*frcZ2QRZ)&?_An$2Wz2r;+m^(nM+3`IS?$*;=EW$mk#GofRLka!#Y79 zFa^&I8r}>BFlrfc;bthC<<*I;p~}Z2fpU?*g7hZriMkg|zY3LbyUkX`X6cH1ECj`e zFn)+Wr*7xc)=AOv?$>;A+!~+N-@?}eY7hkwq)-1Idc<>V;ePMaYZwXRZCjHe-ukyi zq7Smn-naV0>D^ ze2!WDsk9-RjC=XQ zYu>*99Xk0$G`+dBqD6RQ2^3Z4v)Db5Z~s1|uT@ztu+6qEcM2$ZkG766%2ltNw#E;j z0-J$<?? z_--9=G4IDZs`iUi8`l;O%a7eoc;GM>-BDX`eJH<{)t8wO7!;0X0fG8bYj{1WOZ7>g zr<3ZqyB}3O9D#_lX);`5CA#QqO2qi3Iue5imctPG7@1}P zPVG>13n^JZY-zaGa<2Y;H7>JCz|^VtHpF@;wEnv-<#mx46(yy5f3Ii+v}M27l(ov2 z{Wn}rX=8FCG6#+!Z()fRtJ_vzNZmSDv=4Lo`g9todE<)fzfk45h_R0)2G$^{XoDN- zPcIgQ?G-L=zs<4?QWEYXTWAGf{%Y_ZIg@FjsfbHn%Xw>~`}!EL69j&*SUz!}Nm;_O zs%0ku#Gi1oS+lgfBkF)5*q4JHhCR7g(q|&;!4uVXKfM`Ca-q?}6^>p6wsoz(>iVp? z{hSShrs_<8jxIwBVV~nQs{}T=GRN{nc_PcxZMI9F6MfvJGlZnvw0bEot;7fS(5Es5 zoC{RAi?P8_xIuKV`DZAbw8S3SRpx9T9b{43;E@=yqEgvy9Rwox@mGfY^Ir9eA5i8- zU>$&sd#yL75w@AXP-SpAKNYy)mvM~4VCUmbX12?f%^YhQN{2y8 z$ggYAD+*sd%y9!>n5qwBwT^bwbOahV&V2k>aabizPR?g z1!Bt6I_aoao$-baDDvkbiBTP_t^W$3rJ2>jY3?EJoe%=PyCBuq$~%12XHesxptE`G zs+P^Ir4M`X148k3he|A4B_@6`bq;jPKk_-3ytjK(ajGCeqVR!aZ2x&TXXU6Mrh604 zqtv$p5)KPo(~=dn0eW`~BLGT82}4t0KI10tJ=c2jZSl~kpd4TeM;!z)8ws{_UVEoa zrYB8gdYM!CzRTf^J7Qf^H`{Zr$2)JiZaNtF3$&3r>_M#g-Lens!=)~XHpXt$@wtOE z#9rIYaBs|!uvu-#Wm=+RgXcq~LQ?z|+(IWpEC(5$g)tAsW0m zqtzQ@i+4_Yw5sa*sSd^YN22A?ca5AI>DxBQ{sDd~+X`=|XI^QzXjIeS31R%d*+db) zsI{wFG)bA+Q_LuOJ{0nU_U9@>wpKku(SCb9@Ko~_p(@+$ z^8U-}g_JwbQdDkeZr9-Q(C@ww$m9Z(px>SjR}3(E`?Qq4zRZnoSw7(2jUBNcs%T}6 z*v^%Nr(FBvw|%f9S2&*-`L{wUgTUhsYR zlOzwDfGfeXWJi#-xaw zzU7i^>-iH_IK0NwuX7~tF6Bbnc`{|Cz~1Xsig33%y01vWY3k0xRYIO8vC# zo?AUAa=q-j z0pof6`E92Ya3(FlB?Tbu_0H1U+S@Rx!Rbt`2hixh-(^r&2?)NWGY)z%-Y)~o<9^{;PtE}#fue>KOi`w)g4@#@sT@BFo z{O$aDYXx~;V2xq<AG&Z^rsKb&tIE&a>TVrk}joKz79caxj0AaQ+;^gY5Kb;@M~J6!-V#SV_?3($B^I zV@^g=5-Y)j+A`P9!-1Pitl=}{ zoqq>ud{8YT*4yA%iZDYubli2M>h++^q+GdNe6xdAmG`iGbfvrC+uI3~>z7?@ej(}F zZhC7)3V-w%=d?vcFS_@!Rp*Ou9&vz2(cHwJtv@q}NtuMOc3`QCvR1k2KO)k0(Pe~( z5v3kYJ-Kbxc1wc6YUQL8jMTf}71K|Fz}3+SDLWSg*w{26uyFiyyXA)HfzU!X=vcPa z>`k^)bOjZhjkm|deG0B`bJ*~dKaiJyvjb};9{7do?N4?8nWqgPykhb~FLkbNSgUa;XDRR8n zt*`{g-(%HhqV$|A2{?#%d5@iEH>S(vvhk4(hyNuBkNQw!BKaZn6SI#OMvJvhLzCqS zi_RDMJ*&Fo##ZHXG^z158yfJF65ZH0{|==<|5uT6nacAy*h4dS7MzT!-xV}YhVqg| zs#1lo4uZmm7n)##%~G|-lsPfaG?9mmpMoNgSw+dJa!OFELV1MBaNQ4TU@_1ta$Vej zJjy<;LP?clk*`QgbNA?~g!-B?r+PotY{T~Pm+~3r#in1_9qh$Ko2h}h5A)@1VFLF#x5u8 z3c}v}5Y2?}TUV8cI>;v&&MjAVSP;*+;-Q z+Q5SsHOuf%(weKH2v&TZVe&Ne8+- z_+Y(a!*@r;!QuX%znwbg+v4J=!5g5QpSTh4{vKDd&J|hdV})Lqh<@iJ$uuX1-v+#s 
z)RETxXY$n#%W<_1k#s-jaPNNJkY@`A9r3o{dNSpq7dDn23dMJM>$;7(sOWNgr}8FBlCa__zOg8H9w(oR;^BS5I>#iwL zC(FzB{yGFQBGcFcPQ-zJB@FaKn7b(cVD@KYjZm(RS(g~USVk%>RUzxh^q)$#jVwj_ zjFuSE&gjb@@$c7xqPTTs@zn$le{7;@UG%!y(9d@^2+?%%Hkw)PRG|3ZA@`jAo0y~l zBkc;Xm|%sqGt7sH0b{SnKHt2L$@$1UdN*T@hD+BskUBbC;rbb7Sz#23fK(30Ej+wv z-};O5#1n~vJ?K@}K6zi;IDoTdziZICzJqg$XXIb4z#RW1YMbkwNwXd+LQz-COe9;3 z;N8taHX}~@uXE0K^9Z4;)^>u#BQIK8Qlj`Bd2Y+*W zH--9~ymd(n?RUJH{{M+6XRd+m57XS%mUIdfEpZ!FF&^FV1;2gl1^Kw9xK$dLlikyK zJdOm1R9#7QIU1XSLZXFS3X z1Hz*)%(}9q3^9;m+2Z62M$w{=$R+;IE4HerU;exy-CL2X=eLSQ6nQ;`j## zN`kQW(C(wC%Dqf+wWOjF^BPlgMHkaS^8OS7Ql1;Oh=5`OQZ7n3(%puCiG&jdS zukU3KYb%AQ7*P*O!%=*uod}GOx2KnTZo6pEM#NiPzzNapsV9Xy#MC2h81ojdsGfsf zgi27ovm5_Alt>BRSae;(&sH}_6-zvPp>=&H+w?|EA~~=UZ#C*Of+Mk2!zxl=?lAH&Cha+bfTDVmA0+NP1UZH7?3Pt?@`4yJGJuz--f9d(t6%!?{#3vM>YIV| z-%1tMNyru-m8zAm$BFX(<(G@)NsvbnjS~^GIb{ly9+3pZ_o{!mlVn=UH1$qV)xx=+ zxZ2KAZBYc$E~pKSM-|(2VRAQ(6Yge+{jY)RLkxsi@isivQI<3ZzN&lqu^X<1rr0(n zpL4IIIse~@gHoM{6mA=h4Ujf)|5CV3*7A2k4XsmY!Vf3!3gFgtpSkKs=JT4nbgDDS z!Rdc_XUNcEf)%YR^(3T=w*!fR4%3zHuSO*rP6X1~Gl2kJ1a}!KFfiF7g{+A$^LNZZ^e7(49`(*XoP!CX<9C} zw;afDTREpotMy!bk*Irs7uP;wO^&2eg-PuiBB=b1rmbC|UF|dM%ACzHiw(rTRm}VGf1ri(Nn3ia+hb~enFsFCr%Eq5X(x$M zMFd=Y*~1eGt~X>aJ6A)iEwlqe^;TBKvn!*n?i5f_2FJT~U8SSYQK92@)*oVQMOt{v z%bQc@n5Yovc2!$*_>=m;L+^>sK zuGXirwTsYd!2_)NFeZTM-0-;qo}Ot&dh1C{{G?j>1KXgYU|g}Gl8?_LL0FRo3(jfb%eWzYPJxz-SnS17fU5d&bvxC4R75jP<3q;_d z)`+~L;A#1M?UbS#VK-vi0@4{YFhBghNTRHC)cj}j9El-ce~hCL>EZi!@Q4*yx7{RMR~?xQ-($rjN-0B0$>IP zlO37vomHJmJ!xBAR)up)GcCf9NX%NXpE64<>z$5D%sKE=`^eXL5^hIr1mf7+l*kBJY{kv2?M(S2sN z!+vkN!29Q*&MIDR!DG#nT25Hzz(HlXz*xYr9vfTNc_o*aoSl&a3IoV1agqFOD@MtV z2i)1|UtWB^y|qTp@lVpQf{w(xPLXodt&vU@p2Ti(VW*wq$Q4>v?ebHR#Dn} z4eWGl+I40Nnc%?ZAeQ;9?)oS7vQqf^6Z%7-$pIR`h(x> zu-55Cu_c6l5qrozkuyOK7<#zD&(a+e4Jot`0(N zPQMZCE$(RE-RsU(v$)_^QANT(zC|6}*5>a8lwR)7R7^ilK7V1ui-=m{Gv_h_`t}}F z^z~~qq6}1`A_ei=3(d(3>m`sJtCJ<^(T)FV4P^hLh%6{)0&iT%>VY3n>sbxQi}sQo zfn|nD!EL}|{U%E_)xVzay+Y|J)2S zC{b;Uaj`q43dgju=~dN8`(e|Dy3h9|@Pa(|Qn;o7Z$D zjhC9*Vl>8|i1onP}F%B%P@qTU}GGL8Q_QJpavpCf2B7Om7)GwhzJ1FVs- zjuQnJx7ySsY!oCKppF$Jvb$=}x6*)dJ8>J{Nms)`&-}v?Ply{a>SA{8_)>UtZIjlk zWKkt~fphb8U2B*Rc$9wj;Zf^%!P9jigSDer9tdyl>!F$NScyd+4bl*7ZS7C}yK3#w zL^HSrLlnIzSUH+omePnCtS7UpSq}Rdb3|-Gx5=5e_re#@$MY)xpgNE`DYh|$+S&t^ zzS6K(q!mG~ITiWcn0q4=9Pw?=dsan-Y{#8%{&s~I*85)ai|sVGg5>L>;W2B4Y4N*M ziMcrzmPm>5$aSvmkdkU5xBybR7zee%9&rPG%c_hPz0ZB;H|C2!V9KKbD1V8F@%^xzJ*LC zy2Pz!-#HVKgQ_7o$X9q~4XGd$TP2~0yDz2c&*!u(14$1n0>a!X?B`MAx#_K#fVHLX zHeG-I_JCF#a!@L-uI$x+NLjwE8TShnJ~j;%glDI#W{{%dsP?FcKVE3a@Lo+Hs&3o`P@=PMU`m?R(g)?&YkQ zV#&7b={EPSGX=+N!nOT{+sSSWm%XRG4(Qd2Wa*ie*6km~u2{ytfo4ImGE_}p`48a!ZqdE$fTxn3~fZn1iX&7ycC5~dzuTeW2=HY6heD|jrk=aV}3Gey+ySYI07!toMDI54=O ze%Q+9v|b}<uCCd_s@q*}juNwNF zP0SB^)NpJ`!I=2Q*`#5)QVVnc-is$}Y6F9(-#7%~q z+fYyEz7PB~mU8q@QDA&bQtfXW;!iqIK|udC$xkwyF{bVa)LiAXMhse88)%u#jetdY z(!?}&HQiivg*WBAd`(i+DaY>4k?R}RM-bQobO&|Sxx0YDTb*!PeEAO3i+7`FY7(IK zF^fBe@3zI%^mQhWIw7YtMG!nf84gZR+r5_ix}fq3r+q7RwXUEVK4x2H`X=V4Te#s}FPny|^_$Bq zBRq#lP#o2oJ5U=ROlP}=qP`%|iwD+7Zo9(c&)$tz6&a@ssdS)%Lht%$-!F)|c-ql3 z)fNHIaYofod581LNk*#!%895W_6RbSOeu57054TpFO2dY%#REuU*~O*O?4d}($<1O z2Jn_<5NAny(SU`-f-^MlA9?nz?L3C4V?Vc4M6td&fAg3^Vu$^+I+kr+D~P=oad6|f z@OxB|m(Tmb$9K}R8%!rZm;Lxf0~`H!=l~IWAZ9JLxevt~ zJH}*cB&ZEv!n>HNI5#)1wYLzk_QP-D(^IZ7Y$k~+x=F#W^(^>0Iz?J1M*GV|ZzzLr z_w3SQQ1r2(T)oOKeSS}D*4~?4&#CDk4FTDt1fzr!_C$4daglgsG2#4l3%NJ2xSon~ zYp0JU{M{S2H@nv{JjHII8?diIkLEU9UWT?gm=hl|eESj{8$2UE!&U{sCmX{4JXe^}A%q@bjXdn+kHRUW@Abw@tI(s>=~b%jC*{)8cWM+nPxM3XFe^vG$Y>tS>e%G_l!LqwGws&FT=vZKDlTE)h?jl zbyM(VZsk4;EF8I@o4TS>lMS3l=hyjSlZ4HJ`q@^V-p+@;IuQ+<)Nxil**()FAZB9; 
zHl~Wblg;?bkC?cyn*b6lw%)71S&ua=p~pXXkmHz2-KC*&JxFMihWI(f+n+37fE+HFn|67%RlM^nJQ=%d>OD5b6|~?9s}XN1N6+@_!ByU$g?&~p*xYprymkStKrHKQ zUg%0*lY-}+9jw)oLBahN`^qC@Jd)LMkQIfm2z0ghplF+8c#}*CzI_ z&d+oafSK5^U23e&sJGW8CCJL3r7m#+w-NcF_sCjFB}=GO?4hsqtLeqUHx5-1Os zsG{d&BL5dAyt>mBOq?%EWVy0q88vG#kKr=AZY(%I2orK2yy3#-)~pbh5Vqfz^hU$) zf_2v(npWlyn?B8_Dux+NQ6qLXHI2i|3-n4y<)57J;0a>`~c$gU*3-$q zTc}4-E`x#Nldi0&Sy*6!iGq#AMJw63V2`0`3hCfJUgmANs@VEY^6TJwOfe{Dr!K}s zzAw7s&=^AAJAXcjAQ*SeFgmfOVJ8=JSV1YhR%mF1SmmgP5h>8R*jQdDusZCuT2^Qs z(f}kmE1QBfX1}sM$3k)C#iP7bE20V=DOZ1n;99!lK(SNQo*TO!BzDnOHRh-XA-L7B zIi%WK-wvbL5Zr>zlVJMrkOpDiS-H&snGVQJfp;2>Yx^ef*Py87@zsD*%e=(0A|dUR zexGe`qYtz#$eLNo0^6j?k&nD&8%UwK51vnR=_yjzAC6`6am^ty>T zt?;@_NN_~K&dgsixl2z9;8;!kW_{wqhPpoPS!f|Q zFo>?(6IL0o#i(Zo>2m(LD-ofTL7623X6M+m_iZb^Hc)zdg=#~0v7hXxAqrI1a6bni5Rd%-Y>e8=3bDZwKHbffpb;f!K&~{AH$T*Vjf#hnuuyy>B zNQymSn%5)VBa6ysrg$F`+IIKJxr&H}UEL z5omhXQN&Ihy6JV1Mv2+fpftgL)A#N``ijCGRNWjGP<#&h08_=D9VD?@aXdHR9Bcow zE=%q-+qVnJy?prI@jt$8=nHAV1-Dtj#@Cfnm8BzRDS@bnFZrw+0cdqz5ZSy?F5dI6 z`rZ6r0fF{QaBMk>~?deyYl>fg~pRdy_|ayqN;!tbbTP7$b`TAHv(VoSPn~Xo=t-?Azo}W{iq6dfZ`F1)*%Ghkpo|1XlCl8WI&n`5W z#s&)6e!Np^+OG`BdTOTlml`W>{X@&+ueC9L1K)Gxy7WIwIfv}(OuLiHVn)19)ls4| z2KIr(9@eOa(A}=MVDNlzIJ79v0^NaJ7f1+5rNGMjeW~@YZCIU}2n*}H{=GJ-z}myP z0^Yn}pJP_46AEW#N=@>`y#G5ygSvlFUld@FC+q*xsjEqG9|;C@UAcq<96P>4Biv|_ z!@rmPx8|J%(R54!h4iXm17)b_fkOXxh+|xCcA&&J zOs(~wdwBmlslLq}8Y_>6>o^m$eU9)v#)t6q%r5}+}oEs3cgMn?64?FPr@;Q z$4-RG<;u%?jW-@C591UwpCjUTjYi!PRH{4dw^rc4DTFfpP^xd1+EzMz<__FR`}u|~ z{*`7>>CkB&d5ySJrRt%*&~$W7{?#pK{RxMWlau(Qp9c~FC(CkF4NP5$U=Pi{M##p`ea#$jqov?C4^P6w$zqdxUh|^vjE;Qt4iblzrin45O8kS*g zxeubih$m|CgY!2hhCoL$Wg-RHYi4}+iKc~;{+zXI*rJK+euu0d6*vU*gq(6_2|L*R z*#-Arg629Q!EVX9mZ(!SQGh#IaKRv}qT+jtpcK+t;GDBv5_UgLTsymEDcHir^;@SI zp?goRiUb*8kN{q8*aog3Y0iE^QMr4B-k5V&&zK7@wGlK051meQ#^1l-{SiTF>FAiM z$6N$JK=$OV)JM7%=QJ6=liH9tf^j1@f-_NOY+v2~LS^8dc@5x;wdakEk9?*=N1;Dw zB8D8_G>REm5@z!2NB#IC6X8oL#^@2k+rL&fBuX#Ze@7>FC?weNM#&$$WNpK5Xt;Sj zaeNZ0Dl#1c9BzxBJ3yHAwOfl<_ex5fu)2lD+T%_!2}V=Fd(fe3; zG_Lc1hdQR_mxLN+iOm)LI1Pk|4a}b0JlVeyhzsmDZcZXav?uGUwQDIO-RIgO$z!Wt zn>Tg?i5oEEz#%Pp?fH4UX7cGAB*$u_^yU)#_}Z827W1qsqFOt|14iHGwqpA5!N62HXNb_R zlF}(N9G&pz)K!Z6Ey%drKGb%x^)*(HT`KoWwhq9qltWY}&gOdNtZ#iA)K$~q+ZP`& zEAOclOAe@DbEbpr%<2N>`d6#-_dy?Qpc^DcH~;ov_7Ip7l8=Sw!EE1 zRRyhps1AhFOTo&oXLe9u4m2e-ik_W0npokEU?%9y_nD?s+F<$3-->X*dFzvMTqx63 zlbN>eWPlR9(8yTCzfi?n<<|ql3eAiMzV-jBO{rUwUSR8%+BWGZ*m${hOru?Up)cBI zCOQpVCuE`=X!q-a%Munavwg$6n?feskUT&B=3huxu|1%$Uz8!`-1+^IATpQ>RP*k( zyni+x$=Qj(dGuGbiv3IXaf)0jt~YB9A5f7SNuV#dj-Hkv*?TEF3`z%=$5q8k|xB`%hTxmA4CF6$jd$<@YYZ-cw+z&P%(!S|{ z+6dyho!8E{II|lJb==IYFu$uWFVZ$WU9I8YKD5$cx|0g}-Cw4jkC;aB3wvbj-n;{S z@hyEJ^7iM3G>l+>Su0WXc_+Tj{7IPSkYa!;-=+DHPqO%9B!JZbSKfJXGxY|tZpojm zr6XY`CQ(eD!eTN+?H+*4?;+;ivX+cHimKVp*Q9VOr14c_+-tWR4#{C+i$y$aOa-~N=le4lHpF@$ zft6p=eTl*oBl9QtMSm(?77`<;0dEvTBRWxf!IDCF(DsG_Utmkpk%Rc({=Fcmht|zi zjsNxR6%Jt*2WSLqsxQJ{Hl*)Q-Gyxkh&af2MNe;`Iuqpv{IQJq*f_t+%)p|duJ zFQrK05u5c@-zIVXD+1j5$w$BBX?CN|-J)F!=e$rmn8rrkNZ3>9ePYJ;`4oUIva9qg z{=PoOK%X)LysnsT(qB+Ry&4WH%88fk??9+k4v-1f-(ozh7HmdV!e|mw13HTIHULm5I?I0 zSB6E_V5#{J?uFS+zUm$voBV$7!NB``Cm-;$E@f$Mkk~{Gv&cStSg%OLG#4sL=5E9U z`1c!2ZiOg{OZ2LhYn@lR=S*p%#RU@A7InT0Nc+f7Lxl<#tROdOvWV8$pgBGVj&c1A z`SZ+awMm~Lly+nuB8X_u$o=sB@xOJCwEmebi%$0_4}dfQM6eth=m4=do!g`6rvR(f zQe_g5oy)?Qj&18WBMr%=ufuCr1Ifmrt1svH$MdJ{NvFw{=zy`95~FEscV9xY+S@Ktz-I^}3=jIkF4t)X*)uF08bCVA%UODYx@V98o)TAO%@UG&G9w3({5_1QhB;nrwah%;- zado*K*z=Vu$0XchrIij=3}-Ib7e7%9{sAmsWVpdS0wjFz{TrumCm=@oLe@^~z@jtW zK|k;3@HuyF3PMmH*9b01R*iD$cN39ga!k@9TrPe5=K@$kb&9AehpE@8 
zinYsFEc_vYLz|Moe&8A>1W?f>BeF-ScwdmfS3F2Ac(Q@JJYg@$sAX_wk3X3Eo3nQN zQ>1VeNF*Y7&Y{uK=e-vg6%fbUjh0i%uH_kaNd8_Kdi2i1$FP%nN~Y`i+>WUq-_`)) zklGAQPN~rjEOge!S|?n3CrClgyVWLuGGVbM8d%lSK1X5-XB{uUsbg&Awt<=XUTmh` zJYKxK@ZVM|8?+edp0K{nRfO2(XBNMJOwuxbbXdhUlJwdwVx3|ezkwsUgq8Um(efNp z0oaOZ=%7}0^x8H9v|q4CC{JEHj+8+T-Zt-V3jqRFqe;#|$c~bvK1sEY)ozvto{FJ% ztuFl=sDuOSA-=<4EVSanq+y^Pm-Kk7cEZTFy1DCJkwxCT)*V^j^5S#I)m4s9IIhZI zof!KOr21e<`m7bsT`&aCQ7_}5#5>}sduMA z=hRzG0=sfD(aD2-Ww@{RaKu7sKf1k(Jw*i-?LPyWpl&1AMhl_2j0(&($Rqtm&JA}o zbINUo9=-35E*ji^runK;fV7VbYI6??BW^^DfeDDGKY@Hya`tRR71%i|qpbt#)(*_? zeSOLN5oVY3_I8L)?th2I(kdl7@3zQ`2%I~@Hy^X5=M4=KCGOrlkyBF@L|Wq;&S^XC z7-bh2Ha1CmrlC=RcC~d_9YIW~QcN;joD99@6I>q@$ZKnJ#X33r46dCl^7UFn-2%jl zIYf?TM(F{Nqf#XtrqWjaal(*m}jvbU>G2nqo6t6K(ZpYmr}Un0^YY z2+3O(+rIr1aUFD>*G4ES^I_x^bO)eDv#k*q8Mws^+yO;Dn>dbsM{B9s%`YQrifLc6 zoB`j8k07t{vgv@S!sYkCe})YPhCc$JIV{@s}U*t*X&W)qWZfig8tUzUAXjc@~Nb9;_g@ zSl5m+j}V(td$Zuw&w!hJ1O&M4cY6 z@f-LL{h1U6LMrpy+kx5NCrM1O%F~PsA(y{%?I>{TD=UQib<2WHir>Ka(?d$X02y!u z{XXJ79${Y|MEy%vxBaq;tbF{C;&L5L8#&}+Copvg^K(2}bT)ILFF6Nvr=;?!$U7xOMk$0)u}`unur&_<*!e3EEiJZ#_Lw5@|I+uo=4=937fy3+j!6% zzaVY7`Aq-X>LmwS5H&sT{#4R1rLFnotA~0sRr}5fI?A@PL7#uDObGsZZtAJ0g%?np zP`!j-_=FW-tt zpEye9Lef-&cLSz%FIfCFgL=l5_7O!x5#2B7yWSb{GZ8VDviq`(DS$x2*)ZMA zO+GM)TX|b)wqkh-=*V?GfvV`ALugJ#_|#@tKyL>&%I%hWmYdx6KP;;JkUuBuZ+Dle>O}dpC1puV&?x@A&>5wcd15x@E zOls)`AO)YP4KX6y4cH|0SZy_I2P?vyj8O`<7Cp;#%R=ZPtWJa=|MS3VskpsOSzi8s zKn+F-usy7O4<>}h_$lxjPSzI6mnjInzbED;*|M7CH}NLdQ%`7@17E=Q)o()QfZV#y zi+yRTlp&tNt)9Zq8Q3I;Y0A=)P;K$xp6@*kcTM-ZB$>NClyYQ4i0e)vZ(F-MRsjCr z5u6lT=x$-Uw!tb3+^A|zzGep(%~ESU(Hg3cZBGT20T6BE$D3z;SHGEwh82g@XS5-j zQF!p8x|Ev)xKQ0IXwe!$6dA3~+hd628a5I|^L526*$!>4`AbC`q2e>)1|B^Ijb0Cl zC!P$KvXquRhIY{!h~lc)ol%ENhU8f(p8;NUdkaWL@#PEBLBX^ah0lnEHJLTRB z;fbV@fza1mPfsww=Sz3b{6guCMhI4Y9#uPjIeY2f-c;M?H%ePJm)VPZK9|cwe28;n zz~IizHy-(=EwZV1lHh!!JZ~}_{PpKT!&o+31#F!$%u8^_#El86ICBnEJ8e*FSjyma zr52X#^g73$#{}F7Kf05Jw|U+cPtC-!zdf98y}U$EA{0-A3%;(&pHYLfB}h03IX7bV zlT|j0LJA{vOf+QQkkNN%knI^sXB^Hx~PU z6rFci()-)TJLhydWo2bqS&mY1kDMj0h6_+k8RkB@#l%$H>(?|j7mDUSsfd7RPTY1f z_eu~=(1tTGEK$sTp6~M?e{x;Gm(SitLPA8|7;8gyO^HDjJK` zdCQKWs2GW@Hf{GWri<9F?e{-We5a+od~&Y)d?BD5a%CDZ7;2CRO1XOLoES`rE89bDo$Ul*_{|Pr=dOiq zMbm{_q2GsdHP=L+rZz@dReUZ7Drl`sxUT}=iT@6%4!lrGx(gFa!r@bTR1RpCwBhNPNIqWRidA2fw>$0EwRmZKo z1+Fzvp3)Ivmeyia42?G0j$Ah1u8`l#OH&hj&;~K4a;L1U zjT2E9H^Y6y5!4xGOXk%4(3A9BR&UxX zUhjM`yNd)HD<~sbmMnyRH~3`XXsn~MS85Z!gKmkfcNJmPs3p4gzW64Qao$V-h4T3@8`A|7%Nzcx z@|~-*h(E0F4VU{ntumIfho3QA#Ho$`q*s)ilR|bRH=x)uW9>_%>?n_lios0}Au(VKy0?Veb(X0A49W&pqM!QuJ;$$J~5Iq0C-ElK3g$AjFI-rvkxRs52v|B4Wg#5 z_O2K;q|+g8#Nzxo?-OE5C2scluJ2dc3Z9fdek$qfuTm4@pY*Ylg!{soP-KEl+c3|^UdZqkH*Ye z0h4P~Ce1<^C4Njx*07dl{}Tx49P7&i4%|c5K1?bl{E3;^jRh)k=L($rxZs-;vptUD zq`!kFS!@U9Kcdgsr5N1He#S#$9Jt$C{%ms7KKo=wE2ggzM6*@9D%Cj4pZwNhwW^G* zU1N}Qlyj*V!3XVhr@jaQx6Y+*xd%IW2?6h@lZ|a!Ic{Td8Srt!8-N~!YY#ws%d#L> zTwMGT-b%ZqfkAvbFw9?DLyedDbomgnU&st^mi;3($O~qV*+@0ivdWQll3urYWym&N zR04>6{>J|l{V5EVR_l9wEnuxSP9#65vMOFd?%q7dmKe*4$P+3u^fLRHwEpH zBorSYr>NlkU!y>LJv85%rrG~N?Dx?H+lj`TPYV-{wxvBkx9tx?2f?1p%Zsxnf9Qt6HS4!Imoi*5o{=0f!yf{uEZClx;f3*hT*+9i) z-MOlU^QfIMDbg>W{suB(UJ9I`XS%D%8 zQFaZo+zoVv!E1US?p63Ey#k(8$wSNS`Mbgd`;e2=nO6ye5 zY~)-~p;d-E;X;tZKJZTnh-ZpBr(L_(4!b;jowf8uCG2y8ZCcXe_lk}%i-%F|QPs*? 
zB<*+M9kT~3*l}i2s3w?oDlhkN@WtAH-Z&SvbrxAPv`^KSEKSP@1VgE*UZ^B#cWnKX z1xtL(`|HS>D#6LLAKgW0ci=ImXqS1e7v!F6CKj7T<^}g9Q=)C~5IQkokyxFzdu$OU zXc}COrR)D8GZ;J4lx+hD{ov673#_f_9g@L_WMdL;9)zV(chgCZs42@=i`m*`|HdX> z$z$XY^+N1n4V#<)9m19rMeo|XhSm@i^)r7i(p24^)?GsHwl^S0AihLs!qN3VQZr8t z&k655Cd;{Y7Qu=e^PPzpJyn@xgJRuRTWj|PEXiBp1O%e2W-jXZ*5`u!O^5ZPa95OP zvivdmPL6-T8Y#d2^8jq`hF0foB?4Z{QJv^ahm{~O4eRO2*RFR1)jPG4-5NT8g1K&k zDE;pcSMa6p8|*`Vj{R%|rYLgCN+^`n@agEtX|ItnBn(YuNW5itqt#-|?659_om7bbaOvrOw+jmBc4%#Vx2pKhVAJYS#QbsWB0wFyJSl(3UjWO|*bwWaq zOBW%Q5@Bxk;@B?$-^xyqx1;j4#zyR<@|r+c*Qobm}+ZWhcRvM=W3y14Jt@NdJggP62^Im z*AO6+?`ioAm?KxVM?)(e9aW>7RYk*go69qUY6l+a8Y*hYDiPqb>}V~*wbbNV zDkr&yT+d4stlbbvqUy$mZzqsYRmg|Tq;F9(0a2${XcUcW83R z-j0OUn}eXUf}xqj5R+LP3v~k%@+F%Wrk>nL2i6-O7Pzp_Pq%XbxBEm;+7 zBXO5Ypm~Gcs;U=3U~WH{fHEt(?b{c0yVWLyS>Z0`j=yt`4@uLCM#|S?CTeC$E6fk= z21^@NTc&xQp6kk;SRF~Ld!^T%gOYc`G^7-li17932}Hfi&{sF@_od0h?^6OKK=Gsq zMpYNQgtQ=7-T*ibpBq6#QMsCV3pW?F%kdj=<>Spl61m}zu{yKK|c3??2rxo20kNb#kbg#Nx97C}=N2;0b< z@w{IZd#?|Lr3W^xag46=9Ca?0H(a92`-s60*szRRM zV6!%VH>MFxRPaJ%Bsrs3fApE{?OrhXL|h7_b##!h%&PJBkTWTYPZV?s3FxG3UKjF| zp4LKad5-pOS}`s+m!pflJp98epy@M{xaqE4)I&*D+B`Up%;t6Am1e@!@SHYF)+NM` z7%GpQK!TPp-iRDXmCdG5Bc$i4V;ji%VY#)`DF60iQ}pu90X&{}u4$7ej&fJPuUxI@ zV7r4Z9~^SIKCM19WYJaOutkeXIIAnF^205h25*g8A}LLlPt5*yD5)1gO9qpwMq!kz za-Vw#Thm!0HOQ+)IDA%M_`Xy5&T9sRs8K%`fi+QmPTN>u!6Ln;@lX}^GI49)r1JR# zf5bOvhI0~Zuyv#5wIAfd{6ZDVehOa5zU_B644u{oqy2_deFmL= z{pYj(KpA(Tl=pAr_OxEzSX}xMKS-`;1>GVTbN`dL z7VPabFMORPcRZtbGaPVI|Kk^UAt*%PbayzZTw&5Mu5}>q@O(Za2cOIAMvjr6+y4(w z%LSUvu#8+c^oeY84WKE~K4F>BoDt~YIt0n`Gm5e*q*~dnYKAp73i2kX^FgSvE4iq( z6f7sYz2PG0E#GQ4>>tF%FE;{b!X9qu38+y=Nu6}WRBijNiz1tvS`7C{oSXYhqnGi_<)(uLN< zD3oBlLOyXK&NPqq!6KPb$^Yf|;Wi+XcA_EfsPV4a6X1YT|Mu`VR`g!mXw0_7m(e_q*vxWlgR7rh z_x$%Vx`_{Ff;@v{{yQZ7=1-mJ*`PM3-(0*}EIKK)>*RYxGj_+2)0wQOx%Bi90Dk^D zTkYCJ9=P#1J*}=1(%d6n*hJ6COiMVpV&oU41*;@F$FwD8?xWnXWx$E4WO})|slUJP zC2_FH<*77l zHIv-C&ucC~-|siBR_6rpDnAdmrMaJ-Ry%T|R8P;C?9kg5&Or!JabVxHIHI?p(lcqX zR=Yq`)dA=M)%XlAFS-vOw!hgL%r}`Ghz=tRGsj_1o-Zvgzal3rx`-%>Ii~>EJR>0r zHfB9cYZ0|oGjN&M+tl4OOr@KeQQ7S!M^5iqSk*rFWt6-SbH`NU>h4dRQyFl5s;8}qgRM-dw?4JmJ=mi zbjO`qqh~VeFmH9Qd$1OCUaV3vtP#8otYp7#iJB?glZQA&t~%2LN`3qZ?u;GnQiVT~ z)bez_qmmZfF1F>$curcK^GQKduv1EGl|rWWpLnUE0Gxbp(_zdyk5=nc0d5fi2D#_h=xH3Zt-)S5%VG^U^aV`nk3ECb>PJ>P*ObTv0FEq3S z<09?;I8$3FqNnoO*;S{TfX?7fE`^$Er0Mi~FLsL=Nm@;tD?67{R{`FnLc%3qcHP{V z(6Q>&h1J{LR%V&G>jQZf(

g&R^WaYxBs22VP1rND^*jTAhATA2?C_;l_Ev?`; zvNvSq>E|=l`KxqdAWJ1_{_fW?X$#+>upJ;2d<+r(nmmch6NZs;s{)h`?C(A2r?J** z+IW*2RkTr?J=0Tb@&2OP$u^^1*>UWr%ej*R2IG?ItM86gD2}&^M=25 zfirJT0NC-G7BoBEi5wpCs%Cm(&v3*-M*X4X6PbC>z)f4X+!GUf?Vr{|<5{KCg~N+a zue`StLH2kCIG`7&OsU|Isqd8SjgZcu(~xzmg9Id-1ONj`B?)p_bWO=!%_w)H7dK0X z?Oee#sh!L~T#McwVM*q6!jXLMR3_f>J0@+muZ=gs>v$gLa_YwW?jiSl&m7_zV1*dR z&V`3EzcCptr03fSs@*9Pa`dp+{EZbK-|R&&7iF`a_Apd2>1g)pCzwwIHK>LnaE@2` z%*?|4z=d_2JDpn4t#Ylu- zTCH1iI(t0dQ!V3S)dQ?Rk?Zs?BQDOe9sWDABtLeX>@=>1H+gxuzbqBTStJg;#F?IL zCGneHHa4c7yU7YOwoFW`H!x}_eXX}PTZ+6O9r?OQaiVUcnn|0ev&-yJ-5dQOrdLv% z6e@b=q5S)a9DU~iSKA6?{qZ)y}w(PIE3d=rvZOXxkiz9V>prcVQo=9HD|s86rhAt-t}Ny#3)P8D$!!sGJ2*ya3`kl#{l7!_<TCVp{4f3e_GaU81tK6NeaD+I|2$IPAPy z>-4^@)2@2%^~`;;p|!HPA@<5obK%PRz((*09t{dyot@oh2mh0@hoSDKz4AAn`IC~` zjyw6MtiDPIHCXjlLyvqqu*>7zWV$~yUB&L;7E8QrB@`-YX{pfZbMt4LKTg3)Q!dg1 zfbmW8>G>WVn_^PMC$Nop;<+=3^Q;+o>dC9p=gyk}7dw`OvRryp8#^LG`% z@T|ul^Vsf*M3q;`rJb;ReT%0(Z_j%E!aBJsLR<(fQaWO30V;%fk9Hg~-N)+eEZxfZ zNh;)D+T?D-;c{p}=DsQGb?ePFv$I?iaLBiNj;iz!hHik_$!KR<>(ISftJ*ret1e$I(*mu2vVfKeGaBV4-K*$xPTs6#8^*)GgN%jCjvfJuc? zAMDKHF*V0>`S7!azP6)ZLX)D&$8WX7*DOsOkXTc$3USu4h6A9U?i8I@m%|>sFrKIh zFUXey`*q@Cn;wi-{r!onn>H`>Bj=2!4Gf)lW#8^r-k~5yt^;_k!V+SAseK4rH^g@+ zb?5XdE1)99h#QdjyjjqkQLKE{|56Q`RvHcONS!YW4S6{*W_v=az$2G(U3xKyv=cTp zRHL?QuoFtYZDhSFf&bTN>qXCk+loVv-wN}2obzB1%xe9@(0h-E7LZlgHoVz?TF=ra z&*SFbHFke^V?Y0DgXs}XPzbL)9Z9c&-^gYhZ1VD65_exdD)#5wGg|%d&jC~VA>4fB zfdld0zTqW_so9Lw07^NB*w3HYS^s@|l7?F4gZ zGwAEygIB?GX=Ifg^y;ib5fF-HraBCEt4QC^VtFge{)PR)0SwLHQ~P@+D3|VZEy-$W zGUVtuziGaCpFQ{E9cigJcRt>FVs{KMQ2w>Feitbv`*_9)IV%h>y<7xLQ9f54RKw=6 z6&4TP=q$JKRv8=1--|mO5>E9IQh`{-Rqjtt=ckgzDO;2HejJy#a(`J&=`do^ED44| zlZA4C^pPnXCzbFCRk6!rSG>|yl#ijIZOOm<*5Q_ERAK1eO7Xmx?`t`Hs8iK?tPqC~+>IluL-`HRXG^qsoGn?Ha|! z6YS6vJ2n$LAC`?;rrJvq@gXiVYKlYJ!R7rtbmX&ck2;MV!ZfM#kq6%*e%jaym=Une|M(3ZM)p0^!16?9Da z@Yl}f^^_CAdu{2L%&`j&*yb(;Y_##I-nG9c(Qu6f<)MFzl|_P)2mW;%Q$oyHM%GGt zZjNAPJ4$54&ipFDSsITg)7_r#wU5A4V>KUL?kUg+{qKbn*DbV;-Dpqw8n4>F=AM*TUVFk%dkHnH2|AIgIHJsok`PvavDWe&f+;aQt-GU zgnY3mc_0VrBlUp@L>&>u7;;M@8u?qAw(b4}>}wS8u*mWmbg+kjlP9Z5x=!3|ZEXYy z-ug_6)XyV)cb9eP^dNF?$P>k}N%UFrki{!AEgK59EtXm_^xHM~FA4D&K*Q`L(HoNt zgFzSGoiXr$waU`{^7mng-BUhEHi3K&#dpWnG0-BZ9W!H$=8&hrx*MD7`ko!0t4{K> z;Oja%a?Im1INy%}_7kRG+un0c_uYIa+TYGPL{RO@j$fvr0QQKU#oEX*v+q>;Htrbo z^gx|+T&n^qE96G=TqU1rHZ6s)FM)tgjK8=py2@vlRL4DK=|#>oysYLZ&f%C@xMfGy zl~QBZE~@3e{ix%;fxSJvJzt*l%Rer`gC$$ud&YT*wn5WC2zE#^dfnnLR#AZ7yOB9s z_bg1rnydZSHl~plX9_1HO2;0SYuubL19o$WYFrQqnbDV`Q$2x?cK{w3c`#&*%(|Z| zOhD$3YbRVK*Zz0tq695jj{<%`-XZk&?8!EWYyXnr0|v2rF#NKPV}!~ysGKpK=a~8X z|FY;km^!?bsIF>Lqg9c99=^jQALT5@q4(Kyp`pvUlc&2SXPVyQ5wosMP8$Zzt%&nl za{-EW}- zCTcIzN_`k%Go^n6dgrXlgpEudNAfO2w>%rKf?{qj-p(wsxvaEeBq15f?cg(W99gKg@x?} zL|a(+>IWm;r;}eYD;Vt#z-zxxGslA3So5Tl7tn#;8{gEm9;&k|x(?3HzJ5^&%jQ@A z;FMVpyjUy!dLberoUf6e`{l0ya|dSCHKdXAUN>jf+0Re=m^hse8-hFiX5O~9QtoUr6hHsxK1M4W@n1isEb zoUC#D!HZpL#iobda5Et4g<+o)Ju%sYHXGbE139Z-0I9+zW3@SueBi2s`=oMGww%^E zUz$husL~-|e_Z-D`8ebuw9`2b?5ZSsM-Jnh8yE($?pU>Jr1C#Z-=Ep)rP6y_PiEci zYpaaO6ao_}ZTTnnEhCaG!)uA!%@aFCZ4CnGZE0mFBnyC$BeVk{Tc98rY=xKn{FZoq zV0&ZYz55GlvGJfMQZY7oFae&$$ek1QHx;cF0sm2J#&>O_(YBpSj2-Gnwih(zry=rx z(+IBY!6HMS3hKnvyGX5&@7g*luTMH&@G8zK0p|_IDZ;*2^%vfJTdpZhkx!*0P?5I1 z)V{@1-`o)|s1arMc%U0iZ8QMtJ2CT$ryOuj*mxWg?#v+xFczWEwEGPEG7XoyOixZL z34+ijW!Z$pKUiCzOF8+f=sb(_3;5}W@us=CcT!BW7vKMDvN>$=j$_Ml#Z3Xd9@Lck z{|87*P8B3-NZg=PLHz3108_pQR^GjMFXV!Tq`<<7w@KKI#ihOMe_@>z@s*#TMJ- zoG*q&txGm8WV*|?H$+*ccDkRs_WXAwv@O+sLM5k2?I-Wrj-!^j#_-NUV|}^`Eq|#7u|nx z06*n^J)Hg36n<*{7;R0>?y!B#qHDz@1Gg(Q-rMPr`FfE3(Op$3xg6aW)}hZ@iYts$ 
z)&Bf5@>thmcJ2#WI*V3~FfSIa-wqYEdy;kvZTNfI1bS96S&?TuMgu(5A`3s4oR1iI z>CN1`G98_sQ&3_+5lX47|FAgSdv8H%{Letr(Eb!^4LK;K65sf}X7IGfeBPBzc~+}= zph-1f9gYxS3eWq%t=sz!oQX$vs>MOT$g_QCC2Xf7=-m2Pef{EIS+=O_#K6FkrEGS{ z$5MA@^?^G;vPrA2tnoc%JJK1EPss7{(QPLR_Wb73?2dd!po9S>HOKRW4keD1&8SH) z@)M7;H--TRduguBlmHB>025;CWY4Xh^(X#*!oeAb$7ZVu7=u|aXHjE)@smh;6-b_WQ> z(ZpXhDs{2XN0WiZVR2lATdqd_PI(LcB481#C20k6R17;v;VpagF}62rZjm`jL2X5S z%sWOl@Mk|fQl-X3)`g~#&_+_j${Yr)|y1%e5fnCm2mD3oUMNK0?c^Vj}<;?D_iViS;i z{bXX_)rGtp6UD(yRg7AM6r3~#FGle5oeR}Gb~5FCIvI(WDNt;O#4?x>+3c9*d!=q( zcZOk@Q;;JR^5U~3wl~8*CL9-h0~u@l9opL1)a}tla%J+}8s=*pSi_$lPPhX~)N{C&O5}vM>Sc9_+f=2ze{#ytZv9f(XYaN*Fh?}y zho2;_spPvo&9FRO!CwT#^Amez(%$PO1hJPj(8+};mHb47*a!^yJV&gl$X2_GCD}8G zhmxTn(18rA!fRy{`Zry+iKcF&ntj4I5ugnVLP0L~eo0B0AQwmdGk?0Md9pKURB}iO z7kt6Xxk4$bF&1is0w0m~^iwxoc z37g;Y-yz79jcUl%vp07(Gh)(aE&ISA2TI9LNnZt}U;iEYE*?M)%`IXQQEIhoNwZFr zE#A7pgVC%#e`PH>cU8=~g1DilP?Z^gspiqd6~`mDyqyI}lntuGzLQg4>D3Iu{cDxU zN(Ij8nL>YzRqf)oeQ@$@Nb@~>4kNxum#lXHxljT$HeA%pL^;)jztWSW*IAHmJ$r7sIpuik9CM}% zos@%;YvGW08>XT7%wGLLyeazSE0-wQjRAKlarJCwRx+0e97{vWUaU%$mf1P^nuqn`AxLxVDb#N$4GJ(_vgRm4Hl`21Q z)3468IWhH!;}J)7LXQPgEqfgOj^T0-Q(_1Y!dkKZ(vSZ;BupTMr(RZqoXP~n&tCew z_Q|4W-@W12@(M!@TNK)Z@euclKxe70um-kYoQ_wQCKo(=5pn+AOaHSo&Fc5>_Mg%7 zvReaB{}x+IR{FR@E3*)>D#Vsf{PJ&ro-w#-SQV$Va-{N49}G1h0QVND+RWMDHuNso z|3Yb2`kmPGBEQiS)P9&~mBl#r2JoEpvOo%we}5bb3N~&v`d^1(xi*MSr(55b`|^~4 zz7_sM@PT>!aW!j`dn@Q2|8l~kwT*9ohFtnGd>{Qu6_D0{GSZ%8ut|;R)*=L4;>G2v z-11d$#y%uedDKMJ$EJK*CV~SH8{RDIq4FMXMGeWR%A&HadY^m57pyWuxq7y?T_Sk| zuW18EgE=Ei{;$&khzbhJJ8zilJ5e-8mdb2=VTkEq?Z7Kbz7LRlkuUL=CqBx6=G*89 z3h~69-ea#)Nb;H389Gxz?Elzdlh|M(orFzpktdDgSK*PibvkZcLIcul#f z9iGqsoZBf|xG8l0cmK~}g%urn zk=~v_KDY)v9v#}>PS*|=J9BXnXK063ha|7PWtL}UC4TpPE?iX$9fuSJPU+{*85 zgZYNVeWKa+>>9S0P?P^Vlp^`)PG-vRcAW3?%Aq^#O%}%0Ab!vW6VkNhTL~MI^0ZP~DR+02i{x zSgLc$o7r!o-p3*QuB3-y0O)_?)M!VThBU!xM0#aRcM;aS{im2p7i2uYTxcLfuag8% ziDyzZ7|92&`Z!3{{*7%d=3tc4(4|^4^z3IQd8@)af`Z%H`|!@%@tU$`o5zHn%Zf%G znftvy@zhxzaVQvnN}bbyoSJBjLQXNq3_fI?0=$VD9Ci_NiJdH3(6h-nQ2M;`);OV8=RhPf(g&vD0koXVypn~uIs zIa@dvJI-mlm`~HReYcli^k4>XnJ0qBlGKdUUUlBD#`((2zQ5gtUI{4JCoQbk#Dq;P z$6tpC7oa9rY`2g0ue62Xr>w_T;3`}_!J94o3CHaRs+BJrX8xj1&?<3Dbd#=$mi0$R z^ud{DW!F{vX1P-)q2RD>1*&>)a9!#LUiUm~|}gf5Ot4+MC0=nHll zW)?@+6Xbrc1x*f!I|?T{rD?EH<>l#|7L2koWm+@0~gAJ0Pi5ibYCM8+`g zBxG?`VGgE|q<3Q4<#5f#qN0_d{|-qx7|2+(n1uTr{&7jy!=k)2&{a)vke8rp@J6d8 z_9#1^I*a)T6i(H3Nk~CWvO$id&c>y5K&6UUVI+7AGe@?6y%%eL#Benes~F==6}akl z!Sg~|@>Y}`qw=SvK5qvk`0D*3S)%b2Z|gM`*kLkja<6kC>am|l4ns8%s2?KNnxfSg zYB`tXqpI2aT^yQFpFFq@Ff&Hu&6dohu1Cmr(II*@*{ycgk)@72M`RdCrI8cZ)&Pv9 zLoVfBG5<3`)tk8rq$UV0%+iHxD0ZaSY^}y;&SHhB!KSnIT)!fzfhtRA+5$}Fbwyxl zzvUYwrDO6>#n2-alLxf>q{VhOk-KGoMT7vQ@qHW9AuHpG#u};<%X8=eoop}BhL9`Tf7Bkft2PR&tnz62_Y>nvvKMQ5~9axW5>Uh2Xy_RPC zoW7`1^Nqr7K!)TU*Qa69ULL&jKPJlFu?=y85|%Qa#jAAc%{1J?WEn;4p>6JGmzB!HhnQX?y=w_weNGjpm*uD+nWNU4&1b4b(suCw?7HsrIN#kI4LhDKqN2 z@t&A->|}~nX#*|)z#S5@=ijTUt*ky35dK+XHex+kctJJ!WSC)m5pD=?vDki$_i^=> z12%`S%scbNsg89?E!K|xAin6tpl|YgV^lm}`B@rW7`cnP_HUn&Cu|$%pC=Ttiffwu z_Us4KwGGE-gxuIlM0nW(St%#joqwtNC|qwp!wYiGL0g(P#j5HbJTqoA6il|t9t1{T zi)8~Zy+Nmj0G<7$wSx2o3wt^QfFpe{WhhHAF7doAZloXbG*|DmY_muf3r>^L_Ttby zLT+SoCs~5Y-3vEgeWB5#n`rkdGK>L%L4==_a<0+IKpA!e_xg#%WVB*XST6SYf>ZS$ z!HwwoxT|%hXn5vr*{01J8SI<6tHuuY_Ej{mfK6`Pttg^aE5Fvntw=ZP=H766lVOw! 
zx9d&`3#vP*)3s~Zjx_hOs&oqm_mZkDP81wok$ zFDwZlyHy|&Z!Vfkyu^XstY?eP0S)Bu6Wid^^ytz~@$4lP9Se+@JST{{jk7uAc`kqO#^>L|i534u zn`m1_$0=3WgiAq_kofnReAP{Wj-e4cSA90yLms~y5#`6EuNe%F7RJ7ve|9G3P8~(y zRJAi$1V-Kcd}?g)n)HLb8_7}r(6gU9KrykMgz}m-b(EU6PU-w%3nM?;hpH-Z?;siA z29QG@?VcE3!uLGSewqIZ`Gf}y5Xsv{+;4@sIrFeVt%t+Ab1%xdpQei9^rt{@*Jz5C zf~lr~l-yN(!KKSKoki$)L`|s+M@7y5W*X{Z&MfM!Gq#}EC-e?crLHy$qnq}Lv)>6W zAI7(`fK}zniI?6oNXPTN@8VdjsExcDua#nw(o#rO=Gkp>e4wE`Qe2S>QMc`|bax>S z#dZ+KDofL~B>u$Mx(9Qxv#>ruJP{gDxzyX&Cv|jCrI8t1;AUGM@_Vh`C zPY!a_tvK^eVp#h^(`yA9r@2UNM89>QIr!pmOzRPq=}oFXrCvDz`9^Ca8G3IQ<>0Ur zW?Ggop!Omwn;i0zps!1ICq@K>f>`QW@QWhPg{FICsn(gX4JXQhvh$G8y4r0##T~?BVPg`ME@+Y?zJ~uE@>`;PY#=3&mn1AcXJNp5UK7(71Vi7}?m#8P`< z{oefjIhO)?JG}|h**@D=ob8ui^FPZ6Q_eKI+Ur^U^8m+0Xj9Pu{3YN1k|& zD+B9w4p?o=@8=lLvVcIPd+KBtK-BsiuXd1p+o1UT?H6N= zBs8ZAm5_`08Mf?xyGt_>vhA|21oycx+)55ao}>z$UE{$!B*h%nNoYu($70JJa78cY z81Ttq3kFI&A{z~C<%;IuWA53Ws)G-X(w-%(P3*1jQ4biTrQG(CIcdkgR0+83Sao~b zBxaN%sQN-_*V)ilqB3YvA8+l=Znv*I{`ge;@c!ks=9pdR)8#%zjhqk$nF62+fuKFS zNGj4tuwnXiChI1o!K0#aCyYt@3sS-gBWSqk9IY;gpd7K_s-?nNgW+8z`K5cIi%^)> z>0f6rtg>|M%VQh&#MBDQhEE8r_T>g5oM8(?m;i&Q{|@cBZ8d)V5}T$b9GUr`FvQs` zf{`AKkyT4lzZS0NE$i3S)r;6SR3;R-pipiT>GdW3yb+{+fg`plDH8z^%pepV)HX8p zK4NiJ#hB`cYh{GrvtQ6yEC_0k7UgE36sCi$NIT{?YnjNL6 zU}*eL`4iVBH@^5}JZyfA5!uo_15Jf7>4C9f5H0~j)a#4ND3At(Dk?ReP*-~fw>ba6 z72Ad9b^TpIKW0|JGJy>_4S%cfaarqZF}9)t@jPs?uiF62+d;u)3(=KFz?!pEoj#So z3&1;}4lYw_S=-kxrnGK@d8oYaift%aJ_s?sRB%@ClFi| zN?dLc^)~uhNmcXVWE%Gg{fa|!;SG{HLwYH3x6vH+A@k7vEOvqcFymUKlWg$YhhLg! zSl5ZmHZff%nb!3y+z*0yL>?iT1km?I$XBLiDpvv!@akBhYS;6q!!97g!Wh%|-KPR< zQ(fEG5Iy%R2J>u5vRKr>o_a6q4hrFpZlQJrXa{W~gttcetzKwJ*hT4Y;DlKrUrR&p z52UK7K6HMpYL`#478H3^TWEiI@1?=P3RtS}=w)!fisrjnm0UpaI$P!YGQZD7^qYEX z6LU~+xYI|AmbnJ#wH-qplqXY^K*1Whfq{T|4zX@tjS?zVi_!hx7a{7gAN1gH3V7~l zPAvVhrj(cRzbc$u5`qg2Sw?K^OiB_JE@gDgqGn>kKKG2VjukaKug+FiH9vj^4a6!3 zng^)Z>U~%9|9SMaHz)8Mj?i30A>GV8ZAYoj|KKGz7kNH&)+wv(z$!rIYTz_yo=Enq zGwthZyXW-#{Y!PuO*=RN^yWV{;hR(RJKltiiEjwV#q^xdB#Y+-+mU15=+u+PQSW4% zxF=XTwQ8H~7<gLFR7LuS-*A1vdu{#PUm%H@|@(ke81rH4WUyD(i1mQ&F71Bh;~V zPJb$ewiCMAy=7HOD8Aj6Hyh&og#dLJkaR+ zQCZs)YnQh!`Qa|(epR~BN;#nH z;97)2Z%t9jJS~;te2X4UG?ikh4VAVT(^|wRt(CvWETzYFsC)wF+7esP@i{YHTR$IN zoGZRIg0ZPtB)T7*)$%nrb&DkGqcDv?ey9RJ=>Wu8^B<*z7*^S-Vha|*;vrF^xBoob z)C62e6VN5@bqIxr+r9>=Wr>6fR{UG^z|Zik0n^$&T%xVU`Lbe!=S&-L)QIL9Of>n# z`#ZkUav4QJ?X$!{fn;FljOp@T^%RhA?JVJA`n|7}FcbSI%~YMJU?aAMn-IlhLgc`%*ni zcTYHmcy~X7GN4F1!!$-huEW}J3#OWCZs0gHTJmzQ>ykELQ8sRiYb{?KVLGBhMNta3 z4=xwwNz8hl<{YdLxo%~JyB8MsUVEB%pE{*6QiHPJZ9FCAFjO;{fT|X?ZU(OP>S{mx zM_Jo#>70sUca|KJ7+-hFXe*K6DN4zbkgvo$hwp|t7LGL%#i{;xsoa7oBoT<|Nbjr zKy(Zt{GCQoKp#T|{^X;&-cSGa-0KZ2Nn%x>FAxUcTP^t}L&%xE=3?&ygS(Y;c0EF8 zep5?V0>XA^qVssSLCJ~q%)6402|uv2(Jn%}Z_C`g8sM}vuom*gNBN;wt<)|N>|anq zR)i#PrZ|%gB96I_g>04Gy6WXmo-%Pqsh9k6*NHaw+EYw)3jC*0He9eeQPCVKB&en@ zg>U|o+tXhEn55Z*cV&Uwj5yFI@{&BT3t;B>t^1uN9P0XHr^~Fb z4h^)Xa9-^O95G}KXjdBJtoIERT~)Qe3Ys^rok!Y3Jwyu7EO%9hSq^{d)NaZ2_x}#n z|B>}B6qGcUVe+-^i6eL4`~CKhm)C{7@AKTxeV@?nHejj0xc;iB`nonkXfxHWRnIom3{$pnl;hOu zm%k&62Kfj6*w<)FUfRfV^Xye7Vw_WPl~2K{=MO`XmIS_H;YrRZSCbI$gf-+z&Q zoMi!!r7R1*Zf9+vg#Q>(*wt0#Sd?VVr`ra(8cSW+>FifZ)0(p@rufYq;t7$5R5^6k451rqY zUehPL(pcEt(qJteq$piyRoR-)1cmjd86tyu^kiWSOp>y~9?8ls6&*J$)I_}}zG>zLs3|E7-YfEiwF*%IJxwxmJuwC_gnhWbq$7=Xc{ZPM zLs=>+b_Gu-qSQSbep};FXyGrt7fMQUE_KN-lpS*$Bm1zZuHV;n)z9J(Aq#68XsUz8 z+b$?;FBxhl4BiCAmXsCs_kxaQgKE&1s9rio9R%+LoLWM}d@kD1^ruUXx=~71-RQV* z)KWKYQitubaj4RIn7Mw&9dR0>vH4s5+j7RF%fy~$D_%zE4H6>5 z7U4W*)t#PCddJ~$EHj6Jw>*xLryZM85~M4Z@6}-u-rQ=gu^nznq}l}+J@fLao@{4o zk5yei)f4=i(A92NT=R9e2iD_cOB%epAck3ZmHSGncPBg4IDGWLG-+I&?og 
zRYUYW3zz$%wPs2b-u?-a5%C74<^6rU*}vkQmG1lz@9&V(z-jc={(Q=dVL8(d@VKDlaW;=32)7m(I9s6p}#-J15;;)a$0`^q9(;5*gY za`Dqzm+hF9itYY4o zRp%6Za}L94F8B%m5VGZ`lK~ye2R>G}{dy=QkiNPE_YaZnsu#XdDK?Eu9>pKvGtCHh zkF~c6!He5FQfBKVnh+O9Qa!cLb&3CNr98lK2od`-_(h2EGZG!75b>~XD%leXKR;SI2?rSq75Pcc`$Z9-i`UV%Ro%`mi!YLF1Vf~wB$SbYxk%a=*VR_WloXx&7F7-MkdEl-6 zwF@}6?r6fo;2}?YDkXJcaA~{UsNF*Mti0r$AFc2RC^suguX=+3$k|aj1_!Sr%g>+H z+ICexi+n1f#NYz6-^BYv3-`YR7Jruojk}Ta*}6g*^CYK{PjLl0TF6wJ71)P^q_=Nl zQ=CqooZM3OFjPx*no=N6-y7WFhJ?9CUVYv;oMqlZ_{72|#M*`IhwTNO@mM=P`dOjF zRpJa@c1&3&g}|1iEL+(34kt@3<@e+|UZjcg$UH_&*<9(KJjtn6$^bc&ukbB`wTXnaXjIg`=H0^&&Yv{gw!nWO>q?>-q~9Z+{xdWSL#B;?8P!~Qy&ZZy zO_6(c=P}y_yah<>zhUfz{~rY$nBx!^{k*B4cBtk{%)64)k8?yE2OTSw`iZqWL4>GIpiN!R`6Y1r^sI@RU)5igor*$;WNO?l zl^RyI{74rmp6-Gz}(nz$76I=FoKj&KS+znT- za$oQPWI}(VG(P2q%CO~y2QS8=#uJ`I#rVf&LZ?XKu>oYz*B7XpJKZbOTUf=L&M!-KD#Zz4@j@ z$;XM;vLbc4-85Id$E`mPV`qw$mm=7GxBCZW%5d)oeu})+?YA{koreJl%h?&7*jgq0 zTa0ydS86_wzR&{HPMZv6%1};SG5ZfK)lPFM{w=Cj5MupH;J?^bUy&)n3)LjD z)OG0H_eBSqc(0caNru)jK3wrT#b#bZopy+h*C~J9kFWEWn3%GISbb_{aj$4n4l%uO`ry^Z!TKImUBwmav_)EBUMSF%%G}@-OwbYvYB)z z&A_kXp-b=C2vM&->vctp3@j&vabOXt%pH66Z{PC@bf)#&)dLSwX;7NgvB3l8MOAF3 zWq(`+^J|HRH+JAuSfP(tVmOsRkt&Ikgb}1dM1wVf5 zR>%~9o#ZG6!7)`r(N+P-m`0%-TL8Ca)&w~He)-FgK%S=BM38IbZYRe1m%xr4ChxL1 z*gw~rcf3^{8-#U5nRP1D21?HqUz1@{t*kdg#J6IB+D*fXwYF)$3^OS&k7}!@YE!hb z5!F(FMA(O;Y(|PNBxr1u+q63=GeFE?Q0z$2w7a$*;@an~{G62Gem{04(jpDbvGf@(Hll|NzQDWif z!bozKXGZWbcS~I3qwx*SJj!2plb=+lYglyP_9RG@v&^4Tq9A&v&dWQNsIq*0?2unW zjg@9z8{IbZy%mzlS_XitFSag+eZE6J0-opkAA%9FkNb)J{jJS^`Th3{8(fO68|6cL zj+BUv2n8zLvaiD(7(p>cnq%1$>Z)sP8tI1RRi_% zUBt-1Bh{w4B5Fv9wWl*(trU?a?Ja*o&mm^*_`yW;G|oS^#AW~9z(}fciLd?9yJ!0A zoQ(==D(u2hg+$js9@DPfLr9i!Pv|4v!^t@@>!WJ!#L2Y&h5M0pYkr3bGyjSs26o|U$ToXP0}gPDdo=(aeG4Z z6=y~fp=_$v-pM7H^=WT^Ifd$Hl>>NYTW4HL&Tw>Io?5u6CZVjy1BY<({=}{Y>jead z?hD^5KJ71X{N5Kh(BFn2<+pZjoN};(K3f=Usm!oio^>y{H?$8=A6l8nRrFnrxf4Ej+18p3|1=-IymBXW6z*6M1hC?bt39p+F5nfqpwy*VV);RNO&9kupX2u z1Yq-}zXZ0Q9NfG6W`XNQJ4t@D%37XsS;t0BE%brLB)cr%9)_h+OIj#e%h0+i+U^gs zNW*{`$DfeEf!-*ND!ad#vCl+*)u*=WMW7f)6`vXi)`K$dhl}O|6pvO@AsMOm_{|_( zDJWqAfHGCPULaJ`r`itLvNq%UyDmljiwQ4k6g2}2JpZz+v^>zq*KU0Ovbz(@=NN9H zs#_y85u^DAFYd?WgS{pJ%*Pp(oPyo_g7OkhFYD=h8JiGy=DJ4e;zySQ3v0sw{J2>p z7L^z)Zc?9%u5ioAhbY54Kdpyi4*Tr_7kW>0m&8=cCXzjDSBw=IP){0i{)8;J!uZ;X z@5UrE=RQAWFtv5<5SRMQ$X-I!08`VsjrB;iqwJ@2^t;-+Vyh}{CK-QF?8W)}OH4l~ z2Vp)^?kB($^9?$?0}BjXoaNNPa1M5Vu!(0ESRS=q)6c#4VnOAR;earC6}r|bk6BR< zTx+#w7+x&mKhO>A8Z$dTldY2w5--NtBY7?wFY6FGw;UH!X``p}K@N9%4<_{?ueHEG zxD%tyEY$s;0mSGyx5nC1st>3w+O^}#j0;zc)y_ve+sT&fU$Qz^c57LgDb%31ihSy& z6wHp=-#5B{8ZV?!dUP}5<9_{S=*zY}L;GH?&N3(E?;WwzZvyW16zU8t6M$#H+9)nK zL49G!=4MIL!WV&mCL5%m>#P|Wl|yhT-rm^w9}&hkwO%w`(VUYtcbu`Wt&e`N1jWG4 z(g-O;rzOu${Gf}_y`QdV6>A_JZ2AM3*~WZNh!V0YJfjq8e>w9C0=@t#NZ2ghs+Xq| zl~C@q^rdOOjmUaG!WKep9o_h$zrjdsEpu8orhz{{Y=d+4@FgGkJ9Zc4VuC%~)#iB& zS*yCoqfMMIy1BA$Sib|P(&M;3&)IX#6F!yBI~S$y_g zFt{M|>@X?VBT`!4o#WgauXLfZ`&myxJ}IPK4@&$veQ0k%Ykj~ODnm#^!J8tb zq}|YII{nGelJ~$IUnN|pBj#8PZpH%{!UPh6r#_>&!9QT?MbURF&JMk3snTfh=20T# zY-#CDGnYA%RdQ#Z{1SM7H`1)N>zpi66!5q1H6H|oq?DJnPE6KVM%#pi1)*p%j+Tu+ zOhi&rDt{_}s%!-NosZjeUbH?sy^$rSa6uNWHZh<>B*g=_2XI?gTtb!PeC}Bdj-idG zaj`&&ZFz0(q&^wYZ`Q7$h`QJbYYY-pIy@%1xZaQwj<78FmmI&Rr}OliF)eic)_@X_6^z!ZJ|)&z(Rl=Ubp2pf7u zqOIrK{cmP@J6PpucNr&g?sv>itaQ>c;6e$6mekvSGr=`R>Df6sCoAhXxl$eYd&*xn zWb0`2LNK`QD1`GP5{H%mwuJO6# zzb91I9s>ko>Tg?GV%$qkPs?GbjM#_!{|3q0#nz=C4SV4k) zS(kpNL%4~dq@WKbieI|LI2J8{lG%17h$+|T+v=bw#w2E5Ql^?Y`%OHjOI;ad?^7zWZug~MfUu~|6JH6X$B4~8g|Wz63z2td z>(}>Ux^DL=6@A>^i|IXMrJ_=?(RTgoK#Y&BtcP#C)z4rv2Wk}&A_FH=tv8HPG8P~@ 
z&BQpY+>YYakIS9m&6GR3V>(}DH&&vw*$ZYd`Heq{fIIIOV8J}cxMgHUOI|hq*3$WJ zueiXO#D0j)igxruU{%*)-#onEs z@2m!cbO3L?K)1nPGTR4wP}x&fs&7AXPYzBWLfNa(i+}gMH|1RgB>syL@#1C=?Kq^k zufGJ=N&{T4JrXE*&KfVq6h<$(a1Q?-K)o<~bLA9iFBOvAE-gI2>#E@5CTu}BzBV?K zp$v9keD?eUV#Q70mf{2z0D(@duaV5K#_}iuss`7vI z4nOlY4U2cEOpW7NSC7pbH*U=L9A9wK>-e-!GUhu&12uay-Si%eSLw9b=TUiMI#AW5 zD!@VQc*}2K*^&XypGhefg;p=(Y{m6HSQkz`e}+7$4$C9EJ0s+Z+CKK3mvp}-t~tj; z7=7-%nA4zsdQUuOD?k z{#xbS6-4JdS0IH9Na%@r{`X6FbmVS)p+#x>{?k|dUiHjEUU)Huk%;Qk9qX__sEH{H zU7(jJH!|v|q~R$rPbsPd1=YzLnfNz}#$8>-65-U8@~CADcX~eaAgyoljlQ#1uqm^q z4^A@H{+ilcm)RJJN%AXBtW4Gq#oNO;!?35$P}e0BNpn}3weVj8%@yPUQ-b(GQHii` z^xeC>goQ64=YB@MrT9VlNo9dyG@#yXY>$Ze>$MqM@N3-_a1i$%tc9|vFj?Y$oy zpuGomh+Lq9qhCCF-7a32HsC_ElzN(oub&uB7E16+LI1el=#SuxmJFd=LH|H7ENXbu zu-=V5&`9l8i~%*%abu|FZ!2-B?-8}jO{31$E5@q6Yn;r#CqTMBM<)t$R9U;Rmn5-z zs3N|6*G_+Bp18$%-axo_VT4^llk!NdwEvoz9LN1ne_eSgQlIE0I|em8H}>G_C$!CO zt0ytjLVro&Gpayf0uQW~*66mjPTtVb#`G6SpBFzm)&*M!wtYfSW1WmlpDvj6AJ%Z%F+O>_@J31^@h5PP{oNU-I1cPb-{#Ula*1K zI@7ogIxvAJG|?yhhf$ITdPdeIMjHDB4oMwvk)u`~)6o;+!^{;K*9j$)sc~qh>JNP1 zdymj{e=xCP>}rE?;>&0iZw=8S&G`PQ|GRceTOaCvTh*``SF2a;!3S}@as17V%Ekce zceR6T4u_DcYLA;0caQ4OF;A%rq{A-7l~RHYCMdZUFS82riA#U7Lg3Ye0741MU##16 z@{ zAG*CUM{ngoq)_g}4(*W&^bwxj>ak1@WKDpVy`U?4xoxHK!N}L`x zl;0Hl<4dnsPJn(;9umh4#)&$O+#BjJcWMon2C-wcbz~mm5Bgj=BZ>B;%iy98rNvg| z^OF8Yg|K7hOTAP?a?5xkcC?~BTT!ExXclm{PsB0hq0u}u74PAEXf}KkmZ_U6Kfuxa z(kFt_$UCw!6dw-d>~}rtLx8l>mE>E{nKUviTFh=UoW)Xe+hIMx3 zY&m#TN7w%E+b52$T2I8fgYckzg75D)m*n&Y2Q4xlg8u*8gmrGI^4Up8id(P)kcfjR z^9-l@q29km;(FkuRp@Bt?-w*o$*Fm0sk{D5#UwAycrP{HkumXi$&nSzcHVs8wM=2C>r^2ZfLIjh9*svRv{ z8F8DTJ1n)Z@B80-A5VV~Vq^mePnZ>7qOrJrQ^1H5=4z5V0{XJ!?->WcN8HlVP4gPe zLb2LW(f;1AS5Jz<(!NuzjdvH=p)k9Ps$v;rHc*;70DqTS$uEk zPO*uG`1xB3A68plk75cZy>zhae20A_tAY@g zUNldJ_l{G-62jxtn>AxjqFH&pOy*`NSGU8u?zOg1@xVgsNvoTo zF8yPl)vnh1DgFe*Cxax?B~*f;lv#TuJSH1tRjc=;Pi`eRQ)0l2tuS27qdL6S={PG?_-Awb$A?6Ril~!QOS_;LnPv2#x+GAyg%;*VZGfY z*`Cg=GOW1+Z$8lT5}PzFI9kcIP&7iYz0|T6swqker>&}!SwL{WURvl36VT0m#c9)J zK0|e!l3~jqLTDh9OCq*~a_FGlK%6WU4$|>vA2F(|BwxRti;G7i{+zN^C5Gt7rk4;u_FY3Edj@@qRXXa>%ICNPnW$s?RO<=t@2rEk*qhGH~w{f8Ko@QdGSi9A5D=9>3d5IFUEtC$mux>=Nv}$ z8zBmZ<*JGM70DgN&IVsSy=u!_usA@5mY`^J%k#aJubrL0@vWxjitmgXK!DCyS1hqq z(amm_nUte;e!_TI1#Wu!bUo~!lN;Gm>I~1j5<67T)r-8+DC$pX;W64Os)wx$wzG<7 z-F=Rq`&|}fCJH#d<_-$E+AyHi_d3PBm_1u=O|&Za5BFTot=l&!S;=}Va!+0PlsW^? z|1N4Yk!RlFCctaMGt~dYKlGh%7=#oK5*te*axqx0JQ%w~H%07`-xKOpiWtczwwd0Ifb^_RokJgPNaXQa`a~put#i>>W$t!K)W#&&}4s$;)G#d}XIM zJA&TM2^#w?H?zX(p{%5sa$Vvmv1v$mD_Y9aXZ&Ta1YUG?BDAUyqo5lX`WY@k4wfzxlR;u+AjILe~1UjqMeqjn=%l5?&3l=50xgKg!jxvr7N(^(%E-7IjDR2KyNZMc829l*Fjmz8! z7Tq|2atTewcES6h#DWs1*qnmLp*&e4cRbQb$(T-D^1O0k&LzW;L;`k34`ZR+-B#Uo{l`ypicB)BgbDM!t-UyIO^vm) zwew*1Ek${mDyI;5sE2j+GH7^Ut*U>2T4XV$rw1&Qlu{0fa_55iXL}?U_-$Z^xc@3< zSAeK8#$J+PC@r~<(EOsCssV%b>_~XfP*Ap5&ULyhrNVE}YMcm`)ug%I+Q#vgWcGaw-kx4mQf$RlbM7tedh64rKHg zEZM8>Jk3wbMAm|Mh(5zG4T8XcMw1O z$IIU(R5Vl^`(k+hEEtACyc22AE}d#c>5RWLto!h>vLdc9P+`A{8W9oRHe0q1McVAy z1$7w9okHFp=^g6s0^-9acs8p+Bq^!cr>R;g0Iz0T^g}D!{!0OeHbx4cGxEU0s8)lx zc1J>_Hn)|m>hQLqArf~=p^!j9Ic+TQmP3p9)5p%cw95WHnCTp`NWc#!8$Ks`B%~TG ztRMmxpJ|<5Jb15E$dz{}9S$gCCmfE~I%~dFWvsSPl0CiO`$ZHT`aTzam}u8Ln2p(9 zj754!b*ZUM66UR#W1^;$w?3Dm5q`~T<`qB*HK zQt4)P6dXhXmK61uG^3H9%_NWUFZ>6g9M&5OF72tFb8Gy#P@2QiZ$_w9#6Dlw;>=N^ zD(}pOQDcWoiLUmAI=EQWnMG&G)DqQ}(pBJFjXszhRfFNu`BT}NPinTY&tIVU@znjNde}fRYP#*RG-80f`a$e0n=l5*vNzKxZQ*eNB zj1bvu1=|nO0gFS}5C7bo^Npm7-+_GD9JU@rDJ>g+9oCms?Gky>1yK8klFLIdYJg8( zr9+GZ>m9ixJb&x%F-Q$5*eA&jAd|J|g^xRJ#`{VmQ3_@fh4&gxPU46WyzxVry39)? 
zXtD0yaFx(PhHNEiTY5b8Q(W8h=5FlDPK@~U55Kl($#ebe_?L0jb*Ye-7%N2i;hO+*8JrfW#tqx#i^#3+wqUmJhR_Y^6?Yyam3{*) zBKt{XdeVl?p%MU_11iWVw}-)>?}FNU~?eBjWTb82(=3_wZLvwx!zhzYe5nS}!w` z^>bgUl;}u~{kRU&Pi%I1seqlcBaM{`D&}3N+{epMbCI(BjmF&&1HZZ?e+&2d(3qe} zMVTA`ctV;(4b)6RW%4E<#s9e5*(oZ^OyN!cwJH-Buu@x56?u2Mz#)%Dq@zQG{Za&l ztmrl`Z(9_>!t!}PL!%YACtqWt`d^e3`) z{wp9y;;4D%d)!>}wDyN)^>r7As{citeBsn1wktuKcOW^X}biLyY($IEPC= zsh{O{4h#d0)BmtvcX6#neA>IKzkxmCgKZZq zz7%Dxa0Z=0{|Wn~5Ebt%Qd)@t0cifAg4MS0;X!L6f>Udjx!9|Y4!vQwF+ zs(#7J%$QBfQA0iK+j+lxn^y<8rL3qmV&r&maA_%I{>L-5=g3HAG7!ct+66O|ce);x zI~&A6n$}wCwZK-c#PLjn;Du6j?Xr`%6&zq(ApUNoNyux33NCIx3WMi6lK3`4N@>Zb72N`xiG1!`5yC#0%kmhYw z(3|V$E!Z2&m&iEOmPLudKD@kcpoci0p3YG?y=E<;??uhc5O?P{sSmk|#+rJN5nx<@tR!L` z>J5n(3CrayG}HOj#Ey$g^xU8ZUF6Wn5uF;Qhgr;~GfEHVUAJn^FDk_Dnz|@?P^0+c zJIN$9lyo3q4Ay)y-5Q828PNg@eq*fy0c!?*63Njr8p%Q~&H4Lm2xeM6!^FDA<}oS* z0^0Mrt|+h%M_#c@Wlj7vb)^N*o3c;SsqUQF9D%%OOaC$`Dgx4_+AV(tLJ!IEH2!kX za(cDJ;^?_k!Osd@+1Lk|&6u6Vj_MXyHQ}ZA^CN|yNa19%S!_j0^{|sxR9IWEg^8GK z)~XDY8bg}eDYUi!X3dF;)Rg_FM{x!Yhrvi<1ID=f^Zr1Z#8eEY?7A9@q6kR+YtsOGO(JN?+9rpY}H&p0|J3B8XU65N#=ysY=uITV8+YRKJ z<@ZO~MSx_l1QkNz(6aT8P*+Rtx3}9_+I<5WK1E>PiJbIUD#wT#;L~9Dj#;0Fv%K>247w8?-PAuqA{a!4cr-jk6 z+2;%--S5*_%r_IQTvdEf-w)7&7^}%pw2gkMut81zP_(k|Qk}1qz2yXgNs``ON{HoS zxx#k%=P$l|?MC0a?J(}`)qkMhoWIRZoDJNx+w0S1qzoiEz6cmT^(W|&ehbf=j@j=J z8=X>2k$2Lwk{UqJLYOOLu-svd`=Q?)sW{J*4&1%uA#t z3JU$a;PK~wNIU;u0+=kqL^9rGKfI##AZN#b#M{Lu)cAaPnlFN{$%_QBQknGF@6I~f zi%WyCItuFp8e&~{9-sl)4HfeJCkwx^-^v|~woGkaW9WW=y7p$Rb$YP)#Q1XcIei|? z9?3>?CKD#Hqn-v8gm?b)1~k8gjkP0RWfg25)w|fj;cN z-k#;`&alU4Z$B=~HXhDUL*P!X1814CRy6o^6>Rt2*+&eszWo~MUglD;jgof0RSBi?JfE;3jRjiuI-jhKUR?RLD3T>%i4o=uZ${fPg`$jnsXY>cEK^c7`&O7_M0%{RBi$nJNWfWHo)<>y$qYBX2SAet8hVm&RTFQuM$C1T%J63vQLe&|Ga2VAaoN@!06|F>nzXUZL zTfF()sM7HSG7#^bzNff!Vrqfm;GUg?)8v_}w&=99DwS&$!j2BBk3^sGZlsvpIOYJn zxlDv~)Wg=Od)xS(==l8zd-a`*?{pJx&YgZ=mAcqpxw>^&pC~~nYcFdpsoo(gWcYN+ zxI6d9ORpr84xnEM?&VIq4F5>lj@UIH#pzrH#M80*{ahwcLHZ-qdY!wA@+`mKvpf&- zD^{%oywhtTv2uhGqR)l527T3;nge+(6EJ5l#$F##CU{+iBDL1iTpm0rf!IW=bt#cT z$qp+zs6f+=oJ3s5CDdZVn~aOc=#oZN1ZnB1(zy7c-s(>6?t$*br@ptZ$WD_&5E$-C zs^xtwb}|L&wpMX<@vbokbWzE8BB!0HXWvpy_aD4v3&cuguF@^I=)%vtv zvqS=q*_mrqErSNzwll+!w^RO7`q=OI)9q{jSgmQ$Q7;JQ&Pz`h$IEbPJqKHiduAom zVvj#}s+ZMihhP4!tpMR}3uo0xgcVH7;p5}TCKjjVa~JF|m0Zbx|H|-)&1pXDEjfPm zfet$&{BdI1o512;Wf{BgKj8s6S_+b4hhkuF1YG)d;e~Dg@Bn*NrgGJv`_DgzIkg~d zhThDBj!ioO6VJ+pPFlZHT_>EZDj^=CrDEx>&Lq|JkDFZi;!BNRoyamjE2sdT{BAW9 zn|<4$;?HnPX_gf8BH!trI#_GxY4|E{FNSV4ppsM~Z2hW$ynH};@6cT{?ueJ>QP`q_ zm#IQA)ntI@;#7gG)+OdbY%*NwWb>ER5CiNq+=6OLgbbDE{&q#^MjP)$IrGIy^S83i zt7|RI3F5xpKd|JM$q%WHV7RZ)#@5QInlBBCmoT;_w}PCP7iD)(MX+n=7eYX6uvd$Y-ig!U-Z+~g`$$rK%}|*iz1~LyxY<{O{+q58 zuJE5|?W z>Jrx;hLq#I?69qeP9wm^Q{j5&B`fH~k!X9ZMQ5uUHx>nZBts?vsT;{CxMFIrkX@iP5Pt$-p^3_7AG?DiA6{(!+$7NzvdS8 z>#$IwCq)hRZ$|!6O~JVV7q%DPvnciQrOeCSUBBbbDgOLDLh8nqSgy-)b>zj1f_64> z4UJ6XZu4fr8pB!ZGNQ+Qp;xuETN(RcV}(ET`iwN>^htNo$E$3AUuI0}=r<@}$Rln) z*0?PIHvVS@p$hi?&c?5!T*W|JSEb|C6CCiS<=zVq!+>shV{>r7q0zF~iG-2P;B<37pp*PcM=bKkEp}BvJp!`x#o=ZUIPXMmN43UV2w34;UW{ z6gbu)hyqvg>ZCn=F@kbF=r}{CgsW@nd*9$SZG*FLh6x_i9M20stutN z?>Oi4V`Tfm= z8IuG<89_stf=?T~sichw8jU-WFd2l=T#WSPxGqfg?9{FrpKdML^vk+d;@{}%@m32SHmrzMT(9-Nz;U#I z^5*|Crq?lo7>FBvAH@yk1tf##S`5fCm7q;2ig%%luFEL>a`%p)Wq}*Tg=H4OULd&x zY4xv5ivRuFNaG@AtOD=73w7w+>-1!yIS-DXy|TuU;s}F7Rh`}jBs;AGT53=2 zPvyX>TvKPKRgtP3t{-MkjYAcZF?BZmC)LXq*Foh^Y;Ye@WNUhQF%H{CnbIxs{w2^Q z`L;0ub1h42{kMvrKa=$q_tgj5U9bR-akjfO1M8XEoxOI(;O57Mt0#*Pd(A1Z#1V)6 zOWh^ybXr37gAeO{#m3k0s)x$n;ek;b?TzYP3Bvnhk7N=Xn;vczB&Eht11F;uz^Qgw zb2b;&F(<9`oP8^aLg<58o-0O>>pe?S7+si5P6-~2&Q2-UTi-mVL)lDBDNpDu95r^? 
z(1o;T^RK6XyMGC&#LEJz%%PzJReDVaTFv6e_n`oTGGUq9r@}vZz6UQXZTic`>0mX^3^{y!{YyoY(^ujE|2~%ZJVj>Vfo7BlM9I7-?K+*o zn`$U|F>G+M1UJzZUlBF1wjL+SZ^AMAIw|e5<fTpa#;rNyM~ zg<@`~I=81UtX+Hluc9UeU6iwxv47&5KevGHLdBuiQcloeoAP1kV2}Cx;lde zUCB(CQg@G?OjT?7t}9V+TH%!!=1cdL-g8%CFK)4U1<+o&RG}`WW`)E0d?pUaD%s_gDt|O6Y>c0{WJVN!yJuVBZbPw z6woBI4jvIvjItUbErTo_)KoCCWe`O*j{i)mJvkh!1sxvWEnD`7AruqNnR%C-b6cHB z#yAojjquT2mkMdE-jSUb|LSCRAgIe@1{jnV8DlTw$3(65KNS}}KM{Wk(k>k<%3lvH zAN{cM^4`1Llu+z>-lO)(DdX@q%w#c#MjZc+SpTS%5Mv^KWu=)!VW!)4v?2rbpmQ9* z)(NKp7ravrAPo211sb>NUPs>~AVeCQ!^b~{VsO#+e2(#)ynL44lA5j1cB9&$-e0GjB5;C>2;8fCljo?cG_yP)7kc;B@(R*D=qk_mEq%zuU;4H8h!f7s@+lYPb{G`hEB}k28qzEozRs){AY@jvASYe zzyI<}k2vP?tkr? zG$*AZ0wSd%xlhB1BSkb(v{W=OJUMfp_j&&TKJf6sbKl?l_r0#F*DnlD`6KJ!RV;1n z9o(G+4!f0IjzbesA%zgAS<)pt8mr0T?{_nV>kw1R@HIb{wac1~mqFx%m&7oZ;!X5K z%So-j_yk(Yrbs_=jtX2U(%j(r(nDp!k3{lzst#xp}&p~8`@@8)c^Lo zXx(dz)ETc^5PbtR1Y4B}K=Pc6=As7OXa~B)jYBmmbfIjcxJ`G*8xdOoW!dnuc<5{% z!+NUhh^+2rU~pZ-#enaG@r}_n;FW;KrrBr{f2xX@g=t5%mQD7PMTIcH1< zMI-&oMXu*qhYJ@c=pM!C<&D8eu|PG4GK7D5IrI^k5i%9ZE>js)ct0RQ%E4}@vidEa zWLRzVgYIr;rjB?uI`y4(x-i#b8a$1;?32^`x7j`#vl8Sndts?mKxpDG4VKQ?>J8@6 zF~(%7PWsGY>&Q`SY%7OfH*mp3&N0tkL>SEB)=J%enC0`VendEex|L%s?^r3#VO*9{ zG|OBW4SL$PyvRjgTI_+-%=Nkd!p0GcZ9)h~`fS5ZdQXEaI>s57nNZ^nA2pGox2_xo zkb3@!(EYal%-aFS8to$TS^XMsm)|vXXPM`QO^|XJiWkQMvf)sIjVV*}@hQdB$O7l8 z09e5@!`>&o*S+*4OY}-x+4iZ7+{_JA^26){2FK6#jOc*HhCwF-T_CL!8d`Pugp zdOFl!Wlda@T2mbJm-U47IXIo*-t!+tAnk32+y#q+Kr<6!YHeI%YS+Hp9der-owUf+ zNz-<(+0L1Mb<(Bbsi~xNVfC*RTE-hRp-n)ZM|@`XXkZs0=F?zK^i>MHKRi_CNSdyi z;MJKtPM^)G#{ci+s-GF-h<42T13bN2skh_dMwO_=3P=cr|sCaCe zce?mCLrsR1h#Hz4b4akse{th8M6q0_C#sa5WIwym_o-8>INA}r7rIl|0-9~!tkw_A zRsMKNttnBV-xOi_SV6(tW(5 zn!bH5wyWrqjHkyKNyP0)Kz$1NjSG+2-J7$~5_T3_XiqM4Ulr_`_-ezK&7+R>FnDnFHVM(Wga+hD?ozPJ|xQn(veRNsQ+6@GT?2q9fs7!BEu z4k-9C`nKh$+3$Xw?fPdCPExUI#Hu^_Kmq*G3A(zF7Ur;vhLG~C8`}GRO$hY9PEZ=e z#z}ld(mBfmDPnaS)Ekk>r&b!~_~ASGpk>PGjA2Rzp68aBjE*|Jg33!f;p8~j^K|I_ zW#%q=>MEHHZH4EA&7X!}g`W|5NOeu*JSDN1CiV*vHTUm5FL7t}`w4F@nna}z4DG>r z12&+T?;2@xVHRxUTCBh3_QkkL*~ehzp;uW`6REgN0$N0_ z3N$w+LC!?pfK~JaBL{Zv;5>BD(qE3@(4XupW1nj7X@afNJnN6g??ask%g)ofocj%U(V0|eYbmJDhViWMeyC5 z%7_YEkMx@V$*#v-s$WaK3S;zX;h*BBM7&x(({!XA2hXijD;5?FTDdZjIQG&{;n&Rx|5o#Yd2R~fG{Zrm0U%Rn|feg=O z&le20T#^#kgCT59dAjP(0qzK3@7$t4Fw#;Iw7~n2;QFjM{i%jM_#Pw$QHHLaz)UB( z8%!~n+?{|I$yu=5IAenpOYg`OE1^(mLSeX!wfM(Y_Pf1#R#a+X^{(;$mx)x4Yf}~d z-#uOZd320Pc6D}^v3%Do4VOKXZXGovUg~^)xPGB=FR>(9fx1Piu$}(X<^6q#&gyyK zhd&-r%pDm$^{~-pq!>A+9=3bDSAEat&oU3!k2MYyOhO`v&u6;BLOV$F%V)o=9Ljiv zfY|t5Fo@i&P5u!+vHh~WFXd6e2jwtx1>9*>WCDC!I1( zS(Lhy>HttT-&vmKYhT+*?A}aJ7flx{ter5(NvLHLRD(|ikO$DD)ltd7KkSX>fJ_vwI05~J8+QXn}hE& zThx&W8I_AI!JqP7lC4NAzSW`k)w(%fD&^#tDB5p^4U-*GGT9$~Ml~i+wngoXU1I?; zc{i!+*XZquwu>&=8b2VV`$>OAgs&~=n#e8~(~p5)dloIdEX_ImQ?!l-XVGqV&Qk5N zi+x&P%bPOdXFT!Dl^-5PVBu*b%Gg4M-6fPdaI)FB z1xw&B?aKOrHSW&Pr5aH1neqhh+0)mu#@8qc;lqQW) z+|DKb)Ijl?tEj+2=S;g@Qz-lOwKwFqi)+p|E&7{Mg@Z4T;X<6V2D2)XOj5awtw^tq zrI>c~E8z|Yt&^=$qGO?z=&(E@R;KaR6~0t2G`4tBfKfm~CRv1j^Mlq|4n1@uqUDg5 z&soQ+#6|~#bmBPV3-{VRVwlq6;2WEdf%+4EEog-bc~fA(bkHg1$RTgPZf>x0{R6D` zhl_PKP4L`8DGO;?Q&iNH%3rDvj|MYVf1KDsoRbA@!wSIW=Z^$B5Sy4Gl^v$#9oUvgnKioxy_c<$@>0O)Z$gMm2HS@N`6G83z0Zg;{m|FrU}QTELX}C1zhYf%MMXD%tEvUUamv0i3;vA8 z6B#zx!RNfxn~nZX@r;0=TV^p)xV2!G-NWy3+?ZJ#UIElS&Ubnp*9 z+vy-rY9hCda&t(;>+h^mxgI)jVQh1TGkS`NY zzUpJt3+UCZhvKs-G{H>!-R_ld+0r$rixr^ffw^=cYgj^kEI8~&irCZn+ zn4G{Qi@4s?UsIBn^>ehE#Cywqf60Vk50r^m`nv#BF8A~IB_>x_ ziuh7==W^KIlv+q$y9`@DGxgy8nO?#jNYC5v?kHR<*TLrwnILz)L&Izq+fDEWLdLn< zH5#%s@3ep{a2G6l=Sn_MH4 
[GIT binary patch data omitted — base85-encoded binary file contents, not human-readable]
zkm`-gAX`2VY1~_+Gu`JsE(~^ULRmAj@2^2*$GL9^&$LGy1!El|EGRL^&+yIye~PZ_ zX7KqKGIG&NVB}!am!o4#HPxN-Mn2_KJN6);3**pYf~K0;2{$+Q?Hc)H~N#5`1H)Gr4{c%8(&4?LgWje23=r1#~e zT@Q>Y5(~OhOZZnhAU9SeiNdKDxtYu98PdFHjlJa}jN|o?D}Mt3(kVIfVQP7V;EzDF zT)D!ush44RFFykbExYjQ7h!h>kh{TMt-pljdr z_)&=WR*({aG(;vuo;!2TSJSbITMnHft(!RFX8vauKPhPLfvwJwbZFA@zkaJi$7-Bu zudd%%F({0-i5`!xQe^w22FhOvj!)0uii$#7Y%4|@R`<<%em=5qE2KG{7h2S#O?N#! zjh5}T(dFmyEc2*}@*m8>*4t6Au^e2vldj60VU$B;+I$=n_}@OQFkpRR>cw%KW4m+b z??kaC2I~R9F8B5p7exghG_wm%2yaQ08ur)Olqm-Rfudr*Q}iBPPWGcAq6`&aqt1_S z{BIvZEPLHyR|a>b{Z>_pSg&!W&`nuk9U1CUWHmiYs@VQ+*RX>hX~QPsguAun^cVp3 z?pf8ix+(U1=ngZws#+lHVyfTs<&x!~+1Y35a0k&4QTTZCVP6yO#A|GWHk7e6 zaw#deZ-qQ)=r;HDMd27JGtLVew&!#%_Z#D0mi&NwR8%QAPt&2X8_Ce28`iuZ9C$|* z*4}rOtdxZy7uB5xEAHAjaxlf0iA0@c+fr(+dZacdvT~J59Tt;|&K7mQJ0A>IZ7hT> zdWA1W>o!F`RO8{=3gtvA1W6`ew$gT4%xF2KaCz5*iaFhz^~CJ6C%ig+sPOIEo+zZ* zBZdCkl$*BFe$VWW0-~7-VhB9|HhqI#G(G)To+keF1(c0h2N@#`8*A*L28^$J zyEV=ySGfw)+|3qo9#@#VUW*smF8&nq)JSy!^5lTYq8@*V46le;lON?pZ~Jkl0|J%# zbV~fXT<_E&3#4^Moz`zD-!{SDVZ=+ggSrv`y(w~m9CYe2*9IntcDqh?(8uWQx3@w4 zpnCb9b}4?sq7H$8mmq>N08i^xx_9<8@Qgq0cUVt2Bd=JlHrtJ>wECS5^I^US`b6k) zV>+S1BFlf^8DTX}jPzucy#6!2RJCiq?VFKtqIOe*kq3A3>T;SR8QyN|*m&V)!%Z7( zP$}mAFF>>vJln_%0xY^-e$a@;^G-u$tAi5`X;&tJyJZ)tOcc#{*!GNu6=6yo4f@FU zv>qCOU4H*5pygj@VdTFGx@nn~_{^yiUktlc=xm|!nqcnZh=B18qN>}= zE2fQqdsvLh0io~G(tD#B@KSMkYXOlv%%P3is47XNJaJYMV^&wjr5g+2-!sM@n&BJ>`P41kx$x-(bkaoday6(y_DkBUXAsUG4fEK?I^U70vW%o&$R1V{n`F)K@P+KC^yoou0iAFgex(uIi~BVbm_155=k}A;^mCa*e-$cA1 z;xB32w2KmNYFS6!BBBNB#lTUEX~1ROOuSs@u;*b-dk~ueLANbjuZV~P^~21$^lw7f zU)>FewPP6{t+R>4F(U3PHjcEF>d4(TO?j&ii$cUZsFpI*+7pJ$$olG3w@pi{aEWtq zfai$723H&JCY?XFa3yI+k>!yq5#$SB`)X^RBwVd5#Ul9|!&@9Bj2RJ9WGI|yg+w=j zy2ZV;)HUH%%OX!wn(?>)Ws)&mr}BTR7lPvoir67qt@*NcTFzcCwJUZ&cWA&NJH(>!%z)KRU}V&TX}V;S9TY1;JD;tqEA>tP+FW zgm>5*IahEhe?As3yx?+UQ6&QH-a!Q!o@_b2Y#g0%|vGiD;;=DIFSocEw^Z zNTn-xxwOi$L@3%-xAWp96&7t3YReqC)}`t(ieQd6jIGtUubFrbIVbj!o}71g?aYnN zlgMrqNqwpNzU0WECq;eN2YYF4IXpPzVCdXJ7`;DarEq8E&f( zhRS8n8d*{i)Ee@Ildy|j`RLqmciV2!=vgyL{3^v4;geNz4NjmQ`o`fMlJgf4lzi^st;`|CBglaJ|2Mr3C7Kl{As+deeH6Su7|L0wb%-4 zLEbU`b$uH~LeT-pGRHb4w)=HwEK?$PB_e6`_2%WL7`I_fz*OLyIfh!vgPmQF4R;jv zMIeRg2UeMCp1yi#n$KdplB`4lCi-DauG7hixI0X@&+U>4y4{|RRfSo$WNA!z_1K(k zmd~%u3g`d!&2Xk4o8F61>b-k;vZ155#4)N{@FIH7ET|>kUwC-2hslVvUU9bc^0cQj z7}?@x)TX(=er-mZnguN^jGWpx@jlj|6i^@I5*j2HtV4=fh69R^S~$Ljzb4bsbwE?a&l|d6HLk~NWd^I6? 
zdD`JXT}U%%VOSFXb!+#!>+S^2G43{7bEhsyAcPA->3_hE2!Qrw<1S1)wsW59QmSt` zszs)=L>SCN} zbBI0Ou|_#L4T@!nyUV4G*npv*lXNk$q42tRD4jWj_CDatqc@zjou0x3<<9U_QK->5 z&Q|^PZZiOWk?d8Vn{wbL{yDiQI!ZT2fmf7O z*I&}N8aR)^?G%+UF&-4@sad4>m?Hv01kMW=FJIPmGY=qiP@AI~SbN!+o6B(uP#;o~ zQ*(TYi_o#~O3)34ZTeERK<=pGt%0w??>l|9s*uvTB$spny{gyJ2QRLRUM9_b>X;Pq zw5s{h+G1L~vt0>odhsMGs8-72@Tbp@Qc~%#kz)A@*4^g=ALa_Zyyb>eBF_k-FZbR} zbgVjS_?~@Qxeut@v9Ppc=U@cn4z&901;uun832^MGwimUsc3ofF^x<0=^jE)_eK6I z$Ex}|gyX)q`}3yIG>+hRnsfb3t~gp4EMIWZ`py}B{|mqM&#cZhI1?v@rWLA6#R=`);o3W^7ac1Z}w&OwD*EH@d)CUGtvaQw-I}1E(z)`2y^`YUD(294x`5AjRO} z|2k%_bUJ8=tGS|UwC{)w3%drrcE3EG3Pv2SNrCffp+l1dfX-5)vyxuMg+_OCDt=o) z9kd{T`pP%&b8`ND95k4lyFM4uN_hQV_)|qP=yjP1J!7n4n4s%g|7Qju$k7jSY1q|0 z(M{dl&;}CSHY`ry9o_W&w(Mbe@zJjX$#5s}1lldAUT7`M=yMI*)j z*g;8LhB8c&mIh?z;ukpKB(vIS<8t%gzoa>UnF9X;~OG)?|0vDj> z@gd&D`cpd$Z@G<6iQx_SXR9CA{(hBJHl2*aigyZiljlY%gFj0yx?2BM5_4`L#(lk@ zThB|+usch^bq{ zvGm3o9MhVu^1#DWLyOKk`awjn#ay4}k;Saco|&VcdMxJwd~yK>=HePyd_&h^sk#om z1;|6c>1AR4Y{V7{Ot@#>Q~8xIs)2QNn(iXxhn0`xcdXG_2sy%UudynzS>t1c=5=X% zbV|JRi;A2cjVa6`m4jEiDDE4!f+)IXn>!>xzld4r>PAW?(^t^9Y>x0Eo0MvfM9N`+_3-l)f??R|1jh8B}BO-F{1do_U2u)@E(MB!L5~( zIt*;^Vk4m<`$0W2;HY1z&Z2odqe5c9VH&@5_-hd+nOeTE$kl2u+Yk=k_(S$lQLyaV z0YT*;Cvn|8CrQ`thE`2f1d)oN-d+yI zL?1ycc%v4Uwvg|AO;&&3K~$${dCYWlbx%f=$FN$(mSGVgD@)+I<9AD^{@IV@1Svsj zl5>68hPIW4ujzH^Yc%*C{dya&pw|>UcwAz&s5U~;S(PEiaGNx!;(t3%F{93 z2=2od7_As;L^eSy+80owGd1e<%|-^=SsqKZf9PqRL88Q-qUG;%oRFucqjle>-(+N5 z4+f510q z{zPC!{H)Kr_nmCXObL6w_q~M$Ge&%Y4wsX7tn9uH_(N^eQ`BxpO~fvX$Z*btEMP1@YXOBU7OAo^h}>W*aXk4MAA2Ug0GTl{IYB zEi{jd~+-27Kk@GGSX%^;>|B&7`0SJk8TAzW4xQrq~GU)JN`V%m!-iu-=P z`@`lfKsrFqorc;So1Wh^GJD`QF#EHD|Oc%DbWXFXiy~eE@7N!o<{@=%U_BGde#F_!Dbmuw+uz zPHqPJwLA1tRI{>35?a=1s2al$;#C|+SeN2d)T;#D>QQwK>XNLK8%0gP1%MP>dwJm7 zkfS_J=gF{);gy13DYJ@@TRq3mi8nwPlG=dvYwBHLTNzL9sdnMDFUd&m8n|DlV;OHJ zO#G_b&cs=+swv7B(T)_oSvVHK!Hl<>!QPUwtK=ClH9ZTM_k- z)vap3xS%iGmpZg*$IzWX6%u{63cKB@A8ZCldHV2$Zl7Xx2d7|B3ocwyMpoHe6NM4h zyqvXUrNI12o{7MsS$%;p|RP^=Iz8nc*oDkaloLGrORf#;QKN--$SzB zsXtpgilHrZ)}s?dY_t$RRU#lr4SF2)?pU9X zcQ;fk;d&YZ_}~Smm)Cc{CmG~5-4lOYQ?1UExH}>s{WZC~@#-1rg8jNU4;+$bA1RfJ zVR~2OW*)BtBDSGFC6UIEq%{;KjZU`QBRDWubOk zqkr>FYO!gY-qiEufk2I?`i4M*qgOMN{6W5Lgi%-l)mz8`c4-7;T(A^2`NxhoBR#R9 zWyFnpFP4wLRv6Lj?C#sO^orquzT=L(9hkU)R?GP+o;N9*dLhpTdY=6a(|J~?P}duk zzU&XDTTQElt|2x*d%o8QmhTs$UV1V)jeI+o>9K;d_!ZC1+(_`tgwIh*;bZaj`SYLp zFzWxyl9{$o_=Ck3S<&2kF`9@yh~#HmN0MxEOq1Mpe1jJ*?l?9lxbtn++Y`T4n)H#3 zN9ZD^rH9=PJ0k!sMssep^QtVW48F;pK9errr2Phs#kQn#MptXZxEuf8*THeq-SPzo z!4$se7yS@0(gL@MPKl9g0cZ4$7Veh9@v{*QHoF3Ee zSMz|}bg(GUPJn6-(c52aLo*DC1tD>ol$FomL8tPoyzTgWLw^P2bwH1NnE)ELsRux9=VhDe9BKpe5*ZDmiWK78W4kS_*TwOnhkI!=f;x@wr;9!2NMpF zF4w29A3e~vc6=J093O5rGrM9KG?njOMq%EriQtMH7F2T_eGHEmtH@sHyaCRmi2DMl zwm5m5{ff}^1*f>R@nCauBT>jp+&4xKRkvK-V}QcDOS)bc&=~Ml zIb8=3k(ofVo^EoMH}2PY?pGG^-#)UhC95@}G&t;P1}=nCAl z-&0^3Ue2#ZJjkovk{HROVy7bb4z{>DR_%V0S}~JP;VEf-zS4=&QdyqJ*%C+iqn-)J z9V-jr-+Uzjp$`?6_GSG<(tMlKcqFVAq@d`XPZUX(0UEz*rj?1l(=vOpk&sK8v=<*! 
zuP3>Itt2j(8@j(21XUsZ%~jiA9Tv1K8wXv-c^)G?vMPh(nqpIc6ILP4o#?0^hcz#9 z%_1GE3$UU6oq4*50F0d1r#HBXH|wKdDko{L_^IwgnShpp0C3-d^g1fPk>+c$Imk1c z{t^=O8Vp<_=m=?f`9F$>Ce0kj-T>mFs_qec>+cft;VX; zjN68JD%?~V%P!J%EWJ{jb4iyTe6=MC^}Tqo`mUX0ARLyLIe*5AubdfICpIGs%LV#uF%&lRd0%Y8t00oJege{UPYehA z(vYSa`l-gavoGghbs$!3#7yN)(SOhRt>Gd}2g_gmnwyEo^~4hZ=X z7fs9sGocN_)s6wT^qWvPQ;mEzT!TggDjzS3a@921Df0wTf1~D0Bf@)ZD3Y~~v&s1A z(+mbhz@e|-M>9@DjDmG3(PjdEz3=82*O^}ReX>h3-N?cxd)sx67 z{hW*9v3hyDs|`$-aCV{+@oAe`BNf&>TqY`76E}sU0f&HUHrOZpG+~*k@z#L*{I@T` z1=7}uzJ^{*vA9JjM!=5FnHblUB<|4L&Td$0lxYFo8RpqQrg(it%vzx5zfkU;H0=u0 z$imp1u424$Sc~S<`XkWX9U5lln6jtivh8+Wx<|YCybXiLy@=BzfVJ{Vg#V5v(0q!` zB8rXtXzfpf*p)pVA$QsrG*Nuf`o=D+d|t&tF}Q?;KNG_~_kR?f_g|9x`^P(XGY zU8Oln#esX4yWs*969pAlsW@=uz&%bocZ#SubD#nu$DR9BZXBtgIjED0hK46+?!Nc; z4}c$d@VM{mdcR+<=aY(84W=j2BY;f`X<@9Vp*DnpT{G-a?(1t~nEDy@k3#6LEkAwi z#%G&){4GV@#)_b0M!%DGpXRw+zqo$c^ikUcN@AgEi%3T%%q??9tcC>s=}(1;yZIPa za+^vD`iqL7b{_9Z`~w(Uy9~7RD%V;z-u|#t?L$%AU@wCaP8%)qY(&#B&omfb+*a`J*Z&6^ByTiJTssz7Cw7T zl4?&3T}dix$*@41wWY3BYzNiL7HeD|My98SXI=X+)L^0@l(|)oO3K<8O3A$x=b+pQ z=;?p@hIo}P-B%mTbXn?TgF;VvrFyb@%S?9W>;u!qg`aee-%tCj4Kq)DGyHmiEX-bG zMKQF3uBggfIQ}9{A{T1)8oZc7P-l7!`?glEX@wgU4q=%b zt_uhTs>*5D))Yg#pkvuAwMDdy4Xr($>Y$YbOt6He*+^;Hwr>*oH`w9W!pfLYb^m%_ zsGqr~f6he3M>59v5C67!-ud69Wj;9aQMT|4w0Bsmkl1ahM-aa>)`}6hl>qVF$b}@4 z5=E54YZuDEio%w)4dGLbb}Pgai%6;if!(5P&~IP;LA=XI!vel|Yq)X;bslN#pfIce|;=Kinw=h%i8t=#?r z=@)UlR_p)uy20+J@<^PB8l0`=IZ%=BXjScR zy&+kIBpm>AocMh?YPm-v5#sPA3(ji;ZXZirG&g`HUpQ(~!4ftmTOyy1>oC%D+z~j2 zomE$VwEQYRjFPGCWK@X45#q49WxA2%bGb}a+`wFHgm0=dXK=~=u)F@^@#6FQgu4?7 zpCUs%vc5_n6+I;y!HMA%^I+|@Cbk>}ubAc}mY}EvsfL1Yv;CS|8P;?UyW@fFeGNP~ zL&^_xq{mdey=h@NhO)eyyf$ZJ&YNVevg&nd(B|Ef!6hs_>C&29NKICZxWQP+rovNS zCp{8NS!l$LV5z&o(4v-SuWI-qk7i^oy#hV(3fwl6Jo2ePJ#6-hXOr2@e~rA=iOQe; zJYv`kgn1eH2P-CWHxVY490xO?a0EDq{4d)-lveARI+ORwbGENRb zi>K{7W4uDo!BZ~7`1?xhv;UXsx5f3D8KNG0Yl?zIQemeOv?9w^-&BOG>VR$!35!YR&c&FkJeB;Up{6L4C+1)>)BmX%dauQTVurj3E#m$l$~R|@$Hnpd|zzQ_rY(a8272>+B#wGZU!gb@%^ zuM0HJqf?$^S{b%A$o>3ch-6+!e_im*US}4`p@@*anE=QCI^jvGF zwQ{(zOu2b&;H&EW9r4CF|9Cc1`JzPZ?mD{H+Qv$>IAjbFkSO5^pM3J6XZ9d0vbP)p z#DC9TQ~sI&9X}ARlXPgdK^LQ*9r_=ZVO&dqOtN4R?)pHQqlNjzSlI|tLo7Gn5V%$8 z&Ihk1*!n>V5@*y@mD%h*M?2?a9nV;)8#&^+R|C zC;T54IHa^IA4&9-D6q0H@JB5KsASb(zn4szS_>wk%lDidx3>zRRN}*9gq(_1Ths0V zPBfEQlFpcL%UdX%={f!O2xv|_0(zN@#5s$q6{~2A*`S>4yeQ4SvM2}cLe=uSC(a>> zDiU9LbQ})4x0AhOA*OWeLBq3dLZW%oOc91il>FNApm#XuiY6$`b)M=GB=MiP7*#tD zXeLC(heW}yP*caSr5&qByR-NIoA{!!{p-Y^Q`~^3{qCt(JH10u1kP(dZQhhlMSM~D zI{iSojB_>e;YfY0`dQ&i**R(KzCBuH4ZhIEo8YH`mf+JFayLJZSUdPK@dm`>Mk6ws|(OcP7L^ zx~bQ|rSOHVW41PqxLvxzaWi3+{nd|2LxhKAu(!htVI;dl`Upn*f^(^~FXj)YPa1Y^ zT$7nka3Q-ReZ1P(ytRqT=;xq**9n(yR_Bw zaF45`TMOhA-?jCW0z)dRg*jE9PuDNDX?fP-akWQkz-&@j8%T9Heq7c*P+Z?I-z9tE zA5p+vJRj0Bq$2W7*-AgHMX0H3q7*W2`@(k~=knZ9kkT--W38QC2TF?nyi2eT$N}iL zjr9|H1Ag1%UFy&oo3LSMmGE{zGu5t$0AdpI^Tj+DZ;gF!qmj7dq`=sU&P4Cf?-DIV zvAzptRIi0?zw9pA1dR8yW9pZe$2WQh`Uo(o9SOEuVN^V1+{;s><-?#mwC9paqwHW8 zyK>b?GDp_yFZ^GEIkF_smw>AMG!lL0<9W~Bb{bF7`%t}6f+pOl_#ya|_=EmeGpV1Z#&IfzhFL|DV+Lw} zIc7UouBgYhJ(zI{PzV5|lsg&P`yW4!st)~rnlspLkFDX&OarZ{mGt*}HJ2!|pSOJJT(_axy%BZD!x5hdwU{`k#tZn#1Z1UF%Wi}==Qb-HSF|eC9$)B0aDf(4 zxVOAjJd_gqnrkltC#$%3Q)tgsYpV}SN6zxCyS>(u8%>3Ak}A=$sm&RM(y+=um}mFn z3nQ%MRFcUW(R`=G9ocQHiuKg%&i~yY^6R+SjB_e$r(e~S=!fH7RPtX}#JDok*j=c6 zQ--;CFet2birpA6Rv`x&PO-@y0MhRckl<{4h>+jPL^NI*@|)b@AgQe_h(-Q3Eqse< zBbZ6yxv|PwHlbEU#xMCk8(Ys8J+-V~>epG#kAA@uQQO_~rQ(1j;cS_#Zo0u{{Yjsx z=GID!a~=ySt(wOV&8nwZvrQ+jFt#|4J6;3G%^ zXN;hcHGb9a8Zw;iCN82Jo&UV&tPyG*7;{{%>fWh2r5PjzPs#IrnEdhEt^E!?LW{Dp zl`{EHY1s=m|A6I-vV6-aLqooEXC0Uget~RPpO$+UbG#ld!3?9HpPy}O#^MlY!ZT^k 
zYx({=hA$my=cQ$WA7u-IOgp-fsy~~}U;DZdwelB(QPQ?{l*Gwl4KgGn*`O&{u%-h& z>?!<_d+?yOQ4x~JUzlYF! z(yN1&9KrSou+=oJ$%;1^JGU-v!0)f!|9ddNt}8*>ZbTbYmH#DlGNkz%;KH&bk5+D} zNJzpH%^MtQl@s@^_rrYM>=rpc{>O|noCUsEYb+ETh&xwTdL7zn#xib3S9lT^#U}k@ z?kE#jiDcOd_Ue8kA2*9ih%fsv7_7Gberbgz0%6001?DBWj`u~WTV=R$BAiy3aa}er z`Cyp^^OOrR)zcX&vr&N?W5tLg)s_p0e~;0Tq$h3yPNGMyXiKtD#YXM3v~HOqUTJIf zBpTr}3gOSHnXQS4vBFi`ZUPr~YqI*%v(ZxrxK`_4a$iGeBXmfoGzLJbt?o!@kE-U& z_Jt?}m`%=B?C)#s)2$DOgb3#7$&N-~$JHts)Gn&cTboO$wA)d(d{JKf^t=L~Qh>8> z9wpbdFj_5%zmm9Hx~Wx~pQEczR!C@Ct4f5vOiAh3~Fn8#l(v2BZ=; zvQQE#z}IRFYf3886qL6ATtm(^*prtU9-bT~BjTLGqJ)TqD>^-QZoWLmY9GcK!^_;} zF?I!19lOL*8+|!ea@VWN zdlwD(HuU3*;RUUKmZb~=iUXLhw_^rHSq_|BZfQn~X`;#AeMXV(rI zMKf1H3nM+xCu5PP;8Szf>{6^}D?dbvz{{!e^SsMbgt>qq%Zr|m7S!+-nOH(s5UV;b zPdmiqYqwGvLV6)!CblvMH?U{eVy17LCxqguO$P_OMSb?l7UCWsZTn-Rm z(?O~HStuemP&CNcAmp&gQBl^_gA*S*Vvvhx|CUM?oC%euwVI;3Dy`!`77SbE_jUJR z4x21F9h7WvAcWli~*KE)o&3+ z`~NyglxB3!ox#tpT1UnVVMq`SfaawP$JY?K#%IDo&9* zZucHPSukqg!^C^B!fC+~gqCf`jo9yj^5$ZVS$Ua>H{IZEUDwKC_~!aDj*2#-89a@2 zaxv5!HIZYf1cP7xxgl**j?mWLT1{`?*EhaK3UK;y9Ge?u=5RK3P3Cj!;#b#^&o#+8 zo69nHom-SLXMlnbnwJNJCc*WtB#pP$nS&x4P(;1|?tb__PV}oAx#ef^C#6`++^ZS! zE-^N|{jmDMN}&C;=DqFoaM}9(iA(y`NMdabd7oRCO&XlEfxQ7h!!(hfhlC8V??0 zVu-w&lJSnL5#)+)M4()fw(LwAtp4Ox1*Q%imz{4bJN!E#Lu|Pse5jpP#-Q6Wy+3_X zTyiWRn<|Ox-k(MJ+iV@_m2a<;^1_@e7tXv*%Y2tr*Nk#oV=ZrZ5sJbhc5|ecFzp4p z!F5vf<*VS-4vXDyH5q0*NX*>_g;%mL`puKA^ITmzrlPPGP~pv+I8s zbNa*fMFZQiGCk5KYS^}!4d9-F%*w1c)P+)?>5`P-t-)3cpgBQqmaSbctysX!rB&Mb zUXgxwxjQ9;XR^DydXD6{`J-@KdfKIfYE87h+m{Q1_V71@>FhF%;Zs^_3TwZqt9)HK zX4~Gg1S$0G)e87YBT0efC25Xt+W(pvoQ-K{70m2?-R<|A%o>|Cp>^5t-Jc1E%8ad5 zElf&ehG_%wtl?~j`2=YqL`m)KfK3grYhkAGsRBgjzQs-r3yE>@P-N9leYGQW==s-M z^)LP#gJK2sO;i-Qhm9|b{o{W4_dv$QqqlZ+7Mr#n-^=n$-S9HsNQ4E&^PF^V)bl^q z)Jr1;&@zu^G&CL)&iXHCHwUI9s!sy$QD{*f*DpWJ_H^Ejp=fcpfttnCW-QWZXl83! z;BD37&J@GUw>2;@NXSP0*w?ABEuK}@J5zpRDMQ!Zrsm7#AXX_pk572X6UMKWg*`xJ zzsL_>q9lCsy)|EiN;z4`GA*_0q^S^*Xh{j(cY|rQed9N`ootz7D^*H~XPQzndPy#OH5NK*i*q45Bm8572Ow>o)2?_MO2RnY{RL@x5M@OljgAgttCEgy)=lDP(p%Ie3kpoiDp# z(`l>K4ss~`T?Y2|o}&**?o;ejS)GT8)?zL_LJG(ono7eQr#;P}Q^B7m<^48q)++Om zN|qG;C%|+;_zSyYlp0$!FwC%Y$m~hpi?eZ>!r6!O;OVqDT}Ryu4-+jivMU&g;*HF9 zi< zb}b`w76R?F-mlDjA^9Gu#F2lo62{}SIhL-nWG-6tzmXv|dj|nepg#boiS4Gkh_qKn zrJ3d5FZlTS2^SNO-2z`6=D{8AX0!DYzWwwWt(u68{U;GZgUV%WLjUvrmu0x-?WOD>?z! 
z#g==Av&OirRvHtD%+3cfpC=9Xe6MbM8JHbL@OG`}THL^RA2yB8O+_#pCdfBw*Mm*{ zL!j%k$O;x8krZ$8w}-4BlOW!yCtojBK*~Iu6V%jaJ7TR|1e^=6k$IGwlyp+;N7FU{ z>xQC*O=Bf*2v)V1h{_G9@KnOsH{=jbVpqRIU6B*s#!A0V{C#rquQC@uiT~g^j)CSK zA5-~LW2^`bQGXIp5AM1R9#Utzx!|QE5lod3YSZEHqL$2wBKjl$0Pti$rF_zUNlZWrfX>^CF<9LhDERAXGSz3}N!g4&m*r`zS^viw@VTOn zpPK5VoGRugV4Q;$=aewAjz>cL3xSsf=bM)&tX!}Zq{a8j-n zCmL~qp}O!6JCXN`nP6LJ>-_3rTT}w^y$2=VN zGiw2c1Qoq)J!Ld__#sok<~EFn1PQGeGb#&PT8H8(aJS*Tm&ydtcveH|W%HHsQK#ba z90%PG8YdUjujSRur_l_xjVgYh_t(CfXi97hpz5@4`$m5w=15Cj&Wf(<{T^Vt7-!z+ zoOd#K*F43tB?_umm|WYkpB|oxDKZ_EULL!R4Y8v#)6*MPD~l6E;-xPZCpsffx=RDJ^6xV4s0A^O!mt+nsH+sY3^2+o=K_eNf1Vc9m9_B%py+gV#832 z2Dq?drEwcuus_8!C1;IxKelEXS}eow__HQ1Im+ID-#p88eT8d*w3=b_QqUbSR=l~ ze-Ijce;Ygt;L)fm#BcYmTnNt#=@a3jFv5A0`(Z6)f&aHK*$^Y;>so=5Afef6G>eO# zgnGJf8VGI_X{~?2%D(u*B5&m_f0vsHXSI}ZMlrm)+T84E|>dRT*4J~;p_)@-2 zrHN^bYS~1qLYY-|(>6K$Ns7h6?9vQ#Vat~D=PESf%!7E$nb>)^jq*$c&Ex&dxz8$M zS0|%%tZ{g7)d4PRDE}QJ=|C_soZuubqGz*?e{r|ghWe>Ie5L;TfV#TwDU&y~=IyEc z{@+Ufu@E^SE0d|9Xt$D=VJj`AwqKX~FF}H#AGn`DH4Afl9a8tNB+)@?WzC|7!|_-s z^?H%0RDGK-XY(wtW};$PBQAN-%e*hLH-&^QZN?JcAPffX(jiC(;6>r|y0$0lZIWq_ zVI>UV+4T%}1tqT()3ghe+N&8F?qcEkT`HaE_nMj)6f#yI6RxZ_Xudk8_e0j&ieglP zPCZ~0ee{Sd^b@!mvdh2i6r(o?my#NUIdCyYt z+tMN>u{@9+N4utepmguTy`j>D9_`;!ck_2wMao_;lDmY{ty!TZMFWRJha$#V<^jWD zC#v_CYsypTpr)B?ugfx5z-Lu4LB11*$frSHU_^3HFO9HMVXNS_a#&Z=#KeZAFxiU6!{IUbpV5A^FJ{&ce_Z(YorLm(CwCvqB%?YS#{g#55Vz%-BY(-|)WHae=L33kgfquC zilp@FdmUmZ`lTACVM3HWVT&h*9 zXusyFXGt4*_3MOCrhnptK?c<@Nx94SC$>npm~dEJ2lgGxJa)c+@0(nxYk7kur%-PN z;sqgycv;%Y^Y~oYU!b#;5#ND{N~DZItEn#c;nWR9VTu1(72Cz0nz)s$f;?y9P|{d! zYpt>hsfEsY*uUpoJ6Z<;DMW?iNt6stySMnShav+j3nqzc3veE+|67r$m-z=9(<_Hf zIxbO%DI8z!=Qw|r{P1a-x-}%>5S1rKIA$M(! z29)*}5s@~&x&|*UjtGfZXImMQjkEmxWUnHeaB`rm2= z$#lu{n8ySnYV%x?<5O@lr?P#q(SlDK?a#WV0T23mTJmsC`t`Vi$$dp0+IPlK)|ok7 zKd(X(vi9$Sw*s#RyxO7%869H=h}z7 zD>Z9ahv-|sT2qKL;XigvauSpbanfN69n7a>Pu9P&Qr$SRWG#rjyFjkXZ;iR!Ts%2MIk(qAWtN&P!?MW4bG!3)|*!lm?P#S7Lm#oE9pbk=bFXY)1*E4I1tT&h{%Zu{nL(;DoNF7hZthcdoiY$1jwzYlFQskHc z=ntjWA`f0YRag_8-QwxJ+%x7RoWk+rOtg4rrF+h z08^th#=0`1+0>3TP@gLxtuU&0*H+Z2IVx~#W&1m4(gI_pqN8d7lk|>ftFC7H?T6}$ zV5L%s(04)}vunoil5tEF*O5>173Wpp06wZ_F*`dt#}&%o^DmWv-?1FIsv6=HP!mnYPeqrWp8w#%)b<=-EbRS^+FDhtUgZ=)Gwge=5}jLp zKVg#$q&UfYSzTI@CsE7a)-b9rDXXFcGLnVU3>|8+Ez>%Wih55~-`VEzP zJpnTrN9Y8(jfSn?iLq`&!59tV1*;e(;H(-w`5ZT3x_-g3Ph2DDGT8_@ZV%PT#20eJ zBCd54&f14GC9XOKNK4*~Iy=*LP~BnJs#wwS?Bn{T(%W}w3O1<5hWhb3>{$C+z>wzk z%P^wBYhJB&5O)H{#T_UQk{&*NN$3+BJogIU)at-2YT>?KEE`@KS&T`nA#+$Upn}=4 zs6a1wPObSk0vRa7tLZRGP$hD;kt+q#dG9mS?!F9f-Ac2}t*=l9xirpv5A(`epKxd| zz93kh8JYujdI@VY89MH%xjX#+R6~!TCl4afytTp_K`oDjQXJejs@{Bv($!q^-uO$q zdE9;z8@j!sXBSVY{wvc)qZCPlPFXJ5OcEu2ivWYGQj@lQYMx3hhD@LLPeq$dM}Vh& z4W7MM#C!G+K7p1u$2%UuY|4OIc6*c2W&0&es!&TXS^glbQxDZL@ND4)+c8dCX%=u& z7I|zOUtWaS+KtRi?Kc+x@UMCJ{!)F@Xj{k{bIZA)|3m7ftlD0yj^^=i6Pn;@+3shp zCZ;)iRIBf)VfNleHM-St>b95#%4S6jxK4w#GIUcm!|6Qmo{O{AfC%7qA1i#1Y3Lcg zGDx^PD6VCW`RSYBpML}82*Nu4I-$>q2yEsUQ_;1XBXIqGcJ(2NApm7 z%kA?&`SyfJ%%gL(;ON@nvrRqjF3P~sdOf^9=W12}e+JtQ8Bde>t6ps}E3LpwBRyJ& zP0Jg=A`uTv;jMH#+9a&mYVpRQ!8z}AA#>|M-gVVs-G?oiYURzi=jrYQBvP0(8)9_NNJUwYa`z&DS7YKV z7bmpZdj4UM>38oST$-i^oM14^@n0tv`5!}cS6Z`F4C0({h6&)^IAp>*4KM7ylj4YK z|1s80je;MPJ@h?>jGC$7ydmjS_u77=DEF{6YJ>i1aqfwX`QfMqe!&G04;{0;^HSXs zjjz`@I6niOe5kTUrf&vL?wxn`>NHKb(5Hnkl{>S0Y)F}8FEl$YOI$?M#okOz1?PZ7 z&YlI2BK&xhqXDrbx71>HV!;#FbB8sa85b|B{>0*^_TeqwoHKi7VgA)6shRKHmh}ib zd0g!FhA;C7+AHA#*@fYMdyM|FLDazi6l%7p{&j-M^vEXIdMJL*$T{dNRIS60m~q`t zTDrhuCcjLSeQMN{?n`FQ4+<-`_QCAri07&e9xBzQSkN2Q`EQ!+*;{G3;_rGaiTGF} 
zp-^rG*19<;JP9Al{Z8A(=X&=1<$V9n3N%P)Ys!3psM1UnlJW6#%yC|46Q9NT^Z6YM&$%OUjH7Askw-CIZ?8`*HoQ?+K0t0%nIb+?Lbsq>?MRSYgLYD3jELH$n0 zPU3?xv^i_4$+0EC3O|Gr_t0BLSDUXrTyX2L4?e&}QLX#rS|cYs+8wf`vgIj(`U&vt z#x38`!8$l*v;}1_t#N25wr=9jNtU4sa1dcd4K=PVL%KU&=IZ^~wo-eE;cL($c1ItZ zRMbdEw%ZnaM2%qv9&5!xpji$)8f5*r5O-krw$<{OQXOmb5dSR8%hCOzC>ctxw9;c_ zR!%!6|M+#nAQ9*Jj`8Zt)Lh(`{}?vJH*beHjs<*@a-zI8^XC1=iN|krB|prK<}E$S zxma>PBj_z}Y&^uptCbcS|899=y zH1dw|0t9|%M=!=T)SC?*cmEx!5c14ag`V1IW&Iru>@?YTEQ_eza%^e>Fy@h=E#tCc z`LC;A`kDaz@a+2id~J<$kr{ICt-j#ApCHnnKUe9S=B@8J$C$znf?EXD3}Ypma;Qdm zW0BQt4=KjPLp|0@%xu^Vvu(&d*kq1DXZ`bWv)-n!ffFQ_u$O?#ZM6C)LMxE6DWHUw zCjJQ7Y7J0k#eWgMnW0*`tL~Y5d8I#Dr-t2CX{9X%h>eEv<7x`$GVJo`Qelx}8;JJ? z{S|zQNSb+Ssmy0iQYm>;t&`}eQcKQ=H^y3sv|3Z= zLY?3;!Q=)y=hul#@J(T@TKoQ~Mr`QRGIq0x{<1QD4fM5d&2~%L?6b+YJl{|2!CBY3hG*Y$XS2L61)Eo-LJnwUK&f^voe;ZY zzf@Boeb^lv&%Rgb-1_UpwU?P4h0h}IU(Bhszh;GEEgZJG$!+a;zZ`}-j`jVC5OGgG zwmZx3lxm-olf$Bf((Uv&f0kE#J*{zdGU+Z18Q$VkXb3nUr1OqY>aXVh014(KdqM;b zKNmtPTw>dpN5X^s-MtRGy0ZGtgG!Wxn658WXJBteK3KWHQit%sy8G&^y%pc|E1rb9R#OnGnsolsY zb+ep`8X~ZJFbfI>JWU0cLhm9>qa@eHw!ae)Wrbb^{?4VRT?gL@9N;9L_>LCrabzLu znB?d1*=*Vt7uX4qqI+{{d^V6aTn4StaVWFug|~Y0lYcF*88Q&`#qNbAbw?r5o${s? zOx3ta>T()lI2u3!Xm3ZW(0IkY+R^Y{$GgFpmmBD#|vW3T^Ey6oHd} zS1NGvq&EX_rYmMYqQBC*%hs4HS;X-^$xG|{oMfK2?IfYbWASz*A3;QZmxj40NtXVr zIY^!!5lAo>Ra7kPgx4m%k`=|WC zu@ZwSTgHL=>wy4Kd?eLT+`aF7sd_(kF@0rx@6;oPOz$fmm)1KAPrb;m{twxMf0pf0 zX11~K1pze znIBncME=NBLZl+I$FN6}6V`hD6h2+|YryF?*A8F$@->b{w<90|0<5g$nB@o1*OGwc z0%0;4S9nEk`Sh#Hl7^H%D=lsj)gbm&T^()Fot6`rU4Pflj@P29y%!GwZvLb(`9?-4 zp5C@@*mmp}DEjNRm}x?GPWY-{McC4v->)oQ@z_s5(e|7BZKWJhpP1e~P63@fR7fRW zv45{mmE-VWe!&A)@Lr?s6MlgQ)VFI0ybagFE`nIWple5FhTcz5fQBTZc(|uW|Ja>= zq~9=ooN82dFXJlx-r*NY9{}kJb5qP?T(t1)d0-u)k1C2X)xqXmjolP7MSwCV%>&X% z&gpMLba$+;GVedc_lS}>L4ny(fJK>-wTBNCK|`)N9e;1F@U&8jQoXhGcKq|b=khf? 
zmK*AevUR1dXxQI}x8ahgTcp$|t#PKB8%TI$sdlkja04Q(h@}Ss-ON#Apg=y}tSG0B zO28Ki&9EaReJ2+i+%$B5GiccKd2Qeu^M%I|zpP6;`ThUiP%ZrFz@KU9`TbAo7%Kxa5Wh~XMiHx; z>nK1I#pfhf$z}hpu=WaEMjq*W9$LO3p>tBm1tRq7?m;>$Kx>nt1I7Iv0^)98LA9mj zwJ=GxQehSk{%OSoxat5yclJo=`;~GozNzXHT^Rw(j@=cx^sMQ~c!FK7_ue3P&PA(C zb-a;SF*Rr?Af=JR{JOI3+iCkj_Gy&Ktd*7l|1$#_{-!>JG{3tZAP}NB6Epx~SG|6{~j)@6q z+xo_tlK_aUD{}r=&Rig;56aoWhb%TQ*@_<}Aykn`;+z8%$WkL0o$4bwTZoK7P z>0c-247~2d^C|;dmdg=@;R|u=X*xkFkso1~ze%fNX>nEIKP5S=CGQnyww#Hss{WFQ zbN7R{wKe7XL;PY_|CiPg0i zQdYKYmhk_@&H^j-XXY{pCem4Y(}s&@!ID3EMW-nE7;kkcAk*460rW3&)+1V2?( z^9*gYWd5UN>=fRsSn6;oSc$f-E6$DeUO$uQajy7lrH`2cHhtDFo~?|d9GRzi&q9lw z)_cQmYB~41R;m#Ml{4CDUCj_fY?VEEj=q~b@__R;_a!0Z(+h($vM$l&@YZ6hm=4tB z@;g|fvmK=4*U`AKdw7&DJ_Vsi9w7T>v zbqT|JMe@g@DHcCj`S#o+!`7UfzEzleYYg;OY%l8UNYvyJHmDc%HH_+@gEC3UShh#= zHny0ot+#zMV_^O(qtyBR8BBi?Ye2Sf>(gw=7=`X~Wiqb3d+rhE{UxtE!q9&chVKis zM1TvmHp$9%Q*~X!DsDHth2aEt$UU+dgK>OqcAIlX}F5P+RFR z=T7d)ZG#X87d1iQz11<9lq3Dddr=Cf3Z;D<1$Sm~>22mIQUB{%s?qf&C_mQu8HX-D z%9(WeL&f(I3UYY^iIgw2RhWV*k6a>LUGWs!z82RC(z6}dj7%97%q*NV<>r5X%V}rG zQCeSyAgTKBbP+G=;%74%NBlPDlpU$rmK?ScLht*ah5-fUZk2J$IHC@q-~S{FH67_p zSYcG@rK=}~5Q(LS3C(szPx}sEFNslQbuE7c#+$d!6egJOyk<^oUflSJU+{BU8DHKD zxc_JqvXuDKtokv3VUJI?4>PuBi%f9jUt`a1ODd~Z;PFZgj#)e%&{H-K?VNvmF*Iu5 z%P0%0m_@h=-YMdN%&MZ~+Y7$05Aw&>O1B8dBuvnVCA>Rcp=TtKqTPIT*Jt-K(LqFI zg{XQF@YqOTQWXGfC)t@=1`h-ce@H_h@oRE0;-Bpa*+b=S*$_jLz(K=a2E^*e_c&W6 z$0V&77$!lbzV9#_i`_6BB2!F@cg4^y3}RLSVkrvB)U46Ng1ScI)xF>D-or#$QTq5SoRs`Mx84l&odM$ z7F5R!R98M?L(e!U(>!xXH~JR?5S5 zc+Vc1oPj-`{fZ_?fE$Y7#=0}CxtVrnpr_jkbySdfVIgJ35j<3SJAnOp+;h?lG9THyh!2G9ljyX%YG2nQ5wGcf)i1TdFVEHy&4P8=T%3%r3Vf zy@T$|9Mb~MH7DypuvOFmz_o!QEfNJv9u}JNyXYH>w}n|<^i&U#({YhXT9>l|6fKZf zgAe0v$m<*E0lcqN`qfw9TS;pszIEVdJ-v?^jv$Q~DthI!ZgJ`0grvafzl?LluPIIa z6n887)7I>Y@`uQOb60Ar{jhu3PU5yoLivy5E34MbZvclZ1_j*5Gb33Av%h&P-caC{ zHu0w9;8E&II2Cm>HNSvK=H8^0E#%k%avKmz+mYcuUzCT6(`=GRzaOEO>7>1h);*=g z8x`8tNa3p#w`2qo0BUeYg1^h-JX6U}nV$WqHJCF@+8(me-3u|xw z>T?_lfEi3?H~j&3t#U-!2y~dSslIXfDh@@qQ55}kqDxsWGdW|UB}m;y5b*Jg+cxn8 zTzf|>L*h}Ef5i4F1vQ-FpWLEzAb7^<3X%isHQ&z8+6HgG!RlO@?8!{2bHQuuGSgw< zC9$%y~ALFvK{_TmAQE_#Og_hihj6H)(-B+}SZjGSVUp+FFvCrJ%`c-sV87%MU zL`nQd92_uInYT3m=^I;Ionu^=Gw(kgUGd1WijPI-=yfR&Gm)r<5`)Qjfub5PS^K(F zz)yR-Z3)Pg3z4!(j+>R)GR>DS*X;+Ck~<;U?#o~M^tkU7>SRdlpH6|<6(PRgR3bEn zo*Q6M9VB2;$@8*W3=9kgg1Yk`dN~d+iVwj}2VpHl(Xr~wMCnMPLvZiFj0EfLZlR91 zp47qVud{>h1Fub0BRGnIOvUQa1^`$_yx{L(TXNYkY*VHDXeL1-dE=$la>~VxB;lra zGifWc=0>2JtV&Moft)(j{nEhSQ*X=K1|qP9x{Ix(X4Qp}%=Y!$Pv)$eB`cko15@7{ zQ+qDxW;(mK`?W}um(90}nPQ0^0j~eEz_;WF8n*gp;`^emE4W|%H1aI#&Dmp`>QTC( zkDUWQuNy>_2_9%ZXiB$nk#8ZIbSX`%icZ<>LtAZLg{byL0f)=z48uR5M#4?(&uCdjf|*4)`*Ne- zVtRNIEAFDq;lQh}J;kBocazL|ThYLkwiaivDG;d8ZmZw!Q7J_w~)T}Jia*Hc> znxo~yK_X!8QrxLWapXRy<<=Ad!9l4=fFn)Bl{Q?6BN)>jzOUq(!>qezu$0Cf-SVODL z@bE7jY;W~pul}~;Z!BxoZ}70go)IbCfUSy))K%V0Jn0;Nbm=4_KbozkT!=}S5^^BU z)jn$;xNW7o|Ec(&XU6dQyfWAI1ms3$hw69qAAc2z%iVq(RUe}pbxyg@*6}#0hV}%M z9;E2FBU9$A*BM%;kmJlPD5tCav-vQ+!Wvueo5W)$vke_G^ScQ_fFQL z8&}REk`I>n9}t@hc2C{SJnHVmpAVJ6k)WJMG4^8lSC$RAnCa2!`iM zx}U9WF2?U)%z%ejmVJ^xaraZDSIE9Tma*t4m67PFtA;{Dz_C74zv-VUn2sXCN|NDT znUH=WX+azza6e?~(qp(kykO}s4>w}JNjhY+#8W)$#J|G&jjB% z5dbznn|=LWVtTz_%Jj~`zSQR`pFV89WHzV}k6q_osEdeBD717VSAf4=*gZ3QGgg8k zFI>+ux^dIYC1JxmdJ&LlyN|}(5yM%;`420xI$hTT1lKtZKh%BXw1qQ2!fd7o7Hh=y zDdt_@^4ix{5J}DmJ2J^By#lf`BJf<;6H6?JZ6ZR{RqN?{diXr^-FV0}nX^(&W|0Lw z$6i)CYGlOg&one-pie8z*vODKOw-TaIY)vb@s75qCrmfC=#ehx7sGwV`h6DmX&T(v zeqt!Slnz_$-JEjb)VlwpFh#U|)%$yK|L39xk}nd&?(tAE>%~u_KAWEd2lx(dmW7+J z(Uo-n%angc;qJwT^{~{I(2SSWhYr{}TGCnvsgrhBhdXRCH}!5FDgQK)e}eSCy;NE& 
zzX!xdTg11ujatk`8e*pOKCtPNCRpSM&T)M|sH3)QmZkE*NNb68QmQz>j=lzj>dl%z ze!Tjz)#yDKbd^IIU7J~nbc=ojdO+RQt;=uI5CvPKJ~QXvDgS{M!?Ufarue}9%62#Zoa4?Z>18yJP$L(2yl$btD5QQ50*x9)HgEQHGF*(`mFEtd_Xv)-ey)EO zyW?e;%KM+O#*jO5z9$!}F$={` z|GFCc5U!6UUs2O0#@c4+wReQ4L8%C_XH$2m6DCs+Y;A>-xsYb3O5eZWWy#IZ`+zC% zSGDtr^3MN#Cw69+)X<&U=doC6X-+f=RnCEKH0ho5^V2#NO4-nRwwS1RyF&2KRZ2kC zcB8)II8>eH*B{%C)v~W~w(p>LkvXJ|9lRfJ@$tO;f?8XSU6Y7Ui$HYv*N#!K=ig!< zsV>p4Px6Fpfy;5r+QjhQ)-wHr95TzEL~onn(X60YW*)^lK%Pa7Im(pc9a#5 zR~w%0t3b86(X{gDVVMfsPsERo4Bd&CRG%DjCySX;jhtnUkV#yDq6)l_B@=N~qQ_JI z3>W;4C){fTej-`*I@s~A&Y33=NOTA!camqBmHbBQXlU&%$Z4zK+%b2t|Aj+lIj7Vw z7RIhv@g2*FwV_yRJyYWzv%TOZ?HWj7V))7wd3{I@l%5(ee=4tx1cRH_cZSDRfWl)T zI4wMDazA?!;zdF@YGrgR?q@&4$L`(;uu`wGYZcjsMc9tZ;jJwd0)rw&jZt|xrtZg8 zuxehA8Y8g)i?V*z!Am9<8d>AC)biUe}Z z=)|TXrP+X3JHehPVvkyeDjq2by2Qgpv&OxD0{_&;+t^Z8p>Cn8y=c0Sn;IWh>;cBE zTrE&j&W?&_glpS{2)Y)japfr&nE44iUS4>w@Ztv!N9#1_G=FF(2+2m))qd|sT%t}@ zCs@xYefQFVRm8ec{AOJ5@f4uN5wa$JerjQ1;f;b5;-I7TjAixr6~R=8_P$ZW&ts~yst~GaUh4h#=cIQd;MNR9vekqSMj9LA|Ih^Gi5|vrf635>TPvnh^pl=sCiyW0{ zGwTt>xmE7we-}?htJD%8kJ=<4)tn32<7|726jOZ@=pL-eeHU7zdm(L*O0DvgXFrk7 zF;RFvMgMZU(V_%YV$Oybfnuxd+dbL=Qr6lIDI4>L{k;bkyu=s_b#I*Q_d<2cQ9M+$ z!-GPZ#!`%a9_+xFsR?qL=K8Ese`~hpk64cBAbFKwp0gZ%xRqX8+J}2(ddqkAFW5$^B?#j$_ng|s0UnOk z;r1YcYv6lEt(7 zzofKRHY(JE5n`UJCu^l!NrrL^DQ~U_|L`-8?r}M!WapYZ7#q-k2SJ#+R6FX8 ziyYiN{(AncoB1qGRwoyHzYLS)tH$t&Z-{SPi*p`>0HZVAxwn?hD%|n`JuW7@zPp}3 zB!u}ZjE!3|B|4r}aY4jfc=)=$aH=!9t>UEcmsDP%DukIHG@5j)-2X&-+_){Q_<_5O zD&g%!cO&`g2;{4F*SNFXQBMP>e1$=C*zfY-c=ZvWt&d|To0&T58f7-=*8J?A(Zd_- zS*M;5U|3)FH&a@_(L?J!z9tP_Tdng!n;M{NMiS~jxZ=*5t5lRJ5CItCPRF0j>XK>> z)+7)f_~^;(FR9D~M@G%krN;hOBc7*g_#LkeiFdRT5w#uiC75V0UT6rFsBNZ>_6Io3W0E}hDHAK12#4}=ZnT=fWw^pBZ<*aQyG-BL_Ee{r1~ ziH0xow2PiO`FXEIxf(N=GVI>3Go9`YUdC(fk0k@vuQ%_!f_{7olhmIC%yY3Zz=Kx#&#VDN)iQs;jl{O zvAGld*88+iMH>E$Tx4%@#Y3x~dhV$)`%e4emmkIQ;3=tb7$X}numHgUAJ_3a$CPt! 
z4}k}@+#A`5{M!p$x|cWVkasD}q1`CJ>fUCAMh%^r8aA@<@&%v~{MTNtkA9h8LyzPc zX?i%f-%`qIdY~x{v0m;cJEq*iMP)DG&2X?pC43<2j2%*ey29>dR1ecHCPY{=U&*XBooI{qIWj z-u$OZWOO>sJvMaL{-kG*)-3k9aHQS|mR_M7N8L&o6MQfqF7N9Xto)Xvl9Sn@$fr?So%)B56@m-q42AL7cI=Ybzg7DD!<PZ77yw zVtHm??_e%TCpKo;bos9i8n2q@NObdR3~@-p1_%GoGvZXWZlT=~$ZEudfJzL(NX#c` zxCQ^eKfCC=%OsOpvpv0?I5_d=^_`;Eo5_jC5pYm_ehTLKd;d*g$U~s38`zE?S~WQL zO6Xu48JE%%MfMQ`_b)7KrOia*!VSd-ie6I7&eafITPxV-=@ek4Gm^i z$&&hcsRPW@y2-eydFS-=`U-p_x~D4Qo9hQ9*L)c&W;OnGD=4?B$Yc%B8|me|L0><& z5Tow7Ut{ox_g0)Ut&xvI{c}ZOHXSOJa3mwDP+_te{iebGTBoff!Bi+88u_z_VduT* ziTe5yFJ@nj{j3p=eg58>u*3He(5q1>P0()0NkdU^Kmvl3rs590ViBr&P`2-^D=i+B z{z1RBBRXK1R{gracj&$x@CV3|)Amdz=6{VqMl#%mV_+BLb!300lCtBu5mHJCr~89r z18_JE7^Vc%R93NpYNL-6Znm}61nXWr6`~wDM5!n z$(|8{W;!a8^CV~ee>~C&*5W4=mZSlK(1if|QmlS}}A@)-naaS_`?N!ooERg*?V;{`} z5@$5tr%jFxG%WUYrZig6WO(_TG}wD%W6{JSZtVQ-rM{8=S;M_+?);poZT@}KuCLPf z?$=C-bBvAyo42mC-NISYTqjWVhs`OU6@Is?pFUpDg0^8lD|N~LPujg>nkzy~>ZhYw zd?9a)1W#t<;*O5T2Y@e!Hyjcvbc{t4j+@v()op|SC7)#oyF1kzLU^y)0TlnD;gwJU zjvP?$AJv|mNF+(d3MVkBukm_!9-^CM!fDabKi5MSCORA=4C@BxWu}#L3{-D<5P(5_ zKH>|;@0p`ZnYs_Q*r6t?Qk>|~+VPJ1n4N{%Pd-#bQOGla&I`c`z3Tpi zuQY~G%w43f6}LKY-*|!mR@UEvp&2!}7$(_7MHK^V3THs?<{;uWqk*3pP(# zPhS!benRITEjp}B#C7`0x)b47=ij-xdhu>G8lsM2qms>roz0&|r zR5K>IL{b+PPzQGSk`jMFs6h*?N3@W!Iz-29*z$CJ6)d`wDw`4(&< zl&5|8!F+B(#+8%tGGT`rr7&ZhhKBU_6r*z|lF$yNnDJ_xnTDvIh~X`vQ*QqD6~pT) zF(qEjt^6;Q$*83%?e`u-XL9y7zzb$NEM+mZ&GE4n4>E?-P9-dle_l7#emG|8e#Eik zzG%f_?2lF#QpS0K=ed*e0r_`YtD_Y~x5Ptf7Ut#O%0HFVFpI$cb(M>gixVDZX2qnU z;Wn-)zoI9fE?-W0jGM2)q|byhr_G%P-1I$)+3*zYu{qTv71!5|_Z_)~N%QRpGch`^ z_i!)2+ppgJu|ws7t2DJVQ!!UEnhP(P{rGSO*^*qgo$Ii;Sfd>e)qpkzrj|VjdR;K4 zH%lUVS?9g(bNGRN?!k6#+`SsnC>2_hW^rXC$TGF<(Ot2?n*l!+5uD8q5o*rMTTw40 zc{@KSDzJG4gNLUA3x_!;zPWbJ?Y%L!ItadP4nph?UR&aRVu$-wB)J>4+acfm-M7zW zbf)Z9niEGjNqH?!MG=yotNr%jA&t+C%oi zz+rW)M=v)Ek7L&DO!XJ`WKmQPCwAI9j@LHN$JW07hycr|7eBwW%n5CW&IR41aEVrD zt!pm;k3v-zgK)j8QfFO~%8OdI8mx%=B{jHnfxA64lB&Pg;4kFZnNx}2Y;OS3Xo53I zq*?)z+Lyn{p7eX@6sF$K0w`uKXT2IpxbLHZD(XJ1t;^~iE^3&(RB~zG(AR%rgRswg zC!Hwc=u5)r<{f>>_SX4^I?F-aNFgDDA5I^dIu z`n@}pLm1c`a`Vn16R3)^xC6{z0+Nqv_=*(eZk3^8cX4l}NyE$S)>cB2bK`7|uUKKa zi~KSe1(&k_N7t9S3Ft(U0Y)2~`8L)ji;xu?;by2)Cf{4YVzP$E2-+VpYrU`d$K}gs>#F>Nj()7C)~eQ8boC z<33dHwWw%J_H`y#qfHK2LwnY8hfB)zPd%`47t|o&{1Gt6`OUiSgU_Z+3~cfrsvRJH zvNbrEndkE-)kLJ9$f-Fgqxf4cfZaff7>GY(c<^5|Fr^w}4ub&&sw>Y=-$D>Xk;w2Z@DgVbFfop09mqGGt) zNjv#1E7P=63>vzpi4Pcg&DRmhVUU%*?~=~fYRb5I4VRr#V!-fbj@!&NT2^bBMHZU3vF?;11XtG8Ey0?Ndi^sm2h37uuACxrpq5+~$BT!42 zXvUR2PSPJx7?X)pVO~3$WBV8ACnSm_xSSkMNM^jsI57`CzG5Cwd~C7m(sjKhrO4;} zTO$|De=PmHP~bm0{PrnRe|u1QL8XSFFXvARiy&|&w+wPCJN9`Dt3V3ou3nWDs#Zt4 z7M{)kjfEjr^@1>}4QF*512T-2j}emQFq5uMv)^()zZCdulvv>FYJ8e}I^|ls35`7R z@Q%hpl27?heN{DlAn)IobB^kClb`XGSCWLK=WecQDm05%)fn*(5r9_6$gYyU)nJ~X z+8V5WNC6xc+j?J`Mw!NOfG=sR@KW6W`6cywG+dJV5_|h;*@b#@j~P)bxgxd9K;z~w zsR!Che~*&;ncK%>o)7wFM;ZfX(D>crMz|&MP)Oh{iRCl6hi;1XOi)JlJj~o@vNBfU z^H;;ACasHb8lzJ+$YJt<%7$TBee?{N6p-o=}-BhLZkz1R1y6&dO~}rg9(SM+Dga zSwdf`U)ql>p{8^p>fzh%;Vr`UXsb&3VcF7VUpXhH6@6n1z!6z#vUE^?m<;VIp=AKp z#Ec&oPWKRU;9(XE+y5p#6gb#F;C?4Xm=4LU&=ELhk&*wiy8FQD_C=fID*2aGXUcgT zgBQKX>+Xi5QyWcnObrlvCUw~X$_3xx8D-AV{=Bi(ua$uXLH~Xll84*X_IOY`Ky(g`NK;#cC|+ zFSzO_szLOCIce9{H41HvByZ#^FSYxW!GM7puu2Zt=3BPsU{U7S&JJ>Xnx92QY93YG zmL;I00x*Q3%T0b|ay7rC&Qw|&ZWmc;ZqZu3cwx=PdOAkvZVQ5l5A4b<^jg%fnjLdT zGhAQ+MZ>-DMV6PyQU{B(t4x*@ZvwO$-5ZPJx_$4$G-}gfE0f}9mIL^Iq5UvB(%(Ek zAU_YFLF6RgsWxKsX_yOyn*tAm%yZikyNM`F@~+vE`k{T0k>AC|+KGBrKOS45Mi_s2`nmtX|Fnh-jO+dfK!tivL;OZ}Q;oE?cbx~>nzM(8rn)Ok 
z+@BH6Z@r>$i*GZXc5`_ov2#5k{Nmtg%dcV5rr+x0DjjRLAEBN-#-cxchV-C->i6JFPE5ja#_?`HYK_I43t_Bl-rLUI33m6nV9BR;o4~3M-e9vWf<*q zqk^vRZ{;kneGwYGPa_qLwokrk+otUsea4cu`ut033Me7{0cdPCwGYN13)sZUU>!B~ z-BRSTNwa!K6fu7yEw!$4)t^>0G=Kd~&b=B8s8c&BF{13HtRj!9=tY&R9~sF%)gHFqA&G-@jqc^|EO-zOO~Pq;ZM0={@uJuy+i@kG90<2KL} zR@)JUH*;SaQWp7f9Uek%Y`6XKq3n=iCvf7SnYi0@>wY2}YXcHI_EuM0_x~CHYzSsB z+^(UCJKA;LB5I~Dj8+2M!q|@Y_EyM$Tq013KLnKdkS{Hyp@BQ*o~Fj&RzHges4MFm z$~n~!>uWpXkS@pF@^>>Hpd7u;w4w*e}&XkqA!@iCxc3!?MCW`Q7Ue-D${v8>7M;Pbi**`LPlalkWA6 zmFp#k`qV3b=HCJLNLJ@ewK@Tg($Bg_J$qE%FovpL=1SZL9WZrn@7bx5zK`_f_24`o zVlRK`&@0JoBk=h?gtTlVQl1p-4A)JVP+8s?utgF4k_NsqUiUF2D3HLjW;F6BRn3d& zn$1jxG*ZWu3XNXn=rn%X2X&p)v%oDkMq!&XTXtzaV>u!Hf(hnaEh+*b?GEiq__Fd# zN`-kbVB2v>@0ptKHO7hCQ5oR13PljapUuJm+28YnAT}^9{~ZKjIDI6T{VYQ_r~0ls zmU|9glCu>=D(V36V2V?DQe`C4r~j&U3z<(3Lrwr#9kuyrvzkW*ruEO^r=L&wS)@6S zRv3X4W29dXxgqab{1xvymF88$GgJK~l^x)6{gI$ltAfN+t7bO7u&SpZ7QP_V(GrRKXygh zX6I>RB6XqD7T{x&(6l|U{Eln?`z1A0SIE!kbQ2qrxBe=N?Q*~~C9|!(#g47i!gX(Z z>k?PAO})7eDYp>MOYOHAh!@U^euVQ;@Z`rBJnLQPm}-I7HxL zsT&4`@O)1QMn=O4T3IkaRDJZWx9cIo8S$7fW29P>}m6@tow8u z-5`kg(1f})9h2rwqf|!d^kvQd&arY*YmbC;;D9iF9})z=O?8`sPgQFrFMTNpMi@gV z$`5z^ERqPiF289U49gMQx6!9+iO`?&19#5KW$3(pTP52yTsz%&cW<)EKCh)kp zWM=AU%pda1B!fOsJOzo_YFQoAob;?w!Ro0^(^R7B9NwnU+P+Xo=G@&EuQO%u&Zmd} zejvW>&)u1mjp*JSUFEFwPku$ubWdeIPm+|-9Y%$8b zTi;eCr=WKJwJT^#C%D-y11(N|KITlhesUp=N>!c zCg^PYN&88Ms4}&myUDHmVZ<82Ei|2Ttn{`!t3J>f?i98%;UU>CbXh%|mDE-KqK{Px z(eD$&`Uf`;=z}}xlTgi-n#-hj zmwW-q-+(7^#jGfU)vxFLyPDyjJQ*y%(UoTXLN7j(b#66A`C8E2Y$*T;=cZ}2hpx~p zTY^#z29rw7uel;#!I@0}vi-A0zH7JKougT~lm)0I1K_7Xc;&=T_>X#yTMyNsj{coa zmQOl-byBZ8dlb9NiEcgzZf2bYg_9sDq()hpK9DgC3JM`Xhj7G5hoVs0z z#RQeWdo&Z1<82yY7H}PPfzju=)S+RaddAN%#EkLCF&E~4!-oH6v7UvKIt$7`E?U;g zb9$aHg$@CRC2kjT09xIedXVMq0%O`A@({0gZ>dHbRAFtflLJaaPk~~~amX3(;Y>xE zJ_mc?7G6FKy5mz9I|3W9dQ2EEiEzs1SDw)%YQS+?XwN(-%r_Y?$?B=ga zpm7ix?*u$8F@@?V+FbE>BRD=^xQys%q?-Fv449a>scKWJd`2C*5dMGUqnP(>FNBM@E!sGo|K1mq|L76}piZe^D4gu|kVYeKN zPR)4#vAZF(>Ipv5^l_`SFRq?Hk%O(N4X$aitt-0Fy7@&?{3fZK4f$LYFnaK+jAIBu zK&>W&C+ux7#^!pK?|km73LMo0owgoS*|jf*hnx-LDgYdQE<iv|Kxc{kb z%3wMn9EwAgd0ACT>sADH^-ALq&d|S4MP78lgN$IIoznd=+Anf`Z=olpec`uqF@*8* zBnK=;%%@u*BBu0chi%k8U07EQVe0ZYlCW{C<1q3FdNt$p!WCB zgjI((z8F(XZ@D_r)9FT1O-5|6B71^9<+t!fjQ@}Qxn68Y^8;es0R0NQ1PASY$kK?bKylw4g zpB_)PKdd@RXpjnz2RA1XMBNvm4S^4X=Te%Td<}no4q{y3j zf`FL#GrRn=o*DQbmKdI&ikH!BMBtQ@VKm@aHfei&u2eZ&8J=B_OAvXN&%5~XNQ(AG zIXdy^NkcH^lAjjP)AS&oRDcUyZ(^=y!}qo7PMQgN62BKi&{eS3OO-S&3jE!Bhx8rCapp*6wIy z@j^(z>|L#9<@!V1!kWc2_0ab||Dw!GEc?Ls(zM`QYKqGG$&DQgpRKv~N#pOVHDBL} zD!~VxJnE1|)sM%LW~(8$J((*9mDEsaKyC$)&Z`Ae#LGD9$`UufrKQ0WV0 z?4?CoiZA!5)7GyLQf=>mVpmB;UPC$-57TP_Xbv>E2Sz1tC}(VCR7?k;4wf2D%&wj) zj4Jccn$Ti04GHm?Cr#;$NKm&|*C@IY2{Q~tYIaZu`rmui#@@<3IINwpDVjKlZ;!}? zPVDe!+@S9EUbwRf%Twga_=~sFj~}{D0+;oCAlo|QZ{~<^Iflu#(wyLGUY}lToMvnO zl7e7KzI!HYzMraLaO0Jm!7JIyjO3ORUFJ3h7qXZNS&2s(rw#J$pLHhIlA}sOwQBo6 z$t6W1Egg;0Y?;FenC6#+m89&QGME#hMccjRC%eduOi|g z-$McoQSPQ@l7+CRRlq5Y9z@W{vvyxm1$)MHH0xo&*+0?+q=sbUU6=F$!$KWI@pUy! 
z@^ij_kR=Dy5H$2pY)o|ik^H2Ro;G1k+&6H6JJURQMCNm!o+%8`cptJwk5+dRCmlz} z(uWB-sqHFaJ95j9DDD!Oy1lCH>YdMOPcTu5{w}%%_Yh4rWQ3Etio>m=j~cCg%(-mc z@L`^_NzHs1n$#70dff&gl4Ge1)&c!d;7#NSvapzOvf%^S+wx(9q^@jW^UnS~q=qhI zj{~=M9Sl*Pml?2{&d~h0_cBRKTTX_kikuElkQN+zwK^QoGK8*AP}9pZERsB9S^4f~ z8~A04AC&2nmODQ2>~eeSO(hJrfreB63UOV^^ty5ePb+tn_P@Su|dV5oxM$N89g!VRbjP@L-Cc;-X246D?5Uof}r@lYM!4P zBYx_7)D#nY<0XjG_Vk;7r;o4p000B>>mKd5$-<`YiNUKD88L#iMU(cHlg-3NDhs_T zcUK2&8vBag*NH3m7DaQfS$C~I{P|{adF(7G_h`;p;xaeYhS}A_8ST~^!OT11{_vn_ z&m5il&-%mKQbSsx*5dzD8Kvh)GQ09JT5x4)tqfPve8i)&#t~(6^H{3B_^u{L$TRd; z_vmV{_B~|y?^=y^pEfZ4CHxn3^YakFeGHax?^foIF#5&3x3Vt|JLwGSLQUi<{va zBK9OD9WB@{*1#NK304MwNlDzP=@(j%?YoGDqL&H{4$bLaKkr?o-s`UCah#vM-O$&~ z)h}WDe8JiYJaepc-&fCU#d&a0hB)ob2*T?1yo%dZSWwAQJbdk)hZCn}2+i~W%s~zN zpt+?dX5DUq-1up&D_$)N_%3N3oZ8eG8=h9;u_WT-m#x~t`9nSK(wRn?d5zPCAs&fUfsQ>n9hN%t`-cNJhuhp zb%8w)d> zwyJBP=J@&DGnEJ`&5y^Os%H7q5^Y!8hnrsHg1}{w;bi;bUsAzvtxxkS!9V5*m2a!W z<05}s@u_01+kIDb+=c>ovS>Ic~6_-8X zI6IavePurIYE_P@6J)D698ol~_bk*^G()gC^ zjRNEEapN;6dRm8R=d8R9x2o0XQ58L zZvcDAc_>L#I&HQHDq37Cn4FzuTq@aS%OmHn3)90cHe>P=5girBkdwL}&&=m)sB8bZ zC*Hn?v(4A=RD5P5RE=%+n%V-~ofhN!I~;h(3AitLz=Kuha&y&xQtN_aiD&T#RH zwmW5*Wh~L!8+TB`ldF}R*mr3&(XM4`kD8VR!a+ARk7%+d)RTn0 zYIg*duzd)(;q8gByGCsEa=5L>^L_F~s75e;jQckW6Rpw-=1popFv&cAZMm7B;RYy- zJu93->qgBmY^Bnd;`MNt#zf@|E7o}hqy}4rnlw+2$5L1WLhZT}9Yb(w>|oj94SL#H zbo1rMQhgTs^fClXIR3aOIP*we+;!zVUEDD|i9k-mL z8-GbMjaC!1L0z^_-f9Tookxz~d(-z#O!@I@8sPAHC88r<0^Etq3f|$5AO-EiH9SA> zcPXw!xh=HUmJv(*5~F1itjDiz?a;0`iA>FVD~+YP3_ko%HEP{EVFn!h&eLNKl{&~Y zpjg;*yml+B_XE#~hU8BBU;|$d}_-}P4(9LI$lAOLHhO7exX}fRDo!&_yKIT5UK!=v+xVc=n`?JLmS&^0v~ zX^KT)a%JHkCr{+XR8~z1B7^hicAW!e1O6MB7sq~91Su)SvUwhh8YUauQfXsS|NIGd zZ|l30RnJ+i{4uq3_J@7X3u3$7|2XtF@=49gGjwDZHSW2E$6{1!$i`u9mEYy|F8}3y z>ym0VASs@zidUmzEXzAm!(3zkv2==rQ<9YdL4?8d`n*&Y(->!$@*>uY7$I*mn3N9>WtmL`k7{K|DvL=P@myBZ<4Md zkw$-t@;tdc%+!NNnO2jy@CLX6>bcaY?@0*5;XtTA6S<|F4&SzdO{WyHUW|T=KH9 zKo`3P8ou01^$f3o9jIGs9J3qIR+$e@O+ua@*RtcYd47#R)P%6X?319-1#0KXC$C*z zBwb(ZD!}=Fe)z$rLK=Lqlyo)$G)?5@aKi5zmX0Bqd6YL#!`#xmQ(EYTHE*_C&Lcd?KiYqA;HH^oRg-7$o( z3bC;>SV5iLR?e4k<%T~g?DRG&o(OwzOyDGNMl8EI1puwL=Y$)&bipnxNXwKW+Odc( zp}nP4`n;-FYlujJdja^wzebrdTxpB?rcm- z5`^@kC2)58sIxrQVCwX3HH1fNXIw|Unc8y@=ZXn6OmzmSurJ6jusbh5sH zZR%&!w_A67KXk&Fapd7X=KQtz0Ot93(rSp42KqEM{Uo8W2whGt{`CFcp_`@1L5eOq zB>Kra4_IZqp<0NsLeFo%i4XZ~X3Jols=OVi+LfMsY4m@5<;KdniT%^RKmTw)XGg;> zl>K!6L`-V?xr-%`&H%L(YW?VPd%ie;9ZLR=uLSQD zQz^Piz0*Srv^z+xDCDHmV*&l1(ju+MQiOnM|iP&5oxQjD=Xz6G|H5Da%qU#!} z8U;&#Sb=weTYt5K$wvfKDI&*mr^`u35IC4Y;pJPM^fbrFI=pf<0(boQiUSReH*Dz^+01pUi~= zCmH};___SJAnyCg9f+;#@!y#gZOeCYzok&_!*S1un zPJ2WHun4_-XEGxa6I19GH?wY2?jU9&dxfa#!h(P@vEk*56*NxW*cwc|?_6(S1G}|B zq!$~hn3>}y8yLuM5#Ojeb?5&TeVavJa@T@PxBazXJ@(05T9@gBIT+6_Gt-e>lYd-Dga|C4KmiFQ8lfFN7GuMN80 z^Lm|3GBxZH_^cd_G?I4Q-8H~@(N%>B7AONQ8&lfD#=(UF=_4vFB-yKxorA*A4xpsQ ze}7lR>t)=Y0#M}+ zAF#=R#x00TemZ{B=H_@+xE%5GFW$$zQEXgXpF;&QG&||JPT4A|r}l)=fai}O>1o^n z(4YwMm-|GaadX+>!#dg(toQ;9cmpvPtBRI!ZpSx!ARl^kF9h;qa+vgkG~blv&-mX9(&d#7XHlWsfJ ziG}1kiE6ft;Kx;7`J0n-qOK0mu?XW{1P^8rs9(nAB%TKG*ASo?+RylORJC&z8lxgD zTIM}R_MiRtvVJiY7pjrCl`gWQU3`Cd{_mRp!W=6;RALi{OMJ$&rYi=MU}z9YdeMh{rfeZ7u? 
z=+Da+-L#c%?;S89w^t>Gat9m43K>TR%q4+%%NVDp=Oc8VPg?biRHIx$!mFsKqsX?l zvPUh?_l^+$;y&+*#**QW|Bs^cj7xHV<8bFxr)A5P+fs3loaJt~07szUz)^0TxpSXv zm|IaWcMcpNnud^?dMZu9l>;r2EEP>W*l}hFbGofTG37nC*~o}%qrH|WXFMI!N5*< zy{|D*Xn!f+YWtp6ms84j&Q}JqS^E)cvQ`xZZej~OVOQ>)ku_;ZMLT`_rJ|O9V3tN3 zjxG~>(P<+nzlKN1$P$^Jl#-4qzvbJ^IO7!QV7XFoMvK$w7q?@B;3I?@%k_e-ii7o8 zi}i<~)C;a&sSM71Qr!wD+P6Slj}yw)wLaBv{R(EYOTdu=5B2JYoV7L0J?DAz)vQdC z1BIuk7zczK|%RHx_ zRoxbN{=_4%;t|zwcty7TWv98rL#K~1N5ta=$yc1DPlTQ(cxkHik9oX3D)G!$JQ!Fp z=V>xa&B>;8$A;U3a|-z`swQ~J?|NLGs`p-TWvZy*i1T0RbNOorLN_2)H908SW&-y{ z7Q;0>R7}}gGrbsnRo{=0-MooTh_`B-w|zEF$P>PSKmvv?{O*yJ&1Zv;PfaxC;M%bP zOgEw4 zB&Gl7G>!##X6Y%1z8;9H*-0z4s4(pQied!{IEpQ9A;r?2n>0QSctYFdrt?WGe0!ZdQ9#>0jvp z4JCX{M-p#X~wBILC#Z}_b;F7qKg`CcFgqUYp0)lhf;_#TW zi|}v8y7?k;(Rxi-r&m3Yjh~xU>t$Y5(=&Qz;7>_rQ~~~zn%jQSxrBj($)^obGXI_Q zKgSC>6sa0|(Xc2s{PHdxWaf+((pge~QO78Ny8~Inl-5|=HPYEB_SQX@EBfV({Bly4 z>|_B}PG?cK;A^m=Gh3M$8k*Wz@SqC+Yg{rxC_&~%|8!nz^;lY#GX z7cKj&eP&e;v^r++xK`6mJ-^cpQ4S^jOnl$SiuJ2xt@>hzwL7FA5Q@=^s6B(YUbb|h zL~c_zxqEJ1?!f5g9P&#sUdB*<7v-X<+h~6y*93Td-)ZjWMRblo9_4L2zWy(74k_Mc z$_b#+<@YVZoS1=uK~R^kSwqsD|GHziI%A0+<%qJh7rgUEvyjmR6#FR2U`DvdW7Oa* zx8YD26jdN-6#Qom93?O>nEvLFK6gq^=tM6aI26D60@Ftp(Xwpd@=n?%hL@MA)aohp zNaIrH9pbDdn%9iS5990a!~q%}m*-WmtLJAA`p$^g(D?9;xaibGfn_srKMDEScy$~= zW*EJ%A9dorIfxw31UA$3%WNrs$$NP63c*PuP)Jue`z_VXB`XmSjDoRaVYdH}V|*LN zy^LAxa>G9^YT$g!AkL6;b}W9Ir4dlY;tl*M#6||g%B2O6lRHX4ilm?V%ietNLi2~| zU%cEM0f7rqFuW>_u+@fK)eMQ`!fTl&JP|7@jzJS|Xpn>YZ|(dL{#f8f$p<{%aNIgI zd-2mx##b^d{0@*W$11-5!K@Xw+yW6yfTurF!EwO@WTjxkFw-D zX7|lxy}!+RG!j7K(c6wPLU$A8Nvd{*4iPIb&&J5~x{9*vk07uWCCU`~D3|Qn7ffi& z$(=S0?*Im%L$@1el`)`1)s)Y57kuO{kL5Uk{IzL}kA*raP9%`tqwx~m`!}&s5_9;`#Ro3FXEP$BV#!}+i&QO$x{#D0o$(_}W$gFV z6vb^c!Rd8o5Lpn{@_)Ef5>PhSVq<~KPOf0G>TrtUfa+MC@UNLuA2HEYFglU1$ug*+ zgPNjO7y-q3*f8jsDtP<);gxN_b;nH!!&pUKs=6~F;kl}IwSId4+x-Z~lK3l&Y=Ees z=of3@6+ggrsFr@Ab?;mUsYeY!f_2$#-kvvU zd;3ckVM0Y9r%LQMM);Hr;>aDYXH?iKGelf}AE?1xrx=nP)s8 zxV%p42NK8hLQm9o&Q*V=KYHzc_p(V)3ow-SI7!cx6R?D#mR3ib(?pI>Tg8G|c-7Q- zuUm3v2;s(vuavBvo7t=Hvo|5|b5VROPQ!C=v!82&l*bSXX|{${QvzO7P{SHJs(QHQ8$x?jpMTbI0R@r%7@fmAl1+=Duz~EkV? 
z!jDrw9u4r091$<8OtI&skbp|*yl@N{2ScpL0lU4efXOpVb-jC9(sBg8AHV9cALMc_jYJsB?fIirkX|ku| z^kr?_$}iE1`QENr>{B0cI%Go=Vs=p;?>cXEYGGhw{Zz1{Ykb=J1$6~Gv*@dHtlcK3 znHrN+saFfayc;}%kjcA1$XvC^7PPks4{J@Nr$#4W^Gi~Po&slPkr1Oz9=mv zHD6Dl%WRb)3!~YY^b(6>mp5oDrpTFKqV+VU@9Eu>H5@beRCd#08{n8Uv1q90+q|Ps z^^Z5R3jGG9F%%OAmxZF)ynT1OsAFX3$biVSE@T>b*2083q^3GRpk3TX` z7ssX<5?%PT7M4a6fe4;P?51>fl@9YQv)#sxF8O-qQrJ6d;RbbKlHVk@i)e0a8{Rli z9WE7GKuTv%k{gD(3~gyUq`5E7`SeCsT5KvqD`4-9q;VsVAlA=9Lx_?2J;s)GoBV9B zk_((bVX4#vM(E8Cqt9BRXR%5O%29&Wzx)~_YjW(0M@MS$-5J2X%xfZj&!4oZ>i=_h z(f_%D#%+wpHQparze?Hw&W{06J6D6R(V2=*UJ8M_kx0~~l{)RUkvaMspuDeI9@i*^ z-ZjCd7}0<^Q+q@KfS}%J%$nu7F#szb=4(1=1XDPBErzi?B=sRlp}aW_hg9$c1F7UU z9pR{Bu4(pN)-5kLF}x| z4<^wANQVl=_3FO4_Oa9gYxTpNIvvRQOij}e48Q#QSeYDLg)t38bo|+*f7ZqimDZ&` zT^7htNwb$Hs*T4+9V;XJ>aG#b)!b9o^r5t1V!!RGxwWjaqldW4t6rvG@^bmS3Bc_;zcNbR4wsB&{8JOe@Hea>hfkMd;+{HV}u<)pq(dJd$x`xFMc^#-pO*y z|3R!uN@-qR(dIS)5GuR9bT&JB`c1JyXxolv6(78Up!kYomQ-?34-r)QiGjm+>zyig^ zQkVC+VpV-)nUZJ>WoxP5C7d|0_>+AjD7U5SpUL0H#6hrdIe^_^4!0}0lR1B2pP3Jd zK4GvR?-P1tW@%9#*jdt!EHWk7T9t|-EQ%r<#Z_|!CA?1YM2gH%u=)vHk?Xc$NO~KW zJ5urZ;EpT5Aym{kca{u>(-(y`JVR)u&2$E4Dk{Eh?An11=$Xd@3bW--K~aGmvMm&I z=1m=QJ~l%0XUc-FjC&6>a-u^|m{VLlCd;?`T0Tif8zjkiMO*t8FQ^gkLVVjPKIK%R zGF)xy#^-JcQ&Qt1-4bu z6pJ!V{{c8$N-gMS7txQj5Lk~Mx49;Z3PC(KbDYaWw54!~hMmjN{BBq|vTQbS~S$b@f^YMEBOcyL?u`wQv=rvc8Lf5^cpg!hUpt|O5aDkk7 zoeUD&QWZ1y>@^MWUG(q|EX1x>PDMS!zpCi%4k=svo?W<=e>infIinrlA_)N~Zk|(> zZ(sM1TGVb8!TMX{jYz@)#l7F#V*_V#7(DuOvxlnWOA-rD@2a&mv+TlMob#Jv2I6Vq za#rE_Y>SKzJpd}26#a74A}Yc|DKJVLgl^4VcIv%q`=rf~{$Wb~n{J=~v*n=TB7dLh z_$k@ueCyi>lpBEDXbHg1sb${Em5^cp!uCM*xCe2ybM1KeJ5r6TsY8#nWWT8!90|Qu z30r$~8f-W7&kD1mrP7A3>3-pHhEwlD@S~pu8x{9|;9oomh7+U9cctTSdj+xY9LCNp zWGZ2FEm4TBO~533dXM(C%x^lrEqSbQ@#pVjawUQTDM1WlyTzrzx0wn`ClaljJRdH| zgcnkCCQ`J|KPFftv)sh3({W0-!nvN2d1RY@N4c{~cFd{R2)*J$H}KlmR-_SczW()> zg1aAFs#NmZsW@ zm^#|!@EgS9Ftt*xi7G`G6hO#AhPDoKzD4az44$@YbX8pUHe%W>YC189rDL)V%-S!y zirgFwY)V4`g$j=IzD9s%x;xhFHYXT3{g@S2wudTB|J~npva7;U#dRDE9>Fi&DXVdV zG$i?+smOW^%M}490A-gnWw-a&&}UXwC{&C5T+vne)ZXr`yl~%cnGa#&Bj%i@6{iux zUlOiw={&qIms^(p|6#4$m)?}!P!ho3j`XO{&Kvy|Dw<)f3-qKZ|Hu+6Nq#Afvce-^TpRdO2!q_ zJc7q~hKxQXN{wn+5G}oMULZpRq}a@gBzakhcz#k9)KaTc%+ZPprmmyk`NfnNTnrED zE>Z8elanCNsHCFTKD+dl>2$lCUd5Pfqxv1(q~p3SM0YKC=00@6wE$B$OmW|*OQwvR z%3fLynd1YO@X5&4&$v&o4PFkwYRhxjI^o?z%i!c=Tq)~Rk49QlM%eE z`#nV}7TmnXMBoQT{wcmulna3 zH=E2ddAog~-bMIFUH`fhVBTk>BY-)Qb)iV!C-Hott1zq(OQnW5p$olEo-YsK1>Al7 zHY<#C7VD~>H-RM!zD*vA@?Hz3sV7H@+PoGNM+4J3A z<#q&cY{!(1vdxG;WVdzu=8VGdy{r?&<6?DNO!x2gx`3zIJd;AMmnUI+nyz5@V6@Mv>i5m0IC^5uDEK&;2De^JLO zYVJhS3>?C)Urp5T92PWD@G%28Qbjm>gO}{+2K4wnT|Utzd6rwd)le7GleXZ_KS0Tl zn2MZ4;k65OLT8Jo<>$>hGKri1@g;Gb(7ckk5AK)~?+DAm!I4Fq!X+{u?9IzQ;nMd4 zyn+|s|7Ha1232!ncM%GnD*lrLP zjtjiG+Jn*c!}$2QvG5c2h2u41Na%Ejet2szE+4QoL3@O1Fkrs|M8nvM zVL!{o+Aoh#_ElJJDD-7|NI!X6ZZvYQ9a&dCx4Ef5Sh`7w$Mwz`dG8Pk# zky9&%{s|U%|3K&V>NN5fu+CxEuUz*4*kvIa(3U)Ko>4{C+Op6EWWZ%I|6jFVu3uog zsm|30rAT;O&HIuU>t_{Fe`9@hzy>Mfy_pOr-?)A7MX=t>p~7}`;>=I5myv=1N?fO~ zY_Mjook>Mi;Mesjd7WGl`7gVeV>G2u?&_E?2hH9=U7h3Wri=I;gBEA*Ruts92AEyDf?QHp8fd z&xrq_k%R`o01B~?XQTe^km< zMHcoO`Gv6*ozJeb@O1$jImRytMN%ETcNH_{hL1OQS@%Y!Q|}BKn4SrVDbkPnin7W< zm#_?}xXj+EkWHn|$k*XLf_k#$Y{F{P@+lcZt_Wv{@cqd*lu+Sd_Qqqc1A^5Kz9Cl)TIn* zVOQH!A&9AX}hTlq%*=R;D0ykHeZ zdWG_fS)NpUm*;}xJRS>%wl9sAdP4>WEN?#hC)JQZCsH8;^kmKAgqxc<`ExBX)nVHX z*cQJClRC!(>%)v@@&Ed`y!R9>+L}?1HFfR=I8; z{|gQ-&}KE40Sa7&xgY<4X5CH3SH=c_DaRFFODi@07OOwI)OHlfxEU1@Z>#ESk_+LV zjiAu-TL%m3`ANz%W59^F?%H&;jCp{I{?<%HOUCD~#X}a57sznRcnPpAbCVm;a184Q z*@v77+>c0iy!&|VK2;9h{RMIbpqHT&@(Qd2cG9j{SX%6?9JbZdh{o5z$r-1AC}lGY 
ztC-L#&fNvaGgAg=4~*(miPo6L9ZQ9M|CS)<%akJ1ei!*yR~_(ywc=V7&v|!s>vt{G zEZp-w`UlGwO|z*tuV!{kcnMSm!n_6%_|z2ph@{Vs)xuRNn|_GVFYPc_J%nxMwYvpR ze5)IFyklYe?VTj>8>Qf!q&VAr{*%xJ1pHjzvUf)ZmiFvSx>TTDip#{rne1rH!wiZ% zH}+lln%}*3%lgg}LTodcNOEEkA`k7-E6rZ+%T9nEJr4;Y7g}futL0sYsz3Sr<0t!Ywo`>e};n&zQy_=f1 zieH=C!g(sGk!8KZyZj!x{m(1zUFQ7}w04F{>}|Kw)LaW#8gU3pEsETViPv+&dASW- z`{*cI0npTqe*r(|(DRK~ugF|XaqbY%Z1`k@-bU+ztL=7nc+TD69u*`BsrXR@_o%h6E`wPMAZaUS8Uzx~&Gw#2? z@aHmCn~i^4NoMvt))$7820p$(374kVsZdra%!Cq=(J^{wn%K?j1);s5JN7bFvrUbD zsFu_hQ4V-?HUqam(BO8>;#L7xORA! z%5KY$Jmy$Hz$zkFH<<9livKTZfqHv1DshY`f!onlU;RVb<1c4G?K7RSLaA|7%cL)a z)@e|ngX_Lr_nd}bp(A;#ZdRTT5GKTR**?LoMyn1zhFpPaGf*(9fyh7e-w|)`Y+%Mu z(Ty`heWZkBU*PEQ<^V&ZnMiU0&FJY{{ikRT62(f9xtnO6*IM3cD4ic=iDkXN?XgEr z?4!?GI3x*=U+!~Xd4QS zIjZt5`mYC8;Gqiqv!?7*1-W~%@k+g>HnpXtQ_;N!9|i_2CeE;yU{}0X7)$%@nXPSM z1eI{0O<=^14iOGbOs@Cfll`C=r4}67+MI#RD{!cr|6>eBXF$IDx0^Yg?a_Cn?V{lX z(?FxDcbVxFv|B+^dFk7U>?_u_UQat}mo@HdT%+}xFPN-0isprTp1IiXIn#Lcv28FL zd=eyGr5Ve z;8bY@Qg<5IOB44d*QOtxEx(2$a9Ne(emO(zweM_Y=K=6)wPI}(Vh{Nw_L_|M6;qLp zc4T_KrNgHVQ&8aCe0cR3j~QS;&dN;UJip`cukc=dY`9?UzmIT#Gx%}igbe#uUs>F{ zuHzy?#c!JZDJnfmfdu_1zNNJ>Pc*FLz3H(xc9Cy?*m?y^N`({TgnRnfpRop~q<%8f z8rvf%T)=&hv8CVlpA`LxY&3r=9DHy-7Mr*>X5q*-71UAi7l4M`gT6tmnJk~5zc(MU zn>Kvpr2I^9`KZkLHM6+zV`|Jn6P*8Z1Cu{LhzumXCCwrjNv2SK5t2PNqTojMFVKRt zaPd%iia_`U);Bp8-Z*<>ng*S4c{Cqss{Mxvc8(hKt7hqQ)dtqH;B%lxpVh^`NgXrD z$MZUGK)FGxI>8osOP3+-en1Umu@RHa?zZ6#sp9P9B~Fcw=1eY|RV}C|$W%H>0vj0f z;qt}|K$07M&MQdU`&;QR>tgIEbe+ewTC76>mPg1198W0Z8!b2GG#+^I&6FfaE=s62 ze!{f~-R$(sjri!l{sIRnSuhbUWj3)a00{OBC|M~)v zcR53I$~NUU)mz$wC{;EPPfdt$UFKp>Q9LS~U}IU6RVZ$`Y67fT(4baHM3k?6U+`aH z8lh&DnRN+P`-%2T6Z6cwFC)HN+MHN`Lqy}^XMmd_->CV=(x^|;%CVqTzxG&+5*7My zb#;bG9vOmHl^{`N0xqLxsR$y{KE?p?;Wcjv}-XK@Jo;^@Xc8f%^8`a`6jq};AE9k z45|7p&iR%SN=kWrljnrfn;n&+X-uvgNNfU`m$0Qx0aM%6$O&2js5S`wN~&?1z6^ z0+vD2vZevFt1j2E-@c(N!-wju3AHa1+4dWMyK4H(W~lD%{7!??h(Dt*GK3r zWnYNWPM6L|R=#BFnW#PL#%6B&2|$E?CTHav?2Hy}Z4yW;@?2Lvvzv*zIoLPa3wDtyd_ z?BYHa)(P0HnFXXN_`REd|NVpPS%fkg-^QAOjkwk^#2ju!a{6otQ#p6kXFuiWU=w$c ztboFMyFpDHkl4Lk&zz}5MvblF^uK*g`S5e#?p;#Lo;9Vm<>1LW`Y6T}6#-cpy&7uJ zRh)7AcxJR!fQ5NfTic*06hxoTzZf2`6j@DlI&>Pm@%8k>u1jsa^u7pGup6_|tiZwj zyES{MpZ|p@e~w|NIn=IdU(}g!LBOrR>nFlVt`dzZS-(D^jE>2E;)nOzBM&f+kC(A0 zk?^GHG{xRA60IY3;Lo4*5WVNAKR_nre@B~ddZt~wsT?`=)M*AV$~N1JRZC4}m{pu{ z>H&Go`{NsJ2WZm`ut#~J)|5a6#YyPAgrV-xgTCFT2BuY--N^Y&4)8#3Noq}_hcqgw zpE<2qwBI>{Y+M`oeau`_H~M=IKf!a0eBP$Eab zLGmfSyDFVYE}1%4dFe$Zs(`hSM4fISK;YJ((~$JPxA~*|6KsrL4e-j0k8kC_CAF1= z!;I`@9PUGRkqSao`;Eqx@uRfz&}%(IDZ*ztBqrss%`d0ILEU|AqZ@zqB09czWpudC zFV!;+#ee$!B`hD)iBo!8+w5ZZ?_LmMVaa8+%YMgY}SiR1L>5nocc^xcJX^{Mq-t3^LN)6zgSnj<=)t?XqIWy*}q>!x{RY zO29X9cBVNb0~j=9V+ClHx@jr8lquv2D*rx~m!=4Q+T@DqvCiwvi?=Az$;hrYwELcv zeJ&`L8bhRREiE)JFVcN}KKd^u6%8sAiJJXd=W2|^)NYPVey)M;*m2^DU*;5#$Yk`` zujobB-KjGEeaytuBjp!Q(!?vzwmq`{_}H?EC;M%yzm&qfN#)y#4wsq1_P0x6+MIXm zYq7i^cY>T9C+y%B6>C;OZtyLVPnuRus{i;WyPK?QeC*XqE0#y#49wKNpK59qm-3^3 z`EGgtEKHy{By+Uf{J}PF7B2q)HmUa2C+mXd!513v&ACw1K5V6@?Xh_~ly*XN4gO38 zr;XTaX#;sb?PY(dK*^u=MdG)Z%$1469i?&Y6eHQ&NEaph(CNTA(g4nH*kgffc3TcF zP8suDACsL6ai60Hs-KZX2X?fF`&bfcREtlk-}Vs3%7RgaF75UV*2Xea+pYPdW3pz4n&8h=zL~pPz&3VjH!YD_lTBxzvicEcpAlWPgNJN-F}fmlZr7{#EXE zbn&E+iF<}T^%_DCx@o^htPjQ@G<@F6n#{?7ltsZ8nDNjcfK@lb-RRg!EctzmhJb!f zJqeY^W>tTip`vULQW}?d>c5E2N!ro#apV4+#+oHy2%1CY^72Ls$3da;v@1$^YwsEI!_N<&I4f{j!%8!#RR4y;BH~X2?ItP@uv_o3O}vw?@ST~ZySARf-hH8Hj3Ncw1 ze#I9#onOl-e9&Njd`{};El*rYE!VZtIz<3FV1)GI^vJ+^lrA3wTM+Fd0r(J3Z0zg> z``^dHha3qmG67kkN|AGc{isIo@x8i%x4G7p@V9>`Jv5vNW{^fbcWcx_69U}W0*IXV zGVl#zke=sM;+QV}6n{hvhw(*v|+w|e!C7kpH`;Hvdn 
z!)`>Yjv;t0kQ$X0RG{b2=5Yj=gEDJ59?+w}c$eA<;P?WA3U>E2!AFo1U>lj?ybjweN=A%39L1onJ8$yC)x}QQ@YT(cpq%O~)&7 zK8HFwst;bV>v5I+1PbgVedBtYC1Qm(#1LMeFC3gkAZ=MOo z(=gEEqXxhqcsoUJF(N%MM!L@zG;#j4exW2)4+j{3eGldgnoi7O*dlg?m2TVZw>7#$ zZ@JhiTde4ZYw9|FEID;60tDNKMx^kOE1ei>5N7-B^isdj*Wa{SZY1Nx&TBNKrC@XT^U-rnO??Yuj(XM%W02MHVTyDC4dwWHT|Hh<) zi@?H5b>YN$pEQ|hp@z$DP}ZT|d%K48&F1P-WsT$t0#^)Y(A>}U4D^HbG$|=d2smd|uOwE9;3y>0^AI91 zEAv$Xz{0+PSShWk|2}pCr{R{6e^fVF3y(Mn*Y;_L=2Qwb-0+gA0wM%oN2;Hx7D1St zQAI_X^B+xIF<1npdxTXdM2>X5SS2t!Z*FeMHds zPs92J(-CcqiOxjZrq>SyUv8m%sFXI>uz#<`O4<$)mQG8$eajq80NT}V+UH?lP4;&< z4(|Wc+^nmDaXxeL(VTqSm(N&0hwfuXu8|ZFHOAph5vZV<12RJeTlM9$K|=i+(qE)O zH0lw7GrjldC)z-)?}egrt`su%N~nIKb=k01MfbW)n8cind~kw!P?nQwv$eslOr}?> zrE;QDKX>bAo!32|>-|~0Di(4Uzg8PpCIs@zlA!Y=XCb&du>g%!6`I#38xX3g;hBMO zzVMA3lvtiq{^JxcxZn0=M!@8{xK>N9`JX<|3yg~qAdEU0DRroHezNZqF}p7eLFo|V~~vv=<|Hv*T|QL@<`{wpw31DF&f z)&EhWBzQ#kuU+S9szyR5BBopE41&JGZC0{cwi<79mbf_$!dcWHumYfbZ2VMofpr@~ zqUV#teICCJqWjsY@S_U5pFoQGdZCFB?~lvlyzW8%#9+~~_pH-4{hME#U>L!LS?kml29xqU5 zynAiK8=GD(vgBoWxi??EpOK{qmP!i@_`iq5=8a+pjJ^PC4iv+QGh@kCUgu+l)c&il zh$&sRx3}++341e^V#;fTrA&;IOWeC`|8?%gbTjpPEhkj>?=lX>r$%`UV z{D`Rx?lgIq4{|V^T)9g_UxOL-JF5Od;-7hkU%b8(dp#YIvWv+`Wp*JmF+mu2Bb$Np zzmEbiNSOc#FgEfxVIjj#Xor(PD(zjolPpjb6Mbp`qiv22mT@Q}@BlpRiqY9sgS^8r2#b_COCV%bL z*8j7Cq){j7gTFL0o2{L3r^iv-QBi1UW)oqE6Vd0BFlI4h1ABU{8#p`a=Y4>~cxpy{bSP_zx9$_kf7-EDESRSfbf z_Ie>>Xgivg8Rmz25 z;B6T|D4L_G;Z}IxlfC%z^&&^uX5saVyL2Wl*AYm*7Ga^HOTtJSJL5sAvf!vlDj1oKa_TNk z)MHr)c1@u$)>tb;Z0z^3pKCGI9XN%U$N#xmr{?H}9;^#m{`*W1;z&C3lkW;Xa&p?* zvaW#)J$U|O`SgWAwgtwIvU`O+9l68N)^4;TAl`kxo%VFKDQ_*V$BpgznNT{eG}~EL z17GU%H)?=bEY9W0+ylFlI&8Nk#i81u!mYddnsPGX^YY9f?$)+rkdn!gRYEg97su|+ zuFTsQA>0NQ=2ZXlh=MI+M(OuCkPzt}oVy=UW;*>WXUgO6Isxj*4%E_^Z}a_y_q>|j zosPWVr6A*wL>8&=Ml<~Kz`0W0`>ib0%bd+Xu9H!6lUv_`K*a$w5ZcRGo$xZeDEsGJ zrVOkg1~2UlE&#h3K+ruscBFLOEL@N-BUz+R|D# z&QX5tQ~TUnCzAgH{>;d+Z7HZxAu~AhHDh_7w{Kqh<%7Gj!YQ7?@^ky}U(JodIjbYU zb6{PuPFL=F5@WjEFLRcBZOiq|yYjVHOo*8NLO|z@nUXJ$tzUcFKaUM&j8=6k1K_XrwF!+ z`nZ*;ouBl=O9wBU`GrOz9fi~E_F#Z_l?53M;vFtD5qsOW__y>e7Z}UKBUm8N3nuqv zz<`?X^I$4M;_C6p4Fs3vL<}68e=`QDAF|2df!OAXB~$bWUv^Q!k@>RYqhr_~>BVw? 
zENE$~ZIypzfT=-&D~W&-#7LvhMZzxdlaU(>to=b@ae_aARXstevxk_q5&9gl0ug58&T(um!HAX!E((}^d@Z*N7W60yQ_!dGK^h~ux<0R7zESo}Dn20o z2U(mVP9dA)?F!X+2>jU^yqF55kO?Yy(bLr6SU)NiW6>~U{YG|8>DOq4j(&TDwp_(N z?mZ)|*0MP~E6)5)&@%dsaq}BDTHhj{lru(61kb9}0~|_xpJ`;%3I#haZSH3Llpp(` z5M7^XFK9RaW5`omE|RGTsQbc>tQ1?-@g0`&6dekqmR)`A={W^|ZO9jd)}z6y>#HaO zPygz80JLvgj8&pBru^(PaT!{%Qo1JAO1#QW0;BBu_h)qnE07zL?PfORVHHi&z?Ren zWZwM!gO@3@C%q6Jfyb%Ctvl3hvkVgJM%Be32=G{fi+t_nzNX&_1{lVb7hg%a<~zzy4;TYT|ip<@+E8(%#6L z!@qBSot3vn`y=J$0%4lS+7?%_=z9s6mezbVI=z+QFjSFF)5cOLJr`(M^uNC@W-%wg z>@Fd)0&SI?T`?Zm-Kyf?v z($bk*%lvsLcw-dRA$s${--9aip+|}}ep8(Qrn8V}mZd^nh=?qMG4X2`C7ul6oITqv zDR9{U^vh)mT@o+)NlZ6K_h(h3$X;6S)<26@bpSJp0C<#cnx!?v#i zU6C1RvIJ7|#c3X1HZCw^y0Zw~qP-y>WS=Hzq_)cA1uA*kl>^V{Fzks1^j>2cWpg9N zkXdzH;?ngM9Y)O;p`Y)u%FukzjfG;B9Hn?9uvw`;1d^(fprt1njtDF88WjsD>JpxT zl^e`UvPu_2Xd`mrl0ZN%KP# z-0S~mG8`R4VOo{JH0C?y|FNzidU?2Kw(8NywY_XiucC*Br4$S-yo8G8V>kFMx{*X_ z#^i?Uz((_0<^x=)|L0I1uA zHViA9z&(2R$-E!t5H0)S$TqU!8(_o;;u_~pvay@|mfc6yHRd%e%`P!e#A&1bUO$4QmNba6dK4F00fn)=?Z zu7;`6y<0D8PPIgdG_0_0EawrP(Yv$c#A+q9EQ^$6FvvBf-Imr()s(e&1o@YcTbp9CSiD2VJNYZ&!9np?~m-sP*)?aX0oS z`(iigL+p*Lv@5ebBGe~5xS|%?A5;Mi<5W}tLz+V;I!QKGQfqN;bIt3<-B=Vxdpiof zet_xBn4R^pP@gyl%b6;95A^n42N`i;%4$?JTl{`ZEpGb>w!y6 zE5^aBswLym2xH<)2eCi5^ZEvJt6kcgqqloN5#e)vN}3C+8hVUmS8tP z-9sP3X8XWQTLssKshvY7L%(>at!k)6wB48R%QpPItvDET%th5pnVuSOH|(SpW)FCy z*V3i%?|K%5vaz=mdD?TfR-BwlMD1#9P1B7WdieYlTXfctT*m*08v|C5mb*JcF!fJc zm3A3jR0w)gUsm~z*NIukrOPhTiFF`0>xlFh!Rh;4n1n@Q+S+t?T+D0CJtLf4vM{M}VNUyX8rUqQ)^*@fz!!OD8f8(8V+Ibp|N;56@HgS|AZMeXd z$Pj0_#dX}7>$EJlf}l7{1qDQNASsShPDC>YS}Haa6%ET-&VJAL_ZK{`*E8 zy|2jt8ETLT#&oR)-^jM$?pJdI2bA?+YBw1W?1WXc^oHOg{y< z{}U@5K{W*uN55)WsNC#rw!R?04)pNR33e*GkFQw~hp|$0fMB=MWb7vJvDGt{b+n?0 z1`0oDP>p5|2k?k14Di51?1iM;@7Y3=A(2!bK;&kjA>KC?g-$5V)EV|I8@6PII(h7X zMN&22z1}Wwj2j!T+SD2DIhPS7rQGe%-3)bn6!?tGANr=_c}2O_LrcaxzVvi-bjbnF&2$RY_H69w+M^BTQ3ElA|}>X z>D5(U3BKfQc1?jz6;|2`kU$+;JLYMcYUNKGTpq@{_d-2b&hip|n-?qjNx;6i|Gz$( zy`c7e$S>VYoqAIbSJ|##6YV3e;-X>xVYj?2)Kb}_@viM7G))fZd|AfPI@ktI;GAs% zEZ2z@&N7va=2z$$ysyd2Ll{Dmmt{-5X*6;)^56FUK40_Omp2?Iok_ECE=6BuFf~5; z{>?7~42v99QI6XUJ{dYjp%1>#g=*W}lJD%JY{+DWG){nG%C%7C2o0{`-L0QI)yMO>6~G?-j4OG46}&8X|Ix+rYk`;xSz=`)-U&@0Uv1dM zEo6x{m{q^WQRtfIzJ+M6YY3I-!Gy=7Gm?vrE!;K~^YjiCgBzxdZilndJZ5tS;VfsQ zU{s+sH>Vu?17A^D)%X!e2tj>skm>3JXap4r(AFJ-HF)O54O9Vf9)?1%fpSbY_o8k zzWZ^*T57K%(jGZDbls25z*cCwN_gLW)SE2sU0l8olVxz3R%_;L6c#+(gC7<(%aUP9 ztL(&2sA`*S`WU9OJw>xg7DP` z>EV?w(d1>-pl0W|rO9%NYD({1SSYlx&{OKh!N{BN0M0(`Rx|RO*|W=?@5H4$L7_s7 z6KfuTe#3RAIvbR{U}i|&>n?IESAIt!ez-}?#4IDY6!gEVadU>X*|O70HJ#kQgYz&178EI|v;d4Bh5p=HiHDL<vAjq{gJYT5ye-d#E)Wd5+A3V z)8=7&K^UzseC)Emevu;@)#3&K}|G&$e2jk z+;HgV$-^guEOZ;)m~g<+X6M%Ly1q(uFYv%IpyfL>W-#7)=6%z@{d(`pUuvm*6yYP5 ztLx%z9EkV2x*x&3{mL81_}T(+5HR~0ZIUkdN6Y)DY&S&VxZ$wdM3JTmO3Z-xtk+D) z6=8#!PIJy=1lO}j_xo)yS4xjxR;#NX@eTpJYi50!dA{L7IO|GLv*1VsEn-9)vlC#s z4|?WTiodbD`t?HTI3@+GW3s+vXbr;D3r#o8Pw# zx4!rGfc}H6+^AI~gxNA&6Z}awsJRb6XuVxkm;PgA-%M%XW^w0>Q$gRNHKpgqta&yz z4=M&;>H8diKDd=cUmQmp_!nJ3ZOBV2cCxE}fzI;`1A^FGQhzcZCR%jg^7->wpony5 zuQ|jad>+2;DFp5aRRG1AZ`TM$d?DkOB`9f`VU*K|aMhj(BCTR$wW2`2YwHwQW&_Yn zZ0YhZay~t@K&uIq;p&%0?t}U^yUy{7}RBOow&TGN00Do#-Q@z{&V_+y@+8R@!xI~3=y^^ zh{xW(W2Rv|qL`ubT-wVS0Gfe1Z7#oN$l^QwhuelHB5I9z@$AqQuYWzd)UvFjK^`9x z@e^Dc*>F|7M1|LVKMXs0+HZVvf>u>csPXCN#61KUf<*_--I9blTIfM-&aDP2WdDur z@K0yL&j0G;wU(lj-_pXOqc5sk4@U}Cr>-6OOQ@LGn143NX>+d!J`Z?T$==eEfzN^C z@#Rt^=9qLyMlM{%?s$d$DWg=$TkX%5YbH|GOKgeI8sh`3Tx>SXa4Ud6M8Z^Kx5@ey9jpD_;5lJo+!IR*WhXtt`}pS7%%|AYbGD6Q z)gy_c%=W0_cyp&d^f`x7K)!CsnAlT|N(Tp|d%M-0@PK+`Q2%jm=TQ8tdH04MR#s@; 
zIj8eeYY1NF-fLRqdjaJ&1je1T;>^tKKHB0^XkuYzx`bi!q=;Mlf?KPw= zC0%_rkcp&rC0c1%?p1cV7SOKY{5aRzoD2hAWx%r&By{YoUoRp#bc@K89nBr?8rs_G z2Ap@l06{fE;Uw@i%0!Yti8@Be%vlHou^?E6+6C>(rT)6{QuW9(4M#NZ5HU&+E3fEZ zu~I*D*;eRtgU6Nc)e&1ll3_}&q^Cg`RhP|DM5`UHNgAKB0n?13(yDN& zAnlHUSmVQ*Nj9X6s7a6*$spU<2)N^tK=B%2ECr`YHJ9WIxP`$7lth79ikgLROOStOYjetTQO2}fNI(%jwSj5bAHSAY+(ET~ zH#&js=82pIH~Fw6sGlmvw9-Q=vX3~)??gek*W$hHR!kN(%9DsPfQBQ0ovDFaXN%3tL{}RoC0EJck4jE85DHBLW3-Og?vsXN~;EklICW zvfA3f6K}losBi-UoLYjBAb7g#rNbqZ$X&vN*^SMvM!NeiZeP6v15^nL15vEs%SMr@ z5|#l2Iow#^zPf!J@NiBP*wQdHg)qPrvDtf*?>^-O3wWJ=9MYY}dTPKVCM%`Gn`0JR zGZW@Ww%cGYVPmg-c~1Iv)ry9KV3KRILn10G?3E?=PTSenxUoJr(@twqnHW}_xJUp1 zO>q<`tGQy_%q_iG8v<@_P5HDuK&IYtCmtixx?DYmj|y9GWR2XDVcWKv>&mHMinDhf zB!vi^Ym{_W68TDif0HUb@>=%q+nx>|I@!BYQ_(#=6_nGJRxe;JaMJjoXp}-0Kd7&d$zseO}ozmr3)l0<8fg!n(REg3>4ou|_r)EZe};mOA#(bIv+c zXJs=wKtgs**E3H{@*5!CW(P}LDE5xR{PwjB0IwAX6hTY@UR73o(ZmC=+ai&0Ev3J= znkw{)ac`Yc1e5ZtZ4^;Rq3jY>QN2a&ZGWSc?-E4Si-ao(DL5Q9dR9{vI|05w(K{Wi z0NZZFn~izC{0014L^{_4!q!n+6V5?1jMySIBkHbK=L0<6+j5~3za_6q7* zsNhf*WQ`BL3+#6qYpVYM9#ADp6BTD)DN#t+0LHtLs)>Smvqn{JQbk}1dt4L0u=w$| z(&or9PP?U0rAeGHtRFpwfVQirQ{7cIl25khQ_VJq3bs{L4uKnfHI4B+JUh)LWB@IS z6(K-BAHR==Ri*%{skN1y2%e~}&Nqie5i-^zDxHezLlwTDfu;GSTdWFs*3RQb*m#4{yGuQsVeaGf6C^V^Lxef&zR&-E;cs#!$8Tm#sm*VB!V=Gdl!^!P}|YDla? z2~}O9%0}yrx9n~0<&+cw6!MgrCkXjgSCSUl zlZ6P9Vt!wZaq~$eJVG`fW}z5Zli)`-w?I(ntp|u~$Od_e>Cu)oB}T#kc4ALsaiRM8 z=Pr^&4ZCXTWdIbA3S*6MEUN3aOWAfjBJHCc4!y0)DgOWr5shhxLcuD)3doHT0Itkc zhnBY_`={yWgQZH5vaA-806Qy#gpS5A^0Qc$dsr>WvtPZh%QozcX`n)3KyAWn5n6}u z09P8LZzp$+wNq=`zcyb79z|Fbld=Fl+Td1FP$WA_-Tmji!S?7c&v$@Nx}PaMFl&vg ze85QZ@AjW@BWTvv1b$qjgcO1)vK&ZI)jqZ6c81JR<=joxYydrnW83NF)c}GGC{pDE zP}SvJozc)Kqn^>52ZF$l&r`;sMBsM(D{VMRQV9Y&&0;MY?msO*mb-?{+gRVk-0{$? zAQ4H@TvGS|CMK}?v4~mT38iapMBBKV_O_?^@zluTJ+(xrlObL%IMzm}j6=FgIg~HD zP#`OEJKP)JjG>}c;!yU}j<35+CVzckTKYtE4Tnxh0G1#C2<`jdx1LZ?G6?q5lQGlJ zJ8PVHg*314MoNt~vPuSnzM5Z^?utOiZ(VIpDQkiSa$3A9=B27*4$R3MDo*CNjk#3? zrV=o!FjQ5n88clhxM6U=GOYIoGz!5(1G?LX8z^(2)zINlF~jk%|J5c@SHc(~$Ub#g zy2T*gj6l}TEvGwP72?1cu4Xo+11T~1R5Ypx#%N$W4uHyAQr8|QPTbhrg*t*x(*Sf+ z-5o^}i7JgjSz}Qg?InO9gKeXlF9ZmWTJj|^(J@f|Xr$4+QTd*54J0K{d!$@m<6pa! 
zl}^unTq$!0V6Qx|I)appjT-6|qO5{M zZmNu$8yyXezK%Ax2=u0#SxMA390;yUT9Lh`fxk`%`Wy_dSwQb_4Tr#K%c*HTVyx!pHGt-S{`H70c z(Ni!~Ak1Y9O~u8=o*u)?tig(HI)>4OV0z}*B6N}DB>jLzf$S}^=I6cb$}AFO?X568 zejU3jxa($$6$ucAX^TZ6k9Or9yA3<{KMq=DQNo{Lr4il(T)t(Z4K~p|OkgNU30o*S z9YwA6@}m~uSCq00sN-FweN5sxS_NRf`HT~|5G|t{cKGSbtc0iCNmS=}FgQ(Srl{4P zNZV`b#w6V!ur^(P2szdVw#oCQ2m(!50D)m%Pus-fk39h-K{pxAFAPjWs_LMXU_Ud>wz6;|3UT6;m;icL1Bl4byp-tfW&$YFO8~%v zOLiJ-ZZa_eAXPiUPlo}~Qx;gz5=unSgBdro+TWPvi~39sHRK3R%tc^UoFM@%x88|C zwxh@B9Q3F0t08~01Sj89BePE3;B`=r3t^V(_7oy%9R~8wXzi&um-rpFPvotIU{9Y0h;Ux9jsSRtc&y5 z$RmwaO#`h2;Uc-cM##_x5N$gR)U<})z@xv$-0;NBY#@X}M;*9hSl!lXfSj0F6&-g6 z+h!Nt-@k`$Ij5r$O}t=~tHL?Pv9Ox#S|<*NMpxPh_6wi8n)0T!!jsfJwbaYz$YMLA z*K+u>Futc=$HZTh?#7sf6r5z8q@VWg!?LO~rh zo;RP$V&R!u!Ce_iRXy)-X&lnFY^Ok6_>FY)#t-^G`e(QYmPLuSr|jH!<3TDAYVRp2 z^cBRkRlB3|(IvN-f%F^OQZ?}%xpfs43S=!S7@RiOr4mKtM^cKU*wd^IG%Khe4*c;^ zI0|`w6auZiRq14!HHb&Mc^2Czx_**-Uzz}s0;uMqpmjn>z*iXRTx}7cV6I(S;2p}R z!|CSqsFPb-&dJ%rypv3_M9f5F+k7D0ckicuRHVVGrs7CZiH;uX^WqXg4)PVjB-5Z< z08{se$6jkk5yHGK6aWf4Yo(T^PcERwTI1f4w*sNHy}mv?vPlEZsc_nqLW#p?UNxFG znn-u6hG@uWry*UfZr3BX)6HY69yFn*N?P!lj0JV`PCzXp2_z=Pmc*M4J{<5#+1FHc zAfs0b;S|qJxHMsu{*)1C1f4X$ZfR*$?ZUmQNHZ9ZZFJQN(}7{;J*5DJ1cE)Feq64I zru5^W?TX|o8BFcv`K|`4!C2~U00-M{olagCeSfaiFq|PuFf;q)5QiDk>mKVtw_7nxc4Q zT7x2)A|h3cHr=-W09BNO^dAmY#^gldO>2WG!eg&xORdE53__}>5Rv1jmZ^RC>0zga zG`ET*QKG0P!9e8@*gy#fWjgUwl1VuE3U8Kr(1p@#u9}^y+Wia1iWEknOp*eMNy0GX zS2(Kc)%Q&zyC_)v!W3OBecuf=8$UXe47>al`?qiL85TqK;Fzy{&mR-s*dh zG}Gwkm4!BzmXXptxPCR$yv8*_xF58Q?+wEG_8}Q~;XYM{MXO@2H%B}~%7H^{ zVc7T(ZR5`+Fl6klx>t%u-Lq1`QmeEuS#A)pA#OdKZWXI#I`By%O>i;SB$7hAkrW1H z1QJ@-)C=3524b6e0SZh=9D3^#$y}u)LPpWKx2QH9w6~8eq|+!+N`Ra;{`${6RZb&o zuEiim7S<=&NATrhaG0l-+@z{;2MWMss#zL1!%mb^NQ> ztBs0BX(W+%LmLCS*7)0+DFnpTS-8@W2@p;dnPaJh7N`jW)N1akEC4$9{hZbSRnP(# zh>~lMmnj96jFOeOMf)hG-~xTUxY}cdc~UxO;ni3}glQJmVY)e#F7J2_*YF39fMacI zzD^S$R|Dlne7OVrz%w+7pgsHi4xDK&4!YXVl*c&MM0v_dwlg5rW06gXHqaaPwfRR< z0G)=m)x(7<>FV;Xbn8<(gb>LBI~fuNbp`F?pz!9(amR&imBK*j+0$7KGf5go{`;UU z??1b6dt7r`;Nw(Er2s}d4AP%0XFTnbC8(&%d7lu<(pRXbtd}lV)KV7;54!&VF3Eif z(EE5-ndH`&pa%~+Fh6C*H&ccaHm$-BzjW~Y}J*Lypz>5#ORLf_uO@}d&1*} zO|%3+icfWNr5!}f^Nm4{86s~qrocOP9!5-4kK-4 zk~x=h&mq70Savm-+-ct4Jh@mRNTwZFZcLdmSWO`JE?7%#Ran~nyPIE?);N1<#IThR zW);wDoN&(RBy9T_cHG}pi(K23D+9`}ZQ?;Hk@BuTFH=rkTXU1{2^E1KK^lK|C=Aiuqp4PX~ao?6Ng=`h3)KU(uy!(BX=A?2FkSyl!8!D0r1lSvs z*mBM+Cv8|FMv=pwHHj!CF|s8~KEOhyffx1#2gjc85(I&^k^&bqIAdHIf>{dhsgK+v zc_VQxVQ@Y0KckxO0(R4l-l1j&9(9%QMlQfrtcpr`NU9X+s21I?epxQ~NdpRbP9+;i zisk&DX+bqiP^ljCpxo--8edEE$#=ukOcyk&34ws0I*hnQI<< z42sQU3U=)z_BtE#sJbbWjMdiS!Gfrrl^rsM8zU^SK;OLyL-u|)zq8rR{{S*xnrO;= zb2T1JtAK&-!st}nYXPVl5%gU1X8p+7Rxd%-jbx80VW3ZF{m^Y3j4gH=>Q6MQ<>6LS zsyezje)^1<1{3E4JZ4~Q;#Ui8`unv z+)nF@+vxc5r!xi2%vRbf(WEPnonU;wf|6dN2~oS>RE!P2-vQs7IfKdmDHTA2w3_H( z_$Qe$G!GPUh1nR~muv1if ze6mQ?B9|9Nq2Z4@;BxjEym_?Y%H1L+Pi3wM9H6~fk_~B2EaGwKtSf`Sj?yy_K_Cd` zAp%;${_(ff%c=Yz<1~py+0;PW?FVH`OgfaLO4TMu7Xf_hb~ms$J-Ew{l1YuU<0^!a zvxXIy%knoy(iw!~4 z{{Ruk8h3p3_fybtsYUN{0?B}YUsc;NR zwt{N2D;stxRS*V%NUGBZWf7`Y0ujvw`g6dGhSJI__Nv6U5`}#L$N+-K*=? 
z^f1DfaXnv!V6DTeu(qT)mLFAztz2tYN%L^Y2$nmWGH(LlkVX0In|pHX!LqAoQokdQ zsHB;`OL=0nqUDt$dPW|Gpn(p_0JtFjj8rnKe{T=csU{5*7PO^DiRn9htAip!h02y7 z_XKSKUrl~L4tN!8os^}mEu^cEYn*aZd2C@o1KpWk;^x}eTa^h>#Nkv$!Zb(%O=DS* z`>hE;z{%zSewzzl+sZhjhMy|+x{{CrzU~#-UBHKqNjH&ZQW;H#nhsnLBqUX!5EasE zi@HKDc8y5e832~=+*o$!wFvU29DNB%$MLLw^Vk_%Y1QK&cqi&N9YTU^FKhCE__kFL;6H9Zg<_Ry;uQqT9R6LIpxG$emjd808V_eR-fWSC)m-r> zR~f0W?pPA?s=!?9RHbDxcM7-de#7C*Ey1s&%B)mUk_AgjT8EM~r^tI$xAG?5z`8`mE*EhEjU#S8}~~!n&aWhv_J&Mw~cu-r17k7 z(W*3bajBpE<&S;B006SxZg0v53W$oadLu|XE1c0y5+gF*+>NTlBx<{L)%<;fno8#k zYihdFh!uj4A2J!d$Q=?Z$tab8cK-l~6WDUS5dfI&{nWc6~YIR64F2*5~)X( z+h(>N9vATCUs1ps+e+vWS=OAeLSsnchy{bla8B!P+{f$VWCMxYRd6jPV+xDqUZy6J zb&f(r(fJ?)Cbqe2UgO)%mEnzTx0ba@_gAV`p|2$KtWgfgR5Xr9#CRW%HasF`ldO$I z0|3@aXaJFXsHxRLdGC20Y-~cC4xXgIQyxD6vZ{kQk%)ze46sEn#u=@zgptSC%9b8&4h_^j9eWf@sz# z)h_$eSxSRzTiI?%#z9@!(RMZu>;YoNMI7$o0u zx!k~5o>WK^z}JdYwQPbhtcukWih6YX@nn6C691F2+KkiS6zi>&lfGr}Sw|97S`ANiMGJrDXpADNO1_n^d0>e#B}yuppih zTJK3v#2-G2>PuZ-HE8$RM0trS$Kh>n;mf*C&?$v1r0fQ`hFK99)o*gU=H37Whk@P7 zA<0igc}nqxM9BHnl}S*Rjg-bzHs>S}^_?`oDc)@9tu8`FI84_Ej-gg3Sz2Pk$U_}; z_WXF_*Iq4@0+4-})Hs%TB&c_%bS?~Kmcu}IToG+~sXQsi08v#?ew#BDlu?Qafh6q6>X;?t&1Cu~+F+X8uy6le%ieU@%fM~50(+ldpz8sw`c_=~6#rHDRSi7uzLf$htcC;?1DNitx? zaG72tV)Hc8+h}3#*s;>$*3+BsO(iM`M%Xo)N$QzZBe6hM(XdeK?%aXx7y3BdNsuW> zc##0$1wnNnM%7G1xh#Q#+uqXM>`1pXyf#*lqF~6HmL_1%tf9AigsQ73A02cYx@Sxi zOovoTim~YOtZ(Klz{JTK#zRl$+iORtT?PAFzdRIUZEbExiZI#dT;6q`Y=_HLitP&^ zx|4q&zWijl9BI(vma&aS{{TudhcktC0heXAvF~6#yt{CrwkaZ%cM@~&pQDN{U*?3Z>2vkt)Ac;A{UAV*DWzMr~r7}f~u z=TPWGl19X^*g+_JSo6qnc-EXLLVP6r>m@TJY)Ov5xm_CITxx85b>nWHs@=7~Zc?RV zT$^;#OYe}Ys|8gjXx8Cv-<3+$PB@)%cnas{5d=rgl}dmg?29P9ygYewK{S<{hZ53c z$s213NZ-n|+mM38ZO7?5{{Ug-6yYR+fEBjEQBI>Z-9*aHyCDHU9{N~&-rYvLY?B*t zt2kEV0wEy8io*=)t6$5TR^AjYw!Vjs7bgm*Hv4Vm8&DacDtO{HLzDw4~lmjI)I* zBY>!ksQSD8+^J9_ZEUT?gq0aS%EFaMp%em=N*OO0H`I4+Kqs3@#+-IlP`yPU&Z*hc zT>QE>cS9EEdPP2ljIA zf>KXMI8~TwKw@_EMR|%qfl#!wk{T)1tS(KDU~loYxj;@4h&1bm#VayK%IFDqDET#w z1^nAHTKB%4zdmYs%!<-V=6>HzwIv!>j$!h_k(i;ljB*)g-E?hl=JfeM+i7ly9GE^%Bb#k*LsSspBL#(JnDP3=; zeo&H0r_>4*5>5bm)@r%sjy9Gu3z4_L{(YEJMNDaA-jkOma3wPxt zkrbREDpsR2$D`R?o}#6rZK2|)T?EcS(4E>5`EtntM@;+a)h*zXUfo{I*<9VVqLa%SPFq3MnLc z#L%IgIu(rc+Xv{rYw9L*z_84z*9Zn+!m&ICvRp#D1;T0Pi7IEWfGldVBFVjDLf6_s zBZM}a?%mQa7Qz<4%Sa%I)OxW$3L4yYXogjncdw-hyHsuac1OmedWXxMF3CMcN*;`L z8xHCpO*0(@7C(Y!j1*5x9AS!t(P0^P8pq(Oz;GU*)K=hqI?Z#c zoIeu9u-KYu;a3-Dj0I%#P;3$aq~7*tdizLb#Z0M^9YI=B=94N&U)p*w;tnxSZjWT| z=61I$a4D4`1x=NRN{V)*;to7MG>VS8p`+0XNWSat76^aVH|`Fa9!eox2$|#>hFZ8$ zK$>_|xz}F&@?Bc71c;qJ&^Qusu zBMND|g`5g@;(Ge4Jw9BaFtBpw5O1;A?6@Fqxzp*t97XDgBV{RS8rxn`7bJ`X)2X>9867j zIV7l9-3&HX;Yw0AlS*t@dg;L06RE?108Ja*E71p6=&-!ejc05Et1aNU za+xtnC2FXPR>>@WV#N`@wi-wWybhiD32$d^b{6)!tcqN4%ymUhd!yK;?Zwnyj_tI@ zBWddDrCwsmT}kRaMir7W^+cG4TFB`YUJZp|?l9!T8RJ0vM8F$1%PTgf+{0YMF?RH+ z*F?CyNk1yfY`-aL)wH(W2uJ`3_|%+p)1OnSHuu5se8EdCJk>D-zW9p^+<%1Y%%9xfsIVcd&8+}otuY7uVZ+j~dwxEi9cU@s zwO!&kAy^UQXfqMiM6$fDV0vO!r{@4&t$lYdyKeoDk0;wL{>g5WsTT;3Jbm>5<{OQi zTKHVHaVMfCeUv0+Zkp$;&ryU!L4mzJCS#Q0_(m^@)5{GU^8{&1M8yvCwMq9!ZU#4p zwakYvK~kM79CV2!@Dwu0I~m>!=F$?tN>0+aqodxQcBo+IT^q8Ih5)EFy`EKKqJr!k?nq%uZw;=garYB2{G-OEj>>3 z+t-efP)&>Hu9{U*;1s{*^F9l*yAFj5w0`uk3#crH%*qFCMfoUhuW8v%aG;B4$9eq| zf&FSU-oDARY?y0xxo?Woh{xVNeY5}>A}bYCcad1wlq$fE=j{H@+>wK0yXdIRO-R+i zVd+q$td$(P!crnis;Jjr5_{k4l~wn;!(eT$VW$$MERj7F=mRAsLbOv5 zbqo?!-C{Kc*74(rR{v9qsQ@)O-)A4BNafq+su2nx&k8-#0zj0p7w*0*rjQ4 z$x?CB37=ozL)??I{M#@HC}f1IVI%?U{VV_1B_|$=s-B@%m;EjjHuEgHc%3xoXJF~* zu4SkKpag*x&WjP4Ng48Fh|6y@p=B05gAV6j{By!pwqvHW)Fk18qtc?(R#ib*ceQ6!Sd3Pk>Qyv(v$MukoH4#WZ2dwAmGL=SCM1!Zwuny#^t)uW7my2Tk`e=&6$ 
z4-h@Q_{mw}j8*!N3~Q&xQm&bcsEI;;nrevlKIQOM8f)}(Xme@VO|2oqIC&VXaaJvT zEVBsBkcBH%9BM%Wx)Lr&$Chd|gC?9V6p{|p!o3Y@w5-xModH`pZ3cqMqj9kK>Bf`{ zR*C?KKzF}F2npF-Yw9Elq`~H(H&YVEWD3JWxH8#%Ez1?)DyV5hs&J2Qon(%zh!{al z9GBWdOy2ug8w-=*Mw)YT*AY!>@f695h->QRXJnuaP1tt$^JzLv(?O&5 z>Vb|C#-}w*kVhYr38eDCc~Y2>y5h#nyLABSE!&m~s_H~(o;!N_D;(Ifv5g~n*X9U} zKbEFFMS|Um>P@)HNhf9$rNo4!MB`p284>4}cdc@!H-L!AG9K%?!q)nESR)v!DQF51 z4_x%t3Q-fJK*uc{F%>pn-r7iGdy5PHTyTRkO(|){XQrH~NfSvFb1GCiwC>xiIsnRJRW=2zPV4D^XB*UY8K+otNwFr-dBc@sf{yrRWPH~E(Kw~)Lvba4Y5KSbKs3e`zQnbeU>8Q4V4pgpu z>Z@4_AP;G*RWXP8pD-9EM)IJFB1(n5%X>9GJf^V&uAEVn5TQFO5T~v-kVufGtg2eq zdt15F>gJO*xCK5PpM^_`mZh%pm`P@2Mv2H#r?lAL+s}8^q&8$o$A&e5C}fT16&r55 zM$}+WU@jGWb@AttqsMJF;z;Z1{VH^H&R&_}os>SC0`4Ss)Q@ksmkSY$(~87tuGQ)z zE{RLL#@ia*m|TmoJ*)>ci8EePt|WLwd7A2KNJMi3WxcH^+``0%u>)INa*h))I9G~7 zL`+v7BhG}#)zDKQapwG}+arPD?_S?e9802PVM)q#jB%VRI}BFjNYS)xe8p)J?s&-}4l-%gJiS0c6%rqqshT%=5}2qZNKmpBjNC2ABwxpuN|ZK?YQSp)NZ4v% zpAmvKmU>8{{msT(u~Ylk8$kNGr7XrZNzx+|IOFlCt%!LQD9$ZPK|XXULTE%k)cx|NR}3V ztM~*&8sd?B>VAYPtb0RBr0rXe-P}&2+6J8Rh>e)jOH!u@iuaRG8+o%=G;GbaqQAIX zuofVZYjUMg_Sc4$V12dd=Z)le;iugjSqM}5P1o*jejWAtIl1XL)kC0zrU)XtoU|8` zP_n972nxzn_zef_*Om$@HAgNXLPrl}V_D>Rp#=)FuEk!4c7UPw^Ks;Vm3GL0>8bUfN*4YTJ=t@Ua+NE|9ao-4Wol1T+jit@rwk>yg}#GQ4%yxAr&t0`q59ktK&Y#vtM42-2#I)w#-sIWI2sl#r{ ztEtp+!1Nl3s}(}T@fjhHakEYe+<1+nz4=tBKz+26yBj1*dUjS;mH=W%!?Zx%o?XE~ zTOX@WXB{e88!)YD)d4;nYd+EuW|AArxshxbk}cR@UfgWR*^M~aI(AMq8bH#9dYB}H zDfdL6w2FHWY;A8IY)RWz?v&hsBB9ErIU!B17E(%WC5^$*umoF9HslTz$^wGH!aVum zPEk=Yw2@PYV_$V9kQW1Yi3~{}e=HjXF;6LE0;KTw16bmuSj^j|5=_H;7{FVW8&7ek zZc#~&p<1?h1Q0g$)QKWKTyT~MG(z%8cO+|Xbo(#OczBv z{q@vw{LFrGyGF`aah;V)kB-By9MCjDG?9l=Ks-lJ4>4IKEi)GKZzg%Acj3E5DgSqVV<*a8+n z>;WM5I-W9;0*o5FYY9jMU^btfNT^fp$MTBdE=dj>>mYcKr=A*sG}7H#lw~-lZE;xq z$jqwI!fkbFB-QK=fDvKOEm&2WREZl7$^G@lNUtHNory2mBUz+NcHMZ@zdlqIBW5+B zZri(d^b_!`r{zRy$tj96V-vc{w_&BM>OGvVcmSBjw3bsMe_EY@V0gxE%s5V4%kZu> zn6bH&3c@gaG;W@1nVd%&BCYi#fCq`<8>{whnl3n?lqV8Q0q>(59G2$h;VEqzl%t1F zRb!hBs;8-*85`v)sh?z!O%ulIQ0;YfkPV~}E%Bf~X zHRq6$;4`-xi!`#&BNJ2Qg#=3*dk@(x2B7K7EqY)IX_tznL46?fMRP%VN2HdpyY2H= zX$I+VJNyqDLV(7!*a-%2kGNn3U>uDRpEgdmEfl1VD#Oy1^&Xr0kwsq~3yM|Ym}Vn1VzA+n)GV?rU=nxi z(%{6({BY07-k)@CrH*2-tS~s#M;Wx}O}iv*6~38x&+5 zMhXh0M~#5J_OUE87js z6s?C;Z8EfT9nl?abdhu;nGd>q{{WO{mdSp>^g7x8iIP0V+JpC>aWfpEFSqC=KI?*?_3@P(0NX)bvs{U3MWThMtNAW_et&qKYibmUEtGmn6%H z$k?`5uFDTh__xc%g40D1aUcPgu{JvC$YXfEsK#u9i*yGXQbJS~U{2CR4ZRgKWL=N3 zlRn$dN?k&=B#szFaiyr}#(c|t66x1Wa#kfW>uWis_k?QW-Xv2OL9kV>vJxb zk8ypohmf-(JQBF%j|^!1gpuw{- zzOi!$5|kTRDOn{&a3lIV4JgijnsVE9spD~_1t0(uG1p9Vv0%7XQ^p$&!5<%#Vvv>o zYSEY_@@)sWunUp`-`zIgH-5gaGt0}Evr28DBx;>U-~|ZxSLw?rwzny{QMm-pA_wC_ zUsZEvWXkyyr&v~0%JF8rn}=6Pk5?>k#X4X$btsTdHcoehpJaREO$kuezaWgWwmsuh z*DjmV+pdADhPc>72%4JiJ37g~EW!f;No(x~_lS`h-P>7p!E;bSO4Lu8QCn82JXXl3%EWb{et9C}SL(fYi_D>|P=(5ye96qPxX4M)c+ z=`lDWub_%5L(2_u}WF6x(MI$T=UJSBgBx1AYsuba%)lsIE-Pne2XnC?ba&D(4$ z#V5OhFqq?o4H@^OT_@@$8BIrtU30$c|q;gt0m*Ewe#GcF8TY1Vm!Dyv?mNY9?Y0JjYq00`HB zM{}z@>^*b}gR9#HHhJSxGdWeoi;Y{vp(73=gx^Zus52HLm}AMaNm*5qDQlgZ2q>u$M_ijfSPH6p&!WF4RD8x=6&k7P>Wi7nYi~t7Qa3hTq4^W*j z!ZHRe25QMUg|nYe^K`RR;aIa_bvS9Jm5-QWcqK*Hp7qm9Tb*a@FSdp^g3#N(3T|2e z8U7$RkDYUKw$91j$*v94J}Y-l1Bwq{RTlZH5+UZ7h)9FqbGVyWn_TF5ky^D$CInVJ z+#!AyM4g=a)Mc+O@rcY)v#r$Ya9G>7kHd*eh#-JOkxnz=SXoRYaf65JLw`fD9*set zFdPr5xpxuId5;_t46~DP!qH)Kgkq93^1@Z#KtZ%uwuF(VFx%=Qw+#C$CetTK3J1i# zDLoP8#(NJ&Zj^MIrYyseru4IjVEENS{Kg%b@T^jz zRD@Lqk_lj~k=`^hl^2Pwr|gl;Ys!A5dqQgYryO}MG$1J>VH*te9=gbE=jr!zm14BE z&`OWmagb*S{3^4vHVc??PCtz0e9J>N9gX30np( zpmaEy_2Mg=o6C_smESvT;qjqI9cOTK6z>gDilTu z2lkkb70PL`DPmSGRISgIP8fNPB;@`#NJTwrb>!POUSE*)(@4`g<;H 
z#v{qbp|PB^Lsc7M5aX1XWSEpy=49D4M8y?3MLM~3W;DFk!2I4WKHg0aAhLkj>K!i z<=P(d5~C-koo?YpIGQRwaO)4FtkaWoH(2_u1~ZpiC&d)`q43OJdP!jNTt~W|6AYp@ zVcTkaZaAfOw_x{WptBM%Kf0CK-ZtV&LV(lLK|EP2(at|jfc1+C%J`mS!m%0tXA8mc z`nuZuF0Psxe@9bNYblJ+E3=Ztfl@e?Ih53LJ-+e7C~0c1W*E znp`${;2ZqvdD1snqyQb>U{rD92(igt&HHw8)umhJdX@o{tYTxJ^p1$A2eeMb8oy9_ z$4hJ2Q?MSfMe`wa8w}}2e!wwar>zH5sbHE)Y*Q^|8krq1!z>}7tDH1Nr-<&q=)nM4 ziMI@Q-?kNB**M*Xrq4>hi>5j}sIPOo7&doFdhCaKfY@mE;>VI=hpqGQM)da9Jo| zr;i00w+*brUmL)qgh;T~LsJxy8PH0>ZLs?dxe9L1{pR^S-lkkp8%t(NR5;<&Qx1Q* zxo&A{TYaGs6A)9-LFpJ(cVW3Z(vLwg-lyRnrE?(9*v3>aOO!AipHW=#V;F5=3r$xC z@~V2KHBw4e5fkkS#_eU-lUDHdp?i6L!mQL#$!wJc4Tp$UBWDVZ_e--3w=T4GH*F=y zbx2f9ju1XHQ?Z!tSE)k=-+Iwk!0&|2S=`K2;oo8txC-I@?U2gTVklI4DaPqJJ&?LXjV4Qrz z$t@*Cgdooz{ris616tz#9PJ96)If^nJIs{;qlE0o8kV9G9zW(WRQ$ASI!kqqKosbp zl_dQK$CNrjn8KWG@LM`eC{{YOj;@v`HFC)zlg(HfWQ>Qg+DLE#x4?3)r2xutuPN0! zkpK$DhNh$%rL8c}4%k*WmJ?##$g+EUM=Fv88l=IQ*5o>J9_@~o!|qM2s$Y7BL(p##&^GNHAtbqGsb{CL?T38s|2 zsTc}Q{{Ski!&t*hLemLKy<}DLyM=pPl^aFAoq49r9V2B`=-^|dMLE<*JoP|UkwYJr zrj=P_a9IV#jh6b``Z;ORJXqCSJ4iU5t`(8z!n~=e>TBucsgMaN=~OgM3kzI;jBS61 zHgOn9JZo}+0(?WypLIClv?=;w@+nZY2xIePV_{)oWgs1GrH3pP0M?Y@Qlc^G$7MAT zq%%#rmP$)05ysX~w@~ULDliEZ=N6DbQQ_rX(X}OVTCS36g?%NBJhr;_+@%;_ z>$e;LX~vscr=G0kWA4}5 zHTt$fM~+xia3VdhO=dPwR1RBTRQ88Aw0D1lF-nWNl~6dP@+J~eYQeB zc=JhFo~o_GYLU`L&A4>=*Q&7SCWQH`BCw297^7EDF3WEY-TZl{BaYhBVIe&ve0uA` zj-0_2O+l8TIKY;89>m+QD#OHo4rn-nPeoY@I(5b-o9XIeht8!a(yU(kJeEy~rIAYO z;m!A$z!*~vrcz|#PO?_4)Sg*;uH|Z&>2JAqoxqkl=s8t54%)MBg?uotJ$ykTiD^hi z0wGlKhiik_ueAG5G>3%9;Z(Xh!5cdN01C))7^F#y(+Q%9&dPn{I`|EzO)cL1*l^C; zvujWQL=jk`r;QN49x3T5l=;s}?CJx3qBY%n+g@wDQdCVi;RJ};)mbU(8n4Zp<*DFw z0zc9=w_f*My8V3Ku#rtAWXy=?Wn*G73V9_{1YS~D%7n=&vEBA;H0OXEO+2>{l#(Vz zbB?lVc%hD3umb|lswDw*)7{h?dk-{8)f;K1!h#OoeQPVuBAPg)#L_T;G%*N9=CC7i zw{G0IF}9g)UNaaGQr4!4V*Lv{%O#!Wh6a+#ed}(KtPY;Sc}0~>#aVjNh#oF_>86^Z zIj0lPQj%3}-2%8R7*+0BJ<+@Bc7*?sYIwnCT;MMj~qI>>SYy4+fPp; z$2?I6B*p|oc-O;!V}5ye04fbpw&goEbXRFAyYH$+Xp6B%q%Hs_O?Crrc&#I8tG#eS z1SgGRNJ*-?NYCYxu?CXuxx7x6(DTViIL%RQfRl#G%~4c9c%brVD((|>1v zcqpC~WGy;~A8?M0Y8Ue;VUcO&l_`vx26r+b150Q%KMppcPe~nA;)+ZiJ8Qa`l2kE1 zAp^!#M3TlxAfeZG>uvqK*-YYauQ_+11SLCqYD87k0IEwoOv=7$NbaP2H1Q+b#*pY7 zD`){p7~nni#;l5&mNgfOP+@kC;bp&pw{dT89W9jrNWzd@z9I(?jb;?^!wYPNDMH%v z6$Znw+C8JUEfKPw3qs*a!BG*yoMW1ypo%q~EwKBNd1?dN_X}b)A0AKvB4%Qr4-w_k zFsKyXbP{<|#~IrG{4gqep^d>776)4s%@vf9JZi0>C1hb0EYeaZoUaj%4|+z79h+MB zxY=RjsO8k;0X0z4WQ7OKWOg;z;dl9k%6tW>uE9*wA3n` zQn_Q2T8COha-g*}}X-fVOFyZA`=EP-INX#ut z6@ZQ?EDeWZHfSzAysAhXCY9VtDIn=J-9*(g#te9wT8XX-Ok-t3Ygnn;3HINPKuM$z zue?Zu2d=mz^-BPbO6cQ+4>Cz23Ii$kw~GQid^o~WA|^#EQqUVpRA8FM_0q4FpE~~l zCnJ@BsX`PG2J4u?1IGMrKsfBAADD6^Y@919;tZgKs-9FA0r3D8ZVlLtw%zpd+RA65 zoK?UnoN?hv>iodW?5^oKkOt{7?cfqrdnD2!s0Edd8e=k=v4j6#K`Xe1M~*725IEpbbW8rM#`A9lY%oky@HUWu}R&K#@v; zU`Yyn;?}<$1tep#tlHM(i2zSKE4iu5f#keiYN@cD8pg)pw%d?L7`S~!Crv)xxhfjE zDl89(%o>`qD4{@FHkO^8l=t_P+T@?3jUnY6DTK1JLXS_o@u_gUQJZgAYBu>}+m zZ+msU?;bp-agC#@yi}5?I2y#0);v;@P^ys-Q~^zgiUMwKIKz$=5sfNoB~1{xkyuyx z$tcTO=%BHLO%P@v+QP$8r?-uzMLc9pBq3x@0yYC#%)wS8EUa9xS!R;uP?O)?y@$h! 
z3)swTtFAk+PQ#C_IwLY!W2s*-`>?9UST(zydm9~jqOhucrJZ0%#CvOpl~Wp`?v8VE z^DK*hpbrb{&lirU1}g3|3hl(S)Bh!MLLF`pW$G<#XDj-hEXq6-d zK*x^}LoZ4?X-%JAd}klc7+w#RGSx*jJ~Iwvl4@#772#XWqz1+b1CIY ze2bXy+{2R~%9Jk?&mIp}j}1JsC4`AqQ_Ex`<(M`7-g6B0hni-Wg;!6lN>SoZ9grro zb&REpG=`aFC<(&>Rypb_m#Cd%^pRapo@wyR)7AW>DYHa$c_Nxf1vHV#^D+9H0D9}55A)*-b4qUA(es@R7?)I*+9E0@Bk#PbL%u2|h-t+M}es zR{6SoJap3)r^TyP8j67_kjWy<2GOg5`@-YH#@x>JcCou@K~k^Ut6&jW$c%kK3Y>PO-y^Ch|LgCUQ^jdt8fkb@;=D^wgt_@NP5vI1mg-(!0aQ- zjYv12Newd7if-vkF@S&sjlFo$GHj`tFx=OOUd#jxsO<&aqD<+}q2X+xdGy_tH{CND;))jno`V zEp&1?`d^mwJ5p!bs)*?GmS?S{t&Xn?rJaOt35bxSNxJc}I+5Q_a(lK8*YhpmxmKkq z42_5f!-uVO)3W}_>~a@5`+gA=G<4slnATavFf4kbKITmIjMURGkNL$^ z$xreW?d8UNo$@r6KJD4B*z3$%&9f%%WeIH{PTi(_>UOhWv}Ht+40H-NFOK0kx|V|j z!l+`(If-E1njpdcQ|Dk%_C{G|RxRcLN!WF@yYPiMkw=ACqpc=S34cVp+?j zIPEjgWAym429il@;M})a9EFNRk%(y*vkk-_1H`Y|w{~H*dwzXXR!YK*Z4{ruI>n7v zP%R!M)7VN@RS}2Q*3hkOF3&ZlXw=EL8}85(r)&4&)=8e`*+Zpvmr87rj)g`eri$}C zy4LL=UE8I=KcpwvX$^>1bZVwav0O_er3kI4T59;#s;Y@rq`uw0QCUPs{5x*C^CmZM z>bHLET1rL2T#xA{XVekv#-ZHv>ECnj&ZRM+lRg|GgXycfek*HgX37=w;PA72ob|Y* zVA|z_Dnknw(%vKK{S4z14#Ii zeYGfVS7bZ2k-Q(aw;IO9D;VdlmUtJa>}N1O7T9fVRUHK+rk=ir3a15yiM(uG+T-F! zAoXRTxk`Z5Qzz#8%T@Z)X7sS zG;6pq+6Jl#@Zj}3wu^?6>!}M!7AJ) z7NNJQgAR*VR{sE5r>VsxOiL+HElpNhqJ6Q+zI$3k*6bN|0F5}#ad3cv2lk5F(dAdP zX)U`X%1+7@fQU*nnrIiJ!$&3d@8ca55;K=m!^weV#q>>tpcNdnqBaRAF zBfO$GiPy|U9s6=3&20O(CSa=oDpAWXK`<}?ZKB~vpK`2OYx>uavg+cED$j`GnUZ=@ z<~(pz*?^Yb`7AuN*4_9uMqhfArKgvq4iknWqstnD_op~<*Oprz3FzqKMWYt#9(2O9 zC3Yo?;#9O)hH|E8V8NbU?MVx_`5^31l>2{pYwX~Wtevoc8YLS2g5W7J>(Zon?O)gTL zBWDUUB`$5k4Jd2y%z=&N+R99;ht#QLukxg9|>I?L@GS_9A%s!tgWt`#jI1V)=$m>lcDhy2_SGocLvV|ho9s{1a zP0Gi5m3Wh~BV`n}l)zj7CXJOBO1gcD!GrY!2I>Yglxi?|;g=+3%4)bK%a8~ik}9^0 z!t=*=JKd6C0RWJAzSDTwb)N3wsd8{wz=-1&M6j;#=`xeTl-Yl$v^5iTKA)(aTP;h} zW~8gF!=aIxv3;ZfG~?wYQY zw!5%Up#)WR^+VFeY@wvhm|g=QDPW=SjTB%NZ_6~XNC!fn;n%}A{*cB+np84 zzN50M`y;LNBq<~rIQ_eP>kL2G2I9jqLx^o_(6o{L7@dSh&%&EB-a)77ucwCT4r9v{ z7~fN<@f;^Jo}kYi@mgFv<(ti+hOQJVEG2*jFse1(sRM~SJMA9azJ;RWQ+gBtCQ=~n zB#oo=qu$VW0n=vs0|g;&B}9NHR?Gx!_R%fsSEoEfAoT;L-Al{;ZpSkgIf`Jd)GCTh zzf1D9SS3y&7A_J?3*Bc zi*_1T0kna#HhER@`bNQVx=xhktYZ<$Se7A#<*8{f2x;nZyiP?G)X>ROD6GT2a*`vw z&K;6y-r$0WLXJUZ-M;m}lAT9v0Wq|B(DK{`5V40*;T?aa6u(wIE~>+N&s&%x%k{V} zU+P?`iqq9pM=Mj))t1{lD=R#UbvGe5Xgg`l_v~}DwJUbg3PDx4g$;%V!ax;>a-9xU{4t16>C18dcl^uMU=2J)R=0k z@tit``DV zHbN3abkNZmW|gEB0uM!I>DO1V-nr&X+(t`!DxaC7#_;U>Lsaz?n1vNh1R>y(l!PfP zb1VD6kVktFz%KA_!|d20NOl}2%TEdz1nBoZFXTJ9qzjlFWxU;Z>7$hidVl`#wrh)p>vzNmpA+%(NgtImeZ(NBR+KdFJKsD zJr;eZ5n{N8Hv6X)R}UPjBSwtJ8xMIy8;%WkZ?;_0&h@w5bd;nJL~tU4d7kQ8JcS{& z1eH;g%Jn{-@SLeZNnVZekzvqMVwChkd10q^r9_e?NKldiO?HhqME7i&saYX>N!jf8 zQKhctWoT#uoq{&>P`8F;{+8xc%QaXoe4d&qslcSda@HXWAf#p3vPl2|iwgz!weau2 z<9pWa%YoG`UF6Qwg$R?7S_ElIMw(dSy(Ph`sO6>pwc?cU)GeNn)6>Ho@!xm6bR?e} zaKSn5VwXG8;(;3^(<{iV3OlVhQz_{VHB@l*!|}OrwV_iLG_?U|mA&%H#GOIV0z5$A z`rm%D6*Ll}oOsYx-PN@VI!ddaV(7kT&DdpaCU)v1U(5W;TH0#t0+`_#jWqPLr13GS zrz*zJ7`Qqf+;ZD|ub5Jo-}Mkg$CYUA&AGsWX@@iAteex(mkwpdsp}<-rW=4`RqaIe zFC!=gB6+UtTGk_l7yIXW5@z8=Cx9^&^LbU1Vl)bDp_irko}!}*!sw=KLO9-^7?CNd zj8Q>5vbxOikgL7Mszj@KWo6V;P;4p=e=fC@4;v3Ui;OFHc=f#c=EgAEd&u z4Am3~TY^SwWTwPorh=&_jgg?47$})k-u?JHvF$H4xBD+7DnuwqCXhe7S!&=Zv5!H< zfE@4mdu3Yb@w~;2Q`Sh>jEhn;0mf3R$qJ${N=n}N(_5RLZx2`7&h=ZQ1g)hdA}|~% zfqjPXn@ne^2>KJ10+ZJdcQFhKjGIIky8eCbWR7)_ zT~~41Nl|TY9rl5~8LiXG2IWNyN>_m}N!uVCBWINia$VlFWf~B`!vIglzyHx8G_}%C zC;RXPordD@Jf2$x9hFRAo_1Y=Mm5aoM9DZFnvqjkNMK3Y1ddSa6bhSJ3a`3*d;5j; z1b+@}l02zCq=*C$!lbP#&E-8W>1p9RnAgJEHlpKOi(dNkKw~o#UJuAHKU#+(PZW}d zocU4#@1$rHUte=$8-}duMUILzNDDOIcM!gkmnn5Xs zc%~bi`GU=UNE^m8pj4>+fe#_mTL1yDWo`7k0jipMQ=AM*$b;qv)vK~{l? 
zyIXK(155i)ZZ=&?ghbU_b4r0o5!b?<=RY$9^%YAk1a(O5QVgM^1StwOu9}M*a@sV& zh^;St4jlckbPN7Z7-N%#)j@oTnlAr;B$MmLoD{5e=fzg(( zN|a<(sR+vm2VJLcdz~-a!->3L#8a+#Vg}xyjZH{RS&|C6(#;sSAlQP6UqF`a;m2Ez zq+$(e#Ucm>ozfKfBP(tdPE76$*8i^tV;xGh&*Jd zcNSNXU$uyR+7s-z8^G)|`rIIi$FJd8B#Noylrz;-%1}3(PY_Vif&nfg0?IhrmxM^G zIz$){X{1j&G!g(~Fsh|Q!Y6pn(O??nl75rV2Lwnl6xvd$N~0Qnq@AkdGE&y1WMx8_ zAdTaWar?UhB!m}vTTaLp{Z7Klu(C`3OOs5m1Joj4`eFoGrV)LPWNQfzqFk_oK~qyCUB@P_^RT-rylhi$O zO$>nqwNvasO{~Bkzj zwC^O7#4G9~e=a6PKX?hDAb0rj#H64|+esAp(wW9~*BaU6j-oG@!iu}n zH}9Z7PbiCl{9Amfvc?dR!e+8hTZOOicTx$l1#e;S z(~Nh^kVM4SmeA28!Q18OQW7c|VntaEGDtk=+iNJ@++SPV`*`cF$e61%prS}t%I>As zIFhmmqnFAC3aI3DDj3*;4Tr#;$7m*3wTMI~Y{NqGh{-tg*2nlwY;G z@{YJjAd?*<3fpe#A!7)`5i?Suqr|3`YRM`^RFfYuCfo#S0o8U5gXr(ewSbA)?W;P% zNRTi)Yb1t9RMx#m%rBT|%upnr^YLU`MzQvu9BTlln!ZV{ zc6l3ip=Lv`i4AQuwfOG222K^CAdE3jBBqK;nreA!MI2H^KG^)Y9KU1S?`sc;zaCSG zNQtebM@LNU>ZmbJ@H%dgPYjHpiv~XJ#@)BIhqN9i)|96S2dXKspV`4OncFocdP(Xk zcTkASEs2hxg)6DBvbSsUt|%yj(bGy$G!jIfH6D2Bns}ai*xez#VPj$!c<-*@EDtH( zCUl%>fTut%9v;qpDr9g1+tPGoup7wS;f}gk=slO^l2Uda6xOu@HEi?8+f2;TnNEon zI9m7;4`umu zx`~ZL8e}C(OzD%v^;D=KqFPWOl>~6;WN=tD?R%0m794TKg(fytd%Y;zfX13?2kGLC ztGvi+n#hU-U;qxD{g36Z6Og2;cpA0B(78-Oq=qGi%+)irNMu>Gj-nXd(3KuLM}9oC z9W%n8ORbe0Ixyk;>P}w6CYG+AX)3{z0Na%4@u%7C-r)FiNOp0GXv7UNNRhXNF6RO_ zAk-}?H~|e(>dk)Ooh`;yt5C)TZI=?15zPJ-f=XJ14JyjgETnm<3a!r3-setifSg7> zHLl^qw7`xPG7}t$AvCkcdwF6YwidPh-;W;{w*r}IutXHW>geNKnmX8Ne7Nadk!+6Y zu?8EDu#XRC8(UzeIMR;!K^mmS$fn1Rb1ZNo!q>TFk^cZm?`feHzZoG(z-+A{I*bS- z%Nq4=6%3yy(y)qH4(F&z8*PE?6!GmNjUl;gD2y=&8pt8T0A-Fyu@}FY?6wp=> zta>_qDiNcp{T#8%0dKbR7A_HfhHbx3D&t`%Z7K~dD8%vG)%e#5r;0tV9BiZmxmDT8 zTX^Yf9{u@Mx^&@HN)T5OGqC=YZM`#P9*$>xZW=D5bsDcVobPqk}Jb}~s>2tno zp{<5ouK4yNPghBfm<%&UDyr4kNV2KgFYMHCR@>^lwK|dl%1DYKyKQYKpkPI7jfr(9 ztu#5S3&H5IoW+V~j3#ODIkMrdt9(jKQDtgcn4yVeEJ`$GWM%DbO}UPid#v@%+Sn=n zD8pcmG!jPe-LB?Tr9{RQcYQ)+eA$L{XQCN@9nIOBsa;#lwY3%amRiH$#Oh#*o#~!u zjXe5Vc~z;(uHwXlsTvLox36>?vA04S2T;VQoINP>Hh$s60D@FU58Xrm0Hv;hV3YLc zDq{Uaf^3(WXxfW1Q0E&@QCB@xD*2(qI5E@2%oz(h+yU+3=I>wFBGn3ZFxieYEwy6n zxJzQfS1c8-6cPo; z23z~vu3?$wty-vQWw=5TKZ-b?mt6<)H2|qJZ`A~JD+C(d)QMmN2 zU5sG4MqIay=HUaY1Zhc&Bq~Fes3nUqU`bm6b+NVXI1%3qrrNnzbe#cM z#?k3Za?4U!DOr*Y4HIFMHPX}KICX6^VE8>;m6A4lP3^6jESQypm!vD$;* zZqPjFpE`fKZAzRZVL;|~mm0~K?M6<^c)U3FUs*&;TSUTPGOFAdi)uC#V!g)<@ACFm z=^BQiuz2(?nUlsQ8@nod9 zG^PhvA39NGa<+jENPO7!m?O4*t#5Dw+`jTmvP z!M^Fh+PXbx=ECT{g0M86ieFOY%4LHI#<09uBF3rc=y4ydRz(_|Vvdl1X{+X17^IgYLvtLy79I*8=MVxC>n(bR5$ zA)GOVl(oo0K=I_F-dLM!R3%D+cuZh^bOcsx@|`P6bq_9zY?G;Y4oAdsM9cWaHeu)U zzN!kd#xOHZSC=hdm^YJzON$DM98RQ5wz)(rOqC3-##Ja)yPXv4eg6 z2ooK0It>eVLn_qid167?>Cr>QW^KyVapugol%cJ!&Y3QNR^qrOJ3{n}Qydbk(L8S` zCSA^=#T?(+W5Dh0tF5KAK6IgE!3V0OD{Fx1TTP}E2N8gw3pdM^@C>V4P1I~=IvBGI zH28KsQI|~Sf=bD$_Ps1}q^so#1%~6_Tks`p^OrYi?j>3zjYDK~jx=ob%B*Y)j}fvD zT+vj_xL$Ln>2u+@P=_z&Tt*mhe6N@AO^HQMM@uxU42-*p3>5pr_n4jx^6bfnFEuF2 zft3@|Y6Z+L9B~Bf3fUbLPU>l!FjV2xD(fr4e9Up{XNjjNP40(^X@YDjI}7qfY7mu3 z@&b`sw=oA1!1Q_a(f+2ytLk!YQo``;N}~^-FGO(+IdRIFt6>VrsWurAoe4#6DOhL{ z{f)Q~d4x4{1vo39Y&|BV%O)0WPLtprCVeU%86|yHDX72|XdNdQjz3L}hVu~0`$&sq zk}jYTqMI8NrlC#UunJbZ3}G7$Az+;YPQi(#ymV7xu~)tUT|6~4(j_AdY2XMVwy*|_ zMhgS>bicQam-VG7DS{}$HpNNRv(ABRS3lrb_FbvQaY_m3=_#r{T9*sP>Dl61kS`>N z3+&#-J6_hc$FWGZxI(n_@f4I?TB!wKVbDcL$(=&XxON{k76Dx}xq7aOw7v~p4X1i} zlG5xeAw)o+_}bczB+TsqDN(^djxzz<-@X|=pG<5#6_qymtdsuY&pD+J@yLeE!yNnx~yM5ie>OdgKoH5`ify=eHjY@7#(da1|isQKcCyc%+ zfw_|pro*75N*eWq(@OBMzr2?_fj~`<)5o%`+Vb6il{lbDI|-tf8B^}}i3GsJb<#_x z9eC;!XOEG?bIvQ37azsptHsVvNM35X%63-OY9tEM6hfhmfn6`bY0Yi`n?Pj0SWkGr z5d)6c;q0U=W=dHoT4GFKDE#I;E`v7VI6=)Fbm|1Slojh!hGDqHCK){yL&k5k#}tfI 
z+uaB6;im0&%DV^L@GT)?2@2uuqg>x~ZSK&3LR3MJc0r&zpQ4z}ZA}hx!FvAyQ#~af zAtfebqs(|MEj?8d`6XFcvwPA;7!U>gIV&^u8E7oj(#^zyfCNFuhgAaI?7a?L_pPC< z4%o(#mAwz^9vLlOKY?}g2*j~x7?gM}fTzO#xKwbdRmhJ8-%AHcXv<0tjo{qW`X)@Pa zr-FkPo2t2gG1TVhA*#+8mLr#N^qP)3sMV??Wb*Gb$lql59r+P|ZCY6>(x9P+D%;j5 zXr^7jK#Vw3mrFWHj?0=bEX`LB^=6X>#H#TMJi(V}&Ki#jQ@uEWk}$qwNxxm!(4G?S z4*A?Z(P>5!dhzp`e%mT-DoeaX;YL#}Y$0gS^y8CEHWyww0+%tI;B zt`D;OYTGj|eb|qUHWu@Q5>6ZqC9%jVYgY@e$8oosjr2;8?VPu&ev>{>GMjG_%P+iuRm1Sgy24#RUv?08JzzxZdgI)yAGs;LSRgmDI2x4c6DghY7e@R5(G7 zj&P(X%$rI|P$524E@e);W~_cFvtBKUwk=pgVW~+{{$3wKk)@OtBr^+s{l>l=F1)^2 zRL&z{r>tEr!iEB!@V{I6Yx=jCa=vE8s%WdIA*jRP&3P`I&sR%QfQ_j0Mk8mBOA@C3 zI`CDLU);8U9TJn|4O-^iE7b-KR^Dpr=+4+yIafcZeBXd!xMf3CW0K}5>L@Cwm43|C zpD$SPnALUB6oJ@#G_wdeP|(fkz739l)h3%KfaJu`5UAdg(VJYJ#WJ^ zMlB>sMG9lsjaQbGm1BAsq{b$Doo=BOb@4oGyuC5pt_N2hJ1MIT-g%Uj7ebVFV@kY< z=?5>zR%$ZmTBZI`n4DMU94|0SS5C2^UFg7JRZ*(h-&A#A1$|RCiDdHHw>( zHj!)cpUj)LP=E&spVF7NYoNj_x-8+J! zR-g2e7g6Q0V{zZx##eOULUb(gk-}#P{b@pMT$vkd|I#Crl=V*}$wd@3(kqzC)9(re z!7Lb^M>{CnvVmN{cGTZ@ zB$#~hsk`}5Q#@|7vX!+kpa2F;d}+ceAtgiYubU65l|9Jm(L$CY z+lcYg$CX1yZ6-3NviKwS0SyRb&ESZT8ZJ z^d(WMU{gy_P7&(pnwmCUEP-|nqf&Pdb&8v5&AR3kfodz9VzDhQCs9tW)sZ1b8z~l9 zB#J;!y1?&I^ow)I)CeL8H0qL+q;U1wPDZX+Y?0Q|r9_HSc9%7nT z4{A!uWQIs_NZoeT)zq1_OI++JDYwGI>gC&SOvW^w#lnwAy0g(wP-TsLn2k2&ng^Mu zRs%!|(Yoy_1@%5Wag{F}Ri!jj2oncxH72gN6T|8q5+q25)mbHI_sg!f1yo!0*Nyke z)d!xcp@J~k$IqsvNjt0wH_rUEM1-1}dao~QD-ms1O>N%Va>^k*Vy;%8gp~*pk26j5 zQ+&-#aZx-n)YM7xaMQ$(RXsu62Gl98?|Q?>*~+HjDTPiQJ1aB|pM_P%l88~j0Uai3+?v5b5>%w# zYZeO|G;+4f+R9Xcay^HStHMHNXWvx@a3BFrAILEoC{kfb1{V;0ikO8;MJp=naT~BU z*T+b@BaM1ke75;YTB-Mf$SCN^XrB6$=7C}d-O9VAN*yDw3TdRRO_>`{K2;vFvV1uI z0H)G5AtJDXmT(H6bw$m;de`na%AHu)FsF%Vr~sj}*RGsDpVh)5ih(QRshl-D@zA~A zc%CyGD_dJ_cGJf8<8GiV!Ayf*O~Fb`9Df>Zro@&ixEJ|M!ip3rB-QsM>!{bU=Yj|_ zN1;1wXjl-a^&3TDsjH-Cd1>%-Lkq|POB()-Elu` zoJ$d?FAT8LJ{=Mlr<%KZ6=gOAnF-U4cj^fwkzOF3Mse%trwUgVCZ*{Xq_B!esqIsB zK0B;*K0eMULdwS7wNb2LI-{MnjiZ{5LoGb;Nd&FED<_G4#rr2fFMnq=;&vGT4{cqC zViSy3G4itckS_pSj;esoN zF-a>8x3Rpy?YZzB{CPy#r(U?(?DlN0Id-CrJqCSz_WD%0mLDuMt1J8mtW zOvu~q@~qROh65ENB|LHxW1e+H_O^mc6ae_QXdSw+)2}o>^ zq+x}a9oM)XHs;7Hz?oN-Q$UiUNC0C!b)L4oM#Tq!f^LrD9vS64XH z%4CE?50i(^k0V>n^R8cZnyVd0x0_Q36p^;HGy;_;Z^pSC5u}z_;uVWkMAnfSbrD9c z#ncUL{d%Se90M8yYfwu-QG3enG z)AREA-eh|!mL)E-`_hJA@oYL1e-1a^kvd7AD%;vpq`)}*DMv}>N#xZ$$yF+kF<~~0 z5pQ@9ds|N2eKK*8jcB}?3WFVWmSYFa7{mrKC7s{RHWwO@0nqZQbtPksT~}I;6BOAF z8<(0#vAokWDkVgZBxK&+Cc@m(LmpD?5#lV}3l zM|&RM4qXpx*-eDQWME|N#CxfNvMStFY5rz2O3FnmBD9f2t=i+>CidcWwn;K^uO#c8 zfNYXDQ_5lQH7zSw#`Oyc2(HyE5mg4jHr~gz&YJSgu!JW8PO|YZouj9t&ZNT)JuH<4 z5Iiz4Q!>a9?_xHAV05>ShaF3yN2)khkev;m8JWWh#^)`$sm~K$FB)i{X9|Fit&Hk}Sm)6w*f69ZG?$ z5=}Cwdyja5cDA0=%_;H(l;KrNt4V>6pW#h4l1EtUTTF{cSoywPTuLMi+Jaq4)8VHm zw@EuxXLK(7g9BHw841brORpI)h*om9y{#R5n^`fW(c!u;TU#3 zhSIT`wt_a6j8dnYEYU@A?>5_!t%&D0_u}~%N^#VrCP~BBr)6}PbuIgb5|wz2VrtiM zKAUuR5UPVeP~-TMC1nJ;LTK^KLNA!uB`~lP=)qRxZ65Bu`3ttyTx~xw%VzT+^P%7bQuVwRyWtrPWg%ipE31)bFL3Gizv(a zW!{=vTCI|!MI4m&GKmlyhCfFG`POG*rsd#Q8v?db-d$;Ol^p^)>aVk(Rr2ms&6q|r zknp^3DfFurte-2?#fZ&SUx+PJ&Si>f`J^mFO}pJKpdRmsA}ggP`ASj{l<5)zVk4`r zhF0q=!Ni_3LQXr1;eA8RI367pL9EOWtgD6MSTuoPrlUYN`Kd73aC_-;4+BBveYnBf zKO;`A;Yx4>i4?W2^t%-%LWcBEAsAf7_KLd)Gj}YtyD;|K8xl!kXefx3$^v};Z|dtF!_R$;Sf3~ z*^3pS5j#yj)nwUly1Y9Grp%CF_y!`ai{mm%Jm%XROm3sF2vmXxw}mNF4kQwsX-oF% zL4iqpf2NrBCx<2%QHxSy*cM=n&lPoI{*sn$h0w75;7{4Mym$5iTEQYJj#?>V6eu>! z{++WfX{AiHf<6@mLVU$m8%0{?1$m-f&ngXWUgyKv!+UpIyE;a#RDt_8kc@B@D=>N! 
z$1$9d9&pAh*jG=)l-X~3T zwOUw42UHRZdeGUC{z)YOyXG5(URR!jbtq|O>f zc*1~gP5TZgNpvYni8HhCqY#GMNJs)H;+rqcPV18JtU9WicuYbJQnD1Kp0Ca#IOLE> zV+yRq07$is&$oio!Fs;+Id-oh)UE@HV;NoI zuA2`MRR(xrmNpT(k1!H$!AU0CY9_P1eJ^)wA!t`$R|*W>TL8`o#B4kX_EOh7?s zhgWqACg)lV$CcrME91)9ftHsUsuaL`)Z0+SJ*}#%DYu6o7EP%nwz3F3eg=^@nW+zi zltEDRpfeh%#PaTE6I0L$X>mwtaXM(R6D>7MP*vJ!{o2i&)Qg35B}g7TY1?obOr0Bf z!i0?7B&q=l@}UDU;$s!W#xaayFy-7c5u|yr$t0DUS|}1fEG)*75X-gWED#1;8{BKn zCeQ|gOe`I=<<0)LI)=_-m$X?wAI)@F1$Jh|Bf>GfRv``%h2>}^#Icsco*-%J>Z3-q zD8$^0vjMHGz@A{uvs#eaHF3ni9#~SfS|M@X7&9Ft)rjT?r08!=vifBFy^Q9}O9R0p zrOem`Jv>!pJG^U%)W;^!ps-Ekd(J_y8WX}6Y|{GR!b6r_1#UJs__h^L6$O|R%4qaV;|Ou@GL4vgA1djI?XkBMJ34! ze3Y>VzhlV{vqqSEy$&rX4kjf1#PkWE$7l<6Or;4)I!x`r`TVFe%zbXH&slb!zb{bB zmGb2^BoWe5V(KGgT9h&{nL*I77ElJ3JS}T>TeY}f6DD~1R9L&vv53=#0)dR7l<|I~ zW=y{}3xZTGU%;JTI%2ev6sJhUgJ2Bm2Ki2c=f_?R(v>BZY9mqEN$jkgb@AOP5uXl$hT9e)Z{53ZKCUk zlG?**!={yOh~_%n5Bj@`;uJXiQB*9Dy)#EmQ5wc~gq9r&$UUdI;8}|{*tpb*gN8W! zDA_1qcLd4kpyxedvtkw4&H;%#n7&>%j|rxYZ5>l5o~`F5g_m$76Y$`vhMK`dc#j^c zD7ssO=m0^+fm%Ap%GFqGb(n1Vj*Af6MdnK^l$8lF46!mOBlKIrQ++kpjoeuwNl;RP zNXMp&nPo|Z6#g^9rem4HqZP{%M^lAUs!1Z@^)DEVEK3>Mu^pfb-$LVrcBt;IAe<9T zIO^b%F`5E%1{qN;1$Hf71`k-$Lx3zwVnH6$<Kp@00j8X<=ofI%{Ad{;TVh;hi!k_O#wk10V_!>cG z-h~0iIf^owXQtUGtTnZ>Nr6VAY&es!hh*Ege5)#2=EqC(Xw6ii3dHk`KC5IVBf>fN z)*QRi{1YYSexSV@J|SO;Lr;z

    q&WDKSjPzhpKDIng*hPkG6r&&SRoJAyN$f0Bb zj?qFNU8bh~M}xsnm*kr>(N#e$1qC`xer5@PSr`~uZLvvaZu)SGBU^>Sk_1G8e_Au% zCP219N8g6ZF8PB#V?7|pat>Od&F_v^W~$HYHe<{6<&)$imC`u3x7sS<2A95?@JQBL zE*ewA!>0^38VnrS>m?rsdv?!OEly7eAwm@Lr;c(EUPoqOALYbDIumJI7r;2 zHSBv>Yu|D=IQ&#M5JIo6e8u^KhDXR9Jt;e9bA}AOqDWX^2aS}RZ$+^ zSY{}_>=Y;q!i$^ph)*4yF`-3`k9Teqp(m!4)KOK?Vt8}mm_&6^O44FDb!H>pHCWaa zRzTLgV`__wo+4YalBvduP=Fmw4UW;qTjQnL`nsuvK$_R`nUL{({HjTduEu#gG; z&`rDcJS2JOM4h}T_tF8vCXl^F%o*CH=x~gUO%+BLG@mhuR@YSdW#kRJ^oSEh3#i+v z_;GEW$`&;&#Bh(D7J#*=CMYh)v6>tjt>UA?adb>@JP>9qK7y`FVNQ6fbcep^PN8+g+;J)ZJ@QNStRHe%m3ipIB~z}`PSOmFVVyweMS zLDZdmEz2{NKX6EFVf`=A(OzYpH*U;Knn+LTlP*-{++`~0{!a?S56aQj;EwPCIIxJK z&Pl$&1Ke=kt=)JmCNYVFkHU#RwB>E8aE@@Mj8m#}XN)HZtgeaTq=qpqLX#MZSR346 z;ub2q!LYY({BG9W#3@MY;YN13fOke7F1q$T)zphE%a-7dvk{XQc%i7K!~BFIIGhz! zP3krPEJ-5#-HukbA1W9DLS_j_h``g;Ms31o%hgz{Sga7qm}Oiu9l>R&ii|;0%N#(M z=TsrdLT?#h$6k8s#m2OQGKo5fnI4pChZ=&QM@K~pn14fZu5PJHY}<~h=5>>$Fz;$qk(1;;8+QPj8Vca1PQ>=g6V#1k3YR`D_4%K zUIQ9VXlY~?W-=Rsrq={&3jN$v-QJ=Cust1g^|OI~6quegAY!zcZ^**%Y)cJ+X{te) zW2~KGc@bC@k~4daMX%2rl{REiy_yt6V>SQNBhb^V_TOb*dIl`N3V5#{9WQoKNArBJN`LF=2NL7ns+Wr@AZ@YjXQwniT z3=RhwkA-lcnz97j1tZtNi88|YeMFJ3lG>f1g#))NmdxzKXyaFHaDttO%WWv-qBDgw z_*v^Gjll5_>AS}R|(u#nlN$Trr(wXF*DprB5OG_<6tWhvxN~X{X zfqRm8&|wW8k=Im9>mceUR|w%wG}Vi`sycs_o~2cbMGol-pzb%F1^DWSP{USo=^%k5L*<381hU&fw)&s1jG;sfHi1tB zpr}P=p1QuOXyLNNO?j*iK-Q%XBj4(L|D~){{ZPVG>}J4 zW%87>I4fKVPhmpn3S zY^Y*5>>rJ0#W4y2{1+`lju}}0zl(WRI(Vr3h_ZpD-S7TnV5j5Z@s!80sOf} z)h8NtDIgh$pIULKk^$yLMkRfvVVY_;RV}*z0AQuVSYNQ^P05gAMJhvT2@4xR#(E7# zmU>8|VGU7nvOVCTVVnS*bY1?y(JS&U|du?!)a2PUNuW< zN!TMNcD4g|8A8$(wtVWrvjhplZ%s~*55#4Lb$~-vM-v9ES*hH}_xFId+Y5KG?`|nx zqLV5S9TdYjD95HIy7wtyr-q(MXjbeir^D$Rv14J}*jwSscbcFcjuBo9DNnX(t#Nk*$k;Basw5+DYT~(QW0_1aI+>p)6#;4|iJ`H13%f%d z*!`RCPzm}uxf-^CPoZm41PMNM!Qe4N95nIBwavMFgZ%tVT!$`m3E zM~>sfZ!8WR{3)vyq!pPH!m{BQv^9UCmlvvvr$w9262<`p=zYU`kB=O-brTS0uAXHM z5vZhN(XyRt>!^Y(jXA3@{*fuzzm!2AXL^E9-Oax7AnCx@n*<0jN#RXUz8(VB{MAK0 zJOKQ_C{(nrNU|^P*lDjka0!Y`m87hRh~Z8%;neK(l{9rB;bk)UL4!;0X}E1SKSv%Y zNCccFo>j)bv<)C~YBuaRw2w7SOsb?YT98ubwZk`XILCkzNbvd7YAHfNBpIh%ClkS$ zHgpYkQ$um1gPSP5OPj(2Y7M__xZPRH)7Xyf#A$)P`r0l396oci{ zS!g1~@bonm!Tw!u^6vAjjE4GXqh9>5WobAVrxx0j+$e&od!~xMMwj&KSsbK8B>T3> z8m_uA{QbOjpd)Susz56fvar=OkV36Th%Os7LhX4f6iB3nDgX_8>Dc%9@wdTH`@NsK zyvztOG0w_kTvdv)RptmQD--;Zh$CIH4&Px&1L);jQ2=~tSFT5}DmOaz7rhMiLojup<7hE1{YT$i6+K1AxYQ6mMZDSnpML}SsJ4W9-f~+ zN>7^HIi{-qUtLY*OCT`{$r!i;xIn5H1|7z{We*`*bf0xoze1DZ2On)r6gaI`>UC-f zqd%q)nuZR)>fdetu2Ad7R16-9x-=0Bm43zM;2T+V|sfWDSrj#D#c9AbNAg zp}~gHKqq*yN|92eOH;dg!*Xl_khbyHQO1XNiTKk=B`GQx1F&tZ)bOO06#_V>XidC{ z&IqPmG{ zW(82mM<4_pqy-?6*n2ruAybYfoNr1@B$ARibkx`(ifzjkBq~?)2o}u;d>1}o;nnm>8lkJ95D9RI?8vBMYT|l zFu|5M<6WUj>8lU4ZTE4PeCx>yLXsvc2&;lEk-+UeX(bFsJsh9#fz7GIr^8O9s1j!# zomGS8XIUPCDoJON{{Txgc1S@d#Mo`(wRE2F@)5=*uF@I^CG7)&?HsqW$vJQX!N(5;ORv@B6PN$VP~%JV7fSi!r>3J0}CcS3>OWkjsfk zFk*+CJJnxAbA2T)CzkSkWn>YEDzGen9ZEWis=c9-YH4CLwkS*6?l;ol2JT&ON|J5j z4kmFPeN-S9WB@~-cd~{|>(rSf!=S6kd0^z+;Aw>MWjxVeO-^d4f!f8AiWZQ@M!xT3 zrN2!xJ`Y(FR_WYK)hokFg{F?_vEjL(MD<%#HJ$1|-ia+ejXq7;+Hp`fLu zD9W1%VJDjGTHx#8pFT@0I201D@P>J2+(19 z(z-m&L;Uj&f|{IB(nQT8Lp(8uL`#jbI45qGwwwoVu5Ej9XeFfpHlN0Y*$umGx@B|n zpjW7nbW@>OHwQSCWnN}k_R!+kZe5jSmcB;&B#9LaboV~fCRd0*6?cgnbE$~p47JmmC@uZLsw6v3%#-97Yxn}G`e zGz_0Z9wb&qjY@TrLQeyU6k?mTkOGx4)wYC8C#~4#V*(sdz8hCTcwnU87U>qG5gb=z zb8A>>sNqe;nM(2FOAX4W!SABQn|iy{HO3~Q#F~0cE{_I6w;CiV8x34M4H~~geEp^tmqD%`e<9H=i1wI-QCa7pk`{3EQHsDuFcfH8q za_22`Nm584{Ak|CH7bOtPbYq_DY))+n<;r4hVv9pIwna{rvBVNodI8D&qPR)%bdk(VQO}!tn{{ zq|ewaG`ZRF$|@W$6gBkj^zRLNs31zfS(@Y(wTC7pCDj!P1Rl8_-Xf#)DJlU_QlY|# 
zFR4FF*{`d3R4YLtpw3+@r<(x7gM7rlJB16+6I4p^BC(G4U5@rkUfcrjmP+$B?YdK} z9lB%PLX3|61g#phld?|Us;Ovlh9`zs=6pvsQ!WFTvXxym79odCQ5meLNg^RuG~5G5 z3VEBB_6#rFayPq56oiBsN%>XVv|1lmc^c?z%h=5IG?{v<1+9Fmn5ir2YcM?6aPd<| zOwh`=Aqm?ir`}ikc%Li1TQ6!U)CUQnYwS0A#)ByxBc_FXTBer}#G=D-+|4y@bxd`b z9#OA?mT71(>0YW@hK{f0)!Aw0X7^q~JhNfi+!RT;HZ&^_fcM8ZY-4AT1=y1 z>hqwZ62a;x$;NEQMIfc7hNe75q2n%MA&oNw1&ySe#JZCXUgVN!d zPZzGjX|oPl0$Pzz8yG28VwXmFBML=_k0o17jfU;fZq}s|0W;II`zkBmY|yRMxJ)D* zJi215+xbRd&AIeY;uNz~EqzqMaFs$GS(MJ_+}dsh&5v#k7)WW458d*pp{AX3@M=B@ zqm@C5Wc(Ko!tlH|4O*(ocx1w+rlmnkHCqw1i(68F-sB$KOLAUH2qPH=dMa5pQtHKAbRX+NVl=xEy zfuVs*opHQg8mii+$6`;Hrly&gQb|t?s~`|>C0Akhdv^BoYrR7)CQ~D)udaiQDQ~!V zC~Tm&8o+UQYOzY(GLEYd!Rgj`K2BFkxMI|X+R&n&;{Cz)aYeP$E-Y$|eLD&E(o42h zx#LJYBM!{D_Y|PRm;|9$hYy}gt?|54GVa`qAu6SI*yu^(+q>57n){*wO)c3 z15P8Rl~HEg!Idy?p3azhXN;p1!n+g1Fp=d{MIl?n<|p@qkKPwNwUx3K>HNC%t~MNh zN@nS5VXaP$NuHmTFEB4td0wWLlB)`dXMjc>s;eQ}R|SsHj~E4t4S-m7I`eDGA9MCg z@W&1wb*D3Ck^x8%a}rAv24Nd1+xkV(g|y+# zrcAO1s4a{(V@Puh?3T$1AzTmPMDr8sEe;0`W6fC1RJpe}*GSEVrY9@CWh`#xSb~aG zly^XQEVr{#h-oKEJ=6hSYTGIxp$n$jy=yQ$I~v6*aSRHZ<~~}t7pIHoDJnen z5vn{_-5Ty|GZC)>nZ1Hre1HOv2rzp1Q1^GYfoo_i9lcYxs8xyJ6>{J?FA1TmH5ly$ zKF6-bDIhdU8%VcAF-!yOB!Hv~6XU>xWG&+YM?o@ZW^R@e5~KK1I~tQ4%b8-oGGX-+ ze>ub>75IC}Pddbgr1^@Ht0`o3jlIwo$HegB&AWBI4iTVmoPSy=Q_5`WiVg8@r~d#| zu>3B%13a(hE-PH@SUw~EjVm%L?QPnjB|D38HSSQWwRjT)g&}(R4YU+-Cxs80OETga zrU~B#IXbXva+U$Omx&S0yKay40i1(#YUIlk1onyZ9VeQfY>@|yvH+aB}qxf ziGFy*GTt@Q8rrH#`C!L#-Arc_iXkT2m3G=mW7z>WxYGO+Os zf(X^dc2Eg{R!NxO4}%?~hZUfu^J!>OCTQj}IV8x$*mpN1_u!7=m134W#_^vQGk?QPd&B^H@eGf>wXjGk7`Dg8N(So zDSHS=NmGXnf;wq^g7rf<)?t{IV#28oD}+$gM}egVDNbUjs*c5_3ax9iZ0cpuiW+)9l&Lt)O43(UDoFnTWB^DC zsxJ4}aN*uii+u`9l2z>jiY~34E`>}YF{NMmmFjjoi(!<%&u|Ga%sqVCrl`e=PZG$A zrI<8i8(eBNJW9@9(orF1cJ)=wyQ)^-2%&E%`5iZ;dOUME(&NvB3X0q|ETm_iS)^;R zRhB}FyITu`rMSSc((zC-6}D+znuAzvibTiON-C=Ou`z_lR*OqE+$T55QC(F*GRdrq z71VjG4T9W@*oy*qWW|Ulp;N1d+DQaoFFRWoJe-+wKzuIG&D*kuE1x@>EpG zhr2rlcv(@5u`$->+9c+zwMh^DwBmTIU!oXft50R$@W z4a^V)UF44b?!596hRmE#3eZqmahiWgaQ&5QM~Kw|jk&4J&P;k*``G)Scn&@k*%;yu zAEhL50X`Bwbr;T{!zPC9QqW6uvrWFhA9mXI_TP-Q<7hD$nt0o*P6LfehI&C2Sd-6$ zRR&3-hh*xX09l%vZ4vbDV2`xB#nghR2o{E$V}2ql-_+?hmJ&0*IxH49h<`X{ruBvPy`>l-A%sH zbjR@Rk+;^m-y*8b7BrBf?`ASmIYQ){k-3Q){0A6%U=XOnm0PJ0d>;N429FZ`HA+-t zFIP;UO7ATxSW(oAY%1HlHRh1`9u-eK^wLqGSc8D66)Mxy$y*duu8ivOOwiRVg{~}E zv#=xK#$75-ojZJ~)|(3wRe|XF({j>RyfK=(l&6$F_(mGjMo1>-a~R0CvyGNq(fdO< z@Yzo-Cw-Kd4xg0>8Gkw9 z`Fb|0pQf~3K*Yo;NgTPgYJ@Z{LEYuUNl{K5-&>X+M-g7zUOFKQC<78iVm!z(N^SES zcH26d3sLFDJ(P~6#W7mcN_;AsYJW7H%Q91t_B)D(k)L(=`$~04*%8(#y4}Xf5JYtF zC&}Dt0K)HK?%}C5qAsrGiuLmKt1JTN_&Zw4*@lV2DZMTDVZtkMNI5Z%OewD%4W4;*eX)-zkAqZ@C}^ zYoCW1V5}&Fj4N#~P>B=K)wlGg)WoKmc`0!$JM;>wl}jj$**qKEBmz7+cHPGjUQsGn zLD?NWeNkEGl7?i6M+4K-NWNUt34EB+$8au3ayx0oddAWuc~u{BPMwP3P#Ut9w{oRK zQY>z%<~*#r02GU!-wBbC8nmDiKIu7_5-!+Cok_fW5n(yRzSP5zE;3MUwWPZ$XxoKFrF>{aNfRv78v zf^=}{7m+1HGTp2Zn1Trh!(LbO3fUFqBlw59np63zEnH&-l30U}n3@@piYfv~v1^mt zw;3AB$48wy8A2uwBi~bClCrqSV3w%Cx3nlBP~*1k{$}I#apt8*0Z5(H8`T3y1ohW- z@Yc~zhMo$htf*bnElQ6ifI4k$qTq{n8XNM;LehL9n{=!xDnDr(I8=4T-9ULT`Kc;t zSya=?ttx^@)kwF99yZsB(DL9yc+=}?B|sHn>>hP8ml&EE{L0FPLt0E}4MeRS)wLy4 z%U~_xbFVy?bnK@aVtkEEY4F9K)R0LrjSPP@fWFpaukHZ8I*mC00C=guQ%(511`ZL? 
zT+)g%(!~W}T~^X6NhZLTS(K-*3WG;B}qs5L%xK+&0A2_YrqXsISUkANaW z7$o}^{PsHffTXRmbbxw#X|=HGx^VMM^$8=Ytdv!hs~J^p+eeu8zp~tGy5?!;8wpmF zk~!g1QjZR!hDS&qPkB~hL;(m`5uhn@-wWI0$CloNQy}U92*AnVhh1Zes+L*_(g+#t zBE*KSM2rQ#qy=WwkB=A*Ye-LBSyxB{ZkhKQkkueaDG@eH6{V(%I9R-@Nd&7Zk#5|e zWm?ItceCIkdd32?wkE4e^Zx)L5^Vt*NlM2q@IKv;n}Ns0(ugBS{3@Zjo{07K)JiN) zH!(D?OG_*S#Ii^)a>qs2y}mq59stQTR}>{Dgl*%l1Q5JaFB#4hqBFO&22q zpbW=fuT@Bbi6o|~ljdWjr%+UhW>L6qHn~!7e-1XCrhghxq?m+p+t*x@nyG7O;(^tm zWm%RP-bHJGZUT<$YwODdNHS`~lzp2fu`LQmRZ%V>M-1}L&Z#bvYV605m`YMB;2z^T=g0oZBZn_bFwkaks&>W~Bp6xs85HYHCi z@YnpUGHsRVB#5qqOMZ|W`*@pHqyxb8(`s{B1^^>qtoUV31wk~_(u#*D3we>SMFij8 z0C(^fiIV7?2LguSRKU>ZUDCC2S#f(=$6ppjzZy542Aj5)R5y-I1gi zCU*4xwUN@jB9G^F*7#x0eb=yZTY7IghEVGg~3;d>IZCb{3=>f%}$b5;u9@H z{{XqiOYT{`Y@2`~eW#9w!cYQ?o|d5A`I!Nfs|^L0a3FTSD0X8{Ek#R$b_tpUdec#j(`5)Jr9_(%Ttb=*4Xf4`iV2;z zw2rkPf|l>0`0^?3dflbvdQ>r*mAe&FsLe0k)C1Z#$J1db|3?$fDU1!IaY?2n@|>JNCyCV@T|&x^&!M5(*TcJ z*%#A3bn2F5$v7Oj8`3Uqs+SGIDdV8d_=XXcR|u=7D)21G^%d02jzdO0)M9n_K;%!D zWp|A!N^#W5IQX&hh^ZFI*4C83G1o#K70_={`Cl~8)=J7c9LJoonqh>}8^yJ5pxFUiWSl*4nl)zGr7BE>@s64SGM`HQXL^ax z*^4jcOhW*ZMl+`psDsrBD zhN0q&ysAynS7|Dv_p~_9tUK#h64QfG1_AtOoUZ64oBLHx*j7u*Q}ph=@d%~B zrSiATaRw5BI+d9mHsV!XKXjXp+%Gwcskusl1e3#}i@UZ10Fri5Wz1OJUNJrwiBCg= z<=h&gUy+(T5XhG_s#Ro0VXH_99lUuYU0o=-sUjc&v*SoC?A&di67^%YoU#;odl$#r zTt^+Ll7f%To;YVw6p1uwBaKVnaqaHhaDz2`=+sfRoMP-avYFXJeqhTH)L`j};h2p| z)ulV)c!f=LMrM&jz#S0;nhuySK|U&YG|aqanri4y7wo-$HSJewOhy_4J1+%nXt;zkapB;-GKF*toW8g z=*LO3g(g$X`L3p(zY46X{{Tm&uAWM8KTlBB3t^)s$hm&y!5iU2xN}y z?*Mh=fbQvL7OpLJuHlIZ69=o(h^=q!7p0PzAnhX>NMqi!GnCoO2Iw|$$l0&?mT<3z zT!SQjXO&jCgY1QnD*leZ?E!2^;}7nZcbYQW%2tpzj6k0%*YitT7gm(DsXqSzZ9DZ_ zq?v20c&2U59WTxlIm0*PJT7XC?UpcKGx>>1a*Fi0jXd{idYE_a-1?PP2T{PwJCyaB zl!Z(QAQ6o5^Av4spR;&@;2~oQDA^Ay=IkwHW`IM7(+}nqG!tzEpD|DC`lB>qc(o$XQ={N_3xXL&C9I%r26=<~eI08LOxbCO0h4 zA%88xgsk&Mh1o)q-H#el)U`lFk%1A2_G5)y7SxfB6_?a5K*qXjgJewohxBIw&v4^U z3(`?w^3hezM~=>X$g1nrqXq42p}AY=c_>+2yckZqm`r`VW1^!M&|X@DNX07K9LS-w zGE(}E=;^szo}IDOAyMjtcpV`phN*&k0R@#$g9 zztyQ8N~~q$p?&d7B)eqTj;>=a&%Iesd-2;Qo(WsU1!_#5gyYW2rJ7Lth)R|a+2Q;~ zDB^WL8Oj|T>Acx*CFVz^!ZQ^ORudLmY(}pVnX9U(o;8{}ipfhN`=-dlzi#{@%Fu6a z6L6$}P@%AC2Of@^lrwDIHdfu*PN^7UCmO4~dlRO`C(Lx!l$CgnX2t0ssKcqU;zvt` zq>4g?H1zutJ+|zNa(sCywV5Twcd7w9VxauO-DyHo_Y=3Or)7M<;HV=nO zOC}rT7}FM(24c~uywx(ruu2%*lWrzmv!Il?Fg$UkSyl0KQGM9|c3=+0!=_RV6 z5;UY!)z;IbOw6sl;f_KR-|kl2EpdR{q!S*3kJ@xBg%OVqG%8YI@x2vIMmd8204Sh( zV+t{-hvbY*zHavgw~Jfd)8WLz=xa_oqZ|e_; zmg=%Am$d0PMzG!rl_U|ThZ$4D9X_*xj;d|_1t5}0Jrq84!aS<@HaCOj+(YFnseqcB zGX#kyjyG$0L1bk+neE%R16Q{N4uX`OJsWEFjlwjm?L2tW8yU&iCQrq2x(pvPADFMF zV+G)eN2KG&6q zm6Wj{Z6!hClawy;l2ZvN#L3%4uHH1sg=4D$O8%;1)b!XhV6b4+wbOZtiG=`wHYl`G zgJ8ty2e*m$e=c*q)D;3T{S71D3!Nwcrf3k!*$THeQr6Zql-OQ9Em6d(lo2FUl!+&l z@v45{v#HnF!O^@zJSIqp2d;r^TBZGx55lMQOCe@zw3igDhFT0RqS#&lxMQ@C%Oo)t zGkc%D;8-7z403yfh)PV2rf{!jS$msRI%5hblXSZ}&5TWirVlLG;DlA|oI5v^HD9yd z?LcJuVt)r}t6Gc@K z2gwmUh{N9+Zty&hfN8gOA5WbOAG5!JKwsw|G|PBBHN44Nzx8LG_r(kU@i!x~Idh6vhElvqTI2{BXC zlwHdkAA7MEzXh*e1$Y_5nV|iRpzjNg<0QvbXUrX7izZ~Y%DHPAmaaLWGkn%Ai4!<= zzmWr=(12_J;eEZjR_?tx&k8)b%zKt}gG7rb^m4O2W7XNC2(Fw&@t4FSuau#QV|QY! 
zbGLa@@0)9<2U*7Vp@xHQBw>J{t)^D%%PK1H(w9F!Pa(poa(yJzS2Sq^xa?6$ww($~ zb0aK1U%s~X9tSdGnCerpq#w6r`C~=-!|XJBpfw&evdZz~x(qNT7(l~oFC|_nA$bg{ z0RI3b8(4yexxJ4IT)Mq=!WNUOowQ|@p$ahuC`DaSEP!BCOHYub`%Wu6#gf^vq|iL6*Mn{z8N-O^ zozkcY!Yb{vPeZbleH`hYdBgB39-zybbCwT=*VE!vG}Tyk5kXo0k396II6-4j&Kbj8 zliJ+gRlAm{VdSKMq`@SdP9txSqx-z&)g&yaj*2F}sAzJRLvwXyO63ec2F5F}=%h4* z^feU(Ga@;KH%6g_hq|DPYkosrsAS7#>7=${1PpeLx)Wr%tlOz%13PAj9&F56hY(yF zG16>`7;07Iz?tL@C3xBVvhy)!O`zEJ2g88G+S{orL_)fLVw<^qfix=QJi&_vT!|7*SbT+gS!7+?=zS@}7|BnGQB&6C ziYRFD>>jQ;>Hh$nJY!-oXqn8HP0Tj{LAdvH*P2zxRHT?sRU5TpjRQhJRe%50ulePo zjtCl;=cqiXKhr8awPuxD#RlJWjd|Ebtnmgq1#>A3r9w!@k6k^-1x&P3ONZgK@X|=3 zVwR#SF$A4#Z?BEGp-3er4JLI3W+!eQ%4kYj`dD6>aL?v7X40q2&m?=Trr@QlqQ_8v zo+m2OVm9rX)G8DS8k3L1m8EmQT^lR-k@~?k)p++kocQ!)IcM6w4}5 zlCB>*bEM1dMr8A5S=?=VO;Nwh-Jsl3D|fzv+;kx*15VhdyRoT5Rs)X1_)=+n06sZ_g@Y0H!tY(Uy zRTBI%UE;paFs^WOzgiED+XK)9#&7rh zsTlcwwAA=+*+Tr0TqrRfe2WBDRX@7pXnLhgX=#?drKp8gYN+VeJKw}N znIOKM$138ajV4I+s)UrPdPPjausK`KsP~a$LL1JQ`N~De7Ye|4u)p8O76}Fjq_)g9 z(nh0+;C56;qcKbJRVh2EP|EV+F%6}ZkhYg*;ij2aLw>^HcN-sI9II}jNh64@p#-=Tp`>(i^QRc_%1Po9 zP*X&;G|?2QxY+_&-0dgr1AA-7iqc~cYUKzfA`hf<6&-MzArnhP(c$$Ii&V!f?5M<% zqOPP5j}pC)+l5sY?dt(5rs!jB zm8gg!sPdA7f_=5z^m789O>9)?uA3mHX`u!-*H*YJPknfppq0-GU7(C6P8|^x;*zpR zY1s-=MhZ0Vd1WN{mX`Kg=m!-}B1RoliMG4B;U4PhOjj+-<-;U%&om3=Ls2@%uC~2^ z)C+0NrA^EMTYQ%R5-0Vj^wl^rRYv=uu`)k{)mF_lkyMcuS%-u~V;my#A>Y4qDx z@QC9zmRh>n+NtD=EBwYB$+#qJv8c9}MUlH(y}0UwglZ&u(<^N#b5Bo2G|g35lCm}u zl=0~RS_FzmjaT=#GBaF{3-KT;SBRL~*EudJ9GYz3}(oeu<`76y7J_fn_q zijrfXos`#4SBd=L`If7#`K50ro|DRz1ZplwTdj}A`h0nyh$aChlCZLrX(|LCZ?>{i zRoY`G$x5&(X(FDy6?AW4_v|$B_VUvz=;2L}HR?!^X{L>5#BnO0na5|Nq=*`m6*GCa z0R5d4TlTrr;l|i(m28;ks~rkKou{jrrEVXG!Sd6?NhH{k)DQG_kpsOp<7*N^YkzK> zRzgpN1J_n&%{|z_;l`V*BE~4E5X%isN9eY}h`~=!<;Kr(sq-|{cNe_ZPv^@76=OK`ezd^_OVD!XrMRbrWkA`mYgCeW-c@iJ{RM69nzqa$ELiOR9B}*9*S*x zJQ|v;np|ZTDS`7<YaTx7NC2o!BuWak=>$$S8eCw&kWGfvMGPpR=89{y+dMaw0^ZJ1Wm{5U zbVYfDB!FOp8+L53CN3);oK1pLR7EQJ{$Q%8P{P+FAS39vZfxq>Vm$Kp<6DuUQJfaI9kj!y|>KsLQC*fK_Z0a6TP{ zwD9ApXawv8(@6;?M2X|H)=1`xmXbC!6tKutl#oQxO|Uh9Aq}o=d-vv@m;jo+xLIti zSn%uet|?le`KeCS@VfqtM8)h$vERJ+KMQij*pg|M(n58fi1Mk}R8a4dXQ`2`xR+tG zXS69tCjS6mG&Bvg_-SMWBNJIi$jMU}j-~v#V=Cd=PcWSg#h%vsLF26e6bKmXr~d#h z0tz;tI`wr$2b}~k%t^ih?Q6>%tqo%yIc*QC| z`W-CdUvRXQM&PyBf~8HlWNQGK5l!7IQ33~;^7mAEaqL1kVWt$c_k?d~AZBH5{r3;x zIdpe{TMcQv007|zYIIn&RlM0~LpT?dG;igTe*&HF;72T!u0+>_0pKP_vMYuqi&m(U zWe4b;f-)chLaw%A{`Yg-4ryUn8djiIf(iFySz?yHqLvxr!s$^96Xvso1c7ZCh3&6v zaZ9JeB+_eS>65TcdWNGC4N*^7O96#hSow_1%p1gQTUZ}y;#Cbh55|-!$U(y%lk%q( zr6Oji6mbbbT5wf}GB10Abh#RF#W@B|C0qpHVElN~CVs&3jvs?%?xW_M$6tqKdP!dh z#W0Muho&JV4NXtzA(o`n6j8aG^lGH;TlkJ*A8dQ%w_T=k zgL<=zrvcFvzVvI?-m&%99q1lJ&iHN<)YH?b7pI*b$rTYT2zoTBe|}jlXO#uWC%UNi zavaQZU&&gyww=-55=q<4)YF$=CgR~KaXN}{9aXm8XwP`wNyvC^R*JrchO&y7vkaL~ zwQ<1I#bJ=R(&39)t@d{F9anj5UV`X2^!w^zNFg6>QJ>;T>$5gx3U}#+9&4b<_%&QV z%ySMunCcRiD)UaBbJEgI2rT+oh18um7v2kX<|Ny&btj2WPeJIXe>}~*UE+){fS|wd zj?K8AsA?QXJ<|189_hD7@T~BdUOsTzlGwH-jNw>9PlkDnfXNI>wGI@q*uZT%@L{@J zpfbBhd(a$OfPuhtpE@PG&3`ysi4nK1wl&8y<{?X$E>{bimTL?B~@RW zD)a2kHXLy(N>e{v)Nh3*L>$x4yYct&I*u6VyH@u8#6t@fN6ASOp-8 z0{uyT8$D9Vc{>EF!L#-moVs~a9TL~IU0cwl6>U;0I*E?$^tb;2B=^0IzB~rzU7s^# zKQhmXHj**(6m4&uqTuL&4-YTKiQWn6sj@i6W6pRRW*jpOkW)>GLAJLGMvRGK@?}xG zwf>(T0W()Gd)RLPG3zue%GhBIfVD|HC=buw6V16!yBN&aC0uY}NmC0)NeplE9z^km zUvQgT-$Hyi^{!&u=m>atqpB#r?;t5EaVvp1VYZm_M^Q2S2Lz@;t^WFX9g#BCq$NKCZntCTXI~x$CEG zmowy57^QtSY^lWN#HcGUx~RTk@bcJ;NYKR95vxZE02RIMPbJOS?QyWsW%l$Djsprq znr^GW6}F(EhnS;#j$^qm73sDC)vk*4YYyrje8*&q63UdY$A?kXLob`XpXMcZ5G6g! 
zG|6^QG}lf_wz;zJS{>TqD(OEe3im~_Lg)d+D|$iGyh}ax8eE~1a>aL2^5!8`oLef; z%Nt_Y%}ACx%igrY@F(oNxfe@?(8pwJ0)ApmrWLE?Tv4 z8HW@k93XI@Ca(g_`C~j!pEAC6d_OZ#)tPDW958&d!4%@+WGdR|Y(Ti;yA0j73RR@| zc5%jn+5SL>)~!mw?V(?xwcSSECkieUMkt1v@7MrnShtt8ZSG1N*XGD#Xnw<3uIJZ6smVC?}s5+hr}CS`+;pv}*E4r(B_*`j3<{rf}+;^qVhK zVsy`#@Laou%~eMtY-+e-qOYplDb?N}k(NeVowm6gk+LkR-8V|Oc{<7222YFw207 ztX~43D%M9e7A-*%q{}AO{VJ7h1<;LjzaVYwQ&x&8Lt#LW3ZRIg&PQ+ggi6r6R^A?9qsm-CK5!r{cT&JBiRSS~e5iG0R04HusCO&Tmpz%xGJ zO~FzKY1vxFKsXI^MLZ%rP^=A}=45P1t znW*ZF2}fZ(j^(wUO`w?+_*=IUEtIVIfmRdysi$31-td?m6e?%T9+Md`;&kEStHbHw zix9zOe<~Rx8-!9s8pM`Uas})(?l^a1>dF$@ka~>`9$6>~17rdyxL0PQmoeHLTLr-m zb)uw{EqpaFNYKXWKrtUM0#&bN`0+*FSmVY5^eTe4&$yR$glZihjVLk=R1(XWs_Q0? z2^j2+TY+JTPO&0XNJ)|=wyPbi0psqnwzTS!c+uWcl%bC2|5zL5yZEbj23ZCEk(z2ye2wn6P~k09y^C(uux&R zl{O%zI$F$o5U9N#fdFjdOJD%m|Z%_n?u-`jGqn6DG{$D7zL}wVlMu%6{7MW>X<>XIv#eNj z@|Th~7K|zae{dbw@gs;-rkx}~3LAPTQ>|$rlQhRsB>A&F)K3(xhgadyxv8Vw8KtT7 z4T_M!t2fd#JVBT`h7*iIIAO+`l{%#!iZC55z_5x6x5J>OHQymll@V8CvU&4)X2rLa z+8iy&x!?ygapLZ&f}_HPmS}0V!a_lheG7G28w0G4s}#VZrlDEhGV_R>yg(m$qxZK* zy-NeMfP8poYkE>Nl4s-RM0Zwgt!GFWZ0a-$&(aFK*@)FfY!W2dEo zsm9_~X?)*;cV9LI&4SJ z#VTttYNc9;R-Ogf=4=*i#al~heleNd+$fD`z(BxLxn;$#c?dmpFXc?xjbj)#3pG{* zwX_0BXxgH_ErOB(xW#4EFp+>4?r+?2BO%l&dRGJELZq}^Kssk1V5AN$HW5XP;wwg# zSVlJt24Pc$?|3Nzb}WnY7q~XCz4Z3+Lz%La0#uMuKH5mEpTbr!RR#E72KCsF^O~C2 z>o5l4gL46?Xqh8?!tQ~+;qZT5K0_0E)}x8Ypj9bGh*)u6O@W_0nPcM`_1 z>RgFal`vWv(t@rr4Gtqu62(tJ2^USvB&*}5pHSdd%s8uprHSdDK2)g;pbZI34w`n$ zRS`B_tE0qeS}bI(ER~fTZ4`1-uE%vPe|fEK!`&djReQajdmMqK|B?wDh#aX#YTg7 z`J;C#(lClqW*qm3Whaj$(^aZhW6;A}K?WSG+Ze*Hb!d*Ht7;gF>2ceMSjiJ|MZ$-H zJ7$q)m+H42+7+~WC`-tFCd;^WHeKouSn(#N%Ced3#s`M9X;+BJ0*$blTgBPHVPj=J z-W5NbE!PywC@O*k@E(*{)>zwRF21Y}4kn(nFQ`1FTZL8%FHbl=Tfi`;j*gQN#4&j& z=7J9}u#uWY+QEVMfEPZ_9Ob*ccJuIuiizM8G$uy#gtsVc1df^w`bp_?H~N6gacw|_XO3oS9wBmS#(L{=sP)fj^lBCMV*|dYmd$g>P0WI9Ns6w!g2|XTEcWmO6 zn5xgrHQ9_*#<*vhNr%)dLYkiujinVW=4zk|kgOPs`*|5zW$mVC?mT)a9mja3?Z8v! 
zPS4qbn!gjubBc@u3&YUPl&}vfAu-ovW^JLZ*pa6aX1Qt;3O*uv#-3{B%|$x}I^eN& zBQfU2#^;5g!tz}|k&RZ=N4Ux!U-GvX2J3A-gM{y8t@lB+Vo8x73KB7H^Qi?=AW^r$ zb6y95V3?LQgux8>Wl4suu~-O}N!41@0*k7UR10ve8B&t43GogFjDp%>I;r;u>h>F1 ziDB|Gz6pT&n-aHHGH!cq@`vwyPku3)+9@fK;#Wml%KZ+iL4h(dz&uB z1910lPdf~^%6d#~u3;fSr^B~wp0287r_5NjY_rnP#;^x+pD|UnO70qz3_#uNzZq%$ zLP_`jmF2vkql6Banfxm?RCM)}F$#!Yq6A%%RLfHZG}205#05zM9R{HBO~L};9DRRE z*L@1xgkXB=90v`gElnLJ*kb}pLp-7ezhbV)C1wWx!NzC920FTH!BC(HthE&}%&;s( zVQ@-Y^zT_3M0SfebPPV5Y2(erk`$=Iom;Y_$M;k#OB7-pIb+3S+PAs|hPv{->>Vc=t%ku- z)3=P*siDE8sbs0as-7aPK zww9a>dQv@55~6tYaH$xLADI-@Nl_IAPppMjkmcShv&vr#u5n!qLnE?G-nV^ zMuQiYhAHss<)rf_C|a0omM2g?98KN965%JKrmQX#igZAyDvEzKpUrs+ zQV*Dpd_uX101eDqG`G}y@ztdtaoRR*q{>;uNCKK*!?0SFt**r35xVV>Gh^J&&;CDgVC*2iC97yn!iSw>i^i5c* z@zr@1@kwzdZ01%P0Ct7iq-&tH_}ZIE(o#ijD@Fv3gA8%61jO+9V1pEnpA)O90g8rm zWCUB>Is$uN?&g|k&Xk(k-Au_*6YLd&w+u>zhvqWx~r9!z2?zjHO{}lPr?#Q+>{{6&Am9Yo87&D+EmJ>cXlM zCK9hc2BG}be6^JE)=<-Y^Lru1|&fGm`+gKkAtU)y2 zElWc3P7N!EtMfMtSZzUL*pdn56{rD-ro#bg=!3$!Pg9AclAJv3xJ4S?3%bZPe* zXsDkKW%FMWs>7vQIZ9|Jh1|D>)m9g`eQm_oKxvp)rEQ>+xY8$uLl1@2GRG0Ct(}=r zRL@co!x3##K)Q2iP;kLa0mhTI&vtw&1w9;pbtvEYo)bJDEj0Bih^9c$!VsqIJ?raj z+)&UmV4QkQQS~+#F&N{S6OIXIDcBdPB$l}A}k!2caMuTg=abHbS#wPax> zElSkEM8-Mck|rz)cpX0eY!zZtuzTsE69Q9;wjPMb(xqV1#I-`Pr;ZrvF3Pb|GUzoL zk=x_NR;eHoaWueDK-MQo+vnBQN>fuo7y3nG%PeRIkj(4=EX3bw*OoIeN)ZbsT6!Qx z6~$_jU(i7ml|FbAR8?;#O-+KX@25e=((K5HA3C+t2?@eF2Tf*NUO2>bj~m4jZAl@h zl|$|V$8lc)-x5TN0}?iu~I3jQhd40t+`bDwm`NQ z`gouuC;=3m4XH{daE?C;!yX6;4O)^?JU1~+yY&tJKYHY!XD!_TYVEP7#Hf%k70#yx zhB}Dml*{@w-U~!o0JZgP#FOwHxZiM;pDYDA-yvxjaH!DJLsuZ03aF)!evt%}ca1gL z2J;!Q2gGw7q?HU(MO;8o023rn@2EnKhs^U+ z9+p>_Zg`D4N9J3#^d#Ha%cTi2al)*-T}u*@Qf5CI;Vm=~tx8E;s$-3P>XKt+9fj1N z$$WeAh5_dqVZG1`r#mlR#lp)~ZdR7AyrAYW(z0B_^l>E*vJf;Qr6fQ@q# z7*AQL^wUnGYrH{h5vN&HBWrPWklz0Q8*-(&e*Lv3mkp>WpD!J59FVv^TqzW>zT?(?v4a5BQau+F0_%lcyhTBns}OsWMgQ)-QLfL<3+U@FL%rwDMmZ_*g)jEN}yAIYSJi+9wXZ6m`m zG@&n$;0qH{VnG@sJsi*#JBH~?G27nY_E3BCa-_x>{3(`7ap;(eolK7_$sDpI0Z*9_ zsGWfq@m0Uszun6yfRG4*UhxIviiZvq{;fw#1wvM1tsc?lwQ9&ro5%+1`u5{xXqb}~ zX51!K#C&QwtD~%hN=c}eHS=PqBw73}+qKuj$&2>|$Cjg+~nwqF1RC(jWz>ROqrQnG@fR!U^sGOqGMi|-b`qsO-!PN>9H z8cGs45gR%`3NHSldbyivvUX?m{{Vne%ZcU8?^T22H3pWR8?AK|o^O>HiUT~ZTWJg* zL2+Y#Vee}DxY^z@bja}(LF=r}?1MRQ1-a52K9DjdhbJbw4unD!!wq z9W=mXmZKQW6IWr>OOI4ih~S!@HHnsU8wM{63k&y48 z^F89h(lN&aK!;!YZ|ZNOeNV)x^Ntx&O8A{kWqw@7sg%)B)?xJ(Oxs|pRr1vdYlx29 z4FDX#-Q(PH)1B(1CP|z~_f*y$F1V5e@T%1HH_ z&?>7cA!IXr!C=c8#(-U%*a4|L9d<>nx14JLgW{8b^`PE=mNtAMN$ahL{{RZFO`fOy z5%kXq=)QcT!>BRL(~DwR&V0R-h?1>wJVKtHG{W&ps(}nsRnwYh5h}aIZQEG)7v^tcTkcdulQh{L7~HNN$`Zj@N97+64g-EVX;zCY;E3kJw#Ew!WS<4 zgF@rq7dwHs0Z%DKL~0=P^AsZy6i&TY=eB`zPENtbRK@zcf#cYD%bhpGa{}Sj*sPU| zu~f-3d5&U8<6|LPCgmGR*M>PWPM0KzNfD|Ej-E7=FnOROR2kVv!x70Cwq@zx^hJ?qBh;>$b?XYkF#N-u`d5!-d;$q6khW;SYG|M^$L<*FDt+Wx zPws~+e-1&{{hwwwXe)6eOdLT9k3onPC9}Mi?!ZKNF#F~1Pwd)*S8FRJlGOI7pqFSvmW@Wtjn+Y2iY-rgWv;njCTWi4vgx>zPK82*$m#$`1)y#+p8tn*}J zgif_IrY|Mw+@neagn$(98rPlXNI57|vR~PtwdXnhgL+Ncs zUF&~VC?U_&;+41t4TEAat{C)mZ6PqijZv{1%w1W5w}Ir@x|`o(h5%RH#|#IpMwy;e z^G*;k6NJ#=>l;7x7dvzZIOVLBn(*B1obv~lgJpVnzFt|j!7|h$M#@n(f%arwJUicp zJJq~4tFmw;$T&#GI?q)s$Sqa*q=jI54QugNHWyihVtBmasdEZB!Erh|;rdvnrg2~i z($=}R!7FH1+7?65uPj%!Y1(>MV%{CK{WmqP{FvuswFwki#G1`iO9C5(j?17>- z5-IKvaCoNH{gt6fR^A;nT>6mk1tvhCd#t@W%$ZV54;pylsG`NA6G2}^JW@u*mNt#Q zMB9~_TJ~=J_;lW0;|NZ#;^^a1_2Q7?h)D#`9uzb52QzgirTrhwJwnXhrzk3NpGYbO zJ&ffjO%5+nTODKmf|bN2>SL&iK)Y7j8y-(N6L#ukpeI7q0Rm%X0-j`-N`k)Lsu?ps zsTvNrV_9bu!+NWT^#1^*cufJs@!p=xN|=5o!c^T8x0<3!0$)iNMqV{{dQzxx9X%k= zog;qr4z>b8?w>Is75|ci3GRUoNSbM&iF&lPKIjHKEHHYIhlvNpv5ymk5!8|mMX_|U; 
zlXLAp`~`O~zT18L1t%j)+XW4Q10FQFjp15u)HM@3G22I%7wIKlX3E)Xs2SRi3h6FN z%h-PCvkfH9aoW|ZjbnINu?#l^y}UPYKE?9cC8pL^r4ix&5fpuzK8uFZ>#c^u2dSz3 zBsd*%&}Ckc;O7m?z`Z8?W~HQpuCg@U5=IwnvAehAiFbXR4WZ7^K4Sj*C&pp6i|rT8lQ*tTi;aehVa^JzL8wI5DU<2kn8{ z+>?>HcMJr!TuPfb$UiEf&0AP5guw=nhgbgq3{I%v`41n)BI=bcf1=6j9Jx;_R>W!M z#2Fa7!A&mq_n1fwV&6&e;p^R*T9&1UidTsmu}ME1ebm!C(n$$YFqx{t^##x_uXIBm zrNOb>A#i$(W|p5ZRbV?Hs8pt%2G}CDG%Jpy5I*9Kc*4 zskn*gpiwZ4n^A|>tK(*}zBUv%wQHkjTz@_zNZGIdlcQ@3sr4KHeGiu$S z3Du7p3bEO1E0M75Hy4hx48vwPejmC*o^yfjY*{QrZ6?H6YH!C<^G>T|7^2IiscF!Y z=}R21ka2kMimVG0fJFvB51jP$wGhi$PGoSQTr#lkZ)IV`ms$$aQf4|PoUyg;)1;A( z*r#j*r#T}9Nuqo`u`0+a5rsZ7C5g#J1a+*lZ7y>(OzS|r2)0c4|gBTO+_R|+G=vEYxtHJT? zEh+JACZd+LF)4iXW99<%Q$)@ZkzF?)0ltV3X$^Vzl;c+<+3SNTUaqv7G5m zNmY*KEG00^5*jHgsUHccGYO$lqPKLg-@5y+rHC9vZCVRT8djxooIhR^(7k;qS9m-m zP&=Np(n(F0@--$TajX{`r%DXqvSG7)-V*R>2h41C^BmX}bT%XHaATHQ6t<_d!oj9ln2f4%c~3_SSVw*g#MCruP9{dO5Dwvs_8Kr>~M$-=}YCxB6Qe72Lu;8wd^=e zm}l;|VE}>9et#+vVc9SFh6O`cJnUil_@J$5%I%GY*BdXi_OhR6 z0=IXL7?GrW=>5FaETL1fC;+LBl2UrT+l)Ivgtqz6Fk#HB_&KO-EZ2q;l_F(mgv7xD+p9ZEbk)bSQ%mQfI5k(X&ZX zQ4@)v_FTo9X|kI%G?U@fCRL=FqES5Z6_g^$A|C3&uee^;*MeI&SuS8gh!gr%5QCy- zf;`oj^L%(#HCC9DQ`J?+@_tmFVSjas{)HF(cDkP0+kzamR^{j-N4wUF@9vb+6iT+y zOPU-qY-H8e*4NTaSsT`3Sau-^5*n3NiJT(bD-@3A+L7_##oL!VpaMiB?dqth`?a6q zgpL%Qn-Yf#G0;_%ro^$g`CK`nyuUNKyME zG-TdX>|$j~#PxMifyiAD>MvXKJ{i**5_8r+DSS$zj|>kKcs(^5nC1na+>LD4H`4au z{_W)mxC$&MNZF3V*H0qVC?B;tr`=3mu6+*m1EC#g$rwf>hv9kAVZ!0Zt0BPgUp|V1 z%+XSG7BSBm_l^G0)N?KSHQa-3Y=xwS5PBew(xct(zD}JfB_pe%jd$Tog?fOf>rYkb za(;iM$Tcm1HB=Mj{3?SJs>CU1YkaX7d`>BklI-EOpiD;WtBOtMKQLiI=3q0jYo>t zP~p|sHZ4Wv%Sl0lB(E4X>UK4n=0qU@@1f>DHuGz6Y9NEe^--ei9D=Bb`O>SV9X`uA zOT4+u8j^g>K&h7j#*@yHO2EOwM$SsE^Y!>}t2)h+;)+tA2;wji*GBm!N8Nzqp3b^3 z*mf_6RSZfRd?IWQ6mX4SgcVkLsN_J_cu<|eE=9q=5H?FnOq@Ds_16LWL{e`VmV*q# zqp_ggwGg6xluodRt;KUQ zR>4(Og;Z~Lufu%TN>xPn&_uHD3J=5b;f`xzr3pzIquY;_1Z~6O;1priRt1sqOjfdz zpBlw$=ETM$NdyAES(HT+qyPm94Z&NLy{vg3U$)Xg(|}1LIMnr)ElX@EXKxQm|JNiC zV966mTNQ^CnQbIX^0Q3tZ*mWor`;aUr=5agBf?4RHOzOCkhnW3mP|fsd{JquDKPoG z@A6U0O5nD)I=BYlcyYEB3&hiBNl_3DIJO^0C0uo@O4R!o<;-!RrUg~Ii)wFUr+zw? 
[GIT binary patch payload omitted: base85-encoded contents of the binary files listed in the diffstat (compiled __pycache__/*.pyc caches and .jpg images).]
znL2rvu`DAcV_4em8M`^xO%5GHAhfElG+WrrLK7MgE=O_W751~e7WNr((&F(}z&6E9 z9gl7^_xX}s7&eSai3G;MRzK3W{gx(xVbf4n zTB5d^raDA%994yPkx;4w0K!6j(}S7LV2h-w*34tpFm=09w zK5frAE{)e2%31#a3(GjARCRO}3BD;A79+Muixg<=Nw6ST^P)SK+V)+2VB60)hMm|_ zR0p5qSOdPjl4h27&Njm;brT^;1o;XmILsKXRK&3i6B)uWEHf9wWhq&OV>HV_TMWuQ z;PkU9NgEFk$}Pz{*EVV9!kHmSj0iFhsKTN<%Qu*HNqHqJm>|F-(vz4~ej`~{(hNAQ zG&_vYDH5|ka8#8Ru)W8(ixT?O2^%BdMQMG=1xeY_&KOdFr_55k!vd*_s?1@WA&n7O z5xKAKn|HCdE~#2Xns_(_kdrkWVJzZca9L1Ext>RO5%>5o3fC9$9A$0*k+8)i#Gs{9 z?Am@+DvJn)zYa&LoKVOSm1=3w&Zfs{+o-c^uhY#spi{KdcEAuM!T8gRRG3`imk*}K zB!-x&O1UZEbwYzrdP9GE;yF*ul?dpg2TFCBRPpku5arAbrm2R55|*t{1U^IU*H>*+ z*@!*w%QotE!KT#HWRjI|#%m>RAA?Agt%yxq5Zo+$!x9pL+t@XU0^^!>z@v|)GGRn3 zAI7_b2Fn79K@3wYWr8fUK-?kL_d31&IMU7nucftqm4=&hvuj*FrVAl!KI*APVGv)@gdT}A>%6yY8lF%zi9S@tP;sppiF81ZAYUr%MZRDcB2$w0>w zs*^h^=?L>H_Y?XkVP5+kj;+`Y4}Rm7P&17tl7Skw)R^J`%p}C&kRyD&5K7yFUcvtW zNN!DsVSZF7NXDqsDH29B;P53ligl7Uc$H-f2FQn&quvGMRo+j;a>o#9N?Qr>04o#M z(G?Pyer7n^YewbQZS}C&KZUr;3FA(!;inEb?Wr^3lE=QI46cg|q#<3Ix5yCvB$gjX z9dY3kF--T05J3@4wew?=2AW(}qO`H|V_GPUww*x*x0`$|#@3}ki8P(X98b7YZ7v&( z%xA5XR!cc@i!AR?8Q8->ETg`@%{k^;AXd`a)~Ps5{_1+ok(jNCwfSjC*-ov9o|9Rc zk|snmK(NDqD8*43a;^@H+gWt>TKe*-$7N`^gW(?P%+z%FN+j`N@yNA6q>{o>iE`XbA^oR%)u8E{G%orkY3w={G@XotO&%HaB6-CE%eT)6OI% zK$F6o9$4!VC#jk#VolPDQm;<$?vMiB-X#}_oSPTOOrr`SkDx7uYVZk@iaB~uA8juoUZIH=CS z)l!WuJW{0b;!?W8L8+ReMLy=+{gpNyxl#tvPc*_*2N}TCk$MaCxz>E^)cm*A8hnFS zLy&SSoXb_2^JX8inz@)2hB{$Tqs%tDZeGUT-as9;c7tlxSgTeX1=W}!96%d(R!;T} z*{$HrQ+0CLOGy$WeaD90io1N@)E=|I*d`;BvRzgof#dU>10BON2^6(U<$00`sMl?z z5-O0vy8sC9%p_B)Q*r!L?X21s=|b@`BauMQQ}Yk1sPGz^{+RUh52MeNm9#j8E)k8_ z$n-Uo;$<}vO;8P^cU%3QEMOon#9(z(4>8baQp0mMT@r?tOlPN&;`pTW^!SBkJ`FsS zaWfSHYG|acE*Ql9kM9mqw3#Fyl{i`jB1{@edY{a_Tfx0C^`jTWa)uvA0_EI7I=Wh^ z=U0v?;}9!8$p8$2&fqjQ(~9!+1zff#X{6U`N?K8!gXLOg<7=IxmaiVeu$m^KjU}EO zPF>FNm0J&VFdIqm@#OA?wIo8OqIaH2a({L;UfHwNb~2h+k;VSZ^<{#<@G-fPiqQWr9@ca)Hjp^tm#8~;I zP=1kvUdyw>+*MHcrYSO#dQt-~m=>?<%~6*#^VQ;0)35>yYI zq(}urM1IsPN}Fm2G23{S;%}C&9Rvd(g-!V*iB+n4g&?|d(Qc6BzOm8_`PFJ%pVFS9 zO;^)}%lUseV!3v%1f|(75r|aN;qOf(l`y zN=Z5nndlUB`s4sbE4owA&q#PhT)B&X zRg4Dl2ahZuoKh#qW9v;N%`!&H4E=oPA4{2M(*|zCXsKv1yk9cnI0jn8aGXi;MaFR@ zt;BrcOqDfIG^rz~D6BP6pgdQT+7~VckOmuL<4k5ZB@L2AC_NQ*BcgeSrLUChGDZ!N z@!Wx3{DQwD<{Wz$#xa~KmwG)kl>(|cwkulc;0WwBv-ypucHsnqPY^JgV#?)9Y#czM zm-wRkdg+};RLk9M!*D7(Na}Gn!82|nm+I@Gq-S6}iA++q$lgE#u|)u$2=__Qm8r4; z1awhme%7|av6Rsj>2726iJ0+BcL~p#xMWn#_$3`47aR2(7@-t6t`#_rRHh3`V~MI` zaO6w4n{8LrjvHOtxPX-<0YxOwgV{v(S7=aHt%P*(q_0@84^CK~SI8LDxq}4DIDJ+L zh-ERAvd(nGq@%CIsH@@-S4itkByPgs%Ca)v_XOLDvi8ary-1Q3Gp7L~&Z&6hwv!?} z=wIqTNxcYFQ{+th)7jpcmr*0`X`b?#_!4yoYz#u^)A zR4U&mi_=zA2$oBF~zH#%iRl#60Xj+o8=rDd8(+hT$Fgd6Nk1FiOiKq*SDV) zm=ef!_zoODma{Y))VRDC+FWzy(a2bRnE9iPr8kiv1)t^H{;Zp zjw6mJHCA^^~WRVLU7XBP1?wMPv zXe0%s7*U;`RB+p>2-HmDh@-94?w|BWrdak&>#t7p%`Q;GB%cq9sJ%YS_~|$fFq2wx z^&dFx8c7kAfKi~e#{rpYTXB`Nq~e`~O$lyYT1p%p7>ck;e9`H?qIj{~r&*PAT^w|( zxP>keh>S_8sp^x=0U8HjLAC63(}CC7LP$LnZEtd-j134G3jpY@DUVTOljZEajAFP{ zV!I8)V4#kA%6ez}PR_vU-GZHY>ZuD=Y!!RVlNl7l6*RN=~g@D8J<# zjrd~qPaQL zx)CJw@uSOit4e8W)eP~JsWP51LsOUNWyJnO$Af1YoE93KDvq(Vjh;E!$RgdXf$hY_ zL?><(VBXOm8VzudQ#2xmjvU7%l1qoea`t9yuY^lI1T_&vt8|L0Ii0CwWGanvHS9Q- zSrNuPD@ke(T@)M()MZajdWV_1LDXzhHel4bvY=zx29F}-d@K1h6w{wJo;Is&I&&aq z9`i}q{5ZMwC?{T-2aMG_qo|$~YciI0&6wU94ri{*RhV@>HAl|V!JcE(a!H(RAwv zt6sl#2tI4-=3%YQ*w#1&HWe#Wy2V9CtM06Drr6bsTK3n(>NvY9%bT-n_DBQcMc3EP zy5uQXba0@@Bz3;B8DXczuqorI;#hagTKgoqVY>D=1l)0BFCP+0aHgEL)R0HqDY8t#(&oWU zHD+8)85t#TUY;f`IbYHp(UQu?+IX!Nt$_qd6qG5|37-%?RAD_9`eDjGusWwnk~(XK z;P|dPmg%1~{GN6Biaa$;BXY%3;x{{_VPsny@-*N4$TeW5njtH50Apnfw^rFoyh(8Q zL=#B=09vtie_a4 
zTro(8^lnQx&|82E?)}H^&cETq3KZEemTk%r_K`xLr|gG~*5-`A)$CIQO3Er4<58Az z9ASZ?db$=u-fH$?5O6~Hk!};WV*Yt_t45vDQyR$HkakD1mi~FHt%SbfN~i4|ByFJ= zIb@7aG-Ft^;Ih|MVmPH_v*D|S%==|#n{hFq`ysmp@a98W$4QuB+-x0-Fd-e=+0*89apQ|t<;PIG2xW>&QZ;H zl#*aIw0Lz?jayd?-e0`nBd?dPqQi=^jN-^#rO?WPOv*@+Kdo-*hl3?L!I6QQgVax5 zIK~CjUYkSH-&8m*dBCw85BjHs($iK@e=!1v0Y++A)Y80+R>cHtLG0Hg+z7G@3kP1* z0ZNjrWhVs5@IGrP4?Pj%_$HTuOoYS;KX<1Wg(0ZgrNR}I-P>D$= zXBslpUW~mFXU5EL{d>W3rYG|2g4E#ng;v*~vD+DS9fUYYcmFa$8nrK3c9*u84%&n zLY`bJ{Xt1k1v9XDR5h7q4%%Fl z!AS!cb9Z6PUNM@>jOrJcES7L_V=?5noT*&i!r%AC24U^!0+&JbZfMlY1` zd?J--9*$}fK#ECW^CWG(o-38`Aot{Tm^I#`Nr;_>q>D#FBN8TnoKqv_2(f&Fo$%b1 z7fq?@D{+c!`-I^1tvWS6Cn6zZEQfW>sT+o2u?5Gwd+`Hy30Uy}AmJTeTsmn(w|E?U zUn)%X@p6|=GX7i7xq~)f&|%pp9l@r}f6q~>F&aEO5Uom95AsuwDkMn^rb#X=wgB+& z`A~7Sz{Cjy%NzwFRrv@|8!P|SEwWW&wGB&ABzt6$H;C~GC6-N$byZvGr*Y1@HiT-! zWpdJ7Pl!aEJq>Zi@rm%|Wn8gSG=Qqd3IiNFnL$>vkZqv=eSAo_4ERLjR!W0%2-vl!X+55$~*ywG`5;MO6hN zD|wMoQF#>bC@fmTV*Gr0v_0Y^6BR_dzq)uy;q0z;b<{zj#GB3)s!b$N(#1~bI zel*b~CSAeCccsTJq*BZ{3N1}IMJd*-Kk zvDVkP<7r_cLWrK6Db*mhN^lY9?5@TO1cF%VUV%pc0G6ZqIpUOoYcmkBq5=O!PUMFzu&?uM%2+ zJ=r|E=4ybK_SsQBfpR$7-d3U}Fs4F^5(qvXBZW__#1#}ALleWysH+@{h0&uKf}4WD z7k2*u0s-TJ!UZ(r+J{LT`qp}Ee-4o>RpV;5Up`8FDOFG{hVi-VPk{rKL%{V_(CJVE zX!rB!Qw}?ZQC6gtc!fo9byH1<#YJZu2WE=E3wYm`UD0hOLFt;?K$U_bJ^sp%MT=lB ziLNa?xQ;VBF@{>CppjKj5(i+U4Rjaq<2KF!j|yUV(x9-nb#!o=e` zDnlB}88*F)j7_|6ud|v%;6YbUvYA!_P99rp9wAi?T$2o-X{e}vTH81xtc>h2$oK@if;CdmJA2w#>2u6<-Ywi! zvzkJLgRv$+93<9EWjUfhk6_O7z)ZtXOq)(K%^ZRw@ip3Id))iDYs#2ft&?w)%MO0E8iP`sp5B+<<&7du}rgGFD{TE7TvfK4Z*+f4&2LzpVXSfTa^@$4%5(T z2a%U!itCD{8Z3)Ei0WD7wvDj{gX`wh{*f5d4FqdEM@QjUDzPl2Bv{TNP?c&vW&E^q z9Dl@63!iTjsZdM}oiy98BTgjq#+xcJS_t7vib*A&8Kfv_Di4?|yCj4f+O{T$OQ4Yin+ zusevj_>Nan=c=At>7AGXT{G5MVvXRYqkxTI4oN-iLtF#x{yf~$buB?)U2FgyZ@gfNMR|?k#vKWpY^;DspS4HN;e6iv#>Z%XGo@pymj=)8E zQzAjbZDgkyT;e)tFo`0HBHMmdp?5@b2rhvQ&DXd;$CuDH?5%gHNync$%C7(>3Gqc} zqe9;-w9DoI57|ay2Y&i##`VE~4P9PvDlB!-CjS7J!z!i}pxNufpkiA4hS9e`eY|ie zflZ}GaTrB640Zr!3xJ6B0*dq0Mw{hxrhqUYTK3}w)v8GVSG&Ya0flj^#WNzcETWO= zwiSpP0Pc*(xZD!Jdq*skE-H>T;)sfK80Iszt%toVw32ype7c5s+5OY2Xl>a3T(BgM zI&n%?u8x}GUZBQ4W~R1vkeIhO?mdfmcxkQp;nUHDR5u1nw$y){f-030LoIkhCx_0E zl^R%W9BCwMiq;ZAnCY$~7Zk{=hvDoz*R@*(nm_^lC545x*Ny7&ksB$*JQCSbMAL*> zon@LPhLpnw<54VZul9$4W7zTVqHtAdyP$Cbo6TGl5MJcFw9B&K05gF z$}n)I%equa$ML5H#AyTtNRA+;;pv=`H=8ZD-`lS?g$R;qM|z$BQzcDBB{!DTpifmT zz$)y<#MDAiS(if*4}Ocr{aQf8!?b{Wlg=)_XBulgnh ze0hx>srJy_laAOj*)GHt&H{QPkF8~|WL$PfGl+2LSsnCAcBB_+TP4JeG3bnMr$(|T2Jud>-f?hDhTl$ zb_{l-sTq!s4S_0YUXHlNLeFSINMiSP00|qdb8&wTFq0&xsPt2+Nr(v);f3N9#c6Q7 zTO7r4d`3oj(z6bUeqPBoMrk(_=1k<&CFNoJuLr?tQGEmS_A&qNk zsj%}Bh4+GkEDUaMwAg*z+KQ5AWqSZN4v3k}$rd8gAaUVr0jjeOlE4M~evQdP*X z86u*qO375LN&|k42-><3H5@w09&I;BC#H+D8)d15061-}O!JfF>P#0E!m_?&rE?v)FT>h9bYH4vP>mkS5N-8R9xY`yz zRap~Ejk0v?R_lE(4;DFYy%QTbicXbyjxs5e*WMkH^M|DB-l^cdAgru@pW}ERQE=!h zmKsT8nu`!fq%RbV46C%84&Eb*vO=KwM8tzyyG){Hg6@f8nI@m9*!>gl4cMSn~O! 
z4mMp-GXi?)HJiyPK)^=YIGtw5_%2MtXtLfn>3OMYa2#Fn?5m%j4yKBhvV+c9mXb$^ z1(r09QxdZR1%nO;tetS3X>w#lP9V^R@u+MFz#1c2lcOG)2UNPHP1H_^XY4ztwUbL# zk!iYThbBD~RYjqmYFXt(k#>(gL~ZONhmYpBDz-^&C~;BZlO9-6Wu8`#%q* z!YFC<)9CVkpwVXx$BpHTM<`_~7plQ=n)*7Z<$OCAs)Z`3=^i#4oh4QZ9D6ONDVgW# zv>_oQQN}vDYT0&DZclhf!wMPmXF_t8YRlf8^KJo@pD$wBdlIPWXH#;!Vli)mnkU}1 zVP)7MG65(t0zo$Ro&gcOs&BnYiI8O`C{FfFl?MI&R7JYI(cDj_JzU1J-e&3MC6aR_ zk{>DslXAwaeaG{nbsk)UQp?ZVswXW=BafS`Xx<|{amuArZO>)DBmCJevzT|W9$aWuloTr0 zM1pBOo4Nt%8!F^pqI!L%$vDm**E~1Lsl>BJA%)bMs>P(eaCIGnO+rp9m_*OC(x#4y_I!8Rcl zBl42%q^X)2YMi8%0DH|Ff`;na4hEiaOgyrP2UKjMn?k1$AcKV(OpDTvhh$-vfnXShi1mbAvh(dT#&B!@ZDB;wz z;khR%PBH3$n6?*+W-!UUh0BVYgJn~o7CLt0&fBCX0S1~5DM~ns!_|p%W*<)tRSq41 zL5no;3d&r<^G8oL82N*SFdCNceCCy)Ks02r-Qaa%NJz2tJiWezy8s4rt9d z-Zz&zQX9v$cI>7MQ^35`~D7f@>IFypo#rl7m5n;jFBB-Y3OAvfH zD_Eh%$x1<^H3RI-p!>V!D`l>vr!nK*K8q+~ zl*Gw6uf=f2OgRmUnwb)#X^p$sT#h7Ch?ydvN>kHW#TG4#;F4oYiQtj+!VxE{tUaVnMRoykrRG6uR6cth^jiK^; zo7F^Le%tC4@?KNHA(_K8?(Dky1NeLcZw)X>W8~ zpKlGb&zPdNON(b6a+MkEFh8Fo@BWM&` zcX)!LoM)npCJLFG0;Q=)Wx^}qrrSJow&>lZHx^TIroFq4DFi1FW|=B%$bt{bpw!8m z(w7!Erb@%{(071FG(~cF2 zda=)$_YA=?tak{arpdV{rgKzP;uF+j^x|4bsw9b(WR<+d+7xJc3U6+9#$@_gLUe?j zPnAkrx0M@Y2Nu0jHdebmRmmM#gQ=MQ7u87}vCL;1%(#YQz%xODuBhTQ7?jm>mU#o; z^rL9G*S{n@s>~`A!0hOtu5WS>;@3M(1o_)DX0Djz8Sv(=j-LsZo=R$J8X9;bmO6sq zU7b`CSQ~fY{&RTcj$T4c6NLtH>t@}&lZ<0T4!*Y##_@Vu1V@f)sbiz1rC`DR_6QUU zUt8RA33)1MDp3$%nvz>WRsa~)&3cl^lKOk}-|7AgmtQ^z47Jl{d>iFrsfrvzY&w!f z^LK9Y+CuGp*B2ff0d@<#z3yjtwK)zdBdlZXiV}8_kfk;j<3TAKdbUv)$bEIp`7<=b zkD8^%Vs?5q!y$!|M=ZZ;B~k9UJ+(Z_72h~Sg%WBO7D+1V>)}JndakLc$+?#T#BvPD zn5v{&{1+br%8~9}(X>*lGcARN!)tdOvY&Un*NBm_on@#HVku;OQq*I4M-IU8T(XpO z(bLOJ^b3c|^wl=~E#^|zAZ`09EpHAevsOSIsX>{sx-2pV2qUKR8Kv@47*W+!d6#9d)bOV;&n&H((jh7l zfjvI@TVZ2-^MzVf*H-z{Z%Dp|vmQdivkq#=5F+M(_s5m9eI(|vR##!QQOwlTanMCF zGfx7!1)cj4wmN_f{`ak$f{>&XBX6kbJrp>~vZO7Ihs9lbxAfc4-0zVvTwkTVI;h9$ zFq(R7ot3Gw-XA=;bHg00@_F(yI|OGE7|^BRLVpV zrwlgLerLZ-T^hr%+>er?$r!Fp>OMD#t_fWpYMTwh=aQzPo&ijPo@peKI$zQ^`bwKg z@FRm}GZYq<>a?k}9AN;-!-h%Q=25 zMxuigS@wnh07!x-Xj~WAhT`YB7r!Ii=I1WCylrKjDN;$*`cUI0wYhDjC%n}IVEI(e zi+&ycqw$=z4m+DOZZVDE8P-WLtZ%28&W2fNLM%oqYRGD8Clf&;HP%){Q|_x-f#7EF zSSjr(fCLRgVd=wRqZ<)+8&Z@{i0t@K9fx$s5y{bKK8WWosnu80wqTBmhmul(d+10eM z@5Q^Xkl8nW>xBg>fFdx8XEx1l)ob8ZJ#|T4RlsvCMt;qhJSm7{k>NGZh+wst3shF( zerJ;zX$p{Cq6fJ;-u%cL%%GNghST5x91IbKM8BLJ?|7vGRe=X#KaF39;qjMn?wWK% zBl@YK>TW$(pD^m0%&SAwE}+oOMV6{FWDqq?EOOOG>am&PR*pYBF6jshbIf_(?BTS9 zw(u~~`?iueVdoVr?zTo$H6&>#Z6BPE>Lb(`$MGy@IC^uc!J*FeIEEdF!_x{3PYO8fl$Q zAf+b*)IsCb*F^=Fmg&-@sQhTZbz`HQSj@dH=~iyYJt>>jdSkexnIowAZ#FI#wTCYo z)WRpJqm`oFODZT=C+W5LX=3UCeL#Z0%T76J=7prnbz(8vJdJI{wn0kMhgE<7)+ce| z>{2LS4Ha)1*vA`Hfv}w&<#`p04-ayDBj96dPKElz{tKm-bfCNzewqBv-4TNI2$ z(ajH#j9y2&NB)xwvuHblJ{+q~g*kx2YU#Eq5_X>{_*cG4wWcywVbze&h?kA&=T&FW zhVGq)m$~B3?I7WZs)qK3f+yMTr3{o$0a!b!xhC8AC-PtT$HjXr&u6?eCn2&6$&V@54uSJCw04l$|*o%M;dXKbohZJMm0W` zzaFQeg|Livo{lMhM^PjaZgx|nDQI9~8^+f9`L{K!i5&XX8!AW~J~aZdu>%!N9d2mE zD&`?!6c}{`pzTBMZ7WXUh#HN=6XR}DdI2 zG8;Iy`Av_z;I|xQ+k}DQ`+8^erxr*Dfx@!WVmK`;(^61Y#~aR8DSy(QdzL?G+BIWv zJ{|a-oXIOzfJPp5u>O?lP%wG(sWsGjHd?W`xeqM&A0MKJ}@v^aKDPfG$0@l9U6Jp-b*5aP^CN|Tp01^nqiNd1E zma)S$sxipcIay_sZ0{$Q%q_Oc4ZCrbHb{x4^o0Q=e!Ufvo2PFfjj@cv9ILdnc9V2> zwaGWPZ)ZKFw*18{YJvL3ItXWJ|^36(&v56Lc(J@k3Ft(O!cIKN@redC3 zDv98FX~G->FPeD_czgz?lr*ysHTM=K;kF%3t*yCU^#I|th^HA@(KvX1KaFRIG;xHa zN<2N`21rd$oW-1rfFn=hc}2dlu*s^npi`zIyv+Bhmac%yCgQI?#z>ky7{BR(Yn$^+ zxINfCG?)c9f`KF5UN~hVQ3&U}+?e9}`kg^t%Vs%bEVx4;p zigZ)et})gulZ)mot+7>b$p9_4EO0@LD_)d_OEh z6Dv;yQT(}yQh0-HY0LR%-WLMgLIW!a)T0t)%uiSDr%+U!d%9V4qcCL z9Q)KfOxNSt&#Q93^9)-RnolU!Ndbr42<8>CM4?X6?BbPNCf%JV5RtRn#PP2Myy=60 
z=%TODk6m3w&mBtYR$Hd&)cO93w-)(soZ>VTAIa8-3KT6+)i|_`B;`X$QY1xRGjZHN z7qMlP^3>6txROp7P7zAo%fJF@PwK}x)%DNQr&juNmHLsFX>r>83Z*OhbA|k^el;|Z zy9uZRhhmWs3VEu9mApl`IPCj4VT|J0_hht{iR$b0+t)x&(sC!Ra$A;~9wL+Lg=-U0 zPlL%v6toNkRcZ4Sa?b)tVg|$m?l3+4dD6!26ifq!b2;V$Ns?sr@bl}Y`KYkpCF-ha zr>@#Ox@CqMWJj?Dl-U;7@V37mK9!i^NuOy6f-z90*y~bJ3_r`7l2qM0Q-p}S!{DuJ zk#D5)$#BjwR4dgOMAkt2qBB$AGQwE=yz)u6c0|0 zx@me^Dv8>9s@l3mQ}3*86`N?kxNe$wYs-~jCxs;~q>iZDT^xRvjI3~1nV!NohN3vk zt#hrUW*qTe%5%O%#9_o#dWxG*BU4b;g2;B$hNl}CKbhNkYkY0Z$&H_dHl>Bk4V?Sx zG-h&>#U(eKl@=m2nkF}KW9Dpa53mwB-~zB1(*k-iri$takx)lU*m;ZG+R^}vxkq(I zVh@eJJkrudG}3mIh>f+5m9rd2<*=%_#Hdy%o@ZwPPPemZbJ%OmB`V?TP9?=F4@!ko zhGy2TV?Hw%nXvr8gi6JCd`gRbTuQrFB$BTUGu)|4f$LeJdZ=ljp_;WTkgKYY!Jh5d z7GvYB_@K6+l966hl8|TSY4QniSfYYII+F^55UEZGR_eTZ4FbqCqYOcPseO4%_|YUr_*s9!G^q=ZPU=@uMR0#2Q*EPQy`MxbkD zl_p^At`xXZ%SwNk2pXb6=BcQ7{L=Y2^CMvB>miaE4gqeZDCHN$W}U0 z6Df!>_nUH|0@oU8_43+qnWuj!5|Jaz=UK#9qO8#W04s(ueYdGnfPg;IICeh*Eyh4N z*3{a9Ohk43sk%`~Njs)4$|I0&riz!$k<~!4-a^{s>0#{VmZ34WlZ$kx7cl6ihpWS6 zlF{K)H<&`KaHP?lstEr8bXK=}@q)Z|V@-vGpr$-E&*$hPSkn-JrFGgQ^IW&MJ8tsR zPF%YQGw!Xhp#W_ZIr?(M@{T9gPM=U>xwkIl3@(PfaXQT9)nM{qG^Z!`f9t(zIHT7{*u-au{NDHu=sBpLMrw}^0z(@maC~1)* zK?a#I4v^+-r$dNT=08%CK}APV9xsMt_;wtoMuuB0R5dY5&*mc#j=GB*YhlM*S`Hvc ztrQr*8K!*E)Bd7jaQT^6)vhU)WUnyS$Bj*wqOXzTEEiR$+KRU##`hf4PB4=S5)AotiqfC#9pnn<0>P-|-!HQJN7AmZ0MNKTrKav{P5e4klBzWt~q4E?U@v1Gj zN~JJ->zxZ=Pli#w4rr;Ot(xMu43d&yS%D}3M_Z`&o*cGNtwsnude2oCsS9meC*?^@ zX=g%Smo&>P^@JB%N?A&*N^N6trN_Oi|M< zB1z?jc|$C1ZtCQ1HumvFn+iJ@gmB?oDJX~&j}x}G_4N7FT){u7oP&kpnNoZ!0?3(u zHNf%Q3FV583JT1>Bq64ZSji+)5W7!p*YM;snp>e3>Q=cbJa){~uVCVfplN^y3b`3G z4p@UJQ)1OP-XSd|qcT!U8&f>f$N_T}fr$s(+;9rD0J;<*l`CZ|vX)|~sVQ58nbjmJ zY4GZ$XA(ymfa3kGK?myQl;Jht++op4ZlYxRUZbTX)cLOz%ksjq#I^ZGsi9wZ5orR3 zN7_xmAb>|U)8HI&6;jlKXsaZ7%K*q-aP-sGoPN7I;2F0D#jE;H)!b4AjAB%iQaedQ zhzeAWDC((YL$!jaWh#5$h6JQIhfr{wM@2Fctttkpf6~ugDLRvvGDcUX$Wc|?6&~hf(ynAT7g_3>=7_%j@@NToiarp-hukv z@_L#wolZ}$q0H-yN7E|huQjbmu&i64k?JQ@CV1Q2Dpt+@A;cH?c>oZp5j3{h;!G6A zGy(do!|*PZ`k<@oZDvBO#JYRcdVTW4g{?qLBNN6VG_?*)JJ~$hVrP3Yk9eEwZW-iP z4!qZeKoptBj8c5Xq#&CH(B5ckg<&vKB9DUuDO1Cob%Ldg_?5C3s;HFjbr(XFvla! zCF;Dz5>?U%kIZBi1ON{d<=06b1guP97~@yFus!0Iq{yo;%^hIruSGNWN1*9$Rjtd? 
z_5T0}%T;-!D`Gq3r=qQZx*C;+vLDkqQD!P_aeD)P5@rlsKWJQWVq1|kuGMWe@=%cl zLYzHWdUlJc*l$`g&OoZKp~jkn3+r!BEk}o@8(i_pQ5GeKiDO8m4kS@JF*j1yxl>?4 zlx9dvjh6`0q>yo#Kdm0$<-u*3z=JhYzOk|F&rOD5aCLjD4U6@2EK^Wn5aDxB)1_rL z60}}zr=Eq1&Q*rZ#TQQc@xDO{UY}&l7Aco4W-6=-pB>9sv~ttd<8fm26-!GkKFHDh`d5WU(((pdS%$adBDNQ7 z>Vfej9-m>UO{;`D(m}ydSUOFJW=@U8U(j+AKbR{; zhIvZuP9>D1ZgaGPYzGXptAx0nX+LXzi&43txdHI7tZy8W1J4nK6Y3c>~l2U(9*O3(J^YCOJ;E zO)eKbRxgI)SbUVyiguS|t6cqXDrUM_wGNm0p zA)7Ibxw0g~Db_`h)E3xd3V$x0+~s?F-gdg#V(>qawOhvUO7 zXKam_a@Hw63bh{a!XwMol{GL)u#kMIYZ~C_H3y!_kYpr-sE-d;KN@c|1f-4wZ5w(# zy_T}J9a+^3qn+tjBVrWPxK<&CVlYK4O7&`KYMo{PmDrLORr@B^@#M8*dpbcRC}Mpm z6!obpBmq>N(>|r9MD>BuOcONHP{kfi%ng<6Xz(Cm!zyQ-y&$bvB)5{%GKULm0(8@o zHTAmFVzng9!JukXugwiZ4T84v!+MF1Ri%A3HYp_=P$E;)R7j~9^OZX9mE&Qsi~)(Y#qXq0RyoH_9IcJ6E;jwO?beh(8|~?OrCyi z$&|UW2PQ_N#-JWl03!xq#1dF3DsQPB_~0?HR?47liVx3B`XAHDs;cPvJsm_-M;x@P zS5r|{Nle>B2?~i>{{V0RE-X03$v{z51L;>mCkeuk`JNmPDXdXte2n>ZYbndmmZ%Ya zScoKH59s1xr&GB@4K3S*%({gxc2d&W!x^n}zY%V&<2`cEeJ|-xPP$i(VLeF0)~bsg z$oP&664XzMRmibG;wT2mU6$ov*mdN$xe?vPX&|VD1BxgGkVX#D;h2I?1ly~asrMaHrf}2I zL|Lw0mhIC!YN_)YRLK@41qLG_c!HK?+`db<+h<>NZX{pm;NU2kB1Day6cceGKp=Cn zn=-5`iAG|Tt14xTkMy>k;z`LC+I9dm@V^)3#5j{D5w?l%WwQ`5p=Oqfo$D#$#PYn; zMNSwP31Wb?G%D( zK2%4lTn1P08gV`TU>aphf&x-;@&=Sy*=lek8Ti$={vv*$u%Ao4X!LK=u9)V`W|OA< zGdLzaQH9ZCuvUacP=;9p372$VWfoY-16{m@GE9w&+`PG{g{;rMq47+nvUO;kDhlcqFYl zfI9^f+pZ)agr2^N6S8MbunNj-o7U|0(Hws(K5>$4Kd0EFqD8@`l9qRhYQ!ccc?1P* zWg9~cPa&SwvqI|$OF$(;LWbGu>ZuQKvf|UKD*NA|m`{VC0zD93W-hNl>>!thCO zTynQHtZ~y_J{)hB29#k=`i z!VHA*+cjIcmkp$P8azKQ;ndW%m2#|b%+CtX5&I$4<5mMt9}aY${iYPgmcYl`ScPb! zmn6v%=`_5c^#1^xaSEC-UteEaPlU0Cj;b_AjohdNEU}Ls5V^ZZ?+^>PM@<}@&C=P@ zkU`)nd5QF!H4MF9CKsIA9BVEsnR2El9AwP$FP5xArtP2`7X*Rh%RQF>pn&0?lS&%9 zNJ@&32Ui*ku->0za91R`ma+*ZsFX=k-zoDFTftSVq>yzSVa%1-)TLwPO^nUCfG5{w zLwvi{oSTgtN2d6MiXp20Z$y|hf|fa|WL>4C-=Sx@1QBa!cpKhr(M9x0HlkMf-V z0P^5Mh^6ODa&N4B$1NrikuqL5w0IsW+3J@8%I#4xOjUyhkyZ`cLl*te2LO4F<}_u; z!ayQ6jADfrdGo6s+h$K!3N)QM^!xaLW;wcv(j4EDGlo~f=xM6DrI|9OUuxH@GSqv% zV$;Sq30w^hmm2Z6@|JD%ISD0r0wae_EVgm?9#S-?nNgefKpYxn8@}o-ic--UrekEDB7*t7;&qKn#B|ZE zi`Ms0GWJlyFpRX-`9~?qm}sl>Rz%88=TlclNi0Fyz6tK&%&J*!?v~fXk%oD>E2KmWpQEPEnTspocqdTu{{W}lTDu&}xq~<2_=OfU z*ghzikksI?5Y}S|DMSHFoHy4_SnKDf zoY9^#{{W{bB9AFiVmY3yJ`XzK_`V@6dbEElTvR+Vx0frjvAvgQ)Q&7-+U<9Rx71Pu z<2?h2s|V1ee#}ab3NQKJ)o)R=2TyR^80EaNkMygmZkhHRAiYKMLB5c zmMJXEs-V~&HIZ*6t-Zlfc%@OKjgmfob*r3)<;G5gNakpOVf}WW3+qi6QE;040v!JU z3t=$HU4!9t_;e-cqK=+qG_H*clIZ7gTM~Bx%&y9i#m-cQ``MJ7WY1PRO=He+-nW=2 zl)y<4IuDg?2kGn9?pw{dOElr#N2#Nw$rO>e#H-e960W4q^rE6kjTT&_qc0pF0%c`p z9@~y%M)O-6E9Th>)K|g|KHa9LJ>R-<`(o^UylGd3`p@YW2Z3REHx;PGb6Kbp%ae1( zT{*_-|)2Mg*O z;@A|qx`t{`HkxW<3@iSX6!CeG{N*idWGXW&4clxT2*0&jUTs1F!0`C{DIsZ1V+LqW z&KRCQRnvCI@XUJ#tj49Pq@>C?gX1$%VO30K5Ih2GE$LlkVRa}ZU`>G@98!jx0a)Q3 z9K~G=POAUcE7;Z{T4ic!u*au~2rnHk%cRCaZ>`+x^l8qqCi!JTMh3Zaphf}-_Es6E zkNI^hX;6l%TCMQ`B}78jUCwM2oo#dBryF|knbV2)QgGpHrgr=)W(k<3pu^{uy`H95 zawa&uwBVcRWeVlFb>Bh+3~9FW1c0RD6UL=Qj$#7cU3lBB6p#X@n`zLt$W%!jlm z@gH#}db&L6!j~>Z3rRG19X%>J6$=#cEuq708L1lqkU~;R)rLs{{X~?Hm8h|p@k=K ztt!+Li1t)mUZVw;IytH2#6*<9Me_8K-)6yWq`DF;eS_=b2QEr@NP|v0bcK@}N4v`k zi$h5sBl=~btZ8aC?wYaY5*Ax1xe_P?PS)j@a8&>Z6{fTjNe5p?&!UtRcwF(y#;$d0 zrHn=zvO-8ecUgS0dw;Axu3ayH2u}mTsIZ}mwwmYX*3C&+X>pr*#fS8fmL*AZ>sOIOlear<7}Pyaqb^IGI41lh0pGDnm{-u%J$%k3Wq= z$`SPnpVhrjRWtQ& z-u&b(Wg+J5P?V)=1g8ndX&YmvvrDpU=JPPL3%w+`f}j*e(Zd`jHf^i^^mXfUr!w@{ z9iyn}{x^tboUb8F*Gr1xG)-MoQW;}(f;t5*l*%I|8PRsw?mWl+>$G_VB`HplFvsK9 zSqqdcDsD+LMlP>AdW+7K_(VMoG~GMLB=d0$$CY!{RtqK)H1NG`KYP`Tp414 z!9NZKRO6=w#Ex*nj(;kUtw5Ne8l$4Ud+P(_+`pWv=(1K@&EiAV)ysH|YE2?>S70YySQS~wz 
zFx>M`Ls>N)1yvLjFF{Z5GP6cRP4758_1BkMMaGhaM~5F;VcWkH4V3$nx*3o%##!n1 zL(BD0VVye2w3u|*R#m2EtTRDOj6Bw_g_q1$(C7#tYBlGdaIIxaGw`c-#xMqJmUi_e z>EordR^gTJgXPAiX>m-!M`&p(CTC88VU^gtnNJ+a*oGw~H}%4zEFCb~dip#XxeU?IR;=b9~9hwM8Lgw%Yk> z=EB~?&m}4we|>r8%Hv>(_g7R@;n7w+Mx`YxLFNNeX{uw=F)BnODhavV z6j%=qQ3e&Vy!wd12{o3wvJJ^CJzOaSX|}32q%6!^+zQT?KOT7NJ9Zk<)&P;hn^u`# zM)RHu7}%MRK~6xF+pxK~9}~@`1d6J$RIdt`62&&}mByBN;oiuAZC4(}({JVB%LNVs zn`CQI6DFhl-1LYhm1VA|pH=g!g2Jkz=Aj#_SEfx8tKx7@u`GheDMp8$Hz!&+{HbJyUFGXkrY;9c)Q(Y}fW5 z9ySRZai)9Hl^!IDLgf;8YsvJ(Wzn!+g9EP(DH#Q6r~C!FgWzqRL3EqG673bQ&AHuRQX7&BtBk&goRG= z@Fw;=^y7f4p?X0v`cs6swjniHsFw+;S9rx$MRi$EA(0mA?#94d^|v&QN!eaq#4M^0 zJSz{(E+CTP6;Dr0`$1KY&WNc{JE3!O-rI523~Bb=Bo#~(UClhm5G7<3H)VK4j%3>8 zj>0g^bT+Z)w3AGzs3tZVhf_gIS5Z*-hAB~qMu04{VTjQX*0=95)8Kia(J91DI$1)A z1j&uP6ql;P@c8N@jcZM~MUm~V__;?K*8ojWh?s&0yQ;Bal=RbA)k4)WQmPT;y++%z`yCM7 zN6~&--rgssnk|(W)Ke8UhY6VveoW8C`58h9wcP9kv-5 z4Lohf1NuyCCcLF;RGbY;#4D;QC}~onq|#HNymH1;GIbll0`~_`AK}j^4MI*dq`stq z1~I7cymgX=sKyp_TPZaTw~3Et7Hb_jR@9I%IMq9{qqho|QGyD{ja=BYkR(7s^Hhah z8%x}5xE~#O!S%DXy_GdSu;B3Bw8A&GV9a-05y4OMp?x%8gH7zB7=Jg6w`a9Bnj>V zskr7cmfQqmCMzbwl0l3;Dqd)=!LiI6GszuAb_*_4jb^VDIE_e`6r2Gwtf>Po{{Sre zs!y`rxujdH;Z{+pvL-~K`@i>cWAM%Axo(GB@ zV4!``Or_v>RvfJs1&`u`FH}+_d6t1AN$}N{x+!UqTY2*5NdmNY0@fP#JfLAm%AP}_ z3E5n877tUCF$!2^QeBwRBt{r)-Kxdh$}riiQzd zIOhqXd`hnqYS$%dWLgJ*F+AATL$v*(I}Y5)#9I`gMh2mDx+Oqi!-S0Ll9~YoRb+byz!4RDPB#}HT~kG z$hupGm=uvac2RmT7^avWrYcR_Y7Qu41tpdVC9pr?(TFR{1R*IZ+I|$ml3c(6BAW7) z_>L8rV5`D0DL!fjhOy2ZRhx2Dq{z0INqaJJn=(@51p-ttr3EM;7$AdNjrvB<9YX65 zRq;&dC?uoBB+EH#`LwuAJ{K+%Lx{h1s%}fExfZscm;p*wtSC^}KpSF^ zxE&8sF^U{Ynhe{C<-9ovpA5w*V8P>%R69DzyJU&fCeR5RejxW8Ww)Fs8gMj$9VV5% zI>fV%5r>?-D8ycSl_n?Hd(sgMw17q@sUBRtgQ{W6bYX^D0Rlx_aZro1Nk8q?Z zFr@q{F*cKSbVjaYc=IHq^HFwaDO7H@jNQ2m(6&gSjZbBjIagn zH+SM(jnt;(B|t%%S$@iv#DfG?div7H-Ab(K&q8{KQ(uc?I7;CumM6_Qwvvu|%6Zum zr68En6QKcumijJF2Roy3l&RK$gF6ABe)3yN5VMR?uKYJ;ugcvzp~P!54ltCt=jLWy zsVxNx;omn+b$6pOw(`wydg8}{jqbeFaA6=6@uS>gqS<$*y;;xwQmMmR zC1V+e8ig`o6hgR~S(z%r6cO%*cP2w3u7It9An=1V&R@7~2w9Y;hRQs$%GzzkDH~|q z=KAbgKJ-b_!Va zx(W_8uji}!?y={5k&`jZHx9+}#%lEKA;dE_8C!=&uYs3TbTsu&0?iVTN`+!9N4Js& zSX(J^hYa_dI_N2S78I4I4K-lW^(Uw>dSU68S75}Ac3GgH!K$(PjPr#**9B6}i@_k9~6YEtS(EJ5+e^8<6 zjv`kDGC~djAVN#XOo4H0*u8CnR7PgY^|O@mS=3ahh=`OuNP9TqXqX*NRJ2ho2H>ng@B|aUAVHH^Y zelLRMiu}JSQ{kBOK1|aJdrYKK)uQ>31hWS$x8QRA;VW?j05AZWHnz1^Ax91rC3=19 zud6)Q*1WNjdV|tzH!kH`X2j*I%~4U}6uan61dm9m4P{iqDPCJEvu@L{2VOUH!$~1X zGgfO+NebIS_gnC68z|D^`1UQ1=DKQv&{t!Nf#zwW@};MYF0_$^iPe_d)B$ig8E*^d zFBwdqIH8VK*(s3`)+&TLL5SxW{cPu~*VAg8M?cUk_`K^r8BK>()K)<}5W=Wi6b!Zl z-2sjL&gX`iewPXq4wVS$>!QrSM5O(s%~}UX@~>ZdPMMBQuJul%AIQ17y`sS~R!zZj za?xS11Zi5Lt~W_zNg)07TV1s~yKsIUZEk8vGk}RW&~?_xoNAx?FM)Lj)(@*{>hzX8 zUY&C_Pf#;XV#TodTAD06JVm95GC?Fqe3lzRFt^dkV>-+mwzKZf@dy+JwYPEW!k|e= z_Ey;O<9TL_5}~45sA^%wAd-(Cr;iAn$+hA%%F&}vMYmWp8}{(zR-1sGht7wZ4iif* zq+wW%UI~}-t|y4%6_|F^j+(avj)h(#Q|C!k02Eu>_2RZEbyR%?qf((uqBE+#gg$5mi^xPa?zGuVg%{0qi z(g`UT$l@!6+`s~E0qzB7HU9u7Lt;ejjy)WHR6^!nmA$C!qIdYU3W;(4jb>VWeBxDk zg9)#y`I)g4GQ}KK^2m-LWF|06SZ==VNgJ7Z6>Q`uVW7_P+Mxv`nZ~H^5^P%$p0Zl( zLO9mCGA9oumF0CCnb{Xo!Fvy31B07)mZs#KXcFr}dOE2&hvs}fEJi#N569S4Ia*KU z@k)g51MTg$;bExLi*2reI!4n)_YI_(P{M}OkJM0#Ra(49&YXx)MIr>0717v80X!3j zz)0w#<7f#c1qyvE$h}AFZgXu1A|h*a2cKbKnmc$#RBMZ7ix%HVRjsW()kpIU+rjP^fdXuZu@l9tjl`H5Z~a zt>qaagf`Y6YeR8vKW;)42~1;1Zd4SJ2l1g+lMty{VyB|UsIc^&c+zS)LTyWp8*=!t zJQpzOOL!NZfV$o~M=<1E<|4WOyZIb|r~VF92?*h3FNYMP< zBdRhqhBSQlvC8c z4ed^HMIUnk~F{R6@|6rah-2H-jWc~&}0Ck2Oq%D>o3Uc z%mAS$7@-@jxaUo4@{JEuI&EEm=31xQg=Wl&f#LX|#PHebr=6%8pUiO6C0tM+Fpu&~ zq#omfE!y7e#}(YVl3WDDfr#Vmnn#*$&7yTJ9u)EO;YCj`JzrAsDp>HiFg!?BV>CHR 
zlPs=cnu4vWE2LPXj4Yv<0=cs>IvqH_KHom+w;NH?pbTL}IezPdRtv1C3jEPx1_1CW{Qfy+7CD7mZvk!=0!p7QY@$T9Tp< zE+3Yj5j=6UVZtz0OJ80FFE@>w=`R>DDT9UwkDUpvGH=UZiR+^sfOU9eeDz(8;F&82 z$L9V`O+`_OtWsgwhOZcud2d1^r1O>Ghzo3X_kFx9cQav4rJ%E|B*@waMHnK{TD1}D zRnylzwT1eM&)F^TII`XsCVt29Gm7K5B~~OvP(xC%&b31pjmymmw&aT8Z*3F<$uA_| zmF4U#EU2hJgMkJx+4IJM`Tp)A^+GW8sCc(Uy&(ER&Xw37StzSI(Te9tUTl|z^zAVB zg@jW@<~qqEB6*@CBU#7*DbO2mzdzrbyOzH&%7Ow$B5^08l;yjy#j?UyB%Z7&&~#VR zr>A~^=TGy!(EKwM&GlLSD#>S<~4%BaM~l4^8ykwiokcMAKhbI9WJd{vvW z+RP_O5H=a

    ?)K-J*#}!`)Sh>yPm)gA~ejT_WfnO~X24lJLAzvYQgjb4Noq819R@ zsVM0w6fUzdCQEniz^-)q)Z?jHD^ZPykDpx$>~u7gtq>=xnW}I-c+MGyu4)Vis56PG zAVEDQBToe)$xADFb2T^u`-Scg+P8-acXM-EyuM{GpaH9F&OLO-ZJ#@9Z0SPefj)E# zQsi9Sgw@l@ht?cR1D|zGMms#KQBf>XNx8$`?YmCud7G-;mu;C!Sb~5sBd74JWf|Ky zE|sozn%Ps*c3H~(6Y16u>PlXj<{V2SV!2Io^J0-@II$dNfZ-UDVRcmWShXEZB{+_C zl*rLkLRh8lG`WKJjf?En#IjVKZK!}mV*^(aGlXHbrOnc1n^yIrPO&CV0MO%*v#vz0 z>g2sm^oi)J63-b995`dr=DfR%CL2+PV>Ru*Vw9NEJTe&-h^T^`F?P@q+kr=0{L4uy zgg6P5s6mml5i}?7poFIpw+gZ>x6@vgbyGZa{{S;)o|xy3nssWU2nPehDyO5vu!DiY zP$rrg=V+#iV3WCLf?2%XebHgU^6HX+lH7^fF@r`0Qj$p+RKBBnO3WE6DD^_GFXj0& zu1KhzI*XRFzEi5Fl7j=24tVhiO*IWn=;lhNip;TSE?6*Y0CgJ^kSYF8sousk~aGXI=b$u031zn*{9T`Y6ltjbk`b; zdkBh`%*62lHYHPGTP;2wCY^8Mrcl5On;ToV8Y?FRljti~4EH1Ex6Bmy?{(`ml9)Ix!rDf0x)nPyL#NFds=$>pGhq*W!<0k|sx z?Y+jA=G$%tsm5tyF3OaUkRV~vPqo>n0Q0vNovPw0(=-=l+AKDPnTWUS=9~%$+l6W8 zfY~6LO{QgOX+q&N^stFtg*>&jFsg10PTiF7gKKxc6RW&f8c6rnlHo#Rm?x&D;h3sR zD*pg7j|_LaR6|Pus3^Ns`I}FY0U&C9PAmS&5Wd5;ILeBLSl++SH zPS%2%)*%~$K@MRAkEGw4J7RIID(6h7^X{%Rxwi_VVdmDRBt75*Iy$<2katTe>GAxB zG)h1u?CGsrTmWKcpM^eA=B%b;5=kU+e7Rs`id6-7uG?&qb{)@fdF6ndYe~No2X9cq zpx5J=4k1lNJv`NvwT{XEp@_`&Fu<{g;0omy_AXL&u^zu|@PbZ)`*$PsQD z>2((67CscA3rvYtC)g@DjjJo_Ybf#RXsM|O`_&7{`ar{W;vd~>-%WY6w}B-3VM(0D zTYW`KS+GG2D~V!FRV)L{jp<9rBJW#^h`0N72b3sI)YHB4G|INqlo*yBPPHpYl z=9wy-D@fNC3T4^>JW!-Y8!6V`@R*6H&5q`TjccgVhvlgYMol=1V8ELN*#_bQcRXp< zmIs|{^9}(if$*B(F}&cR449=nGS7J)bfR#sJAgM^y}UTdw_cyRu5MOU3dSqgWp*u5 ziARamRev*mQZ2OesZcZp11FD<8)*d~$;PbGlM;|;WnDj@nAaQVz7Ljqk%`k{ZHQvo zhaTznT*Rqjnrdt{r)ouaZMQ)j(M=!@Z?&}V%n|Kxcx_I2=xF}Y00mAXZZSkkLO$?8ua}1d2bWMW5e>dRVZnw#U3$39SFRWBAxMta_RvZ zgYo2fae3d{q=ilg4vcC^jsyObHp)??6eyw#E4$ioB4tiWVqb5i;+Z=UY-9SzhOf_5;FLHf zC5Vc;8B=kpfYQdn>C>|?HV57AM;d))V*q1~X^Ul(H1Cq@u^i8nV9Xg08l=npHG^a{ zn5}Iz&x+#FGrLtx(UPxDLz1c(Uf|zLa_+W5K~ePT*-q+(!5GqdqoE#>V$Ab(j!c

    _7R!LE2nO0upStu0He02-_>K+t zcYW{jheEB^K;oa_K4X(n=JoDvmU6*O!X)rKN0H=Kp5CDPYt-I~^-DZ-w-m=H8lxju zz7sYfg_z??x>-WYO(`2(U88j*U)^i_Ih@<0ydyZUYVEc7Rxup}U`MCIx$C~WwR4r` z4qGlfCl4+Lfy!tzT&ALS%-8Z`*(`KBe92VzJJf;o+k-?|sHQbfx+o)|$k#nJ0U*WU ztc7avmJ>n*@ylIN{xs|fx$(8F$J=2^<4BS;DnZ0_)1b#_pbWLx%yg!pmGhb9l&f1q zc3u7SHa)v?>bOc(1lN+a1Br>K^5#q}VDn^b?eY%YX!q>K>>&FX~R$M}U?mQ{_n;Oe?a0;k# z6h;U1trR+~^wFdP!@~S=52WHG3R3H+Rwn{QK$@n7t0Q^=EYh+WstUs#DvE9aRa*<5 zd9@NYVN?nr;W+#}>(ys3&Z$oY973)r{q{s;Wp^OmgRar-zP!*6@^P91cfKqE^JP^(&b|JIzMRrjq9&;0>PeWwxG(cuSX0G^r9+t{Q&9>8 z_xCT2*b!@ee5#u&i7^#$41hLN=_zsg6`Rejk|^o!)iF+r%*lIgRNZ|!a@pW&g)WR0 zupVN&r;eIAcTZA$=tsF6k-H|#Tq)B_apHgm9Tj~tqtjWV%``E}?D#j7rqTKEZ46u= zwPx_t8}gm;#2hNAypjP<(M|RJM#D^!Bv>Rhvxf3dnD1#9#`^`k`0)w8SWo~{O3Xo5 z+dn$snev{-rYstc8JOS7mhW@g_hE8R92aU@L=0-`xgY|RiSojf6#21gqCS2XO9Zyw zWR4{4HwS&t=P9t>|@zU+hC>b-i&Zy*S z1RBXiE9xSt6b)NaicCnXs)VV~Xh-ny<=fa~ML5$6BWWk)SSqmGNd$3LW6OoDMa)pv zz=v&xv>;zz*Y@+#9A><2h%qn^RYxL<1}}%<_;p1RM+H1}GXT{RNJT<|z_GfpAa?D>mZYU!bla#f5;&j6vc)ReC~(>c zl6x6NN^L|pA856%57EY*Y@84(?K=SBvYR8O%t0EOcxfgwhKZy^^BF@MhTP0Ae%Iv# zPU?dc!kbV>0u6UlM^z8YQqj>PVl0YgrKc)a-DB??8=t?IafQSKg;`^0N?^r4<&3wS zb8ah1mTI^@=VW%QLQZ&Cr2O{ zg2_6@tbtYMWJQxu2XamM&290|p1!-T0+Y(sf|f9#iD{WcY>T*96R%<4fW^4%3?_uFp%dkRtaxr(qprad z2CU8%GFMSb^b22x)x|vCR^6LrM*wO&?pyslt-Fg+t+LnCa+hsEm}f z7==tF8aSsQEG-fT++R?(VsG1t3f=;i6gI89C?@Jps(zMGEp}hWx`&D3*`=w@V5y3)pe#wktyj0-?hJAXEd98sMHf6(sjPV3}7Q|YO(l){w`f}&bi|@^sAs< zO_e$lDdNKM8p&y5k+F)AW~q`Xq*Re<3o5KDWhF(;@5u*t_jzrlE;tW)wmLpk3C?qX z?4=)W%CD}KWS>!8SH|$FylPsE6CyiDQCW^+RK+29ByqS=skwOw`!>*ckU5eYpV^*g zm^_8tf`)K7;7IBfk6Yc^c2|`&zUg7d@fuSE1%G|>8)*2?>3^m$NM4eMHA}TxD50pY zqgi%21iKc93!7WNZcZrdw<#C~jF0cC4!Z55Ix?ZB9)#jO^@%+u^y3zhE0kl~eBiEccJra#$-JX3O1ljuJhlS%2LrMN&8U(WFgC#P8w zqYRFv9xa3$o#k(PcjcAY##E3_gE7;#lR4V6puoc)Wkf0F!42$XxDze?}hZ^D!9}Hw9tFi?!08|mD4QYn{lkMhWk8a43NGDGJw57n35w>dmx)6n z#Yab(@k&|-`FEZgsNwSGnO4M^77&DZYku5YZF&;DBvNPXijyQ^6inANWsIfN_rYdf3vNIXlOB{?`7(upz}10Chc;~V z`I#ti>gh3D`zBhb!>KXPJPOCFrGecI*1&m|EJeN?CbYb1Hvq%{c6CvTtyiPYtA`P+ z!)kpq^@l6pc_cq4ac{FyDP-EdW9s+ z;ps$oR+_DplfsLqpiZsfd2<=8={{|&uQYYl6*-?L;;QguM~mB!>d^K zBa3&yG*5byfr+Bs-Yfp+(Xy^%8m`P$7~~n(0Fx4>#bBfUt!CUNuuk+ig#tJ#{$H3? zPw5HXpfL8^k)xL>aoFfPXm;JQ;FzHYrX5(MgQ%mV`G$^6FBzs(tHA2&>L!vZ>Pez$ z5Gj{&F1uJ8we7`jX^lZsG?5dCJ7|b{uE|*!GW7SXIF3Ti*xWgT9Kb8EdGZ@#62!|@ z9EM2t(M+3afj3bj62U-FMXkf!yE1LJ+tmOAw}m3SwOK*qB8mS1qOP-2w?c_U{jV!(|JwBs4=sx9xO5CZWX6>ObL zTM;rvTBT)v65SNblo@Y7)KR7&Fw9>B#&F4Q)ey%dZb*h|Ibl>~(W7Jo!t2}xtnN~k zD8!LM7g5@_IblcbpZ{X^^Cb?LS}x$$cFBE)0DWsNBD>^B>N z3aF|=RYf4CsETqWNmp^OJ+4OrdA4YmSZO41z@5DW7}_ZBFR)8)tw}v*g*d%V)e4M- z)VxOz#IX7vo7Uo4KdD$vRvk%|H}}`L1tFs0xBA&!A*T8;iQ~8mz zZ60N+UWl63>+t->C8q06Y-~EJz6?2VCB*SqFuGas3YiMjLZHVSP`k8=<}(XwMeTbL z$5Z!2ai%U?)I<%PG;K3feJ)JZjP$xp-Hl-{)~c1Dss3kGU5Da9o}N(_Axe#gMNW+D zc2ptvitF5RPsy$n8cVuFqstl!W(_Pjq>YNBzv#!&=2i5uMbtNH>a1HgW+tP=3>9Qf zkWxUbVpwN|6=W*T!L4F^RB_zrH#a3ETm}@*S(gS$17V{F)_muI^mn5fkEmJCI8@-& z7)YqZuxf}rs4L~Ioj{I`11PuYbp$qpR?rLa9g`)^f@yWi^q(q1bSKsZ3DpjZRpQxl zY|Dne8BXxwb-hHxrH$mOkNwSRWK>BMA9Psj@#Vu{cuv7gHdJYSnRAu_is0FQJLcT2 zTGV-e5{imS%32z_k18tV5=8Y7#--s=ziY7;Q-2O8P%H3=F^wripiM!@c&1-k{M(y9 z&OIi6$hGpx3|VrU4U!44NJG6jK_Zf6Q6kHO? 
zCzqwiuw27dm|4p>dt-Tv5Wwd1FvP{6MO2Cz6G;OrsW$DPI)ljbG|n!~!);ryQBlS^ zI%xLZ&6F}+bP*I{`UOFUQ3a5*p)sVxg~CHV1sT_Hl*X%J18~0Oh6M84Pk~`oan^|@YQ|Ng!>MkMGuS9_ zS6d$vI5>9UO|~ax1KYMqS<-N%mrAlcSROA~iDH}9uMh@!+E!e)n(wIW9o@1t|* zYpt0hp`N7j_fj*yIxvi}m9WfXFx10PMD1NXcw~`7M<1TAW~g>ly4Z{N0{jVYrIM~~ zP=swNNj`>-@&~(z^{Qk8Ro&LUp=JEnThuIT3>0(&9j>az73H9WNRw1b(xh<6%FGfg z?F1X~((;*EN_}Z%3?qy zEfom`LQMzUQi;Jw&-A569fnd5DjY)zrfPUx!yYA1Jh09q)suR5W=_}aM-N!gRRuFe zFBN!*Q6{_VmR#ztUdi1Nr=K{kamsx}!zwe@TEMVcm!51sl_zK(8njyoobBAh#B0DV z@iTF77p8RLBdk!XB~uMGGbVAVxeZ{(aKE0OuvXGWgA0NdXO37@t040N8|~KJ`HFY9 zYP7jZVhU6bzZ#X0$bGVKh~{V(%6O(Vk@|r}1$Pc|dw7l=mTfZHnTZrefe z=3-?R3U9k~r07rw(kc?o8cNczAgCHVeFy5FPq1#Mtv1IST2Vzvm*U1RkB0M*=u4o}3r zLgY-po-w@jS(mc5d!oVV@JT0+4x$Ym3{OTGqK({v6U}J-`&?SwiEJ}rZF8(Pvf5IR zWXu(9$FGc3n*F+6XraOarbZP_*+->xna4ikSz$43FC*rOF^Mq)U6~%22uKzQiyEJp zs4M0;3%n{8Bx(l+xxQtVh6}dcCQo>#&9%>>SA?^AoXuG#gsbtisosls^QDE8KOFhiC(`D7U zje+skg}H7`mEAIcogqOp5e7Q?seWOY-!STol9Shk9KS`8dd|uig?1g5{X){^iSdhL z`9_zhm{j<^J}psGSR$xLsE$XT8381M8G$-!!Y$?8oVL-@`2|H>Q-l%gMwuq^+}x0W zB06}|ht|e?^)b*Mxz*tvKkDRE^JN{i%|T0mz$hp&ylRpVY2o~5qC>Z_&1D_?YD1y(pv!+=iVtnp~1oRt-0+ z?@9S~Je!w#bJa{{XfX)rk&>|cGAJl_S#5jsO3VUD z0Ae^#!miz-)OKM@{+o4Aqxrs`n-j{}fLG=0PJ*)+!g0FV_X$x=Rprx39KvS1vUz~* zZo>RaZJaQ?L64MG)oS1vSO3u|D;+%?bTv3tGU8a0lT_7HPc%;(eV_=alG{n{K-byM zuons;D8m}%I-^m-Bch^C+5SnJ5U0awzHVnQ&nM0+qu5;jjTC#s*{`>Ze%UF*NSdQM zuqFzHVQF(dAkoE%MTXGK!HJs`h6RzMKvbsUMX!5+KqlPNK(Fsm9W#LMD3@tcyd@3z+;q~yzPGE@^7}i9m5>2>nYg$DJ z7RSH`<7){Z>Vq`ev=m?!!y=U_AIzqTrL7HK9SkWU7uR}7^_m( zjup0X0%^vGl^7y83U8*RqDHDp8ajBxzm#cYU5_J{3#`v_3BQl9a;4Og3}IJp+!YmI zBd4aH<78Re&!>i#4=__TG&M-f_8radd!GTPDwUw(qvcvsW1t0X#<)X;P}SE-OmRA# zK9-mP4G)^LLO}aPg7*aLZZr~|r}tHD+d`a;iTPJwDNM7}LUSEmMPK`m%fP6u9m@NJ zgKGoL6a(ffUCa~Wj&|01e7f}!Ok%3VKha-%OH%RqPQZ8DYY+#wjV|3VN!T&O(uR;i zCsq?j@6-PPP5O6<<*M27oZl81m9rCNdd!!P;rPZeM^8zMVg#loPl+xYJZi5Tlx3h$ zY<$Aj)N=y&(!$y7&Em9$I9yOTZ2%28_3^Hf?jTjBS8m&6glPnP>gBRWrwO`wm@quU zkv&ajY$qgQ@X_Fs6)M8!HFDXcD@a6Rhm}YO zyit;@Pr|TuzZUf0kmz&vdd!)dG(43QwfTCfx;dG%EEU*l)jLTv^l?jF1Vy1Ocav)c zI@rB}b9vO8)>fpaftA0bK<3A?e2?=DaH~WExJU=lptHuYtpc;#s<|V>nhD zhy3`UX)7V9oi^fgfh4jxavRJ9_6nwL+9@hpLtz6}KFx!woG*8fnZu1;BzfJT$+6=Y zei=grP8^R}f?#vcEKvNYQ7B58DE?x}atwh(s5;o&jJQa{X{3ojDvgzw2*Bi`r#XWM zrAc!>WP~;)izk~?K_qHOODp+S+j4!_!1{SpZa^b}#MPGhnCPI&Jo(Y=x0tK?JxQ9Y zB_PfoOtRZ&YUbX77;XQvVVjTSq}_DEab zgvhyDjgmg(oP4@zcXl;V{PgYAEHkSa5}H58PLWM%JrHLZT$-aFrH+Cip^ z#k1GbP^;^Viy{hHrg>xy^5PG9Qp76~#=zUSS)Z36!XpngGE;?aeLcrZpu$%;5p^%1In37 z9MyuLR2s0;7tLK!Ko{EM>F-I8I`-Y!U=G#mhDW$GQA5MzP{NE9zcu^`KfY5TAQaWmg zxl#d`flyNunrez>qE?Vd6c{BTz));1%q}(M(=&xK6r~(ZJk;Z|K~LmjI23TYfS!D9 zx$dJ&ixKy7id;#hYPc#MAxY}Gn5SsgDoIN+{)v3Zce{cMY2}Roy|v>@8vG!oXe5=lHICNz1&Oidokpn> zThge+#8f&uOj=5GS{%Z%m)ej<>o_9%+QF3l=a)-3KsBbqlQ_upsc&1DD8JDm#uDYm zlKGZYDpZvOv9P|r@a5tpRRB;-PBT(*2F4nOo>4UyvWh&~SQ1^Lg<>M$+^yn9`Yqe&;{gPpxYJ?fB|{y%E4qwBIht5w zrfNtq`9K~O#o#jgVyA%|tj}o!O zb#xzW^;TqkHSW8JY0KAe3c(T%Bd29tRs~6iV0pU{%hXv*8yJl$%ULA`IbDa!xQ!_y zD9SjK-H&dB5pYF~t;|t&;b+8C)mc`Ck~R^Cy1S#zxpM)C*oIL(ir74p-g>$J)2Q*XF=FFXuvD_mY!rCeD zn1nQxwHV)$+u21t?yf@(31wf09BU9nN1ZnA1~qY~e3e7s-qf% z56dk?^%Bs}Ge{2Ali!&YfRGXm#)F=51ONszRtnUuuqZh71D~)wt4pYNv!ugp+7&Lu^zkr2bWR!zzP)s6KB+0BHjVAv^^)~!-%DaG?bpDpQ{ zN_iy{tduf^fwvD~_>UfJ6(pQ^R`s8>ZJKBPcTgFrQiXnLU80VZm61RLLd8J(xk972 zjAL3a60;-7(`FTjRZ{iu(?wlHBtn-B%{Xm!L=n;#dg|W(d zX(6U~=2FtESCq$o%Wsb`^LQ>2{ewwH8Vnx3y3G8h(Z;Tnweb~lP+(nF6$EaeNXA3pLjmm^11j8+g$Ya3GeXWCT~WH1EaSMWP-bPPo_b0q zqccf0K-SPKt~WY^xT(~S9(gP$WmLF!#uWQamT}s8X{4{kYH=!BqP0=8P^3~Up_J_X z8}znL&vj54g!R!37oEds?G%Pdpr@5wIWHs9Q_1P*By 
ziPt9pI;+aLBt;RL8hCv^*TsU?^yd`B^6fq)N0J)5C*ZX8vP(r<7U<=udWkNyzVfW-pOfzl-FDV2m$ z+Cd8oxLEgCcjNhet4asnDVN+jL>MB6Oq(7xQ$^FnJZGnw%O=uEPoJg|^O$BsR z7@a*dlYF|9^I*q1EX-ZiO|&Wjw+v9pN>a3#)9$3sFIJ?ThNI~a~41ugeZN8V} zad(uK9Vk}Ox8?4kt2u#n5-6kffMR)b)NGMKi2ndnXfTLyoNF%i2NJ{aYT6nUID&ak zR}^Hj3*IMfy8=Gq+nTsPcLjW~=kB9&dd+E07K2^D63kuzRi&0R64py}>p!m|}77bt?hh+~mewIptV zqp*yU5thIUTo6IjZ~=dotGp#Zpz#KTcDJCQ01XfLPAOA{=E&%A39$URL@H_Kuf-K6 zt})!U-yuRB(le^xb$}NqjoanWH64_(*W>}lgTGatpfIen(@vq()nh`h5~9Lu;KQl$ z3^t}1>MA6rK3q};P`6@z(cpLkzt#D)pv+Ak*s^<~sM%B@_mXtIUkAb~-P z>+l^3ia(T{Q0bNvS&p_jQ(uYZtbtDofq*o-w8?nvsLQJ!&_Sr$kDrdL%md3RCV3z6f? zer+gLl&eYOUM!tBf_*5);hi1nRz$&QGe$=1w8=x5o=Utg0>JA&V-2m_s|cy;H>#*2 z*MupzbHgN|1s^(0y;F)}ilWo9F-8-zx8H*O_-F+54S&mlc9+ctliU_Gvky^29 zV5)b9WG=Q~(UO+;Y;St0hSb`nGqO5C>Z`0QfzdQFbnmGBJi_y46;Xm^$+K+P%BH51 zH)iSSp{Lkd4>2kvrIjwrAZxJ(>)P7zwHAIURwNow^QuT7VbChCKNAkRRbw4x!F@O6 z9Pch#!LXb&$<26PQb?d$`Pu$r3Tfp<-xyUEwfG$0yVTiRfQ0SZPe+w3$eL5G>QSoG z&dHbhv5R!qq7&uXim0*L${YtfR=rA8`E@eRNqL3QqZTRwEJ3#;4av5+Mc;ar1CAUd zb#&40^ST^oKs+k`dSypEIkTpHKg(E4&6P3?b@*l-i{r(LLfB;-bfy+YDr9ythltpz z3T$s~Pwuo!XBXv=uLRYOTN2phxE zZH;3OZCEf3!H(x$GKF_5z??z+XtzGMTc|2VBv!n0Wg0%B<_Zj36~ig%sj#+=vkahl zSPv=@ydesYy5!vWX~|VB>u(W(JjF(eS!}{d>#mIxQe(K4Tomb38WmV6>DujS4#qLF zf-hs(@q0F)H7Vh=QfC$d_RRFsx_U~fAeN$rIQ+5|V@HwR>PF)k*&lyk;jN=9bw(fq zNS}cK?GyuNy#BdU7R2hS@V}Qd(Ze1XlENyMC74;x!?MN53a3yH6J|FHWjO7pZP14m z;XE;78y!yAZ6x=K6a)cvs(r!6bT8ge0V3a5S2RIcqU| zI$Ta<^66ZnP{Ly&8ats~=rvV)Pl{1g#hY;nuzWNoCqsx~ z5s4NFUQa(@AeCkW_9PAqZt_;0P$@DaWg%lR&>?BYjAl2|&Z_6^E>5D- zSiWrmHtIjn))mBZGmOASKW zIcetiq#GA&5H2nchY?-nuej0f+62T;95_>tUfT!Yt_QZPv!i~qdUetMs%KnUs!H5L zAYxf_8Nx6M9Ab?cswSE?Wsl{Dyo$&7hO69b#5=)m^VZCOh*%19TH>a3ZrzMBrIIgOyCtE7sqt)i!>rffl~ed!~KouXwYa0+|}BgAI; z&9e#`6cMcKIDyb8UgLRt%Sj{^5wg4LZ?EpT;T<`}GWHGEo}K3Wua9B}BjrrL)2t3P zq{5)1A2Ag`lql9U8$%sQ@4_c?_Z)nb2LJ*|Cv5EZ?W3!ErWtEVh~Og%6umwA-Re#g z=_0cX>JD9^!!SzBT8|3D<<3~OH6;aY9xGArM@lJz7^uu<&JXWjgiKmLmESE5EUOJs zi6H(ot-Hir+xLqaV-Gq&WFJu78tLz-ey`vjo$_^OQn1{kO^If%oIW8bcwiM&@isG0 zhlyA$X-_Hzjh9Il=Uq4|-c?@~?VeO6F2GQYiW>>@qWrB^WovCSsV8jh>Y(4%r&=&> zxAZqWV;RRL)=P`zihM5;>CP{K%Y};U8mklE7xMg0riPv=B!hjVj4|#k*S`f@gP%IP zE=T1SwHQxF8LG3(byz3z>88Y>q@%8Amx@`O>o;|PzYieFg6&!+Lr6Ki^iAUsY!7CppXP~M;e_s2RvF@ zQZxXegw@KPm3n^k;nM8YhkBRlR%^|Vg5p!lmGgF9#cOaRz;KG$RViAMlMHy#9-O+m z&J%L5xFBi3mA>g%ht#$j3P}?O9h-5XkN(0@jA)8;(i)UQ%l*HOBA z)T#*cH8odNvs>ZN8K$%gP_aoOihw|@4G zAmLTP)=Gasy&20gR%KqMW{!r>(v06vhT>IOe-g!ODDf;knJ2HNIA$wZR~edGSsCFb zF&h95r%oN48)t*fINDdKG9w5lX39+K&L$L-5NrR^DDu!kwaV1c)6`QUT_$>>7B=tN z-MoG4bF2(LqM{-zmA5JhAyX%>-BK|e7YN3wj8n~6!ju|nmV&kzRitoRLAW1f@8QJP zR>*LjLvLMaZy{*Zk&n)i)LA0}iROYjms-sOQO_*%9W^WjL4nh;9BrEhQjH15yxfDU zXyQ1{E27HZBNP$Km!;VbJwujN@vklTCS*ZMp0l_dopH2xIYnqCP`G4rSb3?7aeRD!lz`6+H%93BQ+0&ECr zMVjZsw;Ec}AjTBU&?p8(`C%P&sHVgyYLHadO;Zet!XX|iiyK6#?wgo^Eqiw2p7juw z968xf$qLlDsWVkiUauBj!VV!EXe5cM46zd_Hhn<{%k67>IGMz+NihPp>F+pU(M=Ti zazM2>)mqR{Y_&A>&eam55vgYL8OQ^$w-_$Agq2NICjmNZCz=w*gp=pE}0| zEtaaKmWrzkV*?|@7AUU~V9I@{04-}@noYw?!Z9^eDI^{dv#Om<4oO;CDtY(L)jPgc zo?%OH>8P^CG`;k=@86F-a)JPf!nV^4m;|03Sn%f>Y<#OjSBFnE9W;1rQ#n%QpoT^z zukRKtw($299yGSmq#cH~0;Nhd$Yr#KAyUy)M+{%+)$rj`(v>@`OAumd%GO$O4Fh}2I$l@6yZ$qY$TPnJv&Or@z-rAx~C+g-X?{rqJ}3){3+9B2b6NyK^6 zY_iXWy)`uPt{En^De?&HHmKSG_eimGCBCiy28LCz-(=~XD z%JRcpZ3f;Ow7-5hPa1iZ6o?}e#+&i)m}(qenvW2#ud1wo&?Ox-%}p1bIuwXTq84U- z$M<#iUxDr7bq(6DRLZa(v?bkatlnk~w2VL?o`@LZMxWC*OzVDJ!FrL?%)QkbE|X)O zBf{`JcPDhuAHm?2vrZ{N1Gra%65S+~&lcO276WNE@61cy9G=qINV!tWn$$+$v>ZKy ziLCv|a~p?Jrohvr3_5(eD*U=R*w zu;^u%=3+=+X4DE=z!Gn0yEL+Gr`L%iQ3K1QO-MG6ofz?>6_>hUm@_6bS&H=wFPzEK 
zxUl++2P0){FAAZKsi3V}Rf_2`NvWoGs5cIc1a|GX_uxrPq!L&6ej6y=FMt9^9BElc zFQr_u)-1Eq$H*9dThFe?lwnW@2vO0&6+l1`n9{KrXuW z<15TtLIS!((@SZ?C!(YD_tl0*>&8&RbB<2IG0G}2S64@d<@$k71L6?SR?CN(0j7B( zYHEgvybc=9q!4xDyM~t88i53WW1>XC>#s9zK-&|>i0`ZZi+XO)eRRqh&#FCc$GUru zW?U_@op(#{LeOt@^5mmR_JsMdK@%^VD%;<>l5N~6`Kv2?r4l7(44s3{j_n(LO`RhF z@A9FVE}VL6>OO6oJLK%rF^u1XVR6*vT+@Xqrg>w*lu_)XX;pl^jUZ+#+=009D7EX=Y>Ri`ztui~DWIRO)6ag{2WX!GTyK#jpViz`Nt9TW=m(*r>U;yJ9u) zJ^6Ipp#13}aS8$fn$1;>Wvn_uRRjFRK~2#|BR}ZL@9y5$9HW;=NsgLxy+q1RG|!7> zI*N#Y(dKC554jr|0@ zFjZNis+Qb3icQmS^XtFRDuxAeSX1m6#D?<>jVvzSDi36(<)*#P#wvZRb(HcsY*JD zWsc#lE(^K$k4XW3wGrdtw8WJ zss8}kkU_$mK64hNo|-{&&c17T?9(U!SZ+nO+kGR9E4`+RidNVie~nCBN+y!A6HKj5 zhsjSBD?D*TV-)va3vSfkmu$~fQ7yq3Y^;+_OHV7Lv=YfYq%2h~;(sh^!*E;sJUMtE z;3?ku8zKij%7s%ilocvOz&_~UfCBenw@|wC&Y(n!Po(Rarf4d_H46-th-=s^v0e~h(t@y*l@bTSo(&Eo1X6hu1Xva|Zs-WY;P>beFiua6Wp0Y{>kD^`(*8bsoKFUvH!UQ7W!MQ0ov63cK? z;WNvEVv9kERoBNF%M(@kO2y25^yeb_W2MO0 zhg>miTOP)7STiOkTbQw7QHdlmM&*&bkQG%3I}t$lh`$6|rqWH&AYcbx+92K=wFZca z(`!3pnPV{3W4T)>;FII{j&-i36cXaI9rS_;h(wI(tKZ)|mNLw-+uNizoutHh)Mc1v zsIy^iF9f6$hm~4iPbhM~OLArflHkMXOVCmzc*Y+~PAW&ux(28#BW)zKm)8A&@aA>p zHtktkp@fJC96j~VyyG@<{^{4&XIiHmNE`=MC}OC=^AzfsZe2`1ra%>^N@{YUWB0c@ zvXFasCgY23lnQ|YJm|4XE_h=Rwv?3c%}q!lFHKiX9?vnBD5a4!up3Gr%a7_Iaa0LF zoO~nWS>vFdXpCv)r+H(fh5V@4GmD+2bRzwQugbWPBy`iwqf&Tu^ixmr<&L7D{{Sb7 zU9IE^9$JWuM%O!gZY1{Cno&@u$XZIr^tfX*n!stYn|hb@+m*4&Cc{>{8;xq}aA_(Q zZzfVDj6URG#JUFeK|UOTJ7~;P+=P#~B-Tm&KV{BoT8exnL)Hatt(L1YPAQXeljaV( z)a9H*F3C}eVRbarG*eU86}c zhg&4oloSN%1J0LNPHM}|nO2G}r{kY5nCa!AOuLviK@>FzqVq*B%ce`AGsYq*LcPmb2 zkys~~)+BAv>BB*4P(h9u(R!9n-GYSKa`#IyjF;4|m)GW;Qw*0Jq=yfuz$jy>RiwqL z7`sUnkr!Ea$Oo7d*b9T-iPLltprV`wC=4n(c+i8A#!12QmK5~k8mPnQX`nT^cMMvO zBMypX9%(>So$yLsRH?X7G}DP?DtD~=>&Qt7r50hvG9Fc}z_EIMvgVo$A1lt-^;6=D zJwf?7DR#p(*X)7}rVm$5J`}P*5k!C2mJ89mKd7A)>!e+AugWx- z^(|uM{;FlEQRrgDw)wEtR;)w(fo)uNZyh*)kX)6pw)B-IW;ps$Ze+Pwg~a(ni@LVL0v5?prgcL z!~kg)Vg3yXwYa&LEK; zOMyvL8a(b&r2Vv1YHKq$M{=fU!?KQXhcxuFFtsixO9pnT!fWEgBy?IyH_1GAcot|# z_qP?$_a}!o&f7lzpmd(DCXw6+P>3mzO$cUQwB^dfojQ9*)2#Q?Y~@3NgE%WacrfUs&oAVb%FkCOw=TkDSF;k3;zSJo^az`RXQ&U6b!#a6!G{R&> zWhz%`P&sYmLXxnDloWrZ^QJPTB}p0t5yRtF+tU84^w%t5cy4&={{T>_GA%x6qBIRp znQ3tP8d!y7i4zU;N9P7Y*xB5&*`NId{@8r;2U~JmY;LrWn-~CtEUo zCS1rhFws#}5rU8E{Y_g0iU+JXmeL>-&qWrl;LqDYUPI}JR5H(1 zGuC3H^*5FAfp^8S@>OAR%4yQBHJ(p3V8ldH5>33s3c6g4c+J`vQpSlLHFg%bm29VO zzIAh_xo0LtQ_;!EK_0?I6F=x#D(xebWPQO@CsP|AC z1>{B;e>T4TtCHG84ibH6`K#13Y5+{CcF>*D&qN(hz_Y$w#W8&6N0T!KIJGr(bvVr= z@nf%tK`)bPj6d|hFDVarUD&HFh--oX&oAAix1>w~!*9NmUgjZSfB`jR9U{q@m!_F} zG-phU)p>FJPbx=IShz-df$ArEdKI2Fj7C^W9!>43wttbo zY6K1y66SV}HsMJJ8U*m{4=_Q9VpSFOSRMmMRU$?fp$VBLn{b53eHeyY+lJYBNGjL` z5@rp`z)&UyF;?MSP=ys|I4PjPi$hgCSlDW(VXT{1_mpteq&Sp~Bt;?A+m6m6fSSCn z^||UPu^NQLF$iEv;^_&Vik?&MDGK7-Kpq#dI`Z$@mo%Ew4}=u}Vv5Rr%!FEErOH;%P63VTn;a zKk^*Gf@XPeg?gHGt*@CQs3nr@{{S+}bh41b_x4+ZY`w(y1trK3Vs_Aa?U-0P4-ef# z7EJW1)c&GWj8=Z1ViLuO)x`+Ka7;0(@bYC~On)#VW(c77EKRsT{&8r_M7ru!Q`RO2 zRT(pu-9GPuB_q@UNA|z)m+MtMCL2dDO{u6_ilrpOamsWRcy(u+2&!Sen|rD9hOEi} zBWc%!Pw&26`B_LFF04V}G$6Cx8-n&KKH43y&%>IdseJA(^MuCh~rh26?iCC5A<;y8PM8W-E{;nAe1i0h!gsGo;j zKSMGGBZcSAg=2kJ&M?DQj^_O39w}Xi<$N0t^9*zo(Z?c4lhoS8D+_oL@UjMlsfZ3bHJFmedvEJ9??V)9*(8G}QFtsZ`muYjVDN!!X=(OcJ*VqQ=&)8Wy91 zN>g1taY;uAsaGCafTGH5!^FcfRmr?*3P@W7j4<^hr=n=9DQ%*KoI<*4psKPCJ(jV` zjJ|Vx0GmK$zx5avZ?++hWxP6BRJ?SAyi-)0uw$qS3HbPOV#zly)AI>=0BSfuz=6ka z3XL6#-t zdDP+9D{d;c0E5hQ-^;SMgJHENxhH6m8zy~fUEH)GC=L|@RaSG>pG+M~%NaKzzelB} z>xcDLqAVLfbo(A3IpyH8sZX4at1?YT=Lp;j#_}=Sdkc<6D$ATzviqkrx(ugio{<0^ z6(_ND*ghnkG@9wB;eXI<8!}?_xVK8<>y8JSNAt0SRpa#$;rJKKJ4-zV4+Ogm8zeqv 
z!@ItXamI_6rD$^jgb+BMzPi%fY84n)eRcdSeH3K=m3338nZF@o*eztQMVK)RCl1A? z#;Gb+f}#lHrP*F0WtHJpO%LxI9XKbny18{)Qjio9K#V-5kd}~1;w%5tC5?;41W>>$ zVMr-R6!Xa?RI(67_MaqxM}faOu)BzWRfm?^yU zBp9RAlMJ|QHB{v3gintQ@X+C8+BzXf0sJwLv?ti`cPwuG$` z;H-u^V3h}842Iuo#Im0fTVID5A!liW?Wdl9SrGtF6CTWIF^J;1gh}O9X6l(Cl}s^F z;q=fd*$u#18T`Nv?QvuD@TF)WULs7NDokz{Bn4JTuu8e(ny!LaDC%7{M-i<3D7g27 zERI3;ojA7Pl!SP8)4j;y6SVdFYb4*#%y7qyXE<=ji1{T1Xn=q$K`6tw&@aZzX&9F$ z-Cl4gg-3)(okYW^u*$iml5D{?V1kuDMAWm)6;GL=_hc(;_Xgb4O}0dweQJ%Qq-;0{ z*-lYVWieeG6}9yA^)pB2!A^j#;75BWngb7~irVf$g$ij!2$49N#Z5ue>L1g`Uo?*x zY2l)nkOiA<)F2~scWa(77RzeY5jCcnX;7GgeD?M9&2y@UDLThpg|%HglfKrarI8&Y zHnTW)OWXsf9k|k#kq|oYtA>G5Bw`N1`1I48<5^OyRnVp^(bLOl;9AYTX))@3<-XES zZ8-1d8dUN5SC*Fb5JBp$boDuV=A)K6n&AXjlp`GrSPOvrLksBEzb==M6

    WKBMQeKu{K6*%|r*N@( z@Td$oY6B}Nx4`jISPDUfK^!WpSqhkwT;-n&j-gmn3aX_5evn;&w{x|4heCox9kp)w z1FMOszdM5(nVMLnsgMG)6%vI~J_~DGVTolxiRmF<4D1!x@a)DM?+jDJ^hC!PD;L$tl^5RwU`1 zE24!Jf?Upo39vWg$Rr6g;uHXY zYDBA1O0q{2v`Xt{X-NrfG$4!peYo0I6bOp8soS!Y4kQKSoRF));ARw8&>6qM*EN(#XJWv=V z7^<#qfK~=g1v&3Dw6(1@G@+=%mTGtbRV=-TbE((9yfd|Es6oP7Vw8$(l%)YJ;R5$Ng9=;E*QyD&fD1Q2+`Db%R|#>%tn!Zw;(;+-wdn5J08 zFpQs@1vWP?lRMxNR)}V*r(u1f?hU&VJZ-@}rcZXHC^mg2iLP(k1Zqsjuu&|{I96fE zS*C0!FigM31 zI`J1AO*;zOcXxd&fwJvdd+0+ogyRVF#$P~ff2^&d^U@r43Ril`wboSLGFN_bV`-0BL-ZEpRn zMfsvu40P2}B;YU=1XDEiOA5%8<7-NVyneqDd9nt|aR}H*z}87H=2~-90!t$mN@(M_ zEJxb4^e5ZOlO{d&wCMnfXQ(DwL0J5~Lb&i#?78KN`b}skY1v$;OH)@s>2_!&S0XcK zJ6sWK_?|w~f_$mfG@>T0GLfXka^?gW6qDstA*cibtr%dAdu|QE?b^G>+d8p|4O;{w zQHjQ!vYr)JK`lhsjbw8!CH_Ka>`KTEsO#VB;4?ECRuof0%LJuJI>PZ8aoOTB4p9 zs}kE}+HI=~5vL2oVDZM473qRT6vO>jqlT$!aN5c6uPm1NYJ|8iyLIpEJ-wVk`BEL4 zTJ6@B$3)Yg^Bi)Dq9v$=Vs!NGckNQfNQv+V#CUOHnpBFY4Ul7Lqetn}tubT_TR!D+ zjMC8b_p4YIDVB4l8JVJasAI2MSssRRpn{IF*;YaJcWb93PSZC*H~r1PNiMRWM1Mr> z6gS+A_}Mz7Y)gR8vWgxbPgR3r$yGqmLphE?R9%R;5XWL{nCE%NYI}HVZax`HIXd180VVs8~T?nS$&L zUojoMotcj~$yX(#bs|7-Ge+sLUd+ zSNBwgKvKuVFuy#_#3xW5wA+@0?VoQ7aT#YR;uvjJei;MdnAJl`h)XUBG>Y+(i)i+n z-pzj6ceomHV{J5~l8hWU(sjJ@TpEmyu}Z8T9L!i0lT>0~@52LCjcK67!#fpK{VvHN z2F|NMP;A7@a#O&Eas!1P!^ng$Zkmdk+o~c$geSd+3>Vsg%YYHUHMvmPOAM zYSf!e6cRc_%Ca;0{2#R_-h@^tGz>GZ= zAda5R;L<`=pQ!OO*`R1?Ow9iPM+x(%t*nr+bdW#J@ewmoX=cn{AhqyfQ(@ZqDEND9 zkTt!~rgwHxe)i+3argijCmaXkPJYF+-YEcth{VsfBC}OsImZ=+X0M;kA$;%m;W~vO zhTvS0p**9^-a#rDo|8!}-m{Ps_JcS{_D>25XUkQ;JvC+{Q4CHTB}tIaQy~ERT(Be9 z0Ji~*WC}~=ZYycyDn9xeyPfxphTCmM0OAro+-YNm;T2R9iC&gDyz4zWOAIottzb-G z3uIh`mDJ3NWRe&)x7<%nXMx?1&Vn(CvH02c)D$P|@B-1R=ev*^rFsRlS41MAQ z9WCs?Jj696Dl&Q#@TS(5-2f>b5z@2oHH|QwOBSs(*ls&LJpS!L`Knnd-$CHYe6<$v z0C77^JnFR}(j%*HzA1K_b_NV2$iQlyo|0#z)=IvZ)mE&M<~5SC29QNPOd3v=G=+y(wk+lL80Bhg3Z>9$Y_P6oN98`BmhV#E`6+@z9C ziPKNJgx9u-NrEG)j4f74o(8)sR~FZdMvM=>=-}T=>N|}%T2S0+GJ3W}ESDq{rUf`a z^2osrNLORIhP#{S(*FRL3zo?PWR99x!6I?TqMj01>Ig-M#sCac%vHG_BppW>dfjOU z3UQ^SYET<#+*8DnNiNvYZFcfr?Amr5EkHV^HqwkrNZI(*c)*(Gsz!w_s~QtvO|C6( zj}~`n7{t?P3Y{v#DN#vHDxJ)}a{wP~LtqygF+UzA?+|tfHMW{wCxvr-K~_gvnqwP{ zrq3U_r?I?!oT{|#;Z;l&0CoddZ72Xh z71#3FLo~6@NH&kkYZXU!T}gE+2X7A{PH81A2#KYv+R!)YrAB2a zej5+*91>&+yHnlr5PFC)9dt*WIc^&b7XdtHqoSq7jNxk~T=;e*re!J?c;}uj_O+1i zTmJxvZ9I@SMYkBaQXX;whuu9pK^#Q|v(MVv%V`9U;Uac)9iytDoNE}15<^j$DWfub zsUe-MV`H%FR2KLi3o{FAi))m*V6deqK?7&Lg1K|axL~5~1U4}pH2(k&X3pO|BxlH| zilMiqH@hiyQD9?e2qWmfCTx-0)|nmX8jdF&o>5V5V4GE}_emijz{78xk9|A;05``e z;Et;nta#qJ{{TXhnbnGRDi_Ic#CUGuiOvW*7Ico2jQgm)Svhi5Gq4aak9~Tod@m28 zdFwG@J|PkFDQT>sg13Fa&?vS0fOsII4|jxc=MYN*j(D8RT8 z$qFGPZSP~xD#3DOiqalQOoWhnI95!(OMPVzQjy5sjt{yGD$tY9fdLzS2qM z+J6OUG6x#f#ehzdw81?C<4U?5Qz}bJtrc_8Vwl;tp7FIpMj>s1SkuSuwY)|6>uDGO zLF37%RL}zyqGK2v zJn7#B#M-Kcd1^+>J1H13&Gj=^ z(dOEJ(GUd!Ok4)2rB|a}SQN0CC9UVNt8m2V}vUa}G0k_gQ?&)e+ z2^wc<{V76J0aBE$Kx`4SuDQ=i4J}M2n=#PETH%$GcbW+z`%3CB^mA`or|k`0pbRfTP-a^#xN!$tWmaDgLbmxX)uF` z{XEr}-mO^0Q zq44}EM+P%3G*ClLm!!mNE0{@9CJhNxasX|XclUlK+)bQiR?N7K4xK<^2oMRU>@y3M zB&4E2GlmmW=EUmSrmR+AbJ15-DgOX{?N3cBg=MmzG0n!Eu79J13%5h8CqW8OIGuyi zDB-sj%fOMQeN9io@vK@#6qt4c7oSaeD_*UDuC98TGxrk@wDsHF(dFT7)b;w?ON(V>p)0}yTq+uT^+zMMP$TH*mJ!8>9& zbk&i0tU^p3hKhGpGFEBFa}{nim}Q0~9o zXrbrW?#iK30XPkvU1qRjIe!&gMp-d@heL@&OsXPGI;mn-41Uooxar(jaCLK#cMYe$ zwn+nOlC7N-5wyMUl)Vn95;iG3ebkSH&{4p#K~06l3{n_sslH&%Qf_x`{UA*}q-pTs zK3$Zu(lrDmiQBbCj_xj2Rivp=9T*CcA&#ijr3t5~32B}R6^Ks~u(=!5lmf%YUVAJ2 z$L1Y$r65QeK;hPUY0Sx%QqmFu{{R@CK3h#W(IzJkl~+lDL4{LD1XTErGhG3aMZdkV zMwYj;UJ3Qwpaq^EhemIkRE)-z_*UBJG& za07Va?jO*El?4tx9aBQYTBWvv)M7S|#w)y7ND?ZUs2LEb4hs{d>g0tF1$wSw4d#QCk;i&|F4-|!uE>l;+J 
zDSKe{dQztaH5Bgf%~=~gQ!LRJ+Ll{eVb?>*=X`C`0mPj|?focYk|labQPUebDMc`^ zLm!b!XE)oriO{dSd!58zwxfVUr@JP9N)xTYAPt~bI-2ZaE$Jo|B#|Q@(nlZ{zir9< zZOVq4Q1F~jNUOT*fay|j8!Hr)*rhzlCLc5`tM1Atk9o6uz!vx)%Z^AQ6}Fhr;#|XF z>Zx;9ubReMxGH9)RZ6I6B4&Sh`z@`H5+#xb5JNxucvFQ<5I{RBKa$&Ks+g9f2g;&e<-quHse_~+ zI(Q^DsXS^GS*C_6c9j1BFOF7GP`3o%TOFgvUR^0%;f*(;2>Xl}uA!QeH`_+i7H@le z%kB~Fu^qYPBpgj@M%2wnnwV7>-m;ROLqmL6KlrFO9&Xdk0aRSpzk7*~rxtOT8vol=$>wMlB@XlGpusg0D60?~fnR&XQ= zs)Q&2L{z4rriDIYNTI7&7iycAwT-NIU$wqGstTmjg_NDUDIH%VRaG*Sky!`}6rjlv z2Ks0$566!m4ii^GSqj9(0GYykE(s-us-{}%>5F+^EH07j?R$&yQ)y5Va1?9$P90BF*nBjO^X-I4 z(zD6{@Y*9f{d{2-gsN2|&WsxXNd|-Ztjmikd6XD%c-hK>R~Whv_>+;QyjGT(n4Of; z<_Y6SSE{M5#a3*45Hmb7Ag05Sqk^6_IuJtl2KU#3Y^k<%LWIp0UKdWzihRwuu3V0u z!a7>ok94G2a$>BTg~ym=QlyU^t-%&&lUg#JLP0$SXr|rw)HIS~%7a0MoO+rxucMZM zLtZIefh;$Doqvend4Pn12FfvTo;xa3`41kiN|wPZ&KrZmL&{!Yq-_I6X0Y$uhV5Fd z!6_aJFDp_EZ98ZI>Lo@$gJw5LtJ)7TG)UhMl4OQrgKqt8sqrI(xBAe%Ai(OU5~6X( z){v{2F^`$~&*l{dxj@-h=ga1;w6I`r^>L)$x)eNQRkaLiHFhskiPT9~h|1N~Z*MlH zsf)Wn=UovBQPdnx`rOyeo(t#@SbuHLE?1PGN07GMsX zs0XH=OMWrJxW!Ywi^!@F+eGeo;&H;XrcgB> z)d=L0HV@rEPM>!x1t~SK+DdV#W>vS_CdY8O@#exYGhXgslT+7mK*!zPVoMX-jiNPS zUhaWTHIZdxjfUfY@Ts+jHKumeR*}M`Ln8UQHp3vc=hXcisZKannrMLv#8ZVla)*n` zcM@Ekq>w>6Tprgv(})8KXS+&)^Qd*OL?ehtzviyOp(Q#qchGXmFq*m?+GB+`LS8wR zP%SbL$Zg_1m*q=fU}?oHs8xsbS2SZemjTP~h*d0!Ow|~C&IXaP1@i6()4`@!a$IC2 z@S>*~P+kKXGPSvKlDuK`n9dwy-@D1hXk48Bv7E zA!wB`1v`Arwz2eS6yQbSi@@H;5*Fh$BhfiVgA45*6*aT!0An+l!ldZKQK!Zz7GN%@*(2vob)7jFOw}}5YZi+HdZm*QuFIIp zG`M`KTisj0=o7)aYethjN&O=9B_%agGfi^qmV0yEIf zya3XyfD>!j>O6Qv;!A2GG?BBejFP=-kOHvB(9@j;X2r7p3y0KK(VWFX(mZodK=G9& zz0e&&BJ2qu--?rV;#ehEiJD2=A;&+Wdh0D@G?lSITY^)^ON&G%rjmufLq6sv-e~SD zb-!*HodqDbfgajS)}+sP3{KjQmh;Xlg<*16W}F&klL?inY3nF0=DfQAE?cowI$!GI ze7f`kb&N5Xr7zf>WHz8@;X#*BE9rAqWy?5(IFxl}u11M)>eX1LkKMa(_He&Cx(_WR zQu`}?4X8w6OW3QhdfBkENefdtz$s&?UwjRKAv${x3|nQe6Es_-i6qzm*VfjarlT;f z)T6{9mU3DLr=t;hg7@4^jKqCMoknHlT@7t(O!Nr&n&<7yKbIsdwyZ`4M8WC6RJ6+! 
zxOGc5$N7`SJEqj2c%SfpS|+`+vu{s$pqB;!Bw>!v z6jG*wk2K~4m9fmZ77;+o(bZzlM37FmRwCYEI`^<2Ulv)R$6Gqlr=Xp*p7m`d)aX_` zK>K}g#<|mEOg0$WqPHHdm1+v1!fB;S#$>&&!CiZMFLBB%EJH`cwJ3BPVAHK;Ygrf} z2Y~`0deii@d0r+BNrz&XtkFiQD8vec$J{Y7vw?4k;?lNNj1p(B3Pkx(P%1lmJ|I^p zF)DZn`5d)ZOBzQ105?&H(=cWve(PIpm$=tnZ8u?NG?b38eYLLZNhByMp!9VflTG6> z(m_@iE#+s-b)WA-6tHn^Bu7m+`Dp|nxTI|YCYdo@rBw8g z*3{#bILs0#G}Jh9$ty@VYSIO;?SAKp?rh0SscKQ^0M*&d)MEsqb`!E`oKc8qod8H^ z)Q!TS>BNNE#2Yy`_;Cf}r3{4?LI;*K<+_x+f`p?|j~ssrOG$?%Sd2*WQB#My#|r~I zszVRB0pY-!^6_$YIG7mhB1oYnz0kF2ge%3+L4meuH;Va`ZnU&PQxA}BkU38=jk~j6 z`tTpTFR6tFn>t#KuYDclN*5zjiPpXcqso%9O$|6m-kLey_Aeuy3l715Zvd~{A@%oR z0H}ITT@Bj~G&PmL_*7vhl`h(Krh0@Wxd9rrRm~2X>aGrX+9lrai5K0 z`B>+PnZdG+(eAjoKTo%Sw|n;70tulSJGG!4mCQ0-WD>HZRwg#K)eEMc-W0xtDZ*Br zw6%v4k>OSwY2KI1#2G|v5k2Qpi)SDX`tY&PSp;HD7&f#zaDh`PBJ$+4GQw^OG>;!~ z;2Yz_#tKPLIQ7<$%oDJY)mfn3PVUw5uglo0NdlGlYE<{=fgU|=ck{jGsj)h$IM!0wfy1SG^sjl&V*0RUK=alve~ z#EN0tw3Pxe70kl5qB*JRR+)a-X;82#dwb1ce}^w`X{mZrlA~UB<^fdc0;Rc;p@w=a zM9VkXQxX(LC%D%_J~!djt-4xwOQ?!1xN%M*P`R3Mqn1i|9V?`zX;|!!<=mfo;QQOi zJCAKRmovFq)Eh`C9R{Dv!Vr}h3RO;XiAhQgH8`dbd(9#D?|nVcwLv2?>n+) zM$u1{)bP_%!z0uA^$J5&RMV9q-?6$i$>FPa_YOI!gy>Gg7=!7;kiTti+hHz%qr!IK z*HU7|AQG5g52cbgNq3fH8_}59=@8Yl(0FoH$!(Zjx|z91RN|AdLV!2Ux~qh(l2aRT zl*KjI9J5;TE-K8?Udr^S#exzSX+=POv)hs$S7~C>64Rr_!147wC=Z=4F?DKnqv0Dz z;g7bRFq|g`p^8v~qK#RH^1M`3B0>}$1FPAu@f>dREVs+8$9UqQO2EvIZ$)1yzJB`P zgrI5y2slZnirRd@wbN7Jc*Hn_6`+mM7jw8M(FW6P!`OTAc1+{0ptRbO<|KkhCaIfS zhTIOk+K_mGupXM~izDZ1l}T%GYBYI)R+|%>YO;+kN$~o3cBQG11Faz^jx0eIsgbfX9%iWxo*-5 zR1bL(9DM2v)AbULDJdzjx>qtAtt|~?O$6wD?dx-;>~!UAtn((6v8^gW*q%PxrOoxa z+CV5NT7Qd=vYDbd?j1kREh1EATImaapyB~iAfr(0eY$~b9zAXO#+=ZWf|8&~Gl+`S z(pYsQmlQPY1Z|pu{F63g7>d`7YPN==n6NRJ6{3Wyg#b7f+plgqyK9FPs611LMipCo z#u`Qpr9ygfH8LD$J!QvAd0!KO>6k2dg)iullf)6(f%fr7^9z=V1QkHr(@33Zm86lS z1xIHi!zMrjK8 z<_5FUy-o)`1w9)XnB^+zqn1V_U0EF1TgKY(mFIhiQqZJ<{S!XwIc4zbw54)L{xAmK zgHB0_NS*``Ds*&t zS*c|dIYyfirvQ^ZV$UCzPL~pI@9^!%AIfZkKucPLVYZOk=9dG+x0F4fg-EBV$`I0^ zOuvWVvlrVXOuL!Qt@cE_!vaC@9IFi4vXD@KgFJZCj|-cVq_|W-laF6jIT@S!F{q~%aQww= z`;teaAZRpFzHv7vdhen z;2rV?`bgsZufKNOIHZJ%)?yIT`ck5?z-;&aH3nQQ>MEm~`kzG#!~>;FsUOSBbD?48 zhPgfi?&3sgTf~Hq52ablbq0o|nNi`0@|dnZW@f`?sFgC`^6?qGl~{c~9uP4$(8|E5 zu+ZB4Z5McumP{UsX5!f->Qd4aI%nmDLQF?6Ra8qQHWgJ~N9Y2qv6cvRHz1LF`v(CT ztF1cON)h`&c<}e*LhQANQ0}E-f+rkjqA6h>Gt?>yYIiw)K{PC|)6A79$7k9K>9^iC z_65Q6>qv+uNa`3!k4lp+KaP2@(_)xGYQ}Ho1`2FU zs~v1?6?8i4x3>ZgWup+2rXWVyp)+@DAq8m!88bY6n0Zo?n;e3!C!=959x8uCD^gNr zrg83+CMMiJkI}+pWXn~;PzVM%j13znGAn4TCs3XT5%8M9MV0F+Wq6H7B57C{YB30; z5`(tqUpS+C!>|O~+rrVJ+f!jaG@clP(@GNCuAwd!!VgXnFMgN*;~G(fVh*<^P$HfQ#J`pQ-@*l!4lV0 zw6fAtG_QFQB8Pa9Z~p)w-;0p*$Vep;l@6JYI&qs8%F1>4lfa0RNItbRu~ricz9|(l ztY(ia3W=SiWxm%>D{hCryuIF7RK&K}aWRZ-9#om!(MW$wt_IHykwxCO2gEDBVq9Ae zp9`L+ZB*EWC<@IP8tzr^uYYGRV{dJw7gDq)Ffp==Z&LbHd$&@ecyK;c%y8-qE)+^! z<5wgSur)M~xd!E7ZQah(sMCiRwyJHQ8h=|v`j0wCXqR?A*h_<>5PXd`)noKD(kx{Y z7e*phO4uYqzw2tFi;eqfb4&#QBcdps}_gs1ErH_ecUWA7R&ZALrF@eKVC2Salae^kiv6dlYW8U_*?ht2}4YsIgm5?yt zMHuC6I^s!igihGTO>(1q(^VR3sM#uaBSg)G*A`%7qJZ;ERFuk~b;F6(-4VYAr-J>XI4ksR(j46@urcpo1PfNGU zk>in5?TcQ<{vG(jO2e@dlS|gw(K8(3N*YI=x-XZWra6!Vi{~jLlKZqbAe(8zx0V}L z!;eiXYg3@=IP}xRhO(6-g04?C2?M)S#Eb)NHc(YtzsHOQg1kWE&}l-8i7~WAHB%b9 znX4t+5hKgoj9bdx?70Ku%ebvrMkCos8f>L~Gg0d?h~$c=ZD~eAVx^7=LZy$6(_wyX zq!gSI2;*BX>XAN_)l-SZ6&h2^jZsH5{{U(!Lm5c}#4dxkD3%m)NcYuObfl1#5Pu44 zfhuLZl(lq};{HiAu~^-OucHz4@k@>a8mnnQ5>iASigzNedZeN=I7)cROz=~;zQA@; zSO9)D;&tiTBCekDW=R#2n+=<6bInb$DXx6%N!0t+w_&f}#O2U%9BI`kkt!awDj8&! 
zR*q_@)|x(Y$>oorrD__6RDPlN|<3ccBXVfKpGPeVt(#c5`NWsMLuqH zFG_|L4-j{jFe1R``Dnp+S>m?f2pSvq7w^SM34(P4O&8J}04EMKlE$*`9a9rlc`(z> z01`(W%oHvCqSriSE-a`H28^}n#0)4g#Ih|F0tPvp;(w zWD$~@AF^$3Dbt}lFsI73Ozos5F$7UmL?dRcE_|w_rwe7QYkjR6OD6aWV1?baqkw>lnRqh zg_0#EqPgDSB9}%x3)c%Xe{!r&6^6iGfmJnngg7z4mHR$@{?X%_3>FBg2hyH8Ram&Q94tzndkk zpx3`Nw3^ds2_PT1RQfDG=e}l7EO~`PQ^rn=ds#-B@jHqsz#4PBm11zCYw6~y8ax`e z4VqMrvX-n(EHxXH7@S##fa(bT?hA4S9SXvFsGjx$@H{rr-pP389gZ}VwTmp&w6;x> z`;8ZXD{I_embh^WZAlz>Qp+|-T1h+?CQqaE3Tof^>Bn) z2*FY)@i!1QQ}s14Of8|TqMES|=~L!{%n17=1F65&!snSvj0#khDEO%hh-A#UJyey{ z7_8ZLzFR6&ZwnNSZ}_S9>-2KJ%Yhyo4R7qaJOtXv`E2MPi)%zH9oHPbdDh1bIz`=E{k6S_p5tfJn|=Y%ehr4mGR zQK)lI+G!_~v6?C@V!j+jW~YiNcT-i?qX1)1bb)c0I1g2XEI$m}@1a}mbM}DmemGpz zh))V-rhvrptg%*Mbaj+)%Z0_5=qikKRn)Z*gq2muUHyf?w|^cBcS({S?;=2;&hsTj zf{jp4B9s}6rEp@hHaF7@Sj$60A&(TxRFX2xcH4iDz@TU>DMm0P@06T*sLVaACn*&Hc7LzZ&>FAgJ9 zPnV&?XJNFrt>N% z!;D}Wn>N&7r9iu`j6I`^>;gNzTY9M#!bx;tk4**m{RRsqW?qCwPAxK2Vl6C@tZaf$ zaS^0bZ*u>ot+7P8d7} z$my2qI%~vtS!;I-4i{Ak*eBuTO6>CoEryhY zm7TCYJyjmE3*G%TNqzqS z_adVhm3UG;2HN!wG?f!9v0*Z#d1O*r=<< z2u>JLwpEvvJ`<$rRFkV1qzz>wRnHJ$N}vFwb6ls^M1oWau#|z>i{dlDtyZ%6w1je zNK0Nfkt9NY;$Btnz{8qK%wdMjw0P6tqaD8*j|*c`N+pguc|+>kT31y+6<{sfl_)|} z24q!RaZ|E>G^I~|9kP^Z<68}^-)q@r40iDbs3*c`$sTnI3Yn#rc*Q)iEKPa*vVnep z&)<$fOcH0#w2PD(!VNxzFhS?aeBI=PiI^)X(%0Q<@r4wV9W~`T0&z9p%hN)px@@4nzgJwn(=Y&8yF2nlCQobaO$U+DX|HaqlOxK zh~l>7noz)t*p=EgpTC-CF$}pPK#F-brNqL(RX=Td)ru-;X~CyU%DY%HgWLds>8Cuk zw15|ko|^IyrNRUVsqx|Tw5#UJNe7i+-e6ZYkb|#l>24P-*RC9lNu|l|m`TE*RL*?S z6*dym>}8l3r7K`Ygd&@Ze0Y0ll`Tz4J|CSET;C#22X4V!cVle@WH@ygrkz-otHdUg zaZ)Z?{vQFs4W3zKyMFQKuMXE{+fsP~Ai*p=>bo)f0nmy>aH13 zDx3;%!;FoU+tyLl)QM?w3QG<#-6wY{SKAP{k_odpxR5UM-xdS+L4HKAt#sig{qf>Rx+;sk=E~J*P|X{{S$w%#^U8 z0+4nBXqwY7vq}meg$^J=+evIA1*pTTF&QyxOfBa}-BhyFO7{(@vVwbicq7UgTTJRx zX-`O+BD`@W{_ng}lNewp^`}8mmnV4ZC~0ZwtD|GcqzRy6dH6{|k* z1Rm(>ClD1PI(AnqJE(Ed(anc0V5O;!XG2J|RPE+M6dSB+^2mT|p(Bj3oVDE)myW(^y_eM49Jk6ZiSg|ZO9TgG4!FT33W0ILIrM8IV*4$XX%_stb6rM68 z*+q!FwM)uN>C&zNB&*zT6tben;>2wq%F$0Gp8h381at;dFc-XXH8gf#uKxfYEgjMX zgnKsCLe}vho*Y5IdTgZiHGKrwql#Pt(=34}r^K-eRlLQ3j^SCfC)uUD^VZJ?jE3iyM9(&UY#o*_Sei(5mv{>=G;QN_7mYM z5OErXKD?l~tw9DsA8MnAM;d;@t8jc%&0IxjF>^V!EnWi@YgQZlCO44)`?#{nY`UF9 zCI>|0Nv@lDu%ybt+v3~i1BV)jDKa)Aiaa32F<+IT##Lda0I%A+I+Aaw7UJ7?S-1d%nAkxP z@uyZ@XTzuxp&|%7Hqz4kR!05f~Ulo>m_g#-gSRP2DxZQVT_fDrlelc?TS~`H#c#2YN%aRl!5r<5U6xD~~ z^s!0u_yh+Pg-@AZ{JuFBe=_#kw-x|>+#$I9ql(fIJtR@3xw%7nim>1?zcc4Wk7 z5z|v))k31;8meqNWQ9~;ab*VEz98~7&TO1Rt_jozDV`ldhZ$yQl`w^3nZWh+`O{u8 zn{k{Kq4_)`5X2~1RbZC6V^|PhQ(>*|ro0X7VbrBbOgs)das6oROLdi$9}^^DF*Da) z%hdd13<|i$4J=f3E-us!46(_e0Kc^RM-sD;2iydb4$Zjr)vh;$uz&#e3@U9NZCpxk zE?nr2##_)XArM4`En#hz)Ezz?FU<0;ORRHV^*Y^93gS3*KO$55d;$Lwfx6YKqTA)?BK%E=`PAa0RT}+lr&VKsYL9cmm03XaQwj} z12k1p<8w1s7-%Zwr}BZ;?qaYc9}Woe7mc#C>+0(BG#Snkmy?PCf+rP{J-^7u@vcELolsYv5vII~`wK9V$XhAsmjE^A{f|D#ZAr!(I z+&c}$+z4?BvxbOR`I2n7kiD1#t;Y(kvKH<_M5PZ*>^S@2(S@dH;~JEdm7}6*wrsD6 ze=%AyRJo|a3@ccvDTozapcaU$1^dS|_Fs2ulC`BOlY|eARg0Tbm=uuP>I$&cJ)ieuG zl~OfLd!-atnx2^+G~5SeE}v&zY)>|XqqS4&;|k$lBn zu+$e#yTP@+!R;3!+;=X_Y%ZK%bb=C{oj<3XQ#Uz%<*P~7pj1inifE|Bu)4=9in~)w zA^j36)SzZ#a0j|<4ZH9=o;#_95V>6J;}mORw!?`EK?A3R`O>pe6!w|?O%md>P#bXxpcph%SwRiv;6o$2D5 z5;p$$y}j@O?O=FHn<7A8gagKnKR8qr2*hKuo+v4*D&VMEIb@>?p;1t%PnoU4$gDN? 
zUrrUgAmTmKOYPEBf)Zn@X~Ik=5`mA+V)IkWWJ##wni-`w9e`agZ(!kioS)&3JSk&n za07@3&azVBm9o^LE-g@L;WznOs3ltxy6<8D?cvQj=}8z2c+GCzGG?Lw04&93H8ixc zQb`jrlvG6v22~n=x~Ly_DBZ#kNfIl@-AV&y+QB9fSw`M`k<&dqN+C1KksZv;S5ip7 zV`It}!jeeaHA8N(ilBq-rxm^D;#Lx1fJJM%AmO&B)>Kw}wLKhD#4i|~o@Vn|Nj}dJr!3iQ zsMAXCLDLglsi|ukhEJGZ<`pdAqZ5^aG#$Yn-ZC`qD5Kd*ufK%8C~7TzlR%s*$7bJDb!uMK__ntWS*-Tm7Yr4p%MXc z46V8~wz^-I3qv9C4L`c5J0iGD5IFr6elxTD(39sc^bBta23^tbnK( zjn)f)Zxtc5rBbzeQpQ<<;R7B#X^NC$wX(d}gt4p>{*J1aXhX5F8h|Zf;mYCsGZd}2 z4v;XKV3Q^#O;uei4$#v%^PU<=0^AYUwwm$6sZ5WJJT78m;X&pl)0)Jtq@$#5we>#b zD5Ap`tOGd=yDPn=49wW;NQ*e#&^*1Ke??uYuXZp3q3zDJ_rD*TX7C^y>uj0Xz)o ziWv^OfFsA-!zLbaqBN#GDFeZ^f(#HT478Y8l7HuQs|-d(`KFF2*~;HiFKZuQ;GWs^ zIAwH4c*}$Bh$9MDVmOCH>Zw|TE9ET7M~F-#jd8p>s*-80pK1PEVv-H}2OJUHFDNO7 zg2K-nB8k{y@g`haN=FQN_fTn@I)9SPBl@M9@l3&uF+)BdjNz+FZo}VDeCJ}yf7RE4 ztk)s7+S`RoF@Ybg8rj}F2aNzH&T#fnZCjU46D+iKUSu`#6)zK_g&>Z`$AL?$Wi)(8 z8Z`XS1H)Vhr=~%^(WZo`0F%P<6uvak#}VsHv)4l{L~Seq;g8)h6Ju}L&5o*m*_D7( zOZqg*Se6+bD4I+jty|_L5$t&0-ssVYV8h|X=na(9R}QNYI8&t-K*KAmVZ-9hl+j0B zC;B}@#>K$xc{bl~(0N|yYVetws!FsHb_#_LNpl>MJ>{pQqcXKTIG!a_1QE8@+6dmm zLv1-xXy7vh)mHAoORPuJH&HN}7%>Q`;jN0PR!F9fXwVSi_g&4Z+xNJz6Xv7{s#uZeN~UKDm?ThrcL-M35bAp;C3>Lx@@oC>m5;)MR7xrkK=#ef_0- zF(SbGG~;PrHR5F?bXIgo8fQryw&e%SZo}b!XDX$}+N)(i7~9iN*nLNo@x(V5j^+;_ zi=P56ejKV&CY>t;!1vIfl5>Ry2|Yy(T(5^iUjsuPCkE2U;O;_4uwKW8R+lPdm^(!$ zU9c(#u4vL^97i=_-zZlaioTWz+G9K?Y?}z&HwN#@w6id2Le?TuV;VAec6zGwWBFP& z#OPWTma-*nlR!na1bv}%$~xI=Vz8y5#1ILYI8e2O;}jT>r=JhRHan>@(^iixlDeC2 z?{5>rNpLP?i8xWXf+KAyR%|-0WP=rnB#JG+GV(6<1^WU(;>Bi7tdOJZstYxs$UA8# zRRuKlk;#VQlF?De8o^B+G^E1Fx7TCbc&>%u-W(#T>l#lQMYaQ5O6gHb^!0TUGa8uj2tzA|`$E%pKR_Eq`LPC+KYWWT#HttX%v9G?!A%)H`igc6nI@Mb&?A8i0p?GRq_c7_CXFb zW)|_}mAiIsGixct9yATjA4=|&os}OcW0iS(7G{R1LrEx@%S%#O3VgutabR^ob?w7M zT@a#pnn>-epDGk+>oTTpgA<_6m_x=H4YlO)Oofs!y+!@1#Le&vJ99wzF*7nb%il-pwB)+YV9E3tjMrp#cihw-9Un=Cu1_@iY! 
[GIT binary patch payload (base85-encoded) — not human-readable; omitted]
z0P><0hOPA_r63)HLUBHQbq5fo%NT5K)LE)jCjNCM6L}Ru_5rE-&oy=VR>HMhKsW&# zMQw&JRzU6n+a#J|n-IagxRN?*j7TBON)2sQHYXAc=3O+W!0*cF$;n3;C@ z)x@NhH>Y$l#^cVJ1e;M=K|b=>gZy~W%G4Bfqs15uHhIL--FngBRAVRSPE>~%nLyo2 zjT-r|T0jwj*H9O5C)<7&t>kxd>SQMZBN}(|YAYco6s&4RR%ENgXNj>Ic*2W~fuxz5 zzy=$^Qf=XT@h9@oTRqB&RAayoAx`%ND@kx+vl^HEZKjrrMS2&krKUc7jYSiQY~N<; z_d0t;_!>((R2CF<3^Cd1Ms8)LM5Y8(+%p`emZAwM@d+uTb=gcF~E4AIn`vkTk)gL}jj^Ng9lL zdDCVoE+LN6Agaut$m-mrMwYIb;#nHky2`A}ZdJ$4x&4^bG1z`IwB?PsMzIIqTq}BC zO$yT2)MC|Gi&mkQo*C8&BWFN20%2qHi*TaXc$Ae8q)Cq|FuKVSq{P#8Mpno0gb`6; z$um{2`ZUz?BoXW*OSpGneij@QvAm7BYu)K2gNTf1lH(*Oa4e7q40zCylqmD z;}ke;OCW$w%Pn(4i~;iqoT`vKZGHrv%vRw#8&vsnXjH;jLPV7M(&rGtbKNSzEoL`O zn6Q~7D-_glJqs~9885cSUe-KWmt8Pi4K0F~4&F0JU2K9}O2(PPqNY^z>n75zRbD$s z9aViheC-mG6HM!rR5)i=8O&bO15+>!pZ#)G}68Bjfr~8owRGr&^p7 zD$9shtwB~#<>3T$%C-d>P!5E*alv+1x-MKDZm9`@h*s55e=y!uCK7}w&cdD*mk-GO zPsFiGcyT;hr{(at)?79UVI6E#u@V3>5-hg(>%)7uShuqQq@^U7^F3a4g%=BP{{R{L zMB`X9K3~sxl}ukMDRCxSESQ~qW}T#K-?<@A4YCZSX}2oEc!|VO7ImKf*my_>2?Gj# z!ZNlhp(DjH8X4Ls*0MYrdKOqGbZdEOA}IuGslB;m=gkHbP=TlzGxrbDkX_q2>cJ|* z2#uI<{c3wtV#t;FeKt&>G;EuirwWZ^iU#gWi0`?*fN=8DCkjFy2qi}zqB!9}e8V>> zDr~F5HuQ<8I8IZ-w!>FdgVeP|tS75VIHvh{fh1Tj?v}UJ!&^(18o4PTr7HVG;UsLL zcP(CBn9{k@MC{Z@Nu@nJ@FK}Wm$EH=B=wT{RQTNfQXK&WH?Y0Dc#k~EE#7le$Wj*o ze|zR>%Ph@pprxgSrgjmJkfw_KgNopgM~UVNDe(#C9&EA2P@qOHalNl#c)KZUyHP7b zv~Y|GqKw(KEDbIIt~g8s!YVy9IcX^6HS~C$4QkoqIBc;<8ZdPPAnq@r;MFZLpe%(p zK*!}lSBvHn7MCXo*%a%SsB3Zg#U2VPCVE@f!ub6wGNPyNER4d$4Qx1(7m0i(RVhKl zs2Ve7)Z0k`I+P+3Mjn(SO_8ITot~<)3GpS`)wI}j&_y9aE-XdPYhs_>2i{Pux*as(VaQrrfE6sqO&m9y zK2r(O0T{rKT7SlyD`l)Psq(!=RRT<_O+F_?Xi{c#pi=D@WAFg{cwuvB)uKj)gdUIu zFfIzD6zlVxDGgDG*Ud{JV%f^KiDQ#U|&?Sf&ddDWu17j82k9A}Ok#6Bf?`7^CVt+T($5iZ}8>rbDPB-ZMi8YU`W|b+u2*P z3SL5FaVj*#(dK+qAD)W74a!;Ss)TUd4|KMqhWohN3cI{X1w|)`h^jujRuTjyV`u_n z+tX2hsFXWevV6%LM`;QGjr_*%w(;Dnf5e5jfbN2U)VIJBOAhmuH~ThBMictuN=zp& z!ibNALs3&x822hD@akz@n41fkh6gv$(g-E6fD716Njm?2iw_LE@*YpsFeo2=XN@TXP@IB5xwZ82$Lmx*jQ3EMJE zj-qz@*K{3X|6Su(mt04MK?ZDO5(|2b8ksS&%>ok9DY~v&L zWoCT6(G)*mzL2`7QHWAgLiiziptC~nR=9*n?e8jqOS|7b&3I0if@Gy2k-+jO#TjFz zASfyiK>+;d(NlUu#j=h@#4$|$l&WLJBLYEJ6(orfTazp7fn|^)AiegHZB4k<)^wMh zZ$glmiGTp5*88@G5Tz^OYTAGlwzhh~5GUYl zc)s%4#f2UmG5G^Yt(Zs$K>|2nee@O7W$uJwIJ9xlV&Z9{EV6l_9V{Nh^os0+i*na2 zp`Zv#R(d9qr3KZf6_5_hC`ii-(;6&9;uP6hg&mcmY;zQ>L^8F#(y{0s-Sjr)pUPR* zJOmT4#va;!%HgmiIV5cdqMGx@aC*v#aVl!Gz)XSSGU3#d2&!U1w#rblT-&|)>sI#b zPv1I!hpw%Abn_}nN_+%JKcxT_kj}5uODII{?eN<6aS^#5CTf+?kMYqYvfo zI=C?dXO$$eo}WdQWfNj}yqLXFv%&I)#nc|`SlZsh!~B@pG|*bsLW$Wy{KnS!yQL=& z=~LDY$+|U6Hkvf5QLZK>Vp~uExYz39>#)0|DEDL@26oa$?i*ocYE*-_(w9_pcw^EB ztgFN5Wrz^%1YsLb2J;2Ro;(b)g42*@4%!gB^GazUI4iH`O;hK5K9S}LnWJx#veQ9@ zmv@vly2$s?cCh1zF}E6l!0`QOyteQlq96z}hq9Vw&2_l!Q6)AelokX^ldA)L+7q|F zhk(E44=F@M0Cv&x`=XFiaU*A4V1cm-P|#E3#~McaSbdBmT`dze#mBRRxxLeIm0JP_(sbiZEH>e5-X|uC$T_OCEN=Yl9E{`h3LndgANuM@B4Kb5r2|T$ZQcb|KCbzd9 zTp9G1K zBs+?El@$R)5w5=JH}+}5jZP%Ww)F}zPKY34Jn5KV6&t3Wmlm3wqxo+Qp_N>p1vVD? 
zZN+U~k%GWbI}IZ?NK$Z71Z@Vm#|9wL#Tw6u)FEq3Ba)R?6s#<9O0Fka=j_pn>Tq_j> z+w2RUuKcPSv>Vk|1;n z--*AyQhce063jI+!yO$Nf*9etMJh9Spp6EG`rF6Hm$qdMokDQ-Qx`hiPKg-t<4p9X zKQNlQiiv7cz_P?ZVs*CmAYS~f!BVvm_R?E+5`u9mG@y+qnidQSxVYIYs2RvQXoT@N zbtsV&JyfvUmo`mc6u7LrR-P|9W_3Pfh}%?lBXf2X~&j{9BR9~Zd;0)xd(&;M{G7xw9rdgCU7$u zV8kdXsaEPB3AvWv;_)YnfVCnD6*TY;gCK~c?QKnL2wN4~iZDCXY2A`7d=9z;#Ww7v zVnl&earJ}ZiRz{X%@r7P87Z)eTq>#C%YdfY9Gh5>UiR<9Rb{BsH5Djz@TC6$oYY{9 zX)93-RaGrhS4}ckNZXBdB<|00Z`p09!-_le8bJ}|O(DHXkPM2J)OgiLmo)_$o5}}$ zOE;G!chg&rG90xA6lwhrsK^xm07$P-OpsU9B!YXRw9Lh>MaGIv?`v_UNGsHkO>Gfu%BfXqnVEs!aT;O-<8zvvn*~l48>Nv;%EKb!_i46Wu^icLDGm5!&T$ zUZ52mMI(1HQwq`qgFqKsx>9<3PljS+7O2E<>cn{@t7KE>=+8={jOpr@2QMjuHlQnav!_mj*Zgkn2f9e4&$ zcC`?p1FDY7lmw*3Va4&P8mvO8@X84&DA~{Q`dQ`@G!c?YD>{YTRB3w){5YZ1GcbhH zed?rmur${sbE^u1Kp{&_CBg1H+Tz^WQlhgsQ;T#)$pWEGFpb$3(Y=6WQri9eP>c>Z znr+81#-RMgX|E&C4GTnCIMH+l{mWm!!<6pMymnWTv8Di%QK>4bDh#Yx%>*QbChBzV zFMWAp=_Vqtr9%)aqdi+l=^%pvR-8I3z>y|BYCNoBM~1{OJGZeGZ8h=m=E6ZHw#t+zubeBpO3?{-0uw@GBi_Fn zX(R$G$;X954du0k&Gk$F0E@M`c*189R&h!s4KOPa7i+zluV6{i{r;|NOvPOWWF3_f zXMOqs<$-dndlTFbyOwbT5J+tDmdktqlO@*=XHf6C;UVJKK?Q!xFg$E3LqI9 zDB1MdhdpJCzg16#XDD#wsF`HMuna0Ctezo#yPeb#8xf}l*0JrHpXME!SAmJt))^mDoNUCI9*)KL9F#N z(tNj-TwG2rl+jbN?c7?yhqRHYzZg?!$5z9#lrpy!h}tO$iBe_i+M3#YcMMEcId;Ju zl<=86(4xa})C&=>D&?w3DOks+@vk!Dnx!TqrjRjV)pTL58l8`Flp$iZX zzH}u*kVmxeolNLdq!jv6w;Uv5spC#nwDovYRTVV2JuM`1h55>QbxCAU2nJ+Opcy@* z$BG(ZEs4`4(;Bs?4nFEGIDSUXIG$;w#%r-p=Gk7W%z2=Y%BbqBe?;&Cwz>Pb7FD&$ zx)4s2wvC?5z_%%Y2sA9=(~KQ3Otn!wm^K|*Q$18Pm8>dSBVn*dB$u*i2Ghf5R;JnR zq7qLULjs$3^AyET3tPLjcg%;>nz9?B{pWc~8CU_l+gJWp<3O7I&7+S`OF zS_d3RrYuI9Y|~3yh*TA+ml#TBgAtXCe>9ZrR?lJjIgoAvg@Noxj~KX{l4UzM?Bvpv zT=yzWZ2}^n=sIVZFlw0bJfVn1nYOaaNd_A63c5iwWeZB6sUTd2vkuqg8_VulI?^X( zM%tjgZD6H-80q;`icHB}m(onX)O;?sVOJ8sE9M9%iGi?mkgDJ7C*#E^eCqwI&zH`c zQzc_laUC5#bRbjUGt|dPHW^Tvj}EDXRnXVrl0>sdF4DyvcHA-XwZ-^m888$KgGq$M z6yhfeP8>q2?}ydXH<}n3X=~!x@WXBhf2)FCp{h zq%XCI19&7@_;$b1$NvB`-5s!Mz1E=$7<=nBD_SZes*qz-K^rpGQi<$ZJ-+6`3aQ(r z_1BL6Y^`1#DYq@u6qyrW|IyX~=c2;j&zlXRN|iez6x715`dAw%+o#8!AaPdKsxp*? 
zdt>ETWYaEOrv)uZ8)aH&=4v%&AxBiO%ZO9g)JGA&^D30`)am=NTEGtr4kvc<-D?1q z0~<`z;b{A5J=tu3dMYJ;X3a*8sPN2kxv-fI=;$UsOlqs8*n%%{;5eH1IKOa07F0c$ zb<&1xY}x}_QWknKG{J^qRg}{>nay|2kSZt&D=QoKR$y&!Zwst1-3PmN7CJil;Y#nU z+I1mlB|URP4pqakJV45D%(-1Gm2MPO)6>9+mtCZ8j6}rg+#U@wodL3d*#%rck4fmD zZf?s?EGWF64+2S_LP(lbV%cM-xCC!DVZw`IN`;JgwPa1O>HZ-0CtV0UweDY*GObRJ z5`UzsMvT_|>>6=JCl3mLbs_4yu@sB^y8$s}ye2XuRHc#I%>e0rwki*Z9r#AA>{Y_Q zX9z&p37PrD2pZjlusmrlDO+cUw`s1cGkyV2Pb_$~Z6+^SUKU871&7O(2-9n^BID^d z<9U{P%K(}{Wxq{mDoUJD*q*+V)xxW5lNW9RgK9z$B2;iO&?M4Ud=CoT*qs!E&I0u< zL}o|18;+>P`h8rF`-@-9FT9X80uI_3k#MF{-JEv8#|ncUFM!j?%!!EN6EG=LC33{& zo7g?l%WsYNXvN*7q5YXsim-?jlv+PytWnQC3s{v=Rc%_y;aDRE zLIV^G>ImcsIKeo9=xI>6CUSSC{%cbard<{Yr9&ZY zmbTdTuHe2T{d_&la;w{ikY4bF;37SkQ8rz5XKIH7DI}a^gBW$1YQ!C>)(A#eX$%($ z8qE_HbNlVM-|D{zvy0~1Z~}q|1Od`V8a2tRL#R@M=TQWh*bc!-f9Cb*jb8B!g^NDe}LKGFmnA_DtYeHIHl@ZYu5}KPDoYo}ReKj38B=uDE4x_Rm(M^k(5u-9Lsh%W6H zaRnGg-jmlycK5BLXe4=Tr9@eBuCj~et7;0WlTk?GRSLcg71#pTxX^JER_nL8??~_@ zc(_l>l`ioPwW4w0c8bYQhE`HkBvjiBRO@LcmQW7k`={G+;l#P!-KvBkB`H=SY0Rw) zClv)KbX0k;zc#iSnQ+BQlD_Gdq9lqyJJ>S|o*>Ka)YvFeij%NY*N8%hDFG&@;<$BI z1q^kq8dcKNt34~yRK-oTt@eJ=y|2Q%x13R!#C1}}bff_hG~0)^Y{hA*-YH|mB8|;5 zQksa~MJ=cVv9`Y6HFm2&1hfNYMCOhIxAmaY`)5hGvWJuTYI4cm+`B zO3yVEm`)*!Velr>DgOYP8mgGi^%oJ;GwJ^T5#dv$sD~hxPXhoNL9v&|5HC*6bx@Bn(E;PgJ>Y7Wo=F z9H~WHi^)u0NnppT5MgSf#4#AgE2e^LaVu7oc+w8aUp58ROv#5) z!;Bm{mkEdjj|`9rB!D)f*uWc`ka!+rJF*H=bm~w)bZzrAX8u)jy(&r42-z_;+vK{u zy@x$bQo^;9ev&$DI82bttF8CavJiMHu)n!=)D}{pfiu8k(M0)$yG*pM`AI4#tJzFb zR`nvg45OvPs;IE4O6i)GnG%qhz2Eq)s9&PJxv zmXD7J5yK2mM@20tGLC72o&NxzXO*Lv1ePj10U?mfw)ZLXAb{Z8?2aYNx2O4n{{SrF zNlDmn!jxv&8^2DFHVl=N#dda_hC)jrqM0en2%$4D&s_9cZ1sy+}EXV8;5z=5(xH8>jhy^|e zUx=Ynl2uepy9*xMmCl3f@S+5nU7uH?wr{XBvv!3{AM z6FmgxtjB*6kp>pGvyHDHxGO1BWj%8=>hp$stQ`tHU9wQF!cCyvpq4D zcE`K-u9NJsw|jAIuIXk>fJ4F$0wj9jMatd_X9{J4qLJidsMZY8k@FS_PXv`1?FJ!F!+o`n@+L0z>?&r#-V6f+LM3{Wo z#u-sTM8zp-XRV4xMRHu(h3>ye;=Hx&!Ip|vl&BBygw?Owx44A_s4M(f_fW@{x?_uG zyhr3<%h__P7OIPB!%v&$IN7~qS)ts4+TE{yAi2yNNGtA=o{&x^lv!m?C};~>hYU7# zQr?fzPgJoiNHs+YP~nBbOq5bfB=qqC_SFv3ZQyOfJjXjq<6<~=V5`Sz@hHccPge~ETu>{9?q!t#b|S=pcu4c*Ng+EANyd+@ax$e7 zTm+7)H91SrFHUl9D~00Ntz(KRWEB;4`R280(Hb50y|wMiHF&sy3W(s5v*kt> zc^7uDU=6!)$JUVDY3jyX=msx>)!?{(Ue5T{RMJz^V0k)8rIvyS2mz&L_dssIuZZEb zqFcZ^moeFfCXwE}riwsXQbETWOjUZ{pE~WS&3!n=FjAEAN0;&JyNT36JsoV3lvW<_ z^FbOEZ4VcQobr4hHBU?^r8t#B z&^xMVCGzRE24mdYUN61fYeIsvDI>3C1k;{4wTy7t18y`I&sWl#N@ygZ!=s{>Qs}jf z9F-thw6L3S(}kH|bp* zES&_6oJiVzDeHxJ(sdkm2*Q-nLycjNDos`rD82cq=%=Jv84r7~P))o+@53uR#@9p> z3-XVBHFINf6dVZ}K3-LZTD%dwseI~2rAy0FXN8B9glY>cg5-QS9?UIDoG18C3BXay zHx2hd31u8YsON>mQYIY2^Fq@oG0O_XqGl+~^y zEbn2x<&p^`Z8ZT7+kHof?k$60sbmamOd%=&AQRQmP103TsEH}2hMFOGnpsrB!rla_ zVt7xex)g*618LeKjFiF>5>#C z3VoX=QrTRBkaknj&x+y{@`Fwa$n`FV}ty5}7(+#~KeU&|6Z0g#iFpb7A$A^x|qb z#wAScx<;u$>L23ZhSTWbTbYHrA1$R=jA>eI9#dM5Gto>o$1CL&lv9Y+&E5zlHu}og zTa<3~*OXF`6r8Z8(iN(nKdN|`TbyDinJwqg(;qfPzJm5TUiR07*M)7EQQPmO?Xo!V zr`lYpoM~l>dRR=*C@dXT@o z`Ib&wb5mnVc1YtxJ8b0+C&aBpaN}7#%p+QgtVjJIq9_4_piO?=z8*XyY4-_IB2POg z+q_{)N`ceWOjF}@)A?~>ScC!?0fpT`k-od@@ElRy=th+dy+0~WZhP6%YHWCYW+1<& z#0?w_F0n%@U4a%SzwzN}9mY^3_0gHx;#_{}mxtlleLncE_^hZ7)?*rlJ|q#|<4!ev zr2#4$e5%&$p-M1hj+#-^O7y=gEcjG0Kt{+AF_KkP7W*Ty`gq2KDFzm4;L?JnNEr3g zP9u%tAy}Kn@F&D&9fh@3&?tPxm zk0@5;_(qycTR>3MX1Yl5$d%+UG25hTOB*z%+G@v*Ax;ubufmiz(pmt4BB4{7TA5lp zdT8T}6EB&Fi3ZcI>w05{xwg0BMX4w%4kQsv0*Z%#kZJY`D)`Z6 zt#o4AlL=%-0>?w8{;m@`BNXhT`xOy_&NP$8a~zmbNl#Z*OH8p6=%JCCPY?)Ud;NU7 zcCDz$1kD*Sa3^S@?U?$Hi%(YZL4iS0LRpn+DH=dq{vTx1TUT4!Q*d6ZG+WecCYZJ=JBEDkT`x9MoeGQB5i}J6gqL zRGfWHAyf5Rn>PWIsBAQ;9w1UhN2kJYst0)Xq)aIRCyiB?8cu~d>I 
zHVp)F#?5kcKWmk;Wet!*VD;nstIsKG6Cx{}9vO|(Mzh06XQX?)k|9Q57ejF3;Bi=a ziTj|_O(npXi4`hGnaKJ4Iu~pCyUQ}VfIrq2(~F!^R@f;~!>XH-l?fYYVCm&H3sW4@ z;+Tf)Dfdl1G!e-prf8Ivjao2v1LAly-E6TgwnKmcwoV3vS*zz)hwkCgOH3vlR%oTE z%SN6$6qfb>0M2ApH0n>f1Ie>b3zOs9f~!=+XSqbCc<`cIrkmCKN!J`UQ0dd@lliSJ z9x+8zM~Kq0idwi+Xr5OWH+8-A>^LRKSwVA3#Vgfl4$?YP-z`}wCG2pH9yY^gyY#MT%k znn$FzegBtWTfF%c>%QuIMzbpV^wP^-o*Dme;-0%ifq0Kfl!B+BV3C{ zhUftz{{Vj)DU*#ktRE`k>{K`iRU7wI*_PVtZa$0*nU1PqYXv4cX-R?R{AVvh)cAFL zuu(|qHB}lhD#cCjt-vSf;gzOp<68{*<4K)i3z?%k(;lfonekbnnyzYjn!+q5va894 zOWbcQ5JJo^uDlrJd8mCv1L;KhmAcRYPuoTWW2uKhM}&&%D1}t!I44ldqM!nNvdqTj z-uH#=Yu|$4ke>+y9yCSQq|GHlEud_HEN&YA09Og_5aLj2V=kl}n<){AVEEGFR8_ckDe@HR z7A#Vg3$dSWq}+f3H`I>&Cww}R2v?Y=kmR;fQzwN&#p@_3af%2rSA^l)UFKq6NkijihE0nW`%hXDCMN3{(Q_dIOVIa5{x3B}n z=(loZwNnaxK*G_(ZyJjxA2nVhiy9m)=<`+=BzT2xP!mKTsZv9lidB)2IvZStzP=n` z){-?x9i!(@0!U7=;pzDrmA1a47RIN-;}}+Nt4Ql|;9 z)yM4fM4`Z^F%*{TE!L7#ffdzWXvl9(HY;C0&Z%o^wE&*2wF0V-5oD{xeA--Q8m3txsi2V@7)bX{&~M}6#4cWVq+kZhbXo$O zFt7j8)?y4nZzdgtdJ2hSZZDzbeBgGLZ$>TPY+* z0q^`Oe7SoN$HTK#OI~fYrmv|;NGdI06_nU^)wrtrB~`*gl(KxV6oHJv3`&ADj-e#e zRXaB2&SlCqkHZ_@YjT=bWldF{tq3ru{+H4f|h}ulLOo_r>bc3 zrdNUH#PK;Xh$D;|2q~5bi!tzELX-FK*t2Pupp~yt9ROoUnRep&VF3XKOw5{rj&)A2 z6s1fvE7euuRD>Ac%z9YFj!E6Lm)yPV0)h1Ki#Fbs+>H#WKy?A}+eUeA?;_oO=)Od|R{%>3*lBDehu0cGGz!Th?gY4jT+jSdwsmGQ{;0`8^uk*#*q#?8z z;-L^c>02%rfMKx3DW_aIC=ZhIR6-=0C|Ij2r3AwC zfH9hhhW`LHib$uT!>bws*N9RwOp)8cf{z~?@!!e1>N~YIttV#~rw#LMoE!mORKB8$ zrid+eDOC7GQdoSwRB-QP2fFPfpSy#cmdm(nQ^Y6CW7R=x`^H^Z3QQi2&BZS zrnWsJv`gmJ{H1hKiw|f5{{T=N58D3#l)6&mr%1*pqppr!Nok(~2B0uHc<`)Pg(}XH zP+|BrWQJ=)B}A1i1TMe}le*Vpd^noyBJk*83Mfh2JZknp+9$q!!KjXnl@TziS~_?> zY}Chy*@2{^cb&qg_|NY5(%txtoy=6AyeU}+P4XAAGBKF6)9(mseQGz#*9<|8$xmR);bJ2YQ$aGe3kOCAgbOlHTt{T zjb7w#9VtSJaGF(To3@gr98ASeT#Zj#Pf<{-E)JA_?DNzkOkLXKBHZ^r99e0TUAoSx z0lkFrrXI|#(D7{QE7}JSKW#WuMV2rW^5UfV%uW!iT~S=Vbf~?oo(g`Acw#OsEYgtL z0*D()<47K~zPNQo$4XQE5Fi@GS0tF-JnFKFc7{P6Fsg^%^87ZRVZOKIcb(nb+`bmE z;qyNF8D%Z9%Ox$~c+ztepu})^+*i!^NL>e$PgWndBT^~ z6j)s~94&_Bf?$yYqf|;NOiz8*5&#=b1^%8Dvg7M}L$2&TLX2(-65gbz108kUB~O_l zix;3s7PtVlanvj`$L#+ADZ1n8;=AmzDOu2{BN*XGnYwXJ!m$I78p!YpDNs<<<5-}I z?rEs1Q_3s2jSOroZQOR^8>=|+Ehz-?gFbX%-Qm=9sF>SRmYCym%CvZ8c3GqyO};S;2aEH8a; z!~0_D+WnhE6U5^bQ1fk}el^JG{c2thlydfElA?SE5RNJYw8onmrZECmL8Vb@T)dIXgd9H$g-A-Z5Kh%bRMdrzS0#stJ-8Os zZj8PZpg0o(jf%s6WC`O?C^B_6b~SjeDOHHI9X9u6iF|=l@7O4+WC}j5IV0KK+uK_N zGPH+D*iOObC>*;Rm!2wg63`!XVtPbyr@Abai$MhIfKj$k!=oaGqHI1`v1+zgIu`OK z+N0a0{u~JJ18QJ{a!QFfNFhGnG<%f*@Anjd2e!I?pREolM^O%Uty=g$L5NklO+4zC zv9uOYPNUto3GFjCoC;~Sg0*yP+tW(#EZStH#V!ZW%Av&u9fC=S%ZX!E&eY(?OI0hi zFqQB2M002f9sqJp$#dJACK9zWK$svJ176d&lE~zai?0W^^h>CXfaB9m&S3t zLO&*YTAGDO>B+G>m4fI`eWQl?-Ouw?_bdX^IIE;|QC?YOoV5yCm)@!{Ol&akj_aR@{_J6uYGa{j%~|Z|xW0iA=L%LRAatKQTl} zL)U0lrD!qLg+ZG=;JAh=8cd)~9C585l|>zx1poru6yLS<<5?yCTG7&`i84%paINN< zd+$L@ff#i33icsQQHexno`(^`aHhDV6lpBc?X~T+fu@@AMsV70(JE4q2FwW*mnceY zN<(^9M%f?}>&CHR@=#!sF@bBT>kt=2s)WTG+yiC@w>Ot&Y}z*gWk(IW4J*tni~j&{ zNKaA3PYfw$dXtA#g^LtdVE|AN2$)4-sZ$@??c>91jq|W@kiQA)8$TK-8?kbd6yA~s z$@$Z?xx*n;(i$3UW~Ko?*(zeJgZ!g>91DT);+8j)FI*%)Dd;Wz3V8#t#Ns%2MZgmygM= z#j0?Hfm%jm?uZdWx3n<_ltrzth&ZHzcn)?_<%e61C{Z0V#*eQ<@@_vqR&$r~MitY{ zTzs`o9aCFDY{>2I-ra+^Ivk=fBWgwm( zg%r-I`lrq`xTHfU^&=Rs!6_IRsB#7cg<({2x)KsLsHii_{k8YCA9n>gPDgCq7g_|K zpA_`;L7+xwy)GpF!D$^ag#a1PtUW^N!oyqC7$uI24_2(9pA)I4q6O{%{Tvn!e169b z^1Q9KQm*W21oTE76iJ)s?%c%52^h~c(_ecG0xtuPN0wvbg=rg6k-s%mhkYb$pfJQ#&KMMDTJzuk-}3Ptog zxL(!k%JBrsy2cetgfJWdKVUmYzM;vIbN+OhTN7BRVxtP5Gg@{-8iS!$h?Ctp`ZE(Tn{aI8KoD-y@y$12J(f>^lt77aPe&x_jZY@=($hCJ*Z7D3S*v*A9{_dr9`*+#q_= z=I>!rPz)FV9C|5BPPJ~WCJL02K_YP-e=5O*WQr(he4J++mj?3~wJaEf^3=Hlx4A}i 
zKHHufHIx2bvb9JZGmogG4CYQew52NXV>m$cIMl3DC*cq;lp~s=G(J)^5yF)69^s8d zt+-#=ZV|nD9w)*yk7=WoSQgTs8J~B;m})v@PgaRMmHz-Cusl+Feq6qE2&L!M!ptL*4J{BXFZppG&ty3Hc$I_9yg`|Z6 zx2q32fl(63$sQkrtt9d36--Gydx$nW%c$1eNbBt&j_o~E@)}a7R?%Fka_%bgIjW~x z*qw)#=3x^fcRNWs@P_S+hY{W=NFMrL)xukN3B#(T3^y4GdYlOggq{?s=Ap5uW79OD!*Ji_w9aC{C!0`Od|8D$S-dpw@Zg0uP0x9zRYk$l z6QWFUr;2)t=(kFle9%n8%X*qw%Cq+UhmANEP(ntT;%e;>+Oz$(0_#i^BM*dtag zI2IQk^51FWnZ90?renmOy7pIk>n@N?q|%a}mm3ktC@S#jC6tpa2EfQSl67wpekrlJ z?wdjbXA!h?(rd7%N|aOp969Kxs$4$3;*?@_G?;vp!osGSU995$iXj&o@V4sB=H*|) zqqC#Rw#H(W0Fn0%rk!EKYpH3Ps)|}ViR51}pLwMpNOEoM(}j7SVQ$z;RM97{j4c{; zp+q<%k3Ch&j+;2*Xeu!o;-#dFS~}!)Mp*BA6+OY@wi#Fw=o?PTwa#lFvp9kLTqS!h+Sn1ykuXb>3U*NCGtXmaj~a zkDV%J{h+839#s7eWO6%-YN~8HI*)joxwkf#8jyS2;m->alxhI&+f`OzlI&^&(wnO5 z8Y3^t)(Kffgz*L=aUgr30e^0%mJS5~l?ko5bgDtD6fx#$Md1rs6ktg=oXZg-=#1Qv z@#7jT7vYUQRkSJa44g+zC=&=ZkwsNBP(bX#1HYDRNU`^9#^CXi&u~adl!|qWiTlzd z@#v^C;cteSB&Nbvt6{p;$tbgr5&{rM?BnQtw@5B30rZN~ttlzekPkuIP^l9#SIxc} z8hkbn-*dUNKHcxe-fGuQf@Yg$`r%3n01A6J1$@lR(wc>i@|N1Z3uErC;&_kq4Yz`D z6kxq-RPY;iSE(}ZlH29;{!xJwJxPWHLVPy2yl=<5#J)h0(^^6j$kkbAd=3{t^GikU zos@B+?vSpQAYQ=RiIHsS8$s(dsjJi!o(7uht0$>wqK6HZDo_Z2I#nX$Rkq`Mdu%!4 z>Qa&lq|G|jzj&}hU`HA*xwb417n-h`D!3C6h_eN%CvvdwZPo|rzYgv5B}mqjg*c3% zI*Cr&195uEFn3g_sZg3(Vz?W7`_=>w-S5LoTXx)3q-~=x;?g2^QZBz1!X{rf7^SP6 zA2LYkD%dF}!sgn299GS%y&wVAPATN|_2W$oTT~+7O-$6I?I|IrlK$O0aYC({$5Mg! z3aGeH*iCkrjMdVpnyRL@quErg7z{oP2iL}2wMrxi9#s2RD(RYaj|?oLTtq`n9NLKj z0=vE>c<`Zm@CYQDGHif+U{oro>G0JvLt0|F0V7)g$NnOG4;M722nkRWoyDXZ*9aBNQulA1ZGF`25FU*{_d zB-DjYsyuAoJQQUaJ7FgPN$c4~_qVH5D-#?kX#_nynX2&r06ayE(*=S^soTh;W&*&5 zOIRCee*6@EMU^CgvKO*AQ9pX586(1Wbb(VM#Br;-%7AEi^O0E7e}_>Ks&Y|yyi zQ8J#Llv#6ZtpyRbq4hSqtC*jaigdcO1I+nmm`NTrhi7_tzG@UDx4)kvw)Ih^g|y-B zbmixFs@(~3CS)FXQEj4{O0+yw6~Kd4m5xayn5|^y6sL#Gk=~YxP(u;icLX4PoA%~3 zT_hWVV@RApsZqEH1LIIc=6AI-lue>Zl|UlPrOutt!;BY5I#O__n?e+>y1>#(=iFU4 zZH`7ddr3N69w<~4HVG>-{2MHHzqH4&BB(51K74|sT=WxH|ZDKQ&sM3OX}m5!t=Vad9%A&!^1 z2ieNOf+k3vx6y8SttKaJIN?f!fm}WLL*2vf{UAGhKCWHR z2_*2O=}}0Nw9^f2>sInY^qb<3QljMCX`r|9;%AnXIT25#AgB;_j4P^Cc;SvIQRCO< z5hm|u(@mqdHiZwuw&RH@2C|uU4W)&i@31L9K_re}Q3tAJ`I$Kyc*4#&UnCPQ%h35Pd6~pl8a@4Y6xsDj* zu0v6=Y2}?4a{!W_e zsRmWz;YpOJR-}v$+DT&g%`6tfdEcYzkLsQjUyi z9@wl{g3n5P>SlnlkU>W=?jS$W;in7T(Hl)HY$zGRf-G+og{Gd5X{wqTL530uK^iID z@B1|RxEIWzE+EYvSjv*AE}2G?4>RJ^r*g=G;FJ#j>g#rz{mSIw*?=%xYRHqyyiEMPf)SJ2!_E+%lnVPu)#g zxYoE*NREyaS+gEN&pBocvt5+dY$m?3#Q3fU6?;ohM^$n?*yJP>KHm=9k8ka+8DJ_1 zC#HoKm(9FLQ3{T(6r9djmr%0ZHEL%(w?l~GZ!%&jj?hyB6w^r&Mn>|jYhPVBO?36h zPmLoUJrt_cTk0xIjuAocPpGnfWW%#ec*YvyN*(x&71Y%PnrLm@Db>^;4*b?sw5en` zOdg7@>0E^)4ysJy`3pDWGuPDMcokJeB?*!RtEQdeh};H8j>S#R-PYrV&BZTD3WyyT z(~f3H@P!Z!fB)0hw-Tz#c*C?*^s9zJS~o=I3DHSD8`>1L`PF9aSal@;1jj&$9(91A zIKuVo)HXp>={{3Qs<^OBHj+v_y_oQr>eF;<9eYagt6yEEK~jI>M~H4&Q)ts{qyyGy z)u~z4a)qdMVLp_ZmY*40CXe|yPp2Zsm05#Y8+D# zX$!l`ro=SPJQp%OepMe3!f>p=c_PHAu-gsPap2VM(%RP5z&0fDJ;keb>cAy2unbI7 zwl^+Zqa8~p5>#;ZbW&znvHVI_qQdZ~#PxtixRq0hCUIk?&kO6X3NK%}d%)9Chun6b z!i<<7_ zNOLrmXj`iTtZnk^rFvnRqN*5Wiw(l4CTobHuBkFHa&!PMW5b1`F>NV5%5@BO(S5zm zfhva_DH}vUB>B^|CQ8Rgbb)=}nM9)b!%vI-#_tmJkAHef~k3E(o%K6x!T zw3MMDMxH0ig)7O%meQmbXBj61`HnQz{`_$|cMxKbsx-f*7*zgTzosC9OQ|*lp67(Q zZR@MI3R?6bW;SCzd}5AnEK(iN+5(o5JyFm)#bm_bqr&Ub2B`VUtA3VgnrAM6TSi8W zTK@n?9L;Vl*eUM7aCGg1=@?RMvAegHtz@@8n!3$0)Z#d?X`zNhG}5?{mE%Z4Twd2= zO~=>6T)RJY)FuGv$KWZu{HdnYOc?5(6rP^DHg0JCXf+YIN(vanifwc6HPwx~>Udvx z^}~RIVFL{#=UTLRq^uYvNBEB4O5sb5)}dS@5>XM8-4n)&?7kNwKOe775ncyU&24BCsPV3NmPy7QnX54D!n4Ne#L&d)ze>5j2U+`d^@B;YvGph=j7M9qhY zqu$abO;ohfD%^Ius`mDA?7rUHydkn+3_#;f-ahGWRxz9_GT>RdZS{!n=qxoLBPh!SjDT2VK^R5A#1R_w};{{{QkaddaVVXIOJ%g+{aC! 
zgLnG4ZFOWFN{U>C;rpnum)%O`#L6+*!>*ht^JG{i5@5r!rEE0vwx)MG8?}g#SnlCH z;ug2lBk5Ow@+cWH*b{`CsJck z_tg#aZa|ZTKD<@4M+Pe%2_7LqMPy7)D(NTQgTRV#Y<>pZY$Zu2Uc-jZg*$>wN%N?2 z<(aV^wbav7R4IRd4reb&#Cye96LdQEwU37vy88^IAsB)7R~QUBY5Qg+hOhE`PY|Gas`kxF z_)(`S9BZ-s$5Z!lv2GuL+EuD{8LJcBCRU*p6z5!X4@aP*!{@}RDL{^vr{tPLxZITv zszCJUfu?Rv!;Td=y(t3`!jjb$qI@R4lPP6NgpL;TYjY{8nnna01EJS< zvxX|LxN*_>hso-U1tdlLNI~wK^_pm#E%hG|T1oL*io81yWHQp?a;Y$)jSsmOycqaj zl*(LX3tx16LvIPB`KWnC9pu5KH4ak9RI0e1YRq_zXh`?eQ$Q-CW{@AWcN^_I86xF} z*eFWWhYzUrq6ci8OVUVFAkNHcTvjoYz(B&p4_vmainDswWwXn>R-4#Hq#(KOwN;5g#c%rJT>qr%(-$5k{<0K)0kp>W!|Y(#shB%W++mNzPP zw!e2QZbiS12~t%Q?{t^Og()gW9hDxFshO6M(Hdz&78Ve^!H(OD`;0#^#8&;F-|F$C zp0i3F+`A1SFf{NOdudrmnlMakw7H&!qM_yY%=GdxYk_a~R%@OgwVc?#iG3s}^if*3 ze&x3%li=VF2(B-Vbn=}|JUM2o5oEf>RbRR!LRg(dCtWRW7k+;t0t;$U98Mn!c;tDQ z1%2vM)HeG~O~Y_{Tx}Y9EWb$-C0(>D%XTbx`} zjktakufaNDHFZ=`VfmsAKByl#;g`(Y9sdA#&5c=2{k&AI+;O8Re*weg(OO1l-VysL z>z|b$Pfr~fr@`^X#ButlE9yjlHAt^pRV^xo{{Sd|M;a;Jq1TF^WVFy!s~xz5^Vv&s z`&V9Cf`WAI0w9Wh&DgDOV$Ar4716w33a^1>6xDS<%H^f0gzmFRC!0=Ulqu5bFK%g- zA(1LT2c~sNKT36#1I(1J1sz8kunXKO(KnU8c*n}5u<1iF2P=4nZgx>t|&GaScoj;PSoQd8kn zqP~|OsIRD~fcw_+R!LoT2U0ZpIjgPXLYn~%kgkEV&kSk1Y>7`gRJSqdJU&$;Jo;6u zSfGz9b%PGC!3%_ZI|^FXWNyj|g>kt~jCb3|LBfyiGs;eaf{-ai-s-DRP=)H~G=axG z9%r|b8S8U&G;s}v1;qYhsai0k@7)gM7ykeWV0c+^yf3)4f^`j;{D7-3c2?771Rbcs;TLmGd_ag+Qxtj9RMDDktT9>0Y*Y}KkvrE}&fiU{eeBrr}f zN(tJ`)a(q1Tz9YoLGWu1=IjjkK$eFVk!hQdQ41(rUC-6GaMG z>R7hzO~-B;+-5p3CU`*EP21(jDmqL^!kX(a4DMlgs3EC`3x@t=l#)j(?Y_4POLy@+ z2VCd9^Pq`N5M*ISKag>@h`{5APBhI+N1ZUQHL$vhkC%4+JZ!Dy$oJFSVPkF=n`gmS zb>o^=Zlxo=l)*biKTeqDAx{tWugH=?c`0OA&`6{UY-LS#9^NI)EiU0HQ97w@mSD=K z!yR%#KRNj%2+Sy7ggJCK7=-h0)(kU3H&L9T&+N*YKh^`O)65e8ftc>Y_eyP5N+=t zIg!1#`(Ixe%x{pD4JbJD@TV;Th$Tk=IBcv36UR#JY`Fr{s|c(jyGBz(X7Kp&I`0&; zgrz?^QiIZ*1x=*IYiDqh)Zzln!6J7p5|8yli`(JFZCrPNAZ$B0Rl>*;1R13~I2H$7 zz|>UBb^=MJt_C=eUgd`4dymhF-f6|4W3sL;8%orZ9xflPJVAr>`k7)b(pStcvZV}b z3s~58RX66!P?Us~K{)x-Wm9+3eGskIMgXKK0Qk!R8iqnbhA?puC&rCt9T2oz&7DVWtz6MfLocWTDI^fPYty; zixk9h7vEb&1{}%)iC{599EC>P*v5XV9y4caabzqUrb+6o(&d~Z8=g3Y zG}RRh-{}=lN?fpQG&k++=FPEr{8uBMG~alZ+c1bGl<{T!GXzC6xIIHwC6EORE4RS2 zUf)0*y~pKH0RA!Sja4PkPe;0>*0n?kuU&yO2`;kANGES`?lPNOg}=;{k)mMS8yrj;xVks z>_^!`V08?7ML)5`2~@-aGyzJaR8+|txcIkiemo;ZxCqjN zg&wzZy;_tLk40mfE#Q?EJKh0F5&MHR>4gDbm9XabtIxWIILs07$oo6{_Vb08A0pNn2@fa6a09 zj|PQy%TX)SvtM+7hhIP*STdD;hNIofz*#9 zNj0>EPPjN>PW5;mA3J%jtvHk{jWo@%7jIy1--V4`I4~GR7`}$mPOM4mrb(p2@Yv*< zmQhoRNIa(v9_@ht0Eck!@a4v(D^kAGOlnCQ5^?FIMk!N{ypkfjURCmsmW6aC{l{a) zjays6GAj1#e3*< zCl;cqeYDhgT)27yT560=?GaAn?GwYsT|z{uVMnT*?um^lk2+$H1fZ|m6;yP>p?^fO z)2WShJ;+ioZY*;B)TEZ@9+OKOWhGFe0qUtWm=+m;#~kzHGtDp{B|aCm1Yq{LHzS_4 zdE12%01m!BRkv9U11BDuX~Z#ny|sbE>T!yQh-QXu$KAEby@BuX;)K{bl@b&Z4@D<0 z0GveAT4NO$sEQaB@S0eU`Yk+!UtKf@O*m#W045A=!kbW0I7ptl<3*C?$Lpn!5XR<; zZ?Q%lH}7NNwlcQAv>raquz5#9k`G=KmBpz7NS`XthxE3tlDc0mzF#%JM3mE{iwGc{ z#jeZ;Z8+O4tSF<1>!%9Fol}oREU-MOPYrXkV$x8$eu3nsS*(ns?FR%4cyK{wV5E?w ztO7de1J?u%BpIJ|3V22dh)!O53MlatI!2g`n2kt~v|5BLWB{#(hR1>n1h`2$bxztr zeCk4g#sT2`hoelPgm6@4^`|jUwVBnU<)t3<|P)hN@ z<3%@S;z>}#oMXZAjT|+Y6((p}%u5ggS<`R|qSv}CgH7JhLE_syp(vE8K$FAps+*Ix zHV$UFbps~jHQhAE=(61%Z7UiGX_pGY@an4BUZGC%*M9i zZ1q>NrxIUA0Z>jeRc%oPL^9M<%Bu{NyN^wM0+VElxa*BB(fHa57X7`;uMaG9;kHdJ4 z<9qhw>kf<|l9xjR83%ylOE9XP>J zK6LOxP6Xpo*UH{}`ho<4ylVisQ||&r?f~|2&j~#hXB_aD zHPoyojxa$-1bL2hzc%8!DYOufO<<{%s32I6v%kI1amLw!Ms$g?K(&hT!x5~c5h+i zft8h%YE+3EXgK@Ot)%r(#}3?64vL!$%rxI8M;jTasvv18x8CwBc6EN_9i-@c9txR4 zme@)ZJ%1V}Qq;-)qGoz49d&ILWUrnK#1@vMFTb@-!B!p`^KZOWhO9>3 zy4q4w0Wpd76%s!+V3G)F<)SpP6^PV;8bv1H{*4&@ia1yAZ?iTEFW50GCruAWDg_3}m{>tF&B3$UXl6 
zhlt_3cPmbij6vH)s!A3-SV7t;PlIE5TOY!)Tqh8z6cV(Xt8(@)7-os11c6OD0Ly<1 zsTbGV#jP!ufk^-!y}p#gj5>vdgFlCrV8Q6IOfsrf!t>c^Z%=YPMlFlt#)}fk5Jxk^Rb5K24J_laQDr{u^zCjE-eqV? ziAmEUm)c(~MJWUk##SXlDs70s4^4<|jkz&xLVos;bdPNW`HivnCwsq3{ z6e`})q(M$SR2)N>i+M^)h)5d_C#pD7R;Msvlr?@{9e`0))5dPeP|{Sn1lV2gA8z*i z_#Iwm**G_V>oR@xcWsxyr%Ih!D&qi0q-J3Ow$ zo9a7qjPD(ls2$>Ts&EM(877&#$X>fqD0Gl8I`JNZ3Y^)hn;%%t9XyJ$De@^XOg54c zD-(N3A@`cr{?063Zf;Uk)-%IQdrdX`sk?k!AS!SnM8{4W1xAAl&G%$ku*#K~??kOe zkcKDhqOd9lzzcGpl*|@WsR=lPi8R~Ujf-hklI9#mD&xX&`58rKA%#}rP5LIoC8Mh< zg!maD8!V#Q0mDY^ZW5UA6%1n>J@i~(WUOgQ!(eUJC)jbPGZt{83x~-SHX$sanrLu& zmYy}+eN}-&xO*7iid5y$A7DL6!kV{Ts@u|{Ql6D!Pn}A{@r?U~;Nn%JprwkY!E3QB zLJ;u~dj&_{Plfg4IhJR1gbfXZ^_2k>##3)-w~0#BsM!f8-xLu~jOItKsA_Xfi%7PL zIH|8$85glCeAc$N@EiiWn_axML=vnp$JvP#Id0vQwc-US7@5Q6Q)zG<&4>vqF+8$9 zRekD->t>E9Q9!$G8}|(dhc}gF&o+f7;RK-ZFg^62HOy_*9m?TxfJh|ISfth|obu)m zED+$h6g3qQIce);+|d99fJ9}vBieXNoNqQ-#mDR#3c?JT_EM~;cJH5MLgmr~$N)k0 zuAsza%~36~lGD;$EVzs^y(^t>xU#5T1D;jGiJ2*E$$}_e)65}YCN+TuHq)%FPlm)` zrNioGdUoIZ%A%?&v>I3i5SO;TC!Wi5B(~B>161RN5#{rxceaJk3e2W9ezk&ZOBNQ2 zo{C}Rs>dw#6wf<8;!e9%ZT5cN9$1jIjRIip=%TBgX-q9xbQ~)MW+ziiYFtJd#CcZr zZ6A|#UjeZVz_)O0c)v7K+gVU2QQ9yA*P@H^^(E{K1f<~;KMM2}I0bDq;-Ey3#s2^! z(Z?dHf5eMeU*UcduEpn}X;?@*LD}u4hUKQUA~hd>vYhA4c`}wL+X|tmkwW=mXI-%u z01Mvzylukly47ja9U!RejudTn@Y0^|05;Cqr2N^+IMmZ(*;DzA9OurCqE9thK+@R;1%`Oz{fT6;L#2&hG!IXeP(g@ohI)A39%GFe~K+#s=l<|-%rx8r?``++kw$$-b zUwNf#BtgIo0W{i>iPNa`^wh=5`F9V7w+B-4#OWxoq;$`MSJF}%nY);QAx@?Icl532#)sjXV5X{>aX&Rn2 zt62AJKJabGqT=SG2R``?ypZT(N~pBd)2$n;kUL7@D4nS>g$A zc$HLc_U&QB!!dEiLS-7b@TZe(5@e_=os{}z!7%d}hb_%fP{e$v^OlkJ0o!ZaU!d{! ztld+xx5Q8I{OJpqjrVHOgX$??MVc{`kyR+aAM*HLK{$3L}c82c(Tha z@0?P#D9;($=Sc7KB(i{wAW(IPXZ-6|9D;)ztHU8i8`}>=hDjIzJ?22S_zQ9STLiYC zq!S}-eCCU;b4Qkfl}0^3Dr28KXIV^tJC0Q1B0kHAPDvo^Vx(<9LE-(TWqEGmoCr#c zbO?`S6l(N~S1PM!xdfZT=^S*;UK0DQ%z^7tR$| z-?wF;6nqHcX}%MT)6EgD!+e^C_u;98UH6lr1ut>K6kOaiv#kXy>iN=St(TFh4oK)b zDQ$r^bg7;QY3nLHg*%dBwL~B%!^&lErra0UXZPs)%phYY4@sh%oVLMW1f**?VH5A6 zkuv2Bbw~QcnIXewA!?=-Rjo6=nzF|wm9HsHaX1Ob-Xq^iZ8H18!A3Ze z55Uc5r^$GBVVS0($dY0-MBLQjc!ex*hHX@W&ItM~#4c{#ox-6_tVa~YQ|;!jvXQ0n zv<;WI#oLFIDVp@r5YiEXO5#0Ns7!n(Aj-9w*#z2d7k_I2&0ZCoIa^*!T zB*#RGd~EI03R)$?tMMU?%472DDoQjvSZip~I0+--JM&I{RLE0_Nb1>3Sh2Y7^(D0u zdiKzblX`{IP((BfJyJ%C2&8CPo>XspYBEP_3;y-E$gyq2_#6avQ_S5Zg=tzNu4K`Q z%RN=gba5Jb3aK#oYHb8~B(R2+N7>p;JAmDDYmvfhoUEY$;zbB-vosX~NIWRU5g53iwnmyGUfAK z=2K8Zi&s*^JeRir05K(C`*j-IzZ0ao=_vyrDE3p&DWr``I=>oesm}c}%y>W z`hyj6`INbP3dE@4sF~A6Sl%|UZ;3qG?bd;)$_U~GCRT!#1&N@)74=2Y4xCfNfMXp# z#q#{r;EFu8h*qUlSlCQW`b8{2+^0+Z9PrCoFiprL^oT#LCRS+D0Rf^T3JNpVsf;_Q z6mV1I{J|$purv&7qr{+?)KsG_irehjdxo{|!_2cKKswb59VZMZBb;v_SP5wP^5on{3V_vN zGfSCq8dg8(^}FT}j~ffvk@4bJZ`o6dYYP7WoNDOjGEmjg zz{(`4+Oa}~MuTfw<6gtJ0@qoC#loedN>8w!syDR>Q(*`tMLyw773VbW5r`@zNi1co z#-NT%Fw)B=-owD~u`ayliI57q4$4sTnp3625$90xs59+Ma7T*batO8oN#)M|U4n%c z*lmBLYr^c+yvaup5=ZoK!_up0^JP8~Oz}9<_%-3$&ULh)8L`+U7-7jrOWO!QN0C}X_+oIve z98!#py?r}r_U_cC653-{*r|}>)8Uhb!Z1oaJ_+oj=P6oUgbhyVnq3bo-7~R7lfp>{ z-GwRdmI)z02(DE)reoVaAp>ExlS~Xp+sWoF+B!;bg4`_v8W~^ zqM%{;?kO~`CCjvRrgrn>VumCPM|RwNdz*9C$U@hE+rmtHYi`{vCy0u7t;R7lghvhq zMo}*E&xFS&(ogQ*+FQolR4!105EI9V_)&RrWjfB&?x?@aDd&nd#PaJjXu;j)XqfIE z06Qr>sZ*GkIQuHiIFkwC(^wjhEKZ(fOi@B5*U1fM1O6ad{{U63@#c?qwUD574VAFk zVrL44HVJ}8cce1cz*o)N9EA<7U;$?XOLpQ0REISv$O8;IX-k%6F{i``nsUQ%I8}jM zxT8a}<|Gqr9qv{JB#)<#+u9{YQhl|hJ);6Wl(D6O76Vg?%wvK(#Z0nNZH<=NBHWg~ zzRnmt7L?(no{ox0%ov;p4vKwRY2kpiB{V-Q>$&{4ScQ*)*8c!s5i@eoIE^#5w{fdT zQe=XCl@)Q?iZIYa51TIC%qYd!ABz*=!hF{?XGuF$HX5>6aAU%~O+F)uvGeG}kVZ-c z#GFTc+S=s(JOgGH&aNCFf_RD$TimBeZX^no9$UfjUCX)}7E|xj)9jMg)a!mFv$l2$ 
zkCd7+xbDiJ2v8K}xmPD`nv!Lb8i0RAB;=taXilCSZKXcTfpZ{m9Tc@lw_9Zxf=3#f z^x0;D0Zlb39k)?a5{MG%Tqo_n4 zV%`Lkx3B;Y-@?}43Hy*h^P>kFB_;?pE|M(4BuCw*G~ffZV+kK?3yW#Rfh>|ra5R-I zyfIK7l`5KCB{s=jPOnoL2+`B;FsSiAPblkdMh6bMx>m|Ws1U7(qMxJrp%2VbH7*$& z0>7n*wZmy^J;!d^aT3^!3Q0X6(xs@B5|RKrDZa9%rWU6QELBDoDi)F2J>cpt08#f_ ziUXkDYRObxlK5h?@HpSIAo6r2=& z^_*o|$*1{)Q5q~NWIla|x;=yo{roRY`(zMOaieeL>Iu|H!)*KoWm*ZTVg_V2LSV~u zWjYUdY5?K~gF(p{L~ZN%(dDblj0A|`L9S-Y3yh@Y5$V;x>hyo+qNW)89(;3y4cXC6m zLI+?^fmrSQIBL80LO|Ia6qoX^Op%2%RZ(N2t9+Fvkm!Ki24oXT$T67dW2eM${63@4 zfs{sR%96l%+SeRj*=eN%#B|bCC2oi#1JOzta~3^U45EgfR+_b$nufL}YpGv(&cI0) z9s6+gmk^+4JXBt`bZsARr7H5?6`8S>6zLohtovkYdOgmk%Qw|6yN@xHyjA-Yg$}-| zW&5VUhbDRBPguPY(O1vp;xpmo(;8Z#O%q8<=Kk`w(c|9yJ97QYWeq9`Bt%6eX59|p zM3G5(^7Rz>Y^M*BD%qn&s5LaM>n%eAZp;B2MxD5V<>zNpWKSCMPyvPUKI#>^b%WEb zeD8@#1}RT0aZ>!Otg|xjuuuouxIL}ggB{(qedWuhOl_i!$?!SHdmxYSH>w_ zTUA?MQ;JtLMj>54%;Ggt9nUn5cNW|`-1p=h;*_FFjwXhvf*@gDY(o}{4w8juH1rkH z2`7Z0ZS0+x8+$pR;nA}iWli|z1vke`CnauVjvdBXkTVcHqlK zc~Yj=npHP}h_%PFUxyq}FeJ=VX^7|t<58iONgm_9oXYuM~xn0E%f#k~%6hk-LD(#YCX6vA)y;TaOMi-3KuhT~Kta5@2KP?-Tql${`0B81bi8ut|uD=Tjh9O8ut0y6{n05TkcY$iJF44 zGpYpys*S~1-;_#1v6^v2sEvoNux}ggUu%s*gKLkgm2{ne<5WsQW<`5+Yq@QE>KOOG zkB*#kQb^fakpe0h1wdBSN&5%-LE{AkBOR4x08Auou4TYNrNI^q2ED=dgUg{t1vXU} z&*@wXnIgCV-%Z`@e^JY=cu1?t5}lH28@wa{WGv(NNYeg2xSW7Th{3Dt0n7zq0b`vv zRCy{04DWJR#Apt@yGjI(6y3m;;3kAx9Op}i%^KnKFve-hGgOk!(v=oBM=C6MBDT#E z*#Kf`1NN#Ts)j6`)Y>GdijnZUM>9!Qvr|PF5|%eU?bv~S_BwE1m1k^yq(-VCzp`1C zPBf&b&e%MHywb$aQ3Rr~uvq01$506YnJl2IDAWQ+4#o3HDk(D@DIv5ZB$Mk)ti719 zI{I45Oh%z7>PZD4#H3iHc+S4za;D40c1|+xslil^;fEW7oI&GEnAJTk9F;1vE5%S1 zRKDXIuq3U8``Gv5C+z|bJVjl%ilEO-&?(i=}>PBMAQBol<9ESGhuky zIE=Bz>pnC$!?_4%417CrZHpRLA#!@@HRFm>Kv0o9DN%;v_@*06SBxfIil_OIG=;F= z^qKs$YX?m(2EC4)+8t;uO4HR^SlE)RM;hgqDe{hB&6s{8Ss(KJLXp~ElfcZFU?8e{5Mfw!8G=;@�)bQ5=K)R3Z_>gLqpq(hGtx z#D0`kER>Xf)m`2^6OXn`(kB(gr>Cg%GX?<~M<5O4ik6m35$=UlXR`f#3tZj+;rz=` zcuD*y`qhBzNfz$x;sg(TRtz^f%Y{@2!tzuYq|y))o>*RFjaS=Z3mbd@;w0}HZQ&x@ zRy%h4Dd#eV-<7W5j5ch>loh$Unha5K3{xhWN%08HM3K)mEeQ>8_ceRA+jji8uxHFR z;Yni?#Kik3u;ea2ok8pY*qJ*zP8FK3srX94Otkq)Yir{`ku%P-D8FJ>IM+}(ru%zY zLQFN{KEdp-BXvrZ;c^?}J5Q;oFz58DqiTFgr}zaE`YH-N}aqiQ#zLB8dA^h_&Tg4zB4@baI zsWw(kvKOQPdU2$z*!Cqr0Ikb^Gb$VZ0KXmbNW^~jmlonHt>an<4lq^zG}B45Y0;^t zpnrNGdui(k%+=LmLmnMbMTbv)xVFkq?7`^URDRJYSA|-X zKc!CiH5~;bNlfF>#!&CI~g z$z7cAlz>oCWw`1qVK3&jL17wMjkdA1&*jTpW;V=#h8hJ!X(F0AJCCnh?hpvT0~I1X z;FVO!)OAv&bOXpV5XTvfjqhSe?0Z|sfy~b^RoV&{9YA4)2=e^Cp8jstBnu%)X>AaFD#UGhW&w9}{i>|8ichQhdQTntj8 zj=5!)8k8CaM<(h|vw~JLOv2G2ltCo@!|tM+iwj5PYd#T#a5UTfTE*V7lCuxS-j)fS zXNDP7r9>vpZFI5iuZIrNmfzkat&sh50#gLx5yQ@$KdW&qcbQ)$HX|!I zGSjh*BL$6(qhEIXt@0WCtVjz;*hUyrtOx%9GpQ+*tB)F)Q9+h4<*ADh!BMA%PdOGC zedx-Yk>;@i+V!SVAuKXH#p2 z3vEBv94WX;8r1PiktDTNqsK6(3h?3+pD==c!6tNOvYT9feDjYX^mv&m zSRUGGNG&T@cnW76D;-o=g*`)mG|Y64^WH~~1Sv|;J?w4fH-CzYa_pr6uu|CvWE^Qq zPl%S}y*;#F|WNU6f=o8}f^ru&vyl0C-f>h|$VRwDrBkjYF{{SS&;G1^89d{>M6c$2A@`_H@YK5ev z9BOpg$1YPxPL$OD05g!Ugv5$Wa89kAWCrE-L9v+{BQa|TwO1w%3(;Djq>TK;2QzX?@ z9$8TJHI=kz+r`h>*t(Bm-|6DRXorAH$|@<@0}9d27KIfy0%vH7aLoNq#$tzk6)h*5 zwH`AJ!duD+I;x`TM{&Y&x=OkllnjH13@OJqjkd^0+0h(&Xgyw{bq=r0sKqHDt)^?J zdg@e)HCFIRH-Hbs@MV-=t1b|f1r9iT=rfyJHGQP7!#xKX@~ksYhvpL;@|K3G1q=TA z!cP=q+C*B|{Re}KW~^TEHwY{BpquPoun2Pylg9}hHr6%izA-&aGeJ7lMO0NNnPPi8 zG52=+z5bEJH(R#iL0XmqM-h*uK8)E5SZpAL^c*Qmhjix|#AuB(XDWKRRzVDyJYd6Q zT`tby2gAP#Ef`ym_?D2Ai3C72XtqjOB@G=%0yfZ-fMu+yfV)FmhGCeMMiv3AOh*w) ziKgG6^5a5mRPo%>+V!^7TN<@Q93?$wp0iD+w1-3yI7SE3mows&^21FOd2R|!8W@FPYm=arGCD|?0gE~js_ck$rj#mi&>QUW*`;Xr5Fbflz`_A}K}Yw?`DQB^6E z5t?ZxRd1P}35qx!g6*_sHMiH}#~RG-ii#BmIO(drVzi(*-Ws?CVkxSwzb#^PNrg*~ zRp4tDu&5Ei1GxR~(hJ+@hC3RBS 
zP{gjz_cpjy(}fQ1vM&=o?Bml$sn2e$#OaQ#Fr}4OKq)Aosg|Zyr$k+zZAEIVth#Ho z+=KAtPiNNnc$I}AZ+&j`ks=ROVW8>OS9z(hoK0~<OoJik6b1MFazoT0Ve!e2UQ_?!iE5yZ=u`hB-76(zU z1BilI_OBrSwFN?`U){Qezyarl8N68tf-=nMe=@3Gss9b8=G(=awrULFDglEk7pb2oC%5f(^{DuP6Oje zZlQITqG^R=cuanpD077xh%sNCtvGyDJ0)`)M)d|Cn7z)wNbTR7PExl>acfZEW99p) zE!JyjJSq}LyVi+ze#H7^oG|b@Q_#pdO^;GYJnH!|MX04%Q~acIMDe;c{{Y8eey#}e z+^*);A#Ja`1bwQ3K%C<+vUys3Nh6QKf_i)whNfdfj7fsj)l-naJ(ek|W(3$928C1~ zr1s(3E$!b>E;fL_Itw`Ch$QP@sredzi$7r5TjyzT{2DxJu66loWW-}>;y}Lbj4VyQ z9`NBFXPjF$-{sPxrEHFmT3=(AlCJeh0Bw<3Fr3>3AbDvha4HI^i%y>`&W#(lViU-c z*6|lSCb7+&v}YEtb1SCg2a3 zxH^5F9$A;xl2rD9aUe(GG(~pdLX=XCQHC_zjOJXP9vXUF5~`*MkNRa@RbA>OAa~qQ zZ63kEp7Uo+ph5}N(s_6Wm;w~MuZYx<6Re< zWp`JJUX!Q{MAdIU$x37t&%dss70k3!d31Qn+dLEERYAr_Tih2I24sBTyY0%QgmD; zXdnm!VHi{DVz^Y$(=7zbu+xP`ifGAiFdbQ~8NU>+?UXncl)z8ElOtNeLWm_p*eevW zQ({buRV1}l0?7o`ZX$;JwXsF}xKW*wwaO%<{ApfV3RB|(Q_)ykCuUP6M3_8(F&tpp znWQon3VZH#sZ^wL#asR~cskGhlzm~hH?<)%zpX(5c1g(09RzWvE-Ux>6c$V_eM6u8|f zL0HW3JZZM7Y&Q@}RD8iEA6A=;kwp_A+FNk^f@(C&{G6(*ky{$mK0Xy{=RA1!389!p>S zTiEe4_Dk<&B5JnENp4gOjg<94mSU$eB>2D~2_8ll3bL^s{p)LPYx0TI3_$4PS839a zp*W3|E~gg6qkrYtRv45to5IOBS5}bs8@7?(m3HkbUX%!mvJ|u<#AbHXn*3-`OXpTj zmD5Ck$A>`2ZOPXA@Acb=F7MjaF*8*SsaQ!6aHvscYP?pUB$K8e5BWNA3}CAEHXsjZ z;sI)3RO*4}PCa^A8bWq#rR^)J|4;CT9+YIQYc0ZeZKR6RlW9yvBT743hHqv6jc$!^Fe4g_th8_QIb zlmk6@)05$FVj&imnwAHU?kqROCfoM=Pv3;anCLUHBzFCwyAn^gN0Q&Rnei6k}-Bvv5GiVV{*bLrqIi z`)R5M=)(C@!3hl>y6C_Q{e0U?QX5i&NLQTFqS1N8fM5=)$2!YTP-*HCTF98)B$H%J zK|R>+?e%dHb-W6EG@&+%5|;rnvb(QpbZFv<&zlkyc;aMaQ1&9$Hoqzv)~KaHNI{Ts z>G@E1iDDAg;xggUQX)c%x0T3Idv{U}r`xYQ&>>P2R!il@1ka@>-ZM!g%Oz~3816`e z$K`9L-A@l&Z&GytJ(RV!RF%dekl6lRqs1g>q`<`6{qX*3L=F9g$mK}`9bW0Bjq6fj z7@68l0(j?3>e_QrLnKtOC_Ysx`_f$eSHBuai7TrrWmJDTLB7-s4VZ)9he&Bkvs?hBowK%! zJJ4R3I!_LY3$pzSV7SXJB2F6B2u>WcSw-9x*5KPy!0u0OuKxf^flQ7&DSm2zTktAl zqir(KwljlN&yUhjRMs{ZP`*^hbd>2X>+LK}*Wqq04mobUX(t2eNz|0Qkd*)jzL&j! 
zt{q3|u2j#}HEA|Effm9s3^5s{jcKH7?kgtktPYnJzYe$Ik$sn@>HsOgk3KZs?;x=G zXaYgTdOE0u?50VlD9Vx`AtiMIKqpH9+v(&L+<+&A58hM^MPiL5h`18t6A~@4z2*VzVz_GMo+HcaZ+TnZ`?mW`UL6p~_T13S+Ocp2J(S<8y zKjJ@4xmOfaKpbj{XmO}Gg>%3V#Y-h!&Co=)0BQTi{L{B~tE!>K@_8)N1rh`9w0`$(J(_bw9Wq2!h(;r)-ClRxyM#vM zTsg2e>`jfh;3+YUQ%nw;;DgLBbs&N5>~HKGsg)R}(oxY^S!0cvO@JD*i;n)yINPZ+ zw`Do0DoKI1xb7{rkw^>nLGU*Ca>`IpfmBkl0;0-_L1t?ki?6p|hb*+r`BU&n@~AQ@ zHs{o#H_)HUiNwgpwuBJ^xOtKmS5Pmasxj^HH|5ZrW2T#J5D7FP<>_nSr=CcvqLwsM zFUU^B0R67NA7{6MJh+y`j6kCMjVi;92@&FysSFJa<`EjLOX_GHU^nagA#l;C90;Zsz0Sj&{&5501&qg6_|xO5SFCU zRobBmDl0t{?FKuKS4T9sz8wg|)fSqH95uoeK%?xlT#cl5JXS82v*HjssW@RNP=koZ zi*HdSqOHYuLdJ6lu#Yv5hbmRGpKK$`I-fJk+!l$%awp$`J#jVw-|FA$I6v zHry}Rso{biAYz?U#8M=*ktTH4EH5-+RHV_?Q`F`9ffC`%H1?^Y4hGxYm1y-B1ai!+ zy*fr7b)}(UTVx9MF+o#_#X!~UF_;s4beN_cE#-;Wg}&vp1|S~#9wKb#<;fC?@7<*!zF!~D6 zMFwAjwz193twRGaTl`pV{hT&o=gz0JMJFNTQPMOt(89JwSPyA7v=9R^}v=I0U|tHJwIBFN1Cf~1vI&DFUf~d#=>fu zqx~A}TaVIAz#9;FlE-<=snXLrQk+g4I?mdD>m@@fQr&k}xsEvS0BjLS>da*xwntbE zbE9mEJBEZP_BF14o)tNnG^>SpHK@{oYQE_{Fqjnc9GRF#f}bs{G|qg+nxb-`E3L)Z z2)7m>-VqJkp0mP}TwT~4R`4f>olS=qN_tO`#mqI!Cg7Cxp;@9fwUwt<1bcY(#rtHX zE}<%pjB)7errNQYT0($VI|X`qFy)#GaYHQ}3IfRRq*0q5>;O@IG)=S41f&o=qMv1# z(1Iiq2cMy(4J|GUPCj;$cdCk<249q`l`2(-i3_6J#10!bXKK%lN>ue9jUpF!%kaWL zJ0wjvRA+1^pUt7c@Q6~TO9J?`Z&qb)uOMxx7J`s&Xn9`d2=oi-Ic2j zpN`KuOxIE3`Gd`@{<$1#SXk3IrBzZz#;7gZUk(FGFD|ZhIPo0>37;xCRg-qMbGb{3Qns8a`xn&uHK{2`tcik9 zK<)C-Vv<%=3r6(8p$x{x?X^g-9vtz>*h1m8D6fc*(vzzG;W&_xk;kT$SXNWV@h?7j zF&U`pDHF_%E52f6kbrda5hDuT%jw5&GHcx_?wbTF0(d5?Yn!rGLe=6J#u(D)3&CZi zjVr71_>EH80K@Gf_UyuIfJeiEJg;}#w{KIb1>T=7x+l$ZJBLn`AcW31``{0qHr3Tb zh)Y#VMVdcCL3+wZsW5IJw{Wqo{uVqUe|eU)DW#PJ&K(g(ZDi>X=t^~Q!ykPojWsL; ztsGc&RIpit)KxhkH+HmRQ|-CqChez!n+Su`J89Q#Uf14hQR)#-QsycbhMtQPucxaL zk}4y^Op^#)Xr!8@z4YujL3x%pZc#s8$l5Anm8_8xKYxW5Z=IHW!KE^V6m`hga1^RJ?Lfx~w8Q{h(sK z_;|eCx6+iMKrf<;m1d^(D`-6wsiU7Sks4A%6U@u*8R_JjNL$)T7B}(NgqKZSx~x-- z&IbxNW4LU1al*MvisIOML?o+(!!tLNx)zBUOACd#(_0=m{#L=m9b9@VNxXD$5m6+@ zu^8)PX>hn?qz`j8BJs7!TUkn8{{Rjvx64rTB`R=(Mc0@8%F>{mJgMSr&ki#sJ|9_v zK%Xod%*3QX1K0ow+pdF@Z!Arz)YF7aVZyeY(7bf5u_B`X09TU|xp>g+XNm{Gt5swQ z6gjkHg2!D4j}(_)TYwygVr%(oMHb)yTX!KOjWdka&Qcy^u0dR2M#@;p(+Ucz5s6aO!G=R9YeA1wx=FuvfHwGe@or-6 zxVZ?oP;ee2VCe>!w6VJ_mAo__iQ7;px?It_%l&7hj!k2y!*D5GnWVMBedLQ9cjCOI z+qcwPVH!pM}@8ZEzV2?$b97NdxR2BK9`W{e)5GX`|5#2TY9BQdEng?(Jr7}RfwOBmR)#9;D~dm97;^0xxb*ScOGf6~ij0WsaRJe##fII{aaXuog5ty-<)k!m*=ePnU+(GybF=B^FD|jk8&d+5oyUapiwv{UC zjD1ZnvaUId;aH4O;TUxV4JFsi#IfmQk9Y&Hc|#QXEyq+;3?Y!H;z`1M>g*^fa<^1R zK!fn3r+{@PxA`qBjgv6y_A-iUnyNavCxy~ARV8H-_t0oRr-+@lbSx6EHt`fAMr6Xs z8W)F1qj#1tOj8r0rLV!V%yiS$EyS1;3Ye4?_f*TR+bwCaVLkii(Yf0m6C^9-&3hSUlBRINh{Qar6P6_jV4Z1bd5+e9W^rwj*hsr>op{GmMs{UbPmmXd#d8b z{{T-EBJDBW1g9P&@TQou@Pz6-^wNTtDAh{eGgE|A?S;ab<5i9w#OQaq9DSUnx5-kq zjyoVybzdzK5JnhdiJ>-!FXajtp*47sd`+0F*j+G6q!t^l#f^zR!NxNSYj#lyR-k`} zqpoRXlW(j_j7HN;_=XoX2boxyWql&ZcgstMMv=s*PL{FL>f$!98QXy!5lYu?Kp|Uv z=`{XrL`1}ArKFUBEHy-#mByawZWTP@z##-A@bc@W?5zq)O7#&K&&ri_-8;;UO%-gD z)QU$QdMnvkliTd!rd4k#)Lw}A(^tVkU<8E&?i8e-R9u#+M+LQK?J$)5B52?xZvw5^RXwc6Zeuj#@Zju!sGZQE`KModJ7bQg! 
zsXkFgZ+A-4Crl4YP^=hllA^3pVKeQt6l6Jbdp7ndU^U^r{hOBUfDS6@J1Gs7%3e53 zdn++C5kmD{2>BFN6W!pt1DJn?B;T07!{#e7~N{mK}6M&!+ zG*L;~$FVB69j(o^r6M(qdU2|_!UPi}gIr<9@H%#paD2Hd%;REJ&eba_kNA+#{WjyR zp$QU&c)Y3QT#_J(0OD%{)co^D=TlYr=q!+Jk87W$)SY;tNl+VRkf?&9MjtA57;lpY zn>Hs|hdS6bBus~55*uBcPlvaRKdG-7K9#LSXGl2o)-Rr_^GsB6$B0JYEJ9=Ni+_vT z_vykUT%{@@C_1>~G`&qMBqXX*Dt;SFj^Y)bLBXh;MeYgak4Wq-Z*Uv^H{o@@UVF_@ zcH>W~^gu`$id&^bI2=qXsA-CI)D9zsBwKEm3RW;YY6VVU zudJHQgNRYdDN!2l8uqyh+khwURdVSlFpzdmG;+{FTZJM7ZKjHvx#%||R4j7TT1FoG z4Q_2_1GgA?#Hlg-Txhv+KGP}a;ptH$uc$ICt5+t^wJC#eVWy{P7CLb-vr2)O^`(tn zIjdBd_)>~D!KzA`BB-yKz%rSmD%L*DHTY@7R`%B+P&Ryg=-0iqQh2DHwDnI(Lr*k9 zlM0?yP#ykmHdABlvYTImt5O#V7g>F281SU?(HARxj zq^=WLq^OFT7}k!Widojpx<=kW?|a((c`1NF3YvJVj1r;iQ7U7pu40uq{oYS~;lqvA zz+c@u0)D$(mrMGL#8b=3 zLIy5RK&;DzRj7^S^OQfnuc{UIb!&s(^ig!R(KFB?|S1{K*# zf?8LtSg4-v<~C;+8rZ4t@Zzf^f=s0P{AruJoB~3m&1S8~=rGE3gAReKXQ;YKETeND zZr|j2NFIMp`6U6T2r=>v5D&IB&et-U`xRi#e%EuKJU6mw_Y^L4w)q5(etAq zwJqFg6xc;1kQXs1loIMBmF`IdSwJ@0c&)3YgQP3MdbUxbEn5i$jAx|dNZeJa;R_j} zg~Ug2l6XjSe)nN(i}-PC2vJs)tmD^(IES0U20Uh%s_|R|+zMKBsvrx}!Fv<`020#r zaK#oE5i^IKcwINEugYnGjw4sSX~&j`EX@%ySRZK`eq2mnWuX|7eQEs;I7})9Gu6>T zcgDn3NW=w6c1mslzOC{0j%+yU#Ka14wAfLTBvN-0%h*mLpDBq_Lr($$z2HRiH9FiQ z3)_xsQ+TAAG~fqj6pNds`9;i0K{TP+wrzN1c+bVPHu`w?=bv0^pKi*lXiH~MgG^Z8 zO!Ed66->A!)UvLi#)Y>?RFDFRG`_yx2y)w?l=z7!q|$rXFba0iQrHGCYDCzi(j{OP zoHRi;0DZT$$J4_P=1Wt603)VqiDdygjR?6SLmO3jd5F<7D?>V@W?eo3MgHq>b&)#k zo`ce8J$|5YNXCUMKP%vQmYh*b=hjsPWHQsNt0$N};UP6P09j6#;F*>hyOgOWM?s?X zp-dfVKH61b_$?kKld&AHhSp0(PV8&3TI@mvfX@UPFj6jej|&ZaICE!|Bw8t?s!Fkw zwu{`lCEallB9Q%U%1~vzhS?jZIAu(gjTBY+k?JYsS?VKUy<}4`IveUVJS*O-Q*D>` zHsev^cGB(Lxj=hcrY1n@^y5{_Qic<|69%8<%R^8qBOtj4L1otZas@()W_BDXdQz1E z9+eupIKxLMa4CGS^wN;PNDpPx$Kl5oqEY}g0y6!bN^e4FNtT*1t4#rg5tHfX2BOdjekSma>sx4{zDZv?O7)SDaR+ z^&PbyspTlm3~JLK1E9F^<=kwFaJbk54V4}yoGF&0VVOz0Ml3ZSe-gQ=T2C6=A`_@! 
zE4c~YuETRSo3u9iIe1P2s8!*b;OdH4Mz$nemC%;uh$1Go=?V3yj~loT_E+e=!1{St zV^$k5t)T<%BD#8+@vNg!-B1uyQct+!7wUfsD(J9(P9GMJ6E zidfZ18lN3Lp8by*FgObG5EUYE<59NS!$QE^Xa(=}e%?_KI9HZ@KnfLbOim0k2c*N~ zr{l9Ip@LoUAoehe(G652oj8KMQSO37DEEvT`z!)hq$gp{>SG(JOAM9$mg zW&vZ6P8cY@xA5WH7fEaYKs@oJ*A|+D8Rw#wG&1HIYPV#Dj|;BSn`I%UY2+kd_n3ws zK^!<`ZY5f@1geYOZI=^_kUD0X4k1B6aW~92qOAq#g_( zAnSm7e@b(^D*FZ$Uvq{bgwS|YLWQ~Zk7N?uJzp!x9!pW7UP+?sZg~y&kp+phFq~rJwD+E+^ zFyXZMs)lz{DTD~BKudxqjjg)Ju^v2n_b*sZDFk$iqAn!u(tWc*<~xSb(c==w1J&Sf zG(u@aJHp-l)&&4I19yL~fxE^E72zZbIBCK_kPSOvtx{rW;>_~XK}?1!4>8PuR2>)< zWw{5$9uuzZ=U1jg4w^CULumvjZyHNh)k@V9(Bkqg8(iS*#A5^Gt6?YI7X8;eb1!BQ T2LtY=?ulB0hR=n6|JVQ7ledC& literal 0 HcmV?d00001 diff --git a/images/method_final.jpg b/images/method_final.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c443960ec9443fafe206fa24f3135db97baf4d9b GIT binary patch literal 284293 zcmeFZ2UJwevNpVj85ok}oCTCDNnpqz!Vr`sIU^uR;*f(RL4_d;2o4}Ya?T)3zqqbEgc*!(S3ft zv$LgxL&`cKQ7Zn!f5x9kB|6j{L{qV27{yF$L zw}0tzpz+6?fha!wt?h5m{;ka^698aGXx_y9t?gkl0Mv#80M+#0+F0KJ0O3;rs2Tc& z9*UpxV&mrKEXK?0>FIgP*2?16Plo=r{L2o1?fH+vFW-0T=lA`UcT9J!9+|s8c4PX< zR12raPVTNu2xoH(D<+;l3-N#X!CzkMm-pb-w0dOaYUPOjDjl?x**aRIr`yre*3H(* zk;&HaAIm`9so8%85=(E)yH zo-X6+U*sN*(Vzcu|GOJTG`fm`u(f9T*(|52#bn{`>hTk!b>im@6Tk%s0WyFZU;tPE zc7O*E07L+BKn9QplmRtB3(yBl01LndZ~$BZPrwfd0>Xd@AO=VPQh;82AnXfuNvkAW{%Dh!J!H#03&S+chbWJm?-s6QmC^ z16hL{LGB47`zxT z3|R~n3@r>}3~LM*3?GcA7*QCnFfuWUFlsPbG5RqkFy=5eF^<3h7!OPVz7FOA!@zQ2 zHLwBr5!e;{1RM@d1ZRRvzzyJT@ECXwyahgkU_nSBOb{N3I7AVm4Y7c@K%PL5kQ7KB zqz2Lf8H3D2b|IIT*D&cYxiG~rl`!=%tuZ|@!!Q#ub1XkXWf$rC2RkBUp=AM^Gq~8p;KggsMT!p>EJHXfm`2+6*0qE<;bT@vs@O z1+W#c4X_=tgRv8_3$UB8N3mD2zvB?$+`tjTQNyvs@y3b9$;N5G8Nyk{`HoAB%Z4k7 ztA%Ta8-)7`w*CaNczB03@_CFUnqBX%H05*HBn60ea!Np6zJ zlUR~GCCMUbCs`x~ld_P?ky?<3kYVLNAwKz zaC&F@WcqgcEd~k(83sFsmkdn|>x`s~QjE5YFBzK|H<-woWSJf_y<+NM+PzM9UGcj6 z^^EI-*S|BfF>5miGnX=dX2E5Fu{>gV$-TY)WiCY=vyI?AO?(*~PLEg5 zU$0-EM&DMy&H!qlV~}HTWGHW#V7P1~WE5sJY0PHqW!!5*WAfPKy(yuoxoM>trkSo; zf!T$*nt7)A(L<$&sE4~2au!Jzo0c+`FD=)tB(37CRv$?|ihHzXEomKZy>26IlW4PL zD`%TxyJx3pmu`1ruV$ZXfAv`Raj65ggPB9UBZ;HEV}}!ilb6$oGnaFi^PG#AOM=U; ztBPw50)#L|)VYzNov9D*?CzoN^Bz(juRTsZbv!G*h`k)WK6rC_zwlo5QSf=|3-Pt^ zZS%YC7vwkZFYBN21oY(LlePesfTsb=f$+e*AlxAPpn+h%;JDz!5Z#c5r}R$)pDu>N zLkq(Q!d%0qo{2w8e-3$W`+V?);EUuJm*JM-eGz;Si4hk_3uIp;f8?vktEfj&gVDm# zX)#zajxm$5(y@7Q#Bn}xi}Cm4YhE(F41al;V4BdIcsnsY2{*|-Y3|j%S9Qs($uY?n zDK;q+ukXArMbV*NpiWXPQ%BQe(n`|l(<9Q)-`KwSm;ukI$-J4Fn1z+)k+qVolimFm z_BKCWBE>s@Z+^~Trl z$=^pe;Wj;Ox@z`mK5B7p*=n_JU2HRLn`zf;AMeoU80=K(?CDbIYVVfqZtjunY3vp2 zt?z^N)%FYb*L)EAP(2_tP(3I-STiItR5vU-+%O_B(ljbP+B$Z3tZQ6pynjM0P%q6^@HO_!==jQ?3KgS z)zvqEK-SXfp%s93uhIU))eIm9;A3MGViVF5lhLx!(NWScvEIB%$HdBX<2El7D?1w( zmnat-I|rYDkN_Wth&a0Zar+7Wt^6DWbPW%Wh=2%ffmsFEud@q?-xd%M`1cNf{VVgc zi2rf_^X=y#82{}-S^#2fEEXsm7+?lK#2^e}jH?cS5ltv2+9yGmpG5yGAPg`B6AOxs zgNujmP)!72fWTl32pAI+f_9QX{^-vE2r(wfb$(eaQVnw`vkRF(P;3S^%boI8a?O!l zR>6m^!8o`SlvLC-H*TWY!+BdsSOg|2CU;j}0j{W|{6I@vM;GnASy);7`l^YRNSDyyn%YU}D7+S)rhySjUN`$osc zCnl#pewvvPtUNX=#l2lbl=?oO*uEOQ^p^|HJSOg=My;v(g^1IRv7eJpi5jX1hK)n zZ8@g!VwOEKT!u_T9{4|YX{&7O%-m;uAFCVatZE2Z6o<3zm)M!|OVkv2VKW5PC>;lpn-!`XrVcl~F05as)PTfGrU4%h4e4bk66CvaS+b33@W zNHm!qng94gD%wgLy&8ipfzX3$Px+~WqWN>+{3}8m3ZA5Oi*=AFu}9UBIFcq2AD(IO z3RX`@z^$@ER(eEucR6Oyx@20`(;ozlr7V{=G2i#Pr{kPDFkJiLt0#)7r$=hZ%vF-w ze2yxsU3EDcinkx_lBUEMdzpG8Egds>`vo}+92D#_#+H9!FHhrgVEM(VXDMKwVM5|= zn_<0hRBJs8lbdXyU{Gx95q0qlA&%Zh%dG>Potv~{HNf^1^+|C!yF2UQCmdO(g4${A znrvxbnp(gTDGVAEA%L&y7ov?((ch~NQx;}hNc=QSnMdJJ!*6nAF zB$ycPX70;)8BB$NEec_HCe&*uMOe`UDL8xVn+wi?_Kz{twMtcoyWKZ!?{?1)jj^0p 
z6$cQsBshm%YV=W@#_THAP8tWjPWkJ%bun}O;BjA1JY7h%V)UhS<}6-HSW9|?XTmqD zK6?!54aG7&6pNB`iE@^Kt(NfX55AmlxFv{;IMk4Tq^==P2K?{2G@2&24i{fos>_1G zD~<|2__1f24ovSO#;4pPRrmo=oEjMX-sL0>mbmt6`=O{Lp5MS&`b+EBBbF$6t;@D? z_cqpg|E!Up^QjRi*yY2hp)abR#b`3G_Dq+j=T6|j^DC;8q+ zF?F|slv)Z^YLq735-7>#5l;7Dmge44;*ien_eT`BB0l=HV>UdN2=-mf@i{6y_l&l1 zrkKY?MJJ1zsA=N#bk`uxb&j;(j}}+cdFN3@U4(o^BIyvLamcJn@|2GwmCeCFJnwE? zfLnX1*JH<@2tBMbY=$a<4tSPCHLF@vbjEIZtx(l-UD8*3!fgT~la)}!yur7O=CTioCYzt5K$&VfXmf8hUJ zf8s%&qvL#q@o2*vHfj%C!S#5flKee}X+Y=my~vMc)2OKCFEGh6!66~%T#l_5FJF3! zr zoUpHnQ##K16^`ZRx9-Fq7Hd;=F9fi{tDG$1zKfd!im$dZITci0R-`~wur6qmr~BMV zLE*e4TtBAY^XAyl7x5vZT~LdO8nCx+obbL#C!$733$luffjr;S)aDGA#x1Ki{G4py z8M;+9O8o#qxPYiT>Vd6cZ^>X)+hk_coN%xPhqGscIsi{r|0}M9V3y{n7PZyHAa_Wmu9m-(>UGG({p#}de zBt}j4UCxso<C~r-V7{bSC z@5LtG5kB{%a9TF$9L7(eHOiU;7;E0_8coQHoHy?|R4ftD3TM0WY7 z&$<*_H={LTr&7j>3~WCorkKlZ6-1w!(vd&Q&cAcyNIY3;2xA%ZoFv2D&`hrQII7yQOT zqy zXb9UoX!?SOhCqoj5h*=DVSHe7URMxL0w<;ZawxewPT6Nr(ehPd+ijuqZPE65v+ z|6gC6@3${#+#V^Rn4`A*U$h)axY4DM)KRdPh-jb2LLkKz)C7bdK4}kP!MfBWpX{g- z`t&4TLv#W!lp+P5==_*If>ME9x*sCOO6+asm*0(0Hx%aNnihC9tBoq@_zyO>Q_cF! zA%6_RofW0uB1ELbn#S4Pr9Rs2~~e)SE(Ol!xvS`6@3O3enIDK$wL{^;e} zuiC2EY95{3WRb8lBzM?pt8uqLDL7bGo=gk>*F}(vtK$pvvv!KG6&b_Sf~Np;-788uVlcmZ1 zF*BS?pzXh{gIv5CdpwD+2v+f1VypQEVw`1H!3;~9t|~6VvH2QC#_~4rvaMuP1v=P= za)r8Qy}AxGV>)da1cw(9?r1R-vu0YH%m`_#H3%XxR&?d#Q$6ld`$M!0dMBIPN>uVc zjyLTsS3!xnlzNsfB9Okp0kZn{U5dOSxGqbC!8M(&556Xv1rQJAaJjb2RE>4_y?#aH zDMhvGD*0><(`LL{)bVn`-50eT9m4b+EJ!KYGVkMd1e4sCN_dai=+SP+u7R4D0UzeG z=ThZu-Ht=c!mB@Xg-~kmStoR)BxQ6`$O9QTxazt}mxl{y2~?6GQuuOII2X+TUlvh}bFQKA-AN z+kM;xl}bNPR-2~;1$5;s^n87FqN`zeI-_MY+9Wpd|oqiakpH*ID&$vvm`(LMmzCLj4vW2B+BQo zu)5Rql!xvS*0ihF6Szq-n1wOQu5Lmo8oh6nOgXW?+1nys89SCCd2TtuIpC-AjZxe% zb(?%}{fkJ|yX%%%H>Da&jR;=_2mlP#&G@~}86zPaYg)mQV&Rj5$Qm9cesm0_{%4+J z{I@0Z|K=4YKW}4lTG=VJ4Ll~PCfh>IM!gCSUpi8Ip+wE)z_8_J3!@Kc5M_!&g>1l9 z=9M3OBIW23PknE;VInkEHGA#STr#tLZ%DMqM{(E(`+0pFy1);}Gj)!Aq7s@pIxKwhBQda7C=C9o5c$$zM_$u`#+Pw2gE-df!X7RkCj(XQ>N{3(|G4d_d3+;9 zFd$8fPhevgU#8lYi8gzQRv($DbaBkrnDis?1>P0Y(c$J*B z04N25M`sd*Q6 zgbqCC=&Y#thH~WZo4B~lZ4U4yb1#dEQbGG4lklO5XjV<-IKC7eck|>ofj1r?7lb#X zu&KCOw}(nNO&G2Kyn2(#nXD1^B4ggp^Cv}j3b#Ejsj|c-jdE>Jd;hKgV2x}vv(0)s^nv=m$ z%`Wo9X$5WDWd;dW)`!a%qa^Kpx)o~10zZbZoJRAELDN1Rw;bmoP&LD8w$d!ADLfV( zjNHjuth-;C&kw(MxOK^%9~I;AV$I0La|keY!ky$$m(rn5BbxceUow~F9=>2w&%+4% zh)>w_ATP7%pQj@mv${-4b~2iLedU%}@q3iCAAZP%T(kxPQT#)QDX`)E*J#J;nGkC? 
zt(vyJ3oa5uuAo+!C>0o_iWt}BD%ZmRC#Msx&y^fKa{~X z9^T)Fs+wn;Y zA7t8@v@%%%`~&Ze6_4AxShY0W&zze#S=4=t-!ll>M`eoaVSt;eGZe z=kjuY$&FuEP+A>+pj77AIsG_}TUq}(NR~A|5ZFK%NnQaRVX+B3%&c+JllUkU67#{~ zs?^6F{q0P91}5y&{qTOLQ}ll=rI@#U`z8tmM~a--aSz=(=h>b4nK zVq*{dXFXuO45{|7(>o2CpA*bz@n`g$Dfr^AC;;H~$lSD5^88#c;l7SAGt0+>&B68E zFcZ}GRr0#$<$2lwzz58kc@&ro(sG^0@2L0_C=9hkatVG`R(hz`2 zg@M3Bi}k*v-ZQm*2xwj3MYhDt2t+J!6W-U{Xp+4MaispHAa*q1QKel_GU-z)U_`Mg zmp=C7hU_hh1%YSFwDBvZh>&35kT&{gW6NFKU&&LBxvquH7J@wN*^mk=cc;f4(}I4G zt-At<<{Qjh#TJK`e03$==aP$F4^+MkGYL#B9z*eUv*pV0dEZ8H^gB?UPvR@3xpSDlh$NkK$9%AbpPcyE$Et(+DrHx2EXPP=)si&Y7c($hy9uw`*#eD^4e^v-8JoZf!ybQAT{@3)y$N; zr2rX7;Ov4lBTbn;HJ+PEmC|Xhzn{*<$x^}?CvdUfhyiuQS>sPv$}CKtZm&a&IrG01 z^TFm}9wE@d%}LhO-cSity2WXT!EN=?ht-410eHsiTPBloW0honq+=|=@i(ai$!4U} z?N1P0msNUI;aZdC&nzW;SpN1oIsGK6Z;h~^<-NR#5zLECM{USO=!!-Z0sqmu zRzaV5X`SD;qbGW~$@SW&ey%^B3tlm*Sf$%TMmsiLR*xE($or*P-lJA?_OA?C^Ir2V zw$K##l$f0?M5dvIPCoxoeEH=M{8VEa_m;V_zZ55*^E@XGz@KLyyf>$1o1^ z=zj2S@56`T#IRN>^C?#qjytYo!MO0WZ1X7o7!}M-^H82)W}x5=KxPKO0MIaQex|K$ zrhcbgU57@&(yiGT(Z~4v-K^i!a*7883^P2hYs<2hw*an6Y;VYbbFMhG7s5&Mk=5@f z`Z&5ynYfc%SY5T2tWiN{W)r0Zr&oZ=o?wxBy}lm*{*q?#{`dXtMHChr-7yS!SG+E0 zyx^<5U&7KgiYN~p`nIHT@^WeJ=#C1nC>Af?X|f-jE-1b)@rzEBVt-)*r_Yi*bP60J zGbrG(*?-~b`sLalW>Z1BtNRQS<_oh|x{FNyZSNFZ>RsV^&^!$-A%DjixoE!usspWFqfPd4soOq94=e-%$+$?+iw3FO0a8H5VCc59VY zh<@jf3HFXN{;a?s8Kkzrl?l_T_DL3-nvpXwCE7G$<>qOv;H&_D z{HW-Y7bW5>Nt2Q_?#^mFYQH-^?E*Yx5OGPt_=E2X=t=((%D7v01*|{2L_fU6S^ua? zx#J73CnB$W%;B(e1{V`rB9``EY3XQ3$3E%`2z_=1+>_pCy8_JN7u?e4=2t+h^mp8W zk-T%tA=2{9x}8(KfW!7HfH~lpET9byav0A@uYj_wk3(g?im8$0-GA5m%X=_%`G-v3 z-Lfh0k;?i}__%U=rWw80M-^4akl28Gc z&PGLftYQzWofNs4*V=uDYu4@E*KIDn68_qONJi>Gkv!NSmP^XMxOAvb(ZQ=FJR0yx zKNbXxwulRgOBj^1Atvqij}tYx`H|dq&hau1Rlv+r&9mk5({F5${$nVgq7azsA!?gpMkVM{8 zl7(Q6UG3Dln-U!z@d?Y`(x2J(Tgg+=VGZHstAHi7%eLrW=5tXs+k^BaHO6|%P?(B; zas=CVD)V0OH{mCaSc*_^lb}I^X-9TUXnnM!PnBO(^Jj@bsEN`c-5&gxS=MUWbNIOv zZByJr%^TJt+BKKVm};1_ya!)-p{emHEh5)$a=OAPs6A+k9?V}UKQq|}-)vi8J5 zp9(m%Qu}u^9YHu3FAj_e5*EL2l0fQ`Wx&_koeB3qXK@4p27_Mk;kqefHMPw8ZQ5=M zgLg%6{&!};4;;x5!cF8}P#RZ8iMM>`r<8oQvcGy?TxsKe_STQEqXo)X&Am(rPo7_t zT|@XpBTJovw3&r^zb9Wq?j>$^*9ht>H!{oR#eO!mtC_a>u@&A34tD*>Mm;Smoq*77 zk`N%O8Cm-2l%^qCw53%M+u=*|$r1wx;G3#y1Hr0@-d2f|ogG7uCmbPa#^r{g3`4!= zarKh+l*awV&)sSF>24Q4FgeS0El`BH!c_%`oUs|&MNf)-32~?OQ$I*_wR}JAR(~D9^eiO+UA|0-~>gW1%OL^xcC|1>ws9zYYPvV3R&XkIaa+3O2`O-{{sh@=A1n zeJ&&OH2(X;2aHLzxQ61D!$E73wgs_{E!oM{@?_d%=y3O6p0e?3j=XTnGuj}LCyT_D z*`Xp9zvWSbN5UWH(y|5xqvDHq-*ryuibPr=xD;7+ED!L+9gXO)L|ZspUu6z3OZ zb=T76`p46ug7$#wMDfH}UR zJLztMSDVeAD)_<=cq~PMMK*i4H7b`Osh9t7E(H>VZ!UDpEqs=+BDE`FTFthsfT{Fl_o*Yr1{#LCjHE9kB(c%K3H3M_f8338BAgys;;mV(&@%PGtw23+it`E zY7tZuCu`9kqXy$v+CHRb2dD0R$tw1GGO$u@gO&#V`iNdB70%UFOy9Y0vx>cgs$eN% zI}w`1WR@Y8p0?#c0e(beEXp#z`)0|7<`ocw+5My93K+@PYleQCFCTf7sH-L4@VK%} zn_4lj7lu9_|*W=t05(C#5cVh|}34(+~NGf=8>)i3!-=MSEK7r2bb5ddtg@NBU3l_MBrj! 
ziBrT&_igUx_1!6(R=5IQ?E|K-fneoQqlYD@lpO)P@$0dJ-2%M5Y~TB^)Z`iYuGh_3 z)8~Qzd4}cxu>V;~v-h_9I4NR2SX(o;-+=+UhqKctm_WAIRg!=DePW}1XJUon=^&+f z5ce^%F-nF#`S|Q*l)of9_>=w1(}avKry=+Dc|^@QD{pPTdkOA_K_OwSOfG3}bajoU zq8hCkLyAKjzH+~NYKN%hLY7N~utep;sFnhuUJ4&h)#Sstx)eXiMkJb%ImMJ=MqxcM zlN20It6n~7cuIQOd9%vAykwp>YmRP9UzDoH;vHL-Wt!}2({6KUbIqFwjS0qW&AyBg z(;u2#Z|2}_mPm>-ml*z@S+4-1JBgQqkwlo<^_2j73&1S{KxNp$Gxmc`!qZ*nZi2)I z+23V**Va12#9Y;%m>L`!99#jB0sA-+mre0kfPAkEJv5NFxynsNUwFGimLF}Q3vNmr z(tE7Vz*dXh$onq@Lg$AaLNKzQ!C2^SN6D_1Lx?v-OMY`MvvqIiBKXl25E!u5S-kOU zt?=z$_jvLJch)@N8CK^N0KNi-r)#(v+Zgw+MO;F!fQ;(LI+dsC`NW^lA))5~v{90!(U1tbxHq#=t?e{o*tT>Z%7|f zlsuD1FVXA)o5+B3?6+!*PlBU6`l~eL9c_NMYI&_jmxR?WyyAWf>7i0=F((rfSNumjdyN6fr8kQTLVb>E5XI!62mK%GgcnP%B!`!J(q8 z<_y7$j(6%!>)36Na>nvrIyB)%Tj2X&fA|bs0h!Zvra_YxtRfGH_r(?7EV;{|)o7RQ zNq_-x7^%=JB<{^K8&lEMqkPmJOdvogaju)_M>Kvs;POa?zORD)lDKNyVk+2^*HLC^ zmuqsfQwler$H4UbDBuS*J>YBnFJ)`R!K5|I~KxfUfsAZ=>jCDWyy-Vj?@{ zP#N7apjp9gZm5LCIA>EvZfily1DQ6J_AKtjjBn`m@Tg=6 ziYK5wr8DVyRz0zb$KAx86$oyTI+G~7@SE9e#IPCaNNPkLuwF}d@Vb}eE7#K*%yykD z5X143nLp7nnU(M*?pza3cFM$ZVltSNHS|^KK<&zP?Lp9-`Mx)&fDeQ34-37JyCH)~ zl6MO7X14}TG<4rFwC}-yl$PC+2p3<^o0S1n`Z<6+buT{?#3H_3*t-RS7(=iF>)SjQ z<)PP+<}BLU)NCLHptpC8)9yDrkY#>|pCa5m$9T?f@AGTTZr#u2=w<*O`>EWeYuOsgZs6r z$19hV;~cvDYIVC1$T^fXd3uSBfc+Spu0*O2OwB@@klOr=yGTl7ip~`a5nCHn@xqUK zAp>HbNUi(^V$H11AN;kAwOe$M3>lBg*%D_cYBSf z73{D+KJ&D?9=0_5XNlt%YW271F4stR=( z!%Tt$NW?eTGwUP&Wjz1qc)u=^^iqyeT0h{>tmMltl_Gm5MoqS1^q_w_+W4n>@VVD^ zhIO1Q$s%p9My&)mv2T?P87%m5N_?L2nw~+t*G5Tj_qxc`byNF`z>)t$&%*|H-ER-umPBGs?tMF}(uPqc1`!uK;3G8Qs3>BklhX z@_FlaMGC9f7FV6?9AziQTU&-eZCk0Bf(i4O7}eC_GKoU1n0l2MS`1o@ z3C_}?WuUi?QQ_S$yeLsm;C$R`jU9 zgsB(7=z^-uO6EL0=+gBv10@bRri;>vnP{FQ;wh{OeAwtE`qUH6oy@H|*3z?>#}n1w zo`TOnTdATKcqTL3Vu3JowCG*>p^62};f`en3QFA1qvyI`SEqwL(zdL(xZ@!1v#azN z5KrTZKIP4NEh(i^(<35a=8DZlAp@fesVfsbr1KQM(Dv}Na`_QqpC})DZ9H6ILR)md zmGi48OYI##SCQ?l;{RyF*fH#Ww@vhaQQP%~s<3lGvb>T=WMQ5aOq3eRl9$^~6pDe5 zKGoyG?X?)E#QHrx{!98_k_5W`QaILzVd|qyZIy^x(ZSgtqljPy94aUizTTg7_}_J$ z3)(HckMsHxgmx||Y72j@l_YgDYFh7BL_d{YqNTh*TO&EUk3|940X@RJr;L|j6Xga& z3s17)mS{IhvF*=B=HKg$@e7A#N_h%o@ywmimdL>_oUkPkR@5<9_@{J{TK(b(EL22 zBKK}&p(bqf`eWeP!SV4>X@$8fuh+&hkAT{v?@*` zRD;4LKXCRbGW7)p!QuhS;xB%OF+ZLMdma3#MI!pcgU2M2B;jb^ z?rAzYC`iu>*x3)*)Arf;#QV_3l3Jgv5I;7i9b$*k3Qe#b@_bC+wB z_*xXyvtEN(0)ht^*z^j~Uelo{!8*{}7j%m``m(S=ua)|p=Q>1&HUgOn1?4X}$z_0K zG7^A}wC?aos=9hU`V4dW3Ye;rKF1iw_8%b#wqp%e;>t^NaMeUT(WwO zYse#LLjS+If-W>{7h$)apwkO$t;X+vt?5mEk;+!fKo9%(kzbr_qds0dX) zOD=ugr_nMuC&Frh3SEkRALCW*glD5P-;cc=CIU)gdB>o)3T7$m85{oJhl#CMzw zl&JD$xf+h=UYcKopK`?MGC}bbCK&n86qiWe ze?L6ju2`HjV8H}ts3@()4YB`WR)l&TtavKOS`zLDt|B{&?VMDifHo2 z)ZxdHWn6~)XV}<`ToqB-X(5wuCJ%k@i_6ysvnQw<_BfNdYt2U&UVF225JP~_zN880 z!TowU9k4c)`}W!J`dVgq?B97y|0Y9J3_|Ew4;}B7WL*KWc57~?r~A(?>(B|!RSjyww!=Z%4X0xe8xayvT`}x6u6Htcrn*4;oUo72XO=3_ti@8j;wx%@QtP{sr7~ zMrRqUEyD&5Ta6PC4(Ul>_>y)a=BoO(96!VvA7lQ~xWkia4&MzwfZ){1UE9uW!x!~k zxTAT$r>u2wNtk-D3IbJn`w z-Om~}h=cwCM02M&&e1io+hM}7p=~JUMA*kg7_c{BeQ{&-Th-U9vposl#v|2-_fTz`gwEmVKd$m(NzkYE z`%oC1{RqwyGNjySVyYZPk!mxzpHNaYx-m~(d-Fxr4^#QkH-=tpK`cmfE#>qJUDhMk zeKod}@y6y5HT46<@_bl-nMPV&FH0I%OY=O{g!Jm9)A#)=;Nred((lQ23h%SgELKj* zcNhM4I66sYC;Ym6On;NYCxk4_X30$Osk=o~atm9OP2(i@;W5tbk**GFf{k2y!C*L@ zF3OL4Wast|1$YWI%LB*uTgWW&;YC&gudIc%Xzx5b{zkrD<_LG{yN{^9tWpC91Mp*G z!jkU<0UlFLL4#*b7GvBybK_)<0^H#Q0!S+B5&@jmltSL;RclmPvu#D$!T5RDEWy}p z!CYyl`sPyUxr*@$Bjt4#&opysiL4<6uu{oA%4hD>N3=;Qw#fzyn%FOTnCh8=p-dxT zeQVks6WOa)^l2#}ua(3ZW^P;%wXar%v$Tl!19gwhz76uZ@^7je+SgE)u!MW1zJ_VV zQlaqEKtswaM>9Apy9F)fj9xoRw|0FsA{rra|bG%0x$Zqk=WoUai%Hxsr%JNa+e@77WF8CM{`PZ 
zD3(*HmU{oQDfOUWunS6;aL-<)VZ3Qz>F&P60b)5zN^p4*2Bq#~#BqC9EV3=-r^c|u z#qdzTn&FX4j39FS3a}nMe>N%lV`q8ntoRCm`0b>0+agLn>WfC{FDMvUncUSB5P-2s zat{4Y2~Fe{f8bu8=%p#XNtkGC6j0bAV3F|#)7uX>{ayoaKNL9ETGvQPo!xfNNs`H# zdGyqTc`VoL1p-fy2q4K|MgLnS2BkvXq+Qz$23rcDKYp(^Ma_!n^h$c>C!W`uHzPiC zub`HgtJpTLN~}Wn*aZc6tyFa^yKy|U)qK#9xN{$p zGws-Y-?~VD17DLmD>iVDcp^&78b}k(ncb|RP_;<(icO-{jU_`Oqi}|l4@yH*Bv;b& zdJg1R?HoQ@jl8t>p|cDcB2WK4xps>$hrQ}Gj?*?GcXG~c7)JKLbe9?$%liZ?CL0^V zEUyQ(wl29$=ApxGwYOKm(75!;ne=Hv)^F8yZ|GD(NbT(C_2mWH#*=CU_pRv`8jz&E zq>mpnHa&ynj2fWNq$1qO~$s1XrLcwg{_-=z@hmwybbx|A(9*^fW?>=-E@PbNm z33eY>d|5l_CaJ9S_R_+9(eYdjoR+ZK1PqsMRX=A<#L>|G&@bWm=8*_Ej(@VU&FZkw z+)n4^Hv5}RzCCtgW{Y=v zoz6qOm(EMw0vqHkSaxHH48@f8P~Kk8xdN7Oof6*BcP8fB=UewAu+AXQ97~a_)UD{A z;;9~Oy&qhMNCXx+_nBj3V_QYnjZE8&Z*AWHu9i;P{jRV36*9Ry8D)vTC^Uq2#O^e| zpJa<;zv1r0iy!+A{Ct=}rrUtS%I!vEDpcBobA7$!>k5HWf;dAx{ROGqYEigFt9Uy` zozW87M)4umwIA+Lq>4jSUg^II1QKvq(f?C$|2|@x{ORtg_8XgzO2@vjW%<_<1Y{zx}n^Q`8JX=`qt zcedU*%IYKgkdbBV5IK2Txpf}D;-e@9FbuAPmKzMT`<@=|LApWXW{dB@f)@&t=XNRY zat1A?tTs{DoTNO-Vi~}X&9602QqV|UbBTjW=(xF_KWJ5cmzrIg6N0eQkz_c~s~r2t zItHgDMu``o+y-&zTr@v@ZO!BGveYO0_Xad4Q(F7oJbO#AX7az;%~b;h{ty$jugj;&8^+0Q%XEO!Zfw2~U$mb{LZ(u($QSrN-| zK`yddQq`&A6=C7lRjjA5LFc|$-&0FbBEb-cJ_RMAodFC|; z>T;1cDtYQs_9pjRukriaUoV30@7G$q`;Y2sSM(coEAgGTdV2mOmH9rcLf3p0y)x>t zEQ$!W(0I~eLO*Uj?r6-DV`cwBk(rGtcqtg0E<}NrD?^u5CqDJ1hmReB1M5HV)wC9V zBX`z65=~FxeyPRHTeKc?-##xbI*P4ISd`fJONh^ZR=B3{>C3d0cHQ&1ht2>24r^=+ zIZUD^(o5SlwcfC1`f1KYwB7p&9=BQV{cPXLX#NR7q%}1QY=FYxZOPkoX03Qf!R3Y~ zL6kLQ5mv&uFq!0@@Bg~z#nQ~mYQKV?b$^#Lfnc~4$-1Y3)3o1dn&cN9i4My_i4TiR z*3|izLvN1=ixA(x_U9fLUSgkp{k@B({n|dSR9%UbtI!(%DIW|b5hXSigFc?|TQxo8 z1q}MFvRdg1diqzajL2C8MCllOG$Q)fHQL6CslRAePo@|fu?R1X_+FCuqr;dCbQsf% z4!*Se`%iBl5#ElViATP}q~1ivi7qta{SWTm11hR5TNmDBK_n;$NK{agqU210N>p+Z zBq%vZ&ansrN>q@HfJ6bIBqs@yL2{HNp~y(iP?T?RPM>b=d-`<$iz=i@kU4 zU3-PO=A7UB=3HpfFjce{hz8c(22AOZ98cAcDM|zBGKS`qI;;(>hGF5{KEyP`haa*6 z*amZQV^ys;U=3zw?pj z80;@lJ}U^&k=IsmRvuk`FIW;)xOTv=taUX~LruL!t4)c^R#3z#cX(_qVD-gpGP6lH zIg=DK7PF5-f3e7)iKw2(^aTR+mq&iGPol{^JBwRZa1Ct*8EGv=Zs^OEUCuOD=nwl2 ztTE)9vB>{@##rBJ%7Yd9M~eq_v5E`jD3V3bVPPsy$!zgvx&?A$CK6+D2^ta!5mHUD zA&TflYu!W_>$Y)Fq%S$HRd6|Yi}+vkm3zvlF+gKn+Q-JxpUbU@*1jq5AqyN@gckCs zV*1_@C~((tYmQ4uSxxkYG-*42C*^o*VgPE!GDX!xSna$P+vp3T_qwjNWFR>{OSvPN z!d(#;H#dTuD0!((Jijn{UIX*0IXU_BjRb+Op)-?e$ShkK zWN^%Xl`#bOVRrgAiz}fvgj_OdlTyl5fi9IT;vze`#0_8YcTa`xC-8-ZHPQ%03w-q# zV6bb}{ZC9W75&eIVjePSB3Pe3o)2sx`i{h7q${1(y9PXJv2^=oD{gijISEohYzih4 z@wqFjD8rnB_?a&rtm@CB#+&dcKZkVOq-G+4WA*50?Ag1U_k}WdE)4DWSx>der4O#H zm5!euA#D~>=t^Rf301&VgcdVpJ#w{|D|Y5(4ov9`b4@9`xuon8wt51ci@to2yp~5E z;k^L8;+L8>+NBcZ9MR417OcXx5|`ZQj$DGeQBBp_t>On(!0dLhrC5OozqL%-Hd!;h&o znk`{3k6+XNWugk3*3!Pa2Sp$y(tz`gq-uCY8&)`5XW`8i<$B{M64*v0t_=QzSDeTQ zb4=4m0gs>3FziK(GP!~fl7UDdkE`P&>u==mC?9ApQc;|BaMVC+Fx{u4-tSx+!=?n@ zdmfC>p4VjyG}ia;;wP))6;4#ghl6U%3xtMKzPjJ-KQyvQP%-)^s{t+piNr2%Gaw=m ztvjY}Nx+$G+h&4up|5zwfXnsy39g5qKtZ}>pVjVpr*dE0yusv*0|O7h#2Kc92;`WI4Ku|Qw0aumB4G1?2TV3_Jqx{f8v3ib9spo{a5 zc4d7TE_3=ug{E*S;N{daFI}*7HAG7|*X^wlqkT$yEa`9uotB>S^PdboxyOc5)*Kt z>d3>2CdGM9MO+xIb~k%)2idqI~}q!+i{OaR|c)6ivnhb4^)=u^UC$Z&7=-uI5`o zNYx!FL7e)#ckYNkI$lgB7Q9h|e_a+I@>83hUc`f@k7-D}pI<@ZHCBGNHjU}n%I`-C zO1>b$Si~YseG*mTLlBO!q9~bq=se!oSJYgYAa!%&>LlYu+4g6INLc3{Q-U)!+ln_{ zzthXHSyvaC!}ckLC5Q$WE46^VMT}&?G4E82(qJt1(wbdOx71+^*Q3{sba4ok54TAU z^-tVa`jMTZ`?CB<>$NE^8dKZWiG$U$we_yV@P$@NCpY+6Gie~VNqMwueMfz(~%4|N780Ate?ns9uqC*-}<{X~g zkKNtDEIB0q9Iyb#7{334(p2ElQn$5Kw>Jo1x}{45@NGIJ5TKz16bn+hl$KK*`j@e0 zX~7e{EPo3DrsrE~9OG2$D1234!c*~Jp#=u;Y7!*x@a^K(P+f@2_`~2xzVBC$qF*hB 
[GIT binary patch data omitted: base85-encoded literal/delta lines for the binary files listed in the diffstat above (compiled `__pycache__/*.pyc` caches and JPEG images). This payload is not human-readable and carries no textual content; apply the original patch file to reproduce the binaries.]
z#E?h6l8{@p0sl?XB$iy(+nFJcAll`}{=z&HyqX49v`2R@fQ~&_g@V>DRU%Ja{4(^9MIz8*M5Z{HB-TYZ=`0$9@M`}~6_qkwPYpJN8F^u8;i6zH07 zG@hH`!I`lSnsHqbGy!T+=Km|BrQe2ryz(4dbDCM-JMZW5B)O#@5&n7N5!16**E4ef z+q<>B9H$KC)G9RSK7Q=jcXbu%FcX~Ns<`O!WJmI({&c3vSlCF2$0Sv_Vqk#9s<$^A z8rHihDB37EPOzocP$u{sxH03(!#FW_6fDEB$ZcwXhHa@L*hXH(GRlTSCv9&> zl`n*8O3Oj@u>-)riA+QnMdh}!GOyh}w?522A9wZrz)5gz{}}4`PEv_K^D}MCM=jaJ zyUl(1pmLyW#SHxel}FZbt#t2!2nJ~3HB8CaeJ@47$k##UED`Dj77ducK~9fkJi}@b zk-5qnjwSOtUn;JW=Yc?kT*`kA`QL#!64Oa4dwRc{`)Dd=x5iyazH*8|+(9mtex{i# zQeIyCv7sgwSeX!8@Pu5ZUvB;#a61d+uxtSeg3)nv5%>G%Mc3OFVG{6sP9ltqgh!mDAprqJB{aR4hX#w7LGEoV|YpGc!)MleaTU*v)pbTqw5`G}%@N$d+baEP z8i*6AE}$RuAFNG!jwovGyg>=Zcl}SvD$h^=yT2&OpGho;1^CZ3%W1!|Ca2e^Y~`+F z2l$)NnL~7^-RY&WDkl*Mwy+&#cfKXOU5sDzpYFQU$hgm_HUvU4?R#h3RIL41?FMFU zjI4cu)5HDzSyB8mIk4pZuVKFbbAT?Ope?B135=~7%XJPOV{PVaSB-2&XPtP^j6ubUHzj<=DHVC_ldHb z{d=NJsm%v$6c3+i!V7tfK;l1=as!<;owXGnO8bdc7RvDDI1ZxFypL2WNi{`kR;)}a zzbi-5E|ln2oigWnJNVG@edID+HG&XHpM-nt@)M2`L%VwLTLsh~KuzhDxXH_!%moor z)K$z}dsjDQ+U@%KLDZ+gFIY0)-pl$9eb<8G0PrI;YrwD^a%usHuZRY{()&XTc{lEKbw-{fmG7t;^e@yU2bvXnX#vhtL;2KUtrUa$~8Y>JOICN;uEMvm!R7rgtSB z9Bv~t;f&43L?KS|G2@VWn33i!^6(Xh6sp-BJ!ngI*xUCV9VZA`R$83(DQ9FPric`M zTC^9YnCvvLNVX!>kKhC?$qpv~N`FWa9J0fKBCLg+h0=lODd>U7@1~UWqrGA3+X}H6 zTk@#h;EnH>$4!gS8)mmYXUO;8ptYDk%$JItGhcnV53&Cdb3Fj7v81~58+392Xg2~j zrOP{;m;ei{zD)fxJ7?=6LL{tJl|ctp<-|`P)n+A$X>=#3=~5%x{BgHHH>;Trd?^9| zesw4u^||utXw&j_kE^MU$cAyzm5!KR1m$S_6!0Qd59{YQihK;)2sU36{b*e4j^F$x zDu4*pbx8z3LkI$-6vg(wii%U_hlGksNPiAqUFj81E|)+2-oMM7)gD908oAtndZ9Mp zEu*RO=Ab8DCGiFtHn(%#RvAMfBlYM?^a>ODE%>Zs_OeN%$|Fnf_A8JiYF-90iRN_l z*xvX7w6*ynXM6F=qlc+A)h~(pv~!VD&<3x^x#6@PBLegiU+5+51EDk@Dap2PyiR~5 z6YvG0I_fB`4grSPXPrQvHV(fw`MT)N^()2qZWwwVmJ^ii*D^4j3G~^u05L7qUKN|j zlI|TK9=yWUosV~4(T1{Zz^M?gQyY$gvnd%^H}1Xx0$lMQ$`A7?FRx<|SJns))zqrP zUe*WeuicF9ck=&`_ts%mZfn2rL{JbF3_ws|qM($BbTbJ70qF(->5}f82qH>1NF&|S zElPKHcX!8RdIq}I-nicV?zPW%-tYYJeb=QI&zMh)9(Voj`^M~$9n)!x+QYQ|lJf&n z@dMIqqU%q%0;Xy3k5=^RY!BoSd=G~nv;$^sys`@cGsz_FeCWCC6L*$k_-^1Ue!zUr zd7~tLKnSuQJk@i&Bd=&pMDhc&b%)b#Rf(M27P{X7Lq3k>w9Kp){?$`e6^U!JmsZr^ z!!U&ITdip;5YROpz6oeeLq=S}Gdt@86;|!H~D>#Qy$#hYXUaNueVO6)F2kOL9B1`uo-C78EXxuS;HT1yn&ZbB6M@b8_ArbRm$P!VhHQ( zb+U72(;pD(jON*KI4TSD7!!tkSX8HWrxD={*ls7#41&a_ttj&Y(37z9&lRr8e$Ay; zUY)Z6@H-Q0Z_+30W$)hUq1a)j#-k3DQ%&!|@a$}BzeJFhYhDXG?rv^|*!VhAJBn`m zeaaSj;HJ94OJZ!^n=5yzKz}Y%k9nY>-@dU9j<>>o?5J!rr|>5E)h@$>(zyhMj@9IRP@M{3DJh7z4e9*^sgU*DH1U6_1XzvT1i z4xtPcc_=t}))*&opg0 z5}ydlLPyQX$~Vb&>HGMzH-nhi<9?lTVgL(85o9*t-jl#M{sGorD{t zCTtya9Tcd(y^LabG!_d3c5g#G=&lYru;|~LgKC_zn z;HS^McD3Uq{`%gVMsQM^Z|g-61?yvGEQaq5yqr4}m+HYO=cDZ}uQDH9knOE9!`c-l z8z|dK+a97z(SZ|qT2=A>>f@?QW$h|8+basX2R)XiR_-mVG@<#EOKd4MkIsJuc$eoPQM8xV=k4dUGmM{g(gQpt92! 
z=L8asxsC!BnMSk?&8wF~h=olnmhLT-p?j_)j{Wb4@{D;2Te_my$jzoV<3nQITp6j) zgV?**b)jiWzvx&>7AL~DBH3<4CFMN5iIHYK+WvTkVzV+@RXCr?qv^H9xKm9+-_x-# zBepjau%=asAg_<8Rh{7yN{`UQ2IBt&@|ZIit+^_8$~J$>R?e);1bT_rP=az;m{3XA2+=p6S+l!`Nw@^5WTHF>Ara)yc0uBn4h&v?;l6) zoraNGmJ^Ui))RWp??=XY7T(}G5M}(}j1FsIkoEO^kB+Dk1!rCyl1YW!`paldNqf^e z^45S0dzI&zHlUnS6xB_#3NEz$LLI@c>89TBgCvDrZz`X{=2z5Df%vu^`6bZK6mtI) z25|;FiLbzFA!Z<1Fy~f+0J{Ua3yXU z{^wiaryRA{K(=-uQcVKt;?uu8y2bD=jb<7i7(2l}fCFaY&)Tnldb{-M) zW9DYEo#geX^y;6a&%mv*hRbD;Tl?5P=e>c&rcm4CFWPGn;F z1M&u#EN7@yX%OT--eu;7!Ci}OWGg7BZE7YhiOQumtykX2d}YLL7ilwg zI;00N<8r-JjyYpRi)-gtJ5Ym>qnI;eu{5{4=L-)tQnEV~>eErwjAkf<-0-89K+Slo zRFvM%93A5ataM2)E;7vE8s+V*nDnf0q0$56EFDxbXjZJ z;6q2kf<(CiX^HW58v~#U?6L+N8_(bV|DQ8*4Z?TWp|t>slv!?t^~~r!V2lF0kkAt` z)u~tKVwMbMSF}r!E<Jc>(SaQ0`SrCljdTKY*0 zJ@Hdfd2a8gJWIY3vdcrb=sS2%c5(|F;EF1K!xd@S&6_6yEehnP1O~e(f%Ws7zgRXR zm)+*d`4rl52(78y?Ud=VVB6rLj@-t|PAkBlzw(H!CYKL30d;hDclUoYaM^5z;YyT` z!X#2CT>Y6rX}vR$=!^_w$;Z7qSEtQD2@Vj{2NXI$5~t>5@mHyAwWuKu(BF@gmRX8HhKQj{-+uLls)Hv*0k(U4V{#vuDIaC8I=a7cmfa@;XOr5O zN?MR&a~-5j$KFvMT2^!C;%ht5GL3>#!0F^NRE)}K?pr|9xUdIu?knyKKE@8+4)Wap zp2WcHBO5$gRW?$4B{80t=X3OB2t+eP@`^qq#0P*-7$qk~L6BuW2|^MuArse>e;N+3xnP*WV90Q}f_lF)O9{)c_9`%I*s z>dL8nG&@}4 zd!Uqhwl6kTOYibP$6NtZwt*m0$5SkL|n=%PPKbwI;_+zXh4kOOpm)A%bkC_Fy=+}kGR{L0uq0bmS){kUxW5Wrvz%{e%DHkSjI~mi`H=`F96w{sG zw>9qNU<-9{>g11j&TQ!XOoz+oCUy(auP5o=;)G=x8>&bt zGICqmx5J<@$iRyKlg$=7&b(@E^5p`dPn&`%-a&^;f-P*g;vjwpB4f`L%h@orgSRQg zk5l?fKW1#m7Z0+?kIkdv-fw%e89s3dFt@Y^+2y28N4^`l3C||t@?y8bhW?z$iLCXN5$oMNgxo2!HEIu4mhruv`RMK!&|&10+`n2gi+25bEdAmG0V_4i zuIlMWy>t1K^T!n~#ua%*{pH#YTG{%l%KXj*Ua|^;4RU?-(S$zkfqin$gvRxq_EGttz2dfMSN6!TZcQPgXE5tkj z5;w1`w{S_b%qB>(%wo|}=c94bd_f6^WvqF3k%M|dLSRZVHY+n@ht{D=Lyq9M`gWV z?wd%4%G{i^t@P!#Z>uSe0{vIW6)pUoE@KTh76N?Z24)+>WouP&qI)Bv>EgE^J#o;U zjVB<{j)ioRy_M2e#x3!r2^$$V%WNhO({#w=0_u>}>kx?bgs(B2)3PNw@*$?%=9gR1 z%x!C9^kzd%Yj!v&fm1iw7h0MZ%2b2KJIU4}8hRgdt0nCbECxms3s@EY;@Ajt{g^Vd zq0Ku`le6Pv*}YM}_>=A_XTw#+-O9u90$px7B3r$9;?T16rQ-62NiLQ*9o)J^=CSV_ zq*C;Y%eglnFmE=v=yEamC7ley^3&JLayJf+3BNYvmMm6RjBJ)@jMRh|L||>hkZwkG z3qIfMb4~iyTMI+09$Fq#=0eM%CUe%=XNm8^wU#U*o%R-u_gJ{eZ!=+l4!VMEIg)jB zThOoPXzw8t>}V}+TJG!P9Bcf}vuEHy>_c9eX*orfVV_+Q=|)gUS)4?zr!M$7((uv} z1zBLXO)~5_L|bQhpRx!oy9zfF$~g}O`k#s++O5jv`dG~I=OKCDr0dKF^3$r^7nM`X zhOY`{b2<6F;zMc9q-x3Zxqe1zt)gYhft7)ch6O{FLrXef%kkK(XTX<})$S3@{+j%N zL+PX1RIrzu{*Br^f!Wby89}zp1A5LkSaw`Sl)N&PfxY5};-}k7N%|G(Q#pq)zX&JnH+b;;`|M8U zj!>0-hOHs*i`r08P;%8vt4SN;&e!Uo6wc zu%HR0hjl)4FR^e%35`wcvP@ByOPFnQdK#9llj#J8=vizx<&-E7eD3;uy9M9vyNl%2 zX0$`no{Xih)Ww53bkB1zq$i(AKEoi2I2n90EjA!+s@6y(Vm0wVxOHO97zho0Hh(KH zoC_8Ib3(%*DK{g`6B(?KKs8f&GK&IdD*}DjPjD z)GX`)UiGhJZ2CXm;p|h4$7o8#nW3cP!MMT7BseAwT!Sar*m{8Z{T%F;?f?FQ1NrKm zhDGEf2KYDFp60-0E&Lc|^ao^A;_TkO-^|D+(~$R}$`mG6aZHs+WgeDH=VIvETM|OgH5}Md`*bDSuDh)w zDkSNJeCC z2tt3mp9T~))Iem9g&iS)?}CYxP+lr{3Oln0yhDnOyqQglv;(?a5FuO^UV{tt%jXd? 
z07F5xZs?$JE0L+sQ2MiT_`SoSr;XAoy_#lkTg?bY&_EoM-fVd%UC%2S4SM*q6%m?;c ztw51}v9n{J{k0^(Q_%lU3~-q-?en6tzSOtD6z^Z&Xrn9oM(7?Jx>QuBAGd>FcG3#G zaW3n~hurb=-gsz5X|R!7b`QGsVYzx2+=0T+Jx!ilO&-sK391IGQ3Xe8En(RC`x@o> zDgj~@hF=s|CDq$N4xY(`)v4Y6Qqr5Ft+>@{N{Mc{kMROYweq_KF(sYvl(y8+gA9p# zNZN>aYHsp}tmf6Q)rfE`IkSQ_8@(P_m=b$pQTUQT9CME}o7oGpOq26@8_ngFD>^D7 z5Q+QIUiUy6$F!EHF`LIni{tHgB8==(CdDFrL}s+MmFemlJVw0UL{l?HA{6Zn&+Bi* zpFh3ijh57}f53=gOhYas9)BqyQtE|1N?~I5mA5?DkHdm-O5-o%qy&Yz-t^MIIpL!~ z`TUj_2S|7B!UCv70;rK95K4m^I2ai1EI2!q6sS?#>WG3rMYYhWuQd377#;g5tsbAF z*D?MF5c%hBoe+>=c+-{1d-Qn0?|N2nQ;Snm13w$d)K>?EyusL&IK1itljM?9=1nT0 zm&c_Y4Wz6)9SmOk7KRa};w}wT;v=BXB*oXcNA6&-GiFjg+6IVh}>2 zF}{X2h6)v)~-9$2=G#pIa8RmTtF zW*a8xVj~_7!uN;ZtQn>scJcRdP?2`D$oG57q_Fo--?P#CFXBGG<2|TrEk^k|ebh1W zgBN|~CQoR>Yu-gBYC;G`Mu7#-VNPh;s(_5}Q%@nT_Nx8jU9jTOX1lcfYX|aj9%@6? z3O_!^8tWg{xtnnXLZXFQrImGW&03HyhB;?*Men(fxHFaTBfU9f=}Lc~ zNGU%~+R&kL>4^0`_W#&woe+^%ui!}{yHsIND8`;2B7NErViJ=^G!R~_0y}T#|BN>hKHik<=zK)V01>n9JZ^}~?&Mr|OQ4UShj#t4%vcGG$tu*3 zW8_TJrFZ6QF`=>Xr91@x4E`SL*0Y{x#|kHLri)#+B-_{@w7O=$)xM~mS6^|ij^iEk zXDurW+TkaAS3uYNxH4LoVfZxfsG4-BB|r6pVGTX9)G}#8bmoq@xhch0#rh=-9VjR4=ls53tu%LUgGkQn1`F& zcqN||>3}F}i2w@kjb!}oyTmEy&rp?f$kf3zcR9supb$|D(%>M~-!LtASI~ z+ETl;`^1y232PFt*@_`k&hv~f)f|e<3?*HIbjFsCqTOCJ%gY1^*cZ`>hdmTCjHxDb zzI?ryHTwFU9>W7TOX3cfi&x{;;Rmfj((g4*i3{p0T-D+HLor@^2_Q1QsMg}DR186> zuu`J`k-HYxX5hSFXGvQ}J&n&=Babu1k+T+8c-%rmb46>zK6SKdrzDq=>8owRdsjLc z>2_JPUW34BTrc($`N2;h-aW;$or6U?)J;92t9v;vA}*7W$V1b>#Zz{;(!#=Fg_HNa zNrb%wz&mII)?Lh=`QyW5R~DvK_Q4Ck+Evm=l;QTWT7FMl+JP6_{D9mvzHO9b+4me> z{U9>Kj_{U&3v4+6d_X&K=4AtoAm<rl`B676MTu?VP)gzx%;J^F zNY)pN zOx0yyoWzYbZiz-|mv`gy;uy%zu4#*vd2Ku9a0XLuaO&$HXjCKM6YZ1;47znFmpe(d z?IWA3!&NpRxlYtHlhG!+bCqCSnWfW6xxtVsS~V9gjX!tgrC!lX^kjKSRFlk0*@>K- zdnKPJ^)AI*KTefJvrb{Hf2my7s~lxsfoSO|z5;?A9k@F5V%^|b4}uV_tl4*DdK}pH zN-N>=EF`{8-RN7B^US7H9Mit1UG6kkedfP zkKvx2@@`r6oDFOdshZjC0sO!Dx_LeaV2D#Ov!&9tGrQsoQu=;N$7rX(kH;8HF{p8) z9K$LESr+^H8K=*&`y{ED5Q^l_t`9Y&%KPSIP1xcwMEd9-Q*|qQ3B+SSQq$rw9lRiB zIHk##fKHkL$EWBqB}O}hM}TkHU0zWJT*^O;6cB0kGNHIcUszL`t|%kHh|+m9isY5~ zB?B#9lz~3Z0{!Xj^y;4(7EGtB5w1X0zMhKobx1RzB%;BQQof0MA)MBr-0!irUbzrB zTt&RtO#AG$zy#3y|9dWG7cBz+k7L?wPhN*NR=@}O5n;t*T1Q$)u2Ez;bgn2bvO5e% zS~ui+558tJYa)KUK0gN8Xo=4PUojIhX!2OmXw@jtsl^b>I;Nn^Hc|Eac@oBI0n z2PEv5x8_Pn`5ltaUQ}qW?FNxtcai1(XO}Y*y5X}e(34px(v3Ryzj51?PTj8N6`Iij zfecC*Ic1PIDcRt7e+_U+Kn$F-MfgnjLqtD?%FjqbLcc-SAP{VbLSA4`U0*s(hv}}dNsgXeQW00CXg;#N$ZL|L zGOe4zM^E!@wDv4(rd-VQu(C7z@K~5`$%i0u;YTQB!F`e4xm5XvNx4!ZBQ=ZsJyeTB3qaL`Vi*UD+vCESR>}7 zvw7@r<0BqY`jyBlaWkH~axLvB`gO%S#(@~8&7qx~0sN@0;&hPF?NV1=i6H=VS z05M!E7K3&o$djdmJU(|ce9a>^vZf%@vsUtwD0q=QB+IP}wS6P&V#2U2J4m1B6-}AV zQr6RKO`LmEYA1xK6TtmWGOy?p*pWh|#7Y9v(QL9mhOA^(0+VLzvHaC193{i>x4qbt zwO#ek>qY2myg!UE>?J8QWp7$rQq=R7dS!mmd{2;ScK1pMZUd!S!%AdPaaS%mq3j*t zOA(a)vLd6vH#IWRt-L_rSH(xV&XcU~4bMWdm)JKiJ;o;!Net;)_13WKE~=_1tN9XV zSIT;7@pJsAYwlzVPVRMpA5v844Fw1F@(2I-L$(P1Gh~a5pYfygNjuBh06VoSZg{%# z7C#_tXmhR{muKfw>^$q}mdvL#k~DU5_qiuK<2QsJOl{(|>LqaobBI#EVo!}|8^6ua zOdj7UK6mz_@gg7Ewt9<`%f8n%(zZYLt5oQj4pmSO5wM+qyB76{mW=*|C<9@TAr(b# zoXJU?VvtBH*byUT{)CpvGMBT4B8#wGpqm>kv{=YF8jotT+O}tVI$XU031LK@RM2c za}Bke&0a3wec}P-m{Io0kF)IW;QQa;OUCkuOA`Q>z0sWx6j$m9s3}~Tc zC$5Av>$yni3nASfQ3`Gg2%7uh=9IhWFEU+~K9Scx7JRX~tW=O9(G!_lF4%b(bv%%< zJhRS1zU611G?J*(wdsEqv3_s7WuG|S%q}D2n5n?X;i=Zm<}T++UF;e2F#={|ktg5O_s%80{ky*@tTNRQ4=>SoL%N{p)+OWn`tVyP% zW4GXMni1#;Bpw?m$x~I_2-bz|}Gx)uYMoeqx<1#&V!+dY$L)I19lswF*+=rG` z=N{~9{}FrfeY+Fk_NN|rgwIu?-Q;c=YWjY5_qLW85X>y$layeQQ-3Mz z%A$SM{Nxq~GkxbhI#ztbz}Qge?+(jpX4DZNw7#)H+-`^I={)Rc{=rc}O5Cv?z(~a* zEXq5N1R1N3_dSt_Q~>J|`u*j^05(4%B;MC^h1&vP1Y`hqAReL63tx<$JOs|t)`RdG 
zx@4fy&4L|*Pe;P0@z3?h1JG0OZ85^PZ5Gh2qQc4J#h;a_(jsV_S&;UCIyr&L?yB1U zrMAELRyPqgKIEo0NJJ|A`365izL!u7th)H(Okk*Y;R>f#O@!JEBtNSX+Sn-Q{Wu*=X6nmSy=wZ zELf{=`2gMVD)t%Z%j2?K0)vA{h0wY0n46PvW6vFo8)dl*XN8-G#0{u{L|8M@3;Sct zTlm2ufbIM-nO4ero`tL4ib+QLqKx{&J6bEUce)J+e{WpUav`@?5O4s@O#ySoR=o)j zlQYZN+Y0~qPC0-O{Wf zXS6`4YxMUjH~nM$p2mQ|QTgXut{()P$=`mV2@J&GKNg=Ayk_7Zg}+^i=Ih%s|Lm?F z@X_V?$JE|cjr)DT{_5oqRwB+M4kOqCfA+W+Hm$q-12P6+lM=|M75;y>i=aq6-MF)6 z_^+W)oso})yQ!aV$MTKpe>?D;jdd<8g#%BLN_@4ipPDIB5I*CII8m_u4Uh(SyWjR< zm9Y_S5XOw0z^iFV;OR~Nwo%#K_iaTGFzP-3J$zNa0`=568dKHz5zvKZDnQ?>xxsI9 ztWbO|^X2=C1pQYdyTC`iAit~Tv)L)%?k@Od zfk^vOwYL>xA$zE0#G!EHZAbV<^&AL{bpSmQ067Qu-I0jK^LPtjm3#$8BJMBn$%u28 zh8%4Ly4lICU|?4R=$r?@pd;hyW$d~2^W}Z;#J_!;Ex?kP7sSmHD>+o(fg6OWgf=oMHb8Uo3=pcmhH}d*U*-kpgu{ng3H$p%T@hCwO^x4VEA5a%j`lSv(z)Z!Fj zn{B|W1-j+E42}x)vtvNEKPG-cr<*G{%BVZ53vlKG&@(w~#3Sa(adhHIFuuBZ8x+-@*Tk1%v z5WAew)7)cPU?frlJo4`g1NN7>qsP)}s-C7U2F(Q{TQ38jruy^xKbWi?>?0B5>OXp& z4x6StfA_rGxj;Vto4J|=V-WJs^(hTjhK%vqAj0#!-N3^2_w9yFQ~f#2ApTqCAFJj+ zTHWX4rTWKs`5%d7pZ5V!f^E6g$=@ahoOmFvoHOW6H66IkXi2ib=?T)c{81NZ!9Pdq z-%V0CSlL@umU8|w!(VQd*w~^>62Ci9=-U7E*M9aUlbf@c0g3+k|B;>olX@ZdNR-L+ z_%se^X@$?XUQZ!JfeS1=jLpai(_xkkj*?@J620if=*1lSb}Oc7mxo&$iQI`6%f<}U zuS9rnv7y~1yU8+=8x84kfPmmy~(>Ipg$NV&sh*ODSaMx0b&+5fxiv1p<6PAbTKN>7elLg*7RZuYzBK(JgW|bAO?k-0VAAkhtJ82Q9CeqW!|cZK9&8tX9-c)bG!HsMb{z zTOqE@O=g#(=QOybD_Kmlh9%Ne7$rqs2ZT^f+iDrTUk(f~2l32_Em*TuI?qn$_5$f*%Rfu#jNr?&<%&k3x&gufJ5)P7PR9@dE2;;S%8wOw#iB0Q#1#`b{@psugAKsW@IqHZ=ZM(?6%^yR^MHvc zDZ{I(W31SaDd+Sa6nK2S)Pn5+xy@9p_q`LfEf9@7kv-|$k1f;Ue?U^f^mR>qN=3xt zfsLJ03sUmQ<$BzuK+a&Nc;aF=x2uZw`dH)dXWX~N45AJ9O6L`|G-OJa5cra79J>1Q zStxOl7FT-GmO(hX9|CQ8mT+Q-h1FwmO_7BK4X9( zTDwTJO-Y<5qv!e6RhbuNkQBiy=CWa)T<{6;dK*vubmmaCkR8hPLpY}Ht)wCnJ_*AW zZX(OfgykJzMAJbb92DJJid%p817h)0Gr_>_)i=Dl@1JE?BHpEQbF4M*6F*AcsN0JU zxfd%p-Y7&n32lWX=Bg(Rbeqb(m5o`d^tAM0K<)Axe44;lWulxv?;Bepy309S9OkKh zc_40J@Q8qS)v5`D&800U``wf0Psa>p=L9W5?Cz>qiK+Yas5(fZGE-{#<_4u)h(X1uUKMh- zr)8IIS~WJ$F=s-5N12*msXL`xcV{xdWfkik+&^u7?>pQ6ZjZOO!G^Oldtf>)MMVmZ z{QJ20@s$tS9xN2B9rJ`!AoWOp+!6mi?N6gW5Djo;)A|foXg3(bp1!*;ZZ6TDc+YDX zY#JCiJY&^?s{b5H0!@c^(juGzlK}r2vQqoZ_6B16wA)wU4LD-|c^5W~-B-OBj^qaD zls|%7?vJtTHf{;vh?OD_|u3?cBm&w7t67L|ECg;CYUYj5_+~} z^=;rOeV$KaEyM}nb;AA64e$kn79qGU0z{4o|M#JGo%p@JbIAVvp_QWQqn$WpY4Z$KrNG_}e_3jdnDPjJAtbatEfwup$=sxVX=l>!v^+BVQ32)hUYgdbS7|tJBWGlN zeeaRYnz2Z~LghXQ^U)EA&<}N!xr?-a zU*8Vg;0Sz`laYU<(KB$|J`rQ{TqRX!m(-@Y71mnj5Hl`|MOSEAF$2GDSfZT89u~)_ z-(5eS-&#tE;{AI4Mk(`#5dJrkMKp!JJ@wm0kD;(NyOK}3>NF|pF{s8Ja`|O!B(DR% zk!C{_w-{-X)YO*^EpZF;D(%NZ@1#3N7nqeI#D!NI-lsZ^j5XBs%rKIkStfbu$f3QL zl@z`aeW~5`(|uc(@o<5ymgPA8AmMw$rud%-@x<9MGf;0Q@P0e~NL&eF%e|#MYGblI8)dn6+iZMWIvG z*lNol*miT#%jbF)v7(e9VVPyiA9maH=K_^bA0d2Hw8k!`1vXe9f;iuKR4< zF&faJKz{lw_5W5>5S{f;XE;(4`b`%FyJ&jM*4u_jljZ{*Sdzv?pZ~vJzp|}^9fK8S z3_B26Gx9qE4DJ&cu;(=^OC5DHapPH)(<1D_{(GebR-nME42R!}X<=*0=P@T-|7t1y zYneK93y$!7`W3LTOZ_<+Wf$XFlp^T&?i>Y90yeD8@IM)wi_hR*1gP2UUo2^<*qsD& zdxGZuZM+h6lphc}(^ChKUO04e@M2*=wmc9kK>$ZS+2cQO#>4-gNU8}?!=*s<@N%$; z;|$=m#QQjmg-aJoMyS(BsG#%T$L+7C@xzi1FEOSzFhYI|<~H%P)vzmncdNtIBNVs6 zVcM0HwUe`NU*TU{@#K~=GpMvXx2bjREM zSC|NCx!$o+tf=s&{-bD{6~M}?fx8J+c$G);o5%|6MO>k6v4a&UV8H@1%#x{K>?nfr zC*Qd1l`%q^I07$7J5KcFFTr7$S{w+xW0(94+EPekslul^#FpH^n?l*nT3UZ4FLYnC zF5XhyW{fhBSxlHrh|%|ftpx9fvkYriToiu;Tne1nltRMKBhJ^PE{B>=fV_d`FF&@( zuAlak_k1y^Q4_M#>BQ#1d8O^;0KBdgq(3JJf9dZoTP#fPR7?C~E)MyU@{yj3XG~fv zC4S-z14>WNaKXff+o?T9={;Rub%m<41u^Xr{9Ksp8M8;K4b^D1PIgBuatxmK=cw+R z8lP^ubyhE@&x;>)^=x^51d?u%kw0zg=!2Iglijk5915rX6^d#%w>l9I!?H~m?4y!s zV=CA)e6%d#HYG++)bkE{4&ayfCinZytBR;r*sa);9#abPBEGNjo|AiG>UQgxd&YRC 
zG{@*0UzUW9PW&cc-dPD%km^n7e?TWfkG+7ci4BwvnXNf(nmXsQg^saZvDGvnW8mDq|MGB= z0PaMPNYnNsa^|`5(Q~s&j(rvp`Nq8 zc&my#PVdGGTwHXBY-ZDmL+qde<=u!)F6PZ)|4$NP!%Gs`8taCRCMu3qNFiu(=KAq@ z?#A`8(ZP-7hYG+i(hvM`9EZ%3ZO8Khrepn7#p1cetkf4FAMDZ3=RDu?enZrCO! z7R;x1l)x9==j#3vJu>tzTe_YcuJbK?3tw=~Nie<+e(6n+qSMwFo}O*!O&`kTAsdMb zRgN6dz&P=GlmGFuMPnp`v%k2xPeQI{i2K7Bvbqd^5vTMLulE{03gSMy83MHt0X|;7 zIKl1g;-NU?3n>D^Ppw3jy^5wQW5h?172q66zyxZ1CgrWA(R3B~U>*GAW@17t6+6cp z#~2)`SBpXCD~8|Jn#hNi(evuP0=n<4nDpXTBzRw$dMk07yw3X)!5~Eca;I)Ih)Hq< zYaZqlypzT@6(yZtBX+AmI)ATr;lUzi(*izUNlC-eQs@M{!D{7BwB_%{%^ zhdCPn>^u?fnm<9uOU5d=SxEAg=Qih9W1qj0<%?zN3&da5*L?EAP8EUOdz@O+5iS6X zynloR&kU9Lt#o@lq!nYd*(3G00pGsGpT)b`xmwwO( zUg!hCu_^wvWBzx4QU8xUaTDg%w<>=?mOFuI9V8yg*jGN5|C#~dC6=n&$UGf5Ctat9 zpFOTnIKejslItOzA_we?Ih81h6D_^MSCj>xG`0n3=QXop`xPzC+hs8mTUcwfMfN`b z0ih?yJ7Cf6k^-;iWGuBmw3~Cka{Er$w8d@M%j`$@Xl;BR0uu>`} z%HrZOebDm&FuVUmYwchY`i|6pJM;&+$R0VI*-iQ>=9HN4|17Ffj*SHyEnCp>V zb1F;kKE&+&kWa(0uJ?ALkU90kyvY1}PRVb^?c931q~BH}vmdg^j0EY(9;!QEO$fN- zRoq$P)&8D0*ehZzbiw;o27c^|p%OCpo&fF*x5VP|>q)X{Q+r8%rTvRe%^PgIo6?k| zt_DGDE(UQ-VmW>uOjv8B5ARZ#f0sHQQ1U3zN=Q$Ld*ga?OC@krC_CpNP{m&maZAj(WWGA*)ZMBg-q&p(e1r>i4dE-f;qwH(Oo;3hpPk?Jz-xo==Y z&66cqXChkKp(MO?*?~)I6*@Gbf2ucpxQ@Szs2a5}KRj-*T#p_YwpNtRZl|!!TZv(?K`PMY%bP30V=dwiXgPMGtXloPRTBIrnC?0u^TIGfR9_ zua=c(>9Z;#cb0R#A<7ANSxR7(zrwPT?pNICg<_Eb;mYI&c(m*Z$71Fvl8 z=NVRWWr=6Uh9td}@bJP;OVdNgy}GKET_zaEJnt&c>)zLI^dZJ!acCx!A;^3Zx7yeq zcZ~Xa6;I9Vz-NWjjDCDaoe$+hwY9U6hI|TDZ(a@GN6A;OT9?%-G*CxP&-7JnW)^!% zlQobica<=*I$p?EWVHG`mPD6_LgdYATJwQyyEpa@F=bqwbB$+v6w>9T+c;d`pwOWqwSTP%A z|6$I&gE(5<4bJh>4(!9y4hBhs$>FaWr!2wMrrh#vv!th$v#dD=Jtp~~NFlPN5><&P z@rFQHPj~p5ioF*=q#;^Agi=z`UMDAmL-!ch5xF$jOb#KVD z_6P)2B}B4vQ9FDtJWgIxWd&#vS=+l(%1@UGB=7j-VKOYYYfC-CmAK$SU^v3wln~;E z(L@^PKJh;CnzsPGLD0Bp#bv3!Jcxrelgfo>4TiszS$L&h#Pu^76KPAf@Sdq{VxXkcN?;*+;4Q*{77ese4F$uz|lmUlfC~EZuH;%M*X21Hb-Z<4@|y-nCq(- zOAW=_`wyM501f@kGxj%$^ZZ^JHo@|QthYrKblxqjY9lnk%+GnHj^5vzdH#)64;ok1FDU z;U_bS!JmEL`$rp&1B_a>maH_w`hW>z!lWu#vB?WckwqiCsmeRMokFD9g)8+is14`x z13wAzPlyvKE9!8H&*_tf2P5X!sMpD~uiu9A=o>!>r^q^DI;Hkm4;6$}0Uh&_V!L=` zOx8P!(5vJSbZL8pf#m~LJWz?&Z`AV-O8Sh`2w0B20bLP7y7hN0Rd3H@pP2(1%8T3i zuW`VXGDnl~iQDw+I8rK+P^!Uxxt(lhIiAZ-hcobFOjY<`+vLf8g|1MLj^zhLC_gkI z$cgcdoM-rh?=o)mcZiJooAa#AdZRp8II=_cPJD$9l^#KxK-&HDYlNzO9B&VC8h9elX*eeTou+jdW<5!2as zYkZHR5_QcXh_W{^<=^uVAATvZOUy@A~hooHaqO4~}B$z3)tXyRd=+HncR4Vrt?c~z=@ zw9Iy`W`%wVaeSJXExV(}@E%Ps$550ll7JqY&mLhUzVxW?xql_j;-uGnsIZJv6~Sau z)y8|m?)7PV-0H1m*o^+WeK6iFUkbj+Z@Na=6cED}qrlo>>bL3ld-6u>o{6e)N%|_& zd>qc6V>`c;@AP|N9F?>d(q|oqtWD^#Mt!SM+fctX1TFh_`WJ0Y&^b)OtE3gQFdFN5 zIV0&hN*RB~tUYcU1EzGLX`Oc`E-??q?h7_d`w@vmzu}F($vRU()v~qtVQTyN4Nj_- zjCpP|lqSdlO=G)qksrHd>_dyl`Cx~fS}$S1%)hGrS1b2l{l*H^AS}JZ_QbM2QI&0L znq>`bR-^;|o`by;$H+H6r%sionnN9kee#}i8i_R+?MP+Lsnht^;fBpxn8!W6As5s9 zW7QIS73b}kYCqV$g#(GvUG@1LHZJdzMi_I<*XQM-c~!+hx4s9lNX5p{2c8)L?dY`J zWCOSG!_G1|@?n*r*NU|MW~#jG0cYk+l2>H35`bfvpX;NPwkV())s2YlbFo@trY1P! zIr#l4AdvhHWm61M_qV|!JO`Q@9Eg8l-rA99-Q_%!S=2&~!WqOuJsso}CTd{~lWo0f z4;g|Z^Pe1F&5lCd%EVy7cF075Xr}TFPi?$@Y?LCKtFQbgTYDk&m1o+NMw zGMq`#uPRHjkkue?CFBmdnh>K11Sj4ECm(XJjBDA)oPwfln=|s2MEstma%EgaAs;BBVZvs0J#)M_8yWh@ZgreEc0G}O z%AR#YLQ2LC?EJc(Nf_{6JHwEn)p{sMKA0_<@OlMrqFNuEEqu6DbKY93XSqHKYL=Z`|@F1sP(?*v`l5K5Noc2Y3TY0>=n>SprS*YU4! za?k;Xo0_Psq=_m&_K@XDzTohxp^^XV08Ke#At`Us53Q8npr5#0iK~$X^&$!cx(Ov! 
z?{~RXGG}uJZup$tut#Rj+^@b_66D24sL6Qzv5_-?-r2nOZG)gu@>Z(n2BUE&DfR?P z2r3sFT1i?nKA~gefNa_SBJR!Op={g#@sT1+q!5LvC|k%@_Mt*3LY8D-vSr`b5sAdu zvLwd7Z^^zZGWLBbJA<+B%V133)BW7f_wIh~`}sc4=ePdjWsJG5YtHL9kLy_8@AqNk zyn91&{eE&G>?2B}FZ&6V+*%IE*h4SCFknMD*|pwr z+tyna3A{kads?5akz+$?A3{_$dGvW6P87K)Z{79^9Ie9%J#Cs7u*m*7hmN)Yd^N9hBI1CtD;#ghoc68in~ zB|8AaKYvsEAe__P>INLSf{*H3xZJ%rdsFA*?K!)IaftAOxcRmjEq}HusXo9J-AnAW zwfGkDTI8j-wmgl!61^DmKrGJT{pDT@wrtj1mN=7gnQ#P+6w47oWM9^EY@||E*N^u` zN0Y32`$dUg3v@@1KVNz$G!b069MTXx%npRmxADNi&BE|q@2M{jzK*YAV%m5Tk z_Xt_j?cMS9=}QJw<#o;H-s}E2h?(bbV?v_JIJ0ZZkBoU}?P}>AhOfM$FYpp0G^4h5 zBS8sq5P5%8gz+nW3>tjBw#PP(lXPg}CFs^*+1#P~f_v0sEz#AZ)O_9RU`hTtg7;x&p84cRStrqI^=21;@2kb|MeSi6XZNAa zZ@{i2krlJ_%Mn4JiK)BJ;Rv1SR$W`V>2Fa;oP0~2f<)_~=*_0@Z8AA_{Tz_&g-nr% z=xMq%6W4?S^-li^!`ZC13v}cdW(H&|dRY97-KL(UGe_sqiV;f{ErjAHL`Z1AP&y@^ z!#-DQ=Dqd0LzlrOGW8-6HyuSsZYa)?@NgKOIVLz>V_(L%nakkV4zX}&SG0J}dD?JT zmAUD#ri!boolfP^D{uXWU&uYE7B%-K&p!>j5r`0G`+;G6|7vnF)O~4<%dlGKGY1fC z(x)>Wj%%;kh)#5iEH)6DqZl&+VEq2wOp<#;W-RGmWS3n6Z|$nk-n|kC<@C(&7s3?q zcJXV`G)}lun9VV%63OUO*wRah+l5gOC!a?Ft7R2frxZWlx~;r^fF3(i*R7lp=nIWb^?dEvO^D#fGbNzt=g?)fo7B|@Mh&-I zm4m$*OobTszdgu#E}eU$4F#vp6(Lp)YoOW@3(C%$s7tWezIwVqWM|H2MK+&1GBbgX zfMWGKYdpaK1~ku?6BpIf-?FL{5E6b54s98<7>+$;;KxQP*V$*;RT#ar>{hhe-zh)kd#&vRA=vAnvML`tlG8+J`GYwz(|>+JTmB8`XVVH0PQKifOJLV9dJ z6_ayvFtuhPpS>&eWK2K8jhJwv)GXIhfyRpBGUX-R#)NNrunFD7{jQO6;*eS8?Ix1$ zjUwfP^OIdwt%h;~xjqeh%*c&&Uvv^7k{k4_WZ98(LlO)W^n1$Edm1d=et3e~U_H!qgi*A5f17iYR)ljlTFr z<3Vs00n%18vE2;scOP_jP^s=PVm=OT<1v$+CAUMGV2`$1d$@lLg-SyEZt3$eHNmL!t4MAWU; zU*~gweoEI3bKHzv@t~9n%1*()&=FbhuqRCteJC#1GHcXdIG~TTxESZYbfe=#9|(4m zrA~@g#368WFMJu?^&YJ%#!GmyWKdk<1u^*b%Js`&MN&XIqzw8jrH&%M_PaUYS9Ykq zZ|Z>P#X<6<5L|d`NP^Zv#DBIwEzCb!!!Fu$IyOB18YXU6WLnGtJ=#sKlV79UEG2mq zUu=9qR;t+ijFNN97j(M&GwDZ9Eroi^`E@)4vr?@T>t&0rB)aGre0v`R0tcPMV9H>p z!TBERHWRRU<0A=TG5IBL1O2&(Oae*9X5xK&_hAj~*eXG`p?KC!)@)_-Byddx{~E{9 z6zsHHZjtx0ymp)jn(VlY5%V%7N=VYqV=MCeG)>Lc>PX)}*rV}G6DiO)KU9*PpF;0K z_9{$71er-#Dj-^(JRN<&-hE%br6_o>009lq8nBQJgRRfP!CX{tre#?_2!Ni3pCD7~ z`9!EwX2LssFLR+X%)7o_LPYhoZ!By6D7hzrGz8GEyN&qPNo=_-5_RI_KY3pe&P7_UM1HvytdIJ{gyJx5`kSIsAV>}>wv25r#<6_8}O2#%5_ zae7E24P5T%Lx6_P`Iwp^lyxI7D)@%>?7+|p-S(k+o9jhQMgxZ3;~Ds-n!YD&Ao=k$ z-{km&-)`rF*?hyr8FT5(j{ITOgcqD!h$xl|2cG+koH{iGxdau`k&Ct9{4x0R;|2rl z<%Ebj@QuOHmfXv-SLEa-6xyGE*c@Thkx(>V^7bteJNt&wDkOE=pC1A;d9|AIzpz* z!Paa;Q=KYx6$O`~>8BPW35l=7gkk%c!&?QI(V0P zD1@@f*769K)Yv|nFhyA#@5tI$>BfHKZFw|aCkrjk+(la3c$Bvc{+3>FjGL>+bTDa# zec z@<+JP=Cq>#8;iw@)AOHTb79cEURZM|R-@|6K}~q#;X)!>mQV1VZ&0zGTiZup3XV3a za=}Uv@Q!M*N<0C3=`CJhdg`2&qS&iY@Xt6khL$H;snxRLzS81PFmVeY-_={fq9S1H zz(}8FHi$IKuE`$b09Y}RParru-@0!5oVS$HWQ95gcU7x$`RqRq89F7d@Aw)%*#ve> zj`(Oo?xnMZZ=aWTocfTl{-Ys(bIQsRx&*cxI8FwXVwat0{t@)?B3?i^y)vdeV>dVo zHy%uvemg!$8)9$i%oA{!mgp_!#RzK#7N1}sBNhf!M(Yq*e}tE+|gr>;aC ztFz*nE-fp4>K|1Y1R2nk?$BKWjR4~e{{L5nYC!YX3osdym%C_R&pjlheX*SquJ=S}R)1XL#%F~$<))-9 zcG1m!b`f_?2^lHux@ofS+51(9%4v4?mXLYTcuU1FlcTcNv6Kd@@o2$xY3h$w;~|{X zHoLJUn6uMwnZ*c}WbMT{%5gVne?oqva;PQFgkrIJhx4$H8iImjfr^l+BcL(k>>+YK zlV1K1`KXAlDhi{RP&&iJUetp%Ncz5k=Pd~hYF}duhL)AM*dgq|vxWpE zGzD7Fs(hkyUIxBK-rg?9TeO z?$2cB>h1SUy}Ud9cxCe5Fpgyiy50X1!U#WMAIasxFqwD}5U4(O(@ma%e(LCTa~nmt z7-fNxI)iYm0(>zB5BmxEK1^;W)XFlTmbg2%%j(xQ&3Z{iL$UjCPTT>1!IJV|5sW~r z|BWd6%i(K6@fW%eO~3_#1~XU>zX=ev&57mBK(!BFXMFse0~F9A@b#MJxY}$FJ2|H} z+#!)41@CT!>+LNrCvHS)!n>7lfG%)CKKmlGXM3rmzbk$3goW1 zHnaWZiWdKLMFWM*eWlP%VbRzy6N~RSLYt5VOpK52hI%{^do86?*BxA)_?) 
z4-?Jv)eL7&Cnn7fit}rg56Z4}cM!>c`m!9Fj3Zf%<1RGNM!|_lxXI{Af`jAZ>y+wv z*Ap8Vms85i6`oQh0cOgnw3PSysh?QF)SU}+e?m;G4qR-yJsrVdk^IXTPrx9(_1h3G zC;i3dbhBoyOnA#YYB1rJSmvb{`3FhBKUCvCz(Jw*I0U*8^H1X_VA`5P$NMM}33ZqM zgy?PL*WR4DBh9UGY%pHoyn_+OFvWIpnyY=4NlR|oaUTB(`Oqnk5J?y69^q2);I3zO z!CzSa#rDBf{BHYs;_n|3JXK7Eu`MD!RHMHV;K&XrH*)JPg6YyfAsvMV2(TmGt?A8z zUHrFKmYZnk^AmCksBkgk*u5Kxaob?<2i*VCSAgU=nmh>jgo1Z}+vnM0hkyO!tf5$i z`ro(mV6h1Ve+H1BWw9#9Z@}pR?>clN1*@Lv`)sKV+Qt!OkU zgph$klyuF4cfkCsz#NSwXv5R$=$6V229F=;RJnmF6A1Jg+)pwnf@ghlJd&@}W!vl4K-RC}|PI5Y1O zcRH@%?*VsgX@2|zcH?*c$58*#XK#g*yaN99lKJr>Oc1J9ug5)R#a)X|oPc-2v2o%^o|fM}{c=n%@2`zS9H|Jb3T`qU{@mwIRK+|Q1ag-5w z_F*wXO(gv$S@AnJ#42j;g&3wMa#*|?6Pl@^Z5kwS&Dawg(1MyLOiA2;g~6V1EIF`t zgmPu-N0AT6F+>0xCw4;OPPYA9Y~kqn#G00XcAHl303XlGQU9sQ|4aXe+x* z;SPPTzeOxeTz+5LT&8B>u_8sE&VxP+6c6(v^qdtR0h0kC;e2?T4}U8IMP^Fy27!=h zoO+>Eej{eI!m*W}o&nKx{wz$npBNPGv5`~n-eq?szr6hT=zdD@rv_5eSFLn}%voYv z{N+Xsmw}9AoMVj3jqr4xwLFO>R0V=a(N<3Uko**dmzc~IO0nyOs|-1YTpTNea@h^F7g1Z}sD#$+ZL>cXbWjhP#|fjWVbqE}_aZnsE**S*fX z5>B-ua3!-e>IN6n*!!o<^G_Oc{dx{no_XIGaIA##X_SXK!UDs`1$!%;+N`{Adt2+=qWM0uWc(n#$x_}vQ)Gt6o# zcAni&>kB34)hnx|4O~sX?bUH^i}^ckm0>zaCGaao~t39b!wfxF)UL9GwSrw?2MifOxBOxL*(n z)l&UB=r5@DEvq)x;pU2NN`=j@hM}YTw`P!=`>ILhy%AfW@9dCv*s)9luY z_Gg)e#Ukz+>mzx!>k)NO?M^Su4;dY6l3RBvpwUL#Mx^7iFc0S7Eb$mWF%~3sIcGv& zyB{+-I(Uj;fzNHpJ>HhsO6^UfLH1y(a!#R#)4G!vSARWnHn&n>=*?XI=VuQ-HQz4E zQln=jkb$&f(iNm>(04|w1wa%1Kf`5zC!NrF-`D`zfv(>A9XF5q0B0AYF?Z$Z!gKJi zqVwL{@DJr!XSB6fcfRswbE#A6fyDjQ$$A(E@}u1VR7;F9U&oTk1$k8V64jPku(Cr4 ziYD0}9?!O24&38C2fT|dCA`5zFWm`QH4sNCfBypvrVaDMU&{>KRrshPdC!4hF-qzU z!zxSJ-B;yTty>$Y3C4a6C;!9mfM3^-WVatT*OpLlrX)_b@M(8dub3_ zGHU94@m?!NgUtM*{Ex5fMu-aKxb|NG)he#Y>HLxdO{1vM?$of%9l?^Q`QZ;FBQR#f zMrpN0Rap3jb98udv!bgQyhLgTueMOyj1!mU;@3AmO~39}&Ai!w%l({FaJ+o+u}3mM z-$}=#f?()r_;M`XN;{!*OumVh;bPpf7)XerWPoOa+P0X-clfuC@X4*}2o>(Tn|^!+ zqQX4mb~`dhVH}4~nYARtMHJJA{N+FzDWL_*CEjO!qaUz0D=0NnXX`WPr8y+&aGi*R zv*HKG?Ay-|h>82si~CIm$ZokF$S9hfs%{Q!I(3$*5-+Jroy2U{(tXt>-_=v1Ln+qw zHgEZW+F*~S!xoycp<5m@@57iiHT=+#ikrXZye)C21!DhU;GEi>&BT~OJNl4;fUoY^atJ+e!ZX)^LS5{R?@4jz zlPAbUnR0&zL)HFEu8e7hpYjf{*1+#84Q!5k!ZTnv;uD-yVo=|3z~OTcsls=tp7JUO z;Hkwg&EB#(>eSnWb-ZA=Q~E}iuV@#Wbj|41L}HNIwX>|nlo-j==jqcmyW14bv@!~R za~x7vF!bfi`X+Y}6bvSRb?SyOx3p2)%e6qYuD`DKXOv4KAD8(eNYj3Slv$sRkOkyI z9DQqT{3TYVj>`+l#o-GM&!685$0{>Dcb1%D+JVGrduto7Y_;E!-|He)vgRD-Y+F5$ zbj9>%YVFF;e*yBm7xcP*D6l9rF;PP#9 ztNlb@>+`U#4t^7wINQZAd^`l_>10q_eIIfyl7rCbN}BWn0NY(Qrd{tf3Dl1_G(fuV z5M?iuUzox}TT}(IJ>T{o8pPCz9%*p1%Z_&Al(t1S;A=Y|Z>VRk>lLI+eZ|bjc}G!k zMp&4Yo?#2FHMG6mW4aBJrsVld^>{Bn;1Gi^3o14&?V}QY>nMWp_ z7{Z^@x10hEl-?HVx7#p8*FDufcYs7F;E$9r2mDHn#I#i4w$=l#$>YJO=}_!O=t;_& zTxyP{8UDu(bU@cnfI#mp{-w(IG#)t_PF3V|wSe_EZuZElPG5GR$<&aDF&4UUq$UNC z_da1dC?l|3puNCLjyokIVdy2?&?VcKbm^jONf>Hg(!Pf49m!RPI7wCKb2-VxwtP+YZus?dmr{&+pwR#`G2Vl9u0r2s zfwR~0-MP&HP&^YC8^yZ9H+XtDICG6_-llj5^MaHLQ?aVjtHTrtkXOZIf9Bm%)e1Gv`m4#&_t>#sXAbQ*2B3D2g>UPYC^7OgnwX7LfV zm0R`e!LBz(VpBS=`bRx8q1=ujI`wJgN@863%R<8e!;24Gm`#aKJF+mHCCrG}VWEM@QI-AdMQT)Z$(|g2y zN&PkVO>!R^`S0J>Mq5%45LPgirgSZ%&M%ESl-k53!Cut1FOSShT|HOXt8x`T*t9e;ainh)m{Qup7edW;xny; zb52G*7kk__gr}5F>ALD}Vm_cv%z`}q-?o~F54s*$!tob%c<>%@bOaoX1NZQtO`wWX zExkl9#F`DUB@lmXBFi6tdpni3lBw|U;8cQeopFEphi};Rh8QGUjoh^Bp!(HBVSRi0 zP!o`J4R5dpux`|T{KC)4FRwSt5=^5!27sh`pEsr7q_VwXzbk4=Xo>KNY<(g^ny5OI z@90(Mu{s&=ndfdK4h9(@Embn;m96Y~mTgrs^VaewFbS4WGC{_BTjqTa@3nJQv`g4_ zJnTBZ-RX2#7{FTBBNLaGB1O#}p*Yt31Zk@%p^D%%{p(7}!!Mba^$FN;)6@Ch5uRmW zdZ+PRI9FNK-lE4GHOpW24VcB|7B`;1z%>9Wr~UP5i! 
zG`>6bP>x<=4ZQX7He){m+{EgdH&w4gOA-d`KI=p(2Zi{{`G4K zv#;}1QsenCqz&;V#9|s=9y9OGiKvo)wsLBg85Wmh4ohKmG$T&W?K`|`kXAHjIg$WwFUIg}X zsm5*I^@rB=#A}B3&y<{WCA20%O>Gzy+K`jjS)dkaw)B_Hl@e_wG(9IJXYX`y;G4ki zF|6el5C1pq>d>!RVpS9gGBei?=dpxavYU z6e8PnN9&QVaH1tm*PWuN2vNdCRSo_GZ87sZmDhK0?zk_Car-9*_yIiQlLld!G}u09 zq-<90*w8L)Esr>s715l#*>Q=5GHNhXQb}r2!*Nw*Qa!kp%=*dquCucck5|i2GdG<- zS~1IPO@{|E8zH5>ER+_o3g_xRT~bsScL|-w-Gbo6lBB1u}KYJucM*+%*|xausXmE}~&Z0@jEm zM;g_xSN*#}r< zq8O2GmQ`k%h2_$Fk=ZK8`!X;ggQc78+-^+wBr%RK8XsB?$uoWZcr5^=iJ-?#z+kB3_O>cr}# zV#Dg|pq!XZk!@pVwU_>~nFg`K^e%ogc++nrTWV(Y&b)NUjdoE85sD%5)u5Tv)%=_R zMomtgeCL7hDLMbugS15Wo2ZVqbsBZ0b>a8Elwv37`5)$dXXm~0G=xS_z7K%`JpWw# zPC4)~)mTV8165C$>s6+dll5C7O?y7~Sg%N&CIB=v3P9^uyneG42-FVN*1l5=>|`H# z7aV_az()vz%&L`y-hUn z@06Pmk+1{}_Vz#!>5(R}rsyjJAy( z3#t1`g-a}B*HVOBV{b3}A(E#2e1(|Be5Kuq5jEEev5Be?PURhzTe3$jbxVmG1kd{N|*wX#P_F;<{ z74q!${9&4ndbvSIPID_8RYOjqQ0@Znm+Mf@+mQ1Vw}n}@k)m4vHCrn?YbpBhk)hm2 zi|A%sC6;t4W8O3k}#+^FB} zfZr+5!5e zBNiAGvVinmdfqC5d#c216hCM%T-M`lKri4ctnNP!am!cEWrL!J%%=#80 z#Lu2q$LUpjRj{lqhd!dHD!I zJl(U|nnY?3p_Hh3k>=yuxm5_+Ib`xKz0pR^%0l_Eek+A1TIQ&ha!UXbRyNBh;=DL8 zoUIWfGuOl5hqB3c4{^{3f%)@6@65~(@T%!r=mM}X7o?=R-rLm?j8c@0kj~iQHFCI= zR@8G<{{k+P5SeGGw#1bx#9DUFW=trqjokr(_V9@F=sKd6#75@rTqa7;{+t``(>uOg z`e6!+J%%67OdehsO7sK724SZi;I3ryZPYxy-`F0pp0wjh{wt&Zbz}TTDI@>Mixx(Y zsS>$#-ri6MULDcc#4K^_g}5Jv^<%OB4RfVLR216m{b(vW{3daauhJHc3+Oy= zPD4)`$@eW@Pl`S$3D47-6X%_$0H_H3+233GTi9x| zyWin%F0ID8{`9~WEPVO!MRA-qsiYz&C4bVwiy`CM9~Bgglt7);AE%+4th|3@tPhO|@VPUp@y#x=B6!7GO_b>wy4lw3G z>-CV{8>K^XP(C;&$yE|6sDW)HFVr(9GUB&2z}fYM)6>S~s!D~MB9!U6E1(~~8R;zX zrLdn)=@9f_tnTJXH@)4q7ESkl?M^|_1s8eCnVWwn$4vVY9LZK&V9OAI3y60>z(q8B?-sTJ1^~g7Kh*1RYIY{~^KA2-+W_x+ z>Bv+VC3g8CnU>wNQ`zL+M#B&XW+qz?xIBU`#?wS?i{*(cA$GG{TnJ~Ukj{O_;7Z}X z5B|>b4*vaI=&e{;IwywPj92PEp$5u((Z&PbtXi4xBfDPLZZ zjdAad!UqKmcTuXiM?2d#)6*waBmku5Oxr>jp{RHtCNi9AI95E1mnjwFn_FRK!MtHO z;w9XoQEIN=T&Uk2fG=<d6fjMhuzUTT!QV;76eEGkNvww zNrA^7AJd_D_Y=YdFqXq~KqQ4q1JqIc|2a2DLai_-cTVdt$A=l|SQe57gko5bHGtey z0i|1t#$St=5LKTTjWs&sX7A%QB{Ea(rA}N~5-mV)`nG{lm_u?|n<8qOQGR5No#_^b z{Og1TS8|et!Pa*U&b}H-4gm~H@&kQU9*grf*uM%0nP5}D?jMHWBBhyTttv#r9HohU z8+K_Xm9@lfF$$i08Dir1I-A4ASqs2i+@hvE*d3S8PQSNu8V?V1ucm5YaTNTvs7p1F zUoaCs^EE9?ZX&>^0U4qjDSfd~MxIm8#h{ynB#Ra9VzWG}@7k|WF_=#)q~Sm!)hs#1 zNipj_q~VJb_edKriM%<;OO?>(b}O?;B5FigQPZ_5$bHQI>sK|k$JU{|o8#?D*?jSa zg3@$5%r3BZmzQuPCR|aW>qKIxi5kBs6Ky7vox_2+Oy5;O%QqFyVf<)O`Bx2uRf+N= zC1uZrB5rgst0^NZl!LmWNI3dF9?OhS$tHcBd;O z*8;7bM^B*Wk*!x9X>z4?#BN=qzu^VOG)^n>1=&pW##8#^c;K0OD1o9?>G8-)%BT85 zl5Xn7FNI4({y)Mf(wfj zjwAOxy|-7YdBgW>YK(+YdS5GQIlqcTm5!;1X+Di25$)NayV#rCXO@z2ncuv6L8Rj0 zQ#A`5pduh;)@LS`=ZA9cBeo?Z3B_+;BxCNt=gPD#SQ~4 zi<8Ap92=7Mf-j;>N{oT^?3Z$56W5eB9ht8W4X-4WO&L=l(zYHlp0S^}1Qs9x&d`S{ zz2vMoE3WJ_#Vh{UWk40l#2@#uwpZ=;r)=e?E>*`A8mP-$Lq)R(B-Jl)xHgehl!`dp zhQvG26rFIlnkQ_np;EC%kN@X3T@9Fr~BMF$>o=$Ab(S$B8ybPiM%|15A)Id zuRBpkBSfG&M61`x-{tmu24jj-pVnmaZF=SDG-W76engTpTqBXX`@+tfY-4vV(z)-^ z$EQs)F6^_Pkxhqcdep8!&rG>K!Yp+xq@38EA~0z~lPRToiCv6^L0GVKjny|}F~`NZ zwxf{jheD>NEP1MB!Ah{-;CF-+b8-PO7;UWj+Zg9f5CW(zIjUy+zeZE9CoN4)tMIEPdeb>`QvCgWGp6%nQ~h3>i?}$u0}pD~_CImK_rHGWe=^i^ z;};M7Dr3=w_K>~Qr0EU2%orl?IRl1a)}=s09$2=O3w0@7Ja|{6@sre0S5 z%#2&JNWTi&dG@Z(O>-*6ksyT>JQnM7f25P2F?Z-0BSO;h4>(M$lmcdwQ4yrYl-D7F6?Zd z%+08x_-}N*AbC*06>Pa_gC)iXYk@54mseGK3|?}HU)vnkJZ?7mZk);N|@RPyQzWJcs6~9 zYbsvi623BYez4`SKXCh3?j+1VYzpH8e^ul>p7=071)t=kh?6GM<>Xpxhy0+a75`RaAyUNjJ&Amn{mz|hEllxnt%1JxdA)=KB5>ZPCWDD7 zA*tWhR=M7FQi&@hA4~rU7*P3t1sJgP*bJWjJ1{_j2L24XsDy7z+=ahgPhszwBqMc0Tn??hYR&>WqY{Ejf<0q>UZBXt4rbgJe&o4jNjm{lL4IM&1_A_UvzcY=K z1sqIf#X5Cz_{0jINH6UpOs4llQqO69Um^;x&7fV@h!nFEBK}+jK 
z-I75D6xo_SKPaNM0G;AICE`C&0R6hp^om4uG*9Mk&fWPt^^V{E#&IRL;-wK^+fraf zyXYC^$XB2b7gv~WsU9~YI{J`BbCyjWb0y~VRCvp*Js+i!qdEyA9bj6d@S-R7m3CHfdSku?+AT)u44iihHd` zJSN9+{kGK)mUuP@fU6)=xAW<1h_931%92~rfFALimRop-J6E|mX*}GTlu*|275?%< zv>0WePEklx#6#W_^WsXdT~mI^F*>ZzI5PN@M9F)qVqP1Z&`TQG*vc`fgNF=8Lc&*f z83VL31SA8FErO4rn}S8hMdZas918rjSB-K)P}gEj{aetB>abES=A(M-(x+qBtZape zo~&95H%A&p&#w9j6=I3lPAx=Zn@;7qR$;5<;r^fjk(&E!*|-*b$fHQr3Ep6DE<81d zeUjk5 zUAc`7Du%{f8kgD?W(6&MkWX5Sk+AgpoG%b@SOjx4`z^bS9IpGi52tE4i))2Im3opKI@I0y3hNBgW&XW9j)-GFP!nF;hxG~4WLCd$!d3yW| zEq^|_&-$qC3kp8A zwcgI?QILa5R&vhG$Q;D8A5ZFiT-)wn-c2HZM|&>o8a-jR?v=vHMLg5s!XU=EaNdH= zh4%PxONN4woZN$gvzIgAU*HJf*}EtfA-syNyrOfT979e3%|HJf_3}=NKYbT91SXHz z!!rsKwv7T{n35Q>;ZWHGPLbk2W7WXm$c&`46*o7B^O_m|-+>{VNK+QVz>}Xn4NNKn zxyE`0NHWIV&p@QS9QN2)KGJXXexz@9_O(NLaYh$m{q}yK0nj{lsdAe?q=_7@zgv)T zIJ){tj>2x9ncFVj`m`y7`!JW$#?gu@>TV*KF7JVqxwY{jlTU*XIH(G$=W**%} zMOs&TXBzyeNbgquuJn1emHbmVW3?-{X}(h^D@j_`_NJM?On$) z+Z(_;6F>P+sHy)a-`4=)4;g$GW^+>T-BBIxq1gug&B+5wU`3;P$J9FnJHHjBsMS11 zXZw3Z1Fpt zN5aTfdI(x5>0A&;jYeX{cDm|WNc08jgL@d3s2AG$riF^7{!W|B_lm>7n5>7WF^zCH z`swgzN>g4{J;Vyq#tTS}{u=3UOTJ3&IXCqNl&B^ViaFj$%v6xJd?pYle)|$t%T*KN zE;a5Tdnt9YXuatXTNhdsod7|(G#`D$G+{D%9>Ez!DdI3GMybpm0zgYS!)U3BN0oYn zRF_$yg_ars)F@B<5(+wcNzP_=b+S6)U?FX1M4Zz>s~=Xzr90_7e&p8dep{SRoRd;| z1GXpLr|mXcW{rL!lFjG>)xzYi)~306GgUK`*4CGX@-(LJt!EDc$W)KhKUN)-1OHE| z@cLK`UW)%8Q?z$~c`?|BMK3+V(<=XpE7cP5Z`69;%7JFTa=ZIc*3IG?05G}w9k8Ro zFjORKg{75*I^CS}_QEXt@wu0S0CO4#h8aN5x%!t5dbLnDUlI~NUFKngKxUNXBCu{h zAzUEnPQ-WXgwUv{n4f0s0oQ$o;TNcBUY0_tq#k@O^lcT1gV z^?qzL1g%DBAo+t@&aae@*P*MK+cc9g$MyGHyI^y}Si}sV+Zpx!7OZ^PjKR4V9y>t~ z$<)E(w&TS${xLT@kse2K$XKa&G!9d3-HkZY9tqNqq@Xm0%e{m2sZ}pjr08bhRUGvr zH`ZKCkEU)POUpp9Pj>Xeit5(|;eaH~cdiy+p8;zQR?80`OYu2vRa+EVrFd}*|8$h) z+0Ed)_okp-rNW(*=fXUD@fduk4poCqmiTdQ{6-g}l$igmq(+~$M9~bzHSxeG} z*El3L+AM2${t--?dqYQaMq7D=%}*T}NP-gfJpm*Lxc=Gw$d~no-@9IS@kAUla6!26 zimUh6ZN;jDso<8Y&Xb>rJIv%08t2hw>-~Nx8juhYmymJ2PDWCbk$abr-(7d>8-DVoHJt-i|p2G}Dn1f~6;4qZgD=$if+k z6XE4bI1RSyF~R|=F+$wW&~#(Vk8Jj#KkD=jd*|I^7n?y(gHmVI+xw%(u+(o(jSk7X30O~ujxmz# zc0NHUNHK$#B9Z2+R>+nJLZeZFG~nBaZS4TA)wE(l?Lz`Rbz})!*O-&jfirIW?6Mx( ze;H+CWepi+LCo%V=o3t4u2Z<~;JnD>itz0vZ$_QhZ?Zf>(|=EP2#z;pDk12?iEm6K zPX}g{SA>b_8f?k9LHJMEb@lv&aGoKzi;TbEFx30|WRzcLeEi+S5C8QGmMRN)y=dz7 z@q`9s><;r#!NPE1)<{|xgbSGpn}ifq z&yHST-uHYfCojR z(m~?lqsijP>y$|^V6ob23Wd7?8+bJebHhvPrb|H7IF}f|u7u;<;@{P|;Zxp)er_g?x#loA#{ts@JnpV8wsFbJ8YJe_ z`gtGYcL^R6Zg;`uEx(;Y)pwq$ul7UMB?14`)d`t@N04r9?^}DlW@PwS69-Rlf9z}o zqB`0B9og_0c#R?K+u@0n?sn)C@DsurwKzm+?Xo-nLFvNXx=xF(2s?%CX1uz$+N~(3lBkW>hfwMoQUb& zmhk^#?=7IJT)Va5g`lK_fYKnK#3Cf67a<^^q;x4MAi1Pt0g5OR(n_avNjC=FozmT% zi{*dge$U?Q^PO|v_xxv^e~f>OZ#W#VSWn!~{oFC@#D?-&icJE-*-R0@pd?gksGkTbUmRJYxKQAb-te$LZxWYi@TA3Nc;bh8bB{jg8<+@Y32t#xcSz z8*SNk+ImGq;!x}}rY+({5Oot!>E5CY+1q*?<`gV2QhmJxvz=DYWG->(APMJYhpviL z*i^m2!>?!G1KnR1JSY(8Z@u3err>qrDneTW?Og+;<0qF%$G@%o@J~B63&sIr9UY!1 zWf0fv33`5Ph!O!S^hJ6;L6Je~Q+J zSDQ^b&t<>rw>ZcTZd%$Xl;!7Iq%Y%%vw_TsxDE4>^{d}$J6LsiHYcqf``-HC>>PU; z_*FsxfcUS4ioy2;i2&oYu=)kr+X0(o3P?N#q!GXL0^kcZqa;Dp@ok==Z{qIFXjH8s zAPLAC42#nOZ|Pef$Q53i0}u{~SpHA}X1)UWIxz(4cMFg=48>Lc?@|A^C;mSk6PmE} z)o2R#ZxiEXt;7NX*N=SEzF!d{9;-`A@fJh;yHTAA{L1=2{7Oy~eVuo{+H5q^n*+8H zjS7;7v>mI=#!LOEw>Z;FFppfYK2fo+_VQW{kr=AIQj;@nj;uqB6P)K5HIGAUw zQ_}NvZ_|V*iOytxKEWG;&9p3Z#SL8@36L$iSf$ege^AgO&#n&ZITD_)w;F!E-L_CN zkf06QI_T&v##P2(nMJmH;z{^z{mnw6`!^J$vCHTb%vl@o;HBP1+zoj6at*DsW$YZO zb$)jP7Z}w|1sHC>k@)&`*{a)%-(s7R%%&`a_YtwPXS#mbSzut`{t~sKJ16!^JTB zVLW01b=vhf8#2h#%qCLHf~I!~qPMSvp-&_|#pmW1{CYqRITz;}TN#r3tlMn!Ix>Jy zTsNN(!s9JC*XPZAoFNG)-O);6?TF~bjgzv?reO^rEE-$_2*Jba{0tGt6mj6TZ?2>xB!C5w_&1LzliW~* 
zG_zaEi8wQcc`EU(7E4BBsWg~AmB8c15+RgWz3xzly&ZU}R+sFysL&p|7dOu=AnY=o zaLvAPO$|Prr>8(^{4D@Ti!e-sa!CQ#U^y_Li9z(jZLjY@dEDSN{5)o zhw(0iJhBX2>korcpH>&T%+KX>yA(YBy>{ajkQlUQXvy))TIEkrp$`twV1eMM?l%;3 z{ebP<8}0iHHZ5bXQn;x6Ff;Qfk6Ly^63g4?#LV~)$hutCP6sJD*AsbNX=FcjiPUpC zOT`%{B&Nd$U&lWzg>r!BgGXwWLfLt@vU2{mMO`L#{rlhl+e}ATQ*j~$!hH2od200HT3k}ev_Uu>~t;`fT>QzP6RAD&ko=Kl$ zt^wVo=k-bFh^ET=;v84%#Bl+etxdKQm$Xs!x=GZl1K|6M{o^ML9DsS)MOl6Jo<^UN zwR}9W$E$u$cH9u~wVFcKS0^RTi{EUbX20q^;l_DZV?3gUxBeW}l{ZiO`=aPHmJ0YB zzyQq$TWB&)NVnMF9iPq)b5sl`>NPY2_f0&FGe_o3^IVpZJ|E=@LpiY5y^nw={NG6> z-R*pWNV`7+sBMil`W7&Y4|VJ25Z?*&A27B z7Zx1+I8PIy#u9yG1&VDgvnZC3#lS!q>69=}Dg{~Xyl>>Sm5Dw)7R62?vpQZ}@+uG{ znq&-TBJKlwsipahk{p>B0AL0JJjm`tVNye{#VYUmd(DdOXBwV0Dr5Nr%GbC~2GeWC zxXMBblu@?8nEQY*Him`o7%s|@8EI0#J<3Fu;W@xN>3>=;QJ-6U_BcF$etJN#h@?Hi zJmlv&fsvw2j_gW;f4cGxs^${(#ek}zWkP5DUj1G6r@aTOw4KMWU4r!&=DS`{61w1i^yh~)WwcJrxEFD=}tut1wWBVf6#At5xrfnSqpv%B#)=$>nigauvRJ}p) zJLJ1q%|N7*$`E@=ULD+eD$eeYY%>e~{cBp~r2)N8P1)XYGvX?V2GCEJ%ReTNN6p-n z7Sw>eT9=X4SrnoDGmd2${hd?NVEH0UwaCxy0kCAc*!t6hSguJ7WPmhhz*@} z5$jo8Tv4G_%ml96KEr0)hTOMt_(Njjzn3d2Q4Pl;S$0hQ-x$o;#3e<@sFxKJggeQ$ zVYmKlCY#;`3i@UJG$w(WMrd$9j5#KZX}7p5UfwYa7gpSJ+TTW8Ixym(_rIYS!O}RT zYxQxC9#_FAYY}6DMnkWhobF4O0X7-6%)0V50>Av!nw|Le391TDL=6IbbRN*z6-9wz|ZI|%GL|WHi*RM81v~P9TqI{Pp=zY;x zn~z>Gk#PlVf+#_jJ%4=%SkxH2gm zDBr;Llzs`Fa+;CDrIllede=@Er+@pJ-<|XGrfQ+WTX;=q9JsHI>_!Be+x_%ts}pDF z;t=mC@J>zV-8u_JNG2O>#&$xBg5{>Sg{<2GcL&hcNU!Im@pHXRk$#{zqavBv+S))y z{n&Lgap%jAhWh7kR5-+J*;pQ(26f>R=+VS;yT#Y?_p8?RZ*ek8kq*vVFC;ll1Qdnd zK>oP;wDIE{Rhc3Ij$Sg#cUR=l*kvkbUiG2$ z6qWdj&WE(f{Ujo7?ku8YOM&ZjzKFYiBZ^HO*IHC&Qa4zf34%UQ^S6=YP=6IRPt1Al z2}8XPE&)T2>NV5F69^9sfb3U6iGS2_e%a25JPiBZWpr)ur#Dw+NMTDCx9c>G6)3OH zoNxGJ5?WClr==xnR{g9RKi&8VRTP`NFM-6HrcLY?9C1N*%Rvv**?k|$kL!aX-yMATg3YI7`+n>D0xpR!Ij@&^3)$dv1K3|ty z72^^{$&>ICzg78l1EjXmjkY1 z1zp+^Kn3mwo>`qv$hsh63G7Kz>?#M?2wpD`>$;XWw*LzfhHyp_(|m{-MSYJ(xzwNU z4j`Vj1Ja!#>cmnknR78@3RLrjm9G4PFrv1O@3NDYp8InmcR(vLQQ!V?hNq-qCv80n zg0SjG$(>JaKT>9CFez9T{dm?f?m*%!tM&_G}NGgE&x=Q6VM zhUb|3#LGMt+_kR%1ofzNX6txY>G&4vwbcVnO7mIrbt58X)mx$x-HeGUYifvZn(*@h zBv`NmGGyN&a``=DTI?kU5 zt@V-xC+DF0QkRZE82IVz8E4JV4(sNFIRykD+edFLdd1UY@XGAy+E!x z#-pv{!wr?jOgW-%J-4M{(YKkdes@o!&p~QP`xftPA~#z2(J5Nui6!tc60PaNp8Lbr zh{sVniH3P5?@!RL&Ap0qI%}-|l+l>git7(CeJCo5@9CN4=@tDE@ocu5v!>KK>pZea zfjXlQy^`T%Z>^L2aJ>X;{Nut~CbBM`wF8u6!fx>auSg!_GXiPGvpu=F<&%&z0@6{> zg&@YL>bD7SR>ZPoEo;rN9ALUuAl9;fe@w*$HA$&Nc8*$1F0q}r8 zz6Znm762ZkA!*L%8KwT?+VnORPo;o|(N7$LI}v^a6tBFA5#t*wpA^ ziY$IR_;D_u#H^5A#MRW5^+DEM6H}EB741|Aw@Q1$43@@nGyB=B{)pKHsF=STc)Y*# zWgM!8eE?I8BVn#mj-ZxH%RP;AaaD6POUXU0|KtGTBH@J> zXyrfqzoYbbh{k|rTMZ@I2SW8ZU@vc38(W5?{PV((|I#sk^OINCB94KzL5CZ1X8H?~ zs_HFh=##j6@(c0;c9`du=c?JzeF9!m8SR-yE&pL79|qP!1179GbXK%kYgAEiB~BUh zlM6aZrc(+viP%rJ8nlW4Zn|y~qPrd?9sLXPJ{3i313DRCf+sf(Q5@d9>_`Qg)T~m! z`p$V4i$sh}!LcG=EXpr(qIM+t*sV&iV@)0xE6vKj{A-Ni5ZYLb?-koP!Q@(nOjKyA z(B7AvxE3gisl$iJ`ujFppT>pZAvMhd``I*ij@>`Iq$mlg1DZ?C11KRHgiwm*bWQ{< z4iA_~O|79XTa-g9^Ci$lK4ebEx#0z#%jP-O5d+KiamKmZcTik7y6+WA|pc}B~DRRZZ{=! 
z=iOj$@f#Vu3`|T66Gu`$lVhvz8k3}DXMAF;kWu>se-0X+%bTgYub-V!oh#|yI5 zv7X$xv4Bg+<5Se6l+=p8DxTjiSBDxXd9!3QCB*1qE-(plJn`nlB5&5G%Fg1lSkwBj z&WUWxvW{Swb^gH=GjZ~DHJ?Yx|gvoHVQh>_GXV;pJ5Fa9ZT~p0p zrYvk#?e#kanok1D*B+8H0ZlqO&BS9>)OXEVp^bwMm9|l>WFEAI%r*zIS7AI&S9W#D zpOyFUJ?5CwDYvrno1&a!xZ|~gC#XuGEIbUmn6u)4%vrE=-4Z7lsJm@(SWc)bz9JId zNkK$8&)OotIf2eXH@DCtvXt&f+Nt9q(932fm->o;63Vi7M}XAt8gWGTus74XlFH7$UqEE|^KgQT8m%?R6^v+t^6^vjfFu1;zdG&LG+x$W1Si6UK6~Nrs*U*?Dgip zN-N3OoSQ!rVm!Kp_E*|!Ocu=YO!w{sh;~)@x5xL-VA{(ybFKbq1>GE>gR>Q<5Fv=R z)0~riJK9KyPTmZ`$+gDc8Z|v*Az-0w;Wq><&^M*knpt~1XRzA|H;1S6WcMI=uRVLj zzvByq!9O76$Mc0}mXPIO{vysqop`g;()zH3e}J{f_icJ<06bdScoPlvvU`0Eak7~B zTe7oAS~V_ORbSw~Q>fvY&=krw<9rwPE#z-UQ0;Cuu3H=$+W%Kad>`pwubZtu1hn=& zr3(d8+R-5ZQFp?&LQ$kf0fGxZ{fjb2JsT01E)6Tr1CVqK0!Wv#Hj=>Gs|k#izMKIw z@UK%za*VGKKCr~vS3!}AfevxgK22htt_FUBOlsS@b;xR$lvYvrmFSb0Mge(VjS=zCw% z%O`g8#WFG5;k_?$J`^F1GX19kKQ8&gIbzqU_ttBQ1Aojvb^Lnb1A7OH)VTEh=fQ}c z@OqXh(8Db@|Ar%xej<3o+wP1XRhYwqLHZ62!vX8I3w_D*A8^;1A%VpZH?~pvJ8Jlw zhoM>~Pp7>Jq;$aRAu;V)H%r{Oj`5h8Zi_3+BmbT=QCuJ%yix2-u4It^JcVpwJkIrP zLExOg1U`?C#vd2NvSGhnJ-$d^*MwIc!B6gXuUk@>rj+kec~}G2+3nMUfR}6BlDSe{ znB;-|IC9RKKB2? zv%>SXhvh2Ti4RkltdS_xrU?$e^3bh{MPQ}nT4f^DgXCEc2PyP@GKQ0w<&wnX{r9L! zCX_ELJxGtt$YBSaZTesGdj5^`BQ2VCUu%y@m-;@ls8=aF3Gc@_rdy7wtYAcpA-aL} z`g?f7zF5PS0ujH5txiftm}sciWWgS+HWG2^3))fs#JwiiA2UOi(Vgz_0P*R3J?A{! z0?Y*fgVo(aGNW1nESmsAlT)}TQp*5AhQDS;EJ&fE{bPzf&Oq*j{A2R8(zJ-y8ZIK- z|I0Q7KRlG!SOwAPvr%o{cCD6YH?H7IU?uGwtH&^B%dx+USYZEo5ef@`oq%qeYc0A- zvV-csMw^_!S%J7U_R$viFjQit8Qp?>tB1C!T38M>ctbAGR{e$JDvx%5Zz~b?7?%f_ z(*Lbem#iyD+n!A1qiUd#x9B7N+fw~^;SlAk*7BnT>}l`pKQTCxuSBUr-}if9x|_qP za(8A0iGp#>aToT?woFXV2dMcRLyb0S}fRJ8&}xw^Z;sUC&S$`L7|o8vWH*s3(` z2_nPwQX!=4u5;b)`@&-&Eg-nzA66ea>(Logrx+r1(LRx<{b!G{;8Mnl&OKKzofDVQ z66^6mV=C~E8roH(pRPZ#s#+z@z%uHa`NJ+i$Ol-el+va+g>-L?F^GJzR2t!ZnG1qY zou7(2q2TeV2@{N+-AqzaR!6;F!u# zVD(SqmH*O)d9k5ZDg_wsLI9Vu%x_hElw5mz19|vfNzs49KbMS`?Py zR&sDp)QJIRdM>s5*P(1 z1W0nCdqb!p8UJ9Hm>v5ohHHFre@Vx)(X{UK9UW*QEP04dSJ&P>;Gj-rjV&%I63L<- za09z(0H97qx+UhC+22sy2`wW7_aP|HU&`t?0H?fxJB@32(HK2LNTwz@1 zs4nWPi%KeWW2PgKKWd}$VpSh<{+CvoOXR_!f663O+IhrjpV0!0uMJ>)ovpgzm|xOG z7|Bc9;77K|?+7B$SITcfq4Iv{>rI;k?bRZV)b7K8y_&&cZWr(cs0ETT{ASu+F8u!A z{oSM|{ab`1UPk6qtb=8XedR0kY?h?r5Il$g8Ffz7rJXKM7(qNfF-a+2P{YIVkka8( z_l|jax|s_MB^7NbhuF6qddEj@Sakn!>ILyE;?JC;f9Bc!{ysCKDqz_5V_WqI>n5FP zh&1`C(zBuT4D*Vs_7qIWHymDHfS31GzxMJcEsaUBG^J$oMmPPY^o0|JY8&J(rhdh$ zY`WFT4)4vKR6NokiBoU191>L(xiYAg9k2=l|E1Ra6Mxj|LGahN82NS&=gsp8Ui4?z zp08T;lyqU^2FJV7IRk}>6^bz{yfYH-SXZX-YEgg1gwK(K>(G?w|93^LNKp5eiH<)x z1u*Az5=2_9W?%5RldzV2IEC^Q4^KTrf@RM%9dni^s z$n!d9**T!ClK$adl2>jmIH?4Nxe@3<#{XE+TX!CH+h(*ZH)>gbRh2kf6$(&wdi<`M zK__6Ejm=SChV&B$*|lzMlWvUQhEw&B^~c62FQxPIa3u7OcHQf}6Y5&h9|$8W+Pz)G z)_NkaqVE=gz&jHP)rulFwY^u6?t$)nl;3xW+D@};BJ?%(LGZLA2cE$uu_jkFV&Ie!xeL?uqj2=Gg zp0`H;u$`Ur+^rrJ7+jlQv)TUjmX2|>PioNHuUt`5!T8)H_6=FK>yG+DH$QEX`8CU# zbTa}GsGc>x9zkR%&c6QX*%Nayt%(W0hK2)%v1G*C;WFp#QGG4myRIte0XpMW93po4O4?Hay*qqrQM)ka3 z&_AGUBu{7;$T=z+n70kXu%%@%B3D2cgPUWPc!o&+ z&FomPtuM{x2pi1TD~Yvx@Wpu1C{^(5swnHmu%Dy*a);^!rc9Iid}ze>N0t>$mo9pUIBw zvqHN#N`h?q?ndSZ6Dd$V=CDf@i|p|btByHr9n&x`3WN}tShGro(cRPXBh&mX?@5cechtoQI= z>Y$XG9BtH4&!KBH2dKqi`r&-SZEmPmBPE^VLHw5eH;-YkhZnP5Y)rm~ZR|1(c&QHj z+}yH~So4H#0jZ`l?$?D>cvUtWyYQDpB`|$|yyOGb5$A8+N1_9^8oNCW4#4QrF62&b z4z}y$8f$RbKd!dg6IdI54Q=9JDYtgFT6T;7Un-N8nyJT^UK5tj9a@aH-{xy2{Ec?^ z`uhu2+T>Es_xh>*rgWj-f(53(_`-;E`P`!Rcoch22wgM{WA*k#8dA+BtT8+{Ceuze z4(z80U<>u<#Q@A$D@w_hx2AhlBN@P)$=o`Kd~BQ2MjxsdmRzTd^_9{2*~$~djOZ8C z{O`gY>LGJe$LC6?v#|ZUnY!ZM&bA#0zn2bufkq3SihfwkpHE|_wi8AX4!?_;z%HHG 
zeI1ARXFV5+BvrzH5d0(+IY!)DNMC$O`yU_gD+0sbocai69;Nu}-?yRq;{;p8H+R$X z)<3JA9JZf@{jPSRDKthURqbVrWY)jI@cUNq8RYk<=uf@>*%=y`hj?OnCn4KuLU@0i zx%B4b5I5c6&Y`t(1ou%qUxl{zno8Nm&TnP zN3p2YS`S$M1)(Cl>(0EI`1msJXpinQ3!~m74lN-=PERmJs;PgHevd*gup^M_*-y1g zU4x@!&plox=z|Q9nIoTvCRal8x=&~icJW=EgkzxolUUvR$y)d2#mZsxh~XB&g^^9> ze92b%2|5vvwc%B0L0+cjVb+QjK6eE$Yl|d6Gcm8G0T|D-eywDxbxjFyEOx1?PJttp zs!>6Ev#Ql;J&n-xgRdYNP?LZz+l?{}WoE}6R+Z*?CT96YCU%?}1e~Mj7BH1@r}}|)dHV7#k!8y z9!KwQ97vOA?y=dD+-^n{g1V+d}o`lEnC&)hpe z z3KpFP{({875J)t4%%3#TAUcb@Q&oW6haG)GT-03C%?6=g06KawT@-U0Fh};Dfjn8v zLeJinmztm;-rv8!e6FVq;&)j2|HXAbC6ZYh=hh#Cij#aRk&!P%vUCoIi+DcISb<$m z3#6)ptc+H(hu5_HXyyr)lyVb^5zSQqu7ZVT#Z?-Oq^;*0qIv)Xo&fsBlVB(HOmYh4 z#&o_9YpOY2QxE8ge5#1GENZ{C$x1qG1ozHYv~R#X9Woe7FKCmDn34j$SppK-mQoN? zQpo3*uT;QqOZY9X*lNCztf5OuasR||Y{!;y@)e>Rqmry_K%WH!WF z?L_vhtNAnHxh{CQ4?xHH9}gTXMQOgvd~oN{#)~JNaSy-y5GHlO;1@{8@-EDu2}sDOcA076!`|F(8hSJw{h(x3pI&hx z{DzbwF7)fl#axbsZ)3FDSj!&4=he78YM898Zv zs)~TYX9Afy8Fhg`K?R^Stb{oQylSSXKMY!msCU521d@iwA>wX<&QvCj`EG&~ck$AEvFKj;Vr?0i zdMF&W=0)#!(-OZ&Xg#*3T}c?1Hec4sUgOUaXnOb^ZPZ_7U~a%Xj!1`eMMhqZ*KH>( z?D2~(%tjjK`u>9XH!S=q8x3Rr*(@TFOj-He^6{J2^r2navyw`R^1g2Bx>L9#mfWAx zL*QlH&H~F2s%z*;d1Q17&xd%)o+v}s0IkZ3;>oIvgY{3*U^m|ErnGdXmxr3BqQv6G z+v|5JpNs@HzMbf7g_yI+Enl@iwMF_m)}Nk&)wkCOP^f=F9s|2f5lk#Y&0mmHHjO2@ zK^G;I1b^be8UgCPPI>x@(^OgD5~VdAUlg%7inaG6C_B_6W){g{>tY=%}~?( zg4(l_TZFYW(>x@$@$&gopG?q=3F6_eJ~&{3j5njA8o(={8%0dho>B~T8oSh^Bmm6P z0Ndxw%G!oWD43Z&-z}jKI}m_UrFlviL0Q{=L9_>yYaUmi2Zsi`h&CLg74P8PJhd;q zT~1>E;hLEB$W{U>()l}atTn#{<`Z)UQ2}Ybw)pjeo~ENBTBH(~K#{V4{80Hi>nT5P z^H2(Jjx|_zd64N41!7wlQE+^vos_le>mqGOWfWa0K6msRs15?*E$gy*H`;So>`a3P z0?`03tqT+47E^RypK}pOmkLK()}qq0R_~Q?qM;S_-S5VrnLkx^+T5gY4JoY}+A_W& zZe6@y%7>K{rRRP~1Ta^<%B~KVsrm$#3Hk*i(j{ zyppu4#x3BQ+5r(=F?DrR`yi_KFuuO#pu;QH&74{^d-!s*Q_m0@z&%4C{h5w?H|uhF z_gH^HEG;jHA^cmDZbq=jKkb7P4y;gMdsu4Arm;yr>@GdIUfvxf)6awW1{9XM!XPx67d$&E$pIrMu zNC_VlRv!uupLp=NC1^KLB&Ea8_f_>~RKpV45*=Ej>h{fo+|`}7vDac6LzLV>lz>uq zuUSC(33{-5lpvqE(hhI2MflXi>_=CmN*$73i;uMa3_L|eBwXE5JFe^8)Zrr6j=zWR z0=c(}De#mai+oOiS|u#3GTT|&5PvPEcC!F(4ae4f$xSx#V=X>EGl^^3m-}Zc44PjaV}Ff)3-{WQl07WPJe{#! 
zW+o5Ymo4MuEoRN?v^twsotvKrY&KOFlB|0-8i?w|G$wea3Tmx%U8>IWjo)YuiZrXV zRmJfbGqCHZW=C#0qzY3<0(bFl@^4pxMEM#Fh_2brjysme$E%y*VF!s5C31A~hNXb!om)u% z{em+PLF$#Ps`^a@N-hB0ZolSpI{*WT^R>HRqC^r=)%Q`pr2}gha;f$}bczwA&|P<} z^O-GJ7kFe~fUJD6IX-|EQsoK#9H^Jnw`r=9&iFXPiefUSy48A|D}kOG4yc;y-N#w5 z?r_=f-6yud)CYcn+ncE-q4-6Q3iX{~OJQS}OkVm9Z&**~i`W;jC|0Gt`To29KQys= z3)>3mLMHYZFwtaxZ&K=uCS8ANh%_nPz6`#2r-Z!(ZL ztxFh9@v0JsLDpr{HS+Y+?H$*Qj*w3hms(SQw`YJK#Wk?Sv+%7TxLReL{FD#uG}w1b z!4frff}^yPRZ{)HS$|Y`)ZGqE1!3A^pbtf;$(B;Ybwg>&m147f6ZJCSz}F)$z9-vSE;|METhN zq$fk%hDxLq*GIQ$DxLFOC*8M37v>h!&&G?7#P>-OuDmx0oS@c(aRq*UWL|XLhkV+u z#*QaUkTJnnqI%Fnw7#v}c+tA+I1T13uo9xPhNPU;4IwGCNsTC_|fTO8#;0G0AYj8>a`yC4_bSHkalXgUd8a_ z=*FQAhsQolmcjX!Q{6_9Lm?uUWaQI4O!=H%y3%M zNieu7HrR&aIpd?LlT%kRzG`AdD*u8+BhG+{_(fv`F-y;i#H*a$koV*mH1R7d`hYi0 z!(X&XkZvq>|7|{ziCW}SELi0-An+#_9Os-l5Z#I{#-|i6e~Dqd8KSey>FD}WpLJ5} zTbRb36CZ|a-0nmx`ZydcQ0OOp{(~NXF_JQ!>^=uUv~c7J@4Hz~BWb)cu)<;mNZxZl zUp2fuZf-_D4_K>aSrrm1&)(RJcIue>=&xs^cQh#aEgCO85wh z*$msojzs}0CG}=`&gD<}(ap4~=G$LFE`|GIe>gczjkJ;&!}%xhe0qJhe0rW-5~=$u zdkE->r7EN~-j0;7E}WwaWS-thm_N$*4PZ58VzN&+eG1Taw6DiUD3}_mBIF!9dpu5E zi3TjAtB7GDfXDG!ygeKGm^jma5`W{EUh_)A&Ck2bqk5lBo>@>^n##X;9{|a1kCE1U zlBOn}bQL#7zhr@=A_?mgl;?Y#6)p;y2xL4eby~NNkZ)wqc0o9fU0*;yW7$HK1Vm*OgL#QOx zZ5c!}dM-O}*~V-+TqplcTk!!i!Hib4*zNDnWFQCRDD{Hb@ieVa!X*(e^q2)}_KrPP zw{l*zdaLa7ED2WlIyRO1@?6+bq)hYK=NyFXgf6nk8?xdfFQVyjjptMuQ!3*p7JJP* zdM2iP!TF{AJl$Gu@>?$-qFZllS_RN>)S`#SEPLF}Z_kRBBcL~x-d zf4yt!Q#55!VwG%CKJ2>}Nq@nC_QrgOgAC)?{feeh2-O?RXxAXR4zdcG|i0(M{tH>U{fr9uKfDaLiRH5r| z*xwF#iTG9y{OP5sUl4j=X*Ui(mZtsd`X3!+i#hAL)aN5XJU6Pp2!3rd zp`NwUH1qtsBmVs(sGKJUfCmKDLPO$-|Nni2o&S}$5RH`Gj*3qjVI1mxVHW=+esh8Z z9>_y9fHVDagL_URLh1@{pgVK79k-G=w~Sl^b*KAe{mdHT#)A*t)s_zn`~ym37zqP31Si#J~5ovjB{I)v0hlb1i<)W_Ri$=SakZ`fx04+Ll4NoHe1(0hb@TIaWg60BY0r6+NCYfseyqSjW_xUrOoc5^hK2Gbk(gTc)*t_Bs2U3>lQ-)d2)u zgE^m%B-U8I3oK`9WSZnVw@_M$ai#OHYN0tX`d^TCf-?)S0c$Ga9`~N0WFo|P9eVIG zvxL;F)gO{itG&AxX*;Vh7>ppk5fv7d^dUn{nw{prb)gW`0w{fijy3oAov2A#!YMg2XHD}L=R-89)CA%P*75S~ z&xsI9sq$nq%)}&>V-18q+e~@ohqTy4BX6vEg4uP!F)Ui0;lT`dU32L7dt>z)(_Y%$ zGwSYz3tt3k53Zqu+FW5 zb`)dDKXg+)Q#N+$^%hV`5LmjFC5j%kKD>Q=rIUZZ-B{FXr{GZmZ%-3D@$X`Z6#G>R zsXN`n$Sy693uB{<7Y7Q{8AKG#-mv}|g&R1R$<%YIwjU%>`cPyx7LAFPf8D3a4gUn0 z1&5Ak4mH{S&)FVV0#0&(j)yMFYw04gi`Y@$&x2?T1ActmazK`4R24bZ?NXl8*LF7`K(FDEoz`0?^~3(+mR-4qvY%Z1qQd!2oKT$IRD zkc`X)EED>qT2LM@p5=`;f7Kj=RYDR`i>eSsukTAhwUCN*#Gr{I3LT21PzjRbgHB~U zd$!y}YJP`td}UmvToYxd}LMp57iUh_81IkY2>&+5H9wamoGq)lziLeG1+ z_gy`XO&5kbw8Kufc+x({IDqjM7|cC-!$Kd%Lidg+>Ve}~QYPJ(OhMcPp1|qB0uqaU zOQ##nxGiCR?BoGbls1qJatu7;ub9C|LM*_Q6DaV%Rb)wVJHaB&?0mLk_c3RCNzu%w z!jCeT4s#Rpc3b{KK?veiCEl(toh*Kg2{O23X398Gbme;U<) zZfTB=H%+EA%c#RDkDlshQ(o7wS3y1aW}SWP zWZl=#M@+Q{WsAr}q3CMcWr?;{Z0~8vjUDipR!Kq!b0|uFZstBnIg&xVUb;DSEqSnY zK>1zxP5FHc2a1Pq9w>@fS822LcMwCXDxs8gV_y@8jLn@Ji~K|@eByN2@DDOE@V)*b z6NB3TGBJrj>%p~H?)tyF?g`?r-pGOJmISaXVmwjKTbREJp{m3TSozIej`)MrjNMtD z8O?L~Z1t;0@AGV)x_qnCC&Tk=rVH89So*66E_b3@$ysopU^kvi!jyFR^E6Gy7TpC% zAYp1Xf&5C+&YhY=)k#0=56~!#>r?cb0!!3F4y*|opLLq8pjaP>C_hLti?AGKdJy2~ zS?n@+xAolvY8zKcF4QH2Y*fNRBpOTB^|zw~p6xgm>pckcF*W&aEb{bKb9P~Oa?3nk zi~;%aEA!ddR_mvl=Ob*n;y7*TnxYQa>|j7pQmdIlX0WXAx{g5ATu?m1t=Z6utIF1a-H6^Wb}0`?VFxKw;K< zXKy40=OGo%dSNaQlfIq72!1c6sDMvX@!7Wo#+RWqAp^byn%62#Gt%lr!RN z8ZcNyF@K8qvQ)iV(WxCN{?z+9(B$rJ-d^B7hRKYBEF&JVw+uWB|9MIIU6m?9TT)QT zD>{0Cf1`Tu>mS9<`Yfvkbi}6aaZSZpL-w1qXi9zN*lEV)cWWX+Ak)%}X;teZo$v~i_mW$SXlw}h(xW?xklyr;-3tqWll_1lH|bcfIE_W_@t%C$(94zCXtxLbIq|tZ9t%%Y75vCB!`vZO zeYZbx5DF2tH4r|TCv?Ri8ldnbLF;tMW;avfRuHU-cync%O#%Aw7etU{?}p5k{ChP~ zqV_)F$u5b?J3(>x#;O{b-}3GehFRbKM1plpnZ~T;3sDy_iOjQBXe^qv{n9_)QN_;a 
z0`VbNq>yDxyDBLoAl%(nxkR<(&6fxJeq~R|73ZU^7M<;<2`-{cb!F{XmabcYul6CE z`)k@^z4TSj*!;L|-?o#Zz1N^l>Dn-tVE})OY5*LHM3?%lF@&FCN{sc@&;=UO;h7#} zH+bdcl;SXFXYkI`xA#yWvN#SqPpGzC$0ay9dXGBKIMz%!zOPl*S2HD8wlx||w*M1= zb;Cw6S>)PQ$=z+!gPn=TO1brwOy|4cMbd63Y6E`D*#hD?yJP!K#Qg;4j{(P{L2Z=e zKUbkSsKzh<$VvbhAz`%?c~ck_+Gdg^(>TG@&Op=}U}dwmLMD?phZ_-q=@hwRZ8 zNebr$fHL^r^p6aOdMgy^{fyDzzD@i8;6duadUz0h1W?&J5bz4f3t89YyhwRCGu+Ns zYlu>wbe%MU&Y9h3eqrvMnPWp-lPj+p6bj&=%V(wjZG3^O?lQxRq>ICg%%neC^u!?1 zOg(*KRz$tvd%mle*0W+KXNcJ0l%yvV)ZV=`J};lIml3?4T)gX1@}vv7p zdyM~1mdtwp-=6(qUH?y>BlYZNJIbZ`7sP8A$;zoz42sPEy%%#Rzt-I;-#&_6%t_=i zbe6vJNxL5SPA0~UJhc84T4Yf$7>&ocxP~V$nHEcAm^Vn{i27B_i2x-LAeGm=V@5O zQjam!yv@oaTj8K>dXZnd0_0Ae2Ks_^Ah-B}BG1VD(*(QrEwYs{$5V@4xpyWFkSh&0 z=~%;fd`DZlZ4|kAVw{+N9KZUkGpfxXed9I{a~pkD)9@UfrfAOE_SDMuSmx8-^;SCL z1<4FD#sjg)iQ|kP6b5hU6_BKnu{O&c-Y=+%$i$NDc%oJULqm2jAC-RjA``$UX{Hh( ztrk?!B%Ys5s#aof#Fd4a!!bVfIXb;j*jN48dtv(o$>4dnL4r%O!F-kCAnjDVOtZsR z;0=GdIyAvPc(OEO#+rL*PAO=T(M%+r7O7{u zH(D+1P3vx(EEYKkW>#AvYQFxc4TRv~mj(^cdjMvV#q&I>=datYsY}LL+X#>v%x_TQ zP6mzs)X~dZXW^d5Cekv`Co_36CE|k3!_QoUgY#z}sJ>DEzbBO>HJj&UP0+sk zE&i;yg80bklxR)OMG7TRbWz-HpFbZld-Xft4|{vR!16k+4~TsteXamU!;aQ!@zs5W zR!7_iG0LQI#NJqBtzVZ!Wv=UIyKy320l1|HW@3-DUT%6d`wQSk(+R*XW1rUR4Y_{< zLto-SA1Q6GAiLgq5Yde*6!YEIvC~=LAPejE(NOu6Rc!EW*D7{_DpkS+ z;2{2R9ioi5?wMqxTES41<96SFzCWGA?%L#_L;H6PC%inGnUZDXRuQ-=uzpO&Q^Nu!8ePd_$0uY%5Zj38Cea|~`WdPrMk-%S-sNAP2Cta%54 zd@OG7J>DCmLHF)17 zJwoaTWO-`3u4zPfH>AT&acSfC~0fZAAmcSgW}ZliIIlS+8!Sn9{oS;y?0bp zTedY`NGL!A11Km-KuIE!w2)9Bk|jybNDu+ZITk@gkQ^l=IU_kIB}&dn&N)k_D1RHf z@4MLD_jdPt-}}b+exv_jR2`~Lol~dw*?X?F=9;s4ha*Rs7o_n6tO3qLP|K)J>2^)) z-(m0m_+^83*bUjlaGNO~-gpSaY_ldv#SjE@3=jBo44kg)3I!g@SaykY>gXKFz1<=4 z+N5gTlPyANo;oDR`Q)W@zef*+y(Ya`No1)#G1JTG)Id<>5aVmX(#P#?-yy$Ep0SwO zcxB|oL!YFMWO18^Co^V<0AC18$h?Wyz3`ifDfB}Ih_tQnJ@yh?ZoSU6_7hFp2^YF2 z0%6FYiHp$*QOO@!nY-FFbs4@m{7AMLA=a*{nq`lXnVp+P>B-rZC!jDXKcJaaI|Ht4 zgIp)uvT3ArjPXl}bTva22dGWNONZ(*U;HmIgL*Z29IbkRTdST^_@ZRZvkLb&_9TRu z8H8+~@Kc+TI$xH*qGuL);bANBNg9&QJ%YK~=!l53am~L{NXY-20N$0#ypyKtyHz`l zVxJRyNv(LQv$|Kq1DfHQ-(l0+B4E&rbuVz6Gh%qN)!&oq*CJRj5(iz=|K$VPwH=mR zs-YO}I}CylDfODL{KSS&-x>(D>m1n4$G$oda!GGP+#VM(X6sZf~;k#YJWXqYtqExZyCqpUmOA5fApb7 z^V`A3Et@0zPP-<~P6tH}2T6UZg4hcs5CVKQUR^q)0zQaZ`QyQJ%?r61h>odhdDuwJ(De)eqSQ2~Yf z!SjR}8zPzC6f5^EIs!0cx@P!oDP zkfqk3adkjTI^%IV*_&XrBTN9@KQn`FBCu|QZlY~=;Ygb7dUYoMV4npb)pi3kF9Dyc zZ@?cMetyF?;w4v2!(*_BdglD}Kk(%M;?%Sr9Q0~jVkr^CcgBMJg}Dq(&!1i@cWX69 zZUxyLqK>~gCgS`Va44RAcGDpETyjvV8lnY5Nm{Jf94T54oR@=fK*Tnj(vZDBnPUYU^+(4L2H`Najj56NVHr?KX=_d=OX zi##otFuFf*i40EjPxC6I9-Wk{$FCr3qAmL-^XAnt3DF|6moBeHbASHw z(!9v+Ab*7*#e9OH`*t^AOy>i}^gFn%X##oiQkdIJ&IM!3wwC}2PMbo%SwZ( zF{aARJ^9m%AKg|+G^vyPPsd6FFXwOS>y=T{Xh-gwlD?d^@d>(~o3asfAH~TfMS%y&>WV=;NGZHJQ4FDo2DIyk z_fgR<`)qyw)vzop(2Q_aHHL3_7{I@S$ciH5EOuO>&w#Bp^y_w}eXBN+hc(hlT@HBVzH zwz7UemS=&8A!6Se$Z(m~hRA-8ymJtrK|Ui5Xgh=TwFd;rwWv7w&JA!-xfBV( zTw|oSp>di{TH+s{zzWb7n{?Co0ntgz<(i>Dg3!Mt{^SlII(TaSfJjjsT?9A=X#F5g zxe;^BsLsr*X(Zr$?}{0#fR-f7^QQ<(cfX%ZKWg)+_Chu4Pv6vYQoDdepIoB=2brHm zeM7A6jSrsLuL4`q$;gu~w)RmZmwBn}iSS@)7DMmz=vSEr>B+jwsOx>ZmF3JY0$vxI zOpL}XI%1hzyv!dW{FJZH6#Fk_*{Xraqa=rX)TRS?zHC<1SLix8oZZ8}{}`&D zll3l&t^#FG!3=fkKl?eB&n_F};Dn6F(gl!813*^d=Xz7j_WMup%V09Mn(13WV)Ty` z`xpKab}4=;KDkt7w#ci(ut?n5jb+}IqLgdERbp7ef>h@YA9w zp7I;Hk%E?wDUe?9_2k;ta{(3rQ2%0p<@{DOHQLNz-adoqzk;gT(4O5K1iol=+4mJh zP-Ygnk(6-J1-%fs)#9;ys4@07^Q_3=j6nRnLMoL4KCJPgNrV2NE7cX*H?Ry0re&G0 zKG=9-{`Q2Bw|ITzleR=u&DY0{Vq6F@y`f(lKZ(+g5~6*5y|d}M8;W32m#`QuokU-y zLupq;wc%8qKGUf@1NaqRap^K0nvzb#UiYn!7n16eDdCs#7TvMca=3n<_IghQWeHRe z(|aF+h*$ZJSXVaAO^w5%U%Ww9SUd{b^ymv}Su?8W=7?W!c~<%G#n)M`3W#O5!w_@o 
diff --git a/models/__init__.py b/models/__init__.py
new file mode 100644
--- /dev/null
+++ b/models/__init__.py
@@ -0,0 +1,67 @@
+"""This package contains modules related to objective functions, optimizations, and network architectures.
+
+To add a custom model class called 'dummy', you need to add a file called 'dummy_model.py' and define a subclass DummyModel inherited from BaseModel.
+You need to implement the following five functions:
+    -- <__init__>:                      initialize the class; first call BaseModel.__init__(self, opt).
+    -- <set_input>:                     unpack data from dataset and apply preprocessing.
+    -- <forward>:                       produce intermediate results.
+    -- <optimize_parameters>:           calculate loss, gradients, and update network weights.
+    -- <modify_commandline_options>:    (optionally) add model-specific options and set default options.
+
+In the function <__init__>, you need to define four lists:
+    -- self.loss_names (str list):          specify the training losses that you want to plot and save.
+    -- self.model_names (str list):         define networks used in our training.
+    -- self.visual_names (str list):        specify the images that you want to display and save.
+    -- self.optimizers (optimizer list):    define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
+
+Now you can use the model class by specifying flag '--model dummy'.
+See our template model class 'template_model.py' for more details.
+"""
+
+import importlib
+from models.base_model import BaseModel
+
+
+def find_model_using_name(model_name):
+    """Import the module "models/[model_name]_model.py".
+
+    In the file, the class whose name matches [model_name]Model will
+    be instantiated (the match is case-insensitive, so e.g. CUTModel
+    matches 'cut'). It has to be a subclass of BaseModel.
+    """
+    model_filename = "models." + model_name + "_model"
+    modellib = importlib.import_module(model_filename)
+    model = None
+    target_model_name = model_name.replace('_', '') + 'model'
+    for name, cls in modellib.__dict__.items():
+        if name.lower() == target_model_name.lower() \
+           and issubclass(cls, BaseModel):
+            model = cls
+
+    if model is None:
+        print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name))
+        exit(0)
+
+    return model
+
+
+def get_option_setter(model_name):
+    """Return the static method <modify_commandline_options> of the model class."""
+    model_class = find_model_using_name(model_name)
+    return model_class.modify_commandline_options
+
+
+def create_model(opt):
+    """Create a model given the option.
+
+    This function wraps the model class specified by opt.model.
+    This is the main interface between this package and 'train.py'/'test.py'.
+
+    Example:
+        >>> from models import create_model
+        >>> model = create_model(opt)
+    """
+    model = find_model_using_name(opt.model)
+    instance = model(opt)
+    print("model [%s] was created" % type(instance).__name__)
+    return instance
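For orientation, a minimal dummy_model.py satisfying the five-function/four-list contract described in the docstring above could look like the sketch below. This is not part of the patch: the single-generator setup, the 1x1-conv stand-in network, and the names G, G_L1, data_A/data_B are illustrative assumptions, and only the BaseModel interface (self.device, self.isTrain, self.opt, self.optimizers) is taken from models/base_model.py in this commit; template_model.py added by this commit is the authoritative reference.

# models/dummy_model.py -- illustrative sketch only, not part of this commit.
import torch
from .base_model import BaseModel


class DummyModel(BaseModel):
    """Toy model: learns a per-pixel mapping A -> B with an L1 loss."""

    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        # (optionally) add model-specific options and set defaults
        parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for the L1 loss')
        return parser

    def __init__(self, opt):
        BaseModel.__init__(self, opt)  # always call the BaseModel initializer first
        # the four required lists:
        self.loss_names = ['G_L1']                          # plotted/saved as self.loss_G_L1
        self.model_names = ['G']                            # saved/loaded as self.netG
        self.visual_names = ['data_A', 'data_B', 'output']  # images to display and save
        # trivial stand-in "generator"; a real model would build one via networks.py
        self.netG = torch.nn.Conv2d(opt.input_nc, opt.output_nc, kernel_size=1).to(self.device)
        if self.isTrain:
            self.criterionL1 = torch.nn.L1Loss()
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr)
            self.optimizers = [self.optimizer_G]

    def set_input(self, input):
        # unpack data from the dataloader and move it to the right device
        self.data_A = input['A'].to(self.device)
        self.data_B = input['B'].to(self.device)

    def forward(self):
        self.output = self.netG(self.data_A)  # produce intermediate results

    def optimize_parameters(self):
        self.forward()
        self.optimizer_G.zero_grad()
        self.loss_G_L1 = self.criterionL1(self.output, self.data_B) * self.opt.lambda_L1
        self.loss_G_L1.backward()
        self.optimizer_G.step()

With this file in place, '--model dummy' resolves because 'dummy'.replace('_', '') + 'model' == 'dummymodel', which matches the class name DummyModel case-insensitively.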
diff --git a/models/__pycache__/__init__.cpython-36.pyc b/models/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..111de2fde931d0a5a8dd2a94359c348483a63d65
Binary files /dev/null and b/models/__pycache__/__init__.cpython-36.pyc differ
diff --git a/models/__pycache__/base_model.cpython-36.pyc b/models/__pycache__/base_model.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9927d690ba5d137cd8c75a230b7d7430d404cf37
Binary files /dev/null and b/models/__pycache__/base_model.cpython-36.pyc differ
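For reference, a hedged sketch of how train.py-style code is expected to reach the factory in models/__init__.py above, assuming the standard CycleGAN/CUT-style plumbing this commit also adds (options/train_options.py, BaseModel.setup); the example flags are illustrative:

# Usage sketch only, not part of this commit.
from options.train_options import TrainOptions
from models import create_model

opt = TrainOptions().parse()   # e.g. invoked with: --model cut --dataroot ./datasets/...
model = create_model(opt)      # imports models/cut_model.py and instantiates CUTModel:
                               # 'cut'.replace('_', '') + 'model' == 'cutmodel' matches
                               # the class name CUTModel case-insensitively
model.setup(opt)               # BaseModel bookkeeping: schedulers, load/print networks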
diff --git a/models/__pycache__/cut_model.cpython-36.pyc b/models/__pycache__/cut_model.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5a8ac7ff35f527678ec3386d0f5d8b6b1586858e
Binary files /dev/null and b/models/__pycache__/cut_model.cpython-36.pyc differ
diff --git a/models/__pycache__/mae.cpython-36.pyc b/models/__pycache__/mae.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fd29e4aa0ea227c4b9e9e8acdfc831ebd335dc4b
Binary files /dev/null and b/models/__pycache__/mae.cpython-36.pyc differ
z2@9^!T0Hr@U)5Jgd`7@38VHF1`E@0GdQmazT_;r~U7n4LnYp6;Pf^?%rp8Oja(WVt zewt&a{!8e)sOoZ#CHugtdPP*gc0W5N<`IA!cT9Mz~m7Jlg^24b?}AI1Q*}P<#eD29*sTcC=6yMpvsB z(LJ*|nv6YCE~>SVe1fN(oaHRzhfScWH6d%4Y8RKF58cZ7nkH8kxmxkDo-fKAG5DUv z+ES~s&#*-eR10NM(L>IrVmR*la(`H{s9IcM@coE35`XS;W$&VqM$}gPrA6jAD%JZtS+g&YwWWTJJ3ygUruCl8VN9C@!Ny~0&Y z;H?wn70LT7c_o+U#DS}Nav!nBCu#lk`SfR#sE2V|x4X6uKV$0l4RgJmgYRDE7`n6(-Cpjbu~Pq4 zux63Ne!ZN7H|SqR;MB_p<7XsEX1f-abhM;tiW@cSm15{wzjq`wZ$+VcnKMhZw*L}U z1r1=%K$?Gin%U-zoKmZL`Lnflt<%A3h}`e_?^Q{$BP^39vR3`ey31(psW;(1#H8I? zv)bDhd`Ofq7Pq@<#LWM=o-Sf-kP_CZLg$mq^F@0FcWH8K zWTFZ3B_97cRkm;EByIN2WX3dF39I46m5AIl=9@{5^;@jokQ7Bz6_%>nG#i^0rd$@w zg!=ebsoDRuPJc(I&*-FfU<9!Q%d$d7d&~VV_(94!rZ_fvga6&!osMR-WZTnmOl6L8 z>L9HGc**k_f1l*#bLlJCd^;I*M{Gq{^f1Sp^#S`p2dNkHY6@#z>S_jygfy0P?GFYi zFB+z!y@xCJ&0=|QlOmO4r(Qhp%&8ZVLM|@V8n_U&Hki^uA{UUH5IH>BHpwtx|NJv)|(CT6GD@eIKS61le}S z#Qj&#E+IS8wSn{<^dYKgyRymWqfeN|OGKH|0LR=L@VsHB_|6(}r<%7>K z`0h;!pdHh#26!{9I$~5|<`3BTMJj5SIVDPEX2B-X8L-a(f~r5oscu!Go@s2`G6SF=u)k} zpp4i`cT-J;u%_1kMj5e}zqhGODJt`ml#y@3&vwvNnT9Kad9(?AG`CT1eV$xtqhAkl znX~Q7y-4mPxql8DbnVPi?{+lVa%-Nvsq-lq&F(nY+d=-c)qI%z8!Z2Xw{2rWbIg3{ z7&hY_-3hO>TN*V!P2TRS1!^mI^4^WzbinQk-EueA&38-QWH)_2@9nv&S@)Z2--SST z%x5+fiJ-C=dX8)@lrc-U#^BZssAIF!+VUBM3pCPj>Qxeqm8~o-*A^S?NN`$N>s)^g zuJCXbt;L0{UhljY+M)c!;i2$42^74 zvij&5=CIGV5d?#ThoXcv#~%=eXy9rLQgd&kNg4trFU{ zHrIt8!b4ThUsx=i!H9ChYFNb6O3erDXJfnVzgDwNK18AKwS>2TcK!TE7T@ttql#C$eG(0sD`R!d! z#y=dH6>9#arHivND%8%j58U_Q!3Sp#KJeh94?BcaKR$0Td+!-6Ltgw1w0RJAX^%1b zhk^$x4y1!t4+4Hz_R*oTQM*UKQ;64w>sfEM8yIR(WL(kUZBj~JZ8T*3UTD0EaW`-P zLV-H4=BB}=ORb>h*huxkFbY%6t6@)brWpA=p}(vwSKhsb@wrub_sUsZr&>bg#+qK- zmfM_c)vY!@AQi^^9N1pj7aD995qs9o%;MB?;9UKjd-}iv^y6Mv!M#=iW^C8h8mfp~ zR*;kV7Ct2Aj$Y-=<;sze6+S*&d9EX*Y*(It`s9m&Uf8-9T@mXVvn39(WMtN)LvLbm z1m2Jf6s%d6+TK_n()Y@(l`68c5Vh9sZR@|QuC2upO4-ZP&Hp{ zc|L5w1Ht2(jqVjfxcA4^o|Lu6|5BU&Op}QI7*)%Yc zvlaM(s8lIvB~xeeg=};78_hLD@M3FixoTk2%eJZ%LnvQdkO|*k=3AT2lV|;&$ORGu z8*XlHTEr&Z0+V{*yj#-$KHfZ2G)uOo*+lcB3VJ(+65gvW8-KCCj4#2xS9qrCisHhs z?(`dpIy2ie%j8%dwwsXq`ClP#KBH>#k)qAozwPvf2EIL6VI zmV=z5c`#;3uOv2#tyS*jhE(a`e!~4nxR;dncn7OKK5$uNjRJ;%ERs#J^%S|1&OStL zepBx4(AUogKl?-(yW(FZgo|EGAf!N-ZM z;bRCiwg89|Q;5{&iqk*C8v?O<@lqs?k0E4gpzn49vV`z% z-iEo<`Ve^<<`aSNoVL6?AW&)@B~OF=;Xptivq3Jk_A$gMrry_`xQy#&9?^cxp3yWr z!gT-u*=`B2%5@9&L|)TB0K&xD6QExt!g~Eq(bp};igAV_(DjS5EiaTq^}PKyRWc-y zx+#y*66x_kSRATrq+|+vU#cT?wj9NiY>IiX5!)xjj4HLt6M#xLwJmN&Brp$j-@7Ue z{3UV3y4a|^dKT^m)E$?}`L~K2dk5^(JN_O7-r|Gf_%w8^+8<}wLwOtG+Z0YRJzF7(SLf-Oez z^No0bUs%HqQga3GZ6Y5wD+Axbqp?nAA{IiTM7PNOAZk|!hhGViYt81prf&|{gw0(I zH%mBFyyl--LKO6unwRgDKRt@qpq7dR>8iVI>f=gt&AB7;3GK7+fxv7pV1GG82fFzs z+0Hs=5i-Ol_RET1skdsC_G+W9{u0C@!i2hVq?nGzzv%N!QrEN5r&o@IZ>=7Gys|Ht znSAX23W6wH&lJgE@GJYmv7ecpiS>k+jb6~^`!3yAIVIWwbPRRd$gyU`R9dsg;ke?- z6b~yf!qgfXq}2Y(qS+Quy;hn4fCnk@+lafBGbl=unp-OYk-_#wH-W__tMlJ{NP>Po z&9@K2#O(y#q3^9~^mC(ytcObZ%WcFXX zY%5yldi}uhevWh<@k}Z#CO{w_dlHYK+w-OH zW2(oUL=j6l*4iOq!oFq;iQ@RJk|_4@#>CA|#0)L$S|%b3dnxJ5`llXb} ze$obF(+3rSG0u+ah*NX=CD8TppU+-m$lyP<%8a3 zYSOy2H;&^Q(|ZtV3?_?n>o<{X^r>=(^E->oWagdvzW}TLuX36xJMZj&u7rwCcj`3B zsh4xu(koDxfrE2U%fI)9r`6hCrm6GxO`lI;b$F1;b)o*x9wg{rK&vj~mRI8WqDQxREkO9F>qJ|@p@_CduR=xt7YvU7FFEy!ILv`0)ml4% zpI#~MmEJ@EM!{G2#)Es;rNdKK+vW8*P(mZ@eAdU9OHQ%)<;DBt@=kTNim z28|y9S~Lqz>igJYNrIowPt-y9!M3T zXk`&L+8QKZ*iwV}7`d?6@O3b#M!*tsU5)$O2GzkE|9=VG0!&cRbsMgV1LWAIWc~k{ z!S{bfr-*zy4?Mi*yrpFX$+$%1h%r;Z0S@5zMF{<#PQT8HwOja8yCoWh_R2zMwYdgG z#arpsa8b)|sO)d5Z0bzg|E=)JVK8SsoJdHu`kJ8I>oM?S{|SuVp=hjQWF9<4FXvgi)+>s;zH36}uH;vl#1OvSLJT;-1rqC?}Yh$ zP*y(9%=wm>ucc-7HdbHZ4}!~*rH<`NoZE&EbPh9Es+(n}*r8kjA?nNOJM0IC@eCTc0 
z$Csi4I6#mk+)&Gb6U!pIBkVGP=iLC97+9%|xFzXbl@|5M4=qEZG2K z9KRNoHVM``11_!wM@$34`@1fqtHKTMl~{s( zUGyDHb-UF=NETt?nGYX|z9R7YU0faU65ipwN=)o9ONxNf0aqYl1hUEq*tJ(gDwk%B zSU{RlANb#ZMmUlz84tIvoExY0sgG`0$Mgxir&()}cZ`RX<)+rPdu4rDHB2o#ZLvz? z>e*h|mj`z%xEot(H810|$DB`l@vDFPH^28Mf9*?0+*DW6*W}EcTWy!}|3Ei0+XF8h zrY1fAAW6NP6KSwNG~BX{UWtzEEG@3BthN2$rHIMPcn2D2X$J4Y+7dG$YJpen6>4jp z6Jq?qw&rtcW z(BBhxkd~2{C?Us7p4YK6xj(tncovIMkn_u=+=`Uu|0hb;RXqG@(DfS(Npa1!*mLS% zxh=bexKa)D)BkjE1Z$i%;}M0Qkba&XT7~`pP~LbV2BQ_sm~c^3cK(v|kQ*9%jMSK4 z)JI#xah_VAirNyZEn$09NQW`O2e^Pa$!-AifweJF$D*_p5~!TBZ4odFNVU)gwx{I+ zQfgokEX!3Tr$LUqmTw@REOO89n@=o0`{Q5z;-CG-`r|mDr)j^hX8Ti|B*$C~HMb{} zD(GTuz*?|2ApR)Xeg1_$O7d}nk>azq$Dai-Yq39_5zKZEN)}F_P#te3OQ@TFA?&z9 zY9iw+WdIg5a{8H@XlREr3zWIAYbB$X!Sa-ef|F$Ze@yuSC+KMS)!l<8oK9LRdaooG zY|*LIdX4+eBV6^iKPeXnB=PswE@@7Y)m)_) zKy@$!GbzH6Cb#Vad%5NzO|gVfMd+<9pz7DuIg*&PlDW*^`u1?Tax-9sfNp>#1s?!1 zz>)^+bYLyw&r_#wue`=;&fnC_4Qi1qqt}K5|JD}dtIz(G>J{*!=jYsDaTERC%7}lJ zr{De&*Bq+@?_k9Bno%SEuZnlZ>vKFM>(8|-aN|c9+{7Alx0t` zQD%I#h6r`l2#A-tl5xZY2^1eokq?6a)|T_KAmD~x=@gud0R+o*8b}8$fzGp-jTHzj z_W{9@anXzU;w5=dimxdbHhi4 zY_8%aw2Z@BYY}%~9IKe68FF|Csrgks|7o4Rm(yq_^?#@fC&(r=OIpD}+~4{Q{f-Tr zK~Cyhq_+JJ+=yezrQQA$o-0L=gN<~@mRwtiE~v7?SLkmevxqh1(i{%3C|Ji6uRKL4 zC01t05>7y`B77~i2X03Q3__84xvRDuDJX$mT#8tKsg2ZyBQGW58WfQ53eNYG%A=%Z zQS-8dZL$y;0KV3UY`Y>ids$ENePsNN3SaWZQBH0KDQV#nW+qLlbD=8KRS2etN3w-3 z9_nQg`xYB4R9;0O@|?LecZ``q!Y;gGg`aPne9=(>bDbU~+jB=Ks{-i9aV0oOYw|PZ(34GS8st}5TBhgZGBTPGCOQj@DT zG(oC`U#2ttJ?ha?&GUj_Nd7yNs(yCXB?NnEf%S$F4S$S`nLaq1-!8|h!7?(v_`7u4 ztJ7|sO!Bjn$+QyP3L1#rTnOScO*qMaiS$|^z*J&#eR^PMLeU_3Vw0uGkvDycYyPz) zL(yLaQ8c#5Eag~XypS`hTvvO-pBSNj?Zcey)iW-D{h zg$e(Eq=FNJ6#osy78qFbBka);toi9zBRY!WBSO!DInEF}5q+)vt>%sN7mV~51#Hz) zC{s$DJTWuoIDncEvjHck{fyH75vN{3K5%lN8K(b1h#V^!lN&wrCY)fmhE_6S!Zfmw z6JRmF!5k0*-?z=YK+HW7KQ3`|=>vG1@##?ZlOICZut^s}2NL)uqYxa%4{#N{LSRax z&B8XqLSpXA_$Q#_c}2wP5S9;|1RuLl^JL;e=_OHp;hokEYIR^vR9sq7wQSuNXh0^< ztmo7_cX+3rvOdl{1D+dlL~b}t61j6^`J=k}oKE7C{(s4-mv2L_SUTsHyTqONP5>aV z*0RnTLfoFW7cHTy>(-|2N1~4){kEu&(RNFzX^=*4oFdf&eH@c&e;bd@MUsY~BkJq- za|0)pRtVa1PL|z&qgctrr3diIbK|g&ama4uv@C^SMNlt`PtAq>$Hy8R>BEii_;Ma#RZ-ACJGg^5nJ#=g*YjiY@x>8D-a`uU$Awl=&CAF_ zpp>fXw7mY2?%$z$MQ(a&q{vY;XEy4zlsKFW1?o1~!1O?;g|sH+`V5M9zlKu^USE7L zBRJQhHNv}HdaYoaK$&BOF(U^9lqM1{AE8i0X`C-aC|I(<3DGi%Xo=A2_u|caOwY@$^>Go zeNROtCXjI`RFRGmp@MIB&C*|QLc);0%Js@-Wv;Ys?2df2@SM< ziu(M2sFPmh|FBL!qSIG&QhWR#6`{ZKmFHnjKh}23Ge89W&pt$3ovu9kRcFp^5KVO&0l^9vLz7d^Pf>WIAEyj+oE^7 zyc6}1nMu@+(Ed0#!K)Ei6pYUycQ*qOfOUxw02P{;%;T)9Rj4AYM}+R0Q|RI}5^x(< z?3N@ZI4>z1AKFxv0n`mO-y-}xq?0HlTwKG^%i3hJf1lFt*XaSB4(en?pzJ1_Q4HyY zp-(rp@ZC}WL;dJ~e2$pm*W7;Kg&(PhX@?sut(e;arLA?>LTN$eyYy1^Wg&_6q*O{9 z?$t?%ex|8%H)v>dNUfNE(iWX#Q*2(svFYh5t6824R0-j-ML?T7QPh{wWI`@^cXE1veOQeZJY?z`2tV)Kg0>>GGO1wJ9_DhwFdFUOj&$LFRf5QKIyF8 z2yqU!S0`Q)DZ$t|4kc8YXGQda|23{+1o9uIp3x}tvU<;mUK+#SWbnx`o){EUnLbh1 zh9^QX2dr=!7ogcNgP;JKVl7aZUkEwU=s7|l zL%c0O2l5ae^wMvy78a8a)6Lb)rK6o3@eU+yJpeBcpI|v{H_gOmo+(Bq7>A#tzTBg@ z#awvyrNpI2Sb)v~55mx>mVN!TP;QGFE_?=A!MzQ=U8d2nMT~da|M%$LMQK!5B=D97 z?#5W|8$MZ98MJ&>7hK7PHBfq-S~6yMg`H8ugrP!l&Xz8@K^IOr?&OdHl}H@PJjoqf zQdhfJgmg1Yc^s-}I1E(`*qxng3|588{*+dV`co=k@*#vG;Y&Wm{#7Rmcdf;wO`QOV zzOU#RPJZ0aRVV@-;9B7fv|tE^X7fe!ZZ(zNN672meWM;7=>)YXP^|D~7qK#zyd9}$ zA4GDBvqBM1Bqvih!@OjwiSd0iI*M-y)C?Wxxez+~k8{R4=#Ww>uG|*k6zRY^Xff5z z-9?-PlZ}SSxJU3@-!lwxz)?b5qR^x-CPgmpXgB-yb>$yX2R^0_$T}V`Yho|wq=EUd zKq^q5&xay{`h5hA_^A5Im^u9-`B3f;FcWmqzMct52D<25vs4Ri7D94_06J}mJ(@nq z3>(phyy=`l%wXg|Ix>_CqdAI#KcoJ5j^`q#F+eGPf}BB0VKf5VfJUT|?1e`#j}bH? 
z4ULd3qMt_S2?fE-Y)Kx)gUj1YCEZV?jU`aAR?H z&u3*MAke)=<1nII&NMM>oFEzfi*MOe=)m-%Ih>H|)H~PdQtd2gOei@B zLHoo#Q{5!)^$I3HqR!#0DtGr$Ncwqot#pY5L=1@!xlhqbWkQnVH-R?1zH_LHBa}6~ zc#CK&Q2Jya+KwBhjYivjG%rG1C-sGAjKw9&mP*zGuRBWUz|m z6sXagaP@Dl#b9ZhCJ_y@OgU2zxJk04iw~||FqYj*HCz20JEj2ZC0nRm{pewD!?Aed z?vi_PVW_pv&Wt<4U|n8Og-3ahTsJJngFWs4fYOb7KddXG8nWd)<(o* zE;3U~?2L3gj~ftQVXZy_GHZ7XQ#2tG=}S*(2`2G`968T8qYfq<2nO7n|EBMq>SlRL zvwY&;=?k|5W;~-SNCyrBHgxH^fu83JW!`#PUpmJ6ZL`)8Qp0tsb?=?(9`z^XOf@&Q zoy454hmtM-85J18rGHwzV+Kahe$xK3eFTDg+J2!GNTYnO5j^#-h!LI-$wMACr#TpuD$`AQj$NCR5(H8v1(L;DZ- zj$vl12|WG85lS3}M(^ih6dGNcg&sP#c`JgILkLr#o!|mkDfTB^ktxYqa~M(HQt6-ZB~re@}g_ z&vqiIrg+z}m=sAI$dL%Sbs?nhbib2*0p!y@dR5f9iB*bYTare>Y z5##h03A5PvztQW2FKAVH>)+^elq%>x#?d~_Ngf6qNYzZP>Axgw><4mh8BXWZrP4|s zW@R1hF+=cb+`E`XV)HYevAHQaW{8eMUwUVN zT?Jn$Zv)ubNKsWHwoNr}7{yOzd0R9u~tdpp2Ykd+k z$Tpt*W6~$tzfd`P&$d>Rob49t5BdDuc3aH5t@VCVU4+1!NuBDeiP;%K>?(HP1LW)o z%HK&UEhF`WwQXB#+GA}@d!E)!)4IHC9ZyV4KM=|SuS6NVDxZQI3=MbAoi?c})=t;= zzSs)Z?6Qnv6+aRMe2|~`;mSuhoFprPasr0*JpY~KCWKvoNB@&@Se19R8C>mAX~A+o z#{Su?)f-p?6!GG zjZ(auG`GXeNnI&)xzqPv(v_KhKEPG4cv7Yb`78T30FK7oNmOtFxoGvWr|e&brLHgb zGN;iw&Bn`Sb(n?`NAFg3W>b=e*c)S20{;CL)ZA^Wb3A%qM4?y$!24wR9nIYHV28OU z%IP9UtS2|KJ(X7ATU~3yw7)Sd7 zPDa!MLoO;Izm+?QSo|~Zx*RlH0YsnJ#>l@2Fl7Q>U zKC5GWM}LcBQI-0JDau=vD+Dh&z1k>WYT_PaE_Jmd{3%Dj!O1>ntqo`JW&Y{+B`@^G zTpVa8?U}wJu#e_yMe{W&WraWd=yFT*czmHD$ zwI((qJpOkm>3N-`TfQmPnR4XjLn>ThEFF>gC|AAI0?rB1UM=@|h695ewvec$kbP7r zqBZ`D%6Pv{FX{9FPN8rUk(C&&{1;T{WfdAJ+&m`O7BWjO$Uq)Kk|DA&Ofp=GqI-yR zXlkSnh&K}qs*WY0S#FGAh!jX3O8R?rws|4lvuHr);2smStTbu=6}8{UuThD9U3M$! z=p*_jM=BhdaHOKYts$vM#d616BoG?mBYI&V8r|kArLG;Lf2 zfB*5*^{^8CSI6pTi?0q61_*)#B9Q|o-;5|Q1M1I7K{dMz-LI%1PQ(dP%c9SgLJWM@ z@e>Yy4GJr~1(z7~Pq|xR$7}oFp`>(U)iKXrF!#)09;!Lk-|s|wjTrG}$P?vMM;+s6|9wt4ZKCQe zO4TEBp_kM?z^u_VRgW|&akv(;bj#*11(moIYW1(4aJg&-;8yemxuU&kR{Kg;LgiuS zXfeQ?|7ECI9PaxQ6lf2#{-%jeBCuI;XQOb6J2Vyktwr%$J3wj6_doK#l%YG zZ}SY>LwCHmFRjo01-iC>Uex(4s<^+XULT>eUs88F`|56DM&6v9qPzE?ZLV*RJ3L08 zk5COOB}Y2R9nyOq(kXgRIK<`(EZ|z!>-5bCDkX#YIycczs-J%Cr#MAZDoNaDQVskK zWuavD0+#3H778{$%93l`YjVtwrkNjQ0X2Sd@M8RN3v}?9|Kn7&{%05?SY$jLAD1P>!-nv zH2V)D<)O$V&u7~NUuTC5+ap7WA#YkN!O#^f$go_|4jAj#n+dP^ z&&Ji1E22DU^KDRzAm$mA3areQZj0F$GaD>m-x$C24>}Xwahs0#(#8aH$`d;h8{4`C z|5v+Zt0Udr2GZEsV|hE)8q_mtEHK}td24c&-dA%PMZ0|AV_QDYM)Ys?JR5EI)w-B* zwK4OV>yE2!7zrmNI57iz>&66cA*i~w(FHeEY)dl|Dm49tLwLyiZ1jacXD_I>|gUSEy& z)wKT%wtbC*jj3i6Hp+G{&l~2JA$nt1A+k~W^p%9WP0>Qp8>Q0tyJRoP$ghhF(b{;#&sWfK%E%5XP*r;=w6!6B(!~Sm_Sm z3nTUQ)#M%+RaG39K7PTbs)hoBZWW4?GygijE?NQsbufLl@_Wjpppp1CIxWfc{-6qsN1-Izn54`zVG5iW|h{Z!6sX zLQ^rh6=y%#IhAV(57B!Ze>Ah8=6sM6=f%yAb8VvkX~#RoR**C-?>6N{1z5ja?r&%u z!rJ6zSmxkEy@>!of`^<*Ar-%NZf2Jg*3A+r9^<5bF*E=F*6Br^-p>gSfA*Ad`hYA7 zzF8j356p!8DSgU~Dmdo2kEw{gZPIj4_wB~}PCa{6C#eAO0#O#4nQZk_$}(T)ujoof z@4gV*XZ6hYEA8*%8BD9pxK+GQ* z=}z+@+-Z>ZR6`3E3DIL`Wf8^lm%%=*VBje^SMt(&>-%bR=ZOdoBMC-?0N zlN(w4hbLHStSzg7WHoZqIr=W+-Pp(7)SeKv>avpdZfH8OI&l8hNZl`myVQXa1Bwu(W~1Qlhlg&dh>{uEOb5+d`-mwv5J zp8RLXlY?)m>ni~hG#LAZ0i<@`Zw7ru@ZBvyN$2f-repcI%Q_5C%H%d{RD5@*p6#3Gbvu; z6Dj5E|3aFe1QWp<*nXAp7VLD#ZgJy_NW26c{W5F-8CNp0P3BE?rvkaQ-)0$&ms7w@ zLyJY-ZKFipUQQ9uR+ibP#hHu!3j)f6jIUqRRY@n=e|uvtii}&C^G+2wr_+DWX=aC0 zdRA0;Ri_6y%@?(`QPoy!j}V)Qz`;81Uh*k_r=C#Uf?o2a`LZo4_FaIP`Mg48Vgq0i z@(S@f?6Tl4S$DjZ;C%>DzkQg7Z7Fy&fkKE)xv!%r?MKOR@eg)l+ODKCS;X?2Mq$R= zDw>Oi{n3DEe%z$y0nuDyyMFCQIBhMOFSzAvZgrzMk~85Mi{quw0g*3M{0BLPxm>=gaj+SsY3EeJ$}lmS;C!pvzC zD402mX6D3+RoBoJR!nbT7VC~-$}3;m-zfz~!3tPg>M(EGEn~QJM!_P^P|he=x-gGP z@E=in%uI+swlS_cy5nXNEMxfm1D-BmJhkV+Vf*i;%pj=6Zds3&*#x$cL2w|KRu`Cq z#jj&gf??^x>(nG;Chf3187jFRe9CBxYBYoDU|wI15)7#kdZ5;AmqVNx`iN4>y=s=( 
zT{F7Q9O%-g0fYJN#zmzyLD7Z49jF_ibZfW3(|))*oysBc!`N z_rYPaUM%OG9&INKcev}50dMG&0WXPdRCx1{iEmi^Gn1Z)da+n%w!Wi!)=Yn91RRp` zCcZU8USz&=vYo^_6YxgMcAhwtBvdEcnZeG?Lo&4mG;-kAA=S5>n{%Q$3QLEia+)nYb*BfMegTBBY;$;4QplPmdDHsOqjjn5H`&BoZ7S6dc)LVyoLPF zSy>iwg!T5d<|npn%`b1+n&13ZTC=DHK>A`<+n75@$Bu!CO`F7yaDyHX>Z zE6m{cyPet3up~s=Td_|@OjN>6Ck9B#KHI@6NHX(1dso@#MeAAITnEXx`zfn$ulHaY zWS^Fc%*4BUIR5~x8*{Fp&+6(qPV2XXelz`BnD=d1;i3(1@`VX(vj64@7M|LSgmLQk zsM-(e6!C0g>G>~^qD^4CQh_EUMHoI#6p>rb=> zC6_9v_K`A)OUOWKlAw9POn{>y!vrhRDAUxc;~0m56sDw&*7diw<>u`=4?eWZlenz^c)H?2L6%>r!yfW&fi5rGwZ~S!)u62CHlo zhD(cFqjBa`S~Dz`pO0$`$vLqI^&aLqIFrv?0z&U{T;5Qsf(^0zl0q6`l~iyPR?HaL z{rC{sRX0?Ey@6I@f4b}SKMcv^NRL5tYjRdyU z@Qb+c%~rsaO5>76y4+v6O%8%xx%yk_ZsoDZDi6oPjVR}+pT%j^E`7EW>V1bExx|5} z#nzQ)uKd`wGVqxZu*kH!Yn#1h(O+I^YhPG=vb1HaN--?`$ua+7A7qn231PX$p2zH=zUr^k8~s~$ z`acLVv1}>hHtQ|6*Uqq|$6B*tK6?rS7rfHf#6?id|63v_@jSL{HxMy%`irvyt7w$k zD@F~qXA%8OQWQ0ClHx9*zzc}S$0F(2D4dIyVH#Wzrq?Pp|H2>E(}PZi^QATFaYFK& z7$L_+Q73skF+P#ni}zoJz2^34b;nM%V}!S#S^X%l|2m8f=;K2s0A9a!!{QxkkG%rJ z;$RCx3jekk7S-P+y-N}@o{_1YyNx0b5cyDg9x?F-dBvaJt$+w7-nJ(79|uy_f5ho! zZjWMp8T;)9U^Y@EiXr#|$V!`jEL~vtna<@+8I}-XQx|mWCp!|my3vcfy6v09yZ7F% zJxRb*0j_>KIK&E#=`8RegG>je!D=U%3sYUeMBPEQ+3TOEi>_fn$@2vwgA=?+#z`~- zn3Px)M!>AL*&|hvs)%sL)_LeAOnjF~D{CSRQ>2TvKhe$lKS$~~Pp4r+^3B&O6m$)y z;7e@xGua*QZllBmQ(kGN;3vCGgzZXnwgVJK z!XFKzb7LD{&$cr6Vyuj968hALZ1?FfQ~LLlv9&Gx?kLJ!9LTa$P5 z1<^{W{_aXq4&0TxdOo*iaS!xAytlF$J=IH*YwUUCGT(z@Hc}-M6Ons98kmBISjcR7 zpP-Xz&QF`_;ihj_F1QGBP;tuFwXSl>y@Xj@H%{HU^n?U#2Ww5RmB^4 zul@9&497Q5aS`t&)lIdhEF?|3y~lW=>wTga$=b?Tu(2&yIU5y%fmk{r;N3>H#u>)RLO_$WFrn|rO-J6;mikun$HB}s+KA~5< zwKca_cyv{77yL2RKjtfCG-N(Nk^XrANjfHTW)=uvy1;^IFK6Zjt-0-Y_A2^6&!g*= zQ0A<$6F3`qu#@oMM!#lZFtgQ(Xof%wy3{j*-u92twn2$c>OZm`u9nz<@5U+1_WQyS zD`!qwL=e`sthqR4;o9k+v6$g3`7r}PK@-rVu%R;1&WYZIGuNGcB~kECU3= zH-h!5md2OFZP9df@xqrzK1sW_eo}4J8)&DTv#3MPSwXAPQL9F^=uPW2YdJ9@gD>F? 
zGOTb|ADN|Ic@y2DNsh>uphT%AR@N^$jvo8iw))p=U1@m>12*w%Nt|!G6f@Z3BCK#z zUE;^iFkl8231b60TMtNL!}1fgDQU=d96jO$Afc^`iW?YuZ&Si?POgt=QY`prNusj?oYPe-^NjboDgA=7zNV9jHnzY}@X$-0e|0{C0sGZn zp>2DY)*JKr)y}+)0^l23YAeScHgD$Rf zMxSBQQEL6BQ4I+R|Jx#uXry1$8$(;e?n6eN>*~kw7fBtsjW#{e77EM&o^D-zvL}H{k^{M*i#Jt7&2!GD<^i$CIC3QwkTJprK*%g#He~F1nL%GeQR{;vwA|*DG)E6;`IwkHJT)HG@878O;{p#LrrNLc}Z&%2)X8$ zk~f2^rv$lgKh02!Ly2)Kn(Tz`xD-cJs9>R@SI?}jQ{NVsSrYtFnkFlBVNfVln^0{@TS0)sRcv%D>+4Pv2vPH zD{!PK&Ax|hO`ajgcJ+XWv6Kho2qvD*)FK`BjY*}6{kaKa$dG3RGXGjxlpv>g(T@1~)f`%|U=M4u#jEM;^3 zrp;~H45k86J#mbqy~?R0K*d<233l6{SvoN0bJnVHwFg5tcWJF8yD@=*S&_2{LPiWN z0VAvA#8t1tpScTyHgTst2Qw^@Mx3ztS%4%@5qXObWT5h>i@w@vZ6RpQZVK91a#S_AQIzr^(kZoa{P z|MzwJ-Vs}WoHF+DV^lfcj7lk~=1!Ijkxn@$NL zds!fD^|GzM^ED##+ie%P#agS1Vf)n;|9so0$vAiFB)IW+>$%)}-4v#24G$~%s80KI zn$ziJoi6J1-8y|ir=Qm8pXl_9I{kB<{*zAHzTE%EI(=0qZQ^S?X}Vo6AJVWtqSK5{ zvpU_Q(@~wC)#*JtDVC>2=X1doElSHzs4DsNn?Iy;R<+qs8SKn9V#2bqKO+Vh@vRRcATJz3-l5c6;N2I3KTsAMGC=4fwo407CG>;pt&oVG=G#O z<-h`Ul8c=DAbpEI2d_Q(6?$pEq2$J0g0r);yWh<0e7j!{hyL-8pPfJIGxisI&}`_h z@mJqKD5iMEavpL{y2wN*&|8_vtm&jodrzWh379uC%F= zvN(e-iXtZSeY18-^#Y1!Ay+IEAJ~j5uEdQOTFO%P4GZlL%x0kjSf>qsiR$>`Y*Yn6+-b?VZ@7lxu~ z_ooJto))z!7uOVYdl-{N>a9s;7#K>RbrzASbUTo8cWA(KmPt{6+uG-4nYDf%RhLrL z*9$!otzGGCW>7sk=f0G276DfN!Tz3(Ra8fl@hZI@$EDKM_;fmbJ9#;ojH{*5GS~G* zsj9Kjb7b@8`?GP5RarG&(BT%bK3QBF55PVA6)BAS+~xax_2l9CpWLS%-ZGoDtHjzZ zyoN@-wcG7>mEVAQYu@d5)u0;*%y~fK>)zH6X8RzN*x*}xu=!07Yf$PmQ1-0_R^$7f z*bO^9*D23B=t$ zXy%xP|B3tce|DzPV^H%VT4+M8BaqR@>ef2X^VYsf^!tuC;Rv+`bK`6YlE^5_WTEF# zom^owGi5Gw(E6N6(fVB{t7N6YJd*v|L`AhIL0L;*>n<{LKyCu!saA590I{{=s16Pp$8n$*1vwKkhK5+ literal 0 HcmV?d00001 diff --git a/models/__pycache__/region0_model.cpython-36.pyc b/models/__pycache__/region0_model.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd2ae2841d317cf26b15b5f189a65f1b2760f6e7 GIT binary patch literal 12992 zcmZ`=TaX;rS)RVkb!TU1uUf5ax#h@`#@?%Zi)C4|E|%hyqZ0Nej=LN>z07u+JT}FI6xpt@sL0jMN$wF2qA%x1Rkj30m=_Nr1}A<;(<~@6+BXY0KWe} zJv}okd8f|lbN+Kb=f9t`Pft%5MET~<=W40czoo|hbfjNK2)>2LNjXY0Wh(qtP1R8y zt*NziQ&&<*>&nSJam}w{NWZp@c87J%H@Xb1Tr+{yc zrVf{7i*jmTCdlo@X zqN>K{Psg7_xPlP8kA3f_8o=>TA!=~+d!Pk|q(jx=!>?&{^a*?_)STi3WcmR}=>f>h z1Ca6qkXeSzjg2+`0Av9WAl6u&BY-SVK#qobBkim>$AHd{I>+yUqIpG+VU31!Vyx}s zV>LfAR`VokKH;2lPCHLv^=F*Qy>#5ilVJwC^3**o&aWcBHkMyMl;1%9{8;%G@@L2L zPhOkwKKwK~No?#-`^=e(= z21dI>m>n2F$E|w1UX^JfAtgo+BAZ@eZ4FS12+Y)w>OFD>@v2enhO0yJTQzn;S%6e3 zTBL{j9XHB4?ylW!hEcX!Z+lfY=*_IH*-pa>yq;?{y&$|#xOYE8Q2|l+F+_Je$*3cjCD|Jws-7i@5I>Lz*%{2=M1EHYOBVlD`R5>9S|!T z%_d`b4)~TA?$^HdRr>q;i@k+z;2J})(Xu-oaHQUmq(v2MC9sAS;&wm0*vqf2`BfJM z;Rnja?Q6YF>Ss73-nwC%VW-1szExQ@-oh@Xc^9L!Z;M(GrK>=LkoQ!~VE1Ta>@akF zt7-4MBIu>JR-aydwg-0A>U6`gvD&_Irw+Df#1)^4XXaO%U2tN{ZZ<97tzj!}BS(zQ zYI(l5L&B-|lQWpN!}ATW@Fwn8!g*Y_=*Ok&F*uWuH>)7k{4%jjzMXN1b zh?vm!n!$Q6gVnm>wWt8Zv*gfGCjMqYVReX}-idnqjuEyEr+vqlWjy+>YlomhI-rU^ z!f7#u{g;Ozsl~1WYnWsz>Zlr{0EV{w>iraWH4{O*YGb#%EhaQR7VD}!K|zGpn_gQB zfEZTO^=o1MewJWad0^YM|NOZh9DnyezI(B^9J(z?OgTGR^ANG9J$kozPe9GbklDeW zW3OY7fp!uTdh=^*yG^fSxwpFBZCiLYHnI2CD?U1`x&{@kjTaeqVAzi15uw^mL%6kW z6RmnI@Yao2!_$Gm$g;CP;y63q(D2(KX#!%X4|Uq@Chyk$|HEG;9~IH91uXB9)(~kx z^GGM&6Qy^XZ99zgZs;|m9N=en&^FRMR4WE3d!$g$x z0t;B=`H=>88Y$K{Q{pr>_JfaYICf~C-RL#;H%Q6{8zlcB{m*U$ICrb%hV`}+Y+$Ka zGx*I0D=Hs}h>Aup)iXKXG6zi#m>Xe7aR!N1$#x&q1gc*b~!YslZ z!h8{{Xe=NsA}k>sLEB}ltcX5m(efx@)N%#kVJ&2BjBc&a4Bk`4XVK~yYSUPcAUuk2 z9N{s9(MYmg0qq_Qjm8Pmj-8|T)W+lJ!l$vmq&4l(d&;fvy_tGQ z>u18v#coE_JuIfB;4<}^3l+=0jL+e~X4;LF#h+m=^ zB)JQeb4YzTe#1;(w?HXo=Bg!JyGh?&d(VwBQb3#8Sm>IW7K{7{Oc*Tj(&jL+6=kV` 
zbr}$u0EFuhL?*VX#7pRpm?mk(Zy*ECL-HMd!k#aRsu9*wB(bW#5nPk^n&sk4@^qFn4{yM^ChUGfMyW1(KGQZ@1haD&F>jE{Pq=i6^LHaa?tX z(m|)`K?el)CWR%+dcijH%PHn}aW`BHDH>&<1i8K=Xy2n8STJ$B>tM4!6c0$dDBtde zaZ>YZyO9nFd>x7m%-QQv!EalRXm?O2%JXirCMuIP;?hObu%U+8P0tRZnem*}?L^sH zr;BL@$pS6&0(%r4US>Y|=;#+fl}GZ$k%; z@|f;T;X?ZqFVGURL3=lB*>^DvRuF07ZV+W&cj-i;3{(jEW`Xl$#z(oD?Q@?rfET(# zN)B_BgRG9yZ1P2_??;)}wz%P_!mCaw(ljuzDemDl(7-3&U`GMr*bsU)N|TgnMfxSj zZUJv>fiuRSfq02JU`*MPpR>^v*TrlFPHQZKO-f1vG7SQ^xyyKBPL1KoA%YPXF~Vg8 zDcqD|nnc!$S|kCdW|X3uQ;JGSJEyE8wW%$aa#~4Uf>2vPo#W~PYMoS0swJhj_)r>~ z5M{Ib2=$adYh>{e|uURYoE;6owB zT$)+jm}g}RH|d#(nuxat!@&-vChds;2BYgbP%iI~1Exc+qC4=07(u%QtAcokv@L@5 zq^DTf2cvDX#($Gk{WsWtxVAo|-)83wEGB80ujzxLWE_qVoDFTxM7iq)$`Xp^dB(qlsQJbO;MW3SR7DYTWL8$=IIPG2&?JAMbB&kU_ zG4*~A31EFHrXZq7qF$(^NNEi{hNp>oEGVXpOd}h^v<5sSlH$1)XqV%|ZRIC3>m4z?pPG;%EOI!GEnb}LESk?nGds!xKRPs{&%O~TBpU1%piz4Q8huGN^dwXQ-A%96rw)o4` zhRkRgkq#~HBKlK=pol1yDZ-+azuwXW+9k|2;S%KNNM!tTI9ozC1W0yJEuk0XhA75z zUc4Mbvs{OyH`U2=4n`Wu?DrIL7Aa*woJM1fAWNRsMt(rrf;vbe)(6CCiexWftZ7cG zoGx)1qjH;KUq_1C75ix}CvH;gL*qcZGSagM)P9Z(Y?k!%F_d~5K^GWG^BBQMQ<&{j z?;{xL4b&%%GlG)2OHdkd1YLGWi_m+oz25c750*&T7lBeN65*HGfeX&q?fQ&sfpPvJ z^nZbo?0o13;e~a!Mrz=6VSCT@gBz8j;uAP2i7RWAVuV?z6f?7{lwxuCETu>$isvXI zc_>^&Q7Ja~z&%-Yj`LedJ~saj<%i|sv(ytQp3y>!97Cwb24p5CYns6g1!xG-Qek(3?j;61l{;>AQsA z0)9(MZ()Lz;*+kDh^4b_4m5Zy{$NnJ z`}GOPdxNQdVNmQB`r46HKf9-hugkQf9!dF`ewMz2>HgH9)SpV~{8Xls@|k`PSFovf z)qb%*jkwgGc~_N?UC4|(L zuL9#%m?Z;0!JdhsYw&Xdk8#okoHAjx#_U)Ho6q2swHvJQ$A<>T{x{Jd@4HmMe-GIq zS^qxLyyc?{F*8RBA9;OD-FB?NEMU#Vt7v2Xkh+?rc3AOAdGJ3dm%>rinX*f2N$o99 z?7&#{3Gt|*Ew6uQ0z){aSg@&_VjYZx7z-v0GJ=p9%syHc zV`TJFuE06+^^r&do$%hu+!W_B{dC+XL|~jE;YTB6kAS~`gQP4eLLn9t2njQS&Lj{L zWMvj!XRg@IDy|i9e+7pM-`??^4w;;eN3RBOU1Y;~a&5bY?ZDr^UaZVArzQ7IXjkik zIWFtsb(D*5AToYCK$Dhj_6jGG0FgL;S7d~5-CQEO^ivpq%T~{D|@=Aqh1EQUSZV< z92Y4*@qU`Hf=njVfP5I{!1Jl+G-E^F&p=j4&p&XCdJMkE~_JIRI9}U9elxgTOVWxxj}wV=x1?HP-q;XM-oo@ zfMyTg4_IyjRt%SMcQDm2;vR&g24;ZU4Z_Zq@EGm|ra=?aa*ce~QNH#5n~-2~9;lUS zJo=s{XuZb~9%BkJ;@k<&J&xQ*5D0pbWe;f0dLo8W=?HoXIqK&$x1<{Z%2hb`BvRB5 zK}Ri~ilJ0Gg04DKyn|~PY5nfcc(-Uo3e;kick9R4t^x zHjg?>s5`!^3#hY%y5q1#-qpGO0-m1Gt}bzYzQ2eYCdxyd#V%uZ@h&f6g-c_*j1rWu zC*?;_eq^kC9XBEW`zGx=^|21Fk^`=(jBgu&Y$BXSc>bxTeB2w*VvF z2R?Lhm}!*Y1>nsBL_3)xN_>feb}=}%sKvOX~(cogw#C>^me zRgmc<>i~++N2>FmlyYbuY6)Q8Q=eCJ+C8vlUDv) z6#WiGt4;mHbyfiA}CQ==l01nLrDLAC{0*gXgGg5fOz@+yt(^P(&rXn{< zc0QijGRrJThNp*%BhjP=@@Tk$_#yz6WmXH#bd#=X(1n#W5#Od^{)D3MQ1o3yW@2r{ zD@m-aVSdueF-w#7jhVi*X~CC}#tJWOk=rt`Ac(E_HgCAIac43++@N^u#M&C>lk!|3 zs#aJFH_c&c%giBf0S;K4+A{MrDs92$D7kf5Mv~v`GRa7CrKNQEEXy#ZaMeI2)T)}H zyLa5AmtbI>BvVQ&E2lD06ZLY;MtgyrG-|^htO!K18xg7j7b1XE1yg zfh8-=C?t+TdKoz(9+a$UDM@iPLGtw`>g4(=>9U+6n~dxu=&p|X0=_HC*+|M(q#ysQm&EYR2&`l7`2{r()ZGk zjIxYe7l+Gm!_M-Mr0n6c>9~v>8HdYA-^)R=LV9wl*e~&_U`g_-vb-v24Jd`n7aUox zC%=%|Rl$2RH)1|H#xT8?;ho%uSBltZl;aCoU`aGJ_JF3cBzh=icfrG$jC5N38FjEQ zVr*mVg&fv&nTkSwwu37>T!6xAgBLEElFW(ydeYp}pq@$x#jn$_WaNwYDEbmbzk#SS zBh9+p@Luwv)A~MD{|k!#29a5i^kr4?8tTUcSef*E$oa)YI8n9*-b{0!EF04NhnG#Z zJ4|bTjyZgpXpMC36zD5EyW39)Y<>0VRvN-g{zFJN*!8l30H9 zh%nJpNu>s^YY9c^ck3bhq7TRtH1UuUz6{tU8Y~iBK8j^+510j5dIFXUaajmF%i&RI z8hS{+Pj*k1JqP4O1hvrh-}A6EZ>>PT(E9o@r2FJ%%Z~6-x(vu$FfQGttezUfz8_GH zA5!#}6um&vUs04uF!3YGegjdZAenqw9y7B^cEFi2gjl0IQoNMH&64;#v@#206$_&( zcyl57teK5js`zUf>2wmEq0~ps6=l0J zORo^&1a1eris`9Cos9$1jVum8 z^uFX{@a=TEc+gRVQ-*1i6sRc8uMn;Qp1~_Ox?M=)odfLIFcc=P9fvL=y3&YdD7I+d zT*zXo3$a+=>)<(@hnI6U9+QjrX*q>z-L39*+T?Z8T0w1Wi)eSEX&Ak@L5kl%{0p5u zZ9|I1541nak%VDSi@S=Ge`I$AtWf!OWAB zcAX+|oZ7yPh~Hqi<~%=xSoi()p-&oibA!qYV*nx*yb9b8qC7+K1TYZ96QbO(z4%>Z z%_Yv_wF|a%9rxYwe^7`l3PqJ7S{^TJY%3CMiE`&9wqC$aq=J)(hBu$2MaXoHH44OP 
K`3ouf-TZ$hJJvA( literal 0 HcmV?d00001 diff --git a/models/__pycache__/region_model.cpython-36.pyc b/models/__pycache__/region_model.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd86bf6ff5be52340bc52f9af8dda0791600f814 GIT binary patch literal 12018 zcmZ`a~*Pv7%U_yieJ&Ek(7ghqX!IEL3|F$J4meS>9RhaLFNc zXDDe2vPrXcQM6I(GzAI-Z4sbp+NQ6hk2GxqH2(_pS5WjvF~0)zMOG+Ei?n-x}o>kw(y!Ln1L5PXv_T&`SM*is_kvvdhPXgNaaGum67jKvr;78 zAlz-sR#>CKhJV?X+y}+u?Z8&3w(c3?jL?Pgu6n52Igu0jcNIG?3c|#@Ac~@dx5?8~ z>>?&yLAfMm#4O%ZVouECT^0*s5$|bnOdQ9%B2I{tc+ZGaVhQhAaT?2eDxP}7tNZPM z*W|u~Wm?;NerVO2ZWvmAtJCyaUJ!-Wt{eRQdst$8`PJr&vb*8x7`56F86-z z)~(MkTbI`_ua}b1Fu58gZSAzBY(EICJkN-oj_pFN#>Jx8aW)lEmbs7z31K17-Q5M}N| zl=%-)7MQX)_N`+dq8yKmMvl0OI7*y;1cBulpTQcQ5@*NyK0Vgb8f*CsTAmXh6X(Ub zx-LF0o_&-{$G8yXvFeMD^t4<>`O;YVxg+Iel&_4{ucEv*R$j;KH^k+!IX%xKyf8-j z#Mmrf%w|~-FNv2QsXPz!Q2C-Z&<1*6f$UuEy}(Sq>({%|3$1pCI5)Jyj#u+{{TlOn zCMtaVBo@8W$_l6s36QNL*E@3&>9SSpM$1F-TDEo}IiOUldTc}o9WO2jZ`bWMqqtDp zYx^}X?9HsKxT4{Ne$R87ei%I_-g-=o^*B!nvDf1=lJ3(;9(aCzFXBmoldUH7)PMf) z8^~VVy!eGGz9WlhB}+(RW6x`LdgYasrrX*PuG4CE>Y(HAzpm3~hvYV`xHp>-Fq-pu zf4f)7D7U#h8VN1N2BlpnQfyzFl3m-o}b^yy7?)xUwF`xf%{H;`Pfp ztdthvMv)gdP4~c)VK29_{QUAKdQc**PB$9+Ry(jB>_Hh>X~XBznFY0G7y8k0n@uP1 z>X`O@lt|q;EkE#g2r+tpe-ZN*Jl_y=Y)~9!&9J}5!lq`?Q>M9#@ zq`}(Iz|sm`Qrlq2U2d0s3$yfsbo!)=NnK;nnUW@!ShempJz$&_dS0UA!!>MpZ4Z0B z+JUl!{%*CU2gM(`eluL{<*`~Xx)YbcBu?__IG?@?kc&M4LhqTq_5&+wTcZ6SNb30O zyPg|CcIber#)$vq6!t$kgoMZ48cvcKy{e6xF$>_+PEdQSK!Y;FwrehSyW3*%;A3%a zCnqRLJbKeBDr zcX&@AgQrp0!JcEUQ<8;#2`BUxR#tYKe#i0db^ZIU^j&OX@9ozD3|R9ls#+PZvfR*e zh44wJ+@>YHdbf#QJ%*uG>(kMN&|+rUIT-1Qoo-|W?TGMM4vmyfyWQm7di;O*s}|!j zhIPQ@UBdEM2b;$RX^c3x+ibg0Y;+^P8JnPA-a+43_tC5zdXbYVoiLt-K@)n;et;9C zp~9*W=b0vB(+?eRksri5lwhnnf2hbRHui(fHQ`3?<+WbpV2!XhTq9HlCSP6)aqdpb zi}u3D(53Y)ohT2#9>Y)Z}GX-!m z+v@qg;%H>J5qJ|&Geu8KHh?7nw2>Dxhw5$R5F`y1p1!DD#{ph4n0qSMm?ACJgG<08eeyoXa?_VvVvZx50%Cll%L`iosFJu zSYnZk&_~)B{h8=o<74P~KKgj$S(pzeAJIxKp#MdDt$L`AwY^k_Vc|UY@FGUd-B#W? 
zJHYxI&o!o^Wz05fN$Y)Iy%)TtL@SNe#@fU@)gS#`A}?@)z)Y0?&)PnmF|c zyX(A^wK+T1=H*3dJ=yXK#<|*{xK6x8f|iy<|J;zaWs>%+`OT;NVOK}j;8a2ui|nT;CBKmC!SBQxMlJmDe| z+Q4_p12rNhRlla)Gu})jw5rN;sIO|Vwz?jxK8tOVTmt=r(lt^Ogm|xfiUy;R^GfgH zxWqUPz$|i{)oV>szTqp#3_E?GF2^}Qv~x37H|-o-1F?F?Uc8SUJHVMlk#4-`Jpb|w z+vF^Dq{phn6OX@)^?+oMfkQKF$~dCzD?RX|yh6(=gzep^u7r@!% z>BeT=4R{!w=y7{g0&>b6jhkcNDAs}?&cD9Fy{EE1uW!UTQs}MNxFOsY_`sDo zGYlBYpW@zh@@Pb_%kdPq#Z08LX_toPC=O_5RFSN>lo3KQr9-c|%Y0x@jq$+|wwKrO z#aHo!FCtOYa*hyV30N_!<<+ugs%5pJUsYE%Q=8XLR!qI39ar;OSv{*QYUi~}YDMcU zK1oIf+Qm=0f$7Ek}!gYP|47G zW&3ehfLVe%Px#>8Wy^aAO;L+Hk$oT?JnQTQWOYcZ(?&1`W;(1ilE+o+rdM;xUtkm2 zM=*hG;lvB!&O9qudfCV%tt6E_*y?VS7_Ytz;ZJ#$H!&$BS*^c zm2j3ohlTeqkpa^ysQ{5clJLS5L{4uQDLqHR(n?Ix8sz8P5@a zNb*Ra<(;~8g|tDJ9jYus*R~Vl1C;GNqwH<@kSdKZ@;b6jjR|9uljo!EQSpGyanub9zRQ$hzcHvnhie?%Wtpq#1q?L>c-iwsy|3$U;^!VvLVVnu2OBM{D z+b<)79f9dC)O|#C`f6X3$AuOk{&osyh&r83Sc7ZUg@KyfL=DV!^!bd?Q3G{6)+f(3 zLgY|mOw<&(=I2EoH3e4TrZAx@i!OrP3WY{%`>Ez2MjZL}RD!cc>Wt#BkOPQi3;GP9 zpwN{E7A&U~90C=|yQd==S3B9ZUd>dIg}+2eT@-#Ts$QLlFYxF6h!B&%h^z%fb`v@| zv8}{2&Bi1BPWul{dqIGSYLvZT)ex24@e7j7n*t!go`>WbO-QygANdJnZr zE>VhKp+4ldCJ7n9GDPyHctVP!Dfu$|$mG9wY@F*d&6?>53M^y*e;G$hyoLv`4w)tH zqSBDV806(OP#C}g*=Q$*AYTFs$P1%aRv8($*vqIx(?#ulF9@rr~R)dbI6u9?3=w$4%YK;r0{~b^QcUlDu2IQAdP^(B$v%gtV24R)INe zswcG)jOWomfiJ0WdLPICBL0u7y@heHN>9E*Zm-6HhUWP8KYRQZ4R$DV)dq7WHzPHZmiyl z)i;L(#E$I>Zc0d{*Y@}#M7}_uR2z2QN1S&zvJ1Y0@9af(zL63dDWRD4WZxF|D@PC@ zo=(IAp~nCd{}+rLK8r+A4egv-VGD9mH8|>Ms1|Z(Gz;`ubzZNiy;I}tJTX$n)Pxii zPhhKrR7+p2Lq|V6*Vn*uORS?ogT5vWuOrk5M#fQ>5$Gbe)+ywOW6t;UIA(o|ZU6Mt z5LN#cM&yl8O!41Fv6>3??;s!3o{#R3Y?Ew1ir?6VE1b|SVbP?iXlH(xhMJW1cxqH1 z{x|9qpyco>oMa7-@QLv)7;BwDWeMG~4LFlRWeRf)lnpY-U`R9M5y%1lQVPKi0UlF$ zs|Y}o616n@$p}Sk9_361D)_`GQ(u!oDk&3Onf+vZs;69uz>^^r63*H~fn?>$z$d>M zEv6vjY3T&?4@C>{D9$wZCXgi`yhMjKAn$t$vaXNyhIgKnCVE%Ul18Yc)YQz!0UN!G zx7=n8R|B|%LKtRr-$g#G7!Ly7sbgpH_mk_}u<;4a;&K`*;oyk;bs|kq+>%hxC~|9i z^6RLR?^E(MO1@3W?@_`&fk(ODLSoM(jzAXF%m@ry9PvzBAYUydaPs@;8CQ+~P+A_s zl$`))24=%@RzD4kmp{ACfz_f~F7X6o=P?>f_PZHl!{0FES8=!*bF)Q9?3XYu(aE=< zavqFGxm?P~RHl0P5nw1{;R3U0!n`_#i9iEeQx43d6Ek8QW=gz^%$%hFEDRu}&|u<= z48j8y^QPdD);KN^kf7_WfB|u6TI-zSd`Nu@!gwgUW%Ul+pgs`&2$v>cGEMHHMD!Y=3=QYs2OAfFdoQZY-k%=~Me%YF~ssK!~#MvA&RB263nc zzC*442#K9YCnzl}Y^!#w_4rslSQO zM8t_Ctt7{Z^y)0R1_M@(fd)|_mb|-YL!bq;=Ny@>WI_Vng?0t+zz_)_<;hEA349h5 zg1vmBGyu2^iqTYqF8&}VgHj(qn2RbAOiYrQLzM!@pe9}%$&@M#n5@)$UgRF-V9+LY zO*Zo&D2qk>@UjiXE`SJ;uBk|PO{eJbh?4p+4sSyt+Qpf^$folJR z23Q=)%@`k(?@u>MsN`Q2;Q9i$Krn0(vxuh>Dc~qbViD`Oen}iS3NOjOq2yO6`6?xk zkW^<9%hw$KuH=c+`Ytv99wp>Q*`=Z7h#zbGfCx*Io?$Y-q|s#E21K9cK3O*e-jA-E z>~~av{*2~80>lm;#3ps?^s;&qFlj(^3`AB9a_#gpu!!kF%`wC1ou2^CkI+?_aZI-1 z2trW+tLvFS84vd&4%{7*schrNS@8A%GCBYS07!w06lC(`|4q=$2=^)sGI_8)feeEw z`&L7M!A;3a%0R!j^p=8J<23Spy4NU-*zw6&ka$Z|W6byGn7jN7N+@8=dQ<)t65jWsNdn&5Vs2B^agA|eX%ujxyZ zhP?b+(7sM*N=`Ps8^l@{b#lsQ)w8;(_N*tL+elWns_J%ijxJ*9w$*Xsl0!Fx-6rMB zjsyR)nbs5>N3?514kJiUE%_aYe2%6?cW<&!$uCe6QbGZR#F1gZBwLMx%90q7XFj$X zUe_h6pilv=COm`0G!0c#%jeDO<}q{0oHn0AS~fS#=gceIGCtOFJ%=u)fnoLRW;%-= zQ#3DgQz+~aDUk{*Oe4pHJu&|A2Rt!c&ovDAWpGl+(gDTjCt3{7Z>Nhp?J~SpmP|tD zIL8;UcRwBJgas#jMt+;KnAC6x-@vX02q!89BCDNk4KZ69nX!Kj2y}%d3 zt3j|j4C1*>N$Nc=jS;R8K@ShYxX4ueJ}H!>5@K`MU;ZYF_Hi!a!XI0@ikrdoH`lU& wO8Ik?(DHa$V_T7NOKe_Cxb+q6gc6=dGQ1U8DM2fNVvJ)#+Do0YBhn2*UE2-qa{FAA~v8^}?Vpm0Z zRIFlED*64sp2zNCAySCEThr6i-P6<4^L@YX(cjEFCML?az3+~5|EFgd-!+DQ4)Tv- z3;w-n7{0M$_@;05%oS7i)`~5AXT_0yVWoh*-79RkE3V15POrG(t(1^|*!TR>Rdc2M zl;M~C%2mU!blmmIuDw$AtNz4QV`ajB#Gmx*Xj4O*DZha>jik+_Kkd(;O&vA+{8`k@ zCN)zi+3(MxWG*RbpyYr*kCOSMWEv&6`3F&QFe#Zq$?g6jlpIP*_Mzkse*q;6Ny#iq 
z?(`3%DBM^JJkDVamb-TqOO98F3NpyVg~dr)#uQZkQ{MgLxu+?$l#hLX4W z$53)CDLIIexBJIYavUYMV^$8K$Al?&aCtzKuN({F`cly0srJQrT+b)I_9 z^3s$2_NL!a$II_m-L-DN)yvLNovqD>7S4y^R`Ae?6Kmb@{Pwxy?ahr7YBONNZg}NH zz;50ymwWqKtN-?`Dnv=wwmNETf6LGJTIT}Y7dlxgztH*aOD}B) zorUnmo8&z%tZu4>&26=SajfC`-Hp~78mm?x#|uFx46#36zE1vFUNmE?-$z`%x_$>h!i3ow(5LZ*7OMbFSM8;#$A6hB?33X$HN`txjAB+O1ya*9}!g zr*E7&;kUxp(utk*D<}9Af)h`kI`#b0$)%+e0dBLo(FxCQ`oRg+S;Md~izhbFs+Uhy zGogUs_|}!UzKSXHoB3d-IC|coVX7L^HnZt3z183-N;(FB!dS8V(r1lTfPr7QS^{La zei5wfd=~SuQos$D+MCsxp%b&v#-c==4_OV*7ln-|zH>s?505Y_@x?AV8;lGHQa!G?H0t zfrrE}%jTTvnmhYPr{H)xnzqa|8uDg29YQ>F$b=xANC;>hH@9x@Is+>rD}#U>c`|#) zYODP?Pw~33y}2FgL1iP0i(r6DE#)6X13fy+Gab{jc4qgCE@}9^fAbeUx%%4c|NGHJ zOV!ym&s=mlmwilLvl*A0O%T_1kNIk|`Qmn~r)y*y)NLHfL^4b5POq2F6bH3vssrp) zW^EF=eb@q0m}k~p#gaB31b+=|%**V22~QR?Vtm9Iln0eTbubZ`II;${!6awQFGQ87 z3KE$Bfw+S@i6*N1m_6T|GklBx2%=5r*SuY8P>@Da&G8Et`8Q* zzL@O2WMATb&Cz}M5TY+zIz?Nrf~$e?>+6PZA23i-(j{|f#Xg7X9jPzo^{Xh6aTWX` z?nRHw1C*4o$@n4{x#E5US2rSD?+JHbCtY9vzxb)cEXxw% z%=19^*tyv4TvBBC*xqXSvAxl{92X&zSgS}Bv9sFiZpChUvwv~PUu804yJ>xjd zT~hNLhk0o)Pt~`#R=djYFZo~zLiXKg7uMkWKWolAiU=pO)+Yb+$2~bjsQq4KfCWSYP%t3Qj-ZYmFpyf3X8GMRo`AiqC!dc2 z9LCNU3o>DfJ;m1cR_t(=6i2%Ec1qdpyce~p7^=%9Gu48P&kl`xDj#(Mzl*NA9SzhW zCU-EQY>^Yy0&_A2H$dqFs9MIBKxx%hEE$<}f~?E{+i5L@GvJ%otbq|iimn4veCvXx z9*Mx{n(nnhJh+1>@_D03)+=8Zuia-za2^%qx&zK~EGxpjz@i#a32 z?rN(IrA2N(o}``^K#^&l+g@GmD49}WZqScVpY(JJ-Nhz&ABcET()-49(mCEOr9E^P z%Qfp})v5u;pe0PD({v+Saf&jwqLwhMy+95@fAOtrg+zk^MhRuu>K$mR?q@C?a7Fr9z8-UxLdN$IOl}vfGx5!bCqw`jJ0VpRm#gaC*?00K~ zSAGIO{HCOL7FQWhdI`xv8O}(KOq!CMO;Q7ik<_>!Lu#W`rYC8L)bvvzo2e(5+{Hv5 z)n3|CPqOMXlc$i3r>#q<8KNyuXsb%vn$Bn9N1!gDEYXhte5vbYj$0lvsjD$G)*lOf zJ&o=!rk4=itCtL6^GC5_1gf0^_1z@`1sH4z0z_l5)L8vEm zMzC@OteiyOE1)A`Tj&~@z!rScbR#6=48ai#;>qmxFtNg@-ufiol z;b}A{`Vf5F3q>WeK_gLZh?p#lMbWF0s#&eR-;do(o$lIss#~oP>N{00G2CM}2vyhb z#6`$EAC4VGVi1dYu_$yHm!a_|R()K7xtt#)>VMFMOW*@M?Js8MCN`^RUYWW7Ra6Hg zd&4e6*PpQtL2$av**h~s^F5wiGr4&b9f&eUc`2v{OB5;vHq73-3-v6OpdepcoB!GP za9ezCYLUrLF`)xvZ=<(D++W9eqK+H51q&Vw2mi9OGd+AO3AK-?m7En}Ji-@748{7( zPKv37u`giny;>EU@(uGIA=Mr0*g|ev?v#!#=!g97 zo$@hw3gtP!`wepuVqQE8!hkXrcX2B6snV9j9zB9?4R|kAv-vxm0MeUb?R|c_0Vbb3 zu2}~Ye^X9>fvpyp2*of-K~AXWeJFShTfj%>S+#~Yp;-177``G5U+`T~YKy){rPeR` zW$bf?uO0R4N1t7az~@@F}=aA>moL+SKC-Cwt`Nb?|voS5El3 z9N~F3D`9n=ei)N`$`_zQfpp!h2_{Sm*IS83SidR5t1}oGEZ6SErlxN4b6i;M!l@)g z9+#mMcQ1;sTG6giv5l6l*;Tx;G;s6|iqcy7;x)3fLVbZ)5Qp-JNtc`>|Z40vg*us{<7 zGj6OGbxD6=Z@au6$S@+<=)_#~Z#*lx zhm6zo$jO3#jBS*mvqzLLv@^^a|34i4AzbP^yj;T3Q(jfEY&=IFMNKBc-^@&%e2&Wi zWBllu6PcLB&Bm6jK%|~?5?B+iNlYx*d zkD=i$T7^xV`X}jipsE!VOGchEhtFT|2$JkM(@85{CM=oQYtAZ`O0azLW=LL%Cx42r z_Ey;Owfc7Uw3b45qVqOcoA5_CqBz|Q$GfH%!p}%re zRNo%gAKDM~m-aK-BJIy#XyF z)lng_P%{m0sd1t7MN{32oO{7xE}VgGiUn=IC}Z6x@@Q6A#9Pk%dP0vf(O?o92(2tM@ONsc;;HI0Ya0AxIINmgG)?5*+ABv|FMp9-&^c-yq$|0L^9 zGkJ;);CJdnK;nJ{en*Ak56~ib0Etnd-zfV-JVFu1ziQ1Sd0PAps|xow)#X`x1|Geg zxjodKLkj1ZKkj)=ffQb8RG2_v6s0-YQ<0=Vr6Nfc+Qq&g3S}Z#s7>xvjxD55<=s0I z#}@Jj^KK=grZ^;OMrSggIZVjSB+nuSol2fDcc`ULo6O1|HFh$u+~FHIRTj3Kfjw{r zg#lJMNyO?T?gm~*C|$CQ#pM7fSTGIxvxHQd?`au5v#@wGMS{y(1FF}a`USKtMa6Y6 zFnX$JL|OxLY0vk-10Z<30$O7+OVF;i8I0;{QU(PSW8rck7Z3H>V@8W>?)y=%*?)T6eWyL)>a!TVcg=YVqc4fs zqtU$pdQf*FxlVDK!xFgxvQY4WmQC!gsZG!_P)f>rtlzIacB{y%BTV+OrL)=Z1hLcJ z_FEcc#6}V{_(|5=Tbq~Eqijyx(lADth)ax?2(~tZj`|7KP{)buT$XEQHA}wgu#S+T zjZqF~v63s8SdwP|NL)DA3ft!eAY=Euw&M$zx_)?Gzzxwx#4_#jeH(idz{ydEI_U}W z*JH>SW(}^Pd21F};+hMx=r(J6z#SUoz#90lYi0u|aA9x4qJ zDjz6oXLipWrep&x`_|U)E~4|FPA3uG&Vph|o7YT#GQBt$Ar}A5?1u@cjX~JzhtDw9 zNL@e&*;62)keh{INP^=C28al?J$3uNRkAsQ-edtf)!gS$09PHnYvU*y5R!3$UVwP3HX47fgFu&AnV8yxvdeBk2H26jytUo=B0>pmT9`U 
znMF)@iWqtrsR>8$zR|H}q#$#Bi{$WejG~v$h?p5H86gKgIR1i%kfhi_p~|IfZDhc_ zD6GOQ1Ve-IS&Y*vCn1|KKO7yYRf)@pyn~5}+qk$qWL=!i%nO!?qnY&-?-aK1^VT9?(hou*~mgV!UUsGKBI zwj?<+e+pZ090_0wWQiNMuoeg9O)Vg`Fhu|~EV;7`69(19i5w89EKcy=K#4D5CnME2 z`ze+dhUinVUglw*xiOISi@X_H)&}BET!=})a${)tDV18ww~APwK;Kt*6<*zQIZmY{jf)cn22h3GIs&?cbyA!w3(jFx{!a8K*E|iP z5kjrxgeoh-n!s9Y_Y#wF@{`PyK0u+-QOX=okH4BfLRw4=E83~aEM&yD+#aSrPJmD! zr+I|>g#KWCllD?$0_+OfBg@i`*EA?jqL*;Cb`tU-ro$FoB;5hTFLbwVx+ed3Qj#Fy zVd;(=7Vh-8Q>wdVG5Ki>;IHutyl-fJCB+hsGy9Pg_zP%Lk0UnUS+#$d^Tn_D)vE@g z$r^s$p8^>c1uH%9=yY6qs?+bN76UOzcR4w(eSGF?PyOM})kiZIJDoD*Kt7h>2N?7a zT@njHhe$z{yT+P1C|*7a_b(pF6UIm9X@{)~YN8F6fUwXF8a9m=?|H!>wMNBPh^uhT zB!4`<8CbfKx{?+p^&~BQ1-22L@*s+eUp{~sCmc_Nwe&dM`xRWT;8%q=Cc}DK4vv}d zYvM4O3LD|{`V2T}a#v&eKD65BGiY!UW2hsX&}hzLl=}ty=d#>^EH{tbZBlzMy~3b$ zc^UT)dr4;CF>w@4!QnKC5QYjKMBSf^Du~pQbj=y?UJkC`?l+?HmyK7g@X-1lQS}wL z(M{uv=8N~fU|bHv0`7-n!9SP(b6}Icbd_RMn`aF}*G5IW$&mfU5x|BWASRf0CLeWal zB6H-ldYx1oyCPYeh-=Hm{!HUfk~V z!)~jm1RT6%aWzm>Sab*?eL-BnfE^Kox_K-BGBF5$Z z_C~YUx&jqFE{l!UIfHm7-Ak~7h(+y43(~%V*wxLFdkb_30xe&?h~)LrAEMF1)I*lt z;!2_4+~{q^jb!LtGzz<0y)MG4IEgDR7n0DxO$KCkwfS0-<7U^t9NUOo)jB1O=ka*{ zE#lTWR2wDu%J9e1AM8}ioN=mls%NAgw3=N<%^_>nYQWwoL$u-#A&$r_*scq(cx$kD z)3ZeQIy`PT4H5MjOl7hnGZRSx#HCZ%Wa$qyD+@FOlZ$&m8}UoiNqVgSWU?-w387G; z6(aBxPI_=)UtU5fs2_ZF6ygI5h1uB&P7c>zs7>>NvcS$@L7Wx2!EEjhi9l=O$tyyM@zm3ir%w8iou9+-=%ehGS7LYJK|O$=3eY9% zYeV(Wx}gc+1q$C8OkVz4IEnhY#Qh?~|9~M!SU~aR2sYhtzL1uq)zoeQ^V$1IVNf4T z4H|>#!3q@8L34yw$#=zr+BHsFTBebOhV572A)uD zurIQ{XsKh-zNoSaOo{d}KKVIAByMRrgF6x*wZ&-?6)9{Dg88Ghfe;9(`sb_{VSfn+ zWwaez$4|y)fMc;Mg}xaH@J9LJ9^?>SZQFX}L`vGOv2%A$+KG;P{NdgvR&IkwkV(%N z858Yy8PlP3OtT<5PCMSOiV#EdDwGR7HVoq$8X*e32*zoJp;AOTu;AauP5|$Qk*eQ7 z)dTquwWcaME5Rq#62#zas9$7}04p(7J;H=glQ`A3`vJs!2a!<&6vAbKFsx04!*fp& zPW+1Pb}Q6?{qsEYuSnSrOod9%Tk^U{VJ-yNzH_w1e)<>v%c(AJkN8X zm1cj%c5Q6gkEFz(_&UbhS+KNV!spA}Vn@-}QH)FUHl3k@H{x+3`twKIiAJ+Xf`5bz zkQ=s)dfvzCPKfm-STK5-8ez}LvNB`r8C%cgY3{37$$~}$ga=x#LK0sST^&74P@8Gm z{Tddjxsc2VeJ+o)J$wY>$)a;+B62rB5f!Vzz{Dmfs=tfH^4FxL$aqSOeoc;DA9e$e z!y@7@u$2A^rccla+Kw>Z6mA5Qe>Zj#lQq1OBNq2s`m!mWt?;05<1=h8%RBF5j;=3> zI>`0)RHuEGRnIXw%fx3w6;_+`pJMK-OinR*lF4Z%Pch+gQc4*a-pR`+eNr|R9;56C zrD9nZi@CGNdk6=jb0SqlnXBV{C&#SYHF%t|r<|#w(AUOM?E-qg35EU^sx~+T5{ITx z%Tz2Kp+^S={N3nevha2sf-Y`m_h+8aQtUn32_6S-U&!tHvunEB??2imV)Ms6$0EeZ zIUX|X=~UA0P!nq{jD~QubCk=oAT_)XfwBUL?1CLULXuAE;fWEyEl3S5+<5e*RcVP^UpfQX-F+OatO+U?uc@dymv7ZS(GA+Fi}xPIdG$GTBlL7Sj|t@?tY8zm zK8o=S(KTrje1z17BZTUA-^bgmVF6}efiBd@gxN9V{34E*`2EdVUdRCctzCj~Vac>SM$ljU1Qv99Obq$A=hT zTl6xK<=iX>=uT=)o%1KZQi6z?fHSQWAwro-H;mvi?zOf)iF+594}^@=4yrz#WY#+F z5#a_8`cs36{#<_FC_Cd9PzFlKTl!?_P5oUf z!LByAl18#ko1&5hGFocfw=2jS2gV2WqqA8&i>KD)k zpMk(ja|)_$?7%05*A~GyaS87*`r1ELYOBpHoptbGg(u?b6HOV0lxd`0<`w@clU%VT zU+WcuFEU5X7$2M9Co-D!kHuA;@9DtZ521Rou61TltGoSnZ1?@uSln6S9;@~@n^@k3 z2M&?UO1xVCkbMhszsB6pF%c#FOUx16S8Dn~ssr^IH>ee0Y7XNe{#!&R-Vo7`J}S>V zjBgywLZ$WKlhS|SqWtT21BxxZTNDO(B?3xs9kE|!yuyQb3K5S{L(GO_9yE9E7!Oam zxF8nsM{Fu$6LCT1iCDy_VqnW>B&WlqK7)l|NLi?d+=GKGyofn)L}W0i3H&l=tkUc) zNEzK)d6^5-8!B7+Ha}EH3L4b!M;R728JV3N$(#DP3LN)`LY5cB6;{U=2|Nb-fPYc< z;+J5mLXPKOE%2Sq?*Loa55^8Iehd+mWgG)Jfd@qx_cb`%@l^ufg<8spM|UuZu>uni zQ%^J8yN0iCQ0xIOCfA|FLboZe*<81{W+AYOqLVds^yEqpF!0gC6oR`Z;Wlt_C1P(w zk1?CcB1YrO2+DpfX)~?cL`4YK$z}Bq@CdXndnaLKQe!&a*Ldu;J9n0yCG!bf7DuFTT^1eWefFj8|B>;L>7PHdi6_pu}MN^D-%`oQb# z;3Xyj6Rr=dkjXZZxFDE8gO&)AE3moV@O^U5F|@@ys)&7L+#q;6`N1T<_pqQZ2qO#{Lqci%M+(DQToL!^BBOB{~6?V z4&QvPkLP5v$JSzpKg=8OZ&Bai1wj+gL=}(Q*s@=00|`l|)R4S~n8cW@;VHu0)}IE0 z{h!5|)HBLRz9c0?eEK^3U&MYM`I+X+!bXG8&slKN?~zcWXreFOzU$6M9E^|@*lI{ zpD_6yCjXSlKVw32SKnmvTS(%A0Ueb5z*zICCYK_!2ywntx<{SMG`YoT&EmhuE=qXK 
z3;}mdQP|d2;|jip)%2k?qMX{>Il38wxSG7d*pyFa{40(?hMp^K)Z_3aDg^06LTQb$ ze#4Mn=ZTrs9+$G0TyMk_vehB7_yfpLonw5E2gOYPYf#SkpRudfVbsu*Sp()UaGwH; z;kG*m_HuHruu+ikN1P+)p28NaBY~>qz>5bkhU>#sN3ddOgjC`{^ZYCmjh)LU;S7-y zm`8`9O1Xe|tlmJO0pQDdC!+!uqfmmCnv+njY;_L80N>KU3IK$V&|~^K@nKvR7QQ&q z#GNKe@V{oMm!q+`#8#?Q2{J{%VGb`EA`P%CO+RI`>&FGjjT7^ab4F*;GZ)$57n9lK zd+YG#Vs`QF6e()>D|#tLyBY=6@8ewcACa8C)rhSBfRk_MIQR^3ugYTm# z8V}#e>*M2r`-zTx0&wq;bI_hazZqzM{Y^j{(wSkJ@XkSFhPDm>hn?-is{qg~MCA;G z(=vBVS#eAmmh`f7*_W3dC$OB<$`Rg38iH(lqXf71aB>TZ(ug3V0%pajl^Ls?FSP8ZY;k`yN2$K^R7J%;#a68 zeg%e9PbZ7>qV;?p5A@Fq*y%7}hzj8x?+4J9KRXf`D#4nwIn1j+K+8SYuKti^f5hab z@IH#zF-`WTBur6T>Vp0`jL+mSuD;8D{}YoxX7Znze2+;E)dXmjXzz76q?^)Js-60IL^NKkq@D_B8;z7s%Jo;E5L_|*CWa^igz&i7 z(Vt$GJC2LKM&ZUfs}3%Ur&SD|$MboBiGu^(Soir!00>5#KXTIDd}!SVI#7z9)wZvf{VenIChf zKYqIXAGq$PID&+3UB!~&&up?n{TY&9x3uL6xyjeO#IYjD4_I(_Tx>!UZR3j-%Q(hM zEZZAfS2XJVCJJ;I5jisUsPk)*F(R|!$~GfOy0?ailF{%&*RKJ&Ey#- z?_=^ICQT+SCKs7}oXIDce45EsCW4>To@Mbu{0L$`iV`A(EB8J0e5fagqa^ko#yem5 zpy(rHU?C?5suTW5t7g?MJB_+`H&V~5H1^>?-MFW5cca=^Yt$MO$j>%DX}qg(U*kaI M!}UtzfyVLw2Q&8dg#Z8m literal 0 HcmV?d00001 diff --git a/models/__pycache__/vit2Gmask_model.cpython-36.pyc b/models/__pycache__/vit2Gmask_model.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9e90173f8583e7713a44f0163c4cfe99a7302ad GIT binary patch literal 9239 zcmZ`<+i%=fdM7!Y`)G8vB->eq&N{Is(ZyDhP1bRpwXIl9>IiP4BpV`GhML0}k>Zf! zLym1tM?e8(fh}6K>Al$hpe;~rfj$)504@3l^r7fO!M+q*poP(*?Q>q*-}jL-8Y#An zb9k=bxqaX7yNq9*n5g{C!^-aM?<>l`DPw;+%0EDg`pASLR9`Vw{%WQsG?DXj9o^Jb z1!djOcM4{K%Xz=pDVZgdX-?Uzm=!Kn%_@Iu-h@{-C%HUjPT^bdr#my|tg37&q9{sV zDx&1*M>%s&s2?hg@)NADY(G(vH`Jtj+m4*~L*e+SlmaJy6w3Vw<=TBaZtiX0dFNgj zQN0pNJBV7L>_knXF#H>x>qlsD_)IvDYUUhW%!{1Rzf_M@GcWR@@TFoF(jA#aQ5F@{ zOQI@j_?E?lsN-7^lVS?rs+blt_}0X%n8SBMoWtH1lKRK*?rdzmZ%6yQXZv#;!`Rt# zBctitQDnHCuJ3f5AdZYXAMF@+AdI_q6jQlFhg>m)6E&r~>j=XQjCjv6x^~x*6$5{F z!T^ViV)vnA+;eOhxIx>%iH(nfuI)-pdADP?oz*y8<$|$8SK`~T8wN~Z$9AF~=G?dC z{)%zScOE;gacj+ZrzOK^#rVK&?%7fp=AIq2SB&?1!Rn`WfNGCtzjNo#$1BFh)s3r_ zlo~eI#HNkiP|EO8Wb8TiL)SkrK&``Dzkje3%I4n5sd+0#*fQ=MG<|35);1H`i{ox| zeSN*{#(TZpwPx5^_j;k4|YA1B4; zUg$QRs6V;7>f4=NVOv{P2NKgxAM)}Kde5Wu$Z^|yF>eBVV)&5q|M~i-C@ya;|F8kN zk{(-@ktO<`<9GYD6MDC{Gh+Yy_SiCQKiswb{-rU}zz-&U-9U*cuNXgU^ruFoqAqwA zlgg{BLDR8Z5r3n;y>qvJwHG-whiCIHT>-In;{&5(ce{|xA<0i0E5@hTaGp1uvh5R45tiZ5BLt%tT{G_c4glVW947_9 zXdM^c+{0b3b%7)Rw@xS>pki#hezevv;IvMBH>q^|u9YsG6tZs-Y`h1w>|fdo9~p6I zi11O6_VMa1$Bx13^gs>$gneX%7>OT&UK!M_p!q}rf;6;~QT<|i&pYk^`qjT)`tg7M z<=g$m*y#Y9Q=+uhAwTpdFt*pR_$c7?i>U14s&GYFlYx2Z1NLWES6jZ@wVVe%_n|Fa z8)xr-`c43XO~;_F)$uOFjtpA}mxR^!4e7LdK4$e9TGxzs<13NDZ@hbOB8|Jf*a*Uy zbdDS<)@~U3R^;}bMk%R*q6NO}k!nhEV9X@n@;C4wbz#T$#(Lj7SSKYC zt&@6za@kmq<-s~3%*XD|@N0d1ZJqUFwEobIH?{~a7=e6nNpb;QZtK8;sC1wod^d2c z49(7At!NpUqR(hEYWi;x--23EOX>x6ircuR)$p(1IIY3go=tnCd2%2>fyA2U9`sSH zcKlrNS?DW^`o1oI!adMuSx-uP64Z5~&a;0?8mg_KUI|KKs!h}@-19Z) zROQzwSG<~-X~Pgwl^Et4+9#2wkfxDlkY+2uQd6~nDrb+(^}s_T0(jr$v`?;DV>L1TTAgJ?*%ft#JOY5`!1+l z=KFdve#yIn{^hvgy)4d)g=0GBE1(D^_PX~y%$xDvIHG%b6Y2X%*ISxcJf_!uYcyu* znPYB%!Y_HaKkqGCZ&bTMH9>m%!nT>;vY`0P!lor1+ox~K-glBhs?f}0rlib5sz{Rp z4YO_~1sZ1E&g!?DB>MD6CKH)UA0kC_$ml{ouN|r}F|GQ$+5`Q=R8AYJJdgf{mS}5N z6V+wmO|wgA|E6-occJK^qi({XP$HsSQ2MW&5hBZi28u0f?S>yVZ9lq+!f@U<>PnJ_ zUbSv#IvWbTm~Q-9qHZPXW}@CT=OFjiE>uMds@J|}z53>BJ7o8FrNap0h$p{Jn5`x{ zqZ+pC1IBitIU)f0HSCPD#!X2Ao0Nu|N-Cs^tibL#QBr&8Mm<~}*W?lntc~mCBp-Es z7gh`~maKuK=tesXa1*DDnWV~il~Lu<&{#*&z(sy}*yQBJMk_$w->L!-Uj1g-y-N7boIk(eEYE3X$?4$^sXmOYng!2$aT~fw& z?@I^JE!XLY#VBmW9s4ns!3mOF+=`OI2M#?DaV39?;B9i^l2Y3am?kaY#*T!FBfdYW 
zLBYyA(ap14kZ3`W6z*N)+11gUd)JaY;eRL5Zwb2t7O^EB7X%~uD$gdSz##f;Bvl@Z z9Z9^zSOpg_>WRpXjU2zlEMZp1SmKOG$k(yL22ykbnWEP88MUTffNGi2Do{6dt)SMl z658sy>*^XxSG7g8q}8=~wV>71C2dZ-tX@{@YJcGwQgll3&~>ol`70CvA1uKPB&r8L z_ehO%F_ZzRmm{d6Bkh91aHopgK>M+Fo}ds==@rN7Mbt}2uz+}8S^f;)3gEArwbjPT zd2d4gEUtT#fN;f|_GSQvwAw7^bDW<8Bb^y5@IT$;z6Ni=> zxQYCIgN!l*PIM|TscY=O(;ph0Z1xQ+uu#zF2p256VmOb14$asRqXjbJqsdM{28cAe zp(LY<$dJ&j8MmFLO=dn@wk{lGGGSBafhY5>Z0U?BlSq>2^vvlvhrvTb~yjN%X3(!RSkls><^4BsMk1}MS+ z%l}∨9E(xc7uUTY-|$dK=hCLLpy6);OP($+qK7$;;GCteoUXgmbIGJ(n^ICmk7B1QT^dZvG<|q}>6oe! zjY%hL!PcM$J^OsKLlB1MAHw)UjEjh#qLp%WnDYFOOLN1TI;ZwuIP-u{A$1B&Ng(*? zK1KnT0y|!)`-qtg)PW}Fg%%)|bRN%$z8u?~I`>=^IrIQb$9iDFW6pa*M^Bz%wjc_C zyP}O?sX`&U=6)vq#8)S7HPzsI5hPK35L^u*>j0AwG>L3^V8G&8?aGj<+h*|=S$BJpS-hPVwj0IN z1}l$Nkssk{B)w0HPYgp)eQNLfU_#9vLmL?Jko^GF5gJm}kFEy!9d(yHXnwbIeydoiNrq*o!o zgaB2vS7i${$Q>vO9S#|}Z8UF!TA1PLDAfh6koP7@zh^p}v@i6pD8McJdz5(w0-wcW zqA`P(nWxMmEe$4@zrdF$XEa4VqdF(@VA)dt-Uq#aqA?>4_7)zS)eXW7y9fjstzN)f z5E(a~B4s!E`ESHd6yIDMh9Gjn?PNCpzrOv1U~?F@V>7!x7Xv1Q@r1s~}@LG1YBTgeOqYGh9 zBNdsx(~)w>`hdJNw&GyJkwzih$14+8;I+xp%lL*u07bI;5cFk@gZ2{IXk=*toHs}F z;E;P|ytCe4zGM4M1e6ePgynq_+$76Kxb({e;(IoX@4Gvrmupn`mdFQ)?dD#p87%BM z(L0VEnAKDtu+Pd?gINZG3HD`^VJE$vJisJG=2#2-CJ{&n@@xDy>SyJc)XkqGMN`NW ztpWp^?imUpH>38ybLNgu8z-ZyE@0~Ds&7EVaT)0~vjGipx=t;?dqipQ0Cd6mL3+)U zV-#OvF(`=PwX0qUdPA2I{bhbEdWgy(M^8a!G|wyXyjnJogkJR~XkPRcm+MU9&rl{s z&H{dY(S%r|fV@ za7AMmvU)xsV4x8*nsTTg66@3k`hqgZBZ0jrw{NXC4Ba|yj=lQNRWjK$LEhglgq;cu^kH zxYX0ddJ7!C5*FtkxP;d--{MnA0dsJ}y${*baiqRbd&tjZ!(e~uK?G zqi;oh4)sO$Jzu~Vp-%QPpD7rrZQdkdpg)S87Lgc({u_dU=nz;G$V1EwlL)00jkFOO zQ5_}|^j?k`3HUVDLac*8r~DDVg@KM)#B7B@9;LJoUMR%o7b169c)S;v20HpQgy>BC za8*1k4T=z_l2;Krg;S4Uf*n%5jP~)iN?ZktmIoF5id)1^sspmFC&)`3RDq3E2;tO` z@`iF)c+meuL2vFN%7ISn@*byY9}t>c{bA)Kk>yfr>=jn7--yV1QOz=$A7_onBRmIhIt*hYNK zth8bJ$Kk#cL>c0P3CR$8LdG(?kkEujD327vlmEk?{1fUPWsNx*Qp1~+eMZ?Ql%4Sj zeh+O)uJ3k}+%DowMcBR_{P>l|lBSy>>|F=HS#(_b$s;~U5#S4&R0b+UmhVDulcdYv zr|eVenWjix+>Wg%KJYPHHepVXiox$3vWbvXJH~%3$Rb_l6xMU`yNGONhaR>Nh4dO& z7|qJqkjhCXg0VEU9h*gJM8$+x99dAFR*C^E#^!X^djc-z1kFr0N%NJ*2iV5?;%6^o*OJv@?? z)GX@(;;>my(XvF?q~<7wE7F8_vKI>Z0(Q*tHc1X4lr1u{?b!&EWF4@BDd~)3=QgOy z=t=NaL*gJnsido#T6?Z^tu$MjFI7u(rODDINn;x45CH9Ux(D(nH1`W+Nrj`)9OX>| z$_hty%@Y5_had4cQfZcnpVlao3pDpbWacb~FV})#ZD=;b`mvenSOY~+cKVkve!f?~Jjam5aYT|cXu)hx2~ zlO?GuRY-ugF@r%m(IkNnL4e!>1PB5L$RURTl0yzT<(LCc0|ZDQBtUZZCHdYXi&a`& z4gUPs_iyigZ((1ankxV5N4edzZ!5~bD-%Bh`5)m4`$&W$R8Og^{MPE4(1h;k9iwij z3i5`R>*VWs&gZ;Br&uo{Ph(2$a=pyCO1;AGYI~|ZU7z9nY<(8*yf@dGuP>;|T}2c` z@k>P%9pg~1FADWzrB-@|`IURmRHQXEF5R?4=f{C?JQRw)6Fm;(eu#Yaz8y98?%jIt z!yu$`Ig+*?HUrrS8$@CB%l7rVsBw5t*pFJ*9YZV$T^L`ghiW}1aw7kwQqLzVsux5_ zlu<5sLt@3NQ?vv^m;oS4VEDi*{d-c#Zj)_y!LJ$Qd-o0n^UfgPATdv0hp zJUa|cx6}2Uj^jt6dFzuM)AogV#||UPcW8g>rf|ZBbax$Ly1p6hIcC@HIl2JgPycy6%&0Jjo&&T6)QJ$3;B1@Pt*I`#@=f z8zYtYRv1U%TCT}On&2DGYD!#F7R9Wl@={GT(%S~zKi$2UI|AP`wRzCT?N5w7Hqmow zqUUk+Tox;0RV=r3aYCFt%B3_;MLBU=ymq9gWfQZX5oaf8zRo?)P1L+GG0XXEmb|zi z){a!BQ#e#U*M{0qA1L6;TK`REpr+gENhdUeE^$R@hFz!OHr)pEQO1URw8V1#^2P>g zEn?2Pj$Hrb8q#&M(Tmncth;VD!Ih|~)%4hi4!TZU5KhzXc~M+w>;-Pa3H#NJ4bSfE z3fsc9w~{;i-|M~CkbCU7t-Xj>fHu>EEdS51{v6r0yK6tGfvBW=l_eyxvFCW*{!B)! 
z{`tB~F zU;iK1ux`QY4Y5YgfxH0*Ovb`Q`fKLG zTv(;!r^_esA$-E7GgP9h+cfWc4)EFx9VY>`a0>_C*uzuAYEj;UbxxM zW4BIpCoXrqu9Zw3=hJrq%(@2z?4Q{S9-C2Mir}%IwDIgs$Bw|zbU`&^%qOx;9K)AD zw+!l*-*~0~6>8eJSHF;)^G@r(fBPR!fAwE~cfG$HIUV3_LX?&|;)nhe`t~{&Zw35( z0)<^16^!2ZIbB&P<)-<5bRjhPj&~2nGPm1{Oh1T7kI0dp>;{2ng>K)e72_%>THwnbshd~_ zW5&6r7uZp3^di@bi>TS&#ei6M(V-eTk(Ekq7|(WX89LU!kJBb1Va-mHw9TedesU8uzR&JnJsSY6vfy;VgBS0q@ z?OZ!ArViEH${|Kuxq;d)$X&D)DK(U~E~Z=R4-`$YfX%3lNT;+k^ey>CG1H<^C2si| zW6Sbu8!cEVsuIDPLj5$J89cLi=J3qpS*T*Q?L|Dt@GRjujO|j-7PPCi`D6{FbGMZ*P7iT9?ag*2 z+QMG4o^%>d)rUX-REaLOFSWN1l?fV`QTJy1EsUFMzkNvO@&i2Y;Q3)w6UUEm_STiG z&#CY1^DZb{ZC8+9q4~1XyHpaSTUYMYb9YIt)$_M4>DV5 zBhFK^bu%sBgm}tEtVgY=L844QGHb}B`4~^Qh=h*hi|SB~h#}QqY7dQ%6S=FYas}-* zE!H+K#;VK0m?URV|84oI=R!S0=UjV-h!N$y(my|z4a6LdyPgXp1K>z% zJTAE54kJ^AktHR=&Co4z9$MV-g`{(hivT-69CxAK z;ZWR-%YI-98FbMnF7bh}sgq<|gojo1uwgLRo@l*-+ru)$Z0b8a!JVvp zJJx-X%z7KsrDlV5J6XYP*j8?*6IUBxqR2@AGuEOY&I{)ejI6kX<=&SLKvrI)Ef&I{ z8FlO@mKB6|B*vCLY&9%0{E7^r?}X%eWpnhxRE2FjEJYlC8$D~Bf2>@%45y< z!9!a(71CHW2BW#CyQt*cJtbL}blO*Gw}0rnHR%>&(N zwgpZXIXwo9GZ^ERP(BVFptP45v2-9-&h{alnU%i-Gj3!^frZGsHpyl(;kPDIl6a#I z{OXZS$X3~?00RPTj&Q-3>!$Mr7|@6ukxh^i&jvd_*$2|>29j(ZB14k4Y2I`iHd*0p zc)IXO$-+!*dmhZIvZa$zCOIT|>%#)Jqr@b1We9J=aRkh*$K=p<$@P8=@6rr|4$NZ` z_rypJH#179)e86v*mS*5HtWZ12;bQpiJaG6gXy2x>g3eJFn?*{WYV=7w~pzvktPXz z?*Rcx669M*YAbPxOfO!PJV(`+k;FR5t?oHK^St~mY9PxeE~LAXSE)3e`V!V{i-0pO z?%Iug+Vr^PF1N^QMB%5D2ud7EeoRS|5?&9vWpWS6q#)jr`&4}$2|O{6S`D)E>gCOR6119hOu zC87BU+N|IP(WbNcXmHC#p`!&5I?)0H8)H5Y23m3qu6dCM$Q5h^Bo&H{HTF}PCzcxf zx>SNgL_kD=IPfw=s{md0Vo{$1KO1=soZB^|rjQJ!(zUO3`>@(($Z;(m2L&9|c zmuT`l1-_7n6V}c{$w9#$X?|K`ks2kJs+sf-K8O&ECPfOVCAlXvz9O5T0fz_**nmq# zju(xaq8jG4Y2>B_&5&!)kU~$DHmO^1s{9*jAGeBJ+CC?8;JbYP!;gAC#W^zN^Db_T zH3b~_U4&T7X3yuV49%;r5S43uhgTyfjIM2toO@lkoiuLt>+8=5_U_f@6ES}m1y%=) z@3G#nqlr5+*d>{JDIfv+4JI){#9)^CGBtM{VPV`%}f&LdH@O`!WS{PcNk8txC8vI<3>lrt`(bkm z#uVW@p)hWcZi~y-qZ*&Af_T`vJt z1bd0`sFChR9$*k6Sd7WPAOcB8KJg=zUlc6@<-f!e&LUB?vR3B91w8BXYJYiryw4j5 z*(s9|o1OBvDBv8D6J{fJ-;nR1M$b;$z-i+w+pIs^=)BQ+b!3UcBV^nZbTS%mXG-ka#4tkH7L;hjXM4m1O4&te_TM{pbr_E5h0t8_GYT zv42X*pHX6F2?KnH;@{#)%>#0z%IZli57&yEE)#DA<=|w^zLF;s)3iZ+NWW8f3V2xZ zub>vO4)7J^oo*Jk0dgt=IQr+t&=}^1`C$PiV-%V@MCeTzEb$3aXb2U(s#S|%`(acL z*ial4iJ~x8ltpD&ii)rcOQRahS03O~W>k(UgVLake(DPNIME2hGI*apk1Ahj=%qIG zs|XWL)EF<-Xlaf1(x`FGXw~nSV z(K3RRbAx$&YO7#13j>nj)#wC*j|*7w0x;;*q4EP|IE~QXKYyyAT|a@6Z_v!V($l;a zUE)1leDbdoYx$>iEegVyH1~7sK^V4jqz#V^j!_HNN3Qd0FRvRP0IA>c!SS>oIoGR$ z<-t<2w&4n|!hEVr!o`HbbGyWcW1_G+Sf%}ArV~uz1oFJQ;mN_u;3QD23Z7gUEDlz% z-^Hie!~59pBJX#R_q)iaGv7W2G$zM?zI}R8X==c<`Fm_QJR?h6en|=CT221_$8|}1 z8M^y2UWnY)%PsgEQLyj$VXEA?`H;ro!SC1C;kME!PM-ZAe)4N7W=ZXg45;FVlzff^ z@j3c-6Bl4wcksarzAMx#K8VSmQ){8;yJYjqpHZojbYcUT1DbGwU0==-u{tHcL&*h7 z)+qUuk{7(3r>KkdzT1uU-A*S7g3UDoSnm!#i*#K2suCT@1`Yiw^(_THTrAIpSwomC ze@V$_)G|jgMFeE6FgoxsS~hB@5?IWZfPY0uLv-@@X^46&R(~^AKaCBj2`8>hd@iaN z`6CfQZYc@8ZNK$)eHMq};)9KBq(?uhp?QADAUm9`z}rm~4%P&B46kb-c0%&5*c_A$rnkvA&6(On!7sPt1#Y%CiI8$7e)Ted~!Qft}d%!}IFmahf@f`6^qUj1p zxKp0Pr$>(3)=R`}o0Q1oH1Eeq>I)po-Sqvuhx6MGX9%On$L(xB)q8f-*tvV_ zt#^Zv%H>Gfe%K6TCu|Uf;V;|QZ=uHFJz+m;U3UyIFLYsirXHyEoXCm%XG%Svtf*cP zB~eDXC@P|gcS($ialFf7LQLXa5mRCs@2Z#)vv`k*IjsFeJbnM|tqoq;dv>_X>$X3| zR?Mv(H#8fb9fqdc>3UAb@uSeZ_2HIj`@+0!hY{sFw9yq)IAKG&+m0|@-;8z~vuk%9 zSvK*z75La_7`YD}^IgZ5zU#M4?ArX$@7k`!kheQ_%UO+rRnC}ObS$17xq;6FwrnTt zVaz>S?yi_OJm<0Fnm5+Wx0*5tSIqa_#*Qt8S>Lh!){6N~&tLu6_EGHd=(ld&y1!y> zT->-=PN-pV4J_K+4x|hoh31Z9KXko46Vy7q^m}_-fo$v??V6Wj22JzUUc+1?F?&A$S$s!tBdfRl_MLf6g zgjJmTKxu<5Bb69f7>8hAuE|B3;H%DRN?cPG#jK|CwVGK=Lz&&5Q}0-EVOiSQk*)>r8G`QIdMk3aHywc6SJQcFOJas4)-`WQuETt 
zEa$UX^5TM6K2(`b;XwIB+t>E>fdcld^oS&mm0=X_hX!Z_E!iAKCmS;1oapCEZ=Ei*$+`7 z`La5$?`{(e_Wy1f>lVD;5Nq@t$Qxk5WKc|`zh=JO1RC_56`%^!WckcU9*JxK$dP7f z6Hb9kn%pm&3#)Yebom56giqLXhDvmGYvw)A0dSk4<0POKuH(QPJ2>mLF3F6V3)lL2?AD2H$K{UKwUVjheEKecS$BYd{j)p4BQpw25j^sfHlDrd*bx|-E~sW4 z@rf)G$M7Z4ErYt{H=Zg$g_?HMt6xaYd8_ry-}sv|pa17yUGFbMP6v3K5T&IK`Jq3C zzP*mcTLC|xL}438g(FIj*3+yO1dXekJMbvC;V?eCC=ui!v$V#O)j3>Lc3>|CN$7vIhux7-0 zu9I=m4K3Wb>&H4!Bv!3ISEPyK{_5_!up@h8z2DwjC&du1lgfa?*jSI`-a6sPeRpg4 zw(ehAXPp^Ao-h)V~!@43EXrMNYR`NH!^6k}SOR_Sj6 zPhKsnMRiG?g$T8SU_vkMx9p6{m1rG%$$HXhJW(Hf@sSc;Y+q_`94I3+E~D<1_Ny2-)qd@O&gFGH zZ{Yb}Qxhi+arV}gtk3Ch?DHllTy0m7UZMH2(wkHgq-_`P)^m4A(be;tmUL{7-c5Vg ziSvoVsuxlv6z8egx)JB8*}9pQZ$e;YBi5r<)F9!e9~nAis^>ysLknGlE1-l#Ij{8Bj}a2ff}V*i zYwfBRG;A-thRks6SL#ZfgZ9L#)jP4e8LPMJvyf$L8}Oe1<)zoG%Wu50MK*d@I*b~Q zc>249!)k0WQelxEplKW0AOtvXU@e@~eod12#l_(g;xefh%eOmD7*`*Qg-tmQ`1CEQpK?0jQz&anA8oDDc z1-&TE>VB&k8&E2Dp!wl++=#B`}eVQnTW*o4{Tb~V^L zOIYqb=>UJ_McQH^44P5LevE0b129@MjPvh1bR)!q{4Rij$-|0^E!$_BG=UpA5_*c5 z`=|shDRV?O$M$-x`F@;#_Y#k;WMkgF6z2%QJF#&?*d6eMEpeM57|K_8G;sks(Q6~F za9^xQ;wDB4IDo7rB$G3Aye9KNy)wcB&vCtc4KrNA6H-P|tHvbs#1gc_q?T8!T2Za4 zIGyEm4yb1^>dm8k0$M?7&ohSUz_6U{LrgPIU<;<+(AWYm zkt=SJU1h?zO~fVfSRFXwL%WeJw_yQB2HG6qfC!Fttx{sbdynke57=0~y*89Rue%KQAhGAk@rUvL zwTY8SmuuX5M4t^gNpO4@P)H&nUqw<|j7wym@uK88s=kaQ)=7YM&+(c0<*!o%8AWj+ z-IcsbrSa6?ur@71(73p5H+E^$M=f`_MP4HcKcYlX;!yG)B~40rJ>rlh);wx8$QG=Zw?hkNu?2X0P8{P!>Hv$AU>6oD>zU-1C|VQFBT))v^$dh`5=I?~ zXBFvp)&9ycLimluGWAJnloZM{QkhB)Ng7EdtUBc2>8JHMk{TIS45k^i|y40EUXL9*`+YuE* z>XQt(1A~Gt_4zBD8Nx7>{Se4$o{;D%T2UW|J>zH$?&Bts@VaN^qkHnkak+?uM{+fK7;Ugtpu>Vd$)OWk5ypk?E92AtXDg zD;cbq_$7d|EZjy^yYh^e&eOXQDLI}ISrbarPU^|0>?Dq2Mvt(X&Jw73L4YqBl)V6~ z@y%@~@g(6%hmY(o-02R?6Y~aBP761W011jZ3Si#dh}E0*!VNO&cEWn$W|FyEDpi)X;bgV~G-Gq|R{K+Ro8SXlNmQIFW5 z7P*NjkcErGnoGV!Q?4R89$6ULeun078i}H<0r(f--A`-dco)_F(lN}Co>;~T#0Ioa zW(Cq`$?+d3`wZOVGqK49gNvY_02)_G*GBjOvn;(>a>)y1q9K&ZssZ&y)KSmk39!a2 zjU)HGgzGc<%eQQ=fdCEyd@z!af{A3N2$!P!h^N`GlW%Wj7g8(pk&w9_*^Qk9!Y!;h zHad>&*DDEdvk%ECzFq>%2=*=GVIy6X+`}M5!Wgc9LIjeIeDsGXKQC;A*MEv9oJ68% zWv$Go4CL0Q)&ALI&iPquB0Fxfc(ddF3I!ZUa@1_`?i%t9)aco<8#s0xYMXUz8-4f~ z^L}2?!IREKlmzf@g!egLq`d4RPe+ZoLR&e|J|XFYR9aQPDvYBNj~J8x%21v zjyuF-X}2WY5Ra$DVboy(fRga2#U2wWh{Oau2TCbNrDLnid+A8YU_q$P;#(i*#-t}4 z*!sk1XH>}FMpwkiGk%o6PhNLU zV!t8~GQ(F;<#Qm;pdxZh_#z6nPjX-(Dt)d|c|L1FE^BETR~3QqUP{CG)-(_vOlcIp zHH~Qq@c(x&GZ~F^T%)j^dwf?x$X+$D0P-Vr7xt`TKX zIn?))8qyWR8itehBtoQQ{!a}M^5tGrgK^}N)@f1QHPGXKM0J1SaXXqBjGGc^37fbu-aCLb=m}8GOke!)j&+&pJb6YaszK2p%GpG3(wXlC@CXrJP>=<=7SnS1c+=FWqGesnz7)zg>5~4=;mV@TUs^jHjd)pOWJt$rORPav3iKH0$LS9Iq(Y zb^I_@0K?{;H-8*#)!Ld{8Hg;R|H zo{WPJOC6WKOGSIKK{S3ugGzx9zteMJ*^v^GKc(bjYMG+YD+1V780~o&EgQ8ni5|$- zg8%ABBboAdXoz|%R(}wyAH@drq7zp}KDgD3{DlZA1>dX0<9aCxQf|EZT7425ck$gz zHq!eT*3jh14Y9(hmq)skj~Wq-r=hK=Uf@b(5HblZ7;9Fd*;rN8r>K>>W-7BjM#W@* zG=2)!-8c6@-+JOuhv=D1IZBJk56=pUp0qhWwY8$>l_Xil|9~glMxwA+J+7Cb9*cSv z%5q%Y%>I6mtHPoy8~8$&XHOih2K;aOn^aA$tQ%>~8MUn8$xWg^-ufhBqD0lu`!647 z1%878akr*!)y64mPoa6sipv&-j(gw$E3R7B14Ql9mV#x8ph49kF>V7+?M@)%i`Wwf zYvn`A4JaX(kevca_A19FB%Q*@$(U4R8=`<*6%WUdibX@!)auFNYH_AGU#t|ziW9{p zNquT_h@SU4-8~kegq_R$?SelMB%dD?{v?ocA3o!9n7dvgj$5Ndo}hU@KvJLK*!P<6 zuMPb(*yA=@;_^tvMe0uyT%YIu_>ckPYYpMh^k0}{o_et4(khSJVqCnMoar?cJ@{kL OFhWN562;2!X#WT16GVOh literal 0 HcmV?d00001 diff --git a/models/__pycache__/vit2tokenmask_model.cpython-36.pyc b/models/__pycache__/vit2tokenmask_model.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a9a67f45949d415059121e2fc3912948235d1096 GIT binary patch literal 9189 
zcmaJ{TWlLwdY&7};YHNdvSd5XIGc;OktkcvCRxWD*O$mmniaf>;v__p!H8!_4K>e1^Qf|C{myg1zMne>{DL~^kJR~^r0}?qWhee_WS=CQnaHi zg>&Xy|8wS?|Ns8W=<>`=>D=F&d;jyMqWp(4^)pcZEu?6GOejM2m4?bst)U4`=)T@H z8iuN%Z1}luzLDp0&M$P$hKVxGDRxSY5|_%2GCwPwnNGDa%jLPo9G-c9zPr#^RF&I` zC5du& zQ4}TAO;HvVJd0vRRPii{SuuxaSREa5pLPGRq-lZCtQ?r!ad2X63FCpzGL zJD=lKtlfPtvYNgVMV8m?`EJ(@;>f!B(XQnL!rFGCn95yx(RE9>QB!(*uCTnoiuYZs z=k#1zvhceZ26)jZ_U^mZ`>rDcFKAnMYwM$+=XerR-t9VVcOworxM1zlvG`8xg#i=T zb=;_rId>d+ux?%V-G{DcU9Vg3v}72qTOWAMeMbtbvF`-!b?d!;u<@xApxWoz@7%n3 zciq~$xOK6VQp4t&*tE45N*O+gtbNzH@A-!osC9YkcMf+$+1x*VYu<_#wyc|nP2atJ zeTNC{$8j&Zvbovz;{EkpLdGU%^d!#a*!jbZsHqu7=Pyu_^2Cp##wY+v;x{($3h$|v1>bXs?`CgyDDC=Jf7Az!ze{dj%My6aX8T#1%iO;3#Yu;(TP;kKNuz4l2BL?zv; zEFw#ceb?^|W+%kjez@oOg9}qMfD26HiiHxBShqf@4dy0uq8|7VlZqP~LDRK85r3;* z+ua^q>_;xm;n}<$Pe4??_|WP)y&j}(O!Cv(y7eh`mgAizxxkU_D9JT(qA~ANma*AE zbz{ScV>hsU=g^hWAa`l~^7@YkkeY6CNTkLrUw-qww`lTz33+3C_rK6@6xtouN~!G-)I znC`Ni$Z~}6NF*KKl5V^2W7dG-vTnT_zYv;F_T=&51lwM`myIHCR(=kFd@-B45&nIY-ds%C38JTMy`Dj;IxTI zR5Ox1x5>oxA{#gE1&Iz6NmTnU6=~tPzunmsPV8)L4myXMq!^-2QW;PfTbr>w+$0>i z>+O!8_T5XHtn;GH`(C^ilj7_`aWdBU;FRPHy1mw+4RPrL_kAyL?F_e0VZG>OWQwt% zEvWRjf|OTFs;Qn)=eUJyS_OZDjqmE_`u9_x=oB3b%pkE&c>$UxRyyiX8LCGbjGj!h zh+I22RP0Qvw1Ky_o|+KQ5Jo50$&2!ldP_ONOglGJI|ca#dQ8d(FpFDz zq-EL~#ufup%(Q7%k$b+z+>-nnX&IQt#V&Re2c>z>@z~}H{{8Hy-^uH3Xb!srimLAc2{t(n(1*P>Pb!zNJ8@kr6 zKU~A?+%4tv^CO&2r`{>Yn|PPWNIH$j>b);NR^p4DOP#GFWs1gSw7u5(5$4TzUO%F9 zc?0Q9q#w65aq1D>+Lg(e=e~E$TcB{YQ$~J;)|=GcqM9I0dwQplyG=^2k-ue2*YWAu zat_=ipDL?HAyYs}p1SSpNuIjx8(IBElf;>RWZIBf^C42Sgp3a2^U6q#i3QbPYWIu} zQ`xJjauxkGEz#;16V+pZOtTAU|F(41_n@Mocix8auS7&SuM9R%5DVLeW{GXPe$@|~ zjvu{^!g%ht>UxrczQkVD+lhKBQMVgQkY0NafS*F+rPuAtZ@#umR(Vglj1#VS@_U55 zYGN=pVUs?TPEQJ6w97D6W|+yya5of9l7~un10m^v6BEcs@NygI7C<*cTO`G>A7@29 zXtxprYUDO_J=~1jNht_zA;TWVBte$M>8lnVqiL{a#XN_xjjo zG+BC^7ucTEZzXyl(p7I^xy**JZ>2l91w+g0c9TjIJQTYrXeL@5CVAoBhsBi?vE4h; z1>(v&y<#B>TXENUh-L5wi5|D2B>#a+H$sfZ&jQ$%e5%B3I|0+A1-#gm&`iYA$2I6h znIpP6w$KwT2$KB!mw0x0GUxqENsb`9n;6%H(*;jB61NG0k=)|h#03~ckFBK4W3eNN zo0zKL046;V8JUsmx0nYS<^g1#{tr&A^MLh$3FsJ3!ie{o#)vu^^ zlrCy3s;O1AWi_u=)brYs_M-ZtT2%*Ue}jlTr6=exSWo;d3cv*xQAP;01E=#yjrG_- zJM_U3G{=#4MqwONg+A22(pCxMfPtODRK0+@c?7eC=N08Qc$R=`<*cnTRnB#0Vi8Ek(A~#ub)Z*vKyemh#6UrozBzObZ#ZH_Wik^(%W4Nw>5%z%G+#WgP z58!iJQP_piOmd%^&QX0rDYIpPgCQHVcgT|ckj>-m`dIe--WnW&)Pg5hALjaZ7ETvk zt8wo!eKy@B!SN2DkVHaWMpj!*ie!-SrsRv%OpKi9B*41w2F(2OUFsk+C@ExbC9hDe znt2%ZEt?QDG54J20loBb&u#9JSBV0JN8}!5P0D_UGC>*d$E8vW*|bD%OOKj=f(-sy zhk6aN{u-sd$c7Z49ZNXAOEw_NZb+vD=g6e5dTgm(RB?sqNnJgxfGE z=u)4(!o@KRL)njkoTf#jj}^_-tFXZNk4sDAn!2P8UOwRxPayXcsFGmt_1#4Q2Ltn4 zs0RpZ4Ar3~mxUG}xU-6@M4!%vrNKQHg^nJ8>QoO*Z_Ig580g6{+~!3duvc&p!Bi-Q z);!2ifOzZJC#D)4CxRvl*nzttY+Yaz;u?`74=orv8$B6PmAqm)0*X&bIoWW` zCf}ftvWbLu0d~>k%M>X?89rJk5B&$-`$!A27MnCI`C_efbogG3FgEE}$T1-SS^8z! z0uA^~(9H(CF7nN2-VC)c(^XNb3R)r8nI&DH>2uP=U|IQB)PLM7a#{bp$bkX#gZDq^ z2NW2Y=%u%Cb*x0-@$VrfW3~DL-)UrBJwayP=F7YqyHWgheH;zYh1bmjZU27l2?60w zZ6Ou>AEUyWf}uarn@&7+c?Nqe^WTYJz{Z2wjF>XGrm;fZJy+P+_ET|=#GoE|152O? z$A&eRe2JEP71_zi!qE0Lx}ybTidF~kufU&Q(5iTv>fp=?%#fYfgcXPl=slSgNP(rt zf252UxXEW?lM4nHK|cXBu9B{e@B?O9cCqA$7sy0I@RU^p+D)|4Nb@vUV~OUGQ(naN z8H2T(j^9Mc1|dBd$;aVCGE;;{@q9$k9N5X*yORs4mH0@=T#udRehT5AW6z1vb)BG5 zPJx>}M^^ETB49?aN12Q}>7wKVOhW97;rb7VKsu0*{*O?9R@g`{`~^}phfL8*T8U2? 
[base85-encoded binary data omitted: remainder of the preceding compiled-bytecode patch]

diff --git a/models/__pycache__/vit_model.cpython-36.pyc b/models/__pycache__/vit_model.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c39955e91164e333a79446e73f74e306d45f60fc
GIT binary patch
literal 8707
[base85-encoded binary data omitted]

diff --git a/models/__pycache__/vitdonly2_model.cpython-36.pyc b/models/__pycache__/vitdonly2_model.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fed8f18cc6aa837d393e57871adcd88024085777
GIT binary patch
literal 9528
[base85-encoded binary data omitted]

diff --git a/models/__pycache__/vitdonly_model.cpython-36.pyc b/models/__pycache__/vitdonly_model.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1d3913142d63a76aaec35e7b97bb2560c1cbb1ff
GIT binary patch
literal 9373
[base85-encoded binary data omitted]

diff --git a/models/__pycache__/vitlocalgloballocal_model.cpython-36.pyc b/models/__pycache__/vitlocalgloballocal_model.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a6b59ad752c2b506c4429eceb2f549908b0ef484
GIT binary patch
literal 10505
[base85-encoded binary data omitted]

diff --git a/models/base_model.py b/models/base_model.py
new file mode 100644
--- /dev/null
+++ b/models/base_model.py
@@ -0,0 +1,258 @@
+import os
+import torch
+from collections import OrderedDict
+from abc import ABC, abstractmethod
+from . import networks
+
+
+class BaseModel(ABC):
+    """This class is an abstract base class (ABC) for models.
+    To create a subclass, you need to implement the following five functions:
+        -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
+        -- <set_input>: unpack data from dataset and apply preprocessing.
+        -- <forward>: produce intermediate results.
+        -- <optimize_parameters>: calculate losses, gradients, and update network weights.
+        -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
+    """
+
+    def __init__(self, opt):
+        """Initialize the BaseModel class.
+
+        Parameters:
+            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
+
+        When creating your custom class, you need to implement your own initialization.
+        In this function, you should first call <BaseModel.__init__(self, opt)>.
+        Then, you need to define four lists:
+            -- self.loss_names (str list): specify the training losses that you want to plot and save.
+            -- self.model_names (str list): define networks used in our training.
+            -- self.visual_names (str list): specify the images that you want to display and save.
+            -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
+        """
+        self.opt = opt
+        self.gpu_ids = opt.gpu_ids
+        self.isTrain = opt.isTrain
+        self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')  # get device name: CPU or GPU
+        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)  # save all the checkpoints to save_dir
+        if opt.preprocess != 'scale_width':  # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark.
+            torch.backends.cudnn.benchmark = True
+        self.loss_names = []
+        self.model_names = []
+        self.visual_names = []
+        self.optimizers = []
+        self.image_paths = []
+        self.metric = 0  # used for learning rate policy 'plateau'
+
+    @staticmethod
+    def dict_grad_hook_factory(add_func=lambda x: x):
+        saved_dict = dict()
+
+        def hook_gen(name):
+            def grad_hook(grad):
+                saved_vals = add_func(grad)
+                saved_dict[name] = saved_vals
+            return grad_hook
+        return hook_gen, saved_dict
+
+    @staticmethod
+    def modify_commandline_options(parser, is_train):
+        """Add new model-specific options, and rewrite default values for existing options.
+
+        Parameters:
+            parser -- original option parser
+            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
+
+        Returns:
+            the modified parser.
+        """
+        return parser
+
+    @abstractmethod
+    def set_input(self, input):
+        """Unpack input data from the dataloader and perform necessary pre-processing steps.
+
+        Parameters:
+            input (dict): includes the data itself and its metadata information.
+        """
+        pass
+
+    @abstractmethod
+    def forward(self):
+        """Run forward pass; called by both functions <optimize_parameters> and <test>."""
+        pass
+
+    @abstractmethod
+    def optimize_parameters(self):
+        """Calculate losses, gradients, and update network weights; called in every training iteration"""
+        pass
+
+    def setup(self, opt):
+        """Load and print networks; create schedulers
+
+        Parameters:
+            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
+        """
+        if self.isTrain:
+            self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
+        if not self.isTrain or opt.continue_train:
+            load_suffix = opt.epoch
+            self.load_networks(load_suffix)
+
+        self.print_networks(opt.verbose)
+
+    def parallelize(self):
+        for name in self.model_names:
+            if isinstance(name, str):
+                net = getattr(self, 'net' + name)
+                setattr(self, 'net' + name, torch.nn.DataParallel(net, self.opt.gpu_ids))
+
+    def data_dependent_initialize(self, data):
+        pass
+
+    def eval(self):
+        """Make models eval mode during test time"""
+        for name in self.model_names:
+            if isinstance(name, str):
+                net = getattr(self, 'net' + name)
+                net.eval()
+
+    def test(self):
+        """Forward function used in test time.
+
+        This function wraps <forward> function in no_grad() so we don't save intermediate steps for backprop
+        It also calls <compute_visuals> to produce additional visualization results
+        """
+        with torch.no_grad():
+            self.forward()
+            self.compute_visuals()
+
+    def compute_visuals(self):
+        """Calculate additional output images for visdom and HTML visualization"""
+        pass
+
+    def get_image_paths(self):
+        """ Return image paths that are used to load current data"""
+        return self.image_paths
+
+    def update_learning_rate(self):
+        """Update learning rates for all the networks; called at the end of every epoch"""
+        for scheduler in self.schedulers:
+            if self.opt.lr_policy == 'plateau':
+                scheduler.step(self.metric)
+            else:
+                scheduler.step()
+
+        lr = self.optimizers[0].param_groups[0]['lr']
+        print('learning rate = %.7f' % lr)
+
+    def get_current_visuals(self):
+        """Return visualization images. train.py will display these images with visdom, and save the images to a HTML"""
+        visual_ret = OrderedDict()
+        for name in self.visual_names:
+            if isinstance(name, str):
+                visual_ret[name] = getattr(self, name)
+        return visual_ret
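Taken together, setup, update_learning_rate, and the getters above define the contract between a model and its training script. A minimal sketch of that loop (illustrative, not part of the patch; create_model and dataset are assumed to come from models/__init__.py and data/__init__.py, as in the usual CycleGAN/CUT train.py):

model = create_model(opt)                  # build the concrete subclass (e.g., CUTModel)
model.setup(opt)                           # create schedulers, load/print networks

for epoch in range(opt.epoch_count, opt.n_epochs + opt.n_epochs_decay + 1):
    for data in dataset:
        model.set_input(data)              # unpack a batch
        model.optimize_parameters()        # forward + backward + optimizer steps
    model.update_learning_rate()           # step every scheduler once per epoch
    losses = model.get_current_losses()    # OrderedDict of float losses for logging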
+
+    def get_current_losses(self):
+        """Return training losses / errors. train.py will print out these errors on console, and save them to a file"""
+        errors_ret = OrderedDict()
+        for name in self.loss_names:
+            if isinstance(name, str):
+                errors_ret[name] = float(getattr(self, 'loss_' + name))  # float(...) works for both scalar tensor and float number
+        return errors_ret
+
+    def save_networks(self, epoch):
+        """Save all the networks to the disk.
+
+        Parameters:
+            epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
+        """
+        for name in self.model_names:
+            if isinstance(name, str):
+                save_filename = '%s_net_%s.pth' % (epoch, name)
+                save_path = os.path.join(self.save_dir, save_filename)
+                net = getattr(self, 'net' + name)
+
+                if len(self.gpu_ids) > 0 and torch.cuda.is_available():
+                    torch.save(net.module.cpu().state_dict(), save_path)
+                    net.cuda(self.gpu_ids[0])
+                else:
+                    torch.save(net.cpu().state_dict(), save_path)
+
+    def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
+        """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)"""
+        key = keys[i]
+        if i + 1 == len(keys):  # at the end, pointing to a parameter/buffer
+            if module.__class__.__name__.startswith('InstanceNorm') and \
+                    (key == 'running_mean' or key == 'running_var'):
+                if getattr(module, key) is None:
+                    state_dict.pop('.'.join(keys))
+            if module.__class__.__name__.startswith('InstanceNorm') and \
+                    (key == 'num_batches_tracked'):
+                state_dict.pop('.'.join(keys))
+        else:
+            self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
+
+    def load_networks(self, epoch):
+        """Load all the networks from the disk.
+
+        Parameters:
+            epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
+        """
+        for name in self.model_names:
+            if isinstance(name, str):
+                load_filename = '%s_net_%s.pth' % (epoch, name)
+                if self.opt.isTrain and self.opt.pretrained_name is not None:
+                    load_dir = os.path.join(self.opt.checkpoints_dir, self.opt.pretrained_name)
+                else:
+                    load_dir = self.save_dir
+
+                load_path = os.path.join(load_dir, load_filename)
+                net = getattr(self, 'net' + name)
+                if isinstance(net, torch.nn.DataParallel):
+                    net = net.module
+                print('loading the model from %s' % load_path)
+                # if you are using PyTorch newer than 0.4 (e.g., built from
+                # GitHub source), you can remove str() on self.device
+                state_dict = torch.load(load_path, map_location=str(self.device))
+                if hasattr(state_dict, '_metadata'):
+                    del state_dict._metadata
+
+                # patch InstanceNorm checkpoints prior to 0.4
+                # for key in list(state_dict.keys()):  # need to copy keys here because we mutate in loop
+                #     self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
+                net.load_state_dict(state_dict)
+
+    def print_networks(self, verbose):
+        """Print the total number of parameters in the network and (if verbose) network architecture
+
+        Parameters:
+            verbose (bool) -- if verbose: print the network architecture
+        """
+        print('---------- Networks initialized -------------')
+        for name in self.model_names:
+            if isinstance(name, str):
+                net = getattr(self, 'net' + name)
+                num_params = 0
+                for param in net.parameters():
+                    num_params += param.numel()
+                if verbose:
+                    print(net)
+                print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
+        print('-----------------------------------------------')
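For reference, a sketch of the checkpoint round-trip implied by the '%s_net_%s.pth' pattern (illustrative, not part of the patch; model is a hypothetical subclass with model_names = ['G', 'D'] and --name experiment):

model.save_networks('latest')  # writes <checkpoints_dir>/experiment/latest_net_G.pth and latest_net_D.pth
model.save_networks(50)        # epoch may also be an int: 50_net_G.pth, 50_net_D.pth
model.load_networks('latest')  # restores every net listed in self.model_names by name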
+
+    def set_requires_grad(self, nets, requires_grad=False):
+        """Set requires_grad=False for all the networks to avoid unnecessary computations
+        Parameters:
+            nets (network list) -- a list of networks
+            requires_grad (bool) -- whether the networks require gradients or not
+        """
+        if not isinstance(nets, list):
+            nets = [nets]
+        for net in nets:
+            if net is not None:
+                for param in net.parameters():
+                    param.requires_grad = requires_grad
+
+    def generate_visuals_for_evaluation(self, data, mode):
+        return {}
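To make the abstract contract concrete, here is the smallest possible subclass (a hypothetical sketch, not part of the patch); the real implementations follow in cut_model.py and cycle_gan_model.py below:

class IdentityModel(BaseModel):
    def __init__(self, opt):
        BaseModel.__init__(self, opt)
        self.loss_names = []
        self.model_names = []                  # nothing to save or load
        self.visual_names = ['real_A', 'fake_B']

    def set_input(self, input):
        self.real_A = input['A'].to(self.device)
        self.image_paths = input['A_paths']

    def forward(self):
        self.fake_B = self.real_A             # the "translation" is the identity map

    def optimize_parameters(self):
        pass                                  # nothing to train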
diff --git a/models/cut_model.py b/models/cut_model.py
new file mode 100644
index 0000000..cd4a191
--- /dev/null
+++ b/models/cut_model.py
@@ -0,0 +1,214 @@
+import numpy as np
+import torch
+from .base_model import BaseModel
+from . import networks
+from .patchnce import PatchNCELoss
+import util.util as util
+
+
+class CUTModel(BaseModel):
+    """ This class implements CUT and FastCUT model, described in the paper
+    Contrastive Learning for Unpaired Image-to-Image Translation
+    Taesung Park, Alexei A. Efros, Richard Zhang, Jun-Yan Zhu
+    ECCV, 2020
+
+    The code borrows heavily from the PyTorch implementation of CycleGAN
+    https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix
+    """
+    @staticmethod
+    def modify_commandline_options(parser, is_train=True):
+        """Configures options specific for CUT model
+        """
+        parser.add_argument('--CUT_mode', type=str, default="CUT", choices='(CUT, cut, FastCUT, fastcut)')
+
+        parser.add_argument('--lambda_GAN', type=float, default=1.0, help='weight for GAN loss: GAN(G(X))')
+        parser.add_argument('--lambda_NCE', type=float, default=1.0, help='weight for NCE loss: NCE(G(X), X)')
+        parser.add_argument('--nce_idt', type=util.str2bool, nargs='?', const=True, default=False, help='use NCE loss for identity mapping: NCE(G(Y), Y))')
+        parser.add_argument('--nce_layers', type=str, default='0,4,8,12,16', help='compute NCE loss on which layers')
+        parser.add_argument('--nce_includes_all_negatives_from_minibatch',
+                            type=util.str2bool, nargs='?', const=True, default=False,
+                            help='(used for single image translation) If True, include the negatives from the other samples of the minibatch when computing the contrastive loss. Please see models/patchnce.py for more details.')
+        parser.add_argument('--netF', type=str, default='mlp_sample', choices=['sample', 'reshape', 'mlp_sample'], help='how to downsample the feature map')
+        parser.add_argument('--netF_nc', type=int, default=256)
+        parser.add_argument('--nce_T', type=float, default=0.07, help='temperature for NCE loss')
+        parser.add_argument('--num_patches', type=int, default=256, help='number of patches per layer')
+        parser.add_argument('--flip_equivariance',
+                            type=util.str2bool, nargs='?', const=True, default=False,
+                            help="Enforce flip-equivariance as additional regularization. It's used by FastCUT, but not CUT")
+
+        parser.set_defaults(pool_size=0)  # no image pooling
+
+        opt, _ = parser.parse_known_args()
+
+        # Set default parameters for CUT and FastCUT
+        if opt.CUT_mode.lower() == "cut":
+            parser.set_defaults(nce_idt=True, lambda_NCE=1.0)
+        elif opt.CUT_mode.lower() == "fastcut":
+            parser.set_defaults(
+                nce_idt=False, lambda_NCE=10.0, flip_equivariance=True,
+                n_epochs=150, n_epochs_decay=50
+            )
+        else:
+            raise ValueError(opt.CUT_mode)
+
+        return parser
+
+    def __init__(self, opt):
+        BaseModel.__init__(self, opt)
+
+        # specify the training losses you want to print out.
+        # The training/test scripts will call <BaseModel.get_current_losses>
+        self.loss_names = ['G_GAN', 'D_real', 'D_fake', 'G', 'NCE']
+        self.visual_names = ['real_A', 'fake_B', 'real_B']
+        self.nce_layers = [int(i) for i in self.opt.nce_layers.split(',')]
+
+        if opt.nce_idt and self.isTrain:
+            self.loss_names += ['NCE_Y']
+            self.visual_names += ['idt_B']
+
+        if self.isTrain:
+            self.model_names = ['G', 'F', 'D']
+        else:  # during test time, only load G
+            self.model_names = ['G']
+
+        # define networks (both generator and discriminator)
+        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.normG, not opt.no_dropout, opt.init_type, opt.init_gain, opt.no_antialias, opt.no_antialias_up, self.gpu_ids, opt)
+        self.netF = networks.define_F(opt.input_nc, opt.netF, opt.normG, not opt.no_dropout, opt.init_type, opt.init_gain, opt.no_antialias, self.gpu_ids, opt)
+
+        if self.isTrain:
+            self.netD = networks.define_D(opt.output_nc, opt.ndf, opt.netD, opt.n_layers_D, opt.normD, opt.init_type, opt.init_gain, opt.no_antialias, self.gpu_ids, opt)
+
+            # define loss functions
+            self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
+            self.criterionNCE = []
+
+            for nce_layer in self.nce_layers:
+                self.criterionNCE.append(PatchNCELoss(opt).to(self.device))
+
+            self.criterionIdt = torch.nn.L1Loss().to(self.device)
+            self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
+            self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
+            self.optimizers.append(self.optimizer_G)
+            self.optimizers.append(self.optimizer_D)
+
+    def data_dependent_initialize(self, data):
+        """
+        The feature network netF is defined in terms of the shape of the intermediate, extracted
+        features of the encoder portion of netG. Because of this, the weights of netF are
+        initialized at the first feedforward pass with some input images.
+        Please also see PatchSampleF.create_mlp(), which is called at the first forward() call.
+        """
+        self.set_input(data)
+        bs_per_gpu = self.real_A.size(0) // max(len(self.opt.gpu_ids), 1)
+        self.real_A = self.real_A[:bs_per_gpu]
+        self.real_B = self.real_B[:bs_per_gpu]
+        self.forward()  # compute fake images: G(A)
+        if self.opt.isTrain:
+            self.compute_D_loss().backward()  # calculate gradients for D
+            self.compute_G_loss().backward()  # calculate gradients for G
+            if self.opt.lambda_NCE > 0.0:
+                self.optimizer_F = torch.optim.Adam(self.netF.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, self.opt.beta2))
+                self.optimizers.append(self.optimizer_F)
+
+    def optimize_parameters(self):
+        # forward
+        self.forward()
+
+        # update D
+        self.set_requires_grad(self.netD, True)
+        self.optimizer_D.zero_grad()
+        self.loss_D = self.compute_D_loss()
+        self.loss_D.backward()
+        self.optimizer_D.step()
+
+        # update G
+        self.set_requires_grad(self.netD, False)
+        self.optimizer_G.zero_grad()
+        if self.opt.netF == 'mlp_sample':
+            self.optimizer_F.zero_grad()
+        self.loss_G = self.compute_G_loss()
+        self.loss_G.backward()
+        self.optimizer_G.step()
+        if self.opt.netF == 'mlp_sample':
+            self.optimizer_F.step()
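Because netF is built lazily (see data_dependent_initialize above), the first batch must flow through the model before optimizer_F exists. A sketch of the expected call order in a training script (illustrative, not part of the patch; model and dataset are assumed):

for i, data in enumerate(dataset):
    if i == 0:
        model.data_dependent_initialize(data)  # builds netF's MLPs, creates optimizer_F
        model.setup(opt)                       # schedulers now cover the G, D, and F optimizers
    model.set_input(data)
    model.optimize_parameters()                # D step first, then the joint G (+F) step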
+ """ + AtoB = self.opt.direction == 'AtoB' + self.real_A = input['A' if AtoB else 'B'].to(self.device) + self.real_B = input['B' if AtoB else 'A'].to(self.device) + self.image_paths = input['A_paths' if AtoB else 'B_paths'] + + def forward(self): + """Run forward pass; called by both functions and .""" + self.real = torch.cat((self.real_A, self.real_B), dim=0) if self.opt.nce_idt and self.opt.isTrain else self.real_A + if self.opt.flip_equivariance: + self.flipped_for_equivariance = self.opt.isTrain and (np.random.random() < 0.5) + if self.flipped_for_equivariance: + self.real = torch.flip(self.real, [3]) + + self.fake = self.netG(self.real) + self.fake_B = self.fake[:self.real_A.size(0)] + if self.opt.nce_idt: + self.idt_B = self.fake[self.real_A.size(0):] + + def compute_D_loss(self): + """Calculate GAN loss for the discriminator""" + fake = self.fake_B.detach() + # Fake; stop backprop to the generator by detaching fake_B + pred_fake = self.netD(fake) + self.loss_D_fake = self.criterionGAN(pred_fake, False).mean() + # Real + self.pred_real = self.netD(self.real_B) + loss_D_real = self.criterionGAN(self.pred_real, True) + self.loss_D_real = loss_D_real.mean() + + # combine loss and calculate gradients + self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5 + return self.loss_D + + def compute_G_loss(self): + """Calculate GAN and NCE loss for the generator""" + fake = self.fake_B + # First, G(A) should fake the discriminator + if self.opt.lambda_GAN > 0.0: + pred_fake = self.netD(fake) + self.loss_G_GAN = self.criterionGAN(pred_fake, True).mean() * self.opt.lambda_GAN + else: + self.loss_G_GAN = 0.0 + + if self.opt.lambda_NCE > 0.0: + self.loss_NCE = self.calculate_NCE_loss(self.real_A, self.fake_B) + else: + self.loss_NCE, self.loss_NCE_bd = 0.0, 0.0 + + if self.opt.nce_idt and self.opt.lambda_NCE > 0.0: + self.loss_NCE_Y = self.calculate_NCE_loss(self.real_B, self.idt_B) + loss_NCE_both = (self.loss_NCE + self.loss_NCE_Y) * 0.5 + else: + loss_NCE_both = self.loss_NCE + + self.loss_G = self.loss_G_GAN + loss_NCE_both + return self.loss_G + + def calculate_NCE_loss(self, src, tgt): + n_layers = len(self.nce_layers) + feat_q = self.netG(tgt, self.nce_layers, encode_only=True) + + if self.opt.flip_equivariance and self.flipped_for_equivariance: + feat_q = [torch.flip(fq, [3]) for fq in feat_q] + + feat_k = self.netG(src, self.nce_layers, encode_only=True) + feat_k_pool, sample_ids = self.netF(feat_k, self.opt.num_patches, None) + feat_q_pool, _ = self.netF(feat_q, self.opt.num_patches, sample_ids) + + total_nce_loss = 0.0 + for f_q, f_k, crit, nce_layer in zip(feat_q_pool, feat_k_pool, self.criterionNCE, self.nce_layers): + loss = crit(f_q, f_k) * self.opt.lambda_NCE + total_nce_loss += loss.mean() + + return total_nce_loss / n_layers diff --git a/models/cycle_gan_model.py b/models/cycle_gan_model.py new file mode 100644 index 0000000..0e0874b --- /dev/null +++ b/models/cycle_gan_model.py @@ -0,0 +1,222 @@ +import torch +import itertools +from util.image_pool import ImagePool +from .base_model import BaseModel +from . import networks +try: + from apex import amp +except ImportError as error: + print(error) + + +class CycleGANModel(BaseModel): + """ + This class implements the CycleGAN model, for learning image-to-image translation without paired data. + + The model training requires '--dataset_mode unaligned' dataset. 
diff --git a/models/cycle_gan_model.py b/models/cycle_gan_model.py
new file mode 100644
index 0000000..0e0874b
--- /dev/null
+++ b/models/cycle_gan_model.py
@@ -0,0 +1,222 @@
+import torch
+import itertools
+from util.image_pool import ImagePool
+from .base_model import BaseModel
+from . import networks
+try:
+    from apex import amp
+except ImportError as error:
+    print(error)
+
+
+class CycleGANModel(BaseModel):
+    """
+    This class implements the CycleGAN model, for learning image-to-image translation without paired data.
+
+    The model training requires '--dataset_mode unaligned' dataset.
+    By default, it uses a '--netG resnet_9blocks' ResNet generator,
+    a '--netD basic' discriminator (PatchGAN introduced by pix2pix),
+    and a least-squares GAN objective ('--gan_mode lsgan').
+
+    CycleGAN paper: https://arxiv.org/pdf/1703.10593.pdf
+    """
+    @staticmethod
+    def modify_commandline_options(parser, is_train=True):
+        """Add new dataset-specific options, and rewrite default values for existing options.
+
+        Parameters:
+            parser -- original option parser
+            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
+
+        Returns:
+            the modified parser.
+
+        For CycleGAN, in addition to GAN losses, we introduce lambda_A, lambda_B, and lambda_identity for the following losses.
+        A (source domain), B (target domain).
+        Generators: G_A: A -> B; G_B: B -> A.
+        Discriminators: D_A: G_A(A) vs. B; D_B: G_B(B) vs. A.
+        Forward cycle loss: lambda_A * ||G_B(G_A(A)) - A|| (Eqn. (2) in the paper)
+        Backward cycle loss: lambda_B * ||G_A(G_B(B)) - B|| (Eqn. (2) in the paper)
+        Identity loss (optional): lambda_identity * (||G_A(B) - B|| * lambda_B + ||G_B(A) - A|| * lambda_A) (Sec 5.2 "Photo generation from paintings" in the paper)
+        Dropout is not used in the original CycleGAN paper.
+        """
+        # parser.set_defaults(no_dropout=True, no_antialias=True, no_antialias_up=True)  # default CycleGAN did not use dropout
+        # parser.set_defaults(no_dropout=True)
+        if is_train:
+            parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')
+            parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')
+            parser.add_argument('--lambda_identity', type=float, default=0.5, help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1')
+
+        return parser
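A small worked example of the weighting described in the --lambda_identity help text (illustrative arithmetic only, not part of the patch):

# In backward_G below, the identity terms are weighted by lambda_identity * lambda_A/B:
lambda_A, lambda_B, lambda_identity = 10.0, 10.0, 0.5
idt_weight = lambda_identity * lambda_B  # 5.0, i.e. half the cycle-loss weight
# To make the identity loss 10x smaller than the cycle loss, as the help text
# suggests, set lambda_identity = 0.1 (identity weight 1.0 vs. cycle weight 10.0).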
+
+    def __init__(self, opt):
+        """Initialize the CycleGAN class.
+
+        Parameters:
+            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
+        """
+        BaseModel.__init__(self, opt)
+        # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
+        self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B']
+        # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
+        visual_names_A = ['real_A', 'fake_B', 'rec_A']
+        visual_names_B = ['real_B', 'fake_A', 'rec_B']
+        if self.isTrain and self.opt.lambda_identity > 0.0:  # if identity loss is used, we also visualize idt_A=G_A(B) and idt_B=G_B(A)
+            visual_names_A.append('idt_B')
+            visual_names_B.append('idt_A')
+
+        self.visual_names = visual_names_A + visual_names_B  # combine visualizations for A and B
+        # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.
+        if self.isTrain:
+            self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']
+        else:  # during test time, only load Gs
+            self.model_names = ['G_A', 'G_B']
+
+        # define networks (both Generators and discriminators)
+        # The naming is different from those used in the paper.
+        # Code (vs. paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
+        self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.normG,
+                                        not opt.no_dropout, opt.init_type, opt.init_gain, opt.no_antialias, opt.no_antialias_up, self.gpu_ids, opt=opt)
+        self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.normG,
+                                        not opt.no_dropout, opt.init_type, opt.init_gain, opt.no_antialias, opt.no_antialias_up, self.gpu_ids, opt=opt)
+
+        if self.isTrain:  # define discriminators
+            self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD,
+                                            opt.n_layers_D, opt.normD, opt.init_type, opt.init_gain, opt.no_antialias, self.gpu_ids, opt=opt)
+            self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD,
+                                            opt.n_layers_D, opt.normD, opt.init_type, opt.init_gain, opt.no_antialias, self.gpu_ids, opt=opt)
+
+        if self.isTrain:
+            if opt.lambda_identity > 0.0:  # only works when input and output images have the same number of channels
+                assert(opt.input_nc == opt.output_nc)
+            self.fake_A_pool = ImagePool(opt.pool_size)  # create image buffer to store previously generated images
+            self.fake_B_pool = ImagePool(opt.pool_size)  # create image buffer to store previously generated images
+            # define loss functions
+            self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)  # define GAN loss.
+            self.criterionCycle = torch.nn.L1Loss()
+            self.criterionIdt = torch.nn.L1Loss()
+            # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
+            self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
+            self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
+            self.optimizers.append(self.optimizer_G)
+            self.optimizers.append(self.optimizer_D)
+
+    def set_input(self, input):
+        """Unpack input data from the dataloader and perform necessary pre-processing steps.
+
+        Parameters:
+            input (dict): include the data itself and its metadata information.
+
+        The option 'direction' can be used to swap domain A and domain B.
+        """
+        AtoB = self.opt.direction == 'AtoB'
+        self.real_A = input['A' if AtoB else 'B'].to(self.device)
+        self.real_B = input['B' if AtoB else 'A'].to(self.device)
+        self.image_paths = input['A_paths' if AtoB else 'B_paths']
+
+    def forward(self):
+        """Run forward pass; called by both functions <optimize_parameters> and <test>."""
+        self.fake_B = self.netG_A(self.real_A)  # G_A(A)
+        self.rec_A = self.netG_B(self.fake_B)   # G_B(G_A(A))
+        self.fake_A = self.netG_B(self.real_B)  # G_B(B)
+        self.rec_B = self.netG_A(self.fake_A)   # G_A(G_B(B))
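A sketch of how the four forward() outputs are used downstream (illustrative, not part of the patch): the cycle losses compare rec_A/rec_B against the inputs, and at test time a single generator suffices:

with torch.no_grad():
    fake_B = model.netG_A(real_A)  # A -> B translation
    rec_A = model.netG_B(fake_B)   # B -> A; the cycle loss pulls this back towards real_A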
+ """ + # Real + pred_real = netD(real) + loss_D_real = self.criterionGAN(pred_real, True) + # Fake + pred_fake = netD(fake.detach()) + loss_D_fake = self.criterionGAN(pred_fake, False) + # Combined loss and calculate gradients + loss_D = (loss_D_real + loss_D_fake) * 0.5 + if self.opt.amp: + with amp.scale_loss(loss_D, self.optimizer_D) as scaled_loss: + scaled_loss.backward() + else: + loss_D.backward() + return loss_D + + def backward_D_A(self): + """Calculate GAN loss for discriminator D_A""" + fake_B = self.fake_B_pool.query(self.fake_B) + self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B) + + def backward_D_B(self): + """Calculate GAN loss for discriminator D_B""" + fake_A = self.fake_A_pool.query(self.fake_A) + self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A) + + def backward_G(self): + """Calculate the loss for generators G_A and G_B""" + lambda_idt = self.opt.lambda_identity + lambda_A = self.opt.lambda_A + lambda_B = self.opt.lambda_B + # Identity loss + if lambda_idt > 0: + # G_A should be identity if real_B is fed: ||G_A(B) - B|| + self.idt_A = self.netG_A(self.real_B) + self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt + # G_B should be identity if real_A is fed: ||G_B(A) - A|| + self.idt_B = self.netG_B(self.real_A) + self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt + else: + self.loss_idt_A = 0 + self.loss_idt_B = 0 + + # GAN loss D_A(G_A(A)) + self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True) + # GAN loss D_B(G_B(B)) + self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True) + # Forward cycle loss || G_B(G_A(A)) - A|| + self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A + # Backward cycle loss || G_A(G_B(B)) - B|| + self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B + # combined loss and calculate gradients + self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B + if self.opt.amp: + with amp.scale_loss(self.loss_G, self.optimizer_G) as scaled_loss: + scaled_loss.backward() + else: + self.loss_G.backward() + + def data_dependent_initialize(self): + return + + def generate_visuals_for_evaluation(self, data, mode): + with torch.no_grad(): + visuals = {} + AtoB = self.opt.direction == "AtoB" + G = self.netG_A + source = data["A" if AtoB else "B"].to(self.device) + if mode == "forward": + visuals["fake_B"] = G(source) + else: + raise ValueError("mode %s is not recognized" % mode) + return visuals + + def optimize_parameters(self): + """Calculate losses, gradients, and update network weights; called in every training iteration""" + # forward + self.forward() # compute fake images and reconstruction images. 
diff --git a/models/networks.py b/models/networks.py
new file mode 100644
index 0000000..933f792
--- /dev/null
+++ b/models/networks.py
@@ -0,0 +1,1530 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.nn import init
+import functools
+from torch.optim import lr_scheduler
+import numpy as np
+import random
+from .stylegan_networks import StyleGAN2Discriminator, StyleGAN2Generator, TileStyleGAN2Discriminator
+
+###############################################################################
+# Helper Functions
+###############################################################################
+
+
+def get_filter(filt_size=3):
+    if(filt_size == 1):
+        a = np.array([1., ])
+    elif(filt_size == 2):
+        a = np.array([1., 1.])
+    elif(filt_size == 3):
+        a = np.array([1., 2., 1.])
+    elif(filt_size == 4):
+        a = np.array([1., 3., 3., 1.])
+    elif(filt_size == 5):
+        a = np.array([1., 4., 6., 4., 1.])
+    elif(filt_size == 6):
+        a = np.array([1., 5., 10., 10., 5., 1.])
+    elif(filt_size == 7):
+        a = np.array([1., 6., 15., 20., 15., 6., 1.])
+
+    filt = torch.Tensor(a[:, None] * a[None, :])
+    filt = filt / torch.sum(filt)
+
+    return filt
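get_filter turns a row of Pascal's triangle into a normalized, separable 2-D blur kernel. A quick check for filt_size=3 (illustrative only, not part of the patch):

import numpy as np

a = np.array([1., 2., 1.])      # binomial coefficients for filt_size=3
filt = a[:, None] * a[None, :]  # outer product: [[1,2,1],[2,4,2],[1,2,1]]
filt = filt / filt.sum()        # normalize by 16 -> a low-pass blur kernel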
+
+
+class Downsample(nn.Module):
+    def __init__(self, channels, pad_type='reflect', filt_size=3, stride=2, pad_off=0):
+        super(Downsample, self).__init__()
+        self.filt_size = filt_size
+        self.pad_off = pad_off
+        self.pad_sizes = [int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2)), int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2))]
+        self.pad_sizes = [pad_size + pad_off for pad_size in self.pad_sizes]
+        self.stride = stride
+        self.off = int((self.stride - 1) / 2.)
+        self.channels = channels
+
+        filt = get_filter(filt_size=self.filt_size)
+        self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))
+
+        self.pad = get_pad_layer(pad_type)(self.pad_sizes)
+
+    def forward(self, inp):
+        if(self.filt_size == 1):
+            if(self.pad_off == 0):
+                return inp[:, :, ::self.stride, ::self.stride]
+            else:
+                return self.pad(inp)[:, :, ::self.stride, ::self.stride]
+        else:
+            return F.conv2d(self.pad(inp), self.filt, stride=self.stride, groups=inp.shape[1])
+
+
+class Upsample2(nn.Module):
+    def __init__(self, scale_factor, mode='nearest'):
+        super().__init__()
+        self.factor = scale_factor
+        self.mode = mode
+
+    def forward(self, x):
+        return torch.nn.functional.interpolate(x, scale_factor=self.factor, mode=self.mode)
+
+
+class Upsample(nn.Module):
+    def __init__(self, channels, pad_type='repl', filt_size=4, stride=2):
+        super(Upsample, self).__init__()
+        self.filt_size = filt_size
+        self.filt_odd = np.mod(filt_size, 2) == 1
+        self.pad_size = int((filt_size - 1) / 2)
+        self.stride = stride
+        self.off = int((self.stride - 1) / 2.)
+        self.channels = channels
+
+        filt = get_filter(filt_size=self.filt_size) * (stride**2)
+        self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))
+
+        self.pad = get_pad_layer(pad_type)([1, 1, 1, 1])
+
+    def forward(self, inp):
+        ret_val = F.conv_transpose2d(self.pad(inp), self.filt, stride=self.stride, padding=1 + self.pad_size, groups=inp.shape[1])[:, :, 1:, 1:]
+        if(self.filt_odd):
+            return ret_val
+        else:
+            return ret_val[:, :, :-1, :-1]
+
+
+def get_pad_layer(pad_type):
+    if(pad_type in ['refl', 'reflect']):
+        PadLayer = nn.ReflectionPad2d
+    elif(pad_type in ['repl', 'replicate']):
+        PadLayer = nn.ReplicationPad2d
+    elif(pad_type == 'zero'):
+        PadLayer = nn.ZeroPad2d
+    else:
+        print('Pad type [%s] not recognized' % pad_type)
+    return PadLayer
+
+
+class Identity(nn.Module):
+    def forward(self, x):
+        return x
+
+
+def get_norm_layer(norm_type='instance'):
+    """Return a normalization layer
+
+    Parameters:
+        norm_type (str) -- the name of the normalization layer: batch | instance | none
+
+    For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
+    For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
+    """
+    if norm_type == 'batch':
+        norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
+    elif norm_type == 'instance':
+        norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
+    elif norm_type == 'none':
+        def norm_layer(x):
+            return Identity()
+    else:
+        raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
+    return norm_layer
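Downsample blurs with the binomial kernel before striding (anti-aliased pooling), and Upsample is its transposed counterpart. A shape sketch (illustrative, not part of the patch; assumes networks.py is importable):

import torch

x = torch.randn(1, 64, 128, 128)
down = Downsample(channels=64, filt_size=3, stride=2)
up = Upsample(channels=64, filt_size=4, stride=2)
print(down(x).shape)      # torch.Size([1, 64, 64, 64])
print(up(down(x)).shape)  # torch.Size([1, 64, 128, 128])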
+ """ + def init_func(m): # define the initialization function + classname = m.__class__.__name__ + if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1): + if debug: + print(classname) + if init_type == 'normal': + init.normal_(m.weight.data, 0.0, init_gain) + elif init_type == 'xavier': + init.xavier_normal_(m.weight.data, gain=init_gain) + elif init_type == 'kaiming': + init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') + elif init_type == 'orthogonal': + init.orthogonal_(m.weight.data, gain=init_gain) + else: + raise NotImplementedError('initialization method [%s] is not implemented' % init_type) + if hasattr(m, 'bias') and m.bias is not None: + init.constant_(m.bias.data, 0.0) + elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies. + init.normal_(m.weight.data, 1.0, init_gain) + init.constant_(m.bias.data, 0.0) + + net.apply(init_func) # apply the initialization function + + +def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[], debug=False, initialize_weights=True): + """Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights + Parameters: + net (network) -- the network to be initialized + init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal + gain (float) -- scaling factor for normal, xavier and orthogonal. + gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2 + + Return an initialized network. + """ + if len(gpu_ids) > 0: + assert(torch.cuda.is_available()) + net.to(gpu_ids[0]) + # if not amp: + # net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs for non-AMP training + if initialize_weights: + init_weights(net, init_type, init_gain=init_gain, debug=debug) + return net + + +def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', + init_gain=0.02, no_antialias=False, no_antialias_up=False, gpu_ids=[], opt=None): + """Create a generator + + Parameters: + input_nc (int) -- the number of channels in input images + output_nc (int) -- the number of channels in output images + ngf (int) -- the number of filters in the last conv layer + netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128 + norm (str) -- the name of normalization layers used in the network: batch | instance | none + use_dropout (bool) -- if use dropout layers. + init_type (str) -- the name of our initialization method. + init_gain (float) -- scaling factor for normal, xavier and orthogonal. + gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2 + + Returns a generator + + Our current implementation provides two types of generators: + U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images) + The original U-Net paper: https://arxiv.org/abs/1505.04597 + + Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks) + Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations. + We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style). + + + The generator has been initialized by . It uses RELU for non-linearity. 
+ """ + net = None + norm_layer = get_norm_layer(norm_type=norm) + + if netG == 'resnet_9blocks': + net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=9, opt=opt) + elif netG == 'resnet_9blocks_mask': + net = ResnetGeneratorMask(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=9, opt=opt) + elif netG == 'resnet_6blocks': + net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=6, opt=opt) + elif netG == 'resnet_4blocks': + net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=4, opt=opt) + elif netG == 'unet_128': + net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout) + elif netG == 'unet_256': + net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout) + elif netG == 'stylegan2': + net = StyleGAN2Generator(input_nc, output_nc, ngf, use_dropout=use_dropout, opt=opt) + elif netG == 'smallstylegan2': + net = StyleGAN2Generator(input_nc, output_nc, ngf, use_dropout=use_dropout, n_blocks=2, opt=opt) + elif netG == 'resnet_cat': + n_blocks = 8 + net = G_Resnet(input_nc, output_nc, opt.nz, num_downs=2, n_res=n_blocks - 4, ngf=ngf, norm='inst', nl_layer='relu') + else: + raise NotImplementedError('Generator model name [%s] is not recognized' % netG) + return init_net(net, init_type, init_gain, gpu_ids, initialize_weights=('stylegan2' not in netG)) + + +def define_F(input_nc, netF, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, no_antialias=False, gpu_ids=[], opt=None): + if netF == 'global_pool': + net = PoolingF() + elif netF == 'reshape': + net = ReshapeF() + elif netF == 'sample': + net = PatchSampleF(use_mlp=False, init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids, nc=opt.netF_nc) + elif netF == 'mlp_sample': + net = PatchSampleF(use_mlp=True, init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids, nc=opt.netF_nc) + elif netF == 'strided_conv': + net = StridedConvF(init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids) + else: + raise NotImplementedError('projection model name [%s] is not recognized' % netF) + return init_net(net, init_type, init_gain, gpu_ids) + + +def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, no_antialias=False, gpu_ids=[], opt=None): + """Create a discriminator + + Parameters: + input_nc (int) -- the number of channels in input images + ndf (int) -- the number of filters in the first conv layer + netD (str) -- the architecture's name: basic | n_layers | pixel + n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers' + norm (str) -- the type of normalization layers used in the network. + init_type (str) -- the name of the initialization method. + init_gain (float) -- scaling factor for normal, xavier and orthogonal. + gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2 + + Returns a discriminator + + Our current implementation provides three types of discriminators: + [basic]: 'PatchGAN' classifier described in the original pix2pix paper. + It can classify whether 70×70 overlapping patches are real or fake. 
+    Such a patch-level discriminator architecture has fewer parameters
+    than a full-image discriminator and can work on arbitrarily-sized images
+    in a fully convolutional fashion.
+
+        [n_layers]: With this mode, you can specify the number of conv layers in the discriminator
+        with the parameter n_layers_D (default=3 as used in [basic] (PatchGAN).)
+
+        [pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
+        It encourages greater color diversity but has no effect on spatial statistics.
+
+    The discriminator has been initialized by init_net. It uses LeakyReLU for non-linearity.
+    """
+    net = None
+    norm_layer = get_norm_layer(norm_type=norm)
+
+    if netD == 'basic':  # default PatchGAN classifier
+        net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, no_antialias=no_antialias,)
+    elif netD == 'n_layers':  # more options
+        net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, no_antialias=no_antialias,)
+    elif netD == 'pixel':  # classify if each pixel is real or fake
+        net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
+    elif 'stylegan2' in netD:
+        net = StyleGAN2Discriminator(input_nc, ndf, n_layers_D, no_antialias=no_antialias, opt=opt)
+    else:
+        raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
+    return init_net(net, init_type, init_gain, gpu_ids,
+                    initialize_weights=('stylegan2' not in netD))
+
+
+##############################################################################
+# Classes
+##############################################################################
+class GANLoss(nn.Module):
+    """Define different GAN objectives.
+
+    The GANLoss class abstracts away the need to create the target label tensor
+    that has the same size as the input.
+    """
+
+    def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
+        """ Initialize the GANLoss class.
+
+        Parameters:
+            gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
+            target_real_label (bool) - - label for a real image
+            target_fake_label (bool) - - label of a fake image
+
+        Note: Do not use sigmoid as the last layer of Discriminator.
+        LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
+        """
+        super(GANLoss, self).__init__()
+        self.register_buffer('real_label', torch.tensor(target_real_label))
+        self.register_buffer('fake_label', torch.tensor(target_fake_label))
+        self.gan_mode = gan_mode
+        if gan_mode == 'lsgan':
+            self.loss = nn.MSELoss()
+        elif gan_mode == 'vanilla':
+            self.loss = nn.BCEWithLogitsLoss()
+        elif gan_mode in ['wgangp', 'nonsaturating']:
+            self.loss = None
+        else:
+            raise NotImplementedError('gan mode %s not implemented' % gan_mode)
+
+    def get_target_tensor(self, prediction, target_is_real):
+        """Create label tensors with the same size as the input.
+
+        Parameters:
+            prediction (tensor) - - typically the prediction from a discriminator
+            target_is_real (bool) - - if the ground truth label is for real images or fake images
+
+        Returns:
+            A label tensor filled with ground truth label, and with the size of the input
+        """
+
+        if target_is_real:
+            target_tensor = self.real_label
+        else:
+            target_tensor = self.fake_label
+        return target_tensor.expand_as(prediction)
+
+    def __call__(self, prediction, target_is_real):
+        """Calculate loss given Discriminator's output and ground truth labels.
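+
+        A sketch of a typical discriminator update using this loss (names are illustrative):
+
+            criterion = GANLoss('lsgan')
+            loss_D = 0.5 * (criterion(netD(real), True) + criterion(netD(fake.detach()), False))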
+
+        Parameters:
+            prediction (tensor) - - typically the prediction output from a discriminator
+            target_is_real (bool) - - if the ground truth label is for real images or fake images
+
+        Returns:
+            the calculated loss.
+        """
+        bs = prediction.size(0)
+        if self.gan_mode in ['lsgan', 'vanilla']:
+            target_tensor = self.get_target_tensor(prediction, target_is_real)
+            # print(prediction.shape, target_is_real.shape)
+            loss = self.loss(prediction, target_tensor)
+        elif self.gan_mode == 'wgangp':
+            if target_is_real:
+                loss = -prediction.mean()
+            else:
+                loss = prediction.mean()
+        elif self.gan_mode == 'nonsaturating':
+            if target_is_real:
+                loss = F.softplus(-prediction).view(bs, -1).mean(dim=1)
+            else:
+                loss = F.softplus(prediction).view(bs, -1).mean(dim=1)
+        return loss
+
+
+def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
+    """Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
+
+    Arguments:
+        netD (network)           -- discriminator network
+        real_data (tensor array) -- real images
+        fake_data (tensor array) -- generated images from the generator
+        device (str)             -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
+        type (str)               -- if we mix real and fake data or not [real | fake | mixed].
+        constant (float)         -- the constant used in formula (||gradient||_2 - constant)^2
+        lambda_gp (float)        -- weight for this loss
+
+    Returns the gradient penalty loss
+    """
+    if lambda_gp > 0.0:
+        if type == 'real':   # either use real images, fake images, or a linear interpolation of two.
+            interpolatesv = real_data
+        elif type == 'fake':
+            interpolatesv = fake_data
+        elif type == 'mixed':
+            alpha = torch.rand(real_data.shape[0], 1, device=device)
+            alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
+            interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
+        else:
+            raise NotImplementedError('{} not implemented'.format(type))
+        interpolatesv.requires_grad_(True)
+        disc_interpolates = netD(interpolatesv)
+        gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
+                                        grad_outputs=torch.ones(disc_interpolates.size()).to(device),
+                                        create_graph=True, retain_graph=True, only_inputs=True)
+        gradients = gradients[0].view(real_data.size(0), -1)  # flatten the data
+        gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp  # added eps
+        return gradient_penalty, gradients
+    else:
+        return 0.0, None
+
+
+class Normalize(nn.Module):
+
+    def __init__(self, power=2):
+        super(Normalize, self).__init__()
+        self.power = power
+
+    def forward(self, x):
+        norm = x.pow(self.power).sum(1, keepdim=True).pow(1.
/ self.power) + out = x.div(norm + 1e-7) + return out + + +class PoolingF(nn.Module): + def __init__(self): + super(PoolingF, self).__init__() + model = [nn.AdaptiveMaxPool2d(1)] + self.model = nn.Sequential(*model) + self.l2norm = Normalize(2) + + def forward(self, x): + return self.l2norm(self.model(x)) + + +class ReshapeF(nn.Module): + def __init__(self): + super(ReshapeF, self).__init__() + model = [nn.AdaptiveAvgPool2d(4)] + self.model = nn.Sequential(*model) + self.l2norm = Normalize(2) + + def forward(self, x): + x = self.model(x) + x_reshape = x.permute(0, 2, 3, 1).flatten(0, 2) + return self.l2norm(x_reshape) + + +class StridedConvF(nn.Module): + def __init__(self, init_type='normal', init_gain=0.02, gpu_ids=[]): + super().__init__() + # self.conv1 = nn.Conv2d(256, 128, 3, stride=2) + # self.conv2 = nn.Conv2d(128, 64, 3, stride=1) + self.l2_norm = Normalize(2) + self.mlps = {} + self.moving_averages = {} + self.init_type = init_type + self.init_gain = init_gain + self.gpu_ids = gpu_ids + + def create_mlp(self, x): + C, H = x.shape[1], x.shape[2] + n_down = int(np.rint(np.log2(H / 32))) + mlp = [] + for i in range(n_down): + mlp.append(nn.Conv2d(C, max(C // 2, 64), 3, stride=2)) + mlp.append(nn.ReLU()) + C = max(C // 2, 64) + mlp.append(nn.Conv2d(C, 64, 3)) + mlp = nn.Sequential(*mlp) + init_net(mlp, self.init_type, self.init_gain, self.gpu_ids) + return mlp + + def update_moving_average(self, key, x): + if key not in self.moving_averages: + self.moving_averages[key] = x.detach() + + self.moving_averages[key] = self.moving_averages[key] * 0.999 + x.detach() * 0.001 + + def forward(self, x, use_instance_norm=False): + C, H = x.shape[1], x.shape[2] + key = '%d_%d' % (C, H) + if key not in self.mlps: + self.mlps[key] = self.create_mlp(x) + self.add_module("child_%s" % key, self.mlps[key]) + mlp = self.mlps[key] + x = mlp(x) + self.update_moving_average(key, x) + x = x - self.moving_averages[key] + if use_instance_norm: + x = F.instance_norm(x) + return self.l2_norm(x) + + +class PatchSampleF(nn.Module): + def __init__(self, use_mlp=False, init_type='normal', init_gain=0.02, nc=256, gpu_ids=[]): + # potential issues: currently, we use the same patch_ids for multiple images in the batch + super(PatchSampleF, self).__init__() + self.l2norm = Normalize(2) + self.use_mlp = use_mlp + self.nc = nc # hard-coded + self.mlp_init = False + self.init_type = init_type + self.init_gain = init_gain + self.gpu_ids = gpu_ids + + def create_mlp(self, feats): + for mlp_id, feat in enumerate(feats): + input_nc = feat.shape[-1] + # mlp = nn.Sequential(*[nn.Linear(input_nc, input_nc), nn.ReLU(), nn.Linear(input_nc, input_nc)]) + mlp = nn.Sequential(*[nn.Linear(input_nc, input_nc)]) + if len(self.gpu_ids) > 0: + mlp.cuda() + setattr(self, 'mlp_%d' % mlp_id, mlp) + init_net(self, self.init_type, self.init_gain, self.gpu_ids) + self.mlp_init = True + + def forward(self, feats, num_patches=64, patch_ids=None): + + return_feats = [] + if self.use_mlp and not self.mlp_init: + self.create_mlp(feats) + for feat_id, feat in enumerate(feats): + mlp = getattr(self, 'mlp_%d' % feat_id) + res = mlp(feat) + return_feats.append(res) + + return return_feats + + +class G_Resnet(nn.Module): + def __init__(self, input_nc, output_nc, nz, num_downs, n_res, ngf=64, + norm=None, nl_layer=None): + super(G_Resnet, self).__init__() + n_downsample = num_downs + pad_type = 'reflect' + self.enc_content = ContentEncoder(n_downsample, n_res, input_nc, ngf, norm, nl_layer, pad_type=pad_type) + if nz == 0: + self.dec = 
Decoder(n_downsample, n_res, self.enc_content.output_dim, output_nc, norm=norm, activ=nl_layer, pad_type=pad_type, nz=nz)
+        else:
+            self.dec = Decoder_all(n_downsample, n_res, self.enc_content.output_dim, output_nc, norm=norm, activ=nl_layer, pad_type=pad_type, nz=nz)
+
+    def decode(self, content, style=None):
+        return self.dec(content, style)
+
+    def forward(self, image, style=None, nce_layers=[], encode_only=False):
+        content, feats = self.enc_content(image, nce_layers=nce_layers, encode_only=encode_only)
+        if encode_only:
+            return feats
+        else:
+            images_recon = self.decode(content, style)
+            if len(nce_layers) > 0:
+                return images_recon, feats
+            else:
+                return images_recon
+
+##################################################################################
+# Encoder and Decoders
+##################################################################################
+
+
+class E_adaIN(nn.Module):
+    def __init__(self, input_nc, output_nc=1, nef=64, n_layers=4,
+                 norm=None, nl_layer=None, vae=False):
+        # style encoder
+        super(E_adaIN, self).__init__()
+        self.enc_style = StyleEncoder(n_layers, input_nc, nef, output_nc, norm='none', activ='relu', vae=vae)
+
+    def forward(self, image):
+        style = self.enc_style(image)
+        return style
+
+
+class StyleEncoder(nn.Module):
+    def __init__(self, n_downsample, input_dim, dim, style_dim, norm, activ, vae=False):
+        super(StyleEncoder, self).__init__()
+        self.vae = vae
+        self.model = []
+        self.model += [Conv2dBlock(input_dim, dim, 7, 1, 3, norm=norm, activation=activ, pad_type='reflect')]
+        for i in range(2):
+            self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')]
+            dim *= 2
+        for i in range(n_downsample - 2):
+            self.model += [Conv2dBlock(dim, dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')]
+        self.model += [nn.AdaptiveAvgPool2d(1)]  # global average pooling
+        if self.vae:
+            self.fc_mean = nn.Linear(dim, style_dim)  # , 1, 1, 0)
+            self.fc_var = nn.Linear(dim, style_dim)  # , 1, 1, 0)
+        else:
+            self.model += [nn.Conv2d(dim, style_dim, 1, 1, 0)]
+
+        self.model = nn.Sequential(*self.model)
+        self.output_dim = dim
+
+    def forward(self, x):
+        if self.vae:
+            output = self.model(x)
+            output = output.view(x.size(0), -1)
+            output_mean = self.fc_mean(output)
+            output_var = self.fc_var(output)
+            return output_mean, output_var
+        else:
+            return self.model(x).view(x.size(0), -1)
+
+
+class ContentEncoder(nn.Module):
+    def __init__(self, n_downsample, n_res, input_dim, dim, norm, activ, pad_type='zero'):
+        super(ContentEncoder, self).__init__()
+        self.model = []
+        self.model += [Conv2dBlock(input_dim, dim, 7, 1, 3, norm=norm, activation=activ, pad_type='reflect')]
+        # downsampling blocks
+        for i in range(n_downsample):
+            self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')]
+            dim *= 2
+        # residual blocks
+        self.model += [ResBlocks(n_res, dim, norm=norm, activation=activ, pad_type=pad_type)]
+        self.model = nn.Sequential(*self.model)
+        self.output_dim = dim
+
+    def forward(self, x, nce_layers=[], encode_only=False):
+        if len(nce_layers) > 0:
+            feat = x
+            feats = []
+            for layer_id, layer in enumerate(self.model):
+                feat = layer(feat)
+                if layer_id in nce_layers:
+                    feats.append(feat)
+                if layer_id == nce_layers[-1] and encode_only:
+                    return None, feats
+            return feat, feats
+        else:
+            return self.model(x), None
+
+
+class Decoder_all(nn.Module):
+    def __init__(self, n_upsample, n_res, dim, output_dim,
norm='batch', activ='relu', pad_type='zero', nz=0): + super(Decoder_all, self).__init__() + # AdaIN residual blocks + self.resnet_block = ResBlocks(n_res, dim, norm, activ, pad_type=pad_type, nz=nz) + self.n_blocks = 0 + # upsampling blocks + for i in range(n_upsample): + block = [Upsample2(scale_factor=2), Conv2dBlock(dim + nz, dim // 2, 5, 1, 2, norm='ln', activation=activ, pad_type='reflect')] + setattr(self, 'block_{:d}'.format(self.n_blocks), nn.Sequential(*block)) + self.n_blocks += 1 + dim //= 2 + # use reflection padding in the last conv layer + setattr(self, 'block_{:d}'.format(self.n_blocks), Conv2dBlock(dim + nz, output_dim, 7, 1, 3, norm='none', activation='tanh', pad_type='reflect')) + self.n_blocks += 1 + + def forward(self, x, y=None): + if y is not None: + output = self.resnet_block(cat_feature(x, y)) + for n in range(self.n_blocks): + block = getattr(self, 'block_{:d}'.format(n)) + if n > 0: + output = block(cat_feature(output, y)) + else: + output = block(output) + return output + + +class Decoder(nn.Module): + def __init__(self, n_upsample, n_res, dim, output_dim, norm='batch', activ='relu', pad_type='zero', nz=0): + super(Decoder, self).__init__() + + self.model = [] + # AdaIN residual blocks + self.model += [ResBlocks(n_res, dim, norm, activ, pad_type=pad_type, nz=nz)] + # upsampling blocks + for i in range(n_upsample): + if i == 0: + input_dim = dim + nz + else: + input_dim = dim + self.model += [Upsample2(scale_factor=2), Conv2dBlock(input_dim, dim // 2, 5, 1, 2, norm='ln', activation=activ, pad_type='reflect')] + dim //= 2 + # use reflection padding in the last conv layer + self.model += [Conv2dBlock(dim, output_dim, 7, 1, 3, norm='none', activation='tanh', pad_type='reflect')] + self.model = nn.Sequential(*self.model) + + def forward(self, x, y=None): + if y is not None: + return self.model(cat_feature(x, y)) + else: + return self.model(x) + +################################################################################## +# Sequential Models +################################################################################## + + +class ResBlocks(nn.Module): + def __init__(self, num_blocks, dim, norm='inst', activation='relu', pad_type='zero', nz=0): + super(ResBlocks, self).__init__() + self.model = [] + for i in range(num_blocks): + self.model += [ResBlock(dim, norm=norm, activation=activation, pad_type=pad_type, nz=nz)] + self.model = nn.Sequential(*self.model) + + def forward(self, x): + return self.model(x) + + +################################################################################## +# Basic Blocks +################################################################################## +def cat_feature(x, y): + y_expand = y.view(y.size(0), y.size(1), 1, 1).expand( + y.size(0), y.size(1), x.size(2), x.size(3)) + x_cat = torch.cat([x, y_expand], 1) + return x_cat + + +class ResBlock(nn.Module): + def __init__(self, dim, norm='inst', activation='relu', pad_type='zero', nz=0): + super(ResBlock, self).__init__() + + model = [] + model += [Conv2dBlock(dim + nz, dim, 3, 1, 1, norm=norm, activation=activation, pad_type=pad_type)] + model += [Conv2dBlock(dim, dim + nz, 3, 1, 1, norm=norm, activation='none', pad_type=pad_type)] + self.model = nn.Sequential(*model) + + def forward(self, x): + residual = x + out = self.model(x) + out += residual + return out + + +class Conv2dBlock(nn.Module): + def __init__(self, input_dim, output_dim, kernel_size, stride, + padding=0, norm='none', activation='relu', pad_type='zero'): + super(Conv2dBlock, self).__init__() + 
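+        # Note: the block is assembled as pad -> conv -> norm -> activation; padding is
+        # applied by a standalone layer so reflection padding can be combined with Conv2d.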
self.use_bias = True + # initialize padding + if pad_type == 'reflect': + self.pad = nn.ReflectionPad2d(padding) + elif pad_type == 'zero': + self.pad = nn.ZeroPad2d(padding) + else: + assert 0, "Unsupported padding type: {}".format(pad_type) + + # initialize normalization + norm_dim = output_dim + if norm == 'batch': + self.norm = nn.BatchNorm2d(norm_dim) + elif norm == 'inst': + self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=False) + elif norm == 'ln': + self.norm = LayerNorm(norm_dim) + elif norm == 'none': + self.norm = None + else: + assert 0, "Unsupported normalization: {}".format(norm) + + # initialize activation + if activation == 'relu': + self.activation = nn.ReLU(inplace=True) + elif activation == 'lrelu': + self.activation = nn.LeakyReLU(0.2, inplace=True) + elif activation == 'prelu': + self.activation = nn.PReLU() + elif activation == 'selu': + self.activation = nn.SELU(inplace=True) + elif activation == 'tanh': + self.activation = nn.Tanh() + elif activation == 'none': + self.activation = None + else: + assert 0, "Unsupported activation: {}".format(activation) + + # initialize convolution + self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias) + + def forward(self, x): + x = self.conv(self.pad(x)) + if self.norm: + x = self.norm(x) + if self.activation: + x = self.activation(x) + return x + + +class LinearBlock(nn.Module): + def __init__(self, input_dim, output_dim, norm='none', activation='relu'): + super(LinearBlock, self).__init__() + use_bias = True + # initialize fully connected layer + self.fc = nn.Linear(input_dim, output_dim, bias=use_bias) + + # initialize normalization + norm_dim = output_dim + if norm == 'batch': + self.norm = nn.BatchNorm1d(norm_dim) + elif norm == 'inst': + self.norm = nn.InstanceNorm1d(norm_dim) + elif norm == 'ln': + self.norm = LayerNorm(norm_dim) + elif norm == 'none': + self.norm = None + else: + assert 0, "Unsupported normalization: {}".format(norm) + + # initialize activation + if activation == 'relu': + self.activation = nn.ReLU(inplace=True) + elif activation == 'lrelu': + self.activation = nn.LeakyReLU(0.2, inplace=True) + elif activation == 'prelu': + self.activation = nn.PReLU() + elif activation == 'selu': + self.activation = nn.SELU(inplace=True) + elif activation == 'tanh': + self.activation = nn.Tanh() + elif activation == 'none': + self.activation = None + else: + assert 0, "Unsupported activation: {}".format(activation) + + def forward(self, x): + out = self.fc(x) + if self.norm: + out = self.norm(out) + if self.activation: + out = self.activation(out) + return out + +################################################################################## +# Normalization layers +################################################################################## + + +class LayerNorm(nn.Module): + def __init__(self, num_features, eps=1e-5, affine=True): + super(LayerNorm, self).__init__() + self.num_features = num_features + self.affine = affine + self.eps = eps + + if self.affine: + self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_()) + self.beta = nn.Parameter(torch.zeros(num_features)) + + def forward(self, x): + shape = [-1] + [1] * (x.dim() - 1) + mean = x.view(x.size(0), -1).mean(1).view(*shape) + std = x.view(x.size(0), -1).std(1).view(*shape) + x = (x - mean) / (std + self.eps) + + if self.affine: + shape = [1, -1] + [1] * (x.dim() - 2) + x = x * self.gamma.view(*shape) + self.beta.view(*shape) + return x + + +class ResnetGenerator(nn.Module): + """Resnet-based generator 
that consists of Resnet blocks between a few downsampling/upsampling operations. + + We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style) + """ + + def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False, no_antialias_up=False, opt=None): + """Construct a Resnet-based generator + + Parameters: + input_nc (int) -- the number of channels in input images + output_nc (int) -- the number of channels in output images + ngf (int) -- the number of filters in the last conv layer + norm_layer -- normalization layer + use_dropout (bool) -- if use dropout layers + n_blocks (int) -- the number of ResNet blocks + padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero + """ + assert(n_blocks >= 0) + super(ResnetGenerator, self).__init__() + self.opt = opt + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + model = [nn.ReflectionPad2d(3), + nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias), + norm_layer(ngf), + nn.ReLU(True)] + + n_downsampling = 2 + for i in range(n_downsampling): # add downsampling layers + mult = 2 ** i + if(no_antialias): + model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias), + norm_layer(ngf * mult * 2), + nn.ReLU(True)] + else: + model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=1, padding=1, bias=use_bias), + norm_layer(ngf * mult * 2), + nn.ReLU(True), + Downsample(ngf * mult * 2)] + + mult = 2 ** n_downsampling + for i in range(n_blocks): # add ResNet blocks + + model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)] + + for i in range(n_downsampling): # add upsampling layers + mult = 2 ** (n_downsampling - i) + if no_antialias_up: + model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), + kernel_size=3, stride=2, + padding=1, output_padding=1, + bias=use_bias), + norm_layer(int(ngf * mult / 2)), + nn.ReLU(True)] + else: + model += [Upsample(ngf * mult), + nn.Conv2d(ngf * mult, int(ngf * mult / 2), + kernel_size=3, stride=1, + padding=1, # output_padding=1, + bias=use_bias), + norm_layer(int(ngf * mult / 2)), + nn.ReLU(True)] + model += [nn.ReflectionPad2d(3)] + model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] + model += [nn.Tanh()] + + self.model = nn.Sequential(*model) + + def forward(self, input, layers=[], encode_only=False): + if -1 in layers: + layers.append(len(self.model)) + if len(layers) > 0: + feat = input + feats = [] + for layer_id, layer in enumerate(self.model): + # print(layer_id, layer) + feat = layer(feat) + if layer_id in layers: + # print("%d: adding the output of %s %d" % (layer_id, layer.__class__.__name__, feat.size(1))) + feats.append(feat) + else: + # print("%d: skipping %s %d" % (layer_id, layer.__class__.__name__, feat.size(1))) + pass + if layer_id == layers[-1] and encode_only: + # print('encoder only return features') + return feats # return intermediate features alone; stop in the last layers + + return feat, feats # return both output and intermediate features + else: + """Standard forward""" + fake = self.model(input) + return fake + + + + +class ResnetGeneratorMask(nn.Module): + """Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling 
operations. + + We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style) + """ + + def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False, no_antialias_up=False, opt=None): + """Construct a Resnet-based generator + + Parameters: + input_nc (int) -- the number of channels in input images + output_nc (int) -- the number of channels in output images + ngf (int) -- the number of filters in the last conv layer + norm_layer -- normalization layer + use_dropout (bool) -- if use dropout layers + n_blocks (int) -- the number of ResNet blocks + padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero + """ + assert(n_blocks >= 0) + super(ResnetGeneratorMask, self).__init__() + self.opt = opt + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + model = [nn.ReflectionPad2d(3), + nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias), + norm_layer(ngf), + nn.ReLU(True)] + + n_downsampling = 2 + for i in range(n_downsampling): # add downsampling layers + mult = 2 ** i + if(no_antialias): + model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias), + norm_layer(ngf * mult * 2), + nn.ReLU(True)] + else: + model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=1, padding=1, bias=use_bias), + norm_layer(ngf * mult * 2), + nn.ReLU(True), + Downsample(ngf * mult * 2)] + + mult = 2 ** n_downsampling + for i in range(n_blocks): # add ResNet blocks + + model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)] + + self.layer_encoder = len(model) - 1 + + for i in range(n_downsampling): # add upsampling layers + mult = 2 ** (n_downsampling - i) + if no_antialias_up: + model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), + kernel_size=3, stride=2, + padding=1, output_padding=1, + bias=use_bias), + norm_layer(int(ngf * mult / 2)), + nn.ReLU(True)] + else: + model += [Upsample(ngf * mult), + nn.Conv2d(ngf * mult, int(ngf * mult / 2), + kernel_size=3, stride=1, + padding=1, # output_padding=1, + bias=use_bias), + norm_layer(int(ngf * mult / 2)), + nn.ReLU(True)] + model += [nn.ReflectionPad2d(3)] + model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] + model += [nn.Tanh()] + + self.model = nn.Sequential(*model) + + def forward(self, input, layers=[], encode_only=False, mask_rate=0.0): + if -1 in layers: + layers.append(len(self.model)) + if len(layers) > 0: + feat = input + feats = [] + for layer_id, layer in enumerate(self.model): + # print(layer_id, layer) + feat = layer(feat) + if layer_id in layers: + # print("%d: adding the output of %s %d" % (layer_id, layer.__class__.__name__, feat.size(1))) + feats.append(feat) + else: + # print("%d: skipping %s %d" % (layer_id, layer.__class__.__name__, feat.size(1))) + pass + if layer_id == layers[-1] and encode_only: + # print('encoder only return features') + return feats # return intermediate features alone; stop in the last layers + + return feat, feats # return both output and intermediate features + elif mask_rate > 0.0: + feat = input + rate = random.uniform(0.0, mask_rate) + for layer_id, layer in enumerate(self.model): + feat = layer(feat) + # print(layer_id, self.layer_encoder) + if layer_id == self.layer_encoder: + # 
print('shape:', feat.shape) + B , C, H, W = feat.shape[0], feat.shape[1], feat.shape[2], feat.shape[3] + feat_reshape = feat.permute(0, 2, 3, 1).flatten(1, 2) + all_num = feat_reshape.shape[1] + point_num = all_num * rate + point_id = np.random.permutation(all_num) + point_id = point_id[:int(min(point_num, all_num))] + feat_reshape[:,point_id,:] = 0 + feat = feat_reshape.permute(0, 2, 1).reshape([B, C, H, W]) + # print('rec', feat.shape) + + return feat + + + else: + """Standard forward""" + fake = self.model(input) + return fake + +class ResnetDecoder(nn.Module): + """Resnet-based decoder that consists of a few Resnet blocks + a few upsampling operations. + """ + + def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False): + """Construct a Resnet-based decoder + + Parameters: + input_nc (int) -- the number of channels in input images + output_nc (int) -- the number of channels in output images + ngf (int) -- the number of filters in the last conv layer + norm_layer -- normalization layer + use_dropout (bool) -- if use dropout layers + n_blocks (int) -- the number of ResNet blocks + padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero + """ + assert(n_blocks >= 0) + super(ResnetDecoder, self).__init__() + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + model = [] + n_downsampling = 2 + mult = 2 ** n_downsampling + for i in range(n_blocks): # add ResNet blocks + + model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)] + + for i in range(n_downsampling): # add upsampling layers + mult = 2 ** (n_downsampling - i) + if(no_antialias): + model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), + kernel_size=3, stride=2, + padding=1, output_padding=1, + bias=use_bias), + norm_layer(int(ngf * mult / 2)), + nn.ReLU(True)] + else: + model += [Upsample(ngf * mult), + nn.Conv2d(ngf * mult, int(ngf * mult / 2), + kernel_size=3, stride=1, + padding=1, + bias=use_bias), + norm_layer(int(ngf * mult / 2)), + nn.ReLU(True)] + model += [nn.ReflectionPad2d(3)] + model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] + model += [nn.Tanh()] + + self.model = nn.Sequential(*model) + + def forward(self, input): + """Standard forward""" + return self.model(input) + + +class ResnetEncoder(nn.Module): + """Resnet-based encoder that consists of a few downsampling + several Resnet blocks + """ + + def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False): + """Construct a Resnet-based encoder + + Parameters: + input_nc (int) -- the number of channels in input images + output_nc (int) -- the number of channels in output images + ngf (int) -- the number of filters in the last conv layer + norm_layer -- normalization layer + use_dropout (bool) -- if use dropout layers + n_blocks (int) -- the number of ResNet blocks + padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero + """ + assert(n_blocks >= 0) + super(ResnetEncoder, self).__init__() + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + model = [nn.ReflectionPad2d(3), + nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias), 
+ norm_layer(ngf), + nn.ReLU(True)] + + n_downsampling = 2 + for i in range(n_downsampling): # add downsampling layers + mult = 2 ** i + if(no_antialias): + model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias), + norm_layer(ngf * mult * 2), + nn.ReLU(True)] + else: + model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=1, padding=1, bias=use_bias), + norm_layer(ngf * mult * 2), + nn.ReLU(True), + Downsample(ngf * mult * 2)] + + mult = 2 ** n_downsampling + for i in range(n_blocks): # add ResNet blocks + + model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)] + + self.model = nn.Sequential(*model) + + def forward(self, input): + """Standard forward""" + return self.model(input) + + +class ResnetBlock(nn.Module): + """Define a Resnet block""" + + def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias): + """Initialize the Resnet block + + A resnet block is a conv block with skip connections + We construct a conv block with build_conv_block function, + and implement skip connections in function. + Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf + """ + super(ResnetBlock, self).__init__() + self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias) + + def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias): + """Construct a convolutional block. + + Parameters: + dim (int) -- the number of channels in the conv layer. + padding_type (str) -- the name of padding layer: reflect | replicate | zero + norm_layer -- normalization layer + use_dropout (bool) -- if use dropout layers. + use_bias (bool) -- if the conv layer uses bias or not + + Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU)) + """ + conv_block = [] + p = 0 + if padding_type == 'reflect': + conv_block += [nn.ReflectionPad2d(1)] + elif padding_type == 'replicate': + conv_block += [nn.ReplicationPad2d(1)] + elif padding_type == 'zero': + p = 1 + else: + raise NotImplementedError('padding [%s] is not implemented' % padding_type) + + conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)] + if use_dropout: + conv_block += [nn.Dropout(0.5)] + + p = 0 + if padding_type == 'reflect': + conv_block += [nn.ReflectionPad2d(1)] + elif padding_type == 'replicate': + conv_block += [nn.ReplicationPad2d(1)] + elif padding_type == 'zero': + p = 1 + else: + raise NotImplementedError('padding [%s] is not implemented' % padding_type) + conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)] + + return nn.Sequential(*conv_block) + + def forward(self, x): + """Forward function (with skip connections)""" + out = x + self.conv_block(x) # add skip connections + return out + + +class UnetGenerator(nn.Module): + """Create a Unet-based generator""" + + def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False): + """Construct a Unet generator + Parameters: + input_nc (int) -- the number of channels in input images + output_nc (int) -- the number of channels in output images + num_downs (int) -- the number of downsamplings in UNet. 
For example, # if |num_downs| == 7, + image of size 128x128 will become of size 1x1 # at the bottleneck + ngf (int) -- the number of filters in the last conv layer + norm_layer -- normalization layer + + We construct the U-Net from the innermost layer to the outermost layer. + It is a recursive process. + """ + super(UnetGenerator, self).__init__() + # construct unet structure + unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer + for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters + unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout) + # gradually reduce the number of filters from ngf * 8 to ngf + unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer) + unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer) + unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer) + self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer + + def forward(self, input): + """Standard forward""" + return self.model(input) + + +class UnetSkipConnectionBlock(nn.Module): + """Defines the Unet submodule with skip connection. + X -------------------identity---------------------- + |-- downsampling -- |submodule| -- upsampling --| + """ + + def __init__(self, outer_nc, inner_nc, input_nc=None, + submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False): + """Construct a Unet submodule with skip connections. + + Parameters: + outer_nc (int) -- the number of filters in the outer conv layer + inner_nc (int) -- the number of filters in the inner conv layer + input_nc (int) -- the number of channels in input images/features + submodule (UnetSkipConnectionBlock) -- previously defined submodules + outermost (bool) -- if this module is the outermost module + innermost (bool) -- if this module is the innermost module + norm_layer -- normalization layer + use_dropout (bool) -- if use dropout layers. 
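+
+        A sketch of the recursive construction used by UnetGenerator (with ngf=64, num_downs=5):
+
+            block = UnetSkipConnectionBlock(512, 512, innermost=True)
+            block = UnetSkipConnectionBlock(256, 512, submodule=block)
+            block = UnetSkipConnectionBlock(128, 256, submodule=block)
+            block = UnetSkipConnectionBlock(64, 128, submodule=block)
+            net = UnetSkipConnectionBlock(3, 64, input_nc=3, submodule=block, outermost=True)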
+ """ + super(UnetSkipConnectionBlock, self).__init__() + self.outermost = outermost + if type(norm_layer) == functools.partial: + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + if input_nc is None: + input_nc = outer_nc + downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4, + stride=2, padding=1, bias=use_bias) + downrelu = nn.LeakyReLU(0.2, True) + downnorm = norm_layer(inner_nc) + uprelu = nn.ReLU(True) + upnorm = norm_layer(outer_nc) + + if outermost: + upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, + kernel_size=4, stride=2, + padding=1) + down = [downconv] + up = [uprelu, upconv, nn.Tanh()] + model = down + [submodule] + up + elif innermost: + upconv = nn.ConvTranspose2d(inner_nc, outer_nc, + kernel_size=4, stride=2, + padding=1, bias=use_bias) + down = [downrelu, downconv] + up = [uprelu, upconv, upnorm] + model = down + up + else: + upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, + kernel_size=4, stride=2, + padding=1, bias=use_bias) + down = [downrelu, downconv, downnorm] + up = [uprelu, upconv, upnorm] + + if use_dropout: + model = down + [submodule] + up + [nn.Dropout(0.5)] + else: + model = down + [submodule] + up + + self.model = nn.Sequential(*model) + + def forward(self, x): + if self.outermost: + return self.model(x) + else: # add skip connections + return torch.cat([x, self.model(x)], 1) + + +class MLPDiscriminator(nn.Module): + def __init__(self, in_feat=768, hid_feat = 768, out_feat = 768, dropout = 0.): + super().__init__() + if not hid_feat: + hid_feat = in_feat + if not out_feat: + out_feat = in_feat + self.linear1 = nn.Linear(in_feat, hid_feat) + self.activation = nn.GELU() + self.linear2 = nn.Linear(hid_feat, out_feat) + self.dropout = nn.Dropout(dropout) + + def forward(self, x): + x = self.linear1(x) + x = self.activation(x) + x = self.dropout(x) + x = self.linear2(x) + return self.dropout(x) + + +class NLayerDiscriminator(nn.Module): + """Defines a PatchGAN discriminator""" + + def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, no_antialias=False): + """Construct a PatchGAN discriminator + + Parameters: + input_nc (int) -- the number of channels in input images + ndf (int) -- the number of filters in the last conv layer + n_layers (int) -- the number of conv layers in the discriminator + norm_layer -- normalization layer + """ + super(NLayerDiscriminator, self).__init__() + if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + kw = 4 + padw = 1 + if(no_antialias): + sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)] + else: + sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=1, padding=padw), nn.LeakyReLU(0.2, True), Downsample(ndf)] + nf_mult = 1 + nf_mult_prev = 1 + for n in range(1, n_layers): # gradually increase the number of filters + nf_mult_prev = nf_mult + nf_mult = min(2 ** n, 8) + if(no_antialias): + sequence += [ + nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias), + norm_layer(ndf * nf_mult), + nn.LeakyReLU(0.2, True) + ] + else: + sequence += [ + nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias), + norm_layer(ndf * nf_mult), + nn.LeakyReLU(0.2, True), + Downsample(ndf * nf_mult)] + + nf_mult_prev = nf_mult + nf_mult = min(2 ** n_layers, 8) + 
sequence += [ + nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias), + norm_layer(ndf * nf_mult), + nn.LeakyReLU(0.2, True) + ] + + sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map + self.model = nn.Sequential(*sequence) + + def forward(self, input): + """Standard forward.""" + return self.model(input) + + +class PixelDiscriminator(nn.Module): + """Defines a 1x1 PatchGAN discriminator (pixelGAN)""" + + def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d): + """Construct a 1x1 PatchGAN discriminator + + Parameters: + input_nc (int) -- the number of channels in input images + ndf (int) -- the number of filters in the last conv layer + norm_layer -- normalization layer + """ + super(PixelDiscriminator, self).__init__() + if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters + use_bias = norm_layer.func == nn.InstanceNorm2d + else: + use_bias = norm_layer == nn.InstanceNorm2d + + self.net = [ + nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0), + nn.LeakyReLU(0.2, True), + nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias), + norm_layer(ndf * 2), + nn.LeakyReLU(0.2, True), + nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)] + + self.net = nn.Sequential(*self.net) + + def forward(self, input): + """Standard forward.""" + return self.net(input) + + +class PatchDiscriminator(NLayerDiscriminator): + """Defines a PatchGAN discriminator""" + + def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, no_antialias=False): + super().__init__(input_nc, ndf, 2, norm_layer, no_antialias) + + def forward(self, input): + B, C, H, W = input.size(0), input.size(1), input.size(2), input.size(3) + size = 16 + Y = H // size + X = W // size + input = input.view(B, C, Y, size, X, size) + input = input.permute(0, 2, 4, 1, 3, 5).contiguous().view(B * Y * X, C, size, size) + return super().forward(input) + + +class GroupedChannelNorm(nn.Module): + def __init__(self, num_groups): + super().__init__() + self.num_groups = num_groups + + def forward(self, x): + shape = list(x.shape) + new_shape = [shape[0], self.num_groups, shape[1] // self.num_groups] + shape[2:] + x = x.view(*new_shape) + mean = x.mean(dim=2, keepdim=True) + std = x.std(dim=2, keepdim=True) + x_norm = (x - mean) / (std + 1e-7) + return x_norm.view(*shape) diff --git a/models/patchnce.py b/models/patchnce.py new file mode 100644 index 0000000..475793c --- /dev/null +++ b/models/patchnce.py @@ -0,0 +1,55 @@ +from packaging import version +import torch +from torch import nn + + +class PatchNCELoss(nn.Module): + def __init__(self, opt): + super().__init__() + self.opt = opt + self.cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction='none') + self.mask_dtype = torch.uint8 if version.parse(torch.__version__) < version.parse('1.2.0') else torch.bool + + def forward(self, feat_q, feat_k): + num_patches = feat_q.shape[0] + dim = feat_q.shape[1] + feat_k = feat_k.detach() + + # pos logit + l_pos = torch.bmm( + feat_q.view(num_patches, 1, -1), feat_k.view(num_patches, -1, 1)) + l_pos = l_pos.view(num_patches, 1) + + # neg logit + + # Should the negatives from the other samples of a minibatch be utilized? + # In CUT and FastCUT, we found that it's best to only include negatives + # from the same image. 
Therefore, we set + # --nce_includes_all_negatives_from_minibatch as False + # However, for single-image translation, the minibatch consists of + # crops from the "same" high-resolution image. + # Therefore, we will include the negatives from the entire minibatch. + if self.opt.nce_includes_all_negatives_from_minibatch: + # reshape features as if they are all negatives of minibatch of size 1. + batch_dim_for_bmm = 1 + else: + batch_dim_for_bmm = self.opt.batch_size + + # reshape features to batch size + feat_q = feat_q.view(batch_dim_for_bmm, -1, dim) + feat_k = feat_k.view(batch_dim_for_bmm, -1, dim) + npatches = feat_q.size(1) + l_neg_curbatch = torch.bmm(feat_q, feat_k.transpose(2, 1)) + + # diagonal entries are similarity between same features, and hence meaningless. + # just fill the diagonal with very small number, which is exp(-10) and almost zero + diagonal = torch.eye(npatches, device=feat_q.device, dtype=self.mask_dtype)[None, :, :] + l_neg_curbatch.masked_fill_(diagonal, -10.0) + l_neg = l_neg_curbatch.view(-1, npatches) + + out = torch.cat((l_pos, l_neg), dim=1) / self.opt.nce_T + + loss = self.cross_entropy_loss(out, torch.zeros(out.size(0), dtype=torch.long, + device=feat_q.device)) + + return loss diff --git a/models/roma_model.py b/models/roma_model.py new file mode 100644 index 0000000..48c307e --- /dev/null +++ b/models/roma_model.py @@ -0,0 +1,363 @@ +import numpy as np +import torch +from .base_model import BaseModel +from . import networks +from .patchnce import PatchNCELoss +import util.util as util +import timm +import time +import torch.nn.functional as F +import sys +from functools import partial +import torch.nn as nn +import math + +from torchvision.transforms import transforms as tfs + +class ROMAModel(BaseModel): + + @staticmethod + def modify_commandline_options(parser, is_train=True): + """ Configures options specific for CUT model + """ + parser.add_argument('--adj_size_list', type=list, default=[2, 4, 6, 8, 12], help='different scales of perception field') + parser.add_argument('--lambda_mlp', type=float, default=1.0, help='weight of lr for discriminator') + parser.add_argument('--lambda_motion', type=float, default=1.0, help='weight for Temporal Consistency') + parser.add_argument('--lambda_D_ViT', type=float, default=1.0, help='weight for discriminator') + parser.add_argument('--lambda_GAN', type=float, default=1.0, help='weight for GAN loss: GAN(G(X))') + parser.add_argument('--lambda_global', type=float, default=1.0, help='weight for Global Structural Consistency') + parser.add_argument('--lambda_spatial', type=float, default=1.0, help='weight for Local Structural Consistency') + parser.add_argument('--atten_layers', type=str, default='1,3,5', help='compute Cross-Similarity on which layers') + parser.add_argument('--local_nums', type=int, default=256) + parser.add_argument('--which_D_layer', type=int, default=-1) + parser.add_argument('--side_length', type=int, default=7) + + parser.set_defaults(pool_size=0) + + opt, _ = parser.parse_known_args() + + return parser + + def __init__(self, opt): + BaseModel.__init__(self, opt) + + + self.loss_names = ['G_GAN_ViT', 'D_real_ViT', 'D_fake_ViT', 'global', 'spatial', 'motion'] + self.visual_names = ['real_A0', 'real_A1', 'fake_B0', 'fake_B1', 'real_B0', 'real_B1'] + self.atten_layers = [int(i) for i in self.opt.atten_layers.split(',')] + + + if self.isTrain: + self.model_names = ['G', 'D_ViT'] + else: # during test time, only load G + self.model_names = ['G'] + + + # define networks (both generator and 
discriminator) + self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.normG, not opt.no_dropout, opt.init_type, opt.init_gain, opt.no_antialias, opt.no_antialias_up, self.gpu_ids, opt) + + + if self.isTrain: + + self.netD_ViT = networks.MLPDiscriminator().to(self.device) + self.netPreViT = timm.create_model("vit_base_patch16_384",pretrained=True).to(self.device) + + + self.norm = F.softmax + + self.resize = tfs.Resize(size=(384,384)) + + self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device) + self.criterionNCE = [] + + for atten_layer in self.atten_layers: + self.criterionNCE.append(PatchNCELoss(opt).to(self.device)) + + self.criterionL1 = torch.nn.L1Loss().to(self.device) + self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2)) + self.optimizer_D_ViT = torch.optim.Adam(self.netD_ViT.parameters(), lr=opt.lr * opt.lambda_mlp, betas=(opt.beta1, opt.beta2)) + self.optimizers.append(self.optimizer_G) + self.optimizers.append(self.optimizer_D_ViT) + + def data_dependent_initialize(self, data): + """ + The feature network netF is defined in terms of the shape of the intermediate, extracted + features of the encoder portion of netG. Because of this, the weights of netF are + initialized at the first feedforward pass with some input images. + Please also see PatchSampleF.create_mlp(), which is called at the first forward() call. + """ + pass + + + def optimize_parameters(self): + # forward + self.forward() + + # update D + self.set_requires_grad(self.netD_ViT, True) + self.optimizer_D_ViT.zero_grad() + self.loss_D = self.compute_D_loss() + self.loss_D.backward() + self.optimizer_D_ViT.step() + + # update G + self.set_requires_grad(self.netD_ViT, False) + self.optimizer_G.zero_grad() + self.loss_G = self.compute_G_loss() + self.loss_G.backward() + self.optimizer_G.step() + + def set_input(self, input): + """Unpack input data from the dataloader and perform necessary pre-processing steps. + Parameters: + input (dict): include the data itself and its metadata information. + The option 'direction' can be used to swap domain A and domain B. 
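+
+        For this model, the input dict is expected to provide two frames per domain,
+        under the keys 'A0', 'A1', 'B0', 'B1', plus 'A_paths'/'B_paths' (as read below).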
+ """ + AtoB = self.opt.direction == 'AtoB' + self.real_A0 = input['A0' if AtoB else 'B0'].to(self.device) + self.real_A1 = input['A1' if AtoB else 'B1'].to(self.device) + self.real_B0 = input['B0' if AtoB else 'A0'].to(self.device) + self.real_B1 = input['B1' if AtoB else 'A1'].to(self.device) + self.image_paths = input['A_paths' if AtoB else 'B_paths'] + + def forward(self): + """Run forward pass; called by both functions and .""" + + # ============ 第一步:对 real_A / real_A2 进行多步随机生成过程 ============ + tau = self.opt.tau + T = self.opt.num_timesteps + incs = np.array([0] + [1/(i+1) for i in range(T-1)]) + times = np.cumsum(incs) + times = times / times[-1] + times = 0.5 * times[-1] + 0.5 * times #[0.5,1] + times = np.concatenate([np.zeros(1), times]) + times = torch.tensor(times).float().cuda() + self.times = times + bs = self.mutil_real_A0_tokens.size(0) + time_idx = (torch.randint(T, size=[1]).cuda() * torch.ones(size=[1]).cuda()).long() + self.time_idx = time_idx + + with torch.no_grad(): + self.netG.eval() + # ============ 第二步:对 real_A / real_A2 进行多步随机生成过程 ============ + for t in range(self.time_idx.int().item() + 1): + # 计算增量 delta 与 inter/scale,用于每个时间步的插值等 + if t > 0: + delta = times[t] - times[t - 1] + denom = times[-1] - times[t - 1] + inter = (delta / denom).reshape(-1, 1, 1, 1) + scale = (delta * (1 - delta / denom)).reshape(-1, 1, 1, 1) + + # 对 Xt、Xt2 进行随机噪声更新 + Xt = self.mutil_real_A0_tokens if (t == 0) else (1 - inter) * Xt + inter * Xt_1.detach() + \ + (scale * tau).sqrt() * torch.randn_like(Xt).to(self.mutil_real_A0_tokens.device) + time_idx = (t * torch.ones(size=[self.mutil_real_A0_tokens.shape[0]]).to(self.mutil_real_A0_tokens.device)).long() + z = torch.randn(size=[self.mutil_real_A0_tokens.shape[0], 4 * self.opt.ngf]).to(self.mutil_real_A0_tokens.device) + self.time = times[time_idx] + Xt_1 = self.netG(Xt, self.time, z) + + Xt2 = self.mutil_real_A1_tokens if (t == 0) else (1 - inter) * Xt2 + inter * Xt_12.detach() + \ + (scale * tau).sqrt() * torch.randn_like(Xt2).to(self.mutil_real_A1_tokens.device) + time_idx = (t * torch.ones(size=[self.mutil_real_A1_tokens.shape[0]]).to(self.mutil_real_A1_tokens.device)).long() + z = torch.randn(size=[self.mutil_real_A1_tokens.shape[0], 4 * self.opt.ngf]).to(self.mutil_real_A1_tokens.device) + Xt_12 = self.netG(Xt2, self.time, z) + + # 保存去噪后的中间结果 (real_A_noisy 等),供下一步做拼接 + self.real_A_noisy = Xt.detach() + self.real_A_noisy2 = Xt2.detach() + # 保存noisy_map + self.noisy_map = self.real_A_noisy - self.real_A + + # ============ 第三步:拼接输入并执行网络推理 ============= + bs = self.mutil_real_A0_tokens.size(0) + z_in = torch.randn(size=[2 * bs, 4 * self.opt.ngf]).to(self.mutil_real_A0_tokens.device) + z_in2 = torch.randn(size=[bs, 4 * self.opt.ngf]).to(self.mutil_real_A1_tokens.device) + # 将 real_A, real_B 拼接 (如 nce_idt=True),并同样处理 real_A_noisy 与 XtB + self.real = self.mutil_real_A0_tokens + self.realt = self.real_A_noisy + + if self.opt.flip_equivariance: + self.flipped_for_equivariance = self.opt.isTrain and (np.random.random() < 0.5) + if self.flipped_for_equivariance: + self.real = torch.flip(self.real, [3]) + self.realt = torch.flip(self.realt, [3]) + + + self.fake_B0 = self.netG(self.real_A0) + self.fake_B1 = self.netG(self.real_A1) + + if self.opt.isTrain: + real_A0 = self.real_A0 + real_A1 = self.real_A1 + real_B0 = self.real_B0 + real_B1 = self.real_B1 + fake_B0 = self.fake_B0 + fake_B1 = self.fake_B1 + self.real_A0_resize = self.resize(real_A0) + self.real_A1_resize = self.resize(real_A1) + real_B0 = self.resize(real_B0) + real_B1 = 
self.resize(real_B1) + self.fake_B0_resize = self.resize(fake_B0) + self.fake_B1_resize = self.resize(fake_B1) + self.mutil_real_A0_tokens = self.netPreViT(self.real_A0_resize, self.atten_layers, get_tokens=True) + self.mutil_real_A1_tokens = self.netPreViT(self.real_A1_resize, self.atten_layers, get_tokens=True) + self.mutil_real_B0_tokens = self.netPreViT(real_B0, self.atten_layers, get_tokens=True) + self.mutil_real_B1_tokens = self.netPreViT(real_B1, self.atten_layers, get_tokens=True) + self.mutil_fake_B0_tokens = self.netPreViT(self.fake_B0_resize, self.atten_layers, get_tokens=True) + self.mutil_fake_B1_tokens = self.netPreViT(self.fake_B1_resize, self.atten_layers, get_tokens=True) + + def tokens_concat(self, origin_tokens, adjacent_size): + adj_size = adjacent_size + B, token_num, C = origin_tokens.shape[0], origin_tokens.shape[1], origin_tokens.shape[2] + S = int(math.sqrt(token_num)) + if S * S != token_num: + print('Error! Not a square!') + token_map = origin_tokens.clone().reshape(B,S,S,C) + cut_patch_list = [] + for i in range(0, S, adj_size): + for j in range(0, S, adj_size): + i_left = i + i_right = i + adj_size + 1 if i + adj_size <= S else S + 1 + j_left = j + j_right = j + adj_size if j + adj_size <= S else S + 1 + + cut_patch = token_map[:, i_left:i_right, j_left: j_right, :] + cut_patch= cut_patch.reshape(B,-1,C) + cut_patch = torch.mean(cut_patch, dim=1, keepdim=True) + cut_patch_list.append(cut_patch) + + + result = torch.cat(cut_patch_list,dim=1) + return result + + + def cat_results(self, origin_tokens, adj_size_list): + res_list = [origin_tokens] + for ad_s in adj_size_list: + cat_result = self.tokens_concat(origin_tokens, ad_s) + res_list.append(cat_result) + + result = torch.cat(res_list, dim=1) + + return result + + + + def compute_D_loss(self): + """Calculate GAN loss for the discriminator""" + + + lambda_D_ViT = self.opt.lambda_D_ViT + fake_B0_tokens = self.mutil_fake_B0_tokens[self.opt.which_D_layer].detach() + fake_B1_tokens = self.mutil_fake_B1_tokens[self.opt.which_D_layer].detach() + + real_B0_tokens = self.mutil_real_B0_tokens[self.opt.which_D_layer] + real_B1_tokens = self.mutil_real_B1_tokens[self.opt.which_D_layer] + + + fake_B0_tokens = self.cat_results(fake_B0_tokens, self.opt.adj_size_list) + fake_B1_tokens = self.cat_results(fake_B1_tokens, self.opt.adj_size_list) + + + + real_B0_tokens = self.cat_results(real_B0_tokens, self.opt.adj_size_list) + real_B1_tokens = self.cat_results(real_B1_tokens, self.opt.adj_size_list) + + pre_fake0_ViT = self.netD_ViT(fake_B0_tokens) + pre_fake1_ViT = self.netD_ViT(fake_B1_tokens) + + self.loss_D_fake_ViT = (self.criterionGAN(pre_fake0_ViT, False).mean() + self.criterionGAN(pre_fake1_ViT, False).mean()) * 0.5 * lambda_D_ViT + + pred_real0_ViT = self.netD_ViT(real_B0_tokens) + pred_real1_ViT = self.netD_ViT(real_B1_tokens) + self.loss_D_real_ViT = (self.criterionGAN(pred_real0_ViT, True).mean() + self.criterionGAN(pred_real1_ViT, True).mean()) * 0.5 * lambda_D_ViT + + self.loss_D_ViT = (self.loss_D_fake_ViT + self.loss_D_real_ViT) * 0.5 + + + return self.loss_D_ViT + + def compute_G_loss(self): + + if self.opt.lambda_GAN > 0.0: + + fake_B0_tokens = self.mutil_fake_B0_tokens[self.opt.which_D_layer] + fake_B1_tokens = self.mutil_fake_B1_tokens[self.opt.which_D_layer] + fake_B0_tokens = self.cat_results(fake_B0_tokens, self.opt.adj_size_list) + fake_B1_tokens = self.cat_results(fake_B1_tokens, self.opt.adj_size_list) + pred_fake0_ViT = self.netD_ViT(fake_B0_tokens) + pred_fake1_ViT = self.netD_ViT(fake_B1_tokens) + 
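+            # both frames' multi-scale fake tokens are scored by the ViT-token discriminator;
+            # the generator loss below averages the two and weights them by lambda_GAN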
+    def compute_D_loss(self):
+        """Calculate GAN loss for the discriminator"""
+        lambda_D_ViT = self.opt.lambda_D_ViT
+        fake_B0_tokens = self.mutil_fake_B0_tokens[self.opt.which_D_layer].detach()
+        fake_B1_tokens = self.mutil_fake_B1_tokens[self.opt.which_D_layer].detach()
+        real_B0_tokens = self.mutil_real_B0_tokens[self.opt.which_D_layer]
+        real_B1_tokens = self.mutil_real_B1_tokens[self.opt.which_D_layer]
+
+        fake_B0_tokens = self.cat_results(fake_B0_tokens, self.opt.adj_size_list)
+        fake_B1_tokens = self.cat_results(fake_B1_tokens, self.opt.adj_size_list)
+        real_B0_tokens = self.cat_results(real_B0_tokens, self.opt.adj_size_list)
+        real_B1_tokens = self.cat_results(real_B1_tokens, self.opt.adj_size_list)
+
+        pred_fake0_ViT = self.netD_ViT(fake_B0_tokens)
+        pred_fake1_ViT = self.netD_ViT(fake_B1_tokens)
+        self.loss_D_fake_ViT = (self.criterionGAN(pred_fake0_ViT, False).mean() + self.criterionGAN(pred_fake1_ViT, False).mean()) * 0.5 * lambda_D_ViT
+
+        pred_real0_ViT = self.netD_ViT(real_B0_tokens)
+        pred_real1_ViT = self.netD_ViT(real_B1_tokens)
+        self.loss_D_real_ViT = (self.criterionGAN(pred_real0_ViT, True).mean() + self.criterionGAN(pred_real1_ViT, True).mean()) * 0.5 * lambda_D_ViT
+
+        self.loss_D_ViT = (self.loss_D_fake_ViT + self.loss_D_real_ViT) * 0.5
+        return self.loss_D_ViT
+
+    def compute_G_loss(self):
+        if self.opt.lambda_GAN > 0.0:
+            fake_B0_tokens = self.mutil_fake_B0_tokens[self.opt.which_D_layer]
+            fake_B1_tokens = self.mutil_fake_B1_tokens[self.opt.which_D_layer]
+            fake_B0_tokens = self.cat_results(fake_B0_tokens, self.opt.adj_size_list)
+            fake_B1_tokens = self.cat_results(fake_B1_tokens, self.opt.adj_size_list)
+            pred_fake0_ViT = self.netD_ViT(fake_B0_tokens)
+            pred_fake1_ViT = self.netD_ViT(fake_B1_tokens)
+            self.loss_G_GAN_ViT = (self.criterionGAN(pred_fake0_ViT, True) + self.criterionGAN(pred_fake1_ViT, True)) * 0.5 * self.opt.lambda_GAN
+        else:
+            self.loss_G_GAN_ViT = 0.0
+
+        if self.opt.lambda_global > 0.0 or self.opt.lambda_spatial > 0.0:
+            self.loss_global, self.loss_spatial = self.calculate_attention_loss()
+        else:
+            self.loss_global, self.loss_spatial = 0.0, 0.0
+
+        if self.opt.lambda_motion > 0.0:
+            self.loss_motion = 0.0
+            for real_A0_tokens, real_A1_tokens, fake_B0_tokens, fake_B1_tokens in zip(self.mutil_real_A0_tokens, self.mutil_real_A1_tokens, self.mutil_fake_B0_tokens, self.mutil_fake_B1_tokens):
+                A0_B1 = real_A0_tokens.bmm(fake_B1_tokens.permute(0, 2, 1))
+                B0_A1 = fake_B0_tokens.bmm(real_A1_tokens.permute(0, 2, 1))
+                cos_dis_global = F.cosine_similarity(A0_B1, B0_A1, dim=-1)
+                self.loss_motion += self.criterionL1(torch.ones_like(cos_dis_global), cos_dis_global).mean()
+        else:
+            self.loss_motion = 0.0
+
+        self.loss_G = self.loss_G_GAN_ViT + self.loss_global + self.loss_spatial + self.loss_motion
+        return self.loss_G
+
+    def calculate_attention_loss(self):
+        mutil_real_A0_tokens = self.mutil_real_A0_tokens
+        mutil_real_A1_tokens = self.mutil_real_A1_tokens
+        mutil_fake_B0_tokens = self.mutil_fake_B0_tokens
+        mutil_fake_B1_tokens = self.mutil_fake_B1_tokens
+
+        if self.opt.lambda_global > 0.0:
+            loss_global = self.calculate_similarity(mutil_real_A0_tokens, mutil_fake_B0_tokens) + self.calculate_similarity(mutil_real_A1_tokens, mutil_fake_B1_tokens)
+            loss_global *= 0.5
+        else:
+            loss_global = 0.0
+
+        if self.opt.lambda_spatial > 0.0:
+            local_nums = self.opt.local_nums
+            tokens_cnt = 576
+            local_id = np.random.permutation(tokens_cnt)
+            local_id = local_id[:int(min(local_nums, tokens_cnt))]
+
+            mutil_real_A0_local_tokens = self.netPreViT(self.real_A0_resize, self.atten_layers, get_tokens=True, local_id=local_id, side_length=self.opt.side_length)
+            mutil_real_A1_local_tokens = self.netPreViT(self.real_A1_resize, self.atten_layers, get_tokens=True, local_id=local_id, side_length=self.opt.side_length)
+            mutil_fake_B0_local_tokens = self.netPreViT(self.fake_B0_resize, self.atten_layers, get_tokens=True, local_id=local_id, side_length=self.opt.side_length)
+            mutil_fake_B1_local_tokens = self.netPreViT(self.fake_B1_resize, self.atten_layers, get_tokens=True, local_id=local_id, side_length=self.opt.side_length)
+
+            loss_spatial = self.calculate_similarity(mutil_real_A0_local_tokens, mutil_fake_B0_local_tokens) + self.calculate_similarity(mutil_real_A1_local_tokens, mutil_fake_B1_local_tokens)
+            loss_spatial *= 0.5
+        else:
+            loss_spatial = 0.0
+
+        return loss_global * self.opt.lambda_global, loss_spatial * self.opt.lambda_spatial
+
+    def calculate_similarity(self, mutil_src_tokens, mutil_tgt_tokens):
+        loss = 0.0
+        n_layers = len(self.atten_layers)
+
+        for src_tokens, tgt_tokens in zip(mutil_src_tokens, mutil_tgt_tokens):
+            src_tgt = src_tokens.bmm(tgt_tokens.permute(0, 2, 1))
+            tgt_src = tgt_tokens.bmm(src_tokens.permute(0, 2, 1))
+            cos_dis_global = F.cosine_similarity(src_tgt, tgt_src, dim=-1)
+            loss += self.criterionL1(torch.ones_like(cos_dis_global), cos_dis_global).mean()
+
+        loss = loss / n_layers
+        return loss
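The cross-similarity above compares each token's affinity row in the two directions src-to-tgt and tgt-to-src; as a quick sanity check (random tensors, no model involved), identical source and target token sets yield a loss of exactly zero:

import torch
import torch.nn.functional as F

src = torch.randn(2, 576, 768)
tgt = src.clone()                           # identical token sets
a = src.bmm(tgt.permute(0, 2, 1))           # [2, 576, 576] affinity matrix
b = tgt.bmm(src.permute(0, 2, 1))
cos = F.cosine_similarity(a, b, dim=-1)     # [2, 576], all ones when a == b
print(torch.nn.L1Loss()(torch.ones_like(cos), cos).item())  # 0.0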
diff --git a/models/roma_single_model.py b/models/roma_single_model.py
new file mode 100644
index 0000000..3c94d86
--- /dev/null
+++ b/models/roma_single_model.py
@@ -0,0 +1,272 @@
+import numpy as np
+import torch
+from .base_model import BaseModel
+from . import networks
+from .patchnce import PatchNCELoss
+import util.util as util
+import timm
+import time
+import torch.nn.functional as F
+import sys
+from functools import partial
+import torch.nn as nn
+import math
+
+from torchvision.transforms import transforms as tfs
+
+class ROMASingleModel(BaseModel):
+
+    @staticmethod
+    def modify_commandline_options(parser, is_train=True):
+        """Configures options specific to the single-image ROMA model."""
+        parser.add_argument('--adj_size_list', type=list, default=[2, 4, 6, 8, 12], help='different scales of perception field')
+        parser.add_argument('--lambda_mlp', type=float, default=1.0, help='multiplier on the discriminator learning rate')
+        parser.add_argument('--lambda_motion', type=float, default=1.0, help='weight for Temporal Consistency')
+        parser.add_argument('--lambda_D_ViT', type=float, default=1.0, help='weight for discriminator')
+        parser.add_argument('--lambda_GAN', type=float, default=1.0, help='weight for GAN loss: GAN(G(X))')
+        parser.add_argument('--lambda_global', type=float, default=1.0, help='weight for Global Structural Consistency')
+        parser.add_argument('--lambda_spatial', type=float, default=1.0, help='weight for Local Structural Consistency')
+        parser.add_argument('--atten_layers', type=str, default='1,3,5', help='compute Cross-Similarity on which layers')
+        parser.add_argument('--local_nums', type=int, default=256)
+        parser.add_argument('--which_D_layer', type=int, default=-1)
+        parser.add_argument('--side_length', type=int, default=7)
+
+        parser.set_defaults(pool_size=0)
+
+        opt, _ = parser.parse_known_args()
+
+        return parser
+
+    def __init__(self, opt):
+        BaseModel.__init__(self, opt)
+
+        self.loss_names = ['G_GAN_ViT', 'D_real_ViT', 'D_fake_ViT', 'global', 'spatial']
+        self.visual_names = ['real_A', 'fake_B', 'real_B']
+        self.atten_layers = [int(i) for i in self.opt.atten_layers.split(',')]
+
+        if self.isTrain:
+            self.model_names = ['G', 'D_ViT']
+        else:  # during test time, only load G
+            self.model_names = ['G']
+
+        # define networks (both generator and discriminator)
+        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.normG, not opt.no_dropout, opt.init_type, opt.init_gain, opt.no_antialias, opt.no_antialias_up, self.gpu_ids, opt)
+
+        if self.isTrain:
+            self.netD_ViT = networks.MLPDiscriminator().to(self.device)
+            # self.netPreViT = timm.create_model("vit_base_patch32_384", pretrained=True).to(self.device)
+            self.netPreViT = timm.create_model("vit_base_patch16_384", pretrained=True).to(self.device)
+
+            self.norm = F.softmax
+            self.resize = tfs.Resize(size=(384, 384))
+            # self.resize = tfs.Resize(size=(224, 224))
+
+            # define loss functions
+            self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
+            self.criterionNCE = []
+            for atten_layer in self.atten_layers:
+                self.criterionNCE.append(PatchNCELoss(opt).to(self.device))
+
+            self.criterionL1 = torch.nn.L1Loss().to(self.device)
+            self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
+            self.optimizer_D_ViT = torch.optim.Adam(self.netD_ViT.parameters(), lr=opt.lr * opt.lambda_mlp, betas=(opt.beta1, opt.beta2))
+            self.optimizers.append(self.optimizer_G)
+            self.optimizers.append(self.optimizer_D_ViT)
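netPreViT is called throughout with extra arguments (get_tokens, local_id, side_length) that a stock timm ViT forward() does not accept, so the bundled timm copy is evidently modified. For reference only, a rough stock-timm approximation of the same per-layer patch tokens using forward hooks; the helper name is hypothetical, and the layer indices and shapes are assumed from the options above:

import timm
import torch

def intermediate_patch_tokens(model, x, layer_ids):
    """Collect patch tokens (CLS stripped) after the given transformer blocks."""
    outs = []
    hooks = [model.blocks[i].register_forward_hook(
        lambda mod, inp, out: outs.append(out[:, 1:, :])) for i in layer_ids]
    with torch.no_grad():
        model(x)
    for h in hooks:
        h.remove()
    return outs

vit = timm.create_model('vit_base_patch16_384', pretrained=False).eval()
tokens = intermediate_patch_tokens(vit, torch.randn(1, 3, 384, 384), [1, 3, 5])
print([t.shape for t in tokens])  # 3 x torch.Size([1, 576, 768])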
+    def data_dependent_initialize(self, data):
+        """
+        The feature network netF is defined in terms of the shape of the intermediate, extracted
+        features of the encoder portion of netG. Because of this, the weights of netF are
+        initialized at the first feedforward pass with some input images.
+        Please also see PatchSampleF.create_mlp(), which is called at the first forward() call.
+        """
+        pass
+
+    def optimize_parameters(self):
+        # forward
+        self.forward()
+
+        # update D
+        self.set_requires_grad(self.netD_ViT, True)
+        self.optimizer_D_ViT.zero_grad()
+        self.loss_D = self.compute_D_loss()
+        self.loss_D.backward()
+        self.optimizer_D_ViT.step()
+
+        # update G
+        self.set_requires_grad(self.netD_ViT, False)
+        self.optimizer_G.zero_grad()
+        self.loss_G = self.compute_G_loss()
+        self.loss_G.backward()
+        self.optimizer_G.step()
+
+    def set_input(self, input):
+        """Unpack input data from the dataloader and perform necessary pre-processing steps.
+        Parameters:
+            input (dict): include the data itself and its metadata information.
+        The option 'direction' can be used to swap domain A and domain B.
+        """
+        AtoB = self.opt.direction == 'AtoB'
+        self.real_A = input['A' if AtoB else 'B'].to(self.device)
+        self.real_B = input['B' if AtoB else 'A'].to(self.device)
+        self.image_paths = input['A_paths' if AtoB else 'B_paths']
+
+    def forward(self):
+        """Run forward pass; called by both <optimize_parameters> and <test>."""
+        self.fake_B = self.netG(self.real_A)
+
+        if self.opt.isTrain:
+            real_A = self.real_A
+            real_B = self.real_B
+            fake_B = self.fake_B
+            self.real_A_resize = self.resize(real_A)
+            real_B = self.resize(real_B)
+            self.fake_B_resize = self.resize(fake_B)
+            self.mutil_real_A_tokens = self.netPreViT(self.real_A_resize, self.atten_layers, get_tokens=True)
+            self.mutil_real_B_tokens = self.netPreViT(real_B, self.atten_layers, get_tokens=True)
+            self.mutil_fake_B_tokens = self.netPreViT(self.fake_B_resize, self.atten_layers, get_tokens=True)
+    def tokens_concat(self, origin_tokens, adjacent_size):
+        adj_size = adjacent_size
+        B, token_num, C = origin_tokens.shape[0], origin_tokens.shape[1], origin_tokens.shape[2]
+        S = int(math.sqrt(token_num))
+        if S * S != token_num:
+            raise ValueError('tokens_concat expects a square token grid, got %d tokens' % token_num)
+        token_map = origin_tokens.clone().reshape(B, S, S, C)
+        cut_patch_list = []
+        for i in range(0, S, adj_size):
+            for j in range(0, S, adj_size):
+                # Clamp each pooling window to the grid edge.
+                i_right = min(i + adj_size, S)
+                j_right = min(j + adj_size, S)
+                cut_patch = token_map[:, i:i_right, j:j_right, :]
+                cut_patch = cut_patch.reshape(B, -1, C)
+                cut_patch = torch.mean(cut_patch, dim=1, keepdim=True)
+                cut_patch_list.append(cut_patch)
+
+        result = torch.cat(cut_patch_list, dim=1)
+        return result
+
+    def cat_results(self, origin_tokens, adj_size_list):
+        res_list = [origin_tokens]
+        for ad_s in adj_size_list:
+            cat_result = self.tokens_concat(origin_tokens, ad_s)
+            res_list.append(cat_result)
+
+        result = torch.cat(res_list, dim=1)
+        return result
+    def compute_D_loss(self):
+        """Calculate GAN loss for the discriminator"""
+        lambda_D_ViT = self.opt.lambda_D_ViT
+        fake_B_tokens = self.mutil_fake_B_tokens[self.opt.which_D_layer].detach()
+        real_B_tokens = self.mutil_real_B_tokens[self.opt.which_D_layer]
+
+        fake_B_tokens = self.cat_results(fake_B_tokens, self.opt.adj_size_list)
+        real_B_tokens = self.cat_results(real_B_tokens, self.opt.adj_size_list)
+
+        pred_fake_ViT = self.netD_ViT(fake_B_tokens)
+        self.loss_D_fake_ViT = self.criterionGAN(pred_fake_ViT, False).mean() * lambda_D_ViT
+
+        pred_real_ViT = self.netD_ViT(real_B_tokens)
+        self.loss_D_real_ViT = self.criterionGAN(pred_real_ViT, True).mean() * lambda_D_ViT
+
+        self.loss_D_ViT = (self.loss_D_fake_ViT + self.loss_D_real_ViT) * 0.5
+        return self.loss_D_ViT
+
+    def compute_G_loss(self):
+        if self.opt.lambda_GAN > 0.0:
+            fake_B_tokens = self.mutil_fake_B_tokens[self.opt.which_D_layer]
+            fake_B_tokens = self.cat_results(fake_B_tokens, self.opt.adj_size_list)
+            pred_fake_ViT = self.netD_ViT(fake_B_tokens)
+            self.loss_G_GAN_ViT = self.criterionGAN(pred_fake_ViT, True) * self.opt.lambda_GAN
+        else:
+            self.loss_G_GAN_ViT = 0.0
+
+        if self.opt.lambda_global > 0.0 or self.opt.lambda_spatial > 0.0:
+            self.loss_global, self.loss_spatial = self.calculate_attention_loss()
+        else:
+            self.loss_global, self.loss_spatial = 0.0, 0.0
+
+        self.loss_G = self.loss_G_GAN_ViT + self.loss_global + self.loss_spatial
+        return self.loss_G
+
+    def calculate_attention_loss(self):
+        mutil_real_A_tokens = self.mutil_real_A_tokens
+        mutil_fake_B_tokens = self.mutil_fake_B_tokens
+
+        if self.opt.lambda_global > 0.0:
+            loss_global = self.calculate_similarity(mutil_real_A_tokens, mutil_fake_B_tokens)
+        else:
+            loss_global = 0.0
+
+        if self.opt.lambda_spatial > 0.0:
+            local_nums = self.opt.local_nums
+            tokens_cnt = 576
+            local_id = np.random.permutation(tokens_cnt)
+            local_id = local_id[:int(min(local_nums, tokens_cnt))]
+
+            mutil_real_A_local_tokens = self.netPreViT(self.real_A_resize, self.atten_layers, get_tokens=True, local_id=local_id, side_length=self.opt.side_length)
+            mutil_fake_B_local_tokens = self.netPreViT(self.fake_B_resize, self.atten_layers, get_tokens=True, local_id=local_id, side_length=self.opt.side_length)
+
+            loss_spatial = self.calculate_similarity(mutil_real_A_local_tokens, mutil_fake_B_local_tokens)
+        else:
+            loss_spatial = 0.0
+
+        return loss_global * self.opt.lambda_global, loss_spatial * self.opt.lambda_spatial
+
+    def calculate_similarity(self, mutil_src_tokens, mutil_tgt_tokens):
+        loss = 0.0
+        n_layers = len(self.atten_layers)
+
+        for src_tokens, tgt_tokens in zip(mutil_src_tokens, mutil_tgt_tokens):
+            src_tgt = src_tokens.bmm(tgt_tokens.permute(0, 2, 1))
+            tgt_src = tgt_tokens.bmm(src_tokens.permute(0, 2, 1))
+            cos_dis_global = F.cosine_similarity(src_tgt, tgt_src, dim=-1)
+            loss += self.criterionL1(torch.ones_like(cos_dis_global), cos_dis_global).mean()
+
+        loss = loss / n_layers
+        return loss
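The spatial term feeds the ViT a fixed random subset of the 576 grid positions (local_id) plus a neighbourhood size (side_length); their exact semantics live in the modified ViT forward, not in this patch. The index sampling and gathering step itself is just:

import numpy as np
import torch

tokens_cnt, local_nums = 576, 256
local_id = np.random.permutation(tokens_cnt)[:local_nums]  # same ids for real and fake
tokens = torch.randn(1, tokens_cnt, 768)
local = tokens[:, local_id, :]                             # gathered subset, [1, 256, 768]
print(local.shape)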
diff --git a/models/self_build.py b/models/self_build.py
new file mode 100644
index 0000000..1cb0f37
--- /dev/null
+++ b/models/self_build.py
@@ -0,0 +1,655 @@
+import numpy as np
+import math
+import timm
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torchvision.transforms import GaussianBlur
+from .base_model import BaseModel
+from . import networks
+from .patchnce import PatchNCELoss
+import util.util as util
+
+from torchvision.transforms import transforms as tfs
+
+def warp(image, flow):
+    """Optical-flow-based image warping.
+    Args:
+        image: [B, C, H, W] input image
+        flow:  [B, 2, H, W] flow field (x/y displacements)
+    Returns:
+        warped: [B, C, H, W] warped image
+    """
+    B, C, H, W = image.shape
+    # Build the pixel coordinate grid: one (x, y) pair per pixel, shape [2, H, W].
+    grid_y, grid_x = torch.meshgrid(torch.arange(H), torch.arange(W))
+    grid = torch.stack((grid_x, grid_y), dim=0).float().to(image.device)  # [2, H, W]
+    grid = grid.unsqueeze(0).repeat(B, 1, 1, 1)  # [B, 2, H, W]
+
+    # Apply the flow displacement, then normalize coordinates to [-1, 1].
+    new_grid = grid + flow
+    new_grid[:, 0, :, :] = 2.0 * new_grid[:, 0, :, :] / (W - 1) - 1.0  # x direction
+    new_grid[:, 1, :, :] = 2.0 * new_grid[:, 1, :, :] / (H - 1) - 1.0  # y direction
+    new_grid = new_grid.permute(0, 2, 3, 1)  # [B, H, W, 2]
+
+    # Bilinear sampling
+    return F.grid_sample(image, new_grid, align_corners=True)
+
+# Temporal-normalization loss (Eq. 10)
+def compute_ctn_loss(G, x, F_content):
+    """Content-aware temporal normalization loss.
+    Args:
+        G: generator
+        x: input infrared image [B, C, H, W]
+        F_content: generated flow field [B, 2, H, W]
+    """
+    # Translate to the visible domain
+    y_fake = G(x)  # [B, 3, H, W]
+
+    # Warp the translated result with the flow
+    warped_fake = warp(y_fake, F_content)  # [B, 3, H, W]
+
+    # Warp the input with the same flow, then translate
+    warped_x = warp(x, F_content)  # [B, C, H, W]
+    y_fake_warped = G(warped_x)  # [B, 3, H, W]
+
+    # L2 between "translate then warp" and "warp then translate"
+    loss = F.mse_loss(warped_fake, y_fake_warped)
+    return loss
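A quick property check of warp(): with an all-zero flow field, grid_sample samples every pixel at its own location, so the output equals the input up to interpolation error. A minimal standalone sketch of the same grid construction:

import torch
import torch.nn.functional as F

B, C, H, W = 1, 3, 6, 8
img = torch.rand(B, C, H, W)
flow = torch.zeros(B, 2, H, W)     # zero displacement everywhere

gy, gx = torch.meshgrid(torch.arange(H), torch.arange(W))
grid = torch.stack((gx, gy), 0).float().unsqueeze(0) + flow    # [1, 2, H, W]
grid[:, 0] = 2 * grid[:, 0] / (W - 1) - 1                      # normalize x
grid[:, 1] = 2 * grid[:, 1] / (H - 1) - 1                      # normalize y
out = F.grid_sample(img, grid.permute(0, 2, 3, 1), align_corners=True)
print(torch.allclose(out, img, atol=1e-6))                     # True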
+class ContentAwareOptimization(nn.Module):
+    def __init__(self, lambda_inc=2.0, eta_ratio=0.4):
+        super().__init__()
+        self.lambda_inc = lambda_inc  # weight-boost coefficient
+        self.eta_ratio = eta_ratio    # fraction of patches selected as content-rich
+
+    def compute_cosine_similarity(self, gradients):
+        """Cosine similarity between each patch gradient and the mean gradient.
+        Args:
+            gradients: [B, N, D] per-patch gradients of the discriminator output (N = w*h)
+        Returns:
+            cosine_sim: [B, N] cosine similarity per patch
+        """
+        mean_grad = torch.mean(gradients, dim=1, keepdim=True)  # [B, 1, D]
+        cosine_sim = F.cosine_similarity(gradients, mean_grad, dim=2)  # [B, N]
+        return cosine_sim
+
+    def generate_weight_map(self, gradients_real, gradients_fake):
+        """Build the content-aware weight maps.
+        Args:
+            gradients_real: [B, N, D] discriminator gradients for real images
+            gradients_fake: [B, N, D] discriminator gradients for generated images
+        Returns:
+            weight_real: [B, N] weight map for real images
+            weight_fake: [B, N] weight map for generated images
+        """
+        # Cosine similarity of the real patches (Eq. 5)
+        cosine_real = self.compute_cosine_similarity(gradients_real)  # [B, N]
+        # Cosine similarity of the generated patches
+        cosine_fake = self.compute_cosine_similarity(gradients_fake)  # [B, N]
+
+        # Select the content-rich regions: the eta_ratio fraction with the lowest similarity
+        k = int(self.eta_ratio * cosine_real.shape[1])
+
+        # Weight map for the real images: boost the least-aligned patches (Eq. 6)
+        _, real_indices = torch.topk(-cosine_real, k, dim=1)
+        weight_real = torch.ones_like(cosine_real)
+        for b in range(cosine_real.shape[0]):
+            weight_real[b, real_indices[b]] = self.lambda_inc / (1e-6 + torch.abs(cosine_real[b, real_indices[b]]))
+
+        # Weight map for the generated images (same rule)
+        _, fake_indices = torch.topk(-cosine_fake, k, dim=1)
+        weight_fake = torch.ones_like(cosine_fake)
+        for b in range(cosine_fake.shape[0]):
+            weight_fake[b, fake_indices[b]] = self.lambda_inc / (1e-6 + torch.abs(cosine_fake[b, fake_indices[b]]))
+
+        return weight_real, weight_fake
+
+    def forward(self, D_real, D_fake, real_scores, fake_scores):
+        """Content-aware adversarial loss.
+        Args:
+            D_real: discriminator features for real images [B, C, H, W]
+            D_fake: discriminator features for generated images [B, C, H, W]
+            real_scores: discriminator predictions for real images [B, N] (N = H*W)
+            fake_scores: discriminator predictions for generated images [B, N]
+        Returns:
+            loss_co_adv: content-aware adversarial loss
+        """
+        B, C, H, W = D_real.shape
+        N = H * W
+
+        # Register hooks to capture the per-patch gradients
+        gradients_real = []
+        gradients_fake = []
+
+        def hook_real(grad):
+            gradients_real.append(grad.detach().view(B, N, -1))
+
+        def hook_fake(grad):
+            gradients_fake.append(grad.detach().view(B, N, -1))
+
+        D_real.register_hook(hook_real)
+        D_fake.register_hook(hook_fake)
+
+        # Evaluate the plain adversarial loss only to trigger the backward pass
+        loss_real = torch.mean(torch.log(real_scores + 1e-8))
+        loss_fake = torch.mean(torch.log(1 - fake_scores + 1e-8))
+        # Tiny dummy term tied to D_real / D_fake so their hooks actually receive gradients
+        loss_dummy = 1e-8 * (D_real.sum() + D_fake.sum())
+        total_loss = loss_real + loss_fake + loss_dummy
+        total_loss.backward(retain_graph=True)
+
+        # Collect the captured gradients
+        gradients_real = gradients_real[0]  # [B, N, D]
+        gradients_fake = gradients_fake[0]  # [B, N, D]
+
+        # Build the weight maps
+        self.weight_real, self.weight_fake = self.generate_weight_map(gradients_real, gradients_fake)
+
+        # Re-weight the adversarial terms
+        loss_co_real = torch.mean(self.weight_real * torch.log(real_scores + 1e-8))
+        loss_co_fake = torch.mean(self.weight_fake * torch.log(1 - fake_scores + 1e-8))
+
+        # Final content-aware adversarial loss
+        loss_co_adv = -(loss_co_real + loss_co_fake)
+        return loss_co_adv
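A toy run of the selection rule in generate_weight_map, on stand-in gradients: the eta_ratio fraction of patches whose gradient agrees least with the mean gradient get their weight boosted to lambda_inc / |cos|, everything else stays at 1:

import torch
import torch.nn.functional as F

B, N, D = 1, 6, 4
grads = torch.randn(B, N, D)                                          # stand-in patch gradients
cos = F.cosine_similarity(grads, grads.mean(1, keepdim=True), dim=2)  # [1, 6]
k = int(0.4 * N)                                                      # eta_ratio = 0.4 -> 2 of 6 patches
_, idx = torch.topk(-cos, k, dim=1)                                   # least-aligned patches
w = torch.ones_like(cos)
w[0, idx[0]] = 2.0 / (1e-6 + cos[0, idx[0]].abs())                    # lambda_inc = 2.0
print(cos)
print(w)   # boosted exactly at the selected indices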
+class ContentAwareTemporalNorm(nn.Module):
+    def __init__(self, gamma_stride=0.1, kernel_size=21, sigma=5.0):
+        super().__init__()
+        self.gamma_stride = gamma_stride  # controls the overall motion magnitude
+        self.smoother = GaussianBlur(kernel_size, sigma=sigma)  # Gaussian smoothing layer
+
+    def forward(self, weight_map):
+        """Generate a content-aware pseudo flow field.
+        Args:
+            weight_map: [B, 1, H, W] weight map (from the content-aware optimization module)
+        Returns:
+            F_content: [B, 2, H, W] generated flow field (x/y displacements)
+        """
+        B, _, H, W = weight_map.shape
+
+        # 1. Normalize the weight map: keep the relative strength of each region
+        #    while bounding the value range.
+        weight_norm = F.normalize(weight_map, p=1, dim=(2, 3))  # L1 normalization [B, 1, H, W]
+
+        # 2. Gaussian noise of the same size as the flow field.
+        z = torch.randn(B, 2, H, W, device=weight_map.device)  # [B, 2, H, W]
+
+        # 3. Compose the raw flow: expand the weight map to two channels
+        #    (the x/y directions share the same weights), Eq. 9.
+        weight_expanded = weight_norm.expand(-1, 2, -1, -1)  # [B, 2, H, W]
+        F_raw = self.gamma_stride * weight_expanded * z  # [B, 2, H, W]
+
+        # 4. Smooth each channel independently to keep the flow spatially continuous.
+        F_smooth = self.smoother(F_raw)  # [B, 2, H, W]
+
+        # 5. Optional dynamic-range adjustment: bound the flow magnitude
+        #    to avoid extreme displacements.
+        F_content = torch.tanh(F_smooth)  # squashed into [-1, 1]
+
+        return F_content
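The same recipe as ContentAwareTemporalNorm.forward, run end to end on a random weight map (with the L1 normalization written out explicitly): the result is a smooth flow field bounded to [-1, 1] by the tanh.

import torch
from torchvision.transforms import GaussianBlur

B, H, W = 1, 32, 32
wmap = torch.rand(B, 1, H, W)
wnorm = wmap / wmap.sum(dim=(2, 3), keepdim=True)       # L1-normalize per image
z = torch.randn(B, 2, H, W)                             # noise, one channel per flow axis
flow = GaussianBlur(21, sigma=5.0)(0.1 * wnorm.expand(-1, 2, -1, -1) * z)
flow = torch.tanh(flow)                                 # bound the displacements
print(flow.shape, bool(flow.abs().max() <= 1.0))        # torch.Size([1, 2, 32, 32]) True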
+ """ + #bs_per_gpu = data["A"].size(0) // max(len(self.opt.gpu_ids), 1) + #self.set_input(data) + #self.real_A = self.real_A[:bs_per_gpu] + #self.real_B = self.real_B[:bs_per_gpu] + #self.forward() # compute fake images: G(A) + #if self.opt.isTrain: + # + # self.compute_G_loss().backward() + # self.compute_D_loss().backward() + # self.compute_E_loss().backward() + # if self.opt.lambda_NCE > 0.0: + # self.optimizer_F = torch.optim.Adam(self.netF.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, self.opt.beta2)) + # self.optimizers.append(self.optimizer_F) + pass + + def optimize_parameters(self): + # forward + self.forward() + + self.netG.train() + self.netE.train() + self.netD.train() + + # update D + self.set_requires_grad(self.netD, True) + self.optimizer_D.zero_grad() + self.loss_D = self.compute_D_loss() + self.loss_D.backward() + self.optimizer_D.step() + + self.set_requires_grad(self.netE, True) + self.optimizer_E.zero_grad() + self.loss_E = self.compute_E_loss() + self.loss_E.backward() + self.optimizer_E.step() + + # update G + self.set_requires_grad(self.netD, False) + self.set_requires_grad(self.netE, False) + + self.optimizer_G.zero_grad() + + self.loss_G = self.compute_G_loss() + self.loss_G.backward() + self.optimizer_G.step() + + + def set_input(self, input): + """Unpack input data from the dataloader and perform necessary pre-processing steps. + Parameters: + input (dict): include the data itself and its metadata information. + The option 'direction' can be used to swap domain A and domain B. + """ + AtoB = self.opt.direction == 'AtoB' + self.real_A0 = input['A0' if AtoB else 'B0'].to(self.device) + self.real_A1 = input['A1' if AtoB else 'B1'].to(self.device) + self.real_B0 = input['B0' if AtoB else 'A0'].to(self.device) + self.real_B1 = input['B1' if AtoB else 'A1'].to(self.device) + self.image_paths = input['A_paths' if AtoB else 'B_paths'] + + + def tokens_concat(self, origin_tokens, adjacent_size): + adj_size = adjacent_size + B, token_num, C = origin_tokens.shape[0], origin_tokens.shape[1], origin_tokens.shape[2] + S = int(math.sqrt(token_num)) + if S * S != token_num: + print('Error! 
+    def __init__(self, opt):
+        """Initialize the CTNx model."""
+        BaseModel.__init__(self, opt)
+
+        # training losses to print
+        self.loss_names = ['G_GAN_1', 'D_real_1', 'D_fake_1', 'G_1', 'NCE_1', 'SB_1',
+                           'G_2']
+        self.visual_names = ['real_A', 'real_A_noisy', 'fake_B', 'real_B']
+        self.atten_layers = [int(i) for i in self.opt.atten_layers.split(',')]
+
+        if self.opt.phase == 'test':
+            self.visual_names = ['real']
+            for NFE in range(self.opt.num_timesteps):
+                fake_name = 'fake_' + str(NFE + 1)
+                self.visual_names.append(fake_name)
+        self.nce_layers = [int(i) for i in self.opt.nce_layers.split(',')]
+
+        if opt.nce_idt and self.isTrain:
+            self.loss_names += ['NCE_Y']
+            self.visual_names += ['idt_B']
+
+        if self.isTrain:
+            self.model_names = ['G1', 'F1', 'D1', 'E1',
+                                'G2']
+        else:
+            self.model_names = ['G1']
+
+        # define networks
+        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.normG, not opt.no_dropout, opt.init_type, opt.init_gain, opt.no_antialias, opt.no_antialias_up, self.gpu_ids, opt)
+
+        if self.isTrain:
+            self.netD = networks.define_D(opt.output_nc, opt.ndf, opt.netD, opt.n_layers_D, opt.normD, opt.init_type, opt.init_gain, opt.no_antialias, self.gpu_ids, opt)
+            self.netE = networks.define_D(opt.output_nc * 4, opt.ndf, opt.netD, opt.n_layers_D, opt.normD, opt.init_type, opt.init_gain, opt.no_antialias, self.gpu_ids, opt)
+
+            self.resize = tfs.Resize(size=(384, 384))
+
+            # pretrained ViT for token extraction
+            self.netPreViT = timm.create_model("vit_base_patch16_384", pretrained=True).to(self.device)
+
+            # define loss functions
+            self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
+            self.criterionNCE = []
+            for nce_layer in self.nce_layers:
+                self.criterionNCE.append(PatchNCELoss(opt).to(self.device))
+            self.criterionIdt = torch.nn.L1Loss().to(self.device)
+            self.criterionL1 = torch.nn.L1Loss().to(self.device)  # used by calculate_similarity below
+            self.optimizer_G1 = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
+            self.optimizer_D1 = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
+            self.optimizer_E1 = torch.optim.Adam(self.netE.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
+            self.optimizers = [self.optimizer_G1, self.optimizer_D1, self.optimizer_E1]
+
+            self.cao = ContentAwareOptimization(opt.lambda_inc, opt.eta_ratio)  # content-aware loss weighting
+            self.ctn = ContentAwareTemporalNorm()  # pseudo optical-flow generator
+
+    def data_dependent_initialize(self, data):
+        """
+        The feature network netF is defined in terms of the shape of the intermediate, extracted
+        features of the encoder portion of netG. Because of this, the weights of netF are
+        initialized at the first feedforward pass with some input images.
+        Please also see PatchSampleF.create_mlp(), which is called at the first forward() call.
+        """
+        # bs_per_gpu = data["A"].size(0) // max(len(self.opt.gpu_ids), 1)
+        # self.set_input(data)
+        # self.real_A = self.real_A[:bs_per_gpu]
+        # self.real_B = self.real_B[:bs_per_gpu]
+        # self.forward()  # compute fake images: G(A)
+        # if self.opt.isTrain:
+        #     self.compute_G_loss().backward()
+        #     self.compute_D_loss().backward()
+        #     self.compute_E_loss().backward()
+        #     if self.opt.lambda_NCE > 0.0:
+        #         self.optimizer_F = torch.optim.Adam(self.netF.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, self.opt.beta2))
+        #         self.optimizers.append(self.optimizer_F)
+        pass
+
+    def optimize_parameters(self):
+        # forward
+        self.forward()
+
+        self.netG.train()
+        self.netE.train()
+        self.netD.train()
+
+        # update D
+        self.set_requires_grad(self.netD, True)
+        self.optimizer_D1.zero_grad()
+        self.loss_D = self.compute_D_loss()
+        self.loss_D.backward()
+        self.optimizer_D1.step()
+
+        self.set_requires_grad(self.netE, True)
+        self.optimizer_E1.zero_grad()
+        self.loss_E = self.compute_E_loss()
+        self.loss_E.backward()
+        self.optimizer_E1.step()
+
+        # update G
+        self.set_requires_grad(self.netD, False)
+        self.set_requires_grad(self.netE, False)
+
+        self.optimizer_G1.zero_grad()
+        self.loss_G = self.compute_G_loss()
+        self.loss_G.backward()
+        self.optimizer_G1.step()
+
+    def set_input(self, input):
+        """Unpack input data from the dataloader and perform necessary pre-processing steps.
+        Parameters:
+            input (dict): include the data itself and its metadata information.
+        The option 'direction' can be used to swap domain A and domain B.
+        """
+        AtoB = self.opt.direction == 'AtoB'
+        self.real_A0 = input['A0' if AtoB else 'B0'].to(self.device)
+        self.real_A1 = input['A1' if AtoB else 'B1'].to(self.device)
+        self.real_B0 = input['B0' if AtoB else 'A0'].to(self.device)
+        self.real_B1 = input['B1' if AtoB else 'A1'].to(self.device)
+        self.image_paths = input['A_paths' if AtoB else 'B_paths']
+
+    def tokens_concat(self, origin_tokens, adjacent_size):
+        adj_size = adjacent_size
+        B, token_num, C = origin_tokens.shape[0], origin_tokens.shape[1], origin_tokens.shape[2]
+        S = int(math.sqrt(token_num))
+        if S * S != token_num:
+            raise ValueError('tokens_concat expects a square token grid, got %d tokens' % token_num)
+        token_map = origin_tokens.clone().reshape(B, S, S, C)
+        cut_patch_list = []
+        for i in range(0, S, adj_size):
+            for j in range(0, S, adj_size):
+                # Clamp each pooling window to the grid edge.
+                i_right = min(i + adj_size, S)
+                j_right = min(j + adj_size, S)
+                cut_patch = token_map[:, i:i_right, j:j_right, :]
+                cut_patch = cut_patch.reshape(B, -1, C)
+                cut_patch = torch.mean(cut_patch, dim=1, keepdim=True)
+                cut_patch_list.append(cut_patch)
+
+        result = torch.cat(cut_patch_list, dim=1)
+        return result
+
+    def cat_results(self, origin_tokens, adj_size_list):
+        res_list = [origin_tokens]
+        for ad_s in adj_size_list:
+            cat_result = self.tokens_concat(origin_tokens, ad_s)
+            res_list.append(cat_result)
+
+        result = torch.cat(res_list, dim=1)
+        return result
+    def forward(self):
+        """Run the forward pass to produce the translated images."""
+        if self.opt.isTrain:
+            real_A0 = self.resize(self.real_A0)
+            real_A1 = self.resize(self.real_A1)
+            real_B0 = self.resize(self.real_B0)
+            real_B1 = self.resize(self.real_B1)
+            # ViT tokens of the inputs
+            self.mutil_real_A0_tokens = self.netPreViT(real_A0, self.atten_layers, get_tokens=True)
+            self.mutil_real_A1_tokens = self.netPreViT(real_A1, self.atten_layers, get_tokens=True)
+
+            # Run the SB module once
+
+            # ============ Step 1: build the time grid and pick a time index ============
+            # Compute `times` and draw a random time_idx for the current step.
+            tau = self.opt.tau
+            T = self.opt.num_timesteps
+            incs = np.array([0] + [1 / (i + 1) for i in range(T - 1)])
+            times = np.cumsum(incs)
+            times = times / times[-1]
+            times = 0.5 * times[-1] + 0.5 * times  # map into [0.5, 1]
+            times = np.concatenate([np.zeros(1), times])
+            times = torch.tensor(times).float().cuda()
+            self.times = times
+            bs = self.mutil_real_A0_tokens.size(0)
+            time_idx = (torch.randint(T, size=[1]).cuda() * torch.ones(size=[1]).cuda()).long()
+            self.time_idx = time_idx
+
+            with torch.no_grad():
+                self.netG.eval()
+                # ============ Step 2: multi-step stochastic generation for real_A / real_A2 ============
+                for t in range(self.time_idx.int().item() + 1):
+                    # Increment delta and the inter/scale factors for the per-step interpolation
+                    if t > 0:
+                        delta = times[t] - times[t - 1]
+                        denom = times[-1] - times[t - 1]
+                        inter = (delta / denom).reshape(-1, 1, 1, 1)
+                        scale = (delta * (1 - delta / denom)).reshape(-1, 1, 1, 1)
+
+                    # Stochastic noise updates of Xt and Xt2
+                    Xt = self.mutil_real_A0_tokens if (t == 0) else (1 - inter) * Xt + inter * Xt_1.detach() + \
+                         (scale * tau).sqrt() * torch.randn_like(Xt).to(self.mutil_real_A0_tokens.device)
+                    time_idx = (t * torch.ones(size=[self.mutil_real_A0_tokens.shape[0]]).to(self.mutil_real_A0_tokens.device)).long()
+                    z = torch.randn(size=[self.mutil_real_A0_tokens.shape[0], 4 * self.opt.ngf]).to(self.mutil_real_A0_tokens.device)
+                    self.time = times[time_idx]
+                    Xt_1 = self.netG(Xt, self.time, z)
+
+                    Xt2 = self.mutil_real_A1_tokens if (t == 0) else (1 - inter) * Xt2 + inter * Xt_12.detach() + \
+                          (scale * tau).sqrt() * torch.randn_like(Xt2).to(self.mutil_real_A1_tokens.device)
+                    time_idx = (t * torch.ones(size=[self.mutil_real_A1_tokens.shape[0]]).to(self.mutil_real_A1_tokens.device)).long()
+                    z = torch.randn(size=[self.mutil_real_A1_tokens.shape[0], 4 * self.opt.ngf]).to(self.mutil_real_A1_tokens.device)
+                    Xt_12 = self.netG(Xt2, self.time, z)
+
+                # Keep the denoised intermediates (real_A_noisy etc.) for the next step
+                self.real_A_noisy = Xt.detach()
+                self.real_A_noisy2 = Xt2.detach()
+                # Noise map relative to the clean tokens
+                self.noisy_map = self.real_A_noisy - self.mutil_real_A0_tokens
+
+            # ============ Step 3: assemble the inputs and run the generator ============
+            bs = self.mutil_real_A0_tokens.size(0)
+            z_in = torch.randn(size=[2 * bs, 4 * self.opt.ngf]).to(self.mutil_real_A0_tokens.device)
+            z_in2 = torch.randn(size=[bs, 4 * self.opt.ngf]).to(self.mutil_real_A1_tokens.device)
+            # Concatenate real_A and real_B (when nce_idt=True) and treat real_A_noisy / XtB the same way
+            self.real = self.mutil_real_A0_tokens
+            self.realt = self.real_A_noisy
+
+            if self.opt.flip_equivariance:
+                self.flipped_for_equivariance = self.opt.isTrain and (np.random.random() < 0.5)
+                if self.flipped_for_equivariance:
+                    self.real = torch.flip(self.real, [3])
+                    self.realt = torch.flip(self.realt, [3])
+
+            # Generate the final fake_B / fake_B2 results with netG
+            self.fake_B = self.netG(self.realt, self.time, z_in)
+            self.fake_B2 = self.netG(self.real, self.time, z_in2)
+
+            self.fake_B = self.resize(self.fake_B)
+            self.fake_B2 = self.resize(self.fake_B2)
+
+            self.fake_B0 = self.fake_B
+            self.fake_B1 = self.fake_B2
+
+            # ViT tokens of the generated images
+            self.mutil_fake_B0_tokens = self.netPreViT(self.fake_B, self.atten_layers, get_tokens=True)
+            self.mutil_fake_B1_tokens = self.netPreViT(self.fake_B2, self.atten_layers, get_tokens=True)
+
+        # ============ Step 4: multi-step sampling at inference time ============
+        if self.opt.phase == 'test':
+            tau = self.opt.tau
+            T = self.opt.num_timesteps
+            incs = np.array([0] + [1 / (i + 1) for i in range(T - 1)])
+            times = np.cumsum(incs)
+            times = times / times[-1]
+            times = 0.5 * times[-1] + 0.5 * times
+            times = np.concatenate([np.zeros(1), times])
+            times = torch.tensor(times).float().cuda()
+            self.times = times
+            bs = self.real.size(0)
+            time_idx = (torch.randint(T, size=[1]).cuda() * torch.ones(size=[1]).cuda()).long()
+            self.time_idx = time_idx
+            visuals = []
+            with torch.no_grad():
+                self.netG.eval()
+                for t in range(self.opt.num_timesteps):
+                    if t > 0:
+                        delta = times[t] - times[t - 1]
+                        denom = times[-1] - times[t - 1]
+                        inter = (delta / denom).reshape(-1, 1, 1, 1)
+                        scale = (delta * (1 - delta / denom)).reshape(-1, 1, 1, 1)
+                    Xt = self.mutil_real_A0_tokens if (t == 0) else (1 - inter) * Xt + inter * Xt_1.detach() + (scale * tau).sqrt() * torch.randn_like(Xt).to(self.mutil_real_A0_tokens.device)
+                    time_idx = (t * torch.ones(size=[self.mutil_real_A0_tokens.shape[0]]).to(self.mutil_real_A0_tokens.device)).long()
+                    time = times[time_idx]
+                    z = torch.randn(size=[self.mutil_real_A0_tokens.shape[0], 4 * self.opt.ngf]).to(self.mutil_real_A0_tokens.device)
+                    Xt_1 = self.netG(Xt, time_idx, z)
+
+                    setattr(self, "fake_" + str(t + 1), Xt_1)
+
+        if self.opt.phase == 'train':
+            # Gradients w.r.t. the real image
+            real_gradient = torch.autograd.grad(self.real_B0.sum(), self.real_B0, create_graph=True)[0]
+            # Gradients w.r.t. the generated image
+            fake_gradient = torch.autograd.grad(self.fake_B.sum(), self.fake_B, create_graph=True)[0]
+            # Weight maps from the gradients
+            self.weight_real, self.weight_fake = self.cao.generate_weight_map(real_gradient, fake_gradient)
+
+            # CTN pseudo flow for the generated image
+            self.f_content = self.ctn(self.weight_fake)
+
+            # Add the noise map back onto the generated image
+            self.fake_B_2 = self.fake_B + self.noisy_map
+
+            # Warped image
+            warped_fake_B = warp(self.fake_B, self.f_content)
+
+            # Second pass through the generator
+            self.fake_B_2 = self.netG(warped_fake_B, self.time, z_in)
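The time grid built at the top of forward(), spelled out for T = 5 steps: harmonic increments are cumulative-summed, rescaled to end at 1, mapped affinely into [0.5, 1], and t = 0 is prepended:

import numpy as np

T = 5
incs = np.array([0] + [1 / (i + 1) for i in range(T - 1)])
times = np.cumsum(incs)
times = times / times[-1]
times = 0.5 * times[-1] + 0.5 * times       # affine map into [0.5, 1]
times = np.concatenate([np.zeros(1), times])
print(np.round(times, 2))                   # [0.   0.5  0.74 0.86 0.94 1.  ]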
+    def compute_D_loss(self):
+        """GAN loss for the discriminator"""
+        fake = self.cat_results(self.fake_B.detach(), self.opt.adj_size_list)
+        pred_fake = self.netD(fake, self.time)
+        self.loss_D_fake = self.criterionGAN(pred_fake, False).mean()
+
+        self.pred_real = self.netD(self.real_B0, self.time)
+        loss_D_real = self.criterionGAN(self.pred_real, True)
+        self.loss_D_real = loss_D_real.mean()
+
+        self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
+        return self.loss_D
+
+    def compute_E_loss(self):
+        """Loss for the energy network E"""
+        XtXt_1 = torch.cat([self.real_A_noisy, self.fake_B.detach()], dim=1)
+        XtXt_2 = torch.cat([self.real_A_noisy2, self.fake_B2.detach()], dim=1)
+        temp = torch.logsumexp(self.netE(XtXt_1, self.time, XtXt_2).reshape(-1), dim=0).mean()
+        self.loss_E = -self.netE(XtXt_1, self.time, XtXt_1).mean() + temp + temp ** 2
+
+        return self.loss_E
+
+    def compute_G_loss(self):
+        """GAN and consistency losses for the generator"""
+        bs = self.mutil_real_A0_tokens.size(0)
+        tau = self.opt.tau
+
+        fake = self.fake_B
+        std = torch.rand(size=[1]).item() * self.opt.std
+
+        if self.opt.lambda_GAN > 0.0:
+            pred_fake = self.netD(fake, self.time)
+            self.loss_G_GAN = self.criterionGAN(pred_fake, True).mean() * self.opt.lambda_GAN
+        else:
+            self.loss_G_GAN = 0.0
+        self.loss_SB = 0
+        if self.opt.lambda_SB > 0.0:
+            XtXt_1 = torch.cat([self.real_A_noisy, self.fake_B], dim=1)
+            XtXt_2 = torch.cat([self.real_A_noisy2, self.fake_B2], dim=1)
+
+            bs = self.opt.batch_size
+
+            # Eq. 9
+            ET_XY = self.netE(XtXt_1, self.time, XtXt_1).mean() - torch.logsumexp(self.netE(XtXt_1, self.time, XtXt_2).reshape(-1), dim=0)
+            self.loss_SB = -(self.opt.num_timesteps - self.time[0]) / self.opt.num_timesteps * self.opt.tau * ET_XY
+            self.loss_SB += self.opt.tau * torch.mean((self.real_A_noisy - self.fake_B) ** 2)
+
+        if self.opt.lambda_global > 0.0:
+            loss_global = self.calculate_similarity(self.mutil_real_A0_tokens, self.mutil_fake_B0_tokens) + self.calculate_similarity(self.mutil_real_A1_tokens, self.mutil_fake_B1_tokens)
+            loss_global *= 0.5
+        else:
+            loss_global = 0.0
+
+        if self.opt.lambda_ctn > 0.0:
+            warped_fake_B = warp(self.fake_B, self.f_content)  # use the updated self.f_content
+            self.l2_loss = F.mse_loss(self.fake_B_2, warped_fake_B)  # complete the loss calculation
+
+        self.loss_G = self.loss_G_GAN + self.opt.lambda_SB * self.loss_SB + self.opt.lambda_ctn * self.l2_loss + loss_global * self.opt.lambda_global
+        return self.loss_G
+
+    def calculate_attention_loss(self):
+        mutil_real_A0_tokens = self.mutil_real_A0_tokens
+        mutil_real_A1_tokens = self.mutil_real_A1_tokens
+        mutil_fake_B0_tokens = self.mutil_fake_B0_tokens
+        mutil_fake_B1_tokens = self.mutil_fake_B1_tokens
+
+        if self.opt.lambda_global > 0.0:
+            loss_global = self.calculate_similarity(mutil_real_A0_tokens, mutil_fake_B0_tokens) + self.calculate_similarity(mutil_real_A1_tokens, mutil_fake_B1_tokens)
+            loss_global *= 0.5
+        else:
+            loss_global = 0.0
+
+        if self.opt.lambda_spatial > 0.0:
+            local_nums = self.opt.local_nums
+            tokens_cnt = 576
+            local_id = np.random.permutation(tokens_cnt)
+            local_id = local_id[:int(min(local_nums, tokens_cnt))]
+
+            mutil_real_A0_local_tokens = self.netPreViT(self.resize(self.real_A0), self.atten_layers, get_tokens=True, local_id=local_id, side_length=self.opt.side_length)
+            mutil_real_A1_local_tokens = self.netPreViT(self.resize(self.real_A1), self.atten_layers, get_tokens=True, local_id=local_id, side_length=self.opt.side_length)
+
+            mutil_fake_B0_local_tokens = self.netPreViT(self.resize(self.fake_B0), self.atten_layers, get_tokens=True, local_id=local_id, side_length=self.opt.side_length)
+            mutil_fake_B1_local_tokens = self.netPreViT(self.resize(self.fake_B1), self.atten_layers, get_tokens=True, local_id=local_id, side_length=self.opt.side_length)
+
+            loss_spatial = self.calculate_similarity(mutil_real_A0_local_tokens, mutil_fake_B0_local_tokens) + self.calculate_similarity(mutil_real_A1_local_tokens, mutil_fake_B1_local_tokens)
+            loss_spatial *= 0.5
+        else:
+            loss_spatial = 0.0
+
+        return loss_global * self.opt.lambda_global, loss_spatial * self.opt.lambda_spatial
+
+    def calculate_similarity(self, mutil_src_tokens, mutil_tgt_tokens):
+        loss = 0.0
+        n_layers = len(self.atten_layers)
+
+        for src_tokens, tgt_tokens in zip(mutil_src_tokens, mutil_tgt_tokens):
+            src_tgt = src_tokens.bmm(tgt_tokens.permute(0, 2, 1))
+            tgt_src = tgt_tokens.bmm(src_tokens.permute(0, 2, 1))
+            cos_dis_global = F.cosine_similarity(src_tgt, tgt_src, dim=-1)
+            loss += self.criterionL1(torch.ones_like(cos_dis_global), cos_dis_global).mean()
+
+        loss = loss / n_layers
+        return loss
\ No newline at end of file
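The shape of the compute_E_loss objective, run with a stub energy function (a toy for illustration only, not the model's netE, which also conditions on time): the matched pairing is pushed down while a logsumexp over mismatched pairings of the same batch, plus its square, pushes up:

import torch

netE = lambda a, b: (a * b).sum(dim=1)      # toy energy: inner product
x = torch.randn(4, 8)
x2 = x[torch.randperm(4)]                   # mismatched pairing of the same batch
temp = torch.logsumexp(netE(x, x2), dim=0)
loss_E = -netE(x, x).mean() + temp + temp ** 2
print(loss_E.item())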
diff --git a/models/stylegan_networks.py b/models/stylegan_networks.py
new file mode 100644
index 0000000..a3c625d
--- /dev/null
+++ b/models/stylegan_networks.py
@@ -0,0 +1,914 @@
+"""
+The network architecture is based on the PyTorch implementation of StyleGAN2Encoder.
+Original PyTorch repo: https://github.com/rosinality/style-based-gan-pytorch
+Original StyleGAN2 paper: https://github.com/NVlabs/stylegan2
+We use this network architecture for our single-image training setting.
+"""
+
+import math
+import numpy as np
+import random
+
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+
+def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
+    return F.leaky_relu(input + bias, negative_slope) * scale
+
+
+class FusedLeakyReLU(nn.Module):
+    def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
+        super().__init__()
+        self.bias = nn.Parameter(torch.zeros(1, channel, 1, 1))
+        self.negative_slope = negative_slope
+        self.scale = scale
+
+    def forward(self, input):
+        # print("FusedLeakyReLU: ", input.abs().mean())
+        out = fused_leaky_relu(input, self.bias,
+                               self.negative_slope,
+                               self.scale)
+        # print("FusedLeakyReLU: ", out.abs().mean())
+        return out
+
+
+def upfirdn2d_native(
+    input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
+):
+    _, minor, in_h, in_w = input.shape
+    kernel_h, kernel_w = kernel.shape
+
+    out = input.view(-1, minor, in_h, 1, in_w, 1)
+    out = F.pad(out, [0, up_x - 1, 0, 0, 0, up_y - 1, 0, 0])
+    out = out.view(-1, minor, in_h * up_y, in_w * up_x)
+
+    out = F.pad(
+        out, [max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]
+    )
+    out = out[
+        :,
+        :,
+        max(-pad_y0, 0): out.shape[2] - max(-pad_y1, 0),
+        max(-pad_x0, 0): out.shape[3] - max(-pad_x1, 0),
+    ]
+
+    # out = out.permute(0, 3, 1, 2)
+    out = out.reshape(
+        [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]
+    )
+    w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
+    out = F.conv2d(out, w)
+    out = out.reshape(
+        -1,
+        minor,
+        in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
+        in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
+    )
+    # out = out.permute(0, 2, 3, 1)
+
+    return out[:, :, ::down_y, ::down_x]
+
+
+def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
+    return upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1])
+
+
+class PixelNorm(nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, input):
+        return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)
+
+
+def make_kernel(k):
+    k = torch.tensor(k, dtype=torch.float32)
+
+    if len(k.shape) == 1:
+        k = k[None, :] * k[:, None]
+
+    k /= k.sum()
+
+    return k
+
+
+class Upsample(nn.Module):
+    def __init__(self, kernel, factor=2):
+        super().__init__()
+
+        self.factor = factor
+        kernel = make_kernel(kernel) * (factor ** 2)
+        self.register_buffer('kernel', kernel)
+
+        p = kernel.shape[0] - factor
+
+        pad0 = (p + 1) // 2 + factor - 1
+        pad1 = p // 2
+
+        self.pad = (pad0, pad1)
+
+    def forward(self, input):
+        out = upfirdn2d(input, self.kernel, up=self.factor, down=1,
pad=self.pad) + + return out + + +class Downsample(nn.Module): + def __init__(self, kernel, factor=2): + super().__init__() + + self.factor = factor + kernel = make_kernel(kernel) + self.register_buffer('kernel', kernel) + + p = kernel.shape[0] - factor + + pad0 = (p + 1) // 2 + pad1 = p // 2 + + self.pad = (pad0, pad1) + + def forward(self, input): + out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad) + + return out + + +class Blur(nn.Module): + def __init__(self, kernel, pad, upsample_factor=1): + super().__init__() + + kernel = make_kernel(kernel) + + if upsample_factor > 1: + kernel = kernel * (upsample_factor ** 2) + + self.register_buffer('kernel', kernel) + + self.pad = pad + + def forward(self, input): + out = upfirdn2d(input, self.kernel, pad=self.pad) + + return out + + +class EqualConv2d(nn.Module): + def __init__( + self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True + ): + super().__init__() + + self.weight = nn.Parameter( + torch.randn(out_channel, in_channel, kernel_size, kernel_size) + ) + self.scale = math.sqrt(1) / math.sqrt(in_channel * (kernel_size ** 2)) + + self.stride = stride + self.padding = padding + + if bias: + self.bias = nn.Parameter(torch.zeros(out_channel)) + + else: + self.bias = None + + def forward(self, input): + # print("Before EqualConv2d: ", input.abs().mean()) + out = F.conv2d( + input, + self.weight * self.scale, + bias=self.bias, + stride=self.stride, + padding=self.padding, + ) + # print("After EqualConv2d: ", out.abs().mean(), (self.weight * self.scale).abs().mean()) + + return out + + def __repr__(self): + return ( + f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},' + f' {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})' + ) + + +class EqualLinear(nn.Module): + def __init__( + self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None + ): + super().__init__() + + self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) + + if bias: + self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) + + else: + self.bias = None + + self.activation = activation + + self.scale = (math.sqrt(1) / math.sqrt(in_dim)) * lr_mul + self.lr_mul = lr_mul + + def forward(self, input): + if self.activation: + out = F.linear(input, self.weight * self.scale) + out = fused_leaky_relu(out, self.bias * self.lr_mul) + + else: + out = F.linear( + input, self.weight * self.scale, bias=self.bias * self.lr_mul + ) + + return out + + def __repr__(self): + return ( + f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' + ) + + +class ScaledLeakyReLU(nn.Module): + def __init__(self, negative_slope=0.2): + super().__init__() + + self.negative_slope = negative_slope + + def forward(self, input): + out = F.leaky_relu(input, negative_slope=self.negative_slope) + + return out * math.sqrt(2) + + +class ModulatedConv2d(nn.Module): + def __init__( + self, + in_channel, + out_channel, + kernel_size, + style_dim, + demodulate=True, + upsample=False, + downsample=False, + blur_kernel=[1, 3, 3, 1], + ): + super().__init__() + + self.eps = 1e-8 + self.kernel_size = kernel_size + self.in_channel = in_channel + self.out_channel = out_channel + self.upsample = upsample + self.downsample = downsample + + if upsample: + factor = 2 + p = (len(blur_kernel) - factor) - (kernel_size - 1) + pad0 = (p + 1) // 2 + factor - 1 + pad1 = p // 2 + 1 + + self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor) + + if downsample: + factor = 2 + p = 
(len(blur_kernel) - factor) + (kernel_size - 1) + pad0 = (p + 1) // 2 + pad1 = p // 2 + + self.blur = Blur(blur_kernel, pad=(pad0, pad1)) + + fan_in = in_channel * kernel_size ** 2 + self.scale = math.sqrt(1) / math.sqrt(fan_in) + self.padding = kernel_size // 2 + + self.weight = nn.Parameter( + torch.randn(1, out_channel, in_channel, kernel_size, kernel_size) + ) + + if style_dim is not None and style_dim > 0: + self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) + + self.demodulate = demodulate + + def __repr__(self): + return ( + f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, ' + f'upsample={self.upsample}, downsample={self.downsample})' + ) + + def forward(self, input, style): + batch, in_channel, height, width = input.shape + + if style is not None: + style = self.modulation(style).view(batch, 1, in_channel, 1, 1) + else: + style = torch.ones(batch, 1, in_channel, 1, 1).cuda() + weight = self.scale * self.weight * style + + if self.demodulate: + demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8) + weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) + + weight = weight.view( + batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size + ) + + if self.upsample: + input = input.view(1, batch * in_channel, height, width) + weight = weight.view( + batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size + ) + weight = weight.transpose(1, 2).reshape( + batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size + ) + out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) + _, _, height, width = out.shape + out = out.view(batch, self.out_channel, height, width) + out = self.blur(out) + + elif self.downsample: + input = self.blur(input) + _, _, height, width = input.shape + input = input.view(1, batch * in_channel, height, width) + out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) + _, _, height, width = out.shape + out = out.view(batch, self.out_channel, height, width) + + else: + input = input.view(1, batch * in_channel, height, width) + out = F.conv2d(input, weight, padding=self.padding, groups=batch) + _, _, height, width = out.shape + out = out.view(batch, self.out_channel, height, width) + + return out + + +class NoiseInjection(nn.Module): + def __init__(self): + super().__init__() + + self.weight = nn.Parameter(torch.zeros(1)) + + def forward(self, image, noise=None): + if noise is None: + batch, _, height, width = image.shape + noise = image.new_empty(batch, 1, height, width).normal_() + + return image + self.weight * noise + + +class ConstantInput(nn.Module): + def __init__(self, channel, size=4): + super().__init__() + + self.input = nn.Parameter(torch.randn(1, channel, size, size)) + + def forward(self, input): + batch = input.shape[0] + out = self.input.repeat(batch, 1, 1, 1) + + return out + + +class StyledConv(nn.Module): + def __init__( + self, + in_channel, + out_channel, + kernel_size, + style_dim=None, + upsample=False, + blur_kernel=[1, 3, 3, 1], + demodulate=True, + inject_noise=True, + ): + super().__init__() + + self.inject_noise = inject_noise + self.conv = ModulatedConv2d( + in_channel, + out_channel, + kernel_size, + style_dim, + upsample=upsample, + blur_kernel=blur_kernel, + demodulate=demodulate, + ) + + self.noise = NoiseInjection() + # self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1)) + # self.activate = ScaledLeakyReLU(0.2) + self.activate = FusedLeakyReLU(out_channel) + + def forward(self, input, 
style=None, noise=None): + out = self.conv(input, style) + if self.inject_noise: + out = self.noise(out, noise=noise) + # out = out + self.bias + out = self.activate(out) + + return out + + +class ToRGB(nn.Module): + def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]): + super().__init__() + + if upsample: + self.upsample = Upsample(blur_kernel) + + self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False) + self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) + + def forward(self, input, style, skip=None): + out = self.conv(input, style) + out = out + self.bias + + if skip is not None: + skip = self.upsample(skip) + + out = out + skip + + return out + + +class Generator(nn.Module): + def __init__( + self, + size, + style_dim, + n_mlp, + channel_multiplier=2, + blur_kernel=[1, 3, 3, 1], + lr_mlp=0.01, + ): + super().__init__() + + self.size = size + + self.style_dim = style_dim + + layers = [PixelNorm()] + + for i in range(n_mlp): + layers.append( + EqualLinear( + style_dim, style_dim, lr_mul=lr_mlp, activation='fused_lrelu' + ) + ) + + self.style = nn.Sequential(*layers) + + self.channels = { + 4: 512, + 8: 512, + 16: 512, + 32: 512, + 64: 256 * channel_multiplier, + 128: 128 * channel_multiplier, + 256: 64 * channel_multiplier, + 512: 32 * channel_multiplier, + 1024: 16 * channel_multiplier, + } + + self.input = ConstantInput(self.channels[4]) + self.conv1 = StyledConv( + self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel + ) + self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False) + + self.log_size = int(math.log(size, 2)) + self.num_layers = (self.log_size - 2) * 2 + 1 + + self.convs = nn.ModuleList() + self.upsamples = nn.ModuleList() + self.to_rgbs = nn.ModuleList() + self.noises = nn.Module() + + in_channel = self.channels[4] + + for layer_idx in range(self.num_layers): + res = (layer_idx + 5) // 2 + shape = [1, 1, 2 ** res, 2 ** res] + self.noises.register_buffer(f'noise_{layer_idx}', torch.randn(*shape)) + + for i in range(3, self.log_size + 1): + out_channel = self.channels[2 ** i] + + self.convs.append( + StyledConv( + in_channel, + out_channel, + 3, + style_dim, + upsample=True, + blur_kernel=blur_kernel, + ) + ) + + self.convs.append( + StyledConv( + out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel + ) + ) + + self.to_rgbs.append(ToRGB(out_channel, style_dim)) + + in_channel = out_channel + + self.n_latent = self.log_size * 2 - 2 + + def make_noise(self): + device = self.input.input.device + + noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device)] + + for i in range(3, self.log_size + 1): + for _ in range(2): + noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device)) + + return noises + + def mean_latent(self, n_latent): + latent_in = torch.randn( + n_latent, self.style_dim, device=self.input.input.device + ) + latent = self.style(latent_in).mean(0, keepdim=True) + + return latent + + def get_latent(self, input): + return self.style(input) + + def forward( + self, + styles, + return_latents=False, + inject_index=None, + truncation=1, + truncation_latent=None, + input_is_latent=False, + noise=None, + randomize_noise=True, + ): + if not input_is_latent: + styles = [self.style(s) for s in styles] + + if noise is None: + if randomize_noise: + noise = [None] * self.num_layers + else: + noise = [ + getattr(self.noises, f'noise_{i}') for i in range(self.num_layers) + ] + + if truncation < 1: + style_t = [] + + for style in styles: + style_t.append( + truncation_latent + 
truncation * (style - truncation_latent) + ) + + styles = style_t + + if len(styles) < 2: + inject_index = self.n_latent + + if len(styles[0].shape) < 3: + latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) + + else: + latent = styles[0] + + else: + if inject_index is None: + inject_index = random.randint(1, self.n_latent - 1) + + latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) + latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1) + + latent = torch.cat([latent, latent2], 1) + + out = self.input(latent) + out = self.conv1(out, latent[:, 0], noise=noise[0]) + + skip = self.to_rgb1(out, latent[:, 1]) + + i = 1 + for conv1, conv2, noise1, noise2, to_rgb in zip( + self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs + ): + out = conv1(out, latent[:, i], noise=noise1) + out = conv2(out, latent[:, i + 1], noise=noise2) + skip = to_rgb(out, latent[:, i + 2], skip) + + i += 2 + + image = skip + + if return_latents: + return image, latent + + else: + return image, None + + +class ConvLayer(nn.Sequential): + def __init__( + self, + in_channel, + out_channel, + kernel_size, + downsample=False, + blur_kernel=[1, 3, 3, 1], + bias=True, + activate=True, + ): + layers = [] + + if downsample: + factor = 2 + p = (len(blur_kernel) - factor) + (kernel_size - 1) + pad0 = (p + 1) // 2 + pad1 = p // 2 + + layers.append(Blur(blur_kernel, pad=(pad0, pad1))) + + stride = 2 + self.padding = 0 + + else: + stride = 1 + self.padding = kernel_size // 2 + + layers.append( + EqualConv2d( + in_channel, + out_channel, + kernel_size, + padding=self.padding, + stride=stride, + bias=bias and not activate, + ) + ) + + if activate: + if bias: + layers.append(FusedLeakyReLU(out_channel)) + + else: + layers.append(ScaledLeakyReLU(0.2)) + + super().__init__(*layers) + + +class ResBlock(nn.Module): + def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1], downsample=True, skip_gain=1.0): + super().__init__() + + self.skip_gain = skip_gain + self.conv1 = ConvLayer(in_channel, in_channel, 3) + self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=downsample, blur_kernel=blur_kernel) + + if in_channel != out_channel or downsample: + self.skip = ConvLayer( + in_channel, out_channel, 1, downsample=downsample, activate=False, bias=False + ) + else: + self.skip = nn.Identity() + + def forward(self, input): + out = self.conv1(input) + out = self.conv2(out) + + skip = self.skip(input) + out = (out * self.skip_gain + skip) / math.sqrt(self.skip_gain ** 2 + 1.0) + + return out + + +class StyleGAN2Discriminator(nn.Module): + def __init__(self, input_nc, ndf=64, n_layers=3, no_antialias=False, size=None, opt=None): + super().__init__() + self.opt = opt + self.stddev_group = 16 + if size is None: + size = 2 ** int((np.rint(np.log2(min(opt.load_size, opt.crop_size))))) + if "patch" in self.opt.netD and self.opt.D_patch_size is not None: + size = 2 ** int(np.log2(self.opt.D_patch_size)) + + blur_kernel = [1, 3, 3, 1] + channel_multiplier = ndf / 64 + channels = { + 4: min(384, int(4096 * channel_multiplier)), + 8: min(384, int(2048 * channel_multiplier)), + 16: min(384, int(1024 * channel_multiplier)), + 32: min(384, int(512 * channel_multiplier)), + 64: int(256 * channel_multiplier), + 128: int(128 * channel_multiplier), + 256: int(64 * channel_multiplier), + 512: int(32 * channel_multiplier), + 1024: int(16 * channel_multiplier), + } + + convs = [ConvLayer(3, channels[size], 1)] + + log_size = int(math.log(size, 2)) + + in_channel = channels[size] + + if 
"smallpatch" in self.opt.netD: + final_res_log2 = 4 + elif "patch" in self.opt.netD: + final_res_log2 = 3 + else: + final_res_log2 = 2 + + for i in range(log_size, final_res_log2, -1): + out_channel = channels[2 ** (i - 1)] + + convs.append(ResBlock(in_channel, out_channel, blur_kernel)) + + in_channel = out_channel + + self.convs = nn.Sequential(*convs) + + if False and "tile" in self.opt.netD: + in_channel += 1 + self.final_conv = ConvLayer(in_channel, channels[4], 3) + if "patch" in self.opt.netD: + self.final_linear = ConvLayer(channels[4], 1, 3, bias=False, activate=False) + else: + self.final_linear = nn.Sequential( + EqualLinear(channels[4] * 4 * 4, channels[4], activation='fused_lrelu'), + EqualLinear(channels[4], 1), + ) + + def forward(self, input, get_minibatch_features=False): + if "patch" in self.opt.netD and self.opt.D_patch_size is not None: + h, w = input.size(2), input.size(3) + y = torch.randint(h - self.opt.D_patch_size, ()) + x = torch.randint(w - self.opt.D_patch_size, ()) + input = input[:, :, y:y + self.opt.D_patch_size, x:x + self.opt.D_patch_size] + out = input + for i, conv in enumerate(self.convs): + out = conv(out) + # print(i, out.abs().mean()) + # out = self.convs(input) + + batch, channel, height, width = out.shape + + if False and "tile" in self.opt.netD: + group = min(batch, self.stddev_group) + stddev = out.view( + group, -1, 1, channel // 1, height, width + ) + stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8) + stddev = stddev.mean([2, 3, 4], keepdim=True).squeeze(2) + stddev = stddev.repeat(group, 1, height, width) + out = torch.cat([out, stddev], 1) + + out = self.final_conv(out) + # print(out.abs().mean()) + + if "patch" not in self.opt.netD: + out = out.view(batch, -1) + out = self.final_linear(out) + + return out + + +class TileStyleGAN2Discriminator(StyleGAN2Discriminator): + def forward(self, input): + B, C, H, W = input.size(0), input.size(1), input.size(2), input.size(3) + size = self.opt.D_patch_size + Y = H // size + X = W // size + input = input.view(B, C, Y, size, X, size) + input = input.permute(0, 2, 4, 1, 3, 5).contiguous().view(B * Y * X, C, size, size) + return super().forward(input) + + +class StyleGAN2Encoder(nn.Module): + def __init__(self, input_nc, output_nc, ngf=64, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False, opt=None): + super().__init__() + assert opt is not None + self.opt = opt + channel_multiplier = ngf / 32 + channels = { + 4: min(512, int(round(4096 * channel_multiplier))), + 8: min(512, int(round(2048 * channel_multiplier))), + 16: min(512, int(round(1024 * channel_multiplier))), + 32: min(512, int(round(512 * channel_multiplier))), + 64: int(round(256 * channel_multiplier)), + 128: int(round(128 * channel_multiplier)), + 256: int(round(64 * channel_multiplier)), + 512: int(round(32 * channel_multiplier)), + 1024: int(round(16 * channel_multiplier)), + } + + blur_kernel = [1, 3, 3, 1] + + cur_res = 2 ** int((np.rint(np.log2(min(opt.load_size, opt.crop_size))))) + convs = [nn.Identity(), + ConvLayer(3, channels[cur_res], 1)] + + num_downsampling = self.opt.stylegan2_G_num_downsampling + for i in range(num_downsampling): + in_channel = channels[cur_res] + out_channel = channels[cur_res // 2] + convs.append(ResBlock(in_channel, out_channel, blur_kernel, downsample=True)) + cur_res = cur_res // 2 + + for i in range(n_blocks // 2): + n_channel = channels[cur_res] + convs.append(ResBlock(n_channel, n_channel, downsample=False)) + + self.convs = nn.Sequential(*convs) + + def forward(self, 
input, layers=[], get_features=False):
+        feat = input
+        feats = []
+        if -1 in layers:
+            layers.append(len(self.convs) - 1)
+        for layer_id, layer in enumerate(self.convs):
+            feat = layer(feat)
+            # print(layer_id, " features ", feat.abs().mean())
+            if layer_id in layers:
+                feats.append(feat)
+
+        if get_features:
+            return feat, feats
+        else:
+            return feat
+
+
+class StyleGAN2Decoder(nn.Module):
+    def __init__(self, input_nc, output_nc, ngf=64, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False, opt=None):
+        super().__init__()
+        assert opt is not None
+        self.opt = opt
+
+        blur_kernel = [1, 3, 3, 1]
+
+        channel_multiplier = ngf / 32
+        channels = {
+            4: min(512, int(round(4096 * channel_multiplier))),
+            8: min(512, int(round(2048 * channel_multiplier))),
+            16: min(512, int(round(1024 * channel_multiplier))),
+            32: min(512, int(round(512 * channel_multiplier))),
+            64: int(round(256 * channel_multiplier)),
+            128: int(round(128 * channel_multiplier)),
+            256: int(round(64 * channel_multiplier)),
+            512: int(round(32 * channel_multiplier)),
+            1024: int(round(16 * channel_multiplier)),
+        }
+
+        num_downsampling = self.opt.stylegan2_G_num_downsampling
+        cur_res = 2 ** int((np.rint(np.log2(min(opt.load_size, opt.crop_size))))) // (2 ** num_downsampling)
+        convs = []
+
+        for i in range(n_blocks // 2):
+            n_channel = channels[cur_res]
+            convs.append(ResBlock(n_channel, n_channel, downsample=False))
+
+        for i in range(num_downsampling):
+            in_channel = channels[cur_res]
+            out_channel = channels[cur_res * 2]
+            inject_noise = "small" not in self.opt.netG
+            convs.append(
+                StyledConv(in_channel, out_channel, 3, upsample=True, blur_kernel=blur_kernel, inject_noise=inject_noise)
+            )
+            cur_res = cur_res * 2
+
+        convs.append(ConvLayer(channels[cur_res], 3, 1))
+
+        self.convs = nn.Sequential(*convs)
+
+    def forward(self, input):
+        return self.convs(input)
+
+
+class StyleGAN2Generator(nn.Module):
+    def __init__(self, input_nc, output_nc, ngf=64, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False, opt=None):
+        super().__init__()
+        self.opt = opt
+        self.encoder = StyleGAN2Encoder(input_nc, output_nc, ngf, use_dropout, n_blocks, padding_type, no_antialias, opt)
+        self.decoder = StyleGAN2Decoder(input_nc, output_nc, ngf, use_dropout, n_blocks, padding_type, no_antialias, opt)
+
+    def forward(self, input, layers=[], encode_only=False):
+        feat, feats = self.encoder(input, layers, True)
+        if encode_only:
+            return feats
+        else:
+            fake = self.decoder(feat)
+
+            if len(layers) > 0:
+                return fake, feats
+            else:
+                return fake
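[Editor's note] The encoder/decoder split above is what lets contrastive (PatchNCE-style) losses pull intermediate encoder features without running the decoder. A minimal sketch of the three call patterns of StyleGAN2Generator.forward; the netG instance, the 256x256 input size, and the layer indices are illustrative assumptions, not part of this patch:

    import torch

    # netG: a StyleGAN2Generator built elsewhere (hypothetical); crop_size=256 assumed.
    x = torch.randn(1, 3, 256, 256)
    fake = netG(x)                                        # plain translation: image in, image out
    feats = netG(x, layers=[0, 2, 4], encode_only=True)   # encoder features only, for NCE-style losses
    fake, feats = netG(x, layers=[0, 2, 4])               # translated image plus the requested features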
diff --git a/models/template_model.py b/models/template_model.py
new file mode 100644
index 0000000..68cdaf6
--- /dev/null
+++ b/models/template_model.py
@@ -0,0 +1,99 @@
+"""Model class template
+
+This module provides a template for users to implement custom models.
+You can specify '--model template' to use this model.
+The class name should be consistent with both the filename and its model option.
+The filename should be <model>_model.py
+The class name should be <Model>Model
+It implements a simple image-to-image translation baseline based on regression loss.
+Given input-output pairs (data_A, data_B), it learns a network netG that can minimize the following L1 loss:
+    min_<netG> ||netG(data_A) - data_B||_1
+You need to implement the following functions:
+    <modify_commandline_options>: Add model-specific options and rewrite default values for existing options.
+    <__init__>: Initialize this model class.
+    <set_input>: Unpack input data and perform data pre-processing.
+    <forward>: Run forward pass. This will be called by both <optimize_parameters> and <test>.
+    <optimize_parameters>: Update network weights; it will be called in every training iteration.
+"""
+import torch
+from .base_model import BaseModel
+from . import networks
+
+
+class TemplateModel(BaseModel):
+    @staticmethod
+    def modify_commandline_options(parser, is_train=True):
+        """Add new model-specific options and rewrite default values for existing options.
+
+        Parameters:
+            parser -- the option parser
+            is_train -- whether it is the training phase or the test phase. You can use this flag to add training-specific or test-specific options.
+
+        Returns:
+            the modified parser.
+        """
+        parser.set_defaults(dataset_mode='aligned')  # You can rewrite default values for this model. For example, this model usually uses an aligned dataset as its dataset.
+        if is_train:
+            parser.add_argument('--lambda_regression', type=float, default=1.0, help='weight for the regression loss')  # You can define new arguments for this model.
+
+        return parser
+
+    def __init__(self, opt):
+        """Initialize this model class.
+
+        Parameters:
+            opt -- training/test options
+
+        A few things can be done here.
+        - (required) call the initialization function of BaseModel
+        - define loss function, visualization images, model names, and optimizers
+        """
+        BaseModel.__init__(self, opt)  # call the initialization method of BaseModel
+        # specify the training losses you want to print out. The program will call base_model.get_current_losses to plot the losses to the console and save them to the disk.
+        self.loss_names = ['loss_G']
+        # specify the images you want to save and display. The program will call base_model.get_current_visuals to save and display these images.
+        self.visual_names = ['data_A', 'data_B', 'output']
+        # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks to save and load networks.
+        # you can use opt.isTrain to specify different behaviors for training and test. For example, some networks will not be used during test, and you don't need to load them.
+        self.model_names = ['G']
+        # define networks; you can use opt.isTrain to specify different behaviors for training and test.
+        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, gpu_ids=self.gpu_ids)
+        if self.isTrain:  # only defined during training time
+            # define your loss functions. You can use losses provided by torch.nn such as torch.nn.L1Loss.
+            # We also provide a GANLoss class "networks.GANLoss": self.criterionGAN = networks.GANLoss().to(self.device)
+            self.criterionLoss = torch.nn.L1Loss()
+            # define and initialize optimizers. You can define one optimizer for each network.
+            # If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
+            self.optimizer = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
+            self.optimizers = [self.optimizer]
+
+        # Our program will automatically call <BaseModel.setup> to define schedulers, load networks, and print networks
+
+    def set_input(self, input):
+        """Unpack input data from the dataloader and perform necessary pre-processing steps.
+
+        Parameters:
+            input: a dictionary that contains the data itself and its metadata information.
+        """
+        AtoB = self.opt.direction == 'AtoB'  # use <direction> to swap data_A and data_B
+        self.data_A = input['A' if AtoB else 'B'].to(self.device)  # get image data A
+        self.data_B = input['B' if AtoB else 'A'].to(self.device)  # get image data B
+        self.image_paths = input['A_paths' if AtoB else 'B_paths']  # get image paths
+
+    def forward(self):
+        """Run forward pass. This will be called by both <optimize_parameters> and <test>."""
+        self.output = self.netG(self.data_A)  # generate output image given the input data_A
+
+    def backward(self):
+        """Calculate losses, gradients, and update network weights; called in every training iteration."""
+        # calculate the intermediate results if necessary; here self.output has been computed during the <forward> pass
+        # calculate loss given the input and intermediate results
+        self.loss_G = self.criterionLoss(self.output, self.data_B) * self.opt.lambda_regression
+        self.loss_G.backward()  # calculate gradients of network G w.r.t. loss_G
+
+    def optimize_parameters(self):
+        """Update network weights; it will be called in every training iteration."""
+        self.forward()              # first call forward to calculate intermediate results
+        self.optimizer.zero_grad()  # clear network G's existing gradients
+        self.backward()             # calculate gradients for network G
+        self.optimizer.step()       # update network G's weights
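[Editor's note] For orientation, a minimal sketch of how such a model class is driven. It mirrors the create_dataset/create_model pattern visible in test.py later in this patch, but train.py itself is not part of this excerpt, so treat the loop as illustrative:

    from options.train_options import TrainOptions
    from data import create_dataset
    from models import create_model

    opt = TrainOptions().parse()     # e.g. run with --model template --dataset_mode aligned
    dataset = create_dataset(opt)    # yields dicts with 'A', 'B', 'A_paths', ...
    model = create_model(opt)
    model.setup(opt)                 # BaseModel.setup: schedulers, loading, printing

    for epoch in range(opt.epoch_count, opt.n_epochs + opt.n_epochs_decay + 1):
        for data in dataset:
            model.set_input(data)          # unpack the dataloader dict
            model.optimize_parameters()    # forward -> L1 loss -> backward -> step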
+ """ + AtoB = self.opt.direction == 'AtoB' # use to swap data_A and data_B + self.data_A = input['A' if AtoB else 'B'].to(self.device) # get image data A + self.data_B = input['B' if AtoB else 'A'].to(self.device) # get image data B + self.image_paths = input['A_paths' if AtoB else 'B_paths'] # get image paths + + def forward(self): + """Run forward pass. This will be called by both functions and .""" + self.output = self.netG(self.data_A) # generate output image given the input data_A + + def backward(self): + """Calculate losses, gradients, and update network weights; called in every training iteration""" + # caculate the intermediate results if necessary; here self.output has been computed during function + # calculate loss given the input and intermediate results + self.loss_G = self.criterionLoss(self.output, self.data_B) * self.opt.lambda_regression + self.loss_G.backward() # calculate gradients of network G w.r.t. loss_G + + def optimize_parameters(self): + """Update network weights; it will be called in every training iteration.""" + self.forward() # first call forward to calculate intermediate results + self.optimizer.zero_grad() # clear network G's existing gradients + self.backward() # calculate gradients for network G + self.optimizer.step() # update gradients for network G diff --git a/models/util/__pycache__/pos_embed.cpython-36.pyc b/models/util/__pycache__/pos_embed.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5141c6019e13efe531c8d30a36ed087e425995f GIT binary patch literal 2424 zcmaJ?&2QX96!(n3y-B)C(llv7GDtv{7M7+GNKw@m^#h5I3W-vrt&rt-yu04)+N-gb zG%JS#X^)loGvL7A!hKFioO0vL0rFmq4NIry@;g|&XdWz#0Y7H7GvKFto!Y_WocS+7h;JY>5tZ3I-3_z63X6VN z4O6WRhiNt#R$-ir@E{Y_P}|G2n#p`u%hRHa(#c*bcEd-DbKYG@S3uS6FQybjk9dTZGBDjH+Tk%{%&E9j0ti9Zplp;5f{z_;FUAa7<3L##cO_Dkqk4 zK1h|@9~W_TZAW>Nv>eiJm0ebGvaehTMLbQ_8-uipcIfeWf`=>~6ZK^=iS(9MQshZo zr8%0i=cz2pj;s8|vy{Jhkn$H7?m1dJJRJ{|r4RELfB4yu>$vxy-*iP>#XH^k=&+j< zA}zajZ{L2n^TEzew~~k5+h0G39%c_{#1MTlF81Pa^m&n>hvH6mf<5E1JFBvBcX9ap z(?fOP6;GqTmaFItr_I`IiLLNe7BGvw#^eUH3AvL#LaBwOk@_iw8Wh+t1mEXnyE1-Y z6(h|>qh^9{89*IBqcOF{tT@oiYZJ%=U4UAvwtzhQzNDjd93}mz!z3}JW2u#o%QBT! zR^)f3EMya+tN#}w>CjQjwB42$o9;ZBEuniGtqjl^tZlBc zRX9WI7j7WJF5K`vCjWKAh&PU?*vQn>CAsX6|9tqiLo+)Kk06cEH?$^_x|7grk6;qYCWg?F0yi@+gf zgp~mreYZ}9KW=92ci>$L>)f4O5%1lYh|~C<{Ee^U%L`a*!ba{@wq~kqk?f>dUd|@U z1SgdVRyL7!b!3%^TPwSmq=Q(QxLVJpD8sZ7$b5^=CX}Cv^d_cP(Mr-2@YezSB_06$ z0ShSRWU+`jU4%fTq3Bh?OOVVO34BDrMxuspdCRD+O#{i; z(l#rY+nYecnt}K_&|0UmTRXM8Z_=rijUz+Sey@;Zb#Zh8650OOtG|Yf10m zddG6@)qbB!MGL44>VV#S9d`k7+xO)f9HWD)k#HLH-)_#gzdQvS6_Hd49(PR}_KG0=W zUdGLPZMtLnxRP;G=SfSbc^+ zmv5pokn=s=+I)i(71r50Uq==2*ecsZlOMsV!|tKl*|_TTupw+0T3sk~n$*0}mS_I2 d$wwJhdDr2V**LvH8CIuRk9%IQ>b1O!{{RA}bXoua literal 0 HcmV?d00001 diff --git a/models/util/crop.py b/models/util/crop.py new file mode 100644 index 0000000..fcb2612 --- /dev/null +++ b/models/util/crop.py @@ -0,0 +1,42 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import math + +import torch + +from torchvision import transforms +from torchvision.transforms import functional as F + + +class RandomResizedCrop(transforms.RandomResizedCrop): + """ + RandomResizedCrop for matching TF/TPU implementation: no for-loop is used. + This may lead to results different with torchvision's version. 
+ Following BYOL's TF code: + https://github.com/deepmind/deepmind-research/blob/master/byol/utils/dataset.py#L206 + """ + @staticmethod + def get_params(img, scale, ratio): + width, height = F._get_image_size(img) + area = height * width + + target_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item() + log_ratio = torch.log(torch.tensor(ratio)) + aspect_ratio = torch.exp( + torch.empty(1).uniform_(log_ratio[0], log_ratio[1]) + ).item() + + w = int(round(math.sqrt(target_area * aspect_ratio))) + h = int(round(math.sqrt(target_area / aspect_ratio))) + + w = min(w, width) + h = min(h, height) + + i = torch.randint(0, height - h + 1, size=(1,)).item() + j = torch.randint(0, width - w + 1, size=(1,)).item() + + return i, j, h, w \ No newline at end of file diff --git a/models/util/datasets.py b/models/util/datasets.py new file mode 100644 index 0000000..0dde1f4 --- /dev/null +++ b/models/util/datasets.py @@ -0,0 +1,65 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# -------------------------------------------------------- +# References: +# DeiT: https://github.com/facebookresearch/deit +# -------------------------------------------------------- + +import os +import PIL + +from torchvision import datasets, transforms + +from timm.data import create_transform +from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD + + +def build_dataset(is_train, args): + transform = build_transform(is_train, args) + + root = os.path.join(args.data_path, 'train' if is_train else 'val') + dataset = datasets.ImageFolder(root, transform=transform) + + print(dataset) + + return dataset + + +def build_transform(is_train, args): + mean = IMAGENET_DEFAULT_MEAN + std = IMAGENET_DEFAULT_STD + # train transform + if is_train: + # this should always dispatch to transforms_imagenet_train + transform = create_transform( + input_size=args.input_size, + is_training=True, + color_jitter=args.color_jitter, + auto_augment=args.aa, + interpolation='bicubic', + re_prob=args.reprob, + re_mode=args.remode, + re_count=args.recount, + mean=mean, + std=std, + ) + return transform + + # eval transform + t = [] + if args.input_size <= 224: + crop_pct = 224 / 256 + else: + crop_pct = 1.0 + size = int(args.input_size / crop_pct) + t.append( + transforms.Resize(size, interpolation=PIL.Image.BICUBIC), # to maintain same ratio w.r.t. 224 images + ) + t.append(transforms.CenterCrop(args.input_size)) + + t.append(transforms.ToTensor()) + t.append(transforms.Normalize(mean, std)) + return transforms.Compose(t) diff --git a/models/util/lars.py b/models/util/lars.py new file mode 100644 index 0000000..509c5f6 --- /dev/null +++ b/models/util/lars.py @@ -0,0 +1,47 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# -------------------------------------------------------- +# LARS optimizer, implementation from MoCo v3: +# https://github.com/facebookresearch/moco-v3 +# -------------------------------------------------------- + +import torch + + +class LARS(torch.optim.Optimizer): + """ + LARS optimizer, no rate scaling or weight decay for parameters <= 1D. 
+ """ + def __init__(self, params, lr=0, weight_decay=0, momentum=0.9, trust_coefficient=0.001): + defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum, trust_coefficient=trust_coefficient) + super().__init__(params, defaults) + + @torch.no_grad() + def step(self): + for g in self.param_groups: + for p in g['params']: + dp = p.grad + + if dp is None: + continue + + if p.ndim > 1: # if not normalization gamma/beta or bias + dp = dp.add(p, alpha=g['weight_decay']) + param_norm = torch.norm(p) + update_norm = torch.norm(dp) + one = torch.ones_like(param_norm) + q = torch.where(param_norm > 0., + torch.where(update_norm > 0, + (g['trust_coefficient'] * param_norm / update_norm), one), + one) + dp = dp.mul(q) + + param_state = self.state[p] + if 'mu' not in param_state: + param_state['mu'] = torch.zeros_like(p) + mu = param_state['mu'] + mu.mul_(g['momentum']).add_(dp) + p.add_(mu, alpha=-g['lr']) \ No newline at end of file diff --git a/models/util/lr_decay.py b/models/util/lr_decay.py new file mode 100644 index 0000000..7fa11f1 --- /dev/null +++ b/models/util/lr_decay.py @@ -0,0 +1,76 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# -------------------------------------------------------- +# References: +# ELECTRA https://github.com/google-research/electra +# BEiT: https://github.com/microsoft/unilm/tree/master/beit +# -------------------------------------------------------- + +import json + + +def param_groups_lrd(model, weight_decay=0.05, no_weight_decay_list=[], layer_decay=.75): + """ + Parameter groups for layer-wise lr decay + Following BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L58 + """ + param_group_names = {} + param_groups = {} + + num_layers = len(model.blocks) + 1 + + layer_scales = list(layer_decay ** (num_layers - i) for i in range(num_layers + 1)) + + for n, p in model.named_parameters(): + if not p.requires_grad: + continue + + # no decay: all 1D parameters and model specific ones + if p.ndim == 1 or n in no_weight_decay_list: + g_decay = "no_decay" + this_decay = 0. + else: + g_decay = "decay" + this_decay = weight_decay + + layer_id = get_layer_id_for_vit(n, num_layers) + group_name = "layer_%d_%s" % (layer_id, g_decay) + + if group_name not in param_group_names: + this_scale = layer_scales[layer_id] + + param_group_names[group_name] = { + "lr_scale": this_scale, + "weight_decay": this_decay, + "params": [], + } + param_groups[group_name] = { + "lr_scale": this_scale, + "weight_decay": this_decay, + "params": [], + } + + param_group_names[group_name]["params"].append(n) + param_groups[group_name]["params"].append(p) + + # print("parameter groups: \n%s" % json.dumps(param_group_names, indent=2)) + + return list(param_groups.values()) + + +def get_layer_id_for_vit(name, num_layers): + """ + Assign a parameter with its layer id + Following BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L33 + """ + if name in ['cls_token', 'pos_embed']: + return 0 + elif name.startswith('patch_embed'): + return 0 + elif name.startswith('blocks'): + return int(name.split('.')[1]) + 1 + else: + return num_layers \ No newline at end of file diff --git a/models/util/lr_sched.py b/models/util/lr_sched.py new file mode 100644 index 0000000..4cb682b --- /dev/null +++ b/models/util/lr_sched.py @@ -0,0 +1,21 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
+# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import math + +def adjust_learning_rate(optimizer, epoch, args): + """Decay the learning rate with half-cycle cosine after warmup""" + if epoch < args.warmup_epochs: + lr = args.lr * epoch / args.warmup_epochs + else: + lr = args.min_lr + (args.lr - args.min_lr) * 0.5 * \ + (1. + math.cos(math.pi * (epoch - args.warmup_epochs) / (args.epochs - args.warmup_epochs))) + for param_group in optimizer.param_groups: + if "lr_scale" in param_group: + param_group["lr"] = lr * param_group["lr_scale"] + else: + param_group["lr"] = lr + return lr diff --git a/models/util/misc.py b/models/util/misc.py new file mode 100644 index 0000000..ad9a786 --- /dev/null +++ b/models/util/misc.py @@ -0,0 +1,340 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# -------------------------------------------------------- +# References: +# DeiT: https://github.com/facebookresearch/deit +# BEiT: https://github.com/microsoft/unilm/tree/master/beit +# -------------------------------------------------------- + +import builtins +import datetime +import os +import time +from collections import defaultdict, deque +from pathlib import Path + +import torch +import torch.distributed as dist +from torch._six import inf + + +class SmoothedValue(object): + """Track a series of values and provide access to smoothed values over a + window or the global series average. + """ + + def __init__(self, window_size=20, fmt=None): + if fmt is None: + fmt = "{median:.4f} ({global_avg:.4f})" + self.deque = deque(maxlen=window_size) + self.total = 0.0 + self.count = 0 + self.fmt = fmt + + def update(self, value, n=1): + self.deque.append(value) + self.count += n + self.total += value * n + + def synchronize_between_processes(self): + """ + Warning: does not synchronize the deque! 
+ """ + if not is_dist_avail_and_initialized(): + return + t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') + dist.barrier() + dist.all_reduce(t) + t = t.tolist() + self.count = int(t[0]) + self.total = t[1] + + @property + def median(self): + d = torch.tensor(list(self.deque)) + return d.median().item() + + @property + def avg(self): + d = torch.tensor(list(self.deque), dtype=torch.float32) + return d.mean().item() + + @property + def global_avg(self): + return self.total / self.count + + @property + def max(self): + return max(self.deque) + + @property + def value(self): + return self.deque[-1] + + def __str__(self): + return self.fmt.format( + median=self.median, + avg=self.avg, + global_avg=self.global_avg, + max=self.max, + value=self.value) + + +class MetricLogger(object): + def __init__(self, delimiter="\t"): + self.meters = defaultdict(SmoothedValue) + self.delimiter = delimiter + + def update(self, **kwargs): + for k, v in kwargs.items(): + if v is None: + continue + if isinstance(v, torch.Tensor): + v = v.item() + assert isinstance(v, (float, int)) + self.meters[k].update(v) + + def __getattr__(self, attr): + if attr in self.meters: + return self.meters[attr] + if attr in self.__dict__: + return self.__dict__[attr] + raise AttributeError("'{}' object has no attribute '{}'".format( + type(self).__name__, attr)) + + def __str__(self): + loss_str = [] + for name, meter in self.meters.items(): + loss_str.append( + "{}: {}".format(name, str(meter)) + ) + return self.delimiter.join(loss_str) + + def synchronize_between_processes(self): + for meter in self.meters.values(): + meter.synchronize_between_processes() + + def add_meter(self, name, meter): + self.meters[name] = meter + + def log_every(self, iterable, print_freq, header=None): + i = 0 + if not header: + header = '' + start_time = time.time() + end = time.time() + iter_time = SmoothedValue(fmt='{avg:.4f}') + data_time = SmoothedValue(fmt='{avg:.4f}') + space_fmt = ':' + str(len(str(len(iterable)))) + 'd' + log_msg = [ + header, + '[{0' + space_fmt + '}/{1}]', + 'eta: {eta}', + '{meters}', + 'time: {time}', + 'data: {data}' + ] + if torch.cuda.is_available(): + log_msg.append('max mem: {memory:.0f}') + log_msg = self.delimiter.join(log_msg) + MB = 1024.0 * 1024.0 + for obj in iterable: + data_time.update(time.time() - end) + yield obj + iter_time.update(time.time() - end) + if i % print_freq == 0 or i == len(iterable) - 1: + eta_seconds = iter_time.global_avg * (len(iterable) - i) + eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) + if torch.cuda.is_available(): + print(log_msg.format( + i, len(iterable), eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time), + memory=torch.cuda.max_memory_allocated() / MB)) + else: + print(log_msg.format( + i, len(iterable), eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time))) + i += 1 + end = time.time() + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('{} Total time: {} ({:.4f} s / it)'.format( + header, total_time_str, total_time / len(iterable))) + + +def setup_for_distributed(is_master): + """ + This function disables printing when not in master process + """ + builtin_print = builtins.print + + def print(*args, **kwargs): + force = kwargs.pop('force', False) + force = force or (get_world_size() > 8) + if is_master or force: + now = datetime.datetime.now().time() + builtin_print('[{}] '.format(now), end='') # print with time 
stamp + builtin_print(*args, **kwargs) + + builtins.print = print + + +def is_dist_avail_and_initialized(): + if not dist.is_available(): + return False + if not dist.is_initialized(): + return False + return True + + +def get_world_size(): + if not is_dist_avail_and_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank(): + if not is_dist_avail_and_initialized(): + return 0 + return dist.get_rank() + + +def is_main_process(): + return get_rank() == 0 + + +def save_on_master(*args, **kwargs): + if is_main_process(): + torch.save(*args, **kwargs) + + +def init_distributed_mode(args): + if args.dist_on_itp: + args.rank = int(os.environ['OMPI_COMM_WORLD_RANK']) + args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE']) + args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK']) + args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT']) + os.environ['LOCAL_RANK'] = str(args.gpu) + os.environ['RANK'] = str(args.rank) + os.environ['WORLD_SIZE'] = str(args.world_size) + # ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"] + elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: + args.rank = int(os.environ["RANK"]) + args.world_size = int(os.environ['WORLD_SIZE']) + args.gpu = int(os.environ['LOCAL_RANK']) + elif 'SLURM_PROCID' in os.environ: + args.rank = int(os.environ['SLURM_PROCID']) + args.gpu = args.rank % torch.cuda.device_count() + else: + print('Not using distributed mode') + setup_for_distributed(is_master=True) # hack + args.distributed = False + return + + args.distributed = True + + torch.cuda.set_device(args.gpu) + args.dist_backend = 'nccl' + print('| distributed init (rank {}): {}, gpu {}'.format( + args.rank, args.dist_url, args.gpu), flush=True) + torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, + world_size=args.world_size, rank=args.rank) + torch.distributed.barrier() + setup_for_distributed(args.rank == 0) + + +class NativeScalerWithGradNormCount: + state_dict_key = "amp_scaler" + + def __init__(self): + self._scaler = torch.cuda.amp.GradScaler() + + def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True): + self._scaler.scale(loss).backward(create_graph=create_graph) + if update_grad: + if clip_grad is not None: + assert parameters is not None + self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place + norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad) + else: + self._scaler.unscale_(optimizer) + norm = get_grad_norm_(parameters) + self._scaler.step(optimizer) + self._scaler.update() + else: + norm = None + return norm + + def state_dict(self): + return self._scaler.state_dict() + + def load_state_dict(self, state_dict): + self._scaler.load_state_dict(state_dict) + + +def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor: + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + parameters = [p for p in parameters if p.grad is not None] + norm_type = float(norm_type) + if len(parameters) == 0: + return torch.tensor(0.) 
+ device = parameters[0].grad.device + if norm_type == inf: + total_norm = max(p.grad.detach().abs().max().to(device) for p in parameters) + else: + total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), norm_type) + return total_norm + + +def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler): + output_dir = Path(args.output_dir) + epoch_name = str(epoch) + if loss_scaler is not None: + checkpoint_paths = [output_dir / ('checkpoint-%s.pth' % epoch_name)] + for checkpoint_path in checkpoint_paths: + to_save = { + 'model': model_without_ddp.state_dict(), + 'optimizer': optimizer.state_dict(), + 'epoch': epoch, + 'scaler': loss_scaler.state_dict(), + 'args': args, + } + + save_on_master(to_save, checkpoint_path) + else: + client_state = {'epoch': epoch} + model.save_checkpoint(save_dir=args.output_dir, tag="checkpoint-%s" % epoch_name, client_state=client_state) + + +def load_model(args, model_without_ddp, optimizer, loss_scaler): + if args.resume: + if args.resume.startswith('https'): + checkpoint = torch.hub.load_state_dict_from_url( + args.resume, map_location='cpu', check_hash=True) + else: + checkpoint = torch.load(args.resume, map_location='cpu') + model_without_ddp.load_state_dict(checkpoint['model']) + print("Resume checkpoint %s" % args.resume) + if 'optimizer' in checkpoint and 'epoch' in checkpoint and not (hasattr(args, 'eval') and args.eval): + optimizer.load_state_dict(checkpoint['optimizer']) + args.start_epoch = checkpoint['epoch'] + 1 + if 'scaler' in checkpoint: + loss_scaler.load_state_dict(checkpoint['scaler']) + print("With optim & sched!") + + +def all_reduce_mean(x): + world_size = get_world_size() + if world_size > 1: + x_reduce = torch.tensor(x).cuda() + dist.all_reduce(x_reduce) + x_reduce /= world_size + return x_reduce.item() + else: + return x \ No newline at end of file diff --git a/models/util/pos_embed.py b/models/util/pos_embed.py new file mode 100644 index 0000000..6acf8bd --- /dev/null +++ b/models/util/pos_embed.py @@ -0,0 +1,96 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+# --------------------------------------------------------
+# Position embedding utils
+# --------------------------------------------------------
+
+import numpy as np
+
+import torch
+
+
+# --------------------------------------------------------
+# 2D sine-cosine position embedding
+# References:
+# Transformer: https://github.com/tensorflow/models/blob/master/official/nlp/transformer/model_utils.py
+# MoCo v3: https://github.com/facebookresearch/moco-v3
+# --------------------------------------------------------
+def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
+    """
+    grid_size: int of the grid height and width
+    return:
+    pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
+    """
+    grid_h = np.arange(grid_size, dtype=np.float32)
+    grid_w = np.arange(grid_size, dtype=np.float32)
+    grid = np.meshgrid(grid_w, grid_h)  # here w goes first
+    grid = np.stack(grid, axis=0)
+
+    grid = grid.reshape([2, 1, grid_size, grid_size])
+    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
+    if cls_token:
+        pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
+    return pos_embed
+
+
+def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
+    assert embed_dim % 2 == 0
+
+    # use half of dimensions to encode grid_h
+    emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0])  # (H*W, D/2)
+    emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1])  # (H*W, D/2)
+
+    emb = np.concatenate([emb_h, emb_w], axis=1)  # (H*W, D)
+    return emb
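[Editor's note] As a quick sanity check on the shapes (illustrative only, assuming the module above is importable as models.util.pos_embed): one row per grid cell, with the first half of each row encoding the grid row and the second half the grid column, and an all-zero row prepended for the cls token.

    import numpy as np
    from models.util.pos_embed import get_2d_sincos_pos_embed

    emb = get_2d_sincos_pos_embed(embed_dim=16, grid_size=14)    # ViT-style 14x14 grid
    assert emb.shape == (14 * 14, 16)
    emb_cls = get_2d_sincos_pos_embed(16, 14, cls_token=True)    # zero row prepended
    assert emb_cls.shape == (1 + 14 * 14, 16) and np.allclose(emb_cls[0], 0.0)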
+def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
+    """
+    embed_dim: output dimension for each position
+    pos: a list of positions to be encoded: size (M,)
+    out: (M, D)
+    """
+    assert embed_dim % 2 == 0
+    omega = np.arange(embed_dim // 2, dtype=float)  # note: the np.float alias is removed in NumPy >= 1.24; builtin float keeps float64 semantics
+    omega /= embed_dim / 2.
+    omega = 1. / 10000**omega  # (D/2,)
+
+    pos = pos.reshape(-1)  # (M,)
+    out = np.einsum('m,d->md', pos, omega)  # (M, D/2), outer product
+
+    emb_sin = np.sin(out)  # (M, D/2)
+    emb_cos = np.cos(out)  # (M, D/2)
+
+    emb = np.concatenate([emb_sin, emb_cos], axis=1)  # (M, D)
+    return emb
+
+
+# --------------------------------------------------------
+# Interpolate position embeddings for high-resolution
+# References:
+# DeiT: https://github.com/facebookresearch/deit
+# --------------------------------------------------------
+def interpolate_pos_embed(model, checkpoint_model):
+    if 'pos_embed' in checkpoint_model:
+        pos_embed_checkpoint = checkpoint_model['pos_embed']
+        embedding_size = pos_embed_checkpoint.shape[-1]
+        num_patches = model.patch_embed.num_patches
+        num_extra_tokens = model.pos_embed.shape[-2] - num_patches
+        # height (== width) for the checkpoint position embedding
+        orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
+        # height (== width) for the new position embedding
+        new_size = int(num_patches ** 0.5)
+        # class_token and dist_token are kept unchanged
+        if orig_size != new_size:
+            print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
+            extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
+            # only the position tokens are interpolated
+            pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
+            pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
+            pos_tokens = torch.nn.functional.interpolate(
+                pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
+            pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
+            new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
+            checkpoint_model['pos_embed'] = new_pos_embed
diff --git a/options/__init__.py b/options/__init__.py
new file mode 100644
index 0000000..e7eedeb
--- /dev/null
+++ b/options/__init__.py
@@ -0,0 +1 @@
+"""This package options includes option modules: training options, test options, and basic options (used in both training and test)."""
diff --git a/options/__pycache__/__init__.cpython-36.pyc b/options/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2dc8563c9a7bcea599e4bc2915d151b247908349
Binary files /dev/null and b/options/__pycache__/__init__.cpython-36.pyc differ
diff --git a/options/base_options.py b/options/base_options.py
new file mode 100644
--- /dev/null
+++ b/options/base_options.py
@@ -0,0 +1,167 @@
+import argparse
+import os
+from util import util
+import torch
+import models
+import data
+
+
+class BaseOptions():
+    """This class defines options used during both training and test time.
+
+    It also implements several helper functions such as parsing, printing, and saving the options.
+    It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class.
+    """
+
+    def __init__(self, cmd_line=None):
+        """Reset the class; indicates the class hasn't been initialized"""
+        self.initialized = False
+        self.cmd_line = None
+        if cmd_line is not None:
+            self.cmd_line = cmd_line.split()
+
+    def initialize(self, parser):
+        """Define the common options that are used in both training and test."""
+        # basic parameters
+        parser.add_argument('--dataroot', default='placeholder', help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
+        parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
+        parser.add_argument('--easy_label', type=str, default='experiment_name', help='Interpretable name')
+        parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0  0,1,2, 0,2. use -1 for CPU')
+        parser.add_argument('--use_idt', action='store_true', help='use_idt')
+        parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
+        # model parameters
+        parser.add_argument('--model', type=str, default='cut', help='chooses which model to use.')
+        parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
+        parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
+        parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
+        parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
+        parser.add_argument('--netD', type=str, default='basic', choices=['basic', 'n_layers', 'pixel', 'patch', 'tilestylegan2', 'stylegan2'], help='specify discriminator architecture. The basic model is a 70x70 PatchGAN.
n_layers allows you to specify the layers in the discriminator') + parser.add_argument('--netG', type=str, default='resnet_9blocks', choices=['resnet_9blocks','resnet_9blocks_mask', 'resnet_6blocks', 'unet_256', 'unet_128', 'stylegan2', 'smallstylegan2', 'resnet_cat'], help='specify generator architecture') + parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers') + parser.add_argument('--normG', type=str, default='instance', choices=['instance', 'batch', 'none'], help='instance normalization or batch normalization for G') + parser.add_argument('--normD', type=str, default='instance', choices=['instance', 'batch', 'none'], help='instance normalization or batch normalization for D') + parser.add_argument('--init_type', type=str, default='xavier', choices=['normal', 'xavier', 'kaiming', 'orthogonal'], help='network initialization') + parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') + parser.add_argument('--no_dropout', type=util.str2bool, nargs='?', const=True, default=True, + help='no dropout for the generator') + parser.add_argument('--no_antialias', action='store_true', help='if specified, use stride=2 convs instead of antialiased-downsampling (sad)') + parser.add_argument('--no_antialias_up', action='store_true', help='if specified, use [upconv(learned filter)] instead of [upconv(hard-coded [1,3,3,1] filter), conv]') + # dataset parameters + parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]') + parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA') + parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly') + parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data') + parser.add_argument('--batch_size', type=int, default=1, help='input batch size') + parser.add_argument('--load_size', type=int, default=286, help='scale images to this size') + parser.add_argument('--crop_size', type=int, default=256, help='then crop to this size') + parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.') + parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]') + parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation') + parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML') + parser.add_argument('--random_scale_max', type=float, default=3.0, + help='(used for single image translation) Randomly scale the image by the specified factor as data augmentation.') + # additional parameters + parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? 
set to latest to use latest cached model')
+        parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
+        parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
+
+        # parameters related to StyleGAN2-based networks
+        parser.add_argument('--stylegan2_G_num_downsampling',
+                            default=1, type=int,
+                            help='Number of downsampling layers used by StyleGAN2Generator')
+
+        self.initialized = True
+        return parser
+
+    def gather_options(self):
+        """Initialize our parser with basic options (only once).
+        Add additional model-specific and dataset-specific options.
+        These options are defined in the <modify_commandline_options> function
+        in model and dataset classes.
+        """
+        if not self.initialized:  # check if it has been initialized
+            parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+            parser = self.initialize(parser)
+
+        # get the basic options
+        if self.cmd_line is None:
+            opt, _ = parser.parse_known_args()
+        else:
+            opt, _ = parser.parse_known_args(self.cmd_line)
+
+        # modify model-related parser options
+        model_name = opt.model
+        model_option_setter = models.get_option_setter(model_name)
+
+        parser = model_option_setter(parser, self.isTrain)
+        if self.cmd_line is None:
+            print(parser)
+            opt, _ = parser.parse_known_args()  # parse again with new defaults
+        else:
+            opt, _ = parser.parse_known_args(self.cmd_line)  # parse again with new defaults
+
+        # modify dataset-related parser options
+        dataset_name = opt.dataset_mode
+        dataset_option_setter = data.get_option_setter(dataset_name)
+        parser = dataset_option_setter(parser, self.isTrain)
+
+        # save and return the parser
+        self.parser = parser
+        if self.cmd_line is None:
+            return parser.parse_args()
+        else:
+            return parser.parse_args(self.cmd_line)
+
+    def print_options(self, opt):
+        """Print and save options
+
+        It will print both current options and default values (if different).
+        It will save options into a text file / [checkpoints_dir] / opt.txt
+        """
+        message = ''
+        message += '----------------- Options ---------------\n'
+        for k, v in sorted(vars(opt).items()):
+            comment = ''
+            default = self.parser.get_default(k)
+            if v != default:
+                comment = '\t[default: %s]' % str(default)
+            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
+        message += '----------------- End -------------------'
+        print(message)
+
+        # save to the disk
+        expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
+        util.mkdirs(expr_dir)
+        file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
+        try:
+            with open(file_name, 'wt') as opt_file:
+                opt_file.write(message)
+                opt_file.write('\n')
+        except PermissionError as error:
+            print("permission error {}".format(error))
+            pass
+
+    def parse(self):
+        """Parse our options, create checkpoints directory suffix, and set up gpu device."""
+        opt = self.gather_options()
+        opt.isTrain = self.isTrain  # train or test
+
+        # process opt.suffix
+        if opt.suffix:
+            suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
+            opt.name = opt.name + suffix
+
+        self.print_options(opt)
+
+        # set gpu ids
+        str_ids = opt.gpu_ids.split(',')
+        opt.gpu_ids = []
+        for str_id in str_ids:
+            id = int(str_id)
+            if id >= 0:
+                opt.gpu_ids.append(id)
+        if len(opt.gpu_ids) > 0:
+            torch.cuda.set_device(opt.gpu_ids[0])
+
+        self.opt = opt
+        return self.opt
diff --git a/options/test_options.py b/options/test_options.py
new file mode 100644
index 0000000..e4559ad
--- /dev/null
+++ b/options/test_options.py
@@ -0,0 +1,21 @@
+from .base_options import BaseOptions
+
+
+class TestOptions(BaseOptions):
+    """This class includes test options.
+
+    It also includes shared options defined in BaseOptions.
+    """
+
+    def initialize(self, parser):
+        parser = BaseOptions.initialize(self, parser)  # define shared options
+        parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
+        parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
+        # Dropout and Batchnorm have different behavior during training and test.
+        parser.add_argument('--eval', action='store_true', help='use eval mode during test time.')
+        parser.add_argument('--num_test', type=int, default=50, help='how many test images to run')
+
+        # To avoid cropping, the load_size should be the same as crop_size
+        parser.set_defaults(load_size=parser.get_default('crop_size'))
+        self.isTrain = False
+        return parser
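[Editor's note] The --eval flag above matters because dropout and batch-norm layers behave differently in train and eval modes; a minimal PyTorch illustration, not taken from the patch:

    import torch.nn as nn

    net = nn.Sequential(nn.Linear(8, 8), nn.Dropout(0.5), nn.BatchNorm1d(8))
    net.train()  # dropout active, BatchNorm uses per-batch statistics
    net.eval()   # dropout disabled, BatchNorm uses running statistics -- what --eval selects at test time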
diff --git a/options/train_options.py b/options/train_options.py
new file mode 100644
index 0000000..5df79aa
--- /dev/null
+++ b/options/train_options.py
@@ -0,0 +1,47 @@
+from .base_options import BaseOptions
+
+
+class TrainOptions(BaseOptions):
+    """This class includes training options.
+
+    It also includes shared options defined in BaseOptions.
+    """
+
+    def initialize(self, parser):
+        parser = BaseOptions.initialize(self, parser)
+        # visdom and HTML visualization parameters
+        parser.add_argument('--display_freq', type=int, default=50, help='frequency of showing training results on screen')
+        parser.add_argument('--display_ncols', type=int, default=4, help='if positive, display all images in a single visdom web panel with certain number of images per row.')
+        parser.add_argument('--display_id', type=int, default=None, help='window id of the web display. Default is random window id')
+        parser.add_argument('--display_server', type=str, default="http://localhost", help='visdom server of the web display')
+        parser.add_argument('--display_env', type=str, default='main', help='visdom display environment name (default is "main")')
+        parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display')
+        parser.add_argument('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html')
+        parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
+        parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
+        # network saving and loading parameters
+        parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
+        parser.add_argument('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs')
+        parser.add_argument('--evaluation_freq', type=int, default=5000, help='evaluation freq')
+        parser.add_argument('--save_by_iter', action='store_true', help='whether saves model by iteration')
+        parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
+
+        # parser.add_argument('--use_mlp', action='store_true', help='use_mlp')
+        # parser.add_argument('--use_tgt_style_src', action='store_true', help='use_tgt_style_src')
+        parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')
+        parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
+        parser.add_argument('--pretrained_name', type=str, default=None, help='resume training from another checkpoint')
+
+        # training parameters
+        parser.add_argument('--n_epochs', type=int, default=100, help='number of epochs with the initial learning rate')
+        parser.add_argument('--n_epochs_decay', type=int, default=100, help='number of epochs to linearly decay learning rate to zero')
+        parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
+        parser.add_argument('--beta2', type=float, default=0.999, help='momentum term of adam')
+        parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
+        parser.add_argument('--gan_mode', type=str, default='lsgan', help='the type of GAN objective. [vanilla | lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.')
+        parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')
+        parser.add_argument('--lr_policy', type=str, default='linear', help='learning rate policy. [linear | step | plateau | cosine]')
+        parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')
+
+        self.isTrain = True
+        return parser
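[Editor's note] With the defaults above (--lr_policy linear, --n_epochs 100, --n_epochs_decay 100), the learning rate stays at --lr for the first 100 epochs and then decays linearly toward zero. The actual scheduler lives in models/networks.py, which is not shown in this section, so the following is only a schematic of the implied rule:

    def linear_lr(epoch, lr=2e-4, n_epochs=100, n_epochs_decay=100):
        # flat for n_epochs, then linear decay to ~0 over n_epochs_decay epochs
        factor = 1.0 - max(0, epoch - n_epochs) / float(n_epochs_decay + 1)
        return lr * factor

    assert linear_lr(50) == 2e-4              # constant phase
    assert linear_lr(150) < linear_lr(110)    # decaying phase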
diff --git a/scripts/test.sh b/scripts/test.sh
new file mode 100644
index 0000000..8e8b29a
--- /dev/null
+++ b/scripts/test.sh
@@ -0,0 +1 @@
+CUDA_VISIBLE_DEVICES=0 python test.py --dataroot /path/of/test_dataset --checkpoints_dir ./checkpoints --name train1 --model roma_single --num_test 10000 --epoch latest
diff --git a/scripts/train.sh b/scripts/train.sh
new file mode 100644
index 0000000..f5765f3
--- /dev/null
+++ b/scripts/train.sh
@@ -0,0 +1,5 @@
+# Train for video mode
+CUDA_VISIBLE_DEVICES=0 python train.py --dataroot /path --name ROMA_name --dataset_mode unaligned_double --no_flip --local_nums 64 --display_env ROMA_env --model roma --side_length 7 --lambda_spatial 5.0 --lambda_global 5.0 --lambda_motion 1.0 --atten_layers 1,3,5 --lr 0.00001
+
+# Train for image mode
+CUDA_VISIBLE_DEVICES=0 python train.py --dataroot /path --name ROMA_name --dataset_mode unaligned --local_nums 64 --display_env ROMA_env --model roma --side_length 7 --lambda_spatial 5.0 --lambda_global 5.0 --atten_layers 1,3,5 --lr 0.00001
\ No newline at end of file
diff --git a/test.py b/test.py
new file mode 100644
index 0000000..fddd57b
--- /dev/null
+++ b/test.py
@@ -0,0 +1,70 @@
+"""General-purpose test script for image-to-image translation.
+
+Once you have trained your model with train.py, you can use this script to test the model.
+It will load a saved model from --checkpoints_dir and save the results to --results_dir.
+
+It first creates the model and dataset given the options. It will hard-code some parameters.
+It then runs inference for --num_test images and saves the results to an HTML file.
+
+Example (You need to train models first or download pre-trained models from our website):
+    Test a CycleGAN model (both sides):
+        python test.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
+
+    Test a CycleGAN model (one side only):
+        python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout
+
+    The option '--model test' is used for generating CycleGAN results only for one side.
+    This option will automatically set '--dataset_mode single', which only loads the images from one set.
+    On the contrary, using '--model cycle_gan' requires loading and generating results in both directions,
+    which is sometimes unnecessary. The results will be saved at ./results/.
+    Use '--results_dir <directory_path_to_save_result>' to specify the results directory.
+
+    Test a pix2pix model:
+        python test.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
+
+See options/base_options.py and options/test_options.py for more test options.
+See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md
+See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md
+"""
+import os
+from options.test_options import TestOptions
+from data import create_dataset
+from models import create_model
+from util.visualizer import save_images
+from util import html
+import util.util as util
+
+
+if __name__ == '__main__':
+    opt = TestOptions().parse()  # get test options
+    # hard-code some parameters for test
+    opt.num_threads = 0   # test code only supports num_threads = 0
+    opt.batch_size = 1    # test code only supports batch_size = 1
+    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
+    opt.no_flip = True    # no flip; comment this line if results on flipped images are needed.
+    opt.display_id = -1   # no visdom display; the test code saves the results to an HTML file.
+    dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options
+    # train_dataset = create_dataset(util.copyconf(opt, phase="train"))
+    model = create_model(opt)      # create a model given opt.model and other options
+    # create a webpage for viewing the results
+    web_dir = os.path.join(opt.results_dir, opt.name, '{}_{}'.format(opt.phase, opt.epoch))  # define the website directory
+    print('creating web directory', web_dir)
+    webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch))
+
+    for i, data in enumerate(dataset):
+        if i == 0:
+            model.data_dependent_initialize(data)
+            model.setup(opt)   # regular setup: load and print networks; create schedulers
+            model.parallelize()
+            if opt.eval:
+                model.eval()
+        if i >= opt.num_test:  # only apply our model to opt.num_test images.
+            break
+        model.set_input(data)  # unpack data from data loader
+        model.test()           # run inference
+        visuals = model.get_current_visuals()  # get image results
+        img_path = model.get_image_paths()     # get image paths
+        if i % 5 == 0:  # print progress every few images
+            print('processing (%04d)-th image... %s' % (i, img_path))
+        save_images(webpage, visuals, img_path, width=opt.display_winsize)  # save images to the HTML file
+    webpage.save()  # save the HTML
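[Editor's note] One detail in the loop above deserves a note: setup() and parallelize() are deferred until the first batch because CUT-style models build some submodules with data-dependent shapes (for example, projection heads sized from a first forward pass), so checkpoints can only be loaded after that initialization; this rationale is inferred from the call pattern, not stated in the patch. Schematically:

    first_batch = next(iter(dataset))
    model.data_dependent_initialize(first_batch)  # builds shape-dependent submodules
    model.setup(opt)                              # only now is loading weights safe
    model.parallelize()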
%s' % (i, img_path)) + save_images(webpage, visuals, img_path, width=opt.display_winsize) + webpage.save() # save the HTML diff --git a/timm/__init__.py b/timm/__init__.py new file mode 100644 index 0000000..04ec7e5 --- /dev/null +++ b/timm/__init__.py @@ -0,0 +1,4 @@ +from .version import __version__ +from .models import create_model, list_models, is_model, list_modules, model_entrypoint, \ + is_scriptable, is_exportable, set_scriptable, set_exportable, has_model_default_key, is_model_default_key, \ + get_model_default_value, is_model_pretrained diff --git a/timm/__pycache__/__init__.cpython-36.pyc b/timm/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c81fb8bae66e0c1f42199cff753d3490d59d24cd GIT binary patch literal 550 zcmY+By-ve05XY0IP0}W5D?Y>qa~F)liV#(W7(iW+7`#|%V~Qb{M7CQ|-iTN7%ET)$ z;Y(XoEcvIq^PTVf_hpjw$o<*u)ODOMXK%HvePg$Ju*jX!RH%THsL++O!QF z+5w;X5YPZZ8bX(Lp+|cV(FkH1LqZei(>@I7z}9+?&ZzxudpRT5M2xMtRH7;w8zm;5 zOP*=Ypvrk+qCzOWv6Tsh+J@s@cv)~|hD|QxrIzcu5~VhY%~W$KYMspr-njg|uH?q; zE3WsC8-MTo_$AvmVmW`#mW5_-d~J?)?f*qj7WT5u0K4Q>RxG(W{7bJT*D@0&&!?lI z3AV45sIBpvQYAg>$Tp&b@DTwbM0627M1+VD38Ie}SbRG20Byw2nBk`%mQNC6U|@I*#Bjg}WH|tFF$a)HVTfW#VGL%_WU68{&@IXvM6+ZXCCqC3j4eF#0 z4bp%nX+l5(-|98qf{=tRuJiUM%QbihA`(GNVn|5hv?f1;F6p{B;63P*zKdIY07Ei_ z5gEBWKEW2Tluf*GHf4}9vIp=3(%uPiU9rujlt%W{RZ z|0J!O_!{Vny+%PXZARCcNjm53giWx+59PPL2`kSQl2N@|56h5A_6!%aU^1*Whl7~rd(uX8I9#YyXAnO(y7Md0U z3oQf_>?W0=P1=|!O)ITT%N_%^Ej!j4S%?uxchBcby3j&yWnv41-AMAxTAE;|l3iQV zvubNMgpz$LM|dne=WY@?uzKgvy&{AE@nMT0ntpon0)GzvTxGV4N%sBAHd}~-sqD>c z_IdJbGRf3NvK*Mc6NSnoTa}`EJH5$t3E+ydJTI$K=lRq1)`TxW6dTT7+H0pq2#;&A OANnId#<%A%^#20auKcM0 literal 0 HcmV?d00001 diff --git a/timm/data/__pycache__/auto_augment.cpython-36.pyc b/timm/data/__pycache__/auto_augment.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0105bffba8b88ef4d210f648feca8040313b631 GIT binary patch literal 27337 zcmeHwdw3kzb>GhHd+{JeK@>?*)R3Y`KqB!VMM&Qc55g8YPa8)soOesT9>Vx*loYI zpX$W@{m$H-oduQ>DZ6d*wb*aY%-p$;bI-Z=+;h%-V0TwnVc-pSjs5Q15{d66Vt?!L zddL}cPnaPf3XL6%CD~z8SO-a1` zXu<7JDV2UP;ijfKr@B<;3i4CgciQU{k0q{HSFF))_44Q{l^h{Z6$}-HH1qwMpHD`(4UWn{jv47Iio7o7Glz5AIvkyVN$d{Y7i^Zq=*$5VBR> ztM0@79<@X5#C@CUR|B|jSA%K@_g=M2?Z&-N-LLlGey@5!J&5~#>LIlk_Z@1VdKmYe zYQH*wd%t=_9mIV=9a6)%52{DiVcdt*W9kU*yVOy24ENpYxH^IR{pxY`2Hf|6V)(iD zx`XbJyUX3}-tX>l?{jywTsz&JZa;nl?tnV^v2~*lxOcgmUB}(zZk68_cZ++syA8j4 z@Y{}GFMjLX_3lRZPIrTQhopM&SVEmrBQGY@$Q66^A$3}v0S0^38`W9d_o;L0Jnj#x zC)As8->=@So>XrE%?=>lsCo+No~~1W5DP+^>$$Nc3^ZwJ+EE>Mn_Rzql_Z(VyO*mx<-$wtLh!<3Q`|O>KD~Jk@}rT zeM0@5dI_l?SN6F?-+N1E&^espIlneMH!Q&}muoWvzFYNbdVIQedBC5Y8yK6e zjSb9{Jm1v=GpO3MH&7cN2kxcm0aqSS%c|=Slt8XhNUr|b1z-^p=!VLisMRK>-JRME z35C2SW-9ZA!kN;ntGy7_7(4bkCqd9^r8+@2kj!EI&NPV13ObLK{F1XXs$xmcS1$F} z^u$1E%o`YbXmC&e;NCrhLqdlrKS|V*={aAQe0PGpBC}7AO}S;?dCc{3BT=tg!rs9L z`iF*w9^54*#2U~(=Zr2Pak(_@kZLo?A3}@hvf_>E^!N9_TIPHEhju?Wv{y0*!f1^9 zoMGpjw7jtHXLPOXdLGGi%JnB}%JFM#5^Pt_+1l8g2gZ3}b9~I#~h|K|$UQPu6C=vc)v`vvH+x9meJL08Aw6iDx>OEI(1l z81sJnV(MJt(}_MSNTLRPc3}4p2G+dJAgX{@9v>imcMU8~Eex=SdILueAAS?i+%@3M zX}35d1o8$nI9;nA8y*?(D>E~aNLqH$a0X5LmBRk%isz%R%s#S_2obwXLU-U6SpC=d zdhL0D30(Nwdg$6KxB;tGXi8OY8tg8XYgNw=a+R406pebft!Y1!)e}<~bA4;dzG9=k z$-3pI{PZ~9^fo^;m1X_UfePm^nnQNkBS8+`sKk!nmkW}!wOLJsg50AgPM#P!GJG~D zRL~?mUx=MzO@g#Phv-2D*s0n~kSR~rDrFZ4XD(bW=?O3B0##iNnI-+KNmgB8*4@RH z&-WtC%K;>;Zu|~GZN&-E7Q~E+H_4{OCzync6tp!pRG4F0{W6cu6X9e$>N)ioYl87lqmFBdIO)M zR?$EoqPcvb$eI@Y@m+}GF-pQ(Z51r-;HgX!@{fq;alHYTN954}ZK6t~aoK)y3aU|^ zN|2~hr6u1WFD(LcWhO{@l|?t?Rc83;QSb`2NTPuRnQ=&q 
znr@4H)|-SVEY3efgxEzAR?gDfalcudi81g7<}Enu&B#lrY;e|F7;rZMsiL<6^dZPfg#(DtM6(xf5t>9kA}y;^CqdCV)+c%rOG)2~2#r=Tl@LOM#C2+_xVIuL zgeK-dT2OWH9Ae%?IweS>kCc5uI>^jc<}tu6Zy>R5J73i9^rOhjw2*F6`#!w?RMiMu zi&Uny8gbgcMm2OFYbdm5yL^>8EU*s8m(^iMybd?m%B8)mY~?rAVg%QAEvAL&7_mo) zaMZwezAkjbPFY{pR^mi9s5iH<})&HK6p-t*=i zcf9AW0IPaV+57RVEP-}9v7RxJ;z0<_@QVpUDifBN%Ck;>VXC8^Ysks4_H|no)+cBz znt^TANGRUBKsgadW^_#)y0!{IjkiRZjhBCvB^2Yr36UrE(-{(PDZP|g$}Z*V$vPxU zJyo}Q61SszxH1xgF;zlxw}Y?{+}4QnroOypo`JOxWT3L?%6wpz0&6U=%7LW<%MGmY zW!+Ai(i9Szmb=3US-Xs0d2oUxg8Op4d5sg)D8Qtt~)I-xr^mWQW zqVFPRnJ{mWctHv{P6k7#%*S!kwGy#%e<6sM0Mhh^pIV+gC`YLvqFn=B>gUYsjU zLpgZ0awyNAkCnssNaBz}yo7|EcnC=dX%RN>1`%@}G(63c!tp%F_>)j3C)?KS1oC+i zSEFWpk2)V40xv0%^`A&ArRE=kiU$exSmK!-kYDM#4Rvp@o|>|J>K>3@wQjAbIxKqp0&KylRdseHnBQv}LHkrP*1R-DIXxMGu0Z5G1GFYLK3-(XhxE zjiIEJew?Yva>SVP1FN!(^+9KYnngr^84<;DNucplYgmisU$E9%om7z7N!oL39llU2 z!?@7N`m}J)9=Vpo_Xhj&?5A#wf1{ma6(2fnt$4w8XWMdd6zM;N%jh#XV5b?@f=$%@ zR^v;f`*DnFbi$8!-O!qAQ4HJkk096RiXSHurYk=4dJ#w;reUC;M*7z* zIW0!Suf~ckzTRRBkBZexWBSI*E{sJ9J{d!x^m-AM zAMa$xVO-u?0PsC=1^xFzO0TIy9|-$bpS^h3o9V*ZGkA{!&%)Rar|M2%fD^yerPB1A zYli0U9EAPF<_s&{y0pBrtS*Me=k!7`O#kaFSK5QET|7q(l3@{I)Ew(T?G7LYD*?(9 zEyg8;jdOgkM@-?59D+65Sv+~$&X#@xlx8k9R2}uQYI2oGQl)DkpQVraG z(8SG<@rMV|t%xK9WNF@z@cQUF5xVeMYPUpJR14-`L{=dklbzNYOHbpy1^b&|^lth4 z=0VVp7``TQw-!UwInLKFNBD$%or&Ya5tNrua~mEgm)9qjV2-i@h6buMLUCI}^t4Ab zl%XxlBsCkLk2!=%q{l1R)#?_L8B88SXi;#$dZ7`-m zt>{W3))wpsyA8(4k)TV|Jt-9Cv1zQ|2K$2~Jbp0>hgHxuVt(;8(1+dGg8sK6^g(Xv zNM0O$GCv|XI}!{fL7IL23JfIa;nz6yfazJ>Ub6$;UW~@b7EHd4#9g>Di{a%gm8AxwrJ1xPpE62sy;J>z>w(Lm@Do?8%# zh!V=>I8hXIC_@-Q=|&0T#89eXaH>>Gt-SgVF@!E(7oqENNmCQNQDoK@Bh@M1JAV|R zkkr)NTWE4aOd=8uCNYNwliJDJ%}pe(!=wKm!)IYRJ`sO|ot!L*l(F0qV?YF%6JoFgnX_Vq2Kh4$jWM`2dOQ)y>fn|r@U&?Y zLGGNf5rQ?1r&xlHM%XYkh;mo>J?|Ib^-RGSdK}Il#H`wW2h1miJdF@Y zC4_XUL&JA_`b@#XB38fyss7mDLbNX0Y>@`0Q8AF`OJ13bYZ@-TJbU6 zSoC})T_+v}C7|z?V0H2Mnc=g;r_MpMIy(Hs$@4*~TAIO3&dBhoBgONli>J@Pu$r4F zj#q=uPz6Ka@Y!QQF6`b2Pf>Ra@=z*!S!&jNtjRGBgVfEgT%{F_QZf!@gZ z8a&G|4Aa&|*jw=q-mBylnzY9eKMk(RP+eWeZ(%HiXV@;LlHnjem7SgRbMTMnRmv~m z3&PEp4wbl&*B?OK4oT5@#jd9Ot}CID+$}D~)GsryRWd=5ZfMItJQ#8slWF5rP~@*~ zbx1=JCnTg$nnEnd&Q&WiiIJWt&A|vF`9mGy0-xSU@Jj?j``h{SmkFr8!$bcoe7=W( z3<^5tV1(=97#&ulg|LZma%Q<^1g61-HeD>9Z8B{g3|O#Dd`ktZJ6V8Qv}qaB!bFj! z!y!Im+9+N?5SfC)-Tr6-OaQSzn)o=q3!K?OcbrNgTs#d7e~{scDRe22djv7ke8U%8 zzE~;~-XSQmBa4|{c8&v!sU6O&z?lP!*&WUd`d9dXPIcfFtFJE`Q1Ed==b8o!z`XbIlFLM?iLA)*g(0v5f|^? 
zUjtBB$unWA&1wta-D)e~J!%`^cGU~mr|t#3PwfEQsrms2)F9xH+6A~<-4D1&JplNi zdI)f@+6VZs+7Ea@Jpy=89ReIyj{+W6j{zQ0M*)wiUS~*M-VgApu0ZDpyvUu3B~Dg(^mmZ47`z( z;z5rH-e~u?;)}NlKql4Kn(9GpHL4d*ulLz4DOrxBWFMB4Yr^Sxm3TQ(vEFMY=k!1iVh24SOYTh6^VUJm*NS*(AZU^4&$guxO(8vp_1v;G{xZ-fDogiTuiW*E?n zVe$~pOAN9fd9VS5rI;j=hh+NsSPThZ6KN!oJWPO;^4FOWh{_mE)J-YPOih3^Evbts zmd#8{9;PlUFqKlISW_3t^lvbo!Q(kT$LZ0`BaWCPF{KDi+f&R`UrLb}CXeT%QvN2M zP+RE)D<~#OOl_N}n_`9PQd`4H!BprGufAYv@VLri<1vI{zr|34Sd*0|(`JJ)F`9Uo z7!n|*m>5lp8Jad(X_82Q6dUKQt8b&(kF!>5); z7cW*~Yz|{cU4%e}FH#pNb`!IwAVEeo+%N$zojkB-%VBsZJ;HSwYuk6zX^_Z!0I3s6 zYHBz0E7m?CrSL`Q5l3AxHF&V2>39sG*xzTUL9EG2lWDWTm>5kw;w?x@ zF);)WLEX?)sxQTwBoZLS#(67wm>6~96wgEOh!aS-FELEzgr-u8DOTztG2-Kp1jLJ# zfXyfm{Y5G#dBn#dDRvVvij711w;Aw-C^S9_-F}^3dKDB3hoRKX3_*-5!_b#d+N%sh z*y8{T1Uno6vA+QjyBh$pw*e6Q8UV4Y0T6o{0I{P15c?Sbv6}%9dl>++lK~L>7yz-0 z0T6o_0I`Ds5c?MZ52;~5>|FrF&ILg1TL8qa1wias06eKq#cUz&#AvmJMn?2IP@~A) z3C$WdpXPAGrW`7|!T@O(OU*wiFC`kY9If5?Ei z9FqW&7)gZ3@c{B$|D#wQ5+E^*#FbJczzBn8fa%DRMDQ>%t}wsfC5Q_piD3c+(|ECx zBwlP2k9e_?#MDJ%e3_*fp(L3~u?F>Krg5PprI-Mziy#&kN^!)VWdRa{jT!IkkrK>CdI~MNI#0lSV2Nb5~qg*#ETVrNPt0XEUKId zXd-6tkWozV*c4X3Np(|wDJ4#zCOr%rBtT-AQUs59`;dTmDJ$A(6S3D%u@!7+#q>r; zppCEAReysbG8r#pv z2#S|CbkpTQB$CDYs{}@Bh-~^N42Taz5@5uLbntk9^u>P~%R>Srh7luDiUb%j(hP_X zM1n_L(0q-V#-)bDhz~^ZVnr0ii*4c&FIJMoMdH_4N_-%aV&emmWNHZ1%+v%lNf;l9 z;)scy71S*h)fk8*AU+UDDe-~m_lTH5eFc$4tV)tNfut@5F)214Lk6OF3}N;POeKcF zL-L5n5F$%}K}-gsIOm#(89d(2Y6u>i!s<(1q?GtT6em!V$nk+FUW(umZy!zWn^IP^ z(44TdpV^Zt^-y{yg$UvbLO2Zh>vrko$o5`J?WZ6NmpQv7_)-G3_;o)#U?}F2NkJpFkvR_0b&7oAY7UE*m zU&2$+J?wc{F^(;UN3^bKOjCT25o!dP2oxCg);84r5oy|kui;{PE~t-ww*Dr<%lz2* zBgGEk^0op*OBt~K3kw&L3j0T7Ap>eI3mLMgbuQ=_#?lcp#oa=BW+tR3cCuz6TwIMc z_?8&VsLZe6Q)oQ5Rz`mUA1ve$E@`qBdt#-moJLO}b`qBqw+#>BnqzE%#8lgSPu<1} z^3J6s7Dj-7OV-6TSRu~EU_d1)OHB28BnpvAHD{Mc3*OwAnMVq2Y>Zrn# zmEg*3T+2~iHhD}m)3^lHGkCz%uS_0uvUvUhyb;+Qm>~q&&2(@QbNO;+0xQBZ65`Xu z7kgM4m5rv1b0dqn-q0_8U@=+9KecG@a3U3rTp_v6;kxe5*sw!mbFAmH$;lMlKSzS) z1n59mcY{lqfa`7LQaCl-&h~u8dPe^hB<@S;zsh_+ML;J?keb56d2mHq#-|x#t)Tm7 z5L{rf-FP6~xfF1nz~$5rQE1Fa>t92B{aJ!f5Rie_pMm}szW#(Ah-}ZD}Wce$5R7zzS9~-x@>j+z-vJgGfbqj4E2LY4?|7~n?*sAj0RtTX21<=KQ;P*y7H;>^M$qr&*|O1b9-OEa~{VZj90YhH&TRUBLWXbh&8ZCgx0DqOWJ;EaQhBI&09kN z$DcLwml9cqc4x4AomFaJhLz|zEup3rwxrzS2X{!&MB4?g9i)DE2#LXIjo)V`Hb9 zrT>VSi7qCpk|<%KcRtS#R|wuo@GOC7sBh)d7{R*<-b3*71Xl@OAb64B=LlXRc$wf` z027U+>=WTilCQ?r;fEm~J~;8Vp3fco!$tpK->T3f)UFi4MsN_+n`kyMl=a6*~Tb3fWSc_YQeT_9eIOC%&OYBgC z{yy_f##U|qppkh?X+qi6%QIeNsjO|NE29(t37*RgX#A0ohj4k@03!W3FPbrz`i7cu zD%=Z=rM_&-g`ndsTAdCT>7bg{9y{#?A1r^mGiP{H+ z>fB6my0m~}Zi4Rc+>J(Kg)${t(~ppk{?7#eg@D>ijKH73>pNIgh9_kC!(vYi!}i)W zBOk`)?E|28joo}$<%mmGHDV=W4$o<}hPE2_HluOd*no>Y#_sHN3FpWB0WyR2a}Bqc z8s3V3qgMAYl6-2XiBgk$iM1kc6L!YhV3hIJnzd2J$pWd_Av{ol<6$DW%Sz6VCq*sa zi1#Ga@-?ECL#b=5)x5Y#l<#mwCl#*r$YQd$dSEfL!vVJrEZT+qN<~+A!>YvfT-~l( zpFyNwh^i&KKi8noFK3upgK3AY%PhaVIc4{UDCvbpv&dP6bS|eE7D;&O+L5w*2x>8w zqC<2nVVN-%TI|mS&A1qODFdZ7g}wOc3$}N!pL~af_l&Yd3C_&_i+To>{5_mGkV1T( zB0z;TO@&n!K9649&$9uPWt8CjQlXw#IVi!u1Qi$R?ovm+L*=mm`rUqp>M!1{sjNb% zD8~B{yA!cLFQHw2j%lJV-T3kqKhH4zWxueLMrnDq3ZX$5bIF#F)d=~42}w%GEeQED z6Oy{PU#;=>`$=`HsNegcesjq*b|9;stI4HKE~~$g!x;?sD!ZET@eVw@{H}T@I(3)2 z?J73`tSx;KjZd_~NI8!+*Pn%QuH}H9|8wfO*U*}IKv}5@ZF<}x4h;;P-twH*I1FGK zN0C(lw)1O4?T=J)p~=mw<31^ zd2D~hnO06H*oi@HHde;#G&~@Sv}0D>J95+$&7R6Q3*;^axSRncPL&+rCN^Dt#_&iWFYFf0Mq&d|=wgDnPKgc^6mI79t|J4~DK96UDY6vZONh6d$8 zzF5J#UYCs*tJhCw4MG1HdHmlt0c79zhk-~_7r!|Z;NOxQxDpv_aT6s$Wu6p$73DuzQyhNc z*r^jw#<+T;wm_j-!gpD4ixu!^q$jTEj~e44T;8JqkrhDwzehIvS+UK21=RsxdBz-B zn>9z)=FE|`d9?<5z->Vngb&VRG$N>!g?7b*DiO5I*D`Dlm9 
zl31{Z;}1JleiJtlI7iE3x!{#|wh>X^x8Px=^@M|7utO3@#UzTUh9#92GXjSZP0vo2 zg7oEz!nsFj<<9c>Blx#4QY+}tOF(*Pub@AQn=Y^gNn46cUmm=LlZSE4 zluSj@ItPoyD>&$eG=#d16mCA2JEk`{_lo&B zOlOVbh&Ay55@LHaCNR?TvocjODeTG!yN-h?8DUp;iuNHVPspyUoC#L|yWo8hc7Zu< z*yV#c>3Rk!I_mjGNj*4O4mRNZU}bWu6V@9h(g1T;(Z~I+sqT7uY8Ae$=A;{@YPzOw zp^b+oMf)uuV>1|>CfR*dwZZKPJi{ASJH%sVYjb|zn($~zYA8Vtr(%qO?c0xQ8sZCH-3fMQ>XeS%d zQqWd5v~Md+9QhR}{URUC(|DnG36~vumnP_oN%Qy)aKMhfv5V64gjRrX`k}Je-kzAk zqGIXtXj4_5f5`4h5rFr43Os`&tq}T~gw(`7$g7I3lqaWgOi3i^p)$IDDHQdqNR17o zW_;&yL`}V(-&-Y~%q(sueL^=aH_v9Q)!1dcaztq3lcN41J%?f#wP^84;T^k+o4blX zo{fA|6mg-C3P%A%KB|84QMK$T&i2aScVICGV}#?MJjoE)#&?y8P0~gbYBrY}@?zaf zVu*9j+9rYoUaS>^qnWL33_?6pDjfGYDjfyqJ_UL-{aQh`SX8w#_Beln;UZNdX1&Jb z?oW_{Cojt!axM+=mdzD%t8=&H@|xeG_Q@qdktB0~heA8Vl~ikSu4Zxx6Sg9yFw&wW za@feg_k~GYub*=sc(+wfd|%o5->PT9q0hnJWkVX~;q$VWAd5Jlr~)Q{pS^g9zAjZT zzOMp&ThJikd3qVrGNn4^*k@sLV+hj7}{8u4u%V(SUZrnifpGYME-LBvD-xF{F)4nX5L&4+)&97HI!d-prV^8b8**TY;{UlG=L+B z97(#vndUk5htE6_t4iGM^g11BD|4fBiPW#m@0{7Q^YZ;W)vgxjk7@nlqiV3s*@N{s zLg$bN_cvZcn!Y|)4+@HUL(^^P-TZWZ)6fd_7ua4$n~tARp38#Tw$Ag5*CRYgh5ALP zeD2}XE`r?vi+47s|HZHkyFA=@vMfY=Mwq0N*+6<-V1q0hYC^LM0vO{!JD-oZd&KcW zajjn=pzSW=ysvAyL#JuaNjZ}HcNo2$S4Xs>)*(JvPfEa*U4%F)f+S5}&MuC1oG44& zLbE3Pt@P&5hZA{k8qe_J$O3{*tY}hm)};xcWikMC`UY-1%NDV4Ps+USNbKPa`c8Zb zC)M^c5BkP}B#yWb4`By}Fk}g+e+6l{8gY7#SR=PHprcrXd)31lAB?U-?YkKH;{@Xb zA0zlhf*yb%`{;?oPvFwO%%F_~4#9SUUV{4wb`b0&=qGrH;3&Z{g5v}y2%aPuC3uFQ zNKhgu6SxEwf+>P&f*FEY0*_#h;1a=Qf_Z`kg0~TTh~O^~yqDm22);z{6@uR<_$I-( z2>yuR`vm`!;NKJcF@d-Yf5@jF6Z|Pb)ZjE37}E-+@lgK>a{I$WQ69M!Z{A5nOM&_P zts_isu~YcjSWl8mb|!PVTqc*sFJtabE*1U?s}N$I@jZ(x2beW5D=9L}2S3TrJcl7| zVJ`jJapEy@sqUfN=G+!sn+x6Dx8Y}T7@pa0f2NoaB@z;rAQdSHSAheHDnyhx70`x?29PY!%Gnt^o7lT%W*pjR zeZicooH_RZ66gL1)C1=^R7jk1?}-!7%r3MN!q&{ZpU?BY@6PV~tyaDF*&D}6gOEST zOFs|t$5`?vHbxkokO5&1b0_qOaB}K!cS#3c3J>#`H}aUzf*alW4O}WY&IZH=gDE|klg3`JK`k5miSsOS^ z-jXiDB4oh^Sp?ZJ3oYg|_`(YFnZ<;rLg4*oatnh8OSz9wOZIl*isvjUDC&M*P!<6% z%sQwMmP;xE*uZAE3ZMD62&>_&}k z1o@4!W?(!~ra$QDFGBwjdT*XN@cd+EWAq|ewce~#`isE$<*&BG3LWf?iv20rfmgCB zQ+*5UEz9nR55U%{*r8c==wPn_y;iY1V%@Ssuh?sr-RfX>G{-OpbG1DF4mL)~C}G@MUVoQ%d-E^hk3PfmuJy!r#O zuBX{-u40*9aADpM9i`K;RrQMEQpGR)Iy_F3IhJmvnc`xWPlhVZGwn_JFcWxvh<2r7 zI!Z)7i)V?t*x4VdVRt6-giD!b<8G2?qjWqMHnBU(MK_rX!86sJ=8R9e?{pb5Bv(5Z zVQo|HbT{SJiVjbPa;TIL=1YK*0VUNiOStycd^X|Qo1{|d_O6s%n3DQJo@p1g z>eoKGyL-ET*guN*`v<#U-Z_fz_ID5U>VM4lkM;-G{)yT9;`4j)y}ctH#*=(J=0baE zHp;c1Dn6B_SFzR9-cXEXS($H}(5<=Tu^pZns}9e(IL;*>)HD8lTuuOE&SLu*M2Mb* z`4Wki>5gWap8rC`n^6AR@0oFI_byH!_7V(8_WFB!Uv1yq-tNh{;PI5Jlbp$(;A3=s zd-t%X(&^MDA>NGoz1c(ks^Bu8ob%X#RRu(VmlJG+dibx?!1d@Vjhs66HLNBzQirZO zMyk_@Ms5U48*&rUIwIP%<8-X||137z2@unvuRCHBd|QDI%{Xx#bPYwCC#j^ajk&s0 pPC1I>Y&hj{EY=Zklk7NN+TMagN4GHV`GkLDF0M2ma#Lj8{Tqm-ovHu; literal 0 HcmV?d00001 diff --git a/timm/data/__pycache__/constants.cpython-36.pyc b/timm/data/__pycache__/constants.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d06bfd16e598931c81ed3c7ce10316f647c9ae10 GIT binary patch literal 598 zcmXr!<>k`%mQT`WWMFvAfCN~8YzH7N)&deK3{eaz0#S@9f>BH?!=g44P8uK=nZI#{N0i^~D9YT=(stpDf!qUrPV3{qt$g-8s*H z-?V=&8x-~X)r^U9ZMh8%0o*grpe1`O<&oX_CU3KAkF+BLI6mlCkaGYZs8DgNdN7zC2JSoIeSfxTLLbwZjPZoA@R;Z{sHj; z&LOu%J$)VBUHx1`;$c$0u8w}UL@_QOWq&B-MU}~%$Dr+d2l^?SeH zd%C^2=+}Pw*6v?d4dWlismn(FE{gmDf*TEkGj4WR+iY76OV{m&t?y35(Ra7u;%;?H zU9aIWgWKHcl)HYzXGZL{7vjn>Yb?h6rAPpWR-CidSKJG@XIM~iQJQg>C$C21e(7GGR zmy~xB|v#gdh-nGTh^OqjS^?r=!&Z7d1(hS&gD&uB9PoBkLEJP+`wWMqrclLybPl^hOGKobJOC**d{%i=?sqycv8i!79)duau zS~KIZtZi*>eu5LcRg--Yhh3a-#$`>!tt3ky+^g5}q}$blw174@dWXsj!z4}eFkHt+ 
zCCy@#SJ^6aN58=vr}e)v?%g!0CoVdIH57de|3YPGw2dLVX#C1NGLNjot9WMO%Ft|^ z$40u8TWx!29&2-m!A_pV0;afi(2RRI&Qg`aPS$G0LY1CJB2Chk^62Yn)Q!ageS3w*SHi8G zouBW7J6n%;K2r-}2n$T|xEqGbLg%SPRUL7T1~_`L%1XK|<<~r=g$Ilzx2N&k-XR?YWc@$lAryXk(@=<{Y!)WNps;pi$ zMSwP~qbk!Yom&$05n@AO_(SxQ+A!H*ZEjlKp-Ik47u~7oDM?$H5K{suaH7Z7cw+5? zm+`u47ZZO2b>$JLlCqT-?aHjv&%dH18uKB_cr~+5G(M~*83J8&r=pibJ~&;@F`1Xv z0xd?B$75yHDwcE&&4LbOPAlR%+U}u@*RVdLk?D7wdVRYNHE(b0;t?d!?qDZ=7 z?@+xGhM)JNPVq*N%WG2*-=Z4rS5J6q5rk4gn~aMZ4Up92IXH0r^L=m4vwTa?`zFfd zA^^OLGRq#znl*?$3f8a-!UTJ)j5x#P=fE*bV3>kER@K_aIGkv=6QACET{%GFB;D3d z*d&vqi|z!t`V=0bq8X(uE4lHKOXBJP1)rSf?8JS9dMlh4vU z3AQB+*doRdwBo|!$(RcmnGl6WfvR6v$&^wysArADO%m^tAo`x-!KpLU$45u^07W04 zQK`VYP5AeUS#ZSQ@>x2bGR4@<$-t@5Nz0!=9NZ@}zi<5deQ*WgEj&;o?Auf$%s*nN z5v`ZS9jc9R8Aq{aegY%<+o&ID8LKsW{PzLAR{27s7_}Yox0xOYrlwu02%9M1_ z6ZiQ7BJeV=@I~BxMBvM~FYpx@#Hw1n*KhqSIT(fAgUwMTcrGHuHv$<%fExw3LD4hF zM3Cv6qS=X~v=;SS-8ju70;`~#9K`a3jBw^iCsz4=yx}0du#m*VMo7MctVetQ4b%~| z)3c8J2v(i3=ew$MdNx%~v8<@0p_F1|M7kQ>)N+XGGzgc+*KLZ&ddwsp{pn>lDCh=RP2WU$srWFBOD!R=s@a!75w zMeO+=hE>a&K9o&;L5MW2h_uL^h1P@Sk~{SH0~%p!E>Y}YUN;nYy+DyfGX~;8;)ucJ zIlas(e+EGb4L8Rhz_x*!3q_mCLDq}hgj`6u1Qsb5+Jt_D7KA`L)nJLIl8WF{a|Jio=#md4$(w`1C(88miS;5b@Y-{!1)s6oGZ6Gv) literal 0 HcmV?d00001 diff --git a/timm/data/__pycache__/dataset_factory.cpython-36.pyc b/timm/data/__pycache__/dataset_factory.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3db11e8c1315bd6edb4a4f0ec56b403fac6a2e02 GIT binary patch literal 4141 zcmZWs&2QYs6(?u8Tx!3xve&YmIQBF(5UnF?CA$e)!8NQ{R;s#I97|1NHtm|)nblI_ zlIxj~gHace?t#F^w4upL=Ode@}Rbc?3JEVuNf5uURQUfwH;uT2C0Sl%r?G(z%K%54L8#jR-W z>IC<1M%>kun-!Q7O!sPTtvBP&m%NJ{Jo9!&8@`# zJZtZ!ZF$X;T|Wxbk+a_OcK8NYNlEwCqJVMHFq5hKf!F5p`o|w_t--LgzTv5T;RU`_ zFHJbKL6Z8q;=tvn9B;N}`jGKSl!^YlAhm}sM zSW6F+yb_)tCXR0Wa3{$fctMf};uJpc9+B z;0`AyBj=4{qRd^goBPIy$q6~m>5&!ZPE0W$=QfRB86A`5nDxv#wqm04d$#y-Y^nT- z`D-Jr!Hf+vF3i|C^Nj)K$ZxzdeudKcUzKsqTOxDe&DNx8$s0e0(o-* z`5T2KucyUX*ds7D#d##}07vJS)`RjsW>%C-ZVJMbs)E^U1m=*++~1^*QHL*+VPzqr8iFGyVcJ^7t^T#5~4fopV> z6&t`)PMznt5|=v$v*U_y#wE>JeNG-gDqB8PkPA-CrzR^NPsapL zJTZ@J%86^>y4Id0{ub9*={)LEhLvepnbs>r{3V{o71&o}7tzTx*Zl{p9Fq`rnF8JZ z#j4XB0B|=s;MLx&$H7Y3-4Bs z$@p&JFG127Eh!lBex-RUZ}Dyc?=134+;vv(`g5EE>EEjA?(}XAVg&L3R?Ps~7_O!| z6`LJ1E&}4!u+GjsC-O4rdOkM(VEhsAdTNH-+H&vco&Qm%H9y|ijtj99PqBAj*qP0$ z;m2?q5#@=~a5O%xp6#~cJiG8BH{2IjA?z=Cvyf?MmH=rMF*J>?Vv)jcJ*0h4@KANR z^dj$J-CoJjFO3PS3$!<%tH}2BC~jrst6a2yxKmD%I_QFCiUd?3pfd#JaV%x(S?H zJd}~>1kvHuAqzbeUcV2D_#sVS4Sa}+AHszJ7)}p-FI#;5y)lyS1p$cn!T}fa)oF^(Gw1_!A0ieRvaV8!G-p(T*R0Cko2^wi=2b3k)KUwYCR} z%VmmvZK`=HI1~~g)oMd1NV*Tg@%6;0W`0``iw;8+0N`i6k=6YU4I@SSf=h@la2qxe zHFM~z*>%Ti`4ZksP+GuEmT6mXPw}z1km@zs1ASm;7d#36ncwMf4BymT=#|={3$Iyd z4Uia?L;(BN>e?pt73Cq;NRrC#ey0AN$$O8P*fR|t+RC%oHwx{s5>K}ol z>psOk?Ls_hzj}jR@IG7z;LZdB)_Q{KsKjawD<mym z5AFiVOX@b0n?~aUZODm$T+Q-Uw7tvQN@H_mr_T1uY`(;@Oq78%h<;%w&d{8q2D5I- z4@WhoYvJJN*8PA()}s4;I2jUTMDGu_bgo)jSX#_Xo8_y;g{7qrH7e_4If>QT4+342 zC1%SlLh-Ydnm_C7LK2GE!T}eU_L36h${i?tFc~LP{b3b+^yyowcr@gPV7;MA=zNn2 z6qEL-iXNI|;i#%T?5b42?+GSOAuL$pB+f9D-Lmer(hyDTv|T&8@Y8TFgb;Y`n4ztY z@~Q*HT*FRmc-KfSQlX^@e-|I%F~zZ0C56mG8adI#wW(1SNQ&e5efdB5iI0GxaVE(_ z&gUX2!Ucqqhra5j2U(Z}lIhmgXAf@wsP)Op=KAf{>ZW)f#ZGrU*_tqKQeBatA$sE- zAtI6F0v?L*0;?z?e+BtR*bA)Dgo9*ML?(G(GGFK#PGH?8DoCn88Z(Kd%fxZ~8i?_M1Lbqgtm|UbF=wG)HZ3Sl%UNNKMXC*t1#4Jr7hzO{Q5|U7 z)=8bf3YmpH1m@~F9G^4ACm_G!2rPZKHrCeGHgpIy&S?PM`h=S{JAVpz`$ w4V!ZVa`1k@Z{nwvyaB`^b8t!g;y@@yJyIhCM#Ddcz|!CY$sFW}%R(>ZXBUxHjz6O`mSe7t${=Dj!X zH~jwca;x{VHwS-v#j^fkEff#!JE-cXAl%}1V)e_f-M7(q5@+J}UAr9f4r=}S1B<)d 
zdv0-0xTkKvfqsqG(XX4n&+SK6r||;vto@E%G#;_ZG!ZhQfq8M!xPz*`3nDDqs_$^S z@A9S*Zuh-XXurlA+<$KM>)e6ZRz3mkYtR<=_t@yzyg&Il+Zyo1gkLf=yqEH$_F}#RUp(GI8~aZkqDS!faIU!XhH_6H1)Poi>2Hg`K7~VG9JY# zOXjC8jYUWPG)i(69|_bN(+hGM%%qM~q|3Hv2VIa~?2P!@m zr8)1)5qMIq&LxmLAYt!J6fDexVOE_0PAZquZfQOVrI;o$OcmV7reu4TboPS%d@{h9 zv1w&BpQfwm6pQ%h}V1pKM}nT@`CYMnSU*UaCk=HFYG_b~6y z?3oqTW)5h5=7Kh~ubT%gy)?59YNs}Lu3Ko;U$agj7xQiOsMcvbT;>)okh_1p{ZKr~ zW894k2AQ(xkd2Jz;YoJH;)D$nF@oYK|NK#BrSQmrMLSmE*>urljp7o-P3)Or7-o0P#?(FXNR4zq05qg|) z)std`NP2L0zX$y%W>Ie;LaLo*clxaG!w|uu!|?0aKy86owtwkYx9Jzy$8Z1J1l663 ziEI*qf`nz&QB8~_XlP5@8XiVOz=x;yi4CtKIby`aI7T+C&z(){ht>)489yI*hmN|f zYo8G>yiL5QjnCc9MJ^v)oVaw8)*%+`edJRwA*a^hhmnx0P@y#IHMEMAyGjXMs4O+^ zHD)dizDCnttcC12a-BMjIOQ>NmvKT#uG2W#4s2ogZS+(d#Io1jE!6Awy7OPGWfq=U zi>yY~Sc}YtQ&LRh9JVBl@F86;tI!BT#8VjVVos6CE&KSb|KugZ-EsE!I}Or5WfKvG zMJo&^8P5}Hx5MyB&XQ^d8}KX&L-{q@{WT)56QOuqAY>2ef;3Q-58^L-PHWX)Hlq;+ z&I%px4yq!hF977K0Yn1GCT{^4YkY~f4L~mQ6?_{8DA$U$hhi$2zMGFK*kFH#@#YZn z)$q}z=!HvC0u2Enu$Fd17H9d0vOO|zmXf&x;IF&L2oCzIe2gQ?oO|grjSO=79WvL6)xS&rq2MyV;$UxI>GMF_kQrslw_rP zJNR)WdmAr>sDhTqFDQ6k&>fI;4rn!ls-|A(3xQcCF0YM@O{5CquK?So~# zf=dYSnbj_V&nm{Lmf*8CYfS$zb50twy8NqIdF^aveao5=SK|!0*3+#^(ii5w5{wyp zbzYHVC0Qdv7fjMclw`e2SW-09I3ErZQG!qz+p4IEX%>woB{8{7WQB-Hnv{N|Nt(4` zk^u^@8}t&1h;S+9FxK(Q1a%R) zJLkZs2Ti)Jo4PfhF>vPm)*9lW!rkZo%(;(;$_dF@()F1`(1u5jL$Nz^E0A-_8R7wa zeWBX`Ia;wyBd99|jWkCh6_Qa(pesa$PDm%EY^?&2 z5rF064FD{y2UOm-KKAiEvw!bCS%2a^DpNXe?^W;`w{_gY>FyNvr-dC}z#z)_U$$-$ z#3Er#Z)fNm1gcxd?_A{r19;tANfxn0-9}@+va?>)Odczm59TK*mL@C(3WZ7tF26(C zP$U&Tz?@HooSfr%QP&yClJtTp6{ld88${^!bPnrsle#xRI$jCw_DIs3r{A73PJf`x zE6jxA{ICYu2=@f4BB7S!+ie8fmb31zJ1h2D3Ha{Sq4z)Fyix2*oHv*|BLymq-?+lU z2HGRTKuT8i9<8F6qF)gn)nwJnym0^5>aiu_0q;?Y@Pc4 z{$u$znB_N#P!J+3e1qC=5P6eGK;+V6;wFuqZ*N-0?r&k@T~tLqzv`4FLx{Tifmh*scyAi?H;QilkGxh zx_i8ST-NQ*O!q|n1nNardaBfCKT=qkRh}xWvZvKgvU9A;jy+ZDb8MO&$2-GLV25d@ z;WvxcNqh;Id9sPGiSedvtgC?vk_g*~%A=zwLGV9pCX{dn1nf(XEAr^&s9D zG%h!L-Gvl#!R!0m3ut$Pc!6E>7aNWD8qD_@TfEV@%35CQTBEVpSbUFNZ(g}_1tae_ z{PQemwZ7ME-Mrphyt&wF-Dowun^#*N=6Khxz5niw_t~{Z%eFuEnwvl??ydVVPC@uB z;Eeqp9;|P~&f=BDtIqvi6giKn-StBIIkC=KO%?gR$K$~3plaRma4U_D{~Y(GprhL4 zK5og4xfe#Q9`DvPapLa1rH@wDRvx;`E2~SNfAY}1x3aV*W?txi@NgO6r#|JleV;7{ zQOtwJAokeO!JdI6&)YNE|3Lp?Uyxr9oS zY0tEwk+xgv1e#{F^M9jZ#F8+%?IbQF>Ylo+eAyqGv~psOR!+1{E&6-HX8S&%Gzqmj z%x5tADJ3xyGs!3FjB--Btt7cgE2rQzM%$jOm*ffC8;TCdt=~RYww^ww8Ab~Xf9RCO zc4<$;IsXpx%gkW8r?{PE%td>2GCeW&)I{9`T=d8CzUb`g!LuGY=aum}FAPn>^hFFu z*zK!Zomb#yw4oOEHGW0nX8xbJ{pWYW?a~;x`4l%$X=-wFd#bAJYg^U?CzjvWqFb~! zv!E^tyUbZ~TBIGCQac2_&}j|AW=xe{KMuOVE`$tn;0%H=zUc&AZ{3faOJU#fIQMoO zj6laUH=uf;IskM1Jty`!gc4AYEWZ=gcqUsp7Vq?ZZh(8oEDa`BsN(^#^bCr15bev- z5JX~Ng}>6a3?Qn?N&lcgy0ptzgxcU`fXd=`%L~kly~Tyy_6~Fn^P`28J9i#I{udXb z0r%Z5^h1wD3*2AFsXkg-Tfnipa?(OaD$2|Ko!{BFI}lgss{RL;2o=K1RCeX%xasBi zmv47^O|KJufXWE?|8)?FcunIKI>glIBRPQT>;U>9ioLMui(EW_ z@)eb(DDt7odf^J^JudW)A7YH?A#%Gu??oafNg{I529&YLNh*pQl-s&5>=y5J-Eh$D z?+7jIi+so=QH2@%VTAR}hv_%hgkE)PmdG_c*bHIIj~jtG>eV0&eeOzx(w>oO%B$6p3*iX>N?Z+3_#^1R8B(a zuY@w8ffYBO7(7mtcCKyhLG0Wd>Sk7lu4Cq=!S|tKQvGbP{4<(mhbALvn?Y)8*4lRy zftj%^KmP_NIw%4{-9Ew!QUF!}Hj=})4`1SC*A2oTcHNhWq`c7QI%faAnDIY2f*F5% z)Nw>OS0}iQ=~*-C#Xj#fyhhLo;vHdhp^ZeL>urOl2VpF1h?*PqF&Nc~!c3J9Cq_?& z!e|bdCyXb7{}m?tUMTb^X2NLouU;3%hSzCv8^EFh@rB3W`kBj65Ju!<@CvQ;E*0-l zagAW}(13)LtVo0`^rZNdQJbK2gapDG-SHn{orthh3Z{AyKT|c-5`LDdsngo-$yd;3 zeETGKMZ6@8(Z8dJl{Q#;PZ`1k9%|eF47LNSe)*3-(S|zQ4;nSdx%kVrsw8@w{ABH! 
zI)w2a*TIPG9A_9cAp&qMz_x0;fUgbLMi1vOQheD6AWBTxEbx7*B|1SrM)dF_<-YO- z1S|S2v3pB=(Hd$JpEA~}5Ke^A5bnp&+?th!HG`GHV5~ia16K-rq1%A1*n~s3 zTANBqd=&>5=7SjiWml3)m=Jzy5TW>W8aARq!<8fy1`D3Jl8_?T0;|OQ20>Js1J3<0 zcGJ0{_>~7aNE@|0CxyvBB*0uN;89$+Lw`Glu=EFUy(sIc9wx(pKB#(WtY1d_7;4@9 z5!Q+>piop>HMA;JEs@?-Piu4NnbE)s8uc4cxi4b{xyqOo$P*ybDm5E1GkI`rTq_y* zHJHQ`=##etn=cPDHuR~D$h$6#oa=4^PAUgQ-JN}b0VT+xO=)AfDf!UY5}S^;B(V|s zD0R_azAgVDU_!j5cY zDypsR&L5%3p>>+X0QAszj4=iCM=*xbBb;4BVhi>-fGocUc!?z^1i(3xTi&ML!z?4m zhv<*U&m;ubYGu-Hq1W|YSJknpMpEmZLtNd2rZ}5EIX?e+p?GCXW>Rb z&s~JKZEc#-pU@hp8=D&6j#DVDU0`+!C+SdJdrpLV?x0A0mxrhcgM#V~k&_-nk=1X0 z`pr*uA`XG@$0%L_QIa|m{%IU^BIrIyhL}PS;eOTQo6ti+*y;%_V4OmPlx1lvVWy^# z44}_M5n(>a=Td{8!FbIOh0zL3RI(}!Lo-8nP8R21(!z&12kbD02;w~e$OuJ&{sWi8 z{Mq|-X568X$uc4i_{_Feezy4c#{^R??BHtkNJ>DcYn4XJx=kQA6?n6z% zB%VtOyqX;ASC}?D2cb|Cm_C>#J1Himq?}Zesbo4ip3EdClhtQ%zn^6h!B5{8AmBf2@SJ3?-h~ zv=QY$QkCHaRoR_S-e{j-F#a&ihhTpUC5KzM@MjmkP(lr+-N3A0Bs$7|k}E0lYla#t z4Bw1rlk@UEPbY68FM-=+_A_gEJ~)%m0!74I+(UFGavQBq&=7DE?Qypqkg5q8hhoN2s8tA5qGPlUCuks32F3-$qflA|C>YOjZqX2|0O~fy`61 z`{_6ZMH@O{53zshQgn8lXwdKXI1*Nf+&Q}KyXHAxy<*!>_xNZv(WQWO}Sse_NUdNFxN1GPL*}PPBNSFHyjPM`R@YIoGaf-i0@xa-|^oz+nPM$=n zkKQ79o0L}S(~BwQx4c1T9Lx%vZY&J|#ff_u30C^O=0GsG7*J`NS0)A zeZSY2cA<7r8XM#S$a$0Dw!C)EXqrz&i;BY>Q74yQ#vPpQk8e;BP|>D>Tq)i`QJ*44 z0sD@arCu3zq)`j+66oaKx2U;I1^Gs)NhX&i^=Nyj)lZUubm_pZFHh`dVGmJLa*pIz z$swYYj=4?nL{u+kEjXxA>Y%7dH;jZ0ILg50sLn2D3EXt@Ut^c(&rv9^8*pDKV)<^r zif$mgfH`VKn>K8%in%iyTwtt1E6g8Q2c`ghRb(P;^(!xLl${LmtvUjFW+3x`tOPUh&3z54S~sDP@|@h6vExSz(MyS>h{A|iL9hZd zo@l>>R%9yd0h~ZMQ)rYk_RM!B)CtV&f=S~INVlgQUH zVFx1$4-{BhQ^p&+XJ0Bd<4nN#Iu3;xC3zjGA2|v#^fbu$Iff+Ly~3|0Pp?dPkVqoR zIPoe^1R zQ(V$72uPBM*pL2mlPBmQP8_!_x3$=5PPk4#(2(_aH@ z_hTnxC@0?VVka|Nlz{f1`24bMJ9w7BJ-1|yT3P7jT*_wF~GJEqTChi`!P{nn^0X72Fb~{vEp4U za&AIqP2GJ75|X}aHS-981tMnFYl4<^kTi&yaFMJ@n$q>-@63#*Ip|8AQy&OV&U?+_ z`!xS2C`3X2N6_naB5ALXMA(5@U+2X4@J}fJd|7((m&qn(u02c%{+v5QAk$-e9J#IZ zPs+?t%Z#WO#?d7BsvkdKh~13ZsqrN{Ez=@tUMwfW%rq!`i-s;y(V&8~FCSZcPC=C9 zSYh@WZ69`*j(ULd<9iX-?KC?4AP

    5ej5FC`vuTo35&R2e5&wMxIaulj` IfA-k_0;Rz~6aWAK literal 0 HcmV?d00001 diff --git a/timm/data/__pycache__/mixup.cpython-36.pyc b/timm/data/__pycache__/mixup.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28d2edfd573673a2ae794285fb08c7af90a99cdc GIT binary patch literal 11571 zcmcIqPmJ8wc_%rX84h=LwUXA-N>&}#aUqQ>?MiDWmQ+=iEm@8md*j+_>~5^m+#z>& zX6K(CInwTy(;{BGC<+L0Z)q>N^wOXxa!k=2ilT>}3iOf#fdWBuaGaA*Jp@78-|u@I z?(Ayiq=h>JK0dyGB;WhK-}n2z$LFS}-NmneY4zJz4C5!p#Lq_l299_GNocGXp&42o zbH$Q#X{8iC5mv(LhvteMo(P?=hFm#38M@&Va+PpCtcTOcRl`%^k#H6{=T##-8Xo)5 z2#@VrD>Yml59e?-7u9x4D{g4LZ8S~?@0y0;z0ukl4!ocjdN+qjx3%TEZw7-%#aCTi z3N!|QI)b3esW8q1$BHH^<*%rl0O+09`XG|Rfe;F-rw9Pw9? zq(yKK`>kg5>3`$bsAP5rGn(1=ThW88obZ)0du`b1WOm%zi83c{21ybrzfsQ2t!%0{ z?D|bke-vjRWd_1D&PR#kA9zcth`oPK_DF$|JmX>q5$z1YM|$BVDry!lPc z$kJkrCGxwNv3?jYs%Rae^6E>=i%F~7l}?KyVGD!ptlIBI{zgAJiVvYYU-#dr&DP)v`!oI2KttFOQY$@ z(v#!T87Zw}=ErR-XnSO`RNAt1Hl0pq(j)2YnvK<|2LDLFdOHDF1xoM=h~lmGhrJN< zePOk~<$CzN8ucQAO6&!mLfd}Vi|-8s6?v;@;;jZTR2is+mHCL)yfxFz{+$g#dys)*qM^#2zrS8TAsczou^!wz^R- zhRDYYu8j1OS_i1|0r_in*Zp_{Nb8+ThJ#Mjxaz&u4b~%17h*suF52T^y>n}wevqK0 zM(TYpP&CY+sA&79el8CE$fFT-^-D#3g>hFEg~KLFx|mu0LYTJxFo|0L(iWexQQWR6 zZO*lNe6HnTcNLQ|z7IxU&(}iQD@yLC7Lw&0s8pRLztkvY^#%Z>dn(CF7%r>mwacnJ zv%qdr>5gW#V(!#Qbk8dKxwEo7OIF6(xB6L`tv2)tsMj1OyyEG>ilck@+eL0GvzpsD zaNh3V*vPCcoVPX(s%wo;<5Q5H!o6cEtT8nMv^t&tPni0zE}^DbVm*FSIEb2pUOz+z zxMc1^b2qJcHZeZ{l0$Q?Tysk!o4DV$lQK~*ES&;EL5KB~t*=r`w6a{ZwD+~-)&7=1 z1JUdf#w+@4=wc68r2$ zL36_^bdRD4gAJ7)3==K7xi;R}9tfEUqqShzNn%lO4A0RC6~lNKbUNGUDFb*5OBWWN z8#fZVv~c;t!W9AxG>hnfo?nQj=MSM_!Nb6vR!rprE6}>SZ%8fg31NzgLv%EA<jVbkIoS>XC zr|P#@ahAzBCJiRfGP%HH5ec+z2#p&a)VSF~<9t09X;Mt?Vbo`#c+cWbm2>b{FQD{J z!5L^t3$6PTTtMc{&6@IrCs@J}O7GdUl(dPx6A2lWd9XY!#V_YQd*l|-66gn6!>AUP z(i&_jv={8m2&-B*pM(`0nXsM}=#8@mU0fsOc6Kfn)=94+z(k~Xdkp4HOea(TQ@bFd zzl{0k+!e$`aeZWEWJy#Nmmn*AaemqCL6^_7# zP@LeE8fDCfDg*}?jcQlta-*%zk`w{i^o6GxEna) z-$N1^D<=FMEDC&_`P`qe#hWqU;mm|bJ~ZIp%!WsCu7=0L<2XCvT=)deHTXg&adtzJ zgHu@<7&IINF(6kvHDvL`nq(tLa)^il4Qt{EB;FuUK^JBgCXkpBbs`vSm^BUJDfYz<$_j|Ar^+3|#HxkYD3KVzg(jz<IA6 z$R3_Me*i&wlAeLKp?c*s^VIB=Q`QMqqmEy=9eWEhz6Gkiy=uHGEh1Kh22?T!w7yr=V>lyhxFEh!XH9Vm)Ga2ju$4_cP_Od(H74w; zzQ%+O=RsU{d>=uX#P=CSis?2QR>kDsHIL;veO)nkX5Ju*EsVP~xd8l;qB}TbF=oK> z;F!U^L<*FCXpJo7KuM7zK!Iy6NBLt@VVc_@u$#8}0692<%%PRju94`X%-u9bC6r91 zOr3U}?c@N#eYjU9BazBJEWrh|d+(!-9{F#IeiLob;yOo>JN^hQ^wZ`~SM->;6MED` zJO0mPIxO#kdZP_p+r1~9*)&qSO_vmc#sJHR&T>gicm)PID~HMUAj*Y@0T-c2w|Cz9 zMo+w~2|*IbALDFpG-w5iq8ziw=nDU@KkR@qM_!_a(L!TZ*i~i&dE=}qp#I!aR^EwJ zKhA8TiJ~c;5FHPfnN8LtqLY<@DJnwS2>ME?Rkzuq62yFqMp=EGm#*ljPvWRag9|g@ zYq`&=04mUjt%D*r>uW3O10#Ln-ZqAcpGLv}&MX+&9D*3OY2kbtVmOau7W|-YK4b2j z$VKqL5KRc3{2229PH0j5S7btS1Z~G+AV#MCDKXpdXLcFiu{Uvzb2%(Q>^jIH3c}n( zn<~T)%c$O!G9W}-glO05!T-WYECG77%ZR#_dapAlWBkP-wF~SmwSO1dP>FH>d_?rG zhgST9q?QYqnm)4E=3PqzJ%-#0bLt0V_m2E+f~l zMJz-V)Vr+uLnfr%IysJ9c64(6MSvcnIHqfCVA*ZZ@;NLxQ&$e#oWtTf=FZ7S7hn1p zKs1dZsyGCQ{tFrHG0W;xDNRJU01qkVeiu=|tslZNp!Ak;_xqy?)D^yLh^W}83JU^m zK5`gw+AOJaDg4}1hWbH*zRY3O$I?&*=-BUB@CXnG8%>Ss=v7X^=|lWKvS3kMq*H1A zBWn#lx$b{h-8Fg$xbHRN-Z|Zh5w?6RGj(*tku_}bj*$?20IO{m$5dz|x?4y5*95UG zFALc(1g2pr>>p@bfIQtBwTz@Ot;8&?Adff)BEAtrA@ELxglA<5EQwvwc7$dZ0ZcLd zd&6Y4VGq%=D1!eutwn&eUcJNCHNt=_lZZrC+4B2{fM;eaGuv8J#FbcJ4{;?fGIu~| zYD5x2i|?W~ren!)=ZqW_4*o45#AVa9fD=b@Q5p9-TtpaA+AkuZc^W~J{bKU5xpkU? 
zqU&xMADefbLR=6C-X|_4h|5wgF26WbT)e^ zG331_>#GpMr9uo#6zQo{OFA)YfKY8aGV-g8KQrHpE4^~j!} zs`n@#cbU*rRDZ;T)JKabQH$H&?=uvA^7TjTs|VJDqtf4E>cR>WD+uA z%uq)iejPC-&;LGpOo-V_X{ig7Q(u3|KfWX5jF%nJrGZlCS3S2g9Vv#3_{!;_=K1P@CmU$kf_JjUNd^n zjK_d21ER_ff`c-k=dKBA=Ioj#SU47Wa$jTuesmDmQQzW>9=3i@pdz!o!;YWXK^Xdt z+61D=I>-VYf*C8dEuVWV@O%!!hb!0%Fy$qw2abQT${YqMZT=_3GENb zE!U1STH7tdZtYortuo{}JB^naW{`N@1oeyV!wVEwa2SiD3uipDNwDA(K&l$9)*&(1 z;u=I4Oi=+;z|fHRy3Y_oQJ|KHIf=EjvFtgQ|MUbkF` z{+{cg8)8v^m!n-rvaGW&^7;mnJL)B7Uq;fH$}n~>=tjPuxxNoyXxL%C?)%uC>Etz4 z-w*pu-&b$2BaN9BuLJMbWyRj3OOA|0T|>fN5+HP@ZPPH_S!d2U<{U*jE%n5L;zyad zfg^q%$-aP8JuHPbHq-5}999r$Du>n3!MPGn=b@@ z7d&gAjVM9i1`f?35(*>QL!@+6);f|x?&EWM#{vii2MP#==qU_q9flRxFsjDgUxGQ= zG(aJZY7Lm=fP=Zv3!J&1F`7ydxZ5njb$o^~QZTZgAIdkXfg9^D&aL)0-#nv3R)C&dpY)=6Q-UvSxED|aJA^#qN{dt+N$M1kW zg6QXhUe#<7!Vfc?3wjNC@;3@SLr{(lA=xzXO^v;2_H4^Ajd&|5!Pqz`0V7p^&W(dp z%z0#;`y7<98qhhRA~-4fX8wLbS60L=P96yAu5`~9-rW_LSMxaAEti^`|MXj zHDsAk+55~rU?Qrwr*%g)Q`%zH4JK5!$Ew&$RaDGxjWg}Tv~8t2AlX1Ge%lp=eiC&D zhpQk-##mE+fjh@bNT`Cxa`Ib;0-k{ix}tthnHL$~fw%SyRB{IYr=W_U!;ewK$>9q{ zoW>mrMbx23GEoUJn!=Q!f@xV@g$kNrV>T3&2fhHxZG-IM!FRW}{tX-q>=@C2e}<(f z5y!<6O+gvmTp8;S4Ok%cQ;hldX}w*A!qiberR#IW5jG8PzLr~+`eEf_P3mnYskL2L z6MS*AH_o^eqHpKcq>gq_$g~ctrmcx9qFirJ<2g9W%!D<$1#7}BE`%N@$u}dl2c*>$ z32o#K)?;b82<)#Zmt)83fFOP@d7p5$cKDEcJWW9U*dBo&i`YD1H}=q$B3937{0wqL zdSd)lOQZYFmk&pF;rHXwmJ5+$B)br)ALBxk7>E=wJSY2S#$+F#ErITDRzQ7h`2A>c zh7oIghJn~G95Q}@$D>{Tj)l?+7Y_nS7oh@Dv?e(^3tPzH^9dl79aiz-R0XaUTREuv z7&!)&_;FvQ#~65Re1$RHuok)>f?nL>=N=`5f_RT=Qs)K+go6RbnQ$ts^V2qbZUvgb zPezbOm`Xy6(>a!5LjO0;g<{|qaqMNB)TYj}NYOhgBC-}P$Q$3rA%537yx6;SMm9sR zp}(~5QBF<0JIz&ZSednOF*8>W$iUCtYy7GKpIi>EU*_6# zh8~7feI^4Y_m~`bhsSzC3XT5*hkQ@y*tX@Ka?Uu%70dC#0B2m$`B7ZaM++>e#BRrO zqqKZy8Q*j8?pay*q9&9w!*sv(50kBCe~hNOF+Q6rX9fR`~pm5iE8eY S+_LMq^X^G^dgf0J`+opXS;7JU literal 0 HcmV?d00001 diff --git a/timm/data/__pycache__/random_erasing.cpython-36.pyc b/timm/data/__pycache__/random_erasing.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8f50ab09a764f91fea94dd86aabbe8dbb20a15a GIT binary patch literal 3940 zcmbtXOLH5?5uSYk;1VDynU?j6J(WsA!bK4jm5LmeWl2TJa+MuMB`HyYN@}y*SpW+z zcA?pUNFs|%pd9T}PIlR+Tyn`T$PY-)NiKUzDpgVGFXWW3XTgUaRXJo))AQ=->FNHu zXK=02a9f|gxBJam#{R`leHPlcP_mCe1Zy*42s1X?hOW)FscWlkg;w7dR>u^!aGn`$ z=N=OkQGLclb!xUN!hFb@wees013wYzfZvsV7A0N2x;;|qNHtye$1>_hi66&@JW8@* zBtyY>(H;zA?knC?YM5=cTHQ$XM!V}lI%ti1qh#DmlkUd#Rw?$%OzaxJJ@kWK$T!zF z-0gID2-!Ww+gJdC75sbrv&cu??uAL$bwBZgJ?y8_Zm4>pq>WqrWZ~w<<_3R|W*L7> zf(Cx#zJNK{PGJWl;Rgf zEEvvunFF&{ZoWSI5Ke$8?VYAsI6@r`!=fU>r%@0}0~@{|(zFXlr84LhPB<8W=*G<8-~&wMh9VBxDqb$f;3KL;SPNj^t>zrA9-HdAb()8f$Be>wS=$y z&DOYo*a}h+X05y1+dqYEHe1<9hTZ_SOGVa_VK+*Xd$)I5DjE#*q89Cu4!p2*`}**( znD=1oQiY7fXD$e{ZDZN6jdxACh+aTLM<31KLdiBkLIwmfLSNew%VJ*CfjL_=#KJSy zcEqAsLR}GO#97o;QH9CgE*d3DW+3r@j>FCH2+(C=90n@mK9{J0jW|S$P69k~A9Ub% zLo@`ZQ-m3hz^zS~t-E~ot(03Fnxcgf+$5>v@3EOGSmv{;^D%Yq#On3d=D2~+O7QcVe;i`uihf1&JhZH!~cH{d|9Md5%&;gpm z*#_F~gDCNQ8TwoNM^O?DMg!ss$mH-C45J+;II-}3sbH7k2()x422Y&82mZm4DF5L9 z6h&HPL!jwIud>henO@QCv=WG}trA;Y2PPk2WR0hZKF&&zCYf4U<0}+asNtd&jl`7` zQ^PPbK7OS%0N;(%V2^O2*Ar+;;jqvKhfx8?mRi9te-(PR;6pnU##S+LYdORI1#T+fr-azvR z0)-3Ldl~YY$_gh=_d^LO2o^~Olyqkl3%9c@l$3n$N|{Q1r#0sb8yUYN&lB4PA{Rm0 zuGUHiE!w1)Zp&{I$Gb${Bf^Q$-KcNIk}RNG)I1LvNO|5B^fFq-?219Zx^WKFofT@; zsF>%B@wo>&Xl99f{YXSmq2>6NQ07oH7FKb?<8i>qgMDMqmS@TG^mtfilM2S*(?0oe zjoQfqc;U}-bZSnj2me;U#niaNepyUv2Y*wJFm&(FlR5B`1fZs0o0@rLndMcW%T;rI zcQ!Zw1)Ca^c~w^pj4r_U^UD~a-=uze^wQC2BX8vP9!vj}*K=!+$w#CSFlXiU^9&kk z4bivrhVCz_MeM(Xat3Ix=M6lK9Q4i#>vJc^tuNSb&3pmB`a9HG#IHfEj`bnC&z?Mb 
z%$|Jw_~@DRf*_#dt7)D*X_{{pl`@`7033U^sM0X*&B8&W8y3}3648Bgf)RNM5(_Il z7|L(yR>_ToB!H{qiSEPR@ihyQBFW| zD-5rw`byCYu9#Q8r00^Jua}sdg@w^8jQzq=>9DZlu%li@WJN!^!aLYKBNdpjfCpC1 zIBP7SwP2h_>l}Vd`e%Z^Z;Y2;L8QJw14>Zz)ljM^8jsJ=P{>YwV+xN8;|O`V(J{(V zP^)8nV|~F4mOLuQeqm3{i8Zk&p%pgP91|i`PF0WuAjv6E$E^XQ0edrQc0M=DfFDZL35N*%iwLI)vT3HarAqj zaP?cLeEe<@<5eQ`+HB9yrr-uRCVJJzho`D@IJ7a5g zcw<~$)31gbw8$7ZwHtQ2!lIrg*6^C)K}xW+xNjD?p|9im19*X(3( zIJFFi5`v&`rjwO<*|_+I)0S+{f1*9s8|bx9{Hs+~CX)wRtsxJkq*v3i#>|9@C&`4Pp3{!2r#hM%=m F|1WMi7vKN@ literal 0 HcmV?d00001 diff --git a/timm/data/__pycache__/real_labels.cpython-36.pyc b/timm/data/__pycache__/real_labels.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..709821866d57f8626f69d96e0dd83808b267b3b0 GIT binary patch literal 2390 zcmai0L30#E6z=Zn*=!~WDn=oKGNl&fs@YjW2}n^W1`1Isg@FQVOB_1Woy~4$cP2f( zn51SeCKhnA)T8BH{!(8(<>Z}{@AYmrK`Goxzv-Txe*NC}eeZ3~&ddbS*QZv_RS5Zu z9Qt@L{~Sh92O~+7NGe%Mo7CM|lexRstf;gI z)kBf?g~@cd3gbpswA7L^!7Y(1eKCA+MXT@!70Rp!_qJIZzwz0Fa6Vi!CNC~VkQKr3CH%XAIy6@I(;+jsR%Wl1qP=Ha%I96T76z<&2z^0X*?#6`(s} zk0Nq69dHgQi0IiQgqhYmMb_J+;BC(blkm|=xCme@!YB+FLrS(oe|~qs4ehX!@6Z89 zwswi}aCMd7lY7AGH)NT7OG;jnn3oi+Zz?6}_&e0y;oWss+%XeB($a%(tH3mPDLwQ6 z>q&__14)qcr36F64`vQ>f~OGB`(WCnB==8##|Cu3N+wAOB05GS`<+Yg5AQd&!NCD< zzH;N{?Yqm13ym`iF(@Y1m|@dvT#(y-{i|Eou0JI;&$4>M(zdM(wWaYOXP(`Nq%dM3 z+U{&cNhVbhU0Ynd3uG-sMPIA93uI=p0LokKtats&Qe@iQuG@q%NaLZ$dcO56xFp+& z0lnmxU&ACAmOKa4nGJ;6$VPH_!EpFk7 ztS*2%=uBk2s!u*5tL}$x7bMbgMBgTc8rJb(4Xtv{Q7^q&pxDvO58B@l^AW+P!Ko|u zcLvmeiw*+^-q-<|9XEg)-IMTgjaz>eir84z%K<4RdKxnw@$UcvNj%TH>kRlivrqWh z7XSc1X0b?;z81;WAn*~*qJ`;9g4dS?)$$pCS={Vl#i*)#nMn&iv8vK9d zP$R-9hTRi+!Ps43RTfNDXEZ*86DHzVW=S0D5Y`&Il|F^W0f6tk$f2!X8U}M2&nU1x JaYz>~?;lV&Hr)UK literal 0 HcmV?d00001 diff --git a/timm/data/__pycache__/transforms.cpython-36.pyc b/timm/data/__pycache__/transforms.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6651158283f1030332ed62ac6651a8b64cb8563e GIT binary patch literal 6434 zcmbtY&2tpT6`!7uo!Qxy5LkQ(f(NRSXl+Fz;KW41AR#cQ48jHS*Or}O*dDEBwL7!) 
z%o>n)PvRO^T**CEx%uMC!MWxS$e+-+R1OZeTypUx`MsX~6iV49Gu3apdtSeO{a*L` z=!;WR?$SH2KD=;3)Bd52J+r8YC3+)|OtT2sUZdMB)X|etxjkKCht5_|fm#Dc_SA9#>lKPgbW%Zq^PT$mcf!ohC zZtv;ULufg?h*lA;nXuS6%w51Eyaf0PF9Xi@%(UBYu=0 z!}kn7&QIX`YIug1_84G!k8SBy51gOmbKraqoS)_FzE(N)oJ71)Ve;^LoQ0yDG=nTk z;yVcsH{V9LwcZNqVU4f`kB-U;N=hmTwJPIURp*TBnA6|}sG8E=Og1{L_R|_6(_^5R zC~GL`C4fw8ur33{(BJAky$dCBeMb}X=rg-IL~c?)S{7Q?p27ifo4B=j_Ou;7C#uh} zA2f8C!ILO`PV+tg51vXv>Tz3Ife^t{X?3DFyClt+M=e3bV5lOR2Lo0?f z)3CWE?RM1kqgGuO;x>K1)C8^c+gnRK$bt(?yN#zyHOMDjy0NzQ{)O`wE-a-T5&Eq# z+fI18B*Ho*cyo1QDT`VyHE1alL7Z+SqLnVTpGwE~YeBQ=`-ed{bpbRs!(2Aa%4~P0 zFS^A6(gx&-5pmE8eP6o1-%5C=N%gYtKk5X{{Es*WGK$kNaFqH;?uSrP5{{#*_Jl@B zj4On24q%|s&}f)jOzD+XEpBiV`Ymt^Dkw;2GuaH|G!a7u6KyI=!L+0<>uNsE7VsHV zCM@(-bU{m2^?h8@5~DcEeE$?M6ssEBJv^8P(#Zb~CnTnaJL(o1FSDbvLc{Lrd#vGf zwMG#;kq!mzu$;vLF>3>$!=ZYS0Ija#iW1e!1f~c~6Cg2*831Wz5YD!Aw?xwNV>$~` zN0*X6Q1)+)acSfmXrp3_<1`rNQWGZ$oCc8gRx=5*ix&h5 z^MCJ*IsBzTDGAcirwu_p(vHJGglQ(7hfy=4hO{3>wGN6j9wtwuy&be#QCzRu%^;%BSNml8W4*_4##vYYz5d9&Kel2VJALuIWPxN4_sQW~_cD)aSuJU` zuPlO;+7ty?2-N?d3B`{=K>UQjPYLXkK9OHW=}$MDiIzCr;j|kvEbZiAf6J48nh8Hk zd}ThyjVX+igEJcv!oJDm;THz245rqYcpdeF=O=w<*nSyK8S#+m`FC*M40&ujNO5My zj5-;PP@Wp_sW#{&hNyrn_WEo1?C11IpVOSAqIDf;DV-6GnGA-7l=PD9iUo`XZwD|% zGR52w@C%ad|03&8YkPs^F<$$q?_ip7mM;*?D8q+N!U{_IOMrnbN$XyiM!O+j6G?kD=Ii6i2tLAIq90zi z9eVZX(=hhlUBB%qf9qw52af>?9ue?R^@6k=)-rU%KYNd(Yc^+;Boo2R7Kh7_nzeMx=eDQpR+Qdd8qIwhu&9Pbg1QgW?7AA;YTzZ3Oy|Hxh zMOqW1qSR{zJY4iPw^1b~6=l@7XyWEm4~Bl-&#@2RQ3`E2&9n}6T^`sz2>#d!X8wVqd;m40PrMVv)fjT{m^^c}j>CuqRBv5aN< zmcFNpIoKDrM;_gXU6^silT=4w#!hO0`bllaOs{th)kYvkZLyJ=4MYxmjA>nyn>)Jr zO;*4-J9aatYbtthzvwA;4M0TTI6c;eAXBUS?(Vr<;^}tMX>#wOlKoDE<1i2Zvw*$y zq#_IXQK^)q8>La4X7EX&G@B7Tn3Z+fO*BZ~(s>++7;Y<-Ce`Fo|K94x^}Bcc^^MIN z_ujpGdv$aD?uMM25Vo>v(n@IqNK5InV&x8;yo9()V1>XH0{ff^`CpErf@Mudgr~dA zW3KMNV7jcpc3->qU8Z6b`;d2~hXM|=yK5-vA0RD6FGymb3i?9$vs%x1^0$nU@ULs1 z{IzFhh_AcsbFFLOpGAtvmAA+Gb&R4kp;th=ps)*{qo-$QcIKe3*tNR$EPCkeIpe*j z2ED~D#Ziir9VX6nivxbB1)46$7Rov77Efr1>D{qj;@?rdrLNm8VYIV_SmDu`4_;W` zDXmxTma`JYZaW$hsdIyQbI!U>{x9dOSB$R#yR5KHV81)yqF1DKbc^}Fe04o1o*hve zVGe4kJ`H4ygCWM|G_qlXyk6^MH)ZUo(&D- zy)f&Dc+4WxEQ-bDTwJL)A1!!|1#f%7dt4!Rfc3S5v~Tebl;My%5u+z}B_x%RNJiG> zIYsg+rmsv1I?QywVc-S%erb2&h*CDh7D2Wxjb>7p20Vr|U>|^)bYc#}@Mj0^&WVcOIzjLkGV&1Upt?3jL<0kRom7JV*i3c>)n>%N+<6LG>exvD0e z8YhE;h?|kowyKoHhY-O?}^@@Gv(6^Y7Zi#Q#tY- zLVu3{NmgtUxKH39H**X>{)m!ijLsac6Q=d=>s<0wau~Rilc8uJcP^+ZjW*XpzCElg?BfUnJJt%+u;Uc3h(i9&R<9j4pO?mj(O#- zZ?XTkwC}GATA-cdVo#JdzGdE!XUfYcdLaQRS6jynpOkyJV4_i9jFL`dHx%>ZCW5j+ z$)4;xO-MQ^a8OC-pNSfgE@-<~zWapuz6jgG_ZKfW6Ua2Zg35r@ZgHM$(OaLL@9mZr za+6Gr7gmtcoRr;x|CyYB4oCSuZZ97p+QQ29=d@C~qqv3da3Sgbg%mH>!x;8TT=|-4 z<|5jC>!sE)phUbYJN2@>3;!VA_ou}o0M=i|xSn0`CW11WzJe2Xz!8&XBZ*?AWLd-= zS`#ha{BPUYy7eIjbyCttd-%JuyfF-n&{bc=Ta zMs=2*!8`puW4oNl#+SNy10=*R0Qyn;Kd*lCo_hX$6@MG_cLxCcMpSPSI7=PHeV0Je z_I;kzz#zq+vW$>8i)#4=Wp1U&V2Eu3vy(twKwU&1nVyucx11?c*O+^#{JL{U(4WdV zb;(pctCICk5pyT;;@A!6Rwu4e1{O4>JL<+&*sxzzK}OJy%=7-zdaIp??1m7DkOmB} zAR{DxP2f5KWi~B!jh5fL(GlohR#DP(c&GX5w)KIR8Jjz$)HxMu47a>pmvvdi$;+^4w9jCF6Z~O zR}ceg>DvGfu6K15M|ofRs@Iryr>6@@1@hO`j~E5m?!oKov%1Ml!!aG(v1g3`0!u~| A&Hw-a literal 0 HcmV?d00001 diff --git a/timm/data/__pycache__/transforms_factory.cpython-36.pyc b/timm/data/__pycache__/transforms_factory.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6cc45048649c550e08f1eec15bb2e6ec8527e010 GIT binary patch literal 5119 zcma)A&5s;M74Pop>6z)B-JP$U-Hjd636NyK@kTZZQgFO6>yJ2zH^!bMXb6njQ$4di zp6;Gh*BI|od+{0phmDdW9C8G~ffEP*2qPgOA<^Ox#39)W;sBQ%2)|c7U;dKPqkdIg 
z@71fX_ulW-oL*eCH{N*a#>bAP{ZpHJs;Iw;H~tR@(^?wSnK95?x+;y9fig451ZKNea;vP`bAw7y zZBa7~u4Ez?^5?e-DO8NLIvkJzxyCqg-HAt=~$w_~Ut*|;; z>&#Zac^w5BVXDC(ez(e2*&5_mnQ>iftnYp7G`Snb-G~RVbJ6XHi0|6t(g{4V6S3F< z*SRtB2h0z9jvu%^&k?g;#2v++^J8D^IL*sft~k$M-EBs^v*TP=vR5MJ4dRAvzvgys zddv|~4~jkL+;ujbt!TK*{oamnPQ7^QMdw-+$IiR-9k`)=gp<*>8+u~T25e-KmCIMo zUAnM+q1irv;o`Zs-)y$8TsXI#)E;WS-aMa_CNf*s-n!bpy48e&I;PLs?x+`dp=eKa zC+m|YVh8^1Iab{riRj@j4Ku04g15o*L*emZJHT?2^0;m25611u)7PLh3a)vvzvrO$TaD63&zW77>OBpZkS|Z z!5TI@94+$7wHkks9*NsOg1q#FN4{Owt`-tCB2?va^#|BIV?^qF@-s zUc*ZAe$Z>vcJeayUV<66hurIWq67Ql6*O}4@+2d=gWqcWE2w{db%VLWJ+-md-`(io ztm2IeTU+nI_)cxaBkr}wR=>f$9?s;_x$O<%2Z8FeF}0Mq-A$e4Ugxi=az}pgqv z^$s=Avw}B|R~@^FjTJcfVV`@dCyFw6pvx*-Jflgg zk1%+k^Uq{iEJ;gN4-7TKa`=+sEg$HZ%MvR=z6|-YSiVoHgqG5MEpN&!tK2N0*8p?Q zioDQh&iLLIWin@W{3g~HzelSImqdlyx;m@YVDivr^s6%aLFy`H>_$!I(f6*biQ3qK zYT;}6GWsiUAfsOwtNk@uz{;t#p?_uUKsC8QyNurVWUaq0o{$T&C>P}ttFzTZjH+vL z8QgzC%Q0Dk1}e+rvW8h@uG0wM$?~cuYpdAN>l&-TA)j=+uoBzBbGhrp-q7W)@LqYP z$w%G^2O)PpY`ffZxF<$DL`ZgH$92SLIPjdP>ztnUo$H{~PGY?uLLTvv0D-(I_5}31 z!Wp`8?8Qpf`G(^T28s+n5b;!s(y`0R(3HTxWpL@to>D zRp$&NPJxmpNkTOAeY#^0&cdK}rghReoWv6>HDnItZh2h9O3w|1F+>b-ZbY}nNT#-H z$LH*X)Ad7l@PNioe^UGXrGM=GVe?b+M@{?p#DDXzjvcx5Du5;%V@(KQI0!#dQEKYu zh?n}X_osXG{C%@S{!I@>$sFG6cw-7!JxyvqtdmOX%Vx4T>pSd-?V}9l zaAblsjQNP&8IFLhrb5#~;NHeSqfjJvI11YFaNvtLv6y$u?|A63_`{@ZHhtUZ2g{>;i4%TvI&Mzk zJjo0^oMCqmxguHEiUtvX-4{Seg^s*Va;roLRrosfu-xI$3mIRemM1_OEBqMMzfR;? zkR-d)rDbGqsiN2+we7yrn-u4>8g*>^ICY#sXAY1!Ag zwJ`SXHgLDKIQ2^PS)71260ERQaNRb=sUU6@l%J8z17@47Kh{1^V6yQ$AliBn_*x-c zRoH3*WtZ@?4O2ICTdy17qpW}u_Ex|tWonrvy#xvCK3uke&o#r+j~kr4utSGL4;>NJ z$ztQbg8W+8Pkx@9<6~I44e57IVvm$#3JeFUF6SKm)hSY3Zr0JE$GV9QA zOd;aF_5N*P{7eJpm@IoYFSQgY_pPupk)O$mLf-}sFGvI9SV-&&{o**W%gGG?wakgd zF)rnh*yX@q0xlK$%c9gTOC)rvB;3o5afxIqguw_Y_Vy9E1Am5y$~)$R6Q!P2yehR( z!KtKj4n3V;8kW=vHgtX-g7Cm^k|sJU1igF*>0TSIKZ{HJ1+YHCI|)c99(CZ}EqCz9 zc%&=Coev*JU*I|VA4sFhX~KMy$QdHvC32C-IU+Pegc(=tD94Mq3I)7IJW5hI)0LDfdX@~_P7P#0XL?Dpd=E;3Ugh{|th(5QZuYWu(Vpjtfltsj-e^MkQ=DS>e}kE4U`BMJ;|w zWEhO=4(<)O_nJX=e+7xj3R20Mw2?|?_UqxE)VYMYN!4QqJuK=WERsRWiNprxm4)P0 zNd8Xb&~8d3kTNIpvM`aMH0X+nL%{ygpqLK(U1}bWE7OQG%q^18z7W*yqNA(%CcdPE6wiJX?qixrEbH( z?gc+i6L|?p*L_88ad;eqq@kYci~W77aQiIOro(o3()*^tb)Hm zb{+5WFI-4vQA198ifblhdihC z1l@~HQVw=fbm`tTx3@o_xj95Af-1{wtR}V}4o4#OgZHSNuqLt6jruYhGKHl_?gdRa zDLph7+M?7!X;w*L>S_5mX&Qh!`& z!+qy@d4u18{>EC8Yqwd{X}9_JX{Lnj$%4A`-a?iUg(@JmsQi0D4vTAN8yG8YAHFa1Lz%{Ls9o*uAT&fK0- zkmD-4<f6po= ATL1t6 literal 0 HcmV?d00001 diff --git a/timm/data/auto_augment.py b/timm/data/auto_augment.py new file mode 100644 index 0000000..8907e50 --- /dev/null +++ b/timm/data/auto_augment.py @@ -0,0 +1,865 @@ +""" AutoAugment, RandAugment, and AugMix for PyTorch + +This code implements the searched ImageNet policies with various tweaks and improvements and +does not include any of the search code. + +AA and RA Implementation adapted from: + https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py + +AugMix adapted from: + https://github.com/google-research/augmix + +Papers: + AutoAugment: Learning Augmentation Policies from Data - https://arxiv.org/abs/1805.09501 + Learning Data Augmentation Strategies for Object Detection - https://arxiv.org/abs/1906.11172 + RandAugment: Practical automated data augmentation... 
+    AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty - https://arxiv.org/abs/1912.02781
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+import random
+import math
+import re
+from PIL import Image, ImageOps, ImageEnhance, ImageChops
+import PIL
+import numpy as np
+
+
+_PIL_VER = tuple([int(x) for x in PIL.__version__.split('.')[:2]])
+
+_FILL = (128, 128, 128)
+
+_LEVEL_DENOM = 10.  # denominator for conversion from 'Mx' magnitude scale to fractional aug level for op arguments
+
+_HPARAMS_DEFAULT = dict(
+    translate_const=250,
+    img_mean=_FILL,
+)
+
+_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC)
+
+
+def _interpolation(kwargs):
+    interpolation = kwargs.pop('resample', Image.BILINEAR)
+    if isinstance(interpolation, (list, tuple)):
+        return random.choice(interpolation)
+    else:
+        return interpolation
+
+
+def _check_args_tf(kwargs):
+    if 'fillcolor' in kwargs and _PIL_VER < (5, 0):
+        kwargs.pop('fillcolor')
+    kwargs['resample'] = _interpolation(kwargs)
+
+
+def shear_x(img, factor, **kwargs):
+    _check_args_tf(kwargs)
+    return img.transform(img.size, Image.AFFINE, (1, factor, 0, 0, 1, 0), **kwargs)
+
+
+def shear_y(img, factor, **kwargs):
+    _check_args_tf(kwargs)
+    return img.transform(img.size, Image.AFFINE, (1, 0, 0, factor, 1, 0), **kwargs)
+
+
+def translate_x_rel(img, pct, **kwargs):
+    pixels = pct * img.size[0]
+    _check_args_tf(kwargs)
+    return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs)
+
+
+def translate_y_rel(img, pct, **kwargs):
+    pixels = pct * img.size[1]
+    _check_args_tf(kwargs)
+    return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs)
+
+
+def translate_x_abs(img, pixels, **kwargs):
+    _check_args_tf(kwargs)
+    return img.transform(img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs)
+
+
+def translate_y_abs(img, pixels, **kwargs):
+    _check_args_tf(kwargs)
+    return img.transform(img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs)
+
+
+def rotate(img, degrees, **kwargs):
+    _check_args_tf(kwargs)
+    if _PIL_VER >= (5, 2):
+        return img.rotate(degrees, **kwargs)
+    elif _PIL_VER >= (5, 0):
+        w, h = img.size
+        post_trans = (0, 0)
+        rotn_center = (w / 2.0, h / 2.0)
+        angle = -math.radians(degrees)
+        matrix = [
+            round(math.cos(angle), 15),
+            round(math.sin(angle), 15),
+            0.0,
+            round(-math.sin(angle), 15),
+            round(math.cos(angle), 15),
+            0.0,
+        ]
+
+        def transform(x, y, matrix):
+            (a, b, c, d, e, f) = matrix
+            return a * x + b * y + c, d * x + e * y + f
+
+        matrix[2], matrix[5] = transform(
+            -rotn_center[0] - post_trans[0], -rotn_center[1] - post_trans[1], matrix
+        )
+        matrix[2] += rotn_center[0]
+        matrix[5] += rotn_center[1]
+        return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
+    else:
+        return img.rotate(degrees, resample=kwargs['resample'])
+
+
+def auto_contrast(img, **__):
+    return ImageOps.autocontrast(img)
+
+
+def invert(img, **__):
+    return ImageOps.invert(img)
+
+
+def equalize(img, **__):
+    return ImageOps.equalize(img)
+
+
+def solarize(img, thresh, **__):
+    return ImageOps.solarize(img, thresh)
+
+
+def solarize_add(img, add, thresh=128, **__):
+    lut = []
+    for i in range(256):
+        if i < thresh:
+            lut.append(min(255, i + add))
+        else:
+            lut.append(i)
+    if img.mode in ("L", "RGB"):
+        if img.mode == "RGB" and len(lut) == 256:
+            lut = lut + lut + lut
+        return img.point(lut)
+    else:
+        return img
+
+
+def posterize(img, bits_to_keep, **__):
+    if bits_to_keep >= 8:
+        return img
+    return ImageOps.posterize(img, bits_to_keep)
+
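All of these op wrappers share the (img, arg, **kwargs) calling shape, so they can be applied directly to a PIL image; a minimal sketch (the file name is a placeholder):

    from PIL import Image

    img = Image.open('example.jpg')  # placeholder path
    sheared = shear_x(img, 0.3, resample=Image.BILINEAR)       # shear by factor 0.3
    lifted = solarize_add(img, add=60, thresh=128)             # brighten pixels below the threshold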
+
+def contrast(img, factor, **__):
+    return ImageEnhance.Contrast(img).enhance(factor)
+
+
+def color(img, factor, **__):
+    return ImageEnhance.Color(img).enhance(factor)
+
+
+def brightness(img, factor, **__):
+    return ImageEnhance.Brightness(img).enhance(factor)
+
+
+def sharpness(img, factor, **__):
+    return ImageEnhance.Sharpness(img).enhance(factor)
+
+
+def _randomly_negate(v):
+    """With 50% prob, negate the value"""
+    return -v if random.random() > 0.5 else v
+
+
+def _rotate_level_to_arg(level, _hparams):
+    # range [-30, 30]
+    level = (level / _LEVEL_DENOM) * 30.
+    level = _randomly_negate(level)
+    return level,
+
+
+def _enhance_level_to_arg(level, _hparams):
+    # range [0.1, 1.9]
+    return (level / _LEVEL_DENOM) * 1.8 + 0.1,
+
+
+def _enhance_increasing_level_to_arg(level, _hparams):
+    # the 'no change' level is 1.0, moving away from that towards 0. or 2.0 increases the enhancement blend
+    # range [0.1, 1.9] if level <= _LEVEL_DENOM
+    level = (level / _LEVEL_DENOM) * .9
+    level = max(0.1, 1.0 + _randomly_negate(level))  # keep it >= 0.1
+    return level,
+
+
+def _shear_level_to_arg(level, _hparams):
+    # range [-0.3, 0.3]
+    level = (level / _LEVEL_DENOM) * 0.3
+    level = _randomly_negate(level)
+    return level,
+
+
+def _translate_abs_level_to_arg(level, hparams):
+    translate_const = hparams['translate_const']
+    level = (level / _LEVEL_DENOM) * float(translate_const)
+    level = _randomly_negate(level)
+    return level,
+
+
+def _translate_rel_level_to_arg(level, hparams):
+    # default range [-0.45, 0.45]
+    translate_pct = hparams.get('translate_pct', 0.45)
+    level = (level / _LEVEL_DENOM) * translate_pct
+    level = _randomly_negate(level)
+    return level,
+
+
+def _posterize_level_to_arg(level, _hparams):
+    # As per Tensorflow TPU EfficientNet impl
+    # range [0, 4], 'keep 0 up to 4 MSB of original image'
+    # intensity/severity of augmentation decreases with level
+    return int((level / _LEVEL_DENOM) * 4),
+
+
+def _posterize_increasing_level_to_arg(level, hparams):
+    # As per Tensorflow models research and UDA impl
+    # range [4, 0], 'keep 4 down to 0 MSB of original image',
+    # intensity/severity of augmentation increases with level
+    return 4 - _posterize_level_to_arg(level, hparams)[0],
+
+
+def _posterize_original_level_to_arg(level, _hparams):
+    # As per original AutoAugment paper description
+    # range [4, 8], 'keep 4 up to 8 MSB of image'
+    # intensity/severity of augmentation decreases with level
+    return int((level / _LEVEL_DENOM) * 4) + 4,
+
+
+def _solarize_level_to_arg(level, _hparams):
+    # range [0, 256]
+    # intensity/severity of augmentation decreases with level
+    return int((level / _LEVEL_DENOM) * 256),
+
+
+def _solarize_increasing_level_to_arg(level, _hparams):
+    # range [0, 256]
+    # intensity/severity of augmentation increases with level
+    return 256 - _solarize_level_to_arg(level, _hparams)[0],
+
+
+def _solarize_add_level_to_arg(level, _hparams):
+    # range [0, 110]
+    return int((level / _LEVEL_DENOM) * 110),
+
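Each _*_level_to_arg helper maps the integer AutoAugment/RandAugment magnitude onto the op's native argument range; a few spot checks of the functions above (signs are randomized where noted):

    # _rotate_level_to_arg(9, {})                 -> (27.0,) or (-27.0,)  # (9/10) * 30, random sign
    # _enhance_level_to_arg(10, {})               -> (1.9,)               # (10/10) * 1.8 + 0.1
    # _posterize_increasing_level_to_arg(10, {})  -> (0,)                 # severity grows with magnitude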
+LEVEL_TO_ARG = {
+    'AutoContrast': None,
+    'Equalize': None,
+    'Invert': None,
+    'Rotate': _rotate_level_to_arg,
+    # There are several variations of the posterize level scaling in various Tensorflow/Google repositories/papers
+    'Posterize': _posterize_level_to_arg,
+    'PosterizeIncreasing': _posterize_increasing_level_to_arg,
+    'PosterizeOriginal': _posterize_original_level_to_arg,
+    'Solarize': _solarize_level_to_arg,
+    'SolarizeIncreasing': _solarize_increasing_level_to_arg,
+    'SolarizeAdd': _solarize_add_level_to_arg,
+    'Color': _enhance_level_to_arg,
+    'ColorIncreasing': _enhance_increasing_level_to_arg,
+    'Contrast': _enhance_level_to_arg,
+    'ContrastIncreasing': _enhance_increasing_level_to_arg,
+    'Brightness': _enhance_level_to_arg,
+    'BrightnessIncreasing': _enhance_increasing_level_to_arg,
+    'Sharpness': _enhance_level_to_arg,
+    'SharpnessIncreasing': _enhance_increasing_level_to_arg,
+    'ShearX': _shear_level_to_arg,
+    'ShearY': _shear_level_to_arg,
+    'TranslateX': _translate_abs_level_to_arg,
+    'TranslateY': _translate_abs_level_to_arg,
+    'TranslateXRel': _translate_rel_level_to_arg,
+    'TranslateYRel': _translate_rel_level_to_arg,
+}
+
+
+NAME_TO_OP = {
+    'AutoContrast': auto_contrast,
+    'Equalize': equalize,
+    'Invert': invert,
+    'Rotate': rotate,
+    'Posterize': posterize,
+    'PosterizeIncreasing': posterize,
+    'PosterizeOriginal': posterize,
+    'Solarize': solarize,
+    'SolarizeIncreasing': solarize,
+    'SolarizeAdd': solarize_add,
+    'Color': color,
+    'ColorIncreasing': color,
+    'Contrast': contrast,
+    'ContrastIncreasing': contrast,
+    'Brightness': brightness,
+    'BrightnessIncreasing': brightness,
+    'Sharpness': sharpness,
+    'SharpnessIncreasing': sharpness,
+    'ShearX': shear_x,
+    'ShearY': shear_y,
+    'TranslateX': translate_x_abs,
+    'TranslateY': translate_y_abs,
+    'TranslateXRel': translate_x_rel,
+    'TranslateYRel': translate_y_rel,
+}
+
+
+class AugmentOp:
+
+    def __init__(self, name, prob=0.5, magnitude=10, hparams=None):
+        hparams = hparams or _HPARAMS_DEFAULT
+        self.name = name
+        self.aug_fn = NAME_TO_OP[name]
+        self.level_fn = LEVEL_TO_ARG[name]
+        self.prob = prob
+        self.magnitude = magnitude
+        self.hparams = hparams.copy()
+        self.kwargs = dict(
+            fillcolor=hparams['img_mean'] if 'img_mean' in hparams else _FILL,
+            resample=hparams['interpolation'] if 'interpolation' in hparams else _RANDOM_INTERPOLATION,
+        )
+
+        # If magnitude_std is > 0, we introduce some randomness
+        # in the usually fixed policy and sample magnitude from a normal distribution
+        # with mean `magnitude` and std-dev of `magnitude_std`.
+        # NOTE This is my own hack, being tested, not in papers or reference impls.
+        # If magnitude_std is inf, we sample magnitude from a uniform distribution
+        self.magnitude_std = self.hparams.get('magnitude_std', 0)
+        self.magnitude_max = self.hparams.get('magnitude_max', None)
+
+    def __call__(self, img):
+        if self.prob < 1.0 and random.random() > self.prob:
+            return img
+        magnitude = self.magnitude
+        if self.magnitude_std > 0:
+            # magnitude randomization enabled
+            if self.magnitude_std == float('inf'):
+                magnitude = random.uniform(0, magnitude)
+            elif self.magnitude_std > 0:
+                magnitude = random.gauss(magnitude, self.magnitude_std)
+        # default upper_bound for the timm RA impl is _LEVEL_DENOM (10)
+        # setting magnitude_max overrides this to allow M > 10 (behaviour closer to Google TF RA impl)
+        upper_bound = self.magnitude_max or _LEVEL_DENOM
+        magnitude = max(0., min(magnitude, upper_bound))
+        level_args = self.level_fn(magnitude, self.hparams) if self.level_fn is not None else tuple()
+        return self.aug_fn(img, *level_args, **self.kwargs)
+
+    def __repr__(self):
+        fs = self.__class__.__name__ + f'(name={self.name}, p={self.prob}'
+        fs += f', m={self.magnitude}, mstd={self.magnitude_std}'
+        if self.magnitude_max is not None:
+            fs += f', mmax={self.magnitude_max}'
+        fs += ')'
+        return fs
+
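AugmentOp ties a named op to its probability, magnitude, and optional magnitude randomization; a hedged construction sketch:

    op = AugmentOp('Solarize', prob=0.5, magnitude=8, hparams={'magnitude_std': 0.5})
    out = op(img)  # 50% chance of Solarize at magnitude ~ N(8, 0.5), clipped to [0, 10]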
+
+def auto_augment_policy_v0(hparams):
+    # ImageNet v0 policy from TPU EfficientNet impl, cannot find a paper reference.
+    policy = [
+        [('Equalize', 0.8, 1), ('ShearY', 0.8, 4)],
+        [('Color', 0.4, 9), ('Equalize', 0.6, 3)],
+        [('Color', 0.4, 1), ('Rotate', 0.6, 8)],
+        [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)],
+        [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)],
+        [('Color', 0.2, 0), ('Equalize', 0.8, 8)],
+        [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)],
+        [('ShearX', 0.2, 9), ('Rotate', 0.6, 8)],
+        [('Color', 0.6, 1), ('Equalize', 1.0, 2)],
+        [('Invert', 0.4, 9), ('Rotate', 0.6, 0)],
+        [('Equalize', 1.0, 9), ('ShearY', 0.6, 3)],
+        [('Color', 0.4, 7), ('Equalize', 0.6, 0)],
+        [('Posterize', 0.4, 6), ('AutoContrast', 0.4, 7)],
+        [('Solarize', 0.6, 8), ('Color', 0.6, 9)],
+        [('Solarize', 0.2, 4), ('Rotate', 0.8, 9)],
+        [('Rotate', 1.0, 7), ('TranslateYRel', 0.8, 9)],
+        [('ShearX', 0.0, 0), ('Solarize', 0.8, 4)],
+        [('ShearY', 0.8, 0), ('Color', 0.6, 4)],
+        [('Color', 1.0, 0), ('Rotate', 0.6, 2)],
+        [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)],
+        [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)],
+        [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)],
+        [('Posterize', 0.8, 2), ('Solarize', 0.6, 10)],  # This results in a black image with TPU posterize
+        [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)],
+        [('Color', 0.8, 6), ('Rotate', 0.4, 5)],
+    ]
+    pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy]
+    return pc
+
+
+def auto_augment_policy_v0r(hparams):
+    # ImageNet v0 policy from TPU EfficientNet impl, with variation of Posterize used
+    # in Google research implementation (number of bits discarded increases with magnitude)
+    policy = [
+        [('Equalize', 0.8, 1), ('ShearY', 0.8, 4)],
+        [('Color', 0.4, 9), ('Equalize', 0.6, 3)],
+        [('Color', 0.4, 1), ('Rotate', 0.6, 8)],
+        [('Solarize', 0.8, 3), ('Equalize', 0.4, 7)],
+        [('Solarize', 0.4, 2), ('Solarize', 0.6, 2)],
+        [('Color', 0.2, 0), ('Equalize', 0.8, 8)],
+        [('Equalize', 0.4, 8), ('SolarizeAdd', 0.8, 3)],
+        [('ShearX', 0.2, 9), ('Rotate', 0.6, 8)],
+        [('Color', 0.6, 1), ('Equalize', 1.0, 2)],
+        [('Invert', 0.4, 9), ('Rotate', 0.6, 0)],
+        [('Equalize', 1.0, 9), ('ShearY', 0.6, 3)],
+        [('Color', 0.4, 7), ('Equalize', 0.6, 0)],
+        [('PosterizeIncreasing', 0.4, 6), ('AutoContrast', 0.4, 7)],
+        [('Solarize', 0.6, 8), ('Color', 0.6, 9)],
+        [('Solarize', 0.2, 4), ('Rotate', 0.8, 9)],
+        [('Rotate', 1.0, 7), ('TranslateYRel', 0.8, 9)],
+        [('ShearX', 0.0, 0), ('Solarize', 0.8, 4)],
+        [('ShearY', 0.8, 0), ('Color', 0.6, 4)],
+        [('Color', 1.0, 0), ('Rotate', 0.6, 2)],
+        [('Equalize', 0.8, 4), ('Equalize', 0.0, 8)],
+        [('Equalize', 1.0, 4), ('AutoContrast', 0.6, 2)],
+        [('ShearY', 0.4, 7), ('SolarizeAdd', 0.6, 7)],
+        [('PosterizeIncreasing', 0.8, 2), ('Solarize', 0.6, 10)],
+        [('Solarize', 0.6, 8), ('Equalize', 0.6, 1)],
+        [('Color', 0.8, 6), ('Rotate', 0.4, 5)],
+    ]
+    pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy]
+    return pc
+
+
+def auto_augment_policy_original(hparams):
+    # ImageNet policy from https://arxiv.org/abs/1805.09501
+    policy = [
+        [('PosterizeOriginal', 0.4, 8), ('Rotate', 0.6, 9)],
+        [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)],
+        [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)],
+        [('PosterizeOriginal', 0.6, 7), ('PosterizeOriginal', 0.6, 6)],
+        [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)],
+        [('Equalize', 0.4, 4), ('Rotate', 0.8, 8)],
+        [('Solarize', 0.6, 3), ('Equalize', 0.6, 7)],
+        [('PosterizeOriginal', 0.8, 5), ('Equalize', 1.0, 2)],
+        [('Rotate', 0.2, 3), ('Solarize', 0.6, 8)],
+        [('Equalize', 0.6, 8), ('PosterizeOriginal', 0.4, 6)],
+        [('Rotate', 0.8, 8), ('Color', 0.4, 0)],
+        [('Rotate', 0.4, 9), ('Equalize', 0.6, 2)],
+        [('Equalize', 0.0, 7), ('Equalize', 0.8, 8)],
+        [('Invert', 0.6, 4), ('Equalize', 1.0, 8)],
+        [('Color', 0.6, 4), ('Contrast', 1.0, 8)],
+        [('Rotate', 0.8, 8), ('Color', 1.0, 2)],
+        [('Color', 0.8, 8), ('Solarize', 0.8, 7)],
+        [('Sharpness', 0.4, 7), ('Invert', 0.6, 8)],
+        [('ShearX', 0.6, 5), ('Equalize', 1.0, 9)],
+        [('Color', 0.4, 0), ('Equalize', 0.6, 3)],
+        [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)],
+        [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)],
+        [('Invert', 0.6, 4), ('Equalize', 1.0, 8)],
+        [('Color', 0.6, 4), ('Contrast', 1.0, 8)],
+        [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)],
+    ]
+    pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy]
+    return pc
+
+
+def auto_augment_policy_originalr(hparams):
+    # ImageNet policy from https://arxiv.org/abs/1805.09501 with research posterize variation
+    policy = [
+        [('PosterizeIncreasing', 0.4, 8), ('Rotate', 0.6, 9)],
+        [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)],
+        [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)],
+        [('PosterizeIncreasing', 0.6, 7), ('PosterizeIncreasing', 0.6, 6)],
+        [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)],
+        [('Equalize', 0.4, 4), ('Rotate', 0.8, 8)],
+        [('Solarize', 0.6, 3), ('Equalize', 0.6, 7)],
+        [('PosterizeIncreasing', 0.8, 5), ('Equalize', 1.0, 2)],
+        [('Rotate', 0.2, 3), ('Solarize', 0.6, 8)],
+        [('Equalize', 0.6, 8), ('PosterizeIncreasing', 0.4, 6)],
+        [('Rotate', 0.8, 8), ('Color', 0.4, 0)],
+        [('Rotate', 0.4, 9), ('Equalize', 0.6, 2)],
+        [('Equalize', 0.0, 7), ('Equalize', 0.8, 8)],
+        [('Invert', 0.6, 4), ('Equalize', 1.0, 8)],
+        [('Color', 0.6, 4), ('Contrast', 1.0, 8)],
+        [('Rotate', 0.8, 8), ('Color', 1.0, 2)],
+        [('Color', 0.8, 8), ('Solarize', 0.8, 7)],
+        [('Sharpness', 0.4, 7), ('Invert', 0.6, 8)],
+        [('ShearX', 0.6, 5), ('Equalize', 1.0, 9)],
+        [('Color', 0.4, 0), ('Equalize', 0.6, 3)],
+        [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)],
+        [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)],
+        [('Invert', 0.6, 4), ('Equalize', 1.0, 8)],
+        [('Color', 0.6, 4), ('Contrast', 1.0, 8)],
+        [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)],
+    ]
+    pc = [[AugmentOp(*a, hparams=hparams) for a in sp] for sp in policy]
+    return pc
+
+
+def auto_augment_policy(name='v0', hparams=None):
+    hparams = hparams or _HPARAMS_DEFAULT
+    if name == 'original':
+        return auto_augment_policy_original(hparams)
+    elif name == 'originalr':
+        return auto_augment_policy_originalr(hparams)
+    elif name == 'v0':
+        return auto_augment_policy_v0(hparams)
+    elif name == 'v0r':
+        return auto_augment_policy_v0r(hparams)
+    else:
+        assert False, 'Unknown AA policy (%s)' % name
+
+
+class AutoAugment:
+
+    def __init__(self, policy):
+        self.policy = policy
+
+    def __call__(self, img):
+        sub_policy = random.choice(self.policy)
+        for op in sub_policy:
+            img = op(img)
+        return img
+
+    def __repr__(self):
+        fs = self.__class__.__name__ + f'(policy='
+        for p in self.policy:
+            fs += '\n\t['
+            fs += ', '.join([str(op) for op in p])
+            fs += ']'
+        fs += ')'
+        return fs
+
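The dispatcher and wrapper above compose into a ready-to-use transform; a minimal sketch:

    policy = auto_augment_policy('v0')
    aa = AutoAugment(policy)
    augmented = aa(img)  # applies one randomly chosen sub-policy (a pair of ops) per call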
+
+def auto_augment_transform(config_str, hparams):
+    """
+    Create an AutoAugment transform
+
+    :param config_str: String defining configuration of auto augmentation. Consists of multiple sections separated
+        by dashes ('-'). The first section defines the AutoAugment policy (one of 'v0', 'v0r', 'original',
+        'originalr'). The remaining sections (order does not matter) determine
+            'mstd' - float std deviation of magnitude noise applied
+        Ex 'original-mstd0.5' results in AutoAugment with original policy, magnitude_std 0.5
+
+    :param hparams: Other hparams (kwargs) for the AutoAugmentation scheme
+
+    :return: A PyTorch compatible Transform
+    """
+    config = config_str.split('-')
+    policy_name = config[0]
+    config = config[1:]
+    for c in config:
+        cs = re.split(r'(\d.*)', c)
+        if len(cs) < 2:
+            continue
+        key, val = cs[:2]
+        if key == 'mstd':
+            # noise param injected via hparams for now
+            hparams.setdefault('magnitude_std', float(val))
+        else:
+            assert False, 'Unknown AutoAugment config section'
+    aa_policy = auto_augment_policy(policy_name, hparams=hparams)
+    return AutoAugment(aa_policy)
+
+
+_RAND_TRANSFORMS = [
+    'AutoContrast',
+    'Equalize',
+    'Invert',
+    'Rotate',
+    'Posterize',
+    'Solarize',
+    'SolarizeAdd',
+    'Color',
+    'Contrast',
+    'Brightness',
+    'Sharpness',
+    'ShearX',
+    'ShearY',
+    'TranslateXRel',
+    'TranslateYRel',
+    # 'Cutout'  # NOTE I've implemented this as random erasing separately
+]
+
+
+_RAND_INCREASING_TRANSFORMS = [
+    'AutoContrast',
+    'Equalize',
+    'Invert',
+    'Rotate',
+    'PosterizeIncreasing',
+    'SolarizeIncreasing',
+    'SolarizeAdd',
+    'ColorIncreasing',
+    'ContrastIncreasing',
+    'BrightnessIncreasing',
+    'SharpnessIncreasing',
+    'ShearX',
+    'ShearY',
+    'TranslateXRel',
+    'TranslateYRel',
+    # 'Cutout'  # NOTE I've implemented this as random erasing separately
+]
+
+
+# These experimental weights are based loosely on the relative improvements mentioned in the paper.
+# They may not result in increased performance, but could likely be tuned to do so.
+_RAND_CHOICE_WEIGHTS_0 = {
+    'Rotate': 0.3,
+    'ShearX': 0.2,
+    'ShearY': 0.2,
+    'TranslateXRel': 0.1,
+    'TranslateYRel': 0.1,
+    'Color': .025,
+    'Sharpness': 0.025,
+    'AutoContrast': 0.025,
+    'Solarize': .005,
+    'SolarizeAdd': .005,
+    'Contrast': .005,
+    'Brightness': .005,
+    'Equalize': .005,
+    'Posterize': 0,
+    'Invert': 0,
+}
+
+
+def _select_rand_weights(weight_idx=0, transforms=None):
+    transforms = transforms or _RAND_TRANSFORMS
+    assert weight_idx == 0  # only one set of weights currently
+    rand_weights = _RAND_CHOICE_WEIGHTS_0
+    probs = [rand_weights[k] for k in transforms]
+    probs /= np.sum(probs)
+    return probs
+
+
+def rand_augment_ops(magnitude=10, hparams=None, transforms=None):
+    hparams = hparams or _HPARAMS_DEFAULT
+    transforms = transforms or _RAND_TRANSFORMS
+    return [AugmentOp(
+        name, prob=0.5, magnitude=magnitude, hparams=hparams) for name in transforms]
+
+
+class RandAugment:
+    def __init__(self, ops, num_layers=2, choice_weights=None):
+        self.ops = ops
+        self.num_layers = num_layers
+        self.choice_weights = choice_weights
+
+    def __call__(self, img):
+        # no replacement when using weighted choice
+        ops = np.random.choice(
+            self.ops, self.num_layers, replace=self.choice_weights is None, p=self.choice_weights)
+        for op in ops:
+            img = op(img)
+        return img
+
+    def __repr__(self):
+        fs = self.__class__.__name__ + f'(n={self.num_layers}, ops='
+        for op in self.ops:
+            fs += f'\n\t{op}'
+        fs += ')'
+        return fs
+
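rand_augment_ops plus RandAugment are the programmatic equivalent of the config-string entry point defined next; a sketch:

    ops = rand_augment_ops(magnitude=9)
    ra = RandAugment(ops, num_layers=2)  # two ops sampled per image
    augmented = ra(img)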
+
+def rand_augment_transform(config_str, hparams):
+    """
+    Create a RandAugment transform
+
+    :param config_str: String defining configuration of random augmentation. Consists of multiple sections separated
+        by dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand').
+        The remaining sections (order does not matter) determine
+            'm' - integer magnitude of rand augment
+            'n' - integer num layers (number of transform ops selected per image)
+            'w' - integer probability weight index (index of a set of weights to influence choice of op)
+            'mstd' - float std deviation of magnitude noise applied, or uniform sampling if infinity (or > 100)
+            'mmax' - set upper bound for magnitude to something other than default of _LEVEL_DENOM (10)
+            'inc' - integer (bool), use augmentations that increase in severity with magnitude (default: 0)
+        Ex 'rand-m9-n3-mstd0.5' results in RandAugment with magnitude 9, num_layers 3, magnitude_std 0.5
+        'rand-mstd1-w0' results in magnitude_std 1.0, weights 0, default magnitude of 10 and num_layers 2
+
+    :param hparams: Other hparams (kwargs) for the RandAugmentation scheme
+
+    :return: A PyTorch compatible Transform
+    """
+    magnitude = _LEVEL_DENOM  # default to _LEVEL_DENOM for magnitude (currently 10)
+    num_layers = 2  # default to 2 ops per image
+    weight_idx = None  # default to no probability weights for op choice
+    transforms = _RAND_TRANSFORMS
+    config = config_str.split('-')
+    assert config[0] == 'rand'
+    config = config[1:]
+    for c in config:
+        cs = re.split(r'(\d.*)', c)
+        if len(cs) < 2:
+            continue
+        key, val = cs[:2]
+        if key == 'mstd':
+            # noise param / randomization of magnitude values
+            mstd = float(val)
+            if mstd > 100:
+                # use uniform sampling in 0 to magnitude if mstd is > 100
+                mstd = float('inf')
+            hparams.setdefault('magnitude_std', mstd)
+        elif key == 'mmax':
+            # clip magnitude between [0, mmax] instead of default [0, _LEVEL_DENOM]
+            hparams.setdefault('magnitude_max', int(val))
+        elif key == 'inc':
+            if int(val):  # parse as int so 'inc0' stays falsy; bool() on a non-empty string is always True
+                transforms = _RAND_INCREASING_TRANSFORMS
+        elif key == 'm':
+            magnitude = int(val)
+        elif key == 'n':
+            num_layers = int(val)
+        elif key == 'w':
+            weight_idx = int(val)
+        else:
+            assert False, 'Unknown RandAugment config section'
+    ra_ops = rand_augment_ops(magnitude=magnitude, hparams=hparams, transforms=transforms)
+    choice_weights = None if weight_idx is None else _select_rand_weights(weight_idx)
+    return RandAugment(ra_ops, num_layers, choice_weights=choice_weights)
+
+
+_AUGMIX_TRANSFORMS = [
+    'AutoContrast',
+    'ColorIncreasing',  # not in paper
+    'ContrastIncreasing',  # not in paper
+    'BrightnessIncreasing',  # not in paper
+    'SharpnessIncreasing',  # not in paper
+    'Equalize',
+    'Rotate',
+    'PosterizeIncreasing',
+    'SolarizeIncreasing',
+    'ShearX',
+    'ShearY',
+    'TranslateXRel',
+    'TranslateYRel',
+]
+
+
+def augmix_ops(magnitude=10, hparams=None, transforms=None):
+    hparams = hparams or _HPARAMS_DEFAULT
+    transforms = transforms or _AUGMIX_TRANSFORMS
+    return [AugmentOp(
+        name, prob=1.0, magnitude=magnitude, hparams=hparams) for name in transforms]
+
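A hedged usage sketch of the config-string entry point above (the hparam values are illustrative assumptions):

    tfm = rand_augment_transform('rand-m9-mstd0.5-inc1', {'translate_const': 100})
    augmented = tfm(img)  # magnitude 9, noisy magnitudes, 'increasing' transform set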
+        rws = []
+        for w in ws[::-1]:
+            alpha = w / cump
+            cump *= (1 - alpha)
+            rws.append(alpha)
+        return np.array(rws[::-1], dtype=np.float32)
+
+    def _apply_blended(self, img, mixing_weights, m):
+        # This is my first crack at implementing a slightly faster mixed augmentation. Instead
+        # of accumulating the mix for each chain in a Numpy array and then blending with original,
+        # it recomputes the blending coefficients and applies one PIL image blend per chain.
+        # TODO the results appear in the right ballpark but they differ by more than rounding.
+        img_orig = img.copy()
+        ws = self._calc_blended_weights(mixing_weights, m)
+        for w in ws:
+            depth = self.depth if self.depth > 0 else np.random.randint(1, 4)
+            ops = np.random.choice(self.ops, depth, replace=True)
+            img_aug = img_orig  # no ops are in-place, deep copy not necessary
+            for op in ops:
+                img_aug = op(img_aug)
+            img = Image.blend(img, img_aug, w)
+        return img
+
+    def _apply_basic(self, img, mixing_weights, m):
+        # This is a literal adaptation of the paper/official implementation without normalizations and
+        # PIL <-> Numpy conversions between every op. It is still quite CPU compute heavy compared to the
+        # typical augmentation transforms, could use a GPU / Kornia implementation.
+        img_shape = img.size[0], img.size[1], len(img.getbands())
+        mixed = np.zeros(img_shape, dtype=np.float32)
+        for mw in mixing_weights:
+            depth = self.depth if self.depth > 0 else np.random.randint(1, 4)
+            ops = np.random.choice(self.ops, depth, replace=True)
+            img_aug = img  # no ops are in-place, deep copy not necessary
+            for op in ops:
+                img_aug = op(img_aug)
+            mixed += mw * np.asarray(img_aug, dtype=np.float32)
+        np.clip(mixed, 0, 255., out=mixed)
+        mixed = Image.fromarray(mixed.astype(np.uint8))
+        return Image.blend(img, mixed, m)
+
+    def __call__(self, img):
+        mixing_weights = np.float32(np.random.dirichlet([self.alpha] * self.width))
+        m = np.float32(np.random.beta(self.alpha, self.alpha))
+        if self.blended:
+            mixed = self._apply_blended(img, mixing_weights, m)
+        else:
+            mixed = self._apply_basic(img, mixing_weights, m)
+        return mixed
+
+    def __repr__(self):
+        fs = self.__class__.__name__ + f'(alpha={self.alpha}, width={self.width}, depth={self.depth}, ops='
+        for op in self.ops:
+            fs += f'\n\t{op}'
+        fs += ')'
+        return fs
+
+
+def augment_and_mix_transform(config_str, hparams):
+    """ Create AugMix PyTorch transform
+
+    :param config_str: String defining configuration of random augmentation. Consists of multiple sections separated by
+        dashes ('-'). The first section defines the specific variant of augmentation (currently only 'augmix').
+        The remaining sections, not order specific, determine
+            'm' - integer magnitude (severity) of augmentation mix (default: 3)
+            'w' - integer width of augmentation chain (default: 3)
+            'd' - integer depth of augmentation chain (-1 is random [1, 3], default: -1)
+            'b' - integer (bool), blend each branch of chain into end result without a final blend, less CPU (default: 0)
+            'mstd' - float std deviation of magnitude noise applied (default: 0)
+        Ex 'augmix-m5-w4-d2' results in AugMix with severity 5, chain width 4, chain depth 2
+
+    :param hparams: Other hparams (kwargs) for the Augmentation transforms
+
+    :return: A PyTorch compatible Transform
+    """
+    magnitude = 3
+    width = 3
+    depth = -1
+    alpha = 1.
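+    # defaults follow the AugMix paper: severity 3, chain width 3, random chain depth in [1, 3]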
+    blended = False
+    config = config_str.split('-')
+    assert config[0] == 'augmix'
+    config = config[1:]
+    for c in config:
+        cs = re.split(r'(\d.*)', c)
+        if len(cs) < 2:
+            continue
+        key, val = cs[:2]
+        if key == 'mstd':
+            # noise param injected via hparams for now
+            hparams.setdefault('magnitude_std', float(val))
+        elif key == 'm':
+            magnitude = int(val)
+        elif key == 'w':
+            width = int(val)
+        elif key == 'd':
+            depth = int(val)
+        elif key == 'a':
+            alpha = float(val)
+        elif key == 'b':
+            blended = bool(int(val))  # parse as int first so 'b0' stays False; bool('0') would be True
+        else:
+            assert False, 'Unknown AugMix config section'
+    hparams.setdefault('magnitude_std', float('inf'))  # default to uniform sampling (if not set via mstd arg)
+    ops = augmix_ops(magnitude=magnitude, hparams=hparams)
+    return AugMixAugment(ops, alpha=alpha, width=width, depth=depth, blended=blended)
diff --git a/timm/data/config.py b/timm/data/config.py
new file mode 100644
index 0000000..38f5689
--- /dev/null
+++ b/timm/data/config.py
@@ -0,0 +1,78 @@
+import logging
+from .constants import *
+
+
+_logger = logging.getLogger(__name__)
+
+
+def resolve_data_config(args, default_cfg={}, model=None, use_test_size=False, verbose=False):
+    new_config = {}
+    default_cfg = default_cfg
+    if not default_cfg and model is not None and hasattr(model, 'default_cfg'):
+        default_cfg = model.default_cfg
+
+    # Resolve input/image size
+    in_chans = 3
+    if 'chans' in args and args['chans'] is not None:
+        in_chans = args['chans']
+
+    input_size = (in_chans, 224, 224)
+    if 'input_size' in args and args['input_size'] is not None:
+        assert isinstance(args['input_size'], (tuple, list))
+        assert len(args['input_size']) == 3
+        input_size = tuple(args['input_size'])
+        in_chans = input_size[0]  # input_size overrides in_chans
+    elif 'img_size' in args and args['img_size'] is not None:
+        assert isinstance(args['img_size'], int)
+        input_size = (in_chans, args['img_size'], args['img_size'])
+    else:
+        if use_test_size and 'test_input_size' in default_cfg:
+            input_size = default_cfg['test_input_size']
+        elif 'input_size' in default_cfg:
+            input_size = default_cfg['input_size']
+    new_config['input_size'] = input_size
+
+    # resolve interpolation method
+    new_config['interpolation'] = 'bicubic'
+    if 'interpolation' in args and args['interpolation']:
+        new_config['interpolation'] = args['interpolation']
+    elif 'interpolation' in default_cfg:
+        new_config['interpolation'] = default_cfg['interpolation']
+
+    # resolve dataset + model mean for normalization
+    new_config['mean'] = IMAGENET_DEFAULT_MEAN
+    if 'mean' in args and args['mean'] is not None:
+        mean = tuple(args['mean'])
+        if len(mean) == 1:
+            mean = tuple(list(mean) * in_chans)
+        else:
+            assert len(mean) == in_chans
+        new_config['mean'] = mean
+    elif 'mean' in default_cfg:
+        new_config['mean'] = default_cfg['mean']
+
+    # resolve dataset + model std deviation for normalization
+    new_config['std'] = IMAGENET_DEFAULT_STD
+    if 'std' in args and args['std'] is not None:
+        std = tuple(args['std'])
+        if len(std) == 1:
+            std = tuple(list(std) * in_chans)
+        else:
+            assert len(std) == in_chans
+        new_config['std'] = std
+    elif 'std' in default_cfg:
+        new_config['std'] = default_cfg['std']
+
+    # resolve default crop percentage
+    new_config['crop_pct'] = DEFAULT_CROP_PCT
+    if 'crop_pct' in args and args['crop_pct'] is not None:
+        new_config['crop_pct'] = args['crop_pct']
+    elif 'crop_pct' in default_cfg:
+        new_config['crop_pct'] = default_cfg['crop_pct']
+
+    if verbose:
+        _logger.info('Data processing configuration for current model + dataset:')
+        for n, v in
new_config.items(): + _logger.info('\t%s: %s' % (n, str(v))) + + return new_config diff --git a/timm/data/constants.py b/timm/data/constants.py new file mode 100644 index 0000000..d6d4a01 --- /dev/null +++ b/timm/data/constants.py @@ -0,0 +1,7 @@ +DEFAULT_CROP_PCT = 0.875 +IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406) +IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225) +IMAGENET_INCEPTION_MEAN = (0.5, 0.5, 0.5) +IMAGENET_INCEPTION_STD = (0.5, 0.5, 0.5) +IMAGENET_DPN_MEAN = (124 / 255, 117 / 255, 104 / 255) +IMAGENET_DPN_STD = tuple([1 / (.0167 * 255)] * 3) diff --git a/timm/data/dataset.py b/timm/data/dataset.py new file mode 100644 index 0000000..d3603a2 --- /dev/null +++ b/timm/data/dataset.py @@ -0,0 +1,152 @@ +""" Quick n Simple Image Folder, Tarfile based DataSet + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch.utils.data as data +import os +import torch +import logging + +from PIL import Image + +from .parsers import create_parser + +_logger = logging.getLogger(__name__) + + +_ERROR_RETRY = 50 + + +class ImageDataset(data.Dataset): + + def __init__( + self, + root, + parser=None, + class_map=None, + load_bytes=False, + transform=None, + target_transform=None, + ): + if parser is None or isinstance(parser, str): + parser = create_parser(parser or '', root=root, class_map=class_map) + self.parser = parser + self.load_bytes = load_bytes + self.transform = transform + self.target_transform = target_transform + self._consecutive_errors = 0 + + def __getitem__(self, index): + img, target = self.parser[index] + try: + img = img.read() if self.load_bytes else Image.open(img).convert('RGB') + except Exception as e: + _logger.warning(f'Skipped sample (index {index}, file {self.parser.filename(index)}). {str(e)}') + self._consecutive_errors += 1 + if self._consecutive_errors < _ERROR_RETRY: + return self.__getitem__((index + 1) % len(self.parser)) + else: + raise e + self._consecutive_errors = 0 + if self.transform is not None: + img = self.transform(img) + if target is None: + target = -1 + elif self.target_transform is not None: + target = self.target_transform(target) + return img, target + + def __len__(self): + return len(self.parser) + + def filename(self, index, basename=False, absolute=False): + return self.parser.filename(index, basename, absolute) + + def filenames(self, basename=False, absolute=False): + return self.parser.filenames(basename, absolute) + + +class IterableImageDataset(data.IterableDataset): + + def __init__( + self, + root, + parser=None, + split='train', + is_training=False, + batch_size=None, + repeats=0, + download=False, + transform=None, + target_transform=None, + ): + assert parser is not None + if isinstance(parser, str): + self.parser = create_parser( + parser, root=root, split=split, is_training=is_training, + batch_size=batch_size, repeats=repeats, download=download) + else: + self.parser = parser + self.transform = transform + self.target_transform = target_transform + self._consecutive_errors = 0 + + def __iter__(self): + for img, target in self.parser: + if self.transform is not None: + img = self.transform(img) + if self.target_transform is not None: + target = self.target_transform(target) + yield img, target + + def __len__(self): + if hasattr(self.parser, '__len__'): + return len(self.parser) + else: + return 0 + + def filename(self, index, basename=False, absolute=False): + assert False, 'Filename lookup by index not supported, use filenames().' 
+
+    def filenames(self, basename=False, absolute=False):
+        return self.parser.filenames(basename, absolute)
+
+
+class AugMixDataset(torch.utils.data.Dataset):
+    """Dataset wrapper to perform AugMix or other clean/augmentation mixes"""
+
+    def __init__(self, dataset, num_splits=2):
+        self.augmentation = None
+        self.normalize = None
+        self.dataset = dataset
+        if self.dataset.transform is not None:
+            self._set_transforms(self.dataset.transform)
+        self.num_splits = num_splits
+
+    def _set_transforms(self, x):
+        assert isinstance(x, (list, tuple)) and len(x) == 3, 'Expecting a tuple/list of 3 transforms'
+        self.dataset.transform = x[0]
+        self.augmentation = x[1]
+        self.normalize = x[2]
+
+    @property
+    def transform(self):
+        return self.dataset.transform
+
+    @transform.setter
+    def transform(self, x):
+        self._set_transforms(x)
+
+    def _normalize(self, x):
+        return x if self.normalize is None else self.normalize(x)
+
+    def __getitem__(self, i):
+        x, y = self.dataset[i]  # all splits share the same dataset base transform
+        x_list = [self._normalize(x)]  # first split only normalizes (this is the 'clean' split)
+        # run the full augmentation on the remaining splits
+        for _ in range(self.num_splits - 1):
+            x_list.append(self._normalize(self.augmentation(x)))
+        return tuple(x_list), y
+
+    def __len__(self):
+        return len(self.dataset)
diff --git a/timm/data/dataset_factory.py b/timm/data/dataset_factory.py
new file mode 100644
index 0000000..e86bcc2
--- /dev/null
+++ b/timm/data/dataset_factory.py
@@ -0,0 +1,139 @@
+import os
+
+from torchvision.datasets import CIFAR100, CIFAR10, MNIST, QMNIST, KMNIST, FashionMNIST, ImageNet, ImageFolder
+try:
+    from torchvision.datasets import Places365
+    has_places365 = True
+except ImportError:
+    has_places365 = False
+try:
+    from torchvision.datasets import INaturalist
+    has_inaturalist = True
+except ImportError:
+    has_inaturalist = False
+
+from .dataset import IterableImageDataset, ImageDataset
+
+_TORCH_BASIC_DS = dict(
+    cifar10=CIFAR10,
+    cifar100=CIFAR100,
+    mnist=MNIST,
+    qmnist=QMNIST,
+    kmnist=KMNIST,
+    fashion_mnist=FashionMNIST,
+)
+_TRAIN_SYNONYM = {'train', 'training'}
+_EVAL_SYNONYM = {'val', 'valid', 'validation', 'eval', 'evaluation'}
+
+
+def _search_split(root, split):
+    # look for sub-folder with name of split in root and use that if it exists
+    split_name = split.split('[')[0]
+    try_root = os.path.join(root, split_name)
+    if os.path.exists(try_root):
+        return try_root
+
+    def _try(syn):
+        for s in syn:
+            try_root = os.path.join(root, s)
+            if os.path.exists(try_root):
+                return try_root
+        return root
+    if split_name in _TRAIN_SYNONYM:
+        root = _try(_TRAIN_SYNONYM)
+    elif split_name in _EVAL_SYNONYM:
+        root = _try(_EVAL_SYNONYM)
+    return root
+
+
+def create_dataset(
+        name,
+        root,
+        split='validation',
+        search_split=True,
+        class_map=None,
+        load_bytes=False,
+        is_training=False,
+        download=False,
+        batch_size=None,
+        repeats=0,
+        **kwargs
+):
+    """ Dataset factory method
+
+    In parentheses after each arg is the type of dataset supported for that arg, one of:
+      * folder - default, timm folder (or tar) based ImageDataset
+      * torch - torchvision based datasets
+      * TFDS - Tensorflow-datasets wrapper in IterableDataset interface via IterableImageDataset
+      * all - any of the above
+
+    Args:
+        name: dataset name, empty is okay for folder based datasets
+        root: root folder of dataset (all)
+        split: dataset split (all)
+        search_split: search for split specific child folder from root so one can specify
+            `imagenet/` instead of `/imagenet/val`, etc on cmd line / config. (folder, torch/folder)
+        class_map: specify class -> index mapping via text file or dict (folder)
+        load_bytes: load data, return images as undecoded bytes (folder)
+        download: download dataset if not present and supported (TFDS, torch)
+        is_training: create dataset in train mode, this is different from the split.
+            For Iterable / TFDS it enables shuffle, ignored for other datasets. (TFDS)
+        batch_size: batch size hint for (TFDS)
+        repeats: dataset repeats per iteration i.e. epoch (TFDS)
+        **kwargs: other args to pass to dataset
+
+    Returns:
+        Dataset object
+    """
+    name = name.lower()
+    if name.startswith('torch/'):
+        name = name.split('/', 2)[-1]
+        torch_kwargs = dict(root=root, download=download, **kwargs)
+        if name in _TORCH_BASIC_DS:
+            ds_class = _TORCH_BASIC_DS[name]
+            use_train = split in _TRAIN_SYNONYM
+            ds = ds_class(train=use_train, **torch_kwargs)
+        elif name == 'inaturalist' or name == 'inat':
+            assert has_inaturalist, 'Please update to PyTorch 1.10, torchvision 0.11+ for INaturalist'
+            target_type = 'full'
+            split_split = split.split('/')
+            if len(split_split) > 1:
+                target_type = split_split[0].split('_')
+                if len(target_type) == 1:
+                    target_type = target_type[0]
+                split = split_split[-1]
+            if split in _TRAIN_SYNONYM:
+                split = '2021_train'
+            elif split in _EVAL_SYNONYM:
+                split = '2021_valid'
+            ds = INaturalist(version=split, target_type=target_type, **torch_kwargs)
+        elif name == 'places365':
+            assert has_places365, 'Please update to a newer PyTorch and torchvision for Places365 dataset.'
+            if split in _TRAIN_SYNONYM:
+                split = 'train-standard'
+            elif split in _EVAL_SYNONYM:
+                split = 'val'
+            ds = Places365(split=split, **torch_kwargs)
+        elif name == 'imagenet':
+            if split in _EVAL_SYNONYM:
+                split = 'val'
+            ds = ImageNet(split=split, **torch_kwargs)
+        elif name == 'image_folder' or name == 'folder':
+            # in case torchvision ImageFolder is preferred over timm ImageDataset for some reason
+            if search_split and os.path.isdir(root):
+                # look for split specific sub-folder in root
+                root = _search_split(root, split)
+            ds = ImageFolder(root, **kwargs)
+        else:
+            assert False, f"Unknown torchvision dataset {name}"
+    elif name.startswith('tfds/'):
+        ds = IterableImageDataset(
+            root, parser=name, split=split, is_training=is_training,
+            download=download, batch_size=batch_size, repeats=repeats, **kwargs)
+    else:
+        # FIXME support more advanced split cfg for ImageFolder/Tar datasets in the future
+        if search_split and os.path.isdir(root):
+            # look for split specific sub-folder in root
+            root = _search_split(root, split)
+        ds = ImageDataset(root, parser=name, class_map=class_map, load_bytes=load_bytes, **kwargs)
+    return ds
diff --git a/timm/data/distributed_sampler.py b/timm/data/distributed_sampler.py
new file mode 100644
index 0000000..fa403d0
--- /dev/null
+++ b/timm/data/distributed_sampler.py
@@ -0,0 +1,128 @@
+import math
+import torch
+from torch.utils.data import Sampler
+import torch.distributed as dist
+
+
+class OrderedDistributedSampler(Sampler):
+    """Sampler that restricts data loading to a subset of the dataset.
+    It is especially useful in conjunction with
+    :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
+    process can pass a DistributedSampler instance as a DataLoader sampler,
+    and load a subset of the original dataset that is exclusive to it.
+    .. note::
+        Dataset is assumed to be of constant size.
+    Arguments:
+        dataset: Dataset used for sampling.
+        num_replicas (optional): Number of processes participating in
+            distributed training.
+        rank (optional): Rank of the current process within num_replicas.
+    """
+
+    def __init__(self, dataset, num_replicas=None, rank=None):
+        if num_replicas is None:
+            if not dist.is_available():
+                raise RuntimeError("Requires distributed package to be available")
+            num_replicas = dist.get_world_size()
+        if rank is None:
+            if not dist.is_available():
+                raise RuntimeError("Requires distributed package to be available")
+            rank = dist.get_rank()
+        self.dataset = dataset
+        self.num_replicas = num_replicas
+        self.rank = rank
+        self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
+        self.total_size = self.num_samples * self.num_replicas
+
+    def __iter__(self):
+        indices = list(range(len(self.dataset)))
+
+        # add extra samples to make it evenly divisible
+        indices += indices[:(self.total_size - len(indices))]
+        assert len(indices) == self.total_size
+
+        # subsample
+        indices = indices[self.rank:self.total_size:self.num_replicas]
+        assert len(indices) == self.num_samples
+
+        return iter(indices)
+
+    def __len__(self):
+        return self.num_samples
+
+
+class RepeatAugSampler(Sampler):
+    """Sampler that restricts data loading to a subset of the dataset for distributed,
+    with repeated augmentation.
+    It ensures that each augmented version of a sample is visible to a
+    different process (GPU). Heavily based on torch.utils.data.DistributedSampler
+
+    This sampler was taken from https://github.com/facebookresearch/deit/blob/0c4b8f60/samplers.py
+    Copyright (c) 2015-present, Facebook, Inc.
+    """
+
+    def __init__(
+            self,
+            dataset,
+            num_replicas=None,
+            rank=None,
+            shuffle=True,
+            num_repeats=3,
+            selected_round=256,
+            selected_ratio=0,
+    ):
+        if num_replicas is None:
+            if not dist.is_available():
+                raise RuntimeError("Requires distributed package to be available")
+            num_replicas = dist.get_world_size()
+        if rank is None:
+            if not dist.is_available():
+                raise RuntimeError("Requires distributed package to be available")
+            rank = dist.get_rank()
+        self.dataset = dataset
+        self.num_replicas = num_replicas
+        self.rank = rank
+        self.shuffle = shuffle
+        self.num_repeats = num_repeats
+        self.epoch = 0
+        self.num_samples = int(math.ceil(len(self.dataset) * num_repeats / self.num_replicas))
+        self.total_size = self.num_samples * self.num_replicas
+        # Determine the number of samples to select per epoch for each rank.
+        # num_selected logic defaults to be the same as original RASampler impl, but this one can be tweaked
+        # via selected_ratio and selected_round args.
+        selected_ratio = selected_ratio or num_replicas  # ratio to reduce selected samples by, num_replicas if 0
+        if selected_round:
+            self.num_selected_samples = int(math.floor(
+                len(self.dataset) // selected_round * selected_round / selected_ratio))
+        else:
+            self.num_selected_samples = int(math.ceil(len(self.dataset) / selected_ratio))
+
+    def __iter__(self):
+        # deterministically shuffle based on epoch
+        g = torch.Generator()
+        g.manual_seed(self.epoch)
+        if self.shuffle:
+            indices = torch.randperm(len(self.dataset), generator=g).tolist()
+        else:
+            indices = list(range(len(self.dataset)))
+
+        # produce repeats e.g. [0, 0, 0, 1, 1, 1, 2, 2, 2....]
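+        # each sample now appears num_repeats times consecutively; after padding,
+        # the strided per-rank subsample below hands consecutive (identical) indices
+        # to different ranks, so each GPU augments the same image independently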
+        indices = [x for x in indices for _ in range(self.num_repeats)]
+        # add extra samples to make it evenly divisible
+        padding_size = self.total_size - len(indices)
+        indices += indices[:padding_size]
+        assert len(indices) == self.total_size
+
+        # subsample per rank
+        indices = indices[self.rank:self.total_size:self.num_replicas]
+        assert len(indices) == self.num_samples
+
+        # return up to num selected samples
+        return iter(indices[:self.num_selected_samples])
+
+    def __len__(self):
+        return self.num_selected_samples
+
+    def set_epoch(self, epoch):
+        self.epoch = epoch
\ No newline at end of file
diff --git a/timm/data/loader.py b/timm/data/loader.py
new file mode 100644
index 0000000..a02399a
--- /dev/null
+++ b/timm/data/loader.py
@@ -0,0 +1,289 @@
+""" Loader Factory, Fast Collate, CUDA Prefetcher
+
+Prefetcher and Fast Collate inspired by NVIDIA APEX example at
+https://github.com/NVIDIA/apex/commit/d5e2bb4bdeedd27b1dfaf5bb2b24d6c000dee9be#diff-cf86c282ff7fba81fad27a559379d5bf
+
+Hacked together by / Copyright 2021 Ross Wightman
+"""
+import random
+from functools import partial
+from typing import Callable
+
+import torch.utils.data
+import numpy as np
+
+from .transforms_factory import create_transform
+from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
+from .distributed_sampler import OrderedDistributedSampler, RepeatAugSampler
+from .random_erasing import RandomErasing
+from .mixup import FastCollateMixup
+
+
+def fast_collate(batch):
+    """ A fast collation function optimized for uint8 images (np array or torch) and int64 targets (labels)"""
+    assert isinstance(batch[0], tuple)
+    batch_size = len(batch)
+    if isinstance(batch[0][0], tuple):
+        # This branch 'deinterleaves' and flattens tuples of input tensors into one tensor ordered by position
+        # such that all tuple elements at position n end up in the nth chunk of torch.split(tensor, batch_size)
+        inner_tuple_size = len(batch[0][0])
+        flattened_batch_size = batch_size * inner_tuple_size
+        targets = torch.zeros(flattened_batch_size, dtype=torch.int64)
+        tensor = torch.zeros((flattened_batch_size, *batch[0][0][0].shape), dtype=torch.uint8)
+        for i in range(batch_size):
+            assert len(batch[i][0]) == inner_tuple_size  # all input tensor tuples must be same length
+            for j in range(inner_tuple_size):
+                targets[i + j * batch_size] = batch[i][1]
+                tensor[i + j * batch_size] += torch.from_numpy(batch[i][0][j])
+        return tensor, targets
+    elif isinstance(batch[0][0], np.ndarray):
+        targets = torch.tensor([b[1] for b in batch], dtype=torch.int64)
+        assert len(targets) == batch_size
+        tensor = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8)
+        for i in range(batch_size):
+            tensor[i] += torch.from_numpy(batch[i][0])
+        return tensor, targets
+    elif isinstance(batch[0][0], torch.Tensor):
+        targets = torch.tensor([b[1] for b in batch], dtype=torch.int64)
+        assert len(targets) == batch_size
+        tensor = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8)
+        for i in range(batch_size):
+            tensor[i].copy_(batch[i][0])
+        return tensor, targets
+    else:
+        assert False
+
+
+class PrefetchLoader:
+
+    def __init__(self,
+                 loader,
+                 mean=IMAGENET_DEFAULT_MEAN,
+                 std=IMAGENET_DEFAULT_STD,
+                 fp16=False,
+                 re_prob=0.,
+                 re_mode='const',
+                 re_count=1,
+                 re_num_splits=0):
+        self.loader = loader
+        self.mean = torch.tensor([x * 255 for x in mean]).cuda().view(1, 3, 1, 1)
+        self.std = torch.tensor([x * 255 for x in std]).cuda().view(1, 3, 1, 1)
+        self.fp16 = fp16
+        if fp16:
+            self.mean = self.mean.half()
+            self.std =
self.std.half() + if re_prob > 0.: + self.random_erasing = RandomErasing( + probability=re_prob, mode=re_mode, max_count=re_count, num_splits=re_num_splits) + else: + self.random_erasing = None + + def __iter__(self): + stream = torch.cuda.Stream() + first = True + + for next_input, next_target in self.loader: + with torch.cuda.stream(stream): + next_input = next_input.cuda(non_blocking=True) + next_target = next_target.cuda(non_blocking=True) + if self.fp16: + next_input = next_input.half().sub_(self.mean).div_(self.std) + else: + next_input = next_input.float().sub_(self.mean).div_(self.std) + if self.random_erasing is not None: + next_input = self.random_erasing(next_input) + + if not first: + yield input, target + else: + first = False + + torch.cuda.current_stream().wait_stream(stream) + input = next_input + target = next_target + + yield input, target + + def __len__(self): + return len(self.loader) + + @property + def sampler(self): + return self.loader.sampler + + @property + def dataset(self): + return self.loader.dataset + + @property + def mixup_enabled(self): + if isinstance(self.loader.collate_fn, FastCollateMixup): + return self.loader.collate_fn.mixup_enabled + else: + return False + + @mixup_enabled.setter + def mixup_enabled(self, x): + if isinstance(self.loader.collate_fn, FastCollateMixup): + self.loader.collate_fn.mixup_enabled = x + + +def _worker_init(worker_id, worker_seeding='all'): + worker_info = torch.utils.data.get_worker_info() + assert worker_info.id == worker_id + if isinstance(worker_seeding, Callable): + seed = worker_seeding(worker_info) + random.seed(seed) + torch.manual_seed(seed) + np.random.seed(seed % (2 ** 32 - 1)) + else: + assert worker_seeding in ('all', 'part') + # random / torch seed already called in dataloader iter class w/ worker_info.seed + # to reproduce some old results (same seed + hparam combo), partial seeding is required (skip numpy re-seed) + if worker_seeding == 'all': + np.random.seed(worker_info.seed % (2 ** 32 - 1)) + + +def create_loader( + dataset, + input_size, + batch_size, + is_training=False, + use_prefetcher=True, + no_aug=False, + re_prob=0., + re_mode='const', + re_count=1, + re_split=False, + scale=None, + ratio=None, + hflip=0.5, + vflip=0., + color_jitter=0.4, + auto_augment=None, + num_aug_repeats=0, + num_aug_splits=0, + interpolation='bilinear', + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD, + num_workers=1, + distributed=False, + crop_pct=None, + collate_fn=None, + pin_memory=False, + fp16=False, + tf_preprocessing=False, + use_multi_epochs_loader=False, + persistent_workers=True, + worker_seeding='all', +): + re_num_splits = 0 + if re_split: + # apply RE to second half of batch if no aug split otherwise line up with aug split + re_num_splits = num_aug_splits or 2 + dataset.transform = create_transform( + input_size, + is_training=is_training, + use_prefetcher=use_prefetcher, + no_aug=no_aug, + scale=scale, + ratio=ratio, + hflip=hflip, + vflip=vflip, + color_jitter=color_jitter, + auto_augment=auto_augment, + interpolation=interpolation, + mean=mean, + std=std, + crop_pct=crop_pct, + tf_preprocessing=tf_preprocessing, + re_prob=re_prob, + re_mode=re_mode, + re_count=re_count, + re_num_splits=re_num_splits, + separate=num_aug_splits > 0, + ) + + sampler = None + if distributed and not isinstance(dataset, torch.utils.data.IterableDataset): + if is_training: + if num_aug_repeats: + sampler = RepeatAugSampler(dataset, num_repeats=num_aug_repeats) + else: + sampler = 
torch.utils.data.distributed.DistributedSampler(dataset)
+        else:
+            # This will add extra duplicate entries to result in equal num
+            # of samples per-process, will slightly alter validation results
+            sampler = OrderedDistributedSampler(dataset)
+    else:
+        assert num_aug_repeats == 0, "RepeatAugment not currently supported in non-distributed or IterableDataset use"
+
+    if collate_fn is None:
+        collate_fn = fast_collate if use_prefetcher else torch.utils.data.dataloader.default_collate
+
+    loader_class = torch.utils.data.DataLoader
+    if use_multi_epochs_loader:
+        loader_class = MultiEpochsDataLoader
+
+    loader_args = dict(
+        batch_size=batch_size,
+        shuffle=not isinstance(dataset, torch.utils.data.IterableDataset) and sampler is None and is_training,
+        num_workers=num_workers,
+        sampler=sampler,
+        collate_fn=collate_fn,
+        pin_memory=pin_memory,
+        drop_last=is_training,
+        worker_init_fn=partial(_worker_init, worker_seeding=worker_seeding),
+        persistent_workers=persistent_workers
+    )
+    try:
+        loader = loader_class(dataset, **loader_args)
+    except TypeError:
+        loader_args.pop('persistent_workers')  # only in PyTorch 1.7+
+        loader = loader_class(dataset, **loader_args)
+    if use_prefetcher:
+        prefetch_re_prob = re_prob if is_training and not no_aug else 0.
+        loader = PrefetchLoader(
+            loader,
+            mean=mean,
+            std=std,
+            fp16=fp16,
+            re_prob=prefetch_re_prob,
+            re_mode=re_mode,
+            re_count=re_count,
+            re_num_splits=re_num_splits
+        )
+
+    return loader
+
+
+class MultiEpochsDataLoader(torch.utils.data.DataLoader):
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._DataLoader__initialized = False
+        self.batch_sampler = _RepeatSampler(self.batch_sampler)
+        self._DataLoader__initialized = True
+        self.iterator = super().__iter__()
+
+    def __len__(self):
+        return len(self.batch_sampler.sampler)
+
+    def __iter__(self):
+        for i in range(len(self)):
+            yield next(self.iterator)
+
+
+class _RepeatSampler(object):
+    """ Sampler that repeats forever.
+
+    Args:
+        sampler (Sampler)
+    """
+
+    def __init__(self, sampler):
+        self.sampler = sampler
+
+    def __iter__(self):
+        while True:
+            yield from iter(self.sampler)
diff --git a/timm/data/mixup.py b/timm/data/mixup.py
new file mode 100644
index 0000000..7e382c5
--- /dev/null
+++ b/timm/data/mixup.py
@@ -0,0 +1,316 @@
+""" Mixup and Cutmix
+
+Papers:
+mixup: Beyond Empirical Risk Minimization (https://arxiv.org/abs/1710.09412)
+
+CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features (https://arxiv.org/abs/1905.04899)
+
+Code Reference:
+CutMix: https://github.com/clovaai/CutMix-PyTorch
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+import numpy as np
+import torch
+
+
+def one_hot(x, num_classes, on_value=1., off_value=0., device='cuda'):
+    x = x.long().view(-1, 1)
+    return torch.full((x.size()[0], num_classes), off_value, device=device).scatter_(1, x, on_value)
+
+
+def mixup_target(target, num_classes, lam=1., smoothing=0.0, device='cuda'):
+    off_value = smoothing / num_classes
+    on_value = 1. - smoothing + off_value
+    y1 = one_hot(target, num_classes, on_value=on_value, off_value=off_value, device=device)
+    y2 = one_hot(target.flip(0), num_classes, on_value=on_value, off_value=off_value, device=device)
+    return y1 * lam + y2 * (1. - lam)
+
+
+def rand_bbox(img_shape, lam, margin=0., count=None):
+    """ Standard CutMix bounding-box
+    Generates a random square bbox based on lambda value. This impl includes
+    support for enforcing a border margin as percent of bbox dimensions.
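+    The per-side cut ratio is sqrt(1 - lam), so e.g. lam=0.75 yields a box covering roughly 25% of the image area.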
+ + Args: + img_shape (tuple): Image shape as tuple + lam (float): Cutmix lambda value + margin (float): Percentage of bbox dimension to enforce as margin (reduce amount of box outside image) + count (int): Number of bbox to generate + """ + ratio = np.sqrt(1 - lam) + img_h, img_w = img_shape[-2:] + cut_h, cut_w = int(img_h * ratio), int(img_w * ratio) + margin_y, margin_x = int(margin * cut_h), int(margin * cut_w) + cy = np.random.randint(0 + margin_y, img_h - margin_y, size=count) + cx = np.random.randint(0 + margin_x, img_w - margin_x, size=count) + yl = np.clip(cy - cut_h // 2, 0, img_h) + yh = np.clip(cy + cut_h // 2, 0, img_h) + xl = np.clip(cx - cut_w // 2, 0, img_w) + xh = np.clip(cx + cut_w // 2, 0, img_w) + return yl, yh, xl, xh + + +def rand_bbox_minmax(img_shape, minmax, count=None): + """ Min-Max CutMix bounding-box + Inspired by Darknet cutmix impl, generates a random rectangular bbox + based on min/max percent values applied to each dimension of the input image. + + Typical defaults for minmax are usually in the .2-.3 for min and .8-.9 range for max. + + Args: + img_shape (tuple): Image shape as tuple + minmax (tuple or list): Min and max bbox ratios (as percent of image size) + count (int): Number of bbox to generate + """ + assert len(minmax) == 2 + img_h, img_w = img_shape[-2:] + cut_h = np.random.randint(int(img_h * minmax[0]), int(img_h * minmax[1]), size=count) + cut_w = np.random.randint(int(img_w * minmax[0]), int(img_w * minmax[1]), size=count) + yl = np.random.randint(0, img_h - cut_h, size=count) + xl = np.random.randint(0, img_w - cut_w, size=count) + yu = yl + cut_h + xu = xl + cut_w + return yl, yu, xl, xu + + +def cutmix_bbox_and_lam(img_shape, lam, ratio_minmax=None, correct_lam=True, count=None): + """ Generate bbox and apply lambda correction. + """ + if ratio_minmax is not None: + yl, yu, xl, xu = rand_bbox_minmax(img_shape, ratio_minmax, count=count) + else: + yl, yu, xl, xu = rand_bbox(img_shape, lam, count=count) + if correct_lam or ratio_minmax is not None: + bbox_area = (yu - yl) * (xu - xl) + lam = 1. - bbox_area / float(img_shape[-2] * img_shape[-1]) + return (yl, yu, xl, xu), lam + + +class Mixup: + """ Mixup/Cutmix that applies different params to each element or whole batch + + Args: + mixup_alpha (float): mixup alpha value, mixup is active if > 0. + cutmix_alpha (float): cutmix alpha value, cutmix is active if > 0. + cutmix_minmax (List[float]): cutmix min/max image ratio, cutmix is active and uses this vs alpha if not None. 
+        prob (float): probability of applying mixup or cutmix per batch or element
+        switch_prob (float): probability of switching to cutmix instead of mixup when both are active
+        mode (str): how to apply mixup/cutmix params (per 'batch', 'pair' (pair of elements), 'elem' (element))
+        correct_lam (bool): apply lambda correction when cutmix bbox clipped by image borders
+        label_smoothing (float): apply label smoothing to the mixed target tensor
+        num_classes (int): number of classes for target
+    """
+    def __init__(self, mixup_alpha=1., cutmix_alpha=0., cutmix_minmax=None, prob=1.0, switch_prob=0.5,
+                 mode='batch', correct_lam=True, label_smoothing=0.1, num_classes=1000):
+        self.mixup_alpha = mixup_alpha
+        self.cutmix_alpha = cutmix_alpha
+        self.cutmix_minmax = cutmix_minmax
+        if self.cutmix_minmax is not None:
+            assert len(self.cutmix_minmax) == 2
+            # force cutmix alpha == 1.0 when minmax active to keep logic simple & safe
+            self.cutmix_alpha = 1.0
+        self.mix_prob = prob
+        self.switch_prob = switch_prob
+        self.label_smoothing = label_smoothing
+        self.num_classes = num_classes
+        self.mode = mode
+        self.correct_lam = correct_lam  # correct lambda based on clipped area for cutmix
+        self.mixup_enabled = True  # set to false to disable mixing (intended to be set by train loop)
+
+    def _params_per_elem(self, batch_size):
+        lam = np.ones(batch_size, dtype=np.float32)
+        use_cutmix = np.zeros(batch_size, dtype=bool)  # plain bool; np.bool is deprecated/removed in newer numpy
+        if self.mixup_enabled:
+            if self.mixup_alpha > 0. and self.cutmix_alpha > 0.:
+                use_cutmix = np.random.rand(batch_size) < self.switch_prob
+                lam_mix = np.where(
+                    use_cutmix,
+                    np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size),
+                    np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size))
+            elif self.mixup_alpha > 0.:
+                lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size)
+            elif self.cutmix_alpha > 0.:
+                use_cutmix = np.ones(batch_size, dtype=bool)
+                lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size)
+            else:
+                assert False, "One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true."
+            lam = np.where(np.random.rand(batch_size) < self.mix_prob, lam_mix.astype(np.float32), lam)
+        return lam, use_cutmix
+
+    def _params_per_batch(self):
+        lam = 1.
+        use_cutmix = False
+        if self.mixup_enabled and np.random.rand() < self.mix_prob:
+            if self.mixup_alpha > 0. and self.cutmix_alpha > 0.:
+                use_cutmix = np.random.rand() < self.switch_prob
+                lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha) if use_cutmix else \
+                    np.random.beta(self.mixup_alpha, self.mixup_alpha)
+            elif self.mixup_alpha > 0.:
+                lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha)
+            elif self.cutmix_alpha > 0.:
+                use_cutmix = True
+                lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha)
+            else:
+                assert False, "One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true."
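+            # a single (lam, use_cutmix) draw is shared by the entire batch in this mode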
+ lam = float(lam_mix) + return lam, use_cutmix + + def _mix_elem(self, x): + batch_size = len(x) + lam_batch, use_cutmix = self._params_per_elem(batch_size) + x_orig = x.clone() # need to keep an unmodified original for mixing source + for i in range(batch_size): + j = batch_size - i - 1 + lam = lam_batch[i] + if lam != 1.: + if use_cutmix[i]: + (yl, yh, xl, xh), lam = cutmix_bbox_and_lam( + x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam) + x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh] + lam_batch[i] = lam + else: + x[i] = x[i] * lam + x_orig[j] * (1 - lam) + return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1) + + def _mix_pair(self, x): + batch_size = len(x) + lam_batch, use_cutmix = self._params_per_elem(batch_size // 2) + x_orig = x.clone() # need to keep an unmodified original for mixing source + for i in range(batch_size // 2): + j = batch_size - i - 1 + lam = lam_batch[i] + if lam != 1.: + if use_cutmix[i]: + (yl, yh, xl, xh), lam = cutmix_bbox_and_lam( + x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam) + x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh] + x[j][:, yl:yh, xl:xh] = x_orig[i][:, yl:yh, xl:xh] + lam_batch[i] = lam + else: + x[i] = x[i] * lam + x_orig[j] * (1 - lam) + x[j] = x[j] * lam + x_orig[i] * (1 - lam) + lam_batch = np.concatenate((lam_batch, lam_batch[::-1])) + return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1) + + def _mix_batch(self, x): + lam, use_cutmix = self._params_per_batch() + if lam == 1.: + return 1. + if use_cutmix: + (yl, yh, xl, xh), lam = cutmix_bbox_and_lam( + x.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam) + x[:, :, yl:yh, xl:xh] = x.flip(0)[:, :, yl:yh, xl:xh] + else: + x_flipped = x.flip(0).mul_(1. - lam) + x.mul_(lam).add_(x_flipped) + return lam + + def __call__(self, x, target): + assert len(x) % 2 == 0, 'Batch size should be even when using this' + if self.mode == 'elem': + lam = self._mix_elem(x) + elif self.mode == 'pair': + lam = self._mix_pair(x) + else: + lam = self._mix_batch(x) + target = mixup_target(target, self.num_classes, lam, self.label_smoothing, x.device) + return x, target + + +class FastCollateMixup(Mixup): + """ Fast Collate w/ Mixup/Cutmix that applies different params to each element or whole batch + + A Mixup impl that's performed while collating the batches. 
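+    Mixing is done on the raw uint8 numpy samples while the batch tensor is being
+    assembled, so it pairs with fast_collate and PrefetchLoader without an extra
+    pass over the already collated batch.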
+ """ + + def _mix_elem_collate(self, output, batch, half=False): + batch_size = len(batch) + num_elem = batch_size // 2 if half else batch_size + assert len(output) == num_elem + lam_batch, use_cutmix = self._params_per_elem(num_elem) + for i in range(num_elem): + j = batch_size - i - 1 + lam = lam_batch[i] + mixed = batch[i][0] + if lam != 1.: + if use_cutmix[i]: + if not half: + mixed = mixed.copy() + (yl, yh, xl, xh), lam = cutmix_bbox_and_lam( + output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam) + mixed[:, yl:yh, xl:xh] = batch[j][0][:, yl:yh, xl:xh] + lam_batch[i] = lam + else: + mixed = mixed.astype(np.float32) * lam + batch[j][0].astype(np.float32) * (1 - lam) + np.rint(mixed, out=mixed) + output[i] += torch.from_numpy(mixed.astype(np.uint8)) + if half: + lam_batch = np.concatenate((lam_batch, np.ones(num_elem))) + return torch.tensor(lam_batch).unsqueeze(1) + + def _mix_pair_collate(self, output, batch): + batch_size = len(batch) + lam_batch, use_cutmix = self._params_per_elem(batch_size // 2) + for i in range(batch_size // 2): + j = batch_size - i - 1 + lam = lam_batch[i] + mixed_i = batch[i][0] + mixed_j = batch[j][0] + assert 0 <= lam <= 1.0 + if lam < 1.: + if use_cutmix[i]: + (yl, yh, xl, xh), lam = cutmix_bbox_and_lam( + output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam) + patch_i = mixed_i[:, yl:yh, xl:xh].copy() + mixed_i[:, yl:yh, xl:xh] = mixed_j[:, yl:yh, xl:xh] + mixed_j[:, yl:yh, xl:xh] = patch_i + lam_batch[i] = lam + else: + mixed_temp = mixed_i.astype(np.float32) * lam + mixed_j.astype(np.float32) * (1 - lam) + mixed_j = mixed_j.astype(np.float32) * lam + mixed_i.astype(np.float32) * (1 - lam) + mixed_i = mixed_temp + np.rint(mixed_j, out=mixed_j) + np.rint(mixed_i, out=mixed_i) + output[i] += torch.from_numpy(mixed_i.astype(np.uint8)) + output[j] += torch.from_numpy(mixed_j.astype(np.uint8)) + lam_batch = np.concatenate((lam_batch, lam_batch[::-1])) + return torch.tensor(lam_batch).unsqueeze(1) + + def _mix_batch_collate(self, output, batch): + batch_size = len(batch) + lam, use_cutmix = self._params_per_batch() + if use_cutmix: + (yl, yh, xl, xh), lam = cutmix_bbox_and_lam( + output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam) + for i in range(batch_size): + j = batch_size - i - 1 + mixed = batch[i][0] + if lam != 1.: + if use_cutmix: + mixed = mixed.copy() # don't want to modify the original while iterating + mixed[:, yl:yh, xl:xh] = batch[j][0][:, yl:yh, xl:xh] + else: + mixed = mixed.astype(np.float32) * lam + batch[j][0].astype(np.float32) * (1 - lam) + np.rint(mixed, out=mixed) + output[i] += torch.from_numpy(mixed.astype(np.uint8)) + return lam + + def __call__(self, batch, _=None): + batch_size = len(batch) + assert batch_size % 2 == 0, 'Batch size should be even when using this' + half = 'half' in self.mode + if half: + batch_size //= 2 + output = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8) + if self.mode == 'elem' or self.mode == 'half': + lam = self._mix_elem_collate(output, batch, half=half) + elif self.mode == 'pair': + lam = self._mix_pair_collate(output, batch) + else: + lam = self._mix_batch_collate(output, batch) + target = torch.tensor([b[1] for b in batch], dtype=torch.int64) + target = mixup_target(target, self.num_classes, lam, self.label_smoothing, device='cpu') + target = target[:batch_size] + return output, target + diff --git a/timm/data/parsers/__init__.py b/timm/data/parsers/__init__.py new file mode 100644 index 
0000000..eeb44e3
--- /dev/null
+++ b/timm/data/parsers/__init__.py
@@ -0,0 +1 @@
+from .parser_factory import create_parser
diff --git a/timm/data/parsers/class_map.py b/timm/data/parsers/class_map.py
new file mode 100644
index 0000000..6b6fe45
--- /dev/null
+++ b/timm/data/parsers/class_map.py
@@ -0,0 +1,19 @@
+import os
+
+
+def load_class_map(map_or_filename, root=''):
+    if isinstance(map_or_filename, dict):
+        assert map_or_filename, 'class_map dict must be non-empty'
+        return map_or_filename
+    class_map_path = map_or_filename
+    if not os.path.exists(class_map_path):
+        class_map_path = os.path.join(root, class_map_path)
+        assert os.path.exists(class_map_path), 'Cannot locate specified class map file (%s)' % map_or_filename
+    class_map_ext = os.path.splitext(map_or_filename)[-1].lower()
+    if class_map_ext == '.txt':
+        with open(class_map_path) as f:
+            class_to_idx = {v.strip(): k for k, v in enumerate(f)}
+    else:
+        assert False, f'Unsupported class map file extension ({class_map_ext}).'
+    return class_to_idx
+
diff --git a/timm/data/parsers/constants.py b/timm/data/parsers/constants.py
new file mode 100644
index 0000000..e7ba484
--- /dev/null
+++ b/timm/data/parsers/constants.py
@@ -0,0 +1 @@
+IMG_EXTENSIONS = ('.png', '.jpg', '.jpeg')
diff --git a/timm/data/parsers/parser.py b/timm/data/parsers/parser.py
new file mode 100644
index 0000000..76ab6d1
--- /dev/null
+++ b/timm/data/parsers/parser.py
@@ -0,0 +1,17 @@
+from abc import abstractmethod
+
+
+class Parser:
+    def __init__(self):
+        pass
+
+    @abstractmethod
+    def _filename(self, index, basename=False, absolute=False):
+        pass
+
+    def filename(self, index, basename=False, absolute=False):
+        return self._filename(index, basename=basename, absolute=absolute)
+
+    def filenames(self, basename=False, absolute=False):
+        return [self._filename(index, basename=basename, absolute=absolute) for index in range(len(self))]
+
diff --git a/timm/data/parsers/parser_factory.py b/timm/data/parsers/parser_factory.py
new file mode 100644
index 0000000..892090a
--- /dev/null
+++ b/timm/data/parsers/parser_factory.py
@@ -0,0 +1,29 @@
+import os
+
+from .parser_image_folder import ParserImageFolder
+from .parser_image_tar import ParserImageTar
+from .parser_image_in_tar import ParserImageInTar
+
+
+def create_parser(name, root, split='train', **kwargs):
+    name = name.lower()
+    name = name.split('/', 2)
+    prefix = ''
+    if len(name) > 1:
+        prefix = name[0]
+    name = name[-1]
+
+    # FIXME improve the selection right now just tfds prefix or fallback path, will need options to
+    # explicitly select other options shortly
+    if prefix == 'tfds':
+        from .parser_tfds import ParserTfds  # defer tensorflow import
+        parser = ParserTfds(root, name, split=split, **kwargs)
+    else:
+        assert os.path.exists(root)
+        # default fallback path (backwards compat), use image tar if root is a .tar file, otherwise image folder
+        # FIXME support split here, in parser?
+        if os.path.isfile(root) and os.path.splitext(root)[1] == '.tar':
+            parser = ParserImageInTar(root, **kwargs)
+        else:
+            parser = ParserImageFolder(root, **kwargs)
+    return parser
diff --git a/timm/data/parsers/parser_image_folder.py b/timm/data/parsers/parser_image_folder.py
new file mode 100644
index 0000000..ed34900
--- /dev/null
+++ b/timm/data/parsers/parser_image_folder.py
@@ -0,0 +1,69 @@
+""" A dataset parser that reads images from folders
+
+Folders are scanned recursively to find image files. Labels are based
+on the folder hierarchy, just leaf folders by default.
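+For example, an image found at root/dog/001.jpg is labeled 'dog' (its leaf folder)
+when leaf_name_only is True (the default).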
diff --git a/timm/data/parsers/parser_image_folder.py b/timm/data/parsers/parser_image_folder.py
new file mode 100644
index 0000000..ed34900
--- /dev/null
+++ b/timm/data/parsers/parser_image_folder.py
@@ -0,0 +1,69 @@
+""" A dataset parser that reads images from folders
+
+Folders are scanned recursively to find image files. Labels are based
+on the folder hierarchy, just leaf folders by default.
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+import os
+
+from timm.utils.misc import natural_key
+
+from .parser import Parser
+from .class_map import load_class_map
+from .constants import IMG_EXTENSIONS
+
+
+def find_images_and_targets(folder, types=IMG_EXTENSIONS, class_to_idx=None, leaf_name_only=True, sort=True):
+    labels = []
+    filenames = []
+    for root, subdirs, files in os.walk(folder, topdown=False, followlinks=True):
+        rel_path = os.path.relpath(root, folder) if (root != folder) else ''
+        label = os.path.basename(rel_path) if leaf_name_only else rel_path.replace(os.path.sep, '_')
+        for f in files:
+            base, ext = os.path.splitext(f)
+            if ext.lower() in types:
+                filenames.append(os.path.join(root, f))
+                labels.append(label)
+    if class_to_idx is None:
+        # building class index
+        unique_labels = set(labels)
+        sorted_labels = list(sorted(unique_labels, key=natural_key))
+        class_to_idx = {c: idx for idx, c in enumerate(sorted_labels)}
+    images_and_targets = [(f, class_to_idx[l]) for f, l in zip(filenames, labels) if l in class_to_idx]
+    if sort:
+        images_and_targets = sorted(images_and_targets, key=lambda k: natural_key(k[0]))
+    return images_and_targets, class_to_idx
+
+
+class ParserImageFolder(Parser):
+
+    def __init__(
+            self,
+            root,
+            class_map=''):
+        super().__init__()
+
+        self.root = root
+        class_to_idx = None
+        if class_map:
+            class_to_idx = load_class_map(class_map, root)
+        self.samples, self.class_to_idx = find_images_and_targets(root, class_to_idx=class_to_idx)
+        if len(self.samples) == 0:
+            raise RuntimeError(
+                f'Found 0 images in subfolders of {root}. Supported image extensions are {", ".join(IMG_EXTENSIONS)}')
+
+    def __getitem__(self, index):
+        path, target = self.samples[index]
+        return open(path, 'rb'), target
+
+    def __len__(self):
+        return len(self.samples)
+
+    def _filename(self, index, basename=False, absolute=False):
+        filename = self.samples[index][0]
+        if basename:
+            filename = os.path.basename(filename)
+        elif not absolute:
+            filename = os.path.relpath(filename, self.root)
+        return filename
diff --git a/timm/data/parsers/parser_image_in_tar.py b/timm/data/parsers/parser_image_in_tar.py
new file mode 100644
index 0000000..c6ada96
--- /dev/null
+++ b/timm/data/parsers/parser_image_in_tar.py
@@ -0,0 +1,222 @@
+""" A dataset parser that reads tarfile based datasets
+
+This parser can read and extract image samples from:
+* a single tar of image files
+* a folder of multiple tarfiles containing image files
+* a tar of tars containing image files
+
+Labels are based on the combined folder and/or tar name structure.
+ +Hacked together by / Copyright 2020 Ross Wightman +""" +import os +import tarfile +import pickle +import logging +import numpy as np +from glob import glob +from typing import List, Dict + +from timm.utils.misc import natural_key + +from .parser import Parser +from .class_map import load_class_map +from .constants import IMG_EXTENSIONS + + +_logger = logging.getLogger(__name__) +CACHE_FILENAME_SUFFIX = '_tarinfos.pickle' + + +class TarState: + + def __init__(self, tf: tarfile.TarFile = None, ti: tarfile.TarInfo = None): + self.tf: tarfile.TarFile = tf + self.ti: tarfile.TarInfo = ti + self.children: Dict[str, TarState] = {} # child states (tars within tars) + + def reset(self): + self.tf = None + + +def _extract_tarinfo(tf: tarfile.TarFile, parent_info: Dict, extensions=IMG_EXTENSIONS): + sample_count = 0 + for i, ti in enumerate(tf): + if not ti.isfile(): + continue + dirname, basename = os.path.split(ti.path) + name, ext = os.path.splitext(basename) + ext = ext.lower() + if ext == '.tar': + with tarfile.open(fileobj=tf.extractfile(ti), mode='r|') as ctf: + child_info = dict( + name=ti.name, path=os.path.join(parent_info['path'], name), ti=ti, children=[], samples=[]) + sample_count += _extract_tarinfo(ctf, child_info, extensions=extensions) + _logger.debug(f'{i}/?. Extracted child tarinfos from {ti.name}. {len(child_info["samples"])} images.') + parent_info['children'].append(child_info) + elif ext in extensions: + parent_info['samples'].append(ti) + sample_count += 1 + return sample_count + + +def extract_tarinfos(root, class_name_to_idx=None, cache_tarinfo=None, extensions=IMG_EXTENSIONS, sort=True): + root_is_tar = False + if os.path.isfile(root): + assert os.path.splitext(root)[-1].lower() == '.tar' + tar_filenames = [root] + root, root_name = os.path.split(root) + root_name = os.path.splitext(root_name)[0] + root_is_tar = True + else: + root_name = root.strip(os.path.sep).split(os.path.sep)[-1] + tar_filenames = glob(os.path.join(root, '*.tar'), recursive=True) + num_tars = len(tar_filenames) + tar_bytes = sum([os.path.getsize(f) for f in tar_filenames]) + assert num_tars, f'No .tar files found at specified path ({root}).' + + _logger.info(f'Scanning {tar_bytes/1024**2:.2f}MB of tar files...') + info = dict(tartrees=[]) + cache_path = '' + if cache_tarinfo is None: + cache_tarinfo = True if tar_bytes > 10*1024**3 else False # FIXME magic number, 10GB + if cache_tarinfo: + cache_filename = '_' + root_name + CACHE_FILENAME_SUFFIX + cache_path = os.path.join(root, cache_filename) + if os.path.exists(cache_path): + _logger.info(f'Reading tar info from cache file {cache_path}.') + with open(cache_path, 'rb') as pf: + info = pickle.load(pf) + assert len(info['tartrees']) == num_tars, "Cached tartree len doesn't match number of tarfiles" + else: + for i, fn in enumerate(tar_filenames): + path = '' if root_is_tar else os.path.splitext(os.path.basename(fn))[0] + with tarfile.open(fn, mode='r|') as tf: # tarinfo scans done in streaming mode + parent_info = dict(name=os.path.relpath(fn, root), path=path, ti=None, children=[], samples=[]) + num_samples = _extract_tarinfo(tf, parent_info, extensions=extensions) + num_children = len(parent_info["children"]) + _logger.debug( + f'{i}/{num_tars}. Extracted tarinfos from {fn}. 
{num_children} children, {num_samples} samples.') + info['tartrees'].append(parent_info) + if cache_path: + _logger.info(f'Writing tar info to cache file {cache_path}.') + with open(cache_path, 'wb') as pf: + pickle.dump(info, pf) + + samples = [] + labels = [] + build_class_map = False + if class_name_to_idx is None: + build_class_map = True + + # Flatten tartree info into lists of samples and targets w/ targets based on label id via + # class map arg or from unique paths. + # NOTE: currently only flattening up to two-levels, filesystem .tars and then one level of sub-tar children + # this covers my current use cases and keeps things a little easier to test for now. + tarfiles = [] + + def _label_from_paths(*path, leaf_only=True): + path = os.path.join(*path).strip(os.path.sep) + return path.split(os.path.sep)[-1] if leaf_only else path.replace(os.path.sep, '_') + + def _add_samples(info, fn): + added = 0 + for s in info['samples']: + label = _label_from_paths(info['path'], os.path.dirname(s.path)) + if not build_class_map and label not in class_name_to_idx: + continue + samples.append((s, fn, info['ti'])) + labels.append(label) + added += 1 + return added + + _logger.info(f'Collecting samples and building tar states.') + for parent_info in info['tartrees']: + # if tartree has children, we assume all samples are at the child level + tar_name = None if root_is_tar else parent_info['name'] + tar_state = TarState() + parent_added = 0 + for child_info in parent_info['children']: + child_added = _add_samples(child_info, fn=tar_name) + if child_added: + tar_state.children[child_info['name']] = TarState(ti=child_info['ti']) + parent_added += child_added + parent_added += _add_samples(parent_info, fn=tar_name) + if parent_added: + tarfiles.append((tar_name, tar_state)) + del info + + if build_class_map: + # build class index + sorted_labels = list(sorted(set(labels), key=natural_key)) + class_name_to_idx = {c: idx for idx, c in enumerate(sorted_labels)} + + _logger.info(f'Mapping targets and sorting samples.') + samples_and_targets = [(s, class_name_to_idx[l]) for s, l in zip(samples, labels) if l in class_name_to_idx] + if sort: + samples_and_targets = sorted(samples_and_targets, key=lambda k: natural_key(k[0][0].path)) + samples, targets = zip(*samples_and_targets) + samples = np.array(samples) + targets = np.array(targets) + _logger.info(f'Finished processing {len(samples)} samples across {len(tarfiles)} tar files.') + return samples, targets, class_name_to_idx, tarfiles + + +class ParserImageInTar(Parser): + """ Multi-tarfile dataset parser where there is one .tar file per class + """ + + def __init__(self, root, class_map='', cache_tarfiles=True, cache_tarinfo=None): + super().__init__() + + class_name_to_idx = None + if class_map: + class_name_to_idx = load_class_map(class_map, root) + self.root = root + self.samples, self.targets, self.class_name_to_idx, tarfiles = extract_tarinfos( + self.root, + class_name_to_idx=class_name_to_idx, + cache_tarinfo=cache_tarinfo, + extensions=IMG_EXTENSIONS) + self.class_idx_to_name = {v: k for k, v in self.class_name_to_idx.items()} + if len(tarfiles) == 1 and tarfiles[0][0] is None: + self.root_is_tar = True + self.tar_state = tarfiles[0][1] + else: + self.root_is_tar = False + self.tar_state = dict(tarfiles) + self.cache_tarfiles = cache_tarfiles + + def __len__(self): + return len(self.samples) + + def __getitem__(self, index): + sample = self.samples[index] + target = self.targets[index] + sample_ti, parent_fn, child_ti = sample + parent_abs = 
os.path.join(self.root, parent_fn) if parent_fn else self.root
+
+        tf = None
+        cache_state = None
+        if self.cache_tarfiles:
+            cache_state = self.tar_state if self.root_is_tar else self.tar_state[parent_fn]
+            tf = cache_state.tf
+        if tf is None:
+            tf = tarfile.open(parent_abs)
+            if self.cache_tarfiles:
+                cache_state.tf = tf
+        if child_ti is not None:
+            ctf = cache_state.children[child_ti.name].tf if self.cache_tarfiles else None
+            if ctf is None:
+                ctf = tarfile.open(fileobj=tf.extractfile(child_ti))
+                if self.cache_tarfiles:
+                    cache_state.children[child_ti.name].tf = ctf
+            tf = ctf
+
+        return tf.extractfile(sample_ti), target
+
+    def _filename(self, index, basename=False, absolute=False):
+        filename = self.samples[index][0].name
+        if basename:
+            filename = os.path.basename(filename)
+        return filename
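A hedged usage sketch of the tar-handle caching above, assuming a hypothetical ./data folder with one tar per class:

    # ./data/cat.tar, ./data/dog.tar  (one .tar per class)
    # parser = ParserImageInTar('./data', cache_tarfiles=True)
    # fileobj, target = parser[0]  # opens the parent tar and caches the TarFile in tar_state
    # fileobj, target = parser[1]  # a sample from the same tar reuses the cached handle
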
+ """ + def __init__(self, root, class_map=''): + super().__init__() + + class_to_idx = None + if class_map: + class_to_idx = load_class_map(class_map, root) + assert os.path.isfile(root) + self.root = root + + with tarfile.open(root) as tf: # cannot keep this open across processes, reopen later + self.samples, self.class_to_idx = extract_tarinfo(tf, class_to_idx) + self.imgs = self.samples + self.tarfile = None # lazy init in __getitem__ + + def __getitem__(self, index): + if self.tarfile is None: + self.tarfile = tarfile.open(self.root) + tarinfo, target = self.samples[index] + fileobj = self.tarfile.extractfile(tarinfo) + return fileobj, target + + def __len__(self): + return len(self.samples) + + def _filename(self, index, basename=False, absolute=False): + filename = self.samples[index][0].name + if basename: + filename = os.path.basename(filename) + return filename diff --git a/timm/data/parsers/parser_tfds.py b/timm/data/parsers/parser_tfds.py new file mode 100644 index 0000000..ee5893c --- /dev/null +++ b/timm/data/parsers/parser_tfds.py @@ -0,0 +1,297 @@ +""" Dataset parser interface that wraps TFDS datasets + +Wraps many (most?) TFDS image-classification datasets +from https://github.com/tensorflow/datasets +https://www.tensorflow.org/datasets/catalog/overview#image_classification + +Hacked together by / Copyright 2020 Ross Wightman +""" +import math +import torch +import torch.distributed as dist +from PIL import Image + +try: + import tensorflow as tf + tf.config.set_visible_devices([], 'GPU') # Hands off my GPU! (or pip install tensorflow-cpu) + import tensorflow_datasets as tfds + try: + tfds.even_splits('', 1, drop_remainder=False) # non-buggy even_splits has drop_remainder arg + has_buggy_even_splits = False + except TypeError: + print("Warning: This version of tfds doesn't have the latest even_splits impl. " + "Please update or use tfds-nightly for better fine-grained split behaviour.") + has_buggy_even_splits = True +except ImportError as e: + print(e) + print("Please install tensorflow_datasets package `pip install tensorflow-datasets`.") + exit(1) +from .parser import Parser + + +MAX_TP_SIZE = 8 # maximum TF threadpool size, only doing jpeg decodes and queuing activities +SHUFFLE_SIZE = 8192 # examples to shuffle in DS queue +PREFETCH_SIZE = 2048 # examples to prefetch + + +def even_split_indices(split, n, num_examples): + partitions = [round(i * num_examples / n) for i in range(n + 1)] + return [f"{split}[{partitions[i]}:{partitions[i + 1]}]" for i in range(n)] + + +def get_class_labels(info): + if 'label' not in info.features: + return {} + class_label = info.features['label'] + class_to_idx = {n: class_label.str2int(n) for n in class_label.names} + return class_to_idx + + +class ParserTfds(Parser): + """ Wrap Tensorflow Datasets for use in PyTorch + + There several things to be aware of: + * To prevent excessive examples being dropped per epoch w/ distributed training or multiplicity of + dataloader workers, the train iterator wraps to avoid returning partial batches that trigger drop_last + https://github.com/pytorch/pytorch/issues/33413 + * With PyTorch IterableDatasets, each worker in each replica operates in isolation, the final batch + from each worker could be a different size. For training this is worked around by option above, for + validation extra examples are inserted iff distributed mode is enabled so that the batches being reduced + across replicas are of same size. This will slightly alter the results, distributed validation will not be + 100% correct. 
diff --git a/timm/data/parsers/parser_tfds.py b/timm/data/parsers/parser_tfds.py
new file mode 100644
index 0000000..ee5893c
--- /dev/null
+++ b/timm/data/parsers/parser_tfds.py
@@ -0,0 +1,297 @@
+""" Dataset parser interface that wraps TFDS datasets
+
+Wraps many (most?) TFDS image-classification datasets
+from https://github.com/tensorflow/datasets
+https://www.tensorflow.org/datasets/catalog/overview#image_classification
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+import math
+import torch
+import torch.distributed as dist
+from PIL import Image
+
+try:
+    import tensorflow as tf
+    tf.config.set_visible_devices([], 'GPU')  # Hands off my GPU! (or pip install tensorflow-cpu)
+    import tensorflow_datasets as tfds
+    try:
+        tfds.even_splits('', 1, drop_remainder=False)  # non-buggy even_splits has drop_remainder arg
+        has_buggy_even_splits = False
+    except TypeError:
+        print("Warning: This version of tfds doesn't have the latest even_splits impl. "
+              "Please update or use tfds-nightly for better fine-grained split behaviour.")
+        has_buggy_even_splits = True
+except ImportError as e:
+    print(e)
+    print("Please install tensorflow_datasets package `pip install tensorflow-datasets`.")
+    exit(1)
+from .parser import Parser
+
+
+MAX_TP_SIZE = 8  # maximum TF threadpool size, only doing jpeg decodes and queuing activities
+SHUFFLE_SIZE = 8192  # examples to shuffle in DS queue
+PREFETCH_SIZE = 2048  # examples to prefetch
+
+
+def even_split_indices(split, n, num_examples):
+    partitions = [round(i * num_examples / n) for i in range(n + 1)]
+    return [f"{split}[{partitions[i]}:{partitions[i + 1]}]" for i in range(n)]
+
+
+def get_class_labels(info):
+    if 'label' not in info.features:
+        return {}
+    class_label = info.features['label']
+    class_to_idx = {n: class_label.str2int(n) for n in class_label.names}
+    return class_to_idx
+
+
+class ParserTfds(Parser):
+    """ Wrap Tensorflow Datasets for use in PyTorch
+
+    There are several things to be aware of:
+      * To prevent excessive examples being dropped per epoch w/ distributed training or multiplicity of
+        dataloader workers, the train iterator wraps to avoid returning partial batches that trigger drop_last
+        https://github.com/pytorch/pytorch/issues/33413
+      * With PyTorch IterableDatasets, each worker in each replica operates in isolation, so the final batch
+        from each worker could be a different size. For training this is worked around by the option above; for
+        validation, extra examples are inserted iff distributed mode is enabled so that the batches being reduced
+        across replicas are of the same size. This will slightly alter the results; distributed validation will not be
+        100% correct. This is similar to common handling in DistributedSampler for normal Datasets but a bit worse
+        since there are up to N * J extra examples with IterableDatasets.
+      * The sharding (splitting of the dataset into TFRecord files) imposes limitations on the number of
+        replicas and dataloader workers you can use. For really small datasets that only contain a few shards
+        you may have to train non-distributed w/ 1-2 dataloader workers. This is likely not a huge concern as the
+        benefit of distributed training or fast dataloading should be much less for small datasets.
+      * This wrapper is currently configured to return individual, decompressed image examples from the TFDS
+        dataset. The augmentation (transforms) and batching is still done in PyTorch. It would be possible
+        to specify TF augmentation fn and return augmented batches w/ some modifications to other downstream
+        components.
+
+    """
+
+    def __init__(
+            self,
+            root,
+            name,
+            split='train',
+            is_training=False,
+            batch_size=None,
+            download=False,
+            repeats=0,
+            seed=42,
+            input_name='image',
+            input_image='RGB',
+            target_name='label',
+            target_image='',
+            prefetch_size=None,
+            shuffle_size=None,
+            max_threadpool_size=None
+    ):
+        """ Tensorflow-datasets Wrapper
+
+        Args:
+            root: root data dir (ie your TFDS_DATA_DIR. not dataset specific sub-dir)
+            name: tfds dataset name (eg `imagenet2012`)
+            split: tfds dataset split (can use all TFDS split strings eg `train[:10%]`)
+            is_training: training mode, shuffle enabled, dataset len rounded by batch_size
+            batch_size: batch_size used to ensure total examples % batch_size == 0 in training across all distributed nodes
+            download: download and build TFDS dataset if set, otherwise must use tfds CLI
+            repeats: iterate through (repeat) the dataset this many times per iteration (once if 0 or 1)
+            seed: common seed for shard shuffle across all distributed/worker instances
+            input_name: name of Feature to return as data (input)
+            input_image: image mode if input is an image (currently PIL mode string)
+            target_name: name of Feature to return as target (label)
+            target_image: image mode if target is an image (currently PIL mode string)
+            prefetch_size: override default tf.data prefetch buffer size
+            shuffle_size: override default tf.data shuffle buffer size
+            max_threadpool_size: override default threadpool size for tf.data
+        """
+        super().__init__()
+        self.root = root
+        self.split = split
+        self.is_training = is_training
+        if self.is_training:
+            assert batch_size is not None, \
+                "Must specify batch_size in training mode for reasonable behaviour w/ TFDS wrapper"
+        self.batch_size = batch_size
+        self.repeats = repeats
+        self.common_seed = seed  # a seed that's fixed across all worker / distributed instances
+
+        # performance settings
+        self.prefetch_size = prefetch_size or PREFETCH_SIZE
+        self.shuffle_size = shuffle_size or SHUFFLE_SIZE
+        self.max_threadpool_size = max_threadpool_size or MAX_TP_SIZE
+
+        # TFDS builder and split information
+        self.input_name = input_name  # FIXME support tuples / lists of inputs and targets and full range of Feature
+        self.input_image = input_image
+        self.target_name = target_name
+        self.target_image = target_image
+        self.builder = tfds.builder(name, data_dir=root)
+        # NOTE: the tfds command line app can be used to download & prepare datasets if you don't enable download flag
+        if download:
+            self.builder.download_and_prepare()
+        self.class_to_idx = get_class_labels(self.builder.info) if self.target_name == 'label' else {}
+        self.split_info = self.builder.info.splits[split]
+        self.num_examples = self.split_info.num_examples
+
+        # Distributed world state
+        self.dist_rank = 0
+        self.dist_num_replicas = 1
+        if dist.is_available() and dist.is_initialized() and dist.get_world_size() > 1:
+            self.dist_rank = dist.get_rank()
+            self.dist_num_replicas = dist.get_world_size()
+
+        # Attributes that are updated in _lazy_init, including the tf.data pipeline itself
+        self.global_num_workers = 1
+        self.worker_info = None
+        self.worker_seed = 0  # seed unique to each worker instance
+        self.subsplit = None  # set when data is distributed across workers using sub-splits
+        self.ds = None  # initialized lazily on each dataloader worker process
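For orientation before the sharding logic below, a worked example of even_split_indices (defined near the top of this file):

    # even_split_indices('train', 3, 10)
    #   partitions = [round(i * 10 / 3) for i in range(4)]  ->  [0, 3, 7, 10]
    #   returns ['train[0:3]', 'train[3:7]', 'train[7:10]']
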
+    def _lazy_init(self):
+        """ Lazily initialize the dataset.
+
+        This is necessary to init the Tensorflow dataset pipeline in the (dataloader) process that
+        will be using the dataset instance. The __init__ method is called on the main process,
+        this will be called in a dataloader worker process.
+
+        NOTE: There will be problems if you try to re-use this dataset across different loader/worker
+        instances once it has been initialized. Do not call any dataset methods that can call _lazy_init
+        before it is passed to dataloader.
+        """
+        worker_info = torch.utils.data.get_worker_info()
+
+        # setup input context to split dataset across distributed processes
+        num_workers = 1
+        global_worker_id = 0
+        if worker_info is not None:
+            self.worker_info = worker_info
+            self.worker_seed = worker_info.seed
+            num_workers = worker_info.num_workers
+            self.global_num_workers = self.dist_num_replicas * num_workers
+            global_worker_id = self.dist_rank * num_workers + worker_info.id
+
+        """ Data sharding
+        InputContext will assign subset of underlying TFRecord files to each 'pipeline' if used.
+        My understanding is that using split, the underlying TFRecord files will shuffle (shuffle_files=True)
+        between the splits each iteration, but that understanding could be wrong.
+
+        I am currently using a mix of InputContext shard assignment and fine-grained sub-splits for distributing
+        the data across workers. For training, InputContext is used to assign shards to nodes unless num_shards
+        in dataset < total number of workers. Otherwise the sub-split API is used for datasets without enough shards
+        or for validation, where we can't drop examples and need to minimize uneven splits to avoid padding.
+        """
+        should_subsplit = self.global_num_workers > 1 and (
+                self.split_info.num_shards < self.global_num_workers or not self.is_training)
+        if should_subsplit:
+            # split the dataset w/o using sharding for more even examples / worker, can result in less optimal
+            # read patterns for distributed training (overlap across shards) so better to use InputContext there
+            if has_buggy_even_splits:
+                # my even_split workaround doesn't work on subsplits, upgrade tfds!
+                if not isinstance(self.split_info, tfds.core.splits.SubSplitInfo):
+                    subsplits = even_split_indices(self.split, self.global_num_workers, self.num_examples)
+                    self.subsplit = subsplits[global_worker_id]
+            else:
+                subsplits = tfds.even_splits(self.split, self.global_num_workers)
+                self.subsplit = subsplits[global_worker_id]
+
+        input_context = None
+        if self.global_num_workers > 1 and self.subsplit is None:
+            # set input context to divide shards among distributed replicas
+            input_context = tf.distribute.InputContext(
+                num_input_pipelines=self.global_num_workers,
+                input_pipeline_id=global_worker_id,
+                num_replicas_in_sync=self.dist_num_replicas  # FIXME does this arg have any impact?
+ ) + read_config = tfds.ReadConfig( + shuffle_seed=self.common_seed, + shuffle_reshuffle_each_iteration=True, + input_context=input_context) + ds = self.builder.as_dataset( + split=self.subsplit or self.split, shuffle_files=self.is_training, read_config=read_config) + # avoid overloading threading w/ combo of TF ds threads + PyTorch workers + options = tf.data.Options() + thread_member = 'threading' if hasattr(options, 'threading') else 'experimental_threading' + getattr(options, thread_member).private_threadpool_size = max(1, self.max_threadpool_size // num_workers) + getattr(options, thread_member).max_intra_op_parallelism = 1 + ds = ds.with_options(options) + if self.is_training or self.repeats > 1: + # to prevent excessive drop_last batch behaviour w/ IterableDatasets + # see warnings at https://pytorch.org/docs/stable/data.html#multi-process-data-loading + ds = ds.repeat() # allow wrap around and break iteration manually + if self.is_training: + ds = ds.shuffle(min(self.num_examples, self.shuffle_size) // self.global_num_workers, seed=self.worker_seed) + ds = ds.prefetch(min(self.num_examples // self.global_num_workers, self.prefetch_size)) + self.ds = tfds.as_numpy(ds) + + def __iter__(self): + if self.ds is None: + self._lazy_init() + + # Compute a rounded up sample count that is used to: + # 1. make batches even cross workers & replicas in distributed validation. + # This adds extra examples and will slightly alter validation results. + # 2. determine loop ending condition in training w/ repeat enabled so that only full batch_size + # batches are produced (underlying tfds iter wraps around) + target_example_count = math.ceil(max(1, self.repeats) * self.num_examples / self.global_num_workers) + if self.is_training: + # round up to nearest batch_size per worker-replica + target_example_count = math.ceil(target_example_count / self.batch_size) * self.batch_size + + # Iterate until exhausted or sample count hits target when training (ds.repeat enabled) + example_count = 0 + for example in self.ds: + input_data = example[self.input_name] + if self.input_image: + input_data = Image.fromarray(input_data, mode=self.input_image) + target_data = example[self.target_name] + if self.target_image: + target_data = Image.fromarray(target_data, mode=self.target_image) + yield input_data, target_data + example_count += 1 + if self.is_training and example_count >= target_example_count: + # Need to break out of loop when repeat() is enabled for training w/ oversampling + # this results in extra examples per epoch but seems more desirable than dropping + # up to N*J batches per epoch (where N = num distributed processes, and J = num worker processes) + break + + # Pad across distributed nodes (make counts equal by adding examples) + if not self.is_training and self.dist_num_replicas > 1 and self.subsplit is not None and \ + 0 < example_count < target_example_count: + # Validation batch padding only done for distributed training where results are reduced across nodes. + # For single process case, it won't matter if workers return different batch sizes. + # If using input_context or % based splits, sample count can vary significantly across workers and this + # approach should not be used (hence disabled if self.subsplit isn't set). 
+ while example_count < target_example_count: + yield input_data, target_data # yield prev sample again + example_count += 1 + + def __len__(self): + # this is just an estimate and does not factor in extra examples added to pad batches based on + # complete worker & replica info (not available until init in dataloader). + return math.ceil(max(1, self.repeats) * self.num_examples / self.dist_num_replicas) + + def _filename(self, index, basename=False, absolute=False): + assert False, "Not supported" # no random access to examples + + def filenames(self, basename=False, absolute=False): + """ Return all filenames in dataset, overrides base""" + if self.ds is None: + self._lazy_init() + names = [] + for sample in self.ds: + if len(names) > self.num_examples: + break # safety for ds.repeat() case + if 'file_name' in sample: + name = sample['file_name'] + elif 'filename' in sample: + name = sample['filename'] + elif 'id' in sample: + name = sample['id'] + else: + assert False, "No supported name field present" + names.append(name) + return names diff --git a/timm/data/random_erasing.py b/timm/data/random_erasing.py new file mode 100644 index 0000000..2fa6315 --- /dev/null +++ b/timm/data/random_erasing.py @@ -0,0 +1,103 @@ +""" Random Erasing (Cutout) + +Originally inspired by impl at https://github.com/zhunzhong07/Random-Erasing, Apache 2.0 +Copyright Zhun Zhong & Liang Zheng + +Hacked together by / Copyright 2020 Ross Wightman +""" +import random +import math +import torch + + +def _get_pixels(per_pixel, rand_color, patch_size, dtype=torch.float32, device='cuda'): + # NOTE I've seen CUDA illegal memory access errors being caused by the normal_() + # paths, flip the order so normal is run on CPU if this becomes a problem + # Issue has been fixed in master https://github.com/pytorch/pytorch/issues/19508 + if per_pixel: + return torch.empty(patch_size, dtype=dtype, device=device).normal_() + elif rand_color: + return torch.empty((patch_size[0], 1, 1), dtype=dtype, device=device).normal_() + else: + return torch.zeros((patch_size[0], 1, 1), dtype=dtype, device=device) + + +class RandomErasing: + """ Randomly selects a rectangle region in an image and erases its pixels. + 'Random Erasing Data Augmentation' by Zhong et al. + See https://arxiv.org/pdf/1708.04896.pdf + + This variant of RandomErasing is intended to be applied to either a batch + or single image tensor after it has been normalized by dataset mean and std. + Args: + probability: Probability that the Random Erasing operation will be performed. + min_area: Minimum percentage of erased area wrt input image area. + max_area: Maximum percentage of erased area wrt input image area. + min_aspect: Minimum aspect ratio of erased area. + mode: pixel color mode, one of 'const', 'rand', or 'pixel' + 'const' - erase block is constant color of 0 for all channels + 'rand' - erase block is same per-channel random (normal) color + 'pixel' - erase block is per-pixel random (normal) color + max_count: maximum number of erasing blocks per image, area per box is scaled by count. + per-image count is randomly chosen between 1 and this value. 
+ """ + + def __init__( + self, + probability=0.5, min_area=0.02, max_area=1/3, min_aspect=0.3, max_aspect=None, + mode='const', min_count=1, max_count=None, num_splits=0, device='cuda'): + self.probability = probability + self.min_area = min_area + self.max_area = max_area + max_aspect = max_aspect or 1 / min_aspect + self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect)) + self.min_count = min_count + self.max_count = max_count or min_count + self.num_splits = num_splits + self.mode = mode.lower() + self.rand_color = False + self.per_pixel = False + if self.mode == 'rand': + self.rand_color = True # per block random normal + elif self.mode == 'pixel': + self.per_pixel = True # per pixel random normal + else: + assert not self.mode or self.mode == 'const' + self.device = device + + def _erase(self, img, chan, img_h, img_w, dtype): + if random.random() > self.probability: + return + area = img_h * img_w + count = self.min_count if self.min_count == self.max_count else \ + random.randint(self.min_count, self.max_count) + for _ in range(count): + for attempt in range(10): + target_area = random.uniform(self.min_area, self.max_area) * area / count + aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio)) + h = int(round(math.sqrt(target_area * aspect_ratio))) + w = int(round(math.sqrt(target_area / aspect_ratio))) + if w < img_w and h < img_h: + top = random.randint(0, img_h - h) + left = random.randint(0, img_w - w) + img[:, top:top + h, left:left + w] = _get_pixels( + self.per_pixel, self.rand_color, (chan, h, w), + dtype=dtype, device=self.device) + break + + def __call__(self, input): + if len(input.size()) == 3: + self._erase(input, *input.size(), input.dtype) + else: + batch_size, chan, img_h, img_w = input.size() + # skip first slice of batch if num_splits is set (for clean portion of samples) + batch_start = batch_size // self.num_splits if self.num_splits > 1 else 0 + for i in range(batch_start, batch_size): + self._erase(input[i], chan, img_h, img_w, input.dtype) + return input + + def __repr__(self): + # NOTE simplified state for repr + fs = self.__class__.__name__ + f'(p={self.probability}, mode={self.mode}' + fs += f', count=({self.min_count}, {self.max_count}))' + return fs diff --git a/timm/data/real_labels.py b/timm/data/real_labels.py new file mode 100644 index 0000000..939c348 --- /dev/null +++ b/timm/data/real_labels.py @@ -0,0 +1,42 @@ +""" Real labels evaluator for ImageNet +Paper: `Are we done with ImageNet?` - https://arxiv.org/abs/2006.07159 +Based on Numpy example at https://github.com/google-research/reassessed-imagenet + +Hacked together by / Copyright 2020 Ross Wightman +""" +import os +import json +import numpy as np + + +class RealLabelsImagenet: + + def __init__(self, filenames, real_json='real.json', topk=(1, 5)): + with open(real_json) as real_labels: + real_labels = json.load(real_labels) + real_labels = {f'ILSVRC2012_val_{i + 1:08d}.JPEG': labels for i, labels in enumerate(real_labels)} + self.real_labels = real_labels + self.filenames = filenames + assert len(self.filenames) == len(self.real_labels) + self.topk = topk + self.is_correct = {k: [] for k in topk} + self.sample_idx = 0 + + def add_result(self, output): + maxk = max(self.topk) + _, pred_batch = output.topk(maxk, 1, True, True) + pred_batch = pred_batch.cpu().numpy() + for pred in pred_batch: + filename = self.filenames[self.sample_idx] + filename = os.path.basename(filename) + if self.real_labels[filename]: + for k in self.topk: + self.is_correct[k].append( + any([p in 
self.real_labels[filename] for p in pred[:k]])) + self.sample_idx += 1 + + def get_accuracy(self, k=None): + if k is None: + return {k: float(np.mean(self.is_correct[k])) * 100 for k in self.topk} + else: + return float(np.mean(self.is_correct[k])) * 100 diff --git a/timm/data/tf_preprocessing.py b/timm/data/tf_preprocessing.py new file mode 100644 index 0000000..44b4a3a --- /dev/null +++ b/timm/data/tf_preprocessing.py @@ -0,0 +1,232 @@ +""" Tensorflow Preprocessing Adapter + +Allows use of Tensorflow preprocessing pipeline in PyTorch Transform + +Copyright of original Tensorflow code below. + +Hacked together by / Copyright 2020 Ross Wightman +""" + +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""ImageNet preprocessing for MnasNet.""" +import tensorflow as tf +import numpy as np + +IMAGE_SIZE = 224 +CROP_PADDING = 32 + + +def distorted_bounding_box_crop(image_bytes, + bbox, + min_object_covered=0.1, + aspect_ratio_range=(0.75, 1.33), + area_range=(0.05, 1.0), + max_attempts=100, + scope=None): + """Generates cropped_image using one of the bboxes randomly distorted. + + See `tf.image.sample_distorted_bounding_box` for more documentation. + + Args: + image_bytes: `Tensor` of binary image data. + bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]` + where each coordinate is [0, 1) and the coordinates are arranged + as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole + image. + min_object_covered: An optional `float`. Defaults to `0.1`. The cropped + area of the image must contain at least this fraction of any bounding + box supplied. + aspect_ratio_range: An optional list of `float`s. The cropped area of the + image must have an aspect ratio = width / height within this range. + area_range: An optional list of `float`s. The cropped area of the image + must contain a fraction of the supplied image within in this range. + max_attempts: An optional `int`. Number of attempts at generating a cropped + region of the image of the specified constraints. After `max_attempts` + failures, return the entire image. + scope: Optional `str` for name scope. + Returns: + cropped image `Tensor` + """ + with tf.name_scope(scope, 'distorted_bounding_box_crop', [image_bytes, bbox]): + shape = tf.image.extract_jpeg_shape(image_bytes) + sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( + shape, + bounding_boxes=bbox, + min_object_covered=min_object_covered, + aspect_ratio_range=aspect_ratio_range, + area_range=area_range, + max_attempts=max_attempts, + use_image_if_no_bounding_boxes=True) + bbox_begin, bbox_size, _ = sample_distorted_bounding_box + + # Crop the image to the specified bounding box. 
offset_y, offset_x, _ = tf.unstack(bbox_begin)
+        target_height, target_width, _ = tf.unstack(bbox_size)
+        crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
+        image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
+
+        return image
+
+
+def _at_least_x_are_equal(a, b, x):
+    """At least `x` of `a` and `b` `Tensors` are equal."""
+    match = tf.equal(a, b)
+    match = tf.cast(match, tf.int32)
+    return tf.greater_equal(tf.reduce_sum(match), x)
+
+
+def _decode_and_random_crop(image_bytes, image_size, resize_method):
+    """Make a random crop of image_size."""
+    bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
+    image = distorted_bounding_box_crop(
+        image_bytes,
+        bbox,
+        min_object_covered=0.1,
+        aspect_ratio_range=(3. / 4, 4. / 3.),
+        area_range=(0.08, 1.0),
+        max_attempts=10,
+        scope=None)
+    original_shape = tf.image.extract_jpeg_shape(image_bytes)
+    bad = _at_least_x_are_equal(original_shape, tf.shape(image), 3)
+
+    image = tf.cond(
+        bad,
+        lambda: _decode_and_center_crop(image_bytes, image_size, resize_method),
+        lambda: tf.image.resize([image], [image_size, image_size], resize_method)[0])
+
+    return image
+
+
+def _decode_and_center_crop(image_bytes, image_size, resize_method):
+    """Crops to center of image with padding then scales image_size."""
+    shape = tf.image.extract_jpeg_shape(image_bytes)
+    image_height = shape[0]
+    image_width = shape[1]
+
+    padded_center_crop_size = tf.cast(
+        ((image_size / (image_size + CROP_PADDING)) *
+         tf.cast(tf.minimum(image_height, image_width), tf.float32)),
+        tf.int32)
+
+    offset_height = ((image_height - padded_center_crop_size) + 1) // 2
+    offset_width = ((image_width - padded_center_crop_size) + 1) // 2
+    crop_window = tf.stack([offset_height, offset_width,
+                            padded_center_crop_size, padded_center_crop_size])
+    image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
+    image = tf.image.resize([image], [image_size, image_size], resize_method)[0]
+
+    return image
+
+
+def _flip(image):
+    """Random horizontal image flip."""
+    image = tf.image.random_flip_left_right(image)
+    return image
+
+
+def preprocess_for_train(image_bytes, use_bfloat16, image_size=IMAGE_SIZE, interpolation='bicubic'):
+    """Preprocesses the given image for training.
+
+    Args:
+        image_bytes: `Tensor` representing an image binary of arbitrary size.
+        use_bfloat16: `bool` for whether to use bfloat16.
+        image_size: image size.
+        interpolation: image interpolation method
+
+    Returns:
+        A preprocessed image `Tensor`.
+    """
+    resize_method = tf.image.ResizeMethod.BICUBIC if interpolation == 'bicubic' else tf.image.ResizeMethod.BILINEAR
+    image = _decode_and_random_crop(image_bytes, image_size, resize_method)
+    image = _flip(image)
+    image = tf.reshape(image, [image_size, image_size, 3])
+    image = tf.image.convert_image_dtype(
+        image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32)
+    return image
+
+
+def preprocess_for_eval(image_bytes, use_bfloat16, image_size=IMAGE_SIZE, interpolation='bicubic'):
+    """Preprocesses the given image for evaluation.
+
+    Args:
+        image_bytes: `Tensor` representing an image binary of arbitrary size.
+        use_bfloat16: `bool` for whether to use bfloat16.
+        image_size: image size.
+        interpolation: image interpolation method
+
+    Returns:
+        A preprocessed image `Tensor`.
+ """ + resize_method = tf.image.ResizeMethod.BICUBIC if interpolation == 'bicubic' else tf.image.ResizeMethod.BILINEAR + image = _decode_and_center_crop(image_bytes, image_size, resize_method) + image = tf.reshape(image, [image_size, image_size, 3]) + image = tf.image.convert_image_dtype( + image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32) + return image + + +def preprocess_image(image_bytes, + is_training=False, + use_bfloat16=False, + image_size=IMAGE_SIZE, + interpolation='bicubic'): + """Preprocesses the given image. + + Args: + image_bytes: `Tensor` representing an image binary of arbitrary size. + is_training: `bool` for whether the preprocessing is for training. + use_bfloat16: `bool` for whether to use bfloat16. + image_size: image size. + interpolation: image interpolation method + + Returns: + A preprocessed image `Tensor` with value range of [0, 255]. + """ + if is_training: + return preprocess_for_train(image_bytes, use_bfloat16, image_size, interpolation) + else: + return preprocess_for_eval(image_bytes, use_bfloat16, image_size, interpolation) + + +class TfPreprocessTransform: + + def __init__(self, is_training=False, size=224, interpolation='bicubic'): + self.is_training = is_training + self.size = size[0] if isinstance(size, tuple) else size + self.interpolation = interpolation + self._image_bytes = None + self.process_image = self._build_tf_graph() + self.sess = None + + def _build_tf_graph(self): + with tf.device('/cpu:0'): + self._image_bytes = tf.placeholder( + shape=[], + dtype=tf.string, + ) + img = preprocess_image( + self._image_bytes, self.is_training, False, self.size, self.interpolation) + return img + + def __call__(self, image_bytes): + if self.sess is None: + self.sess = tf.Session() + img = self.sess.run(self.process_image, feed_dict={self._image_bytes: image_bytes}) + img = img.round().clip(0, 255).astype(np.uint8) + if img.ndim < 3: + img = np.expand_dims(img, axis=-1) + img = np.rollaxis(img, 2) # HWC to CHW + return img diff --git a/timm/data/transforms.py b/timm/data/transforms.py new file mode 100644 index 0000000..45c078f --- /dev/null +++ b/timm/data/transforms.py @@ -0,0 +1,185 @@ +import torch +import torchvision.transforms.functional as F +try: + from torchvision.transforms.functional import InterpolationMode + has_interpolation_mode = True +except ImportError: + has_interpolation_mode = False +from PIL import Image +import warnings +import math +import random +import numpy as np + + +class ToNumpy: + + def __call__(self, pil_img): + np_img = np.array(pil_img, dtype=np.uint8) + if np_img.ndim < 3: + np_img = np.expand_dims(np_img, axis=-1) + np_img = np.rollaxis(np_img, 2) # HWC to CHW + return np_img + + +class ToTensor: + + def __init__(self, dtype=torch.float32): + self.dtype = dtype + + def __call__(self, pil_img): + np_img = np.array(pil_img, dtype=np.uint8) + if np_img.ndim < 3: + np_img = np.expand_dims(np_img, axis=-1) + np_img = np.rollaxis(np_img, 2) # HWC to CHW + return torch.from_numpy(np_img).to(dtype=self.dtype) + + +_pil_interpolation_to_str = { + Image.NEAREST: 'nearest', + Image.BILINEAR: 'bilinear', + Image.BICUBIC: 'bicubic', + Image.BOX: 'box', + Image.HAMMING: 'hamming', + Image.LANCZOS: 'lanczos', +} +_str_to_pil_interpolation = {b: a for a, b in _pil_interpolation_to_str.items()} + + +if has_interpolation_mode: + _torch_interpolation_to_str = { + InterpolationMode.NEAREST: 'nearest', + InterpolationMode.BILINEAR: 'bilinear', + InterpolationMode.BICUBIC: 'bicubic', + InterpolationMode.BOX: 'box', + 
InterpolationMode.HAMMING: 'hamming', + InterpolationMode.LANCZOS: 'lanczos', + } + _str_to_torch_interpolation = {b: a for a, b in _torch_interpolation_to_str.items()} +else: + _pil_interpolation_to_torch = {} + _torch_interpolation_to_str = {} + + +def str_to_pil_interp(mode_str): + return _str_to_pil_interpolation[mode_str] + + +def str_to_interp_mode(mode_str): + if has_interpolation_mode: + return _str_to_torch_interpolation[mode_str] + else: + return _str_to_pil_interpolation[mode_str] + + +def interp_mode_to_str(mode): + if has_interpolation_mode: + return _torch_interpolation_to_str[mode] + else: + return _pil_interpolation_to_str[mode] + + +_RANDOM_INTERPOLATION = (str_to_interp_mode('bilinear'), str_to_interp_mode('bicubic')) + + +class RandomResizedCropAndInterpolation: + """Crop the given PIL Image to random size and aspect ratio with random interpolation. + + A crop of random size (default: of 0.08 to 1.0) of the original size and a random + aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop + is finally resized to given size. + This is popularly used to train the Inception networks. + + Args: + size: expected output size of each edge + scale: range of size of the origin size cropped + ratio: range of aspect ratio of the origin aspect ratio cropped + interpolation: Default: PIL.Image.BILINEAR + """ + + def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), + interpolation='bilinear'): + if isinstance(size, (list, tuple)): + self.size = tuple(size) + else: + self.size = (size, size) + if (scale[0] > scale[1]) or (ratio[0] > ratio[1]): + warnings.warn("range should be of kind (min, max)") + + if interpolation == 'random': + self.interpolation = _RANDOM_INTERPOLATION + else: + self.interpolation = str_to_interp_mode(interpolation) + self.scale = scale + self.ratio = ratio + + @staticmethod + def get_params(img, scale, ratio): + """Get parameters for ``crop`` for a random sized crop. + + Args: + img (PIL Image): Image to be cropped. + scale (tuple): range of size of the origin size cropped + ratio (tuple): range of aspect ratio of the origin aspect ratio cropped + + Returns: + tuple: params (i, j, h, w) to be passed to ``crop`` for a random + sized crop. + """ + area = img.size[0] * img.size[1] + + for attempt in range(10): + target_area = random.uniform(*scale) * area + log_ratio = (math.log(ratio[0]), math.log(ratio[1])) + aspect_ratio = math.exp(random.uniform(*log_ratio)) + + w = int(round(math.sqrt(target_area * aspect_ratio))) + h = int(round(math.sqrt(target_area / aspect_ratio))) + + if w <= img.size[0] and h <= img.size[1]: + i = random.randint(0, img.size[1] - h) + j = random.randint(0, img.size[0] - w) + return i, j, h, w + + # Fallback to central crop + in_ratio = img.size[0] / img.size[1] + if in_ratio < min(ratio): + w = img.size[0] + h = int(round(w / min(ratio))) + elif in_ratio > max(ratio): + h = img.size[1] + w = int(round(h * max(ratio))) + else: # whole image + w = img.size[0] + h = img.size[1] + i = (img.size[1] - h) // 2 + j = (img.size[0] - w) // 2 + return i, j, h, w + + def __call__(self, img): + """ + Args: + img (PIL Image): Image to be cropped and resized. + + Returns: + PIL Image: Randomly cropped and resized image. 
+ """ + i, j, h, w = self.get_params(img, self.scale, self.ratio) + if isinstance(self.interpolation, (tuple, list)): + interpolation = random.choice(self.interpolation) + else: + interpolation = self.interpolation + return F.resized_crop(img, i, j, h, w, self.size, interpolation) + + def __repr__(self): + if isinstance(self.interpolation, (tuple, list)): + interpolate_str = ' '.join([interp_mode_to_str(x) for x in self.interpolation]) + else: + interpolate_str = interp_mode_to_str(self.interpolation) + format_string = self.__class__.__name__ + '(size={0}'.format(self.size) + format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale)) + format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio)) + format_string += ', interpolation={0})'.format(interpolate_str) + return format_string + + diff --git a/timm/data/transforms_factory.py b/timm/data/transforms_factory.py new file mode 100644 index 0000000..d4815d9 --- /dev/null +++ b/timm/data/transforms_factory.py @@ -0,0 +1,236 @@ +""" Transforms Factory +Factory methods for building image transforms for use with TIMM (PyTorch Image Models) + +Hacked together by / Copyright 2020 Ross Wightman +""" +import math + +import torch +from torchvision import transforms + +from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, DEFAULT_CROP_PCT +from timm.data.auto_augment import rand_augment_transform, augment_and_mix_transform, auto_augment_transform +from timm.data.transforms import str_to_interp_mode, str_to_pil_interp, RandomResizedCropAndInterpolation, ToNumpy +from timm.data.random_erasing import RandomErasing + + +def transforms_noaug_train( + img_size=224, + interpolation='bilinear', + use_prefetcher=False, + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD, +): + if interpolation == 'random': + # random interpolation not supported with no-aug + interpolation = 'bilinear' + tfl = [ + transforms.Resize(img_size, interpolation=str_to_interp_mode(interpolation)), + transforms.CenterCrop(img_size) + ] + if use_prefetcher: + # prefetcher and collate will handle tensor conversion and norm + tfl += [ToNumpy()] + else: + tfl += [ + transforms.ToTensor(), + transforms.Normalize( + mean=torch.tensor(mean), + std=torch.tensor(std)) + ] + return transforms.Compose(tfl) + + +def transforms_imagenet_train( + img_size=224, + scale=None, + ratio=None, + hflip=0.5, + vflip=0., + color_jitter=0.4, + auto_augment=None, + interpolation='random', + use_prefetcher=False, + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD, + re_prob=0., + re_mode='const', + re_count=1, + re_num_splits=0, + separate=False, +): + """ + If separate==True, the transforms are returned as a tuple of 3 separate transforms + for use in a mixing dataset that passes + * all data through the first (primary) transform, called the 'clean' data + * a portion of the data through the secondary transform + * normalizes and converts the branches above with the third, final transform + """ + scale = tuple(scale or (0.08, 1.0)) # default imagenet scale range + ratio = tuple(ratio or (3./4., 4./3.)) # default imagenet ratio range + primary_tfl = [ + RandomResizedCropAndInterpolation(img_size, scale=scale, ratio=ratio, interpolation=interpolation)] + if hflip > 0.: + primary_tfl += [transforms.RandomHorizontalFlip(p=hflip)] + if vflip > 0.: + primary_tfl += [transforms.RandomVerticalFlip(p=vflip)] + + secondary_tfl = [] + if auto_augment: + assert isinstance(auto_augment, str) + if isinstance(img_size, (tuple, list)): + img_size_min = 
min(img_size) + else: + img_size_min = img_size + aa_params = dict( + translate_const=int(img_size_min * 0.45), + img_mean=tuple([min(255, round(255 * x)) for x in mean]), + ) + if interpolation and interpolation != 'random': + aa_params['interpolation'] = str_to_pil_interp(interpolation) + if auto_augment.startswith('rand'): + secondary_tfl += [rand_augment_transform(auto_augment, aa_params)] + elif auto_augment.startswith('augmix'): + aa_params['translate_pct'] = 0.3 + secondary_tfl += [augment_and_mix_transform(auto_augment, aa_params)] + else: + secondary_tfl += [auto_augment_transform(auto_augment, aa_params)] + elif color_jitter is not None: + # color jitter is enabled when not using AA + if isinstance(color_jitter, (list, tuple)): + # color jitter should be a 3-tuple/list if spec brightness/contrast/saturation + # or 4 if also augmenting hue + assert len(color_jitter) in (3, 4) + else: + # if it's a scalar, duplicate for brightness, contrast, and saturation, no hue + color_jitter = (float(color_jitter),) * 3 + secondary_tfl += [transforms.ColorJitter(*color_jitter)] + + final_tfl = [] + if use_prefetcher: + # prefetcher and collate will handle tensor conversion and norm + final_tfl += [ToNumpy()] + else: + final_tfl += [ + transforms.ToTensor(), + transforms.Normalize( + mean=torch.tensor(mean), + std=torch.tensor(std)) + ] + if re_prob > 0.: + final_tfl.append( + RandomErasing(re_prob, mode=re_mode, max_count=re_count, num_splits=re_num_splits, device='cpu')) + + if separate: + return transforms.Compose(primary_tfl), transforms.Compose(secondary_tfl), transforms.Compose(final_tfl) + else: + return transforms.Compose(primary_tfl + secondary_tfl + final_tfl) + + +def transforms_imagenet_eval( + img_size=224, + crop_pct=None, + interpolation='bilinear', + use_prefetcher=False, + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD): + crop_pct = crop_pct or DEFAULT_CROP_PCT + + if isinstance(img_size, (tuple, list)): + assert len(img_size) == 2 + if img_size[-1] == img_size[-2]: + # fall-back to older behaviour so Resize scales to shortest edge if target is square + scale_size = int(math.floor(img_size[0] / crop_pct)) + else: + scale_size = tuple([int(x / crop_pct) for x in img_size]) + else: + scale_size = int(math.floor(img_size / crop_pct)) + + tfl = [ + transforms.Resize(scale_size, interpolation=str_to_interp_mode(interpolation)), + transforms.CenterCrop(img_size), + ] + if use_prefetcher: + # prefetcher and collate will handle tensor conversion and norm + tfl += [ToNumpy()] + else: + tfl += [ + transforms.ToTensor(), + transforms.Normalize( + mean=torch.tensor(mean), + std=torch.tensor(std)) + ] + + return transforms.Compose(tfl) + + +def create_transform( + input_size, + is_training=False, + use_prefetcher=False, + no_aug=False, + scale=None, + ratio=None, + hflip=0.5, + vflip=0., + color_jitter=0.4, + auto_augment=None, + interpolation='bilinear', + mean=IMAGENET_DEFAULT_MEAN, + std=IMAGENET_DEFAULT_STD, + re_prob=0., + re_mode='const', + re_count=1, + re_num_splits=0, + crop_pct=None, + tf_preprocessing=False, + separate=False): + + if isinstance(input_size, (tuple, list)): + img_size = input_size[-2:] + else: + img_size = input_size + + if tf_preprocessing and use_prefetcher: + assert not separate, "Separate transforms not supported for TF preprocessing" + from timm.data.tf_preprocessing import TfPreprocessTransform + transform = TfPreprocessTransform( + is_training=is_training, size=img_size, interpolation=interpolation) + else: + if is_training and no_aug: + assert not 
separate, "Cannot perform split augmentation with no_aug" + transform = transforms_noaug_train( + img_size, + interpolation=interpolation, + use_prefetcher=use_prefetcher, + mean=mean, + std=std) + elif is_training: + transform = transforms_imagenet_train( + img_size, + scale=scale, + ratio=ratio, + hflip=hflip, + vflip=vflip, + color_jitter=color_jitter, + auto_augment=auto_augment, + interpolation=interpolation, + use_prefetcher=use_prefetcher, + mean=mean, + std=std, + re_prob=re_prob, + re_mode=re_mode, + re_count=re_count, + re_num_splits=re_num_splits, + separate=separate) + else: + assert not separate, "Separate transforms not supported for validation preprocessing" + transform = transforms_imagenet_eval( + img_size, + interpolation=interpolation, + use_prefetcher=use_prefetcher, + mean=mean, + std=std, + crop_pct=crop_pct) + + return transform diff --git a/timm/loss/__init__.py b/timm/loss/__init__.py new file mode 100644 index 0000000..ea7f15f --- /dev/null +++ b/timm/loss/__init__.py @@ -0,0 +1,4 @@ +from .asymmetric_loss import AsymmetricLossMultiLabel, AsymmetricLossSingleLabel +from .binary_cross_entropy import BinaryCrossEntropy +from .cross_entropy import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy +from .jsd import JsdCrossEntropy diff --git a/timm/loss/asymmetric_loss.py b/timm/loss/asymmetric_loss.py new file mode 100644 index 0000000..a8b10f9 --- /dev/null +++ b/timm/loss/asymmetric_loss.py @@ -0,0 +1,97 @@ +import torch +import torch.nn as nn + + +class AsymmetricLossMultiLabel(nn.Module): + def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-8, disable_torch_grad_focal_loss=False): + super(AsymmetricLossMultiLabel, self).__init__() + + self.gamma_neg = gamma_neg + self.gamma_pos = gamma_pos + self.clip = clip + self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss + self.eps = eps + + def forward(self, x, y): + """" + Parameters + ---------- + x: input logits + y: targets (multi-label binarized vector) + """ + + # Calculating Probabilities + x_sigmoid = torch.sigmoid(x) + xs_pos = x_sigmoid + xs_neg = 1 - x_sigmoid + + # Asymmetric Clipping + if self.clip is not None and self.clip > 0: + xs_neg = (xs_neg + self.clip).clamp(max=1) + + # Basic CE calculation + los_pos = y * torch.log(xs_pos.clamp(min=self.eps)) + los_neg = (1 - y) * torch.log(xs_neg.clamp(min=self.eps)) + loss = los_pos + los_neg + + # Asymmetric Focusing + if self.gamma_neg > 0 or self.gamma_pos > 0: + if self.disable_torch_grad_focal_loss: + torch._C.set_grad_enabled(False) + pt0 = xs_pos * y + pt1 = xs_neg * (1 - y) # pt = p if t > 0 else 1-p + pt = pt0 + pt1 + one_sided_gamma = self.gamma_pos * y + self.gamma_neg * (1 - y) + one_sided_w = torch.pow(1 - pt, one_sided_gamma) + if self.disable_torch_grad_focal_loss: + torch._C.set_grad_enabled(True) + loss *= one_sided_w + + return -loss.sum() + + +class AsymmetricLossSingleLabel(nn.Module): + def __init__(self, gamma_pos=1, gamma_neg=4, eps: float = 0.1, reduction='mean'): + super(AsymmetricLossSingleLabel, self).__init__() + + self.eps = eps + self.logsoftmax = nn.LogSoftmax(dim=-1) + self.targets_classes = [] # prevent gpu repeated memory allocation + self.gamma_pos = gamma_pos + self.gamma_neg = gamma_neg + self.reduction = reduction + + def forward(self, inputs, target, reduction=None): + """" + Parameters + ---------- + x: input logits + y: targets (1-hot vector) + """ + + num_classes = inputs.size()[-1] + log_preds = self.logsoftmax(inputs) + self.targets_classes = torch.zeros_like(inputs).scatter_(1, 
target.long().unsqueeze(1), 1) + + # ASL weights + targets = self.targets_classes + anti_targets = 1 - targets + xs_pos = torch.exp(log_preds) + xs_neg = 1 - xs_pos + xs_pos = xs_pos * targets + xs_neg = xs_neg * anti_targets + asymmetric_w = torch.pow(1 - xs_pos - xs_neg, + self.gamma_pos * targets + self.gamma_neg * anti_targets) + log_preds = log_preds * asymmetric_w + + if self.eps > 0: # label smoothing + self.targets_classes.mul_(1 - self.eps).add_(self.eps / num_classes) + + # loss calculation + loss = - self.targets_classes.mul(log_preds) + + loss = loss.sum(dim=-1) + if self.reduction == 'mean': + loss = loss.mean() + + return loss diff --git a/timm/loss/binary_cross_entropy.py b/timm/loss/binary_cross_entropy.py new file mode 100644 index 0000000..ed76c1e --- /dev/null +++ b/timm/loss/binary_cross_entropy.py @@ -0,0 +1,47 @@ +""" Binary Cross Entropy w/ a few extras + +Hacked together by / Copyright 2021 Ross Wightman +""" +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class BinaryCrossEntropy(nn.Module): + """ BCE with optional one-hot from dense targets, label smoothing, thresholding + NOTE for experiments comparing CE to BCE /w label smoothing, may remove + """ + def __init__( + self, smoothing=0.1, target_threshold: Optional[float] = None, weight: Optional[torch.Tensor] = None, + reduction: str = 'mean', pos_weight: Optional[torch.Tensor] = None): + super(BinaryCrossEntropy, self).__init__() + assert 0. <= smoothing < 1.0 + self.smoothing = smoothing + self.target_threshold = target_threshold + self.reduction = reduction + self.register_buffer('weight', weight) + self.register_buffer('pos_weight', pos_weight) + + def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor: + assert x.shape[0] == target.shape[0] + if target.shape != x.shape: + # NOTE currently assume smoothing or other label softening is applied upstream if targets are already sparse + num_classes = x.shape[-1] + # FIXME should off/on be different for smoothing w/ BCE? Other impl out there differ + off_value = self.smoothing / num_classes + on_value = 1. - self.smoothing + off_value + target = target.long().view(-1, 1) + target = torch.full( + (target.size()[0], num_classes), + off_value, + device=x.device, dtype=x.dtype).scatter_(1, target, on_value) + if self.target_threshold is not None: + # Make target 0, or 1 if threshold set + target = target.gt(self.target_threshold).to(dtype=target.dtype) + return F.binary_cross_entropy_with_logits( + x, target, + self.weight, + pos_weight=self.pos_weight, + reduction=self.reduction) diff --git a/timm/loss/cross_entropy.py b/timm/loss/cross_entropy.py new file mode 100644 index 0000000..8519810 --- /dev/null +++ b/timm/loss/cross_entropy.py @@ -0,0 +1,36 @@ +""" Cross Entropy w/ smoothing or soft targets + +Hacked together by / Copyright 2021 Ross Wightman +""" + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class LabelSmoothingCrossEntropy(nn.Module): + """ NLL loss with label smoothing. + """ + def __init__(self, smoothing=0.1): + super(LabelSmoothingCrossEntropy, self).__init__() + assert smoothing < 1.0 + self.smoothing = smoothing + self.confidence = 1. 
diff --git a/timm/loss/cross_entropy.py b/timm/loss/cross_entropy.py
new file mode 100644
index 0000000..8519810
--- /dev/null
+++ b/timm/loss/cross_entropy.py
@@ -0,0 +1,36 @@
+""" Cross Entropy w/ smoothing or soft targets
+
+Hacked together by / Copyright 2021 Ross Wightman
+"""
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class LabelSmoothingCrossEntropy(nn.Module):
+    """ NLL loss with label smoothing.
+    """
+    def __init__(self, smoothing=0.1):
+        super(LabelSmoothingCrossEntropy, self).__init__()
+        assert smoothing < 1.0
+        self.smoothing = smoothing
+        self.confidence = 1. - smoothing
+
+    def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
+        logprobs = F.log_softmax(x, dim=-1)
+        nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))
+        nll_loss = nll_loss.squeeze(1)
+        smooth_loss = -logprobs.mean(dim=-1)
+        loss = self.confidence * nll_loss + self.smoothing * smooth_loss
+        return loss.mean()
+
+
+class SoftTargetCrossEntropy(nn.Module):
+
+    def __init__(self):
+        super(SoftTargetCrossEntropy, self).__init__()
+
+    def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
+        loss = torch.sum(-target * F.log_softmax(x, dim=-1), dim=-1)
+        return loss.mean()
diff --git a/timm/loss/jsd.py b/timm/loss/jsd.py
new file mode 100644
index 0000000..dd64e15
--- /dev/null
+++ b/timm/loss/jsd.py
@@ -0,0 +1,39 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from .cross_entropy import LabelSmoothingCrossEntropy
+
+
+class JsdCrossEntropy(nn.Module):
+    """ Jensen-Shannon Divergence + Cross-Entropy Loss
+
+    Based on impl here: https://github.com/google-research/augmix/blob/master/imagenet.py
+    From paper: 'AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty' -
+    https://arxiv.org/abs/1912.02781
+
+    Hacked together by / Copyright 2020 Ross Wightman
+    """
+    def __init__(self, num_splits=3, alpha=12, smoothing=0.1):
+        super().__init__()
+        self.num_splits = num_splits
+        self.alpha = alpha
+        if smoothing is not None and smoothing > 0:
+            self.cross_entropy_loss = LabelSmoothingCrossEntropy(smoothing)
+        else:
+            self.cross_entropy_loss = torch.nn.CrossEntropyLoss()
+
+    def __call__(self, output, target):
+        split_size = output.shape[0] // self.num_splits
+        assert split_size * self.num_splits == output.shape[0]
+        logits_split = torch.split(output, split_size)
+
+        # Cross-entropy is only computed on clean images
+        loss = self.cross_entropy_loss(logits_split[0], target[:split_size])
+        probs = [F.softmax(logits, dim=1) for logits in logits_split]
+
+        # Clamp mixture distribution to avoid exploding KL divergence
+        logp_mixture = torch.clamp(torch.stack(probs).mean(axis=0), 1e-7, 1).log()
+        loss += self.alpha * sum([F.kl_div(
+            logp_mixture, p_split, reduction='batchmean') for p_split in probs]) / len(probs)
+        return loss
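JsdCrossEntropy assumes the batch is concatenated as [clean | augmented-1 | ... | augmented-(num_splits-1)] along dimension 0, with labels supplied for (at least) the clean split; a minimal sketch under those assumptions (sizes are illustrative):

    import torch
    from timm.loss import JsdCrossEntropy

    # 4 clean images plus 2 AugMix variants of each -> 12 rows, num_splits=3.
    logits = torch.randn(12, 10)
    targets = torch.randint(0, 10, (4,))  # labels for the clean split only

    criterion = JsdCrossEntropy(num_splits=3, alpha=12, smoothing=0.1)
    loss = criterion(logits, targets)  # smoothed CE on clean + alpha * JSD across splits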
diff --git a/timm/models/__init__.py b/timm/models/__init__.py
new file mode 100644
index 0000000..0982b6e
--- /dev/null
+++ b/timm/models/__init__.py
@@ -0,0 +1,58 @@
+from .beit import *
+from .byoanet import *
+from .byobnet import *
+from .cait import *
+from .coat import *
+from .convit import *
+from .convmixer import *
+from .crossvit import *
+from .cspnet import *
+from .densenet import *
+from .dla import *
+from .dpn import *
+from .efficientnet import *
+from .ghostnet import *
+from .gluon_resnet import *
+from .gluon_xception import *
+from .hardcorenas import *
+from .hrnet import *
+from .inception_resnet_v2 import *
+from .inception_v3 import *
+from .inception_v4 import *
+from .levit import *
+from .mlp_mixer import *
+from .mobilenetv3 import *
+from .nasnet import *
+from .nest import *
+from .nfnet import *
+from .pit import *
+from .pnasnet import *
+from .regnet import *
+from .res2net import *
+from .resnest import *
+from .resnet import *
+from .resnetv2 import *
+from .rexnet import *
+from .selecsls import *
+from .senet import *
+from .sknet import *
+from .swin_transformer import *
+from .tnt import *
+from .tresnet import *
+from .twins import *
+from .vgg import *
+from .visformer import *
+from .vision_transformer import *
+from .vision_transformer_hybrid import *
+from .vovnet import *
+from .xception import *
+from .xception_aligned import *
+from .xcit import *
+
+from .factory import create_model, split_model_name, safe_model_name
+from .helpers import load_checkpoint, resume_checkpoint, model_parameters
+from .layers import TestTimePoolHead, apply_test_time_pool
+from .layers import convert_splitbn_model
+from .layers import is_scriptable, is_exportable, set_scriptable, set_exportable, is_no_jit, set_no_jit
+from .registry import register_model, model_entrypoint, list_models, is_model, list_modules, is_model_in_modules,\
+    has_model_default_key, is_model_default_key, get_model_default_value, is_model_pretrained
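The registry and factory helpers re-exported here are the package's public surface; a minimal sketch of how they are typically used (the name pattern and architecture are illustrative, assuming the modules imported above have registered them):

    from timm.models import create_model, is_model, list_models

    # The wildcard imports populate the model registry as a side effect,
    # so registered entrypoints can be queried by glob pattern.
    print(list_models('resnet*')[:5])

    assert is_model('resnet50')
    # create_model resolves the entrypoint by name and builds the nn.Module.
    model = create_model('resnet50', pretrained=False, num_classes=10)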
z9-a%|2N}Qbo?4yka`q*U)1L)<8=zc1`V*@5Xd}>aWgYx{`YU9dW1rq7c zJ$?4SOug%#-M$rZMWyAE7LEyCs-8WqHaxR8C;CqE- z{s<);iU{dn$3_Ikz-X8Q!!-I<-zZsqvu_>mha*iQOzt9HUbdszwB@d!5KaigExRx3TKg*m=%t+=o4oOqk`hxwY1P7fz&&LhH1Nj7Kdx{{O~q8!cSlsL z1a))La!if&CkB>j#5;<$VP_^KJ%s%^+;?PGZbLrNPxs9MWp75_B;?HzWk94S7GjwG z?kPd;bFdnm#d_JtaaZDBlrvq}i8L(Q!3|mrn&yLckQA)YkpnxSf-7AwJfJcQp=lNV*y;l#)mTI^M;sFqxj7y38`4acIR zGqN!Dak$aM6&6}jGy{3r$GMa(FP@!K^I6Aa3g<(vi)p4 z7aO=Z4P4Cj$3WZ1*&N__Se1-61v-!%l(EKsSoo<8AT)YD(8t)M_A`^jCY(6~D_(C7 zBajydfh4Bn4#hcBP^4go2{FaPWaOvmw}S7HwN0eZvrMS*Hv~9L{caNGN%${dg5-8f z3WuR%OJyk2JJ{guNJ1MPp(wIG#iDsGOg04^^xIhHr%6m{Xe66aWArm5C422fz>$8K zg)}_CwhyWC^cmI=eRNjp-X&2bjkpIY_1gZ$p69_ONztEel`m^ix#!q~dRG|N3#@5g z);}eG&xGbeXr9&QS%cFF&5NPA9GWYkL+uRrj}&Sfu57v$nyLgG>1g;H>cc4b_aQN$ zAjhC4cbbz{22a+aGR;6`awxl27Akg_ie|3v*;JFbmqJn((8W2;o+Md-N4ii*6L#A` zQowGeM8C1uWIkIeUF?;c(Hl`~O}&cPpS<9;{f^cbK?%oW9H@9em>3J_9Kpr-NwyGN zxEHyfz~fUPUCS_Y4{6dWct$@)miY6@+>w2T=n@2x{~XV5@JT@7oa$D)EvYhI3Q-px zbF|xt6a3L9Oyl7?{Xa&s#cs<`)^UDXflceJ z@4#A0P`iK%DbO9*a@Mk=9}g1!6!1f^W55&Hg~b!KO$W&J?P=OW;{p8X{y5I%gt9k) zJEd&j)n5eSjQ8Cj2dfHgpz-j+H+XJOzV4en?W{_^B6U!2kDTeL1#Gv7y$IpOBmQfV3MN05Z>v*t6Vxc%*+>ZqInUH$g!A4yOL&fAkw~7b|I_(0K3IPVP341n?29h zS4hcly z=(e0x0uiGFih{=Vp2g#T0tw(1M)0z$pORyTaLZ!`A_w8KAZjq20o&&2tsGc<%YS^+ z-fYRGxm1TH>dP%kkd z5bl68!HTbf9>qAot)t-3F*9$jjz?S?0u?|ht2KM@nFLHkcARIPe(t?t`Z>zt0`_eO zD(N0;5|Z=^6WOktfanrly@to9OuCi}Or4t2q*m~Ze%u=V{9{OlP$Z5b;OMFgM-gyz z)hIdwF$u6M5l+7l8%pP6)AtOl#nEOHSw>#rH*gMaK^H+5g>6+t`G*$4Ia+a zU~PI~)3OnlNK77hICI!=G55u?5X)wp-?IfVBkXyS4JhfG(5D|~QOpy9gno+2(@f4W zd4|b5nVewqER!dY6w;A0o+d%gfft#(z~sG5USjexlaokx2e?1O8~-lXj-uqC?-DOaFC{sCO5W8X}LbxX_CY&h6u zfFvX{F`#FgT!{9AHfOX)G&ZA+NN(DE+9UNelMz3$QG#Y&Qb#}%fj<%E@9_wN7_kku zIae;E^a)&i@xsh3PDa;c#XZsGSaC}5aW8h`Sm>NV>~Qtj{7ZEoae=LOP*V7F^)7mscLqP@(B0yvv?%2S@+iZVAH#gt( zrx{Kgy&Z1@bWv*@HFE3Nz74jS=RFngsf-p^phNAMgSm?r8& zXd>71XM)?M1aaH|gh~k^z^H^bd4@@YJCK`UzXJ;+{GrsEY23M`9!ib78pry3F(x8d0l_f4Q2x`{eF4sGphcM= zkopvs8sO-CSpgn|cRyuabY6_00)063z-u8iX3d_645IB2^25yIO$3>6QQCb1_v40d zp}(ZDdIuF`gqO3A!(%Bo{U?wa))xPH+nngM5Cr^3hjTiyW(JU|_0)P|-CeiVjdg(I zAp^VTtY`Fba27zzU4+bLqIuKS%WT@w`XlITHw#){r8)!BgwU- zEDDaoE+=xBt~3z`>?|Y7hnq@2y5H1va6^}6`ZfaA`m^km_HgJTLg9DI6;J;fi@Z{Y ztNk3dHcUjJ#3XL^D$BlJ<9mm$34#F8{jTQr>OFjsD0jQfD{#BTn~CqlHO=K(-6AALjhox8ds&p-<^3({cfhI|?}Op4N&Hv; zI>w64l){*dtbdhrkcqsp?8w-gzS}>esZT@6NYJ#&;>L9n0hIml+h!1L%;7I$l-5DK zbIha8Bx9Cl9sw4{AaigUCgmSIm#qpr(n}uK2|Q=<_>^B7%va%^T|N_tp+m%d--EAM zYt|aW33dY^$OgmoK>Y&-O-rHTB-u{AIQ?>DbUF(aVg`Tw%K-(L4kANqDxfU|wWmWz zfD}NqhLL{bG=g=Gs;`tF33R|6|Dq0nR|$NdP)5{P-2GBr?J#{Lq{eXkk)fs;=IY(T zgB^mmiCqX1utkKfF7Ot5=ZVJu2Id;MlMMHWc9_Fq82Q9QgVDqs_%wyTlL)LwqP!JTM)Hg%B4)y? 
z3<^UjhFJ%1-3sld*Vf!msHoV@+t0f&SM!1r0av!qzn^mu`88Mf%DCI#?>;a$ms_&L zuTbm-{dtu2YfL!W>x?|n?vx^kEGe&2zH(wTv=s};jtbkx=E7ysJQ}E1(Sl6@;@25b z*1Gw@$%CT-x2|>by{54_3KG~T@R339iHTVY zB0Y%w33|)1B|;|@#GlB@X=IOh*SixumTy>0UI#q-pUW}kM6}g%a30`X8k3_&I zKASfhxG4Ax2@)504Y@GsUGCyrHVJa-65oBE$rqSN2Lh45$>JB85Z#Ba{SxwFW>LPb zqD@;&`QF76^e{9_MSSoSnRJ0eHxPS<5+Tz$h9T`Gz=N2DD*Yx>w)+Eq@!A)M> z0VQzDkU?N`(uy~KXY0}CQ@8nu7!S{+(RSfnio_s>5B*J4jbt-+ySca>a}{U#Jf}xp zcn)y)uWA&77@abpSb#PO03;8qUI51RqvfqjT%6$%vKAi9oPpRqoP zs9zuMNWobsjt_0z62Gxro!wTv?;-s(2J7NYX_;1WBUCr_i#EB#qu`9ER*xZvU}nGAWCo9B1GZ}hYC9G6yNwA{4`Bk2g)K~gO+xgI!bH~*NFK2v zi;qlpvSECo&X4=vh!<(S=~MhK7;-13G-L?#uwwq2B<|!^e(1j;bvHJ!Rck975ZNai zNZ-bW|8KHz7grR^0`s@NbhcXAy7X8ANZ+>f*OLHztuGRQ@9&knO#*(TB0#fl5Z*-* zE}X0VVCj47U-^%Jd;*7*_>#NrJN==Z+Hj1tm;=n;ItSS5gIhTeZ$9bUIKazZ(E(mB zZqos=K(Ky%G^WzmVIkat7C4yy-=f2t7?F7cE!d44ceA6I8_eH2H`r=@D>rhHDvC4N1SH4n06 nXR`c104)5YQG>kv9|4=?h?-4Iy)>1dddpNNle5gMyZiG$`ACCh literal 0 HcmV?d00001 diff --git a/timm/models/__pycache__/byoanet.cpython-36.pyc b/timm/models/__pycache__/byoanet.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58e99260bc300867bb4ebcf56c9ae9724f6b6df2 GIT binary patch literal 11525 zcmeHNX>=Rcb)Laa5L`^t@@|dYEQTZ!Bp_~*V~Lb0$+k>85*^2Ao5=w80UUFHf!_>7 zkzvv_w$mkHmS#_!wAte%b<;IzlcwpGrOn=En?FrYek3?KCnvx2EBC%P00tn$fIeYQ zeiRfg-o5kQyZ5{IzIBuv8cJsFKU{eBoTB_l34CtF@@f1!7ZQpBN?rjKLY7(yHBd?BlcF561}mw2N>$2Aeh6L*2^e@%%@4C- zNWvie0sJA1>_m_oMedK_kKq~^+le9{NB(v2djILQ$nC%zd@c@egg3#PVO+Gl4*mq* z0&n%XL-00uJG=w?xnA^h1H2R71@CV0H^O`1X?SmozX{$4e+qww{4JvXt?+*M0DKT` z6MY{>y&r-P<5Ovs1|NZs!X5B2cm_U>(&z6a(AF1Vtxv!wed}I`+@Hg<#KE0UkK{+# zD2uTuq~Qn*q1RM63deSuk_nVHB?j`B-{lr@-^MPGvI4Vb>K+A zU%@H->(uktaJmCY0{#YVX(B;vEhHyfSQgQnO}~(*mUelnDM`TJqP2v9B%Pk}>$LhG zL=66RYw8@k8M=R15S$LBxo^41rw>aDhpZeQ(%;h*<~VYLgx5qKEe zFQHA%IqWpNBp^w^zu>b+;HAE-J-lJY=kR!NT;}-)OyLhY{M%BcPnz=fmgxr3GO!Jz6-C0-xu6r5fhPo~ zA@_ZF5}p#=?XY@DN&mojtEwp45;v>miAU`^pLlSiIL}Ra;^1hRxTL$wjSX zu`6c5Vp>(ow2h1ghHDfp!*R5N;V{sOcC}=dwUW&>2cN8CuiDCrR#tztDsC{S89x#R5UE@ zvcXNG>N=WTI+4`0BifSfx)!Ul;<~oVjjDt8RIpF+?1E7#fT3Mv4q}s{^M+*$CV9rL zIwml#8J2~;Y3>@+DmF07(SViAs!2|9v^B<=W;mTzJaX?tno$MKGS?Z}jOG^CjB1%7 zlJn>afuPK=N3In%wG8^Iw#m)%nyX#3(SeUpv0_w@Y1ygl^klL@`@eL^yYAix7SAoO zEU)VKET3I`_`<6G!1CgXH@-`JY4x54#!^W~yuqbSdx?C0rc@Tp1B8K?I6f&LR__|K z>N0LI<1*7_c+#pjUZ|TE=mN36VY+L2u~bI+FlS}c!TR!%rxZyMeM0yh#_#?3b?!j2 zO?%Tp9A8n0xr(deyn?$-T(^@?r?%C%hcM@cSInP;6{Ue+rSU@|aU|uE`26gY7cZE_ zI)1lCP-SHjbL#T6pj+`ZX24{rcs8B#!gX$WgVlOP7YWE5FKJe5bys)HE#?i>Y}=B> zM3LJyy;gL+A%VGOTLy*Ei&U6V^}>z|Ub19z$JI%dmlXY(C6jS)v}9gkpa(i0u9(#T zpAPdB8qbHCBpu@94?%i8B!_WAjul7o$BV6R7`*H}qj1{g|KePR##A=5_1I>nh*5Jg z%V*BK1XCiLaq67u70e(ToD4<8uAW<5$+%{vl95T8DQwyX4$sNjrWc{uU5{NlWS|mC zs1fm(Q2Alxx12TCtvM$$nKH(?UO)#|GJL~N!c1+`wRv%E!mJo&HsSZq8Rmh0v4p9KGmUXUs3mmuAVjrng#-;y05$^@H)&8cvFSB+*yGnB3HSE|;Az zk}+HE!KihZuUJHh5@>#oP0i#=3&I6{TsmDKf^f${skX!#sdMHu$Fhh&n=xjJ^D}c4iN>oj_nO0m@~}OAWjMAE z#w#7BHG?9oE6mU3=B7%*y5>;nG;He?{|7c@XL$`LKVgzln3~EiWI+s=1De$+MQ|~| zKy9H`ZWuyyao&Kr*=aFv{1~HypC6vu8fY!3a>1gNDxHv1-61S1iV;CY}**NY1NSukbalt?C7IYzwcCh|W*D;w_woraD?06?dz&Mh zicV-x-&X90`{$<`NtEZqb64iP2;~T#QIaAK{?40ZqL_~LG_YkBZjUdKid7`u06{0S z9JGs~z|Ru7nrqv3VbD%f%}%dMH}q7+AHwQ;Ij zjV~ZEHQLl_SRx;Dz?m2i8R<}dKxQnS8N8%rR++(dyY7|`KlRfWf95t!pP&;GkFWdf z1_Pi(mLzGfJf7KfmH-e|t5Z*+EpAK(_ z-w}F)is#eF$|}Zitc1rzyav%7_IRCcLkue&_6G49z^Gfez~fQUXUys}d2N^@i40 zgc0#kwFC&qB{?P8UeKt4-VryF94NSs zwA=&S3(5Wx5g%Mj&@VV@Q0@RnlHTC_qznVjMuDV^Y#Ab_R+u|+etJ^omzaCR6|7km 
zw~=_CT4oO2I^xZzxR|UNSd#Htd2n_4gtoe7IvTIz1tslYG{>?xtj*hHa*AnJfKaJF>EOn8bqaW1(^%Sg6`l0W8G-I3iyX`#6^DHckxz&sZuVq54Vp>iKcsM@;&NQm!^q{o69g-FK~9ot&zLWM!O*fT76 zNiL$oDg}JybJf_i7g({|^|4X8-<5~zzMlZLwnHT84e_cg&$t}dM{zq>#`PyJr&`*3 z%N0=-`|v*8O--H&xSRMrZP~pw%V}rqO2O6^+wTSCK+ujhtz5F*bnBfYZ8rHdnObhy z8)^&sFq#^FDquA6`!srbx6xvMxQEGEvUV+Ca!b&M$yE2#0h5WxW`p$Ozl+kaEcR}g zNw-s($Zy8O{aN@MSNjg~CXv3pxxd;P^-ZKJYQIJD7D2_WvM4nz8KP=Lu=y~RiR^y3 zKbHpcC&Y8q?MbQP|I2;*VdZRE-Os-}ytuA|UDS1NfZpv+(hFfu$3pSm7+;U$D}20O z;ZYLBh{TB`ka+lp+`?0VBW@t@9!k8o$Ggm=c&O|6{~B~%ya47EYGf0s65&MXKrPR) zbVTEHl-bvXjhubOeAd3N5G8SGk+=)*Kk62{o08KRLrf8MRJ{RjDpH|HI5`x;yNe`# zLnvvNlcC`NC!hE<)Ll0vBoTF3{K~#YBO&~Z;f=_cIxghY3U5h-^pJX}L)v~a-c`TZ zqPrhqL%2%=>OmWJX_c~GI^`2=wad<4{gBSlZk;GY{HU|raw+Y!JB3~mk}mWK5dreq p81;wtw5jxNNR0_CZMm$|e42_T2k==OzcK%p%1xeR-QU2oUjq1`@R$Gq literal 0 HcmV?d00001 diff --git a/timm/models/__pycache__/byobnet.cpython-36.pyc b/timm/models/__pycache__/byobnet.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06ede299f3eeb8ab6068133b4a25cb40ebb4b126 GIT binary patch literal 43550 zcmd^o33y~jb#CvQS}m!i(JUV9_So1X+uCO}#D2r(}}$PGzsAiw}2jssZ;@8$jfsoS@^H5ywZ zMj_w#G;^z~>Q>dM<yGTvtd8qh02jiG#jnNvaw1$8?Pj?iAqPdqms-fE1lWSN-CR@ zbm4MWr90a#{y@1W+au+sr+c%#@I}gfl{MKlmHuo$Qbbj(ytc9~yRNc6yI#Wa@`lP~ z*~=;$vl}Zf$-bm=dG_+k71=8)nQW$VW%kO-rtGH5RoSa5S7)!TyfpjL$~D<*BwwO@ zZDn(IGyEMYsXA3kbxnn{18?@L9{fw6@n^4tKdpLIAN%SWQ^{)S=@NaPa*T8?7>%SKMjjn&Q zV(kd!ay(b4>(vbl5#xF_h)@QhA=Kh}H7vEeUX8fFqwepR`#X-*SE?^3~x@$AsC>h`ml z>&{BqfGWU#4E6?!hPz!+(sEhtG!8ed_)*;q17v#TYUM)dRrG2h~Hs*6URkEoIC@>XmIV539S> zBa8uzm%FJRR*yJ$+82%T7RA_6# z1Hjhyn1|IarQ9~{@*q8=48*mzDGJ7d`_+W9&dYsZiIfMIOnGRDl((Fhl704&wB#{0>$P@59Y<_y%w6ilqL>MZ ziE8X9hJY(4(F3Q{oO%_og&0&v|FDmMLB^KTJ~h7pTm!t7mHuj7y0Jw)u3m#4d@V{N zl%?wb(0TQLNWE@R{U1VIy*8r$6?IyjLG53U`MVnBzCk_V_RFQN@sq1o@*7vJl|)|iI`58W7d9J`UCiHD8;Zq2pwk{4FBnB&S>^HWQyiMa@9>p{_?s9Uq7yM&*&#{4)&iZrm9X1(w}H^v){ zZuRyHwC)`X3Ac3`-eoOiX?U$M8eVIRhUdmyB)qqveXV6(CL`4vbGNjnHAc_-^Wl+i zEj44X$x-RX?TPbHb(-)A>ljJ`_%g-yi>w=s-IIIknoKXo=`upJ}BXrN%$`H zA@vIqzDdG&tN)^YQNk~m@GI1RRUek{%@V#x{WtYX65b`@d(|(iUy<-`2_II!s(wwv zdnA0H`gQdY3GbEg{pvT=Z%TNdgdb49rG8t&`z8FK`W^LA2_KN~L+W?c$0U4E!ddm- z)$d98kc3~U{)hT~3Ev{&ht>a7AD8g05`IMeFZBrt-zMQl)gP!ol<@5m&Z$3ApOo+& z5`IkmvHFyR@04&}{fYXtgeN3Css2`Ncdg}PpQ99j)V_O_=x&T^+gHahdsiek>b8U_6RZAA^g?Zfck6oH)lfG z`_(h**)zWE1G4YMQDR5&m1Fclx2z!EDxeP<{2RUwut|9XVf# z_vh3%@ctX_`-c(pP4z9re9Mh_1TlZ7zKxh~yD^U<=I_;Wh(- zG5@H(gP8BQF?sA`9yT5^9yM~tV@6(mcOjCU#4ZT+QvZau{ImKO)Tn^AN*r6N{uMR- zp6WupLV5qDzK`<0j~oVCQpB7nxvlxXDutcW6k?{;zr*!mpLGPk+y@2FGbJOAQj^kq zg9dkF!LvcQAv|G}K8>|(z?d#_PjD3bv1aN7cV5}kc=kK<^ikPE#I`O1XzZ!nac)IX(BRr!5_xd(h{61g)`yrn%v)3$D zrv@LY&6UV%a!Znk*Zcs*cN?7|y7?oiCL0mX(>z zTZYOMYSm(CDpRbP84GWYqR5PWq-GhJiHXdl6giYg96nOAGL@P#${BbHv!-cO?eeM2 zEQ_#fnNnq@Y*Y}3yaoy@mun|jk_*_nF_AcQ_kEcI#bT*YVzSNm7#3=34P*v0Q-+Cj zx$xHDAsgH%zNks{&lUTn3ma1F3GvOwJMrYH=cOW%6Ych~m~`^W>?_ zy$4WgCO?ZNm;(btiM#VNhIwP=u?b_=M45Zc!jY0~6zo~k$m}<)(o|KNj>-)-D~BT3 zh1O%J+!5QJv2Gk5&YLGo$A@a>)Np>%8Xg-N*)}w~Wn||_Vjp@Xldr1G38OT1#I|nq zvP_liBeRo3g<55}q%6B|e7FtFU<1qznR2OMRN0k#X7UAe;MmYeB5{u~bN``3sDf$C z9G{xXUFS9C&b$m0-VR#!DU9#FTJ<5VRK8Swfy$4Lj1G;AZ`-+jd6nOf zk$kX}uiY}NtJG58J8wOlNZeY*IFvC=mSM^G3!+$=Bl+V-X3{XK#GF#Gq*0|p zGo`AeReBI=#mwGQwMjH$D0Azz$Y&ar{LGA@5=1BuC)_@k{sw;NLh}V6WFB~B0aFhG zk=s$NijKJJ+M1s@i5v%@)vmQsxxO(KVB_GXTs9<#wlK8x;XES+ngjW=!E|qei+Rjd_f& z+Jm-ERndoqJPN4JRwhv`3Q6n_OxB;*WR2TZ5!odcB2}%fa(ZpOc~NKn8m{&OEafH2#(m9dupIn z571l5S8vFSjf{;BCC*VGJTVY-;vPM7!oaYya-x_n<$S>yh&kcIvmi@O=nnL>6WU)Y z*iP)O8G_B1o$!5Cc%0y#>M19-4+xwGrF3HV8pmc0V8DRiiE^|{`SL)(S##^1dk!6# 
zIB+<(|G>dL_uX+gcjtjU6HecG(f1zSe+~rNhdh0gv!$}i$?)cg@wq~A%IP4|sT^>- zd}<)+r1p^(6OYYXK*^jqr}EzF9#l28H*Xh?Ow`N@LSxE_?Kf*PcjxURPUoKEQ+L;D zvX_#Cz-=OHV^?}MGSPi zRhz`z&Jj1AWFv;G#qkx`S^Nv&>A-V49_w;AbqenjszZfU#P-=>rz)o6XA^b*+rb70 zk`tw`2YtSCczoxcp%aBOg$IA%+U3M1OXX74$eVLk_yRG12>)V!b2~ym=nzO7&-pNr=VB;s>Q6q!KMX;1vQ(JGGuP#f!06&} zFxhVNdOED1IY?)S4)r|d2%S+nV|2#hG&*$$Bj}(bchXO1iB2`Q@Fn|GJKNxM!t4-} z-o-ugGQ`bIH@JQa3pY==G-!C{lwAWs94uAxQ^ug%+on-A$N+~`?L@U)%d6qzBSWKu zSlbM`Ol#O(=U9W=w{DxUZr@(WkL}#CQ}AJCmB(OmbZX0Beo`4* z)y^@2xZ0yWIW{=9b#nWbozRRC<5#_fV`$;%wypUcTZ{ieEgT;l-MMqJuysV{+3L4& z94!P58Q(rKS(H9KddaY+K_Ib|1;g4pdMsyR+Nv$vRiUtbn*d9A({}V+2CU|5n3wo_ zZr4DhIZ>M|#ObmPFytmj7oIY=T2A$Hz3L4d(NO;?}XPBhvG$3v%2AY3v-G9NC%QA|S2RX{C$U_ILv*3^597 ze2et>+$sTe@wy4LxFfH&Y*Cs+&#q2OJ+y9u-BH}RqloRZbg;QPU>hC0b;Ksc8r->U zeESwPDr9rjsTF_Wjk+n;O{zAzV`TeyUaxDXit>j}3@`3Psh;%OrCS*; zIq+&nZt}<=mIWi&erW=-$`B0=5mR`Fu}zSBAVZOl$k;8v;P zxCywBZ^n-`^9HYg2AxszRfwO{(MSsd(NZCj3s%;h$D`YtfkJCQjgJwuPNsG= z{l*Tbs~Mj&j@Kp^M_2{SU+)j99c|!x79PU~!&YB5aG%cz8X+S*9ifWq@u(4dJdP)! z{MiotcBnu$X>=k@%t$c>(j-+d+hue=-i3EPco#xk5BzEPdr?-On=XyGHSYU0@b@oz z-;cPpi{jQ&6*UktH=&B>D7u?)C2v@euXAF#9HpGOoRi2=RybRxKbgxNgMeJWG2_TY z;hE_Ks#PZf@gY(+{Eg zNq?W8Pul=SkZd>8e-_T-0mKl6m7iVDNBbU+po=-AQ4AS}HLgP01oWIS3|T^jIkXtg zNH%43J)Tn0Y&XK)Du!WAV@TuKUi|i|M79rk`&0*pGxqozmBdi$GWw0R3bw`2HFhod zv+IoYkFUeq4er|wc-!s0y$o+JLrjksv(b17(CTv5_wkJ?iGfd>SE6Y{PLxtrh)kT| z_{ni6TFIZJ-1J-qFFkxP!5@5w4>}=8$Yz~5rf%T~MbQ&&ea1xl@L23T+jrUrDE)&Sn4c}%Ijn3TyF0Zk z(B`XA2R(H7CI5FU2+nXGC~Ih(Y$#C zZygbG=VbP1F9$-VjT7V6IR^!fS!T-QiIRdYLidDeN)*N5Md%P9RJNSdoMG0mnJ7_% zK*h-?;Aid_qOCv7rKSyeLj!1K^8%}|M`Nw`^EW_5#a8zPGsKRgbJqdmO z(U7_E34alKdKEnrdn54e2_HgGFPw?l{<^Ootou&~_xn!6mS#Rw4*?fDCeH0fDd%1T zN2Jc@sId^-E|C85kH36+@mL$-)L~z+)~5pfJ{3F~FxS<6Px>|hvsIA9bHZeeO+|<3 zFP$+E$ObV~Ac5*65H~Vq)@Elc-A_V{ha^5h?f5A_RceN}L-G=p`X)bJfl^Hw^nYc|BbaeLQ%r}vKC>2$UM z{uH9DBpjbh18=#1M0L>R5wEB<3vmY)23Qr2lJ1eO#HqbkHwre zI{Gva;b_3>w*9a3+ktw(4xSE}`|Xe&u0utn0#(GZ`&DqQ?{pAIhp@FD=o4J$n?lHr z)NyRUk3pLcpWKUFK+OHVN4CvJ>UeWBVpgFo3$FK7BlrcGo{!eUcC_fLgQ2L<*+4z? 
zj=)p?DgSolKbC#KXUFQ{(|!zZcDa~ zbsW?WvCuGVU`hpzQ=Es`$9S#McwR%SAiH@CzH`3^2SiwO>!sHKdC!s1d<_r&V!O>( z;iu%o(gWzqYMZa8bB4|naL$duDZK`>y)?qioe~7PoJ+yy7V|H=Tu)>-dbx?vZ$lE9 z1CT6A*{2yn0B-uwZp=vHz8m)Kc0JuJ$eR>yWZ&*7B*ov;yGkTmCDKbLj3t^HD-n&I zkX0(rqDz_$5r|%E%pXN=CuFE8V<7s%9Vc|(39<0Qqz#Z)nt9QeBIgU1FF8v-OfM&5 zLrZQba}HoJ)tAa9H407@@)xK!-wTIw0v1pZTL34B6$z`Pk&D{j3slL=D~zo~7IT4= zPY{Zi992Eah$itAq#{0DpcSO;=N_X&T7=5QOzvwR^I&Pq2IVHS_HDQ~Y@Ug5jz*hW1 zr3W(RLB2geXFr`obZ((@E1lcu+)n2XI(O2UpmP_UyXm}w&OLPQrE{3heRS@p^8lR( z={!UyOXrnz9;WjMok!{9=sZRzPiK-&0glH;F;9q!n!2aq0pkp+K;2jJ*ZriiWbAWq z%^W&#kJz^j-T{5Sk=YE}5yN%^MYC3ceGDz(U@k<*ib_M;*=n5u# zs--I!zR0e46ee@Dc^up`rf0;ZGjx<0AY{dD75&R-6_dTarBw{Cv{idYR;5)!C*4-j zzl>Hf*?U@A#qf$-HM%OT62kAcivDG^ipf6E(kg~8vQ>{YFmNvwJEunV02WdLWgziI)_@XCP8+S>(A z|1y9x*;iVCGrTh3vgUVz)4vShO!mzd;0&(}xa>7t;Pfv8IFmi!0-WKM0hc|F3!MIC z0B5p)Z2`{k%7Dw>$^}mUGJrD~<*|C_5R&cL%7DwB&IL~YGJrE#v;{cBD+4ZjNf$W% z%K*+~sTSZ2uMD{CVO`+#F9SG}t!V+y@XCP8-q{6C|1y9x*~S*&46h8h?Acx5^e+QA zlU>yUoZ*!L7g>P|oc?71XR_;BfHS-@;3A`Nfz!VX;7m5!0-WKM0Trx7kPDptWdLWggDt=rUK#Mmkh8H%pnn;_ne5IM z;0&(}xX8ZTmeaos;7oS71vtYi13rnIjh55D4B$-m$`;@ZuMBvh1vvf70M2BSEx;vP z!0CqGW6E%K5pvk%XOPgV6*7p5lE2n%aHu7+{imTLszVhyPfd^icb3bM?c4 z`M?{Yq7y|Wk1WUR;FG@T5Z|7rUdMmz@(1u9^2gwW2PS}$A=TGfzjg44)~|csZ&S-h zJzt=q=M!Zl+FL;?{Dk-uFyD?J$Df0YFxb$c$*CFhU3gJ#G#d)Mml`K$a0s@g!L7c--nE5lEuaisGSXnwGH#<7%7m0=BN11mBs_6W&S*0ZiRzteVYDlOwn^5z!Iy& zl6}zhM4;(O`!jy?GYFrjq1}f_>uNZ4pA}Q+G$`ME(9#B=q+4hEUI$@rU2AG-hY)l% zwSHtk9p1~IfG8537*A%(1##S48m^1I}j*_HRc+=h>0Q}ihZ-hVCWv>j{BS-4-JNBM57G~;+_pniyF>r zy7^h;d==ZAfG0t1J;o#Hm-x0;hw+|&)(vnPtyXb^Doc&%IQn;{GaKU3Kb+AIKLOc$ zHi8x=oX-880R#?13i2MLxkyBe5UfH}O7{8U_~i^heSFrFaCnpulPWMBIE^D~^~g*= zl(9Z2QbSbHnpf9@)qwItff__OWbKgnkXeK(mGAFx!*|%BD%7{rk@;9X1_gj9T2*)< zRQKhg)db$eP6NAg(RyS$E~;IB9j%e~2^B>fVsj%I_Z(-&YYflO;#4s-uh;IsRv#d? 
zy+gG_O@o2M2dN1v7^SkD#dLZKwb?4qJMbW`k(HUAQQ zKm#qY@5srZm}?QI2R!bbEOkP(AaP>1Djw9ePnn-%#t;z)`rZ&xz=^= zfjw7A;S{YZ=|hs{->~Q}!pSD`$ER=vl<%`0^ve;;VwDxm=|h;goJJ^4aPwI>uO}LH z!{dY1Rl?tme;fRLfrNjq-|I31RUT@zr){Ql%=h7u66(HbjK?%)Hmy3)|FA9!n174c z=9l5X|5f}r{*x^@ErceDbJIALsoui6aH9JBb8Fh6yD6aaWnCM{h=a%s73oaT;lz+; znSTc#Ob>Lc3r4xzXgOyQNX$2xQhI^Ib%u~hkYoP7qzoXEuulubZ2l43>8A|MdIKDo z0mP0*tfbH~Salo?S~mc5L-HF|pc}wO7z7>-nscCmTNLOV58ML7AH@7Xjv|Z(kOnDn zn?;}=(aNp5f10KWc&lI+@% z#5dsJ9Cyc28mQ_cx2-SCzh@#ZKNO}UztRbUk%G47S6E0i+oe3BAf`~#N#@s>;p=of zfJH|TOu!O&7A zTxy0JK0(dGs?Uh1PTX69yFPI{NEEhrJ)mwep>A>X+8QUZmv#wzc|>~d>INB*xQs;N zQV1Mg%v?`bOP{Vm4Wx4?*j)TQw@bS2@If~$cqK@a(g}QQ8p$J?>9f8MXAx=aa7kl_ z9m0@D`>@WKPr>#iQcs-@%y+>iB}T#~XyC8I0(BuO1g~S7@hKQl5kX*;qCyK{youN2 zR<+(Solw4`G4r_HF`Yc^tHv0E5z0VMsymy>8>ZtZ@4)Kr-$ZV3(V6asG68K2%CYWaK$5G zVxsSTxzN%j02jF(nYvH-$IyN2`Y@jOXNLg0%SzBF*RNkj!byu#rA4 z4Ftbnw&&uZro3@6wd9#q^DmgDM2C0}HUOGD#y6n&+9DQYwqIUJb!Kp9fcXW42fBsG zwY2g%meOp`JDDlz8W@Pdf%!V7_#T}4qgFu2H}Z5HMuM3e|$C!#@L#yDlR4bRsmAv~uRZA|7_huwvE+8TA0M&z$Iy zyoE6_&3{J-b~|5X5p0Q0Bi-Zmk&_gr?4E}F9d;I%P(GL}rST73Z*5E0w5DzA7xsrcacQPPUVnP!?1^IuS|G+H;} z-x!p>5gc^ekc%ogSO$6gJVVf~PT2!5kRw z#y`zAur{gp)Kgedlc|7Uo9}5!A#6q7M;2i6nCrlbJ2~AcEA9@knU5fCO4!WDbUfG$ zVo)QONeC>MTY|YnWli2C%;no&zJF@VrF81XXA*2#CjObcFvrE@S=^QV;Z*&oEPr$o(vR zg7xhjM$g&L;jLyNqd8!UCT0m?DU-r&JVqn4T_+uvq{0=*XG|dhGW*jn!ePG8*r1M=m|A)^1rNb_0%t)D{GA-G%&v069hQ}8qPwDe`N;f!43Ot7Ewhh=a zY{I*%VFx<5t~qC9M*Yy3jAjZge8mJ=!T8hZq!XYMq$4a}`a8tna=3|5wr{a_7BP{H zi`)O=oTQBIy;x4-@f6`5!bKupTvYhXr0|(&3!lNtv9URen;mQ_nX8%f`at?cI${NE z=O5V{L|95<8ce703+A(SanCHpfjV0_5WXCM(;0AFVtVi(c~AloOYtBx%H%8IKb*&Z z!pgh+Cm{T1J@^mb6xgE1-=*G)#Cdg@%#(ZqyWhr5W}5qKCa<|ZgTp_(T2OE92eb`& zLio;M+^crz0B^bzL&wJbUn-L)YjU&7a+xv@T>n+VHH5gdscm%-t(Ge|_L zHCtcc=H7GWo>pmGp5_9>od)plQRmaEhD^Ugg~Z2;Gpxp%&0 zcsdPNu^G6o3zwm}*S6u7H(VB|6AL6=@*A&qZat z?J31PsH>_~ah)1&AT(>ZY)xO8=GG90E^r>l!VQK3@YZ66X;lqyPw=)j+_{KKpkZZn zjLwOmz(Fl-dZAV zDQ}xtZrdiFztQ9-m8%WC)eJAhu%6Ic%@2BI{8C#P&8=nxTg^``xz+qq+g20u%a*NX z{GwI@;iXmr7hwDD#*_3$uMqVB2?-n%*wO*=M%+UHVVSzlCl6;Qaea_QoRq-QR5ai*xP=fdwn)+`^?NgiD88xU}$b zy&H_SAd$3rrM!7FTgpAhV%`kC+|z9FQXIK;Pnag^<0C#UN0vQd4}zDLA9I5FSz+PF z@sQ@nz2L`uQ(!9^e;4M*E87poJ<6T`dB74vj%4e+XY{ zn!_~i`X%#cr3V|;bisQU`MNCI&VWT}cAA0CqI6uYS;W7N6j;Ocp=?C~_n4A-?tfrdG<3t&T6Iu-C|sK7a@~Ws zK6vK=GHNac$ZUlhl9>XC*UOT{Z4;`-6$COMSH6g4z^)f&mgn!kmlV%(Q+H{3Wm7yW zoG&E%Y>H@2kL%5~alJ*H6W4N5FiU-x=9a(U<@=Skd`)gi!R)7(F71x@bTAAH+3pz7W^cdtkly zg={tLQIG$WT(h48@q=^Cdza#x&6fXX;F?WTk~XiDYi3`FYc^Z_Vq9}A`@IiNV;_rM z>>30wE!X5U+v0J}SPR$Wn>MaV!6W~cmZHX7DJleP%ao#a@LL&Nam!UWIxuEyth-5#qOXG&Mc~3rc?=#RU3S(5bpSpHR zZ>maj!sKL|ixQd{wcXk*0>5FND~Icc(l{{RS?@Ft*W=Sz74n4n0#2~w0s&tx zf;f~Q8a!^Sm?v3JAdTKw#TxIGAKF3OPy-f10WKJis~3E~$2<_jH7DLe{3}|$j|1w* z?aq2fn`SSCyb#Q~+Pp4To7dDdA9Tq-bjRn=_C+F8;S3P0$}5T)FTg6gwM2CtFtvqM zh`R3c$mPUf*U(}p5#u^SbaL89{;+7LwTM%sE>3c~XjjFWXX9Aaw$szQa_{;rseJ%6E;}Gn?^R#bal(G+x^)Q^1yQyP3?Sp1*3p8sOe;fKY z=++YWg$@oHwg{FBxKzXM^J1d-q=v*COUR7`zUeqN@B#Bdl!dXUMoz{uG%@w=ck%DH zcIkUEMEcnQg`?aVWNDK!a%9{+-ncS-Oo=eniPE4cU(Sgb6<3**@|GzgR2huLa#Y7( zX=;G9-je#ES2u_j`;E7b^uwK7*V$FdKf{oTX zx`e+U0(6Mh#guEVx3%F*v6gsKHe_B!@c2e5SPN+s7|R9(E-ho>q%5+!ByzC`8B1SR zcid!r8(V3tt{6b} zE+s0vo~D@1e6r5C9{g*n?b1+_xoSHVoDV@YCzc*U$2$f4-&2C!2k3YwsK0-{7iFaC zJ-C1;S`XnIeJ@RokU9mm;-~ShtKQQ_@4H=?T74IjEl5wjTg9=x_`aQ{lCQ9UU->A@{R28w#$}UU;keSxyq2|I z#CbgZTs8s3cJEPJn*Y3mbF`tWZJmk+BxslNRD5 z)vjK01**5-V$(^X7K`^1SbWT_Y43)mcuJ~;rwp(rFNM?Ct{gl(0med_cWD_5XX#I{ z=52bn4c!rwZ(uL>3uut}i*R713_Y3o+YG;r4!aDtVjsj0zC}V!ORiY;>~z`?mW9`0 zl3(}Tq*pw0@m-fTBg6ev{OF2#$}P;i4v-fuQd+*uB9Q)+RVD}@OcJD1aMKa1&~`8Y 
ziL}A!&G6{+2h+Hjsu;i;Yy(aqpiaEc1m_Y$FeM$avG%X~e$w|-Sd$9}!5x#0Gy{Q& zxxV|VN2{>EY)HazOH*buy6Alb6t+t_&&DflO*{A~v05qF13!*Ljpi)m#T66=H$x)-W0ocX7dtcao%L`MmJ}G1q)Ok!o4#mO`TlwYW;oG#eCC?D>QUxWG5BG> ziQJvg1;D6gzU$Bfa{J@aYsjuM%!K#%PUw5hhzf0!BZ7!tJYudWr)K)()Qta0Op9K0>S@0| zIU`sL{vXCgeN_c~emio>nH90nKVsz+Zkalz4Uh%Hw=u!H;C!_)O?d$x(bD5@OP!t3 z8(OZR4<6Qb>p(}EPDkYHHb2Y-_PK?>HI96pQ9rMCtYPc&J=)h|0&~tlFsMoaJ%V5 z_$jxll9datZB_DBEZ+AT#&-g7D1mDmCw^!CrtdLMd!10 zWI$iQ!f^AMg=Xv3h8`2nLx@+s^rnD4J~+al(^yiKcK$=fG+)1W$_ zy~ve@+1?t-HB=A28K%!3tfkW@8|k@!vO)Dp4aLT@U#&e8t4F6Jr1JBzYBv_0s1*+0 zty>7GjWz)c5|aJ&6&>r2k$ARcQI9Lz1Q50d11?sd0v zinI=yoqHf^(YaA(m!ajVU4}M^tpf&igRh12Y4+!GJrYYaM3AYS)eW`|m#`^AsIzRC zmmDs?z7{l~_ncmljpEr)G}7mXF`Fgb&;SGabvydk>ksw=v1K0~k$}o!r?{D{`>SRrd1#Okk-?GIDd-vU_^cp4Y$>UsMuYi)d4vrmFp9#80$rdxLIX#n6d(mpXiNy|n?940+O01H12`Xe-UaJ2gk1n69qqm! z;Rq7-e6eo7c_)Sf6ZZsyxc5>vQ8*Ip#C7TL7C5l{PLD>dH=zV==K4%M3hQ6tHkUbUHHg z5emuWG%4%%w7`l9thfs+Ca_{ED6qifl>a6!v13wm9A|+P1Ekgyn+vhQg!v2f3w~rr znrOL*lfA3=W0xU?9Qih9={pX`iOkO6>yw6bK8{mx49p4UxC>4;-dGM<*%&874p`DG zI*TtjH16H!3$Ij7#XZ{K^lBjPR#&6QO+*3m6CdvRhIk;53`9ikzR{m-4F?bo#DeB4 zP#-LbKFtc!V%+Hjy2+P3^Cpzp$tK2(o`wg#d9z^|l`T^)`VM}eA2{NjRt z?8$=jDU^-Re4kX5y)x~a{g2$w{n$KYt2~4`9i2~O{}jQ2&7Y~K@I)bZg-HhXJxQ!> z!sbWiZ3w>|vajmE{xOYr++!uNBg0Kv zhtAD6-64lK)*_UgUk5quIvXecu&(K^udA=EuK~)gqZtSN*#Gg(b@en#j?{bW9XL3; z4mrC~P7n5%-OvE85zS#@AwItzZ_=tqc6XE5?XFizpm3y)c%XCVLJ;lgvwN8elWe_v zXLVn_13S{S=qdI<$o!IqYktY@Lm6q*Cv@yf*gsYC->qJxmAM$a&fb0?wCrP9_`oUcVmxOSdjo5cncm#Nbx-n<`F z3CHNMDuC8|ci?c^@5`A!^0JG5Vy&N9nto4%bS06Q0oDzn0Hl zZ)nm~^B$(Smrhd$aF{_^!9I;boCpjXY<{{x&6uKO&nBukLaBG&<}N1YF)|021Q6pJ z2!IbAWLs}b;bke7tRb^0{aI!2GJ)3uOx9foAe-)7*>%fU2#XTl(qu5{^U?G%4*8=f z$`?ssv)zrYH&37Po(gWo`(?e&%LW*Z`Ma^sP9qfICf*;Ae+&gAHB_SDPcf9yCt;L^ zb@)1L`OO>}i+M%9HN&UHF&KIXm!uaUP2Z;8@@B?=Zdpp{&xg!$C~taO9C0O3ujgIo{} z>xj?M|9LuJpu=T@UNj`l_4NNJ9r8z=i(Jy=G}L@w)(|#>vvjzI5F~D}W+_uj7dT$& z=3ZikjF>pvkWqib+*@Lt<%p#a&%ICzV{tkDrTyk($XQ@?{>ha(osp){u^WcA#jhB{r%)BCpOxXd9Hl|B2Gb!-GZ>_3`yySl zeUNa$NCh{HfDVfiS{t`=Azy>tuT+5+vTh}~Wx??jnz9T|RtF(xgJ~_zf;W4+9Z2=q z1f;@SoL0%;?;JnOu0>RTk8YEXA{=hW*qd{rA!8@S<-j46&w3rYxJAZ}IgtzCo&jHu zOK;8-eE)qqY;F;Gcih@y!>)w-2pz^ZHL&!C{+CPh4)VHO%#Pd4(=js%H%W`6X-L7R zQ(WVqyd+i$L9CPfSh4uIV!`q{{4n9BvEHC+uorr2tW92zoLp~V&C!9F6z(;U%P}xp zJM1->U63~+z6)y|u~%A%NGbi0bn8-}e^=d&QgF1j2PJU**9Ew1v4+~@)-=Vit{rNz zww{a12!@hN>00V;{jAinzm!>i+*^C zyybljpevBRQP*&}eFb)nS3*-d zu0($&q;BBIK}60w@gw9U|FMtnoIciOXFB zyk-E!Jc(jNSbaffV3YX_6Fp1kdO8ZF2)QtSgu(aHc?lh1UuF76nB?{Jy_QasiC)Fv zX*y@0EI>-81JQ8;8$n&O0x754sWCjZ2XNezYqrO^4ip6217($4c#3%E1d0Vz>8p6Tm>Hy|u#70+TU1u5 zh%bPAhE=36#|Qg>2FUg#o>+i1C4y%k9*e9?^INe{gyj`Kbc9(D6r2x`9}?Z9BOxl~ zck`f`b8B2P}mX7Q2wZi0s$<>~LS zYUfG;ap30SbGtAoSgCQ4osNb`HJ4cR;zM5gJ|n>de(>whSwjvAxhwzSgtR-5i<2&0 z&vh57r#HY=_LTJBg)1hPF#Hwk!U5BDnd@3q6NqB-EbAJi&{qS!3ckY=<|Je30EftQ za8ZQ8G(|d7bfQdgC4JA*DKl81BbEC*#(kTPH=vCP!Uc)&{|~ToT!zG=A>RM63F3)h zEaD+s2(OSt58gBI5ZQPY2~@$8LUbXKl3%crKKep^lu&VT7v4x8mUh$FoGQOfb6C8& zj7$nd8E-m72K7bykOo)h_oCPErW1$R)A$ZqVur;5&B^!b9EOPF4bK;H$ z^Aqx}hdG78v7BbgewbAG6y#JUVfRAOgR_h-n}|;cu@`*-JDGpG8y?@(tK=)TXLbP{ zNASiRr9(p)bS-HYt|@pQqe^g`$UblYm~B4CKvLf*=H9LMRwhDgKs~u0GBe_>;9478 zM0U)y%_|uZmhaX%kvs5llRQ3bhd6zf{t9`EU3#U*_0z>+QofU;`!z9~y(wDzY2DT$Wf*J!tM_ zYR^Fy&o9148P_ry9W-$>{d5{HTLGMx{k#b2K9YZv&QcKnKg#lks{%Q*H~jxQg@!cD3By!Ystn zs)`2zz=?@SY^YjwEk|%YL1jo-LyEb(m@#_9DC5FQOA|v`aLdj^R&{+$wuVkWowanh zjM0J)k#UGTVHGto=jq5b zLp=Fx_R^u*y%3aNt94j;mD_GMQElsY8fUWI~6w5!jolz zCQ_oJeLY46Uk%%OiMojD3U11i{V+OE9tp7 zW)7MyH_B@KqMRjSBaGwY!t0a#+D(i9e0NMpbhDbcX90>tK zq8IT*koJ;3nRqOoj;DK4o$2e-Thim{vGibiO!uUAW1)0tT{JzMzA6<;CNtMCcE^ss KMBmH1)Bg{(cp)AD 
literal 0 HcmV?d00001 diff --git a/timm/models/__pycache__/cait.cpython-36.pyc b/timm/models/__pycache__/cait.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9756431fefe7841e692d9c7b38330140448fc0bc GIT binary patch literal 13252 zcmd^F>u(&_b)T7?eGPY)D~ggRS+Un|uVa%Ir8tVC$ddJnV~cTOIZn4ux68dla+mwk zouN!I%jU64$RuUjqG{3s?gMF2pbr!&+5r93y6FQ2`X5Zcq)mZ@2m+))fd=^y_xC$9 zyIig$hb-(*UGko}?|aX^=bqQSC$m{+=7sy_?Pm<*d&baj9Px7q-VaQ}P)5m6rm`Al z(<)gecB0tbl5%O9##|TygIaH zA$C|D*)s8+QqM&>N28o$(f1y8Au2f@<-8~Q-Wz@IQx{o7-M{6Ormh(30X25ZP-AYq zezX@W9YfB8DubL%lyeVq9#T2v{sdsSm4ZH6y79ka}J{rA|odL8LySX4OeaJ%rQ?>Xdq) zq#j1Z&@eJ|L+_k(yJ_s1Hi&F{D1K&ZrMbs)*D}>a2QJQje?H%SQ2I zYyVcb(5QIcljnWkZTYo!t59ndo^Mtb-NH3pX?Y86-E_5Ac)qE~A-rrUaPrZVr%%$|7ar7fOV`R5 zFI_(W%8S>^S1+BP3np*Ne)-zPZ5#^&Z6@beY7JFxVy=xc@h(>v7K7vM8?J6tZkF9u z-_<~-tlWjlO2cPq(GIX|es$?mbKX@!>}sPEq%P`q2boJj*4HboYPp4VMl(z>r`^Sx zhi<|_0vjvu2Wi{2V3da-iT)h|~o4fnZ5ySltnGCoogj9;rXma$PEb}LHS8dc)K zpM{V?cmcsH0CaiG9c3t6#Z|&Ld{ZS=N~PZz>zZG;KrCZ(wI9R`V;jNP{w(@gLB5Ci%sw?cJL9crx7>97rMB{BxKZuamGg5mey!P@5t{I3s@SgS&dtE)-XBGO9+%m$ zEHi;W+f14I5v12n^pA#WOfSq=YMqt&My$f9#+Tg>dG#n8g6?YY7JMgZRX zYH)Sm4NjddpE`A#o4frcPE^qea-^T~>Z%v+`>r%5vy%=tWmlTz*|0sEGntWOzq6gx zP+2(XsAh9_Dx6;xWH$Km^zi4Aid~~*LR+v@YQtE7PN3qq($Ev20o*c5_J&~_r8rne zT2sH+P61s0K7jnHi71l9m&cKe#rr&fZ}g11*)vR|YjusX)it};F=IW^O{myGFsgFQ zhZcfdvl~;^Rt!0b+j5e(<)o04?%F*Q^__lck0XppNnA=Y%D{~6xrn=c_gU~{d%jlj z`Wb&$%m#691%fX~mBEd$UF9H6#zs!(p^U6Kuv)Dk@nWq75fzx%19LerZv;v5{q_p@ zc?GPztdI(9VUuY>$tWfR+jAQWK}^+}`ZU%fNL^pPQRW=#Zg(x?%C^cM@q;Ci4dB zSt{1EqB+~W*m@duGTk)lFqVyCX_D}qrs&jMlWi`NaAR=Ni_G}hTW`I!jqsNK2$IDd zh+v7D(ixBJ_51@To-h8I!=g`1E@kc3(Xk*7+?@hAcf z5X2etamzu-BkowoKroKAc3_7@qBccapG1Sp`V^pmVuA;uRcX372u_)L%SwasOu2l0 zrP2sXgdX*)XcS~4a;>_JMxSIkIjrUtb}Hmcj={zB$Z%4zTy_^1B6;%X5!V41g3SU# z!=D?g+X!(t;U<;6;0SkuS_AHsqFMv)losyf2zSb(ljwNo&s~mU$a?a^@gM%j)5o5z zW;qsqIfNvFu&Qq&GQ_IVq);#utLu`2fHX8f6AJ2BtyNxdD?Zd4FBo5{Ddi4jq$^dw4D}Wq&nC;d29}f6?)8-# zs+Siv^t-%%n*EaJ1aT^}C-f`K7$bnO2_E)5`t)o5%^=M_%1{)S!m6`D42oV5YeF$j zzf{p6B%FyL?zf@b2X?!KIVBe>&1R*1q92(p=Hvv(@e7$`8tu!Ps$z&}nM0o=Am!*^ z0tg&VEkeZ+7CcWCI*_(u6iWe)exuS@!LT{@87+sf+K@m8CrJ?Qpy!au+Bh?&1@_9* zlacS*{Ub{G&Vd^6Tv8K%r1SyL{R9#|`7FfW0=O&$9e6JVVx1x6oCd^TtS9u)GmjYS z)}TCrI!T077hIY2(k}rlYoTH&rM(Pu4V`je=ye}RC!DWn|x%B2@y3DVMf4*Ms{ zgeC{r;VJqTnUsAY*u_bBx|;|xo0Og6&SXdvgC7^3zy0Yn6KE0g!tYCKfII6nzk-q> zM!`cntkc-LbsA+&8qjNE1HC30=`7S=MC%cqB`qmzxTHdTrk|od15r4n&%}51nZDq9 zA7&NtfbT-5**U-=vk-B_ldC@n5X7K@=x3QcM?gswBt&OvMBmp#$&?&9mm^$9Uq;LR z_Q-w>l$9@`q_3=42`gne)?qVa<*b~Uv(~0YY}q@jEM(3kPwyuHx2r5vQ*cD!$&w>P z6(t50C0@5ha)<`oi$mc6+dT^QT#sWf;>t!muAt_`U^lNP){{^n65V7SYKBUQqCs^5 ztQ`GN(Lp~w=$VpYpB5CKG)iGks{~XL)@J(cY8!Q@uYPn>P*xJsj}^`z6cJk|4iFO% z7mQ`1n27`K%!O9(hp`AlY75CcI`sN^7E!*28cSBw5ujfupwbx9z#L;j2p?r^mEa|U zn*`?wMpcrKFMOOuFB4oNkZ!39?dT^Ve-Q%Mp#o|LZyMXKouT&cH0RPap^byNC#MK%-7J#BV6|aoh*e zoCa`MPDVDctcJ_M49vM=axZe-8j|aGIG5ica-5wJO7kNl(tNsq3hyYywU35+g+RpS z-laDW$hQ#gF1?SUlt1t503)`5I5TMu(v`_SH|EkH#eD+!S)suH%lupPvED zspP^@u^7fx9u`AZ9Z(an7;^4-Jzqbd4ywspP^U^0>X15&{DbOV+(;aShksHXQ}-a} zkUFm3gYRLnF^*sp?$b0K^rHaVdB$jzi$AqSV`7^=!T}q&7R8XY^`me9!T)@Irk#|h$g_Wx8AG9M~i4 z$NXd+RtPL+*zlX_9yCjoj_u@jj}3A&$jxr1pr)`+ZX+4Fe^UN0lAN@~PCixoP}{b=`h>ScSC$Yx=3ctYPbO1B;0UjvMN^)SG_loN3^) z0JkEYvp-~?Aq!c%{{p?xeFJ0q42+yg!#j&ezxJ*vaV9dQr}|TR3{#42CN{@5)0>&i z_@=dKcg>^5rn6~mnwzQ3q%yWL;gpMJF|%Wa-JPKdFBKj?!cfU<;L+~kqi->u4MhQ~ z1-XHvOi&LpVx@&9oCg3X#9G6g2(*Fn{ zTs!;D3-bgj$X-2!g|VNl5ky zUT?P=H-m}c^$80!S4DpXAef{*8`cL$Pqp3;aOXB1Lvyvs9wZr2oka$UT`4% z4lkxncL97JHC#qzqS?XY3rRknVo|ZQa(3b z|0=;(Sv4l}p-f^hnx>H!&Pk>?AgypbjuLVJ>3HPK$#J6w 
z{;vq$69C49VlW%JV7xI6Z?I{!o`~wfw7b1@ zP}_!)$Hx(r3^1=pNL-Fa!~=f(Y%vY4*B$PocNQAVaX^{suxfh7J zEtUPUC{19GL@zL#J3fxkP5Mn_+YtC(4iTD6IV>Xm`+55JMG6QzjL31o-OxntfMwNE zNN|moc+U&(&~GEHzd<0p*$hcoSmuCl2IH-EIqVg;-POuXJoN~9s9~*5%E%)Xc8$&P zWo)XHkg{AwXsV_bZgS@lbBiu+czJs)ce@7$1Db0GWx(C#nj)~RtdrU7yh1p!1O3Ia$36SG3+Kg>xvw>#VSo9JhU4dAkwy@!?lh_R6+%fnx~ z3k5uV39U>6w=-*p1c}i)148FY*$9HcA0tBlhJsXv+fOcJB2Pegp#jYoJ_6u_G8?Wz ze&Ic0oz;7NaSu|7`c(ckYn|6JQQ7s2uNtcbDaTa_$|rWp;cA2n^16D}ppuBo8#*T8 zaff4)z4v_)yDo;XdWoT{6KwEBF3d;z0MhXV#=k~Dk_!^On2Ui3< zy#xAp36=CG4!0JO>=bizAodUt~j?$)$QUz# z%}#XHHRRPAIq<{P@w5Y~dwHQX%dI5iH*g`Fg0!13Cr$k~NM9Zx&2vt~c{aSK0lF5? zx-uY=Ch;@}PW{iDz6J3H;#$Ccc-J^;_z+-kz`ywo>#HVCK3{kN*=%2}5(RPX%&ugi z)%FXuW~bpcaXaIx0w}8R35Y7Z1RHt)R*(hrrtt!5Z0KoIFxk;AE-P`%t;z%Za32^0 zCk`?nU%`b>)4im1Tk9l{5FTpRZ=wwN0==jD0my}nA;wE?lEKrut!UDsN z>>4%L$wP|?x9X?SZu?CFo^8Dbu=rm~-`o85PyX|4F&-*-pJT}%5}YRZ3PF+JO#m=J z#&Pt2pIMwCZdBf6oY)4AtaO?0k?w@KG`>QFg%{wOb|UQXokvrDhm-mTf<2>2x!OmQ z@w=g^|B)5%u(O)2=0@;i-n)d~wj9_!z68J@qB=yy-}O`~ETKgRcuA6wN&@MGS)uz&vt{APymV|@SkvDNhv{FwJ{;V0)$ z_HW?IeH$@BvDDu`#`ljOTdj`Z$Gkn^C-7&N@{!wGA&y1kPymtw;Z7$oMP@6@?K5C5bA2qi6;s|QeQ>55Oub3a@Km`3~%|Acm zF*YF(>-uuheB1x`2FY?+wX1l%Wy^bdnlhm!kdo&()2&wcC?maqXX`#ZQJ#*WPLSps z=xIKj-g#dgB$wO<-ne<8@<*;1B*ahUX)!N%MN>3` zhmjYg&V+`-S?aJ}7r>5XEIg)!U6_ZXWy;LNaxrHtG-(oi>WvV>JEy|(Onlw+KT1? literal 0 HcmV?d00001 diff --git a/timm/models/__pycache__/coat.cpython-36.pyc b/timm/models/__pycache__/coat.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a33e31c10d03c8e8e350cd91e63d472546322485 GIT binary patch literal 20181 zcmc(H3y@sddDeaP`!PKajiixwxA);)&8{?~=^5`sC3dyeO8fBcuvy8w&}F;b>U(>p zXZodc@8~h^i8mfOQXq!6HLk!5Q;|}61#vTPz9lIZbC3g0anTs=1~>O zMFHRU-`lqznlZb07kcLYea?U0=lthC|No!=_Hr(lUi|0-m!58D+V5*ap9I2Z@VGy( zYnrWlgJZmrZ&>$w1hGn>5XhT zhj6@^Z%vdZT7_}}xf6D>IoX;jPqn7Y(-KcLXIcl!2U@e`+1A1G!PcSjq1NH@VM$9j z=UPX~MJ_KbbtwvO-F@cUr+eJK1s z%x`<{omlyS7d88cee{NAA9d0j#a*-fATamY_XBf(hH0`q8ydDMQ)K6XPZKW3Ng$L%N3*JI8Rd)Yp2XK(1cdU+nP z75jvpli1^keb`>G^AdXku_x_CdqQHz5qru$X%{4R!j4_g=AYjFdNF;rQ@vQMs@i(P zb86m}au(9**Q;GeohqK~oVZY{HXQ`oS57?dc~0AFblTNs@#R)^%_&|~)wWyjsFtJL z;)&wA=XKpvi;Gott#M_cqt+Iym)ynD(#e&DlTR)`^GrJZN9*-Qt%35=Xsu&A#j1BI zD!A70*0(M#)Hu~l>JMP^N>U+dV7dc(1cb=7GVy+#XJP`v3bUTL`KQ^gzf zWubdLeU}6}ulq^cak{lm_d3FfZdG|`ecte67q_}i$G2W>xZWz_2VQ>d`4`Wvp1W9i z;oSM>KlbXy%4_GIU-f71L0`D|!d;NLhB7mkwi->lBHgH5Mb|5}`kH^VbH!23>h+3q z&2tpys$x6!>Q>WZ>U_+{++$I(=(uInhzPt`hAui`b@*TZ0|(Qf#8!8Wm|LDEoM(kBM#F4}1N zT8-o4$H0@t^HDr*5unFyoVGRFvSW7K(>$G<(N4XS>FHlEKuDR@#y3q(yNgG=`wf7# z-}rTYzV)1+xYVd^;kmdy+2zP94u`(9&{drc-&%L7_I$=Sw^Y;5w6|K7T9c!4{dA+< z-SR4KW83jl-A<<&1d}yPR;63>{9L1r+30qfRg$D%s5h=Tc4erP)pDwB-*i3OPuCmD z#R7ne{j{_|T2k|-I)uKe!+kJss5ryqXb2z0mmlA}T2*WAU)9txM8EUGqFwc>rN!-y z>x-mcck$fWvu~D8mP(5*_D-ecc7QL)$`KZLF<(t!jI*d)@0`)1GK>C!PrRf^wQp71Ot9cdoXZ zovOWf<>bQB2{v@%s>7l#dL2^BiIu08Yo%)G+7h!N?ZSb1{k(#fZ)PyN4XQbuV%_4G>VH@UKU~Gu!g9vc z6lh3I6U-1CAeaTH;fX%nu>81B0ytV($N6p8dfB|G)p3g3=8a_8a$-*0iPw$8U;yvn zthVDfta9R}W@+UlHh;p;@qBp0X};cZ&$VmYPsn-EX>Nsw1*j66^ompLcH9PWMW>Cu z(`c_P6l<2WuW2cK^I?zN-v#irU2Q|()pX4>dZuUY8tRdr*)tmYj)`xrVzm!RN(?Da zhM2e)-_<^=dx?$YF4$HjwMLk}cBN--WOlU|v`=h&+}N>pVz#~$?^!*|)(>gDIN^+T zP_vCWZ6|@!*uCAjk$S%2FjN!{~ zzNN`5f@$7huJjAqu8!X1?Ko$xx&c0$E{30%+nAYZv#~AgY$3uk`RqE5;`M5~jpMF} zgSkkqIhqAIR=2=dRk2f-WOp?EYNHL7aH{y0d>6@9kGb>3M!QI+_+6F?XZ2vQUiON$ zYP+~z#aen-J4GJ-?x~T!vcF3U#pm0o+jG`}4h5}CtH+C1*BiCEdu zA0F*P(d~kLxW!|8HtH;Za6jGe7i(BLwRa80C5(44<;Bu|`AR$Udp3m`zuMpwb-T^$ z7(u=6C&2uH@kNngJPF~vB$E;u!hV&6%hZbFKI zfY*I?oprH4ac%qY(emg_X@Y8eCZ^>y_OzRT&_mV!h`x-I7!9#pIrnK2tq 
zxd=7oB(ty^n66YM5P>QX`lM2sH%0dR5j-x15G|WF(nb=3LRwE6aU%_}VM@2mLwZ3( zh`63LvIuX_?mew&#^T0_crT9%lE+y*ZW>}>Pv6i!Zjn#!#CH-s6Ff7ylk#+O%3iXU zBCp&uY-1N1UJqx6t$of2A!7Y--8!u8qem;l3<##4}xs6nBqL=OE4};4J|NY`y z8p@}8)FiFj#!jJE=(iAzVb?;-5My_lmhKg1w4F&W1KvMrr-7U7O^RqMJ4YSF+^FLK zm_5^Ov=pV~yZ;NHyZ;kFz0CAim>!p;0B}}^qM|wEK)>r$uqKfRp`VHiPDaO1C`S_f zn22`BMlPhd?taRFbnK`qWZq|4=kr9GwW{aq=Y138G(V-de7=6!*RQA-CHN8bf_$%r zP6!y=~JK4XMsBk%FXKA2S#Z#ERFqyUEK!XuT=bWg~|># zD#F=Hdw8KxvE6}wZbRAgT$awhdO!N&poT)2g5PP2(D(nSJnsPoK3Mx>qqID^Nf zz@J3tvd08hLp1ut!jQ|#hovJsBPTnk_C{(%K1xD9O4;cV`6%ljJYTJ0NufYjMn=8; z(PEH-&1erXa5(@u<=^FCj*jJ`C^QSjyQH=A|DoUc)f%-Jeu(EtM6y~(U?5rb49Fu! zPw$D`(@WUaH032z_8pkJZD22&kaH++#eq%qERkW7{T#cjAtfo0dJs-SLeeT3Tf^Ro zt*)Ute7^CVXj)`Eml{>KkNu_jtoksLWf}bx^*?e-7nD!4gF1~gKmKZ??NpU-Uf#Uo zC#WoUw!pS=+_ft%^j0W7t9TlzpHOR@TG`~xCzDxIH&E#HY^I}WNLP77YrIq**r`k7VH)f zV!8$xnQYFL~xeXn*?tG`1;wP-is31R3F24 zIki~{>nx}1vNbCpr=V(C^TEQCqujyc(o&#dsTcI4CJ)!7SupcpZ)uRh_5=HD@P$5G zizzae^U4MkusWYopJ3#Z1Qmi$5s=Wv7@DdQ7xk-8oI;nEkn}d5h>hj*ia53weK`62 z+{?z2!p2P78e?NIurV^XVKp!1!&KP566$!{V>%wtfocZ@FjVa#fp)dyIj4#j*Bfpz z;O9kz+(rx1rlRmo=@s10mI|P7+@mIng#Y@R7XxWl2rSx2+)zn8{B{=rXrAh6Bv;WL z%?%6u4IB>J2olR2L$A>Yo5MDjQ!<-9&+P8r)%L#xu?t9o!& zy)XNdB=kkLFJgvi>3K^LSHly1QenvDY>bE*Zu{&BjF^Sl#>$U z%s}`^i*Y7H@zu{~4Mm5sjrwvMI}$sz(SnJ*1#MGI632^9 z6?!`?&a~eL#XD^%rP#S%71~#^Dz=0Zvan+1skR~16wjYuEgpOA)z|04&A(72_r|Ai z=6o2^wAETRK+3@w?PqSTA!!9P3TZ_S9)uvJa9=}bh=YnVwjS(iNHNByrOtSyYrP#G z*n*;xX%B|$o;d?eVJGWl5zd0LLGQOM)SC6szqsKK0hS!m^DZpF&g=oglqfx;55P%ibSF)B?MAX@?})( zH9XjkSxp7D^!1=JzAL-ph*f;2Bhhc>`VT4Q2;VH>5e zjnayre_n1^aa$pF!yzVXfg6^5!exOg&|=B6Xl!h`f%1}$SgyM-&sBedg|dBS3k}p& zKMd5~(&Qf?#XSz7fxYG-zvRJS0dZ|OfOA2LaZEoB=4-$L*xxpC{0~^K;m3pN3$)U{ zhQRMg03}=NvtSa^Z4*1w*q$q%4^BJU(u-T&6R^_5PQ}X{*nP92xmSiNhtiKDvVT|W zjPTZ1wAw1f@{TwcUDS}J)uq1y8;dj^fG;0icKQYw%L!(PE$TZwgG!` zRX|L#JsM=8-$NC^MHY1TQu{$OyDg71+O+6F|d4~?)8op7NA zr9jL)G~wb5z>~Ng6FUM8wQ(3u&3-B5qFp7 zYRu_##v$E;-C)*$+HrW476y~zV7J6*n|Et*7RQHoF43O!P;oB?(>xSGQRXNj84z@+ zzJc8%L~}U~9)xJgF=5zFVDB2Cf(J$}G3JUv>`h6GIpYwa4Wz=@x09227V(tC^I_Zy z;}eJ{dN}k@H}e-HJ}5sKrdwh96nc?6sP!fwrln!f&m&wwID;NcB0Pm~7RMzmPf$S- zqpeBoP!c1(F_x7W!?0F`qfSSo&P1cmvKAR}u9petB+t_CsTC?0Xt|ENEtoPvTQ6cM zLjItg!M6Pw#D|fEGxHPbM~VC~fXd*#L{dM60E}ahd`0?|)2RSr@Bd|yMoTIFL_QAk zK~CnMv3+24JM^XH$$h0|^&@CceV*V+06$iTvx_%mHycc|Y<2D*u?2iLTiP9D`X(MX z55Oydq&`Jp+6W&VsY{LAU_s?L+oqU?%K2 z`v|@Z_EGyjd{5f<+YjJ-O62HiQ0*hWbr$Y(+qux;j?PtJx`)y6yFA`SSQFPRvDIGe z6SuZ@`#=88pZ>$I{lc@n?Q**A`PJGKhl>ySQ5p}$ZFd1@?~r&s**ygA4>Ox>zGLD7 zZQ@oE3VQ+2yk!Dfw@iEXu!c~qX8}Sv4{XOC3`^W?NRJ)XJj?)W)-mMBcuCIR4V-^; zpRn~H2OzB7L;14Emm1559LNU&cqm^E`O;(gkOTQ3A`j)uBRt_1HYRC(3EJERcSVn; z#!}Eqm@++7FsL=c91;v_i!g@;gIXfYoOdLs z=T>}9yA{V3X7)B`{HWkL6QlV11kaI=;_nwcY97Ns(98XheoMD=n+6ny|Lzr`Fr+pf zys6#PZyGnvo7Tf|Gtr%8C#)KU65|jQS_INvMLwOTCVv;631LldL-u!A%10YHU%HAEb z^ETqh9Vv09`@^y(1vn!e2UXB&I4u-#0_NgiT)&8MEud!Xy@GSF6UX;ayfutAciGc^^UGnF<6n6u8 z5Jw75Tt7+cE`=dK9k`B4I0<_joiJS7iPM#|!pnH7QulbPTmW*+TgBGqLT?ou=ZXzd z=fF|E#IOi}qJ#*L8oETpf_VYUC-mEcs5{GHz+N zbkKZq1Dt_c#O-w8MT9nG`O`tubyTB%h(OG+Kfur?frw9BcJ(I-x&Y`R?OpI$=sHV& zjCZ{#oKiiR1gD`HH>o zGm&JkzsH#<1v60^oeA~#Ig)=s@Jj?=A$XS~zV!i)m==!jb;QfTh?n2L5&zf+IAUHb zf3G862}Zo~{*CyZeMUTi-pc-_+%5ZiA=uyC+qdr7*Vx;TLN=g7+1l+?xWjeaeg71S z2iGe?bo*}ZpGH1h#8CcLe~;iF68tK`*8twX0soc3faguQ)CoQPTt5pfUyd9);Hnd7 zcJ0$MUwZN1ZvXgmI7J>|UtVTkR)$QMGs?k@P#cbnuzZCVD`8Fj%OcrxewJP@5$Zlx zMjomtUdz>ws5$f)9K1X+5-W|wmPcYMeu}CUUP^G3*gO2#wxc?Dy&}3pT^b6PhQiB3 z;T5^FRDYLU`elM&A^0l669juKjQX=o_#*&*3=VELFVw)p-Jgi!L5=FBG)9v#7y!Rk0`JWw8{R@|Yj`7r-gE2h*XIb15Rjhzn7G%% zaZKD!!xtq&?OXkAX8T2gze6A^^c95sTsR=bC|X!6Ijx_MZtdjY3C@*& 
z%p(5;pq$}h6}pV7$AAH0o#$8Ty)kMLS-3S{AvxbdK#QfJhV#vmIF$3anS@!G?xMV% z8-XTlRQxlifX5#$$>ooi)_CS(GuFKjuBK3w=VDVPYo>}{fW<3sWDOdiA2A;aP1SIK zz)J!{f>Vv+6hHii5*ZjeDUo?l^*s0;z^nfw9&Y&H?gW)m#LPiT8+iC{Y^GEjd1-oN z96k-Y);=EAkVFmlq(-?RATzg!r0`Q4-_V%8zJvo)a7sOec(5IXfP`$~0i`GhX}(3; z5pQDpA$t|swqQ?`7<|1oZaVorpnr{P#LZ6A34Dn#>*BTxrv%Q_Ffm4w1^X3E7mdWC zZ$rme53E$`p981<1;K9;{7Zsk03fJ=@Ipl3wmH%6RDxk(Th*%9f0KQZwK2A5q#t7$ zMtZ{DuvT$zfK`r8f4E-NWpC}%Vt&m5H>D*)Uc&l%)S&ptVBn+a;&?ma?PK;EeH;Pa z09OEQ9d~L+NjK;Qs74eV(2m@!1)e^}Y9Lw0gFSp2r2}s8YYas!q3_+pjqYdGnw?8{ z1CMW@jV+;Hz=i+FP=1?3;o@l6VRUfCV8-)ABVoR1A!Q^6Z@V|-eiF(X-Rw+{KG7Hr zjOs9{L_Rn1?djsnZLlA&c=pu`aG`=P49q%Q4N;&l^Di;XQz%t);u!*lS z;rjqaZZv4u9^(4$_<-?F9^1s?x&MYkjU&Hikd16l$Y2NQBilQMY(gb-2ym(7UIH>d z?jR)0+X+yPiDA9rwD1Du@UX(WMWNwfJME>X`DrC3*|~ogk(3d3Y>*apj5aWyvC=UQ zD}lMkX2LZIDQMtp0E)t8OW>3#?;U}r;CXQEe&mY5|9plHm^2gjVq7Jxk$L&aw|Mmg zEDX+Ytps=CO>oT4!sDE8J6pJhi6NZYg_|aF!bFdp@{W+SmJ-voi91&fcf;_#m)5dT zYw(Vjix#$)y!^9aYw(BXrHizNbUU|4Yc%@@CUKhlbJALFhvt0~rg&{5kJhr+iU>`h zwOJWkwwLSW!?9)W=$EI$_GnI@=uJp_$YJOAXpa{Kh28{m@`|7!?M?1XA#C=DnM8X9 zX>TfOuh5(9O@-|h(B5am_6n$Fx;HKDA%{J&M|;z#a~kJD@{XZC>CJ2$*qGggLjYoD zkaCdU4Q*$}hX3Ejq0Knr8m_t!KaKaxBiszGC}!*ga9n{P-!Z&hFk>fyOGUWTy=mZ5 zr?uY91+Mtv9{Y&*4EmmTbd}gS&^vHJ!y9Pm8OGSeoN60$SX)^``cH2h!Q9InOYA6S zMrL5w5J%qRP8M~N#&7|GG@-PhbWkeDy=K9%%&Q?27egp<$GBX_8xVszFx9uv)yHKp`8^-b{-`s$plx?`U&Q*;r-dkT& zksz#om)U-q;8zH~Nr ze3!!SW%*9yrJOasKmg59q^f~vRS55~BJGYp9N>Nd_2LBrT!T^O%R)*{>&1Oyvm$4F%nTXZni%v=QV$y^?G1ZwnrJ{F@2?aEJ#I%XqJ&6O&RQA2~5; zrvY7SidG;%|pg3}9o zyPXw5qklta1u_rjb?7ZC$TXvfKqwW&@2L#_DkWD08M@IW`;;IA@jbOJ@!3$gozl_jJqz+0*xO$YenbK!=lBt4~ zG}Bfxp0tt`#eC6vP^^k1$TA|}>c+L#xhnFe^9zt$@Hz?Je1?#YNu}OsLS3)a+ZRX%1vI8X z)ttiAkYo*zzk>dzosT4H&Ig~5;*s~+F(Rr*co#s;;3q3EUR+uk&?zD7)#*J9le7n| z@g0oyCyXD_F;`TgtW`hPH^@}z;=Fx&BwB2Dyn%Bxf0Cr5if_uVkPQDK3f-MyFIs?8 zRA%TWK>V35OhXDD)8HD>80X`@3BAY9zOe=N+6vxPRVj=_*4)ny)!@pwaX*r{QiU@% z-gZq7s#N%$9Zf6<3SNLi^cGBT7JkKGX^#B%L}f6s;3aS*ONx~Mzdu2*t4Pr!jZspT zd3^-|Ppb!NC$$m-atBX3{2|1PZ%`Zm5R1PP@98qL2Ie2vvpubWYhJ-Xmd0G;bs>cs z@BSo!oOyStRLILRaMBg4%U6i_eS$Fy%M@{ZTNPW0VJO?Dh+^kKo=lO6@Kdr9{K<{I z=0wVFMy$rL#OyJpzl-q?M$M617%|6f#LloLFHNH9wZ?@5#jw`4j%*NG6#PM@mFE^7c75jWcXPd%E#F7bK&xz zLC^2LO~4*D&akgC5eU4?iI`!|a3UCf?-RjNJO6(tLg9C=G&R|ug9gvr_c{>=h9-jH z_dXFU^@HEzMDQ1a;-Cn6W$-Vci1Y|WaY7=G`w z!BU^UcQ(Lu4oZFJVedLW^1q-yg%n&v;%BE6N;??7;GW ziRh~YI|O$KzD&TQSvc{&ze6*0_77y{u5AJt6WCz*2o(pG}1^KNi(+89*^W1Q(-$@U76jT)wStm zR!iNgG7h(l+JGFkF*X5XmyUisfbaV=Yw4Ce z6B7YccmDadeDCGI|NqPPPE6PfAAV%%*atN2U$wDc3gzc;1uy8D=4utq)m@{ZS9G}> zb+clkWHzj3s*=*<-gMopXDV5=Ta8@PuGr0dC6B(8n{E`E6O{=mXBx%kWM#5BRheo| zSEidYm6_&jWmfvKjk)H&%04OQ8vC0EDhH%&Hx4!rRSuzi|1O?z3+}`ny>i$)>=xb0 zdphn@$$gr4cjh&-@{o7vvgXdZb9Xd%&NJ&rw#+TP@-W)=x%<(!KW?iWMau#AAX*M4 zEsvn(kb4*{hm)3)`=ooseHd##>g{u%a*w)?+|e=HW2hZ-OYWmmdmOcQxsSPzOYI5Q zxTclM)$ahEr3>xWEB^J;)wb(3Z2P&Y3jJEcyX~Glaq{U}yS46z$L&%n^uvbt?Cp5) zsnUzhj%u&_t(DR%e&DxTrR%EN3YOcd>8YS}(+^im*V@Y=Ctq(jx*>*C8>JUpZnqZt z>t5-+Uk$vV&zY;;aJ8+Tz3m=57u4Df|9I(Ic>9>w@2pl^Ua8u0OV``ob=7ViFTK!R zX-mr$uZn$z$4ghMVYuqul*X%VRci;wOXrj?_4B@x=`Q=-h95t7t-8|1JXgGRuNBJb z^>$ZbYtP=Us*n5YrH=AC%5Q}wsh&Fd^ojS*KYii@&wNk@Z+KN~l!)ouecOJi+VNDW z;kVXKl~%*B6P#LDsH$83`g~ihEL4|*h0*i&N0h(f6A3jSdNh6osOm1w*V@g6OzA3QhW4hX9MNcu(aeg+BfYT%E)3_k1`#St=cFX(Y_b2p1XW;@#1yo z`HPp%eeBA0=jz3Ci_zSD&DX9!zXSSfczkZD>o;7dNfvNOJg2t2QcgwL=T*D&QZ-zS za$(y!8Fo7jFPaEdw^egmpmnw3M8HB=TfNv^^4!S0+UQ`qqViV!AoNr`BUFa0;L>*90Jy$6)#1Jzj~kl@v(_VP)vLPYTx51tBg(hBO{dnV24KU;_FJ89=mh?z z7v(zbb|WrYO|ROD%pi25sb&9`=Q?BK>}6jCp+g!)woK$N`<^PBY6|PC=>aGkin~%4 z0LrttMd`JhRkad)Q&SJ4`ol{LZZ)i)T-dB{fab0jEL^;B;T5pi$%UY+fE*7J;sy)E 
zTDx`m+~Pv$H=7GWt{~x(`OZdUk!;5>V?fMlhM}kNm(|rHsBOM~z@lzretD_tce+ar zzqaf*yg=A5=C))yC-hqz^POKK>3@U4mASRm~_L??*Ng6_Sd~ zJM7}<8sBvAqn+R{cp1RcU>~%K;m+LEmUS}BogC~$#?9W*DwdmbZQN7P!g;p3}AnaaikTdKou83kG+rUK%hJrt2B>!Me<8J<~Ps znP|-pT8Aw;wAejsi*w}%y<42Wz?y6I45>}XxRi`5qC6Q+)u($}-PnSiz~~v&XMrF7 zSz8ub=3EVLn^|1J*71Ae><+~df@>{7Z^sPI_Z#$ox4bV(1zl*vDC8ET$(H;i&n@GZwh zEwV(4vz>O}NGV4ccjaEHsw%_;8%TvP1FMnMZb7gMD^)P^iUajn#SS(-)efS(f|m1J zu3z(layGI8ud%F-<8{=J0YupwYwM1@=lj_9L4tYd;LA986(Az##LB~QM1<8*j(Ru1 z8C;^TG{a8B4|ij~K4TR0BK`{coW40hHku!xsz&VcNB&9Ux_~SAMSz}O*FItPjlS6j zUFzWHwQTSdOLeHjL*R}zQ=KF&d+;DYIb3z{)t25*p=7aCPl4yaLG^TxbEFSzTNb#( zaOpFF-u-kh)k{xlb^1!?mXVCNw#+_fgNns$EZIp(2XCYn6&XNLU8`dR)OzJSsJu$6 z_$+GC5vW=Bqi6|c05stKFfLnvH|{q6HVZV78YtN1tO$e17w0=9fC>gry=ZGJq1!2 zuw_s|Kz|l3nO+uzvXUotX-)SsAPJPymN5n8P|wY2{alZ#X~Iq4(^1Z%T$D2B$kz*4 zbrKheYP*?mqF(ITTlzt*hi3=1UUp79p!M@T{M9F+;xJboif&7Pn5`OALmmW2FUUNz z7!y~>EOw1l-w|7;i_9b{;LWo;|Bave2)YIA{MWAPsAE(rbp{xrcw0gVP&06=po9dak!(VB(efS|kw;AQSt>8x2^C0vT z_NnpbgW6#6^<7s8*-X@9&Mk% z6&wa2m(A-l_}h=af?hCY^b$(owi)5JT{2LkiQtc7PqYfpeF_Co0(=LqK#&7^P=FEW zY$5z{wa*&~Of8#-G}oNf`c`Pw!DSF}bD3)2s;BwfLmGI{P_ID5(S8k_cn!~kvgyTG zcE{ED+09(5d3?3ie9xIxArl0S@1_nA2uU&nlnTPrp5G#>EqYGn^a#O`t-KJ9wbQJ2 zqO7QJmU1rK!fFe8H?mmVgUD!l7jF1)11krVM@^ps8V~E1o;5c2-;YQR9y*T{-k~0S z0~I0@Iui&anovvA_A!F%>mfxl>l(8f&UiREh&uh{?H&l4D{^kp%OLvQ6Lg7(M{|a5$q>t=-vhV%agh z4Nn9*G7Ak&d|VBBS7^bj)x=9iGEX zLl*cCR6?NL)ng)uAd5>Uj0Vr@o2Z*4s%YTMmN8ZbV{91*AzzRv8dB6qXwl%%@%IcV z87$F|iX)dMgpP${M>h_}=t;f!_SqP^pIPv{(T$WUK{Lp)$fm_eOJDN4goQpL=lkHi*xgzWlD-v*-8(9Fi1R{7r*;R^@?dB3imAHi~ zVUGed!4pud2XTK){-9dth8kha-zcZl6L{{DdXlpWcnYPmeyKb`N3{i)5ss?uB;hB@ z1;@G3tv2Ewap=@lJ}heR(=1UVjZhV(uY0Yatz?tYMB)|JyhdZ-8S-TiM4jVf;w3(g z5)m8}0kVds7iY5h?3A6$78UzyxQ0KP1pa~(0Kls?-=?k|lwfcj2~z3%@tMTh6O83Nh-%AE3=w3k1YLWWqgH?`2io z^S#bt%-izd+2J7Z;h#fwKnUS5p$2{Suiy%}84XsN0kaK%o!zVyBg8)Uf|QEmCqh6;cKC(hsmRN1IrgesycEA$HW3A%q! 
zWfLeDdwDplX{a)mESCDk-UKaTuSf`0Hrbox3jL|x6r4t=nEn)Io^o@&sh%A>9B?(J zd&ZY^wZAvbHaHv8iL)`e7{`pqfUl9;p%zgEMULY(p#Ry(s&(D!xFhls9~$bZQK>u~ zyN{n`|3!k4`*@jEVVsv)692_xiEfK(6Hu+G4#5qAfS>{ZuarSX>?hKz+v_ZTmTj*R ze4OAU!6ykq0@^XPPH>ap76IKk{yq_izPs00{*}dDzAv#1yI}A!gZ1EYeS5v?8D}%BT%iHwkVNP${WT6NvHYvou_I zputA2O`E`-T*$iQFvg6E)=sK57p9pd8}!|S9(7X4ml6Gm8~f6x141-K1jm`(d4S{ zy53kru38J7MirhjJZhw<%9A7G6{VOVe>%!`1J7CM1XVHaar{UMDI!ViT9kbev*Lhd z0|63dLADA>F?sxC>}e2}k=aCYeIjOAs?69&&!N>($a1xn*t?%*kMQ}=u=II8X>%v> zKGil`kFd4KCzCglq*m;s2%iaWkqiF}SL}BoofRjv4E+c+61lseZx)3iMiz3A$>N=^ zg;>ZRM*$RIKug=m=mk{bA1n)OWzs`_kc`!+2TZ3g!i|%65&64|xzx`Q{44+x>StJ% zpnfPJv7bfJ`5rD24GkMU40|4LcOZ?qy@E$K13BJ{S3VZyu8^*az}%P{_!g4*IXwEO zgXOYDRzF-+Y}+M?Gicy1V9!Vrk(Q?VO-?l^cgzjRU*2O}PqU1J#pO7aje|uverZWh zOTwDozdPg3-T}K;^6s3w4{l(=eOL|;^~ywiWLPP>54lIsGbs{11>_!$(ii0TZ}Y_m z9OT94c*yH^+A1soX(fraa2{1!mGhj^g~i2^oQaeW*ROau%`27U=wpZOlo)+EnDOaj z?iA3L2-mpo7UY@6HJXS6RcnuaFXCN#HAi}KHM zYMVMD-J1c-z>Cq=a_Z$U+oKM^I9C@9kZa9^$Y;_Xq0NZfz<;@VzCYWW#k0^L^@*?u zZwD%J+vEu?TBp!Dy=}PGJ=U;BJ){0)G_p54961}#p_h4e_*2LARV`*}J)Al0SPV8y>(|WE&_GSn=R?1}m_1C_Idv z=a?Y8XRfg2A$WngH47ftDa?9go0isqb}ZB%7VUUcb~H8E5q9?ot`hSgx89A?qgxuX z8b?22^ym8f!pD01T=RFhygJ>M)Z>T*;dHQH#Syw>a zPZp>v>IenDY}~M}#TJog5`O44kq$8CPdrEu{Lnt#K&lA`terDI;C!4st!+LYGyYI@ z&7YR!LU0C!!FcgickPNX8+lYtm}FNU;;y!>HH0UjM&8^`Z|Am+ZEYLvhqUdCt8K%o z-LqRqendY+|6VSBr?PduY>$O7_psCh_4*gtA-a4XrAvskhbBS2{X@Vv6%e%NFX0yH ze%X$S!-E!@6Ax!L9hKN4)`fJ*;f>SnI5-a$!zV{0@shHqkv$z5l3&ImKg1QI-d5Ct zWi!gi5$q}$M|tvHKpV$E@jP5IQm+F<1wJ;g$uUO}2hju#Ewi`6hKhbj zl%#Ou78@<9Ng&ZAt#=6{D zs@B$)aB8MV$0)zjXfNS>pQqu1_n6SX#%YMdNN+_tPQt`#0MBx2^(zFjk_h_itdcel z z9lprq9*G_XMwECaQZC>M6aaI9%mHp8$s6LhEIzfNnjY(Ep*v9LlGiL277-W|Ot|4B53hcOa`X zx`BlvBH<6}u?BhYnnec4qwg`8or#y!H}@q=?jE5gG5PQuhGvJCEK3iV?x_Rl>IgZG zkZZ6J9Qf={qDBR*5XL5N7BXc>`rk%pnr!*U9;WY8aAi9IBXruYTW_)9#> zmylE-B&Xy+7snwu*5FB{Dd$U;dY?GsDRIkr zU<{|DuirWqACbtR$8op^Ome~1s^@S>(vk!d@Ue`HIt-bCnN#s|H$Ef>76`;R3or;t zz{wRx9wN5JPoO|jv0on{HV6>toM-vh353l)%95xbVZGl(6&?p{fhXqax7htUft*a4 zwQ8t-8`besDaqvUW1U3N2a(>5%%#TKeFR7XgrS*vk_4cO!VIBpOhFOxJ_iUM4-PlfpJPPK=5G_?e2`ls#bs-V$P(^IHUi9&zmY84 zonweHj^nm#$WjZNQj5fK4)sNX!#Xrg+FyY!^VE0P{6_?TOd#94%2J85whFh$KoLIrqbSilWmq$hoctJv)c*zHmnK zIXyI>vw^y0IOa$mXooZ(jyzKaK%TW2O$?j^r`>97Y@Xg#E#Zr^lHbIKY)u|gxf0gg z{|#d){0ML`nSbJ5ULz`ujScAFtXGu}B^}xL ztPtNb;0qU5V($^Y1W{4ZsVO8GJo%c;51%DT87xs2J~kHQKwM| zwPn%I3?oCJsBXr7K0cCzI1)L1BeXCEp9;9S*N};mHnV2}Y4+j{CmR~09no5gY`XXF zzyJDw{^jqSfun7DodB`;>u8Rxx~MgA_~q|}6Di!KW?0aIhw&MWwEv%>MExDs8lpt~J!(5N#$zZ^-$VVa zQ1U4I-#JRuKj1m_j|4->ibaYOzVrZ0u>JoN6YA%&;5)&@V;KLxiV05mlMldzER-lc z624EuCz$g* zMjw6N2}j*)-~*;WbV8Jt04azQee|DVUyeMi@&Hjb{tQ-aC<=RYXk9*~cI3+^sBc&{ zC-%*gU}pK&&>*QIXL*F+ulQ^_jzr=d<7e3XIRa{6Q5+&Fid>?IpfR=*2BW%;vZo<@ y2sqAgH{y_6c^p4L2NjGdz#}Z7j5gCQ@Z-|>Dp=6#*Jn=59GLNJ+t3Ty`1&9GgyFvc literal 0 HcmV?d00001 diff --git a/timm/models/__pycache__/convmixer.cpython-36.pyc b/timm/models/__pycache__/convmixer.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac3549b5f95cb16364809d53b500b9428b769c62 GIT binary patch literal 4159 zcmb_fTW=f372eq!mrF_(9VNcTcAGXW(}t92H%?R6sbWj6VVg~CCl5r6#cF3sEwxZ6Evx`Ui^qmU$|Wmk0&=1M<}G%<>}14p0&uZ#Q)KdZhfoITL0mkuDi)~xFC&D-}j zZf!OCb%`_Q|$xzUuXudp9Hyjf*(S%jS^%|hYz`aFye7`6d>b$b}aJm-0j zqin|u+8wC{G)#|Vk9=Bq;i2yrjMR*cYAjADTFY&+l&6xu5xs`l$P)9>8d6ee=eRFB)r&Mm-&h z(CdZSPM@cBvTQ%Owb86+QLk55_D!)O+3Q7LhN3ojB26-D0h^|Dkf!U*W+s}=#2I{# zws*2@kbYFJci@8Ib}i`l>RE5_eiX!Fu@g>;LmvjJ^Zw%`?)zMw`2OB{ZEZ2jTmRMN z54^^jx10Xj8(ggovYlGyiz-t}j@m!uQCC0s(7W8gNmp@_2l0lp<3_;`dCvM;! 
zcwUr5ndeEpoglT%G>qHg5|$!Ei(Z}@6@hNh z0)QfNxC>DxY|KtgMPd1Ay7&=>CHpczin2hRCkySqKyGkCfK(~UMl16Zr-N0SG)eqk z2u)m%KxG(HzwCLBV4-|O$u7vka%rks5XSLLJvtMi`s3Nr;%)RON0h1#E1QaQ#BT)b z^rUw|W_}}|k-tKtYql-rMT^_}#g?UDTx;0?WtW!#!UbODOYp0c19z*a{OiI?6P^ErYCJoh>LH8C)&M6;f0&)74)od=|omB;#o-P9)RE81si zy3aDVCk*`)6K$e{zoG)K&=d;$by?aCMG{W26>uyf9v%UH!m2k&p)Wu>*!oY+rz}G( zXS$+V*LcqG2jx%@*b7Sut98v>SR6i`L7sSF92qtLqCYZWVH44gh(Xlb9nl{SQdt=I zoJUDVkXsQ7zoeHX^(2w*gYeNXOtQ$2RTM~@ywl*a^ogGZJI%i6H8{4?Yip84NHEtA zvS=?a5(bZYYO_Tiedk__Q51o;gX=X?ltSAsj^3R+eV$FVYjHpDZL zJ#-WvgQ}!FpRq2|q-y{gDJ@yoRB36cw9%(&W9JqlPm13V`H09hBIHI`PRXvEU+TpD zZ9n$NGsm@AK|CDMX9KIJaBj2F*@Z^OC%zun2{324{uEf#1-eg|>gm%S-5;s!h~JX5 z9~1cm1h*e0@;k&(kgo`NT&_%^eICM_;HC=n;C}i$Ea~ydua}fIeyyZa`E+rcBvID< ze$i9~xkJ)W2vIP)`lzz7+N>rq6#~_g_$h7rE|H%RQK@L&+PwRDo~>KN{5=tMI@Krf zb0Tw#9H~lG43WuG7sO#_&pC!`I<_GG)?9B2HbOBaCnXKtn8ye>AU8mCjKUVNvHmyx zE7DzW$`TLT{xC+Iw>xkYsiwA+K3f-qxZ4tG;zcq(4QwJNlW>nq$NvbTAObBs*G~$r8#CK=~JC<{bQ}Ik8`;T%}9f! z2XxmFlvd&wAXQUAkf3DCG2|xo=+;7493BOkIv#2^X@KBIy6W=m(JlO#B5EPkp*%!`ax8qUr~!6Tdh^a%zq`-@?WLmiGci?EbN3a zF2rdb_;l+NbfuH_J$02-HDBd}GPeoPa1*VI|r%G-Yd%c-fP literal 0 HcmV?d00001 diff --git a/timm/models/__pycache__/crossvit.cpython-36.pyc b/timm/models/__pycache__/crossvit.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a96f59ca4b66d9827cc70b2a44176f546ae89170 GIT binary patch literal 19691 zcmd6PdvILWdEb5Pdl!obK@bEfYE8Yk6bXP7B}&2c6U>ExU z&%K~PXg5&}+M?XTRVIn^u;Mz&WTx>XN}9esoz{~{(@FnK|GRBkcjBponb_l*cA8e5 zM*aQ1yDuz3k>W6Im%Qhm^WF2h-}%1pe2;VS#K1se>a!2d*RL4HpBp{D0pw5NaxYkh zp^TiNOl4Kfs+F@$=Ix4|v*jKthjU@cMatoFG#8V6T%Lw1iE1*JlonyMPUTXPk5mS# z>0G)xm>Wbns$!L)>TqsY^6|<@byseeh9d`>R4{9x+k}%I-VOBc(Ss$I+2@@ ze5$gqx<9wS`atdhlrN~X8oXiV4mbzYkQ&}JaUW3^)$UCTxiK}TKBxAoiOmp7`&!TU zt4pot52(+p7dy`nw4OhxUTQthv>=~o-4F7v9(pgCd+-HAJ**zNVW>x(a5=MK=RN_* zqv|n09&14k0&+-Y0m-%?4*_ymJr2m@Ey%;_h&p=1$UWi=tILk%JnB5UVC^-W$JEq& zmYP<_ZiF|?+#$3(uAV@Sc98J&89z>`bUv)Klt{H%!!g9HlGjY4wbhj-d3a zI;nn2N=H%pY4xo7l$0h>%Bknn^HQ2Z=?m(VIxVGXlwMP3)Qpslq4c^stIkR3I7(ks zv+4yYJ%LhQeOjHD(g~Elq&}lQE2Sru{gRO_6dnUfWoC5Ubzdo6&Rnc3r;Olhm%2HM^Ag6Q}asoTIS+v z#VZ|ssaU8unO90KL6>!*<}TEA)zKN;GUuy>MJF>;DY$NFp;RoeU9%YsP+0Yr>iSb} z&Md9gj?S)T4!u|^E)}%OW@a#LYJh=XRLnMhnJ zte|40R9iloS@OIU_vF-6L9dmrP1g0|RAJtoI(}^WiOFLpo_^+;#OHKru~fr&MPRsF zf3f5(t(#0AXD&{)u{bqfsn1VU3#Hmrl`G;-wXr_Aay^ll``qPoCo}RSQ{&ra zmI~LL%)A4H;8g%onFU?1X6mg`-HcbqlNIf_Sd~nn=&fQZSmad?U^i2!R5FM2$B&=L z9DRC?ysZc>SbfM&WSoIiVR_VmjaF6S?vJ3Z%*-U5E<^4Tpj=y0!g7SY<#`C8r6g`$&RSgjRF1?YHm zezjCld3oFX8^Bk-xUiTF`LVOQzH+JHE%|Y;oho1eWS>!@CSAh(&Hje#&$@+(Ep4^^E)&9_}o`9lk( zHAm%pS|k=q+V%3qdhME@kS@V!j?UV81asB9+DX>Zqs;F{g8UfsdxCtJ`ACo-$IXu{ zgUJ@%-!}9^D1PwiDOKU3W;G^X3UJ37B2k=%IlwRbKC8>M>`d#0HjT+`Wv-My`ZMva7v3y zo_ln9@>qwJ^2eSe$xp6$OIcI%mDXRqe?6w~_)Dd>b?%hRJE*os-d- zznK|g!hJ*c(`{_rKRiF3-;U?Mfu39VTEBFk zXycg<7Ovd49{Z5+(tYZ99!%7k?dAK_sSC+m>GXQPrs|by*V~mtXnO3(G$}} zux95++036ysp%Lw6GnoiOsM_Iw~YmpO8t#^E~Mfr0d+R~wh=ON5o}&5A6g8ysB_$* z)(^B4G3W{c%?Ow(ps0FV-$&O`>916iBl zjW9+{)O2ga2^GQGK zu0pHzV|i!~B`=@X)P?+5sk#{G8;M{a6E#Hi6DdPKv1&Cx@se|Om1=*X;zyuZUpub+ z(2LH6m;DfRpKR0*xlUz4pW%QrLC3W`)cKn0$3;uZt5VfZRB1AZvZBv&TBaY*2Z|6T z^bi|A%;YqZ94>bfiD4$7CdAAP)P&$)$_$xffKK2#V5NIY_WG_a-A}gPwMeA!N1En) zbBgY_kTlG)@mi>9HSK1o8TL%9j!_o+T?XAYLf>wB^pRd?Ll zV_v38Zf{u*hlHHgt?Zk`ZIiF zuKDH}-<OJKn$N3x5i~-|(61u#2U>(vbg=2Ph=((T z#y7`GLP#f&dj*%vg^yVwE4^P6R>al$b!6vMSKM=q3VFq?hrjjOnpab5GHwA|{eo*xHY)$$4@ zEtD3gl#p^Z77&;Xd7$UnQS9pSwLFJsE+33g;^<|0%8@BN+Uwe(LYdu{QOBiWLn#q4 zV^-Q4G5IeAT3p|=eT{CF2XQncY0ThqKR_b#zz*aAI1>v1`_T9bB!aS#+6(qx16)c8 
z4zz-EDHrSsVa-tE#P7G)xI%J@>r9vB4Yrdfk8*;@cCxcxZxl}7hS(6B4jmVBb za0s6Q@5?~U)?a`4;fGteKGZ8Hf9i(SRL(+; zn6j2b`lJ^oQPm<;lQt-wVj@B8IS6$wxvns&GU+gGYfeB_yhoOV>AyK9_|a>upI{>L zBz9g=!SJs8amtN6)Q_cXG9U{QU?42w)yhg<)5X^v)DMZEk)a!?3F%Yx@|D7Ms5=P` z806b)CX3inGkyyVT+YI4hwCF%igI)?pC4o$fk1JcH0ny@0S&X3RdbNL3_=pAyiL+cTtb#MT-*oYH# zAj#l^Z{1b+J&St(jZiKj^q-_YZ7FWdSE2mnS6cHu-L{dM$-8b_oC;8MrZ?X zXgAZCRkCkZe~4K@_mCN+*S_1q%lplN?#Rs{U|?vk(Hw?;0!0|_NJHwE(I2rih!OF$ zF}xi^D$xIo)HfgNYT^pi2nz5yC_o)?rkfzq zK#=_ma~%<;-(t~Z5=AmAdv0Hn9qtjVb_HsDikjLyRCz>n+(4=(E{Z};7rq}s?4h^} zEeMj+qC{_3=K*(vkQO z#MDdth<3bHUF#T=ls8oOEEee5T07*4z!#}8@tPQx4Ju$E(1<9H!uvy@Pi*;R zI5ltw^)DJQ`oe1`r8Oi7WX=(39chHoI)dF9t#?vJrN6l=96h@BI|yaO!4U~^_6D3X z=;s^k2Un~a$5<)wBzj_h%7Pp*$^(=nt)4AWc16uyfT&iXO{)o&5>#*E9&f;5ajQDq zOiI7WZFAhYrzqY9eL?zt1DqQLM})U+f`PJ`O~Y#dVs^08JNj+lSHFYA4|{c8T=MO5 z$@61{8e&R(PPNOATy|=1UF)y0V6T@}^xtN)zavEi9);MlsqU7Vi@;EE!7e|?Dq-P` zTw3guV#UpS^<}3P7%w4KlQF5r%eaS-ETZb` z8PH$gAOx+^-VI!ZqmutQ>uLy{SY3f9h=be9nw$>S$6VHIb_<6AYy(MO)%;i-1H`shdK$@%pcvUFF1RG z!3@#;ApuB;}xm3iuqb34)~*VpzbZm`y*t)Z3c`rn38)9;qDN03>@>oPHwx0 zDI-*fdHubHrN0wAY1qrA}wbgDM4$^&Cgbh=mtsc5i%V>t-c*PDa2CSrz z05DED>K>T3kS#DkIW9{JxHpa3SRkEXnsTMY5ZMehF@LO6qVO}ofq0HDtyU@sNrOQ$ z^@Yx1gUJ@f<`g$M+*Hnq&Uj(Tu^~F*Vq3TQ_gP`6*riRd>92%H? zHN#jj9GjCe$Z5nPPf8Al6*6m?LjkL2PqyOR;uY&Wciqe(2^oK(RP&CYiuXEdE=ibB zV&89+l()ng$yy=PY1&=E%pn{rsG!{&O9*b_FaWCaDY`>6yMQ-dV9$Rj!!|uEXdH25nq(ur030o*E%UZ9F(T&U?y<+&5L&r zt~!V~X)KPt9?7`~;^Oa=ADu51S8>gPeL~{R4KXh64-_h;Mfhav@FQzi*HJke6(ioa z@hZ>?>Z{NOM0ALGkZ6bM;qy)QyewLYnf@E-LH{O`SD4^vli^#%)tptV^fOXY-X<^a z5+VPH&oDWHO?$t2+&qpASpP2C7jbod!>yb+&AK)OoN<c_;+!ik#o^X;P~y@y_Sj3EP0`$}DVLd+t8 za9ks^bbTI9`^?PTT#Ia74`*Y7#iVEShr#RqEGdJU4ZYa!jVZrXYA z2W*(gh2R;G(9YX7qD>V2v4#kJJPS95kVd@dO*k$ZHuRTa4`CZ=7r$xXS>h(#=X;q; zda3fj26O4P1#b{7;-Z_p+8puF3*74?-!y*BLakwMq`V7G@Gz{05z3^S2Jai_cLRc6 zYDaFZy}j1ewJXNjQ{HGWvzurQJ?P=r?B=fKXmdB9>GE!Gtg)*x3YojRF({!SY9&8_ zUY08FfgKmy%>Z9;2Hah19_=B)814{~m(VWGyr zjx{oCXS=Ole+nz}3GZNcZ8j_(`rBl%BY?M7p$mW4x;kJc(j9u$2n$*%Pbi5R z0i}XN;YP;?ZZuhT9_f+08c9?lKY`FpaM&m>{Lhm&R zJ>0fokCH=FGwPv&{m}q>kg8@tnk}r`tvyC|3P=!lwc5hujxMQhqrXpeF}ZC}yFDls zNEtLm7g!mA=iG6W=ena{FPcY}A8ki(o63&Va}%`FK+Jj@^#pSYXh)P# zyuG0IEsnUIgg(=q($1qDX}|cxO%u#)z^$6`;1>+Wy$MYk?p62*+X%tZp-U*%V#bR# zE_Qi)1!%Oi=d<_L`CRDC$6S1K^!*q9^ZG9!%A+awyIzFsBV7lAmQIpNT&#hF1AJt z!`D%{j+mjuzFTL;vIhcdP+mALFD&7$E++|6WX->h?)pfr<)@~J>Zd#LE;^@C?zpg} ztlwgDvSfhAv&@Oz87futu5hXTy8z~r9&Fl5Ui>J&tyVPDmKB{}0oLr2TJz&#r}7wG zE>T?7I;e>QAgJWq^Oa?+GQ*!Z@g?(qizCTvh%cEUSM02qmUA4o6D@u9k5)Q7q(!&|<_o2xtoTA;1q<>%+Imw4Lopjd3Zl zJw(zrV1P0FAn`LxwwLac8sK4YOI+7b7vTpfL)9>i+L2Zdh=$$zv{M_g71oxGC_Hxy zGmyzz3KNdW#c>$ZrHT(1S`g+8I8TTv5|E89pa}#O4pI1V*fXr`<;4OALlVorSq;QP zS4R8^z%LW!qWA)3X&hi+kaDOUpMpj?xs5tIawLSdlv*T9_lroVtToNG7jfo*5`!wn zS)RQq<4b{tMKy|BuZPGaVw6%=P=J~?96rS4NCWB`Qr2%h7l#bCZaH&c&*^`TQS?7U zf};aAxK{W#aM#&5a-#nY9{B?rXF_1+xcNn0P#`#YcN?~#kao^O98fUOL4!Y7tLFnO zf^Lh2>tF1k1T;8=eK6mmr0E8ZB!m=q3&?>pw;rH=+2x8KfZ* zf&(XVOm>MAuu$D-2aCB5+v{j*%^j28Mu98ByL$u)6kRiq^B-larGAN$?Ag#_LR=eB zzsx+S7}M+VlW^3|W;BU{?2C_LdjPRwdpIcD14GT&n4rN18-(4zL4QQLx4PTMYM zy(y`>Me;dot==bfM92SXcWylj&~R{wfog>QmrVWz6M7iNl zO5iT}CK}|DT^${DyM<%=57{66*MZU^_)7%*!S|H&4gqR82fRUE{{}KWN&a)Uf_=N3OKqd?x}8l<`(@BCdbHE`@41hW>X<&M}eDnYhp%KoRGq zyh3pqO1x}ETANe~KXX-$M;akncE*kpF$c z{{fQUw#8Ho&e}!q3V@0J7dHPPlfOWM{{Db@dYbU?A2KfqLk5s7+!?bY{b7mMSd9x9 z;N;89on}HUDBzf1Le7uCA6eIq{tpEI8I%9SM3DO`bFVWYmfEPIW*b=3?<2R5s2W2C z2m9>cjOc;mvG`CTW+x)CJ(_ho)Eh#9l8+X$oWem+jT!x!(hg{*v`|O4!JpKhU;#^7UHf`uelmig0CnaNNQvP{olJezFDs z-wt2M$@fa`WJmgOj=V@Ha#W{8J?}a4*#@LL4k1APHLT@`dytyjazsz#Eo9bkzS-Dk 
zkiuaP3CUpNv&PkQOQ%DS?a$FE6xO!iz&9uL&jW<$MQGW%?(z_q93aLASP77==ityC zK0ZpPzQml!1&RUv3=_ObaM&N3+`nahhKVSUUuSNfiG=W< zKn`-?e-Q9Lk^JyK{uzHidLF?4Md1II3OIxXauGzp6ktU`ERKBhGsir?C-cLS??bcZ z0KY}diqr#b96}^?)D-ytuQ(NSmOB3U9-LBM-VvwA1!X(q6w08X8Ds>0|C(qdZS|s5 ze@G~;pMFfT+xK_CB;j|!P+@gCr(wVfj;US`e^DDB$lYM2isMLI`4?Y zW5mmC2-7BZLidD3cKc6vKqBFHK;r*97Jmzq+Y*ux79T+|U~!`Jj+mTcrQ2XKOiae^ z36t#hdplr~@H=2~_%6vY5SA%~JSZ#&`p6N-+tgqmIgWfZIrcI37FC7-lYhV*5qn2; zW?AVr=!_Gc$$LU4yZw_L&`J0m(Ag$T>^|UL7_BM}boJcj;T>pTv z1%D_R`XrOP!X*7Gx5Fg<#D$>;kf6fHl#}fCPtme}$02-YOnwPZJOlgh$0N+nE)LR! z>#}DbzZcSE1GM{nffex64C+hH`#uJM;b?g69h_TQsr4@UQM z4=4N%7#+DcR)-d}9ixL__Mtvz-}SMWT_=bDTt;B^kVNkit%p(dc4!@9ulS=Y_kdP* z`vbJ>M=RlXKx>y!t&hX$u+I@yob9zq=>TKC)&TG{P~JD^pDYk6KQ{U~bb$2f!5-}^X3028oD_hE1T zt}B|e*}M7wl%~JOk9gNtaAHqeoB%yweoPMQPS$GT2J&~ci<8}RW%@g;XwLEvz2IQ) zB>rNKAD71n6}W*9i4GAw$6pkgTbDX04H6)=#3J2m7bW7k$As&kq?n49^?L;buh%Nre3jW** nBDbiwb1C>kES8xHjXXYbaAbI7E*7%P{i7qRiQRz4?(X_O^)|}n literal 0 HcmV?d00001 diff --git a/timm/models/__pycache__/cspnet.cpython-36.pyc b/timm/models/__pycache__/cspnet.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea881c2a45360f0799180ea980cc677ac26cdbce GIT binary patch literal 14814 zcmdU0S&$@0d9I_6>gt}JV`pY}#UeGjN3b({=(f=AY7gC`MOqLvf>upu^=!}dboXY} z>@n1sV-}FX!zHjxNPv+!OoYM0cK87kL^ym9Vf%rQkFYEJAc6il4Tgms2a*t zR?DoK^0t=jsx7h1lD(9z<`B1A`4y+?tQ4vR(@+_eZMiGOYEk03R%vCVIvs!Hch+>+umh$QoT)0s2!V` zq0-51R8mV#z2H=*{hj<%(`sDpRJ+t})Z3{ZS5K%X)w|TY)qB)+>iW&Rx?$52x*OGv zYI@ODqeIm1RW(&tzFJhx*Fn9cmQ_ccQGr_BM!l7Xv+A6BpITS%S07Xxpq$xshPW8T zI2KZFFRBl#kE)NWPpW4@_vS6!4%2;BeNH{6KChlvZ`sVUEwy(uSKXyvP_t?dITBQC!9Z~m5>N=#ptd6QS6VWq;5v)YwA%oFR2-2Jz>mz-TS6# z7$+~B>gf9E%F&=Z?}x5?q|(c2u5?@eir)^su-R!>I*rOv-3fxq6QQ^0S5A65Y)z)DzN&zOuGjT-n$`}&G3SJ#QglzmA#eIVb~4so}2UZ`R3W#j$WMe7J|9`ckSOl zyMO;(2kvs+$Nh${eYV7rsGMzj6$Q52KDucvRvOKgA5_q1ucgIiczShVw%%Epd$QAB ze7MB|OF@1Fc+g5L^78Z~XVmk1qV= zQ0#D4+nDCHQS`flp>LnvFX3ALG(KgvQJ-;RdsVmMVtaL^R_BNMLF_i$-PN!bG}rvN z(Cu_uNz|$1YinJ6_eisi@91_~BKmO#ve=I8AXKs2XzC!Wv8ULT_GI8@Y+VLxdbAhJ zSen{F0>t^{nxBFA1YU7&`JAU0gKrr6jY$6d#GLX%@4(#J(uKJ?CMcLYe)Q-&AOQ#F zxCm-1kdTfF<~YQi_WeiZ=fdX7%ACw)Fjo({kjmNag*d}G-Gdf`5(2}r%$%9QpJVE4 zkX~zDMp*T^RJ`W87eZpe-sTEc_};W}?YFQJ!IA2mYqvU{nmfC1cK_aUK5I$e+7HnE zZTo84+Y5BOQ{8&!9kbo=^o%Lrx_0I_*6I1A*V=&tx9+|D);kw&y<`7D_AHZe=@oqO zr96T~Jox?O&`d@+(vwK(DF)LBelrZFSaI7y3`_06ZMW}L`);e>Q9rO>hQ`5)3c9_K zi%UZZwIy|KkmA?I<2_3G_NKSc7J`0nLeNjugrLw*UpkEMy+mn@?G^9*OeW5EuoCL0 zgSfct>o$-l954>Mc$9`@OStUa(by(4yD;YJ#=a& zulJ%P&MKd?GRJtqKi|cWK&Ql|g-#f@{4`ZuXm#q#y&|l|AgCqF8-SPL0*|u+b= z+PP!^3(}r`aA?Mnj`Uq9NxI1`>dtC6(04QEw;0^R;H?a}GE>$DHMUX4Map{(t(=1- zXSX#!XmKW|uVtIpF}R+=4GeB#FoU2@$l{OqK=3X_5weGGLl^MCp+G#v*e8E~TMb45F zmBK<)h>DSg5_{RwjmU0W%8Ig4PGvT88J5fF=z{GMl!J%oicWt3A3{!_iaA2$AQc0Dfkwt66gt=)q2ipgUmmblS(Y?r6v{8uuZ{e(ZuJDm^r-^XNG)JnEf4iIsCe#TnIv zLdh(3n(di#>@?e0Y+gG^_sYo7JqW*0{idthK!r zzgmD!OZ1Uwk|dyR?W=6AoDF0IlupBT%rcZh2})rCF~^*gxFbqpmpN_iGIb4fbrSb} zZW;+5Lc}+)!7-|qGO@j}6dpgbl*I!ngCdu|hWk;YVM3Pj7YkKU>KBcwBbyeZTKFgs zr0P<^$0INk?-LYg`}O6uN=3}Zo8LJTRtDLX`-lu~1wWCV#e|(796B*yA4yrG=FQ=; z@dV#PKzkxGHmoIU1EvEP7#2om(%8T?FLNR%(6Z!4RBqG4QiDBHbESrbmTX!xFIav0V&v!-h29#X zFGVHP8o?6f0%M!(TN)N0LkGpyBO8&*)91&p?peiw=o z+0KfU70c^@F0e|LR^fpp>tK>P7#D=uE{(?wdOV13GJ)S}#5q|5=?Yk2y8_Q-c<{mC z6>VY-VEMUe6lSK3gZ4<;X3zPRrV{OyuO|i-zN=76482-yuo!d$)L+6AQ1u&j*&H*+ zAnJR}Y4a*PIdg4#izLqWMxk%K60xW_$!9>bf%v^-MFuwakpB%6n{!T_IS-Za7|LU; z+GZH%)_kb4nr<=6g1XKnrV}Qoi>&RdcAVtN49Ln}XnzY-0ed#gwQIIA`6}AF(1Dc8 zpwDfHL}ad$SG*zq5VIw-AA{gR2fPJ&&_rpe-H&CY^dXdH`gvK&doS`tPoX_Cf-aJF zv5=$x0suURA~w&*PH?)T!}@BteO&U`7{X*$-v(iD 
z6#@f@<-Y3Av0Za*YU?PaHD`*HZGt9b+pVR5jd-b6JG1Jw(vp0wraE;9Pcny(`;WXm zF5c&b_0#j%@f=VhO8R|ZK>s#^cqA3Ky5DN`M2vS8IdDEb z;@DDBM@j$36~$lh0D>!GZ3cLoX#h=uwAeri(gJb>X#pm950JJP7mi``DnPonqqpDw z1;F~sq4~P(AHYEwPajv;khmhQmQq|Tzyu+#jtH)n`nWnWgsY{9C^^8@UYP@obRxk> z;wcOoqgIrqm47uRCRi{Fz7o5F?Dp)11?FkJKpZAcGGv zATH<)1{WEKAobxuf0%jwu6uJWAV9hMYdk|BP%Yijr-x+e%%7-_qBH(>;-TCx3!^X)p&I1Pje)&st?$5QA+u};OR&W7 z*1ykfPBaEw$im)ov6*14oPr&v@-Jkg9Vvb|(b)QA+ZBt;LI0DeHyMo!o|H=PvBK`^ z(H5d{Rk+Fkjk{Fbpp8~KUkyhhsBCPf=GUj95&bh*c~iR${j;!~#5PRYbL-QP!qU=c zI2KKT^D_FFh%(#`HL}W$reP_Lk|WtFO{`CziJvqQPS_*3DXI);dYp`-Y+O)pYyO0w zTWNk^{=i~8fx(x}zyR0;Qm_{lxSL~nAL5dt#Lm+cCD|J#DwgCkI}+@BhD9Q>Td^;; z;nPbuFS5Dd?iU*??lZ7qPlWy?D#tmoKLZ$sUVG8k+}`RZSpff-hWCLrLX-tf?v-i_ z@#wa!nVs8lbnq#%f2YhRHlb|ZkFxP{;&Me$4ElZcYX+u33=?mFLRbu`aSGL&?Qyo132Lr{s>RN(IsX(*hj&r zGbD4rF8)X3|nfXK*W#4VawrZ}-6f~LCA4hLTqDV1}K_nNdkBtV3Y!QV*8cN zTVP3XwAUtvMzx3F&Seq&X|$9q0=9W2$mVkSA)dfRz&$eUwy|q9^SR3UKh7F|!R-iM z1=s*NpSBucxo@jwxjozh)rG`Hlf#0wGSv~z3M%6A+K$zI^K2a5(Nq(t{u@NLL3Kf( zx)4H@LIO<8DvhhsQbu7`}CM~nT;%(Zh$K*>Opupbe0MNpDy6D*oV(i zsDY2cNE-!wek?2{u@pYXAY1Oz2taidy|{qZvcRVU_{>FPfW}fEKFjb*wVzD(Q2F`9 zoqhrzlwjafh{0udIpo_)lq}cL1ns`e`V9to2EW5Vq;&v(k1>zHmPqX;V|`51pFnbE z{B`3M*R1|DgJ&6hhQVhUJjdX33|<3LrYYciKxkNi6!`B6Dic-);Q&Muqfq4J)%Zai z;UoC53z5sCMzTAzlHuTjh05tK$IC6f8FD6K4o7(;n%g=I9)j7HUdrnRvYkh zazZor$9Uvdr^|ys3Oy&)#uJ=GrOFb8_!Yr|g3_u|LEC*p;Ba5%A^#goI6SN6Jd{v`6<-3DxvV{q0IiXA!^xPX_l zuvDC=a2MRhpmp1&r~o?*OA9$>lm)FTw7FibGcUu@i*HdbxFjb%yji*8_1a5ZjWmm<6aj~qY>yVLNu!&!(SfU6Mt705WuxGkH* z*j{UP{AK=Su@RzI^G4#KR(uW89V-vvPn;DG7J z8PF{dJ2>bFx^Tw9idtRKUtnGi-Z~$yx37`vj}T!0`VAHo>#JI0EaZe{1e#I~Gm~Vw z?{(urieGi<0heJ5sw2tpB^84z&P;KbQtO#iL3Jw0*lKA7Vq;ca&OlOAoa|LMA?94j zm*XSJoT%z};?)y2$MK+YG^~=?N_*gc=2JFjp;wc?GJwUwUk-rh2;ed0z(uEDLd`m> z_kIKb{(`3wY}2^VxAchg{nB|XNypFoPNO73Pwxn>;J`dDK?A#LhZ<33T*N5iGRqip zO1J_uf%k}PqsmwelbZc`AIDb=ZK3+NAQo^!T9E_OSy%kjCus-W{eN~@fv$1fSExLQ zn+kXT>NgVD%=}}gj^AB5g$oIO#lxeTjr1M_?|9&vgkn<})X6XGT+$?b`NgfcWLy+pzMS+s+zLX~ESV$iZ>Xp>67!e*UUHh4V* zH!Xrn(Dm!QaZ=}L^sF=`R)X9S(1$TSvF)8*q-vMn{Hc05jZWju4Xr4PS1^Ymz=@}C zfK?mz4y+u6uz1z(%WnE4P-;PQl8oSfNmJYWIv;b((IEr$b7!JAskua8EffPJIp zM58LV+gKmNJCFBxLaQ8DMQ6DI1wfC(v4!gnXki>y5UxGIK{he8DQ#g>Iw70Vr*X!& zW1aSXX{j8JMmuPuVOwls16taPPd)7&?WtjN0X|&+h9Kt&R3Qke8CucMs1!E zsX0@?!9B&+K;P_#D6FY(0#QD#l*Bu*FKOb423MZo}a5%ML zeah62AU3_UGn!i3#cO41a?{3X;8Q8hZlO6ZG*^*^ibzs#Kxz+D?R_cj8-@0IDZN_S zo5HoToosJwjZc|pt_QD0^!^{`Z|rk+6*#*lno_%@o$sNA3g;5k-%BmXtLEjD_tF?< zU1S6Pp%aYX&w$pXoUG|b7<-h#J_g*_=tBrV{!PZe#o$jEh(#xEKPs2Cyx?Cj3&KGg z^C-cw%^A{D$E3GQ!V6toG!bN$^TK??YuA9=aR!$wR(ZNj=!i@0PCGg9hTQkXc=3Ic z-@5;YYZnhe?a-XlyBW~ogs$cD{*)PqnenE?>}@cE{p&wx@OB2`;N-!ce0N-G;5rX) zmNeT~Z!a;wZwbdZ{fA}<^gEc%y}j7M;19YatruwO(Ky3vHaLZmv(G?O8}9vJ^}|no zjs&C2UULPqD&g*b2PZT}3d(5a%v4WLE!{v8~o&Jf{6%3$xV5J!W^N%xj??{!0AZ&#>#K88jIDJ%fK>AUwU6Bc%k& z$H)^A%W*PriIvRBy*RG3o$QJJaL`WX@I8ztxDx?onOp-{(1495n;W-vUJB02cb+klwPJ;eg3t@l+!;cpD=!;8!uzVSvZ( zT9oL2MSw|X94|Hl31X%&wv87ddyx!q!5EgLf7x!YW8-PJjK7X zTgG2UwCP}WxKs$s9VNgN4F&V_E5J>_q(-`$l{T?rR-pCd9H5Zk}T{r`0{KV zar9<6i@R04>8Ec5Wp83>>41$iiw?|W-tD1z7i(xM%u0p7AYU0&F)UmiWfOhQC=z|n zxaFtRMc6+pYltJr-AyR#@$xL=W-p84Cud&0Vt(Lu8D#bE4h;{}eI__OeVpK)d+YE3 zB{)EEdZx=DJFov8RQhHF&@&?V7}>d!Efl$}`3EJ-plgF}Kr5h~r5w8)pl2LTH2NDBFq_#i;x zok<~I5+4NrkNMs@)cQ#Bjlah6zQU-P`o5$)z9c>ft~?o;Z6fj;;QMgG8eg)l83b1r zg)7R*6(uaIZ((PbTLIZ2VW#bpY`Jbn49v)C$PR>jTMQH;^H*dnn=FIN$#%-~YgCsd zbNT8b0k84pQuA`l?l(6mvevVM6}~Ojo8|0anQx2Xy0@&8!<-O_3HlJ_=I7@Dq>_}< z7m+t(p7@3TZb>e6JP)$ik;BQ^c00NIwWF7uZLGHC@-Qz655>(Bae@C2W0wDUWAJ}G zux<5Q*f0iSG{!mEVI~{qpuWSh?+$H#KOLoq7OZ+b;^wC=Qb73tc>2ii?TCT%?F1EdsTXIn67vycPn^11g 
mW$od-u{?HO(-SvOTs?7MVvk#yc*rf_5DnaHd9rC5@Ban$EzTkU literal 0 HcmV?d00001 diff --git a/timm/models/__pycache__/densenet.cpython-36.pyc b/timm/models/__pycache__/densenet.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..716a407a7d45c4b5842dda3d4c98997dd33e1c19 GIT binary patch literal 13075 zcmd5?*>4=ldGBjZ4$0vmlA^Ry?|Ki7Ry=jBWbL8TyY`B9ymZzW@4B}+)k6+B(=)2> zkwm)352gSo>&VyuL7X6)AV^{ZdCW@?=R%OD00Hul$9f8omk7uo5F`PT-}iM-&ybSV zasqEMWSw2dS6_YgT~%jCM~h2eIl1v4S2XRPwV~e#@^2vo|EO!4(5jlyh0)ZjI#RP~ zw9KlhQ=QecTA6Bw^F}jU&2qckcD|bDTwy!EU9669zO+5MJysn@KGQ6>CaM$ME8CoG zO;x8_)75E|b0Xh7(weExaK6x-ZOv8ZT1Tr#Tl3ZV*0Jg_t}ix^x1Os$hy2K8F0zFs|z(nax-c$rJDqV$@0MV#W&8I&%Gia5=s1(Ys}SH&4FEu!>Au^<+? zw1m#948UORK_oPpiDS_xE4h4coG|W#91p!1KM(ZnQd0 zujTon8#da$eQ(JQ?|JS{P+Z??1a`gA^lYTAU2AuC?RMSX3d2rtd1-005pH!i7HjR+ zQYRjB>24#SAzl&5*o&RrSL{>Q-n+4IcHw%{?FL@OzK3qbdgHz)?45hA+zjlF8wBB& zYbHm!!T3fcR*1}bx#%t1ZL(lP1HuLKT7F1UPlP#5k++Hief}$ zL^jkyoeqsCd|Je|mev~oVrtqW1ntp3QItcpiGbg~zNvB>janCBZ>$bTyV47=ZT{V5 zRU87VsLZZxsu922a=6+HRI%ZAx}g&^_B>S}q{rEOO}0Bu2TL7o_*h1#-DKiXR?BmJ zWd@;8*kc)l4$UK{0Y=4?Wh8Q(_sIb$NAlDZomcmKScdMX*FDZTnxky0#QHTCFACwP0z85|u^!H;XZY zJR;4=>lXf`jr?Bo2~;ibH8!`xmh1QFd!f;CH@$^qXz4XQHvl}Mea~;UU9og`d2wao zo=3d~Gq0?!IMQ7>zwFg(_4BpGPPmnX< zh%!G|Y)c$1BK0BAFI-yKSiN}hVr|(axPQLhtII13wUw3Sv*$0a{QSLFF03rPCeEE- zanCRR4|=bzEVviWUh-;}E;7aoYNSsJz>6vk6@)7HOMSErHNJ3ZP{4`?1pqiGtX?>$ zCiK@%BIgNW6thG&wJYfXdQhlZblxF8 zGIZXNW<7}6?Sd$X;sXtWq{!tF@P!hWOG$aOUlwDOtCqg6Sz2`z++ti6ZxDli*WLAG z!Xz~9R*iTD{U|LW5H|@vK}6gp()NvQV;^iO(zXF*WJM5T`$k<)YK+j@&O~}-M#i)T z*a5xBJj@KW$f6H_Jt!c*2p!wupNt8s@_WzE!~K2rw+2vtE4Uz89As@(EgZMo}s z^?IY$fOM_olofc*x|$>g8}rpZH&!E?vV9K%hP&oA{CELHmMBfyCV7bRi5YHp*O%3*ZvA>?0L2dCPVx;Z9ju6<0LTy`* zp!Mw=Jp&s~0b7KP(|cxQVhfBlMmoe{hWUpwYi=mdpsRcp5perOB+9rSKm@jZeDs8q zu6JUi9fnP)AGIB)-3=jQ;*}7`=jEu;p%!%?da4$XaL|mBUe@>Kh8A<6VY(t(4Xut> zbR3!cGm(Bn`)~~FF}5LTg#Mm(C;Ps}dXc%dehY{lmEN6h1LD!yl&*lp;x*{^RW`(d zY0JYvPPqQAbD$4T7mJsop4IdE-ti$&A8UR9TmqRwK;SY5055ou?-{%qIBAjod&Uom z{>`--n(&X*1X?G{H9EDwqHW`d?qhWo9lDXcg*qez7!q30fM`^%)@+j$y^ZRCi9z4H zFa&VTZ8n|S7PhI=hNO1}xL&-{Y~yS2#$qxAlq{pxfku(GPVgt+ra8aTpSTYM5v`hk z`UGDlkeErDC3EkkX96lkCoP3AL#yUNm~H55ymTlVj0T2_DQ-Y^xq4edD-~a-j*Jj< zZ*pkvfZy?+|JXtp6M5nFT{D|s5BCpe3~ufOTyT(tnAx_-@*SJ6Uc*W=?v{7lkDD|5RMs!}6xf|_h}LiAIGoA+xTvTa|kQ9B}Jxk3@K zIcA7Tzol}-bHPL8FQK-Q8EmZ@?MUx#%5&sRd{-skyEmvhxcR&%-$tuo4w0sp^t@pi zWiZdYUNVZDo7O91BoAPfcq9*;L>eGpa-2I|w;6wt1&qKvIfk1S$!nME$f<1ChcP60 zVXEx3?|PCn$V8cCK-6f$u;gXf)>fr;#Ix6_gR>bwi&V$>;L9a=x4? 
z=U95BoB|D)&r=$#BI+w0La&-0gtst+nW!ER-B3NUA}exG9dfKX3o{Ah^l2BO=XoLUgu!3#LS3S z1+?`V8JvOB;s{)kIWZ$C3BBG>FgqkZ+zqwdE~3`x?ScR z%`;?r*r%Z6RqO?Ot9{Rg<71cHfAH@FcGxDjob9>r95TnVPkprW5sCed3p=Tjj9Gxa-+< zc-z|225YfFmK&a=xndpFCfgzgHAsiEG0>v7XeS1iecIl!`#g>y3AenY`=Iw=X{m8% zpJH>5mXjzSHW_ZVTL-S<EtAiwRAxPu=}1kS{i()NyD`BVROI~ z2=CPGb=mdK*w82fd!T*;n~V1CE;bB?sAtCm*r`2wCLP+gH@eW}n(cd@T!de^?gGn~ z?bElQ-g~DP?e{zn#K`;nbV|Fpe32;)&YGCgh_oKrAo)im%O24|dqjHGBhpkJkzy~$ z5wYWv&2eSIySv6JY8mkaZ05Wo>3|dU0#Td7kipJ~4W{xy@;G^ijc4+o7 zuq6^YC2=I3eYhvv0Ms)a;q}k(y0?Vk8)Ei%wBI$v+(UDd#x99`Z!#L|P2K;8aFkRI zs33m}6=b@1Bpi#5L{lX409P>@5l0{9dNX1^nikfqMx*p*AAXejV`P?ZO6oyk0U6p+aWoWT zZ!(;~_bFk_Yk*-+9Diu_j>dC}=LT>@N8{P4U2p0W{m#ellYj z=SfNP*OITt`0Ky%*W>h6SOh~_eoo9%**}rAKf&#Pa`^iv#+{!eit+Pn$`YQ}VaXr+ z+(|)zHJ%E8N$NlBw0v#Lg=lF8u@5X8;(7vB$#?KoWqnt|DJAbwf!%D&@UU@1?jGwH z?1oz&_^Tm191+6{rGW>DGuFs%-3YgGwL( z&Sz`o)gm)r79@LxbP9N+SeXxK0M_G4Jy%88sfmsky9mz@U6K%3=gHwlOL4;8j#oJ; zXJ`PnG}xXXsUGVgtiBd*yZ2+YNfqCNHwm6U+(b!8eU0@hxlOZL0ykBvlmG{BV7;vh z9v1GAAc<`h(#MnuCmCxkuv{TN`m2$|PT@7=TQP^A<2x3Wi}4gl@?R*6)Oj_&<2G6l z27Eu|HXW6XuP9`f+7uwF$utk0ctf-AHhj;Om1D6XO;(R84GVR)hExH!S1rZ}<3r^e zKKw+$r2KXC7~pqcl2uF454l&{Z|=fX3KHeAnxK^(v^?VBX0C2d~lL2^P2!I}85Z+$6?qXcV zxdbp%{t?>7=Zul{bay5@l5zwU`({92MGIE}I@at*m}X**7o8 z+%($2B~v83KKVT1$8-I(a2M>f@*+E-Vrx9B4C%r5-fng9x%_iwj&)g%A$7Uq@4Ag0LYg;m${{%q`z;Ky4uLvZ$AbafsW?F@BI)){#I^_B)2orb zaHPWY0QUaM>OL316E_6xB8d0hECz>;?eG{2$Loh) zNKNHW5rKs16~Q5V@))NGg+xwT#sIl--2u$eb~5IO-2-z4U=D_!$lV=DM#uikV*ZYf=q*nNsds9A&knupza4HAP=)j_+-uw zd=`lqY48IWO=r+tqI9)ywi!{RP z!&oG@wWQi7V=+0=S4)en_dEwIMcA{T=9ETtvzOhJ&5Tr-V4&aNK07dvn zeC%gZ9gXnzp;JtCpNXv;u$2e4vWKu$7{V5w)_iW*dI}6UtFFh3kkC^C4S$qa{i2HPV?4Oh&rVw-8v1!i=zO>QLHQK{VjBq9=1$Dw$$1yV#!pDe{LTA0DVeA*Xk? zPTdb_wKO*D)tCF4TKfKO@J3~*i#$bR9^4bjsD4a#v#eoPQ6g}t>t}C~HBf})-_S%) zlHD{+hajh=YF6GLWwNX@WGTi7TRDVh(KXeN zuWVMZghR}B$ijSrkfII;a7;|2Z;w8ckfITyL&tze)%nJuqT{L8Q~uA&AJc3J#-52= z;Ucezr`YL>DV5y^bC{o>sXnuZat} zfK4Nnjc?gvL!GFNmCM+Bn2#T!%Uwx#Eo9WwV=sr_L(4yBui6R532l6Z_ad z`2!lxr-)skWNpRz>+_T&O_UWW);c6fJV_i!5<`m5Ad|*Gt>qlu>#O{g*lYI&Ei>3a zWZ{66@Cb)oWJr-KBc4N=HH$@qod+xMP(Z};$AK5tLGes=Y_i-z&9#=x(D`~_Uzf0~A&j1@zf%4(V` zrrfO+OYX5od?hZqL?hlvuB4EUHPh>vmCU-mVw;AFt3)%qo?FRDKH1E#7gh?APc=u@ zM^{FXPp_FPN7MyntLzS5G89Mv2{%_Hh8YR*PA$5B&MkD}($ zsOAJ}9#fB_=JBZJBx;^eY1E{nnp0{~J*l1oZKvH)^@@60mF}3k=E@nAE~;nLr=|2T zN|)5LYC=knpmbSHswpX*MQKS*s~IU3QF>LC)j26WiqdD*ta?sLkD>IMnp5*qdK{(C zsq<<8Blx_Vx{Af->CbVa?WJ|m?kQF=qYq+XWNQ_6bFC@ninSp4E8*WD;yac;Z1 zxVW~a-8ILrw_8Q0rHYrXEEaD~6|cHpdD-=C7TdbMR&P1YqNAJ*-&Mt$Zm$PP-O}{?jRh_0=e8s8WSZ%l5;v#6M`)<|W)b4%ew=g|BIXyEow_xv4x0Xv* z@X%{-EWUbq`SP{OrOQi;Z(q4qdE@fpaxi{R^;_32?ScJ9$qdFW@^l>-cV^+H^dxUaPwrl?AL&-Sb@?whv-hmB6TSDE=&5SzNE<@*Y9bp=v|* zR6-?v!#7n*rB&ubyJLQvLXcao|69y3_HY?{|H34NWDOU7|Ne52UadFlE!WZ8xmCw= zD@_W@8fsTR5q%9kgvBe z`Hgl{#4L!fyG|>JdAyx%kQDHQ+v;*@fHr#!XYxII+`i@mAK7cYLHJY6nNd05`cy6azWD{qQ?ZMR-s zT%Pjl>+4gp8s3y@I+Gi>gE*Ju7>4usR>QK)q#4Iw+SF%I-kzfbLx5Ft@^)>q-e$k< z5cPFu&270p>(0$itUB}MYVG{o|CWfQC_X7=gQS{9AJCl&^@=% zRn5=MtUB)e19hF7X4lp8^V8MY=c>~6u;k87Pn2iRpIfL^XQki#A!mzMPN8#%&a@TP?mSL$R7gX0G#Pt^|kB%y3?B42*Gti1ZpA@Q0+EdC?+JeeXG@MJ8J6Y z^yJKh24+*{qI(6hH#c3G^(NH$^J;azwjl3o|3AHN%O7~*x$^u%d482IEaifPXhD#| zjnW*XB?AeQoXDEAnVSxhGU+Y3gUix~+?JNY_VCf%LXelHm5AD)Fj(C3tJK-42_{= zH3zIFqEVB$XK)d%JdK2y+c9>nhP7*$h7XsjV|MVqPOKAG7OvRcRL7_!@Fdx>cG0$y zLOxB`>Mlze>0_~vN)Gk02l`|&o>9pu1M^HR3t}#n5<&Kct6Q)tg2zeE*LCFvsSR4u z)>;r>tvepLSr5VZ8q7-y_}z4>u25CV2MKQz##@&_Nsz8o>aDt8sRUN56(lbLsLLuy z5U^*0>?@96y}sPm>!^#Zwq}C(Rrku?5en=wCme3ibYHbZ;rWR4-jUzE& zapIP3;*Y77#@EOj2fOt$8_8l&HqPPlDEpjVAuIB7u+~kZ(U8_z&^22$4;~6M 
zF9BZ#9tt!sDVmp}mlEVjW83$eu%y)++v8BbDO?lp7W=h(;tG)Ry$eXH^kn!;CBBpvIoB^DA7mik_o#Tb z3h8D%QTJ~Lws@Gg>I##v*XybhHtz~sPc3BHU%;3itEm1-^Ar?e91swPY7}s1e*4&v z(0m*g5UqbiEHD~f3@{pyfe1a4ABPmFk3n?4!(g6^f6Hjtm5WOgi1No=fhe-rh-2L zk0YVX6YxZKGHb?;jVrr#!5{5pcFjx1PtWe;I(FlTjEAv91Ot90F(wdf*EEezR-R+r zU06udc<+OE474BX*z%5}lFv$h9J7q+|3b|}90@Z^-KBqWEE*rP6@R8X-{YvyN8Zl~ zP88^s|@y;&>SJBk<`fObC&s*gkaZVd3cz3*TW&$}!v#uOeY693O58=r{Oi znaSswP~wAxK-5Q~Vh~$*wt|FqT5E2QbT&5J7J}P1#OPi@=pJIFN38UM%;L?pHxVYo zX^Jyk?koHNjOP0=s^8)?1m}gwpJI_-jDDL5QE_F20l7?Hf@x1Xe6-5sB_v(N?4t%r z|2i&@LlRHz5K+yfb;2CA=w8K9o5tU$AZrfK$M6godwf{s4ggsiD5?ap$-}1M1Awf! zzI4SPy1TKRID|H_b=^<;G|5zj%q~nO#P{kKt(_#U6ci(=tQ(fs#8X@Q6ycLb%kE^t z+;L+k1A`8s$2}e03EN>!0_ubug!}3{*g9$CsF?bnJFMq;|9~`4r~M>cgBIQKICJ9I z7pP!RFV1(Q2mYh?1^+}O*x-t>_rLgQ7rD}reFh;&$hI1Uas!r9e-Uk9pRvrY#s*Ok z+pcd6Ebk1Q*4#n27s^j-%t7L}~b%Bi*nb4T5#Jr6yChfqi2XWbmu~u7wR1m{L(~_g7{x;xBXrK zNE1fb=U^U9-Z0dQL`wh&P=vEUNLH~^*h#;e1VG*!2GW6)Fc9(!ce5MdQK6Q^<^bIU z4#XLx@7F`OY=~}c)P+{M%$)dfP3FXpTW7B4v57Dsa(Mx@LAKtiRIfWN1dv{kgZtJi zg?LKAIT%d-0#Cx7GKfIjJOiL8m=dRuS3QjA?OKvqq=;UHcL7N^7V}N0CJrU2<~v|R zBPP)qb8%gkNZLASApT-aB6?RCQ%^ zD1M6d;-|ix=0r}fX*Z1PC6rjw1f6O=8k$t~n2*ae1G`Z)wpog~l~?#12MP96&z!*;7y(QjhOyEM$O{Ng#i|w6Sx-C8lW=V5(My-RO{VNzJVaClkMaXL_u_b z&nDk?0=6f5R|4*%SvNcR?^xkG2U_6WHEaNZzW8xtpba+7IpYsZJ?W3|Dc`V5y@VTu z^a$#V${57bs2uG&F646GIfTAYTcNaKzI&E?X~;>TF5@s^R6|=TgIhRxsp;(*KtRwy z)z?eop{{#;naCP-J;=P_Y(?Hh`ZeXYd#S0g;24+!MX$-=jsi; zfxLc?N#BX-aY28b4>*MOnQS6~yTtAMgMsFs(7+?9MlAGCxFuhd7%&Y59W&E$D(a{; zW?HDXV2y0}FiHFg98o2DaI{>d-tots1LiN{^6nr3P|F_Cn$dy)iHQ&QiLm(CCxYn7 z-jSteJ27As%J7$gQA8-X1n}zV!a&%sBbV$X&>nCPA0PP|M*ObzJ}|6xLVDw=^u{y1 z*Sc!~-3BlXoY0sOVzS$~>1`&$H!=Ag7RhkE!{lp7N~u1+h1Q<^gXAjqQ`+l+wKapQ zd=TFK2ikcjkQiVZcb$^BfOz;DhfxQn6ZwRRT}z%Gu;gUn0M^J8xbMr$K;%n&){iF- zg^!f(f5a6GW9*Xab_aT>EQDkuF@;NwYLuo2+=$-D6{a#c^`I@8I*g8UEGL(HJ=yV=QV};(Gxx`NlKo_h}wXd|AY8OdA`%a0gtRB4RMgX6>=_9gdRc{w|OH<&a6($kQE(8N5D2^{zDT zx-g;B@aw44-(_-#PtEnfY#nUyA-0Cc9Y`8{n(^>2XT?o!M~ouYD8ep16aHDFs0sg$ z#cY`WQ|2jbXB2QhgB12FheL#(15hPf`J-H9af$t3MFtxW$*rqC`~cZ*Si-45vSIob z;}IymoC(0XKii47U}fk6<4W93p+%hgTDXU>Y_4Ghs2*(kdj;&DS?~su?^Es4;i+Ay zcGNhW-yNiE;y89S4yOG}Xc{ElEiAhl*puo>qo#xo?!@cQpd#EZ=(}LQ!@3(xWLrp* z(=7fbk_j?>3>k(!4Cqe40pMOx+LV)+$^A0k3+&Jl;1V&C+y%aoSZpbqnj(wa3%Koj zYfxBo5pDLqdpXYuz)oVY?J=&{@Z;Xu9t|qrBJ1`NGUAHM2`u7Bp6y|+;8BHOOmm0T z(bGKnQCJs1aiWnDC{7C$#+R?*tmW4*kp4CkE=wsBLImHVxd##oE{O@*|J%$_0rmHg zz^@kq1y)4FhZzhf-}Q!weSySM0u2Z^MKFb{OJF(wPkZ zLOS<1AEJ|%<74TRJ-aeuLb~7F+oSVs!CPUeOC6o>KAo}Uu7P0OWo`s%UpqMLZmmIc z{+LwB(h;Wua|vG}z$oy%_D}gpmi~Ll-G>cwp87f|XX`HI<3jm%mLlDCpoh+G_0MtG z?0#?g9n^+l9@SI-A(KC1B2y)U?spgy%2u=pX$ToYrDyDHI%X%+_EL}EF$_ba|2i)3 zNhINpK*y}beDIXMj~_!6ox(Sl=?`%XPEQXqV=pg-F#u&T^oZYO?psK3pr7SNQoV(E z)AeBdv&yYG_y&(3SOrHmv@48vmDmx4q}i?&1kwqoWL=L zz_)ht<=ux+EwX0E@-3C?7>M^mMvX*!RDE)O8QsPC+WQ)EdxXwC>h>O^(Y-UM0}H;v zMl`u%N(U_KQ+%G>AAj{n7xYT%-b?gfage`e@;6NW775_$E6j_8kxo?nfEDCakb@(E zKtKGzZXe$?U;icYOXLd%nUS~DehjY)Y6QIEH;TC-UgZaPHTu!K>XXsu)xiGoeHg~o z`h*PoJM!f3+5dhF(?4XhVfoU0YwaFBvG!m-y@)3wQbfBdRB7*+Jg*SDymg=Utt024eO}*TZPkC9%W~`(@%l64|RYNFu#0&Du{uqBu@M zB2lJKB6i$*Bz6x7Mc6-z#CxfGWPrNY4ow|<-Moi7)*ejV1w8r41?*E7XM{ea01v!~ z;fcw8B>pU>(p}EO6U<&;9VQr>&Dui}j4caDY$&hTu#F^^Td?*J!8l~=Eop90YHhr0 z0Jw%l_hYFpqW6PS&0arq57n$anCg4RN=@9unfiQrv@T(bGEZ!M)=ADCYLy zbh6j`!*oJfS$i;@7w}|AoDZfmK|~)~-9g)zzt8F(KHxy8VaH`3B>z4pp&v)`;PkTBcZca6HD#=W>3!vJ z^y2(S78}0xkB8>uewzDRNkaV-pNM+)`rTpb$4$-JABlP!)YEktq<+wKDMX@v$Q73j z^J{QR_AUE97JTXxv4Fk);2svR_Fxta34FMJP9ztjMRTQW!8DRTDDVf7FQ!>|@GZP< z0L&ji>KTPRxYV=PpAQoash6=#P1n6-E*Pyt_=#k2v|F64 
zZMI||(`n+rwe<4(&)6x$|6el6y|(`UT?tax-6lS?_9TkIUMByWO|;FI4*$oGzO7U2 zj?Z)*`5Kqufc`$KlHtci^3`qlVW+t7Z8qTPh(tx=xrsY29;kR@`Vl^*A=gDFk1`?r z2_}eYXbOigYru~ckM=Yk1nC#Uy~UR}H?M^RCvf~M8DD*jB38nmm5kXremk`PkN=4V a@l<^5>{w~+<+1Ux^4QyU1~sX}U;hWD8XF4$ literal 0 HcmV?d00001 diff --git a/timm/models/__pycache__/dpn.cpython-36.pyc b/timm/models/__pycache__/dpn.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..044bea2b1a451c2f6a3347c0ff13fdaefb4ff55b GIT binary patch literal 10582 zcmb_iNpKridhQL4Mgs&va1|xnki5vS%~hKCCv*z-y2Ifd=)u zDT*90m6)2D&gRLyaG%^~L;bI!3(sY+EUtjZ;+N>wUJrSkoMW8o%^loF)= z`@R2d|NGy+y}x;BXvm)X_A8ZN&MV44DTjU*${*kfzf=`PD2^gjp*7W(=4dLF^`_o3 z9D~b7Gu1L3lgp`Q+DW55)wCKJC&MMXk!fU|9Li>MsFio}tzl=lRd5Qe5oZLW(!y#M zTcge>mov?=*0?j?ns6pslg?yo%9&!k-8|EpcBWCzuBy&iF)M~d{*mgO^UsN4QP@}U z9`OtQh$!ysD35k=W8L?7_dP*xG5OeZUiwH8Q{v1cMV#?7jhFXy=M`Y4#aUp^b}=Pj z&WV?Rd8vyz56sIV3rx0)xgh35Nt_2QulmzsUR)5bK2rBo=QY$8#B1VpuDy=hqIg4G z z5P4C(9hBO&(ydLedDn~9mi=g}E!RW)Lof72i5g{nwH|oQ(w)!Iczozu6m5jxn44Rz zM{Apv*=oBrSKZ#&n!9y(IV^1fX}7#!yHppx7nW+W-70xOJ6iMQlV~|*!uGb;n%mfp zi1Znnc-FR8*6QJLT7bd&8`x8&>GR?xtW?^Nek8qmfbDGg_0_egO9;_O14#U#-sE$qA2PFSxHO_uV3Sau-R=pab zn%?kaRQH-FrdBq=_vNw?k9>UR#z(i8Z?Cx6k9+6#jpev-1oi34t$2KZxxIAbvrkr% zj$8R=DWu~MZ)>8Ihz;`uh7@MVAwDY-4O zA|=d7iBwWjk$G%))Sqh*$J}!LUvxz|z@r@eBY`x)Djxj)_5C_FNCJ znT>Xn6(BZRz8A!L7>U@f)nynlGvoxQDeJO`mmKYZvL?rm$|qI#<} z#|IeBiH%@(V>>qJ6whF~&<0R6OEvJ%Qst|t?R;^H&d9Bo!65uVNprKv?HErmU-qt7XxSId z#_R9ST&Y~27k>4!;Q8GbyNiWO3p3XiM9sf^$!EUp_%C>Y`DA|H<# zr&vm2lV26CUSCXJ?=lv&CGYubRUBadXLL*9w@~mEM@49$`Fc(LiBeNxF|eYowCCB=SVkk}q)+8NUH@%3BS(Pjb{pd}r(K{>wkQM4Eds;)= zQ&gp+b$Gpft)sZQP%w+Wy!t<1|Jy&@soZ@Z22b}lLYU1|xVhm=`4%RKE!V9F^~iN& ztrC>=*a-b*EjH_cTU~?631C~@rnk-ZGUFz;$aUEY$P1W2&H!A&6EYjryt*^eqkFbD zca=;j{pd~R^(G!3z(lIcJRj`N9PqLFFqxh#4X+WWYi$YpBd*gho=e{;^jGONKG!1h z5>3u4^p}yBf&1oQi6axz?wD%Ybfk3DNZr%qRHQZZ9@BSm6b++;D2Ev@Gw4Z77xh*- z1?wHwgD~=fsvoB!a+!FM^N;na7s*-boG{>twDJ06l@-+nqsum(Uf{KSoSyBH6WeT3 zIp@0f5&0x7l3W;{l6)7XSZ~y$xZt)Q_>v4{*HrV~WSN&$QcgVyZ7|TVPh4#*3 zL!1gJ9JbVgQAib1jI9FE|Mj(k#H^*w1l}Vs5GG>3e4haC=!BSjfToY}u$o(HF~z*B0@MHXMdWj!`XXYp zc#xsYIJ$2%U^pq@VzZg<*3W}~#9gD=MgWKG?Ca(%Iw5HGj)4?pT2C$z9>e<6i z0c);n;pdU{C0qfwq^ZBa!MPcRHjXeY{}vEC#SD+5EJPQI(MgRdlZw!=e`}wpu!HFn zZCbZ2!z~(m_b@qNpa3F6Gj~j3vl0w-tglpAjPhv7?!#ue!%>0M1mkl1w3iXCr~36` zZ~-&1A~7|+`G=YM5r#!pt)b=E*pTf8tL;i`P#AVV0wzBKSSlN=FY*>)ZWFjcfI`u7 z>eMDuA7~C~8D2D`Jep|c63VQZafX8Ag@+4nHP-7vHC8K*z3y+jiAFotnisl6RGhyN zhCZ$>?clbQZ5d~(G-}HY7Y4;e7Sq_|CJ*mR4==fu#8)QBYGMV_+@rb>cMFT<%)lZz zeVV+an9M>q3Hb>D+E;8>NFLp28{YU15T~1n6|{upmJ}_on*7fKwD1p5%MnInyk@JK zI<0BylvYG(3@tgepcb_Wly)YLXu;{m1+xEC-~2?^)FC3-lIT0*M;Cgb?)eV~>%g^| zxCC#?pzo;Z9zn*G?c*n`$X!Q0eZ)~~0er(Ql(At5W!;juP=asZ64E$llwEe}cpHJ} z_R2_yN*!%a-Ax6gvZ6dI!my!RBR-17qN>Falk`8Wl`fCDv8U<#r2eqF{K^p39PAa<=^U3I>S2c=U_qE*& zOdgCY45_i)4QV9Bdy-_OoRe3ne2oBw7m`9WNq$A%CBPE@+f@24flmpLK+Den%66hL z6pJ~=Lj>}(Uwp1 zfn=j-%SouDdV+5$r z!4!c%0N|v?Dhm9Ib??DyHQ%k#6`GVz#}ob?;IV)Vo(G;Y3y2^LFx9chbB6Ck=9g-e zjx5?Ege;$wJ3|x>?i;(q9aY{HW}u1mZXwF-DZ3+)-GS%>za}i;Gs5oV;lEOy5s`aO ziL#9xk})=}QA6-yBO(vn5X9c*JgB}(el9OYI)yQ1w+NqHgkp|D=!;`Yf}K`&$D(1Z zFou7{&M5rhXeYI2VXm?Flul7tol)|}yW1lP63+eDu2~r*qBpA)$i-%I z>y^Jti;_!{l!ZwaT67oKv^yqEyy)%>ADMK~mG@`>-BOYPm)wPFCH0V(Vl0?Wi7YBb}3=%9BaLCl%s4){IAJ5l%++=8W}qS0-0zijU*ex*Pgc z61*xGh7cBTN}TWRl9^#~>dFg3*~Ad}0KhS8Rd6**7(Il_Ag$$ODqC(1jkph(9FK>N zR@vexJyvCMzgO$0hvr{n@-P5U^j>J5Q0LlR(C!i)-e>I7>&ZEO1a*Ff#7?dRT z_`}G*({K*u)tw?ObhPS0Z?p^AX+Wb+$aI zMw%^;)lJJl>}7*!TPHxba@?utP+7KQNF_Q?-i&NfX^X%^0^0yEE0jS@#&FMLTP+}h zJhIvUKzEML6kL&*w_~*BQbC0iv#78e?(0a1epK7CV~0C=ooVB*p0-;_`Q=Q 
zo_F_+90BD65`XdoDsg(iU?a=ENxFo+2LQSD=yf7;C)tSg+n;=vSjk7I${!KntYoPL*EAlw;e>AhB%yCdt?3cO|BMpp5P!Zy3H`Cw z(SD)*lsIiHlXcJcj4rG`(!X)BYZ77BdZrb{1xhdERrx2V^*@Qyrn7i> z)3Yc>u_4xv1on z$(bVqyF-W{5}@=#;sdC@G3VV_%zuw%?xaVRD4JaLEib*m<*cZnX z;>W=+3A_ow(tlu5X}}cb);-{mXZ;~Hou*M2so}7s`~@LSXUbqgRnoZUVhRm-^8`}} z`{I~#53|W%5cqQfd_7r1>EIkyS*2K9Xd_Q!3bVA!l>Q|0FME@~`1GeqWbz(Os7f05 zTuh+>^Cy@>*k@u2QbYJ#l*8(S^vDt8JG{&`<>2oz<-sDrfGLARP7KRagrrOZPw?Zg zkZ_qyc%rTMPy#<_+;j1R2GGZtzO`k|;b~Mk!jmqQ)f4~Uiqozu+Esi9#g(er^!c6` zN{ZUzOl>njsNZged_Rg!+^-Pha6&KnJZkYb+1ViA_%a^tRcCo5Zk)LD*n_hCW1g1& zx@DIBBxdlJGct`9X;E`}&2J*bAF|JmP5zu7CU-OTc|+8buTNI;7fEtk4)Z|Q#l;9; zVqc{mUlGc@!(Vy%gM|Em&`Sg;WMOZXxJ9=4WMo)3Sr+*K4w>xp)OrCRw%$wb(BC9t x!xaDn*A)Dt!l18WH+p(R4P6&yWSkhd&OcKX->?9;H#&!-wU@|k^BabvQQhG){ zgv{jt;c&R`0D*Ad_kDz$h9^8X-PARq=o;k?5e4H zRn_(C^?R>gogH1Zs-yq92PHpr!teW$&-Hg4;Va<^>(={xx^L8{`}IK9p9_oz{0ui` zn{v&g%@S_Twv4s_)|?HFw~n?-sBOG;ynVC-;g)P?u4}X_w`z1%u6wjQw|aCnN(J@S z?3&!#(X|q8%dX3i&&{1TdS329qYsjJXI9IdKYD)dg3${k-j#iD?!wUv5niQtj|D~_@@&8UkNO&Y z?H&Hni;Ro()%v>Ae%J5%s(5w!hB?wVTJTNQ?^)LGX6tv0zEwZ_bO1G-`zN?>JJ3rI`)9cCz?B^6YQ(+^_b+UKb_Fr&+eRe?iInWys^TP$;njGj&h&98tzy%%XYQ$RM+ThwNpdEm8 z!gWm2APV(axs3CG=v@CLYz4iz7b*e1BM94bBrvCVK>94f{U z+X{EKL&e(=I|uGuhp!Tdod@?Ihp&=|X>jK|e3e4%0=Ne|e5I4FfLy2>!kuX={t&<} z0*+(yc8*xkivisRJQXx!L3;qb1h^{bxCOlw(CxriK_@I|FQ9$ESwXWFv>(tA@K(^A z1q}ll0qzQ#x1do#W58cQClMR4Xgh`QAl#5c+uIQvh8uBcYa+G-?x7BC3y56?cez8` zB4Ss-?R026jo6iNyByjcMQk_R9*4Ha5W5O)uYSVf^clqV!CkFC)`H9;b`9LMa1XO6 z{y2oMgS#H?2DtqW)gO=8!{H9VJp%5aL*plq7WxxKM?J|EK4sD95ON=Oo^uCcl!Ke} zC+kmp<)l6vF}_E>Ile2t zooOfaIf(IHkow2rPQcALsn131v2djONjTE_@lNXV5PJgL6XBi&cM9$fC-wP=JsIvP zaHQ9ra8GkmUw{~?@eKWi@Sh3CHr|Ew7oBbc6<>@P+x2WoCzizJc~1IE5MvE5kaRxd zMQ|^6(qD=g^So5jUxv`#a4&b#33&zFE8*_ZUmw;}#^{IlclK>VHhyPWvD5P!G+UdHwJ-CZ2L_-dd2etp#)uosNh@olG? 
zMtcDHfZh#Aw*|QbW!|HINdGWw2x^@7>L1ZRitu)X-=}{}|2V?E2)|#yPyYnMeF%R* z|D^sYg!>Wxp#Ew7GYE$e{*eAz{c{M15&p3LdHo*{jv)LI{R{dR5so7KQT)=Nc-zo8b zfNk(&iC-!4FTf(WU*fwY{zcdWXC%H`;$MOl@N9{!IKzgFVkK;QnK5`UP)zlmP`|4IBh ziGNG~v;G%}UoY{0(*H~UtHf`R__xuw`{4Wn+b=NMTLHKxfjwMcw7;6+S_F1LU?s$Z zaIFG+gurN(wZXLu?4ZE@1@R8JPJtZ~7;UvKxK#o>EUeQ^B(GXzFUG6WYESXyAe zK|BH%71)@-XkW(Q1_X9QVE>NzAl#6^G6JL3ISe-@SGZX1+mSMS;=IrN#VkflUkSuZYt!euTh|0@ft*auBdX z0y`!!KjO5NZxq;Zfl-ETg1cE@Cj{1n_$_dc6xfWwC|9F!j}q8p1=fQ2t#FSP*sQ=P zYmb483+$x8S`ohuE+MeT35@cWgi8tR@d9f{T!%9R_5^`ZCev_Z0(+vsIuSntml4>L z1V%X>hno=CDS@p*JPVf-*c}3+?B?Mn1@>ftaWpdpce}uzBCs`xn{WkzJyl?95ii0` z3+zsTtwa1M+%bVYOmBe1g(zZ33h0(-8& z&O!X?aL*9f^8|J-;?IOTEwJYc>^#Kpf_s+0ULddsA^vQ*=LqbD0;9fpF5L43_9B6u zkNET9ULdd+3+w{KUkLXifxSdv9DBbQ?j-_yslYBo{H1U&6WGfH_7KGHhI_fd?iScZ zh`$2vl>&RYz&L8Z2kun@dxgNZA^vK(dj>hz#g81v;UN5j$3G7nD z-vIYUfxTK_+Yx^g+?xejf4@{5^2*71$dD7D4=daPJq`8wD0c`~z?w6xf>t7DN0)a32=fn*}z2_($MA zDzLW*Y!LB}!F^m{Zxz@O;`hOQLSSzb*f8Rsg!`1h-Y&2a#6J!98G*e+U^@{1EZpY= z_D+F46!Fi){e!^XC9um7{{q|>1@>-%U5@yd;Jz%d_Xz9?#J>XfRe`-%Z$9MPwzJ9S zOEkCoeA?k7hPF4I&ZII%zPR5gYWotoO!kD9$!iBr9G)~&M>;xo<+V(1Dr@ABB~i>w z=C#SRQ;21XM-oLXHF?x93)<0ynK6ndn8h@*iK3xvxk=r~7PMoT;t|cT>(UZda@b5{@|pbD zZ76o6Sez>C=UvKnz3uTIJs+jtp6bD_}0Y znly`r9%+HQ0XwvVM)61{KLIpV3$#P2M3%Aiq^a%MzrQdi^5K!tKwmgK5*w7q?wQQ% za7TA&3}yIQK$sKSfrOdJ8K6gI1~@avWOkaV1*yWpM!PnjHcTU*GUlrhMk2jr=TjBL7BNK7gS zj|f3Eo1$Pn*1)Pz1=%f?O%w_l@T_9Sx!!hogcZdi0};OM8#2dPcAN0ecI^g3&rIjG zYviBn6XuwqUKVmGq9OqDU1dxaj~vSs3^yUs{Ob%ePnMp$>d+9{I509GRqsy}SoiMm zc5PP}DD3UouJ~9ea6?{_cW4J7E?|RP@2+DB)6n)C( zNEG(%rcCXT`XszVyCz}ktjDUY%orp9(Y?wjWXAH^(M&?SDw7swDP|H$ z)VY7xp}Ctr9FFvbqOn+%^&Y}Y%^JN25)gfsIsmd}6+2ZW?Xc=27dWWZAT=+^!K{(t zp&{0LIFmntHlV#!BBCp_gVV{(#Ki?|&veq*p&iC+EwZwMM!}Mh?dp9CPu!f*fii{C zdeq!CM?&FHUpO`#9IDVe*-(g+1DIcb`c$G}u+b28^Bs4*zT>11DOGS`1( zIsxs`o1aV^>EClx@9souA~~5i3jIaXF#2<%MqXj)Va^!g#aQzy_CDCS5g64nk*77E(UjOP;hjx$_1>hYIarxGR@ zJd0rS^{9S-PjhMGwKwd#dhhkr3o*t=_gX+sV8(BZ2}XF1So_wU(z z;PAB%->2|Gk#U^szbL=8^(;W_jn2&Voz&nz5C_tp3Y=-!z``Vp?VfUqE2y$H9B1x&+oSe?<@f1i(JylwbE_O0|)QBhZC_h#xt+fG(R56}SoWS$f z?%p5Yd*D#~@IJe(J%Li^J|n>n8lA;tsmqF7GdVd?K;niZQH#qfiBogOQ|YnNX5f~X z&K4PoLxAFG^$3<&l`;`A;&{PFBYJ7A9kmp?VS@JErZL8sZ>k5Ey4=JlZE(jddJ^%gx?^c69$p||PncX!PCUl=&#>*?B``6-N{GjP5$ z-=_zw$TtRuzhCSuwI(wu=#5mVojAvVSFY4ycR;|mqtrBQW=oy<>0DgEMxoS^$xlre z;|27$YH^a`n*Q!T}IFQpPkL9zI3BCU)G-|KK z&HXv(B(iui+&dIYC5J-8;Xd@s%UDw)+^dho!stv?O;bzXCh5J=s-uy3I24MTiQZ&1 znoLDfF=^ZF%UfeOq_fJQq0mSO>U<@tjHoIHMuvxsh@nfB%S_jp&@~cH#Eg+}beVW2 zoy1#A@g?@AqOpPG@W?Q0Dz(8p!?+<~LQE+b=0%i_@g;w+g(*ush(RbeoJ>X2iieh- zx^9IKPYQLTx*m;1^t4bnx8$v>tTEg>Kp`0#iLg3Rjc0sN{r7{k`1{RH=u1(9tBVHI zXju=N4?*(GauXyXVw}>`(O7bb7@lcEwP!S7*wdnz;K2Zx7t#yU6SS6I)+?zN1zM5b zRC;7MmL5q7UhTx|O5im!z09vH#>(T-NT_$nfQC!y1A+iMkTXvJ0%tx?Z#AN0XMN`@ ze?0V~U+(N_uBH{qbY>0u_A_gl@nf&t+W)T?{%Yr$TL{7yAK%z>emT2y;4uK*L(oQo zK6GtY;L@i8q_KnFLeS|K<=36M?^ipQn-h!zIUyYmhm8?M@tI|Q$wp3I^59@(Ae0^& z6dM1AP5#CW$u+l({9@dP!+eTBHfcu1IOS*yz0RfX8bnSw{F9+-7gLE9__07|V6p~o5;Qd$HX z)qG=*-SD=ne=zfloo8k#FkO7NXG02tPyFT1nd3HAncUHAZvqolxomIYNFT;Iy{SAx ziTv1+M5Yf@O(&-Fse+6*bqse3{YOqr8D=i){Ekm1sY_WHfCEkM#t-MAf_)*qPhltq5ij8?#6#njRQPii%s2nxl5j1BqlR6&X>zplQF2 zC9{CtJOYEU7*C9)dZUAp(8zE&LB3Q21Q`^#I~wU@q_>fd#$l5sLu@P+ufokdW}!C{ z8A^nbmTh@-SxFK<8ll3C#3RGQy@|ot&=6)9MO>G+cf+edb93r#=p&(2GAyR_nR_Ur z->1g`=`tvdV!DFvw8avM-r>kla?ps1rE6QAG&oiQfiMv;5<`hW!&oj=S<~tWD;*4_ zMqt>8s$7YdMupU|WNdI?NR6D9zEh}aX-p~&52qucVWrENe>%CgHbVYR!F5x^(GQ} zWFV=lnUNJ}DOAc-uWsncNK_efE78&cRytw~N74yH=1EqfQm7O#S~{H!4~CVAwgTiG z67qt&!)as1H1;qn4TX&G;7D2w!<8U!D4xeKBV-IFqTv;5X_%FcjOcnO6;qS6EAUPu 
ztTYsgh6at;ig9mLTB^rl$@JigRT^WZ`d~C1O~h7A4Gyr z4y;&9hgfM?$6Rg18X+B9@)1(CWtAoO#$!VRy-AGV^-xF@#F_7Ma^VwSc-7wb{SGq; z%P&NNMz5Y8iYAjWv9-oNaNd)D{)i7O%d}iI5e#UmL^zR7rp0u(CXTXYf|1dE3{IG z3>&?JgW=&=BBG`PR;J!?9QCG$uuPFqCiY556(bQ8 z%+a?+QZgwtcKhWW-@bPWdVcA;8_fS5jW}!dRSE$!WQATMIS?DcAcHdqb8tb`MCHn1 zm+MWXLL){frG{NAS8*2B)gYD$BZ?ayw`AS_g6|_938#zUiC&!jOr#T*@w{^N=j{5E z=~x&fP{Wj!t3PYkKNt#Qih4x7_$RIy{f}p{E^WPn&|oS$kg|pnk6keo3LwEm?_e}K zl+Z&mc*-2Q5uHU_wT8Zc>o~GxT_vC^hb!4-0&5*%Y{yAR$2PNam6KN{dPl;E@Q9I8 zQ*ppiBci2S!&yjCB(b4oD3yv0s5z4}_kkJC1YJf&C1aEq+#c}i z(t?=5vM?|N4=2@1!wS+Q(mNa*(hbX^KJ$+t$4WLliuvN9p=jFL&6Hj;mEr8Q%N*xc zyFzbbBs7F!lA6z1wt8~CF)Y`mQ`W@BvekouP+}y3ZD*>tUHV;EIYl?fEavnRiBvQd z3Mtxo?Spt=I|g85S=AMKhez~ibRd?L)tTk2D%TrI$MoT0975qfh~=y*i2y8O- zvJQGM`J8*@J+_jD`*0w^**Gue21cZ!yku$oeCoB2P>`3lCV3}kKCtu;7QQ<3Vxrx$ z^iLOi=*&2u&fUdJOcXT;D3&_yO}(LkkSj7YP>x_fqbm{%l~xrDcPV!ibyJ(Ms^Rv|snW)Z{Pq-) zCwSQK0CPU2w65YIvQNqb7WRNeJYZ1|Sgfi6&W@*QxV_(~8gB0fDy?_psy2xfOPjp+ z%6MficmZ>zHC2x-)Ie(UWJ_zSN*&LZ)>Vzt$)L3`(6S~UXjuaTEo)#g57>YQY|sNX zC7dVskC=mF*$od=k!XdYm$ zb9sO{m+t}QT9F5st2rKEuD5uAxgz2L=9)!mOZ97TRuyVXNK);~gpR3U7^sTq=to*7V!j!J(`2*w1_u2>J5&S)>k!v^I=}VoEI=#I>)29Gkf5b znwvMZJo8XGxAw`diHVxmOuXgM6JANGR?j3=57*x(*w-qWJ zLBQ~h;>w=UfbJh{(s}(}GY_o6wHnPhu*QGw_}7u{eyT5xOLz3HJKAw=B`$}&!#5hl zwR&st+lq_v*6QnU4N@B}(c1t>J1)=Lgx`**`kH;CojCBe+1!lk5>LdcpSJC4BaegP zsh*0X_Z@PKb(faT8pks{?#siL*d)xO$q5{Bx9$b-xD&wXX*5_p$0p5*%0vF!ajQTo zJFVj^=MJeu(`pX=t8>xi^VxDXd%Jea_1Z-mkLX7|3rdol-uF!I-K&oohY#H7ow&z3 zN$;fIppJ?k0v5OsfR&e%)bUsxrt#*J z9kZQN8bU26J7>G5wiSaXyJiEkzIf|w6a2Q>Rq)$qgN18m+huej!p^n+E*={{}y7lB5)U|qc&1}o8|5V^E z9J-9JnO!r!7QgH8yZ+=_JRh;~HXUX4w$sf>*?{tZNDUiZHEb%LML1mR>}tLJbknWG zd^6mZ;#RcPk4sT~;N*_|ifY(li=&XGV4-Mcbc4rub+XuD9_hsipll+=GqtPKc6OZL zlw57d6)$4%d#TGlD_e-C^QHFkf!K~b7~VP@+g32FP`56pIB}pIBYm9Wm18YCKIsVaduUXp_)8y(6g@8T)=G&r4Fe< z4iXE-=D)B|n>;rG8=;-2WUVu=>CACCG;4kpc}z~1nO~##b$Z{R_f2~LO7C0rehLq# zZq=dZ(Kg~obn!CV{nEAn3yB%J#d9v;DIOUAP3`tv8I#$S^qfX zX}p;{X@wy<<3YGqxVAATrPw~+0Z1pgeY|Tdz!HG1n)TmBUiR6TY({u`<>zXz`r zG>+p4yl(!0kRQ|g2|aFE5GtA9L8#Pv75EXntw^Xb@%!+w{hR07OF=o@T~HL`kzgTP zX;q>SFE~8&BN9-K^!nF;G+X_f{WDuDNK=vBp%02d=~(pP%rM$!wZ^ZGFZ%_H>e{<; zHebC<+~Q%FKV$WjVxNEJ?22kD^QEZu_{U%8y>Jd^mYPX4;W%^!-VypyOK(u$HH*Fo z?1!BG6LRA$o-AyT*M)Z#IW;CVDTF1RtXPfu8XTOjK>n9Ro^AJm#RC4Bb1NE9ojWDX zL}vcd32QwJ0YMwL;en~hV0I|rgqoaCvlF7aRV9K}$gh~GwThX%n8;=k|M8keqN~R- za^2BF|IS8AA0+Zc8B_nNnp<72qPfl+yKJ83>i&slGr~TM?^NDqiyk;lS)*K`xoBWh zxqF1-_fM7D>nQc}= zq1w8^KT~o0K%bQnT=fM$vx`bTkIQ$U*QbI0K5v5a~x!r(N5xJ$Hci%(yD zH*Rpm{f*6|?P6MX;I*tbdB}3+?G@IfW?j;ND}(ToQn`}{`f>A%49SHHhY>+!H*kz6>)>dPRwL7L*hs#jvEOu3zVzW&!#@bG){I2&KYRTM{m6~aMFJgI`U56M9`$lzO=K_r(62 z9JU9p@EUmFZx|Kx^~SnyqRpHg^PPmLf!xjDzbj`0!ld+#H(?;;A8)sYXe}@xA*qMd0H|qyS#|Yv zRkH=YQ{gka;gy1kDO{1Ho2v-<0a{n;;9CY8;Cyc>s2fKi1*PUJZrv#bk0!Fyc+o-i zqNRfrW(x92{$#0%9jWw|;>nw0XSJPgidznSj&aZ|p$+m*y8Y~!@NUlW6eYVWZ7H{? 
zN*VWbnt#E=&HtkJS9&x(UA-vZU#UfI#VQ5W4SpsOGeJmKCtuNbvcGC3;#A&kEzN-- zP~QBYeGX%^0#4?*4z4f?Pxqf}D*8^sqCDAxSBLSkjuEeUq26>7$`a$~z{%!f6J9mg zC*4%U?-t)|Q$ba_W&7PclYh&mORItXaXcrfiR zVxHub|1Ml+eRuP0lf1K*B4#9J1L?qQi@ZU;u~jpF+#?rS(F(B^7<&_8bx9d+Vbkz; z&`8Tn*$%HO-!Q(jE}HCzELL3fCWI^WPs#u_k+m;Dl4l-BU|ym8!15)RQ+m2T>|FfU zhp2_cF5C%a&$(H*g2i*Vq$@L(#Qps=;JoA~RK5moejJM3N$LV=$hI#Adb-7L88~#0!8$<0)LtQ@}al z$iS#%kGZlBhi0~xAAzUi5#cZY$n%_b;bg2t7J6@mP5D^1{YS&Z!Gaf za2QLxBRV?FJBc-Ibae$!hX-*Xsn6%=W2Lu+EfvgdwhWeaxx>CAgQLQnj?Sl!9t@e6 z;HegKVghrG&TFP!_E@Q#%O>h7P+W~#R7-@Mfl^w#)Pik}(}OTQ#DMu1CWsXy!*9ow zkwuoC4wn&A*6Lp73}>^HP17MZjZ)}Cz!l>XYaJ)fVMw)-mm25P^lKIS+b;>P=KbXU zi%^@dwUYa3_;+EH7?6MGAS{;6Ln#Fu-|>%TKP5q=;0XvQDS*)*uolf*$GBvU#qzNx zE|del3WI>|nn3^-&8=a;T5A}vPG1k+Pu2_rHr(AZ&oE$PA_D|9`;KklBy!W7I$Cj_ zb5Az&@{dCW+^3!`TlTJ<^l2<(CNV|h@INkuo-kASJhsB;Q%7E6^$l$tsWWhWx?`y^f1oB|{9IkTC8Ja8r z0#{Ae8aZ=|`=$#@NM*#}SU58FsOo$mX0~bNm`uoL!%)`gMxqg?EMPFLV1m`aTq5t( zTM2WXXR85KhkBk311_o#lpVz8pz@%|8RaN_;Vz7!j{>^8Oq^+Tayr^3IuJR-MVicg zkdey1uf;Xg5q04xiJtc5xAi(j1E|fi-=gemz|x1FkJBw`h;R3NuJOzj)l%;a_K%`; zx$nbJA77`$Kp$VE)Z5^9iu||$zk-1&F6;VxZR!Z_b+<<5^DBuFey~DaYEKQZ)Wdi)3>upecy1w3 zmRigjbEv*cF|rL){HgNUV-t5i8|Lsa7#@A02(B9IsOOCgno(j9qsRWOl56J}pf5ke zt`df|cXa>GWvpQU9+ZF00h?u=0r*a|BE>!P{3EkE#@Zc-lo=N6Svxz`B1=ZfSPNRl z8fF-=U=!3a{i}D(1~Hq6g*iZ)$6G8jEsaw6Y%yl6uxKPRR^4|8XPd=5YZddX3FcWV z%(J%HHkfCt6UWhy+8mt7S1#2_2eF8GtBQVy6M(LHD@^e@8{v7Cq!UryPJLEd`O_!T z1GuMg_XbiU@Z{KjvjspMAI&IquQGcU5w|# zQrxbI7m38SYnZ+2QJq;;=CCU>_K8JeC^l#2h{}w7IH!WlgOWMw{p6_1jC|ofU=R(> zmpP^~BVV`=d*h z0}T4m%0Y$m9GQnzW?&HIlLvxHLV!>D%-(U%@s-R(Z#Sm7wVoTb*u}!n9LDX*b^hp z7~FIINFixy=kjm_3#|Bz$`NZWDOf!m5hLU4(0!6&j**CrI=qG>oa~2n!r9gGNMtcX z54R|9g8D0P%aCtN?a+g!2!|eR_LS^HEr7HbzGBBSTw zs~zR}_ZThic$%JC8W>&2bce}@m8DIXGmr)>W=$mx*j74V_CQ+kEch8P%Rxi{1?1HBGN-l$I7GJj)EOY} zA)mUZi5xz?6Xh4MJm#M!vZsgKlS4@tzgABU1?Qa{de&U*YHz|`mbEiCZSfhDybgC% zICe!f@_8QPJxe3Uan625(!I5EIUOc-dB7L3wLQ}zbmJXn$^^i(%+6;>~-t!Hi$wOoJ<@& zX<@m54wv;}SmB&b!iTo`86kV+7{f@h$1zzd_IS6x8pBB)!^ui}d<~{cl|3GG?C~J% z@z&W^*yC#x_hN7{mrz`f6GzK05IhAX!m!X#)c*XSWZr9=@|fOTCL6rk3YX(r%JhUa z>1Bv}m6+ce_bM^JN$ypm$}W#6Q`{h3T zuHLsYY%l&pPGG89<;K9a@`qiamx4J;o$5>yR=DsPNjV?pTKs7)Wd0`Y*v&P)HcpL_ zSKjPcvTbGQ;KGiFCHpYShHc4m{>+h$dk~q+l7(yp^;Yz$P3C2o0>+*?z5OyDCxH=K zk5CsDK-GwB6-H#}!|laT^xkc=tr(GYV$qy^;zlpIFMOUt&;4w^PPfn_EC*DYy$7=UDgO2pIt_M5{AUrFOZ=@3 zo-TtOf(nz}TJh8a*W>_iL|i5nW;vyG3O(Is@5<|1HZ!X+F7-1N?utVo{Rr#$jBD1vjfX)}ou; zxyVmz(G43Jw75IumJ7%3vl87M4px?I)|U<^;z$`R#^|t08dc=pl;B;g<>0X>TRRqT;F?pRLt z9y#oh!EgyaY$W)Y)*K692<1K?gI%DO_0(xQ?cW>OfnKwDJo0@&G z3a{W(UBV(Z8x&2$us4HR`$MY_$@HrWeB4tC_8pStvof*?50{+wg+>NO1|+Y!4@8_f z$2mlj2>0Qum-M(_AAEo(H*jh?ZNcNT=**DZTQiUj*TA959 z)4q|uaG7BEv^m#~U2RvG%46NwM%+0wJalJXj<-?Kh2i3cpMjax88+GQ>MY)^#5Ojy zE6Bba>RJk?9Ls65_XGt49WdV18bpUZ+joe9={cl%T)l$n8rX2(4^|?u+F*(28f$Qa zd!I0Gahpp#)qbG$5KnvkfhX9wAE>s#hH@6JBHP!smitXr=(|H{Wk2sRbZB|8mPCBJzxMp~^gfhJ7@mPf*=f>v} zWHoi~2=YzUg6u3N)^QfYEyA*xSVX^D#F8R5iEv(`nVUWI?jjEl`}NgWT!h$iDiSjn zL0sy_>HH#6ar*`~`?Xt}{noh_7jeB%m2j`0Ikzm_hco#TT&lD7_;Jj?qKn@$U;)27 z8=$MF#$o(9o2biEUsGd-pa~iEnrqQfh1E1mWWRUk8;GZld zt*c&2x&<{<+>ybKMi#hvxlz%F6A`!oDTA#@vL!UY`vrPhW-iih*uU!#*p&ZygxAcQP>+T8PaV7oG)Ei2n+4unfj1$GzL&)Tky2|)AIKRhlm84k8Xg7Eb#FP zA`-F)w0j+%-4)ih9qZQ)?d_RUId>k`F8_`Af*62%1CVzx;^=;)dwHkI} zK%D?h`<4!O1I)*uf*QmLGZckpfdG$Xl0ZeR>P->2aG&tus*%s)udl<={q*Ml6_dJoWyUzJ!0j-8mMO zzu)aV43djkxC4!`Eh=U;(+#wa1^&X;u?;@lZo0uQ^)0e}F^~2|-0h3iwofx}L)G`& z?HaS+m}di-)f{V}fh^F1Z6NLj5?0Yb5J1@t7a%a}GrM5P_;vqf*yw@tfOwTvc5iT| z-9DtN;hNlh+?XnuNtCUT69P>h>V;0_iAs}a5f>B|mRd!mxp3`k!U-{4OePh0qrC~a 
z<}}gIsySPDAamTvhRqa8)gVluIj#uIV`vNF$^vH*R}#@jo1BmeJgeoSSr6cLXrJCx zB_Ay_Yb%bC_3jR7rjDnL@zKV@M6@fl$nfxSIJ@U68ik^k!u70575b$? z^BFP~-$02cBZ(2>Nql_G<^-6@!LbRTAe`W6srdSNBC46>2ZyANe)FO&bJj~+=Do6t zwuCu&*GJn>6>X!SEiSt{4p&3l-YMLDY7cz1c?&7Ae(&Ai1bya`(xyswY+nK|OE47;^gT7iw8cr$0ccr)+GD!d7s z7w@@kj;)mnkYD&v-wwfeGtUOV;m_OnUjd7t9ot!!<=7K!(4$u=Up&Y8CPe_>0H11?pm+43hsRR5SRQ`p0q!TL85s(aGsYa%up{; znD=xQQ3#utD0BC|XV(&?S1m*0ZBDuocZ?k`b<3Rfwt#t`U)2J_7TE$1c2p~x-PB>uda1*__f}Dduz9I7x3;;sl0og$ zsvPQI4rUma+slDV%`C7raQO39J);CK zz{7BoQf7~QaO1RV-6uFxcQcB3meUM%se7|)4Cfr_L`#^p-Uc!6yV;;N1aR{*7PfG4 zDWUT=DaZy{b2F7qYMr}D;i@+?jar;0IcjA7wy{jTZDZb#*0hb#indh`N#^Fc4V65{ z!=M;gA9XjwO9?UqaONSi1sRNaKUc+Igw2cb+-lqdV~nkjxiNMK?}a*Z){8px{$myD zge@KF7+xPI>U9mTnX_KhnfE(Ys1vqysAKecf~eQb&UsOtIqOB8d4E`iI$=wPI>v=F zM7^$YA#>J?I`jU#3U$I9)H$KyDANcc_H-gQ0|PLVaA4B%Fe0KUJ+Z}Z^`Pq@?F z)0L!|zGGE(Cl{+>+3Y%?O+QCQ}?5a%}C;a62N^G{n?wy9f2u#)OWU zWEJ%yOHVx^o|yv$742q$Wvz@?(9X;=H~VTZ8}r-K!gbAFvcO*}NoRF@Oj$P4MVFs+ zCrP>*9WyuSn7<*?u|NlUOc&Q#q+^OV>CQoltme#ZGd#4C%dn8f-BH@MJ=2BaWX`_I z61P|q%IhvOd0cS-3$F*({NwO^i(k&{v_|#6*tyRwYvv6ueqnyF4NtZ$*dH_RMz)t$ z0@rmm`K7*!meO_-gYP^9(9%|A9qIV;oF2q(Pq)d}HgnLst*~0JmXG{tuw{$*;*z$j zSj^W+g2$s`4-%9DV<~L!b5xEZ!#9!HmK_$Xmln)>E@>gQqI|&*s}zJ*Py>RcsIeIV zSxr%E6wui&)3;^jCpC0XCvmqOyxD%?J?e<5I^ufxNT#6Szw6}cHhl5|A4T95b~wkH z!bNxX;nW`5nsRTco=K;118N?h6;e+-WEfiIN%uIu7J+Y4)qeCAU%>B2 zq=Mv*g11Di4Gg$S$MbV_uJ~kJj|Fq%PMAirDAb6 zkR{Q;Dau&0{8V{Jyq$>w`BC9uRUQ>(HkDqd%(Cu~!>9I`hR_P4Im;!RkODr%M}W%9 zCHRCDH|90ZZ1(0-sGTRFm@P&O*ZV%1cZc&nxzI<*B2e2>gPJo#bIOGq!@!LP78g`^ z%)6@sH$oPHTP@k-WEIGEhYK}^ff~<@Er=TPUQ>Y@A&Wrm+#0f#PksTvC%f=t82Ird z&w}_d@53wbBcuX9q$>Tb4URC=|aIU8y}3*6?>e!`cj{gH;-e<#~t*LayZ8)`objC-`7@Fi-0&6?^$ z?XMn6JdMq_J1}Pi>-GAXv!Uj*K+&W5gfCO`v2$;JsjqJS)7kvG#_i16Q1e;faURVl ze3_b$edLWb{~2t)ZLnAY?`2`;Y^eDxaHmJ}316n>4>sETXR`Tqtu8ZXL(OM_=Q_=A z=H!Fue!`cj`9qC1|1_Hq$G`a(shP8(=Ci=dJ(^GWGBtm=(dOR;TpXUCcdeT_8)`la zyuqXSgg4auxecC?YJ*4Hr5`AKyscXza2I#gQ(&m|e^GKiS7Au_PQ*F4BT_%e|pwpcPek7TIJFk;Sz z$iM>M^&kV`%S48O#ggIqBtu;W5_2|01{V0S2N?)oCNc~zmJBZ-8R{~Wn6n`=u)wb! zGL#J^!k38*LyIND3rPkz{>^VNF=s<$V1Yk*kb&@JBE#@v$?ziJRG;C*oDGqI1zK9| zDU&ic5Z(|O<~E$JsOl*~+WyH*;l!chv~J{!4O>qyCNbdf=WRVPLqjxRfi(^d%GML% z4bfn3>#3Rs;Ri;8mjE+|296EkWkNA$L&RW#tscZ6e3^(5d0@nNDTz^+9mSjt5rYLT z^dJV|OGJ#ztE3~^5HYHash5!!bs1C4*$^#QpwELAgf9~ z*n<{?FB2^W9uO^FPFmDuRxxKov|xeV9<(5QnP@TifN1dw(xNWgia8si1q)p7(4yR% z6245d7_~9hDK<249Bv^SqgspJ z0~9SSy1?NE=4^;6ERgh|3gJscm8u@4iYkrL;#EMbK3XtmL$qLloChrkZ-^Fi+n!ak zR_+rZ^H1E@OxnGgbb!O3x7o=IcJuf65ngELHVYhgnqM|M32&(RbDNzzDg}hicg{>S z*#3KgmD7I5-10IxnX@56u)vc%2toJ~5yFEH?4uVA(&06vLtPdYb2dZ=7I>xy9SCoT z4s*-HzDkCuI-?M?&nPsiBwkCRz~RqZIGDkvMP0`lSm1>YEy_J9;T5z%9<_sEI|3d% z7$_X}V!pi(SM8)}pP8Q;&WVjisqs2s;!tD$!voCP5M5Z{RisN70zQi_azcVB4N+#k zJ+^aFrt-u@qm+3)aC9i+D3SR|#4-($hy~u->O~@^RFEhLz;$qiOW=7h&)kCKaf|^6 zFSzI0wb1hhJar5Xf8Juj40hYrA7tA)_|URg5Z+MB=I%2*T80A}RV{lX>T_D= zyhyL!fH@m#A`5)lqlttsUlVbVqpFE-ViW6{R$$JCn#clQ^=KmD%hyC4_^4{)o7u#= z`ZDHhsEI64@@OLA%hyC+7*HvKZ($Sbngn3ZhMLF%KlW%M;T2762H-ll!bR{rv`mm& z?Ce`HtZkmHGeebKH8!BjX5@ubZVd(Aic0NGp$yw?tZTD53;dRCY(qdD>A>z#LYJV0 zRZECyp}kwQL7VJtz{hSP!*&zv+F;58f2wIBp-a%j+Lvf;I&H9pZ+EqjVY`KO?Neoe zCcMDPmq_RmwD8DWYn@c9Bcw=|j8uFWOJ3|uDK8wpQr*U|v@7I~CiLt1!Z0j5^F z)~h7@9WIhEY?GuezGi_gha}REg0Bf(0+M8ENMiH#a?|9UE}AfG)1)pzV1Z3FG$C{e zXj022mLM!QN#5lm3Bxu?>XIB5&}v9R=n{}5SHmXmUEj-1mv_79!mv%3x^|Ycz$G`1Y&7&Y=97abV3=}?y%VS!i;9SB_lIy|O^ z4%V@lDBZgrX`^S7`e2(4(|LURmrkHkHi*!Xdi4+5VtIQ;#_zKHC%o)@Sw zXF~*F0fPkS;M29yw9w{ zoUlbW~_$jdyiLr zHP1S+jU)OAbwXb&Oc|-|n!)>+GCAx)!%1x&pA=4>&~VTj9jUtdy^u;|ao>h2Zhjn? 
zSV{yJI@+@2HRH*g^-_a*U*S-rMNX^`wn%E^Yp8M6Xpbx?W6zisBq8WO3|h2r)3k8^p> zLwo}D&n3}Jv$bhm3W){Y#imP_YR&IMDG|OT&EHki{0b$ti1vSSp7t}%Zhu{BjRijH z(SE|0r2Vz8-z^l2X#S_z{5kX@(;8|%3w++A`Gi+A-bi`VE0*Kcg+uGHoWcXAK0|}x%IJ_CX=b=QDS$j0qj`ndllFb zvKZ_>SApH-9pAop>hhmB*fG^SPV7!JfE`QyxB@#u7K7dAiQT~uocH9PKjQl~?SAB7 z$5eBM*gduZ>{#mO71$91uBD9tu05Mi?YF&JK){SJk)E9xC4gQnAYjIrNY73jAb?&i zAYcwMk)EA6L;yV-FiZeFXoL}l>Dh@p2%v|=hcZmhPP~i&dPuySVS0Ar6$H>j;!cL? z*@;&YKo5z#7^Y_@?k0d95^G4ahl%u%Sktz76X$*Gyopz_PI{=bhWhi?xtAsBq2xY> z>DATF#n*Wb>!D}YQ|6|Cc`g&_*@=ziEoPZakfw$)uV(G^yk6dNKD&&jh4(eY zgr3)nsmXmUbJO$6T|>eD4qp225aR!BxGhBgKYHU2BdgN`tJm}fS2B?v65VQ>(ZYGi=XxqwdbO$)wo}E~e+pXzpa_?txdUkQQ-l?${%a5}7yv+C>)_NHe4>TXn zis_*@sln<16X|&+K7xt#ka&<`dN%rp2%v|=!wl2w@$W-4gi!5ov%pniZEBM%6;<#N zDl0B)bRuS(Jh>vdBI3%sZ(rqOD)LoCY=kR-6%n-RfB&xl1>R@+Fe6ZEPvhJ0#mUKR z!3^Mqmx9F;Q<#Z2o5&PrGe=u-(o7xc%jeDSFvEH~+9xG(HEJST>L`cJKQUK(F_X*n z>4{>(Z03_%=mqJu!W&)hzJfKL%uc2z3T8V~HdLleXR^9snw+9FJLz@NTSc#%-fDVl z=>3r1T6*i?m0FJ&S*$q}%=H9qp!e_eHqzS!uheE5W0^wHJYk;2Xlp#4$Y$elvzIv@ z%$5glnAE4UhRIbla|^v~0GlCt{L;33Y}Dk3JLQ9fCcpG2cX-Q}Y~-5~a__&%HExr; zT;v{3x$#o2m6Vk;S%i}LK=mo_n^@7!^!R0F`F^r|hS(e>>{0X*Ot_VyN7H+Z!05&4 z-9|4-FGWwMXV6R2<44)#TWTi19ws0Ck}qh<=cwe{PV%87`Rb5-xktWKBVT=yFQ=F$ zpHiS#q&H3PD7|C!j?+6qZ-(Au>CMtRN$+v=9#8KH^qxrXN%T(9yMx}7={<$sQ|aAF z?`ia&PVX7?o=NXCy}Rf=i{7*8J%`?N={=9$^Xa{S-V5oyh~A6oy@cLN>Aj5J-Sl2g z?-lf3N$(zducG&AdiT-6=2;qP~h`Zk$hq#Ktmn)(f?||cSZ>7Hm@d{@>D}YasFlTo;E;X(z$5p!HuaR0S zo?i=*+x7azg*AtdfU6Qle!g>+Ei>q7VXO&f$^Jo%VfM3@$RT}WJU_tA* z!(SWX`(SxRQGPB}#hqWPtP}T4@36{w{7QLMOZEG5cnHr7`kRr{fgs!_F)zQXdEKFP z5A9mtbs_$p*L5!3gS+~>9)#aZ5W5J!{C9rW1znr*c7ra5HQG0o0zKhOTP;nlx(dHzOzXpvlB}&yuf5WEBiUMSCjrfAvS&EM zCVPvV8EHV}A|nPC&;ol}1ju=TT=o!b@)zU}NPs=@ngEG!Ip>t$d&OpVk7mZo!ai(| zSS%K+idC;({eJJg(xqmz*8A~yH~xNAQT{`j`&E#C4_ExJNL*#0xFuH&N`n&L)opE{ zA*TiESRa&2imSWjpfWZF#@HN~1MBW+OxC^RqV*u!@BYCVkN*!n3wkDajgC8VURcS~7qv>Of+KMI}T z%M!}Ede$5Y&q+Lc7=?R%H>0MFMxa)3u)2AWLV)`QupkI6giKk%Ym4WJ(udHQx4SH;y_ zZB+R}8I?!_KCcc8x8h<=2j&-wt_-S>yD6&Z!1*so!^)?y?1b(z&%+^6S^GUJo_Iqa zi(`!-t6ky6)+iDS>(wik%NjXjKX_WJS@_))oAC|K(UTwA!>!o5;D<@~2A(Fni8b7E z!q5w1tf$3NEouwb7Bm9u!YGIwY9Sw^krhi}Tf%d9c|D6p#o|~O!f@q5c~xX zGpiP>n$>37F=!uJ@&PrPE)@n}we4Zx#IbGvqw>Fxdajc={ocX$)7~(0y|{Px_U*?& zt9~!Wma@k{v&fBm!rO$7y?=APm-ypxZ;Yxz-1DZ`7Kd;A@s=>VZ{vz7qiC&? 
ziN6M})1?-!gEI@Tv@(_2P^Ielk+^kSw{bC)q@``eEdw~PYR|B4+gNF=Gv(AGr{y`U z+_p~n)3kb(^)IR0FQ-dr?0DY9^~YURd=KT~`$)RwOdoqrD9%vHpHOm^k~ip`CeGoO zm7Zk!6Fc_AoAid0x2W8JwD%q1zKw^thD5;}Yy5Zc`rHO6ZI|C`!$?Axppk z@(tVmIS@C0W7xJE4Q*STrqAdPz5AbJ#v=*rEMd{57tMS%hh7k%axuDYNmzIkJ4g;8H@1$e7wBCc8I0Y8*@WHTXTomcXxyue;WY@p zAxIhr?|}rvJ?jV7dL+iyePSp)+?*jFfION=_Os$0e?kZtdNcgw&6g?na)eNnVVwqw zAKhs}w6yU6$svF+A`4r+w4>q{tAd2h^XHYy8e2|`t)DY$t6+1Y z5>Jq-XF9<{rfr04tbpA*tDX2$dS^tt32{_b-|+-!$&UR4PxR>>U4&e?&BJ3#@Ikm`X;zL3u2uc^XSosK}lyW z(YprEQe4AVq>yN}crgX7KxQUFnhN>U#d$7`w<3`YcauXiXS}$F8WNUEdJa<++&h^5 z!Kt~aOHhLr60V6KP;!xyWl9K<#0rwEe#=RQTeP+Nt^_6e0DnoqJ;;HQR$!`rj7NkF zq&f_krmCxz(mB&K77%H=%j5f~cnFbTL6Yr=lq^bVRRWJjbw(yaBtj;MNT%Cxn+wR) z64dv>@2!_@;5nP?lNja+)W^E>h#+YXW>7dY|Cb-HSs-jGgSl_9H9&UF2M=WzupGCX z#M*N9JS&W>LfP5axiA8Aqi57#TUqU{SRdzBmJ?b|07M9(zi0^S2_2Bd^wQ#>v)BI1eOcRLcD zNe86L(ykiR2%;zZWGTxX#;9!2%G>V#+#V$5&&&^8vbvL}nHqbUtTovu`0le=*YCDw z_${tela{-09|_;$&**g}j0n547RE4FZBWU_M3^rAg1;T-e!vN^{!>(p$%aw%R_QEY zy`riN?G5c5P!16P-ItZ0w*S|M_hhVr19H3o0Krf z10*RgtFsX-Azl#aOu>#U6>*o6MT8dwK>@BsVE^7R6aH$HC*R}MP`y~x4P$aw*K zCeIl{K)X$5gYBe>oq;D@wG-K(E`Cgtow30KdL$W%k0|*GC5sFp-bdL~UPKP!N0c0r z7g29a7a6)rNmto?RcraZ;D_%VC&5=E3-JMZ|9_AKZP16f*gP?HT{U4@FD)ISi6&~$ zFQ(}qLlYcwD2T$Kpwg(xRDsYEQw73HY{WL*)&f3$A_;N7KP^ua@<;^O)XN_R<8BI&x^pcHqR!xG{`i8M3xVKABj57D9^Q%w*oWhN8nWIrHIywp8)hz-SG zQq97HVG0zq+1W3U5w@%it7)DK#UpA|5TW>(>dUao4qWifm!v3qCZ9TvhxX@gOtf-zKmo=?PckL}qM$g>EuYvfw@? z!Tq5e`OhRrQa^B{#vD0K=?));{V4U|?90;qW*Cz8>lZXA%9tjFb)e7Zj$=sT{#Rs3 zxKKGmk`qPFlysuZB1GR3#JmR=PohPz5}zDULRL43ldX(SaL*7!F*P8n6`ToVF3;X_u6wyiKl)vW*sm z+>DnR9m3Qp$^gK@;YZwU9*IwkJOonWXOvLHNkS5hjBRuASCr#uiz)tw@@&V_6j)pm zTnQ?(%6&id9Ff&>*&vMm>L_`50E?I?zf7ax@f?kVzsKvC~8hf$4_+ew7OKsp><4tSUHX(NYyUo!7e zQ6PXnlt%t%^VaY(os)k2lTU6~x9K#O;y5>lh|_zB`bJ{;Bk0)}fn3*G_jZL7;Fj!1 zVkee?o}0T#^x*zyxt(pzo5=pkW)y7(9!^{~SBBBJ*I!+`-dnx4a2~Vl;p@p{cbN}} z@$nTLI|p+79B(Qd3-$zXOb4~I?vcA9{fTL8OB(8T==6JGcAs8kS7M>;ao?Iaz)*Rp z#~uYAn4|Ul1QvXr{_5)Gm9@)P`d5zhDV9GyzUA6#e`WQ`)wR{5TVgcFIa>WSd~|iS zfBo7~js?@v^u%GoyJ(Zr;z!6yFu=_8J7f-d7Y3U!-v>7tX_dW;cGzHFBYy%ik*{%w z4kP()a+nSN0eDIJ8to#$rt$d%`5EoP&p`b11$iG;cnrS)%e4!SVLrUp1}D}KUNiFW z+SF5MG4q-0JX4Mr!tCs09(BgJN+&?Y z(WEr~aSZVK{Qq(IV>SFQ&_qc~i_?ua7N+&pMt^{|Ux(2zw!p!1Pkfeu?db_u^(9uG zKmMo*51*gYpc$XC-}#5y@jVjDSr$uS$1hs&rx(Ta%~WtUMJLOurGf|u9RB9FEE{LMA#F!|FaZwf%YlYgRr#Gj(=^ec`BPz86VxcxGrfLVh zId9fGD5mckwL@Y?9Jr&^4vSeahW8P%DCR`zj$S(|9ur5!IARZo2gHL&dr%w`Hr~g? 
zL*ik)ZSlA`E*?dWhs2UNA&Q7S{JbVkiozYVX2+;KBAyVZ#Rot?E{{Z5G{MrynVI1>bE^aUw&fV_M*Cdal>y5Pg>T6F!VaB zZ7&>@KqlJ_ge|?c8+pPGylAs4TXw^9qYdfVtL<*R71|Bi?bu-?{b0=&Uc(Rk$nOT> zg7tGKOzbF4c=NXK!3Rlp-EG%5R6LyLDnhUA)g$mqS$kWNbnBeC*Y$%4+`z8hLMx)~ znis8m(q6r7FWS#_d$*;(wjS9_k1aiBU+adU{R(||+`xK=R?Bi#PYS~GdUa$)Jl}I= z!+)^#Con92t2ozL^22dP95KPZaLf?x9x9xG8v~7G|OqbxA9rv&nOC# za>s3Xj__~!p^p(vrbov-9pGd_c=fLE99P!Y9Sl@*$Q#TIMwTmbHa%*4m`q47?6z-t z&T4@CYhf}uAnaCOmfej&IQ4Zm2)uTf%wAk}uD*En^76`!tIo<5=hEdX7jC?;l1zer z`K9Y4q$_K0KmQNgGjIRfvupqLwSWKK)zz1ttr|(8)9qlDY;=;0*9+@3c7q=c)pK}4 z+H;ZC)OR#2E`1rL#vHJ}aP4{GpJZu_zoyB9$i~6DD~rO7+@;0s=Iur5eYkk}x#wQS zTrDkzSVK++bJ-Q)qV(3VGM>M%ycqeN&SD2y+u`D#0a{fHb)k1VDbvWOL*qn^6Vx;V zfu_qt2k<7)&{X4-8hO*X?l&tJ%bC zZPLuv@@YThVdRlVC>Rc(oTJY&g41}y3Ia`^)k}H-e-$QKY|o84_a3SAre1T8DSt#g zfJlt>9b<>uYb+<^-7K|gd+8c2XFJ&FtYUL_ z8+OBQN1hDrRX4=8hu9EaFIu-dXqw+^`<`6D?j2K`pzU}3sH#f~^8c_I)39k7>6{G8tRtwcn=pfE$$2+Zonx- zX!-7FvJ!Cg$*teMkSCaTBw` zEyN&aAc4_-W8twxZ|!ULyVkk3A4c_Vr}xZ9i5YP=wQbvhoDDsv6+(0_oP(-x+u<{a z3^FJBLN$}*{ZRO8ek9MJSdt4dv6#oi2*Z8#V0cCtUZ$FOa%d|UlGf$0Z!~))JZgY2 zRJ7u-+l{as8K0uv4wGO<$G(Tba7e?Rhk+onpQgADd4PC7yY=@mhVHfR_j7ShZ_R*#T{M#2y`lM;yvJ%kMqJp-p@ukF^2jI!N));f$|n8<4k!9 zlrku9gEGOC6QEQ;`6einOql^?3Y4>;z}NsE1MMha_do{SJR6zl&rFFlaPy?<(gn=t*>v!1)fWeiJ-fSp&`lV@STw^`G+cC}Yw4N?=Oh^6b>Y2>6#ZAL zlqJeDB;Iz@+t){Qt%Q5WSdK}MVLooqIxW!l$R%~mMXPOId)6^0zQ*c zqbutk>?Ug8K1PGa1XYt6Bx57JRy7YRl}~}&_Q4xLE9h6di zB>YhJNrw%fs>44(pdBjf7JC_F{aQvrH}Pe1j64rY+w&NPZe>dFN-BCOGpiq_zfm$Q z-N1hkTKXKvi9Un>AZ+x5O&uD!sE;b9lrZwPnirk$76uG5rcj9{U%ivOkf-bl-kSz8~o;-tfw&tZi zQ3&H2YK^7Yb;JR>khW9uQ7wazRHWzD*P#MXaKT=Iw%J?U_B*|{-Em><;|ZA?+4YdO zz>NIp_O4X|4V=6|7qE6m?2!FkVa5=naq>C zwaHz1pxP{I<+ObIEw51Zh74=P&`oJc26?#uaIJ4rb7>fosrPpKJD`W8(X=A`vFuEC zDwoCfo@EhH(#sk8vUT}7atslIGCuNnp2Nc+F1ALFwk!E+*r@Q!;1d|+SGSCCDaw3G z56ZFLNNF?h*0z2WWuqM7350H7GkuqQ9X$Dd0i5y?wpjQom85Nl$J$zr^T=20k0E_5 z$|F6dboeCj(NMC~ACF7TB7w&{hA>)L`E`hn48Kc4&uNFWIP($67vMj|rWKcX+gpgi zqr+BhVykB5yV&#wTK!M4sd^wXa~j0YyK$~*!OF+>77#%@SuS&NmkKX!NCl@HHhpec z-oOy;+7zi~|LL>Ul6(y*NiN#xwLR%lE{6z34-*ao0F+Jo&XS!je~HprW@Z4O$y@aK zI)Z9BDGX3a856aSAel;)_vq3pH~cN9;meRf&@ieMxg6=1q=;>R#p#Q!+W1hPKmdgD z1hL$v;FA;x3V1pFD#iApT_w=|6;DW|2>r11S);5U)ys&}R~b5H4qAp{N0BzC*D2@V zS48AIp70z3Ps4El%q*ef7yzEEmnHGae+cD9QRD%p>$My~`+(d{g7@*x3mW%vNy@}s zB#Fx)uDyaC)v_-#?yY;;3i8FF^?7AdOdKK8Y=LIaP)}yEw zo?To7?6JPFx&RToh;0sF3iiO}BJAx&4BzTv2jlC>0TkM0tk`WYb^T;h^ASi3RsLF)o$P| zHdYC{-i2n>sJvri;1cc1^3VpY6Ea6Xdeap1rvEA;F#s8dz(CE>0cmIs>1NHzK!a*d zHZ~FFA~VhdEYO_%?K!|VdD@&JbJqaeMtKXbXu;vE78P5T{AFFEzo62EPXO5XxP~pI zInb|w3k>-y(O9!2z>Q9H`bBI+%^649Sj!CWMP&$r2~a8mkdZSMFe=yVlh|s? 
z0XxYiwu-}a>qZewN3Y(JpGED}jC>O>d6xnvOef>YMI&bkrz}aK=L%)ymlQM#tJrA- z$r!A4I;kOxTp8I`(3Kr^lp$%;utBd*HcH66?uMy(UtEFDz$Pvn2Y5aJ;-Ym*^CZEK zNghCUfLXfL%mR24zRipHKTr)puw+oinImNU>4dlg_rfA<61r$W zR{(V+bCQuMywB0n)mx@~8Px0{4R4hLaV+)0*#q*397^N7;gH6s50Q!oSoknm({qSH zYWQYkGQychN*?8?d;xDrxK;+*hgt^15;&!aFRfW%o)jyFKfq?wn|#E%V?01urq&#j zKaEP15BhBUF7>5+P75ria2%W`fJEu?d^9d9O(1>PK(G;u5YPHpBe3!0*8Q_(jvhashJBa_+Ap_rd$-o*CvokK9Gh z{e9$~yKn9T!`xHi5Zd$O$P|a;T#A$N{6QobaUS~N$g=WE-@-~z&LsI>YT52CR$iyS zND1uZlFvFKMC7ki3NPsA5!*gyzXvt4KiQwR86H7w#1eR!O1?sc9Pg3<%>kA00-k^+CdkR%6^P3nY}auuc@IR z^eicjc0DQXB5DQsdsHPEBm+;JY?v`+!_?tbMBE`b0};<*lH;WPa%arpQ(#UzO`_)# zIBG0vEPYC^7*lYbA7sOYj_*hSnMMVoXU4D~gy!_)u#x8=h$x&ndRUm03Mo9%uf!9eysTQ*pNuEd)*)YS z1%}Dw-cdk)vp<#c#;Bm|+ZYe73qHwoJdOIxO#dTD)Bzp1yQ&fGte~C9v)gA#^J!=c z$}tIPJr$d+4ETOKo?)cKvy8U99{_AVGXR_I9w2!I$g{_x@QghySmHQpr;-zL*4{;X$>qWI5|~Fg65w+R zzMaAsQsA#6$;ZVBxrZM$i?i0d3Is_{3J58on=KG>T$zpZhQGGK=3j6Mh6|OXpMsxv zk!&WuKt~kt22@%0lK?>v(3pD5-jw9`3$(}%f`#iIu=Rj0sqFIIKEc+=P|Vuo-J>8K zbv!B*s1q(MEW8K$xDI)+5Bhk|2`_t&DfE%(Tl*xNB-{gb^$_=X?7s5I0T$ABockdl z%5}X6HjK|O(wk&~@IKD=Y?6nhhY=~1O`A(h%)wfj0r)AqzAD*f+wGQGzOeKVL#Mr&^$)|_&0Tx9PL7ZnmBW~@B!XgD?>xUL?S(8 zID|c92N=dzBxC@=ZwMVA7rk+L08R}YlNs$ZFcsv|8}yYuimxcgXf-Z7+#$73&O#mv z3YVl1Q&>2h3S6Bi%64Qm;dO%!HLq*Ie{*eCa~v?=4?%tZ`o1z!AMq#?ZqX9-usNZVZkWwQp}t3@#Pjs-bE2D}3P(avxWM=$4^6 z+jK)4$7c4*5EX**pi7)?n=yKij?OtJ-HRj*vsbxx>{HMpS9wKM&yrb=rKQfb{b;DE za3%}PU^kss+%S<~zBK>{J*VLY*nRdXKu2w4_5yY;>Rm{RI><24>+2+6M;g2G(hY8B z)lLgz$qz1k+MA0uGbR;!D^$%N+!ST>f#Lm84KqKukjhtB&%knLA7I|T>TfOcomN0% zDzfi#t5UG>Zkk`C68UC^}CGz%|!1=x)xZg?B5=> zG7V&UeV5=ilg7;tY8KP^{Dy}R2PxmC%6TLw-!p7qp&@+|8MKmx15|>$4L(J&ShXn1 zQBW&TMP&|4oT~)IHKWVFLrKh9j@Qoc$|9L3^srk&B`Bu7q0M8*Bb#3jr?1X_Uoi^(EgfwMw6>S zBTeaRug}mOIG$vN*P)R dd`g)@wlHR8t$ZO@$QNSkgf&KLhr`D{BrCQQIB~;>uuI2YDl!5ai-i(-?Ffbd8j&1ZAQB8_r+dg* z&dc4+l}QXQUR{$I@WFq<{tNyoea%UKL6@YehfC6XF*3w7Yx+@D{ngi1{r=8QXYlx~ zvp?+;@*lGGG(r9|yy_V=oN$_wjE-pLjGW9JxfJ`Hw2?JOO)I;pm$gPMD>qU zH@WwMj5>Ue`@H>vj=FrG@9!G@J_=L`5DaW0-cwPOI0~ok1*%ESSU*+Q!FS zy{GW6K8B_u=dfe$TsljtX+t=tR~0iEi2z4_B=ROGd-Z6*Lml29EY9bH2ryNHPfkz2xPR~d z{ehZE5oAJ73a$oHjKRdChrc`X4tB$XV4`fnWFR1rFUZoVsJyKKS^yIuMBq>cpqj&VBoHlB@DyJ4Yr7aC;c)i_ zEc_UzU($0}zFH>RFk`(IjpD2D{Vg;V`N!)^h~7E51Y|0jJDhw$DEZ3aXx1?_7y?0B zi3-zvmO+PebkESpqF6VtftpwCld;If*QI=bp9*a>$l{MGi~&pJti|T&BrT#aRS%A` za4G^E-E^%r$X~%~u+%sQ78_40Et3+QXkY-Z88+>;CXA~!Oz7h6Up8!6_wv8+^y~2S zUAYe`Mmthu#RYh0StEY~ebN^dL_gf(U#bQ5$2VSJ}V|bMZ zVXPw!N2IkRaD*%i#(1Fo%2yv%-Z=yn0_jj<<>r>`ZOJW^m+h*}8`HL2z+4wn3G~6Y zFBiJOVeEAv2Iy&59`{cO@9-|r#XE0aTFNZFs>OS7e1Fg$G7fy|0Q)*Hd*_7c7WT=X zw2%MGZr;;vzFQI0`3=>b%B>nzlkff0{r7FK(Txe;uV_Lm?|bK8blLlo@Wx-s6Y>oB z{4JG^?pD1D_6c)sf(u)~=Tj*{Em+7bT{;~G&qFy@$5z6x4UgC%aH4k+04 zmgGu@xlTaSdRZxjmSK_$&JNFtB87FB?$}oUr&%}_L!saM!<0Q2$#|j_OJb(wY@=qA zM1wNalPedj3@&Vj(GP1NjJ1#}-x5%mC*W9`ECgTsrXrxW4$o4t_6^cAv+0@DnV6}9 z=}D+Jip~?wwio7wX3q;bP4Y2|3(3MXttTrs7rMVT2FJ6I|F1ECE)v$k0sCw?e8#r5 zpyha$rgN5LWjZL%#2irmnbEum(BCWMe_?mL5Gp^=Y^DJ!olSoHJRz5g|NjBkoWWgevKI&StRa4fJ?H$705?{la%pr=a zI0aEWUfld!KFy2g`6?cVcU2$n6Pq(;Ivds-ucox=Y!XPnXA8W<4I8&CO5=_pc+FD6 zHOw>dMPB<^{Q0)kaT(VB>~ z6jE6Lj8~s?3yE>z7FZ*PHz-?Ht@16evF=N}br}Byb_93f2Y5wNxZu{xVZYPqz^%sq F{s+gkH}U`g literal 0 HcmV?d00001 diff --git a/timm/models/__pycache__/features.cpython-36.pyc b/timm/models/__pycache__/features.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6462ee070f1c7fb786422e1e68efd96396bd558d GIT binary patch literal 12545 zcmbta&2t>bb)S#@nEk*K1iwJpmdBK>;@T8I(y~m^lp>iFL^%?yNVJr!Brg{`v%rG8 zv&)`Y5P)Y7F;pg96;;KRQz}lCFG-azsmec)@-c^;bC^@Aa!9FCIpvaa4$1HLdS-S3 
zf)*1GFugN9J>9QgzmMO0J>OnfsILCuS2upLY8d}&%pC<>zlJmX4GPa_8J_7`19ND# ztfAeqP2Sss!m!vX%5`B-8kSpSxh@WzVWm|WR$J9!tyPn{(xBd|75U_S9&|R{kKYb^^~GtIvVm$H&U+Whh5d*=m(o_wB-+1 zs?|GNzN>r?8MFtTJrI@+irN<*sD8w@=&^&I!f{{up1ZLJdN8}4e#i;E#Sw@<8{d6tV|8xKRXyX?_9W^L!jgSL*qiH3zA1cqsyl(ZoQBw2# zUS~3hJlr(R#PNK8+(j?AE{{7Z>URcBJ1I!p!rT2YN{V+Thr%3YKLex{#E# zDuDTo*P$j@iBa%|QX{2me6PSMHH%3h^anjvMU~XI8e`?}v}fH|z5cHabsEoqdwbRE zM4hXv``de~U1(6a`o_(hA3#v9u7=Rr_7H+J^1@Z+Z(^)BUteF1`om!+dYJ}7_!6fL zKK7$_`z-nm7f~2y!K_-#=CbLSi)P8(KRve+D_QfVDMWTHzJY_kIrG^~WJF-mwsinj z9avzKx!$x@1y2;sraFnDX{jY%lMOO4U7W#fQl)ulkEjq|!+j_NnfqtwiJ3K7&!vDw zZ~*>>@1ck-&%EcTXCE35&7PKA=aIQ>+%gW#_erD`S&`kd4lMOr+NJ>Mc4AAnK-)qL ziFst4$7*|cr&;WLkCqa~!rkc%CO$Rxp1u9c@XbS|Ti=CR| zoT|-Wq-Y*z{Zp47qmo0o1K5hyXflSaIA#iBWRQvu49QZS9{pGWFvX=4W{4 z5riBHbN!Jizh8f7^#E(A5|6A*Z62DawLDRjS#4xKGIC}5IBipuH&b;>`wZ|-K`Rtg zjy;vUGWv(9rc#QMIs3y@Hy~EMkxRD*&;b3I=@?c%bT6W+GZ?r~ENWEqF*T%QORG7r zk+;s3>IpJQyC^c*fJ_=%CXbWHxkWdRTkn_9PbP-VBWux?x&2OIq`uI>iJrK)5So^M zZlwGJ&ocgL>YCQ^g8XH?+oz75);h*jpq|C%UmT-lGONVA2RWlWADI}`jFgL-UBlyS zelI@t{8nZiXn8heL}+z^Yb)bHpyR3u1!g~hWh#NemqPo^oO(%1u0$aOw>vHnQ~Hc;!-6*LMP zC=A;Ho;hu$*6_@+5_&wEspD-R8E9CFWUT$?a<|&u>I4BI7`M}fl?g{Gw(RiPa_Gq6NE%Cl8@0#HhnR@ip_4$ zk$Q-g`FKxF>+?CSDTW9-L&SiOQFr30s7q?Pmo|I3JxAra`30YAbBIoAg-ngUlS@v+ zsG(~HJ4D3**&~0b*D%K%Nv++6-0?}f{Z2l#xqoQrxi+~n(smshI^y6aJt0vu5YE64 zv=+>}mf6VVb{j3)?RV!!JvBe!^ttr5zRn;0zexN~R<+wnwcSqn(oX8__Wem`pr5ER zx>Y3>4hn#eAX0RT>KqG~1rdk(1`CRZdY;8a7T;tcl+Fq<N?iKG@DvB%Hc3e9!P2>JD>Po14gt|gj=as|d zsEF*BiO75TLnA6hWrUMP+mX1tGIQ6I4Nl>GoFPS15KaT+no|n_`#iQ0X@uhtkp4Ba zP0XD+2^HbIfa)K!FRCTthRlVD#N;vX9AL{`n4+CZDiniuv^Vx!wUpg$MDyx09MII^6Cs}N#0-1IjHW##6E3$Q%EKv5m)Eb0InXq}-87C*x1e1&Kd ziMiF%GiL{hIZVpzu-*64i(a4z&5m-L3-&QiiT@0%VJ^W{Gx<CGxsyKA+1PF#P&9x$L0;>ZebV->x!;k z(l?*~4~}MG4vBmhkCKw-?;x7M16nQh2dpb})Ml8NIv&$g6B&9D4~IBI8chRpsN&rJ z+Pv*qIc|NCt}E%RWrQv8KkyX1Hlb`{1wm4M-@iZc17=~9 zQfEB&11~9TkNQEgtZl|pZ_q*VIcSHohSqS96hxV-(xdq5q^_^E>H$BFhRBNh-W)5* zML6-)pEHNBhZ>#UvkK6!MTVWejr`0mU!=IrNwpFckOezVodeDaw zKpDBm;_o)8oYa`SVM-W4R?x*dU)>1|Z|XmfmK?6!a560OH0dv%TP)Ju?ZR6Ag3f zXM7_yJFq`Kx5HhOTp;(O%b(21c+9UKM@lNs=qQ5;Tu9kLpBF4yZ|Pcasvy?SiRwS6 z>;9Y`YkjRN5Du4FXvb&aKcUdOat9XnKl(a9@BraQmgm zL7Spyer8R}Q6(;IR}UB#I#KNaq2KPUxD2p$<70bToz@uI#g(`UC|ZjhgnfS&)#FlJ zJC8R68t{p;y`TfU*a<&)*SP-yT%%Jr)Zax7wmC5Ii}Ico{zG&Eo3_qhVYBwH1xVoe zVq760OCbf)RSfy|PrN6e0l4}z0QchlGe~g3MiEgU?h3gi2%3d?&67zH;g_$}UA(Gs zDm}k=;iRNRLNWRihbj07@D%{#Io6Ta>MayR>BG^EubQl3h?SHkW9-ZNc{nBGXVC~g?2sC4Ze;o;~`uRG$JYKnIul&KhU7h84WzS610dQq0(-PjRn@up^nVv@6lKI zbrgmTSc4th5=#D+5K@#L&OU=afS*-1e^`T z-NT|F27wT?1aVW4fDNZ6aq_kWAz0*Z%t)+Afr~P{xPkhTx)T9e#Mo*&umWI5#%BoF ziJ2BDocb!q*|P|(S<%64kqNg@bCFbLlo+1EIpR~yTQVvd51NWmSC%0r zbw|jMNb8?buam>GJ@9PAG!5|U;>;wd4*S_)(M5rPHNwyO+FG-IBw|xU`C1ETHAJ*E z(@J-Am`3u1hGZcXxh|4Wnb=YGAS)v_lHlzwuKp6I4&Kfc?8cephUgk8$q&jBkMJcF zhp^{~qu6u1j5OX1cRGSEE8yN@jxXPabq2oN|L-S(OPcT0zi40jz7Gg9>M|Y93?1Ck zn$FUtTy;AeP;Np;xD^d%{9Ww>h=|}k7{qnpf$lJ<)j2jMa>SR9u#c;t%MsusJ;Yl_ zGInsuUBPpLB{Xt}1LQP5(ro{PUWK%L)b}pAS=|N(^Z@K!;9-Bb!HUwR zz$IqnY?JehrDhI6v<2f~N7*U-#g`0ezTCFqYG(7ZdS?1i<`$rVuq$FQzgkPj* z|HdBKy)o0XjGCv?GX~n!v+9%ej0+$trY5{%Qc&%%5av9lP~>rfQyAqGp-@HPdlz;2 z!9IC}-$ZeYI`LC1gmMzPAww=I#E<@FD#RQQL$v?Ic0{HNY2DMtVY7|-$Ad6HvsBTT zd|pj?`2xJN&Y7p-##=r2ZZN`EPyBw*ze`LoI7DQ%s8EV zzbCMgIClbQs0O?hJ%7 z@2Lf$ysXYy6NDo$6JSXXK3tg4SFlDfA)ybr;kg&*qLUGjQHOnrNSzvD0J)~DmxXHNU*+7j|phjcc*=|s~O(XbabOO(?sQ_GB6NkYM5Sez7HnMe5 z-~Dl112Fjj=yYM)n4XAF0G%$}Fm`XmCx9dyUimX_YcLVGI6WCJ#&y7z-(akh(Gu_C zMeZMcrYXVQE5`lb0E*PZUjeuNidS{x&&_ZtDtVQ-4xC!eaq20JQ=`*y6|-4Hk{0_^ zp8*oJ&xm2IuP2KHrjZ75{A5gt0Z$4WBM;wfikZ!~2x*b%F;C$PpGP4Z9M4XXb&5?8 
zy$sgz;|A*y?mg^U>W?{K?XIG+Kt{1HsNlXwV)ih?_xNaLkT2l2iuKf+9_IeHQV~v# zKyHkW+xB468mipu?_vZ(DjE0xhRaPwAJaUmJiw{-9WAs-?`q7D zyL=Pc31#13L&Ne`CzKrGPtZ+Lg3&8W03@ke}C@Zf0G7Vnv5R{X-Gxuo&p z5s&t#c+{%SOw`OLYJLu~_Z2a{y2pas04iWX)+AOhz&T7&Q??KSo;czE!Wq7Z!YDLk z6NCu9fIpq8bj&5dPNpo0=FiHh@%%Dsb3A`^)p9sWG0J`C_i?lPWdc%x@Qcs{q|E`{ z1;Wecu7LM|?PKpT#0t%YAKGjnS8IPAB%+wIa8)6W+xYzVFB( z_zZn3#YVaB(teS|qoV0zbu;PhXRYd?gRu7V2%jl17bs zi(SA0C#~h)ku8&zAW%PK9gVOSQITMgex`Mk>O0~q2pDx-E67ZgXvW!G*AwT4_ARe6 g9tjyY6-3mq<7Oecu^X>7zEN#BRmWL+eevY~0)n>pb`gQGn#qQ?L00{Yyk|J>@U-)R~oJxe0pc3LHL$9M0pLp{`Y{UhCe4Czs|3`HP(S z>M;Hunp&rXaKZ?u+=^*pF^l5Zj_t%@4vZaMhzp6!TuKIY;J9%yDX|i8imdz-N!J}B zydzIOq6B}u#L7RD)Ow63%kf-NVU?uHs!5I2l6f|tEU<-Sku4@mY$=!S#mh;Z)${RO zyppW4)ntvWQ6g%hF6Ko=tct2w6l-EZEOk7#J~#)GDJwf5 z9P2XHLpX=$dG#&jH9r5!VIPT)M}&PMEWQA1i+l;zF5D)3nb%(tUdP=doB4d@A!)3h zB>scJqfAB}|JGxFH&lZAw?wG>QusHYYZ*qmo2C985%+{t-Vdsq9{3%td+OU(t2fYi zvYE+)*0ZjHjV-0aC$VU?$07o8Dx%rZN$kDD2sNlF?S*lSJ7H|4={txKE-wZLLI=~s z-L##-;(8=OnmApa$js!UTaBXe_OkTZ7JnEf!Yn_G!dUQ!nx9#$?S?w)>}N9BiS!mX z-frBN53(%AG3=Y{HPwF~E8UP;??I?Lpux&+H}0k)lsj6dk7SstHi&Cjrg}eoJ{FbV zU)_0jFq^_b?{3xA9D4H9^Wob` z2mbh|pJv+cr(8&1cax+Et0Gp-I2=H5eAUVNG55iRzV39D&qb8U5JKd4+y1fed6r(% z{&AQBKl3rR0yfI5f6(!}x|!dFMg_45+rjuoW$cji29-;o-ycM-K==2(77ul})jByG zv?5SJwQlb1{kXNcwbfDpoFEaplX2CO;s7vlduP9e+P22pw%X69p;XP@z^vgerG*T@ zIMiu!^Ck3(1B5spm21$03!Qd9jsAxNZ{Qvni-4ESapy4*8kvIW7mK^GwdA9B50>yZZgc& z7eEAI6pEL{-PfcI1&rIT%FM=+^@=d(HE{?l08)x(ev-7eLBY z0z{J$;A{l&)0SG*Hm4){l4@s2-;j|tBthW~+<8=%+uD5tJfMKdBTH>-`z3u%UemUf zYf{q1Avr9J=nde%aCB)%hmJ0556+bJ+=z1Lt;Gw!*&wsR5z+4c$-M`FE+zba4=@DI z&Q9yk)#laB%U56pO3JvJ_?ykES1A!vf$5Z$|(@gHnejYZTtpE7c%JA<}cO@ z`ocoHj$G+Xth)Gy4(SF--J!*yAJgBhrw-_6?dLep!`QHm)!a0+Yn;3gS!OQl>!6$R zZX}c`;v@-svylJoT?dwqvZS}Yi;7@wyY%GBd!cCtAf&5-0Yw@o2Wb9n)``9N4j3V2RZ zNN=bVki+F5h~iMGAZS>#Z4$Faeg@<^G=-T(s7EXCE73a88nviHPuBm-n-RJjKkN|@ zd(ae?Ee&9R!U|A6v_=3`EUo(uTQ-0vzXW33XJOn2@ku1&I0$C9Lvi^E_+#gs zhtYSa0H$_{)+!QL_8Xs?Vi52w0{N~U^x%b-=mS&8Q%RyPOlcBcl}DR>ekRMNugDYt zv;4}#E-&Kwxw}a(lR5{CamNpiECNTCkd%>lNZ@}MkrgCWAZ%s4wV7sVkfS{a;SH7e z+Oeexua`f@fdY5Qc|5s*B*)nzjt~pRy`Mjx`9olAC3KEKiRCkQ!^~&1j5MX|<5z4O qeW1Pu;vk}|ie=@vs!`9r2&0991?aLY52$M+uLeKYUMRbEt@ICJx523Z literal 0 HcmV?d00001 diff --git a/timm/models/__pycache__/ghostnet.cpython-36.pyc b/timm/models/__pycache__/ghostnet.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0abefd0993f20910a04e983d6957357122609a01 GIT binary patch literal 7532 zcmbtZO^hSQb?!elyL&hsE{8MwBke9%KaS~0!<|_xiWaeUN2^&$_U>5LuH=}o)9GO~ zIn5!vxz#;8KWJVu8_01mN=`PCD2M~=;)_o?1TbLuVjuw$B*&l+fq)#80dmkGryz%X z?={K!krWtkbLw@~tLmzHRquW8Rn3jrS*Q7ps~h2Sit-=IiKmGCZKU|Cs-g&`r3h7M zeYK_Ht@pKo-qO{aXS58iEA-5k$+=?B>{%@vd86+PW?D0YQmcg41!4AQ2jy0IFxQ$J zR9Y4075nD~)mC*d-aC~VP{e{*e4>a&-|RhoptmlAb6#8k=faqC z1)Ph*0mm70u8QZy)8aD5to!H1m&6ru^@)0*wyvS{TcR$map@V9zAT;*&vNNmp*>I< z4ev6R&RGrX!C=_;2Y#4%Nf3qgs8fGqGm4XSKdIl3gx{}wab5VGAoN8&2yZ%*+MB4B ze*LcRCEL=E>m3;l>UTDMZ&?54(3jj>yj0&zl3{$a+4SUY@OUYb-KMt@HoUS3*W ze&zbh&cjW=o=7hU>v3BK!-Qs#fH&a5gSZ|CgP`xpdJ=)}asB;hBk22>;(IroZ_1z> zgkC@Y;O2C|ZjfwlZ!EQ=L34B4+wp@J!^qog-g)oE4X?em5rux-Or-BO2VR``ve_l6 zLqBncdr2hQo6Zp_+W$AYV5+kq5tjR8AN}HxZ zkA^gZ^bMr=3P?&O;y_iQ`iYXLq9`n3f9Ry@4@pHz>%o8OigJXc9DNKT3m{!2JpXko zv#R-nun_+FUlaUwFO6UaDmX)HO9DKvCBhnK8ONJHZpGC`i^Ee&d@ z$r@gnxwYfTZv0b4lJx)cW>a{Hx6<72?KRs_wz#===gxa8*H>1WF*Mp8_{nA@;wCL^ z6uz;#-b{kQpvh|=Pt|2|%ho)lR1?jQ=p5w8zaoE0#V zP(P>ZCBEDD_I#PmprwqqL*aJ9Hi@D(9Vn`jYN_Jt ziP0+{XFR1Gs3R-2QlR_=rKXJR#OxK3tW*V8Pt8;p+9gHk!ZH+Jp(GRtYpv(H_!S6n7Y1r< z1YZ1*zyt=+_u6FWW*2KajOA~mO=C7I#M{74*+40?TsH`V#C0## 
z7-r4`9~co5C}k$$0{~^_H7{vzu19i!g7J=j@7=64l)=E0du%h=T${kzjV9oX4KZTB z-^p}LNnWR6XSRG9`eU0cFow5^VkfszGfre6{LFxIZnt2km|uI-3-LkRDuw>ev;;A2 zH#d3LWn+_9(YL^rC~8?XkxH7OUVyPYtsCkky!UG-v}9?r4sFsDdPwt3q&rCQ zn;@yuQ&Kp+5fr1RrW$g3qV;sBhu+o5ArJacN z`nKQiPnMtN1R%PNSH2abWlKmg0f42JtE_jYY@BN7iN6XmwQ&`etqI+S0Sco7>z-P? zIkxyBTfBu8wX->v^R*~R`hMuQxAyCKbWz`!vg$j{dIZlHkq>{cb}aeW(Yq7aQqmXw(7&h^kI;kapkj&s)pxmJrvR=8R#2~H1_xV_}7+K%aqvCYT>4GecGZTNn7N7B4omOJ!6&GrO0Vn z-XQi%AX$;)0kI>0hf0Nw&=osCga>Qxz18omy_0FN5AdU}RwOA`X2qM4OxoK?!_E!M z3__quoD~Ow$TKi6&XfU1Cr~OE$@bdG*kxI3oDU zUln0(3Fo4Mm=$GkOq|3jcprn)yqrZhM-<|If()KdZ+-HYe?iYb-eMFws)L#7-sA2R zfzE?7MIZ-0=?ntlh`FW3R}x_4f%->e*NRJ+K)6G8YIe!?iwVMsRIe$*IM7B`V)q;% zkVWB09)lm6Xf=cFF7`^vENWo!V&)Lmt}19<<{$=Ek9>s=org5exzs}VWB00E3W}U$ zeuP8ks3gqM?C#Idm+`vty^luat}>bv#ZhJV?X(Qeo8K*r&W)<0`B80DNvw2!Il%vX8xfAYP~o zoM-v49Y#qVQQr{Y=Zhmg$UKiOav#WX0EGaE9~q`)6$QllMLtjg9c}50?Kbt4nR!@1 zuurI#qqr&wf%c0V{iwYaF9j4lQEbNNu4HfMH=fC1oh*S(H7l>iv5(Vk6s}1bNyMC; zZY&p2k23wab@DSmLR3J3Mc+=bpXZpF-f-xL2t!5CPI51xZ-mQ|AR#jV*2|fb z9|P{;)S7!0as<+$1{Ti`F<55iGtWvS@p#l&5)NcD-F~#;^<8S4#dtyAb~PTHxns z5_2>^aV<33={9YEGC%aD#AA{NVML_^p(a$eY{YN48q#|PAueoE?-kf$%@kLGi1J=d z5XsSTkS%snYqP}~G;<1>@+gUfYdEY4tYf^>xoYoFqg^6|7N@C&pY9|%m1)_(uubluOkot}JX%_#9&kkh60p9Tq^8%#`VCm{p{5Oh|e zPy#E&xp5#-WYd))I+hU;NraSLZWDP7f}51%XyUAp4TVzcSs=Nwj3jKcFsMB7aC^j)+HO0|c7L zud_i_9zp#HHTwY(o+$-O|6f2wc>X>T-$&tecf-=1qUC%77M@RU-JKd3*)i=A#cwek zsp!B=hb3Wj3thTTr=SFPAL9~*B9l5B*M)R_nAh2Gs#KbMhq&v+cen@7zeA6eFUX(c zs14tv&HN;fl8sXB#5fBm@f=V?c^-3|{|vutbnTImC>;d>m?{=O!~yL`+6U^Wkf;f? zCDl55f`j2jgmQG>eyECzXj7mz(3(s8&pUNIxMTr-!`a4l@A!foPS@{qeZHG*tRlN! z29%@gC?3&W2cPMWXxENNsgJ%4Lb!wHUvC`|t{i0lJ zW6J$fXES7Z0$C1%v(kyC zF)kd6k3AW9VIpaS%z=kvTk(awOyAJpS&4;r^E=hEm|t<*5hPk$aA+bX3!4>K&NBjXYfXhr zd`qI&^6!7sSuQVK$1I=0*yCA#dUlpn_c^naAJKeIBQp16pPD0m{hQD62IlxI&GDcA ziJt#HJ4dSfb>~QJ@t?L6>p1?pOr2=*&+tP0JNbJ~eh&<}43gQM?XaC7{EH>5Ly@l$ z)8d=Dr7&c_oy|{*OWc?*nedMU`jGST1MZ0~_Lu0t0muI|kWZ*jg2&w3Thwsg?{tE8 zfQXU;x;!|QHnnl6y1420al0C`W#LMN|0g5&BzXrq9D95rSEu%WL*xPR%{&n1`?ow2 z=4kvU)ZiKs+B9D86Z^|MO?Rr9^?T4yTnN5OQ;q4uh3+d#_#XtKI=zxdddQi&Gh?x3 Vm5oKcdZ~J=`Yq=xnp!gR^nZO!^GpB$ literal 0 HcmV?d00001 diff --git a/timm/models/__pycache__/gluon_resnet.cpython-36.pyc b/timm/models/__pycache__/gluon_resnet.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ffa096c61b0d3a751f5dbe0d6be4a256eb0c6c1 GIT binary patch literal 10128 zcmc&)OK{uB5e2|M^jWVg`M3VSf73Qa@GH{mwJpoC*K14JSlV5On}h+;poAFw)Buz$ zMLF#r!?INK})H18T>GlDujkv>3!Wxi zd_7hA0G8k~TtO^Hj2JA#Rk-%8_>EZl5VmUG9E`xPT8+B$%&?J3UR#=vbMm>PtZB^D>W(chubQ?}H7!Fi)*9BD0ZNrMYRanPH0@i- zh3e6}hsZJZspt z@|C%=>eRHlyiMybF1b^W7iJ&LFU~KQAIv|T{o?6zd0~Eb(Vg5QURr*zjkXlxqB~h> znHH42o(NxAudbjxx-`GgfR<$-JE(c zB3P|p!JmKMbt4s1Z((smdZWIUa;2&sm)$^%S?)-^RV(Y3X4{7C%4WUUa>};3X}Gaw zqhWbuRA-H5xv4wum{~_e&4#7X47tIYq1D}h?SLy+O=de~YTuQ4duG)%SUkW+aSUv1 zr;1B#oM@_$CZvppjr0Uw-0+Kajjh;!5ZD=H|NArvnxmzYn?K!1>Nv4>a(-^^%e0zK zC+!w9$~D7TZGeqay@CUJG`pB|%v#O!0o!wlvhN0NkmiYcwrNm;B#B`$h`&*hokM<; z^>H}1-u9RsA5S!y;V{jt8<60R=(UeSunY~Yn1sf9-D+r%TvMl0J!41}%4?}gLY>LY z6ZDKXWJIz$o)Qmdg z0Udg$xwL29lmQhL;P9-gC$a^#n6Bn>yw3<<-_vS)HL+9V)k8Jd;DBkSb`YN{X-FL$WR;)M`;L=QPrzy$Q#_L zUJk$Gj;Zx(E?rbr??rS-J?|L1n9f&=8IHL=M9i38q;lEBjH;!yjJI3Fr`!o1k+nlU zFWl;td2vv$%!^apqdmG_9I;F2z32OA>$@KGUWvPe-Yct*Htc$kYws3vZRn$I_tV}j z^tK##staSsQ{7`dV&9MY$oVY0$GzwGxV(E}*Nsk7{qA^;jqm7qQ~FS+q=0@XF9>1b zY0zmTDD~MAw!~@xL`T{ZP70>DCH+SFrHDz8wCIk2QPo-&rXiS)1sLhFC|2%h*9{v} zu}C|`b>&Vl&!`8Ub$Au!UG8{UXNKk&ZI8N*GMicyL`jq>Zzv+|=WR4#%Dni1uOs0I zRm#@dzbiqIUQ1gd4fyAhBSBzGs7hPj@PnHd<}f98SWCxrSZn9yiBv)1Y38)7;LkRh 
[GIT binary patch data omitted]
diff --git a/timm/models/__pycache__/gluon_xception.cpython-36.pyc b/timm/models/__pycache__/gluon_xception.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7b46ebb395df023513f32b17c49fff59cfb1df43
Binary files /dev/null and b/timm/models/__pycache__/gluon_xception.cpython-36.pyc differ
diff --git a/timm/models/__pycache__/hardcorenas.cpython-36.pyc b/timm/models/__pycache__/hardcorenas.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6a7fca9d0604144a2a4e0655256eba147d96965c
Binary files /dev/null and b/timm/models/__pycache__/hardcorenas.cpython-36.pyc differ
diff --git a/timm/models/__pycache__/hrnet.cpython-36.pyc b/timm/models/__pycache__/hrnet.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9075e06a6ee41aa7fddb5cd22207e0b2e441a349
Binary files /dev/null and b/timm/models/__pycache__/hrnet.cpython-36.pyc differ
diff --git a/timm/models/__pycache__/inception_resnet_v2.cpython-36.pyc b/timm/models/__pycache__/inception_resnet_v2.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ae88182b533120861fd53b39aaaca29def36a45a
Binary files /dev/null and b/timm/models/__pycache__/inception_resnet_v2.cpython-36.pyc differ
diff --git a/timm/models/__pycache__/inception_v4.cpython-36.pyc b/timm/models/__pycache__/inception_v4.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..eb608f4b7b9ced4fd4517c1f08eee2a3c74615ae
Binary files /dev/null and b/timm/models/__pycache__/inception_v4.cpython-36.pyc differ
diff --git a/timm/models/__pycache__/levit.cpython-36.pyc b/timm/models/__pycache__/levit.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c8ac5e79918b3cc1aa57cef70d0ae2b3a66f629d
Binary files /dev/null and b/timm/models/__pycache__/levit.cpython-36.pyc differ
diff --git a/timm/models/__pycache__/mobilenetv3.cpython-36.pyc b/timm/models/__pycache__/mobilenetv3.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8aefbd0a4b41890010d0bd70d04953b58c5ff4e3
Binary files /dev/null and b/timm/models/__pycache__/mobilenetv3.cpython-36.pyc differ
zry`YbB~po28dP+)X(m>Qp`tVlJMj&NH>A1Jn%jowybAI4t=&!LVaVnYk_yT5NM1;C z6Um(d(7#7wjj1t6`%c@6QIv?0$_{1$@cd)!oRl`>7-me%6k{%N%(g}3cmrIg7`Xw&S|JFb2o$g2`+pd+UhadHYYlY`eeQap;=Tf`49unum#sk!Jsl> zr*x@SiFRs7Lo@M8qtYgt%oUZ!QXIYkR54)OXom%3AkGS>VwHHMp%Q~`<81RxQ>6(g zHw_EG9(WIJMeVr<@xzY;-x+%qW!?vQUw3l=fm$ULECt5fp=Am*_0`dQEpA}te~9;M zfCfv&kHl$W-;gd&73||GwHx!F!<(}w;Qc8iVR0FtMcP7LQfcGB-F!1b_GQst<-Hg+q!_32(*X61ujqTiX(7 z&f*0to0wn9Q*UB4e$9|(DRsh$V6IW^+>TSana@beWSVmuNhV2tDkN01iGAtZBP4Ce z9bx-J4Er%ly zDt4?T6=kP)OiMM~icuuR-ap~QFX3pnJ!_jU!^1yeG+ZC5B+3r)BACV4l}K@oQI(;&N1yKD+mBnL^JOTyfl zY${HRm-(zwC{zJhg&Q#O%Ty9DJ_U|TeTx~4hdN^>L!d&KeFRvDRFJ}CX{hL}+O45n zn0@%MrkQYs-ra`b-M(iSQF2_=7UGiL&em?zZUbgOj5AIk6%H0vHCX{KY<>xkxwmPt znCdj9vnD!TV@&{}Gg<87FqhibyQNUZo{n`wAROW zeia!l)D||s0gWs!|3gUlv3f!LmL#($Ba#C|XiI2S$C}WV*rtMQsnKcK2`?<>i`Zs> z>t4F!;cTNssA*h8$8ZVa9LeZSuV_b6+t`FEQ5BkiU%C=0g%!q7jGLi}sMRasQS%0j zp1gmtCtqwwN>MvH-heSUuEMy>5u{-@iiQ^}C2*(lC~9>YlRVUG#E5?}xf@rFD6RPP zZr$5n(0!}9qnEL`+q7!uDsOL2KbSG|8SFLn>U-V28rfD-dDs2kE4ei9M}GG0<~~f; z$QNYW!)ec!r%H4gXA4tGrqF1=G0gY#${5Dv^0Rox}b z$;(DVu2gMFejOFt23cuZ5xW*?#gwWov@FyaPK2hreB@Z(C+=og+&get%%5~K=w#Si zO80`^F>E@xV}1w3>rBkAGC)kI*})FrqCh)YRG)UWKY3Ia!YN0s3o-wpF6wG_rQ={x z3{;y(JlxeRg;Uo&7#TknhuvE9NJUhuG!4w9rD-HpOI_1g;Iwc%nw!uUqdiYd_tfo9 z_sH&a{noQSLd9O$7FJ=9no27LQ8C_B)T*e zMr$PB*{MMLw!AEz*wpjb+zF#C)wS?a^I^RoXFM=97)3m||;f zth7n1jaS<7{6ZBv%7L`0(k5-218f3!7UE6}F*H; z3W3>f0tG1!3b@tT%*}rx`5wvlLH3~{bDWXwEQ}1kiW3uGf;Ebp`1tM)cP>G{fy_v{ zm$|)>47FWQG^at}8^E5pd6Y66hm%l#|L#OFt_(O0vY{!$pex`DcLqn`Wk7d>?UKyo z1Ko?HJ2d~9k$jiL&%Fx}vtrJXypTlZwzrbIjpTNcJ4kLOk<8vnj<`ClzCB#_8ct&| z5B~=oHEeIy-eVQjr3)c`m#_=D7>pK8(325fCS7J6Q!@MuFTqs5D;&e(z7^}zE`%jR zOTxU@7E0nf9$FFV5N-uJ*t%Zh(H(4c2D1aHi6_sU$Fc5>htMuDEuBFN8b@uyDs?qE z)=UY*R23X&dKfdu(HF4ZiHx=g?Hdq^wl&@uxD%H<2NBwYE6zKTiZ-5r+#JwE3qu_D zdYZ|AW(#>qN7|kFU?n_`9%H=qXb7_a+S7*KC$gB@;j2Rkg!gmDRCT?(Z-2j5QRT$3}-R2Q}&4lADiWR zHXgvtDH7!*JCZ<|RWFajut;yi^rHHbs}4`?UQ=d!9yWi-?B^SX7~hkD*WNS18kon0Zj4-h( zXcHPD%|&OL)XJHJYOBV+3@ND&i~>lZbg5b>@!X|4E2vlA^X`+8nWj<)?Fqz&Fm>ik z!nIRkTI9xLM=Iz=Rr?Zcrup#QcGPa*n8kCyJGC44+z_5g;%b>`lX#-o`#*T?Z^V`R;E`v#%)K{k`h$e*&s^a$55DRB zf0U4$9dAmZ?=N^dt$CMus1k8!8~5rP7H(tAH}~B0-}2PGkGwxDA^YF?Avd0T@4xwX zTe}xHaV(9hD;+pv;d6pV_6>N}N#JnZ#1U+dBDA?s#^NPES}f<4CzNwI6?a&C2~Go* z){7Tq3W&OT;u%8+4@I8Ybh*jGl;uS2Old5OH2hD@Q5-9TkQ#u{y5-j*1hk18|Rr9uqS4 z-)`YxqLnrCBRCp}h3RGqeNvC1&hscL)o`HEwda%^M}4dVu>Mh0CcAGeZ@C93^-R8K zxo0^K^2lV_9+Pvo!@IAMv%NhRAFiIX#KPV^;^_h@r*nCz7e_aFs7<#@`N>I~NA)vB z4W0)^Gw!ZB1KeY0fg-`F)&lxI7L6ZcuUmBcaEKEpSmhLJ&N!s2d@hF&90kT%S7w8s z^YrQ-p=e^`ucX|=ry~ZQ@D3>QEa*lOW#lsO((9`y(3$J*d>G~<4Y!-M^s&t3QH-Wh2o zZyFHcJQ0@_0!kJd6`_P-#qvQx9Or3?P~DkL@wV8o+0qL*+Z#MC?We^I+JeMPqt~q~h5UqpOdZNtrB$|`p@mrtOVv{2 ziC!d8a97(*5E-OWEp1J9qlAo72IoiRurQ9m>cAe!xG2C_ z@2DzsS!=|Z5dgAAADOc2vgkgVmrSC{jRg?)GJ!Jmi^nRI4;eVhEMrw3IYzS8`h`=+ zh}=E3jPmx+Egzr5>MBY{#42yO8L8E=H8|F+TZZAbud&QQp2o(w$fL>1#PF2QR?7wF z>o3`JtzI;6vigWVg(KXoSU9g(o*Es)Vb;2gVk(T%lT*`383#8FxYo8$;(YgEePho+ z|2csNbsVI(+)|n2Fm>Xg~J80u%Lj}^S;n#Q#Jl~wZjyIZHY^?O{Cd_+7;|X)zZl(@qlc3n4ip+** zn(QQY_d??>*isJ9#L)}1@-9|5a;K^*8V#7`Qg?y*20LlDkvkg7F{So9WW|rUEatf5 z3nw1{?Nl-BGls?&VKo#Q?}9g`*A!`wG1UW|Pz0S`rTrmNBhuTBH*9jf#+- zt8lHZ;9U23H&zRA_;8rwQj~M+Y{zJ51MKzU4x9UkS%qbFmP#(vaId4%KC`&8d7O`j9ik z1cali1M3+rJzx*YlVNztu;-%|icf3k@Vg?+7f|L~%11GS2fu}00cd~M*WM!9Tf8_glsGrb z-HYVe#j^6Mf7-pzwB+eKBL_BLuZgE@#qjjd9(fHz$0`CZXq0*|7GR8q^E@ytu?q?e zV5_IY-M#2SK7-x@F8TfiW-I$zVlg8fQ zOT!0npu*jkf#=^Y7x1zKmPFy+{u8;lf1lVw1-z9qdC_k;qi6InZF+TJ(&)Q@YWqU9 zhh*`?nKnKTZ9P@CHnZJkLG?pzmlW+B6x%DSL 
z2NwfS1K}G9c+*5q40$d}?vco5gB*Wy{e8093g@z(wZQSYa;*EkkZHe2~fgUQ*Ncxl90<+?4!V=3ixJJS-k{P+NNmfZqz zk1+Jo7}vY%mIkW6*-9g#d=CYu4&i2%S9Rbq;QJ>QOvC&Qw#BZL)?>azwB@8%j3>RU zv=eiuTkt-}+iYn#i}M+rE!#n_uW}YlU}lbK7{KOhcfNCn|%w=~9>vghnkk55n(An<9WLE~R7WsBH-EB4r5dX6i zDmkIziJij_AsOpch)Kh`n@-;qvR?1RW^4k^;FaN(a1SrTdi`8kxpVa%#aev@*649? z@lZz?>v^o>-IYDvkZpJ(jC)<8OMOZVupj8}fa$hm9W& zBG!4gNl0%%Q_*9ImC{FCPTk||ZNss=I*Ts~SiB-=@$riOQ3H14yu@anP$6R60mV2W zwtF0$^&S`TI1kmR$zDXNs40#4LR#(~yg1Q7;FBajC{bmT>fjaTG9WhvAGyY3Cv~Y9=O>IDe`l_h9qo72!JwTXYz{ zP^Tk@;{;gFJq301*(ArcfQ8<{6Y0ruO4UF|I?U4JhQ5aGgC{JxyT#Y}%>9VL z?<-jAqqqbg4zDY4EcEgL$#RzFqBUgn02QuFdRLqoiF)$07JD~0>Dr{Szy+mw!mB&0EYZL zfT7w;0>Dsyg1{W}hwGU4@8su^cdFeN$UEhe z&HLa}nfI#$c_%-Qyi@J1fxJ^b*}M-um3iM2$UFIYh$>x3SQz~TJ)3sh5{K?NF?^OF*An%k< zHt!pr%DnFjeKh;ue|oD#NgTh2IS{~0o5J~U_kj~ zF&H=u@h5!cS=Yav$&J18c`3Yiht2aNmvVWuwgsYAO zewh~p`WYo-NgfF`Law%%o_vQCqIwuQ*E`M{RWBR31{5mjDs{`0!If*(@k3c$r0Qq?!K(&nl z1StO{2-HuH9v^|hQ;WcUm|tBWk)Hg(C@evq0wFqQjeiZ@o^FV-V zmj)1^`~(qr>xqu;R}Y={yGy1WPvVb7b6@f?wlAwc7%khEBLRfS&jTTvJQ9hLjd5+!F1)uC@9BcjSO6jN^FWAdw*(NPdGQ*hYRHwo&aZ0oy1)LEFxGOx&uK%Pa!0sP?Xaag$dgpH*D`>l8nBK5B+y<@o}_xS7L^zG25OK%dn0+0s`e%Z zkW>RUGk^rz|AU}gaZVn=3y3C372YijAgKmi$N-Z6!Q02=uZ8#HxA}`$IwUBaC#L=Q zJw&Y&m`@K;>jc8)c3MSJtwgoX#%uGnX8lB4@#EVa#N<)h7t=J7s%bkIKmr7Jk|(L& ztVQM7ee#$*ey_QUsguBxN7?Fw`jFbSYje2jYg zx}$+NBBi(HK!?!#@#X%AkKiJsVASo*MD1<7FC`Im)dY~YGgL;NsQC`^GH|_VBUE*c!3d{TaglOF2>g|Uqr*tUO(0U_si>hc+HzX0@>{U zSwq7z!?4oCtX{*9UzH05{^ksRX~v8)B!<_?@w&165f1mSXf)-fO88S4<$~qD#r<2V z#pRbIdrKwncfI&mnR=N_t*&->+ z9~!jWw@Viw#B3vJC+Q$rNYY8Nh@^|8o8$!~H-qy>A@)nXCaMy`l<^Gu4 zrx@~Fl5dcFljH{^^1AtYM#Pu?P2g5*k)t4M}Pt|r+-@(hw| zNcNKKBe|C3I+E*2o=Ng7k`&3aNqCOH-H?9{Iqs>N`$;k+BP3Z8MM5vPDZXGlYDjYfTu(efUHKX`ML!PCspO{)IQU`AjEl=j*X9ZHsm+ zOU9E4EtE_oo5JoLd3_cd_%X`5e-SswK8^5ii7>zXfyzfw{8TCb(un*M9={ETH^#f< ykF#_&bToIwI@*(=#1+YI+V$uE#iJcJX$?uB+!}upuSUdu{57BSVtDSj>;D60R|!)9 literal 0 HcmV?d00001 diff --git a/timm/models/__pycache__/pit.cpython-36.pyc b/timm/models/__pycache__/pit.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20192c3a5800726bc08c2bf538ca7847d73534ce GIT binary patch literal 12341 zcmcgyTZ|l6TCS?D?yj!xxp+Lb$BE-q9LIJi_KZEbIa#k0$FY+r&TJIVCeTVKdivD# zbWh)Ws>YsadPJbTvUicAfDj<9K@s8scz{KS2gGuR1tj2sCsYpz!~=}PO1vPXeE`1i zRCP`F#Fse2+pYTRoWIWXKmWbz*|9Nu?(N5yzCW*N|EdlB@+iNG%m1#fX+kS$LKj9u zFX`%TlnmTcja1VtnL3}DjdU|p%BXU>k!@NfOO=gAu9U;`Oe5E{OSZyg8~J9TRA`Qs z#!$CJt}))6C`~jcOOvW@H+D6rN>k0YPsR7$@qIeJ?-5@WGq()1*e}k;rANdoap_Ux~(odBh1?F+#0OJ620GP*sc|sfl=1`0|2+Wh>FffN>%;Ug3C5`}dB*r-6s5o|A zD?K63iKoRgSotAuk2o$)T-W+~=}DAl#hfZ1M){;TrOHpCJTIPA1 zUR32{D4!NDsd5qJm&I39`DtNX){3u{KgJO{@3z~GTB~|usqA~gdB5h@+AU{MmRtUE zTQ)uE9DBF6SafPF=iQCPwydn!_PIgPsWm$duj#b{r@egNk$k7@JHqoTvbKb&YAyTd zdtR_oYpr5_=W?eU)XEL#^%}@lBEvs|oC4X-I z)X8UOPrdN$$$8seT&ej^r7b+?8fGnvawou?%d*{cf)&qc%UZS8!gNqs^C#y|I`6f8-}!*Oo8^|hMcG;?>Y*h(ufu^+&UVU@#6U5x-=)QB|+(J10IrQbl*SB!I8v0Bxt<@UBZGspL_Zl|m zR+g*bfi||*C~vsldf-Wr!WG_fd94xfDSC|sa;;TyTVMh_3v)r+oe$!*j7zUt^U*CD zl*@dr(XOn9T7`4+XW%N}dK;JT0Q9I0Y@rELq(vss0$pS&`sD9Dl4wn1P9@ST317J#Vd*9&tUuz6IpDze>iJCz_DtF^F)PP?IwEu37gt$V^9>SZ>)aw|;v zfe7v8n)E>%PHC$daNeFQrsO1+Dt8S)(U2LIvj8Yh;}&LCua#xh{|!w#sQ%#moG1t7 z`MGX=V~!l^&s{iw{(bPq{G7igJ+}#=XbXRiL~ggI~$?NV>*c8 zd{S963_XLtsav`{gnG9!5RoBnme*pgo9k=@6y+1OX1VH}h)0%Q!-Ep==S2HjtI;lt zxvM8Nw)dx&$b`|gXUPjRm9(nH&j@18#S(nFqO z$vr)kG-#o;H;M9_ZycC^Y{h{r8 zd#=-`qEmc$b1fzNI{jQE0aj>3J}49O^Up}0zUZ$SqwQTIyMr*>n zZBRvDH%rzH&D2UcDCcZAaoM}F21^gKq)bxxEl1f{D37&<=XmR2J>Lo1&b78&b;`26 z))J^f%h2F=0w>t$(9YL4fs{Syt&orbmO z3&m8J_SZU|l*iF1v|K1{P}_ysGJUVH9GcX(FiSZ%vIefJRIhvj&E+h>>$rT{C{6Fq 
z?AY1t&~W;Cn2}zvCR-KN8XdSmUgvT7j{vMs_b}c^g-xS}W%l(={YvHot*5J{s(E20 z=Fm{WnT9opC>AZqA)2cSvv3+};&w%w`)-w+p6iCTOG}J% zK)K+$SJujn_(|4vMZ1FibJvp05vJ9tgqcP7S#2qwCvGezRK7Lk0aW}c+#9j!ls&*ECWG+<6KUTR^4Q^zq66|B!=t)lTqPjBjh zb~AO;yqOl-P2;9^Q}5~f!NY-3PjPqz6RBI)&l*=u?8;oM-unLUefHnq`_5UgYRc>Q zMJr5+S~JYijiJ`|!(6k`aU~s;(7LjE)m^HU{m>L}VZvNF2wHAbBy`~$t%No`HW!W^ zoD)-WjgnN6X@D?yiTDMyfT$R=Cqunf%!K2~K%B`7$CJ_Aj%+W5IaODWa?1098f=DX zwLnRR*cPrI*k>r&@SQtnzm6wL#xi#I*wFbU@-=$~PGh;@pG9HNsc6d+9Pz6Jif7fr z>0pDsFQHkOQJaH*w=EfA8Uahy3o}b}LqQ&|Zl*TP&GcrbXVl?C@7Ff7z=N6kdY#d>(MMZyIJM6*F{xX? z^t3v7o!Y1e6Ckv27((l1zhO}1IC6yRW%)$m&~7kZJ=06|OmKdtZ>o{dYBeW+9b;l{ z$cHglUk`oB$_tX4*rJXE0bk|Kf{tD(cRXk|IM{*LLJYyMveWiG`7&CBDa1>XToPub zr`k#$NYNV7mhiQGH)yXySB3g(p?*Hp-wgE+N(F^;8(yo56LrvSn+cUlqf+u`05tf- ziLB#F|50*p=e=aeRlv>qGjZ8d3y6Xy}1ya~k- zNA+R4>&doXA$Re|J9Q;Bf=>zfyq@Iw_MoixX@JHffZzo3RF>M4S~KDR)W8+nXD@#Q z!xhtEeihLq0!iOrWoL?W@*Q1fj7O&^D}PSMQH z?4Vqtq%edMR+5ZA-pS_Yarp-TAQTW-C1_fm!l48=77X|*hFnD}*#sEz19AyxZ}UyG zMl#e-;)>h=Fsp5J_w8KYfMInhIP(7sr}6-1QGi1To6feTMdFT`! z_A*MLN#h8`tqe$;&y={5>1CaO2Z87G#lk$8r?OIRfmi*oK<-Iuc9diw2XBvlXI8q2 zp<-`s91c-;@5ojYCQ4L2IVa&B8jT_Di5y1Eqn2rGE5SSQ$1P5*)CSzd?w-+U2fUHF zqx!m^z9Qvr42HM#LRu0t!UcmP^gqOt0T(dH5PK9a(1FX9Oy+m4Yb6zI)GcIx@+$wB z69q8_ye;;M2{DO`XI|_QQ@|9&ZZVDfnBtysaL>MQCe~$9DsV_=cYp1^DM#CO@ViUC z^2=o()rcJR3uGbLY>P=Pl_a{j?YAm-0i@f5i}@QR0s$CfR0M>+IUb}U<$}9XErCh9 zPCq~xz42)cdKxx=zcvF+{ir5V`{225PQWN^om)r3aR-`xHr}w;TXaudk zqg_9xMwmjI-MxY^Z_!Jiu4ic-7f4(`jelwT}_A5Lv zAFXfh7TV@?Z#Qt6K5W*EwyJ%q%j>}-*y(ProDeHVjg*QoQE3|V6xmxT^xqXc8n;hx z_v&SP)4f73-8Tw@bwfk6|ANL5VR%r*Y0Ea>BkvNw4{K9{8p=m;9qa3HK)q{0>R>`;!Id9}a76En#5!cq%eYiju40`6 z9m~4b*P>`f=_vHe1bCg`2e5i?aY*ItIxH!mOz^v?{tm7vHqg4y-%Y?1^?AoYL07sN zR^Sd!{7_&eU%^z>pP2rwH~yvj^RGa=(q?b{FMe_ZrEO3wMusCGRv8XTW@t7URi&33 z?Y0bagHXbcA_2-TA}&O0k&7yQcBa7zbMKZVf^#I8BWITpihvIc_cuykE3F#lhZ5Sd z#g5t65zE0Ig%+DK^a?G!q47KAiiceE8X_;h)2IcKac-D`2du1g6frSG4-3lAh7rK) z8(+Rjl*y;xMsVJM&EgFGLk%biYR1K9zUUx~B8#XXS) zwQ9|(a`U2HE2pc+t{)J5jo>u`MIe4WzRD!D(xJmuhj&c zeA-&X#~ld*1jt)Q!NMIqykA07{~~~vqK9YUe?&iGFh!n$&zI2;>j#a)^!|)P@c-xo z8v1eE_v(+p-#Vbrqz)j{r%c3I|p3-ujOE?J-z&i{OTJ;Ti9Crq4dL8~u zpMp84XBfuC;+TrWk@UpxKVL!e&BklF7B=0gc#6B?L*=Q2xe8u6;Zfkq4%&yA=$IgZ z%Fof8k?1*i7P*&n9sUMa?6(vc3mv)p28WM6(FxQSi628Jj{eySLqV+mW7HKd$4Vjc zWE^4@pcV)xT5UI4DPBBP${T+VWR#IJy2tLinjM|)ydF*h1&5#)LF-{FF@#SJ&>%1lozdc5sj{v5QlN;GJK>sH^E#*6pX<-D#Bfb zr-Z{tCIopB*x^A<%qZ#DI&zOg)*>f=iikeJ#{^1wXT0?e^t}90VyLj> zHwm5u7?4<*#2+KpNq~}w$=GdAH1t2=xjzZORPZEp(-iKu?ilwV@y;>sOyU)cIZ9$h zpc2U(IhSxyq&$37Ir4`)Y|FBfqcm%kT4APr9T zVUDt|e;bVP%PA-%bD@i`3X(5N{8_~k-OMb9okd1Eg=xaHN(!lr_?nx^qp_cUr)IaeZ_kHTWO4K9!zCmP!6D>KBv4 zBhBb5zKzR&0U*lo3Mg|qjqf8sy`RRb14Mohu;o;MXbmqD(js*mZ<>D5_yh;si|87T z4ZJ$H-D+%f&x}-^Ry)}KREM9z;QN%%e_7+GefvC!{Lr@`csq$VV3be1ZA=IZnx$am z!oquN@En`o1u5|^DThrc1`P{CBlv3HIDqWcvaG>V!Hd#lNclMX{5FA;0HqJ}Zbc%y z=ec+fnt}$2RUuRC1{^xNa`@WeyXcf~VhRV5*{g)dUJs7ki71g`=p?XX3e+v3@6(Ijk9V<4$SW{k2$J(hO( zz(?5#Zzsu5YROP&f53L!eOOSGccaflc921`>jbw?eL!w9@TW&h5?F?M{v`;I1tIQF z0Qu*bQ~nvi)=8F_9%!@d@XS3n%jZ9Y&2BlE=Rr4n z7=0dSv+VHFJvPhdBb&8>P!fF>g)bt}!oH37REbRIht9UJApa1f$Uh?ZV*>K<5KEKS z)$uRFalW@uf6AuM06ft4+2IXte;S2RdFDr4kZ88}5(F5M=z9|&k?A`zcSMiB!VsVUYlH)GLmXh=N4me@wu=wCZFcbPxox)i;cWY_G1Cq? 
z|AN~-_MqElhv1&uRzt=vwlNfvevz_%vG|NsZg`hl`b_}7FU04$%ua;4WqiXL&}mg4 zK_EM+K0;F8!9?zQS~cSbmb3gSbNl1XFuUS4@QINhIW;zie~%zfK${m@(Pyx7LyoaJ zbya;l?GCxP(f1dJ*yHyJE)yJQ<4p8HLzK0v5ZxxAbFaK5Wh9ktSC&xH0!V5`r5z+C zM3P@5sW)Uh9af(JfU69z0^dxW&7tQ1IDna^(@@~wz}=WcP)WD6kg~_9dPc9PHVO;o WRDP;BwF|IfjoXF7c>JFz;QxQGadDjh literal 0 HcmV?d00001 diff --git a/timm/models/__pycache__/pnasnet.cpython-36.pyc b/timm/models/__pycache__/pnasnet.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1929f0fbc732669346f240c50249e04b0b14e283 GIT binary patch literal 10870 zcmb_iNsJ^{TF!{beXFdjUaA*&XSdh1-IZ0<-FCMz<6g#X36IM?ZsUPuAUY~vR905! zQhpKDOOg%Dbfe)?Xbx;Mn0?WV;J^VPv7QicMjTK`NR~JZbpV9K34!4I-iwILtSnF4 z7FiX4{PD;C_J4c-i}R&YzVY_y^$%Z5B>p)u@hbtpg3G&PBoZRgObA1mU883Tyw5hX;ymk z&3Q%7cB{RG<^u3s+h`sVc~KB`@uHX$74edISyaV>IE3zcepqvOP$3QtLo&x2mka8T9Q{przr$fpKP-@}~C}-Rg zsP`nE&x)t<{Iq(03eV4o9G-JwiBsa7cuqY3*l3=17sQfyLF6ABkBnwbByT0^jp0A# zYukOt>%0ENt|QxSt<&4?x;?k=JAP--ueGJKzV3?Jh8*;2*B#;Z-RHg9wsd{zbozMF z8wj`S7JO1X*`ciAqYv84`G0f)=dZ+7ejB7uq zYhT)a;16VLv$5VCtT%d&=eu%(OEWdXTR0q)$>;YdF?Ay-yeox^UVX9C^6{A2cBJ2N zx@&t79=<=czSHRnTh(CSLuc*QM!TL2a@Pm_yVv?xTYgYt$9>msfwC&%$L+zOi^uus zZP)R<&PKv}m8t#nus?^#@&A`ez4;U|1!lz3!rnU4}i z!nZ~S{;W}Qlo}br*iDZTc2ZGIVUAM5+D(Fz5(zYvT!Rc1?zpm#&a25ud%o-lml6nl z>9>P)TMl-%J;`y@Ov*v|R<;M2 z;t2{WNP2E}BPewGc5Bn=qYqwC9AKHEC*9haxJe_(*?K8#`@4z%daEHE-&t)8w;nWF z&~mTw=Jo3ztS+yvHoP6_+CA6b90(6m*oNM}b#<-bcY3{9HI&byg%@ynMSz4+HcTU} z;Bax@IxI!QY?)-mk9S_zae0dXqr_HX1Pk^Mi?W5KP?&+qHr6nIlmhuY0B|-L829DN zq$zNYl+4CJ-gBf_#)GF;-N@r_cyxbXLcY2oPlFZniACr;J=eB_yiJwf=@Kv6_J=!8 zSHDqXl?}F23g@!rcDq=&xEHAJCA^pHj9Ogw+2i_W&2m;zK81v5m4s8WDdB06nUe5a zP`M_Zerq!xla{a>YA7wr5Py_&;!4(sK&EAVl#)kAX5Snc4>P{Cm3)NBw^M!^@}E%} zkcRxHK;zr&C_{zYHSwH_x%1%8jLZ>~a1?PD6gRXb-&w!; z2F$4CM*2~b&!Cg?0s)08F#G)=`JVfw9}ZGZ%ePmwvJ|{XyX}>raLw^sn`;BvgPG2* z_ielcS@Ew16RfN8+tqqj36HF^p%)2i1ngaqgF%PE!lEiY4d!|sAs`5mQbo%f3!Lid zB^31z0Z{yl5I57P8kO)qJhp$ao}dR;P|tySkl4avg}MPl6=KO4gUOoIm$YQRW3oXGUY_3IT3>ZiYl^ZD zXpQ$+Q?B4)T2o?q*V+Mjr+=8zjcc`4 z4ZM=JTvB+RBw$|wk4GaF3D|Ic+^I;W%<*!4PLbmLuCb}m2xlZIpF*Ub6lN^kz$y(S zq9XC0D|T9pkF{K}CH{D42&OFXcYu7NTv=ObA6pmNPFhX|HM`aZLLEN`dko6)1mX`% zWOkDqX4`xOMTaHMgP-NHxEDm}k%1DQwhk+-qlmI4w2%{dW!=k4e#>h?#<|;Ou%{Ma zk6!vlXu}Z?4|e)KVwP5SN4Pek9kuFsT?3o8oEE72MSkyp@smFUo_bQ<^x~4Bu=<=< zi8o15_99jZC0jxMmiyrjb}xwTC3^(5zT3X3f^@Ae>1_w${YsE)x^i&S5%3C{w5oNe zw9&yH20aEB)G4`Noha>*L7G;g+jtY(_&h?Ks!=o3u=czUPaYr(vx=2M9h3>Gh)3gm zfMUVJhGyOZX8Fmj6g_MkEBgrR{xCV>*3Mkh6PDIG_YUfk?-EeggOu7&1y;-P>nS}% zTKOPzUyXbvLRJF{5#!htkBxsH?4ilfX8(vWJaNz%BR!46sLy5fx!ym)7DE*+OH!s( zl&gJ(EOfK+P5`?A*m+A>49A1)b@=UT*fO=KaQrRcqH$Lf{wF}<3FqYM_%cazel+nA zp$;aVQ;eX0%Ticoga8NcvY;dtB?k-(0mg&L_(*?~Wzl_XvTsQ?&<)TTLDz`da zE}WIAa?!j@l{-IOE}W>Ta^X}3Q{^rMsr&YJk+mz$npE0ZSTvKM zLX{6knzA%m3-+`7)z{;y#i5$URG{_&UfgA?RLra?`8=htkq`WATlXA0b_+51@@$00 z2!@L79XDn@DV10lh{{a0Lr?k$hUquyNkL7kojiQyR@TpnjGq@-csxdNd3sb)@C%AR ztO1lFc#Ei^B+9!cd~W0~OQ;z(7|B7FuBaVSdAdG$=DCoIJEMrJ64G?Ly5xLB1DEc3 zb*8Qb@Nje}H!+q&$V#vuNPNaE99F)2KJ*O}-gu9mN4^E{KGwL#hcg6k5YXo}vpomS z0iHClfXNbQp|@A>$5GvkPiNp<-yiJOk7!Tsvw>#`7<2_$Mzi+H{S_6~OGbe56@s@3 zC`yt6q4MR*Vap%kRH^x;9<^TtpKE)zivMP4nPjuO>@XN9k`GvW5|`C+0*? 
z7W!!ck+K(*Stzb41EAwZLuvJ7z%F+%w=;SyY2#~wMPlH=e*=Mr`-$OZ1X_HFkz#bG;-y&0B zX@$G-IrrKWf4CRZGv?+5CWnH{0coT>6^D2P@^->8O>_(Zu!O>em6mh(j zq8|n&%|kidVLXy>TCj@?k~{1@o@6LF-K*h|!&ExPf`UI!J}nj@6hQWinvb}gP7?$YKI{31Big`{XnS&oul z2CacUsu5CkoyR~0g;!kX@dIclFn1?a%K0h( z6y_$y^U_@l_#DqkL;NsLNJD&)XQLs`)1R}%+e}cO_G{1_?%yof>Zf&4^Rm}CY_9IkH9WgQ>3UY#n zj1p(FL6JS&z%eb3r@Wwi6=#Y#=N?t%}5u!hrHMV!H$kM}oq*^UKO2O;T1c z4IJBYc#ob}!{_ti{X%%Z7~WqA@2`gUOX2-;kWuxmEJw(SO6%$zR#M=YIw*z%K@ttN zmvK!{Xyg2w$tfn?s-QqXUf>ASL}bH*J}+R@s?KB8VWa#-vV4btZth^`_3wD&2>^z! zY4uk`n3^{h8MG>_2v@~jV;IDSfvl!X&=oJaCTK?9EFuxczbZoE;gPA6@Uh>6XsRFY zysqO?o`QZ)hq#6wxSjwu|G<_zeCf)ZuZ@kT8ZPr<w{n!2y=70B#V@0Zwn zZGGAkCg|txjBWz5=nPK?$ ze2iU)v5PVGN{qc4W0zv=a*zvafjx-Pm3@1)pb~~)v4uNN0Xd5i=$uZZVY7FfZtoMG z{Z+-nLv4u_;0hkF7_;Z?5FTONNy=3m4RG&7VS=zAL1C$Lc^nbP%V(`g^b=+$$f|FK z@YMs%)}Mo<)qvJ1Fm41H+Ht0_7)NU**Jt>jc>Z{U<1d0cIvzR%n=?E-ttL_2nwDQ@ zy-ZsuH!mL(`&ok108n7!_1yma*0bdJ1A;#!Q2pkv`TLzw(nI|lE|nk88s*osRz8(o zxDn4X)ss=?+hlqUKp*>zj19}jOdi8$CWwy^NA8*;`4JTLm(8E2Dx}teBCar%Cc zAAf}vr0?K^iMF?2Am{AxtEccwqj$L;%)upK{j)GyWkdcQo?2Xg{y6L;E;Z~L5I?b@ zKHTd-(>cV4rjJkeMu`m!Sq(Tj{JC&B&t24?n$;Y92!>&{=;ZZhY5u00ui`ggn z!!CNAyKZgG-I4eh?dl}^Eqt@o+B~bcUyr`Ga^(Ha-KBwSH=K2^vGVH5>eBM^#fz7} zaNDkJJNN`=N%8AFV$#DGAd*6d2#|B8Mu=VU-w^*>fS{;9BRg=;RLQS!o;VMQbk#sn zC6i^ACqTV(#0l_{)_e?R3C4{&{?7y%+ZKZszDL2wTHP+DM{TC5&OvTtr;l^ULD$pa z&zDKesuQ54eqW{6f`#aDNin0agU`Cgxco~N%51t_Oo*qXC`hZlyQc#yinC75sZb@W zzkQPrBn5)KbJS-rf6b@#JL=;Z^|gl1^Ic`>HwiuZFeRyKiq*_bNTBkb%wYxD w*L4v31$IGg8Iz1O%q*-y5pl;NU>Z@!L4WyFwNhPF*Qv^Ll?zoTpEZsD1KZEt6#xJL literal 0 HcmV?d00001 diff --git a/timm/models/__pycache__/registry.cpython-36.pyc b/timm/models/__pycache__/registry.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da13b3b9c9d008e3ece4f97e4557f35ae59b19f0 GIT binary patch literal 4729 zcma)AOK%&;9iQ1Hm(+^Z!$qtf7;aL>PMo#^8XGad)+N@QA+_>y zm!28MlDfQ<1{$DffCBvt1^Nw&^jZ`>^aB)o?WH;N3*^%N{YUEhVsF^! zG%&9VPc%eREC^pLik3JvSm-omT`o*{$B$TN5v_$4>ztBH=$FNkSbo7eD`G{gVq6uk zh&54v!8@nLS>a*lj5sG67}vyk(ZqOGtcwMV=fs9s#CTr3Bz%nP;$_joxFIfxQy5~2^>?aptX2NMOd+4e_aJeG5yM@kwM zMu`+XPK@C5&|I`TX!;-M4BO)w+jBP9#EyH=@RUy+Q_GwwS6i7Q7B*Po2zTP*TzyhA zUIx8S9rb?3_ZoZ6tTyGGB^U=hK1|r2%ly( zz&Q2PQ=f_k&9d4Cd+Mcs6wPWS8nlBuOV!Sln`H~XuKsRTD!3Vg&i$R7U;B)B%~{^) zcEdRCcJJk_N^ZUWKy41#z&UL<0?wPcNnXgvBi!HJmHxxpx0+b@n=r3q2$X=Y=%6%+kY=7=79S8F#rZ$23^Y^wzXu=!Y zhkFOxJ)G9tckkT!5N>f}TaT6O4&gSb(Ax?Ql_u}Ly|Zni;c(jmE_TVyuE8so=!iB_ z6=a_UbbS#Wb6UK`*IbYLe939LP2S{7yoQ~CpT_8O)yAorxsobtXx3a`!i2)m3v0B3 z%+Z;$sq+q-awxyHGrQ)L*B)Th0lERp*yD9rINxJarq&H+Q=T{(|Go39_MqUu?c|L} zi)cU^hs$E^T)UogZKYFmTLAXCe~Wy%mkvj_Zx9AT=dr^8!Zl+R#^kowZpCRYjP-3y zs>6r%tzU?5x7%*+D5=(QE3e1tbEyzf8W3HS47w+9<*j+3->I-Mt_ZVQMt84QM^9f1 zjpkNb|HdTa>a=0R>;_~ssV7b2?734msb@^Z12*yC=FHR$>pO7An;A>69`H$H|7O;B zhdsVDX&P^@k=3*2)R`=pW`=OC-hzXBQ@-cZYT`Y6>k-|nPrS@ke+ERif~Y^vTtIX8 zuVt=<=38i9<^$Rype+L0CndB+S^?T8k67lYznK=o0k8mSP5sj>m?dB=cfiwf=9v}2 ze}^Ci-8;SbuE&?=0GN0*~c#tf=eRJW_t$x zC>R7=Ptr6dLhmQNcr0X)Chfc`&hY!?V@91H9w-5)V(uI|(Z_$=l_>T=h?Uwq1-75&o{UZzOET1r?&dNzucYI3FR!Tbeim|g? 
z*o>?!Fz5ZwQngo}b@s7WfiLz>#7#w6pr%g-{D^ZLf-tfbV!JS>%drJNj7DiDRp4^8rbg3b$k2Y7AaQ7sE z6sUiRX0r!#zZ$6l+6}61_~9G(BqGZU;Zda#^x>jq_&{j0BC+&*145`I6mx`NYCq!M zMYG(mVM3)MatAYxLGGO*fm)*OpA5Ue4kIn1Z4n)NX<|@^R?buUaBxI_DX>~$#iTcN zle*jJAV0iTz97E__V&;;X_}#u=#i@EqmiUb#OurGAVF?8k==Kvj$SjhUvpFc4eQ(2 z%?s3;R60koA|B@F+pV?2!S@ z2Mb8eUP08g4tvE$T_h}NDC*kuHHVg9Xc;P4M3h)KGb17qkX&9T5m4QP^5^t~e{4lT z1@(uFvuBR5W`*~Sm3!M!S8z1v$uX0)CC|bwU-2#y}n6!mfqG{B8sNTU8PcmIygB14dg6&s}OA=4(mD_6&3c!>8?&Hj_h-C6N=B^?59JAON(E)xgiM~*fj29X#6$RP_BASSA zyN8b|w=-&Nb`Jvk`1{g0FrOSj*p=WO>QYIOOQccdZWSI0**ej@PD&>6ZYTK%J85FsKs`mA zB%8+=*(v!@?}Ns#<6{xO|MrNFCSP%8j`TGtfZXOHf&TLNPQ~~i2rBssu6;P%Kas6BSvXqS$GDNRd@4Qlug&C?a}J&Gaf%93(cC}ZhH7@H2o%h3;zR>Ue1vK literal 0 HcmV?d00001 diff --git a/timm/models/__pycache__/regnet.cpython-36.pyc b/timm/models/__pycache__/regnet.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae87c1729c876423cd2179b21f84620f77aa24f6 GIT binary patch literal 18516 zcmcJ033MdKd0uzVi2*P;7JHIhjwp&;NW`%K7QoU=F81aLE_aurG+BDsnCck}FxOT$ z77M5+R*0oYSzd~gt-~^9*_Lcewj{@~Wz$mZ$cGLevK2eAq)ze@KP9p8Wc%s8y!hG9 zdrH2qs%J0@u4D)?8tCf&tE=k&tLm@+s+#fc?$pSmch1Ex7{)h^wkw188Qkv2qJ|-i ztRYNcmCdZBo{@!UHj1}MIaY~h_6^F!LF&M%J#2dw% z#C@&rLtUkNU+%&I95> zz{guTd&P`6ft-`AoB?r4oJK#+v~muJv*H|b9%|*hPCSgUcm%XGTOJjUipRv`=xsA+ zRy-ljtNt}}HhR>|c~V>u7tzN%TD2~Tr^K7ZCGoV*5pTJ%FFPQ9OJv2rxNc^5W2SbC z%i@_ECZ0bSK6Byo+3;zH&$;lK51%4@I^nYrK8xXVK75wK=R){g44>ujSqYz2Q4{}8 z$Qu#R;)Z!%_*@E~SHtHr%6>j9dyRRjXM4noSOo{}6>k$hp8Ldai-!1@=;{7*hWH`z z_UndtyAxeFuolVQCVof!EAbq@-7bDttcmM*y91@37wUz-CQ%XcLDl>cv-whL9Yk&Me$zoqYAnk(3ixIiT5e! z4S>EZ-Y-6&pnCxQzWAW{aRuEA=nuq)#7`*b5THL4uZRyTXb{jJiJuf7QP3L!eMS6~ z_-O^b3D6&lpAjEb(0zdZM0`yAtb&FBeO3IN_<02l1N!&k6*La$&&9tHzoMWCKwlHTDn6s2BY?gxeog$k zf+hidL%b?}LqSu30`Xb#IR#Av`cLBDir-YwQDI#)2LH2NvJB&bQ=E0YRO-B4cjWz< zXHPqBsaP#liy34u*W_YG=VUI{?Y!eYn;FW?dtTkWe`Lg#*GgB1YqB_E&$%OGqod=) zhsP(Urc+PIQn6IE%bABO_456#^2L%jzce?TuT@40Xlt%kTa=FL*fKvqQeVlJ-I2L+ zZEmDuyPhM%oJvhN_#mBP)$xYwE2-2;+jT^yR?XC!SkA9ps-cZc3D`2_QgspE?YY`j z=f2F`l9!Rr(6X#~PR6TcmRu)OS;^Fvs~OQEMPN_KWxw^d; zjf%E;8x`gpr;27osZer6>W!H~NxEL9Y_B*{*ExCaLT2c}%!P9&Gq#J8_}c8D9*vAG z9aNM~1$`75Pue9lVQ$^Yqt$#yl=5E2sd}>HgmmQRo%~|GR;q$9c5Bgb>LkJ`XI#(r zoSdpstCm-E!**TwhhtW;Jq)fl?_ki;9>*q?ddSXmn7vxj!55jCTgi-MPSxrwn3Q=h zGd4Olnz>MO-OSU3D|R)t4&`Xf4qCzPhaW$2?#%3&OS#i$&YpPcu}itf&zzVI240hW z@zUva6O{(dU|?>kR2Df!agGy_%NL4+u^@e_Y`ZRpQai`VE|>+wH(oML!#A;(VzWgQQdju$?c?hiz=M$>={h-Sd!<^?HCOYzauZ4c zZv~wZpu`HIoPi*|R2P^I!12XpTNd3Qf{uQ|kOOGzAJ2~9qdhjVy09{m$K<&qXHK1Z zGsb#s#9fk3u7bI!3AI{Ewd%POvm;)qQW?=)v$2MPSgwLr_oGQSj>It2W|t}V;B9>$ zlH#iepa0gA(+ zVs(0^ctN!homuZ?P1W=$o4)++-}&E%{8rQE%=$F5A4q-eqxJuCVn)9_V9l(bXZByL zqCFGY0B|9@ACL97AStq;%TL@tv(A33Kg+CfUKdZz6tAfEK=&bj+sout79f@f7G`I3 z<#!u1>wV0AhEP#pG>FZf9}_6SA7*I2S(#v|sz?56tMv zcokJKC$AnnGt(pobJZB)tMmD*F&HJ%sG_&-0s_;E9VNInQGPzs&4AN!?4?8QtZtfr`Ikwrb$- zFE{5E{6Iz?+!u6eiGD3NIy%-4Otb@&?Z9*!aJc=;czXjAqiw||+gqD%`*L`!9hhtb z#y7sKw9`hQ)f#~o1q52O0ob4ozy@sqHfRH|K^uS#+E#LHv;o+lZ6!zAj65YGFN&y6 z+$|C1Q$*3Ej)}Nuc&13uS$HMooA0N(@0u$pLxRIM+0Ik&V5L0gB=)vw7s#A~h& zVb%?G|J2C+;y)dQcsr=8T5SO*%8)`(L8Xtq- zGc;8gn;1V@ILf(d`9@05uV-M(K<6t}{tUEp(BnC-m)o!_c5uWYD+{CI$Ye;_4q}3= z9JME=$H%l`u!EQ&E1jvrxU=J{ow9Oj4EDg(QLR0XHMvNXhYNFdslGHK zcRzp>CbPLZ%>aTU88yiiVEcIth~&#Pn1f!XTB{DNIu)skCcAPAw5E5q>M zBghV7@QcN1{sm2SWMv-XA1-$2J>+&M0EpqAoa#P_th$hKv z)@hL2h#)wj^DI9S=2_Tc(0LKxYL$z%@|tzoj#Z$S5MLcRAr_Wg^q3JJ%v~MU`&_A9 z@(?yFAP9zVg^nR9*B{f{bVx`nVZ5 z%q}BFVhxW5=5k=pZ5p9}OdTsDK0`7dtgu~vK5g!dgA6ZDexyM_2FZP>)B=Z zLA*3;-kL=E--^Wkb`saDn+Oadl~NTlY7vD4vlzsda|q&WqEc4zm3K(q{|7!b?;$>U 
z1x-qZ8*YWK_D|uv^%Ri7Z|Y$<1*(S{BM16c7)eRkLxj#EtEKv2G>B`C$ac5Kw%j1D zrzG2_v#SU{hLg3afIQR=}DVjI55GbE*!4g^05v9Is>4%{3PWs2Q%$)NP6QCcJBRSaSVI4#Qw~kUjy| zT+-b>BV|nnR<$0)QMOuiB$aOvDc8Wn_1dx{gE(cDaFFLz5qYeZt}Fql*w;|S^;SRt zS7#9SYFtT!scfg3>zrGz)tzj&1`vAoN_II&X|S?X_H-1hgt}o3m!7_t4U0I}>aKLz za*E*u#Gi|fo9zHz{l1w&dWzwg?ZtgSX8uIe|(7YHQ z;pIMt#4)m_Ft{O`g`^Ds5YWY@Qw(<;vJ!uV&KHb=DG)=s-jPi(hJpERB3PkmxRSE^f+pBqptxzIgJQH#SU>*M_UnpdD0Sp0=UA;O^HMIeb4QHNU zlzzG7Vo#*x-Rov3;<1AuoPypvVAF^D6;+Ub@gYMger_=9X^y~(4c43Jcxo*E3X<9(P4W?!x z`8qI{Ikj&CMy|<9_@1=wX0Bi)OzkrTor{jd*r^bi8aZVF4@TuXF+xGy#a<0oVp5cJ zuuPK4A!IwH$Tf#uK99Uh$V|XME@K02uqTMQOIW7zIvYvmU^$n(Tuv=pwcI6_iA?fq zjC^reKgMJSvn@ ze-t$WYYswH`$VvFK7g#vf~}9m^_(H3>gDFDKS_Mb05n%`ZzJ$E1zUzJS6Pcvj+qZ* z3v|*hG_g?yx?69SFcv%HeaN#~d6CvzRDFE}U!$$On9BP>0DWDj<}#80E3OB2n& z+@)UgmqgBLf7=j+)gYL$4!9Sa%LS<%xAm~+hxe{r4#svl{F2D!L@f^n`Y`E0Pchd+c=2QPBpD3WK!i4mJm}-B5TC-VTE2y0hth%_GwesJcxyyG zOfjcIi9k$}ux=!1Kg?=7AiA+K^;+Yq!{D>C@)wy z*^mw@a{tT749&9?R#h)nn*0>88{D|rh_A96z8VNN3QFlxY zKvZ7#<7yzzhVWT0QjMZ)w2}0a0_GSuWJL`6B7s~OQ;_Bu7B}(k% zs>pMKSZ&S!XJV|ir5fPyN*CRE0q$MKcH;i5v1WH!$=$cwB? zGmwJP{aYkAnS-P(%c4@mR>JJt6#O+zU#OU(kB!Lwm;ZL7enP?ej-wXKn=%i2O{m_?}(nqs^q z>{M_605rLGBQdNr_`Mg$Bw$3~Z@^4jg!`?i`oj0sz9#E#m=j8qqdIwy_bJ@!G|9Dn zm`h{L0Ecx^x@%Q>CG|9f7uEsH3NuLN=W*l(wtuKK_wF75i_wej0&I87Hew9)H=Pr9hFLDw*!-||F&T07^${iH2Mvc5eva6;72_0ct@kp>-YP-T?+$i*2|{62XDI<_V|4Z zd-cKQ8xhl}UJlFbQ)MowGW%JEcDaHM0J@D(bu=t{JIWgJJL>Blsy%f$n(f_O<7MN@ zcR*_r*njQXJuS)(fU<+|BKInsA4Cgxa+Y2;uY52RwEJh}K1`#$lL;qX_Aq&h3573~ z66GL#DIBjr$TOkNQox<+fE&J)S4Sk`+FXU@(5)3)Q){PhmW)bVu14IqHMFo+CLCLb zL#f1WmYDRR3I! zY9WUl`79IK#gIxjcXu$Om4y-|#4o?fKezh%UtczBDba86jQfCv3r4oQcX#+V~}JoBHX&4 zsio40N+G!l(aX_BtPw}>6HX8107IZ_%!RaSbT&E>L=7d|4GU7KtI@3#2P1Jw7-lKT zE0{xRm5V6NO}RZqqtT1IPox_CqO-B<8hi#_=b8Hqz$$z(xd?LvuxY8PCr z0el%i4h%E+zXOyVSmRND7vHYD_i1boEigzN7cdt=FN4JizZeU{94-A0av-)7* z0gR|V9ms=$jQw;=2P+407?*9#Mx3$s<_L zi+BWP4!NwQb-vQ`Re*266gD?o?rYBc=5_i#_gc0}Qo4yeBuRb?8Mh|to(-b3J*zBL zWQI=chMlPz{SZI=CX$(3Ukq^ z-c+!`-86jnZ&14ae{l__RB&1@qaoQ~@*7AHXRp_tsz4a8jt#aT2?4C!%OIX#s<=xP zwcQ~V_PrazCYwgs)<&Q`6c>`xMpCw9Bg;9P$taUbUr8EGyFrfzo7OdiFHY$QeqsW`+!? 
z*zRwbq1f)v@pe1MZ4@ttb+@@E(jVCaVMJlYzcd6@(u~9OX7nd*-W{O{&Nsw@_I)=d zZB0J;g!d`jE;SOAJB%%DZ(H&k$O$Z*b-Z=3E=}g&II4KOIl+dxx;sRDbBWgGEkz=k zPT?ifaod(*IT&yVdobJ$Gbo}0iWUyY%RfOGr7xh(Sy6tynW2bm`U6?!{&yxVb6pK& z(92&{v=e)>jo7^*v75`>6j3C}irhEi<=Wkv;5A4&hMFkvHaRYPLdPWxK*_&FHCh(j zH-8+u=4*Oc)`M@fFUnOwpEe7g0G*Kh4E(rPeO#Xq%|6#&t z^kUp?A zq%2l!LrQo%NGBE22e*cl#VTz`32z7Kv_g7(Ye-qlZ9__UJ4g=?Po6s~XSRlv#a7yo z65bBd@!>HbJ%QR=U?dj%p*Ez1w}W(IcoaxaZVf4mJ>Q0u@OF?+4o?6nMdB8E%3|+o zLrQo%NT*d#Pj3w=i@m1}DdFz}X;)J?9Ojgs*&0q3`#>8`!rQ`0-aWfDoGkX?Hk^dF zgL9nyJhwHREcVehoP@s*oT1=-Ybbah+8R<8`$QX3!rMWr1@FUKL&{>m)P|Jsc93ep z`^eUive;+ZkP_YwQZ0C?0JpG6S?qIdNC|HTsTRDCZ4D`l{dOBt!rMWr1@GfqL&{=b zYC}qRJ4m(Qo!uHz7W+yYQo`Foss-;8TSLlXf7XVS@OF@D!FzsdNLlO~ZAb}!A4o&N z`_>^Pc%R%FP8R#GZ8!;U3nvBdg{|RavH#JAlkj$MD#3ekYdBf#TWvVQYL{3${86C5 zyJYg(3{!=oksoClCfr+Z1I)97|1AIat1Yu2h9ES4>CRwIkW{}I9IjS%u(^Z3xE|)u zv^RdOh_JC!#t{Kmxe!5Iof*@?Yqhtp!{Z&KJ*Aq2GeF0AMRqce=Z|z4DuNMGKVy{G z;WgAzVI3h_W3~9>>a2_2(EAiCaf4oO-hP2^bexnMplo|dTU*lhm9#~bNvTXcWpXJK zNS@{gDto20l@^s$Pm=0FQhZB_Q%UhADQ+Z}zvOb1Tpp7As`yTFM%$b`6|!L1CrBQH z|AcMh2YUdy-$tV1J2<=5h0rtt)(M2Azsny&*|+%H7fq$;KK)aFmU6P1r8Ja|CS%Et XR9F94is@bb#bh$sfiq;WoxA@ZI7(F0 literal 0 HcmV?d00001 diff --git a/timm/models/__pycache__/res2net.cpython-36.pyc b/timm/models/__pycache__/res2net.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b9fbc2b52b681cb3a96609abe6939129ef66fdc GIT binary patch literal 6980 zcmc&(%WvGq8Ru)cyVUAw`JFV3^V}rTN*h_SjUb8Q7t+|O;W$mYXuxne+!a?`ax>&g zwm_i;WFRL8J+&y1Q_)LLJ-6t!_a1thQ_)^3pnpLx?e7~>>-D24jMffU^UcgRGv9pk z@p}#R_T;41_~?c9@4lxfe^bVuD%$U(B(|z3Olc`hWm>4VR9R~+P1btPXc=hfVX1Gn z%zn95RuyKjQdsF%TUFUML#sd0nvm^sSnE%=CRL@Yw5Hf=tjesrYHQk`W)rM-Penah z)Kf)0UDPv0JzLasRI?-Blv^`b6*kX~-c{I9-{{Tm>#aHVIy=rzfbtQ4g4NkccIvLW zueRpVdxM>3&&l3V^xkC8v+v2?G4$SIFECs7j-$80USwxv?*w{_>?QWH?43lf!Cqmn z%HAoaeXP`%cK=}C^poa>pW1H3iuPxzwa(l@>NC5;CKEGgpQ6MhZ>-L7fV_zSJX|RxZ zZs^+|xZYMfj(mF^w1d?5(joW1uop(uU4A>*UW|FS;kJ{;^7*Be#iixt)$`T?z4(o~ zCXRl1ef{d?jmx*3OP8;#e{$`XbN%xAhM0c@{qe0!2T%;9u8R5gFbJ8`kC`7jJ2-&j zb-KV$alacRsn7EvU`#L%2?@ocrSxbh6DS{{Bv3&mBR#XA4JBr#N~*FltFY=fR;K=( z?I$qrp`jCfWA{zD`FLaZ{ zPlOdjgJJ3?s@=XITv&2Z}w)HO~=lK^cxDkr|to<=3itsR$lliw)(6uSSs+F)+#HXFOWI}H!A zOd6LjUi@Kmso88Kc#lrsPd8(hG)Ss(baj2Bkp}&KLyDcSNi*`(#lan6ko@OBGN}P5 znx>knfnP=C&!N8?Jx1C1PSD*<`)<@2#S!qTe0c7FNX^5P)ftgG_4Pj%asb30BmS#Uc(TWhYkwCz*fZ+T_M zIhQP~o^7r%#=NJ#ABlA}Ss;sDUOV4>Ci~%hO>fOR-&xN0i=UR-EH5pO^rq>qojvbL zz1iJ*N*mrzp{P*ORll?5ueCc;Ny}oU(B_dAk7IHL8^`1-FpkMz(!-c@iI^=Y-#@6Z zaul)Rk&zxg!qHm_0)T4plsv@);kBh+W5d1Y@Zo5Hw-5v4bQ z{&ZFybh64`HLdj~GZmxdc?vfW<~;*5r?QD0hxLjx-BtExP-btxpU!}SxvySQK5y>L zWi!25Ndfd(DbuqmekD*b?^X6y@IBj`B}&AHs(ksYPZi9@DoOQ-Y|qN}Jg6E!Q+W-z zqeKb3l<(=-`&eW$ghJ<7u@CsnrpGuv4*JuQ#)+&(asibqS&eX!CRd)!EDD_W=p?5y zI^AieKn<)7`6h~pdE@;n2>KISK93N)WiT|_ft$!kRyX-;cr(IGQXVj0lm{+jLDUr$ z78Fs@?DBXxNciiRhxkX+d7Z!;0JjkF%Lucf>-qKPL@5~}gW)e@L{uCnh=SB{fTh$$ zRD#5jVO8jT_jb#oP%TFip(79@{JQC;D0RX(Mrhv&7OTtz&0H(LOx;Ub$oB3Ms*yI-{OKZK)OQggT{BiZFvD!BVF%R>QBN&1)vcEcJv| zQ+Mab{Kw+q3mj!~l+jU&a}gz(2S}Bk+9T7$9f1r%s-L=1FY$926$UAQFn4_(Cl04o z!rbvG3qitg72TYBav1UmVwR@nYJ(gub=px2V+rl4sJqXPGxx|$PtL+b@~`BHB$t=K zbEfVQkEr+6J(ylk&Ge$B<*gL_!^o8$+|ItfXTT+t;Ec4B%AR?gkH>GW9Nr+L4Z^I1Bv?{7)X5K!IURKHAT2NPuT7lGZOsO|PEjW>zpTZsV zYUB#}U(uQ*qvwClXa;)VM?WbYpQ=Pt5{t+{7*zrPz<_s6o^N_|zL`;Lws0XQKx=k{ zohz`5z|KkeKq8s8rbs%4bTS=IrIb4rPC9V#FFgD+wCYEsE%FO!^UDCDByT;i`2mcg z&|@E_?sGTwg}xgM_(+-gGA(u100wD{8a$FL{j^@AxcQCye(nMxR7uJ8)e5F!Ih{gj9NHGgb@{mt|P~>v;*O-{l&WdTMXH<#%zYF=Nj}joaFG|1!`m3lM9qq{TgLlpoDuuf#QP_1X_N;4*%kJE`Kk 
z#CM>p2b5m$I{@D<)fZ^S6YDCi`XkD?R0;QhRO#cZ%mnC)^GGJ3J&+5O{$nnXjKb9D z8$M+ME6Fp7|G6U_7h~yRlm9+Wmxrg9(0X!N(yBk9jAZ$;$|WV0=dR{TU47i&Y2Krq zDb36lIPcasZWmuMJW#t{@KghJ-H83sN j0Kms^jnecPdC#qBhHlm9Gx_67&6xYlDr;)ZdVKjer^em~ literal 0 HcmV?d00001 diff --git a/timm/models/__pycache__/resnest.cpython-36.pyc b/timm/models/__pycache__/resnest.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2e0e312f430c53375e8cc6a36e0b863a07732f0 GIT binary patch literal 8441 zcmd5>OLN@D5eBf>7j{XB6e&xx6T?q=ZIS!Xk`!&n5+zIWODsi_?bPN0gBWlL5{q3A zu%ftua)?yr6H`8v0<4yl}TN;#?gfH|d-N>x}D|ACzH^#EKxBs-#HrLx#* z^t^kzdwRMDr$bp-b`6m_S@5-)U8TdJ*@T#gPOlc`hWty+HRJ=33+R<8?N@cpA z>F6!JlWk>@&$67K>*QN`iRb-7r`Re=yx<$1QmZ8KqF?Tev_^o>vl1)cRa>L(C>vp; zn=0Oi()Z!?J;q*Tud&I^4C;=u*V(t&adv_|xtT@&DfR|C%bp(QO<>P#YN&ZO!Ps-3 z6E;A*Rf?_^qI>>NAEUc$`dZiy|h3VZpkdPi*?LGCZ~DiPat=Ron}p$JBHk4Hp6CR?l^L9u{m}|=1wr}x>9-D zo&;OXYi_vcUXRQxUFQ0sVO+I)E`QbhC@p{0yx#M@Xlfyf+#vG0fw|~L>s`JUeq>IW zD^b)7U#(Vce#^T#-R147y%bg(wOVt!c4mHN#xNF`-HTjiF7s~3>~gQ|1-5Tq-MG=^ z&Wh=EdcL_{HP>CQy%L3{9Sw(Ud(ldNY1-*_s)H3fXj7`spP8?26TGxxzJ)TgMKy*2 z`n_e3X6kY?qR~T#d%eiS2t7MGW!eD)9)wDVNp!nQ+)l#n_J+if}Rsgea~lB zhq!9dRF<>cMtM=P6Wk1t8Rc%9$l*yll;q^hq2*Ct7)26V+?=sYat&n_05wXeOy$TMG8hRsjr! zx)ERbI02IO;IUBxn>RH#Q?Jj>*J?z`);S`=z?|)O%GxWR9R1>~$S-+LAE}bvM;4zT z0w*6YfwS{dHODzK=QPhq0)I&a(!OgS*FPAY`t28I2hrn_(@f1;wqXyNVdGY_#-^He zR&TmYathmC3P~>iu~SeFo3|QG9H>pZxjZ+!OsuFBgfZNhuw5zMnBBo=YD|>2+tljr zFax>o%FNEYVmxhzF5BzfQM$jZXY9I^T^V-wj-6 zZE#c;MdnVCVdA$%mY8P&7Prm;UUEoJ>30-~3@6#^@K;bII*4NBj<%}Z!S0JoOkLB$ z6H(^JYJ8X}YlZMdiH}LVA@L&;e}nKykF_|@v?rAb<&GNXCY0L~@ksAGaryR0T;Nfh z!FYPO8XJV)o@DCnOaPwTo{Y1xVrAoTz+9AHEyQX(K^`H?Zt5s2#)skwjHp;fJPO|B zHwiDrlPt5Q^UvcV{~W!_tbh`g6*n`1#&Ez|BeAlY#hlqsVVR@Rcmksq;!He(CxX3PuPl>)D!qsWC3_n6C1Q>MVY)DOtDdAHvSMV8ylyCsaE%R82D zZ@63-jC_YBzw4}3a&i#fL}xhypCN3PK%KxeK;@XohJARXdHuq-d|BFhq^3vf|5 zxg8fd?{blWE5hff+dKjCnndmb+>ZtmS?TDA%u-Mny6r@DQCzs$z6ys1l|}m|v!u7e z$x%ugH$>rWM*eQJLERd%8w#l{oTDgdb(-o<%evE88IJwXhC}OCjtV_={bf;*{DaRS za^NH0$ztCl3W-)xa%>KU+VHS9gs}wo0lp%ORzwCf@e9f$=v9Tq1Cp zz*_{~CP0GWaz9XfQ_-bpgnCO9!j&$MoPH!S-F~!RSa}3_2n^U7I)!A46jj#@Xq9{j zyMIpB6wqpL{9b{kV^+#!W&*q8!ps;Jg=T%9@Umrn+_(L-CU04+>!7&k-s;&1E4x9e zdDqd{IeBdy`3p~7X*gpO(AVMx7`<2Y8Aad1XZz2l0f#@f%dpQsR(wkS%>UAFr^ z0yG4r8Jkj2?F*ZXjqZg0y3F*kS> zaEtcvC9p>xE7y>b$_#HYD-SS{lV@5eiU{F5RwSlU^ZhqMcjHsUWxyR=8z zxL@8Q?L&h<4TXuD4EL+WjTQ%z_rnU@{VIXaHXlUbPk?vH&aMKxAvg7-%lai6HI+5t z2bVSV`W@1KSyTCAm-XjES<{`tSIZhL4kYV`+}n>N>aAno+Sc;`U%#j;G-@hp!VfNL z>h%Yt{i3Gw2NbmqBc7P>Tx!Jg4`{@9i5fRT|3%cZ^Z);%rtc10TKz#h{p zG-%3v!Vk`T>Lrj8=I?l)RQ?dm*U~fe;LK0&R{tl==Rd;g*LX5Bu^q*B`*qiAP0zHt<=3q_ zM2k2XCr(1fF(f1*aW3MJ!w`;ykR`wd!g4JOYzQp8T_7w37|xI^u)qTQeSf|8`aN|^ zmSiaj!J2v1RsXB&um7+5tN!|{Ub?q8K77}Ua-R=}Lw^+V{XGExZFs8hiHAZeG!;@| z6)AKr>Vm(m6rbsy>YnMD>Ot&w)lur5 z>6_{ke`jga%;u@hGh3#%%=Az7&upFAIQ#M@ho`PIuT-1V=1XDxZc+VetJ-!c3jcNow!;V83IDE35xm{) zgI+;Da(oG%>(n*sMVC4dBLV1()wPn>OMJ0=eE#cvab8L|a(F48>(%ve_o%&>I+4?W zFV+oCti5hvPNBQx zhj3b;83%fouf6W}`R@tTL({KOw&dls@=>QAqsV(qy%_GeQ;&PSJdXKtex=WU+(|ou zRFjhSMknnFU#ycpzl+7o1&f;;EMDd0b2IX}McU$4e_MF`(BO@J^?>xOS0kmyZVG8{ z!*jcO2=0C>+XHV@>FHxrR{{PiHKWQHGp;@oQWX_{Jfz}g z$LVV>MyFl`$gJuHq}zcc5dS{)sIm~B)Vg0)RSo`Y;eSA#Rp;R6i1BJQuf7BR>ySfT zbs>i?CkKuMQ))qdC*EFf?ndY}>b2^z$HN!HQ+p)*pn9Epy@dBl_#t&sJucw^2|ujf zpx!9q8zh`nPpCIZ_+=7)M7>$PMZ)_eY^d*2Z+en7%EO1Ppvsy-&+mrHn7 zeO!G)!Z%6yQT43)K?&b1VN3mx`lN(!k#JT0u=)`R-zwpn`cd^`6248sbLvy-(-OX2 z!e`ZwtItSyzl6`JpHQEb@Bs;*S3jvfC*gwY*VL~|_^5<0s^3umUBdTD_;K|`^_vntCgC@zFR9;>@GB+!M)lk3 ze@OVagr88qqkdPyCnWqPWvl-w;gb@6v-)4^_ayu(3BN`CzWM_R-zVYkQeReoDB=4h 
z{8sh9)gMXt0SRAHf2_VD;a5xeN%be{PbEAh;kT(jQX+K`SXm+-sQKdFC~u#)iissE?`MZ%_p-=qFj{hNdf5`HhnfDoRrz@`QEG{O-) zQGuNj*!vJ}!_zLXqQKsda0i}Fft?oE2M~_o=@Qr(fqf9+IG%2Sl?3)7gnRJx3T#GT z-;Zz~o=pNP3+%%PZ^pAlU=@KqgK$5dtpb}B*hdiFhG)CL9u?RRAiM+5PJvkh`zXS@ z@az^?RbU@OnDe(Q1y&Q-#}VfEe6_&l1ojDpDVJX)u(JYt7GaLdFBaH26*&>w`-9<7 z$k+?#*oBi7D}O3++^iloYw`HWQ^jhcS~IMgsS-vtVI=aE*$atEA#ti!o2}kBJUm^j zotn!HxV(GUlr0K;j${#SNv3 zHAFN@31wAgD|5AkQO1<M<|O~XWU#j23Z)jRp_i$c>Uug}55E&UHqdjl_-D~j=rW$rySG>dLbH{ULH4rSW=x}ON2@hu#|uTPTFWwjJML?TsI?81u(rF-K*ZWX zKOOj4Wvfm5ufUJpaps(1O;~0BsMAgCHd22Va;)~i2}L5|j&K|P#lqGegy$#S zPDiFLRSbo`C0%Z)VxgCiiQMdx6N97q8O)@|(xd5Al2zF-ywe$tcVR;13wfg;g>4j4sj z5LM}(mqQqOF2}$&)7w6IR^Jt@TBsM1m+4y)x<&X~*2zV-%x9#c zt4KPEem0q(Fw>*slJrKG-(0D*j3^vQ4vwmHE~6&&_`I65qp87MGLdS=PGc}k_nPxIMs>f{0&Ek1iy5%T1 zT_(q;1}6&USU#gh*ii2H$DvQ;{rudAG?L7W8MzTXFqPM>*ucQV zB9$(4FgcPoCnpQKtFMS$Lhkc5k_y$HP=gaWV|1)g7!@A7&8_ls6nC<45g#1St3qaU zLi5almE=&I)@;|P$6!7^k;eQ&SL5KCGD(gN=En--lVdsIgZZ`Pl1>epncU<^Zshso zl9?Dxo0()jKcRU+c^H+33OS=VJC`dJ^Mzu`tjYqmYR(kTnZ}vw+^lKoRji&>XC=-s z-)G&ICPrSFxnruWCR2rx(X_7X#*~5qaAYDqF*!b&5+*sl5x96AX;7(L+8iC#oZwAe z)H=4C$99=P$V!asPKVvt!X^e&`GT1$Ovpr{A(%G=GxUYzC?$iSzrDhcxH|jZLj+0HL zGJ_LlLFFSSve4j*d;{b7LtzO<2FRd{dF`yhKlEjr2&y$c$JLN>H8wUfIqrH0ElRnbKhO)DX12gZVOiMtS;_Q5-T##S3%g zyq*dusCBEur!HV3FjMk=Pgio;%xGpLH4JSvl#o>P-F{qj<#$sZmdzB>nOu5Q^RzeP z-Oz$ZM-FBu(rGB4M)eGI-SM56oNUII#U9H}=EuehBUkENaEtS*%?&K&9?Oo88tL@-h-h)=Jz21BV}!5kOmbM_3N$r1 zo->Tm(Y&znWtc97Ev6#pdGSK_!wfg!ABCEPce`i%O zNSQOn;8<=nJwB>ugqJ@^Y}?^XzvvBr@ygF$^H;aZwt%Z-3hEcFeTe8&XL~*VdACf9 z+EP>(lanc_b8>WKGM6jJ`u_6gS?3Nq36N=BSK3W&M*UWiI$E}zF=-SEHIjyb={%=nm%B*Y$4J`^ zMLC1+PqI#qr<7@qNh4qWAbb8XJEr?+a>DL%eb|}Q-gL$VVn4(Mrp8<#COi#q&3!*Q z;sT*bX?Sa{7wm4wouZ%w_Q0bl4~&_e3kH9{KZLg0>b>*TW1A1yz^VllnQw27%lh?J z&-n}W2T|&_##>*p+x^f+xvjt`ct?xaEIIBi3nm(Vu!sjo`Q2_D%sLtY6zL0qS$f=P zTwydq!hYVn)Yv4b**y0Tz-kU>Z1LiOy)?5lk~UCof&t%H75nqGn3@rm@B-RFf$ zoS^#Nve#|(-cp-6Yt5BSVB!N}Gu;cJ0^p%Q&`r{aB-s0cMBgh#Rw52!J|HJ|8rvS- zxi}HPK;sL6D$ZCysqJ<`t13j%i4Xu$obG8=IUqCwk3l zL$D+dO+{3ADoUGiJp6^vY&+eKrvotgSLl5rRDeB{YJa?Is#A5q0xC2WqkRT`yPg2r zQ*q!Fx3(Z#!RzSd+u+S_hQq(DcqoO~R@FG_NDGg8=wjq_49cc>`BLkPD?I8yIK%WY;`o^IO~TG&$Wnmt^PEp*j;Rp?At<>7i%wV#R1K3wmr z$Ie6-`fIR+uZQcKRY$$wvQ)SnsYmgyi&iC` z-do>{{JJki$`J*-lx$pb+g#s*++y_@^60tL0eCm^*dlrK)OzbZ@S}d&K3{&D>TQ4^ zH@I7nHk9qJ_rcv--voD?3Zd0{H7CpuG;C%Pm2&Aq0;=NK3f7SctWQCAEHGBd>5U!z_HtM zMWcF>W=YuXEE#zdW=S#T1g5M??3;pd_7Q3azS~hF<-Co6* zL^GO0mQ}IrF7y1XQKos99VwUXjsviyNhymqD|R$jPTFloz6R4B!_ZbdDMC{g%~|Y7 z)s)we-`v>HMp+sIseGJPVA&jvp6o91&%)+uV5{9$HA@9MCMcC*0Mr50@T-%)pILYN z8rY7r#b8W@7UK}2=JF==_ptlXZPcf2fV@uGg&Jo|#s$-|<7Jpnxqg}_IR2PnXrJ`~ zlxBU9MfT`ybXDvw>I0-!b_XonvW5X7bY->8Qa0P@>(=)m@%8NEL-2&6eUZ)K%`otZ zFR+UEy%=6V2Ry_ zJMp&7gSC4;3>{8>Iu|RDOQ)oLBVe6gyqLh=16a(1bqVZ~fOXX|tf8jidSpB#^@LfQ zln~a9+AV#+dKXaAO!98}zK_m(=&*gQr|Eo<4!PEP9~|tHvaRj#d5G{+7%cL0wZ#qJ zS*TdBH&b`8xo<%1P`E7;2b*~R+z@}=hfEcj@Ar|#$!4IN9UMlMCOUXLOT)jp68$~d z?4xr=Nx!k)fpQ#9Id0zlD!Y5XQOlpAfgw6Px!C#?ozK9rdmT>Co28P=?X-*oC;c!Z zQOe2C@)DxJ(I5sCV&T5PkY$+&0y9i886jlZCy$lzoW*YmO|8d$MqoI%~j>5(hmYp<;Z7-B66|2Fo);k%CB1v~g z>4}X_$X+KMQ%k5cB~)4@l=M##QEA;lMY>WpHOBpcZF+3{enLM?Me>>3o9DvveAgq+JX5A0+gL=zJ26+jH29boXJGJI(Gh0uKep6`t6Dpa}Os zU{I83;n6~Lcs9JxTXa}I!b<%poiEV&F*vPdh71vJWu-kS(Wu4>rNrMO_q9t2Z!B-f z1}zaD4oHNCywI|t9+VAUE?u5a=kiP-m$=A_&jRL3htTP6f&D6A-Co)rfqfaUo+i1{ z>%?;8N}rFBg=LoFX6xrXdMpw%6tj+)u|Ca6KTby&tHE6y#@|LlW#9?O7gx4uY2$`m zDI;M=(lkixNgC@XSk`Ch{3kk|P_cfJ!OzhlV>e`p^-~ObwR8J{NDy}aX;v~IK@yN4 zop{5)7#;ecX?XB%xLfUo6ri|wdG^q_S*ot8vvNlRwXI#>iQ#L73+ 
zI|cFzdDCXSq82T;vF)(T>rz+(x*}(RG-9tqugsNc*O@QP!GsR$XA7v3)f)tm#Z9M7W2Eiv*d2~2EjJVK$om?(Wdq9EGOk|vyN zgg_G+wK1A+23;bckM%{s|CBHJ7nt9Z&4so1!K3i64slx3CoduwNghJ37YJA00-0_D zd$u37C^x3@;O{qX6)Eh5zIrQq8MDTkWm97jhMgcBqwsp7=C1C>`|1~oMF;ak&B0H< z1>fZaoWpr?692fNPXAqas-Hpp>Ci$qo*q<@GwNROQ=f_~Y*NuSs0S9dJQY6wmRcD7 z4}Gl`t;a5gFGeqh58}YrL$eP?7WyBIE^J-cwy<5baRRe5^xn|>A`3gz&rG)jA8I`tB#gQhmy zTlyaTfA@##-N<)a{mLgp1yBL8drm_|f)X~>cR+1|77vFWonYL0PeC8vk-la9 zH0UT3XS-dN%zCcJkq8@PzsQ%%_8Rof#?*-7%;7*Jk60gN@(;oph}xZU;OA78>U}KW zbDC{@*x=^m?Y_v)R0x9e|a$IklIelZRM zgj!!>{60^Uq5IQy1VS~46E;;S5s*7i2o=kP%2XdTxLG1DWml(q8P@eEs`JoJAgD(i znOjcv&RXW#Y(tSDnPw4HCZ>q*Wo1WHZYr+db9vX8v`%$v1mrxE-J?s>r%oAxE3yOc z`!Iy`<9!EU*CNa*E#FyRMLv0E=KjgRi9mH8PCy5W?!(m-PoT-^vo?wpZ>h+eFZi!p z^{B4GHqnUk{tIeEck})WoH38_{tGS{)fK$k0gyOu!Po&vx7w+8;kQS$rM(zaue2yu zjeo|H^|;u8B!>0D$p>oiH4Z51z)_y3OyEz>RLbGAM3LtbXCRPp3b9o34k+?`rOt7m zn_=+4BhN{mID{M%wNpk-a!n*|NSrdxnu%KyNgQmX<~dO;6m<(kHEA{!DdGV%C7M&yC)}R>L}9JSzw5W>sN0 zIgv~ajljTiT$i=qEhT{ivY5#g&l)%(33^oUABSL_GqB1m7;~i>4z7~&PWyQn?j?1g z2Q`TWPGsigz#`Sfxpk0R0z zFJYT_R``pQUsPT?mDrn3?Mv_(%M5tw)!Nd7OU`1rpEl(vFqxt;r}FYPSD5!$Cb5_8 z!6%uT7+6u})iq}B)vmUD23pGxO0nfqs99!0T5d&^tCd%oxkHv^Gnbk>yysz|Sq-Hv z$7eMRQBYY1c0M?2aTL&a>fX*_OPDgh4}$@Qf~woGgv*^C<8%nKY~)I&(w(U>zOJHX zUR0;d#HtI$pbMk!%uSShL@ z4pOq);2XDPK(CbMIJTmTVT5n=26>0(D`5KWCgkKsx7#tp!XSWGngD~|pHxFced5N% zeo4SVi+=}rWITaYy$s%#SuH9kiW3rfC~>bd!gIK7sGE;qG-@0iKZ3*i%t=I06$O-V z>YRu{kLTfkR0K(tU?(1WxoO}&0x3!2aZ=-)1V=7Ny)WSmvWeu#$jG9+n3a3ZTaT_f zhyx!I(^+w|LV>K(E%K2Ago(ZRN~vNE$o?F6)pR~SiZ0G7*&-{-S%-KDt@4z{^#nE0 zZZw@PfgQ(TqD6Hb%XlRmhTsN~6v;b-{T*%}dZNBj<-{X;uL^z7m+de{LqL3}JR%wU z*mvoYHR+_eo$W@B&@vzJA*)$VGOWS07EOhjN`ruV_IQ|3o=X%EknvqfM;^K%u?K?5 zrTQRdAXQ+B3m!Ni$wY@YDFE#Co(2kD0g?^H*B*3b?5&!Od6G*RhX+vu$%q$WxE9O{5$QTR~ZfKh$ZWI zm&5@@=S^}`u9uJ9|u89!DU6q07a?sW^_~$TYh^9xg z`%mGn1`L~+$6@*vm~ya4W;$zCPMfkMO;pbmXUTE68iNBdCT+EIrb)i%^3Heni0;FI zrQ_mpBHdt|6Ed*}{$ga#KEl~NT7&aNwC%Zybwx$Xalx*&fxVRFrUbopn>TP>L!K{ z5~XGtGTuq<`sx_b#UcOT5CcAFj50^`9Wzwgv3?wIAkNCt)_WLpUt$j~pWzQejt_Fg z2}ogbtBI)7L@}bbz-TUW*!pd_M?DpX9YwN}`JDjrkE*dvc%pc!k6?eKzJsc2C{lM| z-|eb19n~E*TGG(+Bm$LBD2v@kC~0VO({T|tH&B7a&U8Rsw9`5W)hc!(p$6)pYRmd_ zD5*OOk%ir=lX0vP_6|E$*A>{a)Nt+r;^~6QYPX8-4#5(rUDn0DP{3}|Fzg5RsXn#o zQdnY2JiO_a*xiBHfHlPitiB8RdNHRvpM-WY8!zv2O5UY5qtqBy<6FcUCytVL$u4BK zTQc`Hp(@;kc)0KGQgoU&KE3rWsJbQ5wx?6Rl9U9mWb_$9)=CRG$ z1=Zy^*iE-xgbE+|ZYuA`E-Q=4`?2C zP*X77ws1vl3)GlbVAFs~yLKoP<4~y4&ZrNVZiafUU+sRnyS{@;J194=T)67|uKJZw z6mEvnFLb)Ue)W@}^HY&W<50zgPWPVPTEB{_Ikl@EKD`aJ3}v_1cHqr+Jdye~b;YIV zgD|Ja?!>bTu-#BcUU`WrxJ7aGQR@aU+huAkgg2ong8OnZWdi1G&x$~ zceqKf?rrGqKpfYuD5!e3xLbvUzr$R9myS(lp$* zLg$a@hyw8|^!+iNkI|t5Qg5qL=Vtv09NOQAGM=3p`fVC3peAep>uYq_IkB(&`}Dt^ z&XsT+!z0mr%Z{_OmF!S?%EFfW%YVki9XKY8nU~#(kwUfvy6!Q~AH(1RiwSD6Y~Fe0 z*jv~g=J^^mk?-J(9!_f=Wnk6nBdFUqidnlQk{D$w_&BFmF@oKX@zcYY57Hs{tu{~G`grft$$^Z z?F~~bQL6H;BxuyYYf_A8Q(^AH5;1LX6s}r32?Ym?QL0n@I_TGr!bFW?)!h-4jfv}7 z6v=ZJc%}LvJfU{(fYV^AFB*e7k?KeNy9+ACSokV&uZ=L&6Yh_80Co+OkR9P|(YUA~ zw?+D+Jy1>d0T#m>?$GoAaz$i*lU|QDcF<9({I$XDz;ggkmHT_=xxb9T9G&?%x^`qC z3UkS5Epj?~F=A!#OGA<zJsBn+`7dr zG3g_Hb}v=LE}!zgEeLNkeK~Jxjrlu)pV$D{o6ZQKesJp$Wi9K?h&BoXO?!4QUUVd+-*`4GU1inaS_lj|-WP3zsPflxa7{fCo zra*6ZOT3OHQfP^}teE3r=Szj1ftn%%mTag`g!TD*#D;wyM&qXmI#zn9e}bB4go^E;i2TV^#7%QMd-f_$X0PQt^+^2`=|rggw zmywelbEm2UG3zRDrY0|kP&<6s?&Qpv{ghTsM$NjKF@*L@BQG|1&k|qSMQD6Gu+{{hvy(fZhk$DC?p|yi`*-1z0{TafRvDTpIY-g=@ zz!N*zh8-Ucz_9tdIo=bC#@k{&UW2=RM)rxXLVRs_R}U9pwSnE>(C$zXJHnhONn_gG z&hlUGON?6V*s>dA_xN5^xs&K<+wC}Z%cSW|WwBQ-=Agc;b3Y<_2i91hX3e=v7=kEZ zy)9bpdcz7U+=)jj%t=KpRN$S5&cA7xDPlV|%nC0=Y8c|{*lMlo>bK2bdjLBkH47V? 
z+&|GY8ca?&n;EjKkXadc2IF|h!k{0r#!(7J0GY6CfnO>Fz)Sg(7};@->7^{i?IP?X z@#lz=3;^)b+x7* zF6;_YFNoO+>3HLsaAvA6Eute~0jyioQ99vTx?pN7K*uLoaXnV=&P?^Mi#V3BZwS`GUfK$4-p##iKfP^DSTj{8rZRq6Nmu~u zwR7)FTFwYYlC9bIGBu+C_Pw0|M_8_WxMix%M9Q1$5E4M?`Lgdzmq2Vu{v81QR>;5m z*Mu}v?P?&6QHYQL((6sf(Nr@XMZd_oXn>9f)`SvMB^oGU7$PKq(#FsccYd}QheUID z!^-J+a7{=v)m|d)o4pbiKzp4jiKEg@l)StPg<3>6K*>XELW-%90ZI}QKxsoL3B+1a z@?|9ADkyn)O-M7<G$sE05+4hURY$cVm&7N|4~a1GCNeAgI5HplVS zX9tA6CdXdaJ#8c!r*o%@u(V=k=Zbj~oAuT-3?XQN1-naZymKo@8^1PZz-Z7~B0kJ9 zB)5FS#Hs49A&mTnriYL_tTiM{CiPH+33Cqsi1DJvuYTrU+GKB7Zb@*yhDKzHSrsDWf~*hXRCZ@Urfm7@@sS^9VC9 zCv@*Y9b`8f`{|;p)WAn=*JF!%c@Q@XIlqE2J<(e>Ns`mykCS+9@ zFFyhdv?4DhlH6Rk#1J3|Um`-Shk)a2!j-8$9Ke;3b;R|~R=5U7yEQI5mKzUGtf?`W z>e)bJ5OQ)=t+6bx8-t6dry}Yez!2OHl_NbqZEG754w zSAh9RjTg@`?j8K{*io3j7Ocul;&5>qha(fCnDI&UN3K0|eiqjEVylTc?cFdw!44`6 zWAB$T5+|GwHz2cP&TcQ_UYoedy|K4(sLRMzaj0u#bZBH`GM&2Rk*t((zg{%y;@w34 zkF4Qahr=+QHeo}2#)26I4UV(-pSXKaY7T=JZXTbn4KE(#SJdNAu8HtW^(l|=oe+eC ztRvlro`dd5XV>fn!Y@nsuaUZ~hrd^?iSSJIGXcUAvMR!3{u2;xfgOG0xd@MQYI=_Q zLXTgj$1g$i`_@ErruyXo%?Vi*&6gkKpNr<17e@INjPjRhL$d_o?_U$)nd(ac!V}V( z@HgO%x7_iJ2m&kFfIX5iym*A?myY}r+ga4QCQ6G(`d1%5Z~&+8?h(lj+d@&(v=r=) z9N%9om2hBYu!i<*uG`T34ZTHx!msvGxS!Ez5(ttnuajXae)U-IN0Y*Yw5IS{7Tm_@ih3{R z=^!(g;H`!B3$$KtO*XYA(lgc98>Gk7oRD=S{n$!L@2X#3pnY}ewb>E^`!#E#JyZQl zfcAu}iuTLT)K*LTB_?kFvHSgs*5wbbi2<0Zv&|d#(fJ8kM+O*QW#?aNo&Ev`X!ZLt z3-pK9!~smTCBOlMwB~?S&)M&5Nqzl6L*C9LUoeFt3{!U~*u2vPi>L0%{je0^7bSgZ zWP+PW;Cd=vw?l2+vU7U9jfeP;T>Fii*Q2&PJwt&&5QUoX7CBFnLeH>X@;Jz;>fuA*q=g9uaI z6kriTR>dOAuP4^XB1%3QBL5a$%C?^ zfT_shLyB@cE{#yl{Hd~Yqm_|JP3X&`981xCxIXXs3a~ifHLlJq zH-}rJ#0Fj;aZKNIR|-J=t_A%S$0xn>GA(k=6K_)c0h5Ls!<%pt|6Y8}L-iBx^DTaS{!o#Zs6fQ-Vi&k(IOD%$v`ruqK{cFQ9VYCpwAJ@f?KJY7Y zs@pRM#Eo03N7^bX7Z3E#|7NSUI)1`iU+FeFrmqq{Zt@!78qHPo1{!aWb*)N*rYNc8 zNP1`lpDdpgzI3fuuzq*!_=6@XzEy6`ELSTQZi_fKj8Bpe*Km{JaLzO*^O?ffu&nv2 z!<7Oq7si(+hAmMBI6qnE3Q+m(%`{pHf4_lm0pJU*ikFoaD~n83eYAGT>^D|ybbxm9 z{#THlCB5J=_;AoaLO?H_plK_nNsS#8cB_`At&iaaw@|7altLZ6*{2^j!B~WUgNXm7 zC5X?-vb6NK1NALh0Qd;+&1Rho8=R4EMTCDHX!E=Y&rBX`L3qNy*@SnGq3IApj zemrni_S=E*KaEzsd^Md>IBS*hk0b5#9_pFNM_Uk{@NY8V@zv;WHyKaDOJ~2#^Qvnl z{2P$=c@v(Q{CEq(6aEb%{8BS}d?8#DUQf*bUC-?GNG~0p$5SuUgmA5NeR&=+)<7pyRyw=72ydAzXdp+}SXf>5@TtI_eQaO?!Is|X&i#;0bYciW;U?cS`}wS;&!SE6 zRcxv9>etjv{p+NGXdBv{H8r8FDG<;Ft(XGN-FMydZ}YdjPZz{J%HVxfcRz0(5045z zr+z26-KV>QXnIzq&p3znT7BCymF;hP(ew}+X!|Yzg!{+f@wi_!8oaJfwL{M%-wVda z=lmD+b@}u>@%gmB*DtekO@N2C5Ylyu1Hesr- zcx@tw6rtlGG|(pAi10l5vE}JqtmvOx{k3Riyef7%zow>Ps=x6!ji|o}ZQV4t;f=>G zEAFZ*8!Y`zwA5(HI|?>puzWr(e*tY`Ww6d!J!dlMKYGpYj+BJ9ZvKtnOy8Y0S}_Z4 zwO=vYY6Z6{&YTB2t8W^nYHN3eh^v?-bcIbrMJyE73g6qMIu>f!ctB;{?HkH^S|7Pj zjZFqSZLn!BUxV6M{HnXY1)f8h)@iM(eZB+9R^L8M)z{QMga+ED2LKu9d556abG;)R z*l>v~8t(BCd2iDtINX2PKivBS?YG>f3ojMHj*qraa_*zACQck81ed`(8#x$CxG|$) zdiK=VnUSSTv(S$Dwr8F}i|YFZqGW-_QF>8P*YS4sRAs7N9#yeX)6b0>LcbYQT|9sK z=5D2ZD^PV|O;ly7>zb%aXn?An07zfuQfhHu#g)$y-%UT!MHfI%`unCk74uG4$rLPC znzL|@%J==82GfI68~cvWqIl~&;b0#{Hde$!k<+7rH-5-y2{xh6cA>Tm!LLcW=J6nuCHOReaL7gOCGz=Mzg9_zGRX`+hE zVzlJ*Gum?*CUCpg<-3*R$yCRQr>F`dj;esr0IuH180a}`}XyQFWHv-rB zR4WPHwD`g#)n_Rr17^zw*lL>R%$k~qsmfmSxC<~sSJ*tOTY$B0p2Z8W7R|#`YpoYy zt7)Usn%an|&NsCYp@BB)0pKn?)tlff7W@?Zt5}46A;3nBZx++;&Ant;e`a-svvsXu zrc#r(Z&(fCP&_~L%lmSUl;&`U&hR3OI2@vvnPa{(lf!ke@}WvxnG>{m!#!pCdx_AP zHm)jj6bnzF9kgCl)>+!v%TKPAud`;5)#~LAQ@!3JoV(5o2Y_q-Sx;!Jeh?#U$*@hzf%r_L=`$%Vuw(vN(G0$wP@d zz{R+-Ade4;DjsY*Tdd4g6MJgXG0YO)7Vv=`m~=W?1tsrfd9|lj#%DciR6Lx+HV!)! 
zK81E6f$ykmMFhG?xl+?#hOJf-XK<;XzIO^Kkgt4oG-u?`@T*f*d<1p|pQF{w8KW#8 zVa3H%utG(~7C$Ol(q-$skwpogt97VH<2OvKrR5_&Q@EF+k@VMXu7}UwT18HByHskd zR?f<0dhcPIvt>ga7LS8^t<62N*1HDIGz<%Xh|hM{ziSY<0I~Ii)8G018vxIsSgmA{ z)lCJ#)%SL$`mo1Lm}PFJktd-Wi(W&kyh8M5*MuHZJ==sHp{qh~MSDJe>NU`l8QcnS zd~{7XGS#P=a3nN|s(C5idPo$t z;2$$V2PZ1R#TJ6A=VGS%oWBV~Hz;|9t*tssnJ^BpG&;CzL|1>MFJH!#bWpqJ&gxqy z_P0955o?ewOUG^5Ap+>QfMEjYfNH$#NgVg(b^_?QfFuEQ5IaRb9rtCL06H#U+4k8; zGB#n^s2aXn%4*Vat10GUbX-8AYAsrDjLGRh)S+8YamOEVjvieU3R}O)C>=VAtTP$; zuz>Zu@Zo`t`j3w!uIm5B)ubus^VW+wE`1)2VRseg%J~{T*;^IUdb_>G?>yiGll&Pf zm*q>2_EtAIB++r}mQk|f4Zn2_GPJvD#hIBQ9`(04b+9;7!dz~NWkBRh zYJaRQI&nJPbU2i22|F5(l17pzST;^UI|Ha$KrxrfnS!kpepc*i}m#ba$D zxS!6i(D@>r-=gzXI-BWG5houll&|?&lZ4$!=jC*6rgIaWTj<n~`{;;4)dTdsn$8rR z*U)*8&O>w_rjw=f2pxk?j!vFVmChWU$LPF{&g%58^iZCBX}3V z7iwK!(BJQaH)88F5#$v2y=O>=>EoxL`H%VHW6n{@U%LVmsE*U_#BI}Mvo3*WDY)dO zYwmY->Y$4nMOXukPd{?h_45Dv^U!|eA!WFDdQcAorgz_JT-|bYNqFbOnXTZIBG5jk zeOCV4*VfbCf3W|ap1z*VJ+V-@=Z5~*hDLfi@yTpHu_b=pckcV8;=6GEcH@aV`7zw( z^Skl*-v2O8tH&S5?`Ii|-?QlH^@ZK|ZoUk=ex~Px`t2DyOq)q~jt{>MCcl znR0iwyWErQQLtpGx7?TQQ}I-(zdVo~DDTPcDGz1`%X_nX%R||r^1kf8@^E&zJdzzL zk7h^9`?LE|{$ zVr(;pR_{gZBjUcz2;v_W<6=UUZ^kBXkUzDl-=IdbZV(gGZ;oVd6R(TI;(pB3!Lyn; zA|AM=i3hCM((N0OtO>}2;vqmD3LqZ>>0k2a)NkWf9F_VQQh!IRh*gz(1gXC(u81|2dK9U@ zC$5URNIA7OKKZ=N)s=accI_ z$;k!av@$nQsFo)eRONAL*;XDMPtFw=a*k>RjW|`aoOghZQ!JOwV#TrKe7<1W6Xr$q ziyq7?YACk3m|wNhbCy*>sX}Q5Bw#LD(lYb5S(6qTELH$21CNp&VqTPHRTdYDm3+yZ zm_4603yW4^xmHC-HfKYKA}2FnDqX$Lv{!1is&s6#g6Tz@5rl-)~!!r+~ zzb7iyDaAlPu+Rpy)p`nZpjT?Aby9Bgy$s`*US#yqe_IbXTYJQ>!X znVOlJNN-V)7}LG3r=`F?5vPg;XBO#1Ee}e|m&Ocl-?^ubpFKT$`eN?f?8(#TFP?jP zHuu!&a)=_5 zT7g64$H3Et=LtOaK?E)(t~8{cn20-?ql<({iqxBFSAX3AkIu{%{|ltFEj-%RI}8#C z7VzNnqes2uT(JZWm6!Dtrl1VYSXr$n7J>G}d;wDtS&=17;YvAI;GitqOZ$^&7weXn zBIY?2O%^c7Tn+Qpt=MI)>eHncEnE4D7qK1TrRR&%_UGM8tM=3+jYZ@Tu#x*3!I&Y3 z8E1ev4jE0x{P-xoy!i5!yj-xqt;su)eE0E5k$3Vlll7&mlO#=h^7P4*mq2ballF?V za%Ia|tO|RQ>s+mzJw7{0u0HAWWI*kT<*d$3)UJ9_Qp`Bov@-}a!_eb;6n{xw-i>tq zRD-G6U^>|H6aFERuN7@|rpy%`pNQ;9oDt(sjX!f_YHfP_+>;kBJ#!M9cxIwftE<6o z{XSN7s|Sq+Ywd4Z>f#L4=Lof zK27o>KN*W{x51@C_HqN+770~e2~N?;+EwtI!s2+5a>jVbxJp(Y9C=byuT)CayqH{t zkT5Rox$!^{3wc}a$W$&ry*NHKHB}Jv_dh&Qa~2oo)#ooB1^W#r&1X63@F;J>IkhRa172TJ zExT&6r8?=;uRa{M8TJhsVrF{TYs>N54RS`pI!!#b&AL&o5U1;Fg_s3d`@_B(_~n9) zVhL|F%=D?7hg2XWdIxWumjnHJU19!_DKFEK5N#=^C56#?-C;?jT8jdM*32UmpTe?7 zrbKR`;`O%ytkvq=^&opI34$z88sdi%LLARYJoZrpuD)(Knrk?^i@4DDYx}ef-PK2t z8q_w7^@tl0hN}%~%bI+^jkx-Q8ex!_BC}g;cHxiyke?qt3V}3I&aZh9h&5wTPha&C zq6p2UD!o*>SjnyCODmQat5{cZt4bd6gS*g%mn`R(EiA?AXNg!Efd*;fpq`;baUQ7x zL5&aPI)3aK1eTW7VOukV4qIC0Wo=%kt?XJV8>M^)t4fSgAHL%+WBAzwFz@#I&PxkI z>WNS*%qPKo0BqG6?^&H0Gg}nLxl*;T46W0|=S-DaAJi}CS=Vdght3J;4@Kvy`4q%y zC`dMxUh|Y?)u;>>1yw01|5vJV**>hBLbngsD2{QFC@kCm7J;K}XiNHrrfY7z*5}67<8G=p z>=^4QH(k5Oi8xUR-^Qk~o_3Aelp?yAYq$}Rn7-cab_s1cCFk5OR|hoe#@x7wxTq_l zn@Ly8#Vc_E0X`RZA+CcwwOj(Zy4x*c8xf?EfOji+3h-{g<7g%BCPZR0j{J0(-=*@6 zpkBt!h~#oqe&6YKGaGs(?({76Zm_35p@C3Rvs)%c2i+%4$$o)08vEi{+>84&=_Li` zjTQ*6#~0bdbR6q}<$(B(Sar zddVEv7no5_?qNyJy5t^EJHt!!ShJN*h24c7ro9+D#TQdEehObq9rja)WgqH$My28< z8?@@Bgwjl)e~tBcQQIobdvWaB!lLaZs#F6J$@u#rCpqdS;_?$j=LNQz!l;8WW(j43fr*gjbgppEVk>m~Xrx3i3$G!uB7D$4C= z=8@|~xd2|gZpo^hlNLd7UwFX5TST&xqxG<;ziiM4o!>+eTXA8%e*1OkwU=$<)?93b zB&PxBfiGjr7}!D}{3UQcY@9=kxdz~b87hWNvcy!B<&nd53{SizoltFo+q;o{!ElrE zCG;b64Dt*L4CWYc&{8m1L;xj>!}j#GY;r9Z5PQ;Igk7hw;&jeN0`?rJqT+2-u?G-n zdO8xtYNfHBefUf3_2KKLC8#vkH7lP%6@Sl=Bh9MlEI#rAl4D735t=V@IWL`~X1P*g zJd?{^2Cofr61kkH7C>8^O1Z*d4T0AkP+h?)l^T>s^25qrW+6p|KY`dL9(6b+6VZsK z_v}kXB;lN+@N?^jQ@VL9)t{XOv&O`8utRjfX79jjkz zvigwGC&{DR&|HkPJn7y&o zrTnwA4gR4n}> 
zKna+IN-&X+?U66CB}I%rk9o#=^4aZJe9LxxiB-1SljP@;CK9zfy(qdoJl5m0rX_-x z8I%|_hZ1saS!PCaP!%St3~C4(8=;*i|8wNn_aN|jGWl|oD}xV%D>r#^k3Oo`_q0&u zt#V{3(Xq4{D4ygXwmmCWJN+GGH~8;J3mP}zyrg?RCs~bCKspBZ#IXQ=JIq+Xhsk*x z{1sd|;IxpFgP7vK0rzdd6c1Lg1_#!bk`#R?djqcfM^9=u>rP%QuOWaOq0XUI0A*2FDM-*)SK}egpx{!!DGcB|}NHSR}9!9FmY=qhJvcH!kmU zaAm)tRnl%!v6!f0F_2QxVaz|CFhcIMGx+`tzQ+>&e*Y}uYHGYFr8Fg8sEzAIDWHv| zI`?eHc5P=rjcIHyGiUuT@z`pvB0c(mUfI)-Q~#tSo&61-#&#R(=)6Hy@fLczI6FHQ?<69QM4iw(kq)Qxf3Y_X zH%Y>YNy0t=vgvLjj_Zg{s^K{wQqAi~bWja6o@H3s+(-qqgToPy9?MaC!X-Q^UvN_@ z|3r{J;Y2D?(iAkLG!*U>jx7+hqPKV@jhuu!s-V;O)QCeiu5y#0#%q3lK#fsoJv{JI zB1!VE3p2P)F~b4z&JGu^P)hGUb$8=}ay2iDd0wpI?iQC~_^(udx>9i<3aeY+9Bxp9oe^w| z*|D_0GhRVupK0_nHjltdg%?3yly@lp5l0eK#5mS9N?eN=SOSz2SmdMB{_qSzHb%LM=NhBMEEUSxEr+QO2Z@A=4B?!3z{@;o;xB&!m0ROe`?X zG)*#TuM3tDP#Ihv7OZ-=pP&N-FOU2_0&~Qf*#hkk{OrX;n*{fw{ksUR+k=VaxEvKa z_Moxe>qH=SW?&tNY)01m+@!qUiB=3Jw%(89eZYwadn~zDaZ)ZEW=?4@&aLlpQ}QK< zm3u}sv=Ut(boYoDc6Ke;*;KNUOKGRe-2)3m+8uC>c~~}l`-9u-_Pc{3{$|4Mb9=rL z`GURyqH>M(0XMqdE420g%jci-be|4z_mCY#o zNdv?odP%69tI8x1v{@8uN@Cr9D-V&7O;pryBKE;Zkb!-X#sVszJ@`v2NK~&6w$S1B z8=Hrv+Snw+P!tTsHeLQ4;P17K?ooGC}OB5x)y@ zFVoueWTQ__m)}6{)?)}Rw(5TcZX|xl?f9tG{{-q$ErzYr5C@jy_JE`RjP5A&i~Iut zr5C*CApyaDP!lN{)J1w!n>W@Il|Db-gn(OUuvT`BXyA-#k-^Cx7v0A+SV9#PoFy)L zkdFFkguSY4Z&0=`C<~}6+aHug8oeY%quc=C5&OTQT`Viw+;dE;+@X5c#bDU;-i}g3 z$27x_Myfg;hQB%OOb$Kfmjhn{NXw3Jn;DA4D|-OyERH~>25^8(jD zsi*R5=av799AG0~L;%IBsoN`3Z`TemZ`eRyQf(ucSexpX64kxIz^_IIt{rN>9p-+R zK=VqEXclA9FDJq_HG)Y4}abBeTo^uH5sa1N_+^;$%AlOO=DCa1lK&haQubyF-r9bI3EMjFAj>^@jaka!aWeX?(mX6!@V?9fAHLO zj+7ntHgyl=mskFYz71^)mmS&`HK#3d!!0rf3}J0V$(W zNKyqZCqE`09w~|ye+R9%7~LRcLLQortLZ*RXcq!#TdlfOrGx5y`ZQgEjeNZIU_lo5D5ZC6K z=3t&hm)nQ?5sIvlr5=9sP*~rGw!i80LVoI7A9nhe29$oXXYISrptF}cio0(EtwM%+ z^95sl25hc#f4TtDax zxr6R)?m@stAzwj$b9>wa?l3rC*ZS@5ZIHp|qA8al6p3iWXuorSFi3PXpGQOw zs1vc=ZAcw-`$gYosaEz_MXBF2; zao>i$#dB6sP}}KC3Zdb<n%34%0L~w;pr=X1u%zOp6m$6 z?wyUD5eh%soxMJ9O|{6Oue0s1FlZNDBoN#?sk@*g z9CHJALuIxbyS>FoN^c?gPY`&SdAI}8W35<$uymeqav%9!20X*PI1H~=Mab8fh$*KK zF9`=erNcskT`A*otyU^J%7Cv7O}-_DY?ar0MAS=JSQ_}h;+85>FV>XgB!8Zre3`{E zT(|%g+t*BAW@axlqx21gq``~3z|CaLR6?^C_iu5%WC1?BIGJq8p?OJl*#`G)uWJGK zyZMsRPQ7m2z0nIZP+K!siUJ;+@Uq6JIQw6q{zH_9nQ|L*H zry+yFSJ1%Gg3G%>94A9CMe^?uAZgu1$m<}=tGwLn!*>$UA=DTJWXP!ZUq_+gjTvb; zhUX-nK$>+ZlEJfqCJ}CVWui0SN$V3>M1pVocdEDuYFn+q#>H9d+%$g(*ykIYM$_wi zLzG%ucsC){!8%#shUJIf+|mj^o{#XofPjV@yGZlOQ3oP)Qwqt(x9oboP3L!5QQ#%Z zJ7x$%9msjTsxtBdr}|ezM2-4zh-gQlkiVcXPc|P#&JFk+41vC}!jS(85YK?i>kY0w z7c5*9hi<)J3cH38GVHp&Oxxrn8a*Fk&%BUPwpbcXaH5d^(vN`rH>wsgG7DxR|1k5s3|7Dm(%o|2>mH)@gYUhStA6Bew1g~=hO8y7g8n{~yN6}fbjR;AI zYLV4A8rKE9guVW{AT$Ii_Vq7_71_uR8l=}3=!hZrd4p(jwWv<=uLaT0vfCp{ftO3o z176)tG!PA!{)Z@`)_(`gl)d(A%{f!3^o4WQP;H~~x7pa=XYe}={sDq9owCutS26hy z`8C*S5?1#L^Q^GJfEs9vJA^VAb8E_fhQNy~@yfoqfV0+;|B2u~VeqF6)L7OS+u>T` zd&qyDV;MyxX+$FiSP}252k%Ko;C~QFM$)llT(TVI0Z6qD?BVxw=?!eJt z!>~pi$oG-zmHOIkO_EIYBuzMU%=SZ zXW^5B-#Pmy^=ohklO`bYLkPUYe7=&yIp#&BRa$|4I7;749H7;T?^O?9DW4zr!EfHi zISs|9fOm;lIsT+_y!Ya~yqOW|<_|&>Bq$t_(qShy%SGpkdE4t<_T8p(K>@ETd@X@m z)RT0ennhhN6~4US#qb7@o%0flc{}eovT-tSLFJz@*zOGQknt^22X~H!BWe(okwHib z8B(UD9$m6jcn(1Bi7BU4`5yq5{}Dk*Dm)ERcuwN6-v<8xr_)Mswj&{2)7GPIB;Xw} z9B9f?0sC}YPsYcm&P!>y$vdN_sN;dKm z%{+&eO$}NANNOYEbT9RQ4E~ysrC!y)j{g1LQ>+Dy_K#?g>YHVn?Wz(qD5*Z#LTj(- zX0Na5Yu|DD(2LS5s71V{zxa#LBNBL$>nY^Fg8UTZdFUNMzJt6pp00K1EbuDhjAURb ziSE-7gEIhQqUQ}*FW!o;!yRC0Fr1Mdl<0H25%#}@84YmEEbVP8F|bXEp|%oxwkff% zt;Aq+l)zxPEpKmI-bh>CP+Q)j%%{?Wp2h>8kQlgn`6-{JC>a)5YcKbB{kaA%72z`R1O`(xcQQ zIG#o;qKMrM;Rn)w6~+~H7^L~GT@C_!4Q9b?hF7l*`KnU}h&VH~-{r=RqZ7@)@EL8B 
[... base85 payload of preceding GIT binary patch omitted ...]

literal 0
HcmV?d00001

diff --git a/timm/models/__pycache__/rexnet.cpython-36.pyc b/timm/models/__pycache__/rexnet.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2212e550482004b7c5777d94f633794f2b7831ef
GIT binary patch
literal 8779
[... base85 payload omitted ...]

literal 0
HcmV?d00001

diff --git a/timm/models/__pycache__/senet.cpython-36.pyc b/timm/models/__pycache__/senet.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e6047981e052799bbf61e2622590f3f977e9bfed
GIT binary patch
literal 15147
[... base85 payload omitted ...]

literal 0
HcmV?d00001

diff --git a/timm/models/__pycache__/sknet.cpython-36.pyc b/timm/models/__pycache__/sknet.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dd38016f4058d3a1bb5cb4f74fc8fb618e2f2310
GIT binary patch
literal 7739
[... base85 payload omitted ...]

literal 0
HcmV?d00001

diff --git a/timm/models/__pycache__/twins.cpython-36.pyc b/timm/models/__pycache__/twins.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..91ed40011c23f067bc196dd37c8451bb2b5eacf0
GIT binary patch
literal 15148
[... base85 payload omitted ...]

literal 0
HcmV?d00001

diff --git a/timm/models/__pycache__/vovnet.cpython-36.pyc b/timm/models/__pycache__/vovnet.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..35456cdea32e9477a53af909bdbae6def8054894
GIT binary patch
literal 10478
[... base85 payload omitted ...]

literal 0
HcmV?d00001

diff --git a/timm/models/__pycache__/xception.cpython-36.pyc b/timm/models/__pycache__/xception.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..51fc3b53bea27b87c68934f978543e9eec360c83
GIT binary patch
literal 6618
[... base85 payload omitted ...]

literal 0
HcmV?d00001

diff --git a/timm/models/__pycache__/xception_aligned.cpython-36.pyc b/timm/models/__pycache__/xception_aligned.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ad4c8b2fc31cb3aec895a7f6293097a8a808ce38
GIT binary patch
literal 7647
[... base85 payload omitted ...]

literal 0
HcmV?d00001

diff --git a/timm/models/__pycache__/xcit.cpython-36.pyc b/timm/models/__pycache__/xcit.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..76a0416bad31e1b09d2d587b9fc19d5c1cccc5c1
GIT binary patch
literal 31091
[... base85 payload omitted ...]
z^oLPvpq}*x=eMxVh#W7uK(oQnC37yT^sCfKDT8vC)%y?*2kHj)^%IU{!k-l>1u(Q$gEY_T#K}0_KVV4Xw8b7SGMVTJ;Jzb0TNWO_v!%v{5Tt+v&@s#E&Tv&5?;|e=VypM2km6emkJYF zbR=@CA1jy8;D;fEaiGM}qPzIV4(gxX9Ln-~l+BvurR6BsTa*JzZ0Zi7ucmj%&p zP>Kc#EDvO|D5Na5GA($Joy^kU{!o#yBSW;|xB< zfSOjYCTR**Zl8tg{N!SeEH{-{%yEs%6iRMRSY$0R`}e47#uKBs2ivct)gQeRO6rI; zgpKr|wX}1s@m#0)^)n*6xI0{*KKB7M=j&_|XF?kK99U~a6WS(*8|z`WM%UMB0;ahK z0ylvJu-H4n{E~edd@=>03`1}hYg)e_d;IXhrN8Jtvv{zfDeTPG| z9PNmr#Z3b2zp&~8uu#Ls#=*>&umMt|LGudDBbBgw>f)lk=jE?HVV^9hs%JlFpTz2*a&z`c95{1aMoqKz zD!T7jAhcSxshob~!Wb@h20@Gbr;LT{pO4eLi%AH@f+v>&O#ws37g{P+25>cr?$r#lf{YwnQ?36r? zKFe!A1_|${cmfhSm=S2)z387o(3lsY6f~kwq@Lsl_7|HKb~B*V#`*lFnKubTFHOCe z0R3QqZUIJtp28&p?Dr7~R51u92o6|ixT--kKm~(^hT@FS*pA3b0ybV6E^zW6+wcTL zN`jk^6$^@7aybRNq_8VQ42&5n2~j62?0gy!>JWs$u3AscXUWnl(IE=YJ_w^sJu~00 z`cT3`Jc9L=c1}R+=^=<9d1gXISK_8!7_XR!%t5wQVzL$o3Lh+y)W6Iw5q%SpjeB6h zH&(_uLK$5Amxig)*c*q8YWVBSe=h?WRM8ZZ3zcbXaGJYuoX*7IRqluKC9L5Mw%2cE zJsI;(JS*T{XPcoXTT@>)@k255F` z3~ijlo{iCD2%}*i2S1Q@;SpJO1}_bHaJHB{ZDco}>TkhuO(O(y z$|>Z}UBebcQ6>^y+RA5jJ`>8n3aJPcs^SY*6o=S&VZIxZ)SADb_FezG`=?sW85 znD=k&qJItf{|A?mx^bEy(_1y6ZBjQ40DtZw1Wn~vm>Gh=fw;hxQ1Kb4xS=D??59)M zPa8K}tNNP;#iH}h3gSyBthA-^$2+H-2rN#y7J=1JJbxgC6;6bhsGxnJBHQl`K#|1+ zX;Eb3qR4K+G#!Ehd)fSOpuocMD7CjK}qSXU@w3p=z=%}tm7EP}N)&g(ftwjo}HHY><)I2%uUOVU1!?xkV6 z*fFFDW9!1-a1e7(BGQx;aM52ak^wKF(M6A!(1u&|v#n~x=x6G3wGHsBx<+kRSK>vb zKD9&b1f*Z>Qdi+VAR62tI@k8&^pG#TUv#FX3ms?f#Wr=s3Eb!KeBiYLDb?6Lx4a^v z&(bee&R523=fo{`YmqOl+!`F)hsVrmyeq=v{Fg%0^`?n(@TQ5BHZK{RM}NqwdQsk( zpeVc857>82hYcM#5ybFbi7dQ9@_Y9lD{M7o;8tyMFFc8%Xo^8E8m#2T930(-l?kT( z321BLZZK*0E-wZxz_$u5FVhiX;&H(rD@NOnop-+gOH&h=2uFL8}`lH_PDx3lrFi&z@QFde%r!gdT zIa1rsN6uU4qvvDiJ~SiY_F`7Fx!L9u7~ zEiL7*Tuq@peC`ff7f;3CV!>jOp5KX?3cvbF6n2aB{4TLuT;*L2=#c7H0~eweWx_SM z>=g^|PLj#C^Yw!vSGWq%w-4P)M z)@gbvq}J$N2GU&+21jv4G>XB>F%5GNth5GyV++!Tz2#M!?s^6Djt$PRzy-rN-i8S* z(lnp6Aqw3e4KxA}KN;M#uv$ZJYpb9fygAY+Y!Yay_!00ty#3WyIyBidYBZ_CCe~QR zY_aLIRSS(8jcQ`yV4onzHpU#7zp<^V+HDPP!_qJk*?aJMSP3KGRA}?Ir(rmf&OrgY zID=Q$7TU*73^#bBWA72KIjRzboro>a{0yz_aGL~YjGAq@MWhmTY7o)mhYz;+yzHCp zhmJqo);@0I#a=}NPRp~JJ=6NswyM%XnwC1Mx?UCh;(En`ZN|o6hTR*Ud^j5Nc)!Xu zXOxVm)9fM1u>4UwH-@6S>^+sr*hBL67|3s^?Z&SmRomJ_BZQD+u%b3*%BX!rm<7Wo zMuw(;B5*~tp}>i%9JmmhdLo!t9*=r1Y6scb?<{GSI1Qx8@WuZON|?IAV{W31SY%A#UYyF=ap5;+J_+ zY{z`8sCGuwgpW64ivbB;SbIb)*s!#Z3d6Fd2&^V2+d-Lj|6Y?_#Pz#0@nloZJ!!ij zsH8A4&)}FrAQ(d_0x#Jar&2mM8=eQV zTQqK|261t*boU^bg;;;WUbeglmap_`ay7FWTeViBs}U>|I4X4^`_|~`IMp?1Ya3Br zsRZGj%_!b#v~s|~_W6VDQ`jGDpO=9Nm~+q&MKxH9x-438b1PsoV>g0-&fZv`}a z*7h$@42>u@jJ<*WiK1^J=%x579BMuEjf_gD(Kigq zA3z(Sm4Aao(1ILvkN#5v{)~YP=_RQb+o#QP>;nDg2wu~qefBjYAHVP0OK-auoUjX7 zlcT#^_C8dv{K12GsT8m1Rx3xfu4;Wbf&E;3LW{q&fSqY}MxqwKoypg;Y@cCcSxcHb zAV0C>Kwj!!Ws!k40VQX6_~vRLu=E#MCe1W?z|R`dc^sqTC(H?~qfkqsM}TK5rOB44YO&f%bu9G48Ni*QBA)}dq8kY?#;oGhc*H5*M~whu8&#z98J zXEGegsb+hO@YdgEz6=%zCfK%N+b30;7}h(mGN!a&9OIZyAQIWhvlG`^Nw`P*VUjV9 zhHWrT$7z#`#;skkArr>3*0nKM=?2>PKlC$jK)LlhjLX#sJZ$`EvtNnoLv(KOz$N_& zIN{pNsQ?a{05??yWnqq!bZiBsDz^mX(!kJ_8Zx0`6(UJyX z*q*Pq097G|fIr@O$wO~Kgyw)8LT(lGF7fIZuQ@jg;1by=fG5x}nEBXVSea!?HOK3l zfHr3+4j)M8u#n$3i_Zu8r_mz~w?rXPJ=0IYL|p zg%TPPXo%)^563D3GxSNE{lR4n(^35jzLqlr3-tzZhZQ(ktyua236Bd|DO2BWpL zxD93cQM=#NrqTKI2c;&gY%8G;jefqs2#s@O>}%sOcsXKV23eiw2n_=`5Z*$-w-|hz zfiS9=o)?(>4uijDAhJ4%135Ul(&BC7MqB?4g3+`YHZi>93H?b1Z$mKEFXBAVEB)AX z=@dUF5GZzT*SH4OmvD`FmgCA53Wq27iRu3H`@Rhry(=W1P@m2lyC9kJE5;%cX7D6kBFoev7fMGWZIE z?=koX213(W#+Dh_jR4r6WPCS+uOYaXG>ssV#(55y?5)8A>C8|(9m~ek+oeR)`lQ%Ma=hD;_|9>B<$B3u}j<>=|t=zLlzmSc-!^%xlY2KauW`UHjqwiQ|* z>hQJjjg*z>-I3F`Gkp*4k3|$LhNqI%nu0HF9;cg5C1Ia9;K5Bi4`11GV)08JPF&EQ 
z@x-SwS%6Q$AN1~s%Dx9S7WjWscpsoo4db3xTRCx;Q;TC>Mh!@vYcbu^@cg9eX`c8G z>JHX3z_8^#3AE+&GVK~zhu#CKa6}O2(Pi0N<+*g=A0ceqgg=;Klq+I<%5qxM{W zAElv7ei>Dp!{1a6zr=CpLz0oDJ%u@^aH>|t_9llfXh74>%~aOXkC)8s=HXzxlCzWz zRkyLF-5nYkzFk7aehl`3F{5-XU2~}-jRHfR1(t?7kr;m)g}L~zmXfBxQmTPtuvvpP zilxLDZ%r|1oNrk_g{2P-gqO4iRTF2ka16VE7h=$Zo`bWj_z5mANiKL_m z#er9-#e9W^r}qCh^S;mE+sqpb`y_YDIp=Ej|I`50=t4dhD63X+DoewP=@;LcpUSuk z({4c*Yo2ie*u*EnZp8eNwFc;O96_$j5h#M`*btPLER?_P(QIrRVuMg*MsQt?kf-N{ zt?MieeJgS-T;bV`ydK0Qm~hxI3*9ZkmC>_cDuvXX!8!d=b&bYbiXFWE zhE-Z91I>692yWpboEQR$)nlNwZ_=F4ehSr%ET>``DyPSw=Dd+vvOoGe#y-d(RR8r0 zm`tHr5WkG`!$o8(C33@jSuc^jOOcF zXl5J&DN9@&omP@{AE9duvu3ToMy46v2Bn0yQJMunM)!6^x<^-~pM{1MjxP-1UE>Qk z{(02BB(24#*yhx84DuU@;(7+bNHg9$)vWbDTB+W_@g=m4>K_(Qe+h`!;OP_3$!Os<%|+?WEtE3eJEg33M;E1pJ|C1C z*(+lzvUmO5JKjPmE0Gf8SkA^)|%|1 zl+fpeQW^-2MCy@yxrb~>B*uHEl(k;jMJb`1i_(29lrr8srL6VpE=mdAT$FO>*cemB zd#9AOX1XXPbX}CnL3g}S+iHoxM=`o~s#&vBm6z#-dwW<)U)Vw<`$Q}%5l&+UFtURZY;ZD{L6kW!9 zr8ud%XsfZvesw2h$M8=5P7JDNXC07lC{3rMI@n{hDiGA8zaeh??kfJZ*~z$ z=(>pPRaFpSyAey#{uW9Z@10WC`dSyIgl;ZM?`@%!@!lzAt#5QuO6caI^Z;7a;9kai zr!Nf+!j|0I!@leY8q*+>@!pAKt?zabN$92_@@NZ@jQ36?YyHnIA_?6z zL>_A)lJVY&WUcRa5lQItLS$$J>w#VGYax>H-ic(bf9)cY&`m?+{VhZ?-aC=3m5Oyb zLJ8e8L_W|$B;&mk$y!^wh$M7fME0l~G_788T%!k`eQ67&jQ36{YhBSrDWRK-(g#~8 zWxRJvS?iiEN(tRulpb%Pl=0pvWvv^#C>3168QkDMKf9KXV$k^C(Ek4fSixtNkKwHX z97Fzp_wQxqOP7zM$Nh|a`2?>7xEf1_Iri+QMcJcw}7VrVY7{S=IH<&TBW?{b`-@pz%*M2QYB4s79ZpPC zV!aW=1~=&X5QD=EWKSjAjRQ 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + if init_values: + self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True) + self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True) + else: + self.gamma_1, self.gamma_2 = None, None + + def forward(self, x, rel_pos_bias: Optional[torch.Tensor] = None): + if self.gamma_1 is None: + x = x + self.drop_path(self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + else: + x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias)) + x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) + return x + + +class RelativePositionBias(nn.Module): + + def __init__(self, window_size, num_heads): + super().__init__() + self.window_size = window_size + self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 + self.relative_position_bias_table = nn.Parameter( + torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH + # cls to token & token 2 cls & cls to cls + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(window_size[0]) + coords_w = torch.arange(window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * window_size[1] - 1 + relative_position_index = \ + torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype) + relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + relative_position_index[0, 0:] = 
self.num_relative_distance - 3 + relative_position_index[0:, 0] = self.num_relative_distance - 2 + relative_position_index[0, 0] = self.num_relative_distance - 1 + + self.register_buffer("relative_position_index", relative_position_index) + + # trunc_normal_(self.relative_position_bias_table, std=.02) + + def forward(self): + relative_position_bias = \ + self.relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1] + 1, + self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH + return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + + +class Beit(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, + num_heads=12, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., + drop_path_rate=0., norm_layer=partial(nn.LayerNorm, eps=1e-6), init_values=None, + use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, + use_mean_pooling=True, init_scale=0.001): + super().__init__() + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + num_patches = self.patch_embed.num_patches + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + # self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + if use_abs_pos_emb: + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) + else: + self.pos_embed = None + self.pos_drop = nn.Dropout(p=drop_rate) + + if use_shared_rel_pos_bias: + self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.grid_size, num_heads=num_heads) + else: + self.rel_pos_bias = None + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.use_rel_pos_bias = use_rel_pos_bias + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + init_values=init_values, window_size=self.patch_embed.grid_size if use_rel_pos_bias else None) + for i in range(depth)]) + self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim) + self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None + self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + self.apply(self._init_weights) + if self.pos_embed is not None: + trunc_normal_(self.pos_embed, std=.02) + trunc_normal_(self.cls_token, std=.02) + # trunc_normal_(self.mask_token, std=.02) + self.fix_init_weight() + if isinstance(self.head, nn.Linear): + trunc_normal_(self.head.weight, std=.02) + self.head.weight.data.mul_(init_scale) + self.head.bias.data.mul_(init_scale) + + def fix_init_weight(self): + def rescale(param, layer_id): + param.div_(math.sqrt(2.0 * layer_id)) + + for layer_id, layer in enumerate(self.blocks): + rescale(layer.attn.proj.weight.data, layer_id + 1) + rescale(layer.mlp.fc2.weight.data, layer_id + 1) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + 
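# --- Editor's illustration (standalone sketch, not part of the patch): the
# RelativePositionBias index construction above is dense, so this snippet
# replays it for a hypothetical 2x2 window. Only torch is assumed; every
# name below is local to the sketch.
import torch

Wh, Ww = 2, 2                                        # window height / width (illustration only)
num_rel_distance = (2 * Wh - 1) * (2 * Ww - 1) + 3   # 9 planar offsets + 3 cls slots
coords = torch.stack(torch.meshgrid([torch.arange(Wh), torch.arange(Ww)]))  # 2, Wh, Ww
flat = torch.flatten(coords, 1)                      # 2, N with N = Wh*Ww tokens
rel = flat[:, :, None] - flat[:, None, :]            # 2, N, N signed (dy, dx) offsets
rel = rel.permute(1, 2, 0).contiguous()              # N, N, 2
rel[:, :, 0] += Wh - 1                               # shift dy into [0, 2*Wh-2]
rel[:, :, 1] += Ww - 1                               # shift dx into [0, 2*Ww-2]
rel[:, :, 0] *= 2 * Ww - 1                           # row-major flatten of (dy, dx)
index = rel.sum(-1)                                  # N x N bias-table rows, each in [0, 8]
print(index)                                         # pairs share a row iff they share an offset
# The last three table rows (num_rel_distance - 3 / - 2 / - 1) are reserved for
# cls->token, token->cls and cls->cls pairs, matching the assignments to
# relative_position_index[0, 0:], [0:, 0] and [0, 0] in the class above;
# forward() then simply gathers table[index] per attention head.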
def get_num_layers(self): + return len(self.blocks) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + batch_size, seq_len, _ = x.size() + + cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks + x = torch.cat((cls_tokens, x), dim=1) + if self.pos_embed is not None: + x = x + self.pos_embed + x = self.pos_drop(x) + + rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None + for blk in self.blocks: + x = blk(x, rel_pos_bias=rel_pos_bias) + + x = self.norm(x) + if self.fc_norm is not None: + t = x[:, 1:, :] + return self.fc_norm(t.mean(1)) + else: + return x[:, 0] + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _create_beit(variant, pretrained=False, default_cfg=None, **kwargs): + default_cfg = default_cfg or default_cfgs[variant] + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Beit models.') + + model = build_model_with_cfg( + Beit, variant, pretrained, + default_cfg=default_cfg, + # FIXME an updated filter fn needed to interpolate rel pos emb if fine tuning to diff model sizes + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + return model + + +@register_model +def beit_base_patch16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=0.1, **kwargs) + model = _create_beit('beit_base_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def beit_base_patch16_384(pretrained=False, **kwargs): + model_kwargs = dict( + img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=0.1, **kwargs) + model = _create_beit('beit_base_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def beit_base_patch16_224_in22k(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=0.1, **kwargs) + model = _create_beit('beit_base_patch16_224_in22k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def beit_large_patch16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5, **kwargs) + model = _create_beit('beit_large_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def beit_large_patch16_384(pretrained=False, **kwargs): + model_kwargs = dict( + img_size=384, patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5, **kwargs) + model = _create_beit('beit_large_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def beit_large_patch16_512(pretrained=False, **kwargs): + model_kwargs = dict( + img_size=512, patch_size=16, embed_dim=1024, depth=24, num_heads=16, 
mlp_ratio=4, qkv_bias=True, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5, **kwargs) + model = _create_beit('beit_large_patch16_512', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def beit_large_patch16_224_in22k(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, + use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5, **kwargs) + model = _create_beit('beit_large_patch16_224_in22k', pretrained=pretrained, **model_kwargs) + return model diff --git a/timm/models/byoanet.py b/timm/models/byoanet.py new file mode 100644 index 0000000..f44040b --- /dev/null +++ b/timm/models/byoanet.py @@ -0,0 +1,443 @@ +""" Bring-Your-Own-Attention Network + +A flexible network w/ dataclass based config for stacking NN blocks including +self-attention (or similar) layers. + +Currently used to implement experimental variants of: + * Bottleneck Transformers + * Lambda ResNets + * HaloNets + +Consider all of the models definitions here as experimental WIP and likely to change. + +Hacked together by / copyright Ross Wightman, 2021. +""" +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .byobnet import ByoBlockCfg, ByoModelCfg, ByobNet, interleave_blocks +from .helpers import build_model_with_cfg +from .registry import register_model + +__all__ = [] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.95, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv1.conv', 'classifier': 'head.fc', + 'fixed_input_size': False, 'min_input_size': (3, 224, 224), + **kwargs + } + + +default_cfgs = { + # GPU-Efficient (ResNet) weights + 'botnet26t_256': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/botnet26t_c1_256-167a0e9f.pth', + fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), + 'sebotnet33ts_256': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/sebotnet33ts_a1h2_256-957e3c3e.pth', + fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.94), + 'botnet50ts_256': _cfg( + url='', + fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), + 'eca_botnext26ts_256': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_botnext26ts_c_256-95a898f6.pth', + fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), + + 'halonet_h1': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256)), + 'halonet26t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/halonet26t_a1h_256-3083328c.pth', + input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256)), + 'sehalonet33ts': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/sehalonet33ts_256-87e053f9.pth', + input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256), crop_pct=0.94), + 'halonet50ts': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/halonet50ts_a1h2_256-f3a3daee.pth', + input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256), crop_pct=0.94), + 'eca_halonext26ts': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_halonext26ts_c_256-06906299.pth', + input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256), crop_pct=0.94), + + 'lambda_resnet26t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/lambda_resnet26t_c_256-e5a5c857.pth', + min_input_size=(3, 128, 128), input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.94), + 'lambda_resnet50ts': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/lambda_resnet50ts_a1h_256-b87370f7.pth', + min_input_size=(3, 128, 128), input_size=(3, 256, 256), pool_size=(8, 8)), + 'lambda_resnet26rpt_256': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/lambda_resnet26rpt_c_256-ab00292d.pth', + fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.94), + + 'haloregnetz_b': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/haloregnetz_c_raa_256-c8ad7616.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + first_conv='stem.conv', input_size=(3, 224, 224), pool_size=(7, 7), min_input_size=(3, 224, 224), crop_pct=0.94), + + 'lamhalobotnet50ts_256': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/lamhalobotnet50ts_a1h2_256-fe3d9445.pth', + fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), + 'halo2botnet50ts_256': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/halo2botnet50ts_a1h2_256-fd9c11a3.pth', + fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), +} + + +model_cfgs = dict( + + botnet26t=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25), + ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + fixed_input_size=True, + self_attn_layer='bottleneck', + self_attn_kwargs=dict() + ), + sebotnet33ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=1024, s=2, gs=0, br=0.25), + ByoBlockCfg('self_attn', d=2, c=1536, s=2, gs=0, br=0.333), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + act_layer='silu', + num_features=1280, + attn_layer='se', + self_attn_layer='bottleneck', + self_attn_kwargs=dict() + ), + botnet50ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), every=4, d=4, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + fixed_input_size=True, + self_attn_layer='bottleneck', + self_attn_kwargs=dict() + ), + eca_botnext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=16, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=16, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, 
s=2, gs=16, br=0.25), + ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=16, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + fixed_input_size=True, + act_layer='silu', + attn_layer='eca', + self_attn_layer='bottleneck', + self_attn_kwargs=dict(dim_head=16) + ), + + halonet_h1=ByoModelCfg( + blocks=( + ByoBlockCfg(type='self_attn', d=3, c=64, s=1, gs=0, br=1.0), + ByoBlockCfg(type='self_attn', d=3, c=128, s=2, gs=0, br=1.0), + ByoBlockCfg(type='self_attn', d=10, c=256, s=2, gs=0, br=1.0), + ByoBlockCfg(type='self_attn', d=3, c=512, s=2, gs=0, br=1.0), + ), + stem_chs=64, + stem_type='7x7', + stem_pool='maxpool', + + self_attn_layer='halo', + self_attn_kwargs=dict(block_size=8, halo_size=3), + ), + halonet26t=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25), + ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + self_attn_layer='halo', + self_attn_kwargs=dict(block_size=8, halo_size=2) + ), + sehalonet33ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=1024, s=2, gs=0, br=0.25), + ByoBlockCfg('self_attn', d=2, c=1536, s=2, gs=0, br=0.333), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + act_layer='silu', + num_features=1280, + attn_layer='se', + self_attn_layer='halo', + self_attn_kwargs=dict(block_size=8, halo_size=3) + ), + halonet50ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), + interleave_blocks( + types=('bottle', 'self_attn'), every=4, d=4, c=512, s=2, gs=0, br=0.25, + self_attn_layer='halo', self_attn_kwargs=dict(block_size=8, halo_size=3, num_heads=4)), + interleave_blocks(types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + self_attn_layer='halo', + self_attn_kwargs=dict(block_size=8, halo_size=3) + ), + eca_halonext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=16, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=16, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=16, br=0.25), + ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=16, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + attn_layer='eca', + self_attn_layer='halo', + self_attn_kwargs=dict(block_size=8, halo_size=2, dim_head=16) + ), + + lambda_resnet26t=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25), + ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + self_attn_layer='lambda', + self_attn_kwargs=dict(r=9) + ), + lambda_resnet50ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), every=4, d=4, c=512, s=2, gs=0, br=0.25), + 
interleave_blocks(types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + self_attn_layer='lambda', + self_attn_kwargs=dict(r=9) + ), + lambda_resnet26rpt_256=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25), + interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25), + ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + self_attn_layer='lambda', + self_attn_kwargs=dict(r=None) + ), + + # experimental + haloregnetz_b=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=3), + ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=3), + interleave_blocks(types=('bottle', 'self_attn'), every=3, d=12, c=192, s=2, gs=16, br=3), + ByoBlockCfg('self_attn', d=2, c=288, s=2, gs=16, br=3), + ), + stem_chs=32, + stem_pool='', + downsample='', + num_features=1536, + act_layer='silu', + attn_layer='se', + attn_kwargs=dict(rd_ratio=0.25), + block_kwargs=dict(bottle_in=True, linear_out=True), + self_attn_layer='halo', + self_attn_kwargs=dict(block_size=7, halo_size=2, qk_ratio=0.33) + ), + + # experimental + lamhalobotnet50ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), + interleave_blocks( + types=('bottle', 'self_attn'), d=4, c=512, s=2, gs=0, br=0.25, + self_attn_layer='lambda', self_attn_kwargs=dict(r=13)), + interleave_blocks( + types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25, + self_attn_layer='halo', self_attn_kwargs=dict(halo_size=3)), + interleave_blocks( + types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25, + self_attn_layer='bottleneck', self_attn_kwargs=dict()), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + act_layer='silu', + ), + halo2botnet50ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), + interleave_blocks( + types=('bottle', 'self_attn'), d=4, c=512, s=2, gs=0, br=0.25, + self_attn_layer='halo', self_attn_kwargs=dict(halo_size=3)), + interleave_blocks( + types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25, + self_attn_layer='halo', self_attn_kwargs=dict(halo_size=3)), + interleave_blocks( + types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25, + self_attn_layer='bottleneck', self_attn_kwargs=dict()), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + act_layer='silu', + ), +) + + +def _create_byoanet(variant, cfg_variant=None, pretrained=False, **kwargs): + return build_model_with_cfg( + ByobNet, variant, pretrained, + default_cfg=default_cfgs[variant], + model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant], + feature_cfg=dict(flatten_sequential=True), + **kwargs) + + +@register_model +def botnet26t_256(pretrained=False, **kwargs): + """ Bottleneck Transformer w/ ResNet26-T backbone. 
+ """ + kwargs.setdefault('img_size', 256) + return _create_byoanet('botnet26t_256', 'botnet26t', pretrained=pretrained, **kwargs) + + +@register_model +def sebotnet33ts_256(pretrained=False, **kwargs): + """ Bottleneck Transformer w/ a ResNet33-t backbone, SE attn for non Halo blocks, SiLU, + """ + return _create_byoanet('sebotnet33ts_256', 'sebotnet33ts', pretrained=pretrained, **kwargs) + + +@register_model +def botnet50ts_256(pretrained=False, **kwargs): + """ Bottleneck Transformer w/ ResNet50-T backbone, silu act. + """ + kwargs.setdefault('img_size', 256) + return _create_byoanet('botnet50ts_256', 'botnet50ts', pretrained=pretrained, **kwargs) + + +@register_model +def eca_botnext26ts_256(pretrained=False, **kwargs): + """ Bottleneck Transformer w/ ResNet26-T backbone, silu act. + """ + kwargs.setdefault('img_size', 256) + return _create_byoanet('eca_botnext26ts_256', 'eca_botnext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def halonet_h1(pretrained=False, **kwargs): + """ HaloNet-H1. Halo attention in all stages as per the paper. + NOTE: This runs very slowly! + """ + return _create_byoanet('halonet_h1', pretrained=pretrained, **kwargs) + + +@register_model +def halonet26t(pretrained=False, **kwargs): + """ HaloNet w/ a ResNet26-t backbone. Halo attention in final two stages + """ + return _create_byoanet('halonet26t', pretrained=pretrained, **kwargs) + + +@register_model +def sehalonet33ts(pretrained=False, **kwargs): + """ HaloNet w/ a ResNet33-t backbone, SE attn for non Halo blocks, SiLU, 1-2 Halo in stage 2,3,4. + """ + return _create_byoanet('sehalonet33ts', pretrained=pretrained, **kwargs) + + +@register_model +def halonet50ts(pretrained=False, **kwargs): + """ HaloNet w/ a ResNet50-t backbone, silu act. Halo attention in final two stages + """ + return _create_byoanet('halonet50ts', pretrained=pretrained, **kwargs) + + +@register_model +def eca_halonext26ts(pretrained=False, **kwargs): + """ HaloNet w/ a ResNet26-t backbone, silu act. Halo attention in final two stages + """ + return _create_byoanet('eca_halonext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def lambda_resnet26t(pretrained=False, **kwargs): + """ Lambda-ResNet-26-T. Lambda layers w/ conv pos in last two stages. + """ + return _create_byoanet('lambda_resnet26t', pretrained=pretrained, **kwargs) + + +@register_model +def lambda_resnet50ts(pretrained=False, **kwargs): + """ Lambda-ResNet-50-TS. SiLU act. Lambda layers w/ conv pos in last two stages. + """ + return _create_byoanet('lambda_resnet50ts', pretrained=pretrained, **kwargs) + + +@register_model +def lambda_resnet26rpt_256(pretrained=False, **kwargs): + """ Lambda-ResNet-26-R-T. Lambda layers w/ rel pos embed in last two stages. 
+ """ + kwargs.setdefault('img_size', 256) + return _create_byoanet('lambda_resnet26rpt_256', pretrained=pretrained, **kwargs) + + +@register_model +def haloregnetz_b(pretrained=False, **kwargs): + """ Halo + RegNetZ + """ + return _create_byoanet('haloregnetz_b', pretrained=pretrained, **kwargs) + + +@register_model +def lamhalobotnet50ts_256(pretrained=False, **kwargs): + """ Combo Attention (Lambda + Halo + Bot) Network + """ + return _create_byoanet('lamhalobotnet50ts_256', 'lamhalobotnet50ts', pretrained=pretrained, **kwargs) + + +@register_model +def halo2botnet50ts_256(pretrained=False, **kwargs): + """ Combo Attention (Halo + Halo + Bot) Network + """ + return _create_byoanet('halo2botnet50ts_256', 'halo2botnet50ts', pretrained=pretrained, **kwargs) diff --git a/timm/models/byobnet.py b/timm/models/byobnet.py new file mode 100644 index 0000000..fa57943 --- /dev/null +++ b/timm/models/byobnet.py @@ -0,0 +1,1531 @@ +""" Bring-Your-Own-Blocks Network + +A flexible network w/ dataclass based config for stacking those NN blocks. + +This model is currently used to implement the following networks: + +GPU Efficient (ResNets) - gernet_l/m/s (original versions called genet, but this was already used (by SENet author)). +Paper: `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 +Code and weights: https://github.com/idstcv/GPU-Efficient-Networks, licensed Apache 2.0 + +RepVGG - repvgg_* +Paper: `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 +Code and weights: https://github.com/DingXiaoH/RepVGG, licensed MIT + +In all cases the models have been modified to fit within the design of ByobNet. I've remapped +the original weights and verified accuracies. + +For GPU Efficient nets, I used the original names for the blocks since they were for the most part +the same as original residual blocks in ResNe(X)t, DarkNet, and other existing models. Note also some +changes introduced in RegNet were also present in the stem and bottleneck blocks for this model. + +A significant number of different network archs can be implemented here, including variants of the +above nets that include attention. + +Hacked together by / copyright Ross Wightman, 2021. 
+""" +import math +from dataclasses import dataclass, field, replace +from typing import Tuple, List, Dict, Optional, Union, Any, Callable, Sequence +from functools import partial + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, named_apply +from .layers import ClassifierHead, ConvBnAct, BatchNormAct2d, DropPath, AvgPool2dSame, \ + create_conv2d, get_act_layer, convert_norm_act, get_attn, make_divisible, to_2tuple, EvoNormSample2d +from .registry import register_model + +__all__ = ['ByobNet', 'ByoModelCfg', 'ByoBlockCfg', 'create_byob_stem', 'create_block'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv', 'classifier': 'head.fc', + **kwargs + } + + +def _cfgr(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), + 'crop_pct': 0.9, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv1.conv', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = { + # GPU-Efficient (ResNet) weights + 'gernet_s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_s-756b4751.pth'), + 'gernet_m': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_m-0873c53a.pth'), + 'gernet_l': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_l-f31e2e8d.pth', + input_size=(3, 256, 256), pool_size=(8, 8)), + + # RepVGG weights + 'repvgg_a2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_a2-c1ee6d2b.pth', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')), + 'repvgg_b0': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b0-80ac3f1b.pth', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')), + 'repvgg_b1': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b1-77ca2989.pth', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')), + 'repvgg_b1g4': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b1g4-abde5d92.pth', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')), + 'repvgg_b2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b2-25b7494e.pth', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')), + 'repvgg_b2g4': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b2g4-165a85f2.pth', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')), + 'repvgg_b3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b3-199bc50d.pth', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')), + 'repvgg_b3g4': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b3g4-73c370bf.pth', + first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')), + + # experimental configs + 'resnet51q': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet51q_ra2-d47dcc76.pth', + first_conv='stem.conv1', input_size=(3, 256, 256), pool_size=(8, 8), + test_input_size=(3, 288, 288), crop_pct=1.0), + 'resnet61q': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet61q_ra2-6afc536c.pth', + test_input_size=(3, 288, 288), crop_pct=1.0), + + 'resnext26ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnext26ts_256_ra2-8bbd9106.pth'), + 'gcresnext26ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext26ts_256-e414378b.pth'), + 'seresnext26ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnext26ts_256-6f0d74a3.pth'), + 'eca_resnext26ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnext26ts_256-5a1d030f.pth'), + 'bat_resnext26ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/bat_resnext26ts_256-fa6fd595.pth', + min_input_size=(3, 256, 256)), + + 'resnet32ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet32ts_256-aacf5250.pth'), + 'resnet33ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet33ts_256-e91b09a4.pth'), + 'gcresnet33ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet33ts_256-0e0cd345.pth'), + 'seresnet33ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnet33ts_256-f8ad44d9.pth'), + 'eca_resnet33ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnet33ts_256-8f98face.pth'), + + 'gcresnet50t': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet50t_256-96374d1c.pth'), + + 'gcresnext50ts': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext50ts_256-3e0f515e.pth'), + + # experimental models, likely to change or be removed + 'regnetz_b16': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_b_raa-677d9606.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 224, 224), pool_size=(7, 7), test_input_size=(3, 288, 288), first_conv='stem.conv', crop_pct=0.94), + 'regnetz_c16': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_c_rab2_256-a54bf36a.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), test_input_size=(3, 320, 320), first_conv='stem.conv', crop_pct=0.94), + 'regnetz_d32': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_d_rab_256-b8073a89.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), test_input_size=(3, 320, 320), crop_pct=0.95), + 'regnetz_d8': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_d8_bh-afc03c55.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), test_input_size=(3, 320, 320), crop_pct=1.0), + 'regnetz_e8': _cfgr( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/regnetz_e8_bh-aace8e6e.pth', +
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), test_input_size=(3, 320, 320), crop_pct=1.0), + 'regnetz_d8_evob': _cfgr( + url='', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), test_input_size=(3, 320, 320), crop_pct=0.95), + 'regnetz_d8_evos': _cfgr( + url='', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), test_input_size=(3, 320, 320), crop_pct=0.95), +} + + +@dataclass +class ByoBlockCfg: + type: Union[str, nn.Module] + d: int # block depth (number of block repeats in stage) + c: int # number of output channels for each block in stage + s: int = 2 # stride of stage (first block) + gs: Optional[Union[int, Callable]] = None # group-size of blocks in stage, conv is depthwise if gs == 1 + br: float = 1. # bottleneck-ratio of blocks in stage + + # NOTE: these config items override the model cfgs that are applied to all blocks by default + attn_layer: Optional[str] = None + attn_kwargs: Optional[Dict[str, Any]] = None + self_attn_layer: Optional[str] = None + self_attn_kwargs: Optional[Dict[str, Any]] = None + block_kwargs: Optional[Dict[str, Any]] = None + + +@dataclass +class ByoModelCfg: + blocks: Tuple[Union[ByoBlockCfg, Tuple[ByoBlockCfg, ...]], ...] + downsample: str = 'conv1x1' + stem_type: str = '3x3' + stem_pool: Optional[str] = 'maxpool' + stem_chs: int = 32 + width_factor: float = 1.0 + num_features: int = 0 # num out_channels for final conv, no final 1x1 conv if 0 + zero_init_last: bool = True # zero init last weight (usually bn) in residual path + fixed_input_size: bool = False # model constrained to a fixed-input size / img_size must be provided on creation + + act_layer: str = 'relu' + norm_layer: str = 'batchnorm' + + # NOTE: these config items will be overridden by the block cfg (per-block) if they are set there + attn_layer: Optional[str] = None + attn_kwargs: dict = field(default_factory=lambda: dict()) + self_attn_layer: Optional[str] = None + self_attn_kwargs: dict = field(default_factory=lambda: dict()) + block_kwargs: Dict[str, Any] = field(default_factory=lambda: dict()) + + +def _rep_vgg_bcfg(d=(4, 6, 16, 1), wf=(1., 1., 1., 1.), groups=0): + c = (64, 128, 256, 512) + group_size = 0 + if groups > 0: + group_size = lambda chs, idx: chs // groups if (idx + 1) % 2 == 0 else 0 + bcfg = tuple([ByoBlockCfg(type='rep', d=d, c=c * wf, gs=group_size) for d, c, wf in zip(d, c, wf)]) + return bcfg + + +def interleave_blocks( + types: Tuple[str, str], d, every: Union[int, List[int]] = 1, first: bool = False, **kwargs +) -> Tuple[ByoBlockCfg]: + """ interleave 2 block types in stack + """ + assert len(types) == 2 + if isinstance(every, int): + every = list(range(0 if first else every, d, every + 1)) + if not every: + every = [d - 1] + set(every) + blocks = [] + for i in range(d): + block_type = types[1] if i in every else types[0] + blocks += [ByoBlockCfg(type=block_type, d=1, **kwargs)] + return tuple(blocks) + + +model_cfgs = dict( + gernet_l=ByoModelCfg( + blocks=( + ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.), + ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.), + ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4), + ByoBlockCfg(type='bottle', d=5, c=640, s=2, gs=1, br=3.), + ByoBlockCfg(type='bottle', d=4, c=640, s=1, gs=1, br=3.), + ), + stem_chs=32, + stem_pool=None, + num_features=2560, + ), + gernet_m=ByoModelCfg( + blocks=( + ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.), + ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.), + ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4), + ByoBlockCfg(type='bottle', d=4, c=640, s=2, 
gs=1, br=3.), + ByoBlockCfg(type='bottle', d=1, c=640, s=1, gs=1, br=3.), + ), + stem_chs=32, + stem_pool=None, + num_features=2560, + ), + gernet_s=ByoModelCfg( + blocks=( + ByoBlockCfg(type='basic', d=1, c=48, s=2, gs=0, br=1.), + ByoBlockCfg(type='basic', d=3, c=48, s=2, gs=0, br=1.), + ByoBlockCfg(type='bottle', d=7, c=384, s=2, gs=0, br=1 / 4), + ByoBlockCfg(type='bottle', d=2, c=560, s=2, gs=1, br=3.), + ByoBlockCfg(type='bottle', d=1, c=256, s=1, gs=1, br=3.), + ), + stem_chs=13, + stem_pool=None, + num_features=1920, + ), + + repvgg_a2=ByoModelCfg( + blocks=_rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(1.5, 1.5, 1.5, 2.75)), + stem_type='rep', + stem_chs=64, + ), + repvgg_b0=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(1., 1., 1., 2.5)), + stem_type='rep', + stem_chs=64, + ), + repvgg_b1=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.)), + stem_type='rep', + stem_chs=64, + ), + repvgg_b1g4=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.), groups=4), + stem_type='rep', + stem_chs=64, + ), + repvgg_b2=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.)), + stem_type='rep', + stem_chs=64, + ), + repvgg_b2g4=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.), groups=4), + stem_type='rep', + stem_chs=64, + ), + repvgg_b3=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.)), + stem_type='rep', + stem_chs=64, + ), + repvgg_b3g4=ByoModelCfg( + blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.), groups=4), + stem_type='rep', + stem_chs=64, + ), + + # 4 x conv stem w/ 2 act, no maxpool, 2,4,6,4 repeats, group size 32 in first 3 blocks + # DW convs in last block, 2048 pre-FC, silu act + resnet51q=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0), + ), + stem_chs=128, + stem_type='quad2', + stem_pool=None, + num_features=2048, + act_layer='silu', + ), + + # 4 x conv stem w/ 4 act, no maxpool, 1,4,6,4 repeats, edge block first, group size 32 in next 2 blocks + # DW convs in last block, 4 conv for each bottle block, 2048 pre-FC, silu act + resnet61q=ByoModelCfg( + blocks=( + ByoBlockCfg(type='edge', d=1, c=256, s=1, gs=0, br=1.0, block_kwargs=dict()), + ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0), + ), + stem_chs=128, + stem_type='quad', + stem_pool=None, + num_features=2048, + act_layer='silu', + block_kwargs=dict(extra_conv=True), + ), + + # A series of ResNeXt-26 models w/ one of none, GC, SE, ECA, BAT attn, group size 32, SiLU act, + # and a tiered stem w/ maxpool + resnext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + ), + gcresnext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + 
attn_layer='gca', + ), + seresnext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + attn_layer='se', + ), + eca_resnext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + attn_layer='eca', + ), + bat_resnext26ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + act_layer='silu', + attn_layer='bat', + attn_kwargs=dict(block_size=8) + ), + + # ResNet-32 (2, 3, 3, 2) models w/ no attn, no groups, SiLU act, no pre-fc feat layer, tiered stem w/o maxpool + resnet32ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + num_features=0, + act_layer='silu', + ), + + # ResNet-33 (2, 3, 3, 2) models w/ no attn, no groups, SiLU act, 1280 pre-FC feat, tiered stem w/o maxpool + resnet33ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + num_features=1280, + act_layer='silu', + ), + + # A series of ResNet-33 (2, 3, 3, 2) models w/ one of GC, SE, ECA attn, no groups, SiLU act, 1280 pre-FC feat + # and a tiered stem w/ no maxpool + gcresnet33ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + num_features=1280, + act_layer='silu', + attn_layer='gca', + ), + seresnet33ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + num_features=1280, + act_layer='silu', + attn_layer='se', + ), + eca_resnet33ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25), + ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25), + ), + 
stem_chs=64, + stem_type='tiered', + stem_pool='', + num_features=1280, + act_layer='silu', + attn_layer='eca', + ), + + gcresnet50t=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25), + ByoBlockCfg(type='bottle', d=4, c=512, s=2, br=0.25), + ByoBlockCfg(type='bottle', d=6, c=1024, s=2, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=2048, s=2, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + attn_layer='gca', + ), + + gcresnext50ts=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=6, c=1024, s=2, gs=32, br=0.25), + ByoBlockCfg(type='bottle', d=3, c=2048, s=2, gs=32, br=0.25), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='maxpool', + # stem_pool=None, + act_layer='silu', + attn_layer='gca', + ), + + # experimental models, closer to a RegNetZ than a ResNet. Similar to EfficientNets but w/ groups instead of DW + regnetz_b16=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=3), + ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=3), + ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=3), + ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=3), + ), + stem_chs=32, + stem_pool='', + downsample='', + num_features=1536, + act_layer='silu', + attn_layer='se', + attn_kwargs=dict(rd_ratio=0.25), + block_kwargs=dict(bottle_in=True, linear_out=True), + ), + regnetz_c16=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=4), + ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=4), + ByoBlockCfg(type='bottle', d=12, c=192, s=2, gs=16, br=4), + ByoBlockCfg(type='bottle', d=2, c=288, s=2, gs=16, br=4), + ), + stem_chs=32, + stem_pool='', + downsample='', + num_features=1536, + act_layer='silu', + attn_layer='se', + attn_kwargs=dict(rd_ratio=0.25), + block_kwargs=dict(bottle_in=True, linear_out=True), + ), + regnetz_d32=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=64, s=1, gs=32, br=4), + ByoBlockCfg(type='bottle', d=6, c=128, s=2, gs=32, br=4), + ByoBlockCfg(type='bottle', d=12, c=256, s=2, gs=32, br=4), + ByoBlockCfg(type='bottle', d=3, c=384, s=2, gs=32, br=4), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + downsample='', + num_features=1792, + act_layer='silu', + attn_layer='se', + attn_kwargs=dict(rd_ratio=0.25), + block_kwargs=dict(bottle_in=True, linear_out=True), + ), + regnetz_d8=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=64, s=1, gs=8, br=4), + ByoBlockCfg(type='bottle', d=6, c=128, s=2, gs=8, br=4), + ByoBlockCfg(type='bottle', d=12, c=256, s=2, gs=8, br=4), + ByoBlockCfg(type='bottle', d=3, c=384, s=2, gs=8, br=4), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + downsample='', + num_features=1792, + act_layer='silu', + attn_layer='se', + attn_kwargs=dict(rd_ratio=0.25), + block_kwargs=dict(bottle_in=True, linear_out=True), + ), + regnetz_e8=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=96, s=1, gs=8, br=4), + ByoBlockCfg(type='bottle', d=8, c=192, s=2, gs=8, br=4), + ByoBlockCfg(type='bottle', d=16, c=384, s=2, gs=8, br=4), + ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=8, br=4), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + downsample='', + num_features=2048, + act_layer='silu', + attn_layer='se', + attn_kwargs=dict(rd_ratio=0.25), + block_kwargs=dict(bottle_in=True, linear_out=True), + ), + + # experimental EvoNorm configs + regnetz_d8_evob=ByoModelCfg( + 
blocks=( + ByoBlockCfg(type='bottle', d=3, c=64, s=1, gs=8, br=4), + ByoBlockCfg(type='bottle', d=6, c=128, s=2, gs=8, br=4), + ByoBlockCfg(type='bottle', d=12, c=256, s=2, gs=8, br=4), + ByoBlockCfg(type='bottle', d=3, c=384, s=2, gs=8, br=4), + ), + stem_chs=64, + stem_type='tiered', + stem_pool='', + downsample='', + num_features=1792, + act_layer='silu', + norm_layer='evonormbatch', + attn_layer='se', + attn_kwargs=dict(rd_ratio=0.25), + block_kwargs=dict(bottle_in=True, linear_out=True), + ), + regnetz_d8_evos=ByoModelCfg( + blocks=( + ByoBlockCfg(type='bottle', d=3, c=64, s=1, gs=8, br=4), + ByoBlockCfg(type='bottle', d=6, c=128, s=2, gs=8, br=4), + ByoBlockCfg(type='bottle', d=12, c=256, s=2, gs=8, br=4), + ByoBlockCfg(type='bottle', d=3, c=384, s=2, gs=8, br=4), + ), + stem_chs=64, + stem_type='deep', + stem_pool='', + downsample='', + num_features=1792, + act_layer='silu', + norm_layer=partial(EvoNormSample2d, groups=32), + attn_layer='se', + attn_kwargs=dict(rd_ratio=0.25), + block_kwargs=dict(bottle_in=True, linear_out=True), + ), +) + +@register_model +def gernet_l(pretrained=False, **kwargs): + """ GEResNet-Large (GENet-Large from official impl) + `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 + """ + return _create_byobnet('gernet_l', pretrained=pretrained, **kwargs) + + +@register_model +def gernet_m(pretrained=False, **kwargs): + """ GEResNet-Medium (GENet-Normal from official impl) + `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 + """ + return _create_byobnet('gernet_m', pretrained=pretrained, **kwargs) + + +@register_model +def gernet_s(pretrained=False, **kwargs): + """ EResNet-Small (GENet-Small from official impl) + `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090 + """ + return _create_byobnet('gernet_s', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_a2(pretrained=False, **kwargs): + """ RepVGG-A2 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_a2', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b0(pretrained=False, **kwargs): + """ RepVGG-B0 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b0', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b1(pretrained=False, **kwargs): + """ RepVGG-B1 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b1', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b1g4(pretrained=False, **kwargs): + """ RepVGG-B1g4 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b1g4', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b2(pretrained=False, **kwargs): + """ RepVGG-B2 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b2', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b2g4(pretrained=False, **kwargs): + """ RepVGG-B2g4 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b2g4', pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b3(pretrained=False, **kwargs): + """ RepVGG-B3 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b3', 
pretrained=pretrained, **kwargs) + + +@register_model +def repvgg_b3g4(pretrained=False, **kwargs): + """ RepVGG-B3g4 + `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697 + """ + return _create_byobnet('repvgg_b3g4', pretrained=pretrained, **kwargs) + + +@register_model +def resnet51q(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('resnet51q', pretrained=pretrained, **kwargs) + + +@register_model +def resnet61q(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('resnet61q', pretrained=pretrained, **kwargs) + + +@register_model +def resnext26ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('resnext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def gcresnext26ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('gcresnext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def seresnext26ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('seresnext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def eca_resnext26ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('eca_resnext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def bat_resnext26ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('bat_resnext26ts', pretrained=pretrained, **kwargs) + + +@register_model +def resnet32ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('resnet32ts', pretrained=pretrained, **kwargs) + + +@register_model +def resnet33ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('resnet33ts', pretrained=pretrained, **kwargs) + + +@register_model +def gcresnet33ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('gcresnet33ts', pretrained=pretrained, **kwargs) + + +@register_model +def seresnet33ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('seresnet33ts', pretrained=pretrained, **kwargs) + + +@register_model +def eca_resnet33ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('eca_resnet33ts', pretrained=pretrained, **kwargs) + + +@register_model +def gcresnet50t(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('gcresnet50t', pretrained=pretrained, **kwargs) + + +@register_model +def gcresnext50ts(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('gcresnext50ts', pretrained=pretrained, **kwargs) + + +@register_model +def regnetz_b16(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('regnetz_b16', pretrained=pretrained, **kwargs) + + +@register_model +def regnetz_c16(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('regnetz_c16', pretrained=pretrained, **kwargs) + + +@register_model +def regnetz_d32(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('regnetz_d32', pretrained=pretrained, **kwargs) + + +@register_model +def regnetz_d8(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('regnetz_d8', pretrained=pretrained, **kwargs) + + +@register_model +def regnetz_e8(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('regnetz_e8', pretrained=pretrained, **kwargs) + + +@register_model +def regnetz_d8_evob(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('regnetz_d8_evob', pretrained=pretrained, **kwargs) + + +@register_model +def regnetz_d8_evos(pretrained=False, **kwargs): + """ + """ + return _create_byobnet('regnetz_d8_evos', pretrained=pretrained, **kwargs) + + +def expand_blocks_cfg(stage_blocks_cfg: 
Union[ByoBlockCfg, Sequence[ByoBlockCfg]]) -> List[ByoBlockCfg]: + if not isinstance(stage_blocks_cfg, Sequence): + stage_blocks_cfg = (stage_blocks_cfg,) + block_cfgs = [] + for i, cfg in enumerate(stage_blocks_cfg): + block_cfgs += [replace(cfg, d=1) for _ in range(cfg.d)] + return block_cfgs + + +def num_groups(group_size, channels): + if not group_size: # 0 or None + return 1 # normal conv with 1 group + else: + # NOTE group_size == 1 -> depthwise conv + assert channels % group_size == 0 + return channels // group_size + + +@dataclass +class LayerFn: + conv_norm_act: Callable = ConvBnAct + norm_act: Callable = BatchNormAct2d + act: Callable = nn.ReLU + attn: Optional[Callable] = None + self_attn: Optional[Callable] = None + + +class DownsampleAvg(nn.Module): + def __init__(self, in_chs, out_chs, stride=1, dilation=1, apply_act=False, layers: LayerFn = None): + """ AvgPool Downsampling as in 'D' ResNet variants.""" + super(DownsampleAvg, self).__init__() + layers = layers or LayerFn() + avg_stride = stride if dilation == 1 else 1 + if stride > 1 or dilation > 1: + avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d + self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) + else: + self.pool = nn.Identity() + self.conv = layers.conv_norm_act(in_chs, out_chs, 1, apply_act=apply_act) + + def forward(self, x): + return self.conv(self.pool(x)) + + +def create_shortcut(downsample_type, layers: LayerFn, in_chs, out_chs, stride, dilation, **kwargs): + assert downsample_type in ('avg', 'conv1x1', '') + if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]: + if not downsample_type: + return None # no shortcut + elif downsample_type == 'avg': + return DownsampleAvg(in_chs, out_chs, stride=stride, dilation=dilation[0], **kwargs) + else: + return layers.conv_norm_act(in_chs, out_chs, kernel_size=1, stride=stride, dilation=dilation[0], **kwargs) + else: + return nn.Identity() # identity shortcut + + +class BasicBlock(nn.Module): + """ ResNet Basic Block - kxk + kxk + """ + + def __init__( + self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), group_size=None, bottle_ratio=1.0, + downsample='avg', attn_last=True, linear_out=False, layers: LayerFn = None, drop_block=None, + drop_path_rate=0.): + super(BasicBlock, self).__init__() + layers = layers or LayerFn() + mid_chs = make_divisible(out_chs * bottle_ratio) + groups = num_groups(group_size, mid_chs) + + self.shortcut = create_shortcut( + downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation, + apply_act=False, layers=layers) + + self.conv1_kxk = layers.conv_norm_act(in_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0]) + self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) + self.conv2_kxk = layers.conv_norm_act( + mid_chs, out_chs, kernel_size, dilation=dilation[1], groups=groups, drop_block=drop_block, apply_act=False) + self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. 
else nn.Identity() + self.act = nn.Identity() if linear_out else layers.act(inplace=True) + + def init_weights(self, zero_init_last: bool = False): + if zero_init_last and self.shortcut is not None: + nn.init.zeros_(self.conv2_kxk.bn.weight) + for attn in (self.attn, self.attn_last): + if hasattr(attn, 'reset_parameters'): + attn.reset_parameters() + + def forward(self, x): + shortcut = x + x = self.conv1_kxk(x) + x = self.conv2_kxk(x) + x = self.attn(x) + x = self.drop_path(x) + if self.shortcut is not None: + x = x + self.shortcut(shortcut) + return self.act(x) + + +class BottleneckBlock(nn.Module): + """ ResNet-like Bottleneck Block - 1x1 - kxk - 1x1 + """ + + def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1., group_size=None, + downsample='avg', attn_last=False, linear_out=False, extra_conv=False, bottle_in=False, + layers: LayerFn = None, drop_block=None, drop_path_rate=0.): + super(BottleneckBlock, self).__init__() + layers = layers or LayerFn() + mid_chs = make_divisible((in_chs if bottle_in else out_chs) * bottle_ratio) + groups = num_groups(group_size, mid_chs) + + self.shortcut = create_shortcut( + downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation, + apply_act=False, layers=layers) + + self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1) + self.conv2_kxk = layers.conv_norm_act( + mid_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0], + groups=groups, drop_block=drop_block) + if extra_conv: + self.conv2b_kxk = layers.conv_norm_act( + mid_chs, mid_chs, kernel_size, dilation=dilation[1], groups=groups, drop_block=drop_block) + else: + self.conv2b_kxk = nn.Identity() + self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) + self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False) + self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity() + self.act = nn.Identity() if linear_out else layers.act(inplace=True) + + def init_weights(self, zero_init_last: bool = False): + if zero_init_last and self.shortcut is not None: + nn.init.zeros_(self.conv3_1x1.bn.weight) + for attn in (self.attn, self.attn_last): + if hasattr(attn, 'reset_parameters'): + attn.reset_parameters() + + def forward(self, x): + shortcut = x + x = self.conv1_1x1(x) + x = self.conv2_kxk(x) + x = self.conv2b_kxk(x) + x = self.attn(x) + x = self.conv3_1x1(x) + x = self.attn_last(x) + x = self.drop_path(x) + if self.shortcut is not None: + x = x + self.shortcut(shortcut) + return self.act(x) + + +class DarkBlock(nn.Module): + """ DarkNet-like (1x1 + 3x3 w/ stride) block + + The GE-Net impl included a 1x1 + 3x3 block in their search space. It was not used in the feature models. + This block is pretty much a DarkNet block (also DenseNet) hence the name. Neither DarkNet or DenseNet + uses strides within the block (external 3x3 or maxpool downsampling is done in front of the block repeats). + + If one does want to use a lot of these blocks w/ stride, I'd recommend using the EdgeBlock (3x3 /w stride + 1x1) + for more optimal compute. 
+ """ + + def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1.0, group_size=None, + downsample='avg', attn_last=True, linear_out=False, layers: LayerFn = None, drop_block=None, + drop_path_rate=0.): + super(DarkBlock, self).__init__() + layers = layers or LayerFn() + mid_chs = make_divisible(out_chs * bottle_ratio) + groups = num_groups(group_size, mid_chs) + + self.shortcut = create_shortcut( + downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation, + apply_act=False, layers=layers) + + self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1) + self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) + self.conv2_kxk = layers.conv_norm_act( + mid_chs, out_chs, kernel_size, stride=stride, dilation=dilation[0], + groups=groups, drop_block=drop_block, apply_act=False) + self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity() + self.act = nn.Identity() if linear_out else layers.act(inplace=True) + + def init_weights(self, zero_init_last: bool = False): + if zero_init_last and self.shortcut is not None: + nn.init.zeros_(self.conv2_kxk.bn.weight) + for attn in (self.attn, self.attn_last): + if hasattr(attn, 'reset_parameters'): + attn.reset_parameters() + + def forward(self, x): + shortcut = x + x = self.conv1_1x1(x) + x = self.attn(x) + x = self.conv2_kxk(x) + x = self.attn_last(x) + x = self.drop_path(x) + if self.shortcut is not None: + x = x + self.shortcut(shortcut) + return self.act(x) + + +class EdgeBlock(nn.Module): + """ EdgeResidual-like (3x3 + 1x1) block + + A two layer block like DarkBlock, but with the order of the 3x3 and 1x1 convs reversed. + Very similar to the EfficientNet Edge-Residual block but this block it ends with activations, is + intended to be used with either expansion or bottleneck contraction, and can use DW/group/non-grouped convs. + + FIXME is there a more common 3x3 + 1x1 conv block to name this after? + """ + + def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1.0, group_size=None, + downsample='avg', attn_last=False, linear_out=False, layers: LayerFn = None, + drop_block=None, drop_path_rate=0.): + super(EdgeBlock, self).__init__() + layers = layers or LayerFn() + mid_chs = make_divisible(out_chs * bottle_ratio) + groups = num_groups(group_size, mid_chs) + + self.shortcut = create_shortcut( + downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation, + apply_act=False, layers=layers) + + self.conv1_kxk = layers.conv_norm_act( + in_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0], + groups=groups, drop_block=drop_block) + self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs) + self.conv2_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False) + self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. 
else nn.Identity() + self.act = nn.Identity() if linear_out else layers.act(inplace=True) + + def init_weights(self, zero_init_last: bool = False): + if zero_init_last and self.shortcut is not None: + nn.init.zeros_(self.conv2_1x1.bn.weight) + for attn in (self.attn, self.attn_last): + if hasattr(attn, 'reset_parameters'): + attn.reset_parameters() + + def forward(self, x): + shortcut = x + x = self.conv1_kxk(x) + x = self.attn(x) + x = self.conv2_1x1(x) + x = self.attn_last(x) + x = self.drop_path(x) + if self.shortcut is not None: + x = x + self.shortcut(shortcut) + return self.act(x) + + +class RepVggBlock(nn.Module): + """ RepVGG Block. + + Adapted from impl at https://github.com/DingXiaoH/RepVGG + + This version does not currently support the deploy optimization. It is currently fixed in 'train' mode. + """ + + def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1.0, group_size=None, + downsample='', layers: LayerFn = None, drop_block=None, drop_path_rate=0.): + super(RepVggBlock, self).__init__() + layers = layers or LayerFn() + groups = num_groups(group_size, in_chs) + + use_ident = in_chs == out_chs and stride == 1 and dilation[0] == dilation[1] + self.identity = layers.norm_act(out_chs, apply_act=False) if use_ident else None + self.conv_kxk = layers.conv_norm_act( + in_chs, out_chs, kernel_size, stride=stride, dilation=dilation[0], + groups=groups, drop_block=drop_block, apply_act=False) + self.conv_1x1 = layers.conv_norm_act(in_chs, out_chs, 1, stride=stride, groups=groups, apply_act=False) + self.attn = nn.Identity() if layers.attn is None else layers.attn(out_chs) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. and use_ident else nn.Identity() + self.act = layers.act(inplace=True) + + def init_weights(self, zero_init_last: bool = False): + # NOTE this init overrides that base model init with specific changes for the block type + for m in self.modules(): + if isinstance(m, nn.BatchNorm2d): + nn.init.normal_(m.weight, .1, .1) + nn.init.normal_(m.bias, 0, .1) + if hasattr(self.attn, 'reset_parameters'): + self.attn.reset_parameters() + + def forward(self, x): + if self.identity is None: + x = self.conv_1x1(x) + self.conv_kxk(x) + else: + identity = self.identity(x) + x = self.conv_1x1(x) + self.conv_kxk(x) + x = self.drop_path(x) # not in the paper / official impl, experimental + x = x + identity + x = self.attn(x) # no attn in the paper / official impl, experimental + return self.act(x) + + +class SelfAttnBlock(nn.Module): + """ ResNet-like Bottleneck Block - 1x1 - optional kxk - self attn - 1x1 + """ + + def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1., group_size=None, + downsample='avg', extra_conv=False, linear_out=False, bottle_in=False, post_attn_na=True, + feat_size=None, layers: LayerFn = None, drop_block=None, drop_path_rate=0.): + super(SelfAttnBlock, self).__init__() + assert layers is not None + mid_chs = make_divisible((in_chs if bottle_in else out_chs) * bottle_ratio) + groups = num_groups(group_size, mid_chs) + + self.shortcut = create_shortcut( + downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation, + apply_act=False, layers=layers) + + self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1) + if extra_conv: + self.conv2_kxk = layers.conv_norm_act( + mid_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0], + groups=groups, drop_block=drop_block) + stride = 1 # striding done via conv if enabled + else: + self.conv2_kxk = 
nn.Identity() + opt_kwargs = {} if feat_size is None else dict(feat_size=feat_size) + # FIXME need to dilate self attn to have dilated network support, moop moop + self.self_attn = layers.self_attn(mid_chs, stride=stride, **opt_kwargs) + self.post_attn = layers.norm_act(mid_chs) if post_attn_na else nn.Identity() + self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity() + self.act = nn.Identity() if linear_out else layers.act(inplace=True) + + def init_weights(self, zero_init_last: bool = False): + if zero_init_last and self.shortcut is not None: + nn.init.zeros_(self.conv3_1x1.bn.weight) + if hasattr(self.self_attn, 'reset_parameters'): + self.self_attn.reset_parameters() + + def forward(self, x): + shortcut = x + x = self.conv1_1x1(x) + x = self.conv2_kxk(x) + x = self.self_attn(x) + x = self.post_attn(x) + x = self.conv3_1x1(x) + x = self.drop_path(x) + if self.shortcut is not None: + x = x + self.shortcut(shortcut) + return self.act(x) + +_block_registry = dict( + basic=BasicBlock, + bottle=BottleneckBlock, + dark=DarkBlock, + edge=EdgeBlock, + rep=RepVggBlock, + self_attn=SelfAttnBlock, +) + + +def register_block(block_type:str, block_fn: nn.Module): + _block_registry[block_type] = block_fn + + +def create_block(block: Union[str, nn.Module], **kwargs): + if isinstance(block, (nn.Module, partial)): + return block(**kwargs) + assert block in _block_registry, f'Unknown block type ({block}' + return _block_registry[block](**kwargs) + + +class Stem(nn.Sequential): + + def __init__(self, in_chs, out_chs, kernel_size=3, stride=4, pool='maxpool', + num_rep=3, num_act=None, chs_decay=0.5, layers: LayerFn = None): + super().__init__() + assert stride in (2, 4) + layers = layers or LayerFn() + + if isinstance(out_chs, (list, tuple)): + num_rep = len(out_chs) + stem_chs = out_chs + else: + stem_chs = [round(out_chs * chs_decay ** i) for i in range(num_rep)][::-1] + + self.stride = stride + self.feature_info = [] # track intermediate features + prev_feat = '' + stem_strides = [2] + [1] * (num_rep - 1) + if stride == 4 and not pool: + # set last conv in stack to be strided if stride == 4 and no pooling layer + stem_strides[-1] = 2 + + num_act = num_rep if num_act is None else num_act + # if num_act < num_rep, first convs in stack won't have bn + act + stem_norm_acts = [False] * (num_rep - num_act) + [True] * num_act + prev_chs = in_chs + curr_stride = 1 + for i, (ch, s, na) in enumerate(zip(stem_chs, stem_strides, stem_norm_acts)): + layer_fn = layers.conv_norm_act if na else create_conv2d + conv_name = f'conv{i + 1}' + if i > 0 and s > 1: + self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat)) + self.add_module(conv_name, layer_fn(prev_chs, ch, kernel_size=kernel_size, stride=s)) + prev_chs = ch + curr_stride *= s + prev_feat = conv_name + + if pool and 'max' in pool.lower(): + self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat)) + self.add_module('pool', nn.MaxPool2d(3, 2, 1)) + curr_stride *= 2 + prev_feat = 'pool' + + self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat)) + assert curr_stride == stride + + +def create_byob_stem(in_chs, out_chs, stem_type='', pool_type='', feat_prefix='stem', layers: LayerFn = None): + layers = layers or LayerFn() + assert stem_type in ('', 'quad', 'quad2', 'tiered', 'deep', 'rep', '7x7', '3x3') + if 'quad' in stem_type: + # based on NFNet stem, stack 
of 4 3x3 convs + num_act = 2 if 'quad2' in stem_type else None + stem = Stem(in_chs, out_chs, num_rep=4, num_act=num_act, pool=pool_type, layers=layers) + elif 'tiered' in stem_type: + # 3x3 stack of 3 convs as in my ResNet-T + stem = Stem(in_chs, (3 * out_chs // 8, out_chs // 2, out_chs), pool=pool_type, layers=layers) + elif 'deep' in stem_type: + # 3x3 stack of 3 convs as in ResNet-D + stem = Stem(in_chs, out_chs, num_rep=3, chs_decay=1.0, pool=pool_type, layers=layers) + elif 'rep' in stem_type: + stem = RepVggBlock(in_chs, out_chs, stride=2, layers=layers) + elif '7x7' in stem_type: + # 7x7 stem conv as in ResNet + if pool_type: + stem = Stem(in_chs, out_chs, 7, num_rep=1, pool=pool_type, layers=layers) + else: + stem = layers.conv_norm_act(in_chs, out_chs, 7, stride=2) + else: + # 3x3 stem conv as in RegNet is the default + if pool_type: + stem = Stem(in_chs, out_chs, 3, num_rep=1, pool=pool_type, layers=layers) + else: + stem = layers.conv_norm_act(in_chs, out_chs, 3, stride=2) + + if isinstance(stem, Stem): + feature_info = [dict(f, module='.'.join([feat_prefix, f['module']])) for f in stem.feature_info] + else: + feature_info = [dict(num_chs=out_chs, reduction=2, module=feat_prefix)] + return stem, feature_info + + +def reduce_feat_size(feat_size, stride=2): + return None if feat_size is None else tuple([s // stride for s in feat_size]) + + +def override_kwargs(block_kwargs, model_kwargs): + """ Override model level attn/self-attn/block kwargs w/ block level + + NOTE: kwargs are NOT merged across levels, block_kwargs will fully replace model_kwargs + for the block if set to anything that isn't None. + + i.e. an empty block_kwargs dict will remove kwargs set at model level for that block + """ + out_kwargs = block_kwargs if block_kwargs is not None else model_kwargs + return out_kwargs or {} # make sure None isn't returned + + +def update_block_kwargs(block_kwargs: Dict[str, Any], block_cfg: ByoBlockCfg, model_cfg: ByoModelCfg, ): + layer_fns = block_kwargs['layers'] + + # override attn layer / args with block local config + attn_set = block_cfg.attn_layer is not None + if attn_set or block_cfg.attn_kwargs is not None: + # override attn layer config + if attn_set and not block_cfg.attn_layer: + # empty string for attn_layer type will disable attn for this block + attn_layer = None + else: + attn_kwargs = override_kwargs(block_cfg.attn_kwargs, model_cfg.attn_kwargs) + attn_layer = block_cfg.attn_layer or model_cfg.attn_layer + attn_layer = partial(get_attn(attn_layer), **attn_kwargs) if attn_layer is not None else None + layer_fns = replace(layer_fns, attn=attn_layer) + + # override self-attn layer / args with block local cfg + self_attn_set = block_cfg.self_attn_layer is not None + if self_attn_set or block_cfg.self_attn_kwargs is not None: + # override attn layer config + if self_attn_set and not block_cfg.self_attn_layer: # attn_layer == '' + # empty string for self_attn_layer type will disable attn for this block + self_attn_layer = None + else: + self_attn_kwargs = override_kwargs(block_cfg.self_attn_kwargs, model_cfg.self_attn_kwargs) + self_attn_layer = block_cfg.self_attn_layer or model_cfg.self_attn_layer + self_attn_layer = partial(get_attn(self_attn_layer), **self_attn_kwargs) \ + if self_attn_layer is not None else None + layer_fns = replace(layer_fns, self_attn=self_attn_layer) + + block_kwargs['layers'] = layer_fns + + # add additional block_kwargs specified in block_cfg or model_cfg, precedence to block if set + 
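+    # Added example (hypothetical values, not in the original source): since
+    # override_kwargs() replaces rather than merges,
+    #   override_kwargs(None, dict(rd_ratio=0.25)) -> {'rd_ratio': 0.25}  # model-level kept
+    #   override_kwargs({},   dict(rd_ratio=0.25)) -> {}                  # block disables them
+    #   override_kwargs(dict(rd_ratio=0.5), dict(rd_ratio=0.25)) -> {'rd_ratio': 0.5}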
block_kwargs.update(override_kwargs(block_cfg.block_kwargs, model_cfg.block_kwargs)) + + +def create_byob_stages( + cfg: ByoModelCfg, drop_path_rate: float, output_stride: int, stem_feat: Dict[str, Any], + feat_size: Optional[int] = None, + layers: Optional[LayerFn] = None, + block_kwargs_fn: Optional[Callable] = update_block_kwargs): + + layers = layers or LayerFn() + feature_info = [] + block_cfgs = [expand_blocks_cfg(s) for s in cfg.blocks] + depths = [sum([bc.d for bc in stage_bcs]) for stage_bcs in block_cfgs] + dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] + dilation = 1 + net_stride = stem_feat['reduction'] + prev_chs = stem_feat['num_chs'] + prev_feat = stem_feat + stages = [] + for stage_idx, stage_block_cfgs in enumerate(block_cfgs): + stride = stage_block_cfgs[0].s + if stride != 1 and prev_feat: + feature_info.append(prev_feat) + if net_stride >= output_stride and stride > 1: + dilation *= stride + stride = 1 + net_stride *= stride + first_dilation = 1 if dilation in (1, 2) else 2 + + blocks = [] + for block_idx, block_cfg in enumerate(stage_block_cfgs): + out_chs = make_divisible(block_cfg.c * cfg.width_factor) + group_size = block_cfg.gs + if isinstance(group_size, Callable): + group_size = group_size(out_chs, block_idx) + block_kwargs = dict( # Blocks used in this model must accept these arguments + in_chs=prev_chs, + out_chs=out_chs, + stride=stride if block_idx == 0 else 1, + dilation=(first_dilation, dilation), + group_size=group_size, + bottle_ratio=block_cfg.br, + downsample=cfg.downsample, + drop_path_rate=dpr[stage_idx][block_idx], + layers=layers, + ) + if block_cfg.type in ('self_attn',): + # add feat_size arg for blocks that support/need it + block_kwargs['feat_size'] = feat_size + block_kwargs_fn(block_kwargs, block_cfg=block_cfg, model_cfg=cfg) + blocks += [create_block(block_cfg.type, **block_kwargs)] + first_dilation = dilation + prev_chs = out_chs + if stride > 1 and block_idx == 0: + feat_size = reduce_feat_size(feat_size, stride) + + stages += [nn.Sequential(*blocks)] + prev_feat = dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}') + + feature_info.append(prev_feat) + return nn.Sequential(*stages), feature_info + + +def get_layer_fns(cfg: ByoModelCfg): + act = get_act_layer(cfg.act_layer) + norm_act = convert_norm_act(norm_layer=cfg.norm_layer, act_layer=act) + conv_norm_act = partial(ConvBnAct, norm_layer=cfg.norm_layer, act_layer=act) + attn = partial(get_attn(cfg.attn_layer), **cfg.attn_kwargs) if cfg.attn_layer else None + self_attn = partial(get_attn(cfg.self_attn_layer), **cfg.self_attn_kwargs) if cfg.self_attn_layer else None + layer_fn = LayerFn(conv_norm_act=conv_norm_act, norm_act=norm_act, act=act, attn=attn, self_attn=self_attn) + return layer_fn + + +class ByobNet(nn.Module): + """ 'Bring-your-own-blocks' Net + + A flexible network backbone that allows building model stem + blocks via + dataclass cfg definition w/ factory functions for module instantiation. + + Current assumption is that both stem and blocks are in conv-bn-act order (w/ block ending in act). 
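+
+    Rough construction sketch (added for illustration; 'gernet_m' is one of
+    the cfgs registered above, the rest is the standard timm factory path):
+
+        model = _create_byobnet('gernet_m', pretrained=False, num_classes=10)
+        # equivalent to build_model_with_cfg(ByobNet, 'gernet_m', ...,
+        # model_cfg=model_cfgs['gernet_m'], num_classes=10)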
+ """ + def __init__(self, cfg: ByoModelCfg, num_classes=1000, in_chans=3, global_pool='avg', output_stride=32, + zero_init_last=True, img_size=None, drop_rate=0., drop_path_rate=0.): + super().__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + layers = get_layer_fns(cfg) + if cfg.fixed_input_size: + assert img_size is not None, 'img_size argument is required for fixed input size model' + feat_size = to_2tuple(img_size) if img_size is not None else None + + self.feature_info = [] + stem_chs = int(round((cfg.stem_chs or cfg.blocks[0].c) * cfg.width_factor)) + self.stem, stem_feat = create_byob_stem(in_chans, stem_chs, cfg.stem_type, cfg.stem_pool, layers=layers) + self.feature_info.extend(stem_feat[:-1]) + feat_size = reduce_feat_size(feat_size, stride=stem_feat[-1]['reduction']) + + self.stages, stage_feat = create_byob_stages( + cfg, drop_path_rate, output_stride, stem_feat[-1], layers=layers, feat_size=feat_size) + self.feature_info.extend(stage_feat[:-1]) + + prev_chs = stage_feat[-1]['num_chs'] + if cfg.num_features: + self.num_features = int(round(cfg.width_factor * cfg.num_features)) + self.final_conv = layers.conv_norm_act(prev_chs, self.num_features, 1) + else: + self.num_features = prev_chs + self.final_conv = nn.Identity() + self.feature_info += [ + dict(num_chs=self.num_features, reduction=stage_feat[-1]['reduction'], module='final_conv')] + + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + # init weights + named_apply(partial(_init_weights, zero_init_last=zero_init_last), self) + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + x = self.stem(x) + x = self.stages(x) + x = self.final_conv(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _init_weights(module, name='', zero_init_last=False): + if isinstance(module, nn.Conv2d): + fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels + fan_out //= module.groups + module.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Linear): + nn.init.normal_(module.weight, mean=0.0, std=0.01) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, nn.BatchNorm2d): + nn.init.ones_(module.weight) + nn.init.zeros_(module.bias) + elif hasattr(module, 'init_weights'): + module.init_weights(zero_init_last=zero_init_last) + + +def _create_byobnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + ByobNet, variant, pretrained, + default_cfg=default_cfgs[variant], + model_cfg=model_cfgs[variant], + feature_cfg=dict(flatten_sequential=True), + **kwargs) diff --git a/timm/models/cait.py b/timm/models/cait.py new file mode 100644 index 0000000..69b4ba0 --- /dev/null +++ b/timm/models/cait.py @@ -0,0 +1,394 @@ +""" Class-Attention in Image Transformers (CaiT) + +Paper: 'Going deeper with Image Transformers' - https://arxiv.org/abs/2103.17239 + +Original code and weights from https://github.com/facebookresearch/deit, copyright below + +""" +# Copyright (c) 2015-present, Facebook, Inc. +# All rights reserved. 
+from copy import deepcopy + +import torch +import torch.nn as nn +from functools import partial + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, overlay_external_default_cfg +from .layers import PatchEmbed, Mlp, DropPath, trunc_normal_ +from .registry import register_model + + +__all__ = ['Cait', 'ClassAttn', 'LayerScaleBlockClassAttn', 'LayerScaleBlock', 'TalkingHeadAttn'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 384, 384), 'pool_size': None, + 'crop_pct': 1.0, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = dict( + cait_xxs24_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/XXS24_224.pth', + input_size=(3, 224, 224), + ), + cait_xxs24_384=_cfg( + url='https://dl.fbaipublicfiles.com/deit/XXS24_384.pth', + ), + cait_xxs36_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/XXS36_224.pth', + input_size=(3, 224, 224), + ), + cait_xxs36_384=_cfg( + url='https://dl.fbaipublicfiles.com/deit/XXS36_384.pth', + ), + cait_xs24_384=_cfg( + url='https://dl.fbaipublicfiles.com/deit/XS24_384.pth', + ), + cait_s24_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/S24_224.pth', + input_size=(3, 224, 224), + ), + cait_s24_384=_cfg( + url='https://dl.fbaipublicfiles.com/deit/S24_384.pth', + ), + cait_s36_384=_cfg( + url='https://dl.fbaipublicfiles.com/deit/S36_384.pth', + ), + cait_m36_384=_cfg( + url='https://dl.fbaipublicfiles.com/deit/M36_384.pth', + ), + cait_m48_448=_cfg( + url='https://dl.fbaipublicfiles.com/deit/M48_448.pth', + input_size=(3, 448, 448), + ), +) + + +class ClassAttn(nn.Module): + # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + # with slight modifications to do CA + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.q = nn.Linear(dim, dim, bias=qkv_bias) + self.k = nn.Linear(dim, dim, bias=qkv_bias) + self.v = nn.Linear(dim, dim, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + q = self.q(x[:, 0]).unsqueeze(1).reshape(B, 1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + k = self.k(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + + q = q * self.scale + v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + + attn = (q @ k.transpose(-2, -1)) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x_cls = (attn @ v).transpose(1, 2).reshape(B, 1, C) + x_cls = self.proj(x_cls) + x_cls = self.proj_drop(x_cls) + + return x_cls + + +class LayerScaleBlockClassAttn(nn.Module): + # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + # with slight modifications to add CA and LayerScale + def __init__( + self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, attn_block=ClassAttn, + mlp_block=Mlp, init_values=1e-4): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = attn_block( + dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, 
proj_drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = mlp_block(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True) + self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True) + + def forward(self, x, x_cls): + u = torch.cat((x_cls, x), dim=1) + x_cls = x_cls + self.drop_path(self.gamma_1 * self.attn(self.norm1(u))) + x_cls = x_cls + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x_cls))) + return x_cls + + +class TalkingHeadAttn(nn.Module): + # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + # with slight modifications to add Talking Heads Attention (https://arxiv.org/pdf/2003.02436v1.pdf) + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + + self.num_heads = num_heads + + head_dim = dim // num_heads + + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + + self.proj = nn.Linear(dim, dim) + + self.proj_l = nn.Linear(num_heads, num_heads) + self.proj_w = nn.Linear(num_heads, num_heads) + + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0] * self.scale, qkv[1], qkv[2] + + attn = (q @ k.transpose(-2, -1)) + + attn = self.proj_l(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + + attn = attn.softmax(dim=-1) + + attn = self.proj_w(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class LayerScaleBlock(nn.Module): + # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + # with slight modifications to add layerScale + def __init__( + self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, attn_block=TalkingHeadAttn, + mlp_block=Mlp, init_values=1e-4): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = attn_block( + dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = mlp_block(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True) + self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True) + + def forward(self, x): + x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x))) + x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) + return x + + +class Cait(nn.Module): + # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + # with slight modifications to adapt to our cait models + def __init__( + self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, + num_heads=12, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., + drop_path_rate=0., + norm_layer=partial(nn.LayerNorm, eps=1e-6), + global_pool=None, + block_layers=LayerScaleBlock, + block_layers_token=LayerScaleBlockClassAttn, + patch_layer=PatchEmbed, + act_layer=nn.GELU, + attn_block=TalkingHeadAttn, + mlp_block=Mlp, + init_scale=1e-4, + attn_block_token_only=ClassAttn, + mlp_block_token_only=Mlp, + depth_token_only=2, + mlp_ratio_clstk=4.0 + ): + super().__init__() + + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim + + self.patch_embed = patch_layer( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + + num_patches = self.patch_embed.num_patches + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = [drop_path_rate for i in range(depth)] + self.blocks = nn.ModuleList([ + block_layers( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + act_layer=act_layer, attn_block=attn_block, mlp_block=mlp_block, init_values=init_scale) + for i in range(depth)]) + + self.blocks_token_only = nn.ModuleList([ + block_layers_token( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio_clstk, qkv_bias=qkv_bias, + drop=0.0, attn_drop=0.0, drop_path=0.0, norm_layer=norm_layer, + act_layer=act_layer, attn_block=attn_block_token_only, + mlp_block=mlp_block_token_only, init_values=init_scale) + for i in range(depth_token_only)]) + + self.norm = norm_layer(embed_dim) + + self.feature_info = [dict(num_chs=embed_dim, reduction=0, module='head')] + self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + trunc_normal_(self.pos_embed, std=.02) + trunc_normal_(self.cls_token, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + B = x.shape[0] + x = self.patch_embed(x) + + cls_tokens = self.cls_token.expand(B, -1, -1) + 
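+        # Added note: unlike a plain ViT, the CLS token is *not* concatenated
+        # onto the patch tokens here. `self.blocks` below run self-attention
+        # over patch tokens only; the CLS token is injected afterwards by the
+        # class-attention `blocks_token_only` stage, which is why pos_embed
+        # was sized (1, num_patches, embed_dim) with no extra token slot.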
+ x = x + self.pos_embed + x = self.pos_drop(x) + + for i, blk in enumerate(self.blocks): + x = blk(x) + + for i, blk in enumerate(self.blocks_token_only): + cls_tokens = blk(x, cls_tokens) + + x = torch.cat((cls_tokens, x), dim=1) + + x = self.norm(x) + return x[:, 0] + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def checkpoint_filter_fn(state_dict, model=None): + if 'model' in state_dict: + state_dict = state_dict['model'] + checkpoint_no_module = {} + for k, v in state_dict.items(): + checkpoint_no_module[k.replace('module.', '')] = v + return checkpoint_no_module + + +def _create_cait(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model = build_model_with_cfg( + Cait, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + return model + + +@register_model +def cait_xxs24_224(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=192, depth=24, num_heads=4, init_scale=1e-5, **kwargs) + model = _create_cait('cait_xxs24_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_xxs24_384(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=192, depth=24, num_heads=4, init_scale=1e-5, **kwargs) + model = _create_cait('cait_xxs24_384', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_xxs36_224(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=192, depth=36, num_heads=4, init_scale=1e-5, **kwargs) + model = _create_cait('cait_xxs36_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_xxs36_384(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=192, depth=36, num_heads=4, init_scale=1e-5, **kwargs) + model = _create_cait('cait_xxs36_384', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_xs24_384(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=288, depth=24, num_heads=6, init_scale=1e-5, **kwargs) + model = _create_cait('cait_xs24_384', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_s24_224(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=384, depth=24, num_heads=8, init_scale=1e-5, **kwargs) + model = _create_cait('cait_s24_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_s24_384(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=384, depth=24, num_heads=8, init_scale=1e-5, **kwargs) + model = _create_cait('cait_s24_384', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_s36_384(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=384, depth=36, num_heads=8, init_scale=1e-6, **kwargs) + model = _create_cait('cait_s36_384', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_m36_384(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=768, depth=36, num_heads=16, init_scale=1e-6, **kwargs) + model = _create_cait('cait_m36_384', pretrained=pretrained, **model_args) + return model + + +@register_model +def cait_m48_448(pretrained=False, **kwargs): + model_args = dict(patch_size=16, embed_dim=768, depth=48, num_heads=16, init_scale=1e-6, **kwargs) + model = _create_cait('cait_m48_448', 
pretrained=pretrained, **model_args) + return model diff --git a/timm/models/coat.py b/timm/models/coat.py new file mode 100644 index 0000000..18ff8ab --- /dev/null +++ b/timm/models/coat.py @@ -0,0 +1,661 @@ +""" +CoaT architecture. + +Paper: Co-Scale Conv-Attentional Image Transformers - https://arxiv.org/abs/2104.06399 + +Official CoaT code at: https://github.com/mlpc-ucsd/CoaT + +Modified from timm/models/vision_transformer.py +""" +from copy import deepcopy +from functools import partial +from typing import Tuple, List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, overlay_external_default_cfg +from .layers import PatchEmbed, Mlp, DropPath, to_2tuple, trunc_normal_ +from .registry import register_model +from .layers import _assert + + +__all__ = [ + "coat_tiny", + "coat_mini", + "coat_lite_tiny", + "coat_lite_mini", + "coat_lite_small" +] + + +def _cfg_coat(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed1.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + 'coat_tiny': _cfg_coat( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-coat-weights/coat_tiny-473c2a20.pth' + ), + 'coat_mini': _cfg_coat( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-coat-weights/coat_mini-2c6baf49.pth' + ), + 'coat_lite_tiny': _cfg_coat( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-coat-weights/coat_lite_tiny-461b07a7.pth' + ), + 'coat_lite_mini': _cfg_coat( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-coat-weights/coat_lite_mini-d7842000.pth' + ), + 'coat_lite_small': _cfg_coat( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-coat-weights/coat_lite_small-fea1d5a1.pth' + ), +} + + +class ConvRelPosEnc(nn.Module): + """ Convolutional relative position encoding. """ + def __init__(self, Ch, h, window): + """ + Initialization. + Ch: Channels per head. + h: Number of heads. + window: Window size(s) in convolutional relative positional encoding. It can have two forms: + 1. An integer of window size, which assigns all attention heads with the same window s + size in ConvRelPosEnc. + 2. A dict mapping window size to #attention head splits ( + e.g. {window size 1: #attention head split 1, window size 2: #attention head split 2}) + It will apply different window size to the attention head splits. + """ + super().__init__() + + if isinstance(window, int): + # Set the same window size for all attention heads. + window = {window: h} + self.window = window + elif isinstance(window, dict): + self.window = window + else: + raise ValueError() + + self.conv_list = nn.ModuleList() + self.head_splits = [] + for cur_window, cur_head_split in window.items(): + dilation = 1 + # Determine padding size. 
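+            # Added worked example (hypothetical numbers): with cur_window=5
+            # and dilation=1, the padding below is (5 + 4*0) // 2 = 2, i.e.
+            # ordinary 'same' padding for an odd kernel, so H x W is preserved.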
+ # Ref: https://discuss.pytorch.org/t/how-to-keep-the-shape-of-input-and-output-same-when-dilation-conv/14338 + padding_size = (cur_window + (cur_window - 1) * (dilation - 1)) // 2 + cur_conv = nn.Conv2d(cur_head_split*Ch, cur_head_split*Ch, + kernel_size=(cur_window, cur_window), + padding=(padding_size, padding_size), + dilation=(dilation, dilation), + groups=cur_head_split*Ch, + ) + self.conv_list.append(cur_conv) + self.head_splits.append(cur_head_split) + self.channel_splits = [x*Ch for x in self.head_splits] + + def forward(self, q, v, size: Tuple[int, int]): + B, h, N, Ch = q.shape + H, W = size + _assert(N == 1 + H * W, '') + + # Convolutional relative position encoding. + q_img = q[:, :, 1:, :] # [B, h, H*W, Ch] + v_img = v[:, :, 1:, :] # [B, h, H*W, Ch] + + v_img = v_img.transpose(-1, -2).reshape(B, h * Ch, H, W) + v_img_list = torch.split(v_img, self.channel_splits, dim=1) # Split according to channels + conv_v_img_list = [] + for i, conv in enumerate(self.conv_list): + conv_v_img_list.append(conv(v_img_list[i])) + conv_v_img = torch.cat(conv_v_img_list, dim=1) + conv_v_img = conv_v_img.reshape(B, h, Ch, H * W).transpose(-1, -2) + + EV_hat = q_img * conv_v_img + EV_hat = F.pad(EV_hat, (0, 0, 1, 0, 0, 0)) # [B, h, N, Ch]. + return EV_hat + + +class FactorAtt_ConvRelPosEnc(nn.Module): + """ Factorized attention with convolutional relative position encoding class. """ + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0., shared_crpe=None): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) # Note: attn_drop is actually not used. + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + # Shared convolutional relative position encoding. + self.crpe = shared_crpe + + def forward(self, x, size: Tuple[int, int]): + B, N, C = x.shape + + # Generate Q, K, V. + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # [B, h, N, Ch] + + # Factorized attention. + k_softmax = k.softmax(dim=2) + factor_att = k_softmax.transpose(-1, -2) @ v + factor_att = q @ factor_att + + # Convolutional relative position encoding. + crpe = self.crpe(q, v, size=size) # [B, h, N, Ch] + + # Merge and reshape. + x = self.scale * factor_att + crpe + x = x.transpose(1, 2).reshape(B, N, C) # [B, h, N, Ch] -> [B, N, h, Ch] -> [B, N, C] + + # Output projection. + x = self.proj(x) + x = self.proj_drop(x) + + return x + + +class ConvPosEnc(nn.Module): + """ Convolutional Position Encoding. + Note: This module is similar to the conditional position encoding in CPVT. + """ + def __init__(self, dim, k=3): + super(ConvPosEnc, self).__init__() + self.proj = nn.Conv2d(dim, dim, k, 1, k//2, groups=dim) + + def forward(self, x, size: Tuple[int, int]): + B, N, C = x.shape + H, W = size + _assert(N == 1 + H * W, '') + + # Extract CLS token and image tokens. + cls_token, img_tokens = x[:, :1], x[:, 1:] # [B, 1, C], [B, H*W, C] + + # Depthwise convolution. + feat = img_tokens.transpose(1, 2).view(B, C, H, W) + x = self.proj(feat) + feat + x = x.flatten(2).transpose(1, 2) + + # Combine with CLS token. + x = torch.cat((cls_token, x), dim=1) + + return x + + +class SerialBlock(nn.Module): + """ Serial block class. + Note: In this implementation, each serial block only contains a conv-attention and a FFN (MLP) module. 
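+    Added note: the conv-attention is FactorAtt_ConvRelPosEnc above, which
+    forms softmax(K)^T @ V (a Ch x Ch matrix per head) before multiplying by
+    Q, so its cost grows linearly in the token count N rather than
+    quadratically, with the depthwise-conv relative position term added on.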
""" + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, shared_cpe=None, shared_crpe=None): + super().__init__() + + # Conv-Attention. + self.cpe = shared_cpe + + self.norm1 = norm_layer(dim) + self.factoratt_crpe = FactorAtt_ConvRelPosEnc( + dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, shared_crpe=shared_crpe) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + # MLP. + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def forward(self, x, size: Tuple[int, int]): + # Conv-Attention. + x = self.cpe(x, size) + cur = self.norm1(x) + cur = self.factoratt_crpe(cur, size) + x = x + self.drop_path(cur) + + # MLP. + cur = self.norm2(x) + cur = self.mlp(cur) + x = x + self.drop_path(cur) + + return x + + +class ParallelBlock(nn.Module): + """ Parallel block class. """ + def __init__(self, dims, num_heads, mlp_ratios=[], qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, shared_crpes=None): + super().__init__() + + # Conv-Attention. + self.norm12 = norm_layer(dims[1]) + self.norm13 = norm_layer(dims[2]) + self.norm14 = norm_layer(dims[3]) + self.factoratt_crpe2 = FactorAtt_ConvRelPosEnc( + dims[1], num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, + shared_crpe=shared_crpes[1] + ) + self.factoratt_crpe3 = FactorAtt_ConvRelPosEnc( + dims[2], num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, + shared_crpe=shared_crpes[2] + ) + self.factoratt_crpe4 = FactorAtt_ConvRelPosEnc( + dims[3], num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, + shared_crpe=shared_crpes[3] + ) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + # MLP. + self.norm22 = norm_layer(dims[1]) + self.norm23 = norm_layer(dims[2]) + self.norm24 = norm_layer(dims[3]) + # In parallel block, we assume dimensions are the same and share the linear transformation. + assert dims[1] == dims[2] == dims[3] + assert mlp_ratios[1] == mlp_ratios[2] == mlp_ratios[3] + mlp_hidden_dim = int(dims[1] * mlp_ratios[1]) + self.mlp2 = self.mlp3 = self.mlp4 = Mlp( + in_features=dims[1], hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def upsample(self, x, factor: float, size: Tuple[int, int]): + """ Feature map up-sampling. """ + return self.interpolate(x, scale_factor=factor, size=size) + + def downsample(self, x, factor: float, size: Tuple[int, int]): + """ Feature map down-sampling. """ + return self.interpolate(x, scale_factor=1.0/factor, size=size) + + def interpolate(self, x, scale_factor: float, size: Tuple[int, int]): + """ Feature map interpolation. 
""" + B, N, C = x.shape + H, W = size + _assert(N == 1 + H * W, '') + + cls_token = x[:, :1, :] + img_tokens = x[:, 1:, :] + + img_tokens = img_tokens.transpose(1, 2).reshape(B, C, H, W) + img_tokens = F.interpolate( + img_tokens, scale_factor=scale_factor, recompute_scale_factor=False, mode='bilinear', align_corners=False) + img_tokens = img_tokens.reshape(B, C, -1).transpose(1, 2) + + out = torch.cat((cls_token, img_tokens), dim=1) + + return out + + def forward(self, x1, x2, x3, x4, sizes: List[Tuple[int, int]]): + _, S2, S3, S4 = sizes + cur2 = self.norm12(x2) + cur3 = self.norm13(x3) + cur4 = self.norm14(x4) + cur2 = self.factoratt_crpe2(cur2, size=S2) + cur3 = self.factoratt_crpe3(cur3, size=S3) + cur4 = self.factoratt_crpe4(cur4, size=S4) + upsample3_2 = self.upsample(cur3, factor=2., size=S3) + upsample4_3 = self.upsample(cur4, factor=2., size=S4) + upsample4_2 = self.upsample(cur4, factor=4., size=S4) + downsample2_3 = self.downsample(cur2, factor=2., size=S2) + downsample3_4 = self.downsample(cur3, factor=2., size=S3) + downsample2_4 = self.downsample(cur2, factor=4., size=S2) + cur2 = cur2 + upsample3_2 + upsample4_2 + cur3 = cur3 + upsample4_3 + downsample2_3 + cur4 = cur4 + downsample3_4 + downsample2_4 + x2 = x2 + self.drop_path(cur2) + x3 = x3 + self.drop_path(cur3) + x4 = x4 + self.drop_path(cur4) + + # MLP. + cur2 = self.norm22(x2) + cur3 = self.norm23(x3) + cur4 = self.norm24(x4) + cur2 = self.mlp2(cur2) + cur3 = self.mlp3(cur3) + cur4 = self.mlp4(cur4) + x2 = x2 + self.drop_path(cur2) + x3 = x3 + self.drop_path(cur3) + x4 = x4 + self.drop_path(cur4) + + return x1, x2, x3, x4 + + +class CoaT(nn.Module): + """ CoaT class. """ + def __init__( + self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dims=(0, 0, 0, 0), + serial_depths=(0, 0, 0, 0), parallel_depth=0, num_heads=0, mlp_ratios=(0, 0, 0, 0), qkv_bias=True, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=partial(nn.LayerNorm, eps=1e-6), + return_interm_layers=False, out_features=None, crpe_window=None, **kwargs): + super().__init__() + crpe_window = crpe_window or {3: 2, 5: 3, 7: 3} + self.return_interm_layers = return_interm_layers + self.out_features = out_features + self.embed_dims = embed_dims + self.num_features = embed_dims[-1] + self.num_classes = num_classes + + # Patch embeddings. + img_size = to_2tuple(img_size) + self.patch_embed1 = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, + embed_dim=embed_dims[0], norm_layer=nn.LayerNorm) + self.patch_embed2 = PatchEmbed( + img_size=[x // 4 for x in img_size], patch_size=2, in_chans=embed_dims[0], + embed_dim=embed_dims[1], norm_layer=nn.LayerNorm) + self.patch_embed3 = PatchEmbed( + img_size=[x // 8 for x in img_size], patch_size=2, in_chans=embed_dims[1], + embed_dim=embed_dims[2], norm_layer=nn.LayerNorm) + self.patch_embed4 = PatchEmbed( + img_size=[x // 16 for x in img_size], patch_size=2, in_chans=embed_dims[2], + embed_dim=embed_dims[3], norm_layer=nn.LayerNorm) + + # Class tokens. + self.cls_token1 = nn.Parameter(torch.zeros(1, 1, embed_dims[0])) + self.cls_token2 = nn.Parameter(torch.zeros(1, 1, embed_dims[1])) + self.cls_token3 = nn.Parameter(torch.zeros(1, 1, embed_dims[2])) + self.cls_token4 = nn.Parameter(torch.zeros(1, 1, embed_dims[3])) + + # Convolutional position encodings. 
+ self.cpe1 = ConvPosEnc(dim=embed_dims[0], k=3) + self.cpe2 = ConvPosEnc(dim=embed_dims[1], k=3) + self.cpe3 = ConvPosEnc(dim=embed_dims[2], k=3) + self.cpe4 = ConvPosEnc(dim=embed_dims[3], k=3) + + # Convolutional relative position encodings. + self.crpe1 = ConvRelPosEnc(Ch=embed_dims[0] // num_heads, h=num_heads, window=crpe_window) + self.crpe2 = ConvRelPosEnc(Ch=embed_dims[1] // num_heads, h=num_heads, window=crpe_window) + self.crpe3 = ConvRelPosEnc(Ch=embed_dims[2] // num_heads, h=num_heads, window=crpe_window) + self.crpe4 = ConvRelPosEnc(Ch=embed_dims[3] // num_heads, h=num_heads, window=crpe_window) + + # Disable stochastic depth. + dpr = drop_path_rate + assert dpr == 0.0 + + # Serial blocks 1. + self.serial_blocks1 = nn.ModuleList([ + SerialBlock( + dim=embed_dims[0], num_heads=num_heads, mlp_ratio=mlp_ratios[0], qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr, norm_layer=norm_layer, + shared_cpe=self.cpe1, shared_crpe=self.crpe1 + ) + for _ in range(serial_depths[0])] + ) + + # Serial blocks 2. + self.serial_blocks2 = nn.ModuleList([ + SerialBlock( + dim=embed_dims[1], num_heads=num_heads, mlp_ratio=mlp_ratios[1], qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr, norm_layer=norm_layer, + shared_cpe=self.cpe2, shared_crpe=self.crpe2 + ) + for _ in range(serial_depths[1])] + ) + + # Serial blocks 3. + self.serial_blocks3 = nn.ModuleList([ + SerialBlock( + dim=embed_dims[2], num_heads=num_heads, mlp_ratio=mlp_ratios[2], qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr, norm_layer=norm_layer, + shared_cpe=self.cpe3, shared_crpe=self.crpe3 + ) + for _ in range(serial_depths[2])] + ) + + # Serial blocks 4. + self.serial_blocks4 = nn.ModuleList([ + SerialBlock( + dim=embed_dims[3], num_heads=num_heads, mlp_ratio=mlp_ratios[3], qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr, norm_layer=norm_layer, + shared_cpe=self.cpe4, shared_crpe=self.crpe4 + ) + for _ in range(serial_depths[3])] + ) + + # Parallel blocks. + self.parallel_depth = parallel_depth + if self.parallel_depth > 0: + self.parallel_blocks = nn.ModuleList([ + ParallelBlock( + dims=embed_dims, num_heads=num_heads, mlp_ratios=mlp_ratios, qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr, norm_layer=norm_layer, + shared_crpes=(self.crpe1, self.crpe2, self.crpe3, self.crpe4) + ) + for _ in range(parallel_depth)] + ) + else: + self.parallel_blocks = None + + # Classification head(s). + if not self.return_interm_layers: + if self.parallel_blocks is not None: + self.norm2 = norm_layer(embed_dims[1]) + self.norm3 = norm_layer(embed_dims[2]) + else: + self.norm2 = self.norm3 = None + self.norm4 = norm_layer(embed_dims[3]) + + if self.parallel_depth > 0: + # CoaT series: Aggregate features of last three scales for classification. + assert embed_dims[1] == embed_dims[2] == embed_dims[3] + self.aggregate = torch.nn.Conv1d(in_channels=3, out_channels=1, kernel_size=1) + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + else: + # CoaT-Lite series: Use feature of last scale for classification. + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + # Initialize weights. 
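+        # cls tokens get a truncated-normal init (std=0.02); Linear/LayerNorm modules
+        # are initialized recursively through self.apply(self._init_weights) below.
+        # A minimal usage sketch, assuming the entrypoints registered at the bottom
+        # of this file:
+        #   model = coat_lite_tiny()                     # embed_dims=(64, 128, 256, 320)
+        #   logits = model(torch.randn(1, 3, 224, 224))  # -> (1, 1000) with default num_classes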
+ trunc_normal_(self.cls_token1, std=.02) + trunc_normal_(self.cls_token2, std=.02) + trunc_normal_(self.cls_token3, std=.02) + trunc_normal_(self.cls_token4, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'cls_token1', 'cls_token2', 'cls_token3', 'cls_token4'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def insert_cls(self, x, cls_token): + """ Insert CLS token. """ + cls_tokens = cls_token.expand(x.shape[0], -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + return x + + def remove_cls(self, x): + """ Remove CLS token. """ + return x[:, 1:, :] + + def forward_features(self, x0): + B = x0.shape[0] + + # Serial blocks 1. + x1 = self.patch_embed1(x0) + H1, W1 = self.patch_embed1.grid_size + x1 = self.insert_cls(x1, self.cls_token1) + for blk in self.serial_blocks1: + x1 = blk(x1, size=(H1, W1)) + x1_nocls = self.remove_cls(x1) + x1_nocls = x1_nocls.reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous() + + # Serial blocks 2. + x2 = self.patch_embed2(x1_nocls) + H2, W2 = self.patch_embed2.grid_size + x2 = self.insert_cls(x2, self.cls_token2) + for blk in self.serial_blocks2: + x2 = blk(x2, size=(H2, W2)) + x2_nocls = self.remove_cls(x2) + x2_nocls = x2_nocls.reshape(B, H2, W2, -1).permute(0, 3, 1, 2).contiguous() + + # Serial blocks 3. + x3 = self.patch_embed3(x2_nocls) + H3, W3 = self.patch_embed3.grid_size + x3 = self.insert_cls(x3, self.cls_token3) + for blk in self.serial_blocks3: + x3 = blk(x3, size=(H3, W3)) + x3_nocls = self.remove_cls(x3) + x3_nocls = x3_nocls.reshape(B, H3, W3, -1).permute(0, 3, 1, 2).contiguous() + + # Serial blocks 4. + x4 = self.patch_embed4(x3_nocls) + H4, W4 = self.patch_embed4.grid_size + x4 = self.insert_cls(x4, self.cls_token4) + for blk in self.serial_blocks4: + x4 = blk(x4, size=(H4, W4)) + x4_nocls = self.remove_cls(x4) + x4_nocls = x4_nocls.reshape(B, H4, W4, -1).permute(0, 3, 1, 2).contiguous() + + # Only serial blocks: Early return. + if self.parallel_blocks is None: + if not torch.jit.is_scripting() and self.return_interm_layers: + # Return intermediate features for down-stream tasks (e.g. Deformable DETR and Detectron2). + feat_out = {} + if 'x1_nocls' in self.out_features: + feat_out['x1_nocls'] = x1_nocls + if 'x2_nocls' in self.out_features: + feat_out['x2_nocls'] = x2_nocls + if 'x3_nocls' in self.out_features: + feat_out['x3_nocls'] = x3_nocls + if 'x4_nocls' in self.out_features: + feat_out['x4_nocls'] = x4_nocls + return feat_out + else: + # Return features for classification. + x4 = self.norm4(x4) + x4_cls = x4[:, 0] + return x4_cls + + # Parallel blocks. + for blk in self.parallel_blocks: + x2, x3, x4 = self.cpe2(x2, (H2, W2)), self.cpe3(x3, (H3, W3)), self.cpe4(x4, (H4, W4)) + x1, x2, x3, x4 = blk(x1, x2, x3, x4, sizes=[(H1, W1), (H2, W2), (H3, W3), (H4, W4)]) + + if not torch.jit.is_scripting() and self.return_interm_layers: + # Return intermediate features for down-stream tasks (e.g. Deformable DETR and Detectron2). 
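+            # feat_out maps each requested name such as 'x2_nocls' to a
+            # (B, C_i, H_i, W_i) feature map, one entry per name in self.out_features.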
+ feat_out = {} + if 'x1_nocls' in self.out_features: + x1_nocls = self.remove_cls(x1) + x1_nocls = x1_nocls.reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous() + feat_out['x1_nocls'] = x1_nocls + if 'x2_nocls' in self.out_features: + x2_nocls = self.remove_cls(x2) + x2_nocls = x2_nocls.reshape(B, H2, W2, -1).permute(0, 3, 1, 2).contiguous() + feat_out['x2_nocls'] = x2_nocls + if 'x3_nocls' in self.out_features: + x3_nocls = self.remove_cls(x3) + x3_nocls = x3_nocls.reshape(B, H3, W3, -1).permute(0, 3, 1, 2).contiguous() + feat_out['x3_nocls'] = x3_nocls + if 'x4_nocls' in self.out_features: + x4_nocls = self.remove_cls(x4) + x4_nocls = x4_nocls.reshape(B, H4, W4, -1).permute(0, 3, 1, 2).contiguous() + feat_out['x4_nocls'] = x4_nocls + return feat_out + else: + x2 = self.norm2(x2) + x3 = self.norm3(x3) + x4 = self.norm4(x4) + x2_cls = x2[:, :1] # [B, 1, C] + x3_cls = x3[:, :1] + x4_cls = x4[:, :1] + merged_cls = torch.cat((x2_cls, x3_cls, x4_cls), dim=1) # [B, 3, C] + merged_cls = self.aggregate(merged_cls).squeeze(dim=1) # Shape: [B, C] + return merged_cls + + def forward(self, x): + if self.return_interm_layers: + # Return intermediate features (for down-stream tasks). + return self.forward_features(x) + else: + # Return features for classification. + x = self.forward_features(x) + x = self.head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + out_dict = {} + for k, v in state_dict.items(): + # original model had unused norm layers, removing them requires filtering pretrained checkpoints + if k.startswith('norm1') or \ + (model.norm2 is None and k.startswith('norm2')) or \ + (model.norm3 is None and k.startswith('norm3')): + continue + out_dict[k] = v + return out_dict + + +def _create_coat(variant, pretrained=False, default_cfg=None, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model = build_model_with_cfg( + CoaT, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + return model + + +@register_model +def coat_tiny(pretrained=False, **kwargs): + model_cfg = dict( + patch_size=4, embed_dims=[152, 152, 152, 152], serial_depths=[2, 2, 2, 2], parallel_depth=6, + num_heads=8, mlp_ratios=[4, 4, 4, 4], **kwargs) + model = _create_coat('coat_tiny', pretrained=pretrained, **model_cfg) + return model + + +@register_model +def coat_mini(pretrained=False, **kwargs): + model_cfg = dict( + patch_size=4, embed_dims=[152, 216, 216, 216], serial_depths=[2, 2, 2, 2], parallel_depth=6, + num_heads=8, mlp_ratios=[4, 4, 4, 4], **kwargs) + model = _create_coat('coat_mini', pretrained=pretrained, **model_cfg) + return model + + +@register_model +def coat_lite_tiny(pretrained=False, **kwargs): + model_cfg = dict( + patch_size=4, embed_dims=[64, 128, 256, 320], serial_depths=[2, 2, 2, 2], parallel_depth=0, + num_heads=8, mlp_ratios=[8, 8, 4, 4], **kwargs) + model = _create_coat('coat_lite_tiny', pretrained=pretrained, **model_cfg) + return model + + +@register_model +def coat_lite_mini(pretrained=False, **kwargs): + model_cfg = dict( + patch_size=4, embed_dims=[64, 128, 320, 512], serial_depths=[2, 2, 2, 2], parallel_depth=0, + num_heads=8, mlp_ratios=[8, 8, 4, 4], **kwargs) + model = _create_coat('coat_lite_mini', pretrained=pretrained, **model_cfg) + return model + + +@register_model +def coat_lite_small(pretrained=False, **kwargs): + model_cfg = dict( + patch_size=4, embed_dims=[64, 128, 320, 512], serial_depths=[3, 4, 6, 3], 
parallel_depth=0,
+        num_heads=8, mlp_ratios=[8, 8, 4, 4], **kwargs)
+    model = _create_coat('coat_lite_small', pretrained=pretrained, **model_cfg)
+    return model
\ No newline at end of file
diff --git a/timm/models/convit.py b/timm/models/convit.py
new file mode 100644
index 0000000..6ef1da7
--- /dev/null
+++ b/timm/models/convit.py
@@ -0,0 +1,347 @@
+""" ConViT Model
+
+@article{d2021convit,
+  title={ConViT: Improving Vision Transformers with Soft Convolutional Inductive Biases},
+  author={d'Ascoli, St{\'e}phane and Touvron, Hugo and Leavitt, Matthew and Morcos, Ari and Biroli, Giulio and Sagun, Levent},
+  journal={arXiv preprint arXiv:2103.10697},
+  year={2021}
+}
+
+Paper link: https://arxiv.org/abs/2103.10697
+Original code: https://github.com/facebookresearch/convit, original copyright below
+"""
+# Copyright (c) 2015-present, Facebook, Inc.
+# All rights reserved.
+#
+# This source code is licensed under the CC-by-NC license found in the
+# LICENSE file in the root directory of this source tree.
+#
+'''These modules are adapted from those of timm, see
+https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
+'''
+
+import torch
+import torch.nn as nn
+from functools import partial
+import torch.nn.functional as F
+
+from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
+from .helpers import build_model_with_cfg
+from .layers import DropPath, to_2tuple, trunc_normal_, PatchEmbed, Mlp
+from .registry import register_model
+from .vision_transformer_hybrid import HybridEmbed
+from .fx_features import register_notrace_module
+
+
+def _cfg(url='', **kwargs):
+    return {
+        'url': url,
+        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
+        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'fixed_input_size': True,
+        'first_conv': 'patch_embed.proj', 'classifier': 'head',
+        **kwargs
+    }
+
+
+default_cfgs = {
+    # ConViT
+    'convit_tiny': _cfg(
+        url="https://dl.fbaipublicfiles.com/convit/convit_tiny.pth"),
+    'convit_small': _cfg(
+        url="https://dl.fbaipublicfiles.com/convit/convit_small.pth"),
+    'convit_base': _cfg(
+        url="https://dl.fbaipublicfiles.com/convit/convit_base.pth")
+}
+
+
+@register_notrace_module  # reason: FX can't symbolically trace control flow in forward method
+class GPSA(nn.Module):
+    def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.,
+                 locality_strength=1.):
+        super().__init__()
+        self.num_heads = num_heads
+        self.dim = dim
+        head_dim = dim // num_heads
+        self.scale = head_dim ** -0.5
+        self.locality_strength = locality_strength
+
+        self.qk = nn.Linear(dim, dim * 2, bias=qkv_bias)
+        self.v = nn.Linear(dim, dim, bias=qkv_bias)
+
+        self.attn_drop = nn.Dropout(attn_drop)
+        self.proj = nn.Linear(dim, dim)
+        self.pos_proj = nn.Linear(3, num_heads)
+        self.proj_drop = nn.Dropout(proj_drop)
+        self.gating_param = nn.Parameter(torch.ones(self.num_heads))
+        self.rel_indices: torch.Tensor = torch.zeros(1, 1, 1, 3)  # silly torchscript hack, won't work with None
+
+    def forward(self, x):
+        B, N, C = x.shape
+        if self.rel_indices is None or self.rel_indices.shape[1] != N:
+            self.rel_indices = self.get_rel_indices(N)
+        attn = self.get_attention(x)
+        v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
+        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
+        x = self.proj(x)
+        x = self.proj_drop(x)
+        return x
+
+    def get_attention(self, x):
+        B, N, C = x.shape
+        qk = self.qk(x).reshape(B, N, 2, self.num_heads, C //
self.num_heads).permute(2, 0, 3, 1, 4)
+        q, k = qk[0], qk[1]
+        pos_score = self.rel_indices.expand(B, -1, -1, -1)
+        pos_score = self.pos_proj(pos_score).permute(0, 3, 1, 2)
+        patch_score = (q @ k.transpose(-2, -1)) * self.scale
+        patch_score = patch_score.softmax(dim=-1)
+        pos_score = pos_score.softmax(dim=-1)
+
+        gating = self.gating_param.view(1, -1, 1, 1)
+        attn = (1. - torch.sigmoid(gating)) * patch_score + torch.sigmoid(gating) * pos_score
+        attn /= attn.sum(dim=-1).unsqueeze(-1)
+        attn = self.attn_drop(attn)
+        return attn
+
+    def get_attention_map(self, x, return_map=False):
+        attn_map = self.get_attention(x).mean(0)  # average over batch
+        distances = self.rel_indices.squeeze()[:, :, -1] ** .5
+        dist = torch.einsum('nm,hnm->h', (distances, attn_map)) / distances.size(0)
+        if return_map:
+            return dist, attn_map
+        else:
+            return dist
+
+    def local_init(self):
+        self.v.weight.data.copy_(torch.eye(self.dim))
+        locality_distance = 1  # max(1,1/locality_strength**.5)
+
+        kernel_size = int(self.num_heads ** .5)
+        center = (kernel_size - 1) / 2 if kernel_size % 2 == 0 else kernel_size // 2
+        for h1 in range(kernel_size):
+            for h2 in range(kernel_size):
+                position = h1 + kernel_size * h2
+                self.pos_proj.weight.data[position, 2] = -1
+                self.pos_proj.weight.data[position, 1] = 2 * (h1 - center) * locality_distance
+                self.pos_proj.weight.data[position, 0] = 2 * (h2 - center) * locality_distance
+        self.pos_proj.weight.data *= self.locality_strength
+
+    def get_rel_indices(self, num_patches: int) -> torch.Tensor:
+        img_size = int(num_patches ** .5)
+        rel_indices = torch.zeros(1, num_patches, num_patches, 3)
+        ind = torch.arange(img_size).view(1, -1) - torch.arange(img_size).view(-1, 1)
+        indx = ind.repeat(img_size, img_size)
+        indy = ind.repeat_interleave(img_size, dim=0).repeat_interleave(img_size, dim=1)
+        indd = indx ** 2 + indy ** 2
+        rel_indices[:, :, :, 2] = indd.unsqueeze(0)
+        rel_indices[:, :, :, 1] = indy.unsqueeze(0)
+        rel_indices[:, :, :, 0] = indx.unsqueeze(0)
+        device = self.qk.weight.device
+        return rel_indices.to(device)
+
+
+class MHSA(nn.Module):
+    def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
+        super().__init__()
+        self.num_heads = num_heads
+        head_dim = dim // num_heads
+        self.scale = head_dim ** -0.5
+
+        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
+        self.attn_drop = nn.Dropout(attn_drop)
+        self.proj = nn.Linear(dim, dim)
+        self.proj_drop = nn.Dropout(proj_drop)
+
+    def get_attention_map(self, x, return_map=False):
+        B, N, C = x.shape
+        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
+        q, k, v = qkv[0], qkv[1], qkv[2]
+        attn_map = (q @ k.transpose(-2, -1)) * self.scale
+        attn_map = attn_map.softmax(dim=-1).mean(0)
+
+        img_size = int(N ** .5)
+        ind = torch.arange(img_size).view(1, -1) - torch.arange(img_size).view(-1, 1)
+        indx = ind.repeat(img_size, img_size)
+        indy = ind.repeat_interleave(img_size, dim=0).repeat_interleave(img_size, dim=1)
+        indd = indx ** 2 + indy ** 2
+        distances = indd ** .5
+        distances = distances.to(x.device)  # follow the input's device rather than assuming CUDA
+
+        dist = torch.einsum('nm,hnm->h', (distances, attn_map)) / N
+        if return_map:
+            return dist, attn_map
+        else:
+            return dist
+
+    def forward(self, x):
+        B, N, C = x.shape
+        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
+        q, k, v = qkv[0], qkv[1], qkv[2]
+
+        attn = (q @ k.transpose(-2, -1)) * self.scale
+        attn = attn.softmax(dim=-1)
+        attn = self.attn_drop(attn)
+
+        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
+        x
= self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_gpsa=True, **kwargs): + super().__init__() + self.norm1 = norm_layer(dim) + self.use_gpsa = use_gpsa + if self.use_gpsa: + self.attn = GPSA( + dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, **kwargs) + else: + self.attn = MHSA(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def forward(self, x): + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class ConViT(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, + num_heads=12, mlp_ratio=4., qkv_bias=False, drop_rate=0., attn_drop_rate=0., + drop_path_rate=0., hybrid_backbone=None, norm_layer=nn.LayerNorm, global_pool=None, + local_up_to_layer=3, locality_strength=1., use_pos_embed=True): + super().__init__() + embed_dim *= num_heads + self.num_classes = num_classes + self.local_up_to_layer = local_up_to_layer + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + self.locality_strength = locality_strength + self.use_pos_embed = use_pos_embed + + if hybrid_backbone is not None: + self.patch_embed = HybridEmbed( + hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim) + else: + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + num_patches = self.patch_embed.num_patches + self.num_patches = num_patches + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.pos_drop = nn.Dropout(p=drop_rate) + + if self.use_pos_embed: + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) + trunc_normal_(self.pos_embed, std=.02) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + use_gpsa=True, + locality_strength=locality_strength) + if i < local_up_to_layer else + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + use_gpsa=False) + for i in range(depth)]) + self.norm = norm_layer(embed_dim) + + # Classifier head + self.feature_info = [dict(num_chs=embed_dim, reduction=0, module='head')] + self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + trunc_normal_(self.cls_token, std=.02) + self.apply(self._init_weights) + for n, m in self.named_modules(): + if hasattr(m, 'local_init'): + m.local_init() + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + 
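+            # together with the bias reset above, this restores LayerNorm's affine
+            # transform to the identity (zero shift, unit scale)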
nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + B = x.shape[0] + x = self.patch_embed(x) + + cls_tokens = self.cls_token.expand(B, -1, -1) + + if self.use_pos_embed: + x = x + self.pos_embed + x = self.pos_drop(x) + + for u, blk in enumerate(self.blocks): + if u == self.local_up_to_layer: + x = torch.cat((cls_tokens, x), dim=1) + x = blk(x) + + x = self.norm(x) + return x[:, 0] + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _create_convit(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + return build_model_with_cfg( + ConViT, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + + +@register_model +def convit_tiny(pretrained=False, **kwargs): + model_args = dict( + local_up_to_layer=10, locality_strength=1.0, embed_dim=48, + num_heads=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model = _create_convit(variant='convit_tiny', pretrained=pretrained, **model_args) + return model + + +@register_model +def convit_small(pretrained=False, **kwargs): + model_args = dict( + local_up_to_layer=10, locality_strength=1.0, embed_dim=48, + num_heads=9, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model = _create_convit(variant='convit_small', pretrained=pretrained, **model_args) + return model + + +@register_model +def convit_base(pretrained=False, **kwargs): + model_args = dict( + local_up_to_layer=10, locality_strength=1.0, embed_dim=48, + num_heads=16, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) + model = _create_convit(variant='convit_base', pretrained=pretrained, **model_args) + return model diff --git a/timm/models/convmixer.py b/timm/models/convmixer.py new file mode 100644 index 0000000..a240078 --- /dev/null +++ b/timm/models/convmixer.py @@ -0,0 +1,101 @@ +import torch.nn as nn +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.models.registry import register_model +from .helpers import build_model_with_cfg + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .96, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'classifier': 'head', + 'first_conv': 'stem.0', + **kwargs + } + + +default_cfgs = { + 'convmixer_1536_20': _cfg(url='https://github.com/tmp-iclr/convmixer/releases/download/timm-v1.0/convmixer_1536_20_ks9_p7.pth.tar'), + 'convmixer_768_32': _cfg(url='https://github.com/tmp-iclr/convmixer/releases/download/timm-v1.0/convmixer_768_32_ks7_p7_relu.pth.tar'), + 'convmixer_1024_20_ks9_p14': _cfg(url='https://github.com/tmp-iclr/convmixer/releases/download/timm-v1.0/convmixer_1024_20_ks9_p14.pth.tar') +} + + +class Residual(nn.Module): + def __init__(self, fn): + super().__init__() + self.fn = fn + + def forward(self, x): + return self.fn(x) + x + + +class ConvMixer(nn.Module): + def __init__(self, dim, depth, kernel_size=9, patch_size=7, in_chans=3, num_classes=1000, activation=nn.GELU, **kwargs): + super().__init__() + self.num_classes = num_classes + self.num_features = dim + 
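+        # ConvMixer: a stride-`patch_size` patchify conv stem, then `depth` blocks of
+        # (residual depthwise conv, then pointwise 1x1 conv), global average pooling,
+        # and a linear head. e.g. convmixer_768_32 below uses dim=768, depth=32,
+        # kernel_size=7, patch_size=7, so a 224x224 input gives a 32x32 token map after the stem.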
self.head = nn.Linear(dim, num_classes) if num_classes > 0 else nn.Identity()
+        self.stem = nn.Sequential(
+            nn.Conv2d(in_chans, dim, kernel_size=patch_size, stride=patch_size),
+            activation(),
+            nn.BatchNorm2d(dim)
+        )
+        self.blocks = nn.Sequential(
+            *[nn.Sequential(
+                Residual(nn.Sequential(
+                    nn.Conv2d(dim, dim, kernel_size, groups=dim, padding="same"),
+                    activation(),
+                    nn.BatchNorm2d(dim)
+                )),
+                nn.Conv2d(dim, dim, kernel_size=1),
+                activation(),
+                nn.BatchNorm2d(dim)
+            ) for _ in range(depth)]
+        )
+        self.pooling = nn.Sequential(
+            nn.AdaptiveAvgPool2d((1, 1)),
+            nn.Flatten()
+        )
+
+    def get_classifier(self):
+        return self.head
+
+    def reset_classifier(self, num_classes, global_pool=''):
+        self.num_classes = num_classes
+        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
+
+    def forward_features(self, x):
+        x = self.stem(x)
+        x = self.blocks(x)
+        x = self.pooling(x)
+        return x
+
+    def forward(self, x):
+        x = self.forward_features(x)
+        x = self.head(x)
+
+        return x
+
+
+def _create_convmixer(variant, pretrained=False, **kwargs):
+    return build_model_with_cfg(ConvMixer, variant, pretrained, default_cfg=default_cfgs[variant], **kwargs)
+
+
+@register_model
+def convmixer_1536_20(pretrained=False, **kwargs):
+    model_args = dict(dim=1536, depth=20, kernel_size=9, patch_size=7, **kwargs)
+    return _create_convmixer('convmixer_1536_20', pretrained, **model_args)
+
+
+@register_model
+def convmixer_768_32(pretrained=False, **kwargs):
+    model_args = dict(dim=768, depth=32, kernel_size=7, patch_size=7, activation=nn.ReLU, **kwargs)
+    return _create_convmixer('convmixer_768_32', pretrained, **model_args)
+
+
+@register_model
+def convmixer_1024_20_ks9_p14(pretrained=False, **kwargs):
+    model_args = dict(dim=1024, depth=20, kernel_size=9, patch_size=14, **kwargs)
+    return _create_convmixer('convmixer_1024_20_ks9_p14', pretrained, **model_args)
\ No newline at end of file
diff --git a/timm/models/crossvit.py b/timm/models/crossvit.py
new file mode 100644
index 0000000..ddc4f64
--- /dev/null
+++ b/timm/models/crossvit.py
@@ -0,0 +1,517 @@
+""" CrossViT Model
+
+@inproceedings{
+    chen2021crossvit,
+    title={{CrossViT: Cross-Attention Multi-Scale Vision Transformer for Image Classification}},
+    author={Chun-Fu (Richard) Chen and Quanfu Fan and Rameswar Panda},
+    booktitle={International Conference on Computer Vision (ICCV)},
+    year={2021}
+}
+
+Paper link: https://arxiv.org/abs/2103.14899
+Original code: https://github.com/IBM/CrossViT/blob/main/models/crossvit.py
+
+NOTE: model names have been renamed from the originals to reflect the actual input resolution: all *_224 -> *_240 and *_384 -> *_408
+"""
+
+# Copyright IBM All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+
+"""
+Modified from Timm.
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + +""" +from typing import Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.hub +from functools import partial +from typing import List + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .fx_features import register_notrace_function +from .helpers import build_model_with_cfg +from .layers import DropPath, to_2tuple, trunc_normal_, _assert +from .registry import register_model +from .vision_transformer import Mlp, Block + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 240, 240), 'pool_size': None, 'crop_pct': 0.875, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'fixed_input_size': True, + 'first_conv': ('patch_embed.0.proj', 'patch_embed.1.proj'), + 'classifier': ('head.0', 'head.1'), + **kwargs + } + + +default_cfgs = { + 'crossvit_15_240': _cfg(url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_15_224.pth'), + 'crossvit_15_dagger_240': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_15_dagger_224.pth', + first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), + ), + 'crossvit_15_dagger_408': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_15_dagger_384.pth', + input_size=(3, 408, 408), first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), crop_pct=1.0, + ), + 'crossvit_18_240': _cfg(url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_18_224.pth'), + 'crossvit_18_dagger_240': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_18_dagger_224.pth', + first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), + ), + 'crossvit_18_dagger_408': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_18_dagger_384.pth', + input_size=(3, 408, 408), first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), crop_pct=1.0, + ), + 'crossvit_9_240': _cfg(url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_9_224.pth'), + 'crossvit_9_dagger_240': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_9_dagger_224.pth', + first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), + ), + 'crossvit_base_240': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_base_224.pth'), + 'crossvit_small_240': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_small_224.pth'), + 'crossvit_tiny_240': _cfg( + url='https://github.com/IBM/CrossViT/releases/download/weights-0.1/crossvit_tiny_224.pth'), +} + + +class PatchEmbed(nn.Module): + """ Image to Patch Embedding + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, multi_conv=False): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + if multi_conv: + if patch_size[0] == 12: + self.proj = nn.Sequential( + nn.Conv2d(in_chans, embed_dim // 4, kernel_size=7, stride=4, padding=3), + nn.ReLU(inplace=True), + nn.Conv2d(embed_dim // 4, embed_dim // 2, kernel_size=3, stride=3, padding=0), + nn.ReLU(inplace=True), + nn.Conv2d(embed_dim // 2, embed_dim, kernel_size=3, stride=1, padding=1), + ) + elif 
patch_size[0] == 16: + self.proj = nn.Sequential( + nn.Conv2d(in_chans, embed_dim // 4, kernel_size=7, stride=4, padding=3), + nn.ReLU(inplace=True), + nn.Conv2d(embed_dim // 4, embed_dim // 2, kernel_size=3, stride=2, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(embed_dim // 2, embed_dim, kernel_size=3, stride=2, padding=1), + ) + else: + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + + def forward(self, x): + B, C, H, W = x.shape + # FIXME look at relaxing size constraints + _assert(H == self.img_size[0], + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).") + _assert(W == self.img_size[1], + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).") + x = self.proj(x).flatten(2).transpose(1, 2) + return x + + +class CrossAttention(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights + self.scale = qk_scale or head_dim ** -0.5 + + self.wq = nn.Linear(dim, dim, bias=qkv_bias) + self.wk = nn.Linear(dim, dim, bias=qkv_bias) + self.wv = nn.Linear(dim, dim, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + # B1C -> B1H(C/H) -> BH1(C/H) + q = self.wq(x[:, 0:1, ...]).reshape(B, 1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + # BNC -> BNH(C/H) -> BHN(C/H) + k = self.wk(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + # BNC -> BNH(C/H) -> BHN(C/H) + v = self.wv(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + + attn = (q @ k.transpose(-2, -1)) * self.scale # BH1(C/H) @ BH(C/H)N -> BH1N + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, 1, C) # (BH1N @ BHN(C/H)) -> BH1(C/H) -> B1H(C/H) -> B1C + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class CrossAttentionBlock(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = CrossAttention( + dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x): + x = x[:, 0:1, ...] 
+ self.drop_path(self.attn(self.norm1(x)))
+
+        return x
+
+
+class MultiScaleBlock(nn.Module):
+
+    def __init__(self, dim, patches, depth, num_heads, mlp_ratio, qkv_bias=False, drop=0., attn_drop=0.,
+                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
+        super().__init__()
+
+        num_branches = len(dim)
+        self.num_branches = num_branches
+        # different branch could have different embedding size, the first one is the base
+        self.blocks = nn.ModuleList()
+        for d in range(num_branches):
+            tmp = []
+            for i in range(depth[d]):
+                tmp.append(Block(
+                    dim=dim[d], num_heads=num_heads[d], mlp_ratio=mlp_ratio[d], qkv_bias=qkv_bias,
+                    drop=drop, attn_drop=attn_drop, drop_path=drop_path[i], norm_layer=norm_layer))
+            if len(tmp) != 0:
+                self.blocks.append(nn.Sequential(*tmp))
+
+        if len(self.blocks) == 0:
+            self.blocks = None
+
+        self.projs = nn.ModuleList()
+        for d in range(num_branches):
+            if dim[d] == dim[(d + 1) % num_branches] and False:  # 'and False' forces a projection even when dims match
+                tmp = [nn.Identity()]
+            else:
+                tmp = [norm_layer(dim[d]), act_layer(), nn.Linear(dim[d], dim[(d + 1) % num_branches])]
+            self.projs.append(nn.Sequential(*tmp))
+
+        self.fusion = nn.ModuleList()
+        for d in range(num_branches):
+            d_ = (d + 1) % num_branches
+            nh = num_heads[d_]
+            if depth[-1] == 0:  # backward compatibility
+                self.fusion.append(
+                    CrossAttentionBlock(
+                        dim=dim[d_], num_heads=nh, mlp_ratio=mlp_ratio[d], qkv_bias=qkv_bias,
+                        drop=drop, attn_drop=attn_drop, drop_path=drop_path[-1], norm_layer=norm_layer))
+            else:
+                tmp = []
+                for _ in range(depth[-1]):
+                    tmp.append(CrossAttentionBlock(
+                        dim=dim[d_], num_heads=nh, mlp_ratio=mlp_ratio[d], qkv_bias=qkv_bias,
+                        drop=drop, attn_drop=attn_drop, drop_path=drop_path[-1], norm_layer=norm_layer))
+                self.fusion.append(nn.Sequential(*tmp))
+
+        self.revert_projs = nn.ModuleList()
+        for d in range(num_branches):
+            if dim[(d + 1) % num_branches] == dim[d] and False:
+                tmp = [nn.Identity()]
+            else:
+                tmp = [norm_layer(dim[(d + 1) % num_branches]), act_layer(),
+                       nn.Linear(dim[(d + 1) % num_branches], dim[d])]
+            self.revert_projs.append(nn.Sequential(*tmp))
+
+    def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]:
+
+        outs_b = []
+        for i, block in enumerate(self.blocks):
+            outs_b.append(block(x[i]))
+
+        # only take the cls token out
+        proj_cls_token = torch.jit.annotate(List[torch.Tensor], [])
+        for i, proj in enumerate(self.projs):
+            proj_cls_token.append(proj(outs_b[i][:, 0:1, ...]))
+
+        # cross attention
+        outs = []
+        for i, (fusion, revert_proj) in enumerate(zip(self.fusion, self.revert_projs)):
+            tmp = torch.cat((proj_cls_token[i], outs_b[(i + 1) % self.num_branches][:, 1:, ...]), dim=1)
+            tmp = fusion(tmp)
+            reverted_proj_cls_token = revert_proj(tmp[:, 0:1, ...])
+            tmp = torch.cat((reverted_proj_cls_token, outs_b[i][:, 1:, ...]), dim=1)
+            outs.append(tmp)
+        return outs
+
+
+def _compute_num_patches(img_size, patches):
+    return [i[0] // p * i[1] // p for i, p in zip(img_size, patches)]
+
+
+@register_notrace_function
+def scale_image(x, ss: Tuple[int, int], crop_scale: bool = False):  # annotations for torchscript
+    """
+    Pulled out of CrossViT.forward_features to bury conditional logic in a leaf node for FX tracing.
+    Args:
+        x (Tensor): input image
+        ss (tuple[int, int]): height and width to scale to
+        crop_scale (bool): whether to crop instead of interpolate to achieve the desired scale.
Defaults to False + Returns: + Tensor: the "scaled" image batch tensor + """ + H, W = x.shape[-2:] + if H != ss[0] or W != ss[1]: + if crop_scale and ss[0] <= H and ss[1] <= W: + cu, cl = int(round((H - ss[0]) / 2.)), int(round((W - ss[1]) / 2.)) + x = x[:, :, cu:cu + ss[0], cl:cl + ss[1]] + else: + x = torch.nn.functional.interpolate(x, size=ss, mode='bicubic', align_corners=False) + return x + + +class CrossViT(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + """ + + def __init__( + self, img_size=224, img_scale=(1.0, 1.0), patch_size=(8, 16), in_chans=3, num_classes=1000, + embed_dim=(192, 384), depth=((1, 3, 1), (1, 3, 1), (1, 3, 1)), num_heads=(6, 12), mlp_ratio=(2., 2., 4.), + qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., + norm_layer=partial(nn.LayerNorm, eps=1e-6), multi_conv=False, crop_scale=False, + ): + super().__init__() + + self.num_classes = num_classes + self.img_size = to_2tuple(img_size) + img_scale = to_2tuple(img_scale) + self.img_size_scaled = [tuple([int(sj * si) for sj in self.img_size]) for si in img_scale] + self.crop_scale = crop_scale # crop instead of interpolate for scale + num_patches = _compute_num_patches(self.img_size_scaled, patch_size) + self.num_branches = len(patch_size) + self.embed_dim = embed_dim + self.num_features = embed_dim[0] # to pass the tests + self.patch_embed = nn.ModuleList() + + # hard-coded for torch jit script + for i in range(self.num_branches): + setattr(self, f'pos_embed_{i}', nn.Parameter(torch.zeros(1, 1 + num_patches[i], embed_dim[i]))) + setattr(self, f'cls_token_{i}', nn.Parameter(torch.zeros(1, 1, embed_dim[i]))) + + for im_s, p, d in zip(self.img_size_scaled, patch_size, embed_dim): + self.patch_embed.append( + PatchEmbed(img_size=im_s, patch_size=p, in_chans=in_chans, embed_dim=d, multi_conv=multi_conv)) + + self.pos_drop = nn.Dropout(p=drop_rate) + + total_depth = sum([sum(x[-2:]) for x in depth]) + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, total_depth)] # stochastic depth decay rule + dpr_ptr = 0 + self.blocks = nn.ModuleList() + for idx, block_cfg in enumerate(depth): + curr_depth = max(block_cfg[:-1]) + block_cfg[-1] + dpr_ = dpr[dpr_ptr:dpr_ptr + curr_depth] + blk = MultiScaleBlock( + embed_dim, num_patches, block_cfg, num_heads=num_heads, mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr_, norm_layer=norm_layer) + dpr_ptr += curr_depth + self.blocks.append(blk) + + self.norm = nn.ModuleList([norm_layer(embed_dim[i]) for i in range(self.num_branches)]) + self.head = nn.ModuleList([ + nn.Linear(embed_dim[i], num_classes) if num_classes > 0 else nn.Identity() + for i in range(self.num_branches)]) + + for i in range(self.num_branches): + trunc_normal_(getattr(self, f'pos_embed_{i}'), std=.02) + trunc_normal_(getattr(self, f'cls_token_{i}'), std=.02) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + out = set() + for i in range(self.num_branches): + out.add(f'cls_token_{i}') + pe = getattr(self, f'pos_embed_{i}', None) + if pe is not None and pe.requires_grad: + out.add(f'pos_embed_{i}') + return out + + def get_classifier(self): + return self.head + + def reset_classifier(self, 
num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.ModuleList( + [nn.Linear(self.embed_dim[i], num_classes) if num_classes > 0 else nn.Identity() for i in + range(self.num_branches)]) + + def forward_features(self, x): + B = x.shape[0] + xs = [] + for i, patch_embed in enumerate(self.patch_embed): + x_ = x + ss = self.img_size_scaled[i] + x_ = scale_image(x_, ss, self.crop_scale) + x_ = patch_embed(x_) + cls_tokens = self.cls_token_0 if i == 0 else self.cls_token_1 # hard-coded for torch jit script + cls_tokens = cls_tokens.expand(B, -1, -1) + x_ = torch.cat((cls_tokens, x_), dim=1) + pos_embed = self.pos_embed_0 if i == 0 else self.pos_embed_1 # hard-coded for torch jit script + x_ = x_ + pos_embed + x_ = self.pos_drop(x_) + xs.append(x_) + + for i, blk in enumerate(self.blocks): + xs = blk(xs) + + # NOTE: was before branch token section, move to here to assure all branch token are before layer norm + xs = [norm(xs[i]) for i, norm in enumerate(self.norm)] + return [xo[:, 0] for xo in xs] + + def forward(self, x): + xs = self.forward_features(x) + ce_logits = [head(xs[i]) for i, head in enumerate(self.head)] + if not isinstance(self.head[0], nn.Identity): + ce_logits = torch.mean(torch.stack(ce_logits, dim=0), dim=0) + return ce_logits + + +def _create_crossvit(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + def pretrained_filter_fn(state_dict): + new_state_dict = {} + for key in state_dict.keys(): + if 'pos_embed' in key or 'cls_token' in key: + new_key = key.replace(".", "_") + else: + new_key = key + new_state_dict[new_key] = state_dict[key] + return new_state_dict + + return build_model_with_cfg( + CrossViT, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_filter_fn=pretrained_filter_fn, + **kwargs) + + +@register_model +def crossvit_tiny_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[96, 192], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]], + num_heads=[3, 3], mlp_ratio=[4, 4, 1], **kwargs) + model = _create_crossvit(variant='crossvit_tiny_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_small_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]], + num_heads=[6, 6], mlp_ratio=[4, 4, 1], **kwargs) + model = _create_crossvit(variant='crossvit_small_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_base_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[384, 768], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]], + num_heads=[12, 12], mlp_ratio=[4, 4, 1], **kwargs) + model = _create_crossvit(variant='crossvit_base_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_9_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[128, 256], depth=[[1, 3, 0], [1, 3, 0], [1, 3, 0]], + num_heads=[4, 4], mlp_ratio=[3, 3, 1], **kwargs) + model = _create_crossvit(variant='crossvit_9_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_15_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], 
[1, 5, 0]], + num_heads=[6, 6], mlp_ratio=[3, 3, 1], **kwargs) + model = _create_crossvit(variant='crossvit_15_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_18_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]], + num_heads=[7, 7], mlp_ratio=[3, 3, 1], **kwargs) + model = _create_crossvit(variant='crossvit_18_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_9_dagger_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[128, 256], depth=[[1, 3, 0], [1, 3, 0], [1, 3, 0]], + num_heads=[4, 4], mlp_ratio=[3, 3, 1], multi_conv=True, **kwargs) + model = _create_crossvit(variant='crossvit_9_dagger_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_15_dagger_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]], + num_heads=[6, 6], mlp_ratio=[3, 3, 1], multi_conv=True, **kwargs) + model = _create_crossvit(variant='crossvit_15_dagger_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_15_dagger_408(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 384/408), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]], + num_heads=[6, 6], mlp_ratio=[3, 3, 1], multi_conv=True, **kwargs) + model = _create_crossvit(variant='crossvit_15_dagger_408', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_18_dagger_240(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]], + num_heads=[7, 7], mlp_ratio=[3, 3, 1], multi_conv=True, **kwargs) + model = _create_crossvit(variant='crossvit_18_dagger_240', pretrained=pretrained, **model_args) + return model + + +@register_model +def crossvit_18_dagger_408(pretrained=False, **kwargs): + model_args = dict( + img_scale=(1.0, 384/408), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]], + num_heads=[7, 7], mlp_ratio=[3, 3, 1], multi_conv=True, **kwargs) + model = _create_crossvit(variant='crossvit_18_dagger_408', pretrained=pretrained, **model_args) + return model diff --git a/timm/models/cspnet.py b/timm/models/cspnet.py new file mode 100644 index 0000000..39d1620 --- /dev/null +++ b/timm/models/cspnet.py @@ -0,0 +1,457 @@ +"""PyTorch CspNet + +A PyTorch implementation of Cross Stage Partial Networks including: +* CSPResNet50 +* CSPResNeXt50 +* CSPDarkNet53 +* and DarkNet53 for good measure + +Based on paper `CSPNet: A New Backbone that can Enhance Learning Capability of CNN` - https://arxiv.org/abs/1911.11929 + +Reference impl via darknet cfg files at https://github.com/WongKinYiu/CrossStagePartialNetworks + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import ClassifierHead, ConvBnAct, DropPath, create_attn, get_norm_act_layer +from .registry import register_model + + +__all__ = ['CspNet'] # model_registry will add each entrypoint fn to this + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 
'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), + 'crop_pct': 0.887, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv1.conv', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = { + 'cspresnet50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnet50_ra-d3e8d487.pth'), + 'cspresnet50d': _cfg(url=''), + 'cspresnet50w': _cfg(url=''), + 'cspresnext50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnext50_ra_224-648b4713.pth', + input_size=(3, 224, 224), pool_size=(7, 7), crop_pct=0.875 # FIXME I trained this at 224x224, not 256 like ref impl + ), + 'cspresnext50_iabn': _cfg(url=''), + 'cspdarknet53': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspdarknet53_ra_256-d05c7c21.pth'), + 'cspdarknet53_iabn': _cfg(url=''), + 'darknet53': _cfg(url=''), +} + + +model_cfgs = dict( + cspresnet50=dict( + stem=dict(out_chs=64, kernel_size=7, stride=2, pool='max'), + stage=dict( + out_chs=(128, 256, 512, 1024), + depth=(3, 3, 5, 2), + stride=(1,) + (2,) * 3, + exp_ratio=(2.,) * 4, + bottle_ratio=(0.5,) * 4, + block_ratio=(1.,) * 4, + cross_linear=True, + ) + ), + cspresnet50d=dict( + stem=dict(out_chs=[32, 32, 64], kernel_size=3, stride=2, pool='max'), + stage=dict( + out_chs=(128, 256, 512, 1024), + depth=(3, 3, 5, 2), + stride=(1,) + (2,) * 3, + exp_ratio=(2.,) * 4, + bottle_ratio=(0.5,) * 4, + block_ratio=(1.,) * 4, + cross_linear=True, + ) + ), + cspresnet50w=dict( + stem=dict(out_chs=[32, 32, 64], kernel_size=3, stride=2, pool='max'), + stage=dict( + out_chs=(256, 512, 1024, 2048), + depth=(3, 3, 5, 2), + stride=(1,) + (2,) * 3, + exp_ratio=(1.,) * 4, + bottle_ratio=(0.25,) * 4, + block_ratio=(0.5,) * 4, + cross_linear=True, + ) + ), + cspresnext50=dict( + stem=dict(out_chs=64, kernel_size=7, stride=2, pool='max'), + stage=dict( + out_chs=(256, 512, 1024, 2048), + depth=(3, 3, 5, 2), + stride=(1,) + (2,) * 3, + groups=(32,) * 4, + exp_ratio=(1.,) * 4, + bottle_ratio=(1.,) * 4, + block_ratio=(0.5,) * 4, + cross_linear=True, + ) + ), + cspdarknet53=dict( + stem=dict(out_chs=32, kernel_size=3, stride=1, pool=''), + stage=dict( + out_chs=(64, 128, 256, 512, 1024), + depth=(1, 2, 8, 8, 4), + stride=(2,) * 5, + exp_ratio=(2.,) + (1.,) * 4, + bottle_ratio=(0.5,) + (1.0,) * 4, + block_ratio=(1.,) + (0.5,) * 4, + down_growth=True, + ) + ), + darknet53=dict( + stem=dict(out_chs=32, kernel_size=3, stride=1, pool=''), + stage=dict( + out_chs=(64, 128, 256, 512, 1024), + depth=(1, 2, 8, 8, 4), + stride=(2,) * 5, + bottle_ratio=(0.5,) * 5, + block_ratio=(1.,) * 5, + ) + ) +) + + +def create_stem( + in_chans=3, out_chs=32, kernel_size=3, stride=2, pool='', + act_layer=None, norm_layer=None, aa_layer=None): + stem = nn.Sequential() + if not isinstance(out_chs, (tuple, list)): + out_chs = [out_chs] + assert len(out_chs) + in_c = in_chans + for i, out_c in enumerate(out_chs): + conv_name = f'conv{i + 1}' + stem.add_module(conv_name, ConvBnAct( + in_c, out_c, kernel_size, stride=stride if i == 0 else 1, + act_layer=act_layer, norm_layer=norm_layer)) + in_c = out_c + last_conv = conv_name + if pool: + if aa_layer is not None: + stem.add_module('pool', nn.MaxPool2d(kernel_size=3, stride=1, padding=1)) + stem.add_module('aa', aa_layer(channels=in_c, stride=2)) + else: + stem.add_module('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) + return stem, 
dict(num_chs=in_c, reduction=stride, module='.'.join(['stem', last_conv])) + + +class ResBottleneck(nn.Module): + """ ResNe(X)t Bottleneck Block + """ + + def __init__(self, in_chs, out_chs, dilation=1, bottle_ratio=0.25, groups=1, + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_last=False, + attn_layer=None, aa_layer=None, drop_block=None, drop_path=None): + super(ResBottleneck, self).__init__() + mid_chs = int(round(out_chs * bottle_ratio)) + ckwargs = dict(act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, drop_block=drop_block) + + self.conv1 = ConvBnAct(in_chs, mid_chs, kernel_size=1, **ckwargs) + self.conv2 = ConvBnAct(mid_chs, mid_chs, kernel_size=3, dilation=dilation, groups=groups, **ckwargs) + self.attn2 = create_attn(attn_layer, channels=mid_chs) if not attn_last else None + self.conv3 = ConvBnAct(mid_chs, out_chs, kernel_size=1, apply_act=False, **ckwargs) + self.attn3 = create_attn(attn_layer, channels=out_chs) if attn_last else None + self.drop_path = drop_path + self.act3 = act_layer(inplace=True) + + def zero_init_last_bn(self): + nn.init.zeros_(self.conv3.bn.weight) + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.conv2(x) + if self.attn2 is not None: + x = self.attn2(x) + x = self.conv3(x) + if self.attn3 is not None: + x = self.attn3(x) + if self.drop_path is not None: + x = self.drop_path(x) + x = x + shortcut + # FIXME partial shortcut needed if first block handled as per original, not used for my current impl + #x[:, :shortcut.size(1)] += shortcut + x = self.act3(x) + return x + + +class DarkBlock(nn.Module): + """ DarkNet Block + """ + + def __init__(self, in_chs, out_chs, dilation=1, bottle_ratio=0.5, groups=1, + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, + drop_block=None, drop_path=None): + super(DarkBlock, self).__init__() + mid_chs = int(round(out_chs * bottle_ratio)) + ckwargs = dict(act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, drop_block=drop_block) + self.conv1 = ConvBnAct(in_chs, mid_chs, kernel_size=1, **ckwargs) + self.conv2 = ConvBnAct(mid_chs, out_chs, kernel_size=3, dilation=dilation, groups=groups, **ckwargs) + self.attn = create_attn(attn_layer, channels=out_chs) + self.drop_path = drop_path + + def zero_init_last_bn(self): + nn.init.zeros_(self.conv2.bn.weight) + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.conv2(x) + if self.attn is not None: + x = self.attn(x) + if self.drop_path is not None: + x = self.drop_path(x) + x = x + shortcut + return x + + +class CrossStage(nn.Module): + """Cross Stage.""" + def __init__(self, in_chs, out_chs, stride, dilation, depth, block_ratio=1., bottle_ratio=1., exp_ratio=1., + groups=1, first_dilation=None, down_growth=False, cross_linear=False, block_dpr=None, + block_fn=ResBottleneck, **block_kwargs): + super(CrossStage, self).__init__() + first_dilation = first_dilation or dilation + down_chs = out_chs if down_growth else in_chs # grow downsample channels to output channels + exp_chs = int(round(out_chs * exp_ratio)) + block_out_chs = int(round(out_chs * block_ratio)) + conv_kwargs = dict(act_layer=block_kwargs.get('act_layer'), norm_layer=block_kwargs.get('norm_layer')) + + if stride != 1 or first_dilation != dilation: + self.conv_down = ConvBnAct( + in_chs, down_chs, kernel_size=3, stride=stride, dilation=first_dilation, groups=groups, + aa_layer=block_kwargs.get('aa_layer', None), **conv_kwargs) + prev_chs = down_chs + else: + self.conv_down = None + prev_chs = in_chs + + # FIXME this 1x1 
expansion is pushed down into the cross and block paths in the darknet cfgs. Also, + # there is also special case for the first stage for some of the model that results in uneven split + # across the two paths. I did it this way for simplicity for now. + self.conv_exp = ConvBnAct(prev_chs, exp_chs, kernel_size=1, apply_act=not cross_linear, **conv_kwargs) + prev_chs = exp_chs // 2 # output of conv_exp is always split in two + + self.blocks = nn.Sequential() + for i in range(depth): + drop_path = DropPath(block_dpr[i]) if block_dpr and block_dpr[i] else None + self.blocks.add_module(str(i), block_fn( + prev_chs, block_out_chs, dilation, bottle_ratio, groups, drop_path=drop_path, **block_kwargs)) + prev_chs = block_out_chs + + # transition convs + self.conv_transition_b = ConvBnAct(prev_chs, exp_chs // 2, kernel_size=1, **conv_kwargs) + self.conv_transition = ConvBnAct(exp_chs, out_chs, kernel_size=1, **conv_kwargs) + + def forward(self, x): + if self.conv_down is not None: + x = self.conv_down(x) + x = self.conv_exp(x) + split = x.shape[1] // 2 + xs, xb = x[:, :split], x[:, split:] + xb = self.blocks(xb) + xb = self.conv_transition_b(xb).contiguous() + out = self.conv_transition(torch.cat([xs, xb], dim=1)) + return out + + +class DarkStage(nn.Module): + """DarkNet stage.""" + + def __init__(self, in_chs, out_chs, stride, dilation, depth, block_ratio=1., bottle_ratio=1., groups=1, + first_dilation=None, block_fn=ResBottleneck, block_dpr=None, **block_kwargs): + super(DarkStage, self).__init__() + first_dilation = first_dilation or dilation + + self.conv_down = ConvBnAct( + in_chs, out_chs, kernel_size=3, stride=stride, dilation=first_dilation, groups=groups, + act_layer=block_kwargs.get('act_layer'), norm_layer=block_kwargs.get('norm_layer'), + aa_layer=block_kwargs.get('aa_layer', None)) + + prev_chs = out_chs + block_out_chs = int(round(out_chs * block_ratio)) + self.blocks = nn.Sequential() + for i in range(depth): + drop_path = DropPath(block_dpr[i]) if block_dpr and block_dpr[i] else None + self.blocks.add_module(str(i), block_fn( + prev_chs, block_out_chs, dilation, bottle_ratio, groups, drop_path=drop_path, **block_kwargs)) + prev_chs = block_out_chs + + def forward(self, x): + x = self.conv_down(x) + x = self.blocks(x) + return x + + +def _cfg_to_stage_args(cfg, curr_stride=2, output_stride=32, drop_path_rate=0.): + # get per stage args for stage and containing blocks, calculate strides to meet target output_stride + num_stages = len(cfg['depth']) + if 'groups' not in cfg: + cfg['groups'] = (1,) * num_stages + if 'down_growth' in cfg and not isinstance(cfg['down_growth'], (list, tuple)): + cfg['down_growth'] = (cfg['down_growth'],) * num_stages + if 'cross_linear' in cfg and not isinstance(cfg['cross_linear'], (list, tuple)): + cfg['cross_linear'] = (cfg['cross_linear'],) * num_stages + cfg['block_dpr'] = [None] * num_stages if not drop_path_rate else \ + [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg['depth'])).split(cfg['depth'])] + stage_strides = [] + stage_dilations = [] + stage_first_dilations = [] + dilation = 1 + for cfg_stride in cfg['stride']: + stage_first_dilations.append(dilation) + if curr_stride >= output_stride: + dilation *= cfg_stride + stride = 1 + else: + stride = cfg_stride + curr_stride *= stride + stage_strides.append(stride) + stage_dilations.append(dilation) + cfg['stride'] = stage_strides + cfg['dilation'] = stage_dilations + cfg['first_dilation'] = stage_first_dilations + stage_args = [dict(zip(cfg.keys(), values)) for values in 
zip(*cfg.values())] + return stage_args + + +class CspNet(nn.Module): + """Cross Stage Partial base model. + + Paper: `CSPNet: A New Backbone that can Enhance Learning Capability of CNN` - https://arxiv.org/abs/1911.11929 + Ref Impl: https://github.com/WongKinYiu/CrossStagePartialNetworks + + NOTE: There are differences in the way I handle the 1x1 'expansion' conv in this impl vs the + darknet impl. I did it this way for simplicity and fewer special cases. + """ + + def __init__(self, cfg, in_chans=3, num_classes=1000, output_stride=32, global_pool='avg', drop_rate=0., + act_layer=nn.LeakyReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, drop_path_rate=0., + zero_init_last_bn=True, stage_fn=CrossStage, block_fn=ResBottleneck): + super().__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + assert output_stride in (8, 16, 32) + layer_args = dict(act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer) + + # Construct the stem + self.stem, stem_feat_info = create_stem(in_chans, **cfg['stem'], **layer_args) + self.feature_info = [stem_feat_info] + prev_chs = stem_feat_info['num_chs'] + curr_stride = stem_feat_info['reduction'] # reduction does not include pool + if cfg['stem']['pool']: + curr_stride *= 2 + + # Construct the stages + per_stage_args = _cfg_to_stage_args( + cfg['stage'], curr_stride=curr_stride, output_stride=output_stride, drop_path_rate=drop_path_rate) + self.stages = nn.Sequential() + for i, sa in enumerate(per_stage_args): + self.stages.add_module( + str(i), stage_fn(prev_chs, **sa, **layer_args, block_fn=block_fn)) + prev_chs = sa['out_chs'] + curr_stride *= sa['stride'] + self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{i}')] + + # Construct the head + self.num_features = prev_chs + self.head = ClassifierHead( + in_chs=prev_chs, num_classes=num_classes, pool_type=global_pool, drop_rate=drop_rate) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, mean=0.0, std=0.01) + nn.init.zeros_(m.bias) + if zero_init_last_bn: + for m in self.modules(): + if hasattr(m, 'zero_init_last_bn'): + m.zero_init_last_bn() + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + x = self.stem(x) + x = self.stages(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _create_cspnet(variant, pretrained=False, **kwargs): + cfg_variant = variant.split('_')[0] + return build_model_with_cfg( + CspNet, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(flatten_sequential=True), model_cfg=model_cfgs[cfg_variant], + **kwargs) + + +@register_model +def cspresnet50(pretrained=False, **kwargs): + return _create_cspnet('cspresnet50', pretrained=pretrained, **kwargs) + + +@register_model +def cspresnet50d(pretrained=False, **kwargs): + return _create_cspnet('cspresnet50d', pretrained=pretrained, **kwargs) + + +@register_model +def cspresnet50w(pretrained=False, **kwargs): + return _create_cspnet('cspresnet50w', pretrained=pretrained, **kwargs) + + +@register_model +def cspresnext50(pretrained=False, **kwargs): + return
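+# NOTE (editor's sketch, not part of the original timm code): the stride/dilation
+# bookkeeping in _cfg_to_stage_args above converts per-stage strides into dilations
+# once the running stride reaches the requested output_stride. A standalone rerun of
+# that loop, assuming stage strides (2, 2, 2, 2), curr_stride=4, output_stride=8:
+curr_stride, dilation, per_stage = 4, 1, []
+for cfg_stride in (2, 2, 2, 2):
+    if curr_stride >= 8:  # target output_stride reached -> dilate instead of striding
+        dilation *= cfg_stride
+        stride = 1
+    else:
+        stride = cfg_stride
+    curr_stride *= stride
+    per_stage.append((stride, dilation))
+assert per_stage == [(2, 1), (1, 2), (1, 4), (1, 8)]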
_create_cspnet('cspresnext50', pretrained=pretrained, **kwargs) + + +@register_model +def cspresnext50_iabn(pretrained=False, **kwargs): + norm_layer = get_norm_act_layer('iabn') + return _create_cspnet('cspresnext50_iabn', pretrained=pretrained, norm_layer=norm_layer, **kwargs) + + +@register_model +def cspdarknet53(pretrained=False, **kwargs): + return _create_cspnet('cspdarknet53', pretrained=pretrained, block_fn=DarkBlock, **kwargs) + + +@register_model +def cspdarknet53_iabn(pretrained=False, **kwargs): + norm_layer = get_norm_act_layer('iabn') + return _create_cspnet('cspdarknet53_iabn', pretrained=pretrained, block_fn=DarkBlock, norm_layer=norm_layer, **kwargs) + + +@register_model +def darknet53(pretrained=False, **kwargs): + return _create_cspnet('darknet53', pretrained=pretrained, block_fn=DarkBlock, stage_fn=DarkStage, **kwargs) diff --git a/timm/models/densenet.py b/timm/models/densenet.py new file mode 100644 index 0000000..38a1972 --- /dev/null +++ b/timm/models/densenet.py @@ -0,0 +1,387 @@ +"""Pytorch Densenet implementation w/ tweaks +This file is a copy of https://github.com/pytorch/vision 'densenet.py' (BSD-3-Clause) with +fixed kwargs passthrough and addition of dynamic global avg/max pool. +""" +import re +from collections import OrderedDict +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from torch.jit.annotations import List + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import BatchNormAct2d, create_norm_act, BlurPool2d, create_classifier +from .registry import register_model + +__all__ = ['DenseNet'] + + +def _cfg(url=''): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'features.conv0', 'classifier': 'classifier', + } + + +default_cfgs = { + 'densenet121': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/densenet121_ra-50efcf5c.pth'), + 'densenet121d': _cfg(url=''), + 'densenetblur121d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/densenetblur121d_ra-100dcfbc.pth'), + 'densenet169': _cfg(url='https://download.pytorch.org/models/densenet169-b2777c0a.pth'), + 'densenet201': _cfg(url='https://download.pytorch.org/models/densenet201-c1103571.pth'), + 'densenet161': _cfg(url='https://download.pytorch.org/models/densenet161-8d451a50.pth'), + 'densenet264': _cfg(url=''), + 'densenet264d_iabn': _cfg(url=''), + 'tv_densenet121': _cfg(url='https://download.pytorch.org/models/densenet121-a639ec97.pth'), +} + + +class DenseLayer(nn.Module): + def __init__(self, num_input_features, growth_rate, bn_size, norm_layer=BatchNormAct2d, + drop_rate=0., memory_efficient=False): + super(DenseLayer, self).__init__() + self.add_module('norm1', norm_layer(num_input_features)), + self.add_module('conv1', nn.Conv2d( + num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False)), + self.add_module('norm2', norm_layer(bn_size * growth_rate)), + self.add_module('conv2', nn.Conv2d( + bn_size * growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False)), + self.drop_rate = float(drop_rate) + self.memory_efficient = memory_efficient + + def bottleneck_fn(self, xs): + # type: (List[torch.Tensor]) -> torch.Tensor + 
concated_features = torch.cat(xs, 1) + bottleneck_output = self.conv1(self.norm1(concated_features)) # noqa: T484 + return bottleneck_output + + # todo: rewrite when torchscript supports any + def any_requires_grad(self, x): + # type: (List[torch.Tensor]) -> bool + for tensor in x: + if tensor.requires_grad: + return True + return False + + @torch.jit.unused # noqa: T484 + def call_checkpoint_bottleneck(self, x): + # type: (List[torch.Tensor]) -> torch.Tensor + def closure(*xs): + return self.bottleneck_fn(xs) + + return cp.checkpoint(closure, *x) + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (List[torch.Tensor]) -> (torch.Tensor) + pass + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (torch.Tensor) -> (torch.Tensor) + pass + + # torchscript does not yet support *args, so we overload method + # allowing it to take either a List[Tensor] or single Tensor + def forward(self, x): # noqa: F811 + if isinstance(x, torch.Tensor): + prev_features = [x] + else: + prev_features = x + + if self.memory_efficient and self.any_requires_grad(prev_features): + if torch.jit.is_scripting(): + raise Exception("Memory Efficient not supported in JIT") + bottleneck_output = self.call_checkpoint_bottleneck(prev_features) + else: + bottleneck_output = self.bottleneck_fn(prev_features) + + new_features = self.conv2(self.norm2(bottleneck_output)) + if self.drop_rate > 0: + new_features = F.dropout(new_features, p=self.drop_rate, training=self.training) + return new_features + + +class DenseBlock(nn.ModuleDict): + _version = 2 + + def __init__(self, num_layers, num_input_features, bn_size, growth_rate, norm_layer=nn.ReLU, + drop_rate=0., memory_efficient=False): + super(DenseBlock, self).__init__() + for i in range(num_layers): + layer = DenseLayer( + num_input_features + i * growth_rate, + growth_rate=growth_rate, + bn_size=bn_size, + norm_layer=norm_layer, + drop_rate=drop_rate, + memory_efficient=memory_efficient, + ) + self.add_module('denselayer%d' % (i + 1), layer) + + def forward(self, init_features): + features = [init_features] + for name, layer in self.items(): + new_features = layer(features) + features.append(new_features) + return torch.cat(features, 1) + + +class DenseTransition(nn.Sequential): + def __init__(self, num_input_features, num_output_features, norm_layer=nn.BatchNorm2d, aa_layer=None): + super(DenseTransition, self).__init__() + self.add_module('norm', norm_layer(num_input_features)) + self.add_module('conv', nn.Conv2d( + num_input_features, num_output_features, kernel_size=1, stride=1, bias=False)) + if aa_layer is not None: + self.add_module('pool', aa_layer(num_output_features, stride=2)) + else: + self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2)) + + +class DenseNet(nn.Module): + r"""Densenet-BC model class, based on + `"Densely Connected Convolutional Networks" `_ + + Args: + growth_rate (int) - how many filters to add each layer (`k` in paper) + block_config (list of 4 ints) - how many layers in each pooling block + bn_size (int) - multiplicative factor for number of bottle neck layers + (i.e. bn_size * k features in the bottleneck layer) + drop_rate (float) - dropout rate after each dense layer + num_classes (int) - number of classification classes + memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, + but slower. Default: *False*. 
See `"paper" `_ + """ + + def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16), bn_size=4, stem_type='', + num_classes=1000, in_chans=3, global_pool='avg', + norm_layer=BatchNormAct2d, aa_layer=None, drop_rate=0, memory_efficient=False, + aa_stem_only=True): + self.num_classes = num_classes + self.drop_rate = drop_rate + super(DenseNet, self).__init__() + + # Stem + deep_stem = 'deep' in stem_type # 3x3 deep stem + num_init_features = growth_rate * 2 + if aa_layer is None: + stem_pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + else: + stem_pool = nn.Sequential(*[ + nn.MaxPool2d(kernel_size=3, stride=1, padding=1), + aa_layer(channels=num_init_features, stride=2)]) + if deep_stem: + stem_chs_1 = stem_chs_2 = growth_rate + if 'tiered' in stem_type: + stem_chs_1 = 3 * (growth_rate // 4) + stem_chs_2 = num_init_features if 'narrow' in stem_type else 6 * (growth_rate // 4) + self.features = nn.Sequential(OrderedDict([ + ('conv0', nn.Conv2d(in_chans, stem_chs_1, 3, stride=2, padding=1, bias=False)), + ('norm0', norm_layer(stem_chs_1)), + ('conv1', nn.Conv2d(stem_chs_1, stem_chs_2, 3, stride=1, padding=1, bias=False)), + ('norm1', norm_layer(stem_chs_2)), + ('conv2', nn.Conv2d(stem_chs_2, num_init_features, 3, stride=1, padding=1, bias=False)), + ('norm2', norm_layer(num_init_features)), + ('pool0', stem_pool), + ])) + else: + self.features = nn.Sequential(OrderedDict([ + ('conv0', nn.Conv2d(in_chans, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)), + ('norm0', norm_layer(num_init_features)), + ('pool0', stem_pool), + ])) + self.feature_info = [ + dict(num_chs=num_init_features, reduction=2, module=f'features.norm{2 if deep_stem else 0}')] + current_stride = 4 + + # DenseBlocks + num_features = num_init_features + for i, num_layers in enumerate(block_config): + block = DenseBlock( + num_layers=num_layers, + num_input_features=num_features, + bn_size=bn_size, + growth_rate=growth_rate, + norm_layer=norm_layer, + drop_rate=drop_rate, + memory_efficient=memory_efficient + ) + module_name = f'denseblock{(i + 1)}' + self.features.add_module(module_name, block) + num_features = num_features + num_layers * growth_rate + transition_aa_layer = None if aa_stem_only else aa_layer + if i != len(block_config) - 1: + self.feature_info += [ + dict(num_chs=num_features, reduction=current_stride, module='features.' + module_name)] + current_stride *= 2 + trans = DenseTransition( + num_input_features=num_features, num_output_features=num_features // 2, + norm_layer=norm_layer, aa_layer=transition_aa_layer) + self.features.add_module(f'transition{i + 1}', trans) + num_features = num_features // 2 + + # Final batch norm + self.features.add_module('norm5', norm_layer(num_features)) + + self.feature_info += [dict(num_chs=num_features, reduction=current_stride, module='features.norm5')] + self.num_features = num_features + + # Linear layer + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + # Official init from torch repo. 
+ for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.constant_(m.bias, 0) + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + return self.features(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + # both classifier and block drop? + # if self.drop_rate > 0.: + # x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.classifier(x) + return x + + +def _filter_torchvision_pretrained(state_dict): + pattern = re.compile( + r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$') + + for key in list(state_dict.keys()): + res = pattern.match(key) + if res: + new_key = res.group(1) + res.group(2) + state_dict[new_key] = state_dict[key] + del state_dict[key] + return state_dict + + +def _create_densenet(variant, growth_rate, block_config, pretrained, **kwargs): + kwargs['growth_rate'] = growth_rate + kwargs['block_config'] = block_config + return build_model_with_cfg( + DenseNet, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(flatten_sequential=True), pretrained_filter_fn=_filter_torchvision_pretrained, + **kwargs) + + +@register_model +def densenet121(pretrained=False, **kwargs): + r"""Densenet-121 model from + `"Densely Connected Convolutional Networks" ` + """ + model = _create_densenet( + 'densenet121', growth_rate=32, block_config=(6, 12, 24, 16), pretrained=pretrained, **kwargs) + return model + + +@register_model +def densenetblur121d(pretrained=False, **kwargs): + r"""Densenet-121 model from + `"Densely Connected Convolutional Networks" ` + """ + model = _create_densenet( + 'densenetblur121d', growth_rate=32, block_config=(6, 12, 24, 16), pretrained=pretrained, stem_type='deep', + aa_layer=BlurPool2d, **kwargs) + return model + + +@register_model +def densenet121d(pretrained=False, **kwargs): + r"""Densenet-121 model from + `"Densely Connected Convolutional Networks" ` + """ + model = _create_densenet( + 'densenet121d', growth_rate=32, block_config=(6, 12, 24, 16), stem_type='deep', + pretrained=pretrained, **kwargs) + return model + + +@register_model +def densenet169(pretrained=False, **kwargs): + r"""Densenet-169 model from + `"Densely Connected Convolutional Networks" ` + """ + model = _create_densenet( + 'densenet169', growth_rate=32, block_config=(6, 12, 32, 32), pretrained=pretrained, **kwargs) + return model + + +@register_model +def densenet201(pretrained=False, **kwargs): + r"""Densenet-201 model from + `"Densely Connected Convolutional Networks" ` + """ + model = _create_densenet( + 'densenet201', growth_rate=32, block_config=(6, 12, 48, 32), pretrained=pretrained, **kwargs) + return model + + +@register_model +def densenet161(pretrained=False, **kwargs): + r"""Densenet-161 model from + `"Densely Connected Convolutional Networks" ` + """ + model = _create_densenet( + 'densenet161', growth_rate=48, block_config=(6, 12, 36, 24), pretrained=pretrained, **kwargs) + return model + + +@register_model +def densenet264(pretrained=False, **kwargs): + r"""Densenet-264 model from + `"Densely Connected Convolutional Networks" ` + """ + 
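+# NOTE (editor's note): _filter_torchvision_pretrained above remaps torchvision
+# checkpoint keys to this module's layout, e.g.
+#   'features.denseblock1.denselayer1.norm.1.weight'
+#     -> 'features.denseblock1.denselayer1.norm1.weight'
+# (new_key = group(1) '...denselayer1.norm' + group(2) '1.weight'); without this
+# remap, torchvision weights cannot load into DenseLayer's norm1/conv1/norm2/conv2
+# naming.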
model = _create_densenet( + 'densenet264', growth_rate=48, block_config=(6, 12, 64, 48), pretrained=pretrained, **kwargs) + return model + + +@register_model +def densenet264d_iabn(pretrained=False, **kwargs): + r"""Densenet-264 model with deep stem and Inplace-ABN + """ + def norm_act_fn(num_features, **kwargs): + return create_norm_act('iabn', num_features, **kwargs) + model = _create_densenet( + 'densenet264d_iabn', growth_rate=48, block_config=(6, 12, 64, 48), stem_type='deep', + norm_layer=norm_act_fn, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tv_densenet121(pretrained=False, **kwargs): + r"""Densenet-121 model with original Torchvision weights, from + `"Densely Connected Convolutional Networks" ` + """ + model = _create_densenet( + 'tv_densenet121', growth_rate=32, block_config=(6, 12, 24, 16), pretrained=pretrained, **kwargs) + return model diff --git a/timm/models/dla.py b/timm/models/dla.py new file mode 100644 index 0000000..f6e4dd2 --- /dev/null +++ b/timm/models/dla.py @@ -0,0 +1,443 @@ +""" Deep Layer Aggregation and DLA w/ Res2Net +DLA original adapted from Official Pytorch impl at: +DLA Paper: `Deep Layer Aggregation` - https://arxiv.org/abs/1707.06484 + +Res2Net additions from: https://github.com/gasvn/Res2Net/ +Res2Net Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169 +""" +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import create_classifier +from .registry import register_model + +__all__ = ['DLA'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'base_layer.0', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + 'dla34': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla34-ba72cf86.pth'), + 'dla46_c': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla46_c-2bfd52c3.pth'), + 'dla46x_c': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla46x_c-d761bae7.pth'), + 'dla60x_c': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla60x_c-b870c45c.pth'), + 'dla60': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla60-24839fc4.pth'), + 'dla60x': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla60x-d15cacda.pth'), + 'dla102': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla102-d94d9790.pth'), + 'dla102x': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla102x-ad62be81.pth'), + 'dla102x2': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla102x2-262837b6.pth'), + 'dla169': _cfg(url='http://dl.yf.io/dla/models/imagenet/dla169-0914e092.pth'), + 'dla60_res2net': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net_dla60_4s-d88db7f9.pth'), + 'dla60_res2next': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2next_dla60_4s-d327927b.pth'), +} + + +class DlaBasic(nn.Module): + """DLA Basic""" + + def __init__(self, inplanes, planes, stride=1, dilation=1, **_): + super(DlaBasic, self).__init__() + self.conv1 = nn.Conv2d( + inplanes, planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation) + self.bn1 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = nn.Conv2d( + planes, planes, kernel_size=3, stride=1, 
padding=dilation, bias=False, dilation=dilation) + self.bn2 = nn.BatchNorm2d(planes) + self.stride = stride + + def forward(self, x, shortcut=None): + if shortcut is None: + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + out += shortcut + out = self.relu(out) + + return out + + +class DlaBottleneck(nn.Module): + """DLA/DLA-X Bottleneck""" + expansion = 2 + + def __init__(self, inplanes, outplanes, stride=1, dilation=1, cardinality=1, base_width=64): + super(DlaBottleneck, self).__init__() + self.stride = stride + mid_planes = int(math.floor(outplanes * (base_width / 64)) * cardinality) + mid_planes = mid_planes // self.expansion + + self.conv1 = nn.Conv2d(inplanes, mid_planes, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(mid_planes) + self.conv2 = nn.Conv2d( + mid_planes, mid_planes, kernel_size=3, stride=stride, padding=dilation, + bias=False, dilation=dilation, groups=cardinality) + self.bn2 = nn.BatchNorm2d(mid_planes) + self.conv3 = nn.Conv2d(mid_planes, outplanes, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(outplanes) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x, shortcut=None): + if shortcut is None: + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + out += shortcut + out = self.relu(out) + + return out + + +class DlaBottle2neck(nn.Module): + """ Res2Net/Res2NeXT DLA Bottleneck + Adapted from https://github.com/gasvn/Res2Net/blob/master/dla.py + """ + expansion = 2 + + def __init__(self, inplanes, outplanes, stride=1, dilation=1, scale=4, cardinality=8, base_width=4): + super(DlaBottle2neck, self).__init__() + self.is_first = stride > 1 + self.scale = scale + mid_planes = int(math.floor(outplanes * (base_width / 64)) * cardinality) + mid_planes = mid_planes // self.expansion + self.width = mid_planes + + self.conv1 = nn.Conv2d(inplanes, mid_planes * scale, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(mid_planes * scale) + + num_scale_convs = max(1, scale - 1) + convs = [] + bns = [] + for _ in range(num_scale_convs): + convs.append(nn.Conv2d( + mid_planes, mid_planes, kernel_size=3, stride=stride, + padding=dilation, dilation=dilation, groups=cardinality, bias=False)) + bns.append(nn.BatchNorm2d(mid_planes)) + self.convs = nn.ModuleList(convs) + self.bns = nn.ModuleList(bns) + if self.is_first: + self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1) + + self.conv3 = nn.Conv2d(mid_planes * scale, outplanes, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(outplanes) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x, shortcut=None): + if shortcut is None: + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + spx = torch.split(out, self.width, 1) + spo = [] + for i, (conv, bn) in enumerate(zip(self.convs, self.bns)): + sp = spx[i] if i == 0 or self.is_first else sp + spx[i] + sp = conv(sp) + sp = bn(sp) + sp = self.relu(sp) + spo.append(sp) + if self.scale > 1: + spo.append(self.pool(spx[-1]) if self.is_first else spx[-1]) + out = torch.cat(spo, 1) + + out = self.conv3(out) + out = self.bn3(out) + + out += shortcut + out = self.relu(out) + + return out + + +class DlaRoot(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size, shortcut): + super(DlaRoot, self).__init__() + self.conv = nn.Conv2d( + in_channels, out_channels, 1, 
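+# NOTE (editor's sketch): DlaBottle2neck above implements the Res2Net hierarchy by
+# widening conv1 to mid_planes * scale channels and then splitting:
+#   spx = torch.split(out, self.width, 1)   # `scale` groups of `width` channels each
+# Group i > 0 receives sp + spx[i], so each successive 3x3 conv sees a larger
+# effective receptive field; the stride > 1 ('is_first') case average-pools the
+# last group instead of passing it through unchanged.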
stride=1, bias=False, padding=(kernel_size - 1) // 2) + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU(inplace=True) + self.shortcut = shortcut + + def forward(self, *x): + children = x + x = self.conv(torch.cat(x, 1)) + x = self.bn(x) + if self.shortcut: + x += children[0] + x = self.relu(x) + + return x + + +class DlaTree(nn.Module): + def __init__(self, levels, block, in_channels, out_channels, stride=1, + dilation=1, cardinality=1, base_width=64, + level_root=False, root_dim=0, root_kernel_size=1, root_shortcut=False): + super(DlaTree, self).__init__() + if root_dim == 0: + root_dim = 2 * out_channels + if level_root: + root_dim += in_channels + self.downsample = nn.MaxPool2d(stride, stride=stride) if stride > 1 else nn.Identity() + self.project = nn.Identity() + cargs = dict(dilation=dilation, cardinality=cardinality, base_width=base_width) + if levels == 1: + self.tree1 = block(in_channels, out_channels, stride, **cargs) + self.tree2 = block(out_channels, out_channels, 1, **cargs) + if in_channels != out_channels: + # NOTE the official impl/weights have project layers in levels > 1 case that are never + # used, I've moved the project layer here to avoid wasted params but old checkpoints will + # need strict=False while loading. + self.project = nn.Sequential( + nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False), + nn.BatchNorm2d(out_channels)) + else: + cargs.update(dict(root_kernel_size=root_kernel_size, root_shortcut=root_shortcut)) + self.tree1 = DlaTree( + levels - 1, block, in_channels, out_channels, stride, root_dim=0, **cargs) + self.tree2 = DlaTree( + levels - 1, block, out_channels, out_channels, root_dim=root_dim + out_channels, **cargs) + if levels == 1: + self.root = DlaRoot(root_dim, out_channels, root_kernel_size, root_shortcut) + self.level_root = level_root + self.root_dim = root_dim + self.levels = levels + + def forward(self, x, shortcut=None, children=None): + children = [] if children is None else children + bottom = self.downsample(x) + shortcut = self.project(bottom) + if self.level_root: + children.append(bottom) + x1 = self.tree1(x, shortcut) + if self.levels == 1: + x2 = self.tree2(x1) + x = self.root(x2, x1, *children) + else: + children.append(x1) + x = self.tree2(x1, children=children) + return x + + +class DLA(nn.Module): + def __init__(self, levels, channels, output_stride=32, num_classes=1000, in_chans=3, + cardinality=1, base_width=64, block=DlaBottle2neck, shortcut_root=False, + drop_rate=0.0, global_pool='avg'): + super(DLA, self).__init__() + self.channels = channels + self.num_classes = num_classes + self.cardinality = cardinality + self.base_width = base_width + self.drop_rate = drop_rate + assert output_stride == 32 # FIXME support dilation + + self.base_layer = nn.Sequential( + nn.Conv2d(in_chans, channels[0], kernel_size=7, stride=1, padding=3, bias=False), + nn.BatchNorm2d(channels[0]), + nn.ReLU(inplace=True)) + self.level0 = self._make_conv_level(channels[0], channels[0], levels[0]) + self.level1 = self._make_conv_level(channels[0], channels[1], levels[1], stride=2) + cargs = dict(cardinality=cardinality, base_width=base_width, root_shortcut=shortcut_root) + self.level2 = DlaTree(levels[2], block, channels[1], channels[2], 2, level_root=False, **cargs) + self.level3 = DlaTree(levels[3], block, channels[2], channels[3], 2, level_root=True, **cargs) + self.level4 = DlaTree(levels[4], block, channels[3], channels[4], 2, level_root=True, **cargs) + self.level5 = DlaTree(levels[5], block, channels[4], 
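+# NOTE (editor's sketch, hypothetical sizes): each DlaTree recursively builds
+# tree1/tree2 sub-trees and fuses their outputs (plus any level_root children) in a
+# DlaRoot 1x1 conv. A standalone smoke test might look like:
+#   tree = DlaTree(levels=2, block=DlaBasic, in_channels=32, out_channels=64, stride=2)
+#   y = tree(torch.randn(1, 32, 56, 56))   # -> torch.Size([1, 64, 28, 28])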
channels[5], 2, level_root=True, **cargs) + self.feature_info = [ + dict(num_chs=channels[0], reduction=1, module='level0'), # rare to have a meaningful stride 1 level + dict(num_chs=channels[1], reduction=2, module='level1'), + dict(num_chs=channels[2], reduction=4, module='level2'), + dict(num_chs=channels[3], reduction=8, module='level3'), + dict(num_chs=channels[4], reduction=16, module='level4'), + dict(num_chs=channels[5], reduction=32, module='level5'), + ] + + self.num_features = channels[-1] + self.global_pool, self.fc = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. / n)) + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + + def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1): + modules = [] + for i in range(convs): + modules.extend([ + nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride if i == 0 else 1, + padding=dilation, bias=False, dilation=dilation), + nn.BatchNorm2d(planes), + nn.ReLU(inplace=True)]) + inplanes = planes + return nn.Sequential(*modules) + + def get_classifier(self): + return self.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() + + def forward_features(self, x): + x = self.base_layer(x) + x = self.level0(x) + x = self.level1(x) + x = self.level2(x) + x = self.level3(x) + x = self.level4(x) + x = self.level5(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.fc(x) + x = self.flatten(x) + return x + + +def _create_dla(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + DLA, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_strict=False, + feature_cfg=dict(out_indices=(1, 2, 3, 4, 5)), + **kwargs) + + +@register_model +def dla60_res2net(pretrained=False, **kwargs): + model_kwargs = dict( + levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024), + block=DlaBottle2neck, cardinality=1, base_width=28, **kwargs) + return _create_dla('dla60_res2net', pretrained, **model_kwargs) + + +@register_model +def dla60_res2next(pretrained=False,**kwargs): + model_kwargs = dict( + levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024), + block=DlaBottle2neck, cardinality=8, base_width=4, **kwargs) + return _create_dla('dla60_res2next', pretrained, **model_kwargs) + + +@register_model +def dla34(pretrained=False, **kwargs): # DLA-34 + model_kwargs = dict( + levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 128, 256, 512], + block=DlaBasic, **kwargs) + return _create_dla('dla34', pretrained, **model_kwargs) + + +@register_model +def dla46_c(pretrained=False, **kwargs): # DLA-46-C + model_kwargs = dict( + levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 128, 256], + block=DlaBottleneck, **kwargs) + return _create_dla('dla46_c', pretrained, **model_kwargs) + + +@register_model +def dla46x_c(pretrained=False, **kwargs): # DLA-X-46-C + model_kwargs = dict( + levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 
128, 256], + block=DlaBottleneck, cardinality=32, base_width=4, **kwargs) + return _create_dla('dla46x_c', pretrained, **model_kwargs) + + +@register_model +def dla60x_c(pretrained=False, **kwargs): # DLA-X-60-C + model_kwargs = dict( + levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 64, 64, 128, 256], + block=DlaBottleneck, cardinality=32, base_width=4, **kwargs) + return _create_dla('dla60x_c', pretrained, **model_kwargs) + + +@register_model +def dla60(pretrained=False, **kwargs): # DLA-60 + model_kwargs = dict( + levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, **kwargs) + return _create_dla('dla60', pretrained, **model_kwargs) + + +@register_model +def dla60x(pretrained=False, **kwargs): # DLA-X-60 + model_kwargs = dict( + levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, cardinality=32, base_width=4, **kwargs) + return _create_dla('dla60x', pretrained, **model_kwargs) + + +@register_model +def dla102(pretrained=False, **kwargs): # DLA-102 + model_kwargs = dict( + levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, shortcut_root=True, **kwargs) + return _create_dla('dla102', pretrained, **model_kwargs) + + +@register_model +def dla102x(pretrained=False, **kwargs): # DLA-X-102 + model_kwargs = dict( + levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, cardinality=32, base_width=4, shortcut_root=True, **kwargs) + return _create_dla('dla102x', pretrained, **model_kwargs) + + +@register_model +def dla102x2(pretrained=False, **kwargs): # DLA-X-102 64 + model_kwargs = dict( + levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, cardinality=64, base_width=4, shortcut_root=True, **kwargs) + return _create_dla('dla102x2', pretrained, **model_kwargs) + + +@register_model +def dla169(pretrained=False, **kwargs): # DLA-169 + model_kwargs = dict( + levels=[1, 1, 2, 3, 5, 1], channels=[16, 32, 128, 256, 512, 1024], + block=DlaBottleneck, shortcut_root=True, **kwargs) + return _create_dla('dla169', pretrained, **model_kwargs) diff --git a/timm/models/dpn.py b/timm/models/dpn.py new file mode 100644 index 0000000..c4e380b --- /dev/null +++ b/timm/models/dpn.py @@ -0,0 +1,317 @@ +""" PyTorch implementation of DualPathNetworks +Based on original MXNet implementation https://github.com/cypw/DPNs with +many ideas from another PyTorch implementation https://github.com/oyam/pytorch-DPNs. + +This implementation is compatible with the pretrained weights from cypw's MXNet implementation. 
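+
+NOTE (added sketch, not from the original header): each DualPathBlock below keeps two
+paths: a ResNet-style residual sum over the first num_1x1_c channels and a
+DenseNet-style concatenation over the remaining channels, so every block returns a
+tensor pair:
+
+    resid = x_s1 + out1                     # residual path, fixed width
+    dense = torch.cat([x_s2, out2], dim=1)  # dense path, grows by `inc` per block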
+ +Hacked together by / Copyright 2020 Ross Wightman +""" +from collections import OrderedDict +from functools import partial +from typing import Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DPN_MEAN, IMAGENET_DPN_STD, IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import BatchNormAct2d, ConvBnAct, create_conv2d, create_classifier +from .registry import register_model + +__all__ = ['DPN'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DPN_MEAN, 'std': IMAGENET_DPN_STD, + 'first_conv': 'features.conv1_1.conv', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = { + 'dpn68': _cfg( + url='https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn68-66bebafa7.pth'), + 'dpn68b': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/dpn68b_ra-a31ca160.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + 'dpn92': _cfg( + url='https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn92_extra-b040e4a9b.pth'), + 'dpn98': _cfg( + url='https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn98-5b90dec4d.pth'), + 'dpn131': _cfg( + url='https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn131-71dfe43e0.pth'), + 'dpn107': _cfg( + url='https://github.com/rwightman/pytorch-dpn-pretrained/releases/download/v0.1/dpn107_extra-1ac7121e2.pth') +} + + +class CatBnAct(nn.Module): + def __init__(self, in_chs, norm_layer=BatchNormAct2d): + super(CatBnAct, self).__init__() + self.bn = norm_layer(in_chs, eps=0.001) + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (Tuple[torch.Tensor, torch.Tensor]) -> (torch.Tensor) + pass + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (torch.Tensor) -> (torch.Tensor) + pass + + def forward(self, x): + if isinstance(x, tuple): + x = torch.cat(x, dim=1) + return self.bn(x) + + +class BnActConv2d(nn.Module): + def __init__(self, in_chs, out_chs, kernel_size, stride, groups=1, norm_layer=BatchNormAct2d): + super(BnActConv2d, self).__init__() + self.bn = norm_layer(in_chs, eps=0.001) + self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, groups=groups) + + def forward(self, x): + return self.conv(self.bn(x)) + + +class DualPathBlock(nn.Module): + def __init__( + self, in_chs, num_1x1_a, num_3x3_b, num_1x1_c, inc, groups, block_type='normal', b=False): + super(DualPathBlock, self).__init__() + self.num_1x1_c = num_1x1_c + self.inc = inc + self.b = b + if block_type == 'proj': + self.key_stride = 1 + self.has_proj = True + elif block_type == 'down': + self.key_stride = 2 + self.has_proj = True + else: + assert block_type == 'normal' + self.key_stride = 1 + self.has_proj = False + + self.c1x1_w_s1 = None + self.c1x1_w_s2 = None + if self.has_proj: + # Using different member names here to allow easier parameter key matching for conversion + if self.key_stride == 2: + self.c1x1_w_s2 = BnActConv2d( + in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=2) + else: + self.c1x1_w_s1 = BnActConv2d( + in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=1) + + self.c1x1_a = BnActConv2d(in_chs=in_chs, out_chs=num_1x1_a, kernel_size=1, stride=1) + self.c3x3_b = BnActConv2d( + 
in_chs=num_1x1_a, out_chs=num_3x3_b, kernel_size=3, stride=self.key_stride, groups=groups) + if b: + self.c1x1_c = CatBnAct(in_chs=num_3x3_b) + self.c1x1_c1 = create_conv2d(num_3x3_b, num_1x1_c, kernel_size=1) + self.c1x1_c2 = create_conv2d(num_3x3_b, inc, kernel_size=1) + else: + self.c1x1_c = BnActConv2d(in_chs=num_3x3_b, out_chs=num_1x1_c + inc, kernel_size=1, stride=1) + self.c1x1_c1 = None + self.c1x1_c2 = None + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor] + pass + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor] + pass + + def forward(self, x) -> Tuple[torch.Tensor, torch.Tensor]: + if isinstance(x, tuple): + x_in = torch.cat(x, dim=1) + else: + x_in = x + if self.c1x1_w_s1 is None and self.c1x1_w_s2 is None: + # self.has_proj == False, torchscript requires condition on module == None + x_s1 = x[0] + x_s2 = x[1] + else: + # self.has_proj == True + if self.c1x1_w_s1 is not None: + # self.key_stride = 1 + x_s = self.c1x1_w_s1(x_in) + else: + # self.key_stride = 2 + x_s = self.c1x1_w_s2(x_in) + x_s1 = x_s[:, :self.num_1x1_c, :, :] + x_s2 = x_s[:, self.num_1x1_c:, :, :] + x_in = self.c1x1_a(x_in) + x_in = self.c3x3_b(x_in) + x_in = self.c1x1_c(x_in) + if self.c1x1_c1 is not None: + # self.b == True, using None check for torchscript compat + out1 = self.c1x1_c1(x_in) + out2 = self.c1x1_c2(x_in) + else: + out1 = x_in[:, :self.num_1x1_c, :, :] + out2 = x_in[:, self.num_1x1_c:, :, :] + resid = x_s1 + out1 + dense = torch.cat([x_s2, out2], dim=1) + return resid, dense + + +class DPN(nn.Module): + def __init__(self, small=False, num_init_features=64, k_r=96, groups=32, + b=False, k_sec=(3, 4, 20, 3), inc_sec=(16, 32, 24, 128), output_stride=32, + num_classes=1000, in_chans=3, drop_rate=0., global_pool='avg', fc_act=nn.ELU): + super(DPN, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + self.b = b + assert output_stride == 32 # FIXME look into dilation support + norm_layer = partial(BatchNormAct2d, eps=.001) + fc_norm_layer = partial(BatchNormAct2d, eps=.001, act_layer=fc_act, inplace=False) + bw_factor = 1 if small else 4 + blocks = OrderedDict() + + # conv1 + blocks['conv1_1'] = ConvBnAct( + in_chans, num_init_features, kernel_size=3 if small else 7, stride=2, norm_layer=norm_layer) + blocks['conv1_pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.feature_info = [dict(num_chs=num_init_features, reduction=2, module='features.conv1_1')] + + # conv2 + bw = 64 * bw_factor + inc = inc_sec[0] + r = (k_r * bw) // (64 * bw_factor) + blocks['conv2_1'] = DualPathBlock(num_init_features, r, r, bw, inc, groups, 'proj', b) + in_chs = bw + 3 * inc + for i in range(2, k_sec[0] + 1): + blocks['conv2_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) + in_chs += inc + self.feature_info += [dict(num_chs=in_chs, reduction=4, module=f'features.conv2_{k_sec[0]}')] + + # conv3 + bw = 128 * bw_factor + inc = inc_sec[1] + r = (k_r * bw) // (64 * bw_factor) + blocks['conv3_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b) + in_chs = bw + 3 * inc + for i in range(2, k_sec[1] + 1): + blocks['conv3_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) + in_chs += inc + self.feature_info += [dict(num_chs=in_chs, reduction=8, module=f'features.conv3_{k_sec[1]}')] + + # conv4 + bw = 256 * bw_factor + inc = inc_sec[2] + r = (k_r * bw) // (64 * 
bw_factor) + blocks['conv4_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b) + in_chs = bw + 3 * inc + for i in range(2, k_sec[2] + 1): + blocks['conv4_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) + in_chs += inc + self.feature_info += [dict(num_chs=in_chs, reduction=16, module=f'features.conv4_{k_sec[2]}')] + + # conv5 + bw = 512 * bw_factor + inc = inc_sec[3] + r = (k_r * bw) // (64 * bw_factor) + blocks['conv5_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b) + in_chs = bw + 3 * inc + for i in range(2, k_sec[3] + 1): + blocks['conv5_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) + in_chs += inc + self.feature_info += [dict(num_chs=in_chs, reduction=32, module=f'features.conv5_{k_sec[3]}')] + + blocks['conv5_bn_ac'] = CatBnAct(in_chs, norm_layer=fc_norm_layer) + + self.num_features = in_chs + self.features = nn.Sequential(blocks) + + # Using 1x1 conv for the FC layer to allow the extra pooling scheme + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() + + def forward_features(self, x): + return self.features(x) + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.classifier(x) + x = self.flatten(x) + return x + + +def _create_dpn(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + DPN, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(feature_concat=True, flatten_sequential=True), + **kwargs) + + +@register_model +def dpn68(pretrained=False, **kwargs): + model_kwargs = dict( + small=True, num_init_features=10, k_r=128, groups=32, + k_sec=(3, 4, 12, 3), inc_sec=(16, 32, 32, 64), **kwargs) + return _create_dpn('dpn68', pretrained=pretrained, **model_kwargs) + + +@register_model +def dpn68b(pretrained=False, **kwargs): + model_kwargs = dict( + small=True, num_init_features=10, k_r=128, groups=32, + b=True, k_sec=(3, 4, 12, 3), inc_sec=(16, 32, 32, 64), **kwargs) + return _create_dpn('dpn68b', pretrained=pretrained, **model_kwargs) + + +@register_model +def dpn92(pretrained=False, **kwargs): + model_kwargs = dict( + num_init_features=64, k_r=96, groups=32, + k_sec=(3, 4, 20, 3), inc_sec=(16, 32, 24, 128), **kwargs) + return _create_dpn('dpn92', pretrained=pretrained, **model_kwargs) + + +@register_model +def dpn98(pretrained=False, **kwargs): + model_kwargs = dict( + num_init_features=96, k_r=160, groups=40, + k_sec=(3, 6, 20, 3), inc_sec=(16, 32, 32, 128), **kwargs) + return _create_dpn('dpn98', pretrained=pretrained, **model_kwargs) + + +@register_model +def dpn131(pretrained=False, **kwargs): + model_kwargs = dict( + num_init_features=128, k_r=160, groups=40, + k_sec=(4, 8, 28, 3), inc_sec=(16, 32, 32, 128), **kwargs) + return _create_dpn('dpn131', pretrained=pretrained, **model_kwargs) + + +@register_model +def dpn107(pretrained=False, **kwargs): + model_kwargs = dict( + num_init_features=128, k_r=200, groups=50, + k_sec=(4, 8, 20, 3), inc_sec=(20, 64, 
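+# NOTE (editor's worked example, dpn92 numbers): the stage widths above follow the
+# bw/inc/r bookkeeping. For conv2 of dpn92 (k_r=96, bw_factor=4, inc_sec[0]=16):
+#   bw = 64 * 4 = 256; inc = 16; r = (96 * 256) // (64 * 4) = 96
+# in_chs after the 'proj' block is bw + 3 * inc = 304, growing by inc for each
+# 'normal' block, i.e. 304 + 2 * 16 = 336 channels out of the 3-block stage.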
64, 128), **kwargs) + return _create_dpn('dpn107', pretrained=pretrained, **model_kwargs) diff --git a/timm/models/efficientnet.py b/timm/models/efficientnet.py new file mode 100644 index 0000000..b1c570b --- /dev/null +++ b/timm/models/efficientnet.py @@ -0,0 +1,2286 @@ +""" The EfficientNet Family in PyTorch + +An implementation of EfficientNet that covers a variety of related models with efficient architectures: + +* EfficientNet-V2 + - `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + +* EfficientNet (B0-B8, L2 + Tensorflow pretrained AutoAug/RandAug/AdvProp/NoisyStudent weight ports) + - EfficientNet: Rethinking Model Scaling for CNNs - https://arxiv.org/abs/1905.11946 + - CondConv: Conditionally Parameterized Convolutions for Efficient Inference - https://arxiv.org/abs/1904.04971 + - Adversarial Examples Improve Image Recognition - https://arxiv.org/abs/1911.09665 + - Self-training with Noisy Student improves ImageNet classification - https://arxiv.org/abs/1911.04252 + +* MixNet (Small, Medium, and Large) + - MixConv: Mixed Depthwise Convolutional Kernels - https://arxiv.org/abs/1907.09595 + +* MNasNet B1, A1 (SE), Small + - MnasNet: Platform-Aware Neural Architecture Search for Mobile - https://arxiv.org/abs/1807.11626 + +* FBNet-C + - FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable NAS - https://arxiv.org/abs/1812.03443 + +* Single-Path NAS Pixel1 + - Single-Path NAS: Designing Hardware-Efficient ConvNets - https://arxiv.org/abs/1904.02877 + +* TinyNet + - Model Rubik's Cube: Twisting Resolution, Depth and Width for TinyNets - https://arxiv.org/abs/2010.14819 + - Definitions & weights borrowed from https://github.com/huawei-noah/CV-Backbones/tree/master/tinynet_pytorch + +* And likely more... + +The majority of the above models (EfficientNet*, MixNet, MnasNet) and original weights were made available +by Mingxing Tan, Quoc Le, and other members of their Google Brain team. Thanks for consistently releasing +the models and weights open source!
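+
+NOTE (added sketch, not part of the original header): every variant registered below
+gets a default config from the _cfg() helper; an entry only overrides what differs
+from the 224x224 ImageNet defaults, e.g. a hypothetical 300px variant:
+
+    _cfg(url='', input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904)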
+ +Hacked together by / Copyright 2021 Ross Wightman +""" +from functools import partial +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .efficientnet_blocks import SqueezeExcite +from .efficientnet_builder import EfficientNetBuilder, decode_arch_def, efficientnet_init_weights,\ + round_channels, resolve_bn_args, resolve_act_layer, BN_EPS_TF_DEFAULT +from .features import FeatureInfo, FeatureHooks +from .helpers import build_model_with_cfg, default_cfg_for_features +from .layers import create_conv2d, create_classifier +from .registry import register_model + +__all__ = ['EfficientNet', 'EfficientNetFeatures'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv_stem', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = { + 'mnasnet_050': _cfg(url=''), + 'mnasnet_075': _cfg(url=''), + 'mnasnet_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_b1-74cb7081.pth'), + 'mnasnet_140': _cfg(url=''), + + 'semnasnet_050': _cfg(url=''), + 'semnasnet_075': _cfg(url=''), + 'semnasnet_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_a1-d9418771.pth'), + 'semnasnet_140': _cfg(url=''), + 'mnasnet_small': _cfg(url=''), + + 'mobilenetv2_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_100_ra-b33bc2c4.pth'), + 'mobilenetv2_110d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_110d_ra-77090ade.pth'), + 'mobilenetv2_120d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_120d_ra-5987e2ed.pth'), + 'mobilenetv2_140': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_140_ra-21a4e913.pth'), + + 'fbnetc_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetc_100-c345b898.pth', + interpolation='bilinear'), + 'spnasnet_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/spnasnet_100-048bc3f4.pth', + interpolation='bilinear'), + + # NOTE experimenting with alternate attention + 'efficientnet_b0': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b0_ra-3dd342df.pth'), + 'efficientnet_b1': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b1-533bc792.pth', + test_input_size=(3, 256, 256), crop_pct=1.0), + 'efficientnet_b2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b2_ra-bcdf34b7.pth', + input_size=(3, 256, 256), pool_size=(8, 8), test_input_size=(3, 288, 288), crop_pct=1.0), + 'efficientnet_b3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth', + input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), crop_pct=1.0), + 'efficientnet_b4': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b4_ra2_320-7eb33cd5.pth', + input_size=(3, 320, 320), pool_size=(10, 10), test_input_size=(3, 384, 384), crop_pct=1.0), + 'efficientnet_b5': _cfg( + url='', input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), + 'efficientnet_b6': _cfg( + url='', input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), + 'efficientnet_b7': _cfg( + url='', input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), + 'efficientnet_b8': _cfg( + url='', input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), + 'efficientnet_l2': _cfg( + url='', input_size=(3, 800, 800), pool_size=(25, 25), crop_pct=0.961), + + 'efficientnet_es': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_ra-f111e99c.pth'), + 'efficientnet_em': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_em_ra2-66250f76.pth', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'efficientnet_el': _cfg( + url='https://github.com/DeGirum/pruned-models/releases/download/efficientnet_v1.0/efficientnet_el.pth', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + + 'efficientnet_es_pruned': _cfg( + url='https://github.com/DeGirum/pruned-models/releases/download/efficientnet_v1.0/efficientnet_es_pruned75.pth'), + 'efficientnet_el_pruned': _cfg( + url='https://github.com/DeGirum/pruned-models/releases/download/efficientnet_v1.0/efficientnet_el_pruned70.pth', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + + 'efficientnet_cc_b0_4e': _cfg(url=''), + 'efficientnet_cc_b0_8e': _cfg(url=''), + 'efficientnet_cc_b1_8e': _cfg(url='', input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + + 'efficientnet_lite0': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_lite0_ra-37913777.pth'), + 'efficientnet_lite1': _cfg( + url='', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'efficientnet_lite2': _cfg( + url='', + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), + 'efficientnet_lite3': _cfg( + url='', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + 'efficientnet_lite4': _cfg( + url='', input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), + + 'efficientnet_b1_pruned': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb1_pruned_9ebb3fe6.pth', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'efficientnet_b2_pruned': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb2_pruned_203f55bc.pth', + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'efficientnet_b3_pruned': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb3_pruned_5abcc29f.pth', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + + 'efficientnetv2_rw_t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnetv2_t_agc-3620981a.pth', + input_size=(3, 224, 224), test_input_size=(3, 288, 288), pool_size=(7, 7), crop_pct=1.0), + 'gc_efficientnetv2_rw_t': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gc_efficientnetv2_rw_t_agc-927a0bde.pth', + input_size=(3, 224, 224), test_input_size=(3, 288, 288), pool_size=(7, 7), crop_pct=1.0), + 'efficientnetv2_rw_s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_v2s_ra2_288-a6477665.pth', + input_size=(3, 288, 288), test_input_size=(3, 384, 384), pool_size=(9, 9), crop_pct=1.0), + 'efficientnetv2_rw_m': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnetv2_rw_m_agc-3d90cb1e.pth', + input_size=(3, 320, 320), test_input_size=(3, 416, 416), pool_size=(10, 10), crop_pct=1.0), + + 'efficientnetv2_s': _cfg( + url='', + input_size=(3, 288, 288), test_input_size=(3, 384, 384), pool_size=(9, 9), crop_pct=1.0), + 'efficientnetv2_m': _cfg( + url='', + input_size=(3, 320, 320), test_input_size=(3, 416, 416), pool_size=(10, 10), crop_pct=1.0), + 'efficientnetv2_l': _cfg( + url='', + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + 'efficientnetv2_xl': _cfg( + url='', + input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0), + + 'tf_efficientnet_b0': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_aa-827b6e33.pth', + input_size=(3, 224, 224)), + 'tf_efficientnet_b1': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_aa-ea7a6ee0.pth', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'tf_efficientnet_b2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_aa-60c94f97.pth', + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), + 'tf_efficientnet_b3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_aa-84b4657e.pth', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + 'tf_efficientnet_b4': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_aa-818f208c.pth', + input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), + 'tf_efficientnet_b5': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ra-9a3e5369.pth', + input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), + 'tf_efficientnet_b6': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_aa-80ba17e4.pth', + input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), + 'tf_efficientnet_b7': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ra-6c08e654.pth', + input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), + 'tf_efficientnet_b8': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ra-572d5dd9.pth', + input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), + + 'tf_efficientnet_b0_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ap-f262efe1.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 224, 224)), + 'tf_efficientnet_b1_ap': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ap-44ef0a3d.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'tf_efficientnet_b2_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ap-2f8e7636.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), + 'tf_efficientnet_b3_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ap-aad25bdd.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + 'tf_efficientnet_b4_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ap-dedb23e6.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), + 'tf_efficientnet_b5_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ap-9e82fae8.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), + 'tf_efficientnet_b6_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ap-4ffb161f.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), + 'tf_efficientnet_b7_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ap-ddb28fec.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), + 'tf_efficientnet_b8_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ap-00e169fa.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), + + 'tf_efficientnet_b0_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ns-c0e6a31c.pth', + input_size=(3, 224, 224)), + 'tf_efficientnet_b1_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ns-99dd0c41.pth', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'tf_efficientnet_b2_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ns-00306e48.pth', + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), + 'tf_efficientnet_b3_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ns-9d44bf68.pth', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + 'tf_efficientnet_b4_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ns-d6313a46.pth', + input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), + 'tf_efficientnet_b5_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ns-6f26d0cf.pth', + input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), + 'tf_efficientnet_b6_ns': 
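Two fields in these entries interact: `crop_pct` sets how much of the resized image the eval center-crop keeps, while a larger `test_input_size` (where present) encodes a train/test resolution discrepancy and is usually paired with `crop_pct=1.0`. A quick sketch of the arithmetic, assuming timm's usual resize-then-center-crop eval pipeline (`eval_resize` is a hypothetical helper):

    import math

    def eval_resize(img_size: int, crop_pct: float) -> int:
        # Resize the short side to img_size / crop_pct, then center-crop img_size.
        return int(math.floor(img_size / crop_pct))

    assert eval_resize(240, 0.882) == 272  # the B1-sized configs above
    assert eval_resize(224, 0.875) == 256  # the common 224 default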
_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ns-51548356.pth', + input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), + 'tf_efficientnet_b7_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ns-1dbc32de.pth', + input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), + 'tf_efficientnet_l2_ns_475': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns_475-bebbd00a.pth', + input_size=(3, 475, 475), pool_size=(15, 15), crop_pct=0.936), + 'tf_efficientnet_l2_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns-df73bb44.pth', + input_size=(3, 800, 800), pool_size=(25, 25), crop_pct=0.96), + + 'tf_efficientnet_es': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_es-ca1afbfe.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 224, 224), ), + 'tf_efficientnet_em': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_em-e78cfe58.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'tf_efficientnet_el': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_el-5143854e.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + + 'tf_efficientnet_cc_b0_4e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_4e-4362b6b2.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_efficientnet_cc_b0_8e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_8e-66184a25.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_efficientnet_cc_b1_8e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b1_8e-f7c79ae1.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + + 'tf_efficientnet_lite0': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite0-0aa007d2.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res + ), + 'tf_efficientnet_lite1': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite1-bde8b488.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882, + interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res + ), + 'tf_efficientnet_lite2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite2-dcccb7df.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890, + interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res + ), + 'tf_efficientnet_lite3': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite3-b733e338.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904, interpolation='bilinear'), + 'tf_efficientnet_lite4': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite4-741542c3.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.920, interpolation='bilinear'), + + 'tf_efficientnetv2_s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s-eb54923e.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0), + 'tf_efficientnetv2_m': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m-cc09e0cd.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + 'tf_efficientnetv2_l': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l-d664b728.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + + 'tf_efficientnetv2_s_in21ft1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21ft1k-d7dafa41.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0), + 'tf_efficientnetv2_m_in21ft1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21ft1k-bf41664a.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + 'tf_efficientnetv2_l_in21ft1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21ft1k-60127a9d.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + 'tf_efficientnetv2_xl_in21ft1k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_xl_in21ft1k-06c35c48.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0), + + 'tf_efficientnetv2_s_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21k-6337ad01.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, + input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0), + 'tf_efficientnetv2_m_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21k-361418a2.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + 'tf_efficientnetv2_l_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21k-91a19ec9.pth', + 
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, + input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), + 'tf_efficientnetv2_xl_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_xl_in21k-fd7e8abf.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, + input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0), + + 'tf_efficientnetv2_b0': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b0-c7cc451f.pth', + input_size=(3, 192, 192), test_input_size=(3, 224, 224), pool_size=(6, 6)), + 'tf_efficientnetv2_b1': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b1-be6e41b0.pth', + input_size=(3, 192, 192), test_input_size=(3, 240, 240), pool_size=(6, 6), crop_pct=0.882), + 'tf_efficientnetv2_b2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b2-847de54e.pth', + input_size=(3, 208, 208), test_input_size=(3, 260, 260), pool_size=(7, 7), crop_pct=0.890), + 'tf_efficientnetv2_b3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b3-57773f13.pth', + input_size=(3, 240, 240), test_input_size=(3, 300, 300), pool_size=(8, 8), crop_pct=0.904), + + 'mixnet_s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_s-a907afbc.pth'), + 'mixnet_m': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_m-4647fc68.pth'), + 'mixnet_l': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_l-5a9a2ed8.pth'), + 'mixnet_xl': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_xl_ra-aac3c00c.pth'), + 'mixnet_xxl': _cfg(), + + 'tf_mixnet_s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_s-89d3354b.pth'), + 'tf_mixnet_m': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_m-0f4d8805.pth'), + 'tf_mixnet_l': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_l-6c92e0c8.pth'), + + "tinynet_a": _cfg( + input_size=(3, 192, 192), pool_size=(6, 6), # int(224 * 0.86) + url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_a.pth'), + "tinynet_b": _cfg( + input_size=(3, 188, 188), pool_size=(6, 6), # int(224 * 0.84) + url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_b.pth'), + "tinynet_c": _cfg( + input_size=(3, 184, 184), pool_size=(6, 6), # int(224 * 0.825) + url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_c.pth'), + "tinynet_d": _cfg( + input_size=(3, 152, 152), pool_size=(5, 5), # int(224 * 0.68) + url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_d.pth'), + "tinynet_e": _cfg( + input_size=(3, 106, 106), pool_size=(4, 4), # int(224 * 0.475) + url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_e.pth'), +} + + +class EfficientNet(nn.Module): + """ (Generic) EfficientNet + + A flexible and performant PyTorch implementation of efficient network architectures, including: + * 
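Backing up to the `tinynet_*` entries that close the config table above: their odd-looking resolutions are just 224 scaled by a per-variant factor and truncated, exactly as the inline comments state. A one-line check:

    for name, scale in [('a', 0.86), ('b', 0.84), ('c', 0.825), ('d', 0.68), ('e', 0.475)]:
        print(f'tinynet_{name}: {int(224 * scale)}')  # 192, 188, 184, 152, 106

The `EfficientNet` class definition continues below.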
EfficientNet-V2 Small, Medium, Large, XL & B0-B3 + * EfficientNet B0-B8, L2 + * EfficientNet-EdgeTPU + * EfficientNet-CondConv + * MixNet S, M, L, XL + * MnasNet A1, B1, and small + * FBNet C + * Single-Path NAS Pixel1 + + """ + + def __init__(self, block_args, num_classes=1000, num_features=1280, in_chans=3, stem_size=32, fix_stem=False, + output_stride=32, pad_type='', round_chs_fn=round_channels, act_layer=None, norm_layer=None, + se_layer=None, drop_rate=0., drop_path_rate=0., global_pool='avg'): + super(EfficientNet, self).__init__() + act_layer = act_layer or nn.ReLU + norm_layer = norm_layer or nn.BatchNorm2d + se_layer = se_layer or SqueezeExcite + self.num_classes = num_classes + self.num_features = num_features + self.drop_rate = drop_rate + + # Stem + if not fix_stem: + stem_size = round_chs_fn(stem_size) + self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = norm_layer(stem_size) + self.act1 = act_layer(inplace=True) + + # Middle stages (IR/ER/DS Blocks) + builder = EfficientNetBuilder( + output_stride=output_stride, pad_type=pad_type, round_chs_fn=round_chs_fn, + act_layer=act_layer, norm_layer=norm_layer, se_layer=se_layer, drop_path_rate=drop_path_rate) + self.blocks = nn.Sequential(*builder(stem_size, block_args)) + self.feature_info = builder.features + head_chs = builder.in_chs + + # Head + Pooling + self.conv_head = create_conv2d(head_chs, self.num_features, 1, padding=pad_type) + self.bn2 = norm_layer(self.num_features) + self.act2 = act_layer(inplace=True) + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + efficientnet_init_weights(self) + + def as_sequential(self): + layers = [self.conv_stem, self.bn1, self.act1] + layers.extend(self.blocks) + layers.extend([self.conv_head, self.bn2, self.act2, self.global_pool]) + layers.extend([nn.Dropout(self.drop_rate), self.classifier]) + return nn.Sequential(*layers) + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + x = self.blocks(x) + x = self.conv_head(x) + x = self.bn2(x) + x = self.act2(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + return self.classifier(x) + + +class EfficientNetFeatures(nn.Module): + """ EfficientNet Feature Extractor + + A work-in-progress feature extraction module for EfficientNet, to use as a backbone for segmentation + and object detection models. 
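The class above follows the usual stem -> blocks -> head -> pool -> classifier layout, with `forward_features()` stopping after the head conv. A minimal usage sketch, assuming this vendored `timm` package is importable and using the `efficientnet_b0` entrypoint registered near the end of this file:

    import torch
    import timm

    model = timm.create_model('efficientnet_b0', pretrained=False)
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 224, 224))
    print(logits.shape)  # torch.Size([1, 1000])

    # forward_features() returns the pre-pooling map: (1, 1280, 7, 7) for B0 at 224.
    feats = model.forward_features(torch.randn(1, 3, 224, 224))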
+ """ + + def __init__(self, block_args, out_indices=(0, 1, 2, 3, 4), feature_location='bottleneck', in_chans=3, + stem_size=32, fix_stem=False, output_stride=32, pad_type='', round_chs_fn=round_channels, + act_layer=None, norm_layer=None, se_layer=None, drop_rate=0., drop_path_rate=0.): + super(EfficientNetFeatures, self).__init__() + act_layer = act_layer or nn.ReLU + norm_layer = norm_layer or nn.BatchNorm2d + se_layer = se_layer or SqueezeExcite + self.drop_rate = drop_rate + + # Stem + if not fix_stem: + stem_size = round_chs_fn(stem_size) + self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = norm_layer(stem_size) + self.act1 = act_layer(inplace=True) + + # Middle stages (IR/ER/DS Blocks) + builder = EfficientNetBuilder( + output_stride=output_stride, pad_type=pad_type, round_chs_fn=round_chs_fn, + act_layer=act_layer, norm_layer=norm_layer, se_layer=se_layer, drop_path_rate=drop_path_rate, + feature_location=feature_location) + self.blocks = nn.Sequential(*builder(stem_size, block_args)) + self.feature_info = FeatureInfo(builder.features, out_indices) + self._stage_out_idx = {v['stage']: i for i, v in enumerate(self.feature_info) if i in out_indices} + + efficientnet_init_weights(self) + + # Register feature extraction hooks with FeatureHooks helper + self.feature_hooks = None + if feature_location != 'bottleneck': + hooks = self.feature_info.get_dicts(keys=('module', 'hook_type')) + self.feature_hooks = FeatureHooks(hooks, self.named_modules()) + + def forward(self, x) -> List[torch.Tensor]: + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + if self.feature_hooks is None: + features = [] + if 0 in self._stage_out_idx: + features.append(x) # add stem out + for i, b in enumerate(self.blocks): + x = b(x) + if i + 1 in self._stage_out_idx: + features.append(x) + return features + else: + self.blocks(x) + out = self.feature_hooks.get_output(x.device) + return list(out.values()) + + +def _create_effnet(variant, pretrained=False, **kwargs): + features_only = False + model_cls = EfficientNet + kwargs_filter = None + if kwargs.pop('features_only', False): + features_only = True + kwargs_filter = ('num_classes', 'num_features', 'head_conv', 'global_pool') + model_cls = EfficientNetFeatures + model = build_model_with_cfg( + model_cls, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_strict=not features_only, + kwargs_filter=kwargs_filter, + **kwargs) + if features_only: + model.default_cfg = default_cfg_for_features(model.default_cfg) + return model + + +def _gen_mnasnet_a1(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a mnasnet-a1 model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet + Paper: https://arxiv.org/pdf/1807.11626.pdf. + + Args: + channel_multiplier: multiplier to number of channels per layer. 
+ """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_noskip'], + # stage 1, 112x112 in + ['ir_r2_k3_s2_e6_c24'], + # stage 2, 56x56 in + ['ir_r3_k5_s2_e3_c40_se0.25'], + # stage 3, 28x28 in + ['ir_r4_k3_s2_e6_c80'], + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c112_se0.25'], + # stage 5, 14x14in + ['ir_r3_k5_s2_e6_c160_se0.25'], + # stage 6, 7x7 in + ['ir_r1_k3_s1_e6_c320'], + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=32, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_mnasnet_b1(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a mnasnet-b1 model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet + Paper: https://arxiv.org/pdf/1807.11626.pdf. + + Args: + channel_multiplier: multiplier to number of channels per layer. + """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_c16_noskip'], + # stage 1, 112x112 in + ['ir_r3_k3_s2_e3_c24'], + # stage 2, 56x56 in + ['ir_r3_k5_s2_e3_c40'], + # stage 3, 28x28 in + ['ir_r3_k5_s2_e6_c80'], + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c96'], + # stage 5, 14x14in + ['ir_r4_k5_s2_e6_c192'], + # stage 6, 7x7 in + ['ir_r1_k3_s1_e6_c320_noskip'] + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=32, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_mnasnet_small(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a mnasnet-b1 model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet + Paper: https://arxiv.org/pdf/1807.11626.pdf. + + Args: + channel_multiplier: multiplier to number of channels per layer. 
+ """ + arch_def = [ + ['ds_r1_k3_s1_c8'], + ['ir_r1_k3_s2_e3_c16'], + ['ir_r2_k3_s2_e6_c16'], + ['ir_r4_k5_s2_e6_c32_se0.25'], + ['ir_r3_k3_s1_e6_c32_se0.25'], + ['ir_r3_k5_s2_e6_c88_se0.25'], + ['ir_r1_k3_s1_e6_c144'] + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=8, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_mobilenet_v2( + variant, channel_multiplier=1.0, depth_multiplier=1.0, fix_stem_head=False, pretrained=False, **kwargs): + """ Generate MobileNet-V2 network + Ref impl: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v2.py + Paper: https://arxiv.org/abs/1801.04381 + """ + arch_def = [ + ['ds_r1_k3_s1_c16'], + ['ir_r2_k3_s2_e6_c24'], + ['ir_r3_k3_s2_e6_c32'], + ['ir_r4_k3_s2_e6_c64'], + ['ir_r3_k3_s1_e6_c96'], + ['ir_r3_k3_s2_e6_c160'], + ['ir_r1_k3_s1_e6_c320'], + ] + round_chs_fn = partial(round_channels, multiplier=channel_multiplier) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier=depth_multiplier, fix_first_last=fix_stem_head), + num_features=1280 if fix_stem_head else round_chs_fn(1280), + stem_size=32, + fix_stem=fix_stem_head, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'relu6'), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_fbnetc(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """ FBNet-C + + Paper: https://arxiv.org/abs/1812.03443 + Ref Impl: https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_modeldef.py + + NOTE: the impl above does not relate to the 'C' variant here, that was derived from paper, + it was used to confirm some building block details + """ + arch_def = [ + ['ir_r1_k3_s1_e1_c16'], + ['ir_r1_k3_s2_e6_c24', 'ir_r2_k3_s1_e1_c24'], + ['ir_r1_k5_s2_e6_c32', 'ir_r1_k5_s1_e3_c32', 'ir_r1_k5_s1_e6_c32', 'ir_r1_k3_s1_e6_c32'], + ['ir_r1_k5_s2_e6_c64', 'ir_r1_k5_s1_e3_c64', 'ir_r2_k5_s1_e6_c64'], + ['ir_r3_k5_s1_e6_c112', 'ir_r1_k5_s1_e3_c112'], + ['ir_r4_k5_s2_e6_c184'], + ['ir_r1_k3_s1_e6_c352'], + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=16, + num_features=1984, # paper suggests this, but is not 100% clear + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_spnasnet(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates the Single-Path NAS model from search targeted for Pixel1 phone. + + Paper: https://arxiv.org/abs/1904.02877 + + Args: + channel_multiplier: multiplier to number of channels per layer. 
+ """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_c16_noskip'], + # stage 1, 112x112 in + ['ir_r3_k3_s2_e3_c24'], + # stage 2, 56x56 in + ['ir_r1_k5_s2_e6_c40', 'ir_r3_k3_s1_e3_c40'], + # stage 3, 28x28 in + ['ir_r1_k5_s2_e6_c80', 'ir_r3_k3_s1_e3_c80'], + # stage 4, 14x14in + ['ir_r1_k5_s1_e6_c96', 'ir_r3_k5_s1_e3_c96'], + # stage 5, 14x14in + ['ir_r4_k5_s2_e6_c192'], + # stage 6, 7x7 in + ['ir_r1_k3_s1_e6_c320_noskip'] + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=32, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnet(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """Creates an EfficientNet model. + + Ref impl: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py + Paper: https://arxiv.org/abs/1905.11946 + + EfficientNet params + name: (channel_multiplier, depth_multiplier, resolution, dropout_rate) + 'efficientnet-b0': (1.0, 1.0, 224, 0.2), + 'efficientnet-b1': (1.0, 1.1, 240, 0.2), + 'efficientnet-b2': (1.1, 1.2, 260, 0.3), + 'efficientnet-b3': (1.2, 1.4, 300, 0.3), + 'efficientnet-b4': (1.4, 1.8, 380, 0.4), + 'efficientnet-b5': (1.6, 2.2, 456, 0.4), + 'efficientnet-b6': (1.8, 2.6, 528, 0.5), + 'efficientnet-b7': (2.0, 3.1, 600, 0.5), + 'efficientnet-b8': (2.2, 3.6, 672, 0.5), + 'efficientnet-l2': (4.3, 5.3, 800, 0.5), + + Args: + channel_multiplier: multiplier to number of channels per layer + depth_multiplier: multiplier to number of repeats per stage + + """ + arch_def = [ + ['ds_r1_k3_s1_e1_c16_se0.25'], + ['ir_r2_k3_s2_e6_c24_se0.25'], + ['ir_r2_k5_s2_e6_c40_se0.25'], + ['ir_r3_k3_s2_e6_c80_se0.25'], + ['ir_r3_k5_s1_e6_c112_se0.25'], + ['ir_r4_k5_s2_e6_c192_se0.25'], + ['ir_r1_k3_s1_e6_c320_se0.25'], + ] + round_chs_fn = partial(round_channels, multiplier=channel_multiplier) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=round_chs_fn(1280), + stem_size=32, + round_chs_fn=round_chs_fn, + act_layer=resolve_act_layer(kwargs, 'swish'), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnet_edge(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """ Creates an EfficientNet-EdgeTPU model + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/edgetpu + """ + + arch_def = [ + # NOTE `fc` is present to override a mismatch between stem channels and in chs not + # present in other models + ['er_r1_k3_s1_e4_c24_fc24_noskip'], + ['er_r2_k3_s2_e8_c32'], + ['er_r4_k3_s2_e8_c48'], + ['ir_r5_k5_s2_e8_c96'], + ['ir_r4_k5_s1_e8_c144'], + ['ir_r2_k5_s2_e8_c192'], + ] + round_chs_fn = partial(round_channels, multiplier=channel_multiplier) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=round_chs_fn(1280), + stem_size=32, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'relu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnet_condconv( 
+ variant, channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=1, pretrained=False, **kwargs): + """Creates an EfficientNet-CondConv model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/condconv + """ + arch_def = [ + ['ds_r1_k3_s1_e1_c16_se0.25'], + ['ir_r2_k3_s2_e6_c24_se0.25'], + ['ir_r2_k5_s2_e6_c40_se0.25'], + ['ir_r3_k3_s2_e6_c80_se0.25'], + ['ir_r3_k5_s1_e6_c112_se0.25_cc4'], + ['ir_r4_k5_s2_e6_c192_se0.25_cc4'], + ['ir_r1_k3_s1_e6_c320_se0.25_cc4'], + ] + # NOTE unlike official impl, this one uses `cc<x>` option where x is the base number of experts for each stage and + the expert_multiplier increases that on a per-model basis as with depth/channel multipliers + round_chs_fn = partial(round_channels, multiplier=channel_multiplier) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, experts_multiplier=experts_multiplier), + num_features=round_chs_fn(1280), + stem_size=32, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'swish'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnet_lite(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """Creates an EfficientNet-Lite model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite + Paper: https://arxiv.org/abs/1905.11946 + + EfficientNet params + name: (channel_multiplier, depth_multiplier, resolution, dropout_rate) + 'efficientnet-lite0': (1.0, 1.0, 224, 0.2), + 'efficientnet-lite1': (1.0, 1.1, 240, 0.2), + 'efficientnet-lite2': (1.1, 1.2, 260, 0.3), + 'efficientnet-lite3': (1.2, 1.4, 280, 0.3), + 'efficientnet-lite4': (1.4, 1.8, 300, 0.3), + + Args: + channel_multiplier: multiplier to number of channels per layer + depth_multiplier: multiplier to number of repeats per stage + """ + arch_def = [ + ['ds_r1_k3_s1_e1_c16'], + ['ir_r2_k3_s2_e6_c24'], + ['ir_r2_k5_s2_e6_c40'], + ['ir_r3_k3_s2_e6_c80'], + ['ir_r3_k5_s1_e6_c112'], + ['ir_r4_k5_s2_e6_c192'], + ['ir_r1_k3_s1_e6_c320'], + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, fix_first_last=True), + num_features=1280, + stem_size=32, + fix_stem=True, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + act_layer=resolve_act_layer(kwargs, 'relu6'), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnetv2_base( + variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """ Creates an EfficientNet-V2 base model + + Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 + Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + """ + arch_def = [ + ['cn_r1_k3_s1_e1_c16_skip'], + ['er_r2_k3_s2_e4_c32'], + ['er_r2_k3_s2_e4_c48'], + ['ir_r3_k3_s2_e4_c96_se0.25'], + ['ir_r5_k3_s1_e6_c112_se0.25'], + ['ir_r8_k3_s2_e6_c192_se0.25'], + ] + round_chs_fn = partial(round_channels, multiplier=channel_multiplier, round_limit=0.) 
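The depth half of the scaling multiplies each stage's `rN` repeat count, taking the ceiling by default (the generators pass `depth_trunc='round'` only for MixNet-M and TinyNet). Note also `fix_first_last=True` in the Lite and MobileNetV2 generators, which exempts the first and last stage from depth scaling. A worked sketch; `scale_repeats` is a hypothetical simplification of what `decode_arch_def` does per stage:

    import math

    def scale_repeats(repeats, depth_multiplier, depth_trunc='ceil'):
        fn = math.ceil if depth_trunc == 'ceil' else round
        return [max(1, int(fn(r * depth_multiplier))) for r in repeats]

    b0 = [1, 2, 2, 3, 3, 4, 1]     # repeats from the B0 arch_def above
    print(scale_repeats(b0, 1.0))  # B0: unchanged
    print(scale_repeats(b0, 1.8))  # B4: [2, 4, 4, 6, 6, 8, 2]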
+ model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=round_chs_fn(1280), + stem_size=32, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'silu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnetv2_s( + variant, channel_multiplier=1.0, depth_multiplier=1.0, rw=False, pretrained=False, **kwargs): + """ Creates an EfficientNet-V2 Small model + + Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 + Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + + NOTE: `rw` flag sets up 'small' variant to behave like my initial v2 small model, + before the ref impl was released. + """ + arch_def = [ + ['cn_r2_k3_s1_e1_c24_skip'], + ['er_r4_k3_s2_e4_c48'], + ['er_r4_k3_s2_e4_c64'], + ['ir_r6_k3_s2_e4_c128_se0.25'], + ['ir_r9_k3_s1_e6_c160_se0.25'], + ['ir_r15_k3_s2_e6_c256_se0.25'], + ] + num_features = 1280 + if rw: + # my original variant, based on the paper figure, differs from the official release + arch_def[0] = ['er_r2_k3_s1_e1_c24'] + arch_def[-1] = ['ir_r15_k3_s2_e6_c272_se0.25'] + num_features = 1792 + + round_chs_fn = partial(round_channels, multiplier=channel_multiplier) + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=round_chs_fn(num_features), + stem_size=24, + round_chs_fn=round_chs_fn, + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'silu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnetv2_m(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """ Creates an EfficientNet-V2 Medium model + + Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 + Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + """ + + arch_def = [ + ['cn_r3_k3_s1_e1_c24_skip'], + ['er_r5_k3_s2_e4_c48'], + ['er_r5_k3_s2_e4_c80'], + ['ir_r7_k3_s2_e4_c160_se0.25'], + ['ir_r14_k3_s1_e6_c176_se0.25'], + ['ir_r18_k3_s2_e6_c304_se0.25'], + ['ir_r5_k3_s1_e6_c512_se0.25'], + ] + + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=1280, + stem_size=24, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'silu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnetv2_l(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """ Creates an EfficientNet-V2 Large model + + Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 + Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + """ + + arch_def = [ + ['cn_r4_k3_s1_e1_c32_skip'], + ['er_r7_k3_s2_e4_c64'], + ['er_r7_k3_s2_e4_c96'], + ['ir_r10_k3_s2_e4_c192_se0.25'], + ['ir_r19_k3_s1_e6_c224_se0.25'], + ['ir_r25_k3_s2_e6_c384_se0.25'], + ['ir_r7_k3_s1_e6_c640_se0.25'], + ] + + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=1280, + stem_size=32, + round_chs_fn=partial(round_channels, 
multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'silu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_efficientnetv2_xl(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """ Creates an EfficientNet-V2 Xtra-Large model + + Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 + Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 + """ + + arch_def = [ + ['cn_r4_k3_s1_e1_c32_skip'], + ['er_r8_k3_s2_e4_c64'], + ['er_r8_k3_s2_e4_c96'], + ['ir_r16_k3_s2_e4_c192_se0.25'], + ['ir_r24_k3_s1_e6_c256_se0.25'], + ['ir_r32_k3_s2_e6_c512_se0.25'], + ['ir_r8_k3_s1_e6_c640_se0.25'], + ] + + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=1280, + stem_size=32, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'silu'), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_mixnet_s(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a MixNet Small model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet + Paper: https://arxiv.org/abs/1907.09595 + """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16'], # relu + # stage 1, 112x112 in + ['ir_r1_k3_a1.1_p1.1_s2_e6_c24', 'ir_r1_k3_a1.1_p1.1_s1_e3_c24'], # relu + # stage 2, 56x56 in + ['ir_r1_k3.5.7_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish + # stage 3, 28x28 in + ['ir_r1_k3.5.7_p1.1_s2_e6_c80_se0.25_nsw', 'ir_r2_k3.5_p1.1_s1_e6_c80_se0.25_nsw'], # swish + # stage 4, 14x14in + ['ir_r1_k3.5.7_a1.1_p1.1_s1_e6_c120_se0.5_nsw', 'ir_r2_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish + # stage 5, 14x14in + ['ir_r1_k3.5.7.9.11_s2_e6_c200_se0.5_nsw', 'ir_r2_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish + # 7x7 + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + num_features=1536, + stem_size=16, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_mixnet_m(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """Creates a MixNet Medium-Large model. 
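The dotted kernel lists in the MixNet defs (`k3.5.7`, `k3.5.7.9.11`) request mixed depthwise convolution: channels are split into groups, each convolved at a different kernel size. A self-contained sketch of the idea; timm's actual layer is `MixedConv2d`, so treat this class as illustrative:

    import torch
    import torch.nn as nn

    class MixedDepthwiseConv(nn.Module):
        def __init__(self, channels, kernel_sizes=(3, 5, 7), stride=1):
            super().__init__()
            splits = [channels // len(kernel_sizes)] * len(kernel_sizes)
            splits[0] += channels - sum(splits)  # absorb any remainder
            self.splits = splits
            self.convs = nn.ModuleList(
                nn.Conv2d(ch, ch, k, stride=stride, padding=k // 2, groups=ch)
                for ch, k in zip(splits, kernel_sizes))

        def forward(self, x):
            parts = torch.split(x, self.splits, dim=1)
            return torch.cat([conv(p) for conv, p in zip(self.convs, parts)], dim=1)

    y = MixedDepthwiseConv(40, (3, 5))(torch.randn(1, 40, 56, 56))
    print(y.shape)  # torch.Size([1, 40, 56, 56])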
+ + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet + Paper: https://arxiv.org/abs/1907.09595 + """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c24'], # relu + # stage 1, 112x112 in + ['ir_r1_k3.5.7_a1.1_p1.1_s2_e6_c32', 'ir_r1_k3_a1.1_p1.1_s1_e3_c32'], # relu + # stage 2, 56x56 in + ['ir_r1_k3.5.7.9_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish + # stage 3, 28x28 in + ['ir_r1_k3.5.7_s2_e6_c80_se0.25_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e6_c80_se0.25_nsw'], # swish + # stage 4, 14x14in + ['ir_r1_k3_s1_e6_c120_se0.5_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish + # stage 5, 14x14in + ['ir_r1_k3.5.7.9_s2_e6_c200_se0.5_nsw', 'ir_r3_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish + # 7x7 + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'), + num_features=1536, + stem_size=24, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +def _gen_tinynet( + variant, model_width=1.0, depth_multiplier=1.0, pretrained=False, **kwargs +): + """Creates a TinyNet model. + """ + arch_def = [ + ['ds_r1_k3_s1_e1_c16_se0.25'], ['ir_r2_k3_s2_e6_c24_se0.25'], + ['ir_r2_k5_s2_e6_c40_se0.25'], ['ir_r3_k3_s2_e6_c80_se0.25'], + ['ir_r3_k5_s1_e6_c112_se0.25'], ['ir_r4_k5_s2_e6_c192_se0.25'], + ['ir_r1_k3_s1_e6_c320_se0.25'], + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'), + num_features=max(1280, round_channels(1280, model_width, 8, None)), + stem_size=32, + fix_stem=True, + round_chs_fn=partial(round_channels, multiplier=model_width), + act_layer=resolve_act_layer(kwargs, 'swish'), + norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + **kwargs, + ) + model = _create_effnet(variant, pretrained, **model_kwargs) + return model + + +@register_model +def mnasnet_050(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 0.5. """ + model = _gen_mnasnet_b1('mnasnet_050', 0.5, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mnasnet_075(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 0.75. """ + model = _gen_mnasnet_b1('mnasnet_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mnasnet_100(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 1.0. """ + model = _gen_mnasnet_b1('mnasnet_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mnasnet_b1(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 1.0. """ + return mnasnet_100(pretrained, **kwargs) + + +@register_model +def mnasnet_140(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 1.4 """ + model = _gen_mnasnet_b1('mnasnet_140', 1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def semnasnet_050(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 0.5 """ + model = _gen_mnasnet_a1('semnasnet_050', 0.5, pretrained=pretrained, **kwargs) + return model + + +@register_model +def semnasnet_075(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 0.75. 
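Everything from here to the end of the file is thin registration glue: each `@register_model` function pins down a variant name plus its multipliers and defers to one of the `_gen_*` builders above. The decorator records the factory in a name-to-entrypoint table that `create_model` later looks up. A sketch of the pattern; the real implementation is `timm/models/registry.py`, and the `_sketch` names are hypothetical:

    _entrypoints = {}

    def register_model_sketch(fn):
        _entrypoints[fn.__name__] = fn  # record factory under its function name
        return fn

    def create_model_sketch(name, pretrained=False, **kwargs):
        return _entrypoints[name](pretrained=pretrained, **kwargs)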
""" + model = _gen_mnasnet_a1('semnasnet_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def semnasnet_100(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 1.0. """ + model = _gen_mnasnet_a1('semnasnet_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mnasnet_a1(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 1.0. """ + return semnasnet_100(pretrained, **kwargs) + + +@register_model +def semnasnet_140(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 1.4. """ + model = _gen_mnasnet_a1('semnasnet_140', 1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mnasnet_small(pretrained=False, **kwargs): + """ MNASNet Small, depth multiplier of 1.0. """ + model = _gen_mnasnet_small('mnasnet_small', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_100(pretrained=False, **kwargs): + """ MobileNet V2 w/ 1.0 channel multiplier """ + model = _gen_mobilenet_v2('mobilenetv2_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_140(pretrained=False, **kwargs): + """ MobileNet V2 w/ 1.4 channel multiplier """ + model = _gen_mobilenet_v2('mobilenetv2_140', 1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_110d(pretrained=False, **kwargs): + """ MobileNet V2 w/ 1.1 channel, 1.2 depth multipliers""" + model = _gen_mobilenet_v2( + 'mobilenetv2_110d', 1.1, depth_multiplier=1.2, fix_stem_head=True, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv2_120d(pretrained=False, **kwargs): + """ MobileNet V2 w/ 1.2 channel, 1.4 depth multipliers """ + model = _gen_mobilenet_v2( + 'mobilenetv2_120d', 1.2, depth_multiplier=1.4, fix_stem_head=True, pretrained=pretrained, **kwargs) + return model + + +@register_model +def fbnetc_100(pretrained=False, **kwargs): + """ FBNet-C """ + if pretrained: + # pretrained model trained with non-default BN epsilon + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + model = _gen_fbnetc('fbnetc_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def spnasnet_100(pretrained=False, **kwargs): + """ Single-Path NAS Pixel1""" + model = _gen_spnasnet('spnasnet_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b0(pretrained=False, **kwargs): + """ EfficientNet-B0 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b1(pretrained=False, **kwargs): + """ EfficientNet-B1 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b2(pretrained=False, **kwargs): + """ EfficientNet-B2 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b2a(pretrained=False, **kwargs): + """ EfficientNet-B2 @ 288x288 w/ 1.0 test crop""" + # WARN this model def is deprecated, different train/test res + test crop handled 
by default_cfg now + return efficientnet_b2(pretrained=pretrained, **kwargs) + + +@register_model +def efficientnet_b3(pretrained=False, **kwargs): + """ EfficientNet-B3 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b3a(pretrained=False, **kwargs): + """ EfficientNet-B3 @ 320x320 w/ 1.0 test crop-pct """ + # WARN this model def is deprecated, different train/test res + test crop handled by default_cfg now + return efficientnet_b3(pretrained=pretrained, **kwargs) + + +@register_model +def efficientnet_b4(pretrained=False, **kwargs): + """ EfficientNet-B4 """ + # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b5(pretrained=False, **kwargs): + """ EfficientNet-B5 """ + # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b6(pretrained=False, **kwargs): + """ EfficientNet-B6 """ + # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b7(pretrained=False, **kwargs): + """ EfficientNet-B7 """ + # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b8(pretrained=False, **kwargs): + """ EfficientNet-B8 """ + # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_l2(pretrained=False, **kwargs): + """ EfficientNet-L2.""" + # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_l2', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_es(pretrained=False, **kwargs): + """ EfficientNet-Edge Small. """ + model = _gen_efficientnet_edge( + 'efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + +@register_model +def efficientnet_es_pruned(pretrained=False, **kwargs): + """ EfficientNet-Edge Small Pruned. For more info: https://github.com/DeGirum/pruned-models/releases/tag/efficientnet_v1.0""" + model = _gen_efficientnet_edge( + 'efficientnet_es_pruned', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + +@register_model +def efficientnet_em(pretrained=False, **kwargs): + """ EfficientNet-Edge-Medium. """ + model = _gen_efficientnet_edge( + 'efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_el(pretrained=False, **kwargs): + """ EfficientNet-Edge-Large. 
""" + model = _gen_efficientnet_edge( + 'efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + +@register_model +def efficientnet_el_pruned(pretrained=False, **kwargs): + """ EfficientNet-Edge-Large pruned. For more info: https://github.com/DeGirum/pruned-models/releases/tag/efficientnet_v1.0""" + model = _gen_efficientnet_edge( + 'efficientnet_el_pruned', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + +@register_model +def efficientnet_cc_b0_4e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B0 w/ 8 Experts """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet_condconv( + 'efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_cc_b0_8e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B0 w/ 8 Experts """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet_condconv( + 'efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_cc_b1_8e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B1 w/ 8 Experts """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet_condconv( + 'efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_lite0(pretrained=False, **kwargs): + """ EfficientNet-Lite0 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet_lite( + 'efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_lite1(pretrained=False, **kwargs): + """ EfficientNet-Lite1 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet_lite( + 'efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_lite2(pretrained=False, **kwargs): + """ EfficientNet-Lite2 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + model = _gen_efficientnet_lite( + 'efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_lite3(pretrained=False, **kwargs): + """ EfficientNet-Lite3 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + model = _gen_efficientnet_lite( + 'efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_lite4(pretrained=False, **kwargs): + """ EfficientNet-Lite4 """ + # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 + model = _gen_efficientnet_lite( + 'efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b1_pruned(pretrained=False, **kwargs): + """ EfficientNet-B1 Pruned. 
The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + variant = 'efficientnet_b1_pruned' + model = _gen_efficientnet( + variant, channel_multiplier=1.0, depth_multiplier=1.1, pruned=True, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b2_pruned(pretrained=False, **kwargs): + """ EfficientNet-B2 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'efficientnet_b2_pruned', channel_multiplier=1.1, depth_multiplier=1.2, pruned=True, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnet_b3_pruned(pretrained=False, **kwargs): + """ EfficientNet-B3 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'efficientnet_b3_pruned', channel_multiplier=1.2, depth_multiplier=1.4, pruned=True, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_rw_t(pretrained=False, **kwargs): + """ EfficientNet-V2 Tiny (Custom variant, tiny not in paper). """ + model = _gen_efficientnetv2_s( + 'efficientnetv2_rw_t', channel_multiplier=0.8, depth_multiplier=0.9, rw=False, pretrained=pretrained, **kwargs) + return model + + +@register_model +def gc_efficientnetv2_rw_t(pretrained=False, **kwargs): + """ EfficientNet-V2 Tiny w/ Global Context Attn (Custom variant, tiny not in paper). """ + model = _gen_efficientnetv2_s( + 'gc_efficientnetv2_rw_t', channel_multiplier=0.8, depth_multiplier=0.9, + rw=False, se_layer='gc', pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_rw_s(pretrained=False, **kwargs): + """ EfficientNet-V2 Small (RW variant). + NOTE: This is my initial (pre official code release) w/ some differences. + See efficientnetv2_s and tf_efficientnetv2_s for versions that match the official w/ PyTorch vs TF padding + """ + model = _gen_efficientnetv2_s('efficientnetv2_rw_s', rw=True, pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_rw_m(pretrained=False, **kwargs): + """ EfficientNet-V2 Medium (RW variant). + """ + model = _gen_efficientnetv2_s( + 'efficientnetv2_rw_m', channel_multiplier=1.2, depth_multiplier=(1.2,) * 4 + (1.6,) * 2, rw=True, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_s(pretrained=False, **kwargs): + """ EfficientNet-V2 Small. """ + model = _gen_efficientnetv2_s('efficientnetv2_s', pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_m(pretrained=False, **kwargs): + """ EfficientNet-V2 Medium. """ + model = _gen_efficientnetv2_m('efficientnetv2_m', pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_l(pretrained=False, **kwargs): + """ EfficientNet-V2 Large. """ + model = _gen_efficientnetv2_l('efficientnetv2_l', pretrained=pretrained, **kwargs) + return model + + +@register_model +def efficientnetv2_xl(pretrained=False, **kwargs): + """ EfficientNet-V2 Xtra-Large. """ + model = _gen_efficientnetv2_xl('efficientnetv2_xl', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b0(pretrained=False, **kwargs): + """ EfficientNet-B0. 
Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b1(pretrained=False, **kwargs): + """ EfficientNet-B1. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b2(pretrained=False, **kwargs): + """ EfficientNet-B2. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b3(pretrained=False, **kwargs): + """ EfficientNet-B3. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b4(pretrained=False, **kwargs): + """ EfficientNet-B4. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b5(pretrained=False, **kwargs): + """ EfficientNet-B5. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b6(pretrained=False, **kwargs): + """ EfficientNet-B6. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b7(pretrained=False, **kwargs): + """ EfficientNet-B7. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b8(pretrained=False, **kwargs): + """ EfficientNet-B8. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b0_ap(pretrained=False, **kwargs): + """ EfficientNet-B0 AdvProp. 
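All the `tf_*` registrations in this stretch differ from their PyTorch-native twins in just two kwargs: `bn_eps` is set to the TensorFlow default (1e-3 in timm, vs PyTorch's 1e-5) so BatchNorm statistics match the ported weights, and `pad_type='same'` emulates TF SAME padding, which can pad asymmetrically on strided convs. A sketch of that padding arithmetic; `same_pad_amount` is a hypothetical helper mirroring what timm's dynamic same-padding computes:

    import math

    def same_pad_amount(in_size, kernel, stride, dilation=1):
        # TF 'SAME': choose total padding so out = ceil(in / stride); the total
        # may be odd, with the extra pixel going to the bottom/right.
        out = math.ceil(in_size / stride)
        return max((out - 1) * stride + (kernel - 1) * dilation + 1 - in_size, 0)

    print(same_pad_amount(225, 3, 2))  # 2 -> split evenly (1, 1)
    print(same_pad_amount(224, 3, 2))  # 1 -> asymmetric (0 left/top, 1 right/bottom)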
Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b0_ap', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b1_ap(pretrained=False, **kwargs): + """ EfficientNet-B1 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b1_ap', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b2_ap(pretrained=False, **kwargs): + """ EfficientNet-B2 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b2_ap', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b3_ap(pretrained=False, **kwargs): + """ EfficientNet-B3 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b3_ap', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b4_ap(pretrained=False, **kwargs): + """ EfficientNet-B4 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b4_ap', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b5_ap(pretrained=False, **kwargs): + """ EfficientNet-B5 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b5_ap', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b6_ap(pretrained=False, **kwargs): + """ EfficientNet-B6 AdvProp. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b6_ap', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b7_ap(pretrained=False, **kwargs): + """ EfficientNet-B7 AdvProp. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b7_ap', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b8_ap(pretrained=False, **kwargs): + """ EfficientNet-B8 AdvProp. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b8_ap', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b0_ns(pretrained=False, **kwargs): + """ EfficientNet-B0 NoisyStudent. 
Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b0_ns', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b1_ns(pretrained=False, **kwargs): + """ EfficientNet-B1 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b1_ns', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b2_ns(pretrained=False, **kwargs): + """ EfficientNet-B2 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b2_ns', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b3_ns(pretrained=False, **kwargs): + """ EfficientNet-B3 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b3_ns', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b4_ns(pretrained=False, **kwargs): + """ EfficientNet-B4 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b4_ns', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b5_ns(pretrained=False, **kwargs): + """ EfficientNet-B5 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b5_ns', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b6_ns(pretrained=False, **kwargs): + """ EfficientNet-B6 NoisyStudent. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b6_ns', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_b7_ns(pretrained=False, **kwargs): + """ EfficientNet-B7 NoisyStudent. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b7_ns', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_l2_ns_475(pretrained=False, **kwargs): + """ EfficientNet-L2 NoisyStudent @ 475x475. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_l2_ns_475', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_l2_ns(pretrained=False, **kwargs): + """ EfficientNet-L2 NoisyStudent. 
Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_l2_ns', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_es(pretrained=False, **kwargs): + """ EfficientNet-Edge Small. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_edge( + 'tf_efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_em(pretrained=False, **kwargs): + """ EfficientNet-Edge-Medium. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_edge( + 'tf_efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_el(pretrained=False, **kwargs): + """ EfficientNet-Edge-Large. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_edge( + 'tf_efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_cc_b0_4e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B0 w/ 4 Experts. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_condconv( + 'tf_efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_cc_b0_8e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B0 w/ 8 Experts. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_condconv( + 'tf_efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_cc_b1_8e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B1 w/ 8 Experts. 
Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_condconv( + 'tf_efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2, + pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_lite0(pretrained=False, **kwargs): + """ EfficientNet-Lite0 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_lite1(pretrained=False, **kwargs): + """ EfficientNet-Lite1 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_lite2(pretrained=False, **kwargs): + """ EfficientNet-Lite2 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_lite3(pretrained=False, **kwargs): + """ EfficientNet-Lite3 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnet_lite4(pretrained=False, **kwargs): + """ EfficientNet-Lite4 """ + # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + + +@register_model +def tf_efficientnetv2_s(pretrained=False, **kwargs): + """ EfficientNet-V2 Small. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_s('tf_efficientnetv2_s', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_m(pretrained=False, **kwargs): + """ EfficientNet-V2 Medium. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_m('tf_efficientnetv2_m', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_l(pretrained=False, **kwargs): + """ EfficientNet-V2 Large. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_l('tf_efficientnetv2_l', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_s_in21ft1k(pretrained=False, **kwargs): + """ EfficientNet-V2 Small. Pretrained on ImageNet-21k, fine-tuned on 1k. 
Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_s('tf_efficientnetv2_s_in21ft1k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_m_in21ft1k(pretrained=False, **kwargs): + """ EfficientNet-V2 Medium. Pretrained on ImageNet-21k, fine-tuned on 1k. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_m('tf_efficientnetv2_m_in21ft1k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_l_in21ft1k(pretrained=False, **kwargs): + """ EfficientNet-V2 Large. Pretrained on ImageNet-21k, fine-tuned on 1k. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_l('tf_efficientnetv2_l_in21ft1k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_xl_in21ft1k(pretrained=False, **kwargs): + """ EfficientNet-V2 Xtra-Large. Pretrained on ImageNet-21k, fine-tuned on 1k. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_xl('tf_efficientnetv2_xl_in21ft1k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_s_in21k(pretrained=False, **kwargs): + """ EfficientNet-V2 Small w/ ImageNet-21k pretrained weights. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_s('tf_efficientnetv2_s_in21k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_m_in21k(pretrained=False, **kwargs): + """ EfficientNet-V2 Medium w/ ImageNet-21k pretrained weights. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_m('tf_efficientnetv2_m_in21k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_l_in21k(pretrained=False, **kwargs): + """ EfficientNet-V2 Large w/ ImageNet-21k pretrained weights. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_l('tf_efficientnetv2_l_in21k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_xl_in21k(pretrained=False, **kwargs): + """ EfficientNet-V2 Xtra-Large w/ ImageNet-21k pretrained weights. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_xl('tf_efficientnetv2_xl_in21k', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_b0(pretrained=False, **kwargs): + """ EfficientNet-V2-B0. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_base('tf_efficientnetv2_b0', pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_b1(pretrained=False, **kwargs): + """ EfficientNet-V2-B1. 
Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_base( + 'tf_efficientnetv2_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_b2(pretrained=False, **kwargs): + """ EfficientNet-V2-B2. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_base( + 'tf_efficientnetv2_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_efficientnetv2_b3(pretrained=False, **kwargs): + """ EfficientNet-V2-B3. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnetv2_base( + 'tf_efficientnetv2_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mixnet_s(pretrained=False, **kwargs): + """Creates a MixNet Small model. + """ + model = _gen_mixnet_s( + 'mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mixnet_m(pretrained=False, **kwargs): + """Creates a MixNet Medium model. + """ + model = _gen_mixnet_m( + 'mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mixnet_l(pretrained=False, **kwargs): + """Creates a MixNet Large model. + """ + model = _gen_mixnet_m( + 'mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mixnet_xl(pretrained=False, **kwargs): + """Creates a MixNet Extra-Large model. + Not a paper spec, experimental def by RW w/ depth scaling. + """ + model = _gen_mixnet_m( + 'mixnet_xl', channel_multiplier=1.6, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mixnet_xxl(pretrained=False, **kwargs): + """Creates a MixNet Double Extra Large model. + Not a paper spec, experimental def by RW w/ depth scaling. + """ + model = _gen_mixnet_m( + 'mixnet_xxl', channel_multiplier=2.4, depth_multiplier=1.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mixnet_s(pretrained=False, **kwargs): + """Creates a MixNet Small model. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mixnet_s( + 'tf_mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mixnet_m(pretrained=False, **kwargs): + """Creates a MixNet Medium model. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mixnet_m( + 'tf_mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mixnet_l(pretrained=False, **kwargs): + """Creates a MixNet Large model. 
Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mixnet_m( + 'tf_mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tinynet_a(pretrained=False, **kwargs): + model = _gen_tinynet('tinynet_a', 1.0, 1.2, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tinynet_b(pretrained=False, **kwargs): + model = _gen_tinynet('tinynet_b', 0.75, 1.1, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tinynet_c(pretrained=False, **kwargs): + model = _gen_tinynet('tinynet_c', 0.54, 0.85, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tinynet_d(pretrained=False, **kwargs): + model = _gen_tinynet('tinynet_d', 0.54, 0.695, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tinynet_e(pretrained=False, **kwargs): + model = _gen_tinynet('tinynet_e', 0.51, 0.6, pretrained=pretrained, **kwargs) + return model diff --git a/timm/models/efficientnet_blocks.py b/timm/models/efficientnet_blocks.py new file mode 100644 index 0000000..b1fec44 --- /dev/null +++ b/timm/models/efficientnet_blocks.py @@ -0,0 +1,323 @@ +""" EfficientNet, MobileNetV3, etc Blocks + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +import torch.nn as nn +from torch.nn import functional as F + +from .layers import create_conv2d, drop_path, make_divisible, create_act_layer +from .layers.activations import sigmoid + +__all__ = [ + 'SqueezeExcite', 'ConvBnAct', 'DepthwiseSeparableConv', 'InvertedResidual', 'CondConvResidual', 'EdgeResidual'] + + +class SqueezeExcite(nn.Module): + """ Squeeze-and-Excitation w/ specific features for EfficientNet/MobileNet family + + Args: + in_chs (int): input channels to layer + rd_ratio (float): ratio of squeeze reduction + act_layer (nn.Module): activation layer of containing block + gate_layer (Callable): attention gate function + force_act_layer (nn.Module): override block's activation fn if this is set/bound + rd_round_fn (Callable): specify a fn to calculate rounding of reduced chs + """ + + def __init__( + self, in_chs, rd_ratio=0.25, rd_channels=None, act_layer=nn.ReLU, + gate_layer=nn.Sigmoid, force_act_layer=None, rd_round_fn=None): + super(SqueezeExcite, self).__init__() + if rd_channels is None: + rd_round_fn = rd_round_fn or round + rd_channels = rd_round_fn(in_chs * rd_ratio) + act_layer = force_act_layer or act_layer + self.conv_reduce = nn.Conv2d(in_chs, rd_channels, 1, bias=True) + self.act1 = create_act_layer(act_layer, inplace=True) + self.conv_expand = nn.Conv2d(rd_channels, in_chs, 1, bias=True) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_se = x.mean((2, 3), keepdim=True) + x_se = self.conv_reduce(x_se) + x_se = self.act1(x_se) + x_se = self.conv_expand(x_se) + return x * self.gate(x_se) + + +class ConvBnAct(nn.Module): + """ Conv + Norm Layer + Activation w/ optional skip connection + """ + def __init__( + self, in_chs, out_chs, kernel_size, stride=1, dilation=1, pad_type='', + skip=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, drop_path_rate=0.): + super(ConvBnAct, self).__init__() + self.has_residual = skip and stride == 1 and in_chs == out_chs + self.drop_path_rate = drop_path_rate + self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, padding=pad_type) + self.bn1 = norm_layer(out_chs) + self.act1 = act_layer(inplace=True) + + def feature_info(self, location): + if location == 
'expansion':  # output of conv after act, same as block output
+            info = dict(module='act1', hook_type='forward', num_chs=self.conv.out_channels)
+        else:  # location == 'bottleneck', block output
+            info = dict(module='', hook_type='', num_chs=self.conv.out_channels)
+        return info
+
+    def forward(self, x):
+        shortcut = x
+        x = self.conv(x)
+        x = self.bn1(x)
+        x = self.act1(x)
+        if self.has_residual:
+            if self.drop_path_rate > 0.:
+                x = drop_path(x, self.drop_path_rate, self.training)
+            x += shortcut
+        return x
+
+
+class DepthwiseSeparableConv(nn.Module):
+    """ DepthwiseSeparable block
+    Used for DS convs in MobileNet-V1 and in the place of IR blocks that have no expansion
+    (factor of 1.0). This is an alternative to having an IR with an optional first pw conv.
+    """
+    def __init__(
+            self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, pad_type='',
+            noskip=False, pw_kernel_size=1, pw_act=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d,
+            se_layer=None, drop_path_rate=0.):
+        super(DepthwiseSeparableConv, self).__init__()
+        self.has_residual = (stride == 1 and in_chs == out_chs) and not noskip
+        self.has_pw_act = pw_act  # activation after point-wise conv
+        self.drop_path_rate = drop_path_rate
+
+        self.conv_dw = create_conv2d(
+            in_chs, in_chs, dw_kernel_size, stride=stride, dilation=dilation, padding=pad_type, depthwise=True)
+        self.bn1 = norm_layer(in_chs)
+        self.act1 = act_layer(inplace=True)
+
+        # Squeeze-and-excitation
+        self.se = se_layer(in_chs, act_layer=act_layer) if se_layer else nn.Identity()
+
+        self.conv_pw = create_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type)
+        self.bn2 = norm_layer(out_chs)
+        self.act2 = act_layer(inplace=True) if self.has_pw_act else nn.Identity()
+
+    def feature_info(self, location):
+        if location == 'expansion':  # after SE, input to PW
+            info = dict(module='conv_pw', hook_type='forward_pre', num_chs=self.conv_pw.in_channels)
+        else:  # location == 'bottleneck', block output
+            info = dict(module='', hook_type='', num_chs=self.conv_pw.out_channels)
+        return info
+
+    def forward(self, x):
+        shortcut = x
+
+        x = self.conv_dw(x)
+        x = self.bn1(x)
+        x = self.act1(x)
+
+        x = self.se(x)
+
+        x = self.conv_pw(x)
+        x = self.bn2(x)
+        x = self.act2(x)
+
+        if self.has_residual:
+            if self.drop_path_rate > 0.:
+                x = drop_path(x, self.drop_path_rate, self.training)
+            x += shortcut
+        return x
+
+
+class InvertedResidual(nn.Module):
+    """ Inverted residual block w/ optional SE
+
+    Originally used in MobileNet-V2 - https://arxiv.org/abs/1801.04381v4, this layer is often
+    referred to as 'MBConv' (Mobile inverted bottleneck conv) and is also used in
+      * MNasNet - https://arxiv.org/abs/1807.11626
+      * EfficientNet - https://arxiv.org/abs/1905.11946
+      * MobileNet-V3 - https://arxiv.org/abs/1905.02244
+    """
+
+    def __init__(
+            self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, pad_type='',
+            noskip=False, exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, act_layer=nn.ReLU,
+            norm_layer=nn.BatchNorm2d, se_layer=None, conv_kwargs=None, drop_path_rate=0.):
+        super(InvertedResidual, self).__init__()
+        conv_kwargs = conv_kwargs or {}
+        mid_chs = make_divisible(in_chs * exp_ratio)
+        self.has_residual = (in_chs == out_chs and stride == 1) and not noskip
+        self.drop_path_rate = drop_path_rate
+
+        # Point-wise expansion
+        self.conv_pw = create_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type, **conv_kwargs)
+        self.bn1 = norm_layer(mid_chs)
+        self.act1 = act_layer(inplace=True)
+
+        # Depth-wise convolution
+        self.conv_dw = create_conv2d(
+
mid_chs, mid_chs, dw_kernel_size, stride=stride, dilation=dilation, + padding=pad_type, depthwise=True, **conv_kwargs) + self.bn2 = norm_layer(mid_chs) + self.act2 = act_layer(inplace=True) + + # Squeeze-and-excitation + self.se = se_layer(mid_chs, act_layer=act_layer) if se_layer else nn.Identity() + + # Point-wise linear projection + self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type, **conv_kwargs) + self.bn3 = norm_layer(out_chs) + + def feature_info(self, location): + if location == 'expansion': # after SE, input to PWL + info = dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels) + else: # location == 'bottleneck', block output + info = dict(module='', hook_type='', num_chs=self.conv_pwl.out_channels) + return info + + def forward(self, x): + shortcut = x + + # Point-wise expansion + x = self.conv_pw(x) + x = self.bn1(x) + x = self.act1(x) + + # Depth-wise convolution + x = self.conv_dw(x) + x = self.bn2(x) + x = self.act2(x) + + # Squeeze-and-excitation + x = self.se(x) + + # Point-wise linear projection + x = self.conv_pwl(x) + x = self.bn3(x) + + if self.has_residual: + if self.drop_path_rate > 0.: + x = drop_path(x, self.drop_path_rate, self.training) + x += shortcut + + return x + + +class CondConvResidual(InvertedResidual): + """ Inverted residual block w/ CondConv routing""" + + def __init__( + self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, pad_type='', + noskip=False, exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2d, se_layer=None, num_experts=0, drop_path_rate=0.): + + self.num_experts = num_experts + conv_kwargs = dict(num_experts=self.num_experts) + + super(CondConvResidual, self).__init__( + in_chs, out_chs, dw_kernel_size=dw_kernel_size, stride=stride, dilation=dilation, pad_type=pad_type, + act_layer=act_layer, noskip=noskip, exp_ratio=exp_ratio, exp_kernel_size=exp_kernel_size, + pw_kernel_size=pw_kernel_size, se_layer=se_layer, norm_layer=norm_layer, conv_kwargs=conv_kwargs, + drop_path_rate=drop_path_rate) + + self.routing_fn = nn.Linear(in_chs, self.num_experts) + + def forward(self, x): + shortcut = x + + # CondConv routing + pooled_inputs = F.adaptive_avg_pool2d(x, 1).flatten(1) + routing_weights = torch.sigmoid(self.routing_fn(pooled_inputs)) + + # Point-wise expansion + x = self.conv_pw(x, routing_weights) + x = self.bn1(x) + x = self.act1(x) + + # Depth-wise convolution + x = self.conv_dw(x, routing_weights) + x = self.bn2(x) + x = self.act2(x) + + # Squeeze-and-excitation + x = self.se(x) + + # Point-wise linear projection + x = self.conv_pwl(x, routing_weights) + x = self.bn3(x) + + if self.has_residual: + if self.drop_path_rate > 0.: + x = drop_path(x, self.drop_path_rate, self.training) + x += shortcut + return x + + +class EdgeResidual(nn.Module): + """ Residual block with expansion convolution followed by pointwise-linear w/ stride + + Originally introduced in `EfficientNet-EdgeTPU: Creating Accelerator-Optimized Neural Networks with AutoML` + - https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html + + This layer is also called FusedMBConv in the MobileDet, EfficientNet-X, and EfficientNet-V2 papers + * MobileDet - https://arxiv.org/abs/2004.14525 + * EfficientNet-X - https://arxiv.org/abs/2102.05610 + * EfficientNet-V2 - https://arxiv.org/abs/2104.00298 + """ + + def __init__( + self, in_chs, out_chs, exp_kernel_size=3, stride=1, dilation=1, pad_type='', + force_in_chs=0, noskip=False, exp_ratio=1.0, 
pw_kernel_size=1, act_layer=nn.ReLU,
+            norm_layer=nn.BatchNorm2d, se_layer=None, drop_path_rate=0.):
+        super(EdgeResidual, self).__init__()
+        if force_in_chs > 0:
+            mid_chs = make_divisible(force_in_chs * exp_ratio)
+        else:
+            mid_chs = make_divisible(in_chs * exp_ratio)
+        self.has_residual = (in_chs == out_chs and stride == 1) and not noskip
+        self.drop_path_rate = drop_path_rate
+
+        # Expansion convolution
+        self.conv_exp = create_conv2d(
+            in_chs, mid_chs, exp_kernel_size, stride=stride, dilation=dilation, padding=pad_type)
+        self.bn1 = norm_layer(mid_chs)
+        self.act1 = act_layer(inplace=True)
+
+        # Squeeze-and-excitation
+        self.se = se_layer(mid_chs, act_layer=act_layer) if se_layer else nn.Identity()
+
+        # Point-wise linear projection
+        self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type)
+        self.bn2 = norm_layer(out_chs)
+
+    def feature_info(self, location):
+        if location == 'expansion':  # after SE, before PWL
+            info = dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels)
+        else:  # location == 'bottleneck', block output
+            info = dict(module='', hook_type='', num_chs=self.conv_pwl.out_channels)
+        return info
+
+    def forward(self, x):
+        shortcut = x
+
+        # Expansion convolution
+        x = self.conv_exp(x)
+        x = self.bn1(x)
+        x = self.act1(x)
+
+        # Squeeze-and-excitation
+        x = self.se(x)
+
+        # Point-wise linear projection
+        x = self.conv_pwl(x)
+        x = self.bn2(x)
+
+        if self.has_residual:
+            if self.drop_path_rate > 0.:
+                x = drop_path(x, self.drop_path_rate, self.training)
+            x += shortcut
+
+        return x

diff --git a/timm/models/efficientnet_builder.py b/timm/models/efficientnet_builder.py
new file mode 100644
index 0000000..a23e827
--- /dev/null
+++ b/timm/models/efficientnet_builder.py
@@ -0,0 +1,463 @@
+""" EfficientNet, MobileNetV3, etc Builder
+
+Assembles EfficientNet and related network feature blocks from string definitions.
+Handles stride, dilation calculations, and selects feature extraction points.
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+
+import logging
+import math
+import re
+from copy import deepcopy
+from functools import partial
+
+import torch.nn as nn
+
+from .efficientnet_blocks import *
+from .layers import CondConv2d, get_condconv_initializer, get_act_layer, get_attn, make_divisible
+
+__all__ = ["EfficientNetBuilder", "decode_arch_def", "efficientnet_init_weights",
+           'resolve_bn_args', 'resolve_act_layer', 'round_channels', 'BN_MOMENTUM_TF_DEFAULT', 'BN_EPS_TF_DEFAULT']
+
+_logger = logging.getLogger(__name__)
+
+
+_DEBUG_BUILDER = False
+
+# Defaults used for Google/Tensorflow training of mobile networks w/ RMSprop as per
+# papers and TF reference implementations.
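+# As an illustrative sketch only (not used by the builder), a BatchNorm2d constructed
+# with these TF defaults would be:
+#   torch.nn.BatchNorm2d(32, momentum=BN_MOMENTUM_TF_DEFAULT, eps=BN_EPS_TF_DEFAULT)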
+# PT momentum equiv for TF decay is (1 - TF decay)
+# NOTE: momentum varies between .99 and .9997 depending on source
+# .99 in official TF TPU impl
+# .9997 (w/ .999 in search space) for paper
+BN_MOMENTUM_TF_DEFAULT = 1 - 0.99
+BN_EPS_TF_DEFAULT = 1e-3
+_BN_ARGS_TF = dict(momentum=BN_MOMENTUM_TF_DEFAULT, eps=BN_EPS_TF_DEFAULT)
+
+
+def get_bn_args_tf():
+    return _BN_ARGS_TF.copy()
+
+
+def resolve_bn_args(kwargs):
+    bn_args = get_bn_args_tf() if kwargs.pop('bn_tf', False) else {}
+    bn_momentum = kwargs.pop('bn_momentum', None)
+    if bn_momentum is not None:
+        bn_args['momentum'] = bn_momentum
+    bn_eps = kwargs.pop('bn_eps', None)
+    if bn_eps is not None:
+        bn_args['eps'] = bn_eps
+    return bn_args
+
+
+def resolve_act_layer(kwargs, default='relu'):
+    return get_act_layer(kwargs.pop('act_layer', default))
+
+
+def round_channels(channels, multiplier=1.0, divisor=8, channel_min=None, round_limit=0.9):
+    """Round number of filters based on width (channel) multiplier."""
+    if not multiplier:
+        return channels
+    return make_divisible(channels * multiplier, divisor, channel_min, round_limit=round_limit)
+
+
+def _log_info_if(msg, condition):
+    if condition:
+        _logger.info(msg)
+
+
+def _parse_ksize(ss):
+    if ss.isdigit():
+        return int(ss)
+    else:
+        return [int(k) for k in ss.split('.')]
+
+
+def _decode_block_str(block_str):
+    """ Decode block definition string
+
+    Gets a list of block arg (dicts) through a string notation of arguments.
+    E.g. ir_r2_k3_s2_e1_i32_o16_se0.25_noskip
+
+    All args can exist in any order with the exception of the leading string which
+    is assumed to indicate the block type.
+
+    leading string - block type (
+      ir = InvertedResidual, ds = DepthwiseSep, dsa = DepthwiseSep with pw act, cn = ConvBnAct)
+    r - number of repeat blocks,
+    k - kernel size,
+    s - strides (1-9),
+    e - expansion ratio,
+    c - output channels,
+    se - squeeze/excitation ratio,
+    n - activation fn ('re', 'r6', 'hs', or 'sw')
+    Args:
+        block_str: a string representation of block arguments.
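+
+        As an illustrative decode of the notation above (a sketch, not verified output):
+        'ir_r2_k3_s2_e1_i32_o16_se0.25_noskip' yields an InvertedResidual arg dict
+        repeated twice, with 3x3 dw kernel, stride 2, expansion ratio 1.0, 16 output
+        channels, SE ratio 0.25, and the skip connection forced off.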
+ Returns: + A list of block args (dicts) + Raises: + ValueError: if the string def not properly specified (TODO) + """ + assert isinstance(block_str, str) + ops = block_str.split('_') + block_type = ops[0] # take the block type off the front + ops = ops[1:] + options = {} + skip = None + for op in ops: + # string options being checked on individual basis, combine if they grow + if op == 'noskip': + skip = False # force no skip connection + elif op == 'skip': + skip = True # force a skip connection + elif op.startswith('n'): + # activation fn + key = op[0] + v = op[1:] + if v == 're': + value = get_act_layer('relu') + elif v == 'r6': + value = get_act_layer('relu6') + elif v == 'hs': + value = get_act_layer('hard_swish') + elif v == 'sw': + value = get_act_layer('swish') # aka SiLU + elif v == 'mi': + value = get_act_layer('mish') + else: + continue + options[key] = value + else: + # all numeric options + splits = re.split(r'(\d.*)', op) + if len(splits) >= 2: + key, value = splits[:2] + options[key] = value + + # if act_layer is None, the model default (passed to model init) will be used + act_layer = options['n'] if 'n' in options else None + exp_kernel_size = _parse_ksize(options['a']) if 'a' in options else 1 + pw_kernel_size = _parse_ksize(options['p']) if 'p' in options else 1 + force_in_chs = int(options['fc']) if 'fc' in options else 0 # FIXME hack to deal with in_chs issue in TPU def + + num_repeat = int(options['r']) + # each type of block has different valid arguments, fill accordingly + if block_type == 'ir': + block_args = dict( + block_type=block_type, + dw_kernel_size=_parse_ksize(options['k']), + exp_kernel_size=exp_kernel_size, + pw_kernel_size=pw_kernel_size, + out_chs=int(options['c']), + exp_ratio=float(options['e']), + se_ratio=float(options['se']) if 'se' in options else 0., + stride=int(options['s']), + act_layer=act_layer, + noskip=skip is False, + ) + if 'cc' in options: + block_args['num_experts'] = int(options['cc']) + elif block_type == 'ds' or block_type == 'dsa': + block_args = dict( + block_type=block_type, + dw_kernel_size=_parse_ksize(options['k']), + pw_kernel_size=pw_kernel_size, + out_chs=int(options['c']), + se_ratio=float(options['se']) if 'se' in options else 0., + stride=int(options['s']), + act_layer=act_layer, + pw_act=block_type == 'dsa', + noskip=block_type == 'dsa' or skip is False, + ) + elif block_type == 'er': + block_args = dict( + block_type=block_type, + exp_kernel_size=_parse_ksize(options['k']), + pw_kernel_size=pw_kernel_size, + out_chs=int(options['c']), + exp_ratio=float(options['e']), + force_in_chs=force_in_chs, + se_ratio=float(options['se']) if 'se' in options else 0., + stride=int(options['s']), + act_layer=act_layer, + noskip=skip is False, + ) + elif block_type == 'cn': + block_args = dict( + block_type=block_type, + kernel_size=int(options['k']), + out_chs=int(options['c']), + stride=int(options['s']), + act_layer=act_layer, + skip=skip is True, + ) + else: + assert False, 'Unknown block type (%s)' % block_type + + return block_args, num_repeat + + +def _scale_stage_depth(stack_args, repeats, depth_multiplier=1.0, depth_trunc='ceil'): + """ Per-stage depth scaling + Scales the block repeats in each stage. This depth scaling impl maintains + compatibility with the EfficientNet scaling method, while allowing sensible + scaling for other models that may have multiple block arg definitions in each stage. 
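+
+    Rough worked example (assuming the default 'ceil' truncation): repeats=[1, 4] with
+    depth_multiplier=1.2 scales the stage total from 5 to ceil(6.0) == 6 repeats; the
+    reverse-order distribution below then assigns [1, 5], so the later block definition
+    absorbs the extra repeat.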
+ """ + + # We scale the total repeat count for each stage, there may be multiple + # block arg defs per stage so we need to sum. + num_repeat = sum(repeats) + if depth_trunc == 'round': + # Truncating to int by rounding allows stages with few repeats to remain + # proportionally smaller for longer. This is a good choice when stage definitions + # include single repeat stages that we'd prefer to keep that way as long as possible + num_repeat_scaled = max(1, round(num_repeat * depth_multiplier)) + else: + # The default for EfficientNet truncates repeats to int via 'ceil'. + # Any multiplier > 1.0 will result in an increased depth for every stage. + num_repeat_scaled = int(math.ceil(num_repeat * depth_multiplier)) + + # Proportionally distribute repeat count scaling to each block definition in the stage. + # Allocation is done in reverse as it results in the first block being less likely to be scaled. + # The first block makes less sense to repeat in most of the arch definitions. + repeats_scaled = [] + for r in repeats[::-1]: + rs = max(1, round((r / num_repeat * num_repeat_scaled))) + repeats_scaled.append(rs) + num_repeat -= r + num_repeat_scaled -= rs + repeats_scaled = repeats_scaled[::-1] + + # Apply the calculated scaling to each block arg in the stage + sa_scaled = [] + for ba, rep in zip(stack_args, repeats_scaled): + sa_scaled.extend([deepcopy(ba) for _ in range(rep)]) + return sa_scaled + + +def decode_arch_def(arch_def, depth_multiplier=1.0, depth_trunc='ceil', experts_multiplier=1, fix_first_last=False): + arch_args = [] + if isinstance(depth_multiplier, tuple): + assert len(depth_multiplier) == len(arch_def) + else: + depth_multiplier = (depth_multiplier,) * len(arch_def) + for stack_idx, (block_strings, multiplier) in enumerate(zip(arch_def, depth_multiplier)): + assert isinstance(block_strings, list) + stack_args = [] + repeats = [] + for block_str in block_strings: + assert isinstance(block_str, str) + ba, rep = _decode_block_str(block_str) + if ba.get('num_experts', 0) > 0 and experts_multiplier > 1: + ba['num_experts'] *= experts_multiplier + stack_args.append(ba) + repeats.append(rep) + if fix_first_last and (stack_idx == 0 or stack_idx == len(arch_def) - 1): + arch_args.append(_scale_stage_depth(stack_args, repeats, 1.0, depth_trunc)) + else: + arch_args.append(_scale_stage_depth(stack_args, repeats, multiplier, depth_trunc)) + return arch_args + + +class EfficientNetBuilder: + """ Build Trunk Blocks + + This ended up being somewhat of a cross between + https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_models.py + and + https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_builder.py + + """ + def __init__(self, output_stride=32, pad_type='', round_chs_fn=round_channels, se_from_exp=False, + act_layer=None, norm_layer=None, se_layer=None, drop_path_rate=0., feature_location=''): + self.output_stride = output_stride + self.pad_type = pad_type + self.round_chs_fn = round_chs_fn + self.se_from_exp = se_from_exp # calculate se channel reduction from expanded (mid) chs + self.act_layer = act_layer + self.norm_layer = norm_layer + self.se_layer = get_attn(se_layer) + try: + self.se_layer(8, rd_ratio=1.0) # test if attn layer accepts rd_ratio arg + self.se_has_ratio = True + except TypeError: + self.se_has_ratio = False + self.drop_path_rate = drop_path_rate + if feature_location == 'depthwise': + # old 'depthwise' mode renamed 'expansion' to match TF impl, old expansion mode didn't make sense + 
_logger.warning("feature_location=='depthwise' is deprecated, using 'expansion'") + feature_location = 'expansion' + self.feature_location = feature_location + assert feature_location in ('bottleneck', 'expansion', '') + self.verbose = _DEBUG_BUILDER + + # state updated during build, consumed by model + self.in_chs = None + self.features = [] + + def _make_block(self, ba, block_idx, block_count): + drop_path_rate = self.drop_path_rate * block_idx / block_count + bt = ba.pop('block_type') + ba['in_chs'] = self.in_chs + ba['out_chs'] = self.round_chs_fn(ba['out_chs']) + if 'force_in_chs' in ba and ba['force_in_chs']: + # NOTE this is a hack to work around mismatch in TF EdgeEffNet impl + ba['force_in_chs'] = self.round_chs_fn(ba['force_in_chs']) + ba['pad_type'] = self.pad_type + # block act fn overrides the model default + ba['act_layer'] = ba['act_layer'] if ba['act_layer'] is not None else self.act_layer + assert ba['act_layer'] is not None + ba['norm_layer'] = self.norm_layer + ba['drop_path_rate'] = drop_path_rate + if bt != 'cn': + se_ratio = ba.pop('se_ratio') + if se_ratio and self.se_layer is not None: + if not self.se_from_exp: + # adjust se_ratio by expansion ratio if calculating se channels from block input + se_ratio /= ba.get('exp_ratio', 1.0) + if self.se_has_ratio: + ba['se_layer'] = partial(self.se_layer, rd_ratio=se_ratio) + else: + ba['se_layer'] = self.se_layer + + if bt == 'ir': + _log_info_if(' InvertedResidual {}, Args: {}'.format(block_idx, str(ba)), self.verbose) + block = CondConvResidual(**ba) if ba.get('num_experts', 0) else InvertedResidual(**ba) + elif bt == 'ds' or bt == 'dsa': + _log_info_if(' DepthwiseSeparable {}, Args: {}'.format(block_idx, str(ba)), self.verbose) + block = DepthwiseSeparableConv(**ba) + elif bt == 'er': + _log_info_if(' EdgeResidual {}, Args: {}'.format(block_idx, str(ba)), self.verbose) + block = EdgeResidual(**ba) + elif bt == 'cn': + _log_info_if(' ConvBnAct {}, Args: {}'.format(block_idx, str(ba)), self.verbose) + block = ConvBnAct(**ba) + else: + assert False, 'Uknkown block type (%s) while building model.' % bt + + self.in_chs = ba['out_chs'] # update in_chs for arg of next block + return block + + def __call__(self, in_chs, model_block_args): + """ Build the blocks + Args: + in_chs: Number of input-channels passed to first block + model_block_args: A list of lists, outer list defines stages, inner + list contains strings defining block configuration(s) + Return: + List of block stacks (each stack wrapped in nn.Sequential) + """ + _log_info_if('Building model trunk with %d stages...' 
% len(model_block_args), self.verbose) + self.in_chs = in_chs + total_block_count = sum([len(x) for x in model_block_args]) + total_block_idx = 0 + current_stride = 2 + current_dilation = 1 + stages = [] + if model_block_args[0][0]['stride'] > 1: + # if the first block starts with a stride, we need to extract first level feat from stem + feature_info = dict( + module='act1', num_chs=in_chs, stage=0, reduction=current_stride, + hook_type='forward' if self.feature_location != 'bottleneck' else '') + self.features.append(feature_info) + + # outer list of block_args defines the stacks + for stack_idx, stack_args in enumerate(model_block_args): + last_stack = stack_idx + 1 == len(model_block_args) + _log_info_if('Stack: {}'.format(stack_idx), self.verbose) + assert isinstance(stack_args, list) + + blocks = [] + # each stack (stage of blocks) contains a list of block arguments + for block_idx, block_args in enumerate(stack_args): + last_block = block_idx + 1 == len(stack_args) + _log_info_if(' Block: {}'.format(block_idx), self.verbose) + + assert block_args['stride'] in (1, 2) + if block_idx >= 1: # only the first block in any stack can have a stride > 1 + block_args['stride'] = 1 + + extract_features = False + if last_block: + next_stack_idx = stack_idx + 1 + extract_features = next_stack_idx >= len(model_block_args) or \ + model_block_args[next_stack_idx][0]['stride'] > 1 + + next_dilation = current_dilation + if block_args['stride'] > 1: + next_output_stride = current_stride * block_args['stride'] + if next_output_stride > self.output_stride: + next_dilation = current_dilation * block_args['stride'] + block_args['stride'] = 1 + _log_info_if(' Converting stride to dilation to maintain output_stride=={}'.format( + self.output_stride), self.verbose) + else: + current_stride = next_output_stride + block_args['dilation'] = current_dilation + if next_dilation != current_dilation: + current_dilation = next_dilation + + # create the block + block = self._make_block(block_args, total_block_idx, total_block_count) + blocks.append(block) + + # stash feature module name and channel info for model feature extraction + if extract_features: + feature_info = dict( + stage=stack_idx + 1, reduction=current_stride, **block.feature_info(self.feature_location)) + module_name = f'blocks.{stack_idx}.{block_idx}' + leaf_name = feature_info.get('module', '') + feature_info['module'] = '.'.join([module_name, leaf_name]) if leaf_name else module_name + self.features.append(feature_info) + + total_block_idx += 1 # incr global block idx (across all stacks) + stages.append(nn.Sequential(*blocks)) + return stages + + +def _init_weight_goog(m, n='', fix_group_fanout=True): + """ Weight initialization as per Tensorflow official implementations. 
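+
+    In short: conv weights are drawn from a zero-mean normal with std sqrt(2 / fan_out),
+    where fan_out = kernel_h * kernel_w * out_channels (divided by groups when
+    fix_group_fanout is set), mirroring the TF TPU reference initializers.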
+ + Args: + m (nn.Module): module to init + n (str): module name + fix_group_fanout (bool): enable correct (matching Tensorflow TPU impl) fanout calculation w/ group convs + + Handles layers in EfficientNet, EfficientNet-CondConv, MixNet, MnasNet, MobileNetV3, etc: + * https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_model.py + * https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py + """ + if isinstance(m, CondConv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + if fix_group_fanout: + fan_out //= m.groups + init_weight_fn = get_condconv_initializer( + lambda w: nn.init.normal_(w, 0, math.sqrt(2.0 / fan_out)), m.num_experts, m.weight_shape) + init_weight_fn(m.weight) + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + if fix_group_fanout: + fan_out //= m.groups + nn.init.normal_(m.weight, 0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + fan_out = m.weight.size(0) # fan-out + fan_in = 0 + if 'routing_fn' in n: + fan_in = m.weight.size(1) + init_range = 1.0 / math.sqrt(fan_in + fan_out) + nn.init.uniform_(m.weight, -init_range, init_range) + nn.init.zeros_(m.bias) + + +def efficientnet_init_weights(model: nn.Module, init_fn=None): + init_fn = init_fn or _init_weight_goog + for n, m in model.named_modules(): + init_fn(m, n) + diff --git a/timm/models/factory.py b/timm/models/factory.py new file mode 100644 index 0000000..d040a9f --- /dev/null +++ b/timm/models/factory.py @@ -0,0 +1,86 @@ +from .registry import is_model, is_model_in_modules, model_entrypoint +from .helpers import load_checkpoint +from .layers import set_layer_config +from .hub import load_model_config_from_hf + + +def split_model_name(model_name): + model_split = model_name.split(':', 1) + if len(model_split) == 1: + return '', model_split[0] + else: + source_name, model_name = model_split + assert source_name in ('timm', 'hf_hub') + return source_name, model_name + + +def safe_model_name(model_name, remove_source=True): + def make_safe(name): + return ''.join(c if c.isalnum() else '_' for c in name).rstrip('_') + if remove_source: + model_name = split_model_name(model_name)[-1] + return make_safe(model_name) + + +def create_model( + model_name, + pretrained=False, + checkpoint_path='', + scriptable=None, + exportable=None, + no_jit=None, + **kwargs): + """Create a model + + Args: + model_name (str): name of model to instantiate + pretrained (bool): load pretrained ImageNet-1k weights if true + checkpoint_path (str): path of checkpoint to load after model is initialized + scriptable (bool): set layer config so that model is jit scriptable (not working for all models yet) + exportable (bool): set layer config so that model is traceable / ONNX exportable (not fully impl/obeyed yet) + no_jit (bool): set layer config so that model doesn't utilize jit scripted layers (so far activations only) + + Keyword Args: + drop_rate (float): dropout rate for training (default: 0.0) + global_pool (str): global pool type (default: 'avg') + **: other kwargs are model specific + """ + source_name, model_name = split_model_name(model_name) + + # Only EfficientNet and MobileNetV3 models have support for batchnorm params or drop_connect_rate passed as args + is_efficientnet = is_model_in_modules(model_name, 
['efficientnet', 'mobilenetv3']) + if not is_efficientnet: + kwargs.pop('bn_tf', None) + kwargs.pop('bn_momentum', None) + kwargs.pop('bn_eps', None) + + # handle backwards compat with drop_connect -> drop_path change + drop_connect_rate = kwargs.pop('drop_connect_rate', None) + if drop_connect_rate is not None and kwargs.get('drop_path_rate', None) is None: + print("WARNING: 'drop_connect' as an argument is deprecated, please use 'drop_path'." + " Setting drop_path to %f." % drop_connect_rate) + kwargs['drop_path_rate'] = drop_connect_rate + + # Parameters that aren't supported by all models or are intended to only override model defaults if set + # should default to None in command line args/cfg. Remove them if they are present and not set so that + # non-supporting models don't break and default args remain in effect. + kwargs = {k: v for k, v in kwargs.items() if v is not None} + + if source_name == 'hf_hub': + # For model names specified in the form `hf_hub:path/architecture_name#revision`, + # load model weights + default_cfg from Hugging Face hub. + hf_default_cfg, model_name = load_model_config_from_hf(model_name) + kwargs['external_default_cfg'] = hf_default_cfg # FIXME revamp default_cfg interface someday + + if is_model(model_name): + create_fn = model_entrypoint(model_name) + else: + raise RuntimeError('Unknown model (%s)' % model_name) + + with set_layer_config(scriptable=scriptable, exportable=exportable, no_jit=no_jit): + model = create_fn(pretrained=pretrained, **kwargs) + + if checkpoint_path: + load_checkpoint(model, checkpoint_path) + + return model diff --git a/timm/models/features.py b/timm/models/features.py new file mode 100644 index 0000000..b1d6890 --- /dev/null +++ b/timm/models/features.py @@ -0,0 +1,284 @@ +""" PyTorch Feature Extraction Helpers + +A collection of classes, functions, modules to help extract features from models +and provide a common interface for describing them. 
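+
+A hypothetical usage sketch (assuming the usual timm create_model entry point wires
+these classes up via features_only):
+
+    model = timm.create_model('resnet50', features_only=True, out_indices=(1, 2, 3))
+    feats = model(torch.randn(1, 3, 224, 224))  # list of 3 feature tensors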
+
+The return_layers, module re-writing idea inspired by torchvision IntermediateLayerGetter
+https://github.com/pytorch/vision/blob/d88d8961ae51507d0cb680329d985b1488b1b76b/torchvision/models/_utils.py
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+from collections import OrderedDict, defaultdict
+from copy import deepcopy
+from functools import partial
+from typing import Dict, List, Tuple
+
+import torch
+import torch.nn as nn
+
+
+class FeatureInfo:
+
+    def __init__(self, feature_info: List[Dict], out_indices: Tuple[int]):
+        prev_reduction = 1
+        for fi in feature_info:
+            # sanity check the mandatory fields, there may be additional fields depending on the model
+            assert 'num_chs' in fi and fi['num_chs'] > 0
+            assert 'reduction' in fi and fi['reduction'] >= prev_reduction
+            prev_reduction = fi['reduction']
+            assert 'module' in fi
+        self.out_indices = out_indices
+        self.info = feature_info
+
+    def from_other(self, out_indices: Tuple[int]):
+        return FeatureInfo(deepcopy(self.info), out_indices)
+
+    def get(self, key, idx=None):
+        """ Get value by key at specified index (indices)
+        if idx is None, returns value for key at each output index
+        if idx is an integer, return value for that feature module index (ignoring output indices)
+        if idx is a list/tuple, return value for each module index (ignoring output indices)
+        """
+        if idx is None:
+            return [self.info[i][key] for i in self.out_indices]
+        if isinstance(idx, (tuple, list)):
+            return [self.info[i][key] for i in idx]
+        else:
+            return self.info[idx][key]
+
+    def get_dicts(self, keys=None, idx=None):
+        """ return info dicts for specified keys (or all if None) at specified indices (or out_indices if None)
+        """
+        if idx is None:
+            if keys is None:
+                return [self.info[i] for i in self.out_indices]
+            else:
+                return [{k: self.info[i][k] for k in keys} for i in self.out_indices]
+        if isinstance(idx, (tuple, list)):
+            return [self.info[i] if keys is None else {k: self.info[i][k] for k in keys} for i in idx]
+        else:
+            return self.info[idx] if keys is None else {k: self.info[idx][k] for k in keys}
+
+    def channels(self, idx=None):
+        """ feature channels accessor
+        """
+        return self.get('num_chs', idx)
+
+    def reduction(self, idx=None):
+        """ feature reduction (output stride) accessor
+        """
+        return self.get('reduction', idx)
+
+    def module_name(self, idx=None):
+        """ feature module name accessor
+        """
+        return self.get('module', idx)
+
+    def __getitem__(self, item):
+        return self.info[item]
+
+    def __len__(self):
+        return len(self.info)
+
+
+class FeatureHooks:
+    """ Feature Hook Helper
+
+    This module helps with the setup and extraction of hooks for extracting features from
+    internal nodes in a model by node name. This works quite well in eager Python but needs
+    redesign for torchscript.
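+
+    Sketch of the expected flow (names illustrative): construct with hook dicts such as
+    [{'module': 'blocks.2', 'hook_type': 'forward'}] plus model.named_modules(); after a
+    forward pass, get_output(device) returns the captured tensors keyed by module name
+    (or out_map entry) and clears the internal buffer.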
+ """ + + def __init__(self, hooks, named_modules, out_map=None, default_hook_type='forward'): + # setup feature hooks + modules = {k: v for k, v in named_modules} + for i, h in enumerate(hooks): + hook_name = h['module'] + m = modules[hook_name] + hook_id = out_map[i] if out_map else hook_name + hook_fn = partial(self._collect_output_hook, hook_id) + hook_type = h['hook_type'] if 'hook_type' in h else default_hook_type + if hook_type == 'forward_pre': + m.register_forward_pre_hook(hook_fn) + elif hook_type == 'forward': + m.register_forward_hook(hook_fn) + else: + assert False, "Unsupported hook type" + self._feature_outputs = defaultdict(OrderedDict) + + def _collect_output_hook(self, hook_id, *args): + x = args[-1] # tensor we want is last argument, output for fwd, input for fwd_pre + if isinstance(x, tuple): + x = x[0] # unwrap input tuple + self._feature_outputs[x.device][hook_id] = x + + def get_output(self, device) -> Dict[str, torch.tensor]: + output = self._feature_outputs[device] + self._feature_outputs[device] = OrderedDict() # clear after reading + return output + + +def _module_list(module, flatten_sequential=False): + # a yield/iter would be better for this but wouldn't be compatible with torchscript + ml = [] + for name, module in module.named_children(): + if flatten_sequential and isinstance(module, nn.Sequential): + # first level of Sequential containers is flattened into containing model + for child_name, child_module in module.named_children(): + combined = [name, child_name] + ml.append(('_'.join(combined), '.'.join(combined), child_module)) + else: + ml.append((name, name, module)) + return ml + + +def _get_feature_info(net, out_indices): + feature_info = getattr(net, 'feature_info') + if isinstance(feature_info, FeatureInfo): + return feature_info.from_other(out_indices) + elif isinstance(feature_info, (list, tuple)): + return FeatureInfo(net.feature_info, out_indices) + else: + assert False, "Provided feature_info is not valid" + + +def _get_return_layers(feature_info, out_map): + module_names = feature_info.module_name() + return_layers = {} + for i, name in enumerate(module_names): + return_layers[name] = out_map[i] if out_map is not None else feature_info.out_indices[i] + return return_layers + + +class FeatureDictNet(nn.ModuleDict): + """ Feature extractor with OrderedDict return + + Wrap a model and extract features as specified by the out indices, the network is + partially re-built from contained modules. + + There is a strong assumption that the modules have been registered into the model in the same + order as they are used. There should be no reuse of the same nn.Module more than once, including + trivial modules like `self.relu = nn.ReLU`. + + Only submodules that are directly assigned to the model class (`model.feature1`) or at most + one Sequential container deep (`model.features.1`, with flatten_sequent=True) can be captured. 
+ All Sequential containers that are directly assigned to the original model will have their + modules assigned to this module with the name `model.features.1` being changed to `model.features_1` + + Arguments: + model (nn.Module): model from which we will extract the features + out_indices (tuple[int]): model output indices to extract features for + out_map (sequence): list or tuple specifying desired return id for each out index, + otherwise str(index) is used + feature_concat (bool): whether to concatenate intermediate features that are lists or tuples + vs select element [0] + flatten_sequential (bool): whether to flatten sequential modules assigned to model + """ + def __init__( + self, model, + out_indices=(0, 1, 2, 3, 4), out_map=None, feature_concat=False, flatten_sequential=False): + super(FeatureDictNet, self).__init__() + self.feature_info = _get_feature_info(model, out_indices) + self.concat = feature_concat + self.return_layers = {} + return_layers = _get_return_layers(self.feature_info, out_map) + modules = _module_list(model, flatten_sequential=flatten_sequential) + remaining = set(return_layers.keys()) + layers = OrderedDict() + for new_name, old_name, module in modules: + layers[new_name] = module + if old_name in remaining: + # return id has to be consistently str type for torchscript + self.return_layers[new_name] = str(return_layers[old_name]) + remaining.remove(old_name) + if not remaining: + break + assert not remaining and len(self.return_layers) == len(return_layers), \ + f'Return layers ({remaining}) are not present in model' + self.update(layers) + + def _collect(self, x) -> (Dict[str, torch.Tensor]): + out = OrderedDict() + for name, module in self.items(): + x = module(x) + if name in self.return_layers: + out_id = self.return_layers[name] + if isinstance(x, (tuple, list)): + # If model tap is a tuple or list, concat or select first element + # FIXME this may need to be more generic / flexible for some nets + out[out_id] = torch.cat(x, 1) if self.concat else x[0] + else: + out[out_id] = x + return out + + def forward(self, x) -> Dict[str, torch.Tensor]: + return self._collect(x) + + +class FeatureListNet(FeatureDictNet): + """ Feature extractor with list return + + See docstring for FeatureDictNet above, this class exists only to appease Torchscript typing constraints. + In eager Python we could have returned List[Tensor] vs Dict[id, Tensor] based on a member bool. + """ + def __init__( + self, model, + out_indices=(0, 1, 2, 3, 4), out_map=None, feature_concat=False, flatten_sequential=False): + super(FeatureListNet, self).__init__( + model, out_indices=out_indices, out_map=out_map, feature_concat=feature_concat, + flatten_sequential=flatten_sequential) + + def forward(self, x) -> (List[torch.Tensor]): + return list(self._collect(x).values()) + + +class FeatureHookNet(nn.ModuleDict): + """ FeatureHookNet + + Wrap a model and extract features specified by the out indices using forward/forward-pre hooks. + + If `no_rewrite` is True, features are extracted via hooks without modifying the underlying + network in any way. + + If `no_rewrite` is False, the model will be re-written as in the + FeatureList/FeatureDict case by folding first to second (Sequential only) level modules into this one. 
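+
+    A minimal usage sketch (again with a hypothetical `net` carrying `feature_info`;
+    with no_rewrite=True the body runs intact and hooks capture the tapped outputs):
+
+        fh_net = FeatureHookNet(net, out_indices=(0, 2), no_rewrite=True)
+        feats = fh_net(torch.randn(1, 3, 224, 224))  # list of hooked tensors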
+
+    FIXME this does not currently work with Torchscript, see FeatureHooks class
+    """
+    def __init__(
+            self, model,
+            out_indices=(0, 1, 2, 3, 4), out_map=None, out_as_dict=False, no_rewrite=False,
+            feature_concat=False, flatten_sequential=False, default_hook_type='forward'):
+        super(FeatureHookNet, self).__init__()
+        assert not torch.jit.is_scripting()
+        self.feature_info = _get_feature_info(model, out_indices)
+        self.out_as_dict = out_as_dict
+        layers = OrderedDict()
+        hooks = []
+        if no_rewrite:
+            assert not flatten_sequential
+            if hasattr(model, 'reset_classifier'):  # make sure classifier is removed?
+                model.reset_classifier(0)
+            layers['body'] = model
+            hooks.extend(self.feature_info.get_dicts())
+        else:
+            modules = _module_list(model, flatten_sequential=flatten_sequential)
+            remaining = {f['module']: f['hook_type'] if 'hook_type' in f else default_hook_type
+                         for f in self.feature_info.get_dicts()}
+            for new_name, old_name, module in modules:
+                layers[new_name] = module
+                for fn, fm in module.named_modules(prefix=old_name):
+                    if fn in remaining:
+                        hooks.append(dict(module=fn, hook_type=remaining[fn]))
+                        del remaining[fn]
+                if not remaining:
+                    break
+            assert not remaining, f'Return layers ({remaining}) are not present in model'
+        self.update(layers)
+        self.hooks = FeatureHooks(hooks, model.named_modules(), out_map=out_map)
+
+    def forward(self, x):
+        for name, module in self.items():
+            x = module(x)
+        out = self.hooks.get_output(x.device)
+        return out if self.out_as_dict else list(out.values())
diff --git a/timm/models/fx_features.py b/timm/models/fx_features.py
new file mode 100644
index 0000000..5a25ee3
--- /dev/null
+++ b/timm/models/fx_features.py
@@ -0,0 +1,73 @@
+""" PyTorch FX Based Feature Extraction Helpers
+Using https://pytorch.org/vision/stable/feature_extraction.html
+"""
+from typing import Callable
+from torch import nn
+
+from .features import _get_feature_info
+
+try:
+    from torchvision.models.feature_extraction import create_feature_extractor
+    has_fx_feature_extraction = True
+except ImportError:
+    has_fx_feature_extraction = False
+
+# Layers we want to treat as leaf modules
+from .layers import Conv2dSame, ScaledStdConv2dSame, BatchNormAct2d, BlurPool2d, CondConv2d, StdConv2dSame, DropPath
+from .layers.non_local_attn import BilinearAttnTransform
+from .layers.pool2d_same import MaxPool2dSame, AvgPool2dSame
+
+# NOTE: By default, any modules from timm.models.layers that we want to treat as leaf modules go here
+# BUT modules from timm.models should use the registration mechanism below
+_leaf_modules = {
+    BatchNormAct2d,  # reason: flow control for jit scripting
+    BilinearAttnTransform,  # reason: flow control t <= 1
+    BlurPool2d,  # reason: TypeError: F.conv2d received Proxy in groups=x.shape[1]
+    # Reason: get_same_padding has a max which raises a control flow error
+    Conv2dSame, MaxPool2dSame, ScaledStdConv2dSame, StdConv2dSame, AvgPool2dSame,
+    CondConv2d,  # reason: TypeError: F.conv2d received Proxy in groups=self.groups * B (because B = x.shape[0])
+    DropPath,  # reason: TypeError: rand received Proxy in `size` argument
+}
+
+try:
+    from .layers import InplaceAbn
+    _leaf_modules.add(InplaceAbn)
+except ImportError:
+    pass
+
+
+def register_notrace_module(module: nn.Module):
+    """
+    Any module not under timm.models.layers should get this decorator if we don't want to trace through it.
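+
+    Usage sketch (`MyFlowControlModule` is a hypothetical module with data-dependent
+    control flow that FX symbolic tracing cannot handle):
+
+        @register_notrace_module
+        class MyFlowControlModule(nn.Module):
+            ...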
+ """ + _leaf_modules.add(module) + return module + + +# Functions we want to autowrap (treat them as leaves) +_autowrap_functions = set() + + +def register_notrace_function(func: Callable): + """ + Decorator for functions which ought not to be traced through + """ + _autowrap_functions.add(func) + return func + + +class FeatureGraphNet(nn.Module): + def __init__(self, model, out_indices, out_map=None): + super().__init__() + assert has_fx_feature_extraction, 'Please update to PyTorch 1.10+, torchvision 0.11+ for FX feature extraction' + self.feature_info = _get_feature_info(model, out_indices) + if out_map is not None: + assert len(out_map) == len(out_indices) + return_nodes = {info['module']: out_map[i] if out_map is not None else info['module'] + for i, info in enumerate(self.feature_info) if i in out_indices} + self.graph_module = create_feature_extractor( + model, return_nodes, + tracer_kwargs={'leaf_modules': list(_leaf_modules), 'autowrap_functions': list(_autowrap_functions)}) + + def forward(self, x): + return list(self.graph_module(x).values()) diff --git a/timm/models/ghostnet.py b/timm/models/ghostnet.py new file mode 100644 index 0000000..3b6f90a --- /dev/null +++ b/timm/models/ghostnet.py @@ -0,0 +1,276 @@ +""" +An implementation of GhostNet Model as defined in: +GhostNet: More Features from Cheap Operations. https://arxiv.org/abs/1911.11907 +The train script of the model is similar to that of MobileNetV3 +Original model: https://github.com/huawei-noah/CV-backbones/tree/master/ghostnet_pytorch +""" +import math +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .layers import SelectAdaptivePool2d, Linear, make_divisible +from .efficientnet_blocks import SqueezeExcite, ConvBnAct +from .helpers import build_model_with_cfg +from .registry import register_model + + +__all__ = ['GhostNet'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (1, 1), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv_stem', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = { + 'ghostnet_050': _cfg(url=''), + 'ghostnet_100': _cfg( + url='https://github.com/huawei-noah/CV-backbones/releases/download/ghostnet_pth/ghostnet_1x.pth'), + 'ghostnet_130': _cfg(url=''), +} + + +_SE_LAYER = partial(SqueezeExcite, gate_layer='hard_sigmoid', rd_round_fn=partial(make_divisible, divisor=4)) + + +class GhostModule(nn.Module): + def __init__(self, inp, oup, kernel_size=1, ratio=2, dw_size=3, stride=1, relu=True): + super(GhostModule, self).__init__() + self.oup = oup + init_channels = math.ceil(oup / ratio) + new_channels = init_channels * (ratio - 1) + + self.primary_conv = nn.Sequential( + nn.Conv2d(inp, init_channels, kernel_size, stride, kernel_size//2, bias=False), + nn.BatchNorm2d(init_channels), + nn.ReLU(inplace=True) if relu else nn.Sequential(), + ) + + self.cheap_operation = nn.Sequential( + nn.Conv2d(init_channels, new_channels, dw_size, 1, dw_size//2, groups=init_channels, bias=False), + nn.BatchNorm2d(new_channels), + nn.ReLU(inplace=True) if relu else nn.Sequential(), + ) + + def forward(self, x): + x1 = self.primary_conv(x) + x2 = self.cheap_operation(x1) + out = torch.cat([x1, x2], dim=1) + return out[:, :self.oup, :, :] + + +class GhostBottleneck(nn.Module): + """ Ghost bottleneck w/ optional SE""" + + def 
__init__(self, in_chs, mid_chs, out_chs, dw_kernel_size=3, + stride=1, act_layer=nn.ReLU, se_ratio=0.): + super(GhostBottleneck, self).__init__() + has_se = se_ratio is not None and se_ratio > 0. + self.stride = stride + + # Point-wise expansion + self.ghost1 = GhostModule(in_chs, mid_chs, relu=True) + + # Depth-wise convolution + if self.stride > 1: + self.conv_dw = nn.Conv2d( + mid_chs, mid_chs, dw_kernel_size, stride=stride, + padding=(dw_kernel_size-1)//2, groups=mid_chs, bias=False) + self.bn_dw = nn.BatchNorm2d(mid_chs) + else: + self.conv_dw = None + self.bn_dw = None + + # Squeeze-and-excitation + self.se = _SE_LAYER(mid_chs, rd_ratio=se_ratio) if has_se else None + + # Point-wise linear projection + self.ghost2 = GhostModule(mid_chs, out_chs, relu=False) + + # shortcut + if in_chs == out_chs and self.stride == 1: + self.shortcut = nn.Sequential() + else: + self.shortcut = nn.Sequential( + nn.Conv2d( + in_chs, in_chs, dw_kernel_size, stride=stride, + padding=(dw_kernel_size-1)//2, groups=in_chs, bias=False), + nn.BatchNorm2d(in_chs), + nn.Conv2d(in_chs, out_chs, 1, stride=1, padding=0, bias=False), + nn.BatchNorm2d(out_chs), + ) + + def forward(self, x): + shortcut = x + + # 1st ghost bottleneck + x = self.ghost1(x) + + # Depth-wise convolution + if self.conv_dw is not None: + x = self.conv_dw(x) + x = self.bn_dw(x) + + # Squeeze-and-excitation + if self.se is not None: + x = self.se(x) + + # 2nd ghost bottleneck + x = self.ghost2(x) + + x += self.shortcut(shortcut) + return x + + +class GhostNet(nn.Module): + def __init__(self, cfgs, num_classes=1000, width=1.0, dropout=0.2, in_chans=3, output_stride=32, global_pool='avg'): + super(GhostNet, self).__init__() + # setting of inverted residual blocks + assert output_stride == 32, 'only output_stride==32 is valid, dilation not supported' + self.cfgs = cfgs + self.num_classes = num_classes + self.dropout = dropout + self.feature_info = [] + + # building first layer + stem_chs = make_divisible(16 * width, 4) + self.conv_stem = nn.Conv2d(in_chans, stem_chs, 3, 2, 1, bias=False) + self.feature_info.append(dict(num_chs=stem_chs, reduction=2, module=f'conv_stem')) + self.bn1 = nn.BatchNorm2d(stem_chs) + self.act1 = nn.ReLU(inplace=True) + prev_chs = stem_chs + + # building inverted residual blocks + stages = nn.ModuleList([]) + block = GhostBottleneck + stage_idx = 0 + net_stride = 2 + for cfg in self.cfgs: + layers = [] + s = 1 + for k, exp_size, c, se_ratio, s in cfg: + out_chs = make_divisible(c * width, 4) + mid_chs = make_divisible(exp_size * width, 4) + layers.append(block(prev_chs, mid_chs, out_chs, k, s, se_ratio=se_ratio)) + prev_chs = out_chs + if s > 1: + net_stride *= 2 + self.feature_info.append(dict( + num_chs=prev_chs, reduction=net_stride, module=f'blocks.{stage_idx}')) + stages.append(nn.Sequential(*layers)) + stage_idx += 1 + + out_chs = make_divisible(exp_size * width, 4) + stages.append(nn.Sequential(ConvBnAct(prev_chs, out_chs, 1))) + self.pool_dim = prev_chs = out_chs + + self.blocks = nn.Sequential(*stages) + + # building last several layers + self.num_features = out_chs = 1280 + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.conv_head = nn.Conv2d(prev_chs, out_chs, 1, 1, 0, bias=True) + self.act2 = nn.ReLU(inplace=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled + self.classifier = Linear(out_chs, num_classes) if num_classes > 0 else nn.Identity() + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, 
num_classes, global_pool='avg'): + self.num_classes = num_classes + # cannot meaningfully change pooling of efficient head after creation + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled + self.classifier = Linear(self.pool_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + x = self.blocks(x) + x = self.global_pool(x) + x = self.conv_head(x) + x = self.act2(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.flatten(x) + if self.dropout > 0.: + x = F.dropout(x, p=self.dropout, training=self.training) + x = self.classifier(x) + return x + + +def _create_ghostnet(variant, width=1.0, pretrained=False, **kwargs): + """ + Constructs a GhostNet model + """ + cfgs = [ + # k, t, c, SE, s + # stage1 + [[3, 16, 16, 0, 1]], + # stage2 + [[3, 48, 24, 0, 2]], + [[3, 72, 24, 0, 1]], + # stage3 + [[5, 72, 40, 0.25, 2]], + [[5, 120, 40, 0.25, 1]], + # stage4 + [[3, 240, 80, 0, 2]], + [[3, 200, 80, 0, 1], + [3, 184, 80, 0, 1], + [3, 184, 80, 0, 1], + [3, 480, 112, 0.25, 1], + [3, 672, 112, 0.25, 1] + ], + # stage5 + [[5, 672, 160, 0.25, 2]], + [[5, 960, 160, 0, 1], + [5, 960, 160, 0.25, 1], + [5, 960, 160, 0, 1], + [5, 960, 160, 0.25, 1] + ] + ] + model_kwargs = dict( + cfgs=cfgs, + width=width, + **kwargs, + ) + return build_model_with_cfg( + GhostNet, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(flatten_sequential=True), + **model_kwargs) + + +@register_model +def ghostnet_050(pretrained=False, **kwargs): + """ GhostNet-0.5x """ + model = _create_ghostnet('ghostnet_050', width=0.5, pretrained=pretrained, **kwargs) + return model + + +@register_model +def ghostnet_100(pretrained=False, **kwargs): + """ GhostNet-1.0x """ + model = _create_ghostnet('ghostnet_100', width=1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def ghostnet_130(pretrained=False, **kwargs): + """ GhostNet-1.3x """ + model = _create_ghostnet('ghostnet_130', width=1.3, pretrained=pretrained, **kwargs) + return model diff --git a/timm/models/gluon_resnet.py b/timm/models/gluon_resnet.py new file mode 100644 index 0000000..027a10b --- /dev/null +++ b/timm/models/gluon_resnet.py @@ -0,0 +1,248 @@ +"""Pytorch impl of MxNet Gluon ResNet/(SE)ResNeXt variants +This file evolved from https://github.com/pytorch/vision 'resnet.py' with (SE)-ResNeXt additions +and ports of Gluon variations (https://github.com/dmlc/gluon-cv/blob/master/gluoncv/model_zoo/resnet.py) +by Ross Wightman +""" + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import SEModule +from .registry import register_model +from .resnet import ResNet, Bottleneck, BasicBlock + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + 'gluon_resnet18_v1b': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet18_v1b-0757602b.pth'), + 'gluon_resnet34_v1b': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet34_v1b-c6d82d59.pth'), + 
'gluon_resnet50_v1b': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1b-0ebe02e2.pth'), + 'gluon_resnet101_v1b': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1b-3b017079.pth'), + 'gluon_resnet152_v1b': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1b-c1edb0dd.pth'), + 'gluon_resnet50_v1c': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1c-48092f55.pth', + first_conv='conv1.0'), + 'gluon_resnet101_v1c': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1c-1f26822a.pth', + first_conv='conv1.0'), + 'gluon_resnet152_v1c': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1c-a3bb0b98.pth', + first_conv='conv1.0'), + 'gluon_resnet50_v1d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1d-818a1b1b.pth', + first_conv='conv1.0'), + 'gluon_resnet101_v1d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1d-0f9c8644.pth', + first_conv='conv1.0'), + 'gluon_resnet152_v1d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1d-bd354e12.pth', + first_conv='conv1.0'), + 'gluon_resnet50_v1s': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1s-1762acc0.pth', + first_conv='conv1.0'), + 'gluon_resnet101_v1s': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1s-60fe0cc1.pth', + first_conv='conv1.0'), + 'gluon_resnet152_v1s': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1s-dcc41b81.pth', + first_conv='conv1.0'), + 'gluon_resnext50_32x4d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext50_32x4d-e6a097c1.pth'), + 'gluon_resnext101_32x4d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext101_32x4d-b253c8c4.pth'), + 'gluon_resnext101_64x4d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext101_64x4d-f9a8e184.pth'), + 'gluon_seresnext50_32x4d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext50_32x4d-90cf2d6e.pth'), + 'gluon_seresnext101_32x4d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext101_32x4d-cf52900d.pth'), + 'gluon_seresnext101_64x4d': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext101_64x4d-f9926f93.pth'), + 'gluon_senet154': _cfg(url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_senet154-70a1a3c0.pth', + first_conv='conv1.0'), +} + + +def _create_resnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + ResNet, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + + +@register_model +def gluon_resnet18_v1b(pretrained=False, **kwargs): + """Constructs a ResNet-18 model. 
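+
+    Usage sketch (assuming this repo's vendored timm exposes create_model as upstream timm does):
+
+        import timm
+        model = timm.create_model('gluon_resnet18_v1b', pretrained=True)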
+ """ + model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs) + return _create_resnet('gluon_resnet18_v1b', pretrained, **model_args) + + +@register_model +def gluon_resnet34_v1b(pretrained=False, **kwargs): + """Constructs a ResNet-34 model. + """ + model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('gluon_resnet34_v1b', pretrained, **model_args) + + +@register_model +def gluon_resnet50_v1b(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('gluon_resnet50_v1b', pretrained, **model_args) + + +@register_model +def gluon_resnet101_v1b(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], **kwargs) + return _create_resnet('gluon_resnet101_v1b', pretrained, **model_args) + + +@register_model +def gluon_resnet152_v1b(pretrained=False, **kwargs): + """Constructs a ResNet-152 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], **kwargs) + return _create_resnet('gluon_resnet152_v1b', pretrained, **model_args) + + +@register_model +def gluon_resnet50_v1c(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', **kwargs) + return _create_resnet('gluon_resnet50_v1c', pretrained, **model_args) + + +@register_model +def gluon_resnet101_v1c(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', **kwargs) + return _create_resnet('gluon_resnet101_v1c', pretrained, **model_args) + + +@register_model +def gluon_resnet152_v1c(pretrained=False, **kwargs): + """Constructs a ResNet-152 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', **kwargs) + return _create_resnet('gluon_resnet152_v1c', pretrained, **model_args) + + +@register_model +def gluon_resnet50_v1d(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('gluon_resnet50_v1d', pretrained, **model_args) + + +@register_model +def gluon_resnet101_v1d(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('gluon_resnet101_v1d', pretrained, **model_args) + + +@register_model +def gluon_resnet152_v1d(pretrained=False, **kwargs): + """Constructs a ResNet-152 model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('gluon_resnet152_v1d', pretrained, **model_args) + + +@register_model +def gluon_resnet50_v1s(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=64, stem_type='deep', **kwargs) + return _create_resnet('gluon_resnet50_v1s', pretrained, **model_args) + + + +@register_model +def gluon_resnet101_v1s(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. 
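+    The v1s variants use a deep 3-conv stem with stem_width=64 (see model_args below).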
+ """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], stem_width=64, stem_type='deep', **kwargs) + return _create_resnet('gluon_resnet101_v1s', pretrained, **model_args) + + +@register_model +def gluon_resnet152_v1s(pretrained=False, **kwargs): + """Constructs a ResNet-152 model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], stem_width=64, stem_type='deep', **kwargs) + return _create_resnet('gluon_resnet152_v1s', pretrained, **model_args) + + + +@register_model +def gluon_resnext50_32x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt50-32x4d model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('gluon_resnext50_32x4d', pretrained, **model_args) + + +@register_model +def gluon_resnext101_32x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt-101 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('gluon_resnext101_32x4d', pretrained, **model_args) + + +@register_model +def gluon_resnext101_64x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt-101 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=64, base_width=4, **kwargs) + return _create_resnet('gluon_resnext101_64x4d', pretrained, **model_args) + + +@register_model +def gluon_seresnext50_32x4d(pretrained=False, **kwargs): + """Constructs a SEResNeXt50-32x4d model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, + block_args=dict(attn_layer=SEModule), **kwargs) + return _create_resnet('gluon_seresnext50_32x4d', pretrained, **model_args) + + +@register_model +def gluon_seresnext101_32x4d(pretrained=False, **kwargs): + """Constructs a SEResNeXt-101-32x4d model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, + block_args=dict(attn_layer=SEModule), **kwargs) + return _create_resnet('gluon_seresnext101_32x4d', pretrained, **model_args) + + +@register_model +def gluon_seresnext101_64x4d(pretrained=False, **kwargs): + """Constructs a SEResNeXt-101-64x4d model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], cardinality=64, base_width=4, + block_args=dict(attn_layer=SEModule), **kwargs) + return _create_resnet('gluon_seresnext101_64x4d', pretrained, **model_args) + + +@register_model +def gluon_senet154(pretrained=False, **kwargs): + """Constructs an SENet-154 model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], cardinality=64, base_width=4, stem_type='deep', + down_kernel_size=3, block_reduce_first=2, block_args=dict(attn_layer=SEModule), **kwargs) + return _create_resnet('gluon_senet154', pretrained, **model_args) diff --git a/timm/models/gluon_xception.py b/timm/models/gluon_xception.py new file mode 100644 index 0000000..fbd668a --- /dev/null +++ b/timm/models/gluon_xception.py @@ -0,0 +1,246 @@ +"""Pytorch impl of Gluon Xception +This is a port of the Gluon Xception code and weights, itself ported from a PyTorch DeepLab impl. 
+ +Gluon model: (https://gluon-cv.mxnet.io/_modules/gluoncv/model_zoo/xception.html) +Original PyTorch DeepLab impl: https://github.com/jfzhang95/pytorch-deeplab-xception + +Hacked together by / Copyright 2020 Ross Wightman +""" +from collections import OrderedDict + +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import create_classifier, get_padding +from .registry import register_model + +__all__ = ['Xception65'] + +default_cfgs = { + 'gluon_xception65': { + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_xception-7015a15c.pth', + 'input_size': (3, 299, 299), + 'crop_pct': 0.903, + 'pool_size': (10, 10), + 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, + 'std': IMAGENET_DEFAULT_STD, + 'num_classes': 1000, + 'first_conv': 'conv1', + 'classifier': 'fc' + # The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299 + }, +} + +""" PADDING NOTES +The original PyTorch and Gluon impl of these models dutifully reproduced the +aligned padding added to Tensorflow models for Deeplab. This padding was compensating +for Tensorflow 'SAME' padding. PyTorch symmetric padding behaves the way we'd want it to. +""" + + +class SeparableConv2d(nn.Module): + def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False, norm_layer=None): + super(SeparableConv2d, self).__init__() + self.kernel_size = kernel_size + self.dilation = dilation + + # depthwise convolution + padding = get_padding(kernel_size, stride, dilation) + self.conv_dw = nn.Conv2d( + inplanes, inplanes, kernel_size, stride=stride, + padding=padding, dilation=dilation, groups=inplanes, bias=bias) + self.bn = norm_layer(num_features=inplanes) + # pointwise convolution + self.conv_pw = nn.Conv2d(inplanes, planes, kernel_size=1, bias=bias) + + def forward(self, x): + x = self.conv_dw(x) + x = self.bn(x) + x = self.conv_pw(x) + return x + + +class Block(nn.Module): + def __init__(self, inplanes, planes, stride=1, dilation=1, start_with_relu=True, norm_layer=None): + super(Block, self).__init__() + if isinstance(planes, (list, tuple)): + assert len(planes) == 3 + else: + planes = (planes,) * 3 + outplanes = planes[-1] + + if outplanes != inplanes or stride != 1: + self.skip = nn.Sequential() + self.skip.add_module('conv1', nn.Conv2d( + inplanes, outplanes, 1, stride=stride, bias=False)), + self.skip.add_module('bn1', norm_layer(num_features=outplanes)) + else: + self.skip = None + + rep = OrderedDict() + for i in range(3): + rep['act%d' % (i + 1)] = nn.ReLU(inplace=True) + rep['conv%d' % (i + 1)] = SeparableConv2d( + inplanes, planes[i], 3, stride=stride if i == 2 else 1, dilation=dilation, norm_layer=norm_layer) + rep['bn%d' % (i + 1)] = norm_layer(planes[i]) + inplanes = planes[i] + + if not start_with_relu: + del rep['act1'] + else: + rep['act1'] = nn.ReLU(inplace=False) + self.rep = nn.Sequential(rep) + + def forward(self, x): + skip = x + if self.skip is not None: + skip = self.skip(skip) + x = self.rep(x) + skip + return x + + +class Xception65(nn.Module): + """Modified Aligned Xception. 
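+
+    Construction sketch: output_stride picks the dilation configuration assembled in
+    __init__, e.g. Xception65(output_stride=16) trades the final stride-2 stage for
+    dilated convs; only 8, 16 and 32 are supported (anything else raises NotImplementedError).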
+ + NOTE: only the 65 layer version is included here, the 71 layer variant + was not correct and had no pretrained weights + """ + + def __init__(self, num_classes=1000, in_chans=3, output_stride=32, norm_layer=nn.BatchNorm2d, + drop_rate=0., global_pool='avg'): + super(Xception65, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + if output_stride == 32: + entry_block3_stride = 2 + exit_block20_stride = 2 + middle_dilation = 1 + exit_dilation = (1, 1) + elif output_stride == 16: + entry_block3_stride = 2 + exit_block20_stride = 1 + middle_dilation = 1 + exit_dilation = (1, 2) + elif output_stride == 8: + entry_block3_stride = 1 + exit_block20_stride = 1 + middle_dilation = 2 + exit_dilation = (2, 4) + else: + raise NotImplementedError + + # Entry flow + self.conv1 = nn.Conv2d(in_chans, 32, kernel_size=3, stride=2, padding=1, bias=False) + self.bn1 = norm_layer(num_features=32) + self.act1 = nn.ReLU(inplace=True) + + self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1, bias=False) + self.bn2 = norm_layer(num_features=64) + self.act2 = nn.ReLU(inplace=True) + + self.block1 = Block(64, 128, stride=2, start_with_relu=False, norm_layer=norm_layer) + self.block1_act = nn.ReLU(inplace=True) + self.block2 = Block(128, 256, stride=2, start_with_relu=False, norm_layer=norm_layer) + self.block3 = Block(256, 728, stride=entry_block3_stride, norm_layer=norm_layer) + + # Middle flow + self.mid = nn.Sequential(OrderedDict([('block%d' % i, Block( + 728, 728, stride=1, dilation=middle_dilation, norm_layer=norm_layer)) for i in range(4, 20)])) + + # Exit flow + self.block20 = Block( + 728, (728, 1024, 1024), stride=exit_block20_stride, dilation=exit_dilation[0], norm_layer=norm_layer) + self.block20_act = nn.ReLU(inplace=True) + + self.conv3 = SeparableConv2d(1024, 1536, 3, stride=1, dilation=exit_dilation[1], norm_layer=norm_layer) + self.bn3 = norm_layer(num_features=1536) + self.act3 = nn.ReLU(inplace=True) + + self.conv4 = SeparableConv2d(1536, 1536, 3, stride=1, dilation=exit_dilation[1], norm_layer=norm_layer) + self.bn4 = norm_layer(num_features=1536) + self.act4 = nn.ReLU(inplace=True) + + self.num_features = 2048 + self.conv5 = SeparableConv2d( + 1536, self.num_features, 3, stride=1, dilation=exit_dilation[1], norm_layer=norm_layer) + self.bn5 = norm_layer(num_features=self.num_features) + self.act5 = nn.ReLU(inplace=True) + self.feature_info = [ + dict(num_chs=64, reduction=2, module='act2'), + dict(num_chs=128, reduction=4, module='block1_act'), + dict(num_chs=256, reduction=8, module='block3.rep.act1'), + dict(num_chs=728, reduction=16, module='block20.rep.act1'), + dict(num_chs=2048, reduction=32, module='act5'), + ] + + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def get_classifier(self): + return self.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + # Entry flow + x = self.conv1(x) + x = self.bn1(x) + x = self.act1(x) + + x = self.conv2(x) + x = self.bn2(x) + x = self.act2(x) + + x = self.block1(x) + x = self.block1_act(x) + # c1 = x + x = self.block2(x) + # c2 = x + x = self.block3(x) + + # Middle flow + x = self.mid(x) + # c3 = x + + # Exit flow + x = self.block20(x) + x = self.block20_act(x) + x = self.conv3(x) + x = self.bn3(x) + x = self.act3(x) + + x = self.conv4(x) + x = self.bn4(x) + 
x = self.act4(x)
+
+        x = self.conv5(x)
+        x = self.bn5(x)
+        x = self.act5(x)
+        return x
+
+    def forward(self, x):
+        x = self.forward_features(x)
+        x = self.global_pool(x)
+        if self.drop_rate:
+            x = F.dropout(x, self.drop_rate, training=self.training)
+        x = self.fc(x)
+        return x
+
+
+def _create_gluon_xception(variant, pretrained=False, **kwargs):
+    return build_model_with_cfg(
+        Xception65, variant, pretrained,
+        default_cfg=default_cfgs[variant],
+        feature_cfg=dict(feature_cls='hook'),
+        **kwargs)
+
+
+@register_model
+def gluon_xception65(pretrained=False, **kwargs):
+    """ Modified Aligned Xception-65
+    """
+    return _create_gluon_xception('gluon_xception65', pretrained, **kwargs)
diff --git a/timm/models/hardcorenas.py b/timm/models/hardcorenas.py
new file mode 100644
index 0000000..9988a04
--- /dev/null
+++ b/timm/models/hardcorenas.py
@@ -0,0 +1,152 @@
+from functools import partial
+
+import torch.nn as nn
+
+from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
+from .efficientnet_blocks import SqueezeExcite
+from .efficientnet_builder import decode_arch_def, resolve_act_layer, resolve_bn_args, round_channels
+from .helpers import build_model_with_cfg, default_cfg_for_features
+from .layers import get_act_fn
+from .mobilenetv3 import MobileNetV3, MobileNetV3Features
+from .registry import register_model
+
+
+def _cfg(url='', **kwargs):
+    return {
+        'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (1, 1),
+        'crop_pct': 0.875, 'interpolation': 'bilinear',
+        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
+        'first_conv': 'conv_stem', 'classifier': 'classifier',
+        **kwargs
+    }
+
+
+default_cfgs = {
+    'hardcorenas_a': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_A_Green_38ms_75.9_23474aeb.pth'),
+    'hardcorenas_b': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_B_Green_40ms_76.5_1f882d1e.pth'),
+    'hardcorenas_c': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_C_Green_44ms_77.1_d4148c9e.pth'),
+    'hardcorenas_d': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_D_Green_50ms_77.4_23e3cdde.pth'),
+    'hardcorenas_e': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_E_Green_55ms_77.9_90f20e8a.pth'),
+    'hardcorenas_f': _cfg(url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/public/HardCoReNAS/HardCoreNAS_F_Green_60ms_78.1_2855edf1.pth'),
+}
+
+
+def _gen_hardcorenas(pretrained, variant, arch_def, **kwargs):
+    """Creates a hardcorenas model
+
+    Ref impl: https://github.com/Alibaba-MIIL/HardCoReNAS
+    Paper: https://arxiv.org/abs/2102.11646
+
+    """
+    num_features = 1280
+    se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU, rd_round_fn=round_channels)
+    model_kwargs = dict(
+        block_args=decode_arch_def(arch_def),
+        num_features=num_features,
+        stem_size=32,
+        norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
+        act_layer=resolve_act_layer(kwargs, 'hard_swish'),
+        se_layer=se_layer,
+        **kwargs,
+    )
+
+    features_only = False
+    model_cls = MobileNetV3
+    kwargs_filter = None
+    if model_kwargs.pop('features_only', False):
+        features_only = True
+        kwargs_filter = ('num_classes', 'num_features', 'global_pool', 'head_conv', 'head_bias')
+        model_cls = MobileNetV3Features
+    model = build_model_with_cfg(
+        model_cls, variant, pretrained,
+
default_cfg=default_cfgs[variant], + pretrained_strict=not features_only, + kwargs_filter=kwargs_filter, + **model_kwargs) + if features_only: + model.default_cfg = default_cfg_for_features(model.default_cfg) + return model + + +@register_model +def hardcorenas_a(pretrained=False, **kwargs): + """ hardcorenas_A """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], + ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e6_c40_nre_se0.25'], + ['ir_r1_k5_s2_e6_c80_se0.25', 'ir_r1_k5_s1_e6_c80_se0.25'], + ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_a', arch_def=arch_def, **kwargs) + return model + + +@register_model +def hardcorenas_b(pretrained=False, **kwargs): + """ hardcorenas_B """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], + ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25', 'ir_r1_k3_s1_e3_c24_nre'], + ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre'], + ['ir_r1_k5_s2_e3_c80', 'ir_r1_k5_s1_e3_c80', 'ir_r1_k3_s1_e3_c80', 'ir_r1_k3_s1_e3_c80'], + ['ir_r1_k5_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e3_c192_se0.25'], + ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_b', arch_def=arch_def, **kwargs) + return model + + +@register_model +def hardcorenas_c(pretrained=False, **kwargs): + """ hardcorenas_C """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], + ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', + 'ir_r1_k5_s1_e3_c40_nre'], + ['ir_r1_k5_s2_e4_c80', 'ir_r1_k5_s1_e6_c80_se0.25', 'ir_r1_k3_s1_e3_c80', 'ir_r1_k3_s1_e3_c80'], + ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e3_c192_se0.25'], + ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_c', arch_def=arch_def, **kwargs) + return model + + +@register_model +def hardcorenas_d(pretrained=False, **kwargs): + """ hardcorenas_D """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], + ['ir_r1_k5_s2_e3_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k3_s1_e3_c40_nre_se0.25'], + ['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', + 'ir_r1_k3_s1_e3_c80_se0.25'], + ['ir_r1_k3_s1_e4_c112_se0.25', 'ir_r1_k5_s1_e4_c112_se0.25', 'ir_r1_k3_s1_e3_c112_se0.25', + 'ir_r1_k5_s1_e3_c112_se0.25'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', + 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_d', arch_def=arch_def, **kwargs) + return model + + +@register_model +def hardcorenas_e(pretrained=False, **kwargs): + """ hardcorenas_E """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], + ['ir_r1_k5_s2_e6_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', + 'ir_r1_k3_s1_e3_c40_nre_se0.25'], ['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e6_c80_se0.25'], + ['ir_r1_k5_s1_e6_c112_se0.25', 
'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', + 'ir_r1_k5_s1_e3_c112_se0.25'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', + 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_e', arch_def=arch_def, **kwargs) + return model + + +@register_model +def hardcorenas_f(pretrained=False, **kwargs): + """ hardcorenas_F """ + arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], + ['ir_r1_k5_s2_e6_c40_nre_se0.25', 'ir_r1_k5_s1_e6_c40_nre_se0.25'], + ['ir_r1_k5_s2_e6_c80_se0.25', 'ir_r1_k5_s1_e6_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', + 'ir_r1_k3_s1_e3_c80_se0.25'], + ['ir_r1_k3_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', + 'ir_r1_k3_s1_e3_c112_se0.25'], + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e6_c192_se0.25', + 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] + model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_f', arch_def=arch_def, **kwargs) + return model diff --git a/timm/models/helpers.py b/timm/models/helpers.py new file mode 100644 index 0000000..16ce64d --- /dev/null +++ b/timm/models/helpers.py @@ -0,0 +1,518 @@ +""" Model creation / weight loading / state_dict helpers + +Hacked together by / Copyright 2020 Ross Wightman +""" +import logging +import os +import math +from collections import OrderedDict +from copy import deepcopy +from typing import Any, Callable, Optional, Tuple + +import torch +import torch.nn as nn +from torch.hub import load_state_dict_from_url + +from .features import FeatureListNet, FeatureDictNet, FeatureHookNet +from .fx_features import FeatureGraphNet +from .hub import has_hf_hub, download_cached_file, load_state_dict_from_hf +from .layers import Conv2dSame, Linear + + +_logger = logging.getLogger(__name__) + + +def load_state_dict(checkpoint_path, use_ema=False): + if checkpoint_path and os.path.isfile(checkpoint_path): + checkpoint = torch.load(checkpoint_path, map_location='cpu') + state_dict_key = '' + if isinstance(checkpoint, dict): + if use_ema and checkpoint.get('state_dict_ema', None) is not None: + state_dict_key = 'state_dict_ema' + elif use_ema and checkpoint.get('model_ema', None) is not None: + state_dict_key = 'model_ema' + elif 'state_dict' in checkpoint: + state_dict_key = 'state_dict' + elif 'model' in checkpoint: + state_dict_key = 'model' + if state_dict_key: + state_dict = checkpoint[state_dict_key] + new_state_dict = OrderedDict() + for k, v in state_dict.items(): + # strip `module.` prefix + name = k[7:] if k.startswith('module') else k + new_state_dict[name] = v + state_dict = new_state_dict + else: + state_dict = checkpoint + _logger.info("Loaded {} from checkpoint '{}'".format(state_dict_key, checkpoint_path)) + return state_dict + else: + _logger.error("No checkpoint found at '{}'".format(checkpoint_path)) + raise FileNotFoundError() + + +def load_checkpoint(model, checkpoint_path, use_ema=False, strict=True): + if os.path.splitext(checkpoint_path)[-1].lower() in ('.npz', '.npy'): + # numpy checkpoint, try to load via model specific load_pretrained fn + if hasattr(model, 'load_pretrained'): + model.load_pretrained(checkpoint_path) + else: + raise NotImplementedError('Model cannot load numpy checkpoint') + return + state_dict = load_state_dict(checkpoint_path, use_ema) + model.load_state_dict(state_dict, strict=strict) + + +def resume_checkpoint(model, 
checkpoint_path, optimizer=None, loss_scaler=None, log_info=True):
+    resume_epoch = None
+    if os.path.isfile(checkpoint_path):
+        checkpoint = torch.load(checkpoint_path, map_location='cpu')
+        if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
+            if log_info:
+                _logger.info('Restoring model state from checkpoint...')
+            new_state_dict = OrderedDict()
+            for k, v in checkpoint['state_dict'].items():
+                name = k[7:] if k.startswith('module') else k
+                new_state_dict[name] = v
+            model.load_state_dict(new_state_dict)
+
+            if optimizer is not None and 'optimizer' in checkpoint:
+                if log_info:
+                    _logger.info('Restoring optimizer state from checkpoint...')
+                optimizer.load_state_dict(checkpoint['optimizer'])
+
+            if loss_scaler is not None and loss_scaler.state_dict_key in checkpoint:
+                if log_info:
+                    _logger.info('Restoring AMP loss scaler state from checkpoint...')
+                loss_scaler.load_state_dict(checkpoint[loss_scaler.state_dict_key])
+
+            if 'epoch' in checkpoint:
+                resume_epoch = checkpoint['epoch']
+                if 'version' in checkpoint and checkpoint['version'] > 1:
+                    resume_epoch += 1  # start at the next epoch, old checkpoints incremented before save
+
+            if log_info:
+                _logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch']))
+        else:
+            model.load_state_dict(checkpoint)
+            if log_info:
+                _logger.info("Loaded checkpoint '{}'".format(checkpoint_path))
+        return resume_epoch
+    else:
+        _logger.error("No checkpoint found at '{}'".format(checkpoint_path))
+        raise FileNotFoundError()
+
+
+def load_custom_pretrained(model, default_cfg=None, load_fn=None, progress=False, check_hash=False):
+    r"""Loads a custom (read non .pth) weight file
+
+    Downloads checkpoint file into cache-dir like torch.hub based loaders, but calls
+    a passed in custom load fn, or the `load_pretrained` model member fn.
+
+    If the object is already present in `model_dir`, it's deserialized and returned.
+    The default value of `model_dir` is ``<hub_dir>/checkpoints`` where
+    `hub_dir` is the directory returned by :func:`~torch.hub.get_dir`.
+
+    Args:
+        model: The instantiated model to load weights into
+        default_cfg (dict): Default pretrained model cfg
+        load_fn: An external stand alone fn that loads weights into provided model, otherwise a fn named
+            'load_pretrained' on the model will be called if it exists
+        progress (bool, optional): whether or not to display a progress bar to stderr. Default: False
+        check_hash(bool, optional): If True, the filename part of the URL should follow the naming convention
+            ``filename-<sha256>.ext`` where ``<sha256>`` is the first eight or more
+            digits of the SHA256 hash of the contents of the file. The hash is used to
+            ensure unique names and to verify the contents of the file. Default: False
+    """
+    default_cfg = default_cfg or getattr(model, 'default_cfg', None) or {}
+    pretrained_url = default_cfg.get('url', None)
+    if not pretrained_url:
+        _logger.warning("No pretrained weights exist for this model.
Using random initialization.") + return + cached_file = download_cached_file(default_cfg['url'], check_hash=check_hash, progress=progress) + + if load_fn is not None: + load_fn(model, cached_file) + elif hasattr(model, 'load_pretrained'): + model.load_pretrained(cached_file) + else: + _logger.warning("Valid function to load pretrained weights is not available, using random initialization.") + + +def adapt_input_conv(in_chans, conv_weight): + conv_type = conv_weight.dtype + conv_weight = conv_weight.float() # Some weights are in torch.half, ensure it's float for sum on CPU + O, I, J, K = conv_weight.shape + if in_chans == 1: + if I > 3: + assert conv_weight.shape[1] % 3 == 0 + # For models with space2depth stems + conv_weight = conv_weight.reshape(O, I // 3, 3, J, K) + conv_weight = conv_weight.sum(dim=2, keepdim=False) + else: + conv_weight = conv_weight.sum(dim=1, keepdim=True) + elif in_chans != 3: + if I != 3: + raise NotImplementedError('Weight format not supported by conversion.') + else: + # NOTE this strategy should be better than random init, but there could be other combinations of + # the original RGB input layer weights that'd work better for specific cases. + repeat = int(math.ceil(in_chans / 3)) + conv_weight = conv_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :] + conv_weight *= (3 / float(in_chans)) + conv_weight = conv_weight.to(conv_type) + return conv_weight + + +def load_pretrained(model, default_cfg=None, num_classes=1000, in_chans=3, filter_fn=None, strict=True, progress=False): + """ Load pretrained checkpoint + + Args: + model (nn.Module) : PyTorch model module + default_cfg (Optional[Dict]): default configuration for pretrained weights / target dataset + num_classes (int): num_classes for model + in_chans (int): in_chans for model + filter_fn (Optional[Callable]): state_dict filter fn for load (takes state_dict, model as args) + strict (bool): strict load of checkpoint + progress (bool): enable progress bar for weight download + + """ + default_cfg = default_cfg or getattr(model, 'default_cfg', None) or {} + pretrained_url = default_cfg.get('url', None) + hf_hub_id = default_cfg.get('hf_hub', None) + if not pretrained_url and not hf_hub_id: + _logger.warning("No pretrained weights exist for this model. 
Using random initialization.")
+        return
+    if pretrained_url:
+        _logger.info(f'Loading pretrained weights from url ({pretrained_url})')
+        state_dict = load_state_dict_from_url(pretrained_url, progress=progress, map_location='cpu')
+    elif hf_hub_id and has_hf_hub(necessary=True):
+        _logger.info(f'Loading pretrained weights from Hugging Face hub ({hf_hub_id})')
+        state_dict = load_state_dict_from_hf(hf_hub_id)
+    if filter_fn is not None:
+        # for backwards compat with filter fns that take one arg, try the one-arg form first, then two
+        try:
+            state_dict = filter_fn(state_dict)
+        except TypeError:
+            state_dict = filter_fn(state_dict, model)
+
+    input_convs = default_cfg.get('first_conv', None)
+    if input_convs is not None and in_chans != 3:
+        if isinstance(input_convs, str):
+            input_convs = (input_convs,)
+        for input_conv_name in input_convs:
+            weight_name = input_conv_name + '.weight'
+            try:
+                state_dict[weight_name] = adapt_input_conv(in_chans, state_dict[weight_name])
+                _logger.info(
+                    f'Converted input conv {input_conv_name} pretrained weights from 3 to {in_chans} channel(s)')
+            except NotImplementedError as e:
+                del state_dict[weight_name]
+                strict = False
+                _logger.warning(
+                    f'Unable to convert pretrained {input_conv_name} weights, using random init for this layer.')
+
+    classifiers = default_cfg.get('classifier', None)
+    label_offset = default_cfg.get('label_offset', 0)
+    if classifiers is not None:
+        if isinstance(classifiers, str):
+            classifiers = (classifiers,)
+        if num_classes != default_cfg['num_classes']:
+            for classifier_name in classifiers:
+                # completely discard fully connected if model num_classes doesn't match pretrained weights
+                del state_dict[classifier_name + '.weight']
+                del state_dict[classifier_name + '.bias']
+            strict = False
+        elif label_offset > 0:
+            for classifier_name in classifiers:
+                # special case for pretrained weights with an extra background class in pretrained weights
+                classifier_weight = state_dict[classifier_name + '.weight']
+                state_dict[classifier_name + '.weight'] = classifier_weight[label_offset:]
+                classifier_bias = state_dict[classifier_name + '.bias']
+                state_dict[classifier_name + '.bias'] = classifier_bias[label_offset:]
+
+    model.load_state_dict(state_dict, strict=strict)
+
+
+def extract_layer(model, layer):
+    layer = layer.split('.')
+    module = model
+    if hasattr(model, 'module') and layer[0] != 'module':
+        module = model.module
+    if not hasattr(model, 'module') and layer[0] == 'module':
+        layer = layer[1:]
+    for l in layer:
+        if hasattr(module, l):
+            if not l.isdigit():
+                module = getattr(module, l)
+            else:
+                module = module[int(l)]
+        else:
+            return module
+    return module
+
+
+def set_layer(model, layer, val):
+    layer = layer.split('.')
+    module = model
+    if hasattr(model, 'module') and layer[0] != 'module':
+        module = model.module
+    lst_index = 0
+    module2 = module
+    for l in layer:
+        if hasattr(module2, l):
+            if not l.isdigit():
+                module2 = getattr(module2, l)
+            else:
+                module2 = module2[int(l)]
+            lst_index += 1
+    lst_index -= 1
+    for l in layer[:lst_index]:
+        if not l.isdigit():
+            module = getattr(module, l)
+        else:
+            module = module[int(l)]
+    l = layer[lst_index]
+    setattr(module, l, val)
+
+
+def adapt_model_from_string(parent_module, model_string):
+    separator = '***'
+    state_dict = {}
+    lst_shape = model_string.split(separator)
+    for k in lst_shape:
+        k = k.split(':')
+        key = k[0]
+        shape = k[1][1:-1].split(',')
+        if shape[0] != '':
+            state_dict[key] = [int(i) for i in shape]
+
+    new_module = deepcopy(parent_module)
+    for n, m in
parent_module.named_modules(): + old_module = extract_layer(parent_module, n) + if isinstance(old_module, nn.Conv2d) or isinstance(old_module, Conv2dSame): + if isinstance(old_module, Conv2dSame): + conv = Conv2dSame + else: + conv = nn.Conv2d + s = state_dict[n + '.weight'] + in_channels = s[1] + out_channels = s[0] + g = 1 + if old_module.groups > 1: + in_channels = out_channels + g = in_channels + new_conv = conv( + in_channels=in_channels, out_channels=out_channels, kernel_size=old_module.kernel_size, + bias=old_module.bias is not None, padding=old_module.padding, dilation=old_module.dilation, + groups=g, stride=old_module.stride) + set_layer(new_module, n, new_conv) + if isinstance(old_module, nn.BatchNorm2d): + new_bn = nn.BatchNorm2d( + num_features=state_dict[n + '.weight'][0], eps=old_module.eps, momentum=old_module.momentum, + affine=old_module.affine, track_running_stats=True) + set_layer(new_module, n, new_bn) + if isinstance(old_module, nn.Linear): + # FIXME extra checks to ensure this is actually the FC classifier layer and not a diff Linear layer? + num_features = state_dict[n + '.weight'][1] + new_fc = Linear( + in_features=num_features, out_features=old_module.out_features, bias=old_module.bias is not None) + set_layer(new_module, n, new_fc) + if hasattr(new_module, 'num_features'): + new_module.num_features = num_features + new_module.eval() + parent_module.eval() + + return new_module + + +def adapt_model_from_file(parent_module, model_variant): + adapt_file = os.path.join(os.path.dirname(__file__), 'pruned', model_variant + '.txt') + with open(adapt_file, 'r') as f: + return adapt_model_from_string(parent_module, f.read().strip()) + + +def default_cfg_for_features(default_cfg): + default_cfg = deepcopy(default_cfg) + # remove default pretrained cfg fields that don't have much relevance for feature backbone + to_remove = ('num_classes', 'crop_pct', 'classifier', 'global_pool') # add default final pool size? + for tr in to_remove: + default_cfg.pop(tr, None) + return default_cfg + + +def overlay_external_default_cfg(default_cfg, kwargs): + """ Overlay 'external_default_cfg' in kwargs on top of default_cfg arg. 
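+
+    Behaviour sketch (hypothetical values): given default_cfg = {'url': 'u', 'num_classes': 1000}
+    and kwargs = {'external_default_cfg': {'hf_hub': 'org/model'}}, the stale 'url' is dropped,
+    'hf_hub' is merged in, and default_cfg becomes {'num_classes': 1000, 'hf_hub': 'org/model'}.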
+    """
+    external_default_cfg = kwargs.pop('external_default_cfg', None)
+    if external_default_cfg:
+        default_cfg.pop('url', None)  # url should come from external cfg
+        default_cfg.pop('hf_hub', None)  # hf hub id should come from external cfg
+        default_cfg.update(external_default_cfg)
+
+
+def set_default_kwargs(kwargs, names, default_cfg):
+    for n in names:
+        # for legacy reasons, model __init__ args use img_size + in_chans as separate args while
+        # default_cfg has one input_size=(C, H, W) entry
+        if n == 'img_size':
+            input_size = default_cfg.get('input_size', None)
+            if input_size is not None:
+                assert len(input_size) == 3
+                kwargs.setdefault(n, input_size[-2:])
+        elif n == 'in_chans':
+            input_size = default_cfg.get('input_size', None)
+            if input_size is not None:
+                assert len(input_size) == 3
+                kwargs.setdefault(n, input_size[0])
+        else:
+            default_val = default_cfg.get(n, None)
+            if default_val is not None:
+                kwargs.setdefault(n, default_cfg[n])
+
+
+def filter_kwargs(kwargs, names):
+    if not kwargs or not names:
+        return
+    for n in names:
+        kwargs.pop(n, None)
+
+
+def update_default_cfg_and_kwargs(default_cfg, kwargs, kwargs_filter):
+    """ Update the default_cfg and kwargs before passing to model
+
+    FIXME this sequence of overlay default_cfg, set default kwargs, filter kwargs
+    could/should be replaced by an improved configuration mechanism
+
+    Args:
+        default_cfg: input default_cfg (updated in-place)
+        kwargs: keyword args passed to model build fn (updated in-place)
+        kwargs_filter: keyword arg keys that must be removed before model __init__
+    """
+    # Overlay default cfg values from `external_default_cfg` if it exists in kwargs
+    overlay_external_default_cfg(default_cfg, kwargs)
+    # Set model __init__ args that can be determined by default_cfg (if not already passed as kwargs)
+    default_kwarg_names = ('num_classes', 'global_pool', 'in_chans')
+    if default_cfg.get('fixed_input_size', False):
+        # if fixed_input_size exists and is True, model takes an img_size arg that fixes its input size
+        default_kwarg_names += ('img_size',)
+    set_default_kwargs(kwargs, names=default_kwarg_names, default_cfg=default_cfg)
+    # Filter keyword args for task specific model variants (some 'features only' models, etc.)
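+    # e.g. a features-only variant may pass kwargs_filter=('num_classes', 'global_pool') so
+    # classifier-related kwargs never reach a backbone __init__ that does not accept them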
+     filter_kwargs(kwargs, names=kwargs_filter)
+
+
+ def build_model_with_cfg(
+         model_cls: Callable,
+         variant: str,
+         pretrained: bool,
+         default_cfg: dict,
+         model_cfg: Optional[Any] = None,
+         feature_cfg: Optional[dict] = None,
+         pretrained_strict: bool = True,
+         pretrained_filter_fn: Optional[Callable] = None,
+         pretrained_custom_load: bool = False,
+         kwargs_filter: Optional[Tuple[str]] = None,
+         **kwargs):
+     """ Build model with specified default_cfg and optional model_cfg
+
+     This helper fn aids in the construction of a model including:
+       * handling default_cfg and associated pretrained weight loading
+       * passing through optional model_cfg for models with config based arch spec
+       * features_only model adaptation
+       * pruning config / model adaptation
+
+     Args:
+         model_cls (nn.Module): model class
+         variant (str): model variant name
+         pretrained (bool): load pretrained weights
+         default_cfg (dict): model's default pretrained/task config
+         model_cfg (Optional[Dict]): model's architecture config
+         feature_cfg (Optional[Dict]): feature extraction adapter config
+         pretrained_strict (bool): load pretrained weights strictly
+         pretrained_filter_fn (Optional[Callable]): filter callable for pretrained weights
+         pretrained_custom_load (bool): use custom load fn, to load numpy or other non PyTorch weights
+         kwargs_filter (Optional[Tuple]): kwargs to filter before passing to model
+         **kwargs: model args passed through to model __init__
+     """
+     pruned = kwargs.pop('pruned', False)
+     features = False
+     feature_cfg = feature_cfg or {}
+     default_cfg = deepcopy(default_cfg) if default_cfg else {}
+     update_default_cfg_and_kwargs(default_cfg, kwargs, kwargs_filter)
+     default_cfg.setdefault('architecture', variant)
+
+     # Setup for feature extraction wrapper done at end of this fn
+     if kwargs.pop('features_only', False):
+         features = True
+         feature_cfg.setdefault('out_indices', (0, 1, 2, 3, 4))
+         if 'out_indices' in kwargs:
+             feature_cfg['out_indices'] = kwargs.pop('out_indices')
+
+     # Build the model
+     model = model_cls(**kwargs) if model_cfg is None else model_cls(cfg=model_cfg, **kwargs)
+     model.default_cfg = default_cfg
+
+     if pruned:
+         model = adapt_model_from_file(model, variant)
+
+     # For classification models, check class attr, then kwargs, then default to 1k, otherwise 0 for feats
+     num_classes_pretrained = 0 if features else getattr(model, 'num_classes', kwargs.get('num_classes', 1000))
+     if pretrained:
+         if pretrained_custom_load:
+             load_custom_pretrained(model)
+         else:
+             load_pretrained(
+                 model,
+                 num_classes=num_classes_pretrained,
+                 in_chans=kwargs.get('in_chans', 3),
+                 filter_fn=pretrained_filter_fn,
+                 strict=pretrained_strict)
+
+     # Wrap the model in a feature extraction module if enabled
+     if features:
+         feature_cls = FeatureListNet
+         if 'feature_cls' in feature_cfg:
+             feature_cls = feature_cfg.pop('feature_cls')
+             if isinstance(feature_cls, str):
+                 feature_cls = feature_cls.lower()
+                 if 'hook' in feature_cls:
+                     feature_cls = FeatureHookNet
+                 elif feature_cls == 'fx':
+                     feature_cls = FeatureGraphNet
+                 else:
+                     assert False, f'Unknown feature class {feature_cls}'
+         model = feature_cls(model, **feature_cfg)
+         model.default_cfg = default_cfg_for_features(default_cfg)  # add back default_cfg
+
+     return model
+
+
+ def model_parameters(model, exclude_head=False):
+     if exclude_head:
+         # FIXME this is a bit of a quick and dirty hack to skip classifier head params based on ordering
+         return [p for p in model.parameters()][:-2]
+     else:
+         return model.parameters()
+
+
+ def named_apply(fn: Callable, module:
nn.Module, name='', depth_first=True, include_root=False) -> nn.Module: + if not depth_first and include_root: + fn(module=module, name=name) + for child_name, child_module in module.named_children(): + child_name = '.'.join((name, child_name)) if name else child_name + named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True) + if depth_first and include_root: + fn(module=module, name=name) + return module + + +def named_modules(module: nn.Module, name='', depth_first=True, include_root=False): + if not depth_first and include_root: + yield name, module + for child_name, child_module in module.named_children(): + child_name = '.'.join((name, child_name)) if name else child_name + yield from named_modules( + module=child_module, name=child_name, depth_first=depth_first, include_root=True) + if depth_first and include_root: + yield name, module diff --git a/timm/models/hrnet.py b/timm/models/hrnet.py new file mode 100644 index 0000000..c56964f --- /dev/null +++ b/timm/models/hrnet.py @@ -0,0 +1,836 @@ +""" HRNet + +Copied from https://github.com/HRNet/HRNet-Image-Classification + +Original header: + Copyright (c) Microsoft + Licensed under the MIT License. + Written by Bin Xiao (Bin.Xiao@microsoft.com) + Modified by Ke Sun (sunk@mail.ustc.edu.cn) +""" +import logging +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .features import FeatureInfo +from .helpers import build_model_with_cfg, default_cfg_for_features +from .layers import create_classifier +from .registry import register_model +from .resnet import BasicBlock, Bottleneck # leveraging ResNet blocks w/ additional features like SE + +_BN_MOMENTUM = 0.1 +_logger = logging.getLogger(__name__) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = { + 'hrnet_w18_small': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnet_w18_small_v1-f460c6bc.pth'), + 'hrnet_w18_small_v2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnet_w18_small_v2-4c50a8cb.pth'), + 'hrnet_w18': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w18-8cb57bb9.pth'), + 'hrnet_w30': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w30-8d7f8dab.pth'), + 'hrnet_w32': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w32-90d8c5fb.pth'), + 'hrnet_w40': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w40-7cd397a4.pth'), + 'hrnet_w44': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w44-c9ac8c18.pth'), + 'hrnet_w48': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w48-abd2e6ab.pth'), + 'hrnet_w64': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-hrnet/hrnetv2_w64-b47cc881.pth'), +} + +cfg_cls = dict( + hrnet_w18_small=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + 
NUM_BLOCKS=(1,), + NUM_CHANNELS=(32,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(2, 2), + NUM_CHANNELS=(16, 32), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=1, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(2, 2, 2), + NUM_CHANNELS=(16, 32, 64), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=1, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(2, 2, 2, 2), + NUM_CHANNELS=(16, 32, 64, 128), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w18_small_v2=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(2,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(2, 2), + NUM_CHANNELS=(18, 36), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=3, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(2, 2, 2), + NUM_CHANNELS=(18, 36, 72), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=2, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(2, 2, 2, 2), + NUM_CHANNELS=(18, 36, 72, 144), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w18=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(18, 36), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(18, 36, 72), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(18, 36, 72, 144), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w30=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(30, 60), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(30, 60, 120), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(30, 60, 120, 240), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w32=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(32, 64), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(32, 64, 128), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(32, 64, 128, 256), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w40=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(40, 80), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(40, 80, 160), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + 
NUM_CHANNELS=(40, 80, 160, 320), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w44=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(44, 88), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(44, 88, 176), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(44, 88, 176, 352), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w48=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(48, 96), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(48, 96, 192), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(48, 96, 192, 384), + FUSE_METHOD='SUM', + ), + ), + + hrnet_w64=dict( + STEM_WIDTH=64, + STAGE1=dict( + NUM_MODULES=1, + NUM_BRANCHES=1, + BLOCK='BOTTLENECK', + NUM_BLOCKS=(4,), + NUM_CHANNELS=(64,), + FUSE_METHOD='SUM', + ), + STAGE2=dict( + NUM_MODULES=1, + NUM_BRANCHES=2, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4), + NUM_CHANNELS=(64, 128), + FUSE_METHOD='SUM' + ), + STAGE3=dict( + NUM_MODULES=4, + NUM_BRANCHES=3, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4), + NUM_CHANNELS=(64, 128, 256), + FUSE_METHOD='SUM' + ), + STAGE4=dict( + NUM_MODULES=3, + NUM_BRANCHES=4, + BLOCK='BASIC', + NUM_BLOCKS=(4, 4, 4, 4), + NUM_CHANNELS=(64, 128, 256, 512), + FUSE_METHOD='SUM', + ), + ) +) + + +class HighResolutionModule(nn.Module): + def __init__(self, num_branches, blocks, num_blocks, num_inchannels, + num_channels, fuse_method, multi_scale_output=True): + super(HighResolutionModule, self).__init__() + self._check_branches( + num_branches, blocks, num_blocks, num_inchannels, num_channels) + + self.num_inchannels = num_inchannels + self.fuse_method = fuse_method + self.num_branches = num_branches + + self.multi_scale_output = multi_scale_output + + self.branches = self._make_branches( + num_branches, blocks, num_blocks, num_channels) + self.fuse_layers = self._make_fuse_layers() + self.fuse_act = nn.ReLU(False) + + def _check_branches(self, num_branches, blocks, num_blocks, num_inchannels, num_channels): + error_msg = '' + if num_branches != len(num_blocks): + error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(num_branches, len(num_blocks)) + elif num_branches != len(num_channels): + error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(num_branches, len(num_channels)) + elif num_branches != len(num_inchannels): + error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(num_branches, len(num_inchannels)) + if error_msg: + _logger.error(error_msg) + raise ValueError(error_msg) + + def _make_one_branch(self, branch_index, block, num_blocks, num_channels, stride=1): + downsample = None + if stride != 1 or self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion: + downsample = nn.Sequential( + nn.Conv2d( + self.num_inchannels[branch_index], num_channels[branch_index] * block.expansion, + kernel_size=1, stride=stride, bias=False), + 
nn.BatchNorm2d(num_channels[branch_index] * block.expansion, momentum=_BN_MOMENTUM), + ) + + layers = [block(self.num_inchannels[branch_index], num_channels[branch_index], stride, downsample)] + self.num_inchannels[branch_index] = num_channels[branch_index] * block.expansion + for i in range(1, num_blocks[branch_index]): + layers.append(block(self.num_inchannels[branch_index], num_channels[branch_index])) + + return nn.Sequential(*layers) + + def _make_branches(self, num_branches, block, num_blocks, num_channels): + branches = [] + for i in range(num_branches): + branches.append(self._make_one_branch(i, block, num_blocks, num_channels)) + + return nn.ModuleList(branches) + + def _make_fuse_layers(self): + if self.num_branches == 1: + return nn.Identity() + + num_branches = self.num_branches + num_inchannels = self.num_inchannels + fuse_layers = [] + for i in range(num_branches if self.multi_scale_output else 1): + fuse_layer = [] + for j in range(num_branches): + if j > i: + fuse_layer.append(nn.Sequential( + nn.Conv2d(num_inchannels[j], num_inchannels[i], 1, 1, 0, bias=False), + nn.BatchNorm2d(num_inchannels[i], momentum=_BN_MOMENTUM), + nn.Upsample(scale_factor=2 ** (j - i), mode='nearest'))) + elif j == i: + fuse_layer.append(nn.Identity()) + else: + conv3x3s = [] + for k in range(i - j): + if k == i - j - 1: + num_outchannels_conv3x3 = num_inchannels[i] + conv3x3s.append(nn.Sequential( + nn.Conv2d(num_inchannels[j], num_outchannels_conv3x3, 3, 2, 1, bias=False), + nn.BatchNorm2d(num_outchannels_conv3x3, momentum=_BN_MOMENTUM))) + else: + num_outchannels_conv3x3 = num_inchannels[j] + conv3x3s.append(nn.Sequential( + nn.Conv2d(num_inchannels[j], num_outchannels_conv3x3, 3, 2, 1, bias=False), + nn.BatchNorm2d(num_outchannels_conv3x3, momentum=_BN_MOMENTUM), + nn.ReLU(False))) + fuse_layer.append(nn.Sequential(*conv3x3s)) + fuse_layers.append(nn.ModuleList(fuse_layer)) + + return nn.ModuleList(fuse_layers) + + def get_num_inchannels(self): + return self.num_inchannels + + def forward(self, x: List[torch.Tensor]): + if self.num_branches == 1: + return [self.branches[0](x[0])] + + for i, branch in enumerate(self.branches): + x[i] = branch(x[i]) + + x_fuse = [] + for i, fuse_outer in enumerate(self.fuse_layers): + y = x[0] if i == 0 else fuse_outer[0](x[0]) + for j in range(1, self.num_branches): + if i == j: + y = y + x[j] + else: + y = y + fuse_outer[j](x[j]) + x_fuse.append(self.fuse_act(y)) + + return x_fuse + + +blocks_dict = { + 'BASIC': BasicBlock, + 'BOTTLENECK': Bottleneck +} + + +class HighResolutionNet(nn.Module): + + def __init__(self, cfg, in_chans=3, num_classes=1000, global_pool='avg', drop_rate=0.0, head='classification'): + super(HighResolutionNet, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + + stem_width = cfg['STEM_WIDTH'] + self.conv1 = nn.Conv2d(in_chans, stem_width, kernel_size=3, stride=2, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(stem_width, momentum=_BN_MOMENTUM) + self.act1 = nn.ReLU(inplace=True) + self.conv2 = nn.Conv2d(stem_width, 64, kernel_size=3, stride=2, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(64, momentum=_BN_MOMENTUM) + self.act2 = nn.ReLU(inplace=True) + + self.stage1_cfg = cfg['STAGE1'] + num_channels = self.stage1_cfg['NUM_CHANNELS'][0] + block = blocks_dict[self.stage1_cfg['BLOCK']] + num_blocks = self.stage1_cfg['NUM_BLOCKS'][0] + self.layer1 = self._make_layer(block, 64, num_channels, num_blocks) + stage1_out_channel = block.expansion * num_channels + + self.stage2_cfg = cfg['STAGE2'] + 
num_channels = self.stage2_cfg['NUM_CHANNELS'] + block = blocks_dict[self.stage2_cfg['BLOCK']] + num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))] + self.transition1 = self._make_transition_layer([stage1_out_channel], num_channels) + self.stage2, pre_stage_channels = self._make_stage(self.stage2_cfg, num_channels) + + self.stage3_cfg = cfg['STAGE3'] + num_channels = self.stage3_cfg['NUM_CHANNELS'] + block = blocks_dict[self.stage3_cfg['BLOCK']] + num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))] + self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels) + self.stage3, pre_stage_channels = self._make_stage(self.stage3_cfg, num_channels) + + self.stage4_cfg = cfg['STAGE4'] + num_channels = self.stage4_cfg['NUM_CHANNELS'] + block = blocks_dict[self.stage4_cfg['BLOCK']] + num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))] + self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels) + self.stage4, pre_stage_channels = self._make_stage(self.stage4_cfg, num_channels, multi_scale_output=True) + + self.head = head + self.head_channels = None # set if _make_head called + if head == 'classification': + # Classification Head + self.num_features = 2048 + self.incre_modules, self.downsamp_modules, self.final_layer = self._make_head(pre_stage_channels) + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + elif head == 'incre': + self.num_features = 2048 + self.incre_modules, _, _ = self._make_head(pre_stage_channels, True) + else: + self.incre_modules = None + self.num_features = 256 + + curr_stride = 2 + # module names aren't actually valid here, hook or FeatureNet based extraction would not work + self.feature_info = [dict(num_chs=64, reduction=curr_stride, module='stem')] + for i, c in enumerate(self.head_channels if self.head_channels else num_channels): + curr_stride *= 2 + c = c * 4 if self.head_channels else c # head block expansion factor of 4 + self.feature_info += [dict(num_chs=c, reduction=curr_stride, module=f'stage{i + 1}')] + + self.init_weights() + + def _make_head(self, pre_stage_channels, incre_only=False): + head_block = Bottleneck + self.head_channels = [32, 64, 128, 256] + + # Increasing the #channels on each resolution + # from C, 2C, 4C, 8C to 128, 256, 512, 1024 + incre_modules = [] + for i, channels in enumerate(pre_stage_channels): + incre_modules.append(self._make_layer(head_block, channels, self.head_channels[i], 1, stride=1)) + incre_modules = nn.ModuleList(incre_modules) + if incre_only: + return incre_modules, None, None + + # downsampling modules + downsamp_modules = [] + for i in range(len(pre_stage_channels) - 1): + in_channels = self.head_channels[i] * head_block.expansion + out_channels = self.head_channels[i + 1] * head_block.expansion + downsamp_module = nn.Sequential( + nn.Conv2d( + in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=1), + nn.BatchNorm2d(out_channels, momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True) + ) + downsamp_modules.append(downsamp_module) + downsamp_modules = nn.ModuleList(downsamp_modules) + + final_layer = nn.Sequential( + nn.Conv2d( + in_channels=self.head_channels[3] * head_block.expansion, + out_channels=self.num_features, kernel_size=1, stride=1, padding=0 + ), + nn.BatchNorm2d(self.num_features, momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True) + ) + + return incre_modules, downsamp_modules, 
final_layer + + def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer): + num_branches_cur = len(num_channels_cur_layer) + num_branches_pre = len(num_channels_pre_layer) + + transition_layers = [] + for i in range(num_branches_cur): + if i < num_branches_pre: + if num_channels_cur_layer[i] != num_channels_pre_layer[i]: + transition_layers.append(nn.Sequential( + nn.Conv2d(num_channels_pre_layer[i], num_channels_cur_layer[i], 3, 1, 1, bias=False), + nn.BatchNorm2d(num_channels_cur_layer[i], momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True))) + else: + transition_layers.append(nn.Identity()) + else: + conv3x3s = [] + for j in range(i + 1 - num_branches_pre): + inchannels = num_channels_pre_layer[-1] + outchannels = num_channels_cur_layer[i] if j == i - num_branches_pre else inchannels + conv3x3s.append(nn.Sequential( + nn.Conv2d(inchannels, outchannels, 3, 2, 1, bias=False), + nn.BatchNorm2d(outchannels, momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True))) + transition_layers.append(nn.Sequential(*conv3x3s)) + + return nn.ModuleList(transition_layers) + + def _make_layer(self, block, inplanes, planes, blocks, stride=1): + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(planes * block.expansion, momentum=_BN_MOMENTUM), + ) + + layers = [block(inplanes, planes, stride, downsample)] + inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(inplanes, planes)) + + return nn.Sequential(*layers) + + def _make_stage(self, layer_config, num_inchannels, multi_scale_output=True): + num_modules = layer_config['NUM_MODULES'] + num_branches = layer_config['NUM_BRANCHES'] + num_blocks = layer_config['NUM_BLOCKS'] + num_channels = layer_config['NUM_CHANNELS'] + block = blocks_dict[layer_config['BLOCK']] + fuse_method = layer_config['FUSE_METHOD'] + + modules = [] + for i in range(num_modules): + # multi_scale_output is only used last module + reset_multi_scale_output = multi_scale_output or i < num_modules - 1 + modules.append(HighResolutionModule( + num_branches, block, num_blocks, num_inchannels, num_channels, fuse_method, reset_multi_scale_output) + ) + num_inchannels = modules[-1].get_num_inchannels() + + return nn.Sequential(*modules), num_inchannels + + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_( + m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.classifier = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def stages(self, x) -> List[torch.Tensor]: + x = self.layer1(x) + + xl = [t(x) for i, t in enumerate(self.transition1)] + yl = self.stage2(xl) + + xl = [t(yl[-1]) if not isinstance(t, nn.Identity) else yl[i] for i, t in enumerate(self.transition2)] + yl = self.stage3(xl) + + xl = [t(yl[-1]) if not isinstance(t, nn.Identity) else yl[i] for i, t in enumerate(self.transition3)] + yl = self.stage4(xl) + return yl + + def forward_features(self, x): + # Stem + x = self.conv1(x) + x = self.bn1(x) + x = self.act1(x) + x = self.conv2(x) + x = self.bn2(x) + x = self.act2(x) + + # Stages + yl = self.stages(x) + + # 
Classification Head
+         y = self.incre_modules[0](yl[0])
+         for i, down in enumerate(self.downsamp_modules):
+             y = self.incre_modules[i + 1](yl[i + 1]) + down(y)
+         y = self.final_layer(y)
+         return y
+
+     def forward(self, x):
+         x = self.forward_features(x)
+         x = self.global_pool(x)
+         if self.drop_rate > 0.:
+             x = F.dropout(x, p=self.drop_rate, training=self.training)
+         x = self.classifier(x)
+         return x
+
+
+ class HighResolutionNetFeatures(HighResolutionNet):
+     """HighResolutionNet feature extraction
+
+     The design of HRNet makes it easy to grab feature maps; this class provides a simple wrapper to do so.
+     It would be more complicated to use the FeatureNet helpers.
+
+     The `feature_location=incre` allows grabbing increased channel count features using part of the
+     classification head. If `feature_location=''` the default HRNet features are returned. First stem
+     conv is used for stride 2 features.
+     """
+
+     def __init__(self, cfg, in_chans=3, num_classes=1000, global_pool='avg', drop_rate=0.0,
+                  feature_location='incre', out_indices=(0, 1, 2, 3, 4)):
+         assert feature_location in ('incre', '')
+         super(HighResolutionNetFeatures, self).__init__(
+             cfg, in_chans=in_chans, num_classes=num_classes, global_pool=global_pool,
+             drop_rate=drop_rate, head=feature_location)
+         self.feature_info = FeatureInfo(self.feature_info, out_indices)
+         self._out_idx = {i for i in out_indices}
+
+     def forward_features(self, x):
+         assert False, 'Not supported'
+
+     def forward(self, x) -> List[torch.Tensor]:
+         out = []
+         x = self.conv1(x)
+         x = self.bn1(x)
+         x = self.act1(x)
+         if 0 in self._out_idx:
+             out.append(x)
+         x = self.conv2(x)
+         x = self.bn2(x)
+         x = self.act2(x)
+         x = self.stages(x)
+         if self.incre_modules is not None:
+             x = [incre(f) for f, incre in zip(x, self.incre_modules)]
+         for i, f in enumerate(x):
+             if i + 1 in self._out_idx:
+                 out.append(f)
+         return out
+
+
+ def _create_hrnet(variant, pretrained, **model_kwargs):
+     model_cls = HighResolutionNet
+     features_only = False
+     kwargs_filter = None
+     if model_kwargs.pop('features_only', False):
+         model_cls = HighResolutionNetFeatures
+         kwargs_filter = ('num_classes', 'global_pool')
+         features_only = True
+     model = build_model_with_cfg(
+         model_cls, variant, pretrained,
+         default_cfg=default_cfgs[variant],
+         model_cfg=cfg_cls[variant],
+         pretrained_strict=not features_only,
+         kwargs_filter=kwargs_filter,
+         **model_kwargs)
+     if features_only:
+         model.default_cfg = default_cfg_for_features(model.default_cfg)
+     return model
+
+
+ @register_model
+ def hrnet_w18_small(pretrained=True, **kwargs):
+     return _create_hrnet('hrnet_w18_small', pretrained, **kwargs)
+
+
+ @register_model
+ def hrnet_w18_small_v2(pretrained=True, **kwargs):
+     return _create_hrnet('hrnet_w18_small_v2', pretrained, **kwargs)
+
+
+ @register_model
+ def hrnet_w18(pretrained=True, **kwargs):
+     return _create_hrnet('hrnet_w18', pretrained, **kwargs)
+
+
+ @register_model
+ def hrnet_w30(pretrained=True, **kwargs):
+     return _create_hrnet('hrnet_w30', pretrained, **kwargs)
+
+
+ @register_model
+ def hrnet_w32(pretrained=True, **kwargs):
+     return _create_hrnet('hrnet_w32', pretrained, **kwargs)
+
+
+ @register_model
+ def hrnet_w40(pretrained=True, **kwargs):
+     return _create_hrnet('hrnet_w40', pretrained, **kwargs)
+
+
+ @register_model
+ def hrnet_w44(pretrained=True, **kwargs):
+     return _create_hrnet('hrnet_w44', pretrained, **kwargs)
+
+
+ @register_model
+ def hrnet_w48(pretrained=True, **kwargs):
+     return _create_hrnet('hrnet_w48', pretrained, **kwargs)
+
+
+ @register_model
+ def hrnet_w64(pretrained=True,
**kwargs): + return _create_hrnet('hrnet_w64', pretrained, **kwargs) diff --git a/timm/models/hub.py b/timm/models/hub.py new file mode 100644 index 0000000..65e7ba9 --- /dev/null +++ b/timm/models/hub.py @@ -0,0 +1,171 @@ +import json +import logging +import os +from functools import partial +from pathlib import Path +from typing import Union + +import torch +from torch.hub import HASH_REGEX, download_url_to_file, urlparse +try: + from torch.hub import get_dir +except ImportError: + from torch.hub import _get_torch_home as get_dir + +from timm import __version__ +try: + from huggingface_hub import HfApi, HfFolder, Repository, cached_download, hf_hub_url + cached_download = partial(cached_download, library_name="timm", library_version=__version__) + _has_hf_hub = True +except ImportError: + cached_download = None + _has_hf_hub = False + +_logger = logging.getLogger(__name__) + + +def get_cache_dir(child_dir=''): + """ + Returns the location of the directory where models are cached (and creates it if necessary). + """ + # Issue warning to move data if old env is set + if os.getenv('TORCH_MODEL_ZOO'): + _logger.warning('TORCH_MODEL_ZOO is deprecated, please use env TORCH_HOME instead') + + hub_dir = get_dir() + child_dir = () if not child_dir else (child_dir,) + model_dir = os.path.join(hub_dir, 'checkpoints', *child_dir) + os.makedirs(model_dir, exist_ok=True) + return model_dir + + +def download_cached_file(url, check_hash=True, progress=False): + parts = urlparse(url) + filename = os.path.basename(parts.path) + cached_file = os.path.join(get_cache_dir(), filename) + if not os.path.exists(cached_file): + _logger.info('Downloading: "{}" to {}\n'.format(url, cached_file)) + hash_prefix = None + if check_hash: + r = HASH_REGEX.search(filename) # r is Optional[Match[str]] + hash_prefix = r.group(1) if r else None + download_url_to_file(url, cached_file, hash_prefix, progress=progress) + return cached_file + + +def has_hf_hub(necessary=False): + if not _has_hf_hub and necessary: + # if no HF Hub module installed and it is necessary to continue, raise error + raise RuntimeError( + 'Hugging Face hub model specified but package not installed. Run `pip install huggingface_hub`.') + return _has_hf_hub + + +def hf_split(hf_id): + rev_split = hf_id.split('@') + assert 0 < len(rev_split) <= 2, 'hf_hub id should only contain one @ character to identify revision.' 
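+     # e.g. a hypothetical id 'myorg/mymodel@main' yields ('myorg/mymodel', 'main');
+     # with no '@' present the revision below defaults to None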
+ hf_model_id = rev_split[0] + hf_revision = rev_split[-1] if len(rev_split) > 1 else None + return hf_model_id, hf_revision + + +def load_cfg_from_json(json_file: Union[str, os.PathLike]): + with open(json_file, "r", encoding="utf-8") as reader: + text = reader.read() + return json.loads(text) + + +def _download_from_hf(model_id: str, filename: str): + hf_model_id, hf_revision = hf_split(model_id) + url = hf_hub_url(hf_model_id, filename, revision=hf_revision) + return cached_download(url, cache_dir=get_cache_dir('hf')) + + +def load_model_config_from_hf(model_id: str): + assert has_hf_hub(True) + cached_file = _download_from_hf(model_id, 'config.json') + default_cfg = load_cfg_from_json(cached_file) + default_cfg['hf_hub'] = model_id # insert hf_hub id for pretrained weight load during model creation + model_name = default_cfg.get('architecture') + return default_cfg, model_name + + +def load_state_dict_from_hf(model_id: str): + assert has_hf_hub(True) + cached_file = _download_from_hf(model_id, 'pytorch_model.bin') + state_dict = torch.load(cached_file, map_location='cpu') + return state_dict + + +def save_for_hf(model, save_directory, model_config=None): + assert has_hf_hub(True) + model_config = model_config or {} + save_directory = Path(save_directory) + save_directory.mkdir(exist_ok=True, parents=True) + + weights_path = save_directory / 'pytorch_model.bin' + torch.save(model.state_dict(), weights_path) + + config_path = save_directory / 'config.json' + hf_config = model.default_cfg + hf_config['num_classes'] = model_config.pop('num_classes', model.num_classes) + hf_config['num_features'] = model_config.pop('num_features', model.num_features) + hf_config['labels'] = model_config.pop('labels', [f"LABEL_{i}" for i in range(hf_config['num_classes'])]) + hf_config.update(model_config) + + with config_path.open('w') as f: + json.dump(hf_config, f, indent=2) + + +def push_to_hf_hub( + model, + local_dir, + repo_namespace_or_url=None, + commit_message='Add model', + use_auth_token=True, + git_email=None, + git_user=None, + revision=None, + model_config=None, +): + if repo_namespace_or_url: + repo_owner, repo_name = repo_namespace_or_url.rstrip('/').split('/')[-2:] + else: + if isinstance(use_auth_token, str): + token = use_auth_token + else: + token = HfFolder.get_token() + + if token is None: + raise ValueError( + "You must login to the Hugging Face hub on this computer by typing `transformers-cli login` and " + "entering your credentials to use `use_auth_token=True`. Alternatively, you can pass your own " + "token as the `use_auth_token` argument." + ) + + repo_owner = HfApi().whoami(token)['name'] + repo_name = Path(local_dir).name + + repo_url = f'https://huggingface.co/{repo_owner}/{repo_name}' + + repo = Repository( + local_dir, + clone_from=repo_url, + use_auth_token=use_auth_token, + git_user=git_user, + git_email=git_email, + revision=revision, + ) + + # Prepare a default model card that includes the necessary tags to enable inference. + readme_text = f'---\ntags:\n- image-classification\n- timm\nlibrary_tag: timm\n---\n# Model card for {repo_name}' + with repo.commit(commit_message): + # Save model weights and config. + save_for_hf(model, repo.local_dir, model_config=model_config) + + # Save a model card if it doesn't exist. 
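+         # (the default card above carries only the tags needed for hub inference/discovery;
+         # an existing README.md in the repo is never overwritten)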
+ readme_path = Path(repo.local_dir) / 'README.md' + if not readme_path.exists(): + readme_path.write_text(readme_text) + + return repo.git_remote_url() diff --git a/timm/models/inception_resnet_v2.py b/timm/models/inception_resnet_v2.py new file mode 100644 index 0000000..7167284 --- /dev/null +++ b/timm/models/inception_resnet_v2.py @@ -0,0 +1,358 @@ +""" Pytorch Inception-Resnet-V2 implementation +Sourced from https://github.com/Cadene/tensorflow-model-zoo.torch (MIT License) which is +based upon Google's Tensorflow implementation and pretrained weights (Apache 2.0 License) +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .helpers import build_model_with_cfg +from .layers import create_classifier +from .registry import register_model + +__all__ = ['InceptionResnetV2'] + +default_cfgs = { + # ported from http://download.tensorflow.org/models/inception_resnet_v2_2016_08_30.tar.gz + 'inception_resnet_v2': { + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/inception_resnet_v2-940b1cd6.pth', + 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), + 'crop_pct': 0.8975, 'interpolation': 'bicubic', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'conv2d_1a.conv', 'classifier': 'classif', + 'label_offset': 1, # 1001 classes in pretrained weights + }, + # ported from http://download.tensorflow.org/models/ens_adv_inception_resnet_v2_2017_08_18.tar.gz + 'ens_adv_inception_resnet_v2': { + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ens_adv_inception_resnet_v2-2592a550.pth', + 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), + 'crop_pct': 0.8975, 'interpolation': 'bicubic', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'conv2d_1a.conv', 'classifier': 'classif', + 'label_offset': 1, # 1001 classes in pretrained weights + } +} + + +class BasicConv2d(nn.Module): + def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0): + super(BasicConv2d, self).__init__() + self.conv = nn.Conv2d( + in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=False) + self.bn = nn.BatchNorm2d(out_planes, eps=.001) + self.relu = nn.ReLU(inplace=False) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + return x + + +class Mixed_5b(nn.Module): + def __init__(self): + super(Mixed_5b, self).__init__() + + self.branch0 = BasicConv2d(192, 96, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(192, 48, kernel_size=1, stride=1), + BasicConv2d(48, 64, kernel_size=5, stride=1, padding=2) + ) + + self.branch2 = nn.Sequential( + BasicConv2d(192, 64, kernel_size=1, stride=1), + BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1), + BasicConv2d(96, 96, kernel_size=3, stride=1, padding=1) + ) + + self.branch3 = nn.Sequential( + nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), + BasicConv2d(192, 64, kernel_size=1, stride=1) + ) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + x3 = self.branch3(x) + out = torch.cat((x0, x1, x2, x3), 1) + return out + + +class Block35(nn.Module): + def __init__(self, scale=1.0): + super(Block35, self).__init__() + + self.scale = scale + + self.branch0 = BasicConv2d(320, 32, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(320, 
32, kernel_size=1, stride=1), + BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1) + ) + + self.branch2 = nn.Sequential( + BasicConv2d(320, 32, kernel_size=1, stride=1), + BasicConv2d(32, 48, kernel_size=3, stride=1, padding=1), + BasicConv2d(48, 64, kernel_size=3, stride=1, padding=1) + ) + + self.conv2d = nn.Conv2d(128, 320, kernel_size=1, stride=1) + self.relu = nn.ReLU(inplace=False) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + out = torch.cat((x0, x1, x2), 1) + out = self.conv2d(out) + out = out * self.scale + x + out = self.relu(out) + return out + + +class Mixed_6a(nn.Module): + def __init__(self): + super(Mixed_6a, self).__init__() + + self.branch0 = BasicConv2d(320, 384, kernel_size=3, stride=2) + + self.branch1 = nn.Sequential( + BasicConv2d(320, 256, kernel_size=1, stride=1), + BasicConv2d(256, 256, kernel_size=3, stride=1, padding=1), + BasicConv2d(256, 384, kernel_size=3, stride=2) + ) + + self.branch2 = nn.MaxPool2d(3, stride=2) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + out = torch.cat((x0, x1, x2), 1) + return out + + +class Block17(nn.Module): + def __init__(self, scale=1.0): + super(Block17, self).__init__() + + self.scale = scale + + self.branch0 = BasicConv2d(1088, 192, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(1088, 128, kernel_size=1, stride=1), + BasicConv2d(128, 160, kernel_size=(1, 7), stride=1, padding=(0, 3)), + BasicConv2d(160, 192, kernel_size=(7, 1), stride=1, padding=(3, 0)) + ) + + self.conv2d = nn.Conv2d(384, 1088, kernel_size=1, stride=1) + self.relu = nn.ReLU(inplace=False) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + out = torch.cat((x0, x1), 1) + out = self.conv2d(out) + out = out * self.scale + x + out = self.relu(out) + return out + + +class Mixed_7a(nn.Module): + def __init__(self): + super(Mixed_7a, self).__init__() + + self.branch0 = nn.Sequential( + BasicConv2d(1088, 256, kernel_size=1, stride=1), + BasicConv2d(256, 384, kernel_size=3, stride=2) + ) + + self.branch1 = nn.Sequential( + BasicConv2d(1088, 256, kernel_size=1, stride=1), + BasicConv2d(256, 288, kernel_size=3, stride=2) + ) + + self.branch2 = nn.Sequential( + BasicConv2d(1088, 256, kernel_size=1, stride=1), + BasicConv2d(256, 288, kernel_size=3, stride=1, padding=1), + BasicConv2d(288, 320, kernel_size=3, stride=2) + ) + + self.branch3 = nn.MaxPool2d(3, stride=2) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + x2 = self.branch2(x) + x3 = self.branch3(x) + out = torch.cat((x0, x1, x2, x3), 1) + return out + + +class Block8(nn.Module): + + def __init__(self, scale=1.0, no_relu=False): + super(Block8, self).__init__() + + self.scale = scale + + self.branch0 = BasicConv2d(2080, 192, kernel_size=1, stride=1) + + self.branch1 = nn.Sequential( + BasicConv2d(2080, 192, kernel_size=1, stride=1), + BasicConv2d(192, 224, kernel_size=(1, 3), stride=1, padding=(0, 1)), + BasicConv2d(224, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)) + ) + + self.conv2d = nn.Conv2d(448, 2080, kernel_size=1, stride=1) + self.relu = None if no_relu else nn.ReLU(inplace=False) + + def forward(self, x): + x0 = self.branch0(x) + x1 = self.branch1(x) + out = torch.cat((x0, x1), 1) + out = self.conv2d(out) + out = out * self.scale + x + if self.relu is not None: + out = self.relu(out) + return out + + +class InceptionResnetV2(nn.Module): + def __init__(self, num_classes=1000, in_chans=3, drop_rate=0., output_stride=32, 
global_pool='avg'): + super(InceptionResnetV2, self).__init__() + self.drop_rate = drop_rate + self.num_classes = num_classes + self.num_features = 1536 + assert output_stride == 32 + + self.conv2d_1a = BasicConv2d(in_chans, 32, kernel_size=3, stride=2) + self.conv2d_2a = BasicConv2d(32, 32, kernel_size=3, stride=1) + self.conv2d_2b = BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1) + self.feature_info = [dict(num_chs=64, reduction=2, module='conv2d_2b')] + + self.maxpool_3a = nn.MaxPool2d(3, stride=2) + self.conv2d_3b = BasicConv2d(64, 80, kernel_size=1, stride=1) + self.conv2d_4a = BasicConv2d(80, 192, kernel_size=3, stride=1) + self.feature_info += [dict(num_chs=192, reduction=4, module='conv2d_4a')] + + self.maxpool_5a = nn.MaxPool2d(3, stride=2) + self.mixed_5b = Mixed_5b() + self.repeat = nn.Sequential( + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17), + Block35(scale=0.17) + ) + self.feature_info += [dict(num_chs=320, reduction=8, module='repeat')] + + self.mixed_6a = Mixed_6a() + self.repeat_1 = nn.Sequential( + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10), + Block17(scale=0.10) + ) + self.feature_info += [dict(num_chs=1088, reduction=16, module='repeat_1')] + + self.mixed_7a = Mixed_7a() + self.repeat_2 = nn.Sequential( + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20), + Block8(scale=0.20) + ) + self.block8 = Block8(no_relu=True) + self.conv2d_7b = BasicConv2d(2080, self.num_features, kernel_size=1, stride=1) + self.feature_info += [dict(num_chs=self.num_features, reduction=32, module='conv2d_7b')] + + self.global_pool, self.classif = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def get_classifier(self): + return self.classif + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.classif = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.conv2d_1a(x) + x = self.conv2d_2a(x) + x = self.conv2d_2b(x) + x = self.maxpool_3a(x) + x = self.conv2d_3b(x) + x = self.conv2d_4a(x) + x = self.maxpool_5a(x) + x = self.mixed_5b(x) + x = self.repeat(x) + x = self.mixed_6a(x) + x = self.repeat_1(x) + x = self.mixed_7a(x) + x = self.repeat_2(x) + x = self.block8(x) + x = self.conv2d_7b(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.classif(x) + return x + + +def _create_inception_resnet_v2(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + InceptionResnetV2, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + + +@register_model +def inception_resnet_v2(pretrained=False, **kwargs): + r"""InceptionResnetV2 model architecture from the + `"InceptionV4, Inception-ResNet..." 
` paper. + """ + return _create_inception_resnet_v2('inception_resnet_v2', pretrained=pretrained, **kwargs) + + +@register_model +def ens_adv_inception_resnet_v2(pretrained=False, **kwargs): + r""" Ensemble Adversarially trained InceptionResnetV2 model architecture + As per https://arxiv.org/abs/1705.07204 and + https://github.com/tensorflow/models/tree/master/research/adv_imagenet_models. + """ + return _create_inception_resnet_v2('ens_adv_inception_resnet_v2', pretrained=pretrained, **kwargs) diff --git a/timm/models/inception_v3.py b/timm/models/inception_v3.py new file mode 100644 index 0000000..cbb1107 --- /dev/null +++ b/timm/models/inception_v3.py @@ -0,0 +1,470 @@ +""" Inception-V3 + +Originally from torchvision Inception3 model +Licensed BSD-Clause 3 https://github.com/pytorch/vision/blob/master/LICENSE +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_STD, IMAGENET_DEFAULT_MEAN, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .helpers import build_model_with_cfg +from .registry import register_model +from .layers import trunc_normal_, create_classifier, Linear + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'Conv2d_1a_3x3.conv', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + # original PyTorch weights, ported from Tensorflow but modified + 'inception_v3': _cfg( + url='https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth', + has_aux=True), # checkpoint has aux logit layer weights + # my port of Tensorflow SLIM weights (http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz) + 'tf_inception_v3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_inception_v3-e0069de4.pth', + num_classes=1000, has_aux=False, label_offset=1), + # my port of Tensorflow adversarially trained Inception V3 from + # http://download.tensorflow.org/models/adv_inception_v3_2017_08_18.tar.gz + 'adv_inception_v3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/adv_inception_v3-9e27bd63.pth', + num_classes=1000, has_aux=False, label_offset=1), + # from gluon pretrained models, best performing in terms of accuracy/loss metrics + # https://gluon-cv.mxnet.io/model_zoo/classification.html + 'gluon_inception_v3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_inception_v3-9f746940.pth', + mean=IMAGENET_DEFAULT_MEAN, # also works well with inception defaults + std=IMAGENET_DEFAULT_STD, # also works well with inception defaults + has_aux=False, + ) +} + + +class InceptionA(nn.Module): + + def __init__(self, in_channels, pool_features, conv_block=None): + super(InceptionA, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch1x1 = conv_block(in_channels, 64, kernel_size=1) + + self.branch5x5_1 = conv_block(in_channels, 48, kernel_size=1) + self.branch5x5_2 = conv_block(48, 64, kernel_size=5, padding=2) + + self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1) + self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1) + self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, padding=1) + + self.branch_pool = conv_block(in_channels, pool_features, kernel_size=1) + + def _forward(self, x): + branch1x1 = 
self.branch1x1(x) + + branch5x5 = self.branch5x5_1(x) + branch5x5 = self.branch5x5_2(branch5x5) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) + + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionB(nn.Module): + + def __init__(self, in_channels, conv_block=None): + super(InceptionB, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch3x3 = conv_block(in_channels, 384, kernel_size=3, stride=2) + + self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1) + self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1) + self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, stride=2) + + def _forward(self, x): + branch3x3 = self.branch3x3(x) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) + + branch_pool = F.max_pool2d(x, kernel_size=3, stride=2) + + outputs = [branch3x3, branch3x3dbl, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionC(nn.Module): + + def __init__(self, in_channels, channels_7x7, conv_block=None): + super(InceptionC, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch1x1 = conv_block(in_channels, 192, kernel_size=1) + + c7 = channels_7x7 + self.branch7x7_1 = conv_block(in_channels, c7, kernel_size=1) + self.branch7x7_2 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3)) + self.branch7x7_3 = conv_block(c7, 192, kernel_size=(7, 1), padding=(3, 0)) + + self.branch7x7dbl_1 = conv_block(in_channels, c7, kernel_size=1) + self.branch7x7dbl_2 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0)) + self.branch7x7dbl_3 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3)) + self.branch7x7dbl_4 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0)) + self.branch7x7dbl_5 = conv_block(c7, 192, kernel_size=(1, 7), padding=(0, 3)) + + self.branch_pool = conv_block(in_channels, 192, kernel_size=1) + + def _forward(self, x): + branch1x1 = self.branch1x1(x) + + branch7x7 = self.branch7x7_1(x) + branch7x7 = self.branch7x7_2(branch7x7) + branch7x7 = self.branch7x7_3(branch7x7) + + branch7x7dbl = self.branch7x7dbl_1(x) + branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl) + + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionD(nn.Module): + + def __init__(self, in_channels, conv_block=None): + super(InceptionD, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch3x3_1 = conv_block(in_channels, 192, kernel_size=1) + self.branch3x3_2 = conv_block(192, 320, kernel_size=3, stride=2) + + self.branch7x7x3_1 = conv_block(in_channels, 192, kernel_size=1) + self.branch7x7x3_2 = conv_block(192, 192, kernel_size=(1, 7), padding=(0, 3)) + self.branch7x7x3_3 = conv_block(192, 192, 
kernel_size=(7, 1), padding=(3, 0))
+         self.branch7x7x3_4 = conv_block(192, 192, kernel_size=3, stride=2)
+
+     def _forward(self, x):
+         branch3x3 = self.branch3x3_1(x)
+         branch3x3 = self.branch3x3_2(branch3x3)
+
+         branch7x7x3 = self.branch7x7x3_1(x)
+         branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
+         branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
+         branch7x7x3 = self.branch7x7x3_4(branch7x7x3)
+
+         branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
+         outputs = [branch3x3, branch7x7x3, branch_pool]
+         return outputs
+
+     def forward(self, x):
+         outputs = self._forward(x)
+         return torch.cat(outputs, 1)
+
+
+ class InceptionE(nn.Module):
+
+     def __init__(self, in_channels, conv_block=None):
+         super(InceptionE, self).__init__()
+         if conv_block is None:
+             conv_block = BasicConv2d
+         self.branch1x1 = conv_block(in_channels, 320, kernel_size=1)
+
+         self.branch3x3_1 = conv_block(in_channels, 384, kernel_size=1)
+         self.branch3x3_2a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1))
+         self.branch3x3_2b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0))
+
+         self.branch3x3dbl_1 = conv_block(in_channels, 448, kernel_size=1)
+         self.branch3x3dbl_2 = conv_block(448, 384, kernel_size=3, padding=1)
+         self.branch3x3dbl_3a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1))
+         self.branch3x3dbl_3b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0))
+
+         self.branch_pool = conv_block(in_channels, 192, kernel_size=1)
+
+     def _forward(self, x):
+         branch1x1 = self.branch1x1(x)
+
+         branch3x3 = self.branch3x3_1(x)
+         branch3x3 = [
+             self.branch3x3_2a(branch3x3),
+             self.branch3x3_2b(branch3x3),
+         ]
+         branch3x3 = torch.cat(branch3x3, 1)
+
+         branch3x3dbl = self.branch3x3dbl_1(x)
+         branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
+         branch3x3dbl = [
+             self.branch3x3dbl_3a(branch3x3dbl),
+             self.branch3x3dbl_3b(branch3x3dbl),
+         ]
+         branch3x3dbl = torch.cat(branch3x3dbl, 1)
+
+         branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
+         branch_pool = self.branch_pool(branch_pool)
+
+         outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
+         return outputs
+
+     def forward(self, x):
+         outputs = self._forward(x)
+         return torch.cat(outputs, 1)
+
+
+ class InceptionAux(nn.Module):
+
+     def __init__(self, in_channels, num_classes, conv_block=None):
+         super(InceptionAux, self).__init__()
+         if conv_block is None:
+             conv_block = BasicConv2d
+         self.conv0 = conv_block(in_channels, 128, kernel_size=1)
+         self.conv1 = conv_block(128, 768, kernel_size=5)
+         self.conv1.stddev = 0.01
+         self.fc = Linear(768, num_classes)
+         self.fc.stddev = 0.001
+
+     def forward(self, x):
+         # N x 768 x 17 x 17
+         x = F.avg_pool2d(x, kernel_size=5, stride=3)
+         # N x 768 x 5 x 5
+         x = self.conv0(x)
+         # N x 128 x 5 x 5
+         x = self.conv1(x)
+         # N x 768 x 1 x 1
+         # Adaptive average pooling
+         x = F.adaptive_avg_pool2d(x, (1, 1))
+         # N x 768 x 1 x 1
+         x = torch.flatten(x, 1)
+         # N x 768
+         x = self.fc(x)
+         # N x 1000
+         return x
+
+
+ class BasicConv2d(nn.Module):
+
+     def __init__(self, in_channels, out_channels, **kwargs):
+         super(BasicConv2d, self).__init__()
+         self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
+         self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
+
+     def forward(self, x):
+         x = self.conv(x)
+         x = self.bn(x)
+         return F.relu(x, inplace=True)
+
+
+ class InceptionV3(nn.Module):
+     """Inception-V3 with no AuxLogits
+     FIXME two class defs are redundant, but less screwing around with torchscript fussiness and inconsistent returns
+     """
+
+     def __init__(self, num_classes=1000, in_chans=3, drop_rate=0.,
global_pool='avg', aux_logits=False): + super(InceptionV3, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + self.aux_logits = aux_logits + + self.Conv2d_1a_3x3 = BasicConv2d(in_chans, 32, kernel_size=3, stride=2) + self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3) + self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1) + self.Pool1 = nn.MaxPool2d(kernel_size=3, stride=2) + self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1) + self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3) + self.Pool2 = nn.MaxPool2d(kernel_size=3, stride=2) + self.Mixed_5b = InceptionA(192, pool_features=32) + self.Mixed_5c = InceptionA(256, pool_features=64) + self.Mixed_5d = InceptionA(288, pool_features=64) + self.Mixed_6a = InceptionB(288) + self.Mixed_6b = InceptionC(768, channels_7x7=128) + self.Mixed_6c = InceptionC(768, channels_7x7=160) + self.Mixed_6d = InceptionC(768, channels_7x7=160) + self.Mixed_6e = InceptionC(768, channels_7x7=192) + if aux_logits: + self.AuxLogits = InceptionAux(768, num_classes) + else: + self.AuxLogits = None + self.Mixed_7a = InceptionD(768) + self.Mixed_7b = InceptionE(1280) + self.Mixed_7c = InceptionE(2048) + self.feature_info = [ + dict(num_chs=64, reduction=2, module='Conv2d_2b_3x3'), + dict(num_chs=192, reduction=4, module='Conv2d_4a_3x3'), + dict(num_chs=288, reduction=8, module='Mixed_5d'), + dict(num_chs=768, reduction=16, module='Mixed_6e'), + dict(num_chs=2048, reduction=32, module='Mixed_7c'), + ] + + self.num_features = 2048 + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + for m in self.modules(): + if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): + stddev = m.stddev if hasattr(m, 'stddev') else 0.1 + trunc_normal_(m.weight, std=stddev) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + def forward_preaux(self, x): + # N x 3 x 299 x 299 + x = self.Conv2d_1a_3x3(x) + # N x 32 x 149 x 149 + x = self.Conv2d_2a_3x3(x) + # N x 32 x 147 x 147 + x = self.Conv2d_2b_3x3(x) + # N x 64 x 147 x 147 + x = self.Pool1(x) + # N x 64 x 73 x 73 + x = self.Conv2d_3b_1x1(x) + # N x 80 x 73 x 73 + x = self.Conv2d_4a_3x3(x) + # N x 192 x 71 x 71 + x = self.Pool2(x) + # N x 192 x 35 x 35 + x = self.Mixed_5b(x) + # N x 256 x 35 x 35 + x = self.Mixed_5c(x) + # N x 288 x 35 x 35 + x = self.Mixed_5d(x) + # N x 288 x 35 x 35 + x = self.Mixed_6a(x) + # N x 768 x 17 x 17 + x = self.Mixed_6b(x) + # N x 768 x 17 x 17 + x = self.Mixed_6c(x) + # N x 768 x 17 x 17 + x = self.Mixed_6d(x) + # N x 768 x 17 x 17 + x = self.Mixed_6e(x) + # N x 768 x 17 x 17 + return x + + def forward_postaux(self, x): + x = self.Mixed_7a(x) + # N x 1280 x 8 x 8 + x = self.Mixed_7b(x) + # N x 2048 x 8 x 8 + x = self.Mixed_7c(x) + # N x 2048 x 8 x 8 + return x + + def forward_features(self, x): + x = self.forward_preaux(x) + x = self.forward_postaux(x) + return x + + def get_classifier(self): + return self.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.fc(x) + return x + + +class InceptionV3Aux(InceptionV3): + """InceptionV3 with AuxLogits + """ + + def __init__(self, num_classes=1000, in_chans=3, 
+class InceptionV3Aux(InceptionV3):
+    """InceptionV3 with AuxLogits
+    """
+
+    def __init__(self, num_classes=1000, in_chans=3, drop_rate=0., global_pool='avg', aux_logits=True):
+        super(InceptionV3Aux, self).__init__(
+            num_classes, in_chans, drop_rate, global_pool, aux_logits)
+
+    def forward_features(self, x):
+        x = self.forward_preaux(x)
+        aux = self.AuxLogits(x) if self.training else None
+        x = self.forward_postaux(x)
+        return x, aux
+
+    def forward(self, x):
+        x, aux = self.forward_features(x)
+        x = self.global_pool(x)
+        if self.drop_rate > 0:
+            x = F.dropout(x, p=self.drop_rate, training=self.training)
+        x = self.fc(x)
+        return x, aux
+
+
+def _create_inception_v3(variant, pretrained=False, **kwargs):
+    default_cfg = default_cfgs[variant]
+    aux_logits = kwargs.pop('aux_logits', False)
+    if aux_logits:
+        assert not kwargs.pop('features_only', False)
+        model_cls = InceptionV3Aux
+        load_strict = default_cfg['has_aux']
+    else:
+        model_cls = InceptionV3
+        load_strict = not default_cfg['has_aux']
+    return build_model_with_cfg(
+        model_cls, variant, pretrained,
+        default_cfg=default_cfg,
+        pretrained_strict=load_strict,
+        **kwargs)
+
+
+@register_model
+def inception_v3(pretrained=False, **kwargs):
+    # original PyTorch weights, ported from Tensorflow but modified
+    model = _create_inception_v3('inception_v3', pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def tf_inception_v3(pretrained=False, **kwargs):
+    # my port of Tensorflow SLIM weights (http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz)
+    model = _create_inception_v3('tf_inception_v3', pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def adv_inception_v3(pretrained=False, **kwargs):
+    # my port of Tensorflow adversarially trained Inception V3 from
+    # http://download.tensorflow.org/models/adv_inception_v3_2017_08_18.tar.gz
+    model = _create_inception_v3('adv_inception_v3', pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def gluon_inception_v3(pretrained=False, **kwargs):
+    # from gluon pretrained models, best performing in terms of accuracy/loss metrics
+    # https://gluon-cv.mxnet.io/model_zoo/classification.html
+    model = _create_inception_v3('gluon_inception_v3', pretrained=pretrained, **kwargs)
+    return model
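Since each builder above is decorated with @register_model, the intended entry point is timm's model factory rather than calling the builders directly. A usage sketch (not part of the patch; assumes this vendored timm package is the one on the import path):

import timm

# aux_logits=True routes to InceptionV3Aux, whose forward returns (logits, aux)
model = timm.create_model('inception_v3', pretrained=False, num_classes=10)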
diff --git a/timm/models/inception_v4.py b/timm/models/inception_v4.py
new file mode 100644
index 0000000..cc899e1
--- /dev/null
+++ b/timm/models/inception_v4.py
@@ -0,0 +1,316 @@
+""" Pytorch Inception-V4 implementation
+Sourced from https://github.com/Cadene/tensorflow-model-zoo.torch (MIT License) which is
+based upon Google's Tensorflow implementation and pretrained weights (Apache 2.0 License)
+"""
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
+from .helpers import build_model_with_cfg
+from .layers import create_classifier
+from .registry import register_model
+
+__all__ = ['InceptionV4']
+
+default_cfgs = {
+    'inception_v4': {
+        'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/inceptionv4-8e4777a0.pth',
+        'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8),
+        'crop_pct': 0.875, 'interpolation': 'bicubic',
+        'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,
+        'first_conv': 'features.0.conv', 'classifier': 'last_linear',
+        'label_offset': 1,  # 1001 classes in pretrained weights
+    }
+}
+
+
+class BasicConv2d(nn.Module):
+    def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0):
+        super(BasicConv2d, self).__init__()
+        self.conv = nn.Conv2d(
+            in_planes, out_planes,
+            kernel_size=kernel_size, stride=stride, padding=padding, bias=False)
+        self.bn = nn.BatchNorm2d(out_planes, eps=0.001)
+        self.relu = nn.ReLU(inplace=True)
+
+    def forward(self, x):
+        x = self.conv(x)
+        x = self.bn(x)
+        x = self.relu(x)
+        return x
+
+
+class Mixed3a(nn.Module):
+    def __init__(self):
+        super(Mixed3a, self).__init__()
+        self.maxpool = nn.MaxPool2d(3, stride=2)
+        self.conv = BasicConv2d(64, 96, kernel_size=3, stride=2)
+
+    def forward(self, x):
+        x0 = self.maxpool(x)
+        x1 = self.conv(x)
+        out = torch.cat((x0, x1), 1)
+        return out
+
+
+class Mixed4a(nn.Module):
+    def __init__(self):
+        super(Mixed4a, self).__init__()
+
+        self.branch0 = nn.Sequential(
+            BasicConv2d(160, 64, kernel_size=1, stride=1),
+            BasicConv2d(64, 96, kernel_size=3, stride=1)
+        )
+
+        self.branch1 = nn.Sequential(
+            BasicConv2d(160, 64, kernel_size=1, stride=1),
+            BasicConv2d(64, 64, kernel_size=(1, 7), stride=1, padding=(0, 3)),
+            BasicConv2d(64, 64, kernel_size=(7, 1), stride=1, padding=(3, 0)),
+            BasicConv2d(64, 96, kernel_size=(3, 3), stride=1)
+        )
+
+    def forward(self, x):
+        x0 = self.branch0(x)
+        x1 = self.branch1(x)
+        out = torch.cat((x0, x1), 1)
+        return out
+
+
+class Mixed5a(nn.Module):
+    def __init__(self):
+        super(Mixed5a, self).__init__()
+        self.conv = BasicConv2d(192, 192, kernel_size=3, stride=2)
+        self.maxpool = nn.MaxPool2d(3, stride=2)
+
+    def forward(self, x):
+        x0 = self.conv(x)
+        x1 = self.maxpool(x)
+        out = torch.cat((x0, x1), 1)
+        return out
+
+
+class InceptionA(nn.Module):
+    def __init__(self):
+        super(InceptionA, self).__init__()
+        self.branch0 = BasicConv2d(384, 96, kernel_size=1, stride=1)
+
+        self.branch1 = nn.Sequential(
+            BasicConv2d(384, 64, kernel_size=1, stride=1),
+            BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1)
+        )
+
+        self.branch2 = nn.Sequential(
+            BasicConv2d(384, 64, kernel_size=1, stride=1),
+            BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1),
+            BasicConv2d(96, 96, kernel_size=3, stride=1, padding=1)
+        )
+
+        self.branch3 = nn.Sequential(
+            nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
+            BasicConv2d(384, 96, kernel_size=1, stride=1)
+        )
+
+    def forward(self, x):
+        x0 = self.branch0(x)
+        x1 = self.branch1(x)
+        x2 = self.branch2(x)
+        x3 = self.branch3(x)
+        out = torch.cat((x0, x1, x2, x3), 1)
+        return out
+
+
+class ReductionA(nn.Module):
+    def __init__(self):
+        super(ReductionA, self).__init__()
+        self.branch0 = BasicConv2d(384, 384, kernel_size=3, stride=2)
+
+        self.branch1 = nn.Sequential(
+            BasicConv2d(384, 192, kernel_size=1, stride=1),
+            BasicConv2d(192, 224, kernel_size=3, stride=1, padding=1),
+            BasicConv2d(224, 256, kernel_size=3, stride=2)
+        )
+
+        self.branch2 = nn.MaxPool2d(3, stride=2)
+
+    def forward(self, x):
+        x0 = self.branch0(x)
+        x1 = self.branch1(x)
+        x2 = self.branch2(x)
+        out = torch.cat((x0, x1, x2), 1)
+        return out
+
+
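ReductionA is the 35x35 -> 17x17 downsampling stage: 384 (stride-2 conv) + 256 (factorized branch) + 384 (max-pool passthrough) = 1024 output channels. A quick check (a sketch, not part of the patch; assumes this file's imports):

import torch

out = ReductionA()(torch.randn(1, 384, 35, 35))
assert out.shape == (1, 1024, 17, 17)   # 384 + 256 + 384 channels, 35 -> 17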
+class InceptionB(nn.Module):
+    def __init__(self):
+        super(InceptionB, self).__init__()
+        self.branch0 = BasicConv2d(1024, 384, kernel_size=1, stride=1)
+
+        self.branch1 = nn.Sequential(
+            BasicConv2d(1024, 192, kernel_size=1, stride=1),
+            BasicConv2d(192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)),
+            BasicConv2d(224, 256, kernel_size=(7, 1), stride=1, padding=(3, 0))
+        )
+
+        self.branch2 = nn.Sequential(
+            BasicConv2d(1024, 192, kernel_size=1, stride=1),
+            BasicConv2d(192, 192, kernel_size=(7, 1), stride=1, padding=(3, 0)),
+            BasicConv2d(192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)),
+            BasicConv2d(224, 224, kernel_size=(7, 1), stride=1, padding=(3, 0)),
+            BasicConv2d(224, 256, kernel_size=(1, 7), stride=1, padding=(0, 3))
+        )
+
+        self.branch3 = nn.Sequential(
+            nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
+            BasicConv2d(1024, 128, kernel_size=1, stride=1)
+        )
+
+    def forward(self, x):
+        x0 = self.branch0(x)
+        x1 = self.branch1(x)
+        x2 = self.branch2(x)
+        x3 = self.branch3(x)
+        out = torch.cat((x0, x1, x2, x3), 1)
+        return out
+
+
+class ReductionB(nn.Module):
+    def __init__(self):
+        super(ReductionB, self).__init__()
+
+        self.branch0 = nn.Sequential(
+            BasicConv2d(1024, 192, kernel_size=1, stride=1),
+            BasicConv2d(192, 192, kernel_size=3, stride=2)
+        )
+
+        self.branch1 = nn.Sequential(
+            BasicConv2d(1024, 256, kernel_size=1, stride=1),
+            BasicConv2d(256, 256, kernel_size=(1, 7), stride=1, padding=(0, 3)),
+            BasicConv2d(256, 320, kernel_size=(7, 1), stride=1, padding=(3, 0)),
+            BasicConv2d(320, 320, kernel_size=3, stride=2)
+        )
+
+        self.branch2 = nn.MaxPool2d(3, stride=2)
+
+    def forward(self, x):
+        x0 = self.branch0(x)
+        x1 = self.branch1(x)
+        x2 = self.branch2(x)
+        out = torch.cat((x0, x1, x2), 1)
+        return out
+
+
+class InceptionC(nn.Module):
+    def __init__(self):
+        super(InceptionC, self).__init__()
+
+        self.branch0 = BasicConv2d(1536, 256, kernel_size=1, stride=1)
+
+        self.branch1_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1)
+        self.branch1_1a = BasicConv2d(384, 256, kernel_size=(1, 3), stride=1, padding=(0, 1))
+        self.branch1_1b = BasicConv2d(384, 256, kernel_size=(3, 1), stride=1, padding=(1, 0))
+
+        self.branch2_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1)
+        self.branch2_1 = BasicConv2d(384, 448, kernel_size=(3, 1), stride=1, padding=(1, 0))
+        self.branch2_2 = BasicConv2d(448, 512, kernel_size=(1, 3), stride=1, padding=(0, 1))
+        self.branch2_3a = BasicConv2d(512, 256, kernel_size=(1, 3), stride=1, padding=(0, 1))
+        self.branch2_3b = BasicConv2d(512, 256, kernel_size=(3, 1), stride=1, padding=(1, 0))
+
+        self.branch3 = nn.Sequential(
+            nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
+            BasicConv2d(1536, 256, kernel_size=1, stride=1)
+        )
+
+    def forward(self, x):
+        x0 = self.branch0(x)
+
+        x1_0 = self.branch1_0(x)
+        x1_1a = self.branch1_1a(x1_0)
+        x1_1b = self.branch1_1b(x1_0)
+        x1 = torch.cat((x1_1a, x1_1b), 1)
+
+        x2_0 = self.branch2_0(x)
+        x2_1 = self.branch2_1(x2_0)
+        x2_2 = self.branch2_2(x2_1)
+        x2_3a = self.branch2_3a(x2_2)
+        x2_3b = self.branch2_3b(x2_2)
+        x2 = torch.cat((x2_3a, x2_3b), 1)
+
+        x3 = self.branch3(x)
+
+        out = torch.cat((x0, x1, x2, x3), 1)
+        return out
+
+
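InceptionC keeps the 8x8 grid and reassembles 256 + (256+256) + (256+256) + 256 = 1536 channels, which is what lets the block be stacked three times below. A quick check (a sketch, not part of the patch):

import torch

out = InceptionC()(torch.randn(1, 1536, 8, 8))
assert out.shape == (1, 1536, 8, 8)   # channel count and resolution preserved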
+class InceptionV4(nn.Module):
+    def __init__(self, num_classes=1000, in_chans=3, output_stride=32, drop_rate=0., global_pool='avg'):
+        super(InceptionV4, self).__init__()
+        assert output_stride == 32
+        self.drop_rate = drop_rate
+        self.num_classes = num_classes
+        self.num_features = 1536
+
+        self.features = nn.Sequential(
+            BasicConv2d(in_chans, 32, kernel_size=3, stride=2),
+            BasicConv2d(32, 32, kernel_size=3, stride=1),
+            BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1),
+            Mixed3a(),
+            Mixed4a(),
+            Mixed5a(),
+            InceptionA(),
+            InceptionA(),
+            InceptionA(),
+            InceptionA(),
+            ReductionA(),  # Mixed6a
+            InceptionB(),
+            InceptionB(),
+            InceptionB(),
+            InceptionB(),
+            InceptionB(),
+            InceptionB(),
+            InceptionB(),
+            ReductionB(),  # Mixed7a
+            InceptionC(),
+            InceptionC(),
+            InceptionC(),
+        )
+        self.feature_info = [
+            dict(num_chs=64, reduction=2, module='features.2'),
+            dict(num_chs=160, reduction=4, module='features.3'),
+            dict(num_chs=384, reduction=8, module='features.9'),
+            dict(num_chs=1024, reduction=16, module='features.17'),
+            dict(num_chs=1536, reduction=32, module='features.21'),
+        ]
+        self.global_pool, self.last_linear = create_classifier(
+            self.num_features, self.num_classes, pool_type=global_pool)
+
+    def get_classifier(self):
+        return self.last_linear
+
+    def reset_classifier(self, num_classes, global_pool='avg'):
+        self.num_classes = num_classes
+        self.global_pool, self.last_linear = create_classifier(
+            self.num_features, self.num_classes, pool_type=global_pool)
+
+    def forward_features(self, x):
+        return self.features(x)
+
+    def forward(self, x):
+        x = self.forward_features(x)
+        x = self.global_pool(x)
+        if self.drop_rate > 0:
+            x = F.dropout(x, p=self.drop_rate, training=self.training)
+        x = self.last_linear(x)
+        return x
+
+
+def _create_inception_v4(variant, pretrained=False, **kwargs):
+    return build_model_with_cfg(
+        InceptionV4, variant, pretrained,
+        default_cfg=default_cfgs[variant],
+        feature_cfg=dict(flatten_sequential=True),
+        **kwargs)
+
+
+@register_model
+def inception_v4(pretrained=False, **kwargs):
+    return _create_inception_v4('inception_v4', pretrained, **kwargs)
diff --git a/timm/models/layers/__init__.py b/timm/models/layers/__init__.py
new file mode 100644
index 0000000..4831af9
--- /dev/null
+++ b/timm/models/layers/__init__.py
@@ -0,0 +1,40 @@
+from .activations import *
+from .adaptive_avgmax_pool import \
+    adaptive_avgmax_pool2d, select_adaptive_pool2d, AdaptiveAvgMaxPool2d, SelectAdaptivePool2d
+from .blur_pool import BlurPool2d
+from .classifier import ClassifierHead, create_classifier
+from .cond_conv2d import CondConv2d, get_condconv_initializer
+from .config import is_exportable, is_scriptable, is_no_jit, set_exportable, set_scriptable, set_no_jit,\
+    set_layer_config
+from .conv2d_same import Conv2dSame, conv2d_same
+from .conv_bn_act import ConvBnAct
+from .create_act import create_act_layer, get_act_layer, get_act_fn
+from .create_attn import get_attn, create_attn
+from .create_conv2d import create_conv2d
+from .create_norm_act import get_norm_act_layer, create_norm_act, convert_norm_act
+from .drop import DropBlock2d, DropPath, drop_block_2d, drop_path
+from .eca import EcaModule, CecaModule, EfficientChannelAttn, CircularEfficientChannelAttn
+from .evo_norm import EvoNormBatch2d, EvoNormSample2d
+from .gather_excite import GatherExcite
+from .global_context import GlobalContext
+from .helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple, make_divisible
+from .inplace_abn import InplaceAbn
+from .linear import Linear
+from .mixed_conv2d import MixedConv2d
+from .mlp import Mlp, GluMlp, GatedMlp
+from .non_local_attn import NonLocalAttn, BatNonLocalAttn
+from .norm import GroupNorm, LayerNorm2d
+from .norm_act import BatchNormAct2d, GroupNormAct
+from .padding import get_padding, get_same_padding, pad_same
+from .patch_embed import PatchEmbed
+from .pool2d_same import AvgPool2dSame, create_pool2d
+from .squeeze_excite import SEModule, SqueezeExcite, EffectiveSEModule, EffectiveSqueezeExcite
+from .selective_kernel import SelectiveKernel
+from .separable_conv import SeparableConv2d, SeparableConvBnAct
+from .space_to_depth import SpaceToDepthModule
+from .split_attn import SplitAttn
+from .split_batchnorm import SplitBatchNorm2d, convert_splitbn_model
+from .std_conv import StdConv2d, StdConv2dSame, ScaledStdConv2d, ScaledStdConv2dSame
+from .test_time_pool import TestTimePoolHead, apply_test_time_pool
+from .trace_utils import _assert, _float_to_int
+from .weight_init import trunc_normal_, variance_scaling_, lecun_normal_
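These re-exports give the model files a single import surface for layer helpers. For example (a sketch, not part of the patch; assumes the vendored package is importable as timm):

from timm.models.layers import create_classifier, trunc_normal_

# global pool flattens to (N, 2048); fc maps features to the class count
pool, fc = create_classifier(2048, 1000, pool_type='avg')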
[GIT binary patch literals omitted: the commit also checks in compiled Python caches under timm/models/layers/__pycache__/ (*.cpython-36.pyc for __init__, activations, activations_jit, activations_me, adaptive_avgmax_pool, blur_pool, cbam, classifier, cond_conv2d, config, conv2d_same, conv_bn_act, create_act, create_attn, create_conv2d, drop, eca, gather_excite, global_context, halo_attn, helpers, inplace_abn, lambda_layer, linear, mixed_conv2d, mlp, among others). The base85-encoded blobs are binary and carry no readable source.]
z_(X)MJkG=<5^vC!U)+r!!Ui&1L`j%qa~kCjGI1h3ALDSSz6WAp*E0uaqdw#qlUzH~ z#_UXET47AL3iik-^yxOvdaf7N8GB^s#@H-ap`Yo67FdOOrr}Ilr?5G@uAMPlyK0wS z7;0CeUe)VWk87~bI27bSLGSN0OH0nCktp3Dh|@R^0zv*0Iz(w7#Ay_Y(l{RMmIk6& zI=`u6Tv`-tD31rdYToNOr75H2xNOF0a2$pCOhmH0GKx8m))Xyd$ZENPhXyJqN|T;g zdL)CX$c`j}-3Wp~5=t2af7AZ+VV8$_*z3;6r`^;Wn6Ye zG(?p4-#_f;@nq6PenyGxs+Y*_B$?hsmXZHAaL6`<#ysZIPv3;xVw-H z(4aFNdpP7<5QR3@3Wn&w_hYu4>C2h1oFR56F+ejf0$f5DI=B7m{wG4`2M|P5R_|N2fvYOv6bO1f>@Q6-F^{1;OXDFsWCZ zAmG^mAy?5h_wO8hsyriZ(Cap8=MSPJ!Jg&EbeOn?bwS@GEC@0}Qf9ll$+lZccRnKD z!KVKQk=*4CAk$UIY`}9~Y40a90%xqi3D(&^_|%vADQIR0ib;nGa`&R>wm-`Asl46o zhT>s-|7IqJ-S9|uci-CW-Q0P5=j~f>T(G0Jvx6PKL}tAdeE$NYE{1Ob>ba(I{X~~P z%k?i& zn&syEQbvO4_l^*mSOHwPv}cuwO}cz88y%I60yReyuaF?bh*wEmCGj1IMMf-BRlWD8 zSdp}6fv>G4!rE+prHbPUWW-4{$Pl6+1+ot@A&`yP7+$yzKi<$D!G~k=i_#aM_rEHC zF-_@hrFRy3s((+Y4ifcQHTC@nj~zbw`X68Y`=5V*7d&IgoOfL1iPAiZLkU(Kg`mX> z7(|Svm8FpkN+$*Jhe=S{4Te1#JQkg}y%wc0Ch;k^nen%-vHZ2oHP zg!Mg3wjq9AI^v*q#DYb}3`O4L#uNQd`Xf{cq>?t)Q61P-I0*9>nMb39TXGOwL0+h; z06f6s$vT$en8@fB$VTwd+~6R!KXym;^!oYnS@W`d_H;|OJtX%)OHQ<(S~aa(mumT zL|Zfzi=WF#I=Tewk}kpBr(#cRbp82OVTiuqdG;VHXwauoaOsd=L0rrpDE>A==BToM ze(`r!#lNt)cG3_z7()l=FmTSF^Trpf0Eejdz@rr)NKZXHv8h@JnwiC-WoY5bDIC7w zmyMcVDr#9}%;t(;UWQM83@b~Ma;0=XKtGVj`RQ88b1x*B(xaQI7U43&`7QaA@<`L4 zE$CF#k$upjw7;?l+KPH8*=W$2jy)XmJqV(bWd(n^`u;4+Q0Xf?tpRBl4_>pqD))lW zw94tKxKgtJ5yuini(>Dw`PR~m1&|sS0!h3^4_6?%3_m;AsmxdSK|Oa_yDdmNz_A?P z4t|`yEWJt$iqaR)%Mc87~y4T2}`t~zV;P|@ip z9ldOxL?VrnK*sY(`~)A>6Tw6^T>_;SxA}EA8&D@czm^)jN=pb^W|B|3Al`7n|KHi;uoM z`AhTTXuYrfrt0w~UPgJo=w|gBo*>;}$5UmgwDL?0Ms;->MG0C2sraw7@2da7s`*v* jMuA4%y$0)Lhg7=nR89H2RN|F^CTr;}qrKgJz3u-G%KkS+ literal 0 HcmV?d00001 diff --git a/timm/models/layers/__pycache__/non_local_attn.cpython-36.pyc b/timm/models/layers/__pycache__/non_local_attn.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1d30fc9f8b14da0359522ddafc007fc105ed761 GIT binary patch literal 5634 zcmbtY&5z_p6?ggD?vL5bPIfn&uO@*gjhN|V-~dsSWpEBhLYPML4j30Ej;zToEUP1Xpf|V_iVvfab=T1Mqugch3hKB)}~{zj{@EuB!L( zd(~gI@yp+liaDhApPtQbH$mA1I+4spIxc8+IURg>6XMjid`nC+tGfZ6r&f z`hn74^6xQ4>D`QzIE#F~x>1%pa@y3PHp*v)y`^RuwG12@=28W zK@b(iS~-e>gWa_tPX|Gs(G0!KkKTLVTYqW&rNO97lfmXF_7>Cd>S^FnPs5{Wc+E7t z`C7fjO8mq-oyMtVZ$j%^BmM41=ub+lVVCD=FX!=IoMG}o9!9S$CcKBQOn2lgZ*H9K z-{9uP_UhZIzZb1;CVo-GyK&&t7P}|#0cEStgqmfb?(Q@TsKZZAU=#%+XEV4OQTOIE$z+ez|ZzXz}A#k8PaaBe|s zt~BBHC@y;U{G#Wd-L$z`|La{MuhwgMBBE~~j)cov_5luF4eWu}hRnZlGx z50!nJzffuwtF#rRGD7xHJ=QCguU9OzE-RTDYL(*Yr7<=!Mi1$SSbk_gYCTeFKT_&Q zsSd4YHMo$rDmKHMymqCGJ2b0Mf%$B5C`)=t&i62LTz|gNFn_xt7rWipL~B3dS(JE1 zd=v?@C^@!A=sU4r%wPH~T)`+1x}4MI2;1}GEG|8d(=MS4+Zb_`c$55eJrt}IY$Dj6 zU`L{L(=UV37BUDr^bAjNiHuPquDG8@WyFiVCG;Xnc10`BykO*Km{BowrbXmQ?ZW1v z$0=Gw3*3B!aM5U%D9rtPKHn=ST0BoWr04xY`TK{1&@cVKA0cC` z4~i*|JWODehsA(LdkDE(8(V`iPSZh(p-C}F{6kD{0B`Z6yLdiAVQq5Am$2NcxQi7K zg>{+DF0o~HrTMkl(c^PBrRZ6k?^!_mL>GCKfqN5o@kfw~9jmY)o-=j~pBXdB>EKk! 
z8I?BHX3EFF0|7ylvzlJvWLnfS{@Xg}c$?B*ydvu$we(ym|5lD#Zw;}l{z-5I0&_e8dFNEv>|b3$_!f_ z>iatXr5xu%+JYmXY`E(-TH4U^qeZV7n$){!x9av{bO)o|Mepa1zEh_cYYUs>XI?%n zh0dd7+E;~6K_l9fT4faxmxt2+O(Mb0N;vC^PKLM-Jq({FQ6XFaSyq&O7Sw5kKZd;# zIwcvgwC~4hob7p(H~`SXycflLqf%%|lnFhFi&D6!i9|Rk3`kY!@eZ``E2PSim`^FF zuhmYW(+mVlFBqJoPE)>tyC6nEK)0C*1FuMq0=$hdKYIF{nJ*@@ztXFU^1B;-eVdc5 zx3~k^w<&YdcRf$IUbA}eZO^+q^^>N@^1Lt)kX$LlaKdgmDbZOM?j61h=uP%JKj%hw{LTtWf(oVUF=Y zAKJ2LP=<~y5?qRK@%X%sqBOTCpB4a0{%+wW-!Fa)avnfy;0ytbfKF9{VhxpArB*5c zl|U6$%dYhQ1!SJ-#k=aF*OvNFM4!>;K$GIRNOg(_s3f420I6?D=Y-!neo%Tc(+ii} ztmd(?l%x9{-o=BzO+?y+vbHebX9q7!)T`aqB!M>(_495$U|qPfRBF(5k1F7TUnjMu zbVSMX_el45h_8o**nrGH z7ny`^a?j#7Ajt59EWnFnl(B3WUC#EoV&^dXg1>dZK%(H3qSwG1=>Z8TIwggNJXOxa`AJ@fC97OJy|*58H~ zI$J`S8`B8O!wD*xFgl0>6vzB|tnP%?&7UKVe7{B=2c?UI4B80(0+AO%gcbqwKcMEe zY@$lX$Vn2HeXBOwDs@W8S|e^iglsr$om%_l(-4*RL#+}Ks(Gnjeu%LJWhN>dsC3X_ z#_K2@uH$!&9W9PX=VEH@RKcQ(XhX*-q#(VPWFZ#XPbh#E^@;+^A#`NouX~(-@1Zwf zd$qPc-a|Eu8pJFKeoWFs0f;)lswD_7$fU#uhiYKPP)o=$@!Q54h^_UHDy zl~XwcT$EWJs!!4zs6imn7ZQw_Yp=g7^&3=oj63i^nvG52J+`mQRX6=z`ThD`>TTvl zU-+N((cEaIXZ5+zy@YkhO6@w%26EL)SO;qSu_`6BhD&l+TU863i~Vh(_pz!axfZBqv_kD>`GDCxl`|3DelU*^((@6Zb}k=!Qk%OK*iJbA?j zrFk6kEoy1SG! z+qY<7Z%*c!SSDdY0qQO&=xp^$tJ{&t5TLoZ?1n2|12czLdGe+bT^|2JD?)JLHg|0a zw{!}3hE{V5hYY812TsuzOE^EBtv8Ogd-x|o9$C-+8}Wbq|3D|}6X`^hA@}Op|KUbm zw(R}vr*!@PM&LHfB2=W&^eiSIB}&*aljmVWo`;_+fdqsR6-rm+bwk230G>__zy><| z0_G(mXlG<5#sNRhjYGjZc5ueg9};xC61okXZ8=S+N&4qt^D^YHy98~C*JLVApv`4u zkO_sl<4Xsw{lR&i=M#7;4yaaGv(4$@B0fJ!YMvtUG^ufDG#yN%(%_t)^q=t~s2f{YzYGeUWs@sJ7+#a4pNYM0=S5 zTe2$K$vl*J(g%b!iV_?Xi~7%tR>|@IkT)&kq>y=3nD5F{i7cAyqL03IW=ne1qH!^gZR0qIo3*v&uP=Y?UuL3CqyPW_ literal 0 HcmV?d00001 diff --git a/timm/models/layers/__pycache__/norm.cpython-36.pyc b/timm/models/layers/__pycache__/norm.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef351ade4c31d2ccd88aae54c3ad503f98039cf6 GIT binary patch literal 1539 zcmY*Z&5q=5{r*d=Gj#%YZUhZEI;R<$31e0zRY}`Bmizj@r_+Saa5((!=k_M}f zl5wl%MLk;;-K=ery10=_Cq*NYYgH^3Fp0O|l#EN%LqkB=^v9qi0U#uIMEH_Ss9?go zjR24@f?JZamxPhr180$mE>yc*pu=qf=l=d@`s&v||NZOfW$D2b8}>d3B!K95Ky_q8 zD!O*obnUL0puaH+34q(2aq1XXF0}EB$z;|@n0k5%fKABxteJJ3r=DS2))RBoEa$wO z7EL2-ZDO2VAr!69p+b)-H4*1!U1-huALPGhnJBtql&z|ptZaqU**E9sKaD;ajWWGd zlFwx~ZH3O1yn?)4d^yg#*?gYOVQa0kK8;xep`I;nR0s|t5bb~>&gx{Jh_eA}X_s&n z1Oc6c==VT%m;kyal>_2}Fc5EpDW&7oH}qQ*+h{ntQ}^JA@vr6VYT6n0d{*>9rj9}c zhW?_Cq3_jwv;>n@T^CBAxd%vB4-Wu$tEsE7C<-fo30@(|O);08o0xNmdRgQ6i1QcA zqV890*y<7bINqhUly%Md0YOUD5#EYG6h@Aj(&!_k_nEca;T=N2rauL>=N-*m%REQ8 zw_(ntSNSM{{q9)WvpldY43YBY-VexeWZ_7xK1=qq1R_lak|gaUc|7|1aiSOSFp4_) z>ioN(lTJ3eh39dnSO^<&=?9>8e5D(@<0+KLQ30&jZ0xF_VlWJ`+j7_hVDP6PLlqwF zv5mx@C~yVtF}{-BQZ;v=?0GYS9X)R<2Y&|q4>rh~J%3>AZ#=sSYr@$uNRBsF6i#k2 zq%Fp@#c3L-H=r}IWwm9Q3E-j4mz^{ZkaYNeNWt~A0P{oGt4>hi;~FAH%!j)71xISU z1!4*CQU77pXV#Fz!@ugE9f@dWAy=a>O)W_w0E90jd4IuhuPU=*_8NC7PW~FW1AjJRuk<5u$LY% Z^dtM=!q5Bn@)XmokqV54GzK6i(f>pMTyg*a literal 0 HcmV?d00001 diff --git a/timm/models/layers/__pycache__/norm_act.cpython-36.pyc b/timm/models/layers/__pycache__/norm_act.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..15cb8d5e376b01f702a65e7cb9260332d7a5b95a GIT binary patch literal 3010 zcma)8OOG4J5$>KD&J2g#wIbQFA|>$vb{@2yrLzv=g9AIVVhcfxjbU#DgO|bVbdwzJ zJecmL*1MQJQGkO0&OP}D`2jiPr(~|qMYrS@_~fs8NNT0nKzh-&HT|rv`o1c@xwjXM zfBVwOKld2>l3i&Y^k1RtPay=GFu{dW@CnC%x8S8SaX5{6g_a5O1K7k7eBug6 zxWc19=`DKr->xr6!xxWAK1Oaq-pe`>8iYQH*vx&vjhZVpYg^Db*Cabo|X5;=GE}xJ;``s@Tj^ z6QAWp5i41ym^n$a)3a0wZI8|BatY6!;MB9+%;Phe&*nzQMq#ZK-Jq7sRHa77C)MjQ 
ze0I=T1B1@y=h%V^=ThY+o~pWx-#fA==an`x6>&X{>5^LK)hx!ERKvJ~lPWG{c_P(; zT|E2G{L4T7{lEWuXZF{>Q2WQb$46PTiNgT@19TT%PazC5ykTo+;WWH)8n^M9p5R{q zQpRlpc%#YJw?iqCexbB&Z^TxUCuK zf+0fQ(a39Exzue~)}^e>s%-sqI)!7}8*or|ny6J(;a!O~snN%PRghQ9BF*F|YI}Nx zKq(&vw_%dxRc?}`jdC~w%d1Sbt~N^bXx5Zr>UU$2n)Gmdu{a-RwUB!J{=It-4&OLD9BW*fl+w&=p~p(jz@+A^q+_Lv6@Ah?ws3{}7IRqQ?sgsv&v^K9*ZdCl zZqG2ig%2pPMSvcn_t7JCzHA_!Ap^|VVz3S`dBXvX-o+>J-8fsB_($oy??Es>;=bcMLxg`H z;lBpm0|185FYY`Gfh#BMU_fvnC|LFrlCH9UO~K$tuy}}>j%&f@l#yefp?-*&5m(=( z1=eP3?sGeHm*>EB^%kbJg`RWqgKx8|Tl$n~w8bLy0Ez)(7S5Wfn?Q@(a3mmJ5sti0 zht}I@8^is!2VZ9M)`i#FVXhOMseEZD$Ui2FMz)7@xh|Q#z*tKd6-yZvn+@msF$##jjZW5mnmYZdYQy4BnTV-7ePM8LQRG+ew)bg{8MIc zsS&E5(t*TgH6}sktDljedS9nrCZ9fmCjMIMmsY)hFn`-rG7>$%{=UHFn1Et&Nv$o$p%_cJ)_Xm0VXi2xjtoR`|xpiJlCyBbDVC)FHJyJSVNnqkDWnGi&SSstuI0^GOORcL_ zKGkW&@eFtz7y`yK80LKt1(4Sw1uKBG7HN_LBH1CCi$VYZ-qq-Q!$7K>FLh^?8G-yt%5NC&!q9S>O+X%f8F)- zcs%yU;F=ge31`5QzdH74GPB#jRIpmt;0Fd{BD6*I4rt(jzO-~?sj*Z>+#&HV6 z9D~S7PKN-qV2{aS1-f|$pO?Vt0Vzle2v}hDR^W9>G`D;lzO-T-<^wAt6(=2`ci~vw zK(UA70gAUY=sGNI17kPgSlt0YCxd(vs`o*5_&rkJrV69Sv~8qn#InP8$&zfJNIt@7 z#WGcT3wD$tmytk_n<>-V`2Ajx_1FX^hk}VP$^-DYixB5<#wJGV{1zrt72(PF;4h(7 zmsG!oNkN8WL8K$e@6Z-!Z0^k6oE8q$8mtReN{(_N!0v*bmv)pK=L^BeyfXKSiew{U zdWDBt|B<2e5}?NIuCaowcBpQFy7~9^_7=)V zVDFU4u=I)LKtX5ge{+kQEQ<#)F0BK8!C(- zq%0~c0VW&CmSB){&U1)ArwgXffKj#}hzEFJ`kR~=^nw=bHPxTz;%+`Czn)1zEybLt zhv4gg?{l=IwRJCGkY8ZYxZNi`fnk;v^P)^_EIW>^#wJdoB+^J(4ws5`lI&7xjrFkV zI*qMZR+n{0m!a+(TamLg9LLdpHulyUI4Eo9W7unuPC^B(GLL$!#&)=%F3enBqxuv0 z)?wxKQ;PNZn@xKMT$f$CXDdM7KZbtD}_fG!w2>F}b zYA)zsz^@;I5M)RM6;4hIXXsEOI>OC6g*$Yu?&V(L4}D6W65)&BoQOa=(_q~hh9dly z^t;P{*in&;B{LQKCNb%RJt;&D}tbNPvqS|%!;#IG_94!BWCsYjJ6q>7?vNqP$Lm})G+n_?#m7DME-h04Yg z!v+rq57_fcYxWYyMN&o=kUY|-jZ>Bvv{k#N;sZ0Es}{-vTn0!5M` zh4edu4m+avnvAFjMfbcrbVVdK&dJbwORZh}#s7$ZyN;99@sYvUrPZmurg zJj;EguCBoM+fagaimC^jICkVcc~@p{&?GfVfZ z9a|F)*iue#>N zTbY|ZLfVhP2(nW z_8ZQo-wmDHK4VeRu2eqrgWZrhS=tV>osc_s51fXxp7suS)ZWRQ=GEp^=T0hw^EG~V z{iN~)tVCLMT^gT9B9rD;zZZwn{-T#fY2wFEC=A!BEN7|L%y6u(NxSDWPr$))4$kr5 z3s3j<{J0-(Y}HLUwHxvzj6D$@hSCxlk60*6gL$$Xgi-8uQ|Oxu(teV8Q4+*`7UE8U zg$ltBYxLzh6ybo#NKSH^Ye)KhlF^PwAEBqv~`YqNV*{_{$AVb zDZAAzN$>G84DbpHfDqwT6g3c!2%m%MuR9IqXMVGB*g0qfpsZ+IUtj+UOxSFQJ`cSv zm^fvk!NWF~>c(2Dkwx8Z1Md}!M(iJiTr`RUm9bWP2XbblgzB4ygM&S`Uap+!ENMDr-ZiAWRNF zbhg4oqj7N0Q|>fBb^N`M`|Z#{0BF}J{!Erxniaw~s7qM-70Ffy3 zJn)<$!g%|c&O@krUJ(02c%JejpNAdz5{P9eLcxpL@IPFdu+Hkq8G#1#@Q3vffvfO= zSIbEU3!;!_ddC>`K(7hKf>Kg=q^-KG;5Vi%I-Jj-m;_y27i}-5k3#URByGihT?!ifI%085;Rb zn0P|-3%?13k3egSvCh~fnl7#{dbDA3euTYd>!LlmEwS6LRFf_xh>*HVrOFp}dB zBPD|7CKT~Ih+OMgIYjha@2E%Bk%qVcHqvd4>A4osT>nY?k$waH_s!ho-{tU>+SPaq zG601{H8y7xk~uSBtdl`rI6cNJ@i;SBDJMUZUm%NoPm?=?y?)J12(hpths4k9hoB9f zIo$6z7%x3-{Oq181C?7U63gN#AazL4QU?lc94>vCKnm)mJcAb%ra~t>=!Fira``OtU4sM6$LeEW+@MLq^+veB;lC-<-*D5>UKW~ibtfZjM_yI zw&23>vf!9vIRcL3BS@BZ!Wax8)C-`V#gl=4S|f!I%iqIm(SvV2=hWXDQj+D}04)U$N5lDe^WN{h`90oR zUT(BMdbRWKHN*Iqap|&Ae;2omQ3S@m5wO6F**?S5irLWIH<=OGK_#|^_P(v_PFxu} z`;M+xzaUl7PzEUH3r``6% zadfzqieB66$o8+_x_N8s=IuA$yxnRvKJ@&9FyJcfg{mJ4-g(H|Sa9@EM7_S^+c&pw z^3PH!`2+et^peH}bYkqaSZ*fC1w$RxI*E8M+3^+rtPH(_&<&!)NJgDFL`&86qzr}f zsXJXJZc_3tZg~fVGBQIkWp>SpDPB8X&5ZlT*Ns!Atc*?A=f(rP573=yeIRU&AeHC=9m38u;l}pBknYsfRAPjUqIF zJ!9V_9ER4w#$D;!kBu(y7}$^M`%X{^93ZXw7<%2WK`qt%`5BN39059sphYh@3g1CN z7d`1FCCYsi3YN%>i8(MQ1~W1NVXH zR?Eya?KWladf<>yC)v|@ok5y^k_O{AY&3Xz%{DOg1aFFdJ`DZ7mqc>NyPmI70Su>O z#iK-}JW`UUM+t{5Fx}RCK0$kYAEs`WP>|6HP|}G!`31o=N=C8ghkLC?UXf#XmsrJs zyym)55-Hcs>&bZNjy%coD@5xFzAqsID-FtpP|& z?Btc1+U9oOnr^`-x#^mkeHV54M-*^DHb8`X39dFU56l3r2=lW6j6Pul8?g)+dt#o# 
z%mjuB(=}&StIle*vWEG-vQN!Z>(oBQh(VpeG%z!Z$_*p4v&y6bCvOaz8O7GBo*nMq zs_$uA<%R~Vs6sw%p@T3S1nvFKsWU$>S7Z3%|!Q(V*vw&Zx2+r!XH#LIXmUZLX0 zRQ!ax+J{kigz%6Gzn@oS+C_XjZaLyrddZG+b}whUIr}hY4+_OoV9?sntKC!_c_R1) zS|pASIP$vIKRI&)PQ8KqI6qfDjm5l$qb3w9R zP+0oHrz%|paB9&Z=`|c0iy`UE8m*m~=T>HoI0d905Qc*S!H5~k&MZ}#nDTCB^$;D` zj8iLXh~P0ptVED{RLiXU#)O>%Z#||Q8by7Qm9tC#&&xLy+|DQf*3N)gx1KdoHh|uS z_>S5K)pa9q4ovwE)%=zfEgQzUsg}^+nV6@JS_aCUY$;pLRg#$)rb z)wRg09@W(K%sOKelO)q2^|`F}XEt)wD(0!K8CgB3!#A$0=Shx1HM>3`dDbwm;r=LS zz+;-;A7I<3qw6K|@Kk)=ksJq`FQN{@XOuiW`2V01{3ud=F4JL1aX0ElzDG1gUJAb- zd!iRg#VO+TLLNyD%7Jx92;6cMM{1KF^&`K}i90~BORuLvl6$@&HWGU1=FfQ%r=Yxv z0hrVWNtvD?osg50V|$+D9`A{CJc^PYPrDFRD7ncySe(Sb=|OUo3t{eYf@35+9(f7T zBi*O)Q67hfU}c~Y<8X_AK}%9tlCwo=x{)W~+6s(H&&0bT9rBJB$7!0707v~W(Q{tf zgx>HFC&eN59;C_ZiuXOxKrMACs#H-N^N~okhGt88JV{ju+Nb&tp;2Fio+QT74vK@n01*aDJNZh@QMNlYlfH%}+Ca z14nV|4lfVk(#kiwp{Kx}T8nyKm;@5c&cv&;lt%hrrnz*&>sdBAkwHjQynr4~LJrkD z4W*8Nw)zV)NhJWtsWjz38hqJ$3nKz3neaayGy6}YN*rlO9`(~W+~m=4gySD(pN<1U zHOQYKO@W8$%%CJ4*cJWD_SvSMF;oPuAh}AFbmP<`ooMc&#Vy#BR?Hj$rPbyLCu0-l z-CM`E_>E4Q##*uAhy&rgSogASN~OaECC8A>A_R~h0E#_GUl>K|b*Cy=TuuiF@Gx@H z>596ogw`xVz&Vrn;SUCiLby#WzFhEvI6Vr*=7JSzp9m*>A&ks|2y&$&?3A!9f(5)d zomzMS%Q_G&Odp7J!9=9Zh)dPY2dhL{*fjkDriw5NZ z8^&MEZ)_Y>rsj18ZPP5p%+0Nvf~W^bU2SUnInn*T@isZGpAJWNzoaFIz&ggs<`r5o zW4K#yL!n+Q@1imtD0H6cz}lh=b}q-atj~ zk$(j;d}=D=3`c+0q?{)({$`#Te>MNa%Kb9znE0W12L&h@rFAV!+@&Vkbk2g7l{YDl zxCdaAJ()Kzh0%QJGKA(@5$|Fwai~HajItM8F5kH(Vrw40IFXg6Czo{3=c z47tq0k0>kv0b6)nkw~Jcln5POr+$qBNt>u6S0vV1pj0P}^$@3Nh!ZjtpB5byzK@CD zp)Oi9+AeTSOYK7o+5>Xu*wx4{;#ncb0~(?e^;@VVxFz|B0f4Qu4RcL@_o-2m_`(lp zio6@wcki_7f`BXDpn}k={ZaguYHw0Op8|9)DBhwPiJ^KM^l#s@k#NPT3Fq?W?ni;&bwT%0R9YUR~_7=unqK@iHDO5jt8i?5RSoIzQo{)Ck~ nMU>FVWf6g9;>>N6*WND9wYxM>QV*N_(OiO0E?Kqe>MQ>R<$$BM literal 0 HcmV?d00001 diff --git a/timm/models/layers/__pycache__/separable_conv.cpython-36.pyc b/timm/models/layers/__pycache__/separable_conv.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89f388f8df9ee394bb121612ea162b889be3253b GIT binary patch literal 2910 zcmcImOK;mo5Z)yzQKTMroU}=kqS>aes1GYpdMuL0&Z9tEHOR|_P=KJgYl}8Ts=Mnb z7MPO*^pqdczr<649*W#@?x{0N%CEX;4-JhS4rlf;JM+yaUvIbl-nW-Wzj=iGPL4bd z=wHK7_d#$nB%E?9p%ZIpQNnHRB=*D}+D5M>&ZIW1QF4#)I(J_X?h0$WzHbj*ur#;_ z7O%22xc`uJo1sq$VYkIpk7uzG?4g*3G8`p>t!L>|_I<{86QO*6EmU#DZf`tf5!lr# z`$3O|WKTmGhdRzu#j;J~7SCd(*(}y$HVSn#W@#oTER!sZbo{iMI}&;(M9N?f|Mmt8 zQ_k*hlshxE3yVU9z9Vr>%0O70LQJDL5d)#0^jVMH+1!kySfm;ZW}_b?xC}(J>ige> z(T?CuXIsKVJKBQ<>)CWq##>{}`q%o`*n>mfni={L7Oi zob{NNVVuP278_tKFxxP*Du&PCd1XVe5=BAoZk0E9ee=;E>XbTJf;S+O;1o3mp^2tB z*|)Z>eL{)0bE=)3=8ba@cW&oSUd!u<)R%5ff||CEc&*&a>$%JAmo@O!wVV5SbKiR2 z$ve4~xAHc3UfSSka00ii4IXu!qOP=zxhNVuP7H<$cN%gIcPc!-0B_flnEk>T#i8n2 zg%?hz$sSgy@VU&U!6?b1ovv5Z)b3PBS;xtZAc)gg2Z6*+E9^Mcg^Lvq_^eWibNHyY*!zCHFtyrbD-k6keD>-qrlK)8~0hM76y~uM90%4 z7P9C}XTgzmg>S%>gnQug;0A$FCDub?F^bLteWeF*=>E$Eedw+1~OPbNKRGq_OC9Hx3|K?Vl=tI#)R z;Ak9b83eIAmPI>s8RSLFr}ImH$K&cEy%Dw(KKRD54ns9St)_R|*aT@f5kXM+K~PdH=&d05c{fZdj~fI$i=a>NHbrv{$sPHm z&kN^)`0la6M$wo;qX9YYnL?S2$`5csyFzCqk_0wkU^4>peHnFATt!~Mz!-nBg58$_4EFDWy zpU=;oAX7t@5)U?HDG|PR|0v^leKRsShcA2|u;lW}&m0zHW-+%>VIQbEZk5PPr0*fm@MR5ocJXd~( zf-S@X6$TK9xoa>CAh3@K?f9mP(KG0JX6vG+GZ~Eyp_ISAOV$5ci{G~Lceh^t7it)w ggfoL(4v#juC>mdt#B~GP)6kzoTULvAui^YE58C)oNv}%+es>+t~6(7&(554=6vUT^Yz+Vt@-1fwHB&N!QJ$zKV5D-(b437@dgk&MiV zmcrtOYdk#~#>xa?*d27UFf{9rw2oEQO$U#aN|h1Ex*?3K9rx1cRj9k;xWP>| z45MC0>oEL_{rjRRJ6Y$jIZn=+Q7U8Ie0+5D^WlTT!=@goIPAySX)1M7#V^V9(?_jl z*6sJ3eOlVn&0gm$R=TNi#4t-ki9;R?&lC_N6q$x#yvFN%ytAB%gL&X(Hn6fnd 
za+HpA!&d?R@!t(^8GdB=_lEzXZorNeb-VKq$Ks&&;Am}zdKCA1VYmo`?7^)!t3LVN^q30;CH>LA}Zb^)*0;QIvcJ|FL1$_}+evA9KUZo?WRPM`D}Hq5fn`?MaCuTw$Ox^gR;G zG^TemkyERN&LGIB6-z4Pt%j!%O;Mu=+=z5&3$7uG*6{~vnAozhCAv5!Y7{#(It=vU z@|qamz8d9Knwdv?gzYd(1VVL}qxC5!u0AK-EPw*%MV@Vspqp?5*c3B9-n+(Y*{e^k zK)Gm1^$A&7pt#bANF#pF8?mCGSwRx%#$!`HZ$x_;Q5#+abQw`=T(G6f$TM|&3q4P5 z$kzjiGG7Le02B7^q2xGAfPwesX{usAgcS!H_IEkao@`)`)Yh_Wq#mwGl!8gTS^ewJU4OPHv&gS|C4{DqB{{UK_;U)k8 literal 0 HcmV?d00001 diff --git a/timm/models/layers/__pycache__/split_attn.cpython-36.pyc b/timm/models/layers/__pycache__/split_attn.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6adc54f1b964afbe60ed2a058e3de73e70e7284 GIT binary patch literal 2956 zcmZWrOK&5`5uW#p6nCXn)`{c96Cj9}iC3eQW5Bz}Ya_&V9EWhUP8<*c#>43$H556+ zbT_q;^Eep)3a|Jx1T*>uGnq9p`=htLEpgeF{TB4)iXx>5F)X&CW~*`$>5b192TmUR^$*VXwEiysYZ9-kDV zSGAJlx5 zWCM{L9UlJl;PAbt@4g#F&sjE;1&fEgp2jsVM`e{w;!iF=t$BVLm($rK&SbnfXH?45 z`QRY0r^(f6R*mR=^7Or@$=aPLf}`cIqyyo-q(el`%bAR^XO_vgW4Mo{X;$UM+i_Mg zEL5TtxO{#Yzr^{mET_|Wa2cP}Ld2hyqf-g%=mwcEdRQGc!a(YbzV(GM)G2%w^lfG{=atcSzAzl4?;;(pYM*4Rym(O$@x_aL za{~R}yyE+qNJE;9v9!k4(lCt%ttI zufBEn@&_B2bBiqSPz2~U$9P2_qA|xPi>0~1&BrK@Glzc+okfut+gj%?ow36#W-qOU zDc!Nxz>YcBn8DI-EauTl70F;6Hgp#Bh@ju&lwnTaydeh=5}TFNoB!eA_el{UYUNd- zqo$nmvUsUur$WiIO3Z3eDD!>gi&ZX_&kJ#i(owF)Bo@liv@TiK<`Irl=7n+xnaoc) zsK8i>9a``K)|bfELrg>m!Z0KJy@8l@tjN6D|4P)>#$AtcUS-n)Igip5shCe_+)mTK z&a=sCMQ4G3m%g&I&Uao+CTV)>L)YXjtaEZjV~OrAh@Aqn?P^Vq$@b9>VkSO>*npmy z06UTZjzaK>sfeF24hm=N;=eWQ=)wqD^eXIoti{?W3||*UfOPDrP;(SWrn>qD;XKOD zGE!!;8^w5fHy zyN%sAjoWxbukrO60dxLoHTVB+{f!DbXgd5KO{dvinw5t=;nILMYIZ1k*JRPsCt1xN z^BbQ9*A8@T?D`VB_P(-f=gzK<-nG~4-r03e-p4KdB~i$(rjW3A6C*5LONG<2O3$;& zyimR-x|-{tV#yovdOj1%8I+m$6eZ%971EFwI`)9hkLZX!PBqTyxE0_i2`XZ|o~46H zou6@n6Q?Ru-m3K|2b_Gag5NUgNb*wIgX&N@SuPKiJ&3Cm9X-J2Z@}Lo|FqWuXDO!@ z)xv0dc7(|~HY$XdspdQtRp>LTG4JZ@ouQIPqVG>j+AaD4@ga>ib*7777aJjyI)|Vj z8g|DFz>ywgV1C;QtbNk~13biE+uUEvtG#WNe3L$j*y&77p8?Pgf&_(X$^(6FM-~tp z@<&*;wmN%5jbrC8&ca&Q3uocN0>3~&x)N0CY0`CpX=qcnLd|gpq#fWybLlKdyYk^bs3`yIUXT@e zv2SdB2vD1_L=6W`Fuu2>b{Y0Ic)f@CXmIl%V5AX!RunT3f7iOzulWBy{|Q+?CPB&N z2N3AGMDnXvN{p>+F`p`DT4WVZ$m)>96B0)x-XXCEp&~$DmZNz+7u`_fmw$)!=CAwK z$+0p&ROV-W_aYr-Gv!_AMyhOJ`j)F#Bu#YwbG+ga#Hv?Nzlf~Ps%uz;8?AxvAx1y8 z0`qG37CD<167n~20RJ8d`q82MG-Y*;?x0K6IVnE=<824wZ<5`%cWv;jp&Vn00xGD} z1#loB=)R^Yq1)mgLCar*P!Du(JgBOJ;k?SJ0JF*JYtbFWKAHNb#RP#Cnmm+;UwvqB hng>=)v-RXoD-C|O>Vv`EQ4M->upuMF*|AC%*>Y@M2ToWMhu{{+*5Ba^}ekg82Vur(+_ujmD^LrmZ zzi`28{`SZ1{R@nJ$<93v#*fg{FVV$po5eh~Qoha2*Xr5Z_J=ICW9K!C9l?8c&l%g> zmADqWarHIdc4O-~t1pBPEyn!kgET4p$6*n5H*?wdybb@^(N->_?tIcu!y_U6q8k>z zB6{T~*<`1#AA}OiVHRVglu*71K~i*OKHTkkp|1oe!b13uH%qY% zf2%9}P9oJrxSyz!H=eTLD@d@&py`B7`LB|q>%S7oZntQ*lTdkk;z$`%750T6mg8I| zyGaJc9&a}MjWqRh2$R!ekdCumAX*LE@0-Iaf0)JQ%)(!5gu}z2ovnEp1gk+9iH7HG zb)jdNrui#6qe{{vUy6QykYu|?3GgIo63U}H94tNZV5meK z2*bU$F=wCN%^IC47AlfSTf{KCq1=cMp2>XRKMMy!-i3XmBFQuV7me0kZ|#FbL3Wt! 
zreOF;gwpJLkc9q5=AYYQbItR99Y%YgD)L=nOw>N|oBor0a0Cl>3%_-*b=rc{^GCn{Oea+Z-87DRVRDV*G03@)bG$0Y|MK6 z%s%61))_l<#ud&+Zp`-_^;k%+vQvI)o!X~Pr((ROR=7lcgN^wI zXk!k_YRsTrW%CV({NsnuyZJDU{cecsf>WdbK;h>Zeujs$`}Xf-!+tPvm>E^89+3=s z=oB@yD`@H+bh~WCKD~Cri?`JOi?#1OXMbmPOIul{7tdLTrW*G&59$@|hCov$v@Hfo z*ZO%McR%dwB~qg!!eS_e(pLb#XfKe%jIa=>A}o|76Mple8HYvKY99BFno$m*HJ?6t z@&fnXYAUb>ec&RGRa1&xAne19&1RAG`^`Q`Q`IcprfH5Fn81)Bo-_tW-*^wwL={oq zAFSV^lhUzWcKrU^7u^T~_(c%}jR$ESg{fM{V9Hsq>53Wx;<`2^)5{x(Fp}W@RLWdx zZ*Ff%cGcd;c|1(TM-ZYb5^f-1Bjd~Us&`&kM zy>(JKaYs&X0p4Jbfd1=@D94<${q2{m@Oq0Q2U*4W3>jl04Xe+Qb&NMi;|)J6(|}5j zh2I~-5!%8>9Lo?9M8fy&xb;P!^FlX~7K#1ChnLQc&K~b`%!%swAw*+qCYLpdR6ZAprU zfG#jgxg!X)7X)w^fD+@SAfTLCt{C{p_du`REs?2QN>V~!m;f*mX&MBjilpaMXQM8` zhcs1W*qX)oa#hlt*?$HuvxwOzMENf6ZUiV5=nLLM+0a8U99#0%2$y?q*6md>`=6o) z;4GkdXp3k|XctC+#UBPz+-zE8H^p|>;CFV) zI%j8&Hwvq*c&A~+Y{*@8-;7AZi86O)qCYm;)$e0}s@KZ(8Ts5H?~`-3QJdtQd4)F9 zr!3S^DCK3{G?5lY66JuA_Vh694^nY|uFCrQy8lz!P~h-X5Y9|Md%UIKOe5kdc^>+d zT*x*^E{!P9VD6$!rtOSeNThd6fA?HmdDyBOQSc2DzHey{;b&rc<6C83sDcwNsPxiw zk&Fsg`^|)wN@2Y)GXT7Hb|a-<{ZQJ_OVh;K7a=nB^zR^)`U=_G$P_9tSy;r{Eh|{< zQ0~>0C`A>a-B=W;2DL+VZ5iX1DN4k_6kpm4<9HHD^wsY*Pu$OMf>&aRW5+5qMyH6ZNXP v8U%43A!wPXR-0bBxdXWX($bd1G>>cZAgpT-O8wU<c$sZZH_4{v zL3aCn1Ghfsh9c}WczA5J8tfm$vgq;WQz0(ICd>Hd!C5Q|R>(Y~ z`y-ZRBB6UlAu{BI;a*0M52#EhiAaH0>E%ieWyX@^oc5U(oWk0V&mZL~9)b4ektocH zcK9ySRLd+DRGj9?DQG^C#fUP_C7Ndm9Za*hn#$swc4(Yu138>3^P_ece#|C9?b65p z#bfx@lU$wX$8?j9iejR7I~}IZwmcRR=3;U+S%Cw*WP9E3HVgxLs5)`qJ0d;em*&eP>%|_y|vw?4|A>QXZW46 zEW84oSS?c8S#||E#p#V$308=R#YL2`bD@B@p0X1Wad|4W>?b0|Rg1@;PxLNKgw-Q} zJDZc9!<*dYHSP&_T!WcE@c&{Bh(T4o=kbs?o?1QNxmGU#RIQc4@xezqpC*F+%i!yP zR-Y6Az{Nmj81IZOfHOylp2L9-#FHf`bV%XZn?pn|^0jN&D7s6{ZgNFqmeIbTdLm*u zkS2WeM2HEEDQNwG`cY^~T*f=>+suNQsCESCykf@nXdObzKB zIiQM1%fA4Uv?%^xt6*%9tMOD9c`7d$;D7=~iwBNb1q5dfjqdhfLkxZnFl)FGaJ4dW}P!5 zE9;t|5nRH5qcv^;JpH4&`=&@5DxYRNN@OYvg^oUI*=6HIDCm5VmKUOQ`;uuW9baaX zgvFw@Ue@#!`jEoCl|dBAOcqg8HX!&B;UyBiW8=F6#dQm>XQy<1>5dijL$o9&V}wA?C*aH?Pa^8r%FUAw4a>o4!{Sn@Xp>* zr;usdNkKKy9Rmp6nZqm+cyx$1G^yvno1eqbw}4pAI`N20ZjlXgo2&!9xVh*L?M3in z3>7|@0*vcE4E-3$jEwPE0~|cFzknQ%iE8s2q6H`!+uW~Y8`cY!kvlW{l9(BwW(=B_ zWd7~VC4hyokMj-0LvVD3KfkJy4|&F(!DTu}$-f`8Z1qb}P_F^`#ylw9RIscHzRMu> z3OB5NhGYZDZ6q%MX*udec$MU=bk8C!)T<~lXjTh$4DEfTLlDWi>BA&-lRAJJ6C_eneq)FYy z->=O<5sM@NouzXvqA17}`e@+(JWMo}dl0yGKyEh-HwMP?K~UqP(egKrk-!`qP|r{6 zJ*Zf`s`P5+*zh0^_44NO0AB3&;;;0LCG&euWz&rgRi4tvKk0#t2hCyG96^SlDO6o< zu}`dYL@Zq4EWHEdTU`$Ol2gFZ3H^*)vw{D?jhMk(nezPgI3iRlJ74j%;bF-8le+7 zE?M_Xc8^+3W2LMWc!93UHN3vbRcD4j5(%6@+8i^bSKY3v^Jr~;H#S#SVQiMSueVZN j3FgA3Yb%{7_(Rn}{@6tC5OpqT+D+1QZoTR{x1RqRAn`{A literal 0 HcmV?d00001 diff --git a/timm/models/layers/__pycache__/std_conv.cpython-36.pyc b/timm/models/layers/__pycache__/std_conv.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b020b1045f13cdbd9b6f00859e5c239984666a6d GIT binary patch literal 6033 zcmd^@O>^7E8OH$vBtejrY*}#}=WY8QrWPU5NgAtZVq3N2X=6FH6g%Z)3PS`)fd&C) z7nCf8>Y?&XPW}$L=GIHkz4Z(9hEr#H$ap%P>8&%pr2l6DQldinE$vKa3eBz-3+!U| z+2{G~e|f%IHLYL1y7BdCMft08=+aTXh$Mc8%vG9-tGc;BZL0EruB$aQS<<^&H{UF{ znyUxJp3yXVrDmyTHceG|Uvcwp;lAP)Jgr;a*P0d76kP)~MpjcrP02M;V`eoos42S@ z)Ks#XSvR+))T+)GIYnVBQMeTagV>Kkw(ZB8?6&83HeQl zW@{}c@Z7AXW|~=P*}UlR*lz{iXWOzDG1t$miaqY{`Y?j< zJ2cilkNQq0U57YgtsoK}i#I)nM(v3AoUrAwj?-s;$Od6ZHG#kDx$LGVR=rrFp*+&BN z!-}xW{(5Hog6kH{~*nmioHi;?croCvI)Bb?*2fEx3$8S+@XJ2tzMmtJ!j< z%TLia-MjCw1=&9>^~f(L7y7`lw;J+Q4ge^tSHF7j_dk5`&p-YC-IkeIivCm5ki;^w zVcu2l8hk!hhT2dc7WTEhe5~#(-P}+e=44%dSQr+EhO0g>RApEiDz@e-uwrhtmQM;I z=DzDC#nIv!t{=!5C4~-;27Qs}8@?lII;U?Yc`<-*Cx&gqF0pMVn%5UKGtq??v=h@0 
z?N(}@q!JC{(TzyTcRdcH*#hp*4fMoIl+j#xnX(FHv&d>{QnKw<;0R&cUn>8)VYyE1 zG_2k3jztbHtSc)kx8Q^gOANSY_u#XUD=hAHfXeqTty;0)>sjg2T7k3UabbzrwOjC$ zI(&KtA7My>KB;JiT2TwCq0Xqgb7QZnkLK5+m7t4ePvc%e5;u{>N>?4i&Ry&d_^-S9 zeb^O$IakAd-qjxzx&?Y;U){^$?V?-}rE8!LtI#!(%1D*{;+{HGu_A?4x$x^9bT2PN zmJw$zm>9jN=Y{d0C(ZQmYh1NzqFznR4f0i+@RAg^CEt@G0)CU&HHVI5gT&0LY)2#o zGDgEqtv+QrY8pR*hKYKQzeF88H_otoDoGKcw2eLJb=(MALq*lqdG)l))ZLQ@M$F!- z6;gkexy-KeBHGl706#Kq8%s6_s9dq_PX|trJt^9@8?|g(E*$?38Yb1uL0VoA z;K6uwHI>ifIj2E#qRUw%LGv^6dQMSi3|ZSk#rS%52vx^3UBeu9|MnyxTE7ag_WFph z4eW|*m#$x7S>zpp)Ga{j+Rg?7GbPmjD4-IZ#y^Ep6wqTzUBnIOlu!8t3hAHZ_VRn? zF_g+lN+C9EO|nXttkPC%xfC1JZ!Lc;Lhy+69IsMFk)9Z|?aEzVFzB<3$5 ztDWE!fvsp6I7JtKkunL1m#H-74S$7hU!_d4UGknhBV!9^^k@oQWQbqJLqVoh zV9EIdx;!^&(@Dbc*QiS(c1D>RinSvDF4cUGvhP#&1Inmx`6d2CDosbokMO90G>H(Z zoj?eA4_)8XF=eS^=p;ZoM(Y1VI;N7$PRPxM>%0L$m2pIFU8(R$-#SKL9HGO1NvS?o z#2iwqQp2p?SiiC?#c+geNJ(W(uMfPdk4}v+;gwACMw&Iubv^6a;CAk-{P{o z6pv-!d5o98i7e5#d~Z7?=@AxA*>0YQiVnIxq_<8$aqT{T)SM$G8WMu()MRm;FVNWP zlu_y1kntSae}I&dv8ZbhS0<<0{}35Z9hnwMEa#J_r=$E$V%-Zx#M?o%y%6|!Cv_LY ziKZX%c=8bVe@s!ii$02^q^-sz-NMZjc~^Oqyqh4Yh9A_lDJ0c1k`~4!tq@5iPpgur z#U~YosU$dttusdiBeyUkzDU^;WwigNaFmfP1VwNyE zQk}CIW>27wwChPEQk`?ph}l!mpgqU@>YCsASpA|sw&DR?8p0YxYxGI>Z4D*^*zF2iKHcRl0TnWcNy ziLLc%jdIHWfE!2t1OK3}octF!0AKa4?eOT;R99D5SJ&(7{jl5h2cO@3`dfps-`Oj} z1OF*L9fAlJGQowD^N?e9b7$;^E?>yJ%*|?HU3j9FH^$Ad$(iI?OZsyjwjVN47mZUU z8q&==b2sdYwrC0el!raxe8u{m|am3O4T@N)?af2an_DR@nFb zM{#l>MNpL^S?x;|JUt2q!A?0jQt4>F3bt==-wwVkwGO_e^*Ap4Gq}!n``kE1amK-e zy&j8rQl*D7iVsKQ_<1zJmD?gA)$>8I)6nmMNQRhM=!p6YHbf4Dd)f+JiuRO+-V5fj zu!cAqW|hLAC@6?WG8SjZDN9_kgh58@_-L(v0;yQVXKe0d&YW>J10T6F2h^J}O6i=P zxK(}TR*kGVchtu-7kjPZds&=jE%qvUD>YUB_oZ}wS zSX*k)ywF(7Ue$23E5ajf!4O()SCP|ueaEy9q$*?{>GW6{?`axqW%GK~)bw;Bm8wC} zG@~dj(khCe5^HI?B}$YQac+}sdMIF&SBgd}cgyP&LWeD$f1mzP^WTnGaPP z+o}T+HptCrn4mve(ZYTd{mlOOdO)VP2gljbASs2^gZn!>-=NdB2O9oFV{~FE^gzi` zS{4uQ?GCDRJRXdpn(ILxA4#PL71|<#T-xZj(62u3T*IfYgD~FXb^LtZ;Z4Wm>-;VJ zjyL|@OJ_C&B4rDxQ z;A;wZ&QWm!|N-&(-TBv*ZnuG?pfBVxcKX3^}=i%=r9x{Xh9wD)+e}SoQ1Fc14Ud zN5+pL)L@zu--)6hrg6U5v8h*YL*I0lg-&FiN71D#u=g7F6?tUanL^MMLX-D;Rn4PKW7rB%ODd3LxP_q<7}1I|4w?_(5IWp;@>a?8GYw%qRiz zC=O=P-e5CNbT-(5r+!(=x>nX($f(4kQJw3r2==410hmMLiPiJf(76s7XH{)R4Z=ke zwp%Zq6DIr zn*qTtcvlB^gNc%riqirw_R=ckw}L64J_^WmK#&ON^@YT=h_Uw||Afgs^(Lq#XMdvY z)Gbv8oF z_WFFUzXCraQ6}ii2Kh^&*Q!z_`-_+Fs^zzeVrw`pk_wMXoE!g=R{;#ciU0WzvnVqy y)Zz=92=xwB)fVa3A6qckd{|driRu!4%}*AXxl6sSdmx_E1OPiu$MxMSH~t1E?IeT% literal 0 HcmV?d00001 diff --git a/timm/models/layers/__pycache__/trace_utils.cpython-36.pyc b/timm/models/layers/__pycache__/trace_utils.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8172c06a1df2738c3117589ee14b8b8bb4cde1f1 GIT binary patch literal 684 zcmYjP&5qMB5Vq6wXS;OO6IXmdLb+^E;e-%kRjpt_Du|W%*-M(n(>9RU)p#OpR(qm6 z1~0%X<;say;KbM~p-wb29%uZ`%x6EDOvdTk2dj5dh~MHsj(Po()xBZUPPNdo61BXN zQhXBTVJsl}HpZt?_z!vz>*!~sdpiCRU&$}RM4tupVW9iGJJ3Vk9qQ4gNRIw6o=9Xj z%yQ=dDT%tH!WvytX-zk*!MS_`MdZ_1y!Se1byGIC!rdcrEBErd)PfP^g3(US0vJP_ zBU(%%>~Su}Y<>wGu>1QV)j8#}^yX`u7FL5x&*$@xv&XYp>KcTs2HINfQiKh|ygFN? 
zRMvG`b86+%DsKT@N|+arHB?sabl0Nq%-3+CT^V8?NM3gX1ZvO}%xe(u|IabvfM*bU z>HELgj}^;hTd!mZ+5sxE$~7xTB{c+UZBdnG)sz*fWoc-68VLt8V|QlAVK>RvmCY$5 zn{krQxSP0-{+%q6QP(Gn#WoPo9beQtixh66i&xfGUC$AA{Q!bvU%;O`sN!!3L#feo UX1uB3g=ft9Qjwg>6L~-W2XS1d_y7O^ literal 0 HcmV?d00001 diff --git a/timm/models/layers/__pycache__/weight_init.cpython-36.pyc b/timm/models/layers/__pycache__/weight_init.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2049d6c6a3c34e4927c8de05daf401748278d8ef GIT binary patch literal 2669 zcmai0OK%)S5bmCt-5qai;{-zt0nxm@QrJZfaIk`tC<+`v77&Dx39Ng!d)H&nBlPt6 z5yleWTqAw};)=u{K%5YNfIl!CK|+x@a7G;Xs%LFB5X4AbUENh(-SwUeb93$98&3@W zoMr51HuVL-FJt6y0w|_<%r@$oZ*aHvHayH;>?grSz?t$@aGPy3)M*u}ncIA$sm`cb zH3zJv=G6nBg^FKiod@qiB3tk9>hbxd4mlG9vds<{Nk%2tgXK?-P%Bj)C2>DktHs9HyS5pvw#Ovxu$ z8+QaMq?=22oxRQ8Vei9$Fu=_EkV6alXeKqUW8{wkC{B+q8K9@UBTf%I3oDTmlsC&6K%ILmG_JuM_Ky%mGz#DlBAb_HO_mnJkTcZ?dfQ|W#OO5c5fb3VVaqw zKU5>~O778%yj#otH0zIzRDElTbcmHnWZdswj5BySU+W$px=ROcZ6{i$BFaUQ87=G< z7AqpRN{mdFh!5m>G5AnKsYuhVyY^n8;@vGRRFqp24GN2gi)#9GvI^5l@ApwE-OATGsMl~ ziWkmPHK16p9=^XRiIzVK!2B=}yy=BrJ6PZg!DB(_FY>lO&&?B{{z@$LO_1L&9q(K| z$tikj6T{`y0uYNRMgDjcwMPvE8<#)AQLjaDoWnh2&zpr6?>c3~USzk9?X8U5OKZ1D z;oO9LR&9Z=j)8j|)pf3$xJttHdNhKQB6#LNOo_sk|0P1KQoyfn-k=Gw^~w6-@(ru^ z?I&d7!}FqEz|w~wt#me>nCSF^l zZB6WnS4FL)PHx(RM2ZRh#-5`d&!AtPD@FGNjIy&fBIsRr+7Duh4itN0Ubtm|Zu|;QdvL zG)3ZUI7IG_O`EnCPV#+H@5i5g^V7G#Jp0=v*xH%)S$BF;Ht$lIy!8<3ri>#*)pP;x zvz%#F*y`o>>9Uzxuih zfHR;_0MKuX^6&WgS0d8}wQ0@sAa^|T0w&WXoI6>VT$HwG-vQSf=3+f8(&O&>K8STh za&rOraiRaA6Bt1UpCs@Sfu{gEe!W31HRKr>@igJ&7xMtX#$26WlRT None: + super(PReLU, self).__init__(num_parameters=num_parameters, init=init) + + def forward(self, input: torch.Tensor) -> torch.Tensor: + return F.prelu(input, self.weight) + + +def gelu(x: torch.Tensor, inplace: bool = False) -> torch.Tensor: + return F.gelu(x) + + +class GELU(nn.Module): + """Applies the Gaussian Error Linear Units function (w/ dummy inplace arg) + """ + def __init__(self, inplace: bool = False): + super(GELU, self).__init__() + + def forward(self, input: torch.Tensor) -> torch.Tensor: + return F.gelu(input) diff --git a/timm/models/layers/activations_jit.py b/timm/models/layers/activations_jit.py new file mode 100644 index 0000000..b4a5165 --- /dev/null +++ b/timm/models/layers/activations_jit.py @@ -0,0 +1,90 @@ +""" Activations + +A collection of jit-scripted activations fn and modules with a common interface so that they can +easily be swapped. All have an `inplace` arg even if not used. + +All jit scripted activations are lacking in-place variations on purpose, scripted kernel fusion does not +currently work across in-place op boundaries, thus performance is equal to or less than the non-scripted +versions if they contain in-place ops. 
diff --git a/timm/models/layers/activations_jit.py b/timm/models/layers/activations_jit.py
new file mode 100644
index 0000000..b4a5165
--- /dev/null
+++ b/timm/models/layers/activations_jit.py
@@ -0,0 +1,90 @@
+""" Activations
+
+A collection of jit-scripted activation functions and modules with a common interface so that
+they can easily be swapped. All have an `inplace` arg even if not used.
+
+All jit-scripted activations deliberately lack in-place variants: scripted kernel fusion does not
+currently work across in-place op boundaries, so performance would be equal to or less than the
+non-scripted versions if they contained in-place ops.
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+
+import torch
+from torch import nn as nn
+from torch.nn import functional as F
+
+
+@torch.jit.script
+def swish_jit(x, inplace: bool = False):
+    """Swish - Described in: https://arxiv.org/abs/1710.05941
+    """
+    return x.mul(x.sigmoid())
+
+
+@torch.jit.script
+def mish_jit(x, _inplace: bool = False):
+    """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
+    """
+    return x.mul(F.softplus(x).tanh())
+
+
+class SwishJit(nn.Module):
+    def __init__(self, inplace: bool = False):
+        super(SwishJit, self).__init__()
+
+    def forward(self, x):
+        return swish_jit(x)
+
+
+class MishJit(nn.Module):
+    def __init__(self, inplace: bool = False):
+        super(MishJit, self).__init__()
+
+    def forward(self, x):
+        return mish_jit(x)
+
+
+@torch.jit.script
+def hard_sigmoid_jit(x, inplace: bool = False):
+    # return F.relu6(x + 3.) / 6.
+    return (x + 3).clamp(min=0, max=6).div(6.)  # clamp seems ever so slightly faster?
+
+
+class HardSigmoidJit(nn.Module):
+    def __init__(self, inplace: bool = False):
+        super(HardSigmoidJit, self).__init__()
+
+    def forward(self, x):
+        return hard_sigmoid_jit(x)
+
+
+@torch.jit.script
+def hard_swish_jit(x, inplace: bool = False):
+    # return x * (F.relu6(x + 3.) / 6)
+    return x * (x + 3).clamp(min=0, max=6).div(6.)  # clamp seems ever so slightly faster?
+
+
+class HardSwishJit(nn.Module):
+    def __init__(self, inplace: bool = False):
+        super(HardSwishJit, self).__init__()
+
+    def forward(self, x):
+        return hard_swish_jit(x)
+
+
+@torch.jit.script
+def hard_mish_jit(x, inplace: bool = False):
+    """ Hard Mish
+    Experimental, based on notes by Mish author Diganta Misra at
+      https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md
+    """
+    return 0.5 * x * (x + 2).clamp(min=0, max=2)
+
+
+class HardMishJit(nn.Module):
+    def __init__(self, inplace: bool = False):
+        super(HardMishJit, self).__init__()
+
+    def forward(self, x):
+        return hard_mish_jit(x)
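Editor's note: the scripted functions and their eager counterparts share the `(x, inplace)` signature, so the two families are drop-in replacements for each other. A minimal sanity-check sketch, assuming the vendored layout above makes `timm.models.layers.activations_jit` importable:

    import torch
    import torch.nn.functional as F
    from timm.models.layers.activations_jit import swish_jit, SwishJit

    x = torch.randn(4, 16)

    # The module form simply forwards to the scripted function; the inplace
    # arg is accepted for interface compatibility but unused.
    assert torch.allclose(swish_jit(x), SwishJit()(x))

    # Swish is x * sigmoid(x), i.e. PyTorch's own SiLU.
    assert torch.allclose(swish_jit(x), F.silu(x), atol=1e-6)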
diff --git a/timm/models/layers/activations_me.py b/timm/models/layers/activations_me.py
new file mode 100644
index 0000000..9a12bb7
--- /dev/null
+++ b/timm/models/layers/activations_me.py
@@ -0,0 +1,218 @@
+""" Activations (memory-efficient w/ custom autograd)
+
+A collection of activation functions and modules with a common interface so that they can
+easily be swapped. All have an `inplace` arg even if not used.
+
+These activations are not compatible with jit scripting or ONNX export of the model; please use
+either the JIT or basic versions of the activations instead.
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+
+import torch
+from torch import nn as nn
+from torch.nn import functional as F
+
+
+@torch.jit.script
+def swish_jit_fwd(x):
+    return x.mul(torch.sigmoid(x))
+
+
+@torch.jit.script
+def swish_jit_bwd(x, grad_output):
+    x_sigmoid = torch.sigmoid(x)
+    return grad_output * (x_sigmoid * (1 + x * (1 - x_sigmoid)))
+
+
+class SwishJitAutoFn(torch.autograd.Function):
+    """ torch.jit.script optimised Swish w/ memory-efficient checkpoint
+    Inspired by conversation btw Jeremy Howard & Adam Pazske
+    https://twitter.com/jeremyphoward/status/1188251041835315200
+    """
+    @staticmethod
+    def symbolic(g, x):
+        return g.op("Mul", x, g.op("Sigmoid", x))
+
+    @staticmethod
+    def forward(ctx, x):
+        ctx.save_for_backward(x)
+        return swish_jit_fwd(x)
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        x = ctx.saved_tensors[0]
+        return swish_jit_bwd(x, grad_output)
+
+
+def swish_me(x, inplace=False):
+    return SwishJitAutoFn.apply(x)
+
+
+class SwishMe(nn.Module):
+    def __init__(self, inplace: bool = False):
+        super(SwishMe, self).__init__()
+
+    def forward(self, x):
+        return SwishJitAutoFn.apply(x)
+
+
+@torch.jit.script
+def mish_jit_fwd(x):
+    return x.mul(torch.tanh(F.softplus(x)))
+
+
+@torch.jit.script
+def mish_jit_bwd(x, grad_output):
+    x_sigmoid = torch.sigmoid(x)
+    x_tanh_sp = F.softplus(x).tanh()
+    return grad_output.mul(x_tanh_sp + x * x_sigmoid * (1 - x_tanh_sp * x_tanh_sp))
+
+
+class MishJitAutoFn(torch.autograd.Function):
+    """ Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
+    A memory efficient, jit scripted variant of Mish
+    """
+    @staticmethod
+    def forward(ctx, x):
+        ctx.save_for_backward(x)
+        return mish_jit_fwd(x)
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        x = ctx.saved_tensors[0]
+        return mish_jit_bwd(x, grad_output)
+
+
+def mish_me(x, inplace=False):
+    return MishJitAutoFn.apply(x)
+
+
+class MishMe(nn.Module):
+    def __init__(self, inplace: bool = False):
+        super(MishMe, self).__init__()
+
+    def forward(self, x):
+        return MishJitAutoFn.apply(x)
+
+
+@torch.jit.script
+def hard_sigmoid_jit_fwd(x, inplace: bool = False):
+    return (x + 3).clamp(min=0, max=6).div(6.)
+
+
+@torch.jit.script
+def hard_sigmoid_jit_bwd(x, grad_output):
+    m = torch.ones_like(x) * ((x >= -3.) & (x <= 3.)) / 6.
+    return grad_output * m
+
+
+class HardSigmoidJitAutoFn(torch.autograd.Function):
+    @staticmethod
+    def forward(ctx, x):
+        ctx.save_for_backward(x)
+        return hard_sigmoid_jit_fwd(x)
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        x = ctx.saved_tensors[0]
+        return hard_sigmoid_jit_bwd(x, grad_output)
+
+
+def hard_sigmoid_me(x, inplace: bool = False):
+    return HardSigmoidJitAutoFn.apply(x)
+
+
+class HardSigmoidMe(nn.Module):
+    def __init__(self, inplace: bool = False):
+        super(HardSigmoidMe, self).__init__()
+
+    def forward(self, x):
+        return HardSigmoidJitAutoFn.apply(x)
+
+
+@torch.jit.script
+def hard_swish_jit_fwd(x):
+    return x * (x + 3).clamp(min=0, max=6).div(6.)
+
+
+@torch.jit.script
+def hard_swish_jit_bwd(x, grad_output):
+    m = torch.ones_like(x) * (x >= 3.)
+    m = torch.where((x >= -3.) & (x <= 3.), x / 3. + .5, m)
+    return grad_output * m
+
+
+class HardSwishJitAutoFn(torch.autograd.Function):
+    """A memory efficient, jit-scripted HardSwish activation"""
+    @staticmethod
+    def forward(ctx, x):
+        ctx.save_for_backward(x)
+        return hard_swish_jit_fwd(x)
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        x = ctx.saved_tensors[0]
+        return hard_swish_jit_bwd(x, grad_output)
+
+    @staticmethod
+    def symbolic(g, self):
+        input = g.op("Add", self, g.op('Constant', value_t=torch.tensor(3, dtype=torch.float)))
+        hardtanh_ = g.op("Clip", input, g.op('Constant', value_t=torch.tensor(0, dtype=torch.float)), g.op('Constant', value_t=torch.tensor(6, dtype=torch.float)))
+        hardtanh_ = g.op("Div", hardtanh_, g.op('Constant', value_t=torch.tensor(6, dtype=torch.float)))
+        return g.op("Mul", self, hardtanh_)
+
+
+def hard_swish_me(x, inplace=False):
+    return HardSwishJitAutoFn.apply(x)
+
+
+class HardSwishMe(nn.Module):
+    def __init__(self, inplace: bool = False):
+        super(HardSwishMe, self).__init__()
+
+    def forward(self, x):
+        return HardSwishJitAutoFn.apply(x)
+
+
+@torch.jit.script
+def hard_mish_jit_fwd(x):
+    return 0.5 * x * (x + 2).clamp(min=0, max=2)
+
+
+@torch.jit.script
+def hard_mish_jit_bwd(x, grad_output):
+    m = torch.ones_like(x) * (x >= -2.)
+    m = torch.where((x >= -2.) & (x <= 0.), x + 1., m)
+    return grad_output * m
+
+
+class HardMishJitAutoFn(torch.autograd.Function):
+    """ A memory efficient, jit scripted variant of Hard Mish
+    Experimental, based on notes by Mish author Diganta Misra at
+      https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md
+    """
+    @staticmethod
+    def forward(ctx, x):
+        ctx.save_for_backward(x)
+        return hard_mish_jit_fwd(x)
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        x = ctx.saved_tensors[0]
+        return hard_mish_jit_bwd(x, grad_output)
+
+
+def hard_mish_me(x, inplace: bool = False):
+    return HardMishJitAutoFn.apply(x)
+
+
+class HardMishMe(nn.Module):
+    def __init__(self, inplace: bool = False):
+        super(HardMishMe, self).__init__()
+
+    def forward(self, x):
+        return HardMishJitAutoFn.apply(x)
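Editor's note: the `*JitAutoFn` classes save only the input and recompute intermediates in backward, so their hand-written gradients should agree with numeric differentiation. A quick check sketched with `torch.autograd.gradcheck` (the float64 dtype and small shape are gradcheck conventions; the import path assumes the vendored layout):

    import torch
    from timm.models.layers.activations_me import SwishJitAutoFn, HardSwishJitAutoFn

    # gradcheck compares the custom backward (e.g. swish_jit_bwd) against
    # finite differences; it requires double-precision inputs.
    x = torch.randn(3, 5, dtype=torch.double, requires_grad=True)
    assert torch.autograd.gradcheck(SwishJitAutoFn.apply, (x,))
    assert torch.autograd.gradcheck(HardSwishJitAutoFn.apply, (x,))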
diff --git a/timm/models/layers/adaptive_avgmax_pool.py b/timm/models/layers/adaptive_avgmax_pool.py
new file mode 100644
index 0000000..ebc6ada
--- /dev/null
+++ b/timm/models/layers/adaptive_avgmax_pool.py
@@ -0,0 +1,118 @@
+""" PyTorch selectable adaptive pooling
+Adaptive pooling with the ability to select the type of pooling from:
+    * 'avg' - Average pooling
+    * 'max' - Max pooling
+    * 'avgmax' - Sum of average and max pooling re-scaled by 0.5
+    * 'catavgmax' - Concatenation of average and max pooling along feature dim, doubles feature dim
+
+Both a functional and a nn.Module version of the pooling are provided.
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+def adaptive_pool_feat_mult(pool_type='avg'):
+    if pool_type == 'catavgmax':
+        return 2
+    else:
+        return 1
+
+
+def adaptive_avgmax_pool2d(x, output_size=1):
+    x_avg = F.adaptive_avg_pool2d(x, output_size)
+    x_max = F.adaptive_max_pool2d(x, output_size)
+    return 0.5 * (x_avg + x_max)
+
+
+def adaptive_catavgmax_pool2d(x, output_size=1):
+    x_avg = F.adaptive_avg_pool2d(x, output_size)
+    x_max = F.adaptive_max_pool2d(x, output_size)
+    return torch.cat((x_avg, x_max), 1)
+
+
+def select_adaptive_pool2d(x, pool_type='avg', output_size=1):
+    """Selectable global pooling function with dynamic input kernel size
+    """
+    if pool_type == 'avg':
+        x = F.adaptive_avg_pool2d(x, output_size)
+    elif pool_type == 'avgmax':
+        x = adaptive_avgmax_pool2d(x, output_size)
+    elif pool_type == 'catavgmax':
+        x = adaptive_catavgmax_pool2d(x, output_size)
+    elif pool_type == 'max':
+        x = F.adaptive_max_pool2d(x, output_size)
+    else:
+        assert False, 'Invalid pool type: %s' % pool_type
+    return x
+
+
+class FastAdaptiveAvgPool2d(nn.Module):
+    def __init__(self, flatten=False):
+        super(FastAdaptiveAvgPool2d, self).__init__()
+        self.flatten = flatten
+
+    def forward(self, x):
+        return x.mean((2, 3), keepdim=not self.flatten)
+
+
+class AdaptiveAvgMaxPool2d(nn.Module):
+    def __init__(self, output_size=1):
+        super(AdaptiveAvgMaxPool2d, self).__init__()
+        self.output_size = output_size
+
+    def forward(self, x):
+        return adaptive_avgmax_pool2d(x, self.output_size)
+
+
+class AdaptiveCatAvgMaxPool2d(nn.Module):
+    def __init__(self, output_size=1):
+        super(AdaptiveCatAvgMaxPool2d, self).__init__()
+        self.output_size = output_size
+
+    def forward(self, x):
+        return adaptive_catavgmax_pool2d(x, self.output_size)
+
+
+class SelectAdaptivePool2d(nn.Module):
+    """Selectable global pooling layer with dynamic input kernel size
+    """
+    def __init__(self, output_size=1, pool_type='fast', flatten=False):
+        super(SelectAdaptivePool2d, self).__init__()
+        self.pool_type = pool_type or ''  # convert other falsy values to empty string for consistent TS typing
+        self.flatten = nn.Flatten(1) if flatten else nn.Identity()
+        if pool_type == '':
+            self.pool = nn.Identity()  # pass through
+        elif pool_type == 'fast':
+            assert output_size == 1
+            self.pool = FastAdaptiveAvgPool2d(flatten)
+            self.flatten = nn.Identity()
+        elif pool_type == 'avg':
+            self.pool = nn.AdaptiveAvgPool2d(output_size)
+        elif pool_type == 'avgmax':
+            self.pool = AdaptiveAvgMaxPool2d(output_size)
+        elif pool_type == 'catavgmax':
+            self.pool = AdaptiveCatAvgMaxPool2d(output_size)
+        elif pool_type == 'max':
+            self.pool = nn.AdaptiveMaxPool2d(output_size)
+        else:
+            assert False, 'Invalid pool type: %s' % pool_type
+
+    def is_identity(self):
+        return not self.pool_type
+
+    def forward(self, x):
+        x = self.pool(x)
+        x = self.flatten(x)
+        return x
+
+    def feat_mult(self):
+        return adaptive_pool_feat_mult(self.pool_type)
+
+    def __repr__(self):
+        return self.__class__.__name__ + ' (' \
+               + 'pool_type=' + self.pool_type \
+               + ', flatten=' + str(self.flatten) + ')'
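Editor's note: the pool_type strings map one-to-one onto the branches above, and the module variant reports its feature multiplier so downstream heads can size themselves. A shape sketch (import path assumes the vendored layout; the values in comments follow from the definitions):

    import torch
    from timm.models.layers.adaptive_avgmax_pool import SelectAdaptivePool2d

    x = torch.randn(2, 64, 7, 7)

    print(SelectAdaptivePool2d(pool_type='avg', flatten=True)(x).shape)        # torch.Size([2, 64])
    print(SelectAdaptivePool2d(pool_type='avgmax', flatten=True)(x).shape)     # torch.Size([2, 64]), 0.5 * (avg + max)
    print(SelectAdaptivePool2d(pool_type='catavgmax', flatten=True)(x).shape)  # torch.Size([2, 128]), feat_mult() == 2
    print(SelectAdaptivePool2d(pool_type='', flatten=True)(x).shape)           # torch.Size([2, 3136]), pooling disabled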
diff --git a/timm/models/layers/attention_pool2d.py b/timm/models/layers/attention_pool2d.py
new file mode 100644
index 0000000..66e49b8
--- /dev/null
+++ b/timm/models/layers/attention_pool2d.py
@@ -0,0 +1,182 @@
+""" Attention Pool 2D
+
+Implementations of 2D spatial feature pooling using multi-head attention instead of average pool.
+
+Based on idea in CLIP by OpenAI, licensed Apache 2.0
+https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py
+
+Hacked together by / Copyright 2021 Ross Wightman
+"""
+import math
+from typing import List, Union, Tuple
+
+import torch
+import torch.nn as nn
+
+from .helpers import to_2tuple
+from .weight_init import trunc_normal_
+
+
+def rot(x):
+    return torch.stack([-x[..., 1::2], x[..., ::2]], -1).reshape(x.shape)
+
+
+def apply_rot_embed(x: torch.Tensor, sin_emb, cos_emb):
+    return x * cos_emb + rot(x) * sin_emb
+
+
+def apply_rot_embed_list(x: List[torch.Tensor], sin_emb, cos_emb):
+    if isinstance(x, torch.Tensor):
+        x = [x]
+    return [t * cos_emb + rot(t) * sin_emb for t in x]
+
+
+class RotaryEmbedding(nn.Module):
+    """ Rotary position embedding
+
+    NOTE: This is my initial attempt at impl rotary embedding for spatial use, it has not
+    been well tested, and will likely change. It will be moved to its own file.
+
+    The following impl/resources were referenced for this impl:
+    * https://github.com/lucidrains/vit-pytorch/blob/6f3a5fcf0bca1c5ec33a35ef48d97213709df4ba/vit_pytorch/rvt.py
+    * https://blog.eleuther.ai/rotary-embeddings/
+    """
+    def __init__(self, dim, max_freq=4):
+        super().__init__()
+        self.dim = dim
+        self.register_buffer('bands', 2 ** torch.linspace(0., max_freq - 1, self.dim // 4), persistent=False)
+
+    def get_embed(self, shape: torch.Size, device: torch.device = None, dtype: torch.dtype = None):
+        """
+        NOTE: shape arg should include spatial dim only
+        """
+        device = device or self.bands.device
+        dtype = dtype or self.bands.dtype
+        if not isinstance(shape, torch.Size):
+            shape = torch.Size(shape)
+        N = shape.numel()
+        grid = torch.stack(torch.meshgrid(
+            [torch.linspace(-1., 1., steps=s, device=device, dtype=dtype) for s in shape]), dim=-1).unsqueeze(-1)
+        emb = grid * math.pi * self.bands
+        sin = emb.sin().reshape(N, -1).repeat_interleave(2, -1)
+        cos = emb.cos().reshape(N, -1).repeat_interleave(2, -1)
+        return sin, cos
+
+    def forward(self, x):
+        # assuming channel-first tensor where spatial dim are >= 2
+        sin_emb, cos_emb = self.get_embed(x.shape[2:])
+        return apply_rot_embed(x, sin_emb, cos_emb)
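Editor's note: `rot` pairs adjacent channels and `apply_rot_embed` rotates each pair by a per-position angle, encoding position without changing feature norms. A small shape/invariance sketch (the 2x2 grid, 16 channels, and the (N, dim) token layout are illustrative choices, not anything the patch requires):

    import torch
    from timm.models.layers.attention_pool2d import RotaryEmbedding, apply_rot_embed

    pe = RotaryEmbedding(dim=16)                 # dim // 4 = 4 frequency bands
    sin_emb, cos_emb = pe.get_embed(torch.Size((2, 2)))
    print(sin_emb.shape)                         # torch.Size([4, 16]): N = H * W positions x dim

    x = torch.randn(4, 16)                       # one token per spatial position
    y = apply_rot_embed(x, sin_emb, cos_emb)
    # each channel pair is rotated, so per-token norms are preserved
    assert torch.allclose(x.norm(dim=-1), y.norm(dim=-1), atol=1e-5)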
+
+
+class RotAttentionPool2d(nn.Module):
+    """ Attention based 2D feature pooling w/ rotary (relative) pos embedding.
+    This is a multi-head attention based replacement for (spatial) average pooling in NN architectures.
+
+    Adapted from the AttentionPool2d in CLIP w/ rotary embedding instead of learned embed.
+    https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py
+
+    NOTE: While this impl does not require a fixed feature size, performance at differing resolutions from
+    train varies widely and falls off dramatically. I'm not sure if there is a way around this... -RW
+    """
+    def __init__(
+            self,
+            in_features: int,
+            out_features: int = None,
+            embed_dim: int = None,
+            num_heads: int = 4,
+            qkv_bias: bool = True,
+    ):
+        super().__init__()
+        embed_dim = embed_dim or in_features
+        out_features = out_features or in_features
+        self.qkv = nn.Linear(in_features, embed_dim * 3, bias=qkv_bias)
+        self.proj = nn.Linear(embed_dim, out_features)
+        self.num_heads = num_heads
+        assert embed_dim % num_heads == 0
+        self.head_dim = embed_dim // num_heads
+        self.scale = self.head_dim ** -0.5
+        self.pos_embed = RotaryEmbedding(self.head_dim)
+
+        trunc_normal_(self.qkv.weight, std=in_features ** -0.5)
+        nn.init.zeros_(self.qkv.bias)
+
+    def forward(self, x):
+        B, _, H, W = x.shape
+        N = H * W
+        sin_emb, cos_emb = self.pos_embed.get_embed(x.shape[2:])
+        x = x.reshape(B, -1, N).permute(0, 2, 1)
+
+        x = torch.cat([x.mean(1, keepdim=True), x], dim=1)
+
+        x = self.qkv(x).reshape(B, N + 1, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
+        q, k, v = x[0], x[1], x[2]
+
+        qc, q = q[:, :, :1], q[:, :, 1:]
+        q = apply_rot_embed(q, sin_emb, cos_emb)
+        q = torch.cat([qc, q], dim=2)
+
+        kc, k = k[:, :, :1], k[:, :, 1:]
+        k = apply_rot_embed(k, sin_emb, cos_emb)
+        k = torch.cat([kc, k], dim=2)
+
+        attn = (q @ k.transpose(-2, -1)) * self.scale
+        attn = attn.softmax(dim=-1)
+
+        x = (attn @ v).transpose(1, 2).reshape(B, N + 1, -1)
+        x = self.proj(x)
+        return x[:, 0]
+
+
+class AttentionPool2d(nn.Module):
+    """ Attention based 2D feature pooling w/ learned (absolute) pos embedding.
+    This is a multi-head attention based replacement for (spatial) average pooling in NN architectures.
+
+    It was based on impl in CLIP by OpenAI
+    https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py
+
+    NOTE: This requires feature size upon construction and will prevent adaptive sizing of the network.
+    """
+    def __init__(
+            self,
+            in_features: int,
+            feat_size: Union[int, Tuple[int, int]],
+            out_features: int = None,
+            embed_dim: int = None,
+            num_heads: int = 4,
+            qkv_bias: bool = True,
+    ):
+        super().__init__()
+
+        embed_dim = embed_dim or in_features
+        out_features = out_features or in_features
+        assert embed_dim % num_heads == 0
+        self.feat_size = to_2tuple(feat_size)
+        self.qkv = nn.Linear(in_features, embed_dim * 3, bias=qkv_bias)
+        self.proj = nn.Linear(embed_dim, out_features)
+        self.num_heads = num_heads
+        self.head_dim = embed_dim // num_heads
+        self.scale = self.head_dim ** -0.5
+
+        spatial_dim = self.feat_size[0] * self.feat_size[1]
+        self.pos_embed = nn.Parameter(torch.zeros(spatial_dim + 1, in_features))
+        trunc_normal_(self.pos_embed, std=in_features ** -0.5)
+        trunc_normal_(self.qkv.weight, std=in_features ** -0.5)
+        nn.init.zeros_(self.qkv.bias)
+
+    def forward(self, x):
+        B, _, H, W = x.shape
+        N = H * W
+        assert self.feat_size[0] == H
+        assert self.feat_size[1] == W
+        x = x.reshape(B, -1, N).permute(0, 2, 1)
+        x = torch.cat([x.mean(1, keepdim=True), x], dim=1)
+        x = x + self.pos_embed.unsqueeze(0).to(x.dtype)
+
+        x = self.qkv(x).reshape(B, N + 1, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
+        q, k, v = x[0], x[1], x[2]
+        attn = (q @ k.transpose(-2, -1)) * self.scale
+        attn = attn.softmax(dim=-1)
+
+        x = (attn @ v).transpose(1, 2).reshape(B, N + 1, -1)
+        x = self.proj(x)
+        return x[:, 0]
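Editor's note: both pools take an NCHW feature map, prepend the spatial mean as an extra token, run one round of multi-head attention, and return that token, so they drop in where global average pooling would sit. A usage sketch (feature shape and head count are arbitrary; AttentionPool2d must be built with the exact H, W it will later see):

    import torch
    from timm.models.layers.attention_pool2d import AttentionPool2d, RotAttentionPool2d

    feat = torch.randn(2, 256, 7, 7)

    fixed = AttentionPool2d(in_features=256, feat_size=7, out_features=512, num_heads=4)
    print(fixed(feat).shape)    # torch.Size([2, 512])

    # the rotary variant needs no fixed feat_size, though per the note above
    # accuracy degrades at resolutions far from those seen in training
    rotary = RotAttentionPool2d(in_features=256, out_features=512, num_heads=4)
    print(rotary(feat).shape)   # torch.Size([2, 512])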
diff --git a/timm/models/layers/blur_pool.py b/timm/models/layers/blur_pool.py
new file mode 100644
index 0000000..ca4ce75
--- /dev/null
+++ b/timm/models/layers/blur_pool.py
@@ -0,0 +1,42 @@
+"""
+BlurPool layer inspired by
+ - Kornia's Max_BlurPool2d
+ - Making Convolutional Networks Shift-Invariant Again :cite:`zhang2019shiftinvar`
+
+Hacked together by Chris Ha and Ross Wightman
+"""
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import numpy as np
+from .padding import get_padding
+
+
+class BlurPool2d(nn.Module):
+    r"""Creates a module that blurs and downsamples a given feature map.
+    See :cite:`zhang2019shiftinvar` for more details.
+    Corresponds to the Downsample class, which does blurring and subsampling
+
+    Args:
+        channels (int): number of input channels
+        filt_size (int): binomial filter size for blurring. currently supports 3 (default) and 5.
+        stride (int): downsampling filter stride
+
+    Returns:
+        torch.Tensor: the transformed tensor.
+    """
+    def __init__(self, channels, filt_size=3, stride=2) -> None:
+        super(BlurPool2d, self).__init__()
+        assert filt_size > 1
+        self.channels = channels
+        self.filt_size = filt_size
+        self.stride = stride
+        self.padding = [get_padding(filt_size, stride, dilation=1)] * 4
+        coeffs = torch.tensor((np.poly1d((0.5, 0.5)) ** (self.filt_size - 1)).coeffs.astype(np.float32))
+        blur_filter = (coeffs[:, None] * coeffs[None, :])[None, None, :, :].repeat(self.channels, 1, 1, 1)
+        self.register_buffer('filt', blur_filter, persistent=False)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        x = F.pad(x, self.padding, 'reflect')
+        return F.conv2d(x, self.filt, stride=self.stride, groups=x.shape[1])
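Editor's note: the kernel is the binomial expansion of (0.5 + 0.5x)^(filt_size - 1), outer-multiplied with itself to give a separable 2D blur that sums to 1. For the default filt_size=3 this reproduces the classic 1-2-1 filter, checked standalone below (plain NumPy, mirroring the construction in __init__):

    import numpy as np

    coeffs = (np.poly1d((0.5, 0.5)) ** 2).coeffs    # filt_size=3 -> [0.25, 0.5, 0.25]
    kernel = coeffs[:, None] * coeffs[None, :]      # 3x3 binomial blur kernel
    print(kernel)
    # [[0.0625 0.125  0.0625]
    #  [0.125  0.25   0.125 ]
    #  [0.0625 0.125  0.0625]]
    print(kernel.sum())                             # 1.0 -> mean brightness unchanged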
diff --git a/timm/models/layers/bottleneck_attn.py b/timm/models/layers/bottleneck_attn.py
new file mode 100644
index 0000000..c3db464
--- /dev/null
+++ b/timm/models/layers/bottleneck_attn.py
@@ -0,0 +1,157 @@
+""" Bottleneck Self Attention (Bottleneck Transformers)
+
+Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605
+
+@misc{2101.11605,
+Author = {Aravind Srinivas and Tsung-Yi Lin and Niki Parmar and Jonathon Shlens and Pieter Abbeel and Ashish Vaswani},
+Title = {Bottleneck Transformers for Visual Recognition},
+Year = {2021},
+}
+
+Based on ref gist at: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
+
+This impl is a WIP but, given that it is based on the ref gist, likely not too far off.
+
+Hacked together by / Copyright 2021 Ross Wightman
+"""
+from typing import List
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from .helpers import to_2tuple, make_divisible
+from .weight_init import trunc_normal_
+from .trace_utils import _assert
+
+
+def rel_logits_1d(q, rel_k, permute_mask: List[int]):
+    """ Compute relative logits along one dimension
+
+    As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
+    Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925
+
+    Args:
+        q: (batch * heads, height, width, dim)
+        rel_k: (2 * width - 1, dim)
+        permute_mask: permute output dim according to this
+    """
+    B, H, W, dim = q.shape
+    x = (q @ rel_k.transpose(-1, -2))
+    x = x.reshape(-1, W, 2 * W - 1)
+
+    # pad to shift from relative to absolute indexing
+    x_pad = F.pad(x, [0, 1]).flatten(1)
+    x_pad = F.pad(x_pad, [0, W - 1])
+
+    # reshape and slice out the padded elements
+    x_pad = x_pad.reshape(-1, W + 1, 2 * W - 1)
+    x = x_pad[:, :W, W - 1:]
+
+    # reshape and tile
+    x = x.reshape(B, H, 1, W, W).expand(-1, -1, H, -1, -1)
+    return x.permute(permute_mask)
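Editor's note: the pad-and-reshape sequence converts relative logits (one of 2W-1 offsets per query) into an absolute (query, key) grid: one pad column per row plus W-1 trailing pads skews the flattened buffer so that re-viewing it as (W+1, 2W-1) rows lines the offsets up, and the final slice keeps the valid WxW block. A tiny standalone check of the indexing for W=3 (values chosen so each entry is identifiable):

    import torch
    import torch.nn.functional as F

    W = 3
    # rel[0, i, j] = logit of query i at relative offset j - (W - 1), offsets -2..2
    rel = torch.arange(W * (2 * W - 1), dtype=torch.float).reshape(1, W, 2 * W - 1)

    x_pad = F.pad(rel, [0, 1]).flatten(1)     # one pad column per row, then flatten
    x_pad = F.pad(x_pad, [0, W - 1])          # trailing pads so the reshape is exact
    abs_logits = x_pad.reshape(1, W + 1, 2 * W - 1)[:, :W, W - 1:]

    print(abs_logits[0])
    # row i, col k holds rel[0, i, k - i + W - 1], i.e. the logit at offset k - i:
    # tensor([[ 2.,  3.,  4.],
    #         [ 6.,  7.,  8.],
    #         [10., 11., 12.]])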
+        q, k, v = torch.split(x, [self.dim_out_qk, self.dim_out_qk, self.dim_out_v], dim=1)
+        q = q.reshape(B * self.num_heads, self.dim_head_qk, -1).transpose(-1, -2)
+        k = k.reshape(B * self.num_heads, self.dim_head_qk, -1)  # no transpose, for q @ k
+        v = v.reshape(B * self.num_heads, self.dim_head_v, -1).transpose(-1, -2)
+
+        if self.scale_pos_embed:
+            attn = (q @ k + self.pos_embed(q)) * self.scale  # B * num_heads, H * W, H * W
+        else:
+            attn = (q @ k) * self.scale + self.pos_embed(q)
+        attn = attn.softmax(dim=-1)
+
+        out = (attn @ v).transpose(-1, -2).reshape(B, self.dim_out_v, H, W)  # B, dim_out, H, W
+        out = self.pool(out)
+        return out
diff --git a/timm/models/layers/cbam.py b/timm/models/layers/cbam.py
new file mode 100644
index 0000000..bacf5cf
--- /dev/null
+++ b/timm/models/layers/cbam.py
@@ -0,0 +1,112 @@
+""" CBAM (sort-of) Attention
+
+Experimental impl of CBAM: Convolutional Block Attention Module: https://arxiv.org/abs/1807.06521
+
+WARNING: Results with these attention layers have been mixed. They can significantly reduce performance on
+some tasks, especially fine-grained ones it seems. I may end up removing this impl.
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+import torch
+from torch import nn as nn
+import torch.nn.functional as F
+
+from .conv_bn_act import ConvBnAct
+from .create_act import create_act_layer, get_act_layer
+from .helpers import make_divisible
+
+
+class ChannelAttn(nn.Module):
+    """ Original CBAM channel attention module, currently avg + max pool variant only.
+    """
+    def __init__(
+            self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1,
+            act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False):
+        super(ChannelAttn, self).__init__()
+        if not rd_channels:
+            rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.)
+        self.fc1 = nn.Conv2d(channels, rd_channels, 1, bias=mlp_bias)
+        self.act = act_layer(inplace=True)
+        self.fc2 = nn.Conv2d(rd_channels, channels, 1, bias=mlp_bias)
+        self.gate = create_act_layer(gate_layer)
+
+    def forward(self, x):
+        x_avg = self.fc2(self.act(self.fc1(x.mean((2, 3), keepdim=True))))
+        x_max = self.fc2(self.act(self.fc1(x.amax((2, 3), keepdim=True))))
+        return x * self.gate(x_avg + x_max)
+
+
+class LightChannelAttn(ChannelAttn):
+    """An experimental 'lightweight' variant that sums avg + max pool first
+    """
+    def __init__(
+            self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1,
+            act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False):
+        super(LightChannelAttn, self).__init__(
+            channels, rd_ratio, rd_channels, rd_divisor, act_layer, gate_layer, mlp_bias)
+
+    def forward(self, x):
+        x_pool = 0.5 * x.mean((2, 3), keepdim=True) + 0.5 * x.amax((2, 3), keepdim=True)
+        x_attn = self.fc2(self.act(self.fc1(x_pool)))
+        # NOTE a fixed sigmoid is used here rather than self.gate; the deprecated
+        # F.sigmoid call was replaced with the equivalent tensor method
+        return x * x_attn.sigmoid()
+
+
+class SpatialAttn(nn.Module):
+    """ Original CBAM spatial attention module
+    """
+    def __init__(self, kernel_size=7, gate_layer='sigmoid'):
+        super(SpatialAttn, self).__init__()
+        self.conv = ConvBnAct(2, 1, kernel_size, act_layer=None)
+        self.gate = create_act_layer(gate_layer)
+
+    def forward(self, x):
+        x_attn = torch.cat([x.mean(dim=1, keepdim=True), x.amax(dim=1, keepdim=True)], dim=1)
+        x_attn = self.conv(x_attn)
+        return x * self.gate(x_attn)
+
+
+class LightSpatialAttn(nn.Module):
+    """An experimental 'lightweight' variant that sums avg_pool and max_pool results.
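+
+    Unlike SpatialAttn, the conv here sees a single fused map (one input channel)
+    rather than the concatenated avg/max pair (two channels).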
+ """ + def __init__(self, kernel_size=7, gate_layer='sigmoid'): + super(LightSpatialAttn, self).__init__() + self.conv = ConvBnAct(1, 1, kernel_size, act_layer=None) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_attn = 0.5 * x.mean(dim=1, keepdim=True) + 0.5 * x.amax(dim=1, keepdim=True) + x_attn = self.conv(x_attn) + return x * self.gate(x_attn) + + +class CbamModule(nn.Module): + def __init__( + self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, + spatial_kernel_size=7, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): + super(CbamModule, self).__init__() + self.channel = ChannelAttn( + channels, rd_ratio=rd_ratio, rd_channels=rd_channels, + rd_divisor=rd_divisor, act_layer=act_layer, gate_layer=gate_layer, mlp_bias=mlp_bias) + self.spatial = SpatialAttn(spatial_kernel_size, gate_layer=gate_layer) + + def forward(self, x): + x = self.channel(x) + x = self.spatial(x) + return x + + +class LightCbamModule(nn.Module): + def __init__( + self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, + spatial_kernel_size=7, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): + super(LightCbamModule, self).__init__() + self.channel = LightChannelAttn( + channels, rd_ratio=rd_ratio, rd_channels=rd_channels, + rd_divisor=rd_divisor, act_layer=act_layer, gate_layer=gate_layer, mlp_bias=mlp_bias) + self.spatial = LightSpatialAttn(spatial_kernel_size) + + def forward(self, x): + x = self.channel(x) + x = self.spatial(x) + return x + diff --git a/timm/models/layers/classifier.py b/timm/models/layers/classifier.py new file mode 100644 index 0000000..2b74541 --- /dev/null +++ b/timm/models/layers/classifier.py @@ -0,0 +1,56 @@ +""" Classifier head and layer factory + +Hacked together by / Copyright 2020 Ross Wightman +""" +from torch import nn as nn +from torch.nn import functional as F + +from .adaptive_avgmax_pool import SelectAdaptivePool2d +from .linear import Linear + + +def _create_pool(num_features, num_classes, pool_type='avg', use_conv=False): + flatten_in_pool = not use_conv # flatten when we use a Linear layer after pooling + if not pool_type: + assert num_classes == 0 or use_conv,\ + 'Pooling can only be disabled if classifier is also removed or conv classifier is used' + flatten_in_pool = False # disable flattening if pooling is pass-through (no pooling) + global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=flatten_in_pool) + num_pooled_features = num_features * global_pool.feat_mult() + return global_pool, num_pooled_features + + +def _create_fc(num_features, num_classes, use_conv=False): + if num_classes <= 0: + fc = nn.Identity() # pass-through (no classifier) + elif use_conv: + fc = nn.Conv2d(num_features, num_classes, 1, bias=True) + else: + # NOTE: using my Linear wrapper that fixes AMP + torchscript casting issue + fc = Linear(num_features, num_classes, bias=True) + return fc + + +def create_classifier(num_features, num_classes, pool_type='avg', use_conv=False): + global_pool, num_pooled_features = _create_pool(num_features, num_classes, pool_type, use_conv=use_conv) + fc = _create_fc(num_pooled_features, num_classes, use_conv=use_conv) + return global_pool, fc + + +class ClassifierHead(nn.Module): + """Classifier head w/ configurable global pooling and dropout.""" + + def __init__(self, in_chs, num_classes, pool_type='avg', drop_rate=0., use_conv=False): + super(ClassifierHead, self).__init__() + self.drop_rate = drop_rate + self.global_pool, num_pooled_features = _create_pool(in_chs, num_classes, pool_type, 
+            use_conv=use_conv)
+        self.fc = _create_fc(num_pooled_features, num_classes, use_conv=use_conv)
+        self.flatten = nn.Flatten(1) if use_conv and pool_type else nn.Identity()
+
+    def forward(self, x):
+        x = self.global_pool(x)
+        if self.drop_rate:
+            x = F.dropout(x, p=float(self.drop_rate), training=self.training)
+        x = self.fc(x)
+        x = self.flatten(x)
+        return x
diff --git a/timm/models/layers/cond_conv2d.py b/timm/models/layers/cond_conv2d.py
new file mode 100644
index 0000000..8b4bbca
--- /dev/null
+++ b/timm/models/layers/cond_conv2d.py
@@ -0,0 +1,122 @@
+""" PyTorch Conditionally Parameterized Convolution (CondConv)
+
+Paper: CondConv: Conditionally Parameterized Convolutions for Efficient Inference
+(https://arxiv.org/abs/1904.04971)
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+
+import math
+from functools import partial
+import numpy as np
+import torch
+from torch import nn as nn
+from torch.nn import functional as F
+
+from .helpers import to_2tuple
+from .conv2d_same import conv2d_same
+from .padding import get_padding_value
+
+
+def get_condconv_initializer(initializer, num_experts, expert_shape):
+    def condconv_initializer(weight):
+        """CondConv initializer function."""
+        num_params = np.prod(expert_shape)
+        if (len(weight.shape) != 2 or weight.shape[0] != num_experts or
+                weight.shape[1] != num_params):
+            raise ValueError('CondConv variables must have shape [num_experts, num_params]')
+        for i in range(num_experts):
+            initializer(weight[i].view(expert_shape))
+    return condconv_initializer
+
+
+class CondConv2d(nn.Module):
+    """ Conditionally Parameterized Convolution
+    Inspired by: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/condconv/condconv_layers.py
+
+    Grouped convolution hackery for parallel execution of the per-sample kernel filters inspired by this discussion:
+    https://github.com/pytorch/pytorch/issues/17983
+    """
+    __constants__ = ['in_channels', 'out_channels', 'dynamic_padding']
+
+    def __init__(self, in_channels, out_channels, kernel_size=3,
+                 stride=1, padding='', dilation=1, groups=1, bias=False, num_experts=4):
+        super(CondConv2d, self).__init__()
+
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+        self.kernel_size = to_2tuple(kernel_size)
+        self.stride = to_2tuple(stride)
+        padding_val, is_padding_dynamic = get_padding_value(
+            padding, kernel_size, stride=stride, dilation=dilation)
+        self.dynamic_padding = is_padding_dynamic  # stored as attribute so the forward() branch works with torchscript
+        self.padding = to_2tuple(padding_val)
+        self.dilation = to_2tuple(dilation)
+        self.groups = groups
+        self.num_experts = num_experts
+
+        self.weight_shape = (self.out_channels, self.in_channels // self.groups) + self.kernel_size
+        weight_num_param = 1
+        for wd in self.weight_shape:
+            weight_num_param *= wd
+        self.weight = torch.nn.Parameter(torch.Tensor(self.num_experts, weight_num_param))
+
+        if bias:
+            self.bias_shape = (self.out_channels,)
+            self.bias = torch.nn.Parameter(torch.Tensor(self.num_experts, self.out_channels))
+        else:
+            self.register_parameter('bias', None)
+
+        self.reset_parameters()
+
+    def reset_parameters(self):
+        init_weight = get_condconv_initializer(
+            partial(nn.init.kaiming_uniform_, a=math.sqrt(5)), self.num_experts, self.weight_shape)
+        init_weight(self.weight)
+        if self.bias is not None:
+            fan_in = np.prod(self.weight_shape[1:])
+            bound = 1 / math.sqrt(fan_in)
+            init_bias = get_condconv_initializer(
+                partial(nn.init.uniform_, a=-bound, b=bound), self.num_experts, self.bias_shape)
+            init_bias(self.bias)
+
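+    # A usage sketch (illustrative, not from the original file): routing weights come
+    # from some per-sample routing fn, e.g. a sigmoid over a linear of pooled features:
+    #   conv = CondConv2d(32, 64, 3, padding='', num_experts=4)
+    #   routing = torch.sigmoid(torch.randn(8, 4))       # batch of 8, 4 experts
+    #   out = conv(torch.randn(8, 32, 16, 16), routing)  # -> (8, 64, 16, 16)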
+    def forward(self, x, routing_weights):
+        B, C, H, W = x.shape
+        weight = torch.matmul(routing_weights, self.weight)
+        new_weight_shape = (B * self.out_channels, self.in_channels // self.groups) + self.kernel_size
+        weight = weight.view(new_weight_shape)
+        bias = None
+        if self.bias is not None:
+            bias = torch.matmul(routing_weights, self.bias)
+            bias = bias.view(B * self.out_channels)
+        # fold the batch dim into channels so each sample is convolved with its own
+        # mixed kernels via a single grouped conv
+        x = x.view(1, B * C, H, W)
+        if self.dynamic_padding:
+            out = conv2d_same(
+                x, weight, bias, stride=self.stride, padding=self.padding,
+                dilation=self.dilation, groups=self.groups * B)
+        else:
+            out = F.conv2d(
+                x, weight, bias, stride=self.stride, padding=self.padding,
+                dilation=self.dilation, groups=self.groups * B)
+        out = out.permute([1, 0, 2, 3]).view(B, self.out_channels, out.shape[-2], out.shape[-1])
+
+        # Literal port (from TF definition)
+        # x = torch.split(x, 1, 0)
+        # weight = torch.split(weight, 1, 0)
+        # if self.bias is not None:
+        #     bias = torch.matmul(routing_weights, self.bias)
+        #     bias = torch.split(bias, 1, 0)
+        # else:
+        #     bias = [None] * B
+        # out = []
+        # for xi, wi, bi in zip(x, weight, bias):
+        #     wi = wi.view(*self.weight_shape)
+        #     if bi is not None:
+        #         bi = bi.view(*self.bias_shape)
+        #     out.append(self.conv_fn(
+        #         xi, wi, bi, stride=self.stride, padding=self.padding,
+        #         dilation=self.dilation, groups=self.groups))
+        # out = torch.cat(out, 0)
+        return out
diff --git a/timm/models/layers/config.py b/timm/models/layers/config.py
new file mode 100644
index 0000000..f07b9d7
--- /dev/null
+++ b/timm/models/layers/config.py
@@ -0,0 +1,115 @@
+""" Model / Layer Config singleton state
+"""
+from typing import Any, Optional
+
+__all__ = [
+    'is_exportable', 'is_scriptable', 'is_no_jit',
+    'set_exportable', 'set_scriptable', 'set_no_jit', 'set_layer_config'
+]
+
+# Set to True if you prefer layers with no jit optimization (includes activations)
+_NO_JIT = False
+
+# Set to True if you prefer activation layers with no jit optimization
+# NOTE not currently used: no_jit and no_activation_jit behave the same so far, since activations are
+# the only layers obeying the jit flags. This will change as more layers are updated and/or added.
+_NO_ACTIVATION_JIT = False + +# Set to True if exporting a model with Same padding via ONNX +_EXPORTABLE = False + +# Set to True if wanting to use torch.jit.script on a model +_SCRIPTABLE = False + + +def is_no_jit(): + return _NO_JIT + + +class set_no_jit: + def __init__(self, mode: bool) -> None: + global _NO_JIT + self.prev = _NO_JIT + _NO_JIT = mode + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _NO_JIT + _NO_JIT = self.prev + return False + + +def is_exportable(): + return _EXPORTABLE + + +class set_exportable: + def __init__(self, mode: bool) -> None: + global _EXPORTABLE + self.prev = _EXPORTABLE + _EXPORTABLE = mode + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _EXPORTABLE + _EXPORTABLE = self.prev + return False + + +def is_scriptable(): + return _SCRIPTABLE + + +class set_scriptable: + def __init__(self, mode: bool) -> None: + global _SCRIPTABLE + self.prev = _SCRIPTABLE + _SCRIPTABLE = mode + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _SCRIPTABLE + _SCRIPTABLE = self.prev + return False + + +class set_layer_config: + """ Layer config context manager that allows setting all layer config flags at once. + If a flag arg is None, it will not change the current value. + """ + def __init__( + self, + scriptable: Optional[bool] = None, + exportable: Optional[bool] = None, + no_jit: Optional[bool] = None, + no_activation_jit: Optional[bool] = None): + global _SCRIPTABLE + global _EXPORTABLE + global _NO_JIT + global _NO_ACTIVATION_JIT + self.prev = _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT + if scriptable is not None: + _SCRIPTABLE = scriptable + if exportable is not None: + _EXPORTABLE = exportable + if no_jit is not None: + _NO_JIT = no_jit + if no_activation_jit is not None: + _NO_ACTIVATION_JIT = no_activation_jit + + def __enter__(self) -> None: + pass + + def __exit__(self, *args: Any) -> bool: + global _SCRIPTABLE + global _EXPORTABLE + global _NO_JIT + global _NO_ACTIVATION_JIT + _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT = self.prev + return False diff --git a/timm/models/layers/conv2d_same.py b/timm/models/layers/conv2d_same.py new file mode 100644 index 0000000..75f0f98 --- /dev/null +++ b/timm/models/layers/conv2d_same.py @@ -0,0 +1,42 @@ +""" Conv2d w/ Same Padding + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import Tuple, Optional + +from .padding import pad_same, get_padding_value + + +def conv2d_same( + x, weight: torch.Tensor, bias: Optional[torch.Tensor] = None, stride: Tuple[int, int] = (1, 1), + padding: Tuple[int, int] = (0, 0), dilation: Tuple[int, int] = (1, 1), groups: int = 1): + x = pad_same(x, weight.shape[-2:], stride, dilation) + return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups) + + +class Conv2dSame(nn.Conv2d): + """ Tensorflow like 'SAME' convolution wrapper for 2D convolutions + """ + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, bias=True): + super(Conv2dSame, self).__init__( + in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias) + + def forward(self, x): + return conv2d_same(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + + +def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs): + padding = kwargs.pop('padding', '') + kwargs.setdefault('bias', False) + 
padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs) + if is_dynamic: + return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs) + else: + return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs) + + diff --git a/timm/models/layers/conv_bn_act.py b/timm/models/layers/conv_bn_act.py new file mode 100644 index 0000000..33005c3 --- /dev/null +++ b/timm/models/layers/conv_bn_act.py @@ -0,0 +1,40 @@ +""" Conv2d + BN + Act + +Hacked together by / Copyright 2020 Ross Wightman +""" +from torch import nn as nn + +from .create_conv2d import create_conv2d +from .create_norm_act import convert_norm_act + + +class ConvBnAct(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding='', dilation=1, groups=1, + bias=False, apply_act=True, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, aa_layer=None, + drop_block=None): + super(ConvBnAct, self).__init__() + use_aa = aa_layer is not None + + self.conv = create_conv2d( + in_channels, out_channels, kernel_size, stride=1 if use_aa else stride, + padding=padding, dilation=dilation, groups=groups, bias=bias) + + # NOTE for backwards compatibility with models that use separate norm and act layer definitions + norm_act_layer = convert_norm_act(norm_layer, act_layer) + self.bn = norm_act_layer(out_channels, apply_act=apply_act, drop_block=drop_block) + self.aa = aa_layer(channels=out_channels) if stride == 2 and use_aa else None + + @property + def in_channels(self): + return self.conv.in_channels + + @property + def out_channels(self): + return self.conv.out_channels + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + if self.aa is not None: + x = self.aa(x) + return x diff --git a/timm/models/layers/create_act.py b/timm/models/layers/create_act.py new file mode 100644 index 0000000..aa55769 --- /dev/null +++ b/timm/models/layers/create_act.py @@ -0,0 +1,153 @@ +""" Activation Factory +Hacked together by / Copyright 2020 Ross Wightman +""" +from typing import Union, Callable, Type + +from .activations import * +from .activations_jit import * +from .activations_me import * +from .config import is_exportable, is_scriptable, is_no_jit + +# PyTorch has an optimized, native 'silu' (aka 'swish') operator as of PyTorch 1.7. +# Also hardsigmoid, hardswish, and soon mish. This code will use native version if present. +# Eventually, the custom SiLU, Mish, Hard*, layers will be removed and only native variants will be used. 
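+# NOTE the feature checks below detect native ops at import time; the tables prefer
+# them when present, else fall back to the custom impls imported from activations*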
+_has_silu = 'silu' in dir(torch.nn.functional) +_has_hardswish = 'hardswish' in dir(torch.nn.functional) +_has_hardsigmoid = 'hardsigmoid' in dir(torch.nn.functional) +_has_mish = 'mish' in dir(torch.nn.functional) + + +_ACT_FN_DEFAULT = dict( + silu=F.silu if _has_silu else swish, + swish=F.silu if _has_silu else swish, + mish=F.mish if _has_mish else mish, + relu=F.relu, + relu6=F.relu6, + leaky_relu=F.leaky_relu, + elu=F.elu, + celu=F.celu, + selu=F.selu, + gelu=gelu, + sigmoid=sigmoid, + tanh=tanh, + hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid, + hard_swish=F.hardswish if _has_hardswish else hard_swish, + hard_mish=hard_mish, +) + +_ACT_FN_JIT = dict( + silu=F.silu if _has_silu else swish_jit, + swish=F.silu if _has_silu else swish_jit, + mish=F.mish if _has_mish else mish_jit, + hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid_jit, + hard_swish=F.hardswish if _has_hardswish else hard_swish_jit, + hard_mish=hard_mish_jit +) + +_ACT_FN_ME = dict( + silu=F.silu if _has_silu else swish_me, + swish=F.silu if _has_silu else swish_me, + mish=F.mish if _has_mish else mish_me, + hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid_me, + hard_swish=F.hardswish if _has_hardswish else hard_swish_me, + hard_mish=hard_mish_me, +) + +_ACT_FNS = (_ACT_FN_ME, _ACT_FN_JIT, _ACT_FN_DEFAULT) +for a in _ACT_FNS: + a.setdefault('hardsigmoid', a.get('hard_sigmoid')) + a.setdefault('hardswish', a.get('hard_swish')) + + +_ACT_LAYER_DEFAULT = dict( + silu=nn.SiLU if _has_silu else Swish, + swish=nn.SiLU if _has_silu else Swish, + mish=nn.Mish if _has_mish else Mish, + relu=nn.ReLU, + relu6=nn.ReLU6, + leaky_relu=nn.LeakyReLU, + elu=nn.ELU, + prelu=PReLU, + celu=nn.CELU, + selu=nn.SELU, + gelu=GELU, + sigmoid=Sigmoid, + tanh=Tanh, + hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoid, + hard_swish=nn.Hardswish if _has_hardswish else HardSwish, + hard_mish=HardMish, +) + +_ACT_LAYER_JIT = dict( + silu=nn.SiLU if _has_silu else SwishJit, + swish=nn.SiLU if _has_silu else SwishJit, + mish=nn.Mish if _has_mish else MishJit, + hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoidJit, + hard_swish=nn.Hardswish if _has_hardswish else HardSwishJit, + hard_mish=HardMishJit +) + +_ACT_LAYER_ME = dict( + silu=nn.SiLU if _has_silu else SwishMe, + swish=nn.SiLU if _has_silu else SwishMe, + mish=nn.Mish if _has_mish else MishMe, + hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoidMe, + hard_swish=nn.Hardswish if _has_hardswish else HardSwishMe, + hard_mish=HardMishMe, +) + +_ACT_LAYERS = (_ACT_LAYER_ME, _ACT_LAYER_JIT, _ACT_LAYER_DEFAULT) +for a in _ACT_LAYERS: + a.setdefault('hardsigmoid', a.get('hard_sigmoid')) + a.setdefault('hardswish', a.get('hard_swish')) + + +def get_act_fn(name: Union[Callable, str] = 'relu'): + """ Activation Function Factory + Fetching activation fns by name with this function allows export or torch script friendly + functions to be returned dynamically based on current config. 
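+
+    For example (illustrative): on PyTorch >= 1.7, get_act_fn('swish') returns the
+    native F.silu; on older versions it returns a custom memory-efficient swish impl.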
+ """ + if not name: + return None + if isinstance(name, Callable): + return name + if not (is_no_jit() or is_exportable() or is_scriptable()): + # If not exporting or scripting the model, first look for a memory-efficient version with + # custom autograd, then fallback + if name in _ACT_FN_ME: + return _ACT_FN_ME[name] + if is_exportable() and name in ('silu', 'swish'): + # FIXME PyTorch SiLU doesn't ONNX export, this is a temp hack + return swish + if not (is_no_jit() or is_exportable()): + if name in _ACT_FN_JIT: + return _ACT_FN_JIT[name] + return _ACT_FN_DEFAULT[name] + + +def get_act_layer(name: Union[Type[nn.Module], str] = 'relu'): + """ Activation Layer Factory + Fetching activation layers by name with this function allows export or torch script friendly + functions to be returned dynamically based on current config. + """ + if not name: + return None + if isinstance(name, type): + return name + if not (is_no_jit() or is_exportable() or is_scriptable()): + if name in _ACT_LAYER_ME: + return _ACT_LAYER_ME[name] + if is_exportable() and name in ('silu', 'swish'): + # FIXME PyTorch SiLU doesn't ONNX export, this is a temp hack + return Swish + if not (is_no_jit() or is_exportable()): + if name in _ACT_LAYER_JIT: + return _ACT_LAYER_JIT[name] + return _ACT_LAYER_DEFAULT[name] + + +def create_act_layer(name: Union[nn.Module, str], inplace=None, **kwargs): + act_layer = get_act_layer(name) + if act_layer is None: + return None + return act_layer(**kwargs) if inplace is None else act_layer(inplace=inplace, **kwargs) diff --git a/timm/models/layers/create_attn.py b/timm/models/layers/create_attn.py new file mode 100644 index 0000000..028c0f7 --- /dev/null +++ b/timm/models/layers/create_attn.py @@ -0,0 +1,89 @@ +""" Attention Factory + +Hacked together by / Copyright 2021 Ross Wightman +""" +import torch +from functools import partial + +from .bottleneck_attn import BottleneckAttn +from .cbam import CbamModule, LightCbamModule +from .eca import EcaModule, CecaModule +from .gather_excite import GatherExcite +from .global_context import GlobalContext +from .halo_attn import HaloAttn +from .lambda_layer import LambdaLayer +from .non_local_attn import NonLocalAttn, BatNonLocalAttn +from .selective_kernel import SelectiveKernel +from .split_attn import SplitAttn +from .squeeze_excite import SEModule, EffectiveSEModule + + +def get_attn(attn_type): + if isinstance(attn_type, torch.nn.Module): + return attn_type + module_cls = None + if attn_type is not None: + if isinstance(attn_type, str): + attn_type = attn_type.lower() + # Lightweight attention modules (channel and/or coarse spatial). + # Typically added to existing network architecture blocks in addition to existing convolutions. + if attn_type == 'se': + module_cls = SEModule + elif attn_type == 'ese': + module_cls = EffectiveSEModule + elif attn_type == 'eca': + module_cls = EcaModule + elif attn_type == 'ecam': + module_cls = partial(EcaModule, use_mlp=True) + elif attn_type == 'ceca': + module_cls = CecaModule + elif attn_type == 'ge': + module_cls = GatherExcite + elif attn_type == 'gc': + module_cls = GlobalContext + elif attn_type == 'gca': + module_cls = partial(GlobalContext, fuse_add=True, fuse_scale=False) + elif attn_type == 'cbam': + module_cls = CbamModule + elif attn_type == 'lcbam': + module_cls = LightCbamModule + + # Attention / attention-like modules w/ significant params + # Typically replace some of the existing workhorse convs in a network architecture. 
+            # All of these accept a stride argument and can spatially downsample the input.
+            elif attn_type == 'sk':
+                module_cls = SelectiveKernel
+            elif attn_type == 'splat':
+                module_cls = SplitAttn
+
+            # Self-attention / attention-like modules w/ significant compute and/or params
+            # Typically replace some of the existing workhorse convs in a network architecture.
+            # All of these accept a stride argument and can spatially downsample the input.
+            elif attn_type == 'lambda':
+                return LambdaLayer
+            elif attn_type == 'bottleneck':
+                return BottleneckAttn
+            elif attn_type == 'halo':
+                return HaloAttn
+            elif attn_type == 'nl':
+                module_cls = NonLocalAttn
+            elif attn_type == 'bat':
+                module_cls = BatNonLocalAttn
+
+            # Woops!
+            else:
+                assert False, "Invalid attn module (%s)" % attn_type
+        elif isinstance(attn_type, bool):
+            if attn_type:
+                module_cls = SEModule
+        else:
+            module_cls = attn_type
+    return module_cls
+
+
+def create_attn(attn_type, channels, **kwargs):
+    module_cls = get_attn(attn_type)
+    if module_cls is not None:
+        # NOTE: it's expected the first (positional) argument of all attention layers is the number of input channels
+        return module_cls(channels, **kwargs)
+    return None
diff --git a/timm/models/layers/create_conv2d.py b/timm/models/layers/create_conv2d.py
new file mode 100644
index 0000000..3a0cc03
--- /dev/null
+++ b/timm/models/layers/create_conv2d.py
@@ -0,0 +1,31 @@
+""" Create Conv2d Factory Method
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+
+from .mixed_conv2d import MixedConv2d
+from .cond_conv2d import CondConv2d
+from .conv2d_same import create_conv2d_pad
+
+
+def create_conv2d(in_channels, out_channels, kernel_size, **kwargs):
+    """ Select a 2d convolution implementation based on arguments
+    Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d.
+
+    Used extensively by EfficientNet, MobileNetv3 and related networks.
+    """
+    if isinstance(kernel_size, list):
+        assert 'num_experts' not in kwargs  # MixNet + CondConv combo not supported currently
+        assert 'groups' not in kwargs  # MixedConv groups are defined by kernel list
+        # We're going to use only lists for defining the MixedConv2d kernel groups,
+        # ints, tuples, other iterables will continue to pass to normal conv and specify h, w.
+        m = MixedConv2d(in_channels, out_channels, kernel_size, **kwargs)
+    else:
+        depthwise = kwargs.pop('depthwise', False)
+        # for DW out_channels must be multiple of in_channels as must have out_channels % groups == 0
+        groups = in_channels if depthwise else kwargs.pop('groups', 1)
+        if 'num_experts' in kwargs and kwargs['num_experts'] > 0:
+            m = CondConv2d(in_channels, out_channels, kernel_size, groups=groups, **kwargs)
+        else:
+            m = create_conv2d_pad(in_channels, out_channels, kernel_size, groups=groups, **kwargs)
+    return m
diff --git a/timm/models/layers/create_norm_act.py b/timm/models/layers/create_norm_act.py
new file mode 100644
index 0000000..5b56294
--- /dev/null
+++ b/timm/models/layers/create_norm_act.py
@@ -0,0 +1,83 @@
+""" NormAct (Normalization + Activation Layer) Factory
+
+Create norm + act combo modules that attempt to be backwards compatible with separate norm + act
+instances in models. Where these are used it will be possible to swap separate BN + act layers with
+combined modules like IABN or EvoNorms.
+ +Hacked together by / Copyright 2020 Ross Wightman +""" +import types +import functools + +import torch +import torch.nn as nn + +from .evo_norm import EvoNormBatch2d, EvoNormSample2d +from .norm_act import BatchNormAct2d, GroupNormAct +from .inplace_abn import InplaceAbn + +_NORM_ACT_TYPES = {BatchNormAct2d, GroupNormAct, EvoNormBatch2d, EvoNormSample2d, InplaceAbn} +_NORM_ACT_REQUIRES_ARG = {BatchNormAct2d, GroupNormAct, InplaceAbn} # requires act_layer arg to define act type + + +def get_norm_act_layer(layer_class): + layer_class = layer_class.replace('_', '').lower() + if layer_class.startswith("batchnorm"): + layer = BatchNormAct2d + elif layer_class.startswith("groupnorm"): + layer = GroupNormAct + elif layer_class == "evonormbatch": + layer = EvoNormBatch2d + elif layer_class == "evonormsample": + layer = EvoNormSample2d + elif layer_class == "iabn" or layer_class == "inplaceabn": + layer = InplaceAbn + else: + assert False, "Invalid norm_act layer (%s)" % layer_class + return layer + + +def create_norm_act(layer_type, num_features, apply_act=True, jit=False, **kwargs): + layer_parts = layer_type.split('-') # e.g. batchnorm-leaky_relu + assert len(layer_parts) in (1, 2) + layer = get_norm_act_layer(layer_parts[0]) + #activation_class = layer_parts[1].lower() if len(layer_parts) > 1 else '' # FIXME support string act selection? + layer_instance = layer(num_features, apply_act=apply_act, **kwargs) + if jit: + layer_instance = torch.jit.script(layer_instance) + return layer_instance + + +def convert_norm_act(norm_layer, act_layer): + assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial)) + assert act_layer is None or isinstance(act_layer, (type, str, types.FunctionType, functools.partial)) + norm_act_kwargs = {} + + # unbind partial fn, so args can be rebound later + if isinstance(norm_layer, functools.partial): + norm_act_kwargs.update(norm_layer.keywords) + norm_layer = norm_layer.func + + if isinstance(norm_layer, str): + norm_act_layer = get_norm_act_layer(norm_layer) + elif norm_layer in _NORM_ACT_TYPES: + norm_act_layer = norm_layer + elif isinstance(norm_layer, types.FunctionType): + # if function type, must be a lambda/fn that creates a norm_act layer + norm_act_layer = norm_layer + else: + type_name = norm_layer.__name__.lower() + if type_name.startswith('batchnorm'): + norm_act_layer = BatchNormAct2d + elif type_name.startswith('groupnorm'): + norm_act_layer = GroupNormAct + else: + assert False, f"No equivalent norm_act layer for {type_name}" + + if norm_act_layer in _NORM_ACT_REQUIRES_ARG: + # pass `act_layer` through for backwards compat where `act_layer=None` implies no activation. + # In the future, may force use of `apply_act` with `act_layer` arg bound to relevant NormAct types + norm_act_kwargs.setdefault('act_layer', act_layer) + if norm_act_kwargs: + norm_act_layer = functools.partial(norm_act_layer, **norm_act_kwargs) # bind/rebind args + return norm_act_layer diff --git a/timm/models/layers/drop.py b/timm/models/layers/drop.py new file mode 100644 index 0000000..6de9e3f --- /dev/null +++ b/timm/models/layers/drop.py @@ -0,0 +1,168 @@ +""" DropBlock, DropPath + +PyTorch implementations of DropBlock and DropPath (Stochastic Depth) regularization layers. 
+
+Papers:
+DropBlock: A regularization method for convolutional networks (https://arxiv.org/abs/1810.12890)
+
+Deep Networks with Stochastic Depth (https://arxiv.org/abs/1603.09382)
+
+Code:
+DropBlock impl inspired by two Tensorflow impls that I liked:
+ - https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_model.py#L74
+ - https://github.com/clovaai/assembled-cnn/blob/master/nets/blocks.py
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+def drop_block_2d(
+        x, drop_prob: float = 0.1, block_size: int = 7, gamma_scale: float = 1.0,
+        with_noise: bool = False, inplace: bool = False, batchwise: bool = False):
+    """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
+
+    DropBlock with an experimental gaussian noise option. This layer has been tested on a few training
+    runs with success, but needs further validation and possibly optimization for lower runtime impact.
+    """
+    B, C, H, W = x.shape
+    total_size = W * H
+    clipped_block_size = min(block_size, min(W, H))
+    # seed_drop_rate, the gamma parameter
+    gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
+        (W - block_size + 1) * (H - block_size + 1))
+
+    # Forces the block to be inside the feature map.
+    w_i, h_i = torch.meshgrid(torch.arange(W).to(x.device), torch.arange(H).to(x.device))
+    valid_block = ((w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2)) & \
+                  ((h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2))
+    valid_block = torch.reshape(valid_block, (1, 1, H, W)).to(dtype=x.dtype)
+
+    if batchwise:
+        # one mask for whole batch, quite a bit faster
+        uniform_noise = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device)
+    else:
+        uniform_noise = torch.rand_like(x)
+    block_mask = ((2 - gamma - valid_block + uniform_noise) >= 1).to(dtype=x.dtype)
+    block_mask = -F.max_pool2d(
+        -block_mask,
+        kernel_size=clipped_block_size,  # block_size,
+        stride=1,
+        padding=clipped_block_size // 2)
+
+    if with_noise:
+        normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x)
+        if inplace:
+            x.mul_(block_mask).add_(normal_noise * (1 - block_mask))
+        else:
+            x = x * block_mask + normal_noise * (1 - block_mask)
+    else:
+        normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(x.dtype)
+        if inplace:
+            x.mul_(block_mask * normalize_scale)
+        else:
+            x = x * block_mask * normalize_scale
+    return x
+
+
+def drop_block_fast_2d(
+        x: torch.Tensor, drop_prob: float = 0.1, block_size: int = 7,
+        gamma_scale: float = 1.0, with_noise: bool = False, inplace: bool = False, batchwise: bool = False):
+    """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
+
+    DropBlock with an experimental gaussian noise option. Simplified from the above without concern for the
+    valid block mask at edges.
+ """ + B, C, H, W = x.shape + total_size = W * H + clipped_block_size = min(block_size, min(W, H)) + gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / ( + (W - block_size + 1) * (H - block_size + 1)) + + if batchwise: + # one mask for whole batch, quite a bit faster + block_mask = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device) < gamma + else: + # mask per batch element + block_mask = torch.rand_like(x) < gamma + block_mask = F.max_pool2d( + block_mask.to(x.dtype), kernel_size=clipped_block_size, stride=1, padding=clipped_block_size // 2) + + if with_noise: + normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x) + if inplace: + x.mul_(1. - block_mask).add_(normal_noise * block_mask) + else: + x = x * (1. - block_mask) + normal_noise * block_mask + else: + block_mask = 1 - block_mask + normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(dtype=x.dtype) + if inplace: + x.mul_(block_mask * normalize_scale) + else: + x = x * block_mask * normalize_scale + return x + + +class DropBlock2d(nn.Module): + """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf + """ + def __init__(self, + drop_prob=0.1, + block_size=7, + gamma_scale=1.0, + with_noise=False, + inplace=False, + batchwise=False, + fast=True): + super(DropBlock2d, self).__init__() + self.drop_prob = drop_prob + self.gamma_scale = gamma_scale + self.block_size = block_size + self.with_noise = with_noise + self.inplace = inplace + self.batchwise = batchwise + self.fast = fast # FIXME finish comparisons of fast vs not + + def forward(self, x): + if not self.training or not self.drop_prob: + return x + if self.fast: + return drop_block_fast_2d( + x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise) + else: + return drop_block_2d( + x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise) + + +def drop_path(x, drop_prob: float = 0., training: bool = False): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + + This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, + the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for + changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use + 'survival rate' as the argument. + + """ + if drop_prob == 0. or not training: + return x + keep_prob = 1 - drop_prob + shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets + random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device) + random_tensor.floor_() # binarize + output = x.div(keep_prob) * random_tensor + return output + + +class DropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
+ """ + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) diff --git a/timm/models/layers/eca.py b/timm/models/layers/eca.py new file mode 100644 index 0000000..e29be6a --- /dev/null +++ b/timm/models/layers/eca.py @@ -0,0 +1,145 @@ +""" +ECA module from ECAnet + +paper: ECA-Net: Efficient Channel Attention for Deep Convolutional Neural Networks +https://arxiv.org/abs/1910.03151 + +Original ECA model borrowed from https://github.com/BangguWu/ECANet + +Modified circular ECA implementation and adaption for use in timm package +by Chris Ha https://github.com/VRandme + +Original License: + +MIT License + +Copyright (c) 2019 BangguWu, Qilong Wang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +""" +import math +from torch import nn +import torch.nn.functional as F + + +from .create_act import create_act_layer +from .helpers import make_divisible + + +class EcaModule(nn.Module): + """Constructs an ECA module. + + Args: + channels: Number of channels of the input feature map for use in adaptive kernel sizes + for actual calculations according to channel. + gamma, beta: when channel is given parameters of mapping function + refer to original paper https://arxiv.org/pdf/1910.03151.pdf + (default=None. if channel size not given, use k_size given for kernel size.) 
+        kernel_size: Adaptive selection of kernel size (default=3)
+        gamma: used in kernel_size calc, see above
+        beta: used in kernel_size calc, see above
+        act_layer: optional non-linearity after conv, enables conv bias, this is an experiment
+        gate_layer: gating non-linearity to use
+    """
+    def __init__(
+            self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid',
+            rd_ratio=1/8, rd_channels=None, rd_divisor=8, use_mlp=False):
+        super(EcaModule, self).__init__()
+        if channels is not None:
+            t = int(abs(math.log(channels, 2) + beta) / gamma)
+            kernel_size = max(t if t % 2 else t + 1, 3)
+        assert kernel_size % 2 == 1
+        padding = (kernel_size - 1) // 2
+        if use_mlp:
+            # NOTE 'mlp' mode is a timm experiment, not in paper
+            assert channels is not None
+            if rd_channels is None:
+                rd_channels = make_divisible(channels * rd_ratio, divisor=rd_divisor)
+            act_layer = act_layer or nn.ReLU
+            self.conv = nn.Conv1d(1, rd_channels, kernel_size=1, padding=0, bias=True)
+            self.act = create_act_layer(act_layer)
+            self.conv2 = nn.Conv1d(rd_channels, 1, kernel_size=kernel_size, padding=padding, bias=True)
+        else:
+            self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=padding, bias=False)
+            self.act = None
+            self.conv2 = None
+        self.gate = create_act_layer(gate_layer)
+
+    def forward(self, x):
+        y = x.mean((2, 3)).view(x.shape[0], 1, -1)  # view for 1d conv
+        y = self.conv(y)
+        if self.conv2 is not None:
+            y = self.act(y)
+            y = self.conv2(y)
+        y = self.gate(y).view(x.shape[0], -1, 1, 1)
+        return x * y.expand_as(x)
+
+
+EfficientChannelAttn = EcaModule  # alias
+
+
+class CecaModule(nn.Module):
+    """Constructs a circular ECA module.
+
+    ECA module where the conv uses circular padding rather than zero padding.
+    Unlike the spatial dimension, the channels do not have inherent ordering nor
+    locality. Although this module, in essence, applies such an assumption, it is unnecessary
+    to limit the channels on either "edge" from being circularly adapted to each other.
+    This will fundamentally increase connectivity and possibly increase performance metrics
+    (accuracy, robustness), without significantly impacting resource metrics
+    (parameter size, throughput, latency, etc)
+
+    Args:
+        channels: Number of channels of the input feature map for use in adaptive kernel sizes
+            for actual calculations according to channel.
+        gamma, beta: when channel is given parameters of mapping function
+            refer to original paper https://arxiv.org/pdf/1910.03151.pdf
+            (default=None. if channel size not given, use k_size given for kernel size.)
+        kernel_size: Adaptive selection of kernel size (default=3)
+        gamma: used in kernel_size calc, see above
+        beta: used in kernel_size calc, see above
+        act_layer: optional non-linearity after conv, enables conv bias, this is an experiment
+        gate_layer: gating non-linearity to use
+    """
+
+    def __init__(self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid'):
+        super(CecaModule, self).__init__()
+        if channels is not None:
+            t = int(abs(math.log(channels, 2) + beta) / gamma)
+            kernel_size = max(t if t % 2 else t + 1, 3)
+        has_act = act_layer is not None
+        assert kernel_size % 2 == 1
+
+        # PyTorch circular padding mode is buggy as of pytorch 1.4
+        # see https://github.com/pytorch/pytorch/pull/17240
+        # implement manual circular padding
+        self.padding = (kernel_size - 1) // 2
+        self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=0, bias=has_act)
+        self.gate = create_act_layer(gate_layer)
+
+    def forward(self, x):
+        y = x.mean((2, 3)).view(x.shape[0], 1, -1)
+        # manually implement circular padding; F.pad's 'circular' mode does not appear to be affected by the bug
+        y = F.pad(y, (self.padding, self.padding), mode='circular')
+        y = self.conv(y)
+        y = self.gate(y).view(x.shape[0], -1, 1, 1)
+        return x * y.expand_as(x)
+
+
+CircularEfficientChannelAttn = CecaModule
diff --git a/timm/models/layers/evo_norm.py b/timm/models/layers/evo_norm.py
new file mode 100644
index 0000000..6ef0c88
--- /dev/null
+++ b/timm/models/layers/evo_norm.py
@@ -0,0 +1,81 @@
+"""EvoNormB0 (Batched) and EvoNormS0 (Sample) in PyTorch
+
+An attempt at getting decent performing EvoNorms running in PyTorch.
+While currently faster than other impls, still quite a ways off the built-in BN
+in terms of memory usage and throughput (roughly 5x mem, 1/2 - 1/3x speed).
+
+Still very much a WIP, fiddling with buffer usage, in-place/jit optimizations, and layouts.
+ +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +import torch.nn as nn + +from .trace_utils import _assert + + +class EvoNormBatch2d(nn.Module): + def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-5, drop_block=None): + super(EvoNormBatch2d, self).__init__() + self.apply_act = apply_act # apply activation (non-linearity) + self.momentum = momentum + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features), requires_grad=True) + self.bias = nn.Parameter(torch.zeros(num_features), requires_grad=True) + self.v = nn.Parameter(torch.ones(num_features), requires_grad=True) if apply_act else None + self.register_buffer('running_var', torch.ones(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + if self.apply_act: + nn.init.ones_(self.v) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_type = x.dtype + if self.v is not None: + running_var = self.running_var.view(1, -1, 1, 1) + if self.training: + var = x.var(dim=(0, 2, 3), unbiased=False, keepdim=True) + n = x.numel() / x.shape[1] + running_var = var.detach() * self.momentum * (n / (n - 1)) + running_var * (1 - self.momentum) + self.running_var.copy_(running_var.view(self.running_var.shape)) + else: + var = running_var + v = self.v.to(dtype=x_type).reshape(1, -1, 1, 1) + d = x * v + (x.var(dim=(2, 3), unbiased=False, keepdim=True) + self.eps).sqrt().to(dtype=x_type) + d = d.max((var + self.eps).sqrt().to(dtype=x_type)) + x = x / d + return x * self.weight.view(1, -1, 1, 1) + self.bias.view(1, -1, 1, 1) + + +class EvoNormSample2d(nn.Module): + def __init__(self, num_features, apply_act=True, groups=32, eps=1e-5, drop_block=None): + super(EvoNormSample2d, self).__init__() + self.apply_act = apply_act # apply activation (non-linearity) + self.groups = groups + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features), requires_grad=True) + self.bias = nn.Parameter(torch.zeros(num_features), requires_grad=True) + self.v = nn.Parameter(torch.ones(num_features), requires_grad=True) if apply_act else None + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + if self.apply_act: + nn.init.ones_(self.v) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + B, C, H, W = x.shape + _assert(C % self.groups == 0, '') + if self.v is not None: + n = x * (x * self.v.view(1, -1, 1, 1)).sigmoid() + x = x.reshape(B, self.groups, -1) + x = n.reshape(B, self.groups, -1) / (x.var(dim=-1, unbiased=False, keepdim=True) + self.eps).sqrt() + x = x.reshape(B, C, H, W) + return x * self.weight.view(1, -1, 1, 1) + self.bias.view(1, -1, 1, 1) diff --git a/timm/models/layers/gather_excite.py b/timm/models/layers/gather_excite.py new file mode 100644 index 0000000..2d60dc9 --- /dev/null +++ b/timm/models/layers/gather_excite.py @@ -0,0 +1,90 @@ +""" Gather-Excite Attention Block + +Paper: `Gather-Excite: Exploiting Feature Context in CNNs` - https://arxiv.org/abs/1810.12348 + +Official code here, but it's only partial impl in Caffe: https://github.com/hujie-frank/GENet + +I've tried to support all of the extent both w/ and w/o params. I don't believe I've seen another +impl that covers all of the cases. 
+
+NOTE: extent=0 + extra_params=False is equivalent to Squeeze-and-Excitation
+
+Hacked together by / Copyright 2021 Ross Wightman
+"""
+import math
+
+from torch import nn as nn
+import torch.nn.functional as F
+
+from .create_act import create_act_layer, get_act_layer
+from .create_conv2d import create_conv2d
+from .helpers import make_divisible
+from .mlp import ConvMlp
+
+
+class GatherExcite(nn.Module):
+    """ Gather-Excite Attention Module
+    """
+    def __init__(
+            self, channels, feat_size=None, extra_params=False, extent=0, use_mlp=True,
+            rd_ratio=1./16, rd_channels=None, rd_divisor=1, add_maxpool=False,
+            act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, gate_layer='sigmoid'):
+        super(GatherExcite, self).__init__()
+        self.add_maxpool = add_maxpool
+        act_layer = get_act_layer(act_layer)
+        self.extent = extent
+        if extra_params:
+            self.gather = nn.Sequential()
+            if extent == 0:
+                assert feat_size is not None, 'spatial feature size must be specified for global extent w/ params'
+                self.gather.add_module(
+                    'conv1', create_conv2d(channels, channels, kernel_size=feat_size, stride=1, depthwise=True))
+                if norm_layer:
+                    self.gather.add_module('norm1', nn.BatchNorm2d(channels))
+            else:
+                assert extent % 2 == 0
+                num_conv = int(math.log2(extent))
+                for i in range(num_conv):
+                    self.gather.add_module(
+                        f'conv{i + 1}',
+                        create_conv2d(channels, channels, kernel_size=3, stride=2, depthwise=True))
+                    if norm_layer:
+                        self.gather.add_module(f'norm{i + 1}', nn.BatchNorm2d(channels))
+                    if i != num_conv - 1:
+                        self.gather.add_module(f'act{i + 1}', act_layer(inplace=True))
+        else:
+            self.gather = None
+            if self.extent == 0:
+                self.gk = 0
+                self.gs = 0
+            else:
+                assert extent % 2 == 0
+                self.gk = self.extent * 2 - 1
+                self.gs = self.extent
+
+        if not rd_channels:
+            rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.)
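+        # 'excite' half of gather-excite: a channel MLP over the gathered context,
+        # reduced to rd_channels (squeeze-and-excitation style) when use_mlp is set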
+        self.mlp = ConvMlp(channels, rd_channels, act_layer=act_layer) if use_mlp else nn.Identity()
+        self.gate = create_act_layer(gate_layer)
+
+    def forward(self, x):
+        size = x.shape[-2:]
+        if self.gather is not None:
+            x_ge = self.gather(x)
+        else:
+            if self.extent == 0:
+                # global extent
+                x_ge = x.mean(dim=(2, 3), keepdim=True)
+                if self.add_maxpool:
+                    # experimental codepath, may remove or change
+                    x_ge = 0.5 * x_ge + 0.5 * x.amax((2, 3), keepdim=True)
+            else:
+                x_ge = F.avg_pool2d(
+                    x, kernel_size=self.gk, stride=self.gs, padding=self.gk // 2, count_include_pad=False)
+                if self.add_maxpool:
+                    # experimental codepath, may remove or change
+                    x_ge = 0.5 * x_ge + 0.5 * F.max_pool2d(x, kernel_size=self.gk, stride=self.gs, padding=self.gk // 2)
+        x_ge = self.mlp(x_ge)
+        if x_ge.shape[-1] != 1 or x_ge.shape[-2] != 1:
+            x_ge = F.interpolate(x_ge, size=size)
+        return x * self.gate(x_ge)
diff --git a/timm/models/layers/global_context.py b/timm/models/layers/global_context.py
new file mode 100644
index 0000000..de7fb5c
--- /dev/null
+++ b/timm/models/layers/global_context.py
@@ -0,0 +1,67 @@
+""" Global Context Attention Block
+
+Paper: `GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond`
+    - https://arxiv.org/abs/1904.11492
+
+Official code consulted as reference: https://github.com/xvjiarui/GCNet
+
+Hacked together by / Copyright 2021 Ross Wightman
+"""
+from torch import nn as nn
+import torch.nn.functional as F
+
+from .create_act import create_act_layer, get_act_layer
+from .helpers import make_divisible
+from .mlp import ConvMlp
+from .norm import LayerNorm2d
+
+
+class GlobalContext(nn.Module):
+
+    def __init__(self, channels, use_attn=True, fuse_add=False, fuse_scale=True, init_last_zero=False,
+                 rd_ratio=1./8, rd_channels=None, rd_divisor=1, act_layer=nn.ReLU, gate_layer='sigmoid'):
+        super(GlobalContext, self).__init__()
+        act_layer = get_act_layer(act_layer)
+
+        self.conv_attn = nn.Conv2d(channels, 1, kernel_size=1, bias=True) if use_attn else None
+
+        if rd_channels is None:
+            rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.)
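+        # the pooled context is fused back into x additively (mlp_add) and/or as a
+        # multiplicative gate (mlp_scale), per the fuse_add / fuse_scale flags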
+ if fuse_add: + self.mlp_add = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d) + else: + self.mlp_add = None + if fuse_scale: + self.mlp_scale = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d) + else: + self.mlp_scale = None + + self.gate = create_act_layer(gate_layer) + self.init_last_zero = init_last_zero + self.reset_parameters() + + def reset_parameters(self): + if self.conv_attn is not None: + nn.init.kaiming_normal_(self.conv_attn.weight, mode='fan_in', nonlinearity='relu') + if self.mlp_add is not None: + nn.init.zeros_(self.mlp_add.fc2.weight) + + def forward(self, x): + B, C, H, W = x.shape + + if self.conv_attn is not None: + attn = self.conv_attn(x).reshape(B, 1, H * W) # (B, 1, H * W) + attn = F.softmax(attn, dim=-1).unsqueeze(3) # (B, 1, H * W, 1) + context = x.reshape(B, C, H * W).unsqueeze(1) @ attn + context = context.view(B, C, 1, 1) + else: + context = x.mean(dim=(2, 3), keepdim=True) + + if self.mlp_scale is not None: + mlp_x = self.mlp_scale(context) + x = x * self.gate(mlp_x) + if self.mlp_add is not None: + mlp_x = self.mlp_add(context) + x = x + mlp_x + + return x diff --git a/timm/models/layers/halo_attn.py b/timm/models/layers/halo_attn.py new file mode 100644 index 0000000..f2ac64f --- /dev/null +++ b/timm/models/layers/halo_attn.py @@ -0,0 +1,233 @@ +""" Halo Self Attention + +Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones` + - https://arxiv.org/abs/2103.12731 + +@misc{2103.12731, +Author = {Ashish Vaswani and Prajit Ramachandran and Aravind Srinivas and Niki Parmar and Blake Hechtman and + Jonathon Shlens}, +Title = {Scaling Local Self-Attention for Parameter Efficient Visual Backbones}, +Year = {2021}, +} + +Status: +This impl is a WIP, there is no official ref impl and some details in paper weren't clear to me. +The attention mechanism works but it's slow as implemented. 
+
+Hacked together by / Copyright 2021 Ross Wightman
+"""
+from typing import List
+
+import torch
+from torch import nn
+import torch.nn.functional as F
+
+from .helpers import make_divisible
+from .weight_init import trunc_normal_
+from .trace_utils import _assert
+
+
+def rel_logits_1d(q, rel_k, permute_mask: List[int]):
+    """ Compute relative logits along one dimension
+
+    As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
+    Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925
+
+    Args:
+        q: (batch, height, width, dim)
+        rel_k: (2 * window - 1, dim)
+        permute_mask: permute output dim according to this
+    """
+    B, H, W, dim = q.shape
+    rel_size = rel_k.shape[0]
+    win_size = (rel_size + 1) // 2
+
+    x = (q @ rel_k.transpose(-1, -2))
+    x = x.reshape(-1, W, rel_size)
+
+    # pad to shift from relative to absolute indexing
+    x_pad = F.pad(x, [0, 1]).flatten(1)
+    x_pad = F.pad(x_pad, [0, rel_size - W])
+
+    # reshape and slice out the padded elements
+    x_pad = x_pad.reshape(-1, W + 1, rel_size)
+    x = x_pad[:, :W, win_size - 1:]
+
+    # reshape and tile
+    x = x.reshape(B, H, 1, W, win_size).expand(-1, -1, win_size, -1, -1)
+    return x.permute(permute_mask)
+
+
+class PosEmbedRel(nn.Module):
+    """ Relative Position Embedding
+    As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
+    Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925
+
+    """
+    def __init__(self, block_size, win_size, dim_head, scale):
+        """
+        Args:
+            block_size (int): block size
+            win_size (int): neighbourhood window size
+            dim_head (int): attention head dim
+            scale (float): scale factor (for init)
+        """
+        super().__init__()
+        self.block_size = block_size
+        self.dim_head = dim_head
+        self.height_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale)
+        self.width_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale)
+
+    def forward(self, q):
+        B, BB, HW, _ = q.shape
+
+        # relative logits in width dimension.
+        q = q.reshape(-1, self.block_size, self.block_size, self.dim_head)
+        rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4))
+
+        # relative logits in height dimension.
+        q = q.transpose(1, 2)
+        rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2))
+
+        rel_logits = rel_logits_h + rel_logits_w
+        rel_logits = rel_logits.reshape(B, BB, HW, -1)
+        return rel_logits
+
+
+class HaloAttn(nn.Module):
+    """ Halo Attention
+
+    Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones`
+        - https://arxiv.org/abs/2103.12731
+
+    The internal dimensions of the attention module are controlled by the interaction of several arguments.
+    * the output dimension of the module is specified by dim_out, which falls back to input dim if not set
+    * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim
+    * the query and key (qk) dimensions are determined by
+      * num_heads * dim_head if dim_head is not None
+      * num_heads * (dim_out * qk_ratio // num_heads) if dim_head is None
+    * as seen above, qk_ratio determines the ratio of q and k relative to the output if dim_head not used
+
+    Args:
+        dim (int): input dimension to the module
+        dim_out (int): output dimension of the module, same as dim if not set
+        feat_size (Tuple[int, int]): size of input feature_map (not used, for arg compat with bottle/lambda)
+        stride: output stride of the module, query downscaled if > 1 (default: 1).
+ num_heads: parallel attention heads (default: 8). + dim_head: dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set + block_size (int): size of blocks. (default: 8) + halo_size (int): size of halo overlap. (default: 3) + qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. (default: 1.0) + qkv_bias (bool) : add bias to q, k, and v projections + avg_down (bool): use average pool downsample instead of strided query blocks + scale_pos_embed (bool): scale the position embedding as well as Q @ K + """ + def __init__( + self, dim, dim_out=None, feat_size=None, stride=1, num_heads=8, dim_head=None, block_size=8, halo_size=3, + qk_ratio=1.0, qkv_bias=False, avg_down=False, scale_pos_embed=False): + super().__init__() + dim_out = dim_out or dim + assert dim_out % num_heads == 0 + assert stride in (1, 2) + self.num_heads = num_heads + self.dim_head_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads + self.dim_head_v = dim_out // self.num_heads + self.dim_out_qk = num_heads * self.dim_head_qk + self.dim_out_v = num_heads * self.dim_head_v + self.scale = self.dim_head_qk ** -0.5 + self.scale_pos_embed = scale_pos_embed + self.block_size = self.block_size_ds = block_size + self.halo_size = halo_size + self.win_size = block_size + halo_size * 2 # neighbourhood window size + self.block_stride = 1 + use_avg_pool = False + if stride > 1: + use_avg_pool = avg_down or block_size % stride != 0 + self.block_stride = 1 if use_avg_pool else stride + self.block_size_ds = self.block_size // self.block_stride + + # FIXME not clear if this stride behaviour is what the paper intended + # Also, the paper mentions using a 3D conv for dealing with the blocking/gather, and leaving + # data in unfolded block form. I haven't wrapped my head around how that'd look. + self.q = nn.Conv2d(dim, self.dim_out_qk, 1, stride=self.block_stride, bias=qkv_bias) + self.kv = nn.Conv2d(dim, self.dim_out_qk + self.dim_out_v, 1, bias=qkv_bias) + + self.pos_embed = PosEmbedRel( + block_size=self.block_size_ds, win_size=self.win_size, dim_head=self.dim_head_qk, scale=self.scale) + + self.pool = nn.AvgPool2d(2, 2) if use_avg_pool else nn.Identity() + + self.reset_parameters() + + def reset_parameters(self): + std = self.q.weight.shape[1] ** -0.5 # fan-in + trunc_normal_(self.q.weight, std=std) + trunc_normal_(self.kv.weight, std=std) + trunc_normal_(self.pos_embed.height_rel, std=self.scale) + trunc_normal_(self.pos_embed.width_rel, std=self.scale) + + def forward(self, x): + B, C, H, W = x.shape + _assert(H % self.block_size == 0, '') + _assert(W % self.block_size == 0, '') + num_h_blocks = H // self.block_size + num_w_blocks = W // self.block_size + num_blocks = num_h_blocks * num_w_blocks + + q = self.q(x) + # unfold + q = q.reshape( + -1, self.dim_head_qk, + num_h_blocks, self.block_size_ds, num_w_blocks, self.block_size_ds).permute(0, 1, 3, 5, 2, 4) + # B, num_heads * dim_head * block_size ** 2, num_blocks + q = q.reshape(B * self.num_heads, self.dim_head_qk, -1, num_blocks).transpose(1, 3) + # B * num_heads, num_blocks, block_size ** 2, dim_head + + kv = self.kv(x) + # Generate overlapping windows for kv. This approach is good for GPU and CPU. However, unfold() is not + # lowered for PyTorch XLA so it will be very slow. See code at bottom of file for XLA friendly approach. + # FIXME figure out how to switch impl between this and conv2d if XLA being used. 
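+        # Illustrative shapes (assumed example values, not from the source): with
+        # H = W = 32, block_size = 8 and halo_size = 3, padding grows kv to 38 x 38 and
+        # the two unfold() calls below cut it into a 4 x 4 grid of overlapping
+        # win_size = 14 windows (stride 8), one window per 8 x 8 query block.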
+ kv = F.pad(kv, [self.halo_size, self.halo_size, self.halo_size, self.halo_size]) + kv = kv.unfold(2, self.win_size, self.block_size).unfold(3, self.win_size, self.block_size).reshape( + B * self.num_heads, self.dim_head_qk + self.dim_head_v, num_blocks, -1).permute(0, 2, 3, 1) + k, v = torch.split(kv, [self.dim_head_qk, self.dim_head_v], dim=-1) + # B * num_heads, num_blocks, win_size ** 2, dim_head_qk or dim_head_v + + if self.scale_pos_embed: + attn = (q @ k.transpose(-1, -2) + self.pos_embed(q)) * self.scale + else: + attn = (q @ k.transpose(-1, -2)) * self.scale + self.pos_embed(q) + # B * num_heads, num_blocks, block_size ** 2, win_size ** 2 + attn = attn.softmax(dim=-1) + + out = (attn @ v).transpose(1, 3) # B * num_heads, dim_head_v, block_size ** 2, num_blocks + # fold + out = out.reshape(-1, self.block_size_ds, self.block_size_ds, num_h_blocks, num_w_blocks) + out = out.permute(0, 3, 1, 4, 2).contiguous().view( + B, self.dim_out_v, H // self.block_stride, W // self.block_stride) + # B, dim_out, H // block_stride, W // block_stride + out = self.pool(out) + return out + + +""" Three alternatives for overlapping windows. + +`.unfold().unfold()` is same speed as stride tricks with similar clarity as F.unfold() + + if is_xla: + # This code achieves haloing on PyTorch XLA with reasonable runtime trade-off, it is + # EXTREMELY slow for backward on a GPU though so I need a way of selecting based on environment. + WW = self.win_size ** 2 + pw = torch.eye(WW, dtype=x.dtype, device=x.device).reshape(WW, 1, self.win_size, self.win_size) + kv = F.conv2d(kv.reshape(-1, 1, H, W), pw, stride=self.block_size, padding=self.halo_size) + elif self.stride_tricks: + kv = F.pad(kv, [self.halo_size, self.halo_size, self.halo_size, self.halo_size]).contiguous() + kv = kv.as_strided(( + B, self.dim_out_qk + self.dim_out_v, self.win_size, self.win_size, num_h_blocks, num_w_blocks), + stride=(kv.stride(0), kv.stride(1), kv.shape[-1], 1, self.block_size * kv.shape[-1], self.block_size)) + else: + kv = F.unfold(kv, kernel_size=self.win_size, stride=self.block_size, padding=self.halo_size) + + kv = kv.reshape( + B * self.num_heads, self.dim_head_qk + self.dim_head_v, -1, num_blocks).transpose(1, 3) +""" diff --git a/timm/models/layers/helpers.py b/timm/models/layers/helpers.py new file mode 100644 index 0000000..cc54ca7 --- /dev/null +++ b/timm/models/layers/helpers.py @@ -0,0 +1,31 @@ +""" Layer/Module Helpers + +Hacked together by / Copyright 2020 Ross Wightman +""" +from itertools import repeat +import collections.abc + + +# From PyTorch internals +def _ntuple(n): + def parse(x): + if isinstance(x, collections.abc.Iterable): + return x + return tuple(repeat(x, n)) + return parse + + +to_1tuple = _ntuple(1) +to_2tuple = _ntuple(2) +to_3tuple = _ntuple(3) +to_4tuple = _ntuple(4) +to_ntuple = _ntuple + + +def make_divisible(v, divisor=8, min_value=None, round_limit=.9): + min_value = min_value or divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. 
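+    # e.g. (illustrative values) make_divisible(100) rounds to 104; make_divisible(10)
+    # first rounds to 8, but 8 < 0.9 * 10, so the guard below bumps it up to 16.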
+ if new_v < round_limit * v: + new_v += divisor + return new_v diff --git a/timm/models/layers/inplace_abn.py b/timm/models/layers/inplace_abn.py new file mode 100644 index 0000000..3aae7cf --- /dev/null +++ b/timm/models/layers/inplace_abn.py @@ -0,0 +1,87 @@ +import torch +from torch import nn as nn + +try: + from inplace_abn.functions import inplace_abn, inplace_abn_sync + has_iabn = True +except ImportError: + has_iabn = False + + def inplace_abn(x, weight, bias, running_mean, running_var, + training=True, momentum=0.1, eps=1e-05, activation="leaky_relu", activation_param=0.01): + raise ImportError( + "Please install InplaceABN:'pip install git+https://github.com/mapillary/inplace_abn.git@v1.0.12'") + + def inplace_abn_sync(**kwargs): + inplace_abn(**kwargs) + + +class InplaceAbn(nn.Module): + """Activated Batch Normalization + + This gathers a BatchNorm and an activation function in a single module + + Parameters + ---------- + num_features : int + Number of feature channels in the input and output. + eps : float + Small constant to prevent numerical issues. + momentum : float + Momentum factor applied to compute running statistics. + affine : bool + If `True` apply learned scale and shift transformation after normalization. + act_layer : str or nn.Module type + Name or type of the activation functions, one of: `leaky_relu`, `elu` + act_param : float + Negative slope for the `leaky_relu` activation. + """ + + def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, apply_act=True, + act_layer="leaky_relu", act_param=0.01, drop_block=None): + super(InplaceAbn, self).__init__() + self.num_features = num_features + self.affine = affine + self.eps = eps + self.momentum = momentum + if apply_act: + if isinstance(act_layer, str): + assert act_layer in ('leaky_relu', 'elu', 'identity', '') + self.act_name = act_layer if act_layer else 'identity' + else: + # convert act layer passed as type to string + if act_layer == nn.ELU: + self.act_name = 'elu' + elif act_layer == nn.LeakyReLU: + self.act_name = 'leaky_relu' + elif act_layer == nn.Identity: + self.act_name = 'identity' + else: + assert False, f'Invalid act layer {act_layer.__name__} for IABN' + else: + self.act_name = 'identity' + self.act_param = act_param + if self.affine: + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + else: + self.register_parameter('weight', None) + self.register_parameter('bias', None) + self.register_buffer('running_mean', torch.zeros(num_features)) + self.register_buffer('running_var', torch.ones(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.constant_(self.running_mean, 0) + nn.init.constant_(self.running_var, 1) + if self.affine: + nn.init.constant_(self.weight, 1) + nn.init.constant_(self.bias, 0) + + def forward(self, x): + output = inplace_abn( + x, self.weight, self.bias, self.running_mean, self.running_var, + self.training, self.momentum, self.eps, self.act_name, self.act_param) + if isinstance(output, tuple): + output = output[0] + return output diff --git a/timm/models/layers/lambda_layer.py b/timm/models/layers/lambda_layer.py new file mode 100644 index 0000000..e50b43c --- /dev/null +++ b/timm/models/layers/lambda_layer.py @@ -0,0 +1,133 @@ +""" Lambda Layer + +Paper: `LambdaNetworks: Modeling Long-Range Interactions Without Attention` + - https://arxiv.org/abs/2102.08602 + +@misc{2102.08602, +Author = {Irwan Bello}, +Title = {LambdaNetworks: Modeling Long-Range Interactions Without 
Attention},
+Year = {2021},
+}
+
+Status:
+This impl is a WIP. Code snippets in the paper were used as reference, but there is a
+good chance some details are missing/wrong.
+
+I've only implemented local lambda conv based pos embeddings.
+
+For a PyTorch impl that includes other embedding options check out
+https://github.com/lucidrains/lambda-networks
+
+Hacked together by / Copyright 2021 Ross Wightman
+"""
+import torch
+from torch import nn
+import torch.nn.functional as F
+
+from .helpers import to_2tuple, make_divisible
+from .weight_init import trunc_normal_
+
+
+def rel_pos_indices(size):
+    size = to_2tuple(size)
+    pos = torch.stack(torch.meshgrid(torch.arange(size[0]), torch.arange(size[1]))).flatten(1)
+    rel_pos = pos[:, None, :] - pos[:, :, None]
+    rel_pos[0] += size[0] - 1
+    rel_pos[1] += size[1] - 1
+    return rel_pos  # 2, H * W, H * W
+
+
+class LambdaLayer(nn.Module):
+    """Lambda Layer
+
+    Paper: `LambdaNetworks: Modeling Long-Range Interactions Without Attention`
+        - https://arxiv.org/abs/2102.08602
+
+    NOTE: intra-depth parameter 'u' is fixed at 1. It did not appear worth the complexity to add.
+
+    The internal dimensions of the lambda module are controlled via the interaction of several arguments.
+      * the output dimension of the module is specified by dim_out, which falls back to input dim if not set
+      * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim
+      * the query (q) and key (k) dimensions are determined by
+        * dim_head = (dim_out * qk_ratio // num_heads) if dim_head is None
+        * q = num_heads * dim_head, k = dim_head
+      * as seen above, qk_ratio determines the ratio of q and k relative to the output if dim_head not set
+
+    Args:
+        dim (int): input dimension to the module
+        dim_out (int): output dimension of the module, same as dim if not set
+        feat_size (Tuple[int, int]): size of input feature_map for relative pos variant H, W
+        stride (int): output stride of the module, avg pool used if stride == 2
+        num_heads (int): parallel attention heads.
+        dim_head (int): dimension of query and key heads, calculated from dim_out * qk_ratio // num_heads if not set
+        r (int): local lambda convolution radius. Use lambda conv if set, else relative pos if not. (default: 9)
+        qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set.
(default: 1.0) + qkv_bias (bool): add bias to q, k, and v projections + """ + def __init__( + self, dim, dim_out=None, feat_size=None, stride=1, num_heads=4, dim_head=16, r=9, + qk_ratio=1.0, qkv_bias=False): + super().__init__() + dim_out = dim_out or dim + assert dim_out % num_heads == 0, ' should be divided by num_heads' + self.dim_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads + self.num_heads = num_heads + self.dim_v = dim_out // num_heads + + self.qkv = nn.Conv2d( + dim, + num_heads * self.dim_qk + self.dim_qk + self.dim_v, + kernel_size=1, bias=qkv_bias) + self.norm_q = nn.BatchNorm2d(num_heads * self.dim_qk) + self.norm_v = nn.BatchNorm2d(self.dim_v) + + if r is not None: + # local lambda convolution for pos + self.conv_lambda = nn.Conv3d(1, self.dim_qk, (r, r, 1), padding=(r // 2, r // 2, 0)) + self.pos_emb = None + self.rel_pos_indices = None + else: + # relative pos embedding + assert feat_size is not None + feat_size = to_2tuple(feat_size) + rel_size = [2 * s - 1 for s in feat_size] + self.conv_lambda = None + self.pos_emb = nn.Parameter(torch.zeros(rel_size[0], rel_size[1], self.dim_qk)) + self.register_buffer('rel_pos_indices', rel_pos_indices(feat_size), persistent=False) + + self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity() + + self.reset_parameters() + + def reset_parameters(self): + trunc_normal_(self.qkv.weight, std=self.qkv.weight.shape[1] ** -0.5) # fan-in + if self.conv_lambda is not None: + trunc_normal_(self.conv_lambda.weight, std=self.dim_qk ** -0.5) + if self.pos_emb is not None: + trunc_normal_(self.pos_emb, std=.02) + + def forward(self, x): + B, C, H, W = x.shape + M = H * W + qkv = self.qkv(x) + q, k, v = torch.split(qkv, [ + self.num_heads * self.dim_qk, self.dim_qk, self.dim_v], dim=1) + q = self.norm_q(q).reshape(B, self.num_heads, self.dim_qk, M).transpose(-1, -2) # B, num_heads, M, K + v = self.norm_v(v).reshape(B, self.dim_v, M).transpose(-1, -2) # B, M, V + k = F.softmax(k.reshape(B, self.dim_qk, M), dim=-1) # B, K, M + + content_lam = k @ v # B, K, V + content_out = q @ content_lam.unsqueeze(1) # B, num_heads, M, V + + if self.pos_emb is None: + position_lam = self.conv_lambda(v.reshape(B, 1, H, W, self.dim_v)) # B, H, W, V, K + position_lam = position_lam.reshape(B, 1, self.dim_qk, H * W, self.dim_v).transpose(2, 3) # B, 1, M, K, V + else: + # FIXME relative pos embedding path not fully verified + pos_emb = self.pos_emb[self.rel_pos_indices[0], self.rel_pos_indices[1]].expand(B, -1, -1, -1) + position_lam = (pos_emb.transpose(-1, -2) @ v.unsqueeze(1)).unsqueeze(1) # B, 1, M, K, V + position_out = (q.unsqueeze(-2) @ position_lam).squeeze(-2) # B, num_heads, M, V + + out = (content_out + position_out).transpose(-1, -2).reshape(B, C, H, W) # B, C (num_heads * V), H, W + out = self.pool(out) + return out diff --git a/timm/models/layers/linear.py b/timm/models/layers/linear.py new file mode 100644 index 0000000..38fe338 --- /dev/null +++ b/timm/models/layers/linear.py @@ -0,0 +1,19 @@ +""" Linear layer (alternate definition) +""" +import torch +import torch.nn.functional as F +from torch import nn as nn + + +class Linear(nn.Linear): + r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b` + + Wraps torch.nn.Linear to support AMP + torchscript usage by manually casting + weight & bias to input.dtype to work around an issue w/ torch.addmm in this use case. 
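+
+    Outside of scripting it falls straight through to F.linear with the parameters
+    as-is, so behaviour and state_dict keys match a plain nn.Linear.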
+ """ + def forward(self, input: torch.Tensor) -> torch.Tensor: + if torch.jit.is_scripting(): + bias = self.bias.to(dtype=input.dtype) if self.bias is not None else None + return F.linear(input, self.weight.to(dtype=input.dtype), bias=bias) + else: + return F.linear(input, self.weight, self.bias) diff --git a/timm/models/layers/median_pool.py b/timm/models/layers/median_pool.py new file mode 100644 index 0000000..40bd71a --- /dev/null +++ b/timm/models/layers/median_pool.py @@ -0,0 +1,49 @@ +""" Median Pool +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch.nn as nn +import torch.nn.functional as F +from .helpers import to_2tuple, to_4tuple + + +class MedianPool2d(nn.Module): + """ Median pool (usable as median filter when stride=1) module. + + Args: + kernel_size: size of pooling kernel, int or 2-tuple + stride: pool stride, int or 2-tuple + padding: pool padding, int or 4-tuple (l, r, t, b) as in pytorch F.pad + same: override padding and enforce same padding, boolean + """ + def __init__(self, kernel_size=3, stride=1, padding=0, same=False): + super(MedianPool2d, self).__init__() + self.k = to_2tuple(kernel_size) + self.stride = to_2tuple(stride) + self.padding = to_4tuple(padding) # convert to l, r, t, b + self.same = same + + def _padding(self, x): + if self.same: + ih, iw = x.size()[2:] + if ih % self.stride[0] == 0: + ph = max(self.k[0] - self.stride[0], 0) + else: + ph = max(self.k[0] - (ih % self.stride[0]), 0) + if iw % self.stride[1] == 0: + pw = max(self.k[1] - self.stride[1], 0) + else: + pw = max(self.k[1] - (iw % self.stride[1]), 0) + pl = pw // 2 + pr = pw - pl + pt = ph // 2 + pb = ph - pt + padding = (pl, pr, pt, pb) + else: + padding = self.padding + return padding + + def forward(self, x): + x = F.pad(x, self._padding(x), mode='reflect') + x = x.unfold(2, self.k[0], self.stride[0]).unfold(3, self.k[1], self.stride[1]) + x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] + return x diff --git a/timm/models/layers/mixed_conv2d.py b/timm/models/layers/mixed_conv2d.py new file mode 100644 index 0000000..fa0ce56 --- /dev/null +++ b/timm/models/layers/mixed_conv2d.py @@ -0,0 +1,51 @@ +""" PyTorch Mixed Convolution + +Paper: MixConv: Mixed Depthwise Convolutional Kernels (https://arxiv.org/abs/1907.09595) + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +from torch import nn as nn + +from .conv2d_same import create_conv2d_pad + + +def _split_channels(num_chan, num_groups): + split = [num_chan // num_groups for _ in range(num_groups)] + split[0] += num_chan - sum(split) + return split + + +class MixedConv2d(nn.ModuleDict): + """ Mixed Grouped Convolution + + Based on MDConv and GroupedConv in MixNet impl: + https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mixnet/custom_layers.py + """ + def __init__(self, in_channels, out_channels, kernel_size=3, + stride=1, padding='', dilation=1, depthwise=False, **kwargs): + super(MixedConv2d, self).__init__() + + kernel_size = kernel_size if isinstance(kernel_size, list) else [kernel_size] + num_groups = len(kernel_size) + in_splits = _split_channels(in_channels, num_groups) + out_splits = _split_channels(out_channels, num_groups) + self.in_channels = sum(in_splits) + self.out_channels = sum(out_splits) + for idx, (k, in_ch, out_ch) in enumerate(zip(kernel_size, in_splits, out_splits)): + conv_groups = in_ch if depthwise else 1 + # use add_module to keep key space clean + self.add_module( + str(idx), + create_conv2d_pad( + in_ch, out_ch, k, stride=stride, + 
padding=padding, dilation=dilation, groups=conv_groups, **kwargs) + ) + self.splits = in_splits + + def forward(self, x): + x_split = torch.split(x, self.splits, 1) + x_out = [c(x_split[i]) for i, c in enumerate(self.values())] + x = torch.cat(x_out, 1) + return x diff --git a/timm/models/layers/mlp.py b/timm/models/layers/mlp.py new file mode 100644 index 0000000..a85e28d --- /dev/null +++ b/timm/models/layers/mlp.py @@ -0,0 +1,119 @@ +""" MLP module w/ dropout and configurable activation layer + +Hacked together by / Copyright 2020 Ross Wightman +""" +from torch import nn as nn + +from .helpers import to_2tuple + + +class Mlp(nn.Module): + """ MLP as used in Vision Transformer, MLP-Mixer and related networks + """ + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + drop_probs = to_2tuple(drop) + + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.drop1 = nn.Dropout(drop_probs[0]) + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop2 = nn.Dropout(drop_probs[1]) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop1(x) + x = self.fc2(x) + x = self.drop2(x) + return x + + +class GluMlp(nn.Module): + """ MLP w/ GLU style gating + See: https://arxiv.org/abs/1612.08083, https://arxiv.org/abs/2002.05202 + """ + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.Sigmoid, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + assert hidden_features % 2 == 0 + drop_probs = to_2tuple(drop) + + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.drop1 = nn.Dropout(drop_probs[0]) + self.fc2 = nn.Linear(hidden_features // 2, out_features) + self.drop2 = nn.Dropout(drop_probs[1]) + + def init_weights(self): + # override init of fc1 w/ gate portion set to weight near zero, bias=1 + fc1_mid = self.fc1.bias.shape[0] // 2 + nn.init.ones_(self.fc1.bias[fc1_mid:]) + nn.init.normal_(self.fc1.weight[fc1_mid:], std=1e-6) + + def forward(self, x): + x = self.fc1(x) + x, gates = x.chunk(2, dim=-1) + x = x * self.act(gates) + x = self.drop1(x) + x = self.fc2(x) + x = self.drop2(x) + return x + + +class GatedMlp(nn.Module): + """ MLP as used in gMLP + """ + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, + gate_layer=None, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + drop_probs = to_2tuple(drop) + + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.drop1 = nn.Dropout(drop_probs[0]) + if gate_layer is not None: + assert hidden_features % 2 == 0 + self.gate = gate_layer(hidden_features) + hidden_features = hidden_features // 2 # FIXME base reduction on gate property? 
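+            # assumes a gMLP-style gate that chunks the hidden dim into a content half
+            # and a gate half, so fc2 below consumes hidden_features // 2 channels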
+ else: + self.gate = nn.Identity() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop2 = nn.Dropout(drop_probs[1]) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop1(x) + x = self.gate(x) + x = self.fc2(x) + x = self.drop2(x) + return x + + +class ConvMlp(nn.Module): + """ MLP using 1x1 convs that keeps spatial dims + """ + def __init__( + self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU, norm_layer=None, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=True) + self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity() + self.act = act_layer() + self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=True) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.norm(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + return x diff --git a/timm/models/layers/non_local_attn.py b/timm/models/layers/non_local_attn.py new file mode 100644 index 0000000..881fa36 --- /dev/null +++ b/timm/models/layers/non_local_attn.py @@ -0,0 +1,145 @@ +""" Bilinear-Attention-Transform and Non-Local Attention + +Paper: `Non-Local Neural Networks With Grouped Bilinear Attentional Transforms` + - https://openaccess.thecvf.com/content_CVPR_2020/html/Chi_Non-Local_Neural_Networks_With_Grouped_Bilinear_Attentional_Transforms_CVPR_2020_paper.html +Adapted from original code: https://github.com/BA-Transform/BAT-Image-Classification +""" +import torch +from torch import nn +from torch.nn import functional as F + +from .conv_bn_act import ConvBnAct +from .helpers import make_divisible +from .trace_utils import _assert + + +class NonLocalAttn(nn.Module): + """Spatial NL block for image classification. + + This was adapted from https://github.com/BA-Transform/BAT-Image-Classification + Their NonLocal impl inspired by https://github.com/facebookresearch/video-nonlocal-net. 
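+
+    In brief: 1x1 projections t, p and g at reduced channel width, a scaled softmax
+    over flattened spatial positions, then a 1x1 projection back (z) with BN and a
+    residual add.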
+ """ + + def __init__(self, in_channels, use_scale=True, rd_ratio=1/8, rd_channels=None, rd_divisor=8, **kwargs): + super(NonLocalAttn, self).__init__() + if rd_channels is None: + rd_channels = make_divisible(in_channels * rd_ratio, divisor=rd_divisor) + self.scale = in_channels ** -0.5 if use_scale else 1.0 + self.t = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True) + self.p = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True) + self.g = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True) + self.z = nn.Conv2d(rd_channels, in_channels, kernel_size=1, stride=1, bias=True) + self.norm = nn.BatchNorm2d(in_channels) + self.reset_parameters() + + def forward(self, x): + shortcut = x + + t = self.t(x) + p = self.p(x) + g = self.g(x) + + B, C, H, W = t.size() + t = t.view(B, C, -1).permute(0, 2, 1) + p = p.view(B, C, -1) + g = g.view(B, C, -1).permute(0, 2, 1) + + att = torch.bmm(t, p) * self.scale + att = F.softmax(att, dim=2) + x = torch.bmm(att, g) + + x = x.permute(0, 2, 1).reshape(B, C, H, W) + x = self.z(x) + x = self.norm(x) + shortcut + + return x + + def reset_parameters(self): + for name, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_( + m.weight, mode='fan_out', nonlinearity='relu') + if len(list(m.parameters())) > 1: + nn.init.constant_(m.bias, 0.0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 0) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.GroupNorm): + nn.init.constant_(m.weight, 0) + nn.init.constant_(m.bias, 0) + + +class BilinearAttnTransform(nn.Module): + + def __init__(self, in_channels, block_size, groups, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d): + super(BilinearAttnTransform, self).__init__() + + self.conv1 = ConvBnAct(in_channels, groups, 1, act_layer=act_layer, norm_layer=norm_layer) + self.conv_p = nn.Conv2d(groups, block_size * block_size * groups, kernel_size=(block_size, 1)) + self.conv_q = nn.Conv2d(groups, block_size * block_size * groups, kernel_size=(1, block_size)) + self.conv2 = ConvBnAct(in_channels, in_channels, 1, act_layer=act_layer, norm_layer=norm_layer) + self.block_size = block_size + self.groups = groups + self.in_channels = in_channels + + def resize_mat(self, x, t: int): + B, C, block_size, block_size1 = x.shape + _assert(block_size == block_size1, '') + if t <= 1: + return x + x = x.view(B * C, -1, 1, 1) + x = x * torch.eye(t, t, dtype=x.dtype, device=x.device) + x = x.view(B * C, block_size, block_size, t, t) + x = torch.cat(torch.split(x, 1, dim=1), dim=3) + x = torch.cat(torch.split(x, 1, dim=2), dim=4) + x = x.view(B, C, block_size * t, block_size * t) + return x + + def forward(self, x): + _assert(x.shape[-1] % self.block_size == 0, '') + _assert(x.shape[-2] % self.block_size == 0, '') + B, C, H, W = x.shape + out = self.conv1(x) + rp = F.adaptive_max_pool2d(out, (self.block_size, 1)) + cp = F.adaptive_max_pool2d(out, (1, self.block_size)) + p = self.conv_p(rp).view(B, self.groups, self.block_size, self.block_size).sigmoid() + q = self.conv_q(cp).view(B, self.groups, self.block_size, self.block_size).sigmoid() + p = p / p.sum(dim=3, keepdim=True) + q = q / q.sum(dim=2, keepdim=True) + p = p.view(B, self.groups, 1, self.block_size, self.block_size).expand(x.size( + 0), self.groups, C // self.groups, self.block_size, self.block_size).contiguous() + p = p.view(B, C, self.block_size, self.block_size) + q = q.view(B, self.groups, 1, self.block_size, self.block_size).expand(x.size( + 0), self.groups, C // 
self.groups, self.block_size, self.block_size).contiguous() + q = q.view(B, C, self.block_size, self.block_size) + p = self.resize_mat(p, H // self.block_size) + q = self.resize_mat(q, W // self.block_size) + y = p.matmul(x) + y = y.matmul(q) + + y = self.conv2(y) + return y + + +class BatNonLocalAttn(nn.Module): + """ BAT + Adapted from: https://github.com/BA-Transform/BAT-Image-Classification + """ + + def __init__( + self, in_channels, block_size=7, groups=2, rd_ratio=0.25, rd_channels=None, rd_divisor=8, + drop_rate=0.2, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, **_): + super().__init__() + if rd_channels is None: + rd_channels = make_divisible(in_channels * rd_ratio, divisor=rd_divisor) + self.conv1 = ConvBnAct(in_channels, rd_channels, 1, act_layer=act_layer, norm_layer=norm_layer) + self.ba = BilinearAttnTransform(rd_channels, block_size, groups, act_layer=act_layer, norm_layer=norm_layer) + self.conv2 = ConvBnAct(rd_channels, in_channels, 1, act_layer=act_layer, norm_layer=norm_layer) + self.dropout = nn.Dropout2d(p=drop_rate) + + def forward(self, x): + xl = self.conv1(x) + y = self.ba(xl) + y = self.conv2(y) + y = self.dropout(y) + return y + x diff --git a/timm/models/layers/norm.py b/timm/models/layers/norm.py new file mode 100644 index 0000000..8529742 --- /dev/null +++ b/timm/models/layers/norm.py @@ -0,0 +1,24 @@ +""" Normalization layers and wrappers +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class GroupNorm(nn.GroupNorm): + def __init__(self, num_channels, num_groups=32, eps=1e-5, affine=True): + # NOTE num_channels is swapped to first arg for consistency in swapping norm layers with BN + super().__init__(num_groups, num_channels, eps=eps, affine=affine) + + def forward(self, x): + return F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) + + +class LayerNorm2d(nn.LayerNorm): + """ LayerNorm for channels of '2D' spatial BCHW tensors """ + def __init__(self, num_channels): + super().__init__(num_channels) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return F.layer_norm( + x.permute(0, 2, 3, 1), self.normalized_shape, self.weight, self.bias, self.eps).permute(0, 3, 1, 2) diff --git a/timm/models/layers/norm_act.py b/timm/models/layers/norm_act.py new file mode 100644 index 0000000..2e15181 --- /dev/null +++ b/timm/models/layers/norm_act.py @@ -0,0 +1,85 @@ +""" Normalization + Activation Layers +""" +import torch +from torch import nn as nn +from torch.nn import functional as F + +from .create_act import get_act_layer + + +class BatchNormAct2d(nn.BatchNorm2d): + """BatchNorm + Activation + + This module performs BatchNorm + Activation in a manner that will remain backwards + compatible with weights trained with separate bn, act. This is why we inherit from BN + instead of composing it as a .bn member. 
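+    Inheriting keeps the BN parameter/buffer keys (weight, bias, running_mean,
+    running_var) identical to a plain nn.BatchNorm2d, so such checkpoints load
+    without remapping.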
+    """
+    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True,
+                 apply_act=True, act_layer=nn.ReLU, inplace=True, drop_block=None):
+        super(BatchNormAct2d, self).__init__(
+            num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats)
+        if isinstance(act_layer, str):
+            act_layer = get_act_layer(act_layer)
+        if act_layer is not None and apply_act:
+            act_args = dict(inplace=True) if inplace else {}
+            self.act = act_layer(**act_args)
+        else:
+            self.act = nn.Identity()
+
+    def _forward_jit(self, x):
+        """ A cut & paste of the contents of the PyTorch BatchNorm2d forward function
+        """
+        # exponential_average_factor is set to self.momentum
+        # (when it is available) only so that it gets updated
+        # in ONNX graph when this node is exported to ONNX.
+        if self.momentum is None:
+            exponential_average_factor = 0.0
+        else:
+            exponential_average_factor = self.momentum
+
+        if self.training and self.track_running_stats:
+            # TODO: if statement only here to tell the jit to skip emitting this when it is None
+            if self.num_batches_tracked is not None:
+                self.num_batches_tracked += 1
+                if self.momentum is None:  # use cumulative moving average
+                    exponential_average_factor = 1.0 / float(self.num_batches_tracked)
+                else:  # use exponential moving average
+                    exponential_average_factor = self.momentum
+
+        x = F.batch_norm(
+            x, self.running_mean, self.running_var, self.weight, self.bias,
+            self.training or not self.track_running_stats,
+            exponential_average_factor, self.eps)
+        return x
+
+    @torch.jit.ignore
+    def _forward_python(self, x):
+        return super(BatchNormAct2d, self).forward(x)
+
+    def forward(self, x):
+        # FIXME cannot call parent forward() and maintain jit.script compatibility?
+        if torch.jit.is_scripting():
+            x = self._forward_jit(x)
+        else:
+            x = self._forward_python(x)
+        x = self.act(x)
+        return x
+
+
+class GroupNormAct(nn.GroupNorm):
+    # NOTE num_channel and num_groups order flipped for easier layer swaps / binding of fixed args
+    def __init__(self, num_channels, num_groups=32, eps=1e-5, affine=True,
+                 apply_act=True, act_layer=nn.ReLU, inplace=True, drop_block=None):
+        super(GroupNormAct, self).__init__(num_groups, num_channels, eps=eps, affine=affine)
+        if isinstance(act_layer, str):
+            act_layer = get_act_layer(act_layer)
+        if act_layer is not None and apply_act:
+            act_args = dict(inplace=True) if inplace else {}
+            self.act = act_layer(**act_args)
+        else:
+            self.act = nn.Identity()
+
+    def forward(self, x):
+        x = F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps)
+        x = self.act(x)
+        return x
diff --git a/timm/models/layers/padding.py b/timm/models/layers/padding.py
new file mode 100644
index 0000000..34afc37
--- /dev/null
+++ b/timm/models/layers/padding.py
@@ -0,0 +1,56 @@
+""" Padding Helpers
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+import math
+from typing import List, Tuple
+
+import torch.nn.functional as F
+
+
+# Calculate symmetric padding for a convolution
+def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> int:
+    padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
+    return padding
+
+
+# Calculate asymmetric TensorFlow-like 'SAME' padding for a convolution
+def get_same_padding(x: int, k: int, s: int, d: int):
+    return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0)
+
+
+# Can SAME padding for given args be done statically?
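+# (Yes when stride == 1 and the dilated kernel extent d * (k - 1) + 1 is odd; the pad is
+# then input-independent, e.g. k=3, s=1, d=1 -> ((1 - 1) + 1 * (3 - 1)) // 2 = 1 per side.)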
+def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_): + return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0 + + +# Dynamically pad input x with 'SAME' padding for conv with specified args +def pad_same(x, k: List[int], s: List[int], d: List[int] = (1, 1), value: float = 0): + ih, iw = x.size()[-2:] + pad_h, pad_w = get_same_padding(ih, k[0], s[0], d[0]), get_same_padding(iw, k[1], s[1], d[1]) + if pad_h > 0 or pad_w > 0: + x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2], value=value) + return x + + +def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[Tuple, bool]: + dynamic = False + if isinstance(padding, str): + # for any string padding, the padding will be calculated for you, one of three ways + padding = padding.lower() + if padding == 'same': + # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact + if is_static_pad(kernel_size, **kwargs): + # static case, no extra overhead + padding = get_padding(kernel_size, **kwargs) + else: + # dynamic 'SAME' padding, has runtime/GPU memory overhead + padding = 0 + dynamic = True + elif padding == 'valid': + # 'VALID' padding, same as padding=0 + padding = 0 + else: + # Default to PyTorch style 'same'-ish symmetric padding + padding = get_padding(kernel_size, **kwargs) + return padding, dynamic diff --git a/timm/models/layers/patch_embed.py b/timm/models/layers/patch_embed.py new file mode 100644 index 0000000..6a7face --- /dev/null +++ b/timm/models/layers/patch_embed.py @@ -0,0 +1,39 @@ +""" Image to Patch Embedding using Conv2d + +A convolution based approach to patchifying a 2D image w/ embedding projection. + +Based on the impl in https://github.com/google-research/vision_transformer + +Hacked together by / Copyright 2020 Ross Wightman +""" +from torch import nn as nn + +from .helpers import to_2tuple +from .trace_utils import _assert + + +class PatchEmbed(nn.Module): + """ 2D Image to Patch Embedding + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + self.img_size = img_size + self.patch_size = patch_size + self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) + self.num_patches = self.grid_size[0] * self.grid_size[1] + self.flatten = flatten + + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() + + def forward(self, x): + B, C, H, W = x.shape + _assert(H == self.img_size[0], f"Input image height ({H}) doesn't match model ({self.img_size[0]}).") + _assert(W == self.img_size[1], f"Input image width ({W}) doesn't match model ({self.img_size[1]}).") + x = self.proj(x) + if self.flatten: + x = x.flatten(2).transpose(1, 2) # BCHW -> BNC + x = self.norm(x) + return x diff --git a/timm/models/layers/pool2d_same.py b/timm/models/layers/pool2d_same.py new file mode 100644 index 0000000..4c2a1c4 --- /dev/null +++ b/timm/models/layers/pool2d_same.py @@ -0,0 +1,73 @@ +""" AvgPool2d w/ Same Padding + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import List, Tuple, Optional + +from .helpers import to_2tuple +from .padding import pad_same, get_padding_value + + +def avg_pool2d_same(x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0), + ceil_mode: bool = 
False, count_include_pad: bool = True):
+    # FIXME how to deal with count_include_pad vs not for external padding?
+    x = pad_same(x, kernel_size, stride)
+    return F.avg_pool2d(x, kernel_size, stride, (0, 0), ceil_mode, count_include_pad)
+
+
+class AvgPool2dSame(nn.AvgPool2d):
+    """ Tensorflow like 'SAME' wrapper for 2D average pooling
+    """
+    def __init__(self, kernel_size: int, stride=None, padding=0, ceil_mode=False, count_include_pad=True):
+        kernel_size = to_2tuple(kernel_size)
+        stride = to_2tuple(stride)
+        super(AvgPool2dSame, self).__init__(kernel_size, stride, (0, 0), ceil_mode, count_include_pad)
+
+    def forward(self, x):
+        x = pad_same(x, self.kernel_size, self.stride)
+        return F.avg_pool2d(
+            x, self.kernel_size, self.stride, self.padding, self.ceil_mode, self.count_include_pad)
+
+
+def max_pool2d_same(
+        x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0),
+        dilation: List[int] = (1, 1), ceil_mode: bool = False):
+    x = pad_same(x, kernel_size, stride, value=-float('inf'))
+    return F.max_pool2d(x, kernel_size, stride, (0, 0), dilation, ceil_mode)
+
+
+class MaxPool2dSame(nn.MaxPool2d):
+    """ Tensorflow like 'SAME' wrapper for 2D max pooling
+    """
+    def __init__(self, kernel_size: int, stride=None, padding=0, dilation=1, ceil_mode=False):
+        kernel_size = to_2tuple(kernel_size)
+        stride = to_2tuple(stride)
+        dilation = to_2tuple(dilation)
+        super(MaxPool2dSame, self).__init__(kernel_size, stride, (0, 0), dilation, ceil_mode)
+
+    def forward(self, x):
+        x = pad_same(x, self.kernel_size, self.stride, value=-float('inf'))
+        return F.max_pool2d(x, self.kernel_size, self.stride, (0, 0), self.dilation, self.ceil_mode)
+
+
+def create_pool2d(pool_type, kernel_size, stride=None, **kwargs):
+    stride = stride or kernel_size
+    padding = kwargs.pop('padding', '')
+    padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, **kwargs)
+    if is_dynamic:
+        if pool_type == 'avg':
+            return AvgPool2dSame(kernel_size, stride=stride, **kwargs)
+        elif pool_type == 'max':
+            return MaxPool2dSame(kernel_size, stride=stride, **kwargs)
+        else:
+            assert False, f'Unsupported pool type {pool_type}'
+    else:
+        if pool_type == 'avg':
+            return nn.AvgPool2d(kernel_size, stride=stride, padding=padding, **kwargs)
+        elif pool_type == 'max':
+            return nn.MaxPool2d(kernel_size, stride=stride, padding=padding, **kwargs)
+        else:
+            assert False, f'Unsupported pool type {pool_type}'
diff --git a/timm/models/layers/selective_kernel.py b/timm/models/layers/selective_kernel.py
new file mode 100644
index 0000000..1aeb929
--- /dev/null
+++ b/timm/models/layers/selective_kernel.py
@@ -0,0 +1,120 @@
+""" Selective Kernel Convolution/Attention
+
+Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586)
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+import torch
+from torch import nn as nn
+
+from .conv_bn_act import ConvBnAct
+from .helpers import make_divisible
+from .trace_utils import _assert
+
+
+def _kernel_valid(k):
+    if isinstance(k, (list, tuple)):
+        # validate every kernel size in the list, not just the first
+        for ki in k:
+            _kernel_valid(ki)
+        return
+    assert k >= 3 and k % 2
+
+
+class SelectiveKernelAttn(nn.Module):
+    def __init__(self, channels, num_paths=2, attn_channels=32,
+                 act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d):
+        """ Selective Kernel Attention Module
+
+        Selective Kernel attention mechanism factored out into its own module.
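+
+        Expects a stacked input of shape (B, num_paths, C, H, W) and returns a
+        path-softmax attention tensor of shape (B, num_paths, C, 1, 1).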
+ + """ + super(SelectiveKernelAttn, self).__init__() + self.num_paths = num_paths + self.fc_reduce = nn.Conv2d(channels, attn_channels, kernel_size=1, bias=False) + self.bn = norm_layer(attn_channels) + self.act = act_layer(inplace=True) + self.fc_select = nn.Conv2d(attn_channels, channels * num_paths, kernel_size=1, bias=False) + + def forward(self, x): + _assert(x.shape[1] == self.num_paths, '') + x = x.sum(1).mean((2, 3), keepdim=True) + x = self.fc_reduce(x) + x = self.bn(x) + x = self.act(x) + x = self.fc_select(x) + B, C, H, W = x.shape + x = x.view(B, self.num_paths, C // self.num_paths, H, W) + x = torch.softmax(x, dim=1) + return x + + +class SelectiveKernel(nn.Module): + + def __init__(self, in_channels, out_channels=None, kernel_size=None, stride=1, dilation=1, groups=1, + rd_ratio=1./16, rd_channels=None, rd_divisor=8, keep_3x3=True, split_input=True, + drop_block=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None): + """ Selective Kernel Convolution Module + + As described in Selective Kernel Networks (https://arxiv.org/abs/1903.06586) with some modifications. + + Largest change is the input split, which divides the input channels across each convolution path, this can + be viewed as a grouping of sorts, but the output channel counts expand to the module level value. This keeps + the parameter count from ballooning when the convolutions themselves don't have groups, but still provides + a noteworthy increase in performance over similar param count models without this attention layer. -Ross W + + Args: + in_channels (int): module input (feature) channel count + out_channels (int): module output (feature) channel count + kernel_size (int, list): kernel size for each convolution branch + stride (int): stride for convolutions + dilation (int): dilation for module as a whole, impacts dilation of each branch + groups (int): number of groups for each branch + rd_ratio (int, float): reduction factor for attention features + keep_3x3 (bool): keep all branch convolution kernels as 3x3, changing larger kernels for dilations + split_input (bool): split input channels evenly across each convolution branch, keeps param count lower, + can be viewed as grouping by path, output expands to module out_channels count + drop_block (nn.Module): drop block module + act_layer (nn.Module): activation layer to use + norm_layer (nn.Module): batchnorm/norm layer to use + """ + super(SelectiveKernel, self).__init__() + out_channels = out_channels or in_channels + kernel_size = kernel_size or [3, 5] # default to one 3x3 and one 5x5 branch. 
5x5 -> 3x3 + dilation + _kernel_valid(kernel_size) + if not isinstance(kernel_size, list): + kernel_size = [kernel_size] * 2 + if keep_3x3: + dilation = [dilation * (k - 1) // 2 for k in kernel_size] + kernel_size = [3] * len(kernel_size) + else: + dilation = [dilation] * len(kernel_size) + self.num_paths = len(kernel_size) + self.in_channels = in_channels + self.out_channels = out_channels + self.split_input = split_input + if self.split_input: + assert in_channels % self.num_paths == 0 + in_channels = in_channels // self.num_paths + groups = min(out_channels, groups) + + conv_kwargs = dict( + stride=stride, groups=groups, drop_block=drop_block, act_layer=act_layer, norm_layer=norm_layer, + aa_layer=aa_layer) + self.paths = nn.ModuleList([ + ConvBnAct(in_channels, out_channels, kernel_size=k, dilation=d, **conv_kwargs) + for k, d in zip(kernel_size, dilation)]) + + attn_channels = rd_channels or make_divisible(out_channels * rd_ratio, divisor=rd_divisor) + self.attn = SelectiveKernelAttn(out_channels, self.num_paths, attn_channels) + self.drop_block = drop_block + + def forward(self, x): + if self.split_input: + x_split = torch.split(x, self.in_channels // self.num_paths, 1) + x_paths = [op(x_split[i]) for i, op in enumerate(self.paths)] + else: + x_paths = [op(x) for op in self.paths] + x = torch.stack(x_paths, dim=1) + x_attn = self.attn(x) + x = x * x_attn + x = torch.sum(x, dim=1) + return x diff --git a/timm/models/layers/separable_conv.py b/timm/models/layers/separable_conv.py new file mode 100644 index 0000000..1ddcb4e --- /dev/null +++ b/timm/models/layers/separable_conv.py @@ -0,0 +1,73 @@ +""" Depthwise Separable Conv Modules + +Basic DWS convs. Other variations of DWS exist with batch norm or activations between the +DW and PW convs such as the Depthwise modules in MobileNetV2 / EfficientNet and Xception. 
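+
+A depthwise separable conv factors a k x k convolution into a k x k per-channel
+(depthwise) conv followed by a 1x1 pointwise conv, cutting parameter count from roughly
+C_in * C_out * k * k to C_in * k * k + C_in * C_out (at channel_multiplier=1.0).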
+ +Hacked together by / Copyright 2020 Ross Wightman +""" +from torch import nn as nn + +from .create_conv2d import create_conv2d +from .create_norm_act import convert_norm_act + + +class SeparableConvBnAct(nn.Module): + """ Separable Conv w/ trailing Norm and Activation + """ + def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False, + channel_multiplier=1.0, pw_kernel_size=1, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, + apply_act=True, drop_block=None): + super(SeparableConvBnAct, self).__init__() + + self.conv_dw = create_conv2d( + in_channels, int(in_channels * channel_multiplier), kernel_size, + stride=stride, dilation=dilation, padding=padding, depthwise=True) + + self.conv_pw = create_conv2d( + int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias) + + norm_act_layer = convert_norm_act(norm_layer, act_layer) + self.bn = norm_act_layer(out_channels, apply_act=apply_act, drop_block=drop_block) + + @property + def in_channels(self): + return self.conv_dw.in_channels + + @property + def out_channels(self): + return self.conv_pw.out_channels + + def forward(self, x): + x = self.conv_dw(x) + x = self.conv_pw(x) + if self.bn is not None: + x = self.bn(x) + return x + + +class SeparableConv2d(nn.Module): + """ Separable Conv + """ + def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False, + channel_multiplier=1.0, pw_kernel_size=1): + super(SeparableConv2d, self).__init__() + + self.conv_dw = create_conv2d( + in_channels, int(in_channels * channel_multiplier), kernel_size, + stride=stride, dilation=dilation, padding=padding, depthwise=True) + + self.conv_pw = create_conv2d( + int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias) + + @property + def in_channels(self): + return self.conv_dw.in_channels + + @property + def out_channels(self): + return self.conv_pw.out_channels + + def forward(self, x): + x = self.conv_dw(x) + x = self.conv_pw(x) + return x diff --git a/timm/models/layers/space_to_depth.py b/timm/models/layers/space_to_depth.py new file mode 100644 index 0000000..a7e8e0b --- /dev/null +++ b/timm/models/layers/space_to_depth.py @@ -0,0 +1,53 @@ +import torch +import torch.nn as nn + + +class SpaceToDepth(nn.Module): + def __init__(self, block_size=4): + super().__init__() + assert block_size == 4 + self.bs = block_size + + def forward(self, x): + N, C, H, W = x.size() + x = x.view(N, C, H // self.bs, self.bs, W // self.bs, self.bs) # (N, C, H//bs, bs, W//bs, bs) + x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, bs, bs, C, H//bs, W//bs) + x = x.view(N, C * (self.bs ** 2), H // self.bs, W // self.bs) # (N, C*bs^2, H//bs, W//bs) + return x + + +@torch.jit.script +class SpaceToDepthJit(object): + def __call__(self, x: torch.Tensor): + # assuming hard-coded that block_size==4 for acceleration + N, C, H, W = x.size() + x = x.view(N, C, H // 4, 4, W // 4, 4) # (N, C, H//bs, bs, W//bs, bs) + x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, bs, bs, C, H//bs, W//bs) + x = x.view(N, C * 16, H // 4, W // 4) # (N, C*bs^2, H//bs, W//bs) + return x + + +class SpaceToDepthModule(nn.Module): + def __init__(self, no_jit=False): + super().__init__() + if not no_jit: + self.op = SpaceToDepthJit() + else: + self.op = SpaceToDepth() + + def forward(self, x): + return self.op(x) + + +class DepthToSpace(nn.Module): + + def __init__(self, block_size): + super().__init__() + self.bs = block_size + + def 
forward(self, x): + N, C, H, W = x.size() + x = x.view(N, self.bs, self.bs, C // (self.bs ** 2), H, W) # (N, bs, bs, C//bs^2, H, W) + x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # (N, C//bs^2, H, bs, W, bs) + x = x.view(N, C // (self.bs ** 2), H * self.bs, W * self.bs) # (N, C//bs^2, H * bs, W * bs) + return x diff --git a/timm/models/layers/split_attn.py b/timm/models/layers/split_attn.py new file mode 100644 index 0000000..dde601b --- /dev/null +++ b/timm/models/layers/split_attn.py @@ -0,0 +1,85 @@ +""" Split Attention Conv2d (for ResNeSt Models) + +Paper: `ResNeSt: Split-Attention Networks` - /https://arxiv.org/abs/2004.08955 + +Adapted from original PyTorch impl at https://github.com/zhanghang1989/ResNeSt + +Modified for torchscript compat, performance, and consistency with timm by Ross Wightman +""" +import torch +import torch.nn.functional as F +from torch import nn + +from .helpers import make_divisible + + +class RadixSoftmax(nn.Module): + def __init__(self, radix, cardinality): + super(RadixSoftmax, self).__init__() + self.radix = radix + self.cardinality = cardinality + + def forward(self, x): + batch = x.size(0) + if self.radix > 1: + x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2) + x = F.softmax(x, dim=1) + x = x.reshape(batch, -1) + else: + x = torch.sigmoid(x) + return x + + +class SplitAttn(nn.Module): + """Split-Attention (aka Splat) + """ + def __init__(self, in_channels, out_channels=None, kernel_size=3, stride=1, padding=None, + dilation=1, groups=1, bias=False, radix=2, rd_ratio=0.25, rd_channels=None, rd_divisor=8, + act_layer=nn.ReLU, norm_layer=None, drop_block=None, **kwargs): + super(SplitAttn, self).__init__() + out_channels = out_channels or in_channels + self.radix = radix + self.drop_block = drop_block + mid_chs = out_channels * radix + if rd_channels is None: + attn_chs = make_divisible(in_channels * radix * rd_ratio, min_value=32, divisor=rd_divisor) + else: + attn_chs = rd_channels * radix + + padding = kernel_size // 2 if padding is None else padding + self.conv = nn.Conv2d( + in_channels, mid_chs, kernel_size, stride, padding, dilation, + groups=groups * radix, bias=bias, **kwargs) + self.bn0 = norm_layer(mid_chs) if norm_layer else nn.Identity() + self.act0 = act_layer(inplace=True) + self.fc1 = nn.Conv2d(out_channels, attn_chs, 1, groups=groups) + self.bn1 = norm_layer(attn_chs) if norm_layer else nn.Identity() + self.act1 = act_layer(inplace=True) + self.fc2 = nn.Conv2d(attn_chs, mid_chs, 1, groups=groups) + self.rsoftmax = RadixSoftmax(radix, groups) + + def forward(self, x): + x = self.conv(x) + x = self.bn0(x) + if self.drop_block is not None: + x = self.drop_block(x) + x = self.act0(x) + + B, RC, H, W = x.shape + if self.radix > 1: + x = x.reshape((B, self.radix, RC // self.radix, H, W)) + x_gap = x.sum(dim=1) + else: + x_gap = x + x_gap = x_gap.mean((2, 3), keepdim=True) + x_gap = self.fc1(x_gap) + x_gap = self.bn1(x_gap) + x_gap = self.act1(x_gap) + x_attn = self.fc2(x_gap) + + x_attn = self.rsoftmax(x_attn).view(B, -1, 1, 1) + if self.radix > 1: + out = (x * x_attn.reshape((B, self.radix, RC // self.radix, 1, 1))).sum(dim=1) + else: + out = x * x_attn + return out.contiguous() diff --git a/timm/models/layers/split_batchnorm.py b/timm/models/layers/split_batchnorm.py new file mode 100644 index 0000000..830781b --- /dev/null +++ b/timm/models/layers/split_batchnorm.py @@ -0,0 +1,75 @@ +""" Split BatchNorm + +A PyTorch BatchNorm layer that splits input batch into N equal parts and passes each through +a separate BN layer. 
The first split is passed through the parent BN layers with weight/bias +keys the same as the original BN. All other splits pass through BN sub-layers under the '.aux_bn' +namespace. + +This allows easily removing the auxiliary BN layers after training to efficiently +achieve the 'Auxiliary BatchNorm' as described in the AdvProp Paper, section 4.2, +'Disentangled Learning via An Auxiliary BN' + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn + + +class SplitBatchNorm2d(torch.nn.BatchNorm2d): + + def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, + track_running_stats=True, num_splits=2): + super().__init__(num_features, eps, momentum, affine, track_running_stats) + assert num_splits > 1, 'Should have at least one aux BN layer (num_splits at least 2)' + self.num_splits = num_splits + self.aux_bn = nn.ModuleList([ + nn.BatchNorm2d(num_features, eps, momentum, affine, track_running_stats) for _ in range(num_splits - 1)]) + + def forward(self, input: torch.Tensor): + if self.training: # aux BN only relevant while training + split_size = input.shape[0] // self.num_splits + assert input.shape[0] == split_size * self.num_splits, "batch size must be evenly divisible by num_splits" + split_input = input.split(split_size) + x = [super().forward(split_input[0])] + for i, a in enumerate(self.aux_bn): + x.append(a(split_input[i + 1])) + return torch.cat(x, dim=0) + else: + return super().forward(input) + + +def convert_splitbn_model(module, num_splits=2): + """ + Recursively traverse module and its children to replace all instances of + ``torch.nn.modules.batchnorm._BatchNorm`` with `SplitBatchnorm2d`. + Args: + module (torch.nn.Module): input module + num_splits: number of separate batchnorm layers to split input across + Example:: + >>> # model is an instance of torch.nn.Module + >>> model = timm.models.convert_splitbn_model(model, num_splits=2) + """ + mod = module + if isinstance(module, torch.nn.modules.instancenorm._InstanceNorm): + return module + if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): + mod = SplitBatchNorm2d( + module.num_features, module.eps, module.momentum, module.affine, + module.track_running_stats, num_splits=num_splits) + mod.running_mean = module.running_mean + mod.running_var = module.running_var + mod.num_batches_tracked = module.num_batches_tracked + if module.affine: + mod.weight.data = module.weight.data.clone().detach() + mod.bias.data = module.bias.data.clone().detach() + for aux in mod.aux_bn: + aux.running_mean = module.running_mean.clone() + aux.running_var = module.running_var.clone() + aux.num_batches_tracked = module.num_batches_tracked.clone() + if module.affine: + aux.weight.data = module.weight.data.clone().detach() + aux.bias.data = module.bias.data.clone().detach() + for name, child in module.named_children(): + mod.add_module(name, convert_splitbn_model(child, num_splits=num_splits)) + del module + return mod diff --git a/timm/models/layers/squeeze_excite.py b/timm/models/layers/squeeze_excite.py new file mode 100644 index 0000000..e5da29e --- /dev/null +++ b/timm/models/layers/squeeze_excite.py @@ -0,0 +1,74 @@ +""" Squeeze-and-Excitation Channel Attention + +An SE implementation originally based on PyTorch SE-Net impl. +Has since evolved with additional functionality / configuration. + +Paper: `Squeeze-and-Excitation Networks` - https://arxiv.org/abs/1709.01507 + +Also included is Effective Squeeze-Excitation (ESE). 
+Paper: `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 + +Hacked together by / Copyright 2021 Ross Wightman +""" +from torch import nn as nn + +from .create_act import create_act_layer +from .helpers import make_divisible + + +class SEModule(nn.Module): + """ SE Module as defined in original SE-Nets with a few additions + Additions include: + * divisor can be specified to keep channels % div == 0 (default: 8) + * reduction channels can be specified directly by arg (if rd_channels is set) + * reduction channels can be specified by float rd_ratio (default: 1/16) + * global max pooling can be added to the squeeze aggregation + * customizable activation, normalization, and gate layer + """ + def __init__( + self, channels, rd_ratio=1. / 16, rd_channels=None, rd_divisor=8, add_maxpool=False, + act_layer=nn.ReLU, norm_layer=None, gate_layer='sigmoid'): + super(SEModule, self).__init__() + self.add_maxpool = add_maxpool + if not rd_channels: + rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) + self.fc1 = nn.Conv2d(channels, rd_channels, kernel_size=1, bias=True) + self.bn = norm_layer(rd_channels) if norm_layer else nn.Identity() + self.act = create_act_layer(act_layer, inplace=True) + self.fc2 = nn.Conv2d(rd_channels, channels, kernel_size=1, bias=True) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_se = x.mean((2, 3), keepdim=True) + if self.add_maxpool: + # experimental codepath, may remove or change + x_se = 0.5 * x_se + 0.5 * x.amax((2, 3), keepdim=True) + x_se = self.fc1(x_se) + x_se = self.act(self.bn(x_se)) + x_se = self.fc2(x_se) + return x * self.gate(x_se) + + +SqueezeExcite = SEModule # alias + + +class EffectiveSEModule(nn.Module): + """ 'Effective Squeeze-Excitation + From `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 + """ + def __init__(self, channels, add_maxpool=False, gate_layer='hard_sigmoid', **_): + super(EffectiveSEModule, self).__init__() + self.add_maxpool = add_maxpool + self.fc = nn.Conv2d(channels, channels, kernel_size=1, padding=0) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_se = x.mean((2, 3), keepdim=True) + if self.add_maxpool: + # experimental codepath, may remove or change + x_se = 0.5 * x_se + 0.5 * x.amax((2, 3), keepdim=True) + x_se = self.fc(x_se) + return x * self.gate(x_se) + + +EffectiveSqueezeExcite = EffectiveSEModule # alias diff --git a/timm/models/layers/std_conv.py b/timm/models/layers/std_conv.py new file mode 100644 index 0000000..d896ba5 --- /dev/null +++ b/timm/models/layers/std_conv.py @@ -0,0 +1,133 @@ +""" Convolution with Weight Standardization (StdConv and ScaledStdConv) + +StdConv: +@article{weightstandardization, + author = {Siyuan Qiao and Huiyu Wang and Chenxi Liu and Wei Shen and Alan Yuille}, + title = {Weight Standardization}, + journal = {arXiv preprint arXiv:1903.10520}, + year = {2019}, +} +Code: https://github.com/joe-siyuan-qiao/WeightStandardization + +ScaledStdConv: +Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 +Official Deepmind JAX code: https://github.com/deepmind/deepmind-research/tree/master/nfnets + +Hacked together by / copyright Ross Wightman, 2021. 
+""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .padding import get_padding, get_padding_value, pad_same + + +class StdConv2d(nn.Conv2d): + """Conv2d with Weight Standardization. Used for BiT ResNet-V2 models. + + Paper: `Micro-Batch Training with Batch-Channel Normalization and Weight Standardization` - + https://arxiv.org/abs/1903.10520v2 + """ + def __init__( + self, in_channel, out_channels, kernel_size, stride=1, padding=None, + dilation=1, groups=1, bias=False, eps=1e-6): + if padding is None: + padding = get_padding(kernel_size, stride, dilation) + super().__init__( + in_channel, out_channels, kernel_size, stride=stride, + padding=padding, dilation=dilation, groups=groups, bias=bias) + self.eps = eps + + def forward(self, x): + weight = F.batch_norm( + self.weight.reshape(1, self.out_channels, -1), None, None, + training=True, momentum=0., eps=self.eps).reshape_as(self.weight) + x = F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + return x + + +class StdConv2dSame(nn.Conv2d): + """Conv2d with Weight Standardization. TF compatible SAME padding. Used for ViT Hybrid model. + + Paper: `Micro-Batch Training with Batch-Channel Normalization and Weight Standardization` - + https://arxiv.org/abs/1903.10520v2 + """ + def __init__( + self, in_channel, out_channels, kernel_size, stride=1, padding='SAME', + dilation=1, groups=1, bias=False, eps=1e-6): + padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation) + super().__init__( + in_channel, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, + groups=groups, bias=bias) + self.same_pad = is_dynamic + self.eps = eps + + def forward(self, x): + if self.same_pad: + x = pad_same(x, self.kernel_size, self.stride, self.dilation) + weight = F.batch_norm( + self.weight.reshape(1, self.out_channels, -1), None, None, + training=True, momentum=0., eps=self.eps).reshape_as(self.weight) + x = F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + return x + + +class ScaledStdConv2d(nn.Conv2d): + """Conv2d layer with Scaled Weight Standardization. + + Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - + https://arxiv.org/abs/2101.08692 + + NOTE: the operations used in this impl differ slightly from the DeepMind Haiku impl. The impact is minor. 
+ """ + + def __init__( + self, in_channels, out_channels, kernel_size, stride=1, padding=None, + dilation=1, groups=1, bias=True, gamma=1.0, eps=1e-6, gain_init=1.0): + if padding is None: + padding = get_padding(kernel_size, stride, dilation) + super().__init__( + in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, + groups=groups, bias=bias) + self.gain = nn.Parameter(torch.full((self.out_channels, 1, 1, 1), gain_init)) + self.scale = gamma * self.weight[0].numel() ** -0.5 # gamma * 1 / sqrt(fan-in) + self.eps = eps + + def forward(self, x): + weight = F.batch_norm( + self.weight.reshape(1, self.out_channels, -1), None, None, + weight=(self.gain * self.scale).view(-1), + training=True, momentum=0., eps=self.eps).reshape_as(self.weight) + return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + + +class ScaledStdConv2dSame(nn.Conv2d): + """Conv2d layer with Scaled Weight Standardization and Tensorflow-like SAME padding support + + Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - + https://arxiv.org/abs/2101.08692 + + NOTE: the operations used in this impl differ slightly from the DeepMind Haiku impl. The impact is minor. + """ + + def __init__( + self, in_channels, out_channels, kernel_size, stride=1, padding='SAME', + dilation=1, groups=1, bias=True, gamma=1.0, eps=1e-6, gain_init=1.0): + padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation) + super().__init__( + in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, + groups=groups, bias=bias) + self.gain = nn.Parameter(torch.full((self.out_channels, 1, 1, 1), gain_init)) + self.scale = gamma * self.weight[0].numel() ** -0.5 + self.same_pad = is_dynamic + self.eps = eps + + def forward(self, x): + if self.same_pad: + x = pad_same(x, self.kernel_size, self.stride, self.dilation) + weight = F.batch_norm( + self.weight.reshape(1, self.out_channels, -1), None, None, + weight=(self.gain * self.scale).view(-1), + training=True, momentum=0., eps=self.eps).reshape_as(self.weight) + return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) diff --git a/timm/models/layers/test_time_pool.py b/timm/models/layers/test_time_pool.py new file mode 100644 index 0000000..98c0bf5 --- /dev/null +++ b/timm/models/layers/test_time_pool.py @@ -0,0 +1,52 @@ +""" Test Time Pooling (Average-Max Pool) + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import logging +from torch import nn +import torch.nn.functional as F + +from .adaptive_avgmax_pool import adaptive_avgmax_pool2d + + +_logger = logging.getLogger(__name__) + + +class TestTimePoolHead(nn.Module): + def __init__(self, base, original_pool=7): + super(TestTimePoolHead, self).__init__() + self.base = base + self.original_pool = original_pool + base_fc = self.base.get_classifier() + if isinstance(base_fc, nn.Conv2d): + self.fc = base_fc + else: + self.fc = nn.Conv2d( + self.base.num_features, self.base.num_classes, kernel_size=1, bias=True) + self.fc.weight.data.copy_(base_fc.weight.data.view(self.fc.weight.size())) + self.fc.bias.data.copy_(base_fc.bias.data.view(self.fc.bias.size())) + self.base.reset_classifier(0) # delete original fc layer + + def forward(self, x): + x = self.base.forward_features(x) + x = F.avg_pool2d(x, kernel_size=self.original_pool, stride=1) + x = self.fc(x) + x = adaptive_avgmax_pool2d(x, 1) + return x.view(x.size(0), -1) + + +def 
apply_test_time_pool(model, config, use_test_size=True): + test_time_pool = False + if not hasattr(model, 'default_cfg') or not model.default_cfg: + return model, False + if use_test_size and 'test_input_size' in model.default_cfg: + df_input_size = model.default_cfg['test_input_size'] + else: + df_input_size = model.default_cfg['input_size'] + if config['input_size'][-1] > df_input_size[-1] and config['input_size'][-2] > df_input_size[-2]: + _logger.info('Target input size %s > pretrained default %s, using test time pooling' % + (str(config['input_size'][-2:]), str(df_input_size[-2:]))) + model = TestTimePoolHead(model, original_pool=model.default_cfg['pool_size']) + test_time_pool = True + return model, test_time_pool diff --git a/timm/models/layers/trace_utils.py b/timm/models/layers/trace_utils.py new file mode 100644 index 0000000..8397072 --- /dev/null +++ b/timm/models/layers/trace_utils.py @@ -0,0 +1,13 @@ +try: + from torch import _assert +except ImportError: + def _assert(condition: bool, message: str): + assert condition, message + + +def _float_to_int(x: float) -> int: + """ + Symbolic tracing helper to substitute for inbuilt `int`. + Hint: Inbuilt `int` can't accept an argument of type `Proxy` + """ + return int(x) diff --git a/timm/models/layers/weight_init.py b/timm/models/layers/weight_init.py new file mode 100644 index 0000000..305a2fd --- /dev/null +++ b/timm/models/layers/weight_init.py @@ -0,0 +1,89 @@ +import torch +import math +import warnings + +from torch.nn.init import _calculate_fan_in_and_fan_out + + +def _no_grad_trunc_normal_(tensor, mean, std, a, b): + # Cut & paste from PyTorch official master until it's in a few official releases - RW + # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf + def norm_cdf(x): + # Computes standard normal cumulative distribution function + return (1. + math.erf(x / math.sqrt(2.))) / 2. + + if (mean < a - 2 * std) or (mean > b + 2 * std): + warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " + "The distribution of values may be incorrect.", + stacklevel=2) + + with torch.no_grad(): + # Values are generated by using a truncated uniform distribution and + # then using the inverse CDF for the normal distribution. + # Get upper and lower cdf values + l = norm_cdf((a - mean) / std) + u = norm_cdf((b - mean) / std) + + # Uniformly fill tensor with values from [l, u], then translate to + # [2l-1, 2u-1]. + tensor.uniform_(2 * l - 1, 2 * u - 1) + + # Use inverse cdf transform for normal distribution to get truncated + # standard normal + tensor.erfinv_() + + # Transform to proper mean, std + tensor.mul_(std * math.sqrt(2.)) + tensor.add_(mean) + + # Clamp to ensure it's in the proper range + tensor.clamp_(min=a, max=b) + return tensor + + +def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.): + # type: (Tensor, float, float, float, float) -> Tensor + r"""Fills the input Tensor with values drawn from a truncated + normal distribution. The values are effectively drawn from the + normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` + with values outside :math:`[a, b]` redrawn until they are within + the bounds. The method used for generating the random values works + best when :math:`a \leq \text{mean} \leq b`. 
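+
+    NOTE: samples are drawn with an inverse-CDF transform (uniform -> erfinv) over
+    the truncated interval rather than by rejection sampling, so the cost does not
+    grow when the bounds are far from the mean.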
+    Args:
+        tensor: an n-dimensional `torch.Tensor`
+        mean: the mean of the normal distribution
+        std: the standard deviation of the normal distribution
+        a: the minimum cutoff value
+        b: the maximum cutoff value
+    Examples:
+        >>> w = torch.empty(3, 5)
+        >>> nn.init.trunc_normal_(w)
+    """
+    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
+
+
+def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):
+    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
+    if mode == 'fan_in':
+        denom = fan_in
+    elif mode == 'fan_out':
+        denom = fan_out
+    elif mode == 'fan_avg':
+        denom = (fan_in + fan_out) / 2
+    else:
+        raise ValueError(f"invalid mode {mode}")
+
+    variance = scale / denom
+
+    if distribution == "truncated_normal":
+        # constant is stddev of standard normal truncated to (-2, 2)
+        trunc_normal_(tensor, std=math.sqrt(variance) / .87962566103423978)
+    elif distribution == "normal":
+        tensor.normal_(std=math.sqrt(variance))
+    elif distribution == "uniform":
+        bound = math.sqrt(3 * variance)
+        tensor.uniform_(-bound, bound)
+    else:
+        raise ValueError(f"invalid distribution {distribution}")
+
+
+def lecun_normal_(tensor):
+    variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal')
diff --git a/timm/models/levit.py b/timm/models/levit.py
new file mode 100644
index 0000000..9987e4b
--- /dev/null
+++ b/timm/models/levit.py
@@ -0,0 +1,563 @@
+""" LeViT
+
+Paper: `LeViT: a Vision Transformer in ConvNet's Clothing for Faster Inference`
+    - https://arxiv.org/abs/2104.01136
+
+@article{graham2021levit,
+  title={LeViT: a Vision Transformer in ConvNet's Clothing for Faster Inference},
+  author={Benjamin Graham and Alaaeldin El-Nouby and Hugo Touvron and Pierre Stock and Armand Joulin and Herv\'e J\'egou and Matthijs Douze},
+  journal={arXiv preprint arXiv:2104.01136},
+  year={2021}
+}
+
+Adapted from the official impl at https://github.com/facebookresearch/LeViT, original copyright below.
+
+This version combines both conv/linear models and fixes torchscript compatibility.
+
+Modifications by / Copyright 2021 Ross Wightman
+"""
+
+# Copyright (c) 2015-present, Facebook, Inc.
+# All rights reserved.
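+
+# A minimal usage sketch (assuming the bundled timm package is importable;
+# pretrained=True would additionally download weights over the network):
+#
+#   >>> import timm
+#   >>> model = timm.create_model('levit_128s', pretrained=False)
+#   >>> model = model.eval()  # eval-mode forward() returns the averaged head/head_dist logits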
+ +# Modified from +# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py +# Copyright 2020 Ross Wightman, Apache-2.0 License +import itertools +from copy import deepcopy +from functools import partial +from typing import Dict + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_STD, IMAGENET_DEFAULT_MEAN +from .helpers import build_model_with_cfg, overlay_external_default_cfg +from .layers import to_ntuple, get_act_layer +from .vision_transformer import trunc_normal_ +from .registry import register_model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.0.c', 'classifier': ('head.l', 'head_dist.l'), + **kwargs + } + + +default_cfgs = dict( + levit_128s=_cfg( + url='https://dl.fbaipublicfiles.com/LeViT/LeViT-128S-96703c44.pth' + ), + levit_128=_cfg( + url='https://dl.fbaipublicfiles.com/LeViT/LeViT-128-b88c2750.pth' + ), + levit_192=_cfg( + url='https://dl.fbaipublicfiles.com/LeViT/LeViT-192-92712e41.pth' + ), + levit_256=_cfg( + url='https://dl.fbaipublicfiles.com/LeViT/LeViT-256-13b5763e.pth' + ), + levit_384=_cfg( + url='https://dl.fbaipublicfiles.com/LeViT/LeViT-384-9bdaf2e2.pth' + ), +) + +model_cfgs = dict( + levit_128s=dict( + embed_dim=(128, 256, 384), key_dim=16, num_heads=(4, 6, 8), depth=(2, 3, 4)), + levit_128=dict( + embed_dim=(128, 256, 384), key_dim=16, num_heads=(4, 8, 12), depth=(4, 4, 4)), + levit_192=dict( + embed_dim=(192, 288, 384), key_dim=32, num_heads=(3, 5, 6), depth=(4, 4, 4)), + levit_256=dict( + embed_dim=(256, 384, 512), key_dim=32, num_heads=(4, 6, 8), depth=(4, 4, 4)), + levit_384=dict( + embed_dim=(384, 512, 768), key_dim=32, num_heads=(6, 9, 12), depth=(4, 4, 4)), +) + +__all__ = ['Levit'] + + +@register_model +def levit_128s(pretrained=False, use_conv=False, **kwargs): + return create_levit( + 'levit_128s', pretrained=pretrained, use_conv=use_conv, **kwargs) + + +@register_model +def levit_128(pretrained=False, use_conv=False, **kwargs): + return create_levit( + 'levit_128', pretrained=pretrained, use_conv=use_conv, **kwargs) + + +@register_model +def levit_192(pretrained=False, use_conv=False, **kwargs): + return create_levit( + 'levit_192', pretrained=pretrained, use_conv=use_conv, **kwargs) + + +@register_model +def levit_256(pretrained=False, use_conv=False, **kwargs): + return create_levit( + 'levit_256', pretrained=pretrained, use_conv=use_conv, **kwargs) + + +@register_model +def levit_384(pretrained=False, use_conv=False, **kwargs): + return create_levit( + 'levit_384', pretrained=pretrained, use_conv=use_conv, **kwargs) + + +class ConvNorm(nn.Sequential): + def __init__( + self, a, b, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1, resolution=-10000): + super().__init__() + self.add_module('c', nn.Conv2d(a, b, ks, stride, pad, dilation, groups, bias=False)) + bn = nn.BatchNorm2d(b) + nn.init.constant_(bn.weight, bn_weight_init) + nn.init.constant_(bn.bias, 0) + self.add_module('bn', bn) + + @torch.no_grad() + def fuse(self): + c, bn = self._modules.values() + w = bn.weight / (bn.running_var + bn.eps) ** 0.5 + w = c.weight * w[:, None, None, None] + b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5 + m = nn.Conv2d( + w.size(1), w.size(0), w.shape[2:], stride=self.c.stride, + padding=self.c.padding, 
dilation=self.c.dilation, groups=self.c.groups) + m.weight.data.copy_(w) + m.bias.data.copy_(b) + return m + + +class LinearNorm(nn.Sequential): + def __init__(self, a, b, bn_weight_init=1, resolution=-100000): + super().__init__() + self.add_module('c', nn.Linear(a, b, bias=False)) + bn = nn.BatchNorm1d(b) + nn.init.constant_(bn.weight, bn_weight_init) + nn.init.constant_(bn.bias, 0) + self.add_module('bn', bn) + + @torch.no_grad() + def fuse(self): + l, bn = self._modules.values() + w = bn.weight / (bn.running_var + bn.eps) ** 0.5 + w = l.weight * w[:, None] + b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5 + m = nn.Linear(w.size(1), w.size(0)) + m.weight.data.copy_(w) + m.bias.data.copy_(b) + return m + + def forward(self, x): + x = self.c(x) + return self.bn(x.flatten(0, 1)).reshape_as(x) + + +class NormLinear(nn.Sequential): + def __init__(self, a, b, bias=True, std=0.02): + super().__init__() + self.add_module('bn', nn.BatchNorm1d(a)) + l = nn.Linear(a, b, bias=bias) + trunc_normal_(l.weight, std=std) + if bias: + nn.init.constant_(l.bias, 0) + self.add_module('l', l) + + @torch.no_grad() + def fuse(self): + bn, l = self._modules.values() + w = bn.weight / (bn.running_var + bn.eps) ** 0.5 + b = bn.bias - self.bn.running_mean * self.bn.weight / (bn.running_var + bn.eps) ** 0.5 + w = l.weight * w[None, :] + if l.bias is None: + b = b @ self.l.weight.T + else: + b = (l.weight @ b[:, None]).view(-1) + self.l.bias + m = nn.Linear(w.size(1), w.size(0)) + m.weight.data.copy_(w) + m.bias.data.copy_(b) + return m + + +def stem_b16(in_chs, out_chs, activation, resolution=224): + return nn.Sequential( + ConvNorm(in_chs, out_chs // 8, 3, 2, 1, resolution=resolution), + activation(), + ConvNorm(out_chs // 8, out_chs // 4, 3, 2, 1, resolution=resolution // 2), + activation(), + ConvNorm(out_chs // 4, out_chs // 2, 3, 2, 1, resolution=resolution // 4), + activation(), + ConvNorm(out_chs // 2, out_chs, 3, 2, 1, resolution=resolution // 8)) + + +class Residual(nn.Module): + def __init__(self, m, drop): + super().__init__() + self.m = m + self.drop = drop + + def forward(self, x): + if self.training and self.drop > 0: + return x + self.m(x) * torch.rand( + x.size(0), 1, 1, device=x.device).ge_(self.drop).div(1 - self.drop).detach() + else: + return x + self.m(x) + + +class Subsample(nn.Module): + def __init__(self, stride, resolution): + super().__init__() + self.stride = stride + self.resolution = resolution + + def forward(self, x): + B, N, C = x.shape + x = x.view(B, self.resolution, self.resolution, C)[:, ::self.stride, ::self.stride] + return x.reshape(B, -1, C) + + +class Attention(nn.Module): + ab: Dict[str, torch.Tensor] + + def __init__( + self, dim, key_dim, num_heads=8, attn_ratio=4, act_layer=None, resolution=14, use_conv=False): + super().__init__() + + self.num_heads = num_heads + self.scale = key_dim ** -0.5 + self.key_dim = key_dim + self.nh_kd = nh_kd = key_dim * num_heads + self.d = int(attn_ratio * key_dim) + self.dh = int(attn_ratio * key_dim) * num_heads + self.attn_ratio = attn_ratio + self.use_conv = use_conv + ln_layer = ConvNorm if self.use_conv else LinearNorm + h = self.dh + nh_kd * 2 + self.qkv = ln_layer(dim, h, resolution=resolution) + self.proj = nn.Sequential( + act_layer(), + ln_layer(self.dh, dim, bn_weight_init=0, resolution=resolution)) + + points = list(itertools.product(range(resolution), range(resolution))) + N = len(points) + attention_offsets = {} + idxs = [] + for p1 in points: + for p2 in points: + offset = (abs(p1[0] - p2[0]), 
abs(p1[1] - p2[1])) + if offset not in attention_offsets: + attention_offsets[offset] = len(attention_offsets) + idxs.append(attention_offsets[offset]) + self.attention_biases = nn.Parameter(torch.zeros(num_heads, len(attention_offsets))) + self.register_buffer('attention_bias_idxs', torch.LongTensor(idxs).view(N, N)) + self.ab = {} + + @torch.no_grad() + def train(self, mode=True): + super().train(mode) + if mode and self.ab: + self.ab = {} # clear ab cache + + def get_attention_biases(self, device: torch.device) -> torch.Tensor: + if self.training: + return self.attention_biases[:, self.attention_bias_idxs] + else: + device_key = str(device) + if device_key not in self.ab: + self.ab[device_key] = self.attention_biases[:, self.attention_bias_idxs] + return self.ab[device_key] + + def forward(self, x): # x (B,C,H,W) + if self.use_conv: + B, C, H, W = x.shape + q, k, v = self.qkv(x).view(B, self.num_heads, -1, H * W).split([self.key_dim, self.key_dim, self.d], dim=2) + + attn = (q.transpose(-2, -1) @ k) * self.scale + self.get_attention_biases(x.device) + attn = attn.softmax(dim=-1) + + x = (v @ attn.transpose(-2, -1)).view(B, -1, H, W) + else: + B, N, C = x.shape + qkv = self.qkv(x) + q, k, v = qkv.view(B, N, self.num_heads, -1).split([self.key_dim, self.key_dim, self.d], dim=3) + q = q.permute(0, 2, 1, 3) + k = k.permute(0, 2, 1, 3) + v = v.permute(0, 2, 1, 3) + + attn = q @ k.transpose(-2, -1) * self.scale + self.get_attention_biases(x.device) + attn = attn.softmax(dim=-1) + + x = (attn @ v).transpose(1, 2).reshape(B, N, self.dh) + x = self.proj(x) + return x + + +class AttentionSubsample(nn.Module): + ab: Dict[str, torch.Tensor] + + def __init__( + self, in_dim, out_dim, key_dim, num_heads=8, attn_ratio=2, + act_layer=None, stride=2, resolution=14, resolution_=7, use_conv=False): + super().__init__() + self.num_heads = num_heads + self.scale = key_dim ** -0.5 + self.key_dim = key_dim + self.nh_kd = nh_kd = key_dim * num_heads + self.d = int(attn_ratio * key_dim) + self.dh = self.d * self.num_heads + self.attn_ratio = attn_ratio + self.resolution_ = resolution_ + self.resolution_2 = resolution_ ** 2 + self.use_conv = use_conv + if self.use_conv: + ln_layer = ConvNorm + sub_layer = partial(nn.AvgPool2d, kernel_size=1, padding=0) + else: + ln_layer = LinearNorm + sub_layer = partial(Subsample, resolution=resolution) + + h = self.dh + nh_kd + self.kv = ln_layer(in_dim, h, resolution=resolution) + self.q = nn.Sequential( + sub_layer(stride=stride), + ln_layer(in_dim, nh_kd, resolution=resolution_)) + self.proj = nn.Sequential( + act_layer(), + ln_layer(self.dh, out_dim, resolution=resolution_)) + + self.stride = stride + self.resolution = resolution + points = list(itertools.product(range(resolution), range(resolution))) + points_ = list(itertools.product(range(resolution_), range(resolution_))) + N = len(points) + N_ = len(points_) + attention_offsets = {} + idxs = [] + for p1 in points_: + for p2 in points: + size = 1 + offset = ( + abs(p1[0] * stride - p2[0] + (size - 1) / 2), + abs(p1[1] * stride - p2[1] + (size - 1) / 2)) + if offset not in attention_offsets: + attention_offsets[offset] = len(attention_offsets) + idxs.append(attention_offsets[offset]) + self.attention_biases = nn.Parameter(torch.zeros(num_heads, len(attention_offsets))) + self.register_buffer('attention_bias_idxs', torch.LongTensor(idxs).view(N_, N)) + self.ab = {} # per-device attention_biases cache + + @torch.no_grad() + def train(self, mode=True): + super().train(mode) + if mode and self.ab: + self.ab = {} # clear ab 
cache + + def get_attention_biases(self, device: torch.device) -> torch.Tensor: + if self.training: + return self.attention_biases[:, self.attention_bias_idxs] + else: + device_key = str(device) + if device_key not in self.ab: + self.ab[device_key] = self.attention_biases[:, self.attention_bias_idxs] + return self.ab[device_key] + + def forward(self, x): + if self.use_conv: + B, C, H, W = x.shape + k, v = self.kv(x).view(B, self.num_heads, -1, H * W).split([self.key_dim, self.d], dim=2) + q = self.q(x).view(B, self.num_heads, self.key_dim, self.resolution_2) + + attn = (q.transpose(-2, -1) @ k) * self.scale + self.get_attention_biases(x.device) + attn = attn.softmax(dim=-1) + + x = (v @ attn.transpose(-2, -1)).reshape(B, -1, self.resolution_, self.resolution_) + else: + B, N, C = x.shape + k, v = self.kv(x).view(B, N, self.num_heads, -1).split([self.key_dim, self.d], dim=3) + k = k.permute(0, 2, 1, 3) # BHNC + v = v.permute(0, 2, 1, 3) # BHNC + q = self.q(x).view(B, self.resolution_2, self.num_heads, self.key_dim).permute(0, 2, 1, 3) + + attn = q @ k.transpose(-2, -1) * self.scale + self.get_attention_biases(x.device) + attn = attn.softmax(dim=-1) + + x = (attn @ v).transpose(1, 2).reshape(B, -1, self.dh) + x = self.proj(x) + return x + + +class Levit(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage + + NOTE: distillation is defaulted to True since pretrained weights use it, will cause problems + w/ train scripts that don't take tuple outputs, + """ + + def __init__( + self, + img_size=224, + patch_size=16, + in_chans=3, + num_classes=1000, + embed_dim=(192,), + key_dim=64, + depth=(12,), + num_heads=(3,), + attn_ratio=2, + mlp_ratio=2, + hybrid_backbone=None, + down_ops=None, + act_layer='hard_swish', + attn_act_layer='hard_swish', + distillation=True, + use_conv=False, + drop_rate=0., + drop_path_rate=0.): + super().__init__() + act_layer = get_act_layer(act_layer) + attn_act_layer = get_act_layer(attn_act_layer) + if isinstance(img_size, tuple): + # FIXME origin impl passes single img/res dim through whole hierarchy, + # not sure this model will be used enough to spend time fixing it. 
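+            # Until that is reworked, a tuple img_size is only supported when square,
+            # since the attention bias tables below are built from a single resolution value.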
+ assert img_size[0] == img_size[1] + img_size = img_size[0] + self.num_classes = num_classes + self.num_features = embed_dim[-1] + self.embed_dim = embed_dim + N = len(embed_dim) + assert len(depth) == len(num_heads) == N + key_dim = to_ntuple(N)(key_dim) + attn_ratio = to_ntuple(N)(attn_ratio) + mlp_ratio = to_ntuple(N)(mlp_ratio) + down_ops = down_ops or ( + # ('Subsample',key_dim, num_heads, attn_ratio, mlp_ratio, stride) + ('Subsample', key_dim[0], embed_dim[0] // key_dim[0], 4, 2, 2), + ('Subsample', key_dim[0], embed_dim[1] // key_dim[1], 4, 2, 2), + ('',) + ) + self.distillation = distillation + self.use_conv = use_conv + ln_layer = ConvNorm if self.use_conv else LinearNorm + + self.patch_embed = hybrid_backbone or stem_b16(in_chans, embed_dim[0], activation=act_layer) + + self.blocks = [] + resolution = img_size // patch_size + for i, (ed, kd, dpth, nh, ar, mr, do) in enumerate( + zip(embed_dim, key_dim, depth, num_heads, attn_ratio, mlp_ratio, down_ops)): + for _ in range(dpth): + self.blocks.append( + Residual( + Attention( + ed, kd, nh, attn_ratio=ar, act_layer=attn_act_layer, + resolution=resolution, use_conv=use_conv), + drop_path_rate)) + if mr > 0: + h = int(ed * mr) + self.blocks.append( + Residual(nn.Sequential( + ln_layer(ed, h, resolution=resolution), + act_layer(), + ln_layer(h, ed, bn_weight_init=0, resolution=resolution), + ), drop_path_rate)) + if do[0] == 'Subsample': + # ('Subsample',key_dim, num_heads, attn_ratio, mlp_ratio, stride) + resolution_ = (resolution - 1) // do[5] + 1 + self.blocks.append( + AttentionSubsample( + *embed_dim[i:i + 2], key_dim=do[1], num_heads=do[2], + attn_ratio=do[3], act_layer=attn_act_layer, stride=do[5], + resolution=resolution, resolution_=resolution_, use_conv=use_conv)) + resolution = resolution_ + if do[4] > 0: # mlp_ratio + h = int(embed_dim[i + 1] * do[4]) + self.blocks.append( + Residual(nn.Sequential( + ln_layer(embed_dim[i + 1], h, resolution=resolution), + act_layer(), + ln_layer(h, embed_dim[i + 1], bn_weight_init=0, resolution=resolution), + ), drop_path_rate)) + self.blocks = nn.Sequential(*self.blocks) + + # Classifier head + self.head = NormLinear(embed_dim[-1], num_classes) if num_classes > 0 else nn.Identity() + self.head_dist = None + if distillation: + self.head_dist = NormLinear(embed_dim[-1], num_classes) if num_classes > 0 else nn.Identity() + + @torch.jit.ignore + def no_weight_decay(self): + return {x for x in self.state_dict().keys() if 'attention_biases' in x} + + def get_classifier(self): + if self.head_dist is None: + return self.head + else: + return self.head, self.head_dist + + def reset_classifier(self, num_classes, global_pool='', distillation=None): + self.num_classes = num_classes + self.head = NormLinear(self.embed_dim[-1], num_classes) if num_classes > 0 else nn.Identity() + if distillation is not None: + self.distillation = distillation + if self.distillation: + self.head_dist = NormLinear(self.embed_dim[-1], num_classes) if num_classes > 0 else nn.Identity() + else: + self.head_dist = None + + def forward_features(self, x): + x = self.patch_embed(x) + if not self.use_conv: + x = x.flatten(2).transpose(1, 2) + x = self.blocks(x) + x = x.mean((-2, -1)) if self.use_conv else x.mean(1) + return x + + def forward(self, x): + x = self.forward_features(x) + if self.head_dist is not None: + x, x_dist = self.head(x), self.head_dist(x) + if self.training and not torch.jit.is_scripting(): + return x, x_dist + else: + # during inference, return the average of both classifier predictions + return (x + 
x_dist) / 2
+        else:
+            x = self.head(x)
+        return x
+
+
+def checkpoint_filter_fn(state_dict, model):
+    if 'model' in state_dict:
+        # For deit models
+        state_dict = state_dict['model']
+    D = model.state_dict()
+    for k in state_dict.keys():
+        if k in D and D[k].ndim == 4 and state_dict[k].ndim == 2:
+            state_dict[k] = state_dict[k][:, :, None, None]
+    return state_dict
+
+
+def create_levit(variant, pretrained=False, default_cfg=None, fuse=False, **kwargs):
+    if kwargs.get('features_only', None):
+        raise RuntimeError('features_only not implemented for Vision Transformer models.')
+
+    model_cfg = dict(**model_cfgs[variant], **kwargs)
+    model = build_model_with_cfg(
+        Levit, variant, pretrained,
+        default_cfg=default_cfgs[variant],
+        pretrained_filter_fn=checkpoint_filter_fn,
+        **model_cfg)
+    #if fuse:
+    #    utils.replace_batchnorm(model)
+    return model
+
diff --git a/timm/models/mlp_mixer.py b/timm/models/mlp_mixer.py
new file mode 100644
index 0000000..727b655
--- /dev/null
+++ b/timm/models/mlp_mixer.py
@@ -0,0 +1,659 @@
+""" MLP-Mixer, ResMLP, and gMLP in PyTorch
+
+This impl is originally based on the MLP-Mixer paper.
+
+Official JAX impl: https://github.com/google-research/vision_transformer/blob/linen/vit_jax/models_mixer.py
+
+Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
+
+@article{tolstikhin2021,
+  title={MLP-Mixer: An all-MLP Architecture for Vision},
+  author={Tolstikhin, Ilya and Houlsby, Neil and Kolesnikov, Alexander and Beyer, Lucas and Zhai, Xiaohua and Unterthiner,
+      Thomas and Yung, Jessica and Keysers, Daniel and Uszkoreit, Jakob and Lucic, Mario and Dosovitskiy, Alexey},
+  journal={arXiv preprint arXiv:2105.01601},
+  year={2021}
+}
+
+Also supports ResMLP and a preliminary (not verified) implementation of gMLP.
+
+Code: https://github.com/facebookresearch/deit
+Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404
+@misc{touvron2021resmlp,
+  title={ResMLP: Feedforward networks for image classification with data-efficient training},
+  author={Hugo Touvron and Piotr Bojanowski and Mathilde Caron and Matthieu Cord and Alaaeldin El-Nouby and
+      Edouard Grave and Armand Joulin and Gabriel Synnaeve and Jakob Verbeek and Hervé Jégou},
+  year={2021},
+  eprint={2105.03404},
+}
+
+Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050
+@misc{liu2021pay,
+  title={Pay Attention to MLPs},
+  author={Hanxiao Liu and Zihang Dai and David R. So and Quoc V. Le},
+  year={2021},
+  eprint={2105.08050},
+}
+
+Thanks to the paper authors for releasing code and weights.
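+
+A minimal usage sketch (assuming the bundled timm package is importable;
+pretrained=True additionally requires network access to download weights):
+
+    >>> import torch, timm
+    >>> model = timm.create_model('mixer_b16_224', pretrained=False)
+    >>> logits = model(torch.randn(1, 3, 224, 224))  # logits.shape == (1, 1000)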
+ +Hacked together by / Copyright 2021 Ross Wightman +""" +import math +from copy import deepcopy +from functools import partial + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, overlay_external_default_cfg, named_apply +from .layers import PatchEmbed, Mlp, GluMlp, GatedMlp, DropPath, lecun_normal_, to_2tuple +from .registry import register_model + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': 0.875, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), + 'first_conv': 'stem.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = dict( + mixer_s32_224=_cfg(), + mixer_s16_224=_cfg(), + mixer_b32_224=_cfg(), + mixer_b16_224=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_b16_224-76587d61.pth', + ), + mixer_b16_224_in21k=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_b16_224_in21k-617b3de2.pth', + num_classes=21843 + ), + mixer_l32_224=_cfg(), + mixer_l16_224=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_l16_224-92f9adc4.pth', + ), + mixer_l16_224_in21k=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_l16_224_in21k-846aa33c.pth', + num_classes=21843 + ), + + # Mixer ImageNet-21K-P pretraining + mixer_b16_224_miil_in21k=_cfg( + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/mixer_b16_224_miil_in21k.pth', + mean=(0, 0, 0), std=(1, 1, 1), crop_pct=0.875, interpolation='bilinear', num_classes=11221, + ), + mixer_b16_224_miil=_cfg( + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/mixer_b16_224_miil.pth', + mean=(0, 0, 0), std=(1, 1, 1), crop_pct=0.875, interpolation='bilinear', + ), + + gmixer_12_224=_cfg(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + gmixer_24_224=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gmixer_24_224_raa-7daf7ae6.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + + resmlp_12_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_12_no_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_24_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_24_no_dist.pth', + #url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resmlp_24_224_raa-a8256759.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_36_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_36_no_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_big_24_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_no_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + + resmlp_12_distilled_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_12_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_24_distilled_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_24_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_36_distilled_224=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_36_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_big_24_distilled_224=_cfg( 
+ url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_dist.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + + resmlp_big_24_224_in22ft1k=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_22k.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + + resmlp_12_224_dino=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_12_dino.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + resmlp_24_224_dino=_cfg( + url='https://dl.fbaipublicfiles.com/deit/resmlp_24_dino.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + + gmlp_ti16_224=_cfg(), + gmlp_s16_224=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gmlp_s16_224_raa-10536d42.pth', + ), + gmlp_b16_224=_cfg(), +) + + +class MixerBlock(nn.Module): + """ Residual Block w/ token mixing and channel MLPs + Based on: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + def __init__( + self, dim, seq_len, mlp_ratio=(0.5, 4.0), mlp_layer=Mlp, + norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, drop=0., drop_path=0.): + super().__init__() + tokens_dim, channels_dim = [int(x * dim) for x in to_2tuple(mlp_ratio)] + self.norm1 = norm_layer(dim) + self.mlp_tokens = mlp_layer(seq_len, tokens_dim, act_layer=act_layer, drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + self.mlp_channels = mlp_layer(dim, channels_dim, act_layer=act_layer, drop=drop) + + def forward(self, x): + x = x + self.drop_path(self.mlp_tokens(self.norm1(x).transpose(1, 2)).transpose(1, 2)) + x = x + self.drop_path(self.mlp_channels(self.norm2(x))) + return x + + +class Affine(nn.Module): + def __init__(self, dim): + super().__init__() + self.alpha = nn.Parameter(torch.ones((1, 1, dim))) + self.beta = nn.Parameter(torch.zeros((1, 1, dim))) + + def forward(self, x): + return torch.addcmul(self.beta, self.alpha, x) + + +class ResBlock(nn.Module): + """ Residual MLP block w/ LayerScale and Affine 'norm' + + Based on: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + def __init__( + self, dim, seq_len, mlp_ratio=4, mlp_layer=Mlp, norm_layer=Affine, + act_layer=nn.GELU, init_values=1e-4, drop=0., drop_path=0.): + super().__init__() + channel_dim = int(dim * mlp_ratio) + self.norm1 = norm_layer(dim) + self.linear_tokens = nn.Linear(seq_len, seq_len) + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + self.mlp_channels = mlp_layer(dim, channel_dim, act_layer=act_layer, drop=drop) + self.ls1 = nn.Parameter(init_values * torch.ones(dim)) + self.ls2 = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x): + x = x + self.drop_path(self.ls1 * self.linear_tokens(self.norm1(x).transpose(1, 2)).transpose(1, 2)) + x = x + self.drop_path(self.ls2 * self.mlp_channels(self.norm2(x))) + return x + + +class SpatialGatingUnit(nn.Module): + """ Spatial Gating Unit + + Based on: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 + """ + def __init__(self, dim, seq_len, norm_layer=nn.LayerNorm): + super().__init__() + gate_dim = dim // 2 + self.norm = norm_layer(gate_dim) + self.proj = nn.Linear(seq_len, seq_len) + + def init_weights(self): + # special init for the projection gate, called as override by base model init + nn.init.normal_(self.proj.weight, std=1e-6) + nn.init.ones_(self.proj.bias) + + def forward(self, x): + u, v = x.chunk(2, dim=-1) + v = self.norm(v) + v = self.proj(v.transpose(-1, -2)) + return u * v.transpose(-1, -2) + + +class SpatialGatingBlock(nn.Module): + """ Residual Block w/ Spatial Gating + + Based on: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 + """ + def __init__( + self, dim, seq_len, mlp_ratio=4, mlp_layer=GatedMlp, + norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, drop=0., drop_path=0.): + super().__init__() + channel_dim = int(dim * mlp_ratio) + self.norm = norm_layer(dim) + sgu = partial(SpatialGatingUnit, seq_len=seq_len) + self.mlp_channels = mlp_layer(dim, channel_dim, act_layer=act_layer, gate_layer=sgu, drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x): + x = x + self.drop_path(self.mlp_channels(self.norm(x))) + return x + + +class MlpMixer(nn.Module): + + def __init__( + self, + num_classes=1000, + img_size=224, + in_chans=3, + patch_size=16, + num_blocks=8, + embed_dim=512, + mlp_ratio=(0.5, 4.0), + block_layer=MixerBlock, + mlp_layer=Mlp, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + act_layer=nn.GELU, + drop_rate=0., + drop_path_rate=0., + nlhb=False, + stem_norm=False, + ): + super().__init__() + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + + self.stem = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, + embed_dim=embed_dim, norm_layer=norm_layer if stem_norm else None) + # FIXME drop_path (stochastic depth scaling rule or all the same?) + self.blocks = nn.Sequential(*[ + block_layer( + embed_dim, self.stem.num_patches, mlp_ratio, mlp_layer=mlp_layer, norm_layer=norm_layer, + act_layer=act_layer, drop=drop_rate, drop_path=drop_path_rate) + for _ in range(num_blocks)]) + self.norm = norm_layer(embed_dim) + self.head = nn.Linear(embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() + + self.init_weights(nlhb=nlhb) + + def init_weights(self, nlhb=False): + head_bias = -math.log(self.num_classes) if nlhb else 0. 
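+        # nlhb = negative log head bias: initializing the classifier bias to
+        # -log(num_classes) starts every class near probability 1/num_classes,
+        # keeping the initial loss close to log(num_classes).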
+ named_apply(partial(_init_weights, head_bias=head_bias), module=self) # depth-first + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.stem(x) + x = self.blocks(x) + x = self.norm(x) + x = x.mean(dim=1) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _init_weights(module: nn.Module, name: str, head_bias: float = 0., flax=False): + """ Mixer weight initialization (trying to match Flax defaults) + """ + if isinstance(module, nn.Linear): + if name.startswith('head'): + nn.init.zeros_(module.weight) + nn.init.constant_(module.bias, head_bias) + else: + if flax: + # Flax defaults + lecun_normal_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + else: + # like MLP init in vit (my original init) + nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + if 'mlp' in name: + nn.init.normal_(module.bias, std=1e-6) + else: + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Conv2d): + lecun_normal_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)): + nn.init.ones_(module.weight) + nn.init.zeros_(module.bias) + elif hasattr(module, 'init_weights'): + # NOTE if a parent module contains init_weights method, it can override the init of the + # child modules as this will be called in depth-first order. + module.init_weights() + + +def checkpoint_filter_fn(state_dict, model): + """ Remap checkpoints if needed """ + if 'patch_embed.proj.weight' in state_dict: + # Remap FB ResMlp models -> timm + out_dict = {} + for k, v in state_dict.items(): + k = k.replace('patch_embed.', 'stem.') + k = k.replace('attn.', 'linear_tokens.') + k = k.replace('mlp.', 'mlp_channels.') + k = k.replace('gamma_', 'ls') + if k.endswith('.alpha') or k.endswith('.beta'): + v = v.reshape(1, 1, -1) + out_dict[k] = v + return out_dict + return state_dict + + +def _create_mixer(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for MLP-Mixer models.') + + model = build_model_with_cfg( + MlpMixer, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + return model + + +@register_model +def mixer_s32_224(pretrained=False, **kwargs): + """ Mixer-S/32 224x224 + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=32, num_blocks=8, embed_dim=512, **kwargs) + model = _create_mixer('mixer_s32_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_s16_224(pretrained=False, **kwargs): + """ Mixer-S/16 224x224 + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=16, num_blocks=8, embed_dim=512, **kwargs) + model = _create_mixer('mixer_s16_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def mixer_b32_224(pretrained=False, **kwargs): + """ Mixer-B/32 224x224 + Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 + """ + model_args = dict(patch_size=32, num_blocks=12, embed_dim=768, **kwargs) + model = _create_mixer('mixer_b32_224', 
pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def mixer_b16_224(pretrained=False, **kwargs):
+    """ Mixer-B/16 224x224. ImageNet-1k pretrained weights.
+    Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
+    """
+    model_args = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs)
+    model = _create_mixer('mixer_b16_224', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def mixer_b16_224_in21k(pretrained=False, **kwargs):
+    """ Mixer-B/16 224x224. ImageNet-21k pretrained weights.
+    Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
+    """
+    model_args = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs)
+    model = _create_mixer('mixer_b16_224_in21k', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def mixer_l32_224(pretrained=False, **kwargs):
+    """ Mixer-L/32 224x224.
+    Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
+    """
+    model_args = dict(patch_size=32, num_blocks=24, embed_dim=1024, **kwargs)
+    model = _create_mixer('mixer_l32_224', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def mixer_l16_224(pretrained=False, **kwargs):
+    """ Mixer-L/16 224x224. ImageNet-1k pretrained weights.
+    Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
+    """
+    model_args = dict(patch_size=16, num_blocks=24, embed_dim=1024, **kwargs)
+    model = _create_mixer('mixer_l16_224', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def mixer_l16_224_in21k(pretrained=False, **kwargs):
+    """ Mixer-L/16 224x224. ImageNet-21k pretrained weights.
+    Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
+    """
+    model_args = dict(patch_size=16, num_blocks=24, embed_dim=1024, **kwargs)
+    model = _create_mixer('mixer_l16_224_in21k', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def mixer_b16_224_miil(pretrained=False, **kwargs):
+    """ Mixer-B/16 224x224. ImageNet-1k pretrained weights.
+    Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K
+    """
+    model_args = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs)
+    model = _create_mixer('mixer_b16_224_miil', pretrained=pretrained, **model_args)
+    return model
+
+
+@register_model
+def mixer_b16_224_miil_in21k(pretrained=False, **kwargs):
+    """ Mixer-B/16 224x224. ImageNet-21k pretrained weights.
+ Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K + """ + model_args = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs) + model = _create_mixer('mixer_b16_224_miil_in21k', pretrained=pretrained, **model_args) + return model + + +@register_model +def gmixer_12_224(pretrained=False, **kwargs): + """ Glu-Mixer-12 224x224 + Experiment by Ross Wightman, adding (Si)GLU to MLP-Mixer + """ + model_args = dict( + patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=(1.0, 4.0), + mlp_layer=GluMlp, act_layer=nn.SiLU, **kwargs) + model = _create_mixer('gmixer_12_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def gmixer_24_224(pretrained=False, **kwargs): + """ Glu-Mixer-24 224x224 + Experiment by Ross Wightman, adding (Si)GLU to MLP-Mixer + """ + model_args = dict( + patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=(1.0, 4.0), + mlp_layer=GluMlp, act_layer=nn.SiLU, **kwargs) + model = _create_mixer('gmixer_24_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_12_224(pretrained=False, **kwargs): + """ ResMLP-12 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=4, block_layer=ResBlock, norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_12_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_24_224(pretrained=False, **kwargs): + """ ResMLP-24 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-5), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_24_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_36_224(pretrained=False, **kwargs): + """ ResMLP-36 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=16, num_blocks=36, embed_dim=384, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_36_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_big_24_224(pretrained=False, **kwargs): + """ ResMLP-B-24 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=8, num_blocks=24, embed_dim=768, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_big_24_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_12_distilled_224(pretrained=False, **kwargs): + """ ResMLP-12 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=4, block_layer=ResBlock, norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_12_distilled_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_24_distilled_224(pretrained=False, **kwargs): + """ ResMLP-24 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=4, + 
block_layer=partial(ResBlock, init_values=1e-5), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_24_distilled_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_36_distilled_224(pretrained=False, **kwargs): + """ ResMLP-36 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=16, num_blocks=36, embed_dim=384, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_36_distilled_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_big_24_distilled_224(pretrained=False, **kwargs): + """ ResMLP-B-24 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=8, num_blocks=24, embed_dim=768, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_big_24_distilled_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_big_24_224_in22ft1k(pretrained=False, **kwargs): + """ ResMLP-B-24 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + """ + model_args = dict( + patch_size=8, num_blocks=24, embed_dim=768, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_big_24_224_in22ft1k', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_12_224_dino(pretrained=False, **kwargs): + """ ResMLP-12 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + + Model pretrained via DINO (self-supervised) - https://arxiv.org/abs/2104.14294 + """ + model_args = dict( + patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=4, block_layer=ResBlock, norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_12_224_dino', pretrained=pretrained, **model_args) + return model + + +@register_model +def resmlp_24_224_dino(pretrained=False, **kwargs): + """ ResMLP-24 + Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 + + Model pretrained via DINO (self-supervised) - https://arxiv.org/abs/2104.14294 + """ + model_args = dict( + patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=4, + block_layer=partial(ResBlock, init_values=1e-5), norm_layer=Affine, **kwargs) + model = _create_mixer('resmlp_24_224_dino', pretrained=pretrained, **model_args) + return model + + +@register_model +def gmlp_ti16_224(pretrained=False, **kwargs): + """ gMLP-Tiny + Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 + """ + model_args = dict( + patch_size=16, num_blocks=30, embed_dim=128, mlp_ratio=6, block_layer=SpatialGatingBlock, + mlp_layer=GatedMlp, **kwargs) + model = _create_mixer('gmlp_ti16_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def gmlp_s16_224(pretrained=False, **kwargs): + """ gMLP-Small + Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 + """ + model_args = dict( + patch_size=16, num_blocks=30, embed_dim=256, mlp_ratio=6, block_layer=SpatialGatingBlock, + mlp_layer=GatedMlp, **kwargs) + model = _create_mixer('gmlp_s16_224', pretrained=pretrained, **model_args) + return model + + +@register_model +def gmlp_b16_224(pretrained=False, **kwargs): + """ gMLP-Base + Paper: `Pay Attention 
to MLPs` - https://arxiv.org/abs/2105.08050 + """ + model_args = dict( + patch_size=16, num_blocks=30, embed_dim=512, mlp_ratio=6, block_layer=SpatialGatingBlock, + mlp_layer=GatedMlp, **kwargs) + model = _create_mixer('gmlp_b16_224', pretrained=pretrained, **model_args) + return model diff --git a/timm/models/mobilenetv3.py b/timm/models/mobilenetv3.py new file mode 100644 index 0000000..f810eb8 --- /dev/null +++ b/timm/models/mobilenetv3.py @@ -0,0 +1,562 @@ + +""" MobileNet V3 + +A PyTorch impl of MobileNet-V3, compatible with TF weights from official impl. + +Paper: Searching for MobileNetV3 - https://arxiv.org/abs/1905.02244 + +Hacked together by / Copyright 2021 Ross Wightman +""" +from functools import partial +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .efficientnet_blocks import SqueezeExcite +from .efficientnet_builder import EfficientNetBuilder, decode_arch_def, efficientnet_init_weights,\ + round_channels, resolve_bn_args, resolve_act_layer, BN_EPS_TF_DEFAULT +from .features import FeatureInfo, FeatureHooks +from .helpers import build_model_with_cfg, default_cfg_for_features +from .layers import SelectAdaptivePool2d, Linear, create_conv2d, get_act_fn, hard_sigmoid +from .registry import register_model + +__all__ = ['MobileNetV3', 'MobileNetV3Features'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (1, 1), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv_stem', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = { + 'mobilenetv3_large_075': _cfg(url=''), + 'mobilenetv3_large_100': _cfg( + interpolation='bicubic', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_large_100_ra-f55367f5.pth'), + 'mobilenetv3_large_100_miil': _cfg( + interpolation='bilinear', mean=(0, 0, 0), std=(1, 1, 1), + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/mobilenetv3_large_100_1k_miil_78_0.pth'), + 'mobilenetv3_large_100_miil_in21k': _cfg( + interpolation='bilinear', mean=(0, 0, 0), std=(1, 1, 1), + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/mobilenetv3_large_100_in21k_miil.pth', num_classes=11221), + 'mobilenetv3_small_075': _cfg(url=''), + 'mobilenetv3_small_100': _cfg(url=''), + + 'mobilenetv3_rw': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_100-35495452.pth', + interpolation='bicubic'), + + 'tf_mobilenetv3_large_075': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_075-150ee8b0.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_large_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_100-427764d5.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_large_minimal_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_minimal_100-8596ae28.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_small_075': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_075-da427f52.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_small_100': _cfg( + url= 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_100-37f49e2b.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_small_minimal_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_minimal_100-922a7843.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + + 'fbnetv3_b': _cfg(), + 'fbnetv3_d': _cfg(), + 'fbnetv3_g': _cfg(), +} + + +class MobileNetV3(nn.Module): + """ MobiletNet-V3 + + Based on my EfficientNet implementation and building blocks, this model utilizes the MobileNet-v3 specific + 'efficient head', where global pooling is done before the head convolution without a final batch-norm + layer before the classifier. + + Paper: https://arxiv.org/abs/1905.02244 + """ + + def __init__(self, block_args, num_classes=1000, in_chans=3, stem_size=16, num_features=1280, head_bias=True, + pad_type='', act_layer=None, norm_layer=None, se_layer=None, se_from_exp=True, + round_chs_fn=round_channels, drop_rate=0., drop_path_rate=0., global_pool='avg'): + super(MobileNetV3, self).__init__() + act_layer = act_layer or nn.ReLU + norm_layer = norm_layer or nn.BatchNorm2d + se_layer = se_layer or SqueezeExcite + self.num_classes = num_classes + self.num_features = num_features + self.drop_rate = drop_rate + + # Stem + stem_size = round_chs_fn(stem_size) + self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = norm_layer(stem_size) + self.act1 = act_layer(inplace=True) + + # Middle stages (IR/ER/DS Blocks) + builder = EfficientNetBuilder( + output_stride=32, pad_type=pad_type, round_chs_fn=round_chs_fn, se_from_exp=se_from_exp, + act_layer=act_layer, norm_layer=norm_layer, se_layer=se_layer, drop_path_rate=drop_path_rate) + self.blocks = nn.Sequential(*builder(stem_size, block_args)) + self.feature_info = builder.features + head_chs = builder.in_chs + + # Head + Pooling + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + num_pooled_chs = head_chs * self.global_pool.feat_mult() + self.conv_head = create_conv2d(num_pooled_chs, self.num_features, 1, padding=pad_type, bias=head_bias) + self.act2 = act_layer(inplace=True) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled + self.classifier = Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + efficientnet_init_weights(self) + + def as_sequential(self): + layers = [self.conv_stem, self.bn1, self.act1] + layers.extend(self.blocks) + layers.extend([self.global_pool, self.conv_head, self.act2]) + layers.extend([nn.Flatten(), nn.Dropout(self.drop_rate), self.classifier]) + return nn.Sequential(*layers) + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + # cannot meaningfully change pooling of efficient head after creation + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled + self.classifier = Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.conv_stem(x) + 
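+        # stem conv (stride 2) -> BN -> act; the block stack then takes the overall
+        # output stride to 32 before the pooled 'efficient head' below.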
x = self.bn1(x) + x = self.act1(x) + x = self.blocks(x) + x = self.global_pool(x) + x = self.conv_head(x) + x = self.act2(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.flatten(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + return self.classifier(x) + + +class MobileNetV3Features(nn.Module): + """ MobileNetV3 Feature Extractor + + A work-in-progress feature extraction module for MobileNet-V3 to use as a backbone for segmentation + and object detection models. + """ + + def __init__(self, block_args, out_indices=(0, 1, 2, 3, 4), feature_location='bottleneck', in_chans=3, + stem_size=16, output_stride=32, pad_type='', round_chs_fn=round_channels, se_from_exp=True, + act_layer=None, norm_layer=None, se_layer=None, drop_rate=0., drop_path_rate=0.): + super(MobileNetV3Features, self).__init__() + act_layer = act_layer or nn.ReLU + norm_layer = norm_layer or nn.BatchNorm2d + se_layer = se_layer or SqueezeExcite + self.drop_rate = drop_rate + + # Stem + stem_size = round_chs_fn(stem_size) + self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = norm_layer(stem_size) + self.act1 = act_layer(inplace=True) + + # Middle stages (IR/ER/DS Blocks) + builder = EfficientNetBuilder( + output_stride=output_stride, pad_type=pad_type, round_chs_fn=round_chs_fn, se_from_exp=se_from_exp, + act_layer=act_layer, norm_layer=norm_layer, se_layer=se_layer, + drop_path_rate=drop_path_rate, feature_location=feature_location) + self.blocks = nn.Sequential(*builder(stem_size, block_args)) + self.feature_info = FeatureInfo(builder.features, out_indices) + self._stage_out_idx = {v['stage']: i for i, v in enumerate(self.feature_info) if i in out_indices} + + efficientnet_init_weights(self) + + # Register feature extraction hooks with FeatureHooks helper + self.feature_hooks = None + if feature_location != 'bottleneck': + hooks = self.feature_info.get_dicts(keys=('module', 'hook_type')) + self.feature_hooks = FeatureHooks(hooks, self.named_modules()) + + def forward(self, x) -> List[torch.Tensor]: + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + if self.feature_hooks is None: + features = [] + if 0 in self._stage_out_idx: + features.append(x) # add stem out + for i, b in enumerate(self.blocks): + x = b(x) + if i + 1 in self._stage_out_idx: + features.append(x) + return features + else: + self.blocks(x) + out = self.feature_hooks.get_output(x.device) + return list(out.values()) + + +def _create_mnv3(variant, pretrained=False, **kwargs): + features_only = False + model_cls = MobileNetV3 + kwargs_filter = None + if kwargs.pop('features_only', False): + features_only = True + kwargs_filter = ('num_classes', 'num_features', 'head_conv', 'head_bias', 'global_pool') + model_cls = MobileNetV3Features + model = build_model_with_cfg( + model_cls, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_strict=not features_only, + kwargs_filter=kwargs_filter, + **kwargs) + if features_only: + model.default_cfg = default_cfg_for_features(model.default_cfg) + return model + + +def _gen_mobilenet_v3_rw(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a MobileNet-V3 model. + + Ref impl: ? + Paper: https://arxiv.org/abs/1905.02244 + + Args: + channel_multiplier: multiplier to number of channels per layer. 
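+        Note: the arch_def below is the early 'RW' take on the paper's large config;
+            '_nre' marks ReLU stages (later stages use the hard-swish default) and
+            '_noskip' disables the residual on the first depthwise block.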
+ """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_nre_noskip'], # relu + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], # relu + # stage 2, 56x56 in + ['ir_r3_k5_s2_e3_c40_se0.25_nre'], # relu + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], # hard-swish + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c112_se0.25'], # hard-swish + # stage 5, 14x14in + ['ir_r3_k5_s2_e6_c160_se0.25'], # hard-swish + # stage 6, 7x7 in + ['cn_r1_k1_s1_c960'], # hard-swish + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + head_bias=False, + round_chs_fn=partial(round_channels, multiplier=channel_multiplier), + norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), + act_layer=resolve_act_layer(kwargs, 'hard_swish'), + se_layer=partial(SqueezeExcite, gate_layer='hard_sigmoid'), + **kwargs, + ) + model = _create_mnv3(variant, pretrained, **model_kwargs) + return model + + +def _gen_mobilenet_v3(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a MobileNet-V3 model. + + Ref impl: ? + Paper: https://arxiv.org/abs/1905.02244 + + Args: + channel_multiplier: multiplier to number of channels per layer. + """ + if 'small' in variant: + num_features = 1024 + if 'minimal' in variant: + act_layer = resolve_act_layer(kwargs, 'relu') + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s2_e1_c16'], + # stage 1, 56x56 in + ['ir_r1_k3_s2_e4.5_c24', 'ir_r1_k3_s1_e3.67_c24'], + # stage 2, 28x28 in + ['ir_r1_k3_s2_e4_c40', 'ir_r2_k3_s1_e6_c40'], + # stage 3, 14x14 in + ['ir_r2_k3_s1_e3_c48'], + # stage 4, 14x14in + ['ir_r3_k3_s2_e6_c96'], + # stage 6, 7x7 in + ['cn_r1_k1_s1_c576'], + ] + else: + act_layer = resolve_act_layer(kwargs, 'hard_swish') + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s2_e1_c16_se0.25_nre'], # relu + # stage 1, 56x56 in + ['ir_r1_k3_s2_e4.5_c24_nre', 'ir_r1_k3_s1_e3.67_c24_nre'], # relu + # stage 2, 28x28 in + ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r2_k5_s1_e6_c40_se0.25'], # hard-swish + # stage 3, 14x14 in + ['ir_r2_k5_s1_e3_c48_se0.25'], # hard-swish + # stage 4, 14x14in + ['ir_r3_k5_s2_e6_c96_se0.25'], # hard-swish + # stage 6, 7x7 in + ['cn_r1_k1_s1_c576'], # hard-swish + ] + else: + num_features = 1280 + if 'minimal' in variant: + act_layer = resolve_act_layer(kwargs, 'relu') + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16'], + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24', 'ir_r1_k3_s1_e3_c24'], + # stage 2, 56x56 in + ['ir_r3_k3_s2_e3_c40'], + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c112'], + # stage 5, 14x14in + ['ir_r3_k3_s2_e6_c160'], + # stage 6, 7x7 in + ['cn_r1_k1_s1_c960'], + ] + else: + act_layer = resolve_act_layer(kwargs, 'hard_swish') + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_nre'], # relu + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], # relu + # stage 2, 56x56 in + ['ir_r3_k5_s2_e3_c40_se0.25_nre'], # relu + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], # hard-swish + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c112_se0.25'], # hard-swish + # stage 5, 14x14in + ['ir_r3_k5_s2_e6_c160_se0.25'], # hard-swish + # stage 6, 7x7 in + ['cn_r1_k1_s1_c960'], # hard-swish + ] + se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU, rd_round_fn=round_channels) + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + 
num_features=num_features,
+        stem_size=16,
+        round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
+        norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
+        act_layer=act_layer,
+        se_layer=se_layer,
+        **kwargs,
+    )
+    model = _create_mnv3(variant, pretrained, **model_kwargs)
+    return model
+
+
+def _gen_fbnetv3(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
+    """ FBNetV3
+    Paper: `FBNetV3: Joint Architecture-Recipe Search using Predictor Pretraining`
+        - https://arxiv.org/abs/2006.02049
+    FIXME untested, this is a preliminary impl of some FBNet-V3 variants.
+    """
+    vl = variant.split('_')[-1]
+    if vl in ('a', 'b'):
+        stem_size = 16
+        arch_def = [
+            ['ds_r2_k3_s1_e1_c16'],
+            ['ir_r1_k5_s2_e4_c24', 'ir_r3_k5_s1_e2_c24'],
+            ['ir_r1_k5_s2_e5_c40_se0.25', 'ir_r4_k5_s1_e3_c40_se0.25'],
+            ['ir_r1_k5_s2_e5_c72', 'ir_r4_k3_s1_e3_c72'],
+            ['ir_r1_k3_s1_e5_c120_se0.25', 'ir_r5_k5_s1_e3_c120_se0.25'],
+            ['ir_r1_k3_s2_e6_c184_se0.25', 'ir_r5_k5_s1_e4_c184_se0.25', 'ir_r1_k5_s1_e6_c224_se0.25'],
+            ['cn_r1_k1_s1_c1344'],
+        ]
+    elif vl == 'd':
+        stem_size = 24
+        arch_def = [
+            ['ds_r2_k3_s1_e1_c16'],
+            ['ir_r1_k3_s2_e5_c24', 'ir_r5_k3_s1_e2_c24'],
+            ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r4_k3_s1_e3_c40_se0.25'],
+            ['ir_r1_k3_s2_e5_c72', 'ir_r4_k3_s1_e3_c72'],
+            ['ir_r1_k3_s1_e5_c128_se0.25', 'ir_r6_k5_s1_e3_c128_se0.25'],
+            ['ir_r1_k3_s2_e6_c208_se0.25', 'ir_r5_k5_s1_e5_c208_se0.25', 'ir_r1_k5_s1_e6_c240_se0.25'],
+            ['cn_r1_k1_s1_c1440'],
+        ]
+    elif vl == 'g':
+        stem_size = 32
+        arch_def = [
+            ['ds_r3_k3_s1_e1_c24'],
+            ['ir_r1_k5_s2_e4_c40', 'ir_r4_k5_s1_e2_c40'],
+            ['ir_r1_k5_s2_e4_c56_se0.25', 'ir_r4_k5_s1_e3_c56_se0.25'],
+            ['ir_r1_k5_s2_e5_c104', 'ir_r4_k3_s1_e3_c104'],
+            ['ir_r1_k3_s1_e5_c160_se0.25', 'ir_r8_k5_s1_e3_c160_se0.25'],
+            ['ir_r1_k3_s2_e6_c264_se0.25', 'ir_r6_k5_s1_e5_c264_se0.25', 'ir_r2_k5_s1_e6_c288_se0.25'],
+            ['cn_r1_k1_s1_c1728'],
+        ]
+    else:
+        raise NotImplementedError  # was `raise NotImplemented`, which itself raises a TypeError
+    round_chs_fn = partial(round_channels, multiplier=channel_multiplier, round_limit=0.95)
+    se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', rd_round_fn=round_chs_fn)
+    act_layer = resolve_act_layer(kwargs, 'hard_swish')
+    model_kwargs = dict(
+        block_args=decode_arch_def(arch_def),
+        num_features=1984,
+        head_bias=False,
+        stem_size=stem_size,
+        round_chs_fn=round_chs_fn,
+        se_from_exp=False,
+        norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
+        act_layer=act_layer,
+        se_layer=se_layer,
+        **kwargs,
+    )
+    model = _create_mnv3(variant, pretrained, **model_kwargs)
+    return model
+
+
+@register_model
+def mobilenetv3_large_075(pretrained=False, **kwargs):
+    """ MobileNet V3 """
+    model = _gen_mobilenet_v3('mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def mobilenetv3_large_100(pretrained=False, **kwargs):
+    """ MobileNet V3 """
+    model = _gen_mobilenet_v3('mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def mobilenetv3_large_100_miil(pretrained=False, **kwargs):
+    """ MobileNet V3
+    Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K
+    """
+    model = _gen_mobilenet_v3('mobilenetv3_large_100_miil', 1.0, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def mobilenetv3_large_100_miil_in21k(pretrained=False, **kwargs):
+    """ MobileNet V3, 21k pretraining
+    Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K
+    """
+    model = _gen_mobilenet_v3('mobilenetv3_large_100_miil_in21k', 1.0, pretrained=pretrained, **kwargs)
+    return 
model + + +@register_model +def mobilenetv3_small_075(pretrained=False, **kwargs): + """ MobileNet V3 """ + model = _gen_mobilenet_v3('mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv3_small_100(pretrained=False, **kwargs): + """ MobileNet V3 """ + model = _gen_mobilenet_v3('mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def mobilenetv3_rw(pretrained=False, **kwargs): + """ MobileNet V3 """ + if pretrained: + # pretrained model trained with non-default BN epsilon + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + model = _gen_mobilenet_v3_rw('mobilenetv3_rw', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mobilenetv3_large_075(pretrained=False, **kwargs): + """ MobileNet V3 """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mobilenet_v3('tf_mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mobilenetv3_large_100(pretrained=False, **kwargs): + """ MobileNet V3 """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mobilenet_v3('tf_mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mobilenetv3_large_minimal_100(pretrained=False, **kwargs): + """ MobileNet V3 """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mobilenet_v3('tf_mobilenetv3_large_minimal_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mobilenetv3_small_075(pretrained=False, **kwargs): + """ MobileNet V3 """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mobilenet_v3('tf_mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mobilenetv3_small_100(pretrained=False, **kwargs): + """ MobileNet V3 """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mobilenet_v3('tf_mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def tf_mobilenetv3_small_minimal_100(pretrained=False, **kwargs): + """ MobileNet V3 """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mobilenet_v3('tf_mobilenetv3_small_minimal_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +@register_model +def fbnetv3_b(pretrained=False, **kwargs): + """ FBNetV3-B """ + model = _gen_fbnetv3('fbnetv3_b', pretrained=pretrained, **kwargs) + return model + + +@register_model +def fbnetv3_d(pretrained=False, **kwargs): + """ FBNetV3-D """ + model = _gen_fbnetv3('fbnetv3_d', pretrained=pretrained, **kwargs) + return model + + +@register_model +def fbnetv3_g(pretrained=False, **kwargs): + """ FBNetV3-G """ + model = _gen_fbnetv3('fbnetv3_g', pretrained=pretrained, **kwargs) + return model diff --git a/timm/models/nasnet.py b/timm/models/nasnet.py new file mode 100644 index 0000000..2afe82c --- /dev/null +++ b/timm/models/nasnet.py @@ -0,0 +1,567 @@ +""" NasNet-A (Large) + nasnetalarge implementation grabbed from Cadene's pretrained models + https://github.com/Cadene/pretrained-models.pytorch +""" +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .helpers import build_model_with_cfg +from .layers import ConvBnAct, create_conv2d, create_pool2d, create_classifier +from .registry import register_model + +__all__ = 
['NASNetALarge'] + +default_cfgs = { + 'nasnetalarge': { + 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/nasnetalarge-a1897284.pth', + 'input_size': (3, 331, 331), + 'pool_size': (11, 11), + 'crop_pct': 0.911, + 'interpolation': 'bicubic', + 'mean': (0.5, 0.5, 0.5), + 'std': (0.5, 0.5, 0.5), + 'num_classes': 1000, + 'first_conv': 'conv0.conv', + 'classifier': 'last_linear', + 'label_offset': 1, # 1001 classes in pretrained weights + }, +} + + +class ActConvBn(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=''): + super(ActConvBn, self).__init__() + self.act = nn.ReLU() + self.conv = create_conv2d( + in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding) + self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1) + + def forward(self, x): + x = self.act(x) + x = self.conv(x) + x = self.bn(x) + return x + + +class SeparableConv2d(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride, padding=''): + super(SeparableConv2d, self).__init__() + self.depthwise_conv2d = create_conv2d( + in_channels, in_channels, kernel_size=kernel_size, + stride=stride, padding=padding, groups=in_channels) + self.pointwise_conv2d = create_conv2d( + in_channels, out_channels, kernel_size=1, padding=0) + + def forward(self, x): + x = self.depthwise_conv2d(x) + x = self.pointwise_conv2d(x) + return x + + +class BranchSeparables(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, pad_type='', stem_cell=False): + super(BranchSeparables, self).__init__() + middle_channels = out_channels if stem_cell else in_channels + self.act_1 = nn.ReLU() + self.separable_1 = SeparableConv2d( + in_channels, middle_channels, kernel_size, stride=stride, padding=pad_type) + self.bn_sep_1 = nn.BatchNorm2d(middle_channels, eps=0.001, momentum=0.1) + self.act_2 = nn.ReLU(inplace=True) + self.separable_2 = SeparableConv2d( + middle_channels, out_channels, kernel_size, stride=1, padding=pad_type) + self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1) + + def forward(self, x): + x = self.act_1(x) + x = self.separable_1(x) + x = self.bn_sep_1(x) + x = self.act_2(x) + x = self.separable_2(x) + x = self.bn_sep_2(x) + return x + + +class CellStem0(nn.Module): + def __init__(self, stem_size, num_channels=42, pad_type=''): + super(CellStem0, self).__init__() + self.num_channels = num_channels + self.stem_size = stem_size + self.conv_1x1 = ActConvBn(self.stem_size, self.num_channels, 1, stride=1) + + self.comb_iter_0_left = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type) + self.comb_iter_0_right = BranchSeparables(self.stem_size, self.num_channels, 7, 2, pad_type, stem_cell=True) + + self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) + self.comb_iter_1_right = BranchSeparables(self.stem_size, self.num_channels, 7, 2, pad_type, stem_cell=True) + + self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) + self.comb_iter_2_right = BranchSeparables(self.stem_size, self.num_channels, 5, 2, pad_type, stem_cell=True) + + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(self.num_channels, self.num_channels, 3, 1, pad_type) + self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) + + def forward(self, x): + x1 = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x1) + x_comb_iter_0_right = 
self.comb_iter_0_right(x) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x1) + x_comb_iter_1_right = self.comb_iter_1_right(x) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x1) + x_comb_iter_2_right = self.comb_iter_2_right(x) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) + x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 + + x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) + x_comb_iter_4_right = self.comb_iter_4_right(x1) + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class CellStem1(nn.Module): + + def __init__(self, stem_size, num_channels, pad_type=''): + super(CellStem1, self).__init__() + self.num_channels = num_channels + self.stem_size = stem_size + self.conv_1x1 = ActConvBn(2 * self.num_channels, self.num_channels, 1, stride=1) + + self.act = nn.ReLU() + self.path_1 = nn.Sequential() + self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) + self.path_1.add_module('conv', nn.Conv2d(self.stem_size, self.num_channels // 2, 1, stride=1, bias=False)) + + self.path_2 = nn.Sequential() + self.path_2.add_module('pad', nn.ZeroPad2d((-1, 1, -1, 1))) + self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) + self.path_2.add_module('conv', nn.Conv2d(self.stem_size, self.num_channels // 2, 1, stride=1, bias=False)) + + self.final_path_bn = nn.BatchNorm2d(self.num_channels, eps=0.001, momentum=0.1) + + self.comb_iter_0_left = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type) + self.comb_iter_0_right = BranchSeparables(self.num_channels, self.num_channels, 7, 2, pad_type) + + self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) + self.comb_iter_1_right = BranchSeparables(self.num_channels, self.num_channels, 7, 2, pad_type) + + self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) + self.comb_iter_2_right = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type) + + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(self.num_channels, self.num_channels, 3, 1, pad_type) + self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) + + def forward(self, x_conv0, x_stem_0): + x_left = self.conv_1x1(x_stem_0) + + x_relu = self.act(x_conv0) + # path 1 + x_path1 = self.path_1(x_relu) + # path 2 + x_path2 = self.path_2(x_relu) + # final path + x_right = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) + + x_comb_iter_0_left = self.comb_iter_0_left(x_left) + x_comb_iter_0_right = self.comb_iter_0_right(x_right) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_left) + x_comb_iter_1_right = self.comb_iter_1_right(x_right) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_left) + x_comb_iter_2_right = self.comb_iter_2_right(x_right) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) + x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 + + x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) + x_comb_iter_4_right = 
self.comb_iter_4_right(x_left) + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class FirstCell(nn.Module): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): + super(FirstCell, self).__init__() + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1) + + self.act = nn.ReLU() + self.path_1 = nn.Sequential() + self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) + self.path_1.add_module('conv', nn.Conv2d(in_chs_left, out_chs_left, 1, stride=1, bias=False)) + + self.path_2 = nn.Sequential() + self.path_2.add_module('pad', nn.ZeroPad2d((-1, 1, -1, 1))) + self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) + self.path_2.add_module('conv', nn.Conv2d(in_chs_left, out_chs_left, 1, stride=1, bias=False)) + + self.final_path_bn = nn.BatchNorm2d(out_chs_left * 2, eps=0.001, momentum=0.1) + + self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type) + self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + + self.comb_iter_1_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type) + self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + + self.comb_iter_2_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_3_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + + def forward(self, x, x_prev): + x_relu = self.act(x_prev) + x_path1 = self.path_1(x_relu) + x_path2 = self.path_2(x_relu) + x_left = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) + x_right = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x_right) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_left) + x_comb_iter_1_right = self.comb_iter_1_right(x_left) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2 = x_comb_iter_2_left + x_left + + x_comb_iter_3_left = self.comb_iter_3_left(x_left) + x_comb_iter_3_right = self.comb_iter_3_right(x_left) + x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right + + x_comb_iter_4_left = self.comb_iter_4_left(x_right) + x_comb_iter_4 = x_comb_iter_4_left + x_right + + x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class NormalCell(nn.Module): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): + super(NormalCell, self).__init__() + self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type) + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type) + + self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type) + self.comb_iter_0_right = BranchSeparables(out_chs_left, out_chs_left, 3, 1, pad_type) + + self.comb_iter_1_left = BranchSeparables(out_chs_left, out_chs_left, 5, 1, pad_type) + self.comb_iter_1_right = BranchSeparables(out_chs_left, out_chs_left, 3, 1, pad_type) + + 
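+        # each comb_iter pairs a left/right branch (separable conv or pool) whose outputs
+        # are summed in forward(); the cell then concatenates the previous-state path with
+        # all five sums, so a normal cell emits 6x out_chs channels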
self.comb_iter_2_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_3_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + + def forward(self, x, x_prev): + x_left = self.conv_prev_1x1(x_prev) + x_right = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x_right) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_left) + x_comb_iter_1_right = self.comb_iter_1_right(x_left) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2 = x_comb_iter_2_left + x_left + + x_comb_iter_3_left = self.comb_iter_3_left(x_left) + x_comb_iter_3_right = self.comb_iter_3_right(x_left) + x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right + + x_comb_iter_4_left = self.comb_iter_4_left(x_right) + x_comb_iter_4 = x_comb_iter_4_left + x_right + + x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class ReductionCell0(nn.Module): + + def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): + super(ReductionCell0, self).__init__() + self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type) + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type) + + self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) + self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) + + self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) + self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) + + self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) + self.comb_iter_2_right = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) + + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) + + def forward(self, x, x_prev): + x_left = self.conv_prev_1x1(x_prev) + x_right = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x_right) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_right) + x_comb_iter_1_right = self.comb_iter_1_right(x_left) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2_right = self.comb_iter_2_right(x_left) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) + x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 + + x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) + x_comb_iter_4_right = self.comb_iter_4_right(x_right) + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class ReductionCell1(nn.Module): + + def __init__(self, in_chs_left, 
out_chs_left, in_chs_right, out_chs_right, pad_type=''): + super(ReductionCell1, self).__init__() + self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type) + self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type) + + self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) + self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) + + self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) + self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) + + self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) + self.comb_iter_2_right = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) + + self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) + + self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) + self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) + + def forward(self, x, x_prev): + x_left = self.conv_prev_1x1(x_prev) + x_right = self.conv_1x1(x) + + x_comb_iter_0_left = self.comb_iter_0_left(x_right) + x_comb_iter_0_right = self.comb_iter_0_right(x_left) + x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right + + x_comb_iter_1_left = self.comb_iter_1_left(x_right) + x_comb_iter_1_right = self.comb_iter_1_right(x_left) + x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right + + x_comb_iter_2_left = self.comb_iter_2_left(x_right) + x_comb_iter_2_right = self.comb_iter_2_right(x_left) + x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right + + x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) + x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 + + x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) + x_comb_iter_4_right = self.comb_iter_4_right(x_right) + x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right + + x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) + return x_out + + +class NASNetALarge(nn.Module): + """NASNetALarge (6 @ 4032) """ + + def __init__(self, num_classes=1000, in_chans=3, stem_size=96, channel_multiplier=2, + num_features=4032, output_stride=32, drop_rate=0., global_pool='avg', pad_type='same'): + super(NASNetALarge, self).__init__() + self.num_classes = num_classes + self.stem_size = stem_size + self.num_features = num_features + self.channel_multiplier = channel_multiplier + self.drop_rate = drop_rate + assert output_stride == 32 + + channels = self.num_features // 24 + # 24 is default value for the architecture + + self.conv0 = ConvBnAct( + in_channels=in_chans, out_channels=self.stem_size, kernel_size=3, padding=0, stride=2, + norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.1), apply_act=False) + + self.cell_stem_0 = CellStem0( + self.stem_size, num_channels=channels // (channel_multiplier ** 2), pad_type=pad_type) + self.cell_stem_1 = CellStem1( + self.stem_size, num_channels=channels // channel_multiplier, pad_type=pad_type) + + self.cell_0 = FirstCell( + in_chs_left=channels, out_chs_left=channels // 2, + in_chs_right=2 * channels, out_chs_right=channels, pad_type=pad_type) + self.cell_1 = NormalCell( + in_chs_left=2 * channels, out_chs_left=channels, + in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) + self.cell_2 = NormalCell( + in_chs_left=6 * channels, out_chs_left=channels, + in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) + self.cell_3 
= NormalCell( + in_chs_left=6 * channels, out_chs_left=channels, + in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) + self.cell_4 = NormalCell( + in_chs_left=6 * channels, out_chs_left=channels, + in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) + self.cell_5 = NormalCell( + in_chs_left=6 * channels, out_chs_left=channels, + in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) + + self.reduction_cell_0 = ReductionCell0( + in_chs_left=6 * channels, out_chs_left=2 * channels, + in_chs_right=6 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_6 = FirstCell( + in_chs_left=6 * channels, out_chs_left=channels, + in_chs_right=8 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_7 = NormalCell( + in_chs_left=8 * channels, out_chs_left=2 * channels, + in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_8 = NormalCell( + in_chs_left=12 * channels, out_chs_left=2 * channels, + in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_9 = NormalCell( + in_chs_left=12 * channels, out_chs_left=2 * channels, + in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_10 = NormalCell( + in_chs_left=12 * channels, out_chs_left=2 * channels, + in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) + self.cell_11 = NormalCell( + in_chs_left=12 * channels, out_chs_left=2 * channels, + in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) + + self.reduction_cell_1 = ReductionCell1( + in_chs_left=12 * channels, out_chs_left=4 * channels, + in_chs_right=12 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_12 = FirstCell( + in_chs_left=12 * channels, out_chs_left=2 * channels, + in_chs_right=16 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_13 = NormalCell( + in_chs_left=16 * channels, out_chs_left=4 * channels, + in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_14 = NormalCell( + in_chs_left=24 * channels, out_chs_left=4 * channels, + in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_15 = NormalCell( + in_chs_left=24 * channels, out_chs_left=4 * channels, + in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_16 = NormalCell( + in_chs_left=24 * channels, out_chs_left=4 * channels, + in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.cell_17 = NormalCell( + in_chs_left=24 * channels, out_chs_left=4 * channels, + in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) + self.act = nn.ReLU(inplace=True) + self.feature_info = [ + dict(num_chs=96, reduction=2, module='conv0'), + dict(num_chs=168, reduction=4, module='cell_stem_1.conv_1x1.act'), + dict(num_chs=1008, reduction=8, module='reduction_cell_0.conv_1x1.act'), + dict(num_chs=2016, reduction=16, module='reduction_cell_1.conv_1x1.act'), + dict(num_chs=4032, reduction=32, module='act'), + ] + + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def get_classifier(self): + return self.last_linear + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x_conv0 = 
self.conv0(x) + + x_stem_0 = self.cell_stem_0(x_conv0) + x_stem_1 = self.cell_stem_1(x_conv0, x_stem_0) + + x_cell_0 = self.cell_0(x_stem_1, x_stem_0) + x_cell_1 = self.cell_1(x_cell_0, x_stem_1) + x_cell_2 = self.cell_2(x_cell_1, x_cell_0) + x_cell_3 = self.cell_3(x_cell_2, x_cell_1) + x_cell_4 = self.cell_4(x_cell_3, x_cell_2) + x_cell_5 = self.cell_5(x_cell_4, x_cell_3) + + x_reduction_cell_0 = self.reduction_cell_0(x_cell_5, x_cell_4) + x_cell_6 = self.cell_6(x_reduction_cell_0, x_cell_4) + x_cell_7 = self.cell_7(x_cell_6, x_reduction_cell_0) + x_cell_8 = self.cell_8(x_cell_7, x_cell_6) + x_cell_9 = self.cell_9(x_cell_8, x_cell_7) + x_cell_10 = self.cell_10(x_cell_9, x_cell_8) + x_cell_11 = self.cell_11(x_cell_10, x_cell_9) + + x_reduction_cell_1 = self.reduction_cell_1(x_cell_11, x_cell_10) + x_cell_12 = self.cell_12(x_reduction_cell_1, x_cell_10) + x_cell_13 = self.cell_13(x_cell_12, x_reduction_cell_1) + x_cell_14 = self.cell_14(x_cell_13, x_cell_12) + x_cell_15 = self.cell_15(x_cell_14, x_cell_13) + x_cell_16 = self.cell_16(x_cell_15, x_cell_14) + x_cell_17 = self.cell_17(x_cell_16, x_cell_15) + x = self.act(x_cell_17) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0: + x = F.dropout(x, self.drop_rate, training=self.training) + x = self.last_linear(x) + return x + + +def _create_nasnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + NASNetALarge, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(feature_cls='hook', no_rewrite=True), # not possible to re-write this model + **kwargs) + + +@register_model +def nasnetalarge(pretrained=False, **kwargs): + """NASNet-A large model architecture. + """ + model_kwargs = dict(pad_type='same', **kwargs) + return _create_nasnet('nasnetalarge', pretrained, **model_kwargs) diff --git a/timm/models/nest.py b/timm/models/nest.py new file mode 100644 index 0000000..22cf609 --- /dev/null +++ b/timm/models/nest.py @@ -0,0 +1,465 @@ +""" Nested Transformer (NesT) in PyTorch + +A PyTorch implement of Aggregating Nested Transformers as described in: + +'Aggregating Nested Transformers' + - https://arxiv.org/abs/2105.12723 + +The official Jax code is released and available at https://github.com/google-research/nested-transformer. 
The weights +have been converted with convert/convert_nest_flax.py + +Acknowledgments: +* The paper authors for sharing their research, code, and model weights +* Ross Wightman's existing code off which I based this + +Copyright 2021 Alexander Soare +""" + +import collections.abc +import logging +import math +from functools import partial + +import torch +import torch.nn.functional as F +from torch import nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .fx_features import register_notrace_function +from .helpers import build_model_with_cfg, named_apply +from .layers import PatchEmbed, Mlp, DropPath, create_classifier, trunc_normal_ +from .layers import _assert +from .layers import create_conv2d, create_pool2d, to_ntuple +from .registry import register_model + +_logger = logging.getLogger(__name__) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': [14, 14], + 'crop_pct': .875, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + # (weights from official Google JAX impl) + 'nest_base': _cfg(), + 'nest_small': _cfg(), + 'nest_tiny': _cfg(), + 'jx_nest_base': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/jx_nest_base-8bc41011.pth'), + 'jx_nest_small': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/jx_nest_small-422eaded.pth'), + 'jx_nest_tiny': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/jx_nest_tiny-e3428fb9.pth'), +} + + +class Attention(nn.Module): + """ + This is much like `.vision_transformer.Attention` but uses *localised* self attention by accepting an input with + an extra "image block" dim + """ + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, 3*dim, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + """ + x is shape: B (batch_size), T (image blocks), N (seq length per image block), C (embed dim) + """ + B, T, N, C = x.shape + # result of next line is (qkv, B, num (H)eads, T, N, (C')hannels per head) + qkv = self.qkv(x).reshape(B, T, N, 3, self.num_heads, C // self.num_heads).permute(3, 0, 4, 1, 2, 5) + q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) + + attn = (q @ k.transpose(-2, -1)) * self.scale # (B, H, T, N, N) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + # (B, H, T, N, C'), permute -> (B, T, N, C', H) + x = (attn @ v).permute(0, 2, 3, 4, 1).reshape(B, T, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x # (B, T, N, C) + + +class TransformerLayer(nn.Module): + """ + This is much like `.vision_transformer.Block` but: + - Called TransformerLayer here to allow for "block" as defined in the paper ("non-overlapping image blocks") + - Uses modified Attention layer that handles the "block" dimension + """ + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., drop_path=0., + act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention(dim, 
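+            # this Attention is block-local: softmax runs over the N tokens within each of
+            # the T image blocks, never across blocks; cross-block mixing only happens via
+            # the ConvPool aggregation between NesT levels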
num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def forward(self, x): + y = self.norm1(x) + x = x + self.drop_path(self.attn(y)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class ConvPool(nn.Module): + def __init__(self, in_channels, out_channels, norm_layer, pad_type=''): + super().__init__() + self.conv = create_conv2d(in_channels, out_channels, kernel_size=3, padding=pad_type, bias=True) + self.norm = norm_layer(out_channels) + self.pool = create_pool2d('max', kernel_size=3, stride=2, padding=pad_type) + + def forward(self, x): + """ + x is expected to have shape (B, C, H, W) + """ + _assert(x.shape[-2] % 2 == 0, 'BlockAggregation requires even input spatial dims') + _assert(x.shape[-1] % 2 == 0, 'BlockAggregation requires even input spatial dims') + x = self.conv(x) + # Layer norm done over channel dim only + x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + x = self.pool(x) + return x # (B, C, H//2, W//2) + + +def blockify(x, block_size: int): + """image to blocks + Args: + x (Tensor): with shape (B, H, W, C) + block_size (int): edge length of a single square block in units of H, W + """ + B, H, W, C = x.shape + _assert(H % block_size == 0, '`block_size` must divide input height evenly') + _assert(W % block_size == 0, '`block_size` must divide input width evenly') + grid_height = H // block_size + grid_width = W // block_size + x = x.reshape(B, grid_height, block_size, grid_width, block_size, C) + x = x.transpose(2, 3).reshape(B, grid_height * grid_width, -1, C) + return x # (B, T, N, C) + + +@register_notrace_function # reason: int receives Proxy +def deblockify(x, block_size: int): + """blocks to image + Args: + x (Tensor): with shape (B, T, N, C) where T is number of blocks and N is sequence size per block + block_size (int): edge length of a single square block in units of desired H, W + """ + B, T, _, C = x.shape + grid_size = int(math.sqrt(T)) + height = width = grid_size * block_size + x = x.reshape(B, grid_size, grid_size, block_size, block_size, C) + x = x.transpose(2, 3).reshape(B, height, width, C) + return x # (B, H, W, C) + + +class NestLevel(nn.Module): + """ Single hierarchical level of a Nested Transformer + """ + def __init__( + self, num_blocks, block_size, seq_length, num_heads, depth, embed_dim, prev_embed_dim=None, + mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rates=[], + norm_layer=None, act_layer=None, pad_type=''): + super().__init__() + self.block_size = block_size + self.pos_embed = nn.Parameter(torch.zeros(1, num_blocks, seq_length, embed_dim)) + + if prev_embed_dim is not None: + self.pool = ConvPool(prev_embed_dim, embed_dim, norm_layer=norm_layer, pad_type=pad_type) + else: + self.pool = nn.Identity() + + # Transformer encoder + if len(drop_path_rates): + assert len(drop_path_rates) == depth, 'Must provide as many drop path rates as there are transformer layers' + self.transformer_encoder = nn.Sequential(*[ + TransformerLayer( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=drop_path_rates[i], + norm_layer=norm_layer, act_layer=act_layer) + for i in range(depth)]) + + def forward(self, x): + """ + expects x as (B, C, H, W) + """ + x = 
self.pool(x) + x = x.permute(0, 2, 3, 1) # (B, H', W', C), switch to channels last for transformer + x = blockify(x, self.block_size) # (B, T, N, C') + x = x + self.pos_embed + x = self.transformer_encoder(x) # (B, T, N, C') + x = deblockify(x, self.block_size) # (B, H', W', C') + # Channel-first for block aggregation, and generally to replicate convnet feature map at each stage + return x.permute(0, 3, 1, 2) # (B, C, H', W') + + +class Nest(nn.Module): + """ Nested Transformer (NesT) + + A PyTorch impl of : `Aggregating Nested Transformers` + - https://arxiv.org/abs/2105.12723 + """ + + def __init__(self, img_size=224, in_chans=3, patch_size=4, num_levels=3, embed_dims=(128, 256, 512), + num_heads=(4, 8, 16), depths=(2, 2, 20), num_classes=1000, mlp_ratio=4., qkv_bias=True, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0.5, norm_layer=None, act_layer=None, + pad_type='', weight_init='', global_pool='avg'): + """ + Args: + img_size (int, tuple): input image size + in_chans (int): number of input channels + patch_size (int): patch size + num_levels (int): number of block hierarchies (T_d in the paper) + embed_dims (int, tuple): embedding dimensions of each level + num_heads (int, tuple): number of attention heads for each level + depths (int, tuple): number of transformer layers for each level + num_classes (int): number of classes for classification head + mlp_ratio (int): ratio of mlp hidden dim to embedding dim for MLP of transformer layers + qkv_bias (bool): enable bias for qkv if True + drop_rate (float): dropout rate for MLP of transformer layers, MSA final projection layer, and classifier + attn_drop_rate (float): attention dropout rate + drop_path_rate (float): stochastic depth rate + norm_layer: (nn.Module): normalization layer for transformer layers + act_layer: (nn.Module): activation layer in MLP of transformer layers + pad_type: str: Type of padding to use '' for PyTorch symmetric, 'same' for TF SAME + weight_init: (str): weight init scheme + global_pool: (str): type of pooling operation to apply to final feature map + + Notes: + - Default values follow NesT-B from the original Jax code. + - `embed_dims`, `num_heads`, `depths` should be ints or tuples with length `num_levels`. + - For those following the paper, Table A1 may have errors! + - https://github.com/google-research/nested-transformer/issues/2 + """ + super().__init__() + + for param_name in ['embed_dims', 'num_heads', 'depths']: + param_value = locals()[param_name] + if isinstance(param_value, collections.abc.Sequence): + assert len(param_value) == num_levels, f'Require `len({param_name}) == num_levels`' + + embed_dims = to_ntuple(num_levels)(embed_dims) + num_heads = to_ntuple(num_levels)(num_heads) + depths = to_ntuple(num_levels)(depths) + self.num_classes = num_classes + self.num_features = embed_dims[-1] + self.feature_info = [] + norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) + act_layer = act_layer or nn.GELU + self.drop_rate = drop_rate + self.num_levels = num_levels + if isinstance(img_size, collections.abc.Sequence): + assert img_size[0] == img_size[1], 'Model only handles square inputs' + img_size = img_size[0] + assert img_size % patch_size == 0, '`patch_size` must divide `img_size` evenly' + self.patch_size = patch_size + + # Number of blocks at each level + self.num_blocks = (4 ** torch.arange(num_levels)).flip(0).tolist() + assert (img_size // patch_size) % math.sqrt(self.num_blocks[0]) == 0, \ + 'First level blocks don\'t fit evenly. 
Check `img_size`, `patch_size`, and `num_levels`'
+
+        # Block edge size in units of patches
+        # Hint: (img_size // patch_size) gives number of patches along edge of image. sqrt(self.num_blocks[0]) is the
+        # number of blocks along edge of image
+        self.block_size = int((img_size // patch_size) // math.sqrt(self.num_blocks[0]))
+
+        # Patch embedding
+        self.patch_embed = PatchEmbed(
+            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dims[0], flatten=False)
+        self.num_patches = self.patch_embed.num_patches
+        self.seq_length = self.num_patches // self.num_blocks[0]
+
+        # Build up each hierarchical level
+        levels = []
+        dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
+        prev_dim = None
+        curr_stride = 4
+        for i in range(len(self.num_blocks)):
+            dim = embed_dims[i]
+            levels.append(NestLevel(
+                self.num_blocks[i], self.block_size, self.seq_length, num_heads[i], depths[i], dim, prev_dim,
+                mlp_ratio, qkv_bias, drop_rate, attn_drop_rate, dp_rates[i], norm_layer, act_layer, pad_type=pad_type))
+            self.feature_info += [dict(num_chs=dim, reduction=curr_stride, module=f'levels.{i}')]
+            prev_dim = dim
+            curr_stride *= 2
+        self.levels = nn.Sequential(*levels)
+
+        # Final normalization layer
+        self.norm = norm_layer(embed_dims[-1])
+
+        # Classifier
+        self.global_pool, self.head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
+
+        self.init_weights(weight_init)
+
+    def init_weights(self, mode=''):
+        assert mode in ('nlhb', '')
+        head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0.
+        for level in self.levels:
+            trunc_normal_(level.pos_embed, std=.02, a=-2, b=2)
+        named_apply(partial(_init_nest_weights, head_bias=head_bias), self)
+
+    @torch.jit.ignore
+    def no_weight_decay(self):
+        # keys must match named_parameters(), which are rooted at the 'levels' Sequential
+        return {f'levels.{i}.pos_embed' for i in range(len(self.levels))}
+
+    def get_classifier(self):
+        return self.head
+
+    def reset_classifier(self, num_classes, global_pool='avg'):
+        self.num_classes = num_classes
+        self.global_pool, self.head = create_classifier(
+            self.num_features, self.num_classes, pool_type=global_pool)
+
+    def forward_features(self, x):
+        """ x shape (B, C, H, W)
+        """
+        x = self.patch_embed(x)
+        x = self.levels(x)
+        # Layer norm done over channel dim only (to NHWC and back)
+        x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
+        return x
+
+    def forward(self, x):
+        """ x shape (B, C, H, W)
+        """
+        x = self.forward_features(x)
+        x = self.global_pool(x)
+        if self.drop_rate > 0.:
+            x = F.dropout(x, p=self.drop_rate, training=self.training)
+        return self.head(x)
+
+
+def _init_nest_weights(module: nn.Module, name: str = '', head_bias: float = 0.):
+    """ NesT weight initialization
+    Can replicate Jax implementation. 
Otherwise follows vision_transformer.py + """ + if isinstance(module, nn.Linear): + if name.startswith('head'): + trunc_normal_(module.weight, std=.02, a=-2, b=2) + nn.init.constant_(module.bias, head_bias) + else: + trunc_normal_(module.weight, std=.02, a=-2, b=2) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Conv2d): + trunc_normal_(module.weight, std=.02, a=-2, b=2) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, (nn.LayerNorm, nn.GroupNorm, nn.BatchNorm2d)): + nn.init.zeros_(module.bias) + nn.init.ones_(module.weight) + + +def resize_pos_embed(posemb, posemb_new): + """ + Rescale the grid of position embeddings when loading from state_dict + Expected shape of position embeddings is (1, T, N, C), and considers only square images + """ + _logger.info('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape) + seq_length_old = posemb.shape[2] + num_blocks_new, seq_length_new = posemb_new.shape[1:3] + size_new = int(math.sqrt(num_blocks_new*seq_length_new)) + # First change to (1, C, H, W) + posemb = deblockify(posemb, int(math.sqrt(seq_length_old))).permute(0, 3, 1, 2) + posemb = F.interpolate(posemb, size=[size_new, size_new], mode='bicubic', align_corners=False) + # Now change to new (1, T, N, C) + posemb = blockify(posemb.permute(0, 2, 3, 1), int(math.sqrt(seq_length_new))) + return posemb + + +def checkpoint_filter_fn(state_dict, model): + """ resize positional embeddings of pretrained weights """ + pos_embed_keys = [k for k in state_dict.keys() if k.startswith('pos_embed_')] + for k in pos_embed_keys: + if state_dict[k].shape != getattr(model, k).shape: + state_dict[k] = resize_pos_embed(state_dict[k], getattr(model, k)) + return state_dict + + +def _create_nest(variant, pretrained=False, default_cfg=None, **kwargs): + default_cfg = default_cfg or default_cfgs[variant] + model = build_model_with_cfg( + Nest, variant, pretrained, + default_cfg=default_cfg, + feature_cfg=dict(out_indices=(0, 1, 2), flatten_sequential=True), + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + + return model + + +@register_model +def nest_base(pretrained=False, **kwargs): + """ Nest-B @ 224x224 + """ + model_kwargs = dict( + embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), **kwargs) + model = _create_nest('nest_base', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def nest_small(pretrained=False, **kwargs): + """ Nest-S @ 224x224 + """ + model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 20), **kwargs) + model = _create_nest('nest_small', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def nest_tiny(pretrained=False, **kwargs): + """ Nest-T @ 224x224 + """ + model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 8), **kwargs) + model = _create_nest('nest_tiny', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def jx_nest_base(pretrained=False, **kwargs): + """ Nest-B @ 224x224, Pretrained weights converted from official Jax impl. + """ + kwargs['pad_type'] = 'same' + model_kwargs = dict(embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), **kwargs) + model = _create_nest('jx_nest_base', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def jx_nest_small(pretrained=False, **kwargs): + """ Nest-S @ 224x224, Pretrained weights converted from official Jax impl. 
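+    Uses 'same' padding so convolutions line up with the TF/JAX behavior of the released weights.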
+ """ + kwargs['pad_type'] = 'same' + model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 20), **kwargs) + model = _create_nest('jx_nest_small', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def jx_nest_tiny(pretrained=False, **kwargs): + """ Nest-T @ 224x224, Pretrained weights converted from official Jax impl. + """ + kwargs['pad_type'] = 'same' + model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 8), **kwargs) + model = _create_nest('jx_nest_tiny', pretrained=pretrained, **model_kwargs) + return model diff --git a/timm/models/nfnet.py b/timm/models/nfnet.py new file mode 100644 index 0000000..973cbd6 --- /dev/null +++ b/timm/models/nfnet.py @@ -0,0 +1,968 @@ +""" Normalization Free Nets. NFNet, NF-RegNet, NF-ResNet (pre-activation) Models + +Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + +Paper: `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + +Official Deepmind JAX code: https://github.com/deepmind/deepmind-research/tree/master/nfnets + +Status: +* These models are a work in progress, experiments ongoing. +* Pretrained weights for two models so far, more to come. +* Model details updated to closer match official JAX code now that it's released +* NF-ResNet, NF-RegNet-B, and NFNet-F models supported + +Hacked together by / copyright Ross Wightman, 2021. +""" +import math +from dataclasses import dataclass, field +from collections import OrderedDict +from typing import Tuple, Optional +from functools import partial + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .fx_features import register_notrace_module +from .helpers import build_model_with_cfg +from .registry import register_model +from .layers import ClassifierHead, DropPath, AvgPool2dSame, ScaledStdConv2d, ScaledStdConv2dSame,\ + get_act_layer, get_act_fn, get_attn, make_divisible + + +def _dcfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.9, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv1', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = dict( + dm_nfnet_f0=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f0-604f9c3a.pth', + pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256), crop_pct=.9), + dm_nfnet_f1=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f1-fc540f82.pth', + pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 320, 320), crop_pct=0.91), + dm_nfnet_f2=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f2-89875923.pth', + pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 352, 352), crop_pct=0.92), + dm_nfnet_f3=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f3-d74ab3aa.pth', + pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 416, 416), crop_pct=0.94), + dm_nfnet_f4=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f4-0ac5b10b.pth', + pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 
512, 512), crop_pct=0.951), + dm_nfnet_f5=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f5-ecb20ab1.pth', + pool_size=(13, 13), input_size=(3, 416, 416), test_input_size=(3, 544, 544), crop_pct=0.954), + dm_nfnet_f6=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f6-e0f12116.pth', + pool_size=(14, 14), input_size=(3, 448, 448), test_input_size=(3, 576, 576), crop_pct=0.956), + + nfnet_f0=_dcfg( + url='', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256)), + nfnet_f1=_dcfg( + url='', pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 320, 320)), + nfnet_f2=_dcfg( + url='', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 352, 352)), + nfnet_f3=_dcfg( + url='', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 416, 416)), + nfnet_f4=_dcfg( + url='', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 512, 512)), + nfnet_f5=_dcfg( + url='', pool_size=(13, 13), input_size=(3, 416, 416), test_input_size=(3, 544, 544)), + nfnet_f6=_dcfg( + url='', pool_size=(14, 14), input_size=(3, 448, 448), test_input_size=(3, 576, 576)), + nfnet_f7=_dcfg( + url='', pool_size=(15, 15), input_size=(3, 480, 480), test_input_size=(3, 608, 608)), + + nfnet_f0s=_dcfg( + url='', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256)), + nfnet_f1s=_dcfg( + url='', pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 320, 320)), + nfnet_f2s=_dcfg( + url='', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 352, 352)), + nfnet_f3s=_dcfg( + url='', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 416, 416)), + nfnet_f4s=_dcfg( + url='', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 512, 512)), + nfnet_f5s=_dcfg( + url='', pool_size=(13, 13), input_size=(3, 416, 416), test_input_size=(3, 544, 544)), + nfnet_f6s=_dcfg( + url='', pool_size=(14, 14), input_size=(3, 448, 448), test_input_size=(3, 576, 576)), + nfnet_f7s=_dcfg( + url='', pool_size=(15, 15), input_size=(3, 480, 480), test_input_size=(3, 608, 608)), + + nfnet_l0=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nfnet_l0_ra2-45c6688d.pth', + pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 288, 288), crop_pct=1.0), + eca_nfnet_l0=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l0_ra2-e3e9ac50.pth', + hf_hub='timm/eca_nfnet_l0', + pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 288, 288), crop_pct=1.0), + eca_nfnet_l1=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l1_ra2-7dce93cd.pth', + pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 320, 320), crop_pct=1.0), + eca_nfnet_l2=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l2_ra3-da781a61.pth', + pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 384, 384), crop_pct=1.0), + eca_nfnet_l3=_dcfg( + url='', + pool_size=(11, 11), input_size=(3, 352, 352), test_input_size=(3, 448, 448), crop_pct=1.0), + + nf_regnet_b0=_dcfg( + url='', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256), first_conv='stem.conv'), + nf_regnet_b1=_dcfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nf_regnet_b1_256_ra2-ad85cfef.pth', + pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 288, 288), first_conv='stem.conv'), # NOT to paper spec + nf_regnet_b2=_dcfg( + url='', pool_size=(8, 8), input_size=(3, 240, 240), test_input_size=(3, 272, 272), first_conv='stem.conv'), + nf_regnet_b3=_dcfg( + url='', pool_size=(9, 9), input_size=(3, 288, 288), test_input_size=(3, 320, 320), first_conv='stem.conv'), + nf_regnet_b4=_dcfg( + url='', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 384, 384), first_conv='stem.conv'), + nf_regnet_b5=_dcfg( + url='', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 456, 456), first_conv='stem.conv'), + + nf_resnet26=_dcfg(url='', first_conv='stem.conv'), + nf_resnet50=_dcfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nf_resnet50_ra2-9f236009.pth', + pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 288, 288), crop_pct=0.94, first_conv='stem.conv'), + nf_resnet101=_dcfg(url='', first_conv='stem.conv'), + + nf_seresnet26=_dcfg(url='', first_conv='stem.conv'), + nf_seresnet50=_dcfg(url='', first_conv='stem.conv'), + nf_seresnet101=_dcfg(url='', first_conv='stem.conv'), + + nf_ecaresnet26=_dcfg(url='', first_conv='stem.conv'), + nf_ecaresnet50=_dcfg(url='', first_conv='stem.conv'), + nf_ecaresnet101=_dcfg(url='', first_conv='stem.conv'), +) + + +@dataclass +class NfCfg: + depths: Tuple[int, int, int, int] + channels: Tuple[int, int, int, int] + alpha: float = 0.2 + stem_type: str = '3x3' + stem_chs: Optional[int] = None + group_size: Optional[int] = None + attn_layer: Optional[str] = None + attn_kwargs: dict = None + attn_gain: float = 2.0 # NF correction gain to apply if attn layer is used + width_factor: float = 1.0 + bottle_ratio: float = 0.5 + num_features: int = 0 # num out_channels for final conv, no final_conv if 0 + ch_div: int = 8 # round channels % 8 == 0 to keep tensor-core use optimal + reg: bool = False # enables EfficientNet-like options used in RegNet variants, expand from in_chs, se in middle + extra_conv: bool = False # extra 3x3 bottleneck convolution for NFNet models + gamma_in_act: bool = False + same_padding: bool = False + std_conv_eps: float = 1e-5 + skipinit: bool = False # disabled by default, non-trivial performance impact + zero_init_fc: bool = False + act_layer: str = 'silu' + + +def _nfres_cfg( + depths, channels=(256, 512, 1024, 2048), group_size=None, act_layer='relu', attn_layer=None, attn_kwargs=None): + attn_kwargs = attn_kwargs or {} + cfg = NfCfg( + depths=depths, channels=channels, stem_type='7x7_pool', stem_chs=64, bottle_ratio=0.25, + group_size=group_size, act_layer=act_layer, attn_layer=attn_layer, attn_kwargs=attn_kwargs) + return cfg + + +def _nfreg_cfg(depths, channels=(48, 104, 208, 440)): + num_features = 1280 * channels[-1] // 440 + attn_kwargs = dict(rd_ratio=0.5) + cfg = NfCfg( + depths=depths, channels=channels, stem_type='3x3', group_size=8, width_factor=0.75, bottle_ratio=2.25, + num_features=num_features, reg=True, attn_layer='se', attn_kwargs=attn_kwargs) + return cfg + + +def _nfnet_cfg( + depths, channels=(256, 512, 1536, 1536), group_size=128, bottle_ratio=0.5, feat_mult=2., + act_layer='gelu', attn_layer='se', attn_kwargs=None): + num_features = int(channels[-1] * feat_mult) + attn_kwargs = attn_kwargs if attn_kwargs is not None else dict(rd_ratio=0.5) + cfg = NfCfg( + depths=depths, channels=channels, 
stem_type='deep_quad', stem_chs=128, group_size=group_size,
+        bottle_ratio=bottle_ratio, extra_conv=True, num_features=num_features, act_layer=act_layer,
+        attn_layer=attn_layer, attn_kwargs=attn_kwargs)
+    return cfg
+
+
+def _dm_nfnet_cfg(depths, channels=(256, 512, 1536, 1536), act_layer='gelu', skipinit=True):
+    cfg = NfCfg(
+        depths=depths, channels=channels, stem_type='deep_quad', stem_chs=128, group_size=128,
+        bottle_ratio=0.5, extra_conv=True, gamma_in_act=True, same_padding=True, skipinit=skipinit,
+        num_features=int(channels[-1] * 2.0), act_layer=act_layer, attn_layer='se', attn_kwargs=dict(rd_ratio=0.5))
+    return cfg
+
+
+
+model_cfgs = dict(
+    # NFNet-F models w/ GELU compatible with DeepMind weights
+    dm_nfnet_f0=_dm_nfnet_cfg(depths=(1, 2, 6, 3)),
+    dm_nfnet_f1=_dm_nfnet_cfg(depths=(2, 4, 12, 6)),
+    dm_nfnet_f2=_dm_nfnet_cfg(depths=(3, 6, 18, 9)),
+    dm_nfnet_f3=_dm_nfnet_cfg(depths=(4, 8, 24, 12)),
+    dm_nfnet_f4=_dm_nfnet_cfg(depths=(5, 10, 30, 15)),
+    dm_nfnet_f5=_dm_nfnet_cfg(depths=(6, 12, 36, 18)),
+    dm_nfnet_f6=_dm_nfnet_cfg(depths=(7, 14, 42, 21)),
+
+    # NFNet-F models w/ GELU (I will likely deprecate/remove these models and just keep the dm_ versions for GELU)
+    nfnet_f0=_nfnet_cfg(depths=(1, 2, 6, 3)),
+    nfnet_f1=_nfnet_cfg(depths=(2, 4, 12, 6)),
+    nfnet_f2=_nfnet_cfg(depths=(3, 6, 18, 9)),
+    nfnet_f3=_nfnet_cfg(depths=(4, 8, 24, 12)),
+    nfnet_f4=_nfnet_cfg(depths=(5, 10, 30, 15)),
+    nfnet_f5=_nfnet_cfg(depths=(6, 12, 36, 18)),
+    nfnet_f6=_nfnet_cfg(depths=(7, 14, 42, 21)),
+    nfnet_f7=_nfnet_cfg(depths=(8, 16, 48, 24)),
+
+    # NFNet-F models w/ SiLU (much faster in PyTorch)
+    nfnet_f0s=_nfnet_cfg(depths=(1, 2, 6, 3), act_layer='silu'),
+    nfnet_f1s=_nfnet_cfg(depths=(2, 4, 12, 6), act_layer='silu'),
+    nfnet_f2s=_nfnet_cfg(depths=(3, 6, 18, 9), act_layer='silu'),
+    nfnet_f3s=_nfnet_cfg(depths=(4, 8, 24, 12), act_layer='silu'),
+    nfnet_f4s=_nfnet_cfg(depths=(5, 10, 30, 15), act_layer='silu'),
+    nfnet_f5s=_nfnet_cfg(depths=(6, 12, 36, 18), act_layer='silu'),
+    nfnet_f6s=_nfnet_cfg(depths=(7, 14, 42, 21), act_layer='silu'),
+    nfnet_f7s=_nfnet_cfg(depths=(8, 16, 48, 24), act_layer='silu'),
+
+    # Experimental 'light' versions of NFNet-F that are a little leaner
+    nfnet_l0=_nfnet_cfg(
+        depths=(1, 2, 6, 3), feat_mult=1.5, group_size=64, bottle_ratio=0.25,
+        attn_kwargs=dict(rd_ratio=0.25, rd_divisor=8), act_layer='silu'),
+    eca_nfnet_l0=_nfnet_cfg(
+        depths=(1, 2, 6, 3), feat_mult=1.5, group_size=64, bottle_ratio=0.25,
+        attn_layer='eca', attn_kwargs=dict(), act_layer='silu'),
+    eca_nfnet_l1=_nfnet_cfg(
+        depths=(2, 4, 12, 6), feat_mult=2, group_size=64, bottle_ratio=0.25,
+        attn_layer='eca', attn_kwargs=dict(), act_layer='silu'),
+    eca_nfnet_l2=_nfnet_cfg(
+        depths=(3, 6, 18, 9), feat_mult=2, group_size=64, bottle_ratio=0.25,
+        attn_layer='eca', attn_kwargs=dict(), act_layer='silu'),
+    eca_nfnet_l3=_nfnet_cfg(
+        depths=(4, 8, 24, 12), feat_mult=2, group_size=64, bottle_ratio=0.25,
+        attn_layer='eca', attn_kwargs=dict(), act_layer='silu'),
+
+    # EffNet-influenced RegNet defs.
+    # NOTE: These aren't quite the official versions, ch_div=1 must be set for exact ch counts. I round to ch_div=8.
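+    # Worked check of the helper arithmetic above, using only defaults already in
+    # this file (a reading aid, not an extra config): _nfreg_cfg keeps
+    # channels=(48, 104, 208, 440) for B0, so its head width works out to
+    # num_features = 1280 * 440 // 440 = 1280; width_factor and ch_div rounding
+    # are only applied later, inside NormFreeNet itself.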
+ nf_regnet_b0=_nfreg_cfg(depths=(1, 3, 6, 6)), + nf_regnet_b1=_nfreg_cfg(depths=(2, 4, 7, 7)), + nf_regnet_b2=_nfreg_cfg(depths=(2, 4, 8, 8), channels=(56, 112, 232, 488)), + nf_regnet_b3=_nfreg_cfg(depths=(2, 5, 9, 9), channels=(56, 128, 248, 528)), + nf_regnet_b4=_nfreg_cfg(depths=(2, 6, 11, 11), channels=(64, 144, 288, 616)), + nf_regnet_b5=_nfreg_cfg(depths=(3, 7, 14, 14), channels=(80, 168, 336, 704)), + # FIXME add B6-B8 + + # ResNet (preact, D style deep stem/avg down) defs + nf_resnet26=_nfres_cfg(depths=(2, 2, 2, 2)), + nf_resnet50=_nfres_cfg(depths=(3, 4, 6, 3)), + nf_resnet101=_nfres_cfg(depths=(3, 4, 23, 3)), + + nf_seresnet26=_nfres_cfg(depths=(2, 2, 2, 2), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)), + nf_seresnet50=_nfres_cfg(depths=(3, 4, 6, 3), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)), + nf_seresnet101=_nfres_cfg(depths=(3, 4, 23, 3), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)), + + nf_ecaresnet26=_nfres_cfg(depths=(2, 2, 2, 2), attn_layer='eca', attn_kwargs=dict()), + nf_ecaresnet50=_nfres_cfg(depths=(3, 4, 6, 3), attn_layer='eca', attn_kwargs=dict()), + nf_ecaresnet101=_nfres_cfg(depths=(3, 4, 23, 3), attn_layer='eca', attn_kwargs=dict()), + +) + + +class GammaAct(nn.Module): + def __init__(self, act_type='relu', gamma: float = 1.0, inplace=False): + super().__init__() + self.act_fn = get_act_fn(act_type) + self.gamma = gamma + self.inplace = inplace + + def forward(self, x): + return self.act_fn(x, inplace=self.inplace).mul_(self.gamma) + + +def act_with_gamma(act_type, gamma: float = 1.): + def _create(inplace=False): + return GammaAct(act_type, gamma=gamma, inplace=inplace) + return _create + + +class DownsampleAvg(nn.Module): + def __init__( + self, in_chs, out_chs, stride=1, dilation=1, first_dilation=None, conv_layer=ScaledStdConv2d): + """ AvgPool Downsampling as in 'D' ResNet variants. Support for dilation.""" + super(DownsampleAvg, self).__init__() + avg_stride = stride if dilation == 1 else 1 + if stride > 1 or dilation > 1: + avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d + self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) + else: + self.pool = nn.Identity() + self.conv = conv_layer(in_chs, out_chs, 1, stride=1) + + def forward(self, x): + return self.conv(self.pool(x)) + + +@register_notrace_module # reason: mul_ causes FX to drop a relevant node. https://github.com/pytorch/pytorch/issues/68301 +class NormFreeBlock(nn.Module): + """Normalization-Free pre-activation block. 
+ """ + + def __init__( + self, in_chs, out_chs=None, stride=1, dilation=1, first_dilation=None, + alpha=1.0, beta=1.0, bottle_ratio=0.25, group_size=None, ch_div=1, reg=True, extra_conv=False, + skipinit=False, attn_layer=None, attn_gain=2.0, act_layer=None, conv_layer=None, drop_path_rate=0.): + super().__init__() + first_dilation = first_dilation or dilation + out_chs = out_chs or in_chs + # RegNet variants scale bottleneck from in_chs, otherwise scale from out_chs like ResNet + mid_chs = make_divisible(in_chs * bottle_ratio if reg else out_chs * bottle_ratio, ch_div) + groups = 1 if not group_size else mid_chs // group_size + if group_size and group_size % ch_div == 0: + mid_chs = group_size * groups # correct mid_chs if group_size divisible by ch_div, otherwise error + self.alpha = alpha + self.beta = beta + self.attn_gain = attn_gain + + if in_chs != out_chs or stride != 1 or dilation != first_dilation: + self.downsample = DownsampleAvg( + in_chs, out_chs, stride=stride, dilation=dilation, first_dilation=first_dilation, conv_layer=conv_layer) + else: + self.downsample = None + + self.act1 = act_layer() + self.conv1 = conv_layer(in_chs, mid_chs, 1) + self.act2 = act_layer(inplace=True) + self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups) + if extra_conv: + self.act2b = act_layer(inplace=True) + self.conv2b = conv_layer(mid_chs, mid_chs, 3, stride=1, dilation=dilation, groups=groups) + else: + self.act2b = None + self.conv2b = None + if reg and attn_layer is not None: + self.attn = attn_layer(mid_chs) # RegNet blocks apply attn btw conv2 & 3 + else: + self.attn = None + self.act3 = act_layer() + self.conv3 = conv_layer(mid_chs, out_chs, 1, gain_init=1. if skipinit else 0.) + if not reg and attn_layer is not None: + self.attn_last = attn_layer(out_chs) # ResNet blocks apply attn after conv3 + else: + self.attn_last = None + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() + self.skipinit_gain = nn.Parameter(torch.tensor(0.)) if skipinit else None + + def forward(self, x): + out = self.act1(x) * self.beta + + # shortcut branch + shortcut = x + if self.downsample is not None: + shortcut = self.downsample(out) + + # residual branch + out = self.conv1(out) + out = self.conv2(self.act2(out)) + if self.conv2b is not None: + out = self.conv2b(self.act2b(out)) + if self.attn is not None: + out = self.attn_gain * self.attn(out) + out = self.conv3(self.act3(out)) + if self.attn_last is not None: + out = self.attn_gain * self.attn_last(out) + out = self.drop_path(out) + + if self.skipinit_gain is not None: + out.mul_(self.skipinit_gain) # this slows things down more than expected, TBD + out = out * self.alpha + shortcut + return out + + +def create_stem(in_chs, out_chs, stem_type='', conv_layer=None, act_layer=None, preact_feature=True): + stem_stride = 2 + stem_feature = dict(num_chs=out_chs, reduction=2, module='stem.conv') + stem = OrderedDict() + assert stem_type in ('', 'deep', 'deep_tiered', 'deep_quad', '3x3', '7x7', 'deep_pool', '3x3_pool', '7x7_pool') + if 'deep' in stem_type: + if 'quad' in stem_type: + # 4 deep conv stack as in NFNet-F models + assert not 'pool' in stem_type + stem_chs = (out_chs // 8, out_chs // 4, out_chs // 2, out_chs) + strides = (2, 1, 1, 2) + stem_stride = 4 + stem_feature = dict(num_chs=out_chs // 2, reduction=2, module='stem.conv3') + else: + if 'tiered' in stem_type: + stem_chs = (3 * out_chs // 8, out_chs // 2, out_chs) # 'T' resnets in resnet.py + else: + stem_chs = (out_chs // 
2, out_chs // 2, out_chs) # 'D' ResNets + strides = (2, 1, 1) + stem_feature = dict(num_chs=out_chs // 2, reduction=2, module='stem.conv2') + last_idx = len(stem_chs) - 1 + for i, (c, s) in enumerate(zip(stem_chs, strides)): + stem[f'conv{i + 1}'] = conv_layer(in_chs, c, kernel_size=3, stride=s) + if i != last_idx: + stem[f'act{i + 2}'] = act_layer(inplace=True) + in_chs = c + elif '3x3' in stem_type: + # 3x3 stem conv as in RegNet + stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=3, stride=2) + else: + # 7x7 stem conv as in ResNet + stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=7, stride=2) + + if 'pool' in stem_type: + stem['pool'] = nn.MaxPool2d(3, stride=2, padding=1) + stem_stride = 4 + + return nn.Sequential(stem), stem_stride, stem_feature + + +# from https://github.com/deepmind/deepmind-research/tree/master/nfnets +_nonlin_gamma = dict( + identity=1.0, + celu=1.270926833152771, + elu=1.2716004848480225, + gelu=1.7015043497085571, + leaky_relu=1.70590341091156, + log_sigmoid=1.9193484783172607, + log_softmax=1.0002083778381348, + relu=1.7139588594436646, + relu6=1.7131484746932983, + selu=1.0008515119552612, + sigmoid=4.803835391998291, + silu=1.7881293296813965, + softsign=2.338853120803833, + softplus=1.9203323125839233, + tanh=1.5939117670059204, +) + + +class NormFreeNet(nn.Module): + """ Normalization-Free Network + + As described in : + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + and + `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 + + This model aims to cover both the NFRegNet-Bx models as detailed in the paper's code snippets and + the (preact) ResNet models described earlier in the paper. + + There are a few differences: + * channels are rounded to be divisible by 8 by default (keep tensor core kernels happy), + this changes channel dim and param counts slightly from the paper models + * activation correcting gamma constants are moved into the ScaledStdConv as it has less performance + impact in PyTorch when done with the weight scaling there. This likely wasn't a concern in the JAX impl. + * a config option `gamma_in_act` can be enabled to not apply gamma in StdConv as described above, but + apply it in each activation. This is slightly slower, numerically different, but matches official impl. + * skipinit is disabled by default, it seems to have a rather drastic impact on GPU memory use and throughput + for what it is/does. Approx 8-10% throughput loss. + """ + def __init__(self, cfg: NfCfg, num_classes=1000, in_chans=3, global_pool='avg', output_stride=32, + drop_rate=0., drop_path_rate=0.): + super().__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + assert cfg.act_layer in _nonlin_gamma, f"Please add non-linearity constants for activation ({cfg.act_layer})." 
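+        # Rough sketch of the two gamma placements selected below (see the class docstring):
+        #   gamma_in_act=True:  y = std_conv(gamma * act(x)), matching the official JAX impl
+        #   gamma_in_act=False: the same gamma is folded into the ScaledStdConv weight standardization
+        # e.g. for 'silu', gamma = _nonlin_gamma['silu'] ~= 1.788, chosen (per the NF papers)
+        # so that gamma * act(x) has approximately unit variance again when x ~ N(0, 1).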
+ conv_layer = ScaledStdConv2dSame if cfg.same_padding else ScaledStdConv2d + if cfg.gamma_in_act: + act_layer = act_with_gamma(cfg.act_layer, gamma=_nonlin_gamma[cfg.act_layer]) + conv_layer = partial(conv_layer, eps=cfg.std_conv_eps) + else: + act_layer = get_act_layer(cfg.act_layer) + conv_layer = partial(conv_layer, gamma=_nonlin_gamma[cfg.act_layer], eps=cfg.std_conv_eps) + attn_layer = partial(get_attn(cfg.attn_layer), **cfg.attn_kwargs) if cfg.attn_layer else None + + stem_chs = make_divisible((cfg.stem_chs or cfg.channels[0]) * cfg.width_factor, cfg.ch_div) + self.stem, stem_stride, stem_feat = create_stem( + in_chans, stem_chs, cfg.stem_type, conv_layer=conv_layer, act_layer=act_layer) + + self.feature_info = [stem_feat] + drop_path_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)] + prev_chs = stem_chs + net_stride = stem_stride + dilation = 1 + expected_var = 1.0 + stages = [] + for stage_idx, stage_depth in enumerate(cfg.depths): + stride = 1 if stage_idx == 0 and stem_stride > 2 else 2 + if net_stride >= output_stride and stride > 1: + dilation *= stride + stride = 1 + net_stride *= stride + first_dilation = 1 if dilation in (1, 2) else 2 + + blocks = [] + for block_idx in range(cfg.depths[stage_idx]): + first_block = block_idx == 0 and stage_idx == 0 + out_chs = make_divisible(cfg.channels[stage_idx] * cfg.width_factor, cfg.ch_div) + blocks += [NormFreeBlock( + in_chs=prev_chs, out_chs=out_chs, + alpha=cfg.alpha, + beta=1. / expected_var ** 0.5, + stride=stride if block_idx == 0 else 1, + dilation=dilation, + first_dilation=first_dilation, + group_size=cfg.group_size, + bottle_ratio=1. if cfg.reg and first_block else cfg.bottle_ratio, + ch_div=cfg.ch_div, + reg=cfg.reg, + extra_conv=cfg.extra_conv, + skipinit=cfg.skipinit, + attn_layer=attn_layer, + attn_gain=cfg.attn_gain, + act_layer=act_layer, + conv_layer=conv_layer, + drop_path_rate=drop_path_rates[stage_idx][block_idx], + )] + if block_idx == 0: + expected_var = 1. # expected var is reset after first block of each stage + expected_var += cfg.alpha ** 2 # Even if reset occurs, increment expected variance + first_dilation = dilation + prev_chs = out_chs + self.feature_info += [dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}')] + stages += [nn.Sequential(*blocks)] + self.stages = nn.Sequential(*stages) + + if cfg.num_features: + # The paper NFRegNet models have an EfficientNet-like final head convolution. 
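+            # Worked value for the B0 config above: make_divisible(0.75 * 1280, 8) = 960,
+            # so nf_regnet_b0 ends in a 960-wide head conv; NFNet-F cfgs (width_factor=1.)
+            # keep their num_features (e.g. 2 * 1536 = 3072) unchanged here.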
+ self.num_features = make_divisible(cfg.width_factor * cfg.num_features, cfg.ch_div) + self.final_conv = conv_layer(prev_chs, self.num_features, 1) + self.feature_info[-1] = dict(num_chs=self.num_features, reduction=net_stride, module=f'final_conv') + else: + self.num_features = prev_chs + self.final_conv = nn.Identity() + self.final_act = act_layer(inplace=cfg.num_features > 0) + + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + for n, m in self.named_modules(): + if 'fc' in n and isinstance(m, nn.Linear): + if cfg.zero_init_fc: + nn.init.zeros_(m.weight) + else: + nn.init.normal_(m.weight, 0., .01) + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='linear') + if m.bias is not None: + nn.init.zeros_(m.bias) + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + x = self.stem(x) + x = self.stages(x) + x = self.final_conv(x) + x = self.final_act(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _create_normfreenet(variant, pretrained=False, **kwargs): + model_cfg = model_cfgs[variant] + feature_cfg = dict(flatten_sequential=True) + return build_model_with_cfg( + NormFreeNet, variant, pretrained, + default_cfg=default_cfgs[variant], + model_cfg=model_cfg, + feature_cfg=feature_cfg, + **kwargs) + + +@register_model +def dm_nfnet_f0(pretrained=False, **kwargs): + """ NFNet-F0 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f0', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f1(pretrained=False, **kwargs): + """ NFNet-F1 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f1', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f2(pretrained=False, **kwargs): + """ NFNet-F2 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f2', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f3(pretrained=False, **kwargs): + """ NFNet-F3 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f3', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f4(pretrained=False, **kwargs): + """ NFNet-F4 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f4', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f5(pretrained=False, **kwargs): + """ NFNet-F5 (DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f5', pretrained=pretrained, **kwargs) + + +@register_model +def dm_nfnet_f6(pretrained=False, **kwargs): + """ NFNet-F6 
(DeepMind weight compatible) + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('dm_nfnet_f6', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f0(pretrained=False, **kwargs): + """ NFNet-F0 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f0', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f1(pretrained=False, **kwargs): + """ NFNet-F1 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f1', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f2(pretrained=False, **kwargs): + """ NFNet-F2 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f2', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f3(pretrained=False, **kwargs): + """ NFNet-F3 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f3', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f4(pretrained=False, **kwargs): + """ NFNet-F4 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f4', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f5(pretrained=False, **kwargs): + """ NFNet-F5 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f5', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f6(pretrained=False, **kwargs): + """ NFNet-F6 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f6', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f7(pretrained=False, **kwargs): + """ NFNet-F7 + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f7', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f0s(pretrained=False, **kwargs): + """ NFNet-F0 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f0s', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f1s(pretrained=False, **kwargs): + """ NFNet-F1 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f1s', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f2s(pretrained=False, **kwargs): + """ NFNet-F2 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f2s', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f3s(pretrained=False, **kwargs): + """ NFNet-F3 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f3s', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f4s(pretrained=False, **kwargs): 
+ """ NFNet-F4 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f4s', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f5s(pretrained=False, **kwargs): + """ NFNet-F5 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f5s', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f6s(pretrained=False, **kwargs): + """ NFNet-F6 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f6s', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_f7s(pretrained=False, **kwargs): + """ NFNet-F7 w/ SiLU + `High-Performance Large-Scale Image Recognition Without Normalization` + - https://arxiv.org/abs/2102.06171 + """ + return _create_normfreenet('nfnet_f7s', pretrained=pretrained, **kwargs) + + +@register_model +def nfnet_l0(pretrained=False, **kwargs): + """ NFNet-L0b w/ SiLU + My experimental 'light' model w/ F0 repeats, 1.5x final_conv mult, 64 group_size, .25 bottleneck & SE ratio + """ + return _create_normfreenet('nfnet_l0', pretrained=pretrained, **kwargs) + + +@register_model +def eca_nfnet_l0(pretrained=False, **kwargs): + """ ECA-NFNet-L0 w/ SiLU + My experimental 'light' model w/ F0 repeats, 1.5x final_conv mult, 64 group_size, .25 bottleneck & ECA attn + """ + return _create_normfreenet('eca_nfnet_l0', pretrained=pretrained, **kwargs) + + +@register_model +def eca_nfnet_l1(pretrained=False, **kwargs): + """ ECA-NFNet-L1 w/ SiLU + My experimental 'light' model w/ F1 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn + """ + return _create_normfreenet('eca_nfnet_l1', pretrained=pretrained, **kwargs) + + +@register_model +def eca_nfnet_l2(pretrained=False, **kwargs): + """ ECA-NFNet-L2 w/ SiLU + My experimental 'light' model w/ F2 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn + """ + return _create_normfreenet('eca_nfnet_l2', pretrained=pretrained, **kwargs) + + +@register_model +def eca_nfnet_l3(pretrained=False, **kwargs): + """ ECA-NFNet-L3 w/ SiLU + My experimental 'light' model w/ F3 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn + """ + return _create_normfreenet('eca_nfnet_l3', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b0(pretrained=False, **kwargs): + """ Normalization-Free RegNet-B0 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b0', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b1(pretrained=False, **kwargs): + """ Normalization-Free RegNet-B1 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b1', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b2(pretrained=False, **kwargs): + """ Normalization-Free RegNet-B2 + `Characterizing signal propagation to close the performance gap in unnormalized ResNets` + - https://arxiv.org/abs/2101.08692 + """ + return _create_normfreenet('nf_regnet_b2', pretrained=pretrained, **kwargs) + + +@register_model +def nf_regnet_b3(pretrained=False, **kwargs): + """ Normalization-Free RegNet-B3 + `Characterizing 
signal propagation to close the performance gap in unnormalized ResNets`
+    - https://arxiv.org/abs/2101.08692
+    """
+    return _create_normfreenet('nf_regnet_b3', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def nf_regnet_b4(pretrained=False, **kwargs):
+    """ Normalization-Free RegNet-B4
+    `Characterizing signal propagation to close the performance gap in unnormalized ResNets`
+    - https://arxiv.org/abs/2101.08692
+    """
+    return _create_normfreenet('nf_regnet_b4', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def nf_regnet_b5(pretrained=False, **kwargs):
+    """ Normalization-Free RegNet-B5
+    `Characterizing signal propagation to close the performance gap in unnormalized ResNets`
+    - https://arxiv.org/abs/2101.08692
+    """
+    return _create_normfreenet('nf_regnet_b5', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def nf_resnet26(pretrained=False, **kwargs):
+    """ Normalization-Free ResNet-26
+    `Characterizing signal propagation to close the performance gap in unnormalized ResNets`
+    - https://arxiv.org/abs/2101.08692
+    """
+    return _create_normfreenet('nf_resnet26', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def nf_resnet50(pretrained=False, **kwargs):
+    """ Normalization-Free ResNet-50
+    `Characterizing signal propagation to close the performance gap in unnormalized ResNets`
+    - https://arxiv.org/abs/2101.08692
+    """
+    return _create_normfreenet('nf_resnet50', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def nf_resnet101(pretrained=False, **kwargs):
+    """ Normalization-Free ResNet-101
+    `Characterizing signal propagation to close the performance gap in unnormalized ResNets`
+    - https://arxiv.org/abs/2101.08692
+    """
+    return _create_normfreenet('nf_resnet101', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def nf_seresnet26(pretrained=False, **kwargs):
+    """ Normalization-Free SE-ResNet26
+    """
+    return _create_normfreenet('nf_seresnet26', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def nf_seresnet50(pretrained=False, **kwargs):
+    """ Normalization-Free SE-ResNet50
+    """
+    return _create_normfreenet('nf_seresnet50', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def nf_seresnet101(pretrained=False, **kwargs):
+    """ Normalization-Free SE-ResNet101
+    """
+    return _create_normfreenet('nf_seresnet101', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def nf_ecaresnet26(pretrained=False, **kwargs):
+    """ Normalization-Free ECA-ResNet26
+    """
+    return _create_normfreenet('nf_ecaresnet26', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def nf_ecaresnet50(pretrained=False, **kwargs):
+    """ Normalization-Free ECA-ResNet50
+    """
+    return _create_normfreenet('nf_ecaresnet50', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def nf_ecaresnet101(pretrained=False, **kwargs):
+    """ Normalization-Free ECA-ResNet101
+    """
+    return _create_normfreenet('nf_ecaresnet101', pretrained=pretrained, **kwargs)
diff --git a/timm/models/pit.py b/timm/models/pit.py
new file mode 100644
index 0000000..460824e
--- /dev/null
+++ b/timm/models/pit.py
@@ -0,0 +1,384 @@
+""" Pooling-based Vision Transformer (PiT) in PyTorch
+
+A PyTorch implementation of Pooling-based Vision Transformers as described in
+'Rethinking Spatial Dimensions of Vision Transformers' - https://arxiv.org/abs/2103.16302
+
+This code was adapted from the original version at https://github.com/naver-ai/pit, original copyright below.
+
+Modifications for timm by / Copyright 2020 Ross Wightman
+"""
+# PiT
+# Copyright 2021-present NAVER Corp.
+# Apache License v2.0
+
+import math
+import re
+from copy import deepcopy
+from functools import partial
+from typing import Tuple
+
+import torch
+from torch import nn
+
+from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
+from .helpers import build_model_with_cfg, overlay_external_default_cfg
+from .layers import trunc_normal_, to_2tuple
+from .registry import register_model
+from .vision_transformer import Block
+
+
+def _cfg(url='', **kwargs):
+    return {
+        'url': url,
+        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
+        'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
+        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
+        'first_conv': 'patch_embed.conv', 'classifier': 'head',
+        **kwargs
+    }
+
+
+default_cfgs = {
+    # PiT models (weights ported from the official impl at https://github.com/naver-ai/pit)
+    'pit_ti_224': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_ti_730.pth'),
+    'pit_xs_224': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_xs_781.pth'),
+    'pit_s_224': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_s_809.pth'),
+    'pit_b_224': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_b_820.pth'),
+    'pit_ti_distilled_224': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_ti_distill_746.pth',
+        classifier=('head', 'head_dist')),
+    'pit_xs_distilled_224': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_xs_distill_791.pth',
+        classifier=('head', 'head_dist')),
+    'pit_s_distilled_224': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_s_distill_819.pth',
+        classifier=('head', 'head_dist')),
+    'pit_b_distilled_224': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_b_distill_840.pth',
+        classifier=('head', 'head_dist')),
+}
+
+
+class SequentialTuple(nn.Sequential):
+    """ This module exists to work around torchscript typing issues list -> list"""
+    def __init__(self, *args):
+        super(SequentialTuple, self).__init__(*args)
+
+    def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
+        for module in self:
+            x = module(x)
+        return x
+
+
+class Transformer(nn.Module):
+    def __init__(
+            self, base_dim, depth, heads, mlp_ratio, pool=None, drop_rate=.0, attn_drop_rate=.0, drop_path_prob=None):
+        super(Transformer, self).__init__()
+        self.layers = nn.ModuleList([])
+        embed_dim = base_dim * heads
+
+        self.blocks = nn.Sequential(*[
+            Block(
+                dim=embed_dim,
+                num_heads=heads,
+                mlp_ratio=mlp_ratio,
+                qkv_bias=True,
+                drop=drop_rate,
+                attn_drop=attn_drop_rate,
+                drop_path=drop_path_prob[i],
+                norm_layer=partial(nn.LayerNorm, eps=1e-6)
+            )
+            for i in range(depth)])
+
+        self.pool = pool
+
+    def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
+        x, cls_tokens = x
+        B, C, H, W = x.shape
+        token_length = cls_tokens.shape[1]
+
+        x = x.flatten(2).transpose(1, 2)
+        x = torch.cat((cls_tokens, x), dim=1)
+
+        x = self.blocks(x)
+
+        cls_tokens = x[:, :token_length]
+        x = x[:, token_length:]
+        x = x.transpose(1, 2).reshape(B, C, H, W)
+
+        if self.pool is not None:
+            x, cls_tokens = self.pool(x, cls_tokens)
+        return x, cls_tokens
+
+
+class ConvHeadPooling(nn.Module):
+    def __init__(self, in_feature, out_feature, stride, padding_mode='zeros'):
+        super(ConvHeadPooling, self).__init__()
+
+        self.conv = nn.Conv2d(
+            in_feature, out_feature, kernel_size=stride + 1, padding=stride // 2, stride=stride,
+            padding_mode=padding_mode, groups=in_feature)
+        self.fc = nn.Linear(in_feature, out_feature)
+
+    def forward(self, x, cls_token) -> Tuple[torch.Tensor, torch.Tensor]:
+
+        x = self.conv(x)
+        cls_token = self.fc(cls_token)
+
+        return x, cls_token
+
+
+class ConvEmbedding(nn.Module):
+    def __init__(self, in_channels, out_channels, patch_size, stride, padding):
+        super(ConvEmbedding, self).__init__()
+        self.conv = nn.Conv2d(
+            in_channels, out_channels, kernel_size=patch_size, stride=stride, padding=padding, bias=True)
+
+    def forward(self, x):
+        x = self.conv(x)
+        return x
+
+
+class PoolingVisionTransformer(nn.Module):
+    """ Pooling-based Vision Transformer
+
+    A PyTorch implementation of 'Rethinking Spatial Dimensions of Vision Transformers'
+    - https://arxiv.org/abs/2103.16302
+    """
+    def __init__(self, img_size, patch_size, stride, base_dims, depth, heads,
+                 mlp_ratio, num_classes=1000, in_chans=3, distilled=False,
+                 attn_drop_rate=.0, drop_rate=.0, drop_path_rate=.0):
+        super(PoolingVisionTransformer, self).__init__()
+
+        padding = 0
+        img_size = to_2tuple(img_size)
+        patch_size = to_2tuple(patch_size)
+        height = math.floor((img_size[0] + 2 * padding - patch_size[0]) / stride + 1)
+        width = math.floor((img_size[1] + 2 * padding - patch_size[1]) / stride + 1)
+
+        self.base_dims = base_dims
+        self.heads = heads
+        self.num_classes = num_classes
+        self.num_tokens = 2 if distilled else 1
+
+        self.patch_size = patch_size
+        self.pos_embed = nn.Parameter(torch.randn(1, base_dims[0] * heads[0], height, width))
+        self.patch_embed = ConvEmbedding(in_chans, base_dims[0] * heads[0], patch_size, stride, padding)
+
+        self.cls_token = nn.Parameter(torch.randn(1, self.num_tokens, base_dims[0] * heads[0]))
+        self.pos_drop = nn.Dropout(p=drop_rate)
+
+        transformers = []
+        # stochastic depth decay rule
+        dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depth)).split(depth)]
+        for stage in range(len(depth)):
+            pool = None
+            if stage < len(heads) - 1:
+                pool = ConvHeadPooling(
+                    base_dims[stage] * heads[stage], base_dims[stage + 1] * heads[stage + 1], stride=2)
+            transformers += [Transformer(
+                base_dims[stage], depth[stage], heads[stage], mlp_ratio, pool=pool,
+                drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_prob=dpr[stage])
+            ]
+        self.transformers = SequentialTuple(*transformers)
+        self.norm = nn.LayerNorm(base_dims[-1] * heads[-1], eps=1e-6)
+        self.num_features = self.embed_dim = base_dims[-1] * heads[-1]
+
+        # Classifier head
+        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
+        self.head_dist = None
+        if distilled:
+            self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
+
+        trunc_normal_(self.pos_embed, std=.02)
+        trunc_normal_(self.cls_token, std=.02)
+        self.apply(self._init_weights)
+
+    def _init_weights(self, m):
+        if isinstance(m, nn.LayerNorm):
+            nn.init.constant_(m.bias, 0)
+            nn.init.constant_(m.weight, 1.0)
+
+    @torch.jit.ignore
+    def no_weight_decay(self):
+        return {'pos_embed', 'cls_token'}
+
+    def get_classifier(self):
+        if self.head_dist is not None:
+            return self.head, self.head_dist
+        else:
+            return self.head
+
+    def reset_classifier(self, num_classes, global_pool=''):
+        self.num_classes = num_classes
+        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else
nn.Identity() + if self.head_dist is not None: + self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + x = self.pos_drop(x + self.pos_embed) + cls_tokens = self.cls_token.expand(x.shape[0], -1, -1) + x, cls_tokens = self.transformers((x, cls_tokens)) + cls_tokens = self.norm(cls_tokens) + if self.head_dist is not None: + return cls_tokens[:, 0], cls_tokens[:, 1] + else: + return cls_tokens[:, 0] + + def forward(self, x): + x = self.forward_features(x) + if self.head_dist is not None: + x, x_dist = self.head(x[0]), self.head_dist(x[1]) # x must be a tuple + if self.training and not torch.jit.is_scripting(): + return x, x_dist + else: + return (x + x_dist) / 2 + else: + return self.head(x) + + +def checkpoint_filter_fn(state_dict, model): + """ preprocess checkpoints """ + out_dict = {} + p_blocks = re.compile(r'pools\.(\d)\.') + for k, v in state_dict.items(): + # FIXME need to update resize for PiT impl + # if k == 'pos_embed' and v.shape != model.pos_embed.shape: + # # To resize pos embedding when using model at different size from pretrained weights + # v = resize_pos_embed(v, model.pos_embed) + k = p_blocks.sub(lambda exp: f'transformers.{int(exp.group(1))}.pool.', k) + out_dict[k] = v + return out_dict + + +def _create_pit(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model = build_model_with_cfg( + PoolingVisionTransformer, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + return model + + +@register_model +def pit_b_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=14, + stride=7, + base_dims=[64, 64, 64], + depth=[3, 6, 4], + heads=[4, 8, 16], + mlp_ratio=4, + **kwargs + ) + return _create_pit('pit_b_224', pretrained, **model_kwargs) + + +@register_model +def pit_s_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=16, + stride=8, + base_dims=[48, 48, 48], + depth=[2, 6, 4], + heads=[3, 6, 12], + mlp_ratio=4, + **kwargs + ) + return _create_pit('pit_s_224', pretrained, **model_kwargs) + + +@register_model +def pit_xs_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=16, + stride=8, + base_dims=[48, 48, 48], + depth=[2, 6, 4], + heads=[2, 4, 8], + mlp_ratio=4, + **kwargs + ) + return _create_pit('pit_xs_224', pretrained, **model_kwargs) + + +@register_model +def pit_ti_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=16, + stride=8, + base_dims=[32, 32, 32], + depth=[2, 6, 4], + heads=[2, 4, 8], + mlp_ratio=4, + **kwargs + ) + return _create_pit('pit_ti_224', pretrained, **model_kwargs) + + +@register_model +def pit_b_distilled_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=14, + stride=7, + base_dims=[64, 64, 64], + depth=[3, 6, 4], + heads=[4, 8, 16], + mlp_ratio=4, + distilled=True, + **kwargs + ) + return _create_pit('pit_b_distilled_224', pretrained, **model_kwargs) + + +@register_model +def pit_s_distilled_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=16, + stride=8, + base_dims=[48, 48, 48], + depth=[2, 6, 4], + heads=[3, 6, 12], + mlp_ratio=4, + distilled=True, + **kwargs + ) + return _create_pit('pit_s_distilled_224', pretrained, **model_kwargs) + + +@register_model +def pit_xs_distilled_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=16, + stride=8, + base_dims=[48, 48, 48], + 
depth=[2, 6, 4], + heads=[2, 4, 8], + mlp_ratio=4, + distilled=True, + **kwargs + ) + return _create_pit('pit_xs_distilled_224', pretrained, **model_kwargs) + + +@register_model +def pit_ti_distilled_224(pretrained, **kwargs): + model_kwargs = dict( + patch_size=16, + stride=8, + base_dims=[32, 32, 32], + depth=[2, 6, 4], + heads=[2, 4, 8], + mlp_ratio=4, + distilled=True, + **kwargs + ) + return _create_pit('pit_ti_distilled_224', pretrained, **model_kwargs) \ No newline at end of file diff --git a/timm/models/pnasnet.py b/timm/models/pnasnet.py new file mode 100644 index 0000000..9991815 --- /dev/null +++ b/timm/models/pnasnet.py @@ -0,0 +1,350 @@ +""" + pnasnet5large implementation grabbed from Cadene's pretrained models + Additional credit to https://github.com/creafz + + https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/pnasnet.py + +""" +from collections import OrderedDict +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .helpers import build_model_with_cfg +from .layers import ConvBnAct, create_conv2d, create_pool2d, create_classifier +from .registry import register_model + +__all__ = ['PNASNet5Large'] + +default_cfgs = { + 'pnasnet5large': { + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/pnasnet5large-bf079911.pth', + 'input_size': (3, 331, 331), + 'pool_size': (11, 11), + 'crop_pct': 0.911, + 'interpolation': 'bicubic', + 'mean': (0.5, 0.5, 0.5), + 'std': (0.5, 0.5, 0.5), + 'num_classes': 1000, + 'first_conv': 'conv_0.conv', + 'classifier': 'last_linear', + 'label_offset': 1, # 1001 classes in pretrained weights + }, +} + + +class SeparableConv2d(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride, padding=''): + super(SeparableConv2d, self).__init__() + self.depthwise_conv2d = create_conv2d( + in_channels, in_channels, kernel_size=kernel_size, + stride=stride, padding=padding, groups=in_channels) + self.pointwise_conv2d = create_conv2d( + in_channels, out_channels, kernel_size=1, padding=padding) + + def forward(self, x): + x = self.depthwise_conv2d(x) + x = self.pointwise_conv2d(x) + return x + + +class BranchSeparables(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, stem_cell=False, padding=''): + super(BranchSeparables, self).__init__() + middle_channels = out_channels if stem_cell else in_channels + self.act_1 = nn.ReLU() + self.separable_1 = SeparableConv2d( + in_channels, middle_channels, kernel_size, stride=stride, padding=padding) + self.bn_sep_1 = nn.BatchNorm2d(middle_channels, eps=0.001) + self.act_2 = nn.ReLU() + self.separable_2 = SeparableConv2d( + middle_channels, out_channels, kernel_size, stride=1, padding=padding) + self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001) + + def forward(self, x): + x = self.act_1(x) + x = self.separable_1(x) + x = self.bn_sep_1(x) + x = self.act_2(x) + x = self.separable_2(x) + x = self.bn_sep_2(x) + return x + + +class ActConvBn(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=''): + super(ActConvBn, self).__init__() + self.act = nn.ReLU() + self.conv = create_conv2d( + in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding) + self.bn = nn.BatchNorm2d(out_channels, eps=0.001) + + def forward(self, x): + x = self.act(x) + x = self.conv(x) + x = self.bn(x) + return x + + +class FactorizedReduction(nn.Module): + + def __init__(self, in_channels, 
out_channels, padding=''):
+        super(FactorizedReduction, self).__init__()
+        self.act = nn.ReLU()
+        self.path_1 = nn.Sequential(OrderedDict([
+            ('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)),
+            ('conv', create_conv2d(in_channels, out_channels // 2, kernel_size=1, padding=padding)),
+        ]))
+        self.path_2 = nn.Sequential(OrderedDict([
+            ('pad', nn.ZeroPad2d((-1, 1, -1, 1))),  # shift one pixel so this path pools the alternate pixel grid
+            ('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)),
+            ('conv', create_conv2d(in_channels, out_channels // 2, kernel_size=1, padding=padding)),
+        ]))
+        self.final_path_bn = nn.BatchNorm2d(out_channels, eps=0.001)
+
+    def forward(self, x):
+        x = self.act(x)
+        x_path1 = self.path_1(x)
+        x_path2 = self.path_2(x)
+        out = self.final_path_bn(torch.cat([x_path1, x_path2], 1))
+        return out
+
+
+class CellBase(nn.Module):
+
+    def cell_forward(self, x_left, x_right):
+        x_comb_iter_0_left = self.comb_iter_0_left(x_left)
+        x_comb_iter_0_right = self.comb_iter_0_right(x_left)
+        x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
+
+        x_comb_iter_1_left = self.comb_iter_1_left(x_right)
+        x_comb_iter_1_right = self.comb_iter_1_right(x_right)
+        x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
+
+        x_comb_iter_2_left = self.comb_iter_2_left(x_right)
+        x_comb_iter_2_right = self.comb_iter_2_right(x_right)
+        x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right
+
+        x_comb_iter_3_left = self.comb_iter_3_left(x_comb_iter_2)
+        x_comb_iter_3_right = self.comb_iter_3_right(x_right)
+        x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right
+
+        x_comb_iter_4_left = self.comb_iter_4_left(x_left)
+        if self.comb_iter_4_right is not None:
+            x_comb_iter_4_right = self.comb_iter_4_right(x_right)
+        else:
+            x_comb_iter_4_right = x_right
+        x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right
+
+        x_out = torch.cat([x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
+        return x_out
+
+
+class CellStem0(CellBase):
+
+    def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''):
+        super(CellStem0, self).__init__()
+        self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, kernel_size=1, padding=pad_type)
+
+        self.comb_iter_0_left = BranchSeparables(
+            in_chs_left, out_chs_left, kernel_size=5, stride=2, stem_cell=True, padding=pad_type)
+        self.comb_iter_0_right = nn.Sequential(OrderedDict([
+            ('max_pool', create_pool2d('max', 3, stride=2, padding=pad_type)),
+            ('conv', create_conv2d(in_chs_left, out_chs_left, kernel_size=1, padding=pad_type)),
+            ('bn', nn.BatchNorm2d(out_chs_left, eps=0.001)),
+        ]))
+
+        self.comb_iter_1_left = BranchSeparables(
+            out_chs_right, out_chs_right, kernel_size=7, stride=2, padding=pad_type)
+        self.comb_iter_1_right = create_pool2d('max', 3, stride=2, padding=pad_type)
+
+        self.comb_iter_2_left = BranchSeparables(
+            out_chs_right, out_chs_right, kernel_size=5, stride=2, padding=pad_type)
+        self.comb_iter_2_right = BranchSeparables(
+            out_chs_right, out_chs_right, kernel_size=3, stride=2, padding=pad_type)
+
+        self.comb_iter_3_left = BranchSeparables(
+            out_chs_right, out_chs_right, kernel_size=3, padding=pad_type)
+        self.comb_iter_3_right = create_pool2d('max', 3, stride=2, padding=pad_type)
+
+        self.comb_iter_4_left = BranchSeparables(
+            in_chs_right, out_chs_right, kernel_size=3, stride=2, stem_cell=True, padding=pad_type)
+        self.comb_iter_4_right = ActConvBn(
+            out_chs_right, out_chs_right, kernel_size=1, stride=2, padding=pad_type)
+
+    def forward(self, x_left):
+        x_right = self.conv_1x1(x_left)
+        x_out = self.cell_forward(x_left, x_right)
+        return x_out
+
+
+class Cell(CellBase):
+
+    def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type='',
+                 is_reduction=False, match_prev_layer_dims=False):
+        super(Cell, self).__init__()
+
+        # If `is_reduction` is set to `True`, stride 2 is used for the
+        # convolution and pooling layers to reduce the spatial size of
+        # the output of a cell approximately by a factor of 2.
+        stride = 2 if is_reduction else 1
+
+        # If `match_prev_layer_dims` is set to `True`,
+        # `FactorizedReduction` is used to reduce the spatial size
+        # of the left input of a cell approximately by a factor of 2.
+        self.match_prev_layer_dimensions = match_prev_layer_dims
+        if match_prev_layer_dims:
+            self.conv_prev_1x1 = FactorizedReduction(in_chs_left, out_chs_left, padding=pad_type)
+        else:
+            self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, kernel_size=1, padding=pad_type)
+        self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, kernel_size=1, padding=pad_type)
+
+        self.comb_iter_0_left = BranchSeparables(
+            out_chs_left, out_chs_left, kernel_size=5, stride=stride, padding=pad_type)
+        self.comb_iter_0_right = create_pool2d('max', 3, stride=stride, padding=pad_type)
+
+        self.comb_iter_1_left = BranchSeparables(
+            out_chs_right, out_chs_right, kernel_size=7, stride=stride, padding=pad_type)
+        self.comb_iter_1_right = create_pool2d('max', 3, stride=stride, padding=pad_type)
+
+        self.comb_iter_2_left = BranchSeparables(
+            out_chs_right, out_chs_right, kernel_size=5, stride=stride, padding=pad_type)
+        self.comb_iter_2_right = BranchSeparables(
+            out_chs_right, out_chs_right, kernel_size=3, stride=stride, padding=pad_type)
+
+        self.comb_iter_3_left = BranchSeparables(out_chs_right, out_chs_right, kernel_size=3)
+        self.comb_iter_3_right = create_pool2d('max', 3, stride=stride, padding=pad_type)
+
+        self.comb_iter_4_left = BranchSeparables(
+            out_chs_left, out_chs_left, kernel_size=3, stride=stride, padding=pad_type)
+        if is_reduction:
+            self.comb_iter_4_right = ActConvBn(
+                out_chs_right, out_chs_right, kernel_size=1, stride=stride, padding=pad_type)
+        else:
+            self.comb_iter_4_right = None
+
+    def forward(self, x_left, x_right):
+        x_left = self.conv_prev_1x1(x_left)
+        x_right = self.conv_1x1(x_right)
+        x_out = self.cell_forward(x_left, x_right)
+        return x_out
+
+
+class PNASNet5Large(nn.Module):
+    def __init__(self, num_classes=1000, in_chans=3, output_stride=32, drop_rate=0., global_pool='avg', pad_type=''):
+        super(PNASNet5Large, self).__init__()
+        self.num_classes = num_classes
+        self.drop_rate = drop_rate
+        self.num_features = 4320
+        assert output_stride == 32
+
+        self.conv_0 = ConvBnAct(
+            in_chans, 96, kernel_size=3, stride=2, padding=0,
+            norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.1), apply_act=False)
+
+        self.cell_stem_0 = CellStem0(
+            in_chs_left=96, out_chs_left=54, in_chs_right=96, out_chs_right=54, pad_type=pad_type)
+
+        self.cell_stem_1 = Cell(
+            in_chs_left=96, out_chs_left=108, in_chs_right=270, out_chs_right=108, pad_type=pad_type,
+            match_prev_layer_dims=True, is_reduction=True)
+        self.cell_0 = Cell(
+            in_chs_left=270, out_chs_left=216, in_chs_right=540, out_chs_right=216, pad_type=pad_type,
+            match_prev_layer_dims=True)
+        self.cell_1 = Cell(
+            in_chs_left=540, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type)
+        self.cell_2 = Cell(
+            in_chs_left=1080, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type)
+        self.cell_3 = Cell(
+            in_chs_left=1080, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type)
+
+        self.cell_4 = Cell(
+            in_chs_left=1080, out_chs_left=432, in_chs_right=1080, out_chs_right=432, pad_type=pad_type,
+            is_reduction=True)
+        self.cell_5 = Cell(
+            in_chs_left=1080, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type,
+            match_prev_layer_dims=True)
+        self.cell_6 = Cell(
+            in_chs_left=2160, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type)
+        self.cell_7 = Cell(
+            in_chs_left=2160, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type)
+
+        self.cell_8 = Cell(
+            in_chs_left=2160, out_chs_left=864, in_chs_right=2160, out_chs_right=864, pad_type=pad_type,
+            is_reduction=True)
+        self.cell_9 = Cell(
+            in_chs_left=2160, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type,
+            match_prev_layer_dims=True)
+        self.cell_10 = Cell(
+            in_chs_left=4320, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type)
+        self.cell_11 = Cell(
+            in_chs_left=4320, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type)
+        self.act = nn.ReLU()
+        self.feature_info = [
+            dict(num_chs=96, reduction=2, module='conv_0'),
+            dict(num_chs=270, reduction=4, module='cell_stem_1.conv_1x1.act'),
+            dict(num_chs=1080, reduction=8, module='cell_4.conv_1x1.act'),
+            dict(num_chs=2160, reduction=16, module='cell_8.conv_1x1.act'),
+            dict(num_chs=4320, reduction=32, module='act'),
+        ]
+
+        self.global_pool, self.last_linear = create_classifier(
+            self.num_features, self.num_classes, pool_type=global_pool)
+
+    def get_classifier(self):
+        return self.last_linear
+
+    def reset_classifier(self, num_classes, global_pool='avg'):
+        self.num_classes = num_classes
+        self.global_pool, self.last_linear = create_classifier(
+            self.num_features, self.num_classes, pool_type=global_pool)
+
+    def forward_features(self, x):
+        x_conv_0 = self.conv_0(x)
+        x_stem_0 = self.cell_stem_0(x_conv_0)
+        x_stem_1 = self.cell_stem_1(x_conv_0, x_stem_0)
+        x_cell_0 = self.cell_0(x_stem_0, x_stem_1)
+        x_cell_1 = self.cell_1(x_stem_1, x_cell_0)
+        x_cell_2 = self.cell_2(x_cell_0, x_cell_1)
+        x_cell_3 = self.cell_3(x_cell_1, x_cell_2)
+        x_cell_4 = self.cell_4(x_cell_2, x_cell_3)
+        x_cell_5 = self.cell_5(x_cell_3, x_cell_4)
+        x_cell_6 = self.cell_6(x_cell_4, x_cell_5)
+        x_cell_7 = self.cell_7(x_cell_5, x_cell_6)
+        x_cell_8 = self.cell_8(x_cell_6, x_cell_7)
+        x_cell_9 = self.cell_9(x_cell_7, x_cell_8)
+        x_cell_10 = self.cell_10(x_cell_8, x_cell_9)
+        x_cell_11 = self.cell_11(x_cell_9, x_cell_10)
+        x = self.act(x_cell_11)
+        return x
+
+    def forward(self, x):
+        x = self.forward_features(x)
+        x = self.global_pool(x)
+        if self.drop_rate > 0:
+            x = F.dropout(x, self.drop_rate, training=self.training)
+        x = self.last_linear(x)
+        return x
+
+
+def _create_pnasnet(variant, pretrained=False, **kwargs):
+    return build_model_with_cfg(
+        PNASNet5Large, variant, pretrained,
+        default_cfg=default_cfgs[variant],
+        feature_cfg=dict(feature_cls='hook', no_rewrite=True),  # not possible to re-write this model
+        **kwargs)
+
+
+@register_model
+def pnasnet5large(pretrained=False, **kwargs):
+    r"""PNASNet-5 model architecture from the
+    `"Progressive Neural Architecture Search"
+    <https://arxiv.org/abs/1712.00559>`_ paper.
+ """ + model_kwargs = dict(pad_type='same', **kwargs) + return _create_pnasnet('pnasnet5large', pretrained, **model_kwargs) diff --git a/timm/models/pruned/ecaresnet101d_pruned.txt b/timm/models/pruned/ecaresnet101d_pruned.txt new file mode 100644 index 0000000..2589b2f --- /dev/null +++ b/timm/models/pruned/ecaresnet101d_pruned.txt @@ -0,0 +1 @@ +conv1.0.weight:[32, 3, 3, 3]***conv1.1.weight:[32]***conv1.3.weight:[32, 32, 3, 3]***conv1.4.weight:[32]***conv1.6.weight:[64, 32, 3, 3]***bn1.weight:[64]***layer1.0.conv1.weight:[45, 64, 1, 1]***layer1.0.bn1.weight:[45]***layer1.0.conv2.weight:[25, 45, 3, 3]***layer1.0.bn2.weight:[25]***layer1.0.conv3.weight:[26, 25, 1, 1]***layer1.0.bn3.weight:[26]***layer1.0.se.conv.weight:[1, 1, 5]***layer1.0.downsample.1.weight:[26, 64, 1, 1]***layer1.0.downsample.2.weight:[26]***layer1.1.conv1.weight:[53, 26, 1, 1]***layer1.1.bn1.weight:[53]***layer1.1.conv2.weight:[20, 53, 3, 3]***layer1.1.bn2.weight:[20]***layer1.1.conv3.weight:[26, 20, 1, 1]***layer1.1.bn3.weight:[26]***layer1.1.se.conv.weight:[1, 1, 5]***layer1.2.conv1.weight:[60, 26, 1, 1]***layer1.2.bn1.weight:[60]***layer1.2.conv2.weight:[27, 60, 3, 3]***layer1.2.bn2.weight:[27]***layer1.2.conv3.weight:[26, 27, 1, 1]***layer1.2.bn3.weight:[26]***layer1.2.se.conv.weight:[1, 1, 5]***layer2.0.conv1.weight:[81, 26, 1, 1]***layer2.0.bn1.weight:[81]***layer2.0.conv2.weight:[24, 81, 3, 3]***layer2.0.bn2.weight:[24]***layer2.0.conv3.weight:[142, 24, 1, 1]***layer2.0.bn3.weight:[142]***layer2.0.se.conv.weight:[1, 1, 5]***layer2.0.downsample.1.weight:[142, 26, 1, 1]***layer2.0.downsample.2.weight:[142]***layer2.1.conv1.weight:[93, 142, 1, 1]***layer2.1.bn1.weight:[93]***layer2.1.conv2.weight:[49, 93, 3, 3]***layer2.1.bn2.weight:[49]***layer2.1.conv3.weight:[142, 49, 1, 1]***layer2.1.bn3.weight:[142]***layer2.1.se.conv.weight:[1, 1, 5]***layer2.2.conv1.weight:[102, 142, 1, 1]***layer2.2.bn1.weight:[102]***layer2.2.conv2.weight:[54, 102, 3, 3]***layer2.2.bn2.weight:[54]***layer2.2.conv3.weight:[142, 54, 1, 1]***layer2.2.bn3.weight:[142]***layer2.2.se.conv.weight:[1, 1, 5]***layer2.3.conv1.weight:[122, 142, 1, 1]***layer2.3.bn1.weight:[122]***layer2.3.conv2.weight:[78, 122, 3, 3]***layer2.3.bn2.weight:[78]***layer2.3.conv3.weight:[142, 78, 1, 1]***layer2.3.bn3.weight:[142]***layer2.3.se.conv.weight:[1, 1, 5]***layer3.0.conv1.weight:[101, 142, 1, 1]***layer3.0.bn1.weight:[101]***layer3.0.conv2.weight:[25, 101, 3, 3]***layer3.0.bn2.weight:[25]***layer3.0.conv3.weight:[278, 25, 1, 1]***layer3.0.bn3.weight:[278]***layer3.0.se.conv.weight:[1, 1, 5]***layer3.0.downsample.1.weight:[278, 142, 1, 1]***layer3.0.downsample.2.weight:[278]***layer3.1.conv1.weight:[239, 278, 1, 1]***layer3.1.bn1.weight:[239]***layer3.1.conv2.weight:[160, 239, 3, 3]***layer3.1.bn2.weight:[160]***layer3.1.conv3.weight:[278, 160, 1, 1]***layer3.1.bn3.weight:[278]***layer3.1.se.conv.weight:[1, 1, 5]***layer3.2.conv1.weight:[234, 278, 1, 1]***layer3.2.bn1.weight:[234]***layer3.2.conv2.weight:[156, 234, 3, 3]***layer3.2.bn2.weight:[156]***layer3.2.conv3.weight:[278, 156, 1, 1]***layer3.2.bn3.weight:[278]***layer3.2.se.conv.weight:[1, 1, 5]***layer3.3.conv1.weight:[250, 278, 1, 1]***layer3.3.bn1.weight:[250]***layer3.3.conv2.weight:[176, 250, 3, 3]***layer3.3.bn2.weight:[176]***layer3.3.conv3.weight:[278, 176, 1, 1]***layer3.3.bn3.weight:[278]***layer3.3.se.conv.weight:[1, 1, 5]***layer3.4.conv1.weight:[253, 278, 1, 1]***layer3.4.bn1.weight:[253]***layer3.4.conv2.weight:[191, 253, 3, 3]***layer3.4.bn2.weight:[191]***layer3.4.conv3.weight:[278, 191, 
1, 1]***layer3.4.bn3.weight:[278]***layer3.4.se.conv.weight:[1, 1, 5]***layer3.5.conv1.weight:[251, 278, 1, 1]***layer3.5.bn1.weight:[251]***layer3.5.conv2.weight:[175, 251, 3, 3]***layer3.5.bn2.weight:[175]***layer3.5.conv3.weight:[278, 175, 1, 1]***layer3.5.bn3.weight:[278]***layer3.5.se.conv.weight:[1, 1, 5]***layer3.6.conv1.weight:[230, 278, 1, 1]***layer3.6.bn1.weight:[230]***layer3.6.conv2.weight:[128, 230, 3, 3]***layer3.6.bn2.weight:[128]***layer3.6.conv3.weight:[278, 128, 1, 1]***layer3.6.bn3.weight:[278]***layer3.6.se.conv.weight:[1, 1, 5]***layer3.7.conv1.weight:[244, 278, 1, 1]***layer3.7.bn1.weight:[244]***layer3.7.conv2.weight:[154, 244, 3, 3]***layer3.7.bn2.weight:[154]***layer3.7.conv3.weight:[278, 154, 1, 1]***layer3.7.bn3.weight:[278]***layer3.7.se.conv.weight:[1, 1, 5]***layer3.8.conv1.weight:[244, 278, 1, 1]***layer3.8.bn1.weight:[244]***layer3.8.conv2.weight:[159, 244, 3, 3]***layer3.8.bn2.weight:[159]***layer3.8.conv3.weight:[278, 159, 1, 1]***layer3.8.bn3.weight:[278]***layer3.8.se.conv.weight:[1, 1, 5]***layer3.9.conv1.weight:[238, 278, 1, 1]***layer3.9.bn1.weight:[238]***layer3.9.conv2.weight:[97, 238, 3, 3]***layer3.9.bn2.weight:[97]***layer3.9.conv3.weight:[278, 97, 1, 1]***layer3.9.bn3.weight:[278]***layer3.9.se.conv.weight:[1, 1, 5]***layer3.10.conv1.weight:[244, 278, 1, 1]***layer3.10.bn1.weight:[244]***layer3.10.conv2.weight:[149, 244, 3, 3]***layer3.10.bn2.weight:[149]***layer3.10.conv3.weight:[278, 149, 1, 1]***layer3.10.bn3.weight:[278]***layer3.10.se.conv.weight:[1, 1, 5]***layer3.11.conv1.weight:[253, 278, 1, 1]***layer3.11.bn1.weight:[253]***layer3.11.conv2.weight:[181, 253, 3, 3]***layer3.11.bn2.weight:[181]***layer3.11.conv3.weight:[278, 181, 1, 1]***layer3.11.bn3.weight:[278]***layer3.11.se.conv.weight:[1, 1, 5]***layer3.12.conv1.weight:[245, 278, 1, 1]***layer3.12.bn1.weight:[245]***layer3.12.conv2.weight:[119, 245, 3, 3]***layer3.12.bn2.weight:[119]***layer3.12.conv3.weight:[278, 119, 1, 1]***layer3.12.bn3.weight:[278]***layer3.12.se.conv.weight:[1, 1, 5]***layer3.13.conv1.weight:[255, 278, 1, 1]***layer3.13.bn1.weight:[255]***layer3.13.conv2.weight:[216, 255, 3, 3]***layer3.13.bn2.weight:[216]***layer3.13.conv3.weight:[278, 216, 1, 1]***layer3.13.bn3.weight:[278]***layer3.13.se.conv.weight:[1, 1, 5]***layer3.14.conv1.weight:[256, 278, 1, 1]***layer3.14.bn1.weight:[256]***layer3.14.conv2.weight:[201, 256, 3, 3]***layer3.14.bn2.weight:[201]***layer3.14.conv3.weight:[278, 201, 1, 1]***layer3.14.bn3.weight:[278]***layer3.14.se.conv.weight:[1, 1, 5]***layer3.15.conv1.weight:[253, 278, 1, 1]***layer3.15.bn1.weight:[253]***layer3.15.conv2.weight:[149, 253, 3, 3]***layer3.15.bn2.weight:[149]***layer3.15.conv3.weight:[278, 149, 1, 1]***layer3.15.bn3.weight:[278]***layer3.15.se.conv.weight:[1, 1, 5]***layer3.16.conv1.weight:[254, 278, 1, 1]***layer3.16.bn1.weight:[254]***layer3.16.conv2.weight:[141, 254, 3, 3]***layer3.16.bn2.weight:[141]***layer3.16.conv3.weight:[278, 141, 1, 1]***layer3.16.bn3.weight:[278]***layer3.16.se.conv.weight:[1, 1, 5]***layer3.17.conv1.weight:[256, 278, 1, 1]***layer3.17.bn1.weight:[256]***layer3.17.conv2.weight:[190, 256, 3, 3]***layer3.17.bn2.weight:[190]***layer3.17.conv3.weight:[278, 190, 1, 1]***layer3.17.bn3.weight:[278]***layer3.17.se.conv.weight:[1, 1, 5]***layer3.18.conv1.weight:[256, 278, 1, 1]***layer3.18.bn1.weight:[256]***layer3.18.conv2.weight:[217, 256, 3, 3]***layer3.18.bn2.weight:[217]***layer3.18.conv3.weight:[278, 217, 1, 1]***layer3.18.bn3.weight:[278]***layer3.18.se.conv.weight:[1, 1, 
5]***layer3.19.conv1.weight:[255, 278, 1, 1]***layer3.19.bn1.weight:[255]***layer3.19.conv2.weight:[156, 255, 3, 3]***layer3.19.bn2.weight:[156]***layer3.19.conv3.weight:[278, 156, 1, 1]***layer3.19.bn3.weight:[278]***layer3.19.se.conv.weight:[1, 1, 5]***layer3.20.conv1.weight:[256, 278, 1, 1]***layer3.20.bn1.weight:[256]***layer3.20.conv2.weight:[155, 256, 3, 3]***layer3.20.bn2.weight:[155]***layer3.20.conv3.weight:[278, 155, 1, 1]***layer3.20.bn3.weight:[278]***layer3.20.se.conv.weight:[1, 1, 5]***layer3.21.conv1.weight:[256, 278, 1, 1]***layer3.21.bn1.weight:[256]***layer3.21.conv2.weight:[232, 256, 3, 3]***layer3.21.bn2.weight:[232]***layer3.21.conv3.weight:[278, 232, 1, 1]***layer3.21.bn3.weight:[278]***layer3.21.se.conv.weight:[1, 1, 5]***layer3.22.conv1.weight:[256, 278, 1, 1]***layer3.22.bn1.weight:[256]***layer3.22.conv2.weight:[214, 256, 3, 3]***layer3.22.bn2.weight:[214]***layer3.22.conv3.weight:[278, 214, 1, 1]***layer3.22.bn3.weight:[278]***layer3.22.se.conv.weight:[1, 1, 5]***layer4.0.conv1.weight:[499, 278, 1, 1]***layer4.0.bn1.weight:[499]***layer4.0.conv2.weight:[289, 499, 3, 3]***layer4.0.bn2.weight:[289]***layer4.0.conv3.weight:[2042, 289, 1, 1]***layer4.0.bn3.weight:[2042]***layer4.0.se.conv.weight:[1, 1, 7]***layer4.0.downsample.1.weight:[2042, 278, 1, 1]***layer4.0.downsample.2.weight:[2042]***layer4.1.conv1.weight:[512, 2042, 1, 1]***layer4.1.bn1.weight:[512]***layer4.1.conv2.weight:[512, 512, 3, 3]***layer4.1.bn2.weight:[512]***layer4.1.conv3.weight:[2042, 512, 1, 1]***layer4.1.bn3.weight:[2042]***layer4.1.se.conv.weight:[1, 1, 7]***layer4.2.conv1.weight:[512, 2042, 1, 1]***layer4.2.bn1.weight:[512]***layer4.2.conv2.weight:[502, 512, 3, 3]***layer4.2.bn2.weight:[502]***layer4.2.conv3.weight:[2042, 502, 1, 1]***layer4.2.bn3.weight:[2042]***layer4.2.se.conv.weight:[1, 1, 7]***fc.weight:[1000, 2042]***layer1_2_conv3_M.weight:[256, 26]***layer2_3_conv3_M.weight:[512, 142]***layer3_22_conv3_M.weight:[1024, 278]***layer4_2_conv3_M.weight:[2048, 2042] \ No newline at end of file diff --git a/timm/models/pruned/ecaresnet50d_pruned.txt b/timm/models/pruned/ecaresnet50d_pruned.txt new file mode 100644 index 0000000..9a8b2bf --- /dev/null +++ b/timm/models/pruned/ecaresnet50d_pruned.txt @@ -0,0 +1 @@ +conv1.0.weight:[32, 3, 3, 3]***conv1.1.weight:[32]***conv1.3.weight:[32, 32, 3, 3]***conv1.4.weight:[32]***conv1.6.weight:[64, 32, 3, 3]***bn1.weight:[64]***layer1.0.conv1.weight:[47, 64, 1, 1]***layer1.0.bn1.weight:[47]***layer1.0.conv2.weight:[18, 47, 3, 3]***layer1.0.bn2.weight:[18]***layer1.0.conv3.weight:[19, 18, 1, 1]***layer1.0.bn3.weight:[19]***layer1.0.se.conv.weight:[1, 1, 5]***layer1.0.downsample.1.weight:[19, 64, 1, 1]***layer1.0.downsample.2.weight:[19]***layer1.1.conv1.weight:[52, 19, 1, 1]***layer1.1.bn1.weight:[52]***layer1.1.conv2.weight:[22, 52, 3, 3]***layer1.1.bn2.weight:[22]***layer1.1.conv3.weight:[19, 22, 1, 1]***layer1.1.bn3.weight:[19]***layer1.1.se.conv.weight:[1, 1, 5]***layer1.2.conv1.weight:[64, 19, 1, 1]***layer1.2.bn1.weight:[64]***layer1.2.conv2.weight:[35, 64, 3, 3]***layer1.2.bn2.weight:[35]***layer1.2.conv3.weight:[19, 35, 1, 1]***layer1.2.bn3.weight:[19]***layer1.2.se.conv.weight:[1, 1, 5]***layer2.0.conv1.weight:[85, 19, 1, 1]***layer2.0.bn1.weight:[85]***layer2.0.conv2.weight:[37, 85, 3, 3]***layer2.0.bn2.weight:[37]***layer2.0.conv3.weight:[171, 37, 1, 1]***layer2.0.bn3.weight:[171]***layer2.0.se.conv.weight:[1, 1, 5]***layer2.0.downsample.1.weight:[171, 19, 1, 1]***layer2.0.downsample.2.weight:[171]***layer2.1.conv1.weight:[107, 171, 1, 
1]***layer2.1.bn1.weight:[107]***layer2.1.conv2.weight:[80, 107, 3, 3]***layer2.1.bn2.weight:[80]***layer2.1.conv3.weight:[171, 80, 1, 1]***layer2.1.bn3.weight:[171]***layer2.1.se.conv.weight:[1, 1, 5]***layer2.2.conv1.weight:[120, 171, 1, 1]***layer2.2.bn1.weight:[120]***layer2.2.conv2.weight:[85, 120, 3, 3]***layer2.2.bn2.weight:[85]***layer2.2.conv3.weight:[171, 85, 1, 1]***layer2.2.bn3.weight:[171]***layer2.2.se.conv.weight:[1, 1, 5]***layer2.3.conv1.weight:[125, 171, 1, 1]***layer2.3.bn1.weight:[125]***layer2.3.conv2.weight:[87, 125, 3, 3]***layer2.3.bn2.weight:[87]***layer2.3.conv3.weight:[171, 87, 1, 1]***layer2.3.bn3.weight:[171]***layer2.3.se.conv.weight:[1, 1, 5]***layer3.0.conv1.weight:[198, 171, 1, 1]***layer3.0.bn1.weight:[198]***layer3.0.conv2.weight:[126, 198, 3, 3]***layer3.0.bn2.weight:[126]***layer3.0.conv3.weight:[818, 126, 1, 1]***layer3.0.bn3.weight:[818]***layer3.0.se.conv.weight:[1, 1, 5]***layer3.0.downsample.1.weight:[818, 171, 1, 1]***layer3.0.downsample.2.weight:[818]***layer3.1.conv1.weight:[255, 818, 1, 1]***layer3.1.bn1.weight:[255]***layer3.1.conv2.weight:[232, 255, 3, 3]***layer3.1.bn2.weight:[232]***layer3.1.conv3.weight:[818, 232, 1, 1]***layer3.1.bn3.weight:[818]***layer3.1.se.conv.weight:[1, 1, 5]***layer3.2.conv1.weight:[256, 818, 1, 1]***layer3.2.bn1.weight:[256]***layer3.2.conv2.weight:[233, 256, 3, 3]***layer3.2.bn2.weight:[233]***layer3.2.conv3.weight:[818, 233, 1, 1]***layer3.2.bn3.weight:[818]***layer3.2.se.conv.weight:[1, 1, 5]***layer3.3.conv1.weight:[253, 818, 1, 1]***layer3.3.bn1.weight:[253]***layer3.3.conv2.weight:[235, 253, 3, 3]***layer3.3.bn2.weight:[235]***layer3.3.conv3.weight:[818, 235, 1, 1]***layer3.3.bn3.weight:[818]***layer3.3.se.conv.weight:[1, 1, 5]***layer3.4.conv1.weight:[256, 818, 1, 1]***layer3.4.bn1.weight:[256]***layer3.4.conv2.weight:[225, 256, 3, 3]***layer3.4.bn2.weight:[225]***layer3.4.conv3.weight:[818, 225, 1, 1]***layer3.4.bn3.weight:[818]***layer3.4.se.conv.weight:[1, 1, 5]***layer3.5.conv1.weight:[256, 818, 1, 1]***layer3.5.bn1.weight:[256]***layer3.5.conv2.weight:[239, 256, 3, 3]***layer3.5.bn2.weight:[239]***layer3.5.conv3.weight:[818, 239, 1, 1]***layer3.5.bn3.weight:[818]***layer3.5.se.conv.weight:[1, 1, 5]***layer4.0.conv1.weight:[492, 818, 1, 1]***layer4.0.bn1.weight:[492]***layer4.0.conv2.weight:[237, 492, 3, 3]***layer4.0.bn2.weight:[237]***layer4.0.conv3.weight:[2022, 237, 1, 1]***layer4.0.bn3.weight:[2022]***layer4.0.se.conv.weight:[1, 1, 7]***layer4.0.downsample.1.weight:[2022, 818, 1, 1]***layer4.0.downsample.2.weight:[2022]***layer4.1.conv1.weight:[512, 2022, 1, 1]***layer4.1.bn1.weight:[512]***layer4.1.conv2.weight:[500, 512, 3, 3]***layer4.1.bn2.weight:[500]***layer4.1.conv3.weight:[2022, 500, 1, 1]***layer4.1.bn3.weight:[2022]***layer4.1.se.conv.weight:[1, 1, 7]***layer4.2.conv1.weight:[512, 2022, 1, 1]***layer4.2.bn1.weight:[512]***layer4.2.conv2.weight:[490, 512, 3, 3]***layer4.2.bn2.weight:[490]***layer4.2.conv3.weight:[2022, 490, 1, 1]***layer4.2.bn3.weight:[2022]***layer4.2.se.conv.weight:[1, 1, 7]***fc.weight:[1000, 2022]***layer1_2_conv3_M.weight:[256, 19]***layer2_3_conv3_M.weight:[512, 171]***layer3_5_conv3_M.weight:[1024, 818]***layer4_2_conv3_M.weight:[2048, 2022] \ No newline at end of file diff --git a/timm/models/pruned/efficientnet_b1_pruned.txt b/timm/models/pruned/efficientnet_b1_pruned.txt new file mode 100644 index 0000000..0972b52 --- /dev/null +++ b/timm/models/pruned/efficientnet_b1_pruned.txt @@ -0,0 +1 @@ +conv_stem.weight:[32, 3, 3, 
3]***bn1.weight:[32]***bn1.bias:[32]***bn1.running_mean:[32]***bn1.running_var:[32]***bn1.num_batches_tracked:[]***blocks.0.0.conv_dw.weight:[32, 1, 3, 3]***blocks.0.0.bn1.weight:[32]***blocks.0.0.bn1.bias:[32]***blocks.0.0.bn1.running_mean:[32]***blocks.0.0.bn1.running_var:[32]***blocks.0.0.bn1.num_batches_tracked:[]***blocks.0.0.se.conv_reduce.weight:[8, 32, 1, 1]***blocks.0.0.se.conv_reduce.bias:[8]***blocks.0.0.se.conv_expand.weight:[32, 8, 1, 1]***blocks.0.0.se.conv_expand.bias:[32]***blocks.0.0.conv_pw.weight:[16, 32, 1, 1]***blocks.0.0.bn2.weight:[16]***blocks.0.0.bn2.bias:[16]***blocks.0.0.bn2.running_mean:[16]***blocks.0.0.bn2.running_var:[16]***blocks.0.0.bn2.num_batches_tracked:[]***blocks.0.1.conv_dw.weight:[16, 1, 3, 3]***blocks.0.1.bn1.weight:[16]***blocks.0.1.bn1.bias:[16]***blocks.0.1.bn1.running_mean:[16]***blocks.0.1.bn1.running_var:[16]***blocks.0.1.bn1.num_batches_tracked:[]***blocks.0.1.se.conv_reduce.weight:[4, 16, 1, 1]***blocks.0.1.se.conv_reduce.bias:[4]***blocks.0.1.se.conv_expand.weight:[16, 4, 1, 1]***blocks.0.1.se.conv_expand.bias:[16]***blocks.0.1.conv_pw.weight:[16, 16, 1, 1]***blocks.0.1.bn2.weight:[16]***blocks.0.1.bn2.bias:[16]***blocks.0.1.bn2.running_mean:[16]***blocks.0.1.bn2.running_var:[16]***blocks.0.1.bn2.num_batches_tracked:[]***blocks.1.0.conv_pw.weight:[48, 16, 1, 1]***blocks.1.0.bn1.weight:[48]***blocks.1.0.bn1.bias:[48]***blocks.1.0.bn1.running_mean:[48]***blocks.1.0.bn1.running_var:[48]***blocks.1.0.bn1.num_batches_tracked:[]***blocks.1.0.conv_dw.weight:[48, 1, 3, 3]***blocks.1.0.bn2.weight:[48]***blocks.1.0.bn2.bias:[48]***blocks.1.0.bn2.running_mean:[48]***blocks.1.0.bn2.running_var:[48]***blocks.1.0.bn2.num_batches_tracked:[]***blocks.1.0.se.conv_reduce.weight:[4, 48, 1, 1]***blocks.1.0.se.conv_reduce.bias:[4]***blocks.1.0.se.conv_expand.weight:[48, 4, 1, 1]***blocks.1.0.se.conv_expand.bias:[48]***blocks.1.0.conv_pwl.weight:[12, 48, 1, 1]***blocks.1.0.bn3.weight:[12]***blocks.1.0.bn3.bias:[12]***blocks.1.0.bn3.running_mean:[12]***blocks.1.0.bn3.running_var:[12]***blocks.1.0.bn3.num_batches_tracked:[]***blocks.1.1.conv_pw.weight:[62, 12, 1, 1]***blocks.1.1.bn1.weight:[62]***blocks.1.1.bn1.bias:[62]***blocks.1.1.bn1.running_mean:[62]***blocks.1.1.bn1.running_var:[62]***blocks.1.1.bn1.num_batches_tracked:[]***blocks.1.1.conv_dw.weight:[62, 1, 3, 3]***blocks.1.1.bn2.weight:[62]***blocks.1.1.bn2.bias:[62]***blocks.1.1.bn2.running_mean:[62]***blocks.1.1.bn2.running_var:[62]***blocks.1.1.bn2.num_batches_tracked:[]***blocks.1.1.se.conv_reduce.weight:[6, 62, 1, 1]***blocks.1.1.se.conv_reduce.bias:[6]***blocks.1.1.se.conv_expand.weight:[62, 6, 1, 1]***blocks.1.1.se.conv_expand.bias:[62]***blocks.1.1.conv_pwl.weight:[12, 62, 1, 1]***blocks.1.1.bn3.weight:[12]***blocks.1.1.bn3.bias:[12]***blocks.1.1.bn3.running_mean:[12]***blocks.1.1.bn3.running_var:[12]***blocks.1.1.bn3.num_batches_tracked:[]***blocks.1.2.conv_pw.weight:[48, 12, 1, 1]***blocks.1.2.bn1.weight:[48]***blocks.1.2.bn1.bias:[48]***blocks.1.2.bn1.running_mean:[48]***blocks.1.2.bn1.running_var:[48]***blocks.1.2.bn1.num_batches_tracked:[]***blocks.1.2.conv_dw.weight:[48, 1, 3, 3]***blocks.1.2.bn2.weight:[48]***blocks.1.2.bn2.bias:[48]***blocks.1.2.bn2.running_mean:[48]***blocks.1.2.bn2.running_var:[48]***blocks.1.2.bn2.num_batches_tracked:[]***blocks.1.2.se.conv_reduce.weight:[6, 48, 1, 1]***blocks.1.2.se.conv_reduce.bias:[6]***blocks.1.2.se.conv_expand.weight:[48, 6, 1, 1]***blocks.1.2.se.conv_expand.bias:[48]***blocks.1.2.conv_pwl.weight:[12, 48, 1, 
1]***blocks.1.2.bn3.weight:[12]***blocks.1.2.bn3.bias:[12]***blocks.1.2.bn3.running_mean:[12]***blocks.1.2.bn3.running_var:[12]***blocks.1.2.bn3.num_batches_tracked:[]***blocks.2.0.conv_pw.weight:[70, 12, 1, 1]***blocks.2.0.bn1.weight:[70]***blocks.2.0.bn1.bias:[70]***blocks.2.0.bn1.running_mean:[70]***blocks.2.0.bn1.running_var:[70]***blocks.2.0.bn1.num_batches_tracked:[]***blocks.2.0.conv_dw.weight:[70, 1, 5, 5]***blocks.2.0.bn2.weight:[70]***blocks.2.0.bn2.bias:[70]***blocks.2.0.bn2.running_mean:[70]***blocks.2.0.bn2.running_var:[70]***blocks.2.0.bn2.num_batches_tracked:[]***blocks.2.0.se.conv_reduce.weight:[6, 70, 1, 1]***blocks.2.0.se.conv_reduce.bias:[6]***blocks.2.0.se.conv_expand.weight:[70, 6, 1, 1]***blocks.2.0.se.conv_expand.bias:[70]***blocks.2.0.conv_pwl.weight:[35, 70, 1, 1]***blocks.2.0.bn3.weight:[35]***blocks.2.0.bn3.bias:[35]***blocks.2.0.bn3.running_mean:[35]***blocks.2.0.bn3.running_var:[35]***blocks.2.0.bn3.num_batches_tracked:[]***blocks.2.1.conv_pw.weight:[61, 35, 1, 1]***blocks.2.1.bn1.weight:[61]***blocks.2.1.bn1.bias:[61]***blocks.2.1.bn1.running_mean:[61]***blocks.2.1.bn1.running_var:[61]***blocks.2.1.bn1.num_batches_tracked:[]***blocks.2.1.conv_dw.weight:[61, 1, 5, 5]***blocks.2.1.bn2.weight:[61]***blocks.2.1.bn2.bias:[61]***blocks.2.1.bn2.running_mean:[61]***blocks.2.1.bn2.running_var:[61]***blocks.2.1.bn2.num_batches_tracked:[]***blocks.2.1.se.conv_reduce.weight:[10, 61, 1, 1]***blocks.2.1.se.conv_reduce.bias:[10]***blocks.2.1.se.conv_expand.weight:[61, 10, 1, 1]***blocks.2.1.se.conv_expand.bias:[61]***blocks.2.1.conv_pwl.weight:[35, 61, 1, 1]***blocks.2.1.bn3.weight:[35]***blocks.2.1.bn3.bias:[35]***blocks.2.1.bn3.running_mean:[35]***blocks.2.1.bn3.running_var:[35]***blocks.2.1.bn3.num_batches_tracked:[]***blocks.2.2.conv_pw.weight:[51, 35, 1, 1]***blocks.2.2.bn1.weight:[51]***blocks.2.2.bn1.bias:[51]***blocks.2.2.bn1.running_mean:[51]***blocks.2.2.bn1.running_var:[51]***blocks.2.2.bn1.num_batches_tracked:[]***blocks.2.2.conv_dw.weight:[51, 1, 5, 5]***blocks.2.2.bn2.weight:[51]***blocks.2.2.bn2.bias:[51]***blocks.2.2.bn2.running_mean:[51]***blocks.2.2.bn2.running_var:[51]***blocks.2.2.bn2.num_batches_tracked:[]***blocks.2.2.se.conv_reduce.weight:[10, 51, 1, 1]***blocks.2.2.se.conv_reduce.bias:[10]***blocks.2.2.se.conv_expand.weight:[51, 10, 1, 1]***blocks.2.2.se.conv_expand.bias:[51]***blocks.2.2.conv_pwl.weight:[35, 51, 1, 1]***blocks.2.2.bn3.weight:[35]***blocks.2.2.bn3.bias:[35]***blocks.2.2.bn3.running_mean:[35]***blocks.2.2.bn3.running_var:[35]***blocks.2.2.bn3.num_batches_tracked:[]***blocks.3.0.conv_pw.weight:[175, 35, 1, 1]***blocks.3.0.bn1.weight:[175]***blocks.3.0.bn1.bias:[175]***blocks.3.0.bn1.running_mean:[175]***blocks.3.0.bn1.running_var:[175]***blocks.3.0.bn1.num_batches_tracked:[]***blocks.3.0.conv_dw.weight:[175, 1, 3, 3]***blocks.3.0.bn2.weight:[175]***blocks.3.0.bn2.bias:[175]***blocks.3.0.bn2.running_mean:[175]***blocks.3.0.bn2.running_var:[175]***blocks.3.0.bn2.num_batches_tracked:[]***blocks.3.0.se.conv_reduce.weight:[10, 175, 1, 1]***blocks.3.0.se.conv_reduce.bias:[10]***blocks.3.0.se.conv_expand.weight:[175, 10, 1, 1]***blocks.3.0.se.conv_expand.bias:[175]***blocks.3.0.conv_pwl.weight:[74, 175, 1, 1]***blocks.3.0.bn3.weight:[74]***blocks.3.0.bn3.bias:[74]***blocks.3.0.bn3.running_mean:[74]***blocks.3.0.bn3.running_var:[74]***blocks.3.0.bn3.num_batches_tracked:[]***blocks.3.1.conv_pw.weight:[188, 74, 1, 
1]***blocks.3.1.bn1.weight:[188]***blocks.3.1.bn1.bias:[188]***blocks.3.1.bn1.running_mean:[188]***blocks.3.1.bn1.running_var:[188]***blocks.3.1.bn1.num_batches_tracked:[]***blocks.3.1.conv_dw.weight:[188, 1, 3, 3]***blocks.3.1.bn2.weight:[188]***blocks.3.1.bn2.bias:[188]***blocks.3.1.bn2.running_mean:[188]***blocks.3.1.bn2.running_var:[188]***blocks.3.1.bn2.num_batches_tracked:[]***blocks.3.1.se.conv_reduce.weight:[20, 188, 1, 1]***blocks.3.1.se.conv_reduce.bias:[20]***blocks.3.1.se.conv_expand.weight:[188, 20, 1, 1]***blocks.3.1.se.conv_expand.bias:[188]***blocks.3.1.conv_pwl.weight:[74, 188, 1, 1]***blocks.3.1.bn3.weight:[74]***blocks.3.1.bn3.bias:[74]***blocks.3.1.bn3.running_mean:[74]***blocks.3.1.bn3.running_var:[74]***blocks.3.1.bn3.num_batches_tracked:[]***blocks.3.2.conv_pw.weight:[137, 74, 1, 1]***blocks.3.2.bn1.weight:[137]***blocks.3.2.bn1.bias:[137]***blocks.3.2.bn1.running_mean:[137]***blocks.3.2.bn1.running_var:[137]***blocks.3.2.bn1.num_batches_tracked:[]***blocks.3.2.conv_dw.weight:[137, 1, 3, 3]***blocks.3.2.bn2.weight:[137]***blocks.3.2.bn2.bias:[137]***blocks.3.2.bn2.running_mean:[137]***blocks.3.2.bn2.running_var:[137]***blocks.3.2.bn2.num_batches_tracked:[]***blocks.3.2.se.conv_reduce.weight:[20, 137, 1, 1]***blocks.3.2.se.conv_reduce.bias:[20]***blocks.3.2.se.conv_expand.weight:[137, 20, 1, 1]***blocks.3.2.se.conv_expand.bias:[137]***blocks.3.2.conv_pwl.weight:[74, 137, 1, 1]***blocks.3.2.bn3.weight:[74]***blocks.3.2.bn3.bias:[74]***blocks.3.2.bn3.running_mean:[74]***blocks.3.2.bn3.running_var:[74]***blocks.3.2.bn3.num_batches_tracked:[]***blocks.3.3.conv_pw.weight:[164, 74, 1, 1]***blocks.3.3.bn1.weight:[164]***blocks.3.3.bn1.bias:[164]***blocks.3.3.bn1.running_mean:[164]***blocks.3.3.bn1.running_var:[164]***blocks.3.3.bn1.num_batches_tracked:[]***blocks.3.3.conv_dw.weight:[164, 1, 3, 3]***blocks.3.3.bn2.weight:[164]***blocks.3.3.bn2.bias:[164]***blocks.3.3.bn2.running_mean:[164]***blocks.3.3.bn2.running_var:[164]***blocks.3.3.bn2.num_batches_tracked:[]***blocks.3.3.se.conv_reduce.weight:[20, 164, 1, 1]***blocks.3.3.se.conv_reduce.bias:[20]***blocks.3.3.se.conv_expand.weight:[164, 20, 1, 1]***blocks.3.3.se.conv_expand.bias:[164]***blocks.3.3.conv_pwl.weight:[74, 164, 1, 1]***blocks.3.3.bn3.weight:[74]***blocks.3.3.bn3.bias:[74]***blocks.3.3.bn3.running_mean:[74]***blocks.3.3.bn3.running_var:[74]***blocks.3.3.bn3.num_batches_tracked:[]***blocks.4.0.conv_pw.weight:[399, 74, 1, 1]***blocks.4.0.bn1.weight:[399]***blocks.4.0.bn1.bias:[399]***blocks.4.0.bn1.running_mean:[399]***blocks.4.0.bn1.running_var:[399]***blocks.4.0.bn1.num_batches_tracked:[]***blocks.4.0.conv_dw.weight:[399, 1, 5, 5]***blocks.4.0.bn2.weight:[399]***blocks.4.0.bn2.bias:[399]***blocks.4.0.bn2.running_mean:[399]***blocks.4.0.bn2.running_var:[399]***blocks.4.0.bn2.num_batches_tracked:[]***blocks.4.0.se.conv_reduce.weight:[20, 399, 1, 1]***blocks.4.0.se.conv_reduce.bias:[20]***blocks.4.0.se.conv_expand.weight:[399, 20, 1, 1]***blocks.4.0.se.conv_expand.bias:[399]***blocks.4.0.conv_pwl.weight:[67, 399, 1, 1]***blocks.4.0.bn3.weight:[67]***blocks.4.0.bn3.bias:[67]***blocks.4.0.bn3.running_mean:[67]***blocks.4.0.bn3.running_var:[67]***blocks.4.0.bn3.num_batches_tracked:[]***blocks.4.1.conv_pw.weight:[201, 67, 1, 1]***blocks.4.1.bn1.weight:[201]***blocks.4.1.bn1.bias:[201]***blocks.4.1.bn1.running_mean:[201]***blocks.4.1.bn1.running_var:[201]***blocks.4.1.bn1.num_batches_tracked:[]***blocks.4.1.conv_dw.weight:[201, 1, 5, 
5]***blocks.4.1.bn2.weight:[201]***blocks.4.1.bn2.bias:[201]***blocks.4.1.bn2.running_mean:[201]***blocks.4.1.bn2.running_var:[201]***blocks.4.1.bn2.num_batches_tracked:[]***blocks.4.1.se.conv_reduce.weight:[28, 201, 1, 1]***blocks.4.1.se.conv_reduce.bias:[28]***blocks.4.1.se.conv_expand.weight:[201, 28, 1, 1]***blocks.4.1.se.conv_expand.bias:[201]***blocks.4.1.conv_pwl.weight:[67, 201, 1, 1]***blocks.4.1.bn3.weight:[67]***blocks.4.1.bn3.bias:[67]***blocks.4.1.bn3.running_mean:[67]***blocks.4.1.bn3.running_var:[67]***blocks.4.1.bn3.num_batches_tracked:[]***blocks.4.2.conv_pw.weight:[160, 67, 1, 1]***blocks.4.2.bn1.weight:[160]***blocks.4.2.bn1.bias:[160]***blocks.4.2.bn1.running_mean:[160]***blocks.4.2.bn1.running_var:[160]***blocks.4.2.bn1.num_batches_tracked:[]***blocks.4.2.conv_dw.weight:[160, 1, 5, 5]***blocks.4.2.bn2.weight:[160]***blocks.4.2.bn2.bias:[160]***blocks.4.2.bn2.running_mean:[160]***blocks.4.2.bn2.running_var:[160]***blocks.4.2.bn2.num_batches_tracked:[]***blocks.4.2.se.conv_reduce.weight:[28, 160, 1, 1]***blocks.4.2.se.conv_reduce.bias:[28]***blocks.4.2.se.conv_expand.weight:[160, 28, 1, 1]***blocks.4.2.se.conv_expand.bias:[160]***blocks.4.2.conv_pwl.weight:[67, 160, 1, 1]***blocks.4.2.bn3.weight:[67]***blocks.4.2.bn3.bias:[67]***blocks.4.2.bn3.running_mean:[67]***blocks.4.2.bn3.running_var:[67]***blocks.4.2.bn3.num_batches_tracked:[]***blocks.4.3.conv_pw.weight:[213, 67, 1, 1]***blocks.4.3.bn1.weight:[213]***blocks.4.3.bn1.bias:[213]***blocks.4.3.bn1.running_mean:[213]***blocks.4.3.bn1.running_var:[213]***blocks.4.3.bn1.num_batches_tracked:[]***blocks.4.3.conv_dw.weight:[213, 1, 5, 5]***blocks.4.3.bn2.weight:[213]***blocks.4.3.bn2.bias:[213]***blocks.4.3.bn2.running_mean:[213]***blocks.4.3.bn2.running_var:[213]***blocks.4.3.bn2.num_batches_tracked:[]***blocks.4.3.se.conv_reduce.weight:[28, 213, 1, 1]***blocks.4.3.se.conv_reduce.bias:[28]***blocks.4.3.se.conv_expand.weight:[213, 28, 1, 1]***blocks.4.3.se.conv_expand.bias:[213]***blocks.4.3.conv_pwl.weight:[67, 213, 1, 1]***blocks.4.3.bn3.weight:[67]***blocks.4.3.bn3.bias:[67]***blocks.4.3.bn3.running_mean:[67]***blocks.4.3.bn3.running_var:[67]***blocks.4.3.bn3.num_batches_tracked:[]***blocks.5.0.conv_pw.weight:[637, 67, 1, 1]***blocks.5.0.bn1.weight:[637]***blocks.5.0.bn1.bias:[637]***blocks.5.0.bn1.running_mean:[637]***blocks.5.0.bn1.running_var:[637]***blocks.5.0.bn1.num_batches_tracked:[]***blocks.5.0.conv_dw.weight:[637, 1, 5, 5]***blocks.5.0.bn2.weight:[637]***blocks.5.0.bn2.bias:[637]***blocks.5.0.bn2.running_mean:[637]***blocks.5.0.bn2.running_var:[637]***blocks.5.0.bn2.num_batches_tracked:[]***blocks.5.0.se.conv_reduce.weight:[27, 637, 1, 1]***blocks.5.0.se.conv_reduce.bias:[27]***blocks.5.0.se.conv_expand.weight:[637, 27, 1, 1]***blocks.5.0.se.conv_expand.bias:[637]***blocks.5.0.conv_pwl.weight:[192, 637, 1, 1]***blocks.5.0.bn3.weight:[192]***blocks.5.0.bn3.bias:[192]***blocks.5.0.bn3.running_mean:[192]***blocks.5.0.bn3.running_var:[192]***blocks.5.0.bn3.num_batches_tracked:[]***blocks.5.1.conv_pw.weight:[806, 192, 1, 1]***blocks.5.1.bn1.weight:[806]***blocks.5.1.bn1.bias:[806]***blocks.5.1.bn1.running_mean:[806]***blocks.5.1.bn1.running_var:[806]***blocks.5.1.bn1.num_batches_tracked:[]***blocks.5.1.conv_dw.weight:[806, 1, 5, 5]***blocks.5.1.bn2.weight:[806]***blocks.5.1.bn2.bias:[806]***blocks.5.1.bn2.running_mean:[806]***blocks.5.1.bn2.running_var:[806]***blocks.5.1.bn2.num_batches_tracked:[]***blocks.5.1.se.conv_reduce.weight:[48, 806, 1, 
1]***blocks.5.1.se.conv_reduce.bias:[48]***blocks.5.1.se.conv_expand.weight:[806, 48, 1, 1]***blocks.5.1.se.conv_expand.bias:[806]***blocks.5.1.conv_pwl.weight:[192, 806, 1, 1]***blocks.5.1.bn3.weight:[192]***blocks.5.1.bn3.bias:[192]***blocks.5.1.bn3.running_mean:[192]***blocks.5.1.bn3.running_var:[192]***blocks.5.1.bn3.num_batches_tracked:[]***blocks.5.2.conv_pw.weight:[798, 192, 1, 1]***blocks.5.2.bn1.weight:[798]***blocks.5.2.bn1.bias:[798]***blocks.5.2.bn1.running_mean:[798]***blocks.5.2.bn1.running_var:[798]***blocks.5.2.bn1.num_batches_tracked:[]***blocks.5.2.conv_dw.weight:[798, 1, 5, 5]***blocks.5.2.bn2.weight:[798]***blocks.5.2.bn2.bias:[798]***blocks.5.2.bn2.running_mean:[798]***blocks.5.2.bn2.running_var:[798]***blocks.5.2.bn2.num_batches_tracked:[]***blocks.5.2.se.conv_reduce.weight:[48, 798, 1, 1]***blocks.5.2.se.conv_reduce.bias:[48]***blocks.5.2.se.conv_expand.weight:[798, 48, 1, 1]***blocks.5.2.se.conv_expand.bias:[798]***blocks.5.2.conv_pwl.weight:[192, 798, 1, 1]***blocks.5.2.bn3.weight:[192]***blocks.5.2.bn3.bias:[192]***blocks.5.2.bn3.running_mean:[192]***blocks.5.2.bn3.running_var:[192]***blocks.5.2.bn3.num_batches_tracked:[]***blocks.5.3.conv_pw.weight:[891, 192, 1, 1]***blocks.5.3.bn1.weight:[891]***blocks.5.3.bn1.bias:[891]***blocks.5.3.bn1.running_mean:[891]***blocks.5.3.bn1.running_var:[891]***blocks.5.3.bn1.num_batches_tracked:[]***blocks.5.3.conv_dw.weight:[891, 1, 5, 5]***blocks.5.3.bn2.weight:[891]***blocks.5.3.bn2.bias:[891]***blocks.5.3.bn2.running_mean:[891]***blocks.5.3.bn2.running_var:[891]***blocks.5.3.bn2.num_batches_tracked:[]***blocks.5.3.se.conv_reduce.weight:[48, 891, 1, 1]***blocks.5.3.se.conv_reduce.bias:[48]***blocks.5.3.se.conv_expand.weight:[891, 48, 1, 1]***blocks.5.3.se.conv_expand.bias:[891]***blocks.5.3.conv_pwl.weight:[192, 891, 1, 1]***blocks.5.3.bn3.weight:[192]***blocks.5.3.bn3.bias:[192]***blocks.5.3.bn3.running_mean:[192]***blocks.5.3.bn3.running_var:[192]***blocks.5.3.bn3.num_batches_tracked:[]***blocks.5.4.conv_pw.weight:[990, 192, 1, 1]***blocks.5.4.bn1.weight:[990]***blocks.5.4.bn1.bias:[990]***blocks.5.4.bn1.running_mean:[990]***blocks.5.4.bn1.running_var:[990]***blocks.5.4.bn1.num_batches_tracked:[]***blocks.5.4.conv_dw.weight:[990, 1, 5, 5]***blocks.5.4.bn2.weight:[990]***blocks.5.4.bn2.bias:[990]***blocks.5.4.bn2.running_mean:[990]***blocks.5.4.bn2.running_var:[990]***blocks.5.4.bn2.num_batches_tracked:[]***blocks.5.4.se.conv_reduce.weight:[48, 990, 1, 1]***blocks.5.4.se.conv_reduce.bias:[48]***blocks.5.4.se.conv_expand.weight:[990, 48, 1, 1]***blocks.5.4.se.conv_expand.bias:[990]***blocks.5.4.conv_pwl.weight:[192, 990, 1, 1]***blocks.5.4.bn3.weight:[192]***blocks.5.4.bn3.bias:[192]***blocks.5.4.bn3.running_mean:[192]***blocks.5.4.bn3.running_var:[192]***blocks.5.4.bn3.num_batches_tracked:[]***blocks.6.0.conv_pw.weight:[1152, 192, 1, 1]***blocks.6.0.bn1.weight:[1152]***blocks.6.0.bn1.bias:[1152]***blocks.6.0.bn1.running_mean:[1152]***blocks.6.0.bn1.running_var:[1152]***blocks.6.0.bn1.num_batches_tracked:[]***blocks.6.0.conv_dw.weight:[1152, 1, 3, 3]***blocks.6.0.bn2.weight:[1152]***blocks.6.0.bn2.bias:[1152]***blocks.6.0.bn2.running_mean:[1152]***blocks.6.0.bn2.running_var:[1152]***blocks.6.0.bn2.num_batches_tracked:[]***blocks.6.0.se.conv_reduce.weight:[48, 1152, 1, 1]***blocks.6.0.se.conv_reduce.bias:[48]***blocks.6.0.se.conv_expand.weight:[1152, 48, 1, 1]***blocks.6.0.se.conv_expand.bias:[1152]***blocks.6.0.conv_pwl.weight:[320, 1152, 1, 
1]***blocks.6.0.bn3.weight:[320]***blocks.6.0.bn3.bias:[320]***blocks.6.0.bn3.running_mean:[320]***blocks.6.0.bn3.running_var:[320]***blocks.6.0.bn3.num_batches_tracked:[]***blocks.6.1.conv_pw.weight:[1912, 320, 1, 1]***blocks.6.1.bn1.weight:[1912]***blocks.6.1.bn1.bias:[1912]***blocks.6.1.bn1.running_mean:[1912]***blocks.6.1.bn1.running_var:[1912]***blocks.6.1.bn1.num_batches_tracked:[]***blocks.6.1.conv_dw.weight:[1912, 1, 3, 3]***blocks.6.1.bn2.weight:[1912]***blocks.6.1.bn2.bias:[1912]***blocks.6.1.bn2.running_mean:[1912]***blocks.6.1.bn2.running_var:[1912]***blocks.6.1.bn2.num_batches_tracked:[]***blocks.6.1.se.conv_reduce.weight:[80, 1912, 1, 1]***blocks.6.1.se.conv_reduce.bias:[80]***blocks.6.1.se.conv_expand.weight:[1912, 80, 1, 1]***blocks.6.1.se.conv_expand.bias:[1912]***blocks.6.1.conv_pwl.weight:[320, 1912, 1, 1]***blocks.6.1.bn3.weight:[320]***blocks.6.1.bn3.bias:[320]***blocks.6.1.bn3.running_mean:[320]***blocks.6.1.bn3.running_var:[320]***blocks.6.1.bn3.num_batches_tracked:[]***conv_head.weight:[1280, 320, 1, 1]***bn2.weight:[1280]***bn2.bias:[1280]***bn2.running_mean:[1280]***bn2.running_var:[1280]***bn2.num_batches_tracked:[]***classifier.weight:[1000, 1280]***classifier.bias:[1000] \ No newline at end of file diff --git a/timm/models/pruned/efficientnet_b2_pruned.txt b/timm/models/pruned/efficientnet_b2_pruned.txt new file mode 100644 index 0000000..6e3fade --- /dev/null +++ b/timm/models/pruned/efficientnet_b2_pruned.txt @@ -0,0 +1 @@ +conv_stem.weight:[32, 3, 3, 3]***bn1.weight:[32]***bn1.bias:[32]***bn1.running_mean:[32]***bn1.running_var:[32]***bn1.num_batches_tracked:[]***blocks.0.0.conv_dw.weight:[32, 1, 3, 3]***blocks.0.0.bn1.weight:[32]***blocks.0.0.bn1.bias:[32]***blocks.0.0.bn1.running_mean:[32]***blocks.0.0.bn1.running_var:[32]***blocks.0.0.bn1.num_batches_tracked:[]***blocks.0.0.se.conv_reduce.weight:[8, 32, 1, 1]***blocks.0.0.se.conv_reduce.bias:[8]***blocks.0.0.se.conv_expand.weight:[32, 8, 1, 1]***blocks.0.0.se.conv_expand.bias:[32]***blocks.0.0.conv_pw.weight:[16, 32, 1, 1]***blocks.0.0.bn2.weight:[16]***blocks.0.0.bn2.bias:[16]***blocks.0.0.bn2.running_mean:[16]***blocks.0.0.bn2.running_var:[16]***blocks.0.0.bn2.num_batches_tracked:[]***blocks.0.1.conv_dw.weight:[16, 1, 3, 3]***blocks.0.1.bn1.weight:[16]***blocks.0.1.bn1.bias:[16]***blocks.0.1.bn1.running_mean:[16]***blocks.0.1.bn1.running_var:[16]***blocks.0.1.bn1.num_batches_tracked:[]***blocks.0.1.se.conv_reduce.weight:[4, 16, 1, 1]***blocks.0.1.se.conv_reduce.bias:[4]***blocks.0.1.se.conv_expand.weight:[16, 4, 1, 1]***blocks.0.1.se.conv_expand.bias:[16]***blocks.0.1.conv_pw.weight:[16, 16, 1, 1]***blocks.0.1.bn2.weight:[16]***blocks.0.1.bn2.bias:[16]***blocks.0.1.bn2.running_mean:[16]***blocks.0.1.bn2.running_var:[16]***blocks.0.1.bn2.num_batches_tracked:[]***blocks.1.0.conv_pw.weight:[54, 16, 1, 1]***blocks.1.0.bn1.weight:[54]***blocks.1.0.bn1.bias:[54]***blocks.1.0.bn1.running_mean:[54]***blocks.1.0.bn1.running_var:[54]***blocks.1.0.bn1.num_batches_tracked:[]***blocks.1.0.conv_dw.weight:[54, 1, 3, 3]***blocks.1.0.bn2.weight:[54]***blocks.1.0.bn2.bias:[54]***blocks.1.0.bn2.running_mean:[54]***blocks.1.0.bn2.running_var:[54]***blocks.1.0.bn2.num_batches_tracked:[]***blocks.1.0.se.conv_reduce.weight:[4, 54, 1, 1]***blocks.1.0.se.conv_reduce.bias:[4]***blocks.1.0.se.conv_expand.weight:[54, 4, 1, 1]***blocks.1.0.se.conv_expand.bias:[54]***blocks.1.0.conv_pwl.weight:[17, 54, 1, 
1]***blocks.1.0.bn3.weight:[17]***blocks.1.0.bn3.bias:[17]***blocks.1.0.bn3.running_mean:[17]***blocks.1.0.bn3.running_var:[17]***blocks.1.0.bn3.num_batches_tracked:[]***blocks.1.1.conv_pw.weight:[69, 17, 1, 1]***blocks.1.1.bn1.weight:[69]***blocks.1.1.bn1.bias:[69]***blocks.1.1.bn1.running_mean:[69]***blocks.1.1.bn1.running_var:[69]***blocks.1.1.bn1.num_batches_tracked:[]***blocks.1.1.conv_dw.weight:[69, 1, 3, 3]***blocks.1.1.bn2.weight:[69]***blocks.1.1.bn2.bias:[69]***blocks.1.1.bn2.running_mean:[69]***blocks.1.1.bn2.running_var:[69]***blocks.1.1.bn2.num_batches_tracked:[]***blocks.1.1.se.conv_reduce.weight:[6, 69, 1, 1]***blocks.1.1.se.conv_reduce.bias:[6]***blocks.1.1.se.conv_expand.weight:[69, 6, 1, 1]***blocks.1.1.se.conv_expand.bias:[69]***blocks.1.1.conv_pwl.weight:[17, 69, 1, 1]***blocks.1.1.bn3.weight:[17]***blocks.1.1.bn3.bias:[17]***blocks.1.1.bn3.running_mean:[17]***blocks.1.1.bn3.running_var:[17]***blocks.1.1.bn3.num_batches_tracked:[]***blocks.1.2.conv_pw.weight:[61, 17, 1, 1]***blocks.1.2.bn1.weight:[61]***blocks.1.2.bn1.bias:[61]***blocks.1.2.bn1.running_mean:[61]***blocks.1.2.bn1.running_var:[61]***blocks.1.2.bn1.num_batches_tracked:[]***blocks.1.2.conv_dw.weight:[61, 1, 3, 3]***blocks.1.2.bn2.weight:[61]***blocks.1.2.bn2.bias:[61]***blocks.1.2.bn2.running_mean:[61]***blocks.1.2.bn2.running_var:[61]***blocks.1.2.bn2.num_batches_tracked:[]***blocks.1.2.se.conv_reduce.weight:[6, 61, 1, 1]***blocks.1.2.se.conv_reduce.bias:[6]***blocks.1.2.se.conv_expand.weight:[61, 6, 1, 1]***blocks.1.2.se.conv_expand.bias:[61]***blocks.1.2.conv_pwl.weight:[17, 61, 1, 1]***blocks.1.2.bn3.weight:[17]***blocks.1.2.bn3.bias:[17]***blocks.1.2.bn3.running_mean:[17]***blocks.1.2.bn3.running_var:[17]***blocks.1.2.bn3.num_batches_tracked:[]***blocks.2.0.conv_pw.weight:[86, 17, 1, 1]***blocks.2.0.bn1.weight:[86]***blocks.2.0.bn1.bias:[86]***blocks.2.0.bn1.running_mean:[86]***blocks.2.0.bn1.running_var:[86]***blocks.2.0.bn1.num_batches_tracked:[]***blocks.2.0.conv_dw.weight:[86, 1, 5, 5]***blocks.2.0.bn2.weight:[86]***blocks.2.0.bn2.bias:[86]***blocks.2.0.bn2.running_mean:[86]***blocks.2.0.bn2.running_var:[86]***blocks.2.0.bn2.num_batches_tracked:[]***blocks.2.0.se.conv_reduce.weight:[6, 86, 1, 1]***blocks.2.0.se.conv_reduce.bias:[6]***blocks.2.0.se.conv_expand.weight:[86, 6, 1, 1]***blocks.2.0.se.conv_expand.bias:[86]***blocks.2.0.conv_pwl.weight:[42, 86, 1, 1]***blocks.2.0.bn3.weight:[42]***blocks.2.0.bn3.bias:[42]***blocks.2.0.bn3.running_mean:[42]***blocks.2.0.bn3.running_var:[42]***blocks.2.0.bn3.num_batches_tracked:[]***blocks.2.1.conv_pw.weight:[72, 42, 1, 1]***blocks.2.1.bn1.weight:[72]***blocks.2.1.bn1.bias:[72]***blocks.2.1.bn1.running_mean:[72]***blocks.2.1.bn1.running_var:[72]***blocks.2.1.bn1.num_batches_tracked:[]***blocks.2.1.conv_dw.weight:[72, 1, 5, 5]***blocks.2.1.bn2.weight:[72]***blocks.2.1.bn2.bias:[72]***blocks.2.1.bn2.running_mean:[72]***blocks.2.1.bn2.running_var:[72]***blocks.2.1.bn2.num_batches_tracked:[]***blocks.2.1.se.conv_reduce.weight:[12, 72, 1, 1]***blocks.2.1.se.conv_reduce.bias:[12]***blocks.2.1.se.conv_expand.weight:[72, 12, 1, 1]***blocks.2.1.se.conv_expand.bias:[72]***blocks.2.1.conv_pwl.weight:[42, 72, 1, 1]***blocks.2.1.bn3.weight:[42]***blocks.2.1.bn3.bias:[42]***blocks.2.1.bn3.running_mean:[42]***blocks.2.1.bn3.running_var:[42]***blocks.2.1.bn3.num_batches_tracked:[]***blocks.2.2.conv_pw.weight:[98, 42, 1, 
1]***blocks.2.2.bn1.weight:[98]***blocks.2.2.bn1.bias:[98]***blocks.2.2.bn1.running_mean:[98]***blocks.2.2.bn1.running_var:[98]***blocks.2.2.bn1.num_batches_tracked:[]***blocks.2.2.conv_dw.weight:[98, 1, 5, 5]***blocks.2.2.bn2.weight:[98]***blocks.2.2.bn2.bias:[98]***blocks.2.2.bn2.running_mean:[98]***blocks.2.2.bn2.running_var:[98]***blocks.2.2.bn2.num_batches_tracked:[]***blocks.2.2.se.conv_reduce.weight:[12, 98, 1, 1]***blocks.2.2.se.conv_reduce.bias:[12]***blocks.2.2.se.conv_expand.weight:[98, 12, 1, 1]***blocks.2.2.se.conv_expand.bias:[98]***blocks.2.2.conv_pwl.weight:[42, 98, 1, 1]***blocks.2.2.bn3.weight:[42]***blocks.2.2.bn3.bias:[42]***blocks.2.2.bn3.running_mean:[42]***blocks.2.2.bn3.running_var:[42]***blocks.2.2.bn3.num_batches_tracked:[]***blocks.3.0.conv_pw.weight:[245, 42, 1, 1]***blocks.3.0.bn1.weight:[245]***blocks.3.0.bn1.bias:[245]***blocks.3.0.bn1.running_mean:[245]***blocks.3.0.bn1.running_var:[245]***blocks.3.0.bn1.num_batches_tracked:[]***blocks.3.0.conv_dw.weight:[245, 1, 3, 3]***blocks.3.0.bn2.weight:[245]***blocks.3.0.bn2.bias:[245]***blocks.3.0.bn2.running_mean:[245]***blocks.3.0.bn2.running_var:[245]***blocks.3.0.bn2.num_batches_tracked:[]***blocks.3.0.se.conv_reduce.weight:[12, 245, 1, 1]***blocks.3.0.se.conv_reduce.bias:[12]***blocks.3.0.se.conv_expand.weight:[245, 12, 1, 1]***blocks.3.0.se.conv_expand.bias:[245]***blocks.3.0.conv_pwl.weight:[85, 245, 1, 1]***blocks.3.0.bn3.weight:[85]***blocks.3.0.bn3.bias:[85]***blocks.3.0.bn3.running_mean:[85]***blocks.3.0.bn3.running_var:[85]***blocks.3.0.bn3.num_batches_tracked:[]***blocks.3.1.conv_pw.weight:[274, 85, 1, 1]***blocks.3.1.bn1.weight:[274]***blocks.3.1.bn1.bias:[274]***blocks.3.1.bn1.running_mean:[274]***blocks.3.1.bn1.running_var:[274]***blocks.3.1.bn1.num_batches_tracked:[]***blocks.3.1.conv_dw.weight:[274, 1, 3, 3]***blocks.3.1.bn2.weight:[274]***blocks.3.1.bn2.bias:[274]***blocks.3.1.bn2.running_mean:[274]***blocks.3.1.bn2.running_var:[274]***blocks.3.1.bn2.num_batches_tracked:[]***blocks.3.1.se.conv_reduce.weight:[22, 274, 1, 1]***blocks.3.1.se.conv_reduce.bias:[22]***blocks.3.1.se.conv_expand.weight:[274, 22, 1, 1]***blocks.3.1.se.conv_expand.bias:[274]***blocks.3.1.conv_pwl.weight:[85, 274, 1, 1]***blocks.3.1.bn3.weight:[85]***blocks.3.1.bn3.bias:[85]***blocks.3.1.bn3.running_mean:[85]***blocks.3.1.bn3.running_var:[85]***blocks.3.1.bn3.num_batches_tracked:[]***blocks.3.2.conv_pw.weight:[254, 85, 1, 1]***blocks.3.2.bn1.weight:[254]***blocks.3.2.bn1.bias:[254]***blocks.3.2.bn1.running_mean:[254]***blocks.3.2.bn1.running_var:[254]***blocks.3.2.bn1.num_batches_tracked:[]***blocks.3.2.conv_dw.weight:[254, 1, 3, 3]***blocks.3.2.bn2.weight:[254]***blocks.3.2.bn2.bias:[254]***blocks.3.2.bn2.running_mean:[254]***blocks.3.2.bn2.running_var:[254]***blocks.3.2.bn2.num_batches_tracked:[]***blocks.3.2.se.conv_reduce.weight:[22, 254, 1, 1]***blocks.3.2.se.conv_reduce.bias:[22]***blocks.3.2.se.conv_expand.weight:[254, 22, 1, 1]***blocks.3.2.se.conv_expand.bias:[254]***blocks.3.2.conv_pwl.weight:[85, 254, 1, 1]***blocks.3.2.bn3.weight:[85]***blocks.3.2.bn3.bias:[85]***blocks.3.2.bn3.running_mean:[85]***blocks.3.2.bn3.running_var:[85]***blocks.3.2.bn3.num_batches_tracked:[]***blocks.3.3.conv_pw.weight:[292, 85, 1, 1]***blocks.3.3.bn1.weight:[292]***blocks.3.3.bn1.bias:[292]***blocks.3.3.bn1.running_mean:[292]***blocks.3.3.bn1.running_var:[292]***blocks.3.3.bn1.num_batches_tracked:[]***blocks.3.3.conv_dw.weight:[292, 1, 3, 
3]***blocks.3.3.bn2.weight:[292]***blocks.3.3.bn2.bias:[292]***blocks.3.3.bn2.running_mean:[292]***blocks.3.3.bn2.running_var:[292]***blocks.3.3.bn2.num_batches_tracked:[]***blocks.3.3.se.conv_reduce.weight:[22, 292, 1, 1]***blocks.3.3.se.conv_reduce.bias:[22]***blocks.3.3.se.conv_expand.weight:[292, 22, 1, 1]***blocks.3.3.se.conv_expand.bias:[292]***blocks.3.3.conv_pwl.weight:[85, 292, 1, 1]***blocks.3.3.bn3.weight:[85]***blocks.3.3.bn3.bias:[85]***blocks.3.3.bn3.running_mean:[85]***blocks.3.3.bn3.running_var:[85]***blocks.3.3.bn3.num_batches_tracked:[]***blocks.4.0.conv_pw.weight:[502, 85, 1, 1]***blocks.4.0.bn1.weight:[502]***blocks.4.0.bn1.bias:[502]***blocks.4.0.bn1.running_mean:[502]***blocks.4.0.bn1.running_var:[502]***blocks.4.0.bn1.num_batches_tracked:[]***blocks.4.0.conv_dw.weight:[502, 1, 5, 5]***blocks.4.0.bn2.weight:[502]***blocks.4.0.bn2.bias:[502]***blocks.4.0.bn2.running_mean:[502]***blocks.4.0.bn2.running_var:[502]***blocks.4.0.bn2.num_batches_tracked:[]***blocks.4.0.se.conv_reduce.weight:[22, 502, 1, 1]***blocks.4.0.se.conv_reduce.bias:[22]***blocks.4.0.se.conv_expand.weight:[502, 22, 1, 1]***blocks.4.0.se.conv_expand.bias:[502]***blocks.4.0.conv_pwl.weight:[116, 502, 1, 1]***blocks.4.0.bn3.weight:[116]***blocks.4.0.bn3.bias:[116]***blocks.4.0.bn3.running_mean:[116]***blocks.4.0.bn3.running_var:[116]***blocks.4.0.bn3.num_batches_tracked:[]***blocks.4.1.conv_pw.weight:[315, 116, 1, 1]***blocks.4.1.bn1.weight:[315]***blocks.4.1.bn1.bias:[315]***blocks.4.1.bn1.running_mean:[315]***blocks.4.1.bn1.running_var:[315]***blocks.4.1.bn1.num_batches_tracked:[]***blocks.4.1.conv_dw.weight:[315, 1, 5, 5]***blocks.4.1.bn2.weight:[315]***blocks.4.1.bn2.bias:[315]***blocks.4.1.bn2.running_mean:[315]***blocks.4.1.bn2.running_var:[315]***blocks.4.1.bn2.num_batches_tracked:[]***blocks.4.1.se.conv_reduce.weight:[30, 315, 1, 1]***blocks.4.1.se.conv_reduce.bias:[30]***blocks.4.1.se.conv_expand.weight:[315, 30, 1, 1]***blocks.4.1.se.conv_expand.bias:[315]***blocks.4.1.conv_pwl.weight:[116, 315, 1, 1]***blocks.4.1.bn3.weight:[116]***blocks.4.1.bn3.bias:[116]***blocks.4.1.bn3.running_mean:[116]***blocks.4.1.bn3.running_var:[116]***blocks.4.1.bn3.num_batches_tracked:[]***blocks.4.2.conv_pw.weight:[354, 116, 1, 1]***blocks.4.2.bn1.weight:[354]***blocks.4.2.bn1.bias:[354]***blocks.4.2.bn1.running_mean:[354]***blocks.4.2.bn1.running_var:[354]***blocks.4.2.bn1.num_batches_tracked:[]***blocks.4.2.conv_dw.weight:[354, 1, 5, 5]***blocks.4.2.bn2.weight:[354]***blocks.4.2.bn2.bias:[354]***blocks.4.2.bn2.running_mean:[354]***blocks.4.2.bn2.running_var:[354]***blocks.4.2.bn2.num_batches_tracked:[]***blocks.4.2.se.conv_reduce.weight:[30, 354, 1, 1]***blocks.4.2.se.conv_reduce.bias:[30]***blocks.4.2.se.conv_expand.weight:[354, 30, 1, 1]***blocks.4.2.se.conv_expand.bias:[354]***blocks.4.2.conv_pwl.weight:[116, 354, 1, 1]***blocks.4.2.bn3.weight:[116]***blocks.4.2.bn3.bias:[116]***blocks.4.2.bn3.running_mean:[116]***blocks.4.2.bn3.running_var:[116]***blocks.4.2.bn3.num_batches_tracked:[]***blocks.4.3.conv_pw.weight:[443, 116, 1, 1]***blocks.4.3.bn1.weight:[443]***blocks.4.3.bn1.bias:[443]***blocks.4.3.bn1.running_mean:[443]***blocks.4.3.bn1.running_var:[443]***blocks.4.3.bn1.num_batches_tracked:[]***blocks.4.3.conv_dw.weight:[443, 1, 5, 5]***blocks.4.3.bn2.weight:[443]***blocks.4.3.bn2.bias:[443]***blocks.4.3.bn2.running_mean:[443]***blocks.4.3.bn2.running_var:[443]***blocks.4.3.bn2.num_batches_tracked:[]***blocks.4.3.se.conv_reduce.weight:[30, 443, 1, 
1]***blocks.4.3.se.conv_reduce.bias:[30]***blocks.4.3.se.conv_expand.weight:[443, 30, 1, 1]***blocks.4.3.se.conv_expand.bias:[443]***blocks.4.3.conv_pwl.weight:[116, 443, 1, 1]***blocks.4.3.bn3.weight:[116]***blocks.4.3.bn3.bias:[116]***blocks.4.3.bn3.running_mean:[116]***blocks.4.3.bn3.running_var:[116]***blocks.4.3.bn3.num_batches_tracked:[]***blocks.5.0.conv_pw.weight:[719, 116, 1, 1]***blocks.5.0.bn1.weight:[719]***blocks.5.0.bn1.bias:[719]***blocks.5.0.bn1.running_mean:[719]***blocks.5.0.bn1.running_var:[719]***blocks.5.0.bn1.num_batches_tracked:[]***blocks.5.0.conv_dw.weight:[719, 1, 5, 5]***blocks.5.0.bn2.weight:[719]***blocks.5.0.bn2.bias:[719]***blocks.5.0.bn2.running_mean:[719]***blocks.5.0.bn2.running_var:[719]***blocks.5.0.bn2.num_batches_tracked:[]***blocks.5.0.se.conv_reduce.weight:[30, 719, 1, 1]***blocks.5.0.se.conv_reduce.bias:[30]***blocks.5.0.se.conv_expand.weight:[719, 30, 1, 1]***blocks.5.0.se.conv_expand.bias:[719]***blocks.5.0.conv_pwl.weight:[208, 719, 1, 1]***blocks.5.0.bn3.weight:[208]***blocks.5.0.bn3.bias:[208]***blocks.5.0.bn3.running_mean:[208]***blocks.5.0.bn3.running_var:[208]***blocks.5.0.bn3.num_batches_tracked:[]***blocks.5.1.conv_pw.weight:[1148, 208, 1, 1]***blocks.5.1.bn1.weight:[1148]***blocks.5.1.bn1.bias:[1148]***blocks.5.1.bn1.running_mean:[1148]***blocks.5.1.bn1.running_var:[1148]***blocks.5.1.bn1.num_batches_tracked:[]***blocks.5.1.conv_dw.weight:[1148, 1, 5, 5]***blocks.5.1.bn2.weight:[1148]***blocks.5.1.bn2.bias:[1148]***blocks.5.1.bn2.running_mean:[1148]***blocks.5.1.bn2.running_var:[1148]***blocks.5.1.bn2.num_batches_tracked:[]***blocks.5.1.se.conv_reduce.weight:[52, 1148, 1, 1]***blocks.5.1.se.conv_reduce.bias:[52]***blocks.5.1.se.conv_expand.weight:[1148, 52, 1, 1]***blocks.5.1.se.conv_expand.bias:[1148]***blocks.5.1.conv_pwl.weight:[208, 1148, 1, 1]***blocks.5.1.bn3.weight:[208]***blocks.5.1.bn3.bias:[208]***blocks.5.1.bn3.running_mean:[208]***blocks.5.1.bn3.running_var:[208]***blocks.5.1.bn3.num_batches_tracked:[]***blocks.5.2.conv_pw.weight:[1160, 208, 1, 1]***blocks.5.2.bn1.weight:[1160]***blocks.5.2.bn1.bias:[1160]***blocks.5.2.bn1.running_mean:[1160]***blocks.5.2.bn1.running_var:[1160]***blocks.5.2.bn1.num_batches_tracked:[]***blocks.5.2.conv_dw.weight:[1160, 1, 5, 5]***blocks.5.2.bn2.weight:[1160]***blocks.5.2.bn2.bias:[1160]***blocks.5.2.bn2.running_mean:[1160]***blocks.5.2.bn2.running_var:[1160]***blocks.5.2.bn2.num_batches_tracked:[]***blocks.5.2.se.conv_reduce.weight:[52, 1160, 1, 1]***blocks.5.2.se.conv_reduce.bias:[52]***blocks.5.2.se.conv_expand.weight:[1160, 52, 1, 1]***blocks.5.2.se.conv_expand.bias:[1160]***blocks.5.2.conv_pwl.weight:[208, 1160, 1, 1]***blocks.5.2.bn3.weight:[208]***blocks.5.2.bn3.bias:[208]***blocks.5.2.bn3.running_mean:[208]***blocks.5.2.bn3.running_var:[208]***blocks.5.2.bn3.num_batches_tracked:[]***blocks.5.3.conv_pw.weight:[1182, 208, 1, 1]***blocks.5.3.bn1.weight:[1182]***blocks.5.3.bn1.bias:[1182]***blocks.5.3.bn1.running_mean:[1182]***blocks.5.3.bn1.running_var:[1182]***blocks.5.3.bn1.num_batches_tracked:[]***blocks.5.3.conv_dw.weight:[1182, 1, 5, 5]***blocks.5.3.bn2.weight:[1182]***blocks.5.3.bn2.bias:[1182]***blocks.5.3.bn2.running_mean:[1182]***blocks.5.3.bn2.running_var:[1182]***blocks.5.3.bn2.num_batches_tracked:[]***blocks.5.3.se.conv_reduce.weight:[52, 1182, 1, 1]***blocks.5.3.se.conv_reduce.bias:[52]***blocks.5.3.se.conv_expand.weight:[1182, 52, 1, 1]***blocks.5.3.se.conv_expand.bias:[1182]***blocks.5.3.conv_pwl.weight:[208, 1182, 1, 
1]***blocks.5.3.bn3.weight:[208]***blocks.5.3.bn3.bias:[208]***blocks.5.3.bn3.running_mean:[208]***blocks.5.3.bn3.running_var:[208]***blocks.5.3.bn3.num_batches_tracked:[]***blocks.5.4.conv_pw.weight:[1228, 208, 1, 1]***blocks.5.4.bn1.weight:[1228]***blocks.5.4.bn1.bias:[1228]***blocks.5.4.bn1.running_mean:[1228]***blocks.5.4.bn1.running_var:[1228]***blocks.5.4.bn1.num_batches_tracked:[]***blocks.5.4.conv_dw.weight:[1228, 1, 5, 5]***blocks.5.4.bn2.weight:[1228]***blocks.5.4.bn2.bias:[1228]***blocks.5.4.bn2.running_mean:[1228]***blocks.5.4.bn2.running_var:[1228]***blocks.5.4.bn2.num_batches_tracked:[]***blocks.5.4.se.conv_reduce.weight:[52, 1228, 1, 1]***blocks.5.4.se.conv_reduce.bias:[52]***blocks.5.4.se.conv_expand.weight:[1228, 52, 1, 1]***blocks.5.4.se.conv_expand.bias:[1228]***blocks.5.4.conv_pwl.weight:[208, 1228, 1, 1]***blocks.5.4.bn3.weight:[208]***blocks.5.4.bn3.bias:[208]***blocks.5.4.bn3.running_mean:[208]***blocks.5.4.bn3.running_var:[208]***blocks.5.4.bn3.num_batches_tracked:[]***blocks.6.0.conv_pw.weight:[1248, 208, 1, 1]***blocks.6.0.bn1.weight:[1248]***blocks.6.0.bn1.bias:[1248]***blocks.6.0.bn1.running_mean:[1248]***blocks.6.0.bn1.running_var:[1248]***blocks.6.0.bn1.num_batches_tracked:[]***blocks.6.0.conv_dw.weight:[1248, 1, 3, 3]***blocks.6.0.bn2.weight:[1248]***blocks.6.0.bn2.bias:[1248]***blocks.6.0.bn2.running_mean:[1248]***blocks.6.0.bn2.running_var:[1248]***blocks.6.0.bn2.num_batches_tracked:[]***blocks.6.0.se.conv_reduce.weight:[52, 1248, 1, 1]***blocks.6.0.se.conv_reduce.bias:[52]***blocks.6.0.se.conv_expand.weight:[1248, 52, 1, 1]***blocks.6.0.se.conv_expand.bias:[1248]***blocks.6.0.conv_pwl.weight:[352, 1248, 1, 1]***blocks.6.0.bn3.weight:[352]***blocks.6.0.bn3.bias:[352]***blocks.6.0.bn3.running_mean:[352]***blocks.6.0.bn3.running_var:[352]***blocks.6.0.bn3.num_batches_tracked:[]***blocks.6.1.conv_pw.weight:[2112, 352, 1, 1]***blocks.6.1.bn1.weight:[2112]***blocks.6.1.bn1.bias:[2112]***blocks.6.1.bn1.running_mean:[2112]***blocks.6.1.bn1.running_var:[2112]***blocks.6.1.bn1.num_batches_tracked:[]***blocks.6.1.conv_dw.weight:[2112, 1, 3, 3]***blocks.6.1.bn2.weight:[2112]***blocks.6.1.bn2.bias:[2112]***blocks.6.1.bn2.running_mean:[2112]***blocks.6.1.bn2.running_var:[2112]***blocks.6.1.bn2.num_batches_tracked:[]***blocks.6.1.se.conv_reduce.weight:[88, 2112, 1, 1]***blocks.6.1.se.conv_reduce.bias:[88]***blocks.6.1.se.conv_expand.weight:[2112, 88, 1, 1]***blocks.6.1.se.conv_expand.bias:[2112]***blocks.6.1.conv_pwl.weight:[352, 2112, 1, 1]***blocks.6.1.bn3.weight:[352]***blocks.6.1.bn3.bias:[352]***blocks.6.1.bn3.running_mean:[352]***blocks.6.1.bn3.running_var:[352]***blocks.6.1.bn3.num_batches_tracked:[]***conv_head.weight:[1408, 352, 1, 1]***bn2.weight:[1408]***bn2.bias:[1408]***bn2.running_mean:[1408]***bn2.running_var:[1408]***bn2.num_batches_tracked:[]***classifier.weight:[1000, 1408]***classifier.bias:[1000] \ No newline at end of file diff --git a/timm/models/pruned/efficientnet_b3_pruned.txt b/timm/models/pruned/efficientnet_b3_pruned.txt new file mode 100644 index 0000000..4897817 --- /dev/null +++ b/timm/models/pruned/efficientnet_b3_pruned.txt @@ -0,0 +1 @@ +conv_stem.weight:[40, 3, 3, 3]***bn1.weight:[40]***bn1.bias:[40]***bn1.running_mean:[40]***bn1.running_var:[40]***bn1.num_batches_tracked:[]***blocks.0.0.conv_dw.weight:[40, 1, 3, 3]***blocks.0.0.bn1.weight:[40]***blocks.0.0.bn1.bias:[40]***blocks.0.0.bn1.running_mean:[40]***blocks.0.0.bn1.running_var:[40]***blocks.0.0.bn1.num_batches_tracked:[]***blocks.0.0.se.conv_reduce.weight:[10, 40, 1, 
1]***blocks.0.0.se.conv_reduce.bias:[10]***blocks.0.0.se.conv_expand.weight:[40, 10, 1, 1]***blocks.0.0.se.conv_expand.bias:[40]***blocks.0.0.conv_pw.weight:[24, 40, 1, 1]***blocks.0.0.bn2.weight:[24]***blocks.0.0.bn2.bias:[24]***blocks.0.0.bn2.running_mean:[24]***blocks.0.0.bn2.running_var:[24]***blocks.0.0.bn2.num_batches_tracked:[]***blocks.0.1.conv_dw.weight:[24, 1, 3, 3]***blocks.0.1.bn1.weight:[24]***blocks.0.1.bn1.bias:[24]***blocks.0.1.bn1.running_mean:[24]***blocks.0.1.bn1.running_var:[24]***blocks.0.1.bn1.num_batches_tracked:[]***blocks.0.1.se.conv_reduce.weight:[6, 24, 1, 1]***blocks.0.1.se.conv_reduce.bias:[6]***blocks.0.1.se.conv_expand.weight:[24, 6, 1, 1]***blocks.0.1.se.conv_expand.bias:[24]***blocks.0.1.conv_pw.weight:[24, 24, 1, 1]***blocks.0.1.bn2.weight:[24]***blocks.0.1.bn2.bias:[24]***blocks.0.1.bn2.running_mean:[24]***blocks.0.1.bn2.running_var:[24]***blocks.0.1.bn2.num_batches_tracked:[]***blocks.1.0.conv_pw.weight:[27, 24, 1, 1]***blocks.1.0.bn1.weight:[27]***blocks.1.0.bn1.bias:[27]***blocks.1.0.bn1.running_mean:[27]***blocks.1.0.bn1.running_var:[27]***blocks.1.0.bn1.num_batches_tracked:[]***blocks.1.0.conv_dw.weight:[27, 1, 3, 3]***blocks.1.0.bn2.weight:[27]***blocks.1.0.bn2.bias:[27]***blocks.1.0.bn2.running_mean:[27]***blocks.1.0.bn2.running_var:[27]***blocks.1.0.bn2.num_batches_tracked:[]***blocks.1.0.se.conv_reduce.weight:[6, 27, 1, 1]***blocks.1.0.se.conv_reduce.bias:[6]***blocks.1.0.se.conv_expand.weight:[27, 6, 1, 1]***blocks.1.0.se.conv_expand.bias:[27]***blocks.1.0.conv_pwl.weight:[12, 27, 1, 1]***blocks.1.0.bn3.weight:[12]***blocks.1.0.bn3.bias:[12]***blocks.1.0.bn3.running_mean:[12]***blocks.1.0.bn3.running_var:[12]***blocks.1.0.bn3.num_batches_tracked:[]***blocks.1.1.conv_pw.weight:[49, 12, 1, 1]***blocks.1.1.bn1.weight:[49]***blocks.1.1.bn1.bias:[49]***blocks.1.1.bn1.running_mean:[49]***blocks.1.1.bn1.running_var:[49]***blocks.1.1.bn1.num_batches_tracked:[]***blocks.1.1.conv_dw.weight:[49, 1, 3, 3]***blocks.1.1.bn2.weight:[49]***blocks.1.1.bn2.bias:[49]***blocks.1.1.bn2.running_mean:[49]***blocks.1.1.bn2.running_var:[49]***blocks.1.1.bn2.num_batches_tracked:[]***blocks.1.1.se.conv_reduce.weight:[8, 49, 1, 1]***blocks.1.1.se.conv_reduce.bias:[8]***blocks.1.1.se.conv_expand.weight:[49, 8, 1, 1]***blocks.1.1.se.conv_expand.bias:[49]***blocks.1.1.conv_pwl.weight:[12, 49, 1, 1]***blocks.1.1.bn3.weight:[12]***blocks.1.1.bn3.bias:[12]***blocks.1.1.bn3.running_mean:[12]***blocks.1.1.bn3.running_var:[12]***blocks.1.1.bn3.num_batches_tracked:[]***blocks.1.2.conv_pw.weight:[48, 12, 1, 1]***blocks.1.2.bn1.weight:[48]***blocks.1.2.bn1.bias:[48]***blocks.1.2.bn1.running_mean:[48]***blocks.1.2.bn1.running_var:[48]***blocks.1.2.bn1.num_batches_tracked:[]***blocks.1.2.conv_dw.weight:[48, 1, 3, 3]***blocks.1.2.bn2.weight:[48]***blocks.1.2.bn2.bias:[48]***blocks.1.2.bn2.running_mean:[48]***blocks.1.2.bn2.running_var:[48]***blocks.1.2.bn2.num_batches_tracked:[]***blocks.1.2.se.conv_reduce.weight:[8, 48, 1, 1]***blocks.1.2.se.conv_reduce.bias:[8]***blocks.1.2.se.conv_expand.weight:[48, 8, 1, 1]***blocks.1.2.se.conv_expand.bias:[48]***blocks.1.2.conv_pwl.weight:[12, 48, 1, 1]***blocks.1.2.bn3.weight:[12]***blocks.1.2.bn3.bias:[12]***blocks.1.2.bn3.running_mean:[12]***blocks.1.2.bn3.running_var:[12]***blocks.1.2.bn3.num_batches_tracked:[]***blocks.2.0.conv_pw.weight:[83, 12, 1, 
1]***blocks.2.0.bn1.weight:[83]***blocks.2.0.bn1.bias:[83]***blocks.2.0.bn1.running_mean:[83]***blocks.2.0.bn1.running_var:[83]***blocks.2.0.bn1.num_batches_tracked:[]***blocks.2.0.conv_dw.weight:[83, 1, 5, 5]***blocks.2.0.bn2.weight:[83]***blocks.2.0.bn2.bias:[83]***blocks.2.0.bn2.running_mean:[83]***blocks.2.0.bn2.running_var:[83]***blocks.2.0.bn2.num_batches_tracked:[]***blocks.2.0.se.conv_reduce.weight:[8, 83, 1, 1]***blocks.2.0.se.conv_reduce.bias:[8]***blocks.2.0.se.conv_expand.weight:[83, 8, 1, 1]***blocks.2.0.se.conv_expand.bias:[83]***blocks.2.0.conv_pwl.weight:[40, 83, 1, 1]***blocks.2.0.bn3.weight:[40]***blocks.2.0.bn3.bias:[40]***blocks.2.0.bn3.running_mean:[40]***blocks.2.0.bn3.running_var:[40]***blocks.2.0.bn3.num_batches_tracked:[]***blocks.2.1.conv_pw.weight:[90, 40, 1, 1]***blocks.2.1.bn1.weight:[90]***blocks.2.1.bn1.bias:[90]***blocks.2.1.bn1.running_mean:[90]***blocks.2.1.bn1.running_var:[90]***blocks.2.1.bn1.num_batches_tracked:[]***blocks.2.1.conv_dw.weight:[90, 1, 5, 5]***blocks.2.1.bn2.weight:[90]***blocks.2.1.bn2.bias:[90]***blocks.2.1.bn2.running_mean:[90]***blocks.2.1.bn2.running_var:[90]***blocks.2.1.bn2.num_batches_tracked:[]***blocks.2.1.se.conv_reduce.weight:[12, 90, 1, 1]***blocks.2.1.se.conv_reduce.bias:[12]***blocks.2.1.se.conv_expand.weight:[90, 12, 1, 1]***blocks.2.1.se.conv_expand.bias:[90]***blocks.2.1.conv_pwl.weight:[40, 90, 1, 1]***blocks.2.1.bn3.weight:[40]***blocks.2.1.bn3.bias:[40]***blocks.2.1.bn3.running_mean:[40]***blocks.2.1.bn3.running_var:[40]***blocks.2.1.bn3.num_batches_tracked:[]***blocks.2.2.conv_pw.weight:[85, 40, 1, 1]***blocks.2.2.bn1.weight:[85]***blocks.2.2.bn1.bias:[85]***blocks.2.2.bn1.running_mean:[85]***blocks.2.2.bn1.running_var:[85]***blocks.2.2.bn1.num_batches_tracked:[]***blocks.2.2.conv_dw.weight:[85, 1, 5, 5]***blocks.2.2.bn2.weight:[85]***blocks.2.2.bn2.bias:[85]***blocks.2.2.bn2.running_mean:[85]***blocks.2.2.bn2.running_var:[85]***blocks.2.2.bn2.num_batches_tracked:[]***blocks.2.2.se.conv_reduce.weight:[12, 85, 1, 1]***blocks.2.2.se.conv_reduce.bias:[12]***blocks.2.2.se.conv_expand.weight:[85, 12, 1, 1]***blocks.2.2.se.conv_expand.bias:[85]***blocks.2.2.conv_pwl.weight:[40, 85, 1, 1]***blocks.2.2.bn3.weight:[40]***blocks.2.2.bn3.bias:[40]***blocks.2.2.bn3.running_mean:[40]***blocks.2.2.bn3.running_var:[40]***blocks.2.2.bn3.num_batches_tracked:[]***blocks.3.0.conv_pw.weight:[215, 40, 1, 1]***blocks.3.0.bn1.weight:[215]***blocks.3.0.bn1.bias:[215]***blocks.3.0.bn1.running_mean:[215]***blocks.3.0.bn1.running_var:[215]***blocks.3.0.bn1.num_batches_tracked:[]***blocks.3.0.conv_dw.weight:[215, 1, 3, 3]***blocks.3.0.bn2.weight:[215]***blocks.3.0.bn2.bias:[215]***blocks.3.0.bn2.running_mean:[215]***blocks.3.0.bn2.running_var:[215]***blocks.3.0.bn2.num_batches_tracked:[]***blocks.3.0.se.conv_reduce.weight:[12, 215, 1, 1]***blocks.3.0.se.conv_reduce.bias:[12]***blocks.3.0.se.conv_expand.weight:[215, 12, 1, 1]***blocks.3.0.se.conv_expand.bias:[215]***blocks.3.0.conv_pwl.weight:[93, 215, 1, 1]***blocks.3.0.bn3.weight:[93]***blocks.3.0.bn3.bias:[93]***blocks.3.0.bn3.running_mean:[93]***blocks.3.0.bn3.running_var:[93]***blocks.3.0.bn3.num_batches_tracked:[]***blocks.3.1.conv_pw.weight:[261, 93, 1, 1]***blocks.3.1.bn1.weight:[261]***blocks.3.1.bn1.bias:[261]***blocks.3.1.bn1.running_mean:[261]***blocks.3.1.bn1.running_var:[261]***blocks.3.1.bn1.num_batches_tracked:[]***blocks.3.1.conv_dw.weight:[261, 1, 3, 
3]***blocks.3.1.bn2.weight:[261]***blocks.3.1.bn2.bias:[261]***blocks.3.1.bn2.running_mean:[261]***blocks.3.1.bn2.running_var:[261]***blocks.3.1.bn2.num_batches_tracked:[]***blocks.3.1.se.conv_reduce.weight:[24, 261, 1, 1]***blocks.3.1.se.conv_reduce.bias:[24]***blocks.3.1.se.conv_expand.weight:[261, 24, 1, 1]***blocks.3.1.se.conv_expand.bias:[261]***blocks.3.1.conv_pwl.weight:[93, 261, 1, 1]***blocks.3.1.bn3.weight:[93]***blocks.3.1.bn3.bias:[93]***blocks.3.1.bn3.running_mean:[93]***blocks.3.1.bn3.running_var:[93]***blocks.3.1.bn3.num_batches_tracked:[]***blocks.3.2.conv_pw.weight:[219, 93, 1, 1]***blocks.3.2.bn1.weight:[219]***blocks.3.2.bn1.bias:[219]***blocks.3.2.bn1.running_mean:[219]***blocks.3.2.bn1.running_var:[219]***blocks.3.2.bn1.num_batches_tracked:[]***blocks.3.2.conv_dw.weight:[219, 1, 3, 3]***blocks.3.2.bn2.weight:[219]***blocks.3.2.bn2.bias:[219]***blocks.3.2.bn2.running_mean:[219]***blocks.3.2.bn2.running_var:[219]***blocks.3.2.bn2.num_batches_tracked:[]***blocks.3.2.se.conv_reduce.weight:[24, 219, 1, 1]***blocks.3.2.se.conv_reduce.bias:[24]***blocks.3.2.se.conv_expand.weight:[219, 24, 1, 1]***blocks.3.2.se.conv_expand.bias:[219]***blocks.3.2.conv_pwl.weight:[93, 219, 1, 1]***blocks.3.2.bn3.weight:[93]***blocks.3.2.bn3.bias:[93]***blocks.3.2.bn3.running_mean:[93]***blocks.3.2.bn3.running_var:[93]***blocks.3.2.bn3.num_batches_tracked:[]***blocks.3.3.conv_pw.weight:[254, 93, 1, 1]***blocks.3.3.bn1.weight:[254]***blocks.3.3.bn1.bias:[254]***blocks.3.3.bn1.running_mean:[254]***blocks.3.3.bn1.running_var:[254]***blocks.3.3.bn1.num_batches_tracked:[]***blocks.3.3.conv_dw.weight:[254, 1, 3, 3]***blocks.3.3.bn2.weight:[254]***blocks.3.3.bn2.bias:[254]***blocks.3.3.bn2.running_mean:[254]***blocks.3.3.bn2.running_var:[254]***blocks.3.3.bn2.num_batches_tracked:[]***blocks.3.3.se.conv_reduce.weight:[24, 254, 1, 1]***blocks.3.3.se.conv_reduce.bias:[24]***blocks.3.3.se.conv_expand.weight:[254, 24, 1, 1]***blocks.3.3.se.conv_expand.bias:[254]***blocks.3.3.conv_pwl.weight:[93, 254, 1, 1]***blocks.3.3.bn3.weight:[93]***blocks.3.3.bn3.bias:[93]***blocks.3.3.bn3.running_mean:[93]***blocks.3.3.bn3.running_var:[93]***blocks.3.3.bn3.num_batches_tracked:[]***blocks.3.4.conv_pw.weight:[236, 93, 1, 1]***blocks.3.4.bn1.weight:[236]***blocks.3.4.bn1.bias:[236]***blocks.3.4.bn1.running_mean:[236]***blocks.3.4.bn1.running_var:[236]***blocks.3.4.bn1.num_batches_tracked:[]***blocks.3.4.conv_dw.weight:[236, 1, 3, 3]***blocks.3.4.bn2.weight:[236]***blocks.3.4.bn2.bias:[236]***blocks.3.4.bn2.running_mean:[236]***blocks.3.4.bn2.running_var:[236]***blocks.3.4.bn2.num_batches_tracked:[]***blocks.3.4.se.conv_reduce.weight:[24, 236, 1, 1]***blocks.3.4.se.conv_reduce.bias:[24]***blocks.3.4.se.conv_expand.weight:[236, 24, 1, 1]***blocks.3.4.se.conv_expand.bias:[236]***blocks.3.4.conv_pwl.weight:[93, 236, 1, 1]***blocks.3.4.bn3.weight:[93]***blocks.3.4.bn3.bias:[93]***blocks.3.4.bn3.running_mean:[93]***blocks.3.4.bn3.running_var:[93]***blocks.3.4.bn3.num_batches_tracked:[]***blocks.4.0.conv_pw.weight:[480, 93, 1, 1]***blocks.4.0.bn1.weight:[480]***blocks.4.0.bn1.bias:[480]***blocks.4.0.bn1.running_mean:[480]***blocks.4.0.bn1.running_var:[480]***blocks.4.0.bn1.num_batches_tracked:[]***blocks.4.0.conv_dw.weight:[480, 1, 5, 5]***blocks.4.0.bn2.weight:[480]***blocks.4.0.bn2.bias:[480]***blocks.4.0.bn2.running_mean:[480]***blocks.4.0.bn2.running_var:[480]***blocks.4.0.bn2.num_batches_tracked:[]***blocks.4.0.se.conv_reduce.weight:[24, 480, 1, 
1]***blocks.4.0.se.conv_reduce.bias:[24]***blocks.4.0.se.conv_expand.weight:[480, 24, 1, 1]***blocks.4.0.se.conv_expand.bias:[480]***blocks.4.0.conv_pwl.weight:[120, 480, 1, 1]***blocks.4.0.bn3.weight:[120]***blocks.4.0.bn3.bias:[120]***blocks.4.0.bn3.running_mean:[120]***blocks.4.0.bn3.running_var:[120]***blocks.4.0.bn3.num_batches_tracked:[]***blocks.4.1.conv_pw.weight:[235, 120, 1, 1]***blocks.4.1.bn1.weight:[235]***blocks.4.1.bn1.bias:[235]***blocks.4.1.bn1.running_mean:[235]***blocks.4.1.bn1.running_var:[235]***blocks.4.1.bn1.num_batches_tracked:[]***blocks.4.1.conv_dw.weight:[235, 1, 5, 5]***blocks.4.1.bn2.weight:[235]***blocks.4.1.bn2.bias:[235]***blocks.4.1.bn2.running_mean:[235]***blocks.4.1.bn2.running_var:[235]***blocks.4.1.bn2.num_batches_tracked:[]***blocks.4.1.se.conv_reduce.weight:[34, 235, 1, 1]***blocks.4.1.se.conv_reduce.bias:[34]***blocks.4.1.se.conv_expand.weight:[235, 34, 1, 1]***blocks.4.1.se.conv_expand.bias:[235]***blocks.4.1.conv_pwl.weight:[120, 235, 1, 1]***blocks.4.1.bn3.weight:[120]***blocks.4.1.bn3.bias:[120]***blocks.4.1.bn3.running_mean:[120]***blocks.4.1.bn3.running_var:[120]***blocks.4.1.bn3.num_batches_tracked:[]***blocks.4.2.conv_pw.weight:[217, 120, 1, 1]***blocks.4.2.bn1.weight:[217]***blocks.4.2.bn1.bias:[217]***blocks.4.2.bn1.running_mean:[217]***blocks.4.2.bn1.running_var:[217]***blocks.4.2.bn1.num_batches_tracked:[]***blocks.4.2.conv_dw.weight:[217, 1, 5, 5]***blocks.4.2.bn2.weight:[217]***blocks.4.2.bn2.bias:[217]***blocks.4.2.bn2.running_mean:[217]***blocks.4.2.bn2.running_var:[217]***blocks.4.2.bn2.num_batches_tracked:[]***blocks.4.2.se.conv_reduce.weight:[34, 217, 1, 1]***blocks.4.2.se.conv_reduce.bias:[34]***blocks.4.2.se.conv_expand.weight:[217, 34, 1, 1]***blocks.4.2.se.conv_expand.bias:[217]***blocks.4.2.conv_pwl.weight:[120, 217, 1, 1]***blocks.4.2.bn3.weight:[120]***blocks.4.2.bn3.bias:[120]***blocks.4.2.bn3.running_mean:[120]***blocks.4.2.bn3.running_var:[120]***blocks.4.2.bn3.num_batches_tracked:[]***blocks.4.3.conv_pw.weight:[226, 120, 1, 1]***blocks.4.3.bn1.weight:[226]***blocks.4.3.bn1.bias:[226]***blocks.4.3.bn1.running_mean:[226]***blocks.4.3.bn1.running_var:[226]***blocks.4.3.bn1.num_batches_tracked:[]***blocks.4.3.conv_dw.weight:[226, 1, 5, 5]***blocks.4.3.bn2.weight:[226]***blocks.4.3.bn2.bias:[226]***blocks.4.3.bn2.running_mean:[226]***blocks.4.3.bn2.running_var:[226]***blocks.4.3.bn2.num_batches_tracked:[]***blocks.4.3.se.conv_reduce.weight:[33, 226, 1, 1]***blocks.4.3.se.conv_reduce.bias:[33]***blocks.4.3.se.conv_expand.weight:[226, 33, 1, 1]***blocks.4.3.se.conv_expand.bias:[226]***blocks.4.3.conv_pwl.weight:[120, 226, 1, 1]***blocks.4.3.bn3.weight:[120]***blocks.4.3.bn3.bias:[120]***blocks.4.3.bn3.running_mean:[120]***blocks.4.3.bn3.running_var:[120]***blocks.4.3.bn3.num_batches_tracked:[]***blocks.4.4.conv_pw.weight:[340, 120, 1, 1]***blocks.4.4.bn1.weight:[340]***blocks.4.4.bn1.bias:[340]***blocks.4.4.bn1.running_mean:[340]***blocks.4.4.bn1.running_var:[340]***blocks.4.4.bn1.num_batches_tracked:[]***blocks.4.4.conv_dw.weight:[340, 1, 5, 5]***blocks.4.4.bn2.weight:[340]***blocks.4.4.bn2.bias:[340]***blocks.4.4.bn2.running_mean:[340]***blocks.4.4.bn2.running_var:[340]***blocks.4.4.bn2.num_batches_tracked:[]***blocks.4.4.se.conv_reduce.weight:[34, 340, 1, 1]***blocks.4.4.se.conv_reduce.bias:[34]***blocks.4.4.se.conv_expand.weight:[340, 34, 1, 1]***blocks.4.4.se.conv_expand.bias:[340]***blocks.4.4.conv_pwl.weight:[120, 340, 1, 
1]***blocks.4.4.bn3.weight:[120]***blocks.4.4.bn3.bias:[120]***blocks.4.4.bn3.running_mean:[120]***blocks.4.4.bn3.running_var:[120]***blocks.4.4.bn3.num_batches_tracked:[]***blocks.5.0.conv_pw.weight:[802, 120, 1, 1]***blocks.5.0.bn1.weight:[802]***blocks.5.0.bn1.bias:[802]***blocks.5.0.bn1.running_mean:[802]***blocks.5.0.bn1.running_var:[802]***blocks.5.0.bn1.num_batches_tracked:[]***blocks.5.0.conv_dw.weight:[802, 1, 5, 5]***blocks.5.0.bn2.weight:[802]***blocks.5.0.bn2.bias:[802]***blocks.5.0.bn2.running_mean:[802]***blocks.5.0.bn2.running_var:[802]***blocks.5.0.bn2.num_batches_tracked:[]***blocks.5.0.se.conv_reduce.weight:[34, 802, 1, 1]***blocks.5.0.se.conv_reduce.bias:[34]***blocks.5.0.se.conv_expand.weight:[802, 34, 1, 1]***blocks.5.0.se.conv_expand.bias:[802]***blocks.5.0.conv_pwl.weight:[232, 802, 1, 1]***blocks.5.0.bn3.weight:[232]***blocks.5.0.bn3.bias:[232]***blocks.5.0.bn3.running_mean:[232]***blocks.5.0.bn3.running_var:[232]***blocks.5.0.bn3.num_batches_tracked:[]***blocks.5.1.conv_pw.weight:[1030, 232, 1, 1]***blocks.5.1.bn1.weight:[1030]***blocks.5.1.bn1.bias:[1030]***blocks.5.1.bn1.running_mean:[1030]***blocks.5.1.bn1.running_var:[1030]***blocks.5.1.bn1.num_batches_tracked:[]***blocks.5.1.conv_dw.weight:[1030, 1, 5, 5]***blocks.5.1.bn2.weight:[1030]***blocks.5.1.bn2.bias:[1030]***blocks.5.1.bn2.running_mean:[1030]***blocks.5.1.bn2.running_var:[1030]***blocks.5.1.bn2.num_batches_tracked:[]***blocks.5.1.se.conv_reduce.weight:[58, 1030, 1, 1]***blocks.5.1.se.conv_reduce.bias:[58]***blocks.5.1.se.conv_expand.weight:[1030, 58, 1, 1]***blocks.5.1.se.conv_expand.bias:[1030]***blocks.5.1.conv_pwl.weight:[232, 1030, 1, 1]***blocks.5.1.bn3.weight:[232]***blocks.5.1.bn3.bias:[232]***blocks.5.1.bn3.running_mean:[232]***blocks.5.1.bn3.running_var:[232]***blocks.5.1.bn3.num_batches_tracked:[]***blocks.5.2.conv_pw.weight:[924, 232, 1, 1]***blocks.5.2.bn1.weight:[924]***blocks.5.2.bn1.bias:[924]***blocks.5.2.bn1.running_mean:[924]***blocks.5.2.bn1.running_var:[924]***blocks.5.2.bn1.num_batches_tracked:[]***blocks.5.2.conv_dw.weight:[924, 1, 5, 5]***blocks.5.2.bn2.weight:[924]***blocks.5.2.bn2.bias:[924]***blocks.5.2.bn2.running_mean:[924]***blocks.5.2.bn2.running_var:[924]***blocks.5.2.bn2.num_batches_tracked:[]***blocks.5.2.se.conv_reduce.weight:[58, 924, 1, 1]***blocks.5.2.se.conv_reduce.bias:[58]***blocks.5.2.se.conv_expand.weight:[924, 58, 1, 1]***blocks.5.2.se.conv_expand.bias:[924]***blocks.5.2.conv_pwl.weight:[232, 924, 1, 1]***blocks.5.2.bn3.weight:[232]***blocks.5.2.bn3.bias:[232]***blocks.5.2.bn3.running_mean:[232]***blocks.5.2.bn3.running_var:[232]***blocks.5.2.bn3.num_batches_tracked:[]***blocks.5.3.conv_pw.weight:[1016, 232, 1, 1]***blocks.5.3.bn1.weight:[1016]***blocks.5.3.bn1.bias:[1016]***blocks.5.3.bn1.running_mean:[1016]***blocks.5.3.bn1.running_var:[1016]***blocks.5.3.bn1.num_batches_tracked:[]***blocks.5.3.conv_dw.weight:[1016, 1, 5, 5]***blocks.5.3.bn2.weight:[1016]***blocks.5.3.bn2.bias:[1016]***blocks.5.3.bn2.running_mean:[1016]***blocks.5.3.bn2.running_var:[1016]***blocks.5.3.bn2.num_batches_tracked:[]***blocks.5.3.se.conv_reduce.weight:[58, 1016, 1, 1]***blocks.5.3.se.conv_reduce.bias:[58]***blocks.5.3.se.conv_expand.weight:[1016, 58, 1, 1]***blocks.5.3.se.conv_expand.bias:[1016]***blocks.5.3.conv_pwl.weight:[232, 1016, 1, 1]***blocks.5.3.bn3.weight:[232]***blocks.5.3.bn3.bias:[232]***blocks.5.3.bn3.running_mean:[232]***blocks.5.3.bn3.running_var:[232]***blocks.5.3.bn3.num_batches_tracked:[]***blocks.5.4.conv_pw.weight:[1130, 232, 1, 
1]***blocks.5.4.bn1.weight:[1130]***blocks.5.4.bn1.bias:[1130]***blocks.5.4.bn1.running_mean:[1130]***blocks.5.4.bn1.running_var:[1130]***blocks.5.4.bn1.num_batches_tracked:[]***blocks.5.4.conv_dw.weight:[1130, 1, 5, 5]***blocks.5.4.bn2.weight:[1130]***blocks.5.4.bn2.bias:[1130]***blocks.5.4.bn2.running_mean:[1130]***blocks.5.4.bn2.running_var:[1130]***blocks.5.4.bn2.num_batches_tracked:[]***blocks.5.4.se.conv_reduce.weight:[58, 1130, 1, 1]***blocks.5.4.se.conv_reduce.bias:[58]***blocks.5.4.se.conv_expand.weight:[1130, 58, 1, 1]***blocks.5.4.se.conv_expand.bias:[1130]***blocks.5.4.conv_pwl.weight:[232, 1130, 1, 1]***blocks.5.4.bn3.weight:[232]***blocks.5.4.bn3.bias:[232]***blocks.5.4.bn3.running_mean:[232]***blocks.5.4.bn3.running_var:[232]***blocks.5.4.bn3.num_batches_tracked:[]***blocks.5.5.conv_pw.weight:[1266, 232, 1, 1]***blocks.5.5.bn1.weight:[1266]***blocks.5.5.bn1.bias:[1266]***blocks.5.5.bn1.running_mean:[1266]***blocks.5.5.bn1.running_var:[1266]***blocks.5.5.bn1.num_batches_tracked:[]***blocks.5.5.conv_dw.weight:[1266, 1, 5, 5]***blocks.5.5.bn2.weight:[1266]***blocks.5.5.bn2.bias:[1266]***blocks.5.5.bn2.running_mean:[1266]***blocks.5.5.bn2.running_var:[1266]***blocks.5.5.bn2.num_batches_tracked:[]***blocks.5.5.se.conv_reduce.weight:[58, 1266, 1, 1]***blocks.5.5.se.conv_reduce.bias:[58]***blocks.5.5.se.conv_expand.weight:[1266, 58, 1, 1]***blocks.5.5.se.conv_expand.bias:[1266]***blocks.5.5.conv_pwl.weight:[232, 1266, 1, 1]***blocks.5.5.bn3.weight:[232]***blocks.5.5.bn3.bias:[232]***blocks.5.5.bn3.running_mean:[232]***blocks.5.5.bn3.running_var:[232]***blocks.5.5.bn3.num_batches_tracked:[]***blocks.6.0.conv_pw.weight:[1392, 232, 1, 1]***blocks.6.0.bn1.weight:[1392]***blocks.6.0.bn1.bias:[1392]***blocks.6.0.bn1.running_mean:[1392]***blocks.6.0.bn1.running_var:[1392]***blocks.6.0.bn1.num_batches_tracked:[]***blocks.6.0.conv_dw.weight:[1392, 1, 3, 3]***blocks.6.0.bn2.weight:[1392]***blocks.6.0.bn2.bias:[1392]***blocks.6.0.bn2.running_mean:[1392]***blocks.6.0.bn2.running_var:[1392]***blocks.6.0.bn2.num_batches_tracked:[]***blocks.6.0.se.conv_reduce.weight:[58, 1392, 1, 1]***blocks.6.0.se.conv_reduce.bias:[58]***blocks.6.0.se.conv_expand.weight:[1392, 58, 1, 1]***blocks.6.0.se.conv_expand.bias:[1392]***blocks.6.0.conv_pwl.weight:[384, 1392, 1, 1]***blocks.6.0.bn3.weight:[384]***blocks.6.0.bn3.bias:[384]***blocks.6.0.bn3.running_mean:[384]***blocks.6.0.bn3.running_var:[384]***blocks.6.0.bn3.num_batches_tracked:[]***blocks.6.1.conv_pw.weight:[2301, 384, 1, 1]***blocks.6.1.bn1.weight:[2301]***blocks.6.1.bn1.bias:[2301]***blocks.6.1.bn1.running_mean:[2301]***blocks.6.1.bn1.running_var:[2301]***blocks.6.1.bn1.num_batches_tracked:[]***blocks.6.1.conv_dw.weight:[2301, 1, 3, 3]***blocks.6.1.bn2.weight:[2301]***blocks.6.1.bn2.bias:[2301]***blocks.6.1.bn2.running_mean:[2301]***blocks.6.1.bn2.running_var:[2301]***blocks.6.1.bn2.num_batches_tracked:[]***blocks.6.1.se.conv_reduce.weight:[96, 2301, 1, 1]***blocks.6.1.se.conv_reduce.bias:[96]***blocks.6.1.se.conv_expand.weight:[2301, 96, 1, 1]***blocks.6.1.se.conv_expand.bias:[2301]***blocks.6.1.conv_pwl.weight:[384, 2301, 1, 1]***blocks.6.1.bn3.weight:[384]***blocks.6.1.bn3.bias:[384]***blocks.6.1.bn3.running_mean:[384]***blocks.6.1.bn3.running_var:[384]***blocks.6.1.bn3.num_batches_tracked:[]***conv_head.weight:[1536, 384, 1, 1]***bn2.weight:[1536]***bn2.bias:[1536]***bn2.running_mean:[1536]***bn2.running_var:[1536]***bn2.num_batches_tracked:[]***classifier.weight:[1000, 1536]***classifier.bias:[1000] \ No newline at end of file diff --git 
a/timm/models/registry.py b/timm/models/registry.py new file mode 100644 index 0000000..f92219b --- /dev/null +++ b/timm/models/registry.py @@ -0,0 +1,149 @@ +""" Model Registry +Hacked together by / Copyright 2020 Ross Wightman +""" + +import sys +import re +import fnmatch +from collections import defaultdict +from copy import deepcopy + +__all__ = ['list_models', 'is_model', 'model_entrypoint', 'list_modules', 'is_model_in_modules', + 'is_model_default_key', 'has_model_default_key', 'get_model_default_value', 'is_model_pretrained'] + +_module_to_models = defaultdict(set)  # dict of sets to check membership of model in module +_model_to_module = {}  # mapping of model names to module names +_model_entrypoints = {}  # mapping of model names to entrypoint fns +_model_has_pretrained = set()  # set of model names that have pretrained weight url present +_model_default_cfgs = dict()  # central repo for model default_cfgs + + +def register_model(fn): + # lookup containing module + mod = sys.modules[fn.__module__] + module_name_split = fn.__module__.split('.') + module_name = module_name_split[-1] if len(module_name_split) else '' + + # add model to __all__ in module + model_name = fn.__name__ + if hasattr(mod, '__all__'): + mod.__all__.append(model_name) + else: + mod.__all__ = [model_name] + + # add entries to registry dict/sets + _model_entrypoints[model_name] = fn + _model_to_module[model_name] = module_name + _module_to_models[module_name].add(model_name) + has_pretrained = False  # check if model has a pretrained url to allow filtering on this + if hasattr(mod, 'default_cfgs') and model_name in mod.default_cfgs: + # this will catch all models that have entrypoint matching cfg key, but miss any aliasing + # entrypoints or non-matching combos + has_pretrained = 'url' in mod.default_cfgs[model_name] and 'http' in mod.default_cfgs[model_name]['url'] + _model_default_cfgs[model_name] = deepcopy(mod.default_cfgs[model_name]) + if has_pretrained: + _model_has_pretrained.add(model_name) + return fn + + +def _natural_key(string_): + return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())] + + +def list_models(filter='', module='', pretrained=False, exclude_filters='', name_matches_cfg=False): + """ Return list of available model names, sorted alphabetically + + Args: + filter (str or list[str]) - Wildcard filter string(s) that work with fnmatch + module (str) - Limit model selection to a specific sub-module (e.g. 'gen_efficientnet') + pretrained (bool) - Include only models with pretrained weights if True + exclude_filters (str or list[str]) - Wildcard filters to exclude models after including them with filter + name_matches_cfg (bool) - Include only models w/ model_name matching default_cfg name (excludes some aliases) + + Example: + list_models('gluon_resnet*') -- returns all models starting with 'gluon_resnet' + list_models('*resnext*', 'resnet') -- returns all models with 'resnext' in 'resnet' module + """ + if module: + all_models = list(_module_to_models[module]) + else: + all_models = _model_entrypoints.keys() + if filter: + models = [] + include_filters = filter if isinstance(filter, (tuple, list)) else [filter] + for f in include_filters: + include_models = fnmatch.filter(all_models, f)  # include these models + if len(include_models): + models = set(models).union(include_models) + else: + models = all_models + if exclude_filters: + if not isinstance(exclude_filters, (tuple, list)): + exclude_filters = [exclude_filters] + for xf in exclude_filters: + exclude_models = fnmatch.filter(models,
xf)  # exclude these models + if len(exclude_models): + models = set(models).difference(exclude_models) + if pretrained: + models = _model_has_pretrained.intersection(models) + if name_matches_cfg: + models = set(_model_default_cfgs).intersection(models) + return list(sorted(models, key=_natural_key)) + + +def is_model(model_name): + """ Check if a model name exists + """ + return model_name in _model_entrypoints + + +def model_entrypoint(model_name): + """Fetch a model entrypoint for specified model name + """ + return _model_entrypoints[model_name] + + +def list_modules(): + """ Return list of module names that contain models / model entrypoints + """ + modules = _module_to_models.keys() + return list(sorted(modules)) + + +def is_model_in_modules(model_name, module_names): + """Check if a model exists within a subset of modules + Args: + model_name (str) - name of model to check + module_names (tuple, list, set) - names of modules to search in + """ + assert isinstance(module_names, (tuple, list, set)) + return any(model_name in _module_to_models[n] for n in module_names) + + +def has_model_default_key(model_name, cfg_key): + """ Query model default_cfgs for existence of a specific key. + """ + if model_name in _model_default_cfgs and cfg_key in _model_default_cfgs[model_name]: + return True + return False + + +def is_model_default_key(model_name, cfg_key): + """ Return True if the specified model default_cfg key has a truthy value, False if it does not exist. + """ + if model_name in _model_default_cfgs and _model_default_cfgs[model_name].get(cfg_key, False): + return True + return False + + +def get_model_default_value(model_name, cfg_key): + """ Get a specific model default_cfg value by key. None if it doesn't exist. + """ + if model_name in _model_default_cfgs: + return _model_default_cfgs[model_name].get(cfg_key, None) + else: + return None + + +def is_model_pretrained(model_name): + return model_name in _model_has_pretrained diff --git a/timm/models/regnet.py b/timm/models/regnet.py new file mode 100644 index 0000000..6a38107 --- /dev/null +++ b/timm/models/regnet.py @@ -0,0 +1,494 @@ +"""RegNet + +Paper: `Designing Network Design Spaces` - https://arxiv.org/abs/2003.13678 +Original Impl: https://github.com/facebookresearch/pycls/blob/master/pycls/models/regnet.py + +Based on the original PyTorch impl linked above, but rewritten to use my own blocks (adapted from ResNet here) +and cleaned up with more descriptive variable names.
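+ +Per-block widths follow the paper's linear parameterization, as implemented in generate_regnet below: +block j gets a continuous width u_j = w0 + wa * j, which is snapped onto a quantized grid via +s_j = round(log(u_j / w0) / log(wm)) and w_j = w0 * wm ** s_j, then rounded to a multiple of q (q=8 here).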
+ +Weights from the original impl have been modified: +* first layer converted from BGR -> RGB, as most PyTorch models are RGB +* removed training-specific dict entries from checkpoints, keeping the model state_dict only +* remapped names to match the ones here + +Hacked together by / Copyright 2020 Ross Wightman +""" +import numpy as np +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import ClassifierHead, AvgPool2dSame, ConvBnAct, SEModule, DropPath +from .registry import register_model + + +def _mcfg(**kwargs): + cfg = dict(se_ratio=0., bottle_ratio=1., stem_width=32) + cfg.update(**kwargs) + return cfg + + +# Model FLOPS = three trailing digits * 10^8 +model_cfgs = dict( + regnetx_002=_mcfg(w0=24, wa=36.44, wm=2.49, group_w=8, depth=13), + regnetx_004=_mcfg(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22), + regnetx_006=_mcfg(w0=48, wa=36.97, wm=2.24, group_w=24, depth=16), + regnetx_008=_mcfg(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16), + regnetx_016=_mcfg(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18), + regnetx_032=_mcfg(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25), + regnetx_040=_mcfg(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23), + regnetx_064=_mcfg(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17), + regnetx_080=_mcfg(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23), + regnetx_120=_mcfg(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19), + regnetx_160=_mcfg(w0=216, wa=55.59, wm=2.1, group_w=128, depth=22), + regnetx_320=_mcfg(w0=320, wa=69.86, wm=2.0, group_w=168, depth=23), + regnety_002=_mcfg(w0=24, wa=36.44, wm=2.49, group_w=8, depth=13, se_ratio=0.25), + regnety_004=_mcfg(w0=48, wa=27.89, wm=2.09, group_w=8, depth=16, se_ratio=0.25), + regnety_006=_mcfg(w0=48, wa=32.54, wm=2.32, group_w=16, depth=15, se_ratio=0.25), + regnety_008=_mcfg(w0=56, wa=38.84, wm=2.4, group_w=16, depth=14, se_ratio=0.25), + regnety_016=_mcfg(w0=48, wa=20.71, wm=2.65, group_w=24, depth=27, se_ratio=0.25), + regnety_032=_mcfg(w0=80, wa=42.63, wm=2.66, group_w=24, depth=21, se_ratio=0.25), + regnety_040=_mcfg(w0=96, wa=31.41, wm=2.24, group_w=64, depth=22, se_ratio=0.25), + regnety_064=_mcfg(w0=112, wa=33.22, wm=2.27, group_w=72, depth=25, se_ratio=0.25), + regnety_080=_mcfg(w0=192, wa=76.82, wm=2.19, group_w=56, depth=17, se_ratio=0.25), + regnety_120=_mcfg(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, se_ratio=0.25), + regnety_160=_mcfg(w0=200, wa=106.23, wm=2.48, group_w=112, depth=18, se_ratio=0.25), + regnety_320=_mcfg(w0=232, wa=115.89, wm=2.53, group_w=232, depth=20, se_ratio=0.25), +) + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = dict( + regnetx_002=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_002-e7e85e5c.pth'), + regnetx_004=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_004-7d0e9424.pth'), + regnetx_006=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_006-85ec1baa.pth'), + regnetx_008=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_008-d8b470eb.pth'), +
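# per the FLOPS comment on model_cfgs above, the numeric suffix encodes compute, e.g. regnetx_016 ~ 1.6 GF +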
regnetx_016=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_016-65ca972a.pth'), + regnetx_032=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_032-ed0c7f7e.pth'), + regnetx_040=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_040-73c2a654.pth'), + regnetx_064=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_064-29278baa.pth'), + regnetx_080=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_080-7c7fcab1.pth'), + regnetx_120=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_120-65d5521e.pth'), + regnetx_160=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_160-c98c4112.pth'), + regnetx_320=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_320-8ea38b93.pth'), + regnety_002=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_002-e68ca334.pth'), + regnety_004=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_004-0db870e6.pth'), + regnety_006=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_006-c67e57ec.pth'), + regnety_008=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_008-dc900dbe.pth'), + regnety_016=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_016-54367f74.pth'), + regnety_032=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/regnety_032_ra-7f2439f9.pth', + crop_pct=1.0, test_input_size=(3, 288, 288)), + regnety_040=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_040-f0d569f9.pth'), + regnety_064=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_064-0a48325c.pth'), + regnety_080=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_080-e7f3eb93.pth'), + regnety_120=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_120-721ba79a.pth'), + regnety_160=_cfg( + url='https://dl.fbaipublicfiles.com/deit/regnety_160-a5fe301d.pth',  # from Facebook DeiT GitHub repository + crop_pct=1.0, test_input_size=(3, 288, 288)), + regnety_320=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_320-ba464b29.pth'), +) + + +def quantize_float(f, q): + """Converts a float to the closest non-zero int divisible by q.""" + return int(round(f / q) * q) + + +def adjust_widths_groups_comp(widths, bottle_ratios, groups): + """Adjusts the compatibility of widths and groups.""" + bottleneck_widths = [int(w * b) for w, b in zip(widths, bottle_ratios)] + groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_widths)] + bottleneck_widths = [quantize_float(w_bot, g) for w_bot, g in zip(bottleneck_widths, groups)] + widths = [int(w_bot / b) for w_bot, b in zip(bottleneck_widths, bottle_ratios)] + return widths, groups + + +def generate_regnet(width_slope, width_initial, width_mult, depth, q=8): + """Generates per-block widths from RegNet parameters.""" + assert width_slope >= 0 and width_initial > 0 and width_mult > 1 and
width_initial % q == 0 + widths_cont = np.arange(depth) * width_slope + width_initial + width_exps = np.round(np.log(widths_cont / width_initial) / np.log(width_mult)) + widths = width_initial * np.power(width_mult, width_exps) + widths = np.round(np.divide(widths, q)) * q + num_stages, max_stage = len(np.unique(widths)), width_exps.max() + 1 + widths, widths_cont = widths.astype(int).tolist(), widths_cont.tolist() + return widths, num_stages, max_stage, widths_cont + + +class Bottleneck(nn.Module): + """ RegNet Bottleneck + + This is almost exactly the same as a ResNet Bottleneck. The main difference is the SE block is moved from + after conv3 to after conv2. Otherwise, it's just redefining the arguments for groups/bottleneck channels. + """ + + def __init__(self, in_chs, out_chs, stride=1, dilation=1, bottleneck_ratio=1, group_width=1, se_ratio=0.25, + downsample=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, + drop_block=None, drop_path=None): + super(Bottleneck, self).__init__() + bottleneck_chs = int(round(out_chs * bottleneck_ratio)) + groups = bottleneck_chs // group_width + + cargs = dict(act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, drop_block=drop_block) + self.conv1 = ConvBnAct(in_chs, bottleneck_chs, kernel_size=1, **cargs) + self.conv2 = ConvBnAct( + bottleneck_chs, bottleneck_chs, kernel_size=3, stride=stride, dilation=dilation, + groups=groups, **cargs) + if se_ratio: + se_channels = int(round(in_chs * se_ratio)) + self.se = SEModule(bottleneck_chs, rd_channels=se_channels) + else: + self.se = None + cargs['act_layer'] = None + self.conv3 = ConvBnAct(bottleneck_chs, out_chs, kernel_size=1, **cargs) + self.act3 = act_layer(inplace=True) + self.downsample = downsample + self.drop_path = drop_path + + def zero_init_last_bn(self): + nn.init.zeros_(self.conv3.bn.weight) + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.conv2(x) + if self.se is not None: + x = self.se(x) + x = self.conv3(x) + if self.drop_path is not None: + x = self.drop_path(x) + if self.downsample is not None: + shortcut = self.downsample(shortcut) + x += shortcut + x = self.act3(x) + return x + + +def downsample_conv( + in_chs, out_chs, kernel_size, stride=1, dilation=1, norm_layer=None): + norm_layer = norm_layer or nn.BatchNorm2d + kernel_size = 1 if stride == 1 and dilation == 1 else kernel_size + dilation = dilation if kernel_size > 1 else 1 + return ConvBnAct( + in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, norm_layer=norm_layer, act_layer=None) + + +def downsample_avg( + in_chs, out_chs, kernel_size, stride=1, dilation=1, norm_layer=None): + """ AvgPool Downsampling as in 'D' ResNet variants.
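It swaps the strided 1x1 projection conv for an AvgPool2d followed by a stride-1 1x1 conv.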
This is not in RegNet space but I might experiment.""" + norm_layer = norm_layer or nn.BatchNorm2d + avg_stride = stride if dilation == 1 else 1 + pool = nn.Identity() + if stride > 1 or dilation > 1: + avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d + pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) + return nn.Sequential(*[ + pool, ConvBnAct(in_chs, out_chs, 1, stride=1, norm_layer=norm_layer, act_layer=None)]) + + +class RegStage(nn.Module): + """Stage (sequence of blocks w/ the same output shape).""" + + def __init__(self, in_chs, out_chs, stride, dilation, depth, bottle_ratio, group_width, + block_fn=Bottleneck, se_ratio=0., drop_path_rates=None, drop_block=None): + super(RegStage, self).__init__() + block_kwargs = {} # FIXME setup to pass various aa, norm, act layer common args + first_dilation = 1 if dilation in (1, 2) else 2 + for i in range(depth): + block_stride = stride if i == 0 else 1 + block_in_chs = in_chs if i == 0 else out_chs + block_dilation = first_dilation if i == 0 else dilation + if drop_path_rates is not None and drop_path_rates[i] > 0.: + drop_path = DropPath(drop_path_rates[i]) + else: + drop_path = None + if (block_in_chs != out_chs) or (block_stride != 1): + proj_block = downsample_conv(block_in_chs, out_chs, 1, block_stride, block_dilation) + else: + proj_block = None + + name = "b{}".format(i + 1) + self.add_module( + name, block_fn( + block_in_chs, out_chs, block_stride, block_dilation, bottle_ratio, group_width, se_ratio, + downsample=proj_block, drop_block=drop_block, drop_path=drop_path, **block_kwargs) + ) + + def forward(self, x): + for block in self.children(): + x = block(x) + return x + + +class RegNet(nn.Module): + """RegNet model. + + Paper: https://arxiv.org/abs/2003.13678 + Original Impl: https://github.com/facebookresearch/pycls/blob/master/pycls/models/regnet.py + """ + + def __init__(self, cfg, in_chans=3, num_classes=1000, output_stride=32, global_pool='avg', drop_rate=0., + drop_path_rate=0., zero_init_last_bn=True): + super().__init__() + # TODO add drop block, drop path, anti-aliasing, custom bn/act args + self.num_classes = num_classes + self.drop_rate = drop_rate + assert output_stride in (8, 16, 32) + + # Construct the stem + stem_width = cfg['stem_width'] + self.stem = ConvBnAct(in_chans, stem_width, 3, stride=2) + self.feature_info = [dict(num_chs=stem_width, reduction=2, module='stem')] + + # Construct the stages + prev_width = stem_width + curr_stride = 2 + stage_params = self._get_stage_params(cfg, output_stride=output_stride, drop_path_rate=drop_path_rate) + se_ratio = cfg['se_ratio'] + for i, stage_args in enumerate(stage_params): + stage_name = "s{}".format(i + 1) + self.add_module(stage_name, RegStage(prev_width, **stage_args, se_ratio=se_ratio)) + prev_width = stage_args['out_chs'] + curr_stride *= stage_args['stride'] + self.feature_info += [dict(num_chs=prev_width, reduction=curr_stride, module=stage_name)] + + # Construct the head + self.num_features = prev_width + self.head = ClassifierHead( + in_chs=prev_width, num_classes=num_classes, pool_type=global_pool, drop_rate=drop_rate) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, mean=0.0, std=0.01) + nn.init.zeros_(m.bias) + if zero_init_last_bn: + for m in self.modules(): + if 
hasattr(m, 'zero_init_last_bn'): + m.zero_init_last_bn() + + def _get_stage_params(self, cfg, default_stride=2, output_stride=32, drop_path_rate=0.): + # Generate RegNet ws per block + w_a, w_0, w_m, d = cfg['wa'], cfg['w0'], cfg['wm'], cfg['depth'] + widths, num_stages, _, _ = generate_regnet(w_a, w_0, w_m, d) + + # Convert to per stage format + stage_widths, stage_depths = np.unique(widths, return_counts=True) + + # Use the same group width, bottleneck mult and stride for each stage + stage_groups = [cfg['group_w'] for _ in range(num_stages)] + stage_bottle_ratios = [cfg['bottle_ratio'] for _ in range(num_stages)] + stage_strides = [] + stage_dilations = [] + net_stride = 2 + dilation = 1 + for _ in range(num_stages): + if net_stride >= output_stride: + dilation *= default_stride + stride = 1 + else: + stride = default_stride + net_stride *= stride + stage_strides.append(stride) + stage_dilations.append(dilation) + stage_dpr = np.split(np.linspace(0, drop_path_rate, d), np.cumsum(stage_depths[:-1])) + + # Adjust the compatibility of ws and gws + stage_widths, stage_groups = adjust_widths_groups_comp(stage_widths, stage_bottle_ratios, stage_groups) + param_names = ['out_chs', 'stride', 'dilation', 'depth', 'bottle_ratio', 'group_width', 'drop_path_rates'] + stage_params = [ + dict(zip(param_names, params)) for params in + zip(stage_widths, stage_strides, stage_dilations, stage_depths, stage_bottle_ratios, stage_groups, + stage_dpr)] + return stage_params + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + for block in list(self.children())[:-1]: + x = block(x) + return x + + def forward(self, x): + for block in self.children(): + x = block(x) + return x + + +def _filter_fn(state_dict): + """ unwrap the 'model' key from DeiT-format checkpoints, keeping only the model state_dict""" + if 'model' in state_dict: + # for the DeiT-trained regnety_160 pretrained model + state_dict = state_dict['model'] + return state_dict + + +def _create_regnet(variant, pretrained, **kwargs): + return build_model_with_cfg( + RegNet, variant, pretrained, + default_cfg=default_cfgs[variant], + model_cfg=model_cfgs[variant], + pretrained_filter_fn=_filter_fn, + **kwargs) + + +@register_model +def regnetx_002(pretrained=False, **kwargs): + """RegNetX-200MF""" + return _create_regnet('regnetx_002', pretrained, **kwargs) + + +@register_model +def regnetx_004(pretrained=False, **kwargs): + """RegNetX-400MF""" + return _create_regnet('regnetx_004', pretrained, **kwargs) + + +@register_model +def regnetx_006(pretrained=False, **kwargs): + """RegNetX-600MF""" + return _create_regnet('regnetx_006', pretrained, **kwargs) + + +@register_model +def regnetx_008(pretrained=False, **kwargs): + """RegNetX-800MF""" + return _create_regnet('regnetx_008', pretrained, **kwargs) + + +@register_model +def regnetx_016(pretrained=False, **kwargs): + """RegNetX-1.6GF""" + return _create_regnet('regnetx_016', pretrained, **kwargs) + + +@register_model +def regnetx_032(pretrained=False, **kwargs): + """RegNetX-3.2GF""" + return _create_regnet('regnetx_032', pretrained, **kwargs) + + +@register_model +def regnetx_040(pretrained=False, **kwargs): + """RegNetX-4.0GF""" + return _create_regnet('regnetx_040', pretrained, **kwargs) + + +@register_model +def regnetx_064(pretrained=False, **kwargs): + """RegNetX-6.4GF""" + return
_create_regnet('regnetx_064', pretrained, **kwargs) + + +@register_model +def regnetx_080(pretrained=False, **kwargs): + """RegNetX-8.0GF""" + return _create_regnet('regnetx_080', pretrained, **kwargs) + + +@register_model +def regnetx_120(pretrained=False, **kwargs): + """RegNetX-12GF""" + return _create_regnet('regnetx_120', pretrained, **kwargs) + + +@register_model +def regnetx_160(pretrained=False, **kwargs): + """RegNetX-16GF""" + return _create_regnet('regnetx_160', pretrained, **kwargs) + + +@register_model +def regnetx_320(pretrained=False, **kwargs): + """RegNetX-32GF""" + return _create_regnet('regnetx_320', pretrained, **kwargs) + + +@register_model +def regnety_002(pretrained=False, **kwargs): + """RegNetY-200MF""" + return _create_regnet('regnety_002', pretrained, **kwargs) + + +@register_model +def regnety_004(pretrained=False, **kwargs): + """RegNetY-400MF""" + return _create_regnet('regnety_004', pretrained, **kwargs) + + +@register_model +def regnety_006(pretrained=False, **kwargs): + """RegNetY-600MF""" + return _create_regnet('regnety_006', pretrained, **kwargs) + + +@register_model +def regnety_008(pretrained=False, **kwargs): + """RegNetY-800MF""" + return _create_regnet('regnety_008', pretrained, **kwargs) + + +@register_model +def regnety_016(pretrained=False, **kwargs): + """RegNetY-1.6GF""" + return _create_regnet('regnety_016', pretrained, **kwargs) + + +@register_model +def regnety_032(pretrained=False, **kwargs): + """RegNetY-3.2GF""" + return _create_regnet('regnety_032', pretrained, **kwargs) + + +@register_model +def regnety_040(pretrained=False, **kwargs): + """RegNetY-4.0GF""" + return _create_regnet('regnety_040', pretrained, **kwargs) + + +@register_model +def regnety_064(pretrained=False, **kwargs): + """RegNetY-6.4GF""" + return _create_regnet('regnety_064', pretrained, **kwargs) + + +@register_model +def regnety_080(pretrained=False, **kwargs): + """RegNetY-8.0GF""" + return _create_regnet('regnety_080', pretrained, **kwargs) + + +@register_model +def regnety_120(pretrained=False, **kwargs): + """RegNetY-12GF""" + return _create_regnet('regnety_120', pretrained, **kwargs) + + +@register_model +def regnety_160(pretrained=False, **kwargs): + """RegNetY-16GF""" + return _create_regnet('regnety_160', pretrained, **kwargs) + + +@register_model +def regnety_320(pretrained=False, **kwargs): + """RegNetY-32GF""" + return _create_regnet('regnety_320', pretrained, **kwargs) diff --git a/timm/models/res2net.py b/timm/models/res2net.py new file mode 100644 index 0000000..282baba --- /dev/null +++ b/timm/models/res2net.py @@ -0,0 +1,216 @@ +""" Res2Net and Res2NeXt +Adapted from the official PyTorch impl at: https://github.com/gasvn/Res2Net/ +Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169 +""" +import math + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .registry import register_model +from .resnet import ResNet + +__all__ = [] + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + 'res2net50_26w_4s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_26w_4s-06e79181.pth'), + 'res2net50_48w_2s':
_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_48w_2s-afed724a.pth'), + 'res2net50_14w_8s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_14w_8s-6527dddc.pth'), + 'res2net50_26w_6s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_26w_6s-19041792.pth'), + 'res2net50_26w_8s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net50_26w_8s-2c7c9f12.pth'), + 'res2net101_26w_4s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2net101_26w_4s-02a759a1.pth'), + 'res2next50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-res2net/res2next50_4s-6ef7e7bf.pth'), +} + + +class Bottle2neck(nn.Module): + """ Res2Net/Res2NeXt Bottleneck + Adapted from https://github.com/gasvn/Res2Net/blob/master/res2net.py + """ + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, + cardinality=1, base_width=26, scale=4, dilation=1, first_dilation=None, + act_layer=nn.ReLU, norm_layer=None, attn_layer=None, **_): + super(Bottle2neck, self).__init__() + self.scale = scale + self.is_first = stride > 1 or downsample is not None + self.num_scales = max(1, scale - 1) + width = int(math.floor(planes * (base_width / 64.0))) * cardinality + self.width = width + outplanes = planes * self.expansion + first_dilation = first_dilation or dilation + + self.conv1 = nn.Conv2d(inplanes, width * scale, kernel_size=1, bias=False) + self.bn1 = norm_layer(width * scale) + + convs = [] + bns = [] + for i in range(self.num_scales): + convs.append(nn.Conv2d( + width, width, kernel_size=3, stride=stride, padding=first_dilation, + dilation=first_dilation, groups=cardinality, bias=False)) + bns.append(norm_layer(width)) + self.convs = nn.ModuleList(convs) + self.bns = nn.ModuleList(bns) + if self.is_first: + # FIXME this should probably have count_include_pad=False, but hurts original weights + self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1) + else: + self.pool = None + + self.conv3 = nn.Conv2d(width * scale, outplanes, kernel_size=1, bias=False) + self.bn3 = norm_layer(outplanes) + self.se = attn_layer(outplanes) if attn_layer is not None else None + + self.relu = act_layer(inplace=True) + self.downsample = downsample + + def zero_init_last_bn(self): + nn.init.zeros_(self.bn3.weight) + + def forward(self, x): + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + spx = torch.split(out, self.width, 1) + spo = [] + sp = spx[0]  # redundant, for torchscript + for i, (conv, bn) in enumerate(zip(self.convs, self.bns)): + if i == 0 or self.is_first: + sp = spx[i] + else: + sp = sp + spx[i] + sp = conv(sp) + sp = bn(sp) + sp = self.relu(sp) + spo.append(sp) + if self.scale > 1: + if self.pool is not None: + # only reached when self.is_first is True; the None check is for torchscript + spo.append(self.pool(spx[-1])) + else: + spo.append(spx[-1]) + out = torch.cat(spo, 1) + + out = self.conv3(out) + out = self.bn3(out) + + if self.se is not None: + out = self.se(out) + + if self.downsample is not None: + shortcut = self.downsample(x) + + out += shortcut + out = self.relu(out) + + return out + + +def _create_res2net(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + ResNet, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + + +@register_model +def
res2net50_26w_4s(pretrained=False, **kwargs): + """Constructs a Res2Net-50 26w4s model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=4), **kwargs) + return _create_res2net('res2net50_26w_4s', pretrained, **model_args) + + +@register_model +def res2net101_26w_4s(pretrained=False, **kwargs): + """Constructs a Res2Net-101 26w4s model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 23, 3], base_width=26, block_args=dict(scale=4), **kwargs) + return _create_res2net('res2net101_26w_4s', pretrained, **model_args) + + +@register_model +def res2net50_26w_6s(pretrained=False, **kwargs): + """Constructs a Res2Net-50 26w6s model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=6), **kwargs) + return _create_res2net('res2net50_26w_6s', pretrained, **model_args) + + +@register_model +def res2net50_26w_8s(pretrained=False, **kwargs): + """Constructs a Res2Net-50 26w8s model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=8), **kwargs) + return _create_res2net('res2net50_26w_8s', pretrained, **model_args) + + +@register_model +def res2net50_48w_2s(pretrained=False, **kwargs): + """Constructs a Res2Net-50 48w2s model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=48, block_args=dict(scale=2), **kwargs) + return _create_res2net('res2net50_48w_2s', pretrained, **model_args) + + +@register_model +def res2net50_14w_8s(pretrained=False, **kwargs): + """Constructs a Res2Net-50 14w8s model. 
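+ The 14w8s suffix encodes base_width=14 and scale=8, matching the model_args below.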
+ Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=14, block_args=dict(scale=8), **kwargs) + return _create_res2net('res2net50_14w_8s', pretrained, **model_args) + + +@register_model +def res2next50(pretrained=False, **kwargs): + """Constructs a Res2NeXt-50 4s model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model_args = dict( + block=Bottle2neck, layers=[3, 4, 6, 3], base_width=4, cardinality=8, block_args=dict(scale=4), **kwargs) + return _create_res2net('res2next50', pretrained, **model_args) diff --git a/timm/models/resnest.py b/timm/models/resnest.py new file mode 100644 index 0000000..31eebd8 --- /dev/null +++ b/timm/models/resnest.py @@ -0,0 +1,237 @@ +""" ResNeSt Models + +Paper: `ResNeSt: Split-Attention Networks` - https://arxiv.org/abs/2004.08955 + +Adapted from the original PyTorch impl w/ weights at https://github.com/zhanghang1989/ResNeSt by Hang Zhang + +Modified for torchscript compat, and consistency with timm by Ross Wightman +""" +import torch +from torch import nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import SplitAttn +from .registry import register_model +from .resnet import ResNet + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1.0', 'classifier': 'fc', + **kwargs + } + +default_cfgs = { + 'resnest14d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_resnest14-9c8fe254.pth'), + 'resnest26d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gluon_resnest26-50eb607c.pth'), + 'resnest50d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest50-528c19ca.pth'), + 'resnest101e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest101-22405ba7.pth', + input_size=(3, 256, 256), pool_size=(8, 8)), + 'resnest200e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest200-75117900.pth', + input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=0.909, interpolation='bicubic'), + 'resnest269e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest269-0cc87c48.pth', + input_size=(3, 416, 416), pool_size=(13, 13), crop_pct=0.928, interpolation='bicubic'), + 'resnest50d_4s2x40d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest50_fast_4s2x40d-41d14ed0.pth', + interpolation='bicubic'), + 'resnest50d_1s4x24d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-resnest/resnest50_fast_1s4x24d-d4a4f76f.pth', + interpolation='bicubic') +} + + +class ResNestBottleneck(nn.Module): + """ResNeSt Bottleneck + """ + # pylint: disable=unused-argument + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, + radix=1, cardinality=1, base_width=64, avd=False, avd_first=False, is_first=False, + reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, + attn_layer=None, aa_layer=None, drop_block=None, drop_path=None): +
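# radix sets the number of Split-Attention splits per cardinal group; radix >= 1 selects the SplitAttn conv2 below + # avd/avd_first move stride-2 downsampling into an AvgPool2d placed before (avd_first) or after the 3x3 conv +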
super(ResNestBottleneck, self).__init__() + assert reduce_first == 1 # not supported + assert attn_layer is None # not supported + assert aa_layer is None # TODO not yet supported + assert drop_path is None # TODO not yet supported + + group_width = int(planes * (base_width / 64.)) * cardinality + first_dilation = first_dilation or dilation + if avd and (stride > 1 or is_first): + avd_stride = stride + stride = 1 + else: + avd_stride = 0 + self.radix = radix + self.drop_block = drop_block + + self.conv1 = nn.Conv2d(inplanes, group_width, kernel_size=1, bias=False) + self.bn1 = norm_layer(group_width) + self.act1 = act_layer(inplace=True) + self.avd_first = nn.AvgPool2d(3, avd_stride, padding=1) if avd_stride > 0 and avd_first else None + + if self.radix >= 1: + self.conv2 = SplitAttn( + group_width, group_width, kernel_size=3, stride=stride, padding=first_dilation, + dilation=first_dilation, groups=cardinality, radix=radix, norm_layer=norm_layer, drop_block=drop_block) + self.bn2 = nn.Identity() + self.act2 = nn.Identity() + else: + self.conv2 = nn.Conv2d( + group_width, group_width, kernel_size=3, stride=stride, padding=first_dilation, + dilation=first_dilation, groups=cardinality, bias=False) + self.bn2 = norm_layer(group_width) + self.act2 = act_layer(inplace=True) + self.avd_last = nn.AvgPool2d(3, avd_stride, padding=1) if avd_stride > 0 and not avd_first else None + + self.conv3 = nn.Conv2d(group_width, planes * 4, kernel_size=1, bias=False) + self.bn3 = norm_layer(planes*4) + self.act3 = act_layer(inplace=True) + self.downsample = downsample + + def zero_init_last_bn(self): + nn.init.zeros_(self.bn3.weight) + + def forward(self, x): + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + if self.drop_block is not None: + out = self.drop_block(out) + out = self.act1(out) + + if self.avd_first is not None: + out = self.avd_first(out) + + out = self.conv2(out) + out = self.bn2(out) + if self.drop_block is not None: + out = self.drop_block(out) + out = self.act2(out) + + if self.avd_last is not None: + out = self.avd_last(out) + + out = self.conv3(out) + out = self.bn3(out) + if self.drop_block is not None: + out = self.drop_block(out) + + if self.downsample is not None: + shortcut = self.downsample(x) + + out += shortcut + out = self.act3(out) + return out + + +def _create_resnest(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + ResNet, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + + +@register_model +def resnest14d(pretrained=False, **kwargs): + """ ResNeSt-14d model. Weights ported from GluonCV. + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[1, 1, 1, 1], + stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) + return _create_resnest('resnest14d', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest26d(pretrained=False, **kwargs): + """ ResNeSt-26d model. Weights ported from GluonCV. + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[2, 2, 2, 2], + stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) + return _create_resnest('resnest26d', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest50d(pretrained=False, **kwargs): + """ ResNeSt-50d model. 
Matches paper ResNeSt-50 model, https://arxiv.org/abs/2004.08955 + Since this codebase supports all possible variations, 'd' for deep stem, stem_width 32, avg in downsample. + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 4, 6, 3], + stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) + return _create_resnest('resnest50d', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest101e(pretrained=False, **kwargs): + """ ResNeSt-101e model. Matches paper ResNeSt-101 model, https://arxiv.org/abs/2004.08955 + Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample. + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 4, 23, 3], + stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) + return _create_resnest('resnest101e', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest200e(pretrained=False, **kwargs): + """ ResNeSt-200e model. Matches paper ResNeSt-200 model, https://arxiv.org/abs/2004.08955 + Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample. + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 24, 36, 3], + stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) + return _create_resnest('resnest200e', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest269e(pretrained=False, **kwargs): + """ ResNeSt-269e model. Matches paper ResNeSt-269 model, https://arxiv.org/abs/2004.08955 + Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample. + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 30, 48, 8], + stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, + block_args=dict(radix=2, avd=True, avd_first=False), **kwargs) + return _create_resnest('resnest269e', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest50d_4s2x40d(pretrained=False, **kwargs): + """ResNeSt-50 4s2x40d from https://github.com/zhanghang1989/ResNeSt/blob/master/ablation.md + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 4, 6, 3], + stem_type='deep', stem_width=32, avg_down=True, base_width=40, cardinality=2, + block_args=dict(radix=4, avd=True, avd_first=True), **kwargs) + return _create_resnest('resnest50d_4s2x40d', pretrained=pretrained, **model_kwargs) + + +@register_model +def resnest50d_1s4x24d(pretrained=False, **kwargs): + """ResNeSt-50 1s4x24d from https://github.com/zhanghang1989/ResNeSt/blob/master/ablation.md + """ + model_kwargs = dict( + block=ResNestBottleneck, layers=[3, 4, 6, 3], + stem_type='deep', stem_width=32, avg_down=True, base_width=24, cardinality=4, + block_args=dict(radix=1, avd=True, avd_first=True), **kwargs) + return _create_resnest('resnest50d_1s4x24d', pretrained=pretrained, **model_kwargs) diff --git a/timm/models/resnet.py b/timm/models/resnet.py new file mode 100644 index 0000000..bbcae9a --- /dev/null +++ b/timm/models/resnet.py @@ -0,0 +1,1472 @@ +"""PyTorch ResNet + +This started as a copy of https://github.com/pytorch/vision 'resnet.py' (BSD-3-Clause) with +additional dropout and dynamic global avg/max pool. 
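+ +The ResNet class here is also reused as the trunk for the Res2Net and ResNeSt variants defined earlier in +this patch, which import it and swap in their own bottleneck blocks via build_model_with_cfg.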
+ +ResNeXt, SE-ResNeXt, SENet, and MXNet Gluon stem/downsample variants, tiered stems added by Ross Wightman +Copyright 2020 Ross Wightman +""" +import math +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import DropBlock2d, DropPath, AvgPool2dSame, BlurPool2d, GroupNorm, create_attn, get_attn, create_classifier +from .registry import register_model + +__all__ = ['ResNet', 'BasicBlock', 'Bottleneck'] # model_registry will add each entrypoint fn to this + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv1', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + # ResNet and Wide ResNet + 'resnet18': _cfg(url='https://download.pytorch.org/models/resnet18-5c106cde.pth'), + 'resnet18d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet18d_ra2-48a79e06.pth', + interpolation='bicubic', first_conv='conv1.0'), + 'resnet34': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34-43635321.pth'), + 'resnet34d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34d_ra2-f8dcfcaf.pth', + interpolation='bicubic', first_conv='conv1.0'), + 'resnet26': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26-9aa10e23.pth', + interpolation='bicubic'), + 'resnet26d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26d-69e92c46.pth', + interpolation='bicubic', first_conv='conv1.0'), + 'resnet26t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet26t_256_ra2-6f6fa748.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.94), + 'resnet50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a1_0-14fe96d1.pth', + interpolation='bicubic', crop_pct=0.95), + 'resnet50d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet50d_ra2-464e36ba.pth', + interpolation='bicubic', first_conv='conv1.0'), + 'resnet50t': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0'), + 'resnet101': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet101_a1h-36d3f2aa.pth', + interpolation='bicubic', crop_pct=0.95), + 'resnet101d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet101d_ra2-2803ffab.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=1.0, test_input_size=(3, 320, 320)), + 'resnet152': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet152_a1h-dc400468.pth', + interpolation='bicubic', crop_pct=0.95), + 'resnet152d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet152d_ra2-5cac0439.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=1.0, 
+        test_input_size=(3, 320, 320)),
+    'resnet200': _cfg(url='', interpolation='bicubic'),
+    'resnet200d': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet200d_ra2-bdba9bf9.pth',
+        interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8),
+        crop_pct=1.0, test_input_size=(3, 320, 320)),
+    'tv_resnet34': _cfg(url='https://download.pytorch.org/models/resnet34-333f7ec4.pth'),
+    'tv_resnet50': _cfg(url='https://download.pytorch.org/models/resnet50-19c8e357.pth'),
+    'tv_resnet101': _cfg(url='https://download.pytorch.org/models/resnet101-5d3b4d8f.pth'),
+    'tv_resnet152': _cfg(url='https://download.pytorch.org/models/resnet152-b121ed2d.pth'),
+    'wide_resnet50_2': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/wide_resnet50_racm-8234f177.pth',
+        interpolation='bicubic'),
+    'wide_resnet101_2': _cfg(url='https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth'),
+
+    # ResNets w/ alternative norm layers
+    'resnet50_gn': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_gn_a1h2-8fe6c4d0.pth',
+        crop_pct=0.94, interpolation='bicubic'),
+
+    # ResNeXt
+    'resnext50_32x4d': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnext50_32x4d_a1h-0146ab0a.pth',
+        interpolation='bicubic', crop_pct=0.95),
+    'resnext50d_32x4d': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnext50d_32x4d-103e99f8.pth',
+        interpolation='bicubic',
+        first_conv='conv1.0'),
+    'resnext101_32x4d': _cfg(url=''),
+    'resnext101_32x8d': _cfg(url='https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth'),
+    'resnext101_64x4d': _cfg(url=''),
+    'tv_resnext50_32x4d': _cfg(url='https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth'),
+
+    # ResNeXt models - Weakly Supervised Pretraining on Instagram Hashtags
+    # from https://github.com/facebookresearch/WSL-Images
+    # Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only.
+    'ig_resnext101_32x8d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x8-c38310e5.pth'),
+    'ig_resnext101_32x16d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x16-c6f796b0.pth'),
+    'ig_resnext101_32x32d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x32-e4b90b00.pth'),
+    'ig_resnext101_32x48d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x48-3e41cc8a.pth'),
+
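+    # NOTE (editorial, illustrative only): each entry above and below is the output of
+    # _cfg(), i.e. the shared defaults with per-model overrides merged on top, e.g.
+    #   default_cfgs['resnet50']['interpolation'] == 'bicubic'  # per-model override
+    #   default_cfgs['resnet50']['classifier'] == 'fc'          # inherited default
+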
+    # Semi-Supervised ResNe*t models from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models
+    # Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only.
+    'ssl_resnet18': _cfg(
+        url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet18-d92f0530.pth'),
+    'ssl_resnet50': _cfg(
+        url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet50-08389792.pth'),
+    'ssl_resnext50_32x4d': _cfg(
+        url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext50_32x4-ddb3e555.pth'),
+    'ssl_resnext101_32x4d': _cfg(
+        url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x4-dc43570a.pth'),
+    'ssl_resnext101_32x8d': _cfg(
+        url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x8-2cfe2f8b.pth'),
+    'ssl_resnext101_32x16d': _cfg(
+        url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x16-15fffa57.pth'),
+
+    # Semi-Weakly Supervised ResNe*t models from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models
+    # Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only.
+    'swsl_resnet18': _cfg(
+        url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet18-118f1556.pth'),
+    'swsl_resnet50': _cfg(
+        url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet50-16a12f1b.pth'),
+    'swsl_resnext50_32x4d': _cfg(
+        url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext50_32x4-72679e44.pth'),
+    'swsl_resnext101_32x4d': _cfg(
+        url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x4-3f87e46b.pth'),
+    'swsl_resnext101_32x8d': _cfg(
+        url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x8-b4712904.pth'),
+    'swsl_resnext101_32x16d': _cfg(
+        url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x16-f3559a9c.pth'),
+
+    # Squeeze-Excitation ResNets, to eventually replace the models in senet.py
+    'seresnet18': _cfg(
+        url='',
+        interpolation='bicubic'),
+    'seresnet34': _cfg(
+        url='',
+        interpolation='bicubic'),
+    'seresnet50': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet50_ra_224-8efdb4bb.pth',
+        interpolation='bicubic'),
+    'seresnet50t': _cfg(
+        url='',
+        interpolation='bicubic',
+        first_conv='conv1.0'),
+    'seresnet101': _cfg(
+        url='',
+        interpolation='bicubic'),
+    'seresnet152': _cfg(
+        url='',
+        interpolation='bicubic'),
+    'seresnet152d': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet152d_ra2-04464dd2.pth',
+        interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8),
+        crop_pct=1.0, test_input_size=(3, 320, 320)
+    ),
+    'seresnet200d': _cfg(
+        url='',
+        interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.94, pool_size=(8, 8)),
+    'seresnet269d': _cfg(
+        url='',
+        interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.94, pool_size=(8, 8)),
+
+
+    # Squeeze-Excitation ResNeXts, to eventually replace the models in senet.py
+    'seresnext26d_32x4d': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26d_32x4d-80fa48a3.pth',
+        interpolation='bicubic',
+        first_conv='conv1.0'),
+    'seresnext26t_32x4d': _cfg(
+
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26tn_32x4d-569cb627.pth', + interpolation='bicubic', + first_conv='conv1.0'), + 'seresnext50_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext50_32x4d_racm-a304a460.pth', + interpolation='bicubic'), + 'seresnext101_32x4d': _cfg( + url='', + interpolation='bicubic'), + 'seresnext101_32x8d': _cfg( + url='', + interpolation='bicubic'), + 'senet154': _cfg( + url='', + interpolation='bicubic', + first_conv='conv1.0'), + + # Efficient Channel Attention ResNets + 'ecaresnet26t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet26t_ra2-46609757.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=0.95, test_input_size=(3, 320, 320)), + 'ecaresnetlight': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNetLight_4f34b35b.pth', + interpolation='bicubic'), + 'ecaresnet50d': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNet50D_833caf58.pth', + interpolation='bicubic', + first_conv='conv1.0'), + 'ecaresnet50d_pruned': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45899/outputs/ECAResNet50D_P_9c67f710.pth', + interpolation='bicubic', + first_conv='conv1.0'), + 'ecaresnet50t': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet50t_ra2-f7ac63c4.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), + crop_pct=0.95, test_input_size=(3, 320, 320)), + 'ecaresnet101d': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNet101D_281c5844.pth', + interpolation='bicubic', first_conv='conv1.0'), + 'ecaresnet101d_pruned': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45610/outputs/ECAResNet101D_P_75a3370e.pth', + interpolation='bicubic', + first_conv='conv1.0'), + 'ecaresnet200d': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.94, pool_size=(8, 8)), + 'ecaresnet269d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet269d_320_ra2-7baa55cb.pth', + interpolation='bicubic', first_conv='conv1.0', input_size=(3, 320, 320), pool_size=(10, 10), + crop_pct=1.0, test_input_size=(3, 352, 352)), + + # Efficient Channel Attention ResNeXts + 'ecaresnext26t_32x4d': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0'), + 'ecaresnext50t_32x4d': _cfg( + url='', + interpolation='bicubic', first_conv='conv1.0'), + + # ResNets with anti-aliasing blur pool + 'resnetblur18': _cfg( + interpolation='bicubic'), + 'resnetblur50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnetblur50-84f4748f.pth', + interpolation='bicubic'), + + # ResNet-RS models + 'resnetrs50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs50_ema-6b53758b.pth', + input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.91, test_input_size=(3, 224, 224), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs101': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs101_i192_ema-1509bbf6.pth', + input_size=(3, 192, 192), pool_size=(6, 6), crop_pct=0.94, test_input_size=(3, 288, 288), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs152': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs152_i256_ema-a9aff7f9.pth', + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs200': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs200_ema-623d2f59.pth', + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs270': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs270_ema-b40e674c.pth', + input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 352, 352), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs350': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs350_i256_ema-5a1aa8f1.pth', + input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0, test_input_size=(3, 384, 384), + interpolation='bicubic', first_conv='conv1.0'), + 'resnetrs420': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs420_ema-972dee69.pth', + input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, test_input_size=(3, 416, 416), + interpolation='bicubic', first_conv='conv1.0'), +} + + +def get_padding(kernel_size, stride, dilation=1): + padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 + return padding + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64, + reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, + attn_layer=None, aa_layer=None, drop_block=None, drop_path=None): + super(BasicBlock, self).__init__() + + assert cardinality == 1, 'BasicBlock only supports cardinality of 1' + assert base_width == 64, 'BasicBlock does not support changing base width' + first_planes = planes // reduce_first + outplanes = planes * self.expansion + first_dilation = first_dilation or dilation + use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation) + + self.conv1 = nn.Conv2d( + inplanes, first_planes, kernel_size=3, stride=1 if use_aa else stride, padding=first_dilation, + dilation=first_dilation, bias=False) + self.bn1 = norm_layer(first_planes) + self.act1 = act_layer(inplace=True) + self.aa = aa_layer(channels=first_planes, stride=stride) if use_aa else None + + self.conv2 = nn.Conv2d( + first_planes, outplanes, kernel_size=3, padding=dilation, dilation=dilation, bias=False) + self.bn2 = norm_layer(outplanes) + + self.se = create_attn(attn_layer, outplanes) + + self.act2 = act_layer(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + self.drop_block = drop_block + self.drop_path = drop_path + + def zero_init_last_bn(self): + nn.init.zeros_(self.bn2.weight) + + def forward(self, x): + shortcut = x + + x = self.conv1(x) + x = self.bn1(x) + if self.drop_block is not None: + x = self.drop_block(x) + x = self.act1(x) + if self.aa is not None: + x = self.aa(x) + + x = self.conv2(x) + x = 
self.bn2(x) + if self.drop_block is not None: + x = self.drop_block(x) + + if self.se is not None: + x = self.se(x) + + if self.drop_path is not None: + x = self.drop_path(x) + + if self.downsample is not None: + shortcut = self.downsample(shortcut) + x += shortcut + x = self.act2(x) + + return x + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64, + reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, + attn_layer=None, aa_layer=None, drop_block=None, drop_path=None): + super(Bottleneck, self).__init__() + + width = int(math.floor(planes * (base_width / 64)) * cardinality) + first_planes = width // reduce_first + outplanes = planes * self.expansion + first_dilation = first_dilation or dilation + use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation) + + self.conv1 = nn.Conv2d(inplanes, first_planes, kernel_size=1, bias=False) + self.bn1 = norm_layer(first_planes) + self.act1 = act_layer(inplace=True) + + self.conv2 = nn.Conv2d( + first_planes, width, kernel_size=3, stride=1 if use_aa else stride, + padding=first_dilation, dilation=first_dilation, groups=cardinality, bias=False) + self.bn2 = norm_layer(width) + self.act2 = act_layer(inplace=True) + self.aa = aa_layer(channels=width, stride=stride) if use_aa else None + + self.conv3 = nn.Conv2d(width, outplanes, kernel_size=1, bias=False) + self.bn3 = norm_layer(outplanes) + + self.se = create_attn(attn_layer, outplanes) + + self.act3 = act_layer(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + self.drop_block = drop_block + self.drop_path = drop_path + + def zero_init_last_bn(self): + nn.init.zeros_(self.bn3.weight) + + def forward(self, x): + shortcut = x + + x = self.conv1(x) + x = self.bn1(x) + if self.drop_block is not None: + x = self.drop_block(x) + x = self.act1(x) + + x = self.conv2(x) + x = self.bn2(x) + if self.drop_block is not None: + x = self.drop_block(x) + x = self.act2(x) + if self.aa is not None: + x = self.aa(x) + + x = self.conv3(x) + x = self.bn3(x) + if self.drop_block is not None: + x = self.drop_block(x) + + if self.se is not None: + x = self.se(x) + + if self.drop_path is not None: + x = self.drop_path(x) + + if self.downsample is not None: + shortcut = self.downsample(shortcut) + x += shortcut + x = self.act3(x) + + return x + + +def downsample_conv( + in_channels, out_channels, kernel_size, stride=1, dilation=1, first_dilation=None, norm_layer=None): + norm_layer = norm_layer or nn.BatchNorm2d + kernel_size = 1 if stride == 1 and dilation == 1 else kernel_size + first_dilation = (first_dilation or dilation) if kernel_size > 1 else 1 + p = get_padding(kernel_size, stride, first_dilation) + + return nn.Sequential(*[ + nn.Conv2d( + in_channels, out_channels, kernel_size, stride=stride, padding=p, dilation=first_dilation, bias=False), + norm_layer(out_channels) + ]) + + +def downsample_avg( + in_channels, out_channels, kernel_size, stride=1, dilation=1, first_dilation=None, norm_layer=None): + norm_layer = norm_layer or nn.BatchNorm2d + avg_stride = stride if dilation == 1 else 1 + if stride == 1 and dilation == 1: + pool = nn.Identity() + else: + avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d + pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) + + return nn.Sequential(*[ + pool, + nn.Conv2d(in_channels, out_channels, 1, stride=1, padding=0, bias=False), 
+ norm_layer(out_channels) + ]) + + +def drop_blocks(drop_block_rate=0.): + return [ + None, None, + DropBlock2d(drop_block_rate, 5, 0.25) if drop_block_rate else None, + DropBlock2d(drop_block_rate, 3, 1.00) if drop_block_rate else None] + + +def make_blocks( + block_fn, channels, block_repeats, inplanes, reduce_first=1, output_stride=32, + down_kernel_size=1, avg_down=False, drop_block_rate=0., drop_path_rate=0., **kwargs): + stages = [] + feature_info = [] + net_num_blocks = sum(block_repeats) + net_block_idx = 0 + net_stride = 4 + dilation = prev_dilation = 1 + for stage_idx, (planes, num_blocks, db) in enumerate(zip(channels, block_repeats, drop_blocks(drop_block_rate))): + stage_name = f'layer{stage_idx + 1}' # never liked this name, but weight compat requires it + stride = 1 if stage_idx == 0 else 2 + if net_stride >= output_stride: + dilation *= stride + stride = 1 + else: + net_stride *= stride + + downsample = None + if stride != 1 or inplanes != planes * block_fn.expansion: + down_kwargs = dict( + in_channels=inplanes, out_channels=planes * block_fn.expansion, kernel_size=down_kernel_size, + stride=stride, dilation=dilation, first_dilation=prev_dilation, norm_layer=kwargs.get('norm_layer')) + downsample = downsample_avg(**down_kwargs) if avg_down else downsample_conv(**down_kwargs) + + block_kwargs = dict(reduce_first=reduce_first, dilation=dilation, drop_block=db, **kwargs) + blocks = [] + for block_idx in range(num_blocks): + downsample = downsample if block_idx == 0 else None + stride = stride if block_idx == 0 else 1 + block_dpr = drop_path_rate * net_block_idx / (net_num_blocks - 1) # stochastic depth linear decay rule + blocks.append(block_fn( + inplanes, planes, stride, downsample, first_dilation=prev_dilation, + drop_path=DropPath(block_dpr) if block_dpr > 0. else None, **block_kwargs)) + prev_dilation = dilation + inplanes = planes * block_fn.expansion + net_block_idx += 1 + + stages.append((stage_name, nn.Sequential(*blocks))) + feature_info.append(dict(num_chs=inplanes, reduction=net_stride, module=stage_name)) + + return stages, feature_info + + +class ResNet(nn.Module): + """ResNet / ResNeXt / SE-ResNeXt / SE-Net + + This class implements all variants of ResNet, ResNeXt, SE-ResNeXt, and SENet that + * have > 1 stride in the 3x3 conv layer of bottleneck + * have conv-bn-act ordering + + This ResNet impl supports a number of stem and downsample options based on the v1c, v1d, v1e, and v1s + variants included in the MXNet Gluon ResNetV1b model. The C and D variants are also discussed in the + 'Bag of Tricks' paper: https://arxiv.org/pdf/1812.01187. The B variant is equivalent to torchvision default. 
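+
+    For example (editorial illustration, mirroring the resnet50d entrypoint further below),
+    the 'd' variant of ResNet-50 corresponds to:
+        ResNet(Bottleneck, [3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True)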
+
+    ResNet variants (the same modifications can be used in SE/ResNeXt models as well):
+      * normal, b - 7x7 stem, stem_width = 64, same as torchvision ResNet, NVIDIA ResNet 'v1.5', Gluon v1b
+      * c - 3 layer deep 3x3 stem, stem_width = 32 (32, 32, 64)
+      * d - 3 layer deep 3x3 stem, stem_width = 32 (32, 32, 64), average pool in downsample
+      * e - 3 layer deep 3x3 stem, stem_width = 64 (64, 64, 128), average pool in downsample
+      * s - 3 layer deep 3x3 stem, stem_width = 64 (64, 64, 128)
+      * t - 3 layer deep 3x3 stem, stem width = 32 (24, 48, 64), average pool in downsample
+      * tn - 3 layer deep 3x3 stem, stem width = 32 (24, 32, 64), average pool in downsample
+
+    ResNeXt
+      * normal - 7x7 stem, stem_width = 64, standard cardinality and base widths
+      * same c, d, e, s variants as ResNet can be enabled
+
+    SE-ResNeXt
+      * normal - 7x7 stem, stem_width = 64
+      * same c, d, e, s variants as ResNet can be enabled
+
+    SENet-154 - 3 layer deep 3x3 stem (same as v1c-v1s), stem_width = 64, cardinality=64,
+        reduction by 2 on width of first bottleneck convolution, 3x3 downsample convs after first block
+
+    Parameters
+    ----------
+    block : Block
+        Class for the residual block. Options are BasicBlock, Bottleneck.
+    layers : list of int
+        Numbers of layers in each block
+    num_classes : int, default 1000
+        Number of classification classes.
+    in_chans : int, default 3
+        Number of input (color) channels.
+    cardinality : int, default 1
+        Number of convolution groups for 3x3 conv in Bottleneck.
+    base_width : int, default 64
+        Factor determining bottleneck channels. `planes * base_width / 64 * cardinality`
+    stem_width : int, default 64
+        Number of channels in stem convolutions
+    stem_type : str, default ''
+        The type of stem:
+          * '', default - a single 7x7 conv with a width of stem_width
+          * 'deep' - three 3x3 convolution layers of widths stem_width, stem_width, stem_width * 2
+          * 'deep_tiered' - three 3x3 conv layers of widths stem_width//4 * 3, stem_width, stem_width * 2
+    block_reduce_first : int, default 1
+        Reduction factor for first convolution output width of residual blocks,
+        1 for all archs except SENets, where it is 2
+    down_kernel_size : int, default 1
+        Kernel size of residual block downsampling path, 1x1 for most archs, 3x3 for SENets
+    avg_down : bool, default False
+        Whether to use average pooling for projection skip connection between stages/downsample.
+    output_stride : int, default 32
+        Set the output stride of the network, 32, 16, or 8. Typically used in segmentation.
+    act_layer : nn.Module, activation layer
+    norm_layer : nn.Module, normalization layer
+    aa_layer : nn.Module, anti-aliasing layer
+    drop_rate : float, default 0.
+        Dropout probability before classifier, for training
+    global_pool : str, default 'avg'
+        Global pooling type.
One of 'avg', 'max', 'avgmax', 'catavgmax' + """ + + def __init__(self, block, layers, num_classes=1000, in_chans=3, + cardinality=1, base_width=64, stem_width=64, stem_type='', replace_stem_pool=False, + output_stride=32, block_reduce_first=1, down_kernel_size=1, avg_down=False, + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, drop_rate=0.0, drop_path_rate=0., + drop_block_rate=0., global_pool='avg', zero_init_last_bn=True, block_args=None): + block_args = block_args or dict() + assert output_stride in (8, 16, 32) + self.num_classes = num_classes + self.drop_rate = drop_rate + super(ResNet, self).__init__() + + # Stem + deep_stem = 'deep' in stem_type + inplanes = stem_width * 2 if deep_stem else 64 + if deep_stem: + stem_chs = (stem_width, stem_width) + if 'tiered' in stem_type: + stem_chs = (3 * (stem_width // 4), stem_width) + self.conv1 = nn.Sequential(*[ + nn.Conv2d(in_chans, stem_chs[0], 3, stride=2, padding=1, bias=False), + norm_layer(stem_chs[0]), + act_layer(inplace=True), + nn.Conv2d(stem_chs[0], stem_chs[1], 3, stride=1, padding=1, bias=False), + norm_layer(stem_chs[1]), + act_layer(inplace=True), + nn.Conv2d(stem_chs[1], inplanes, 3, stride=1, padding=1, bias=False)]) + else: + self.conv1 = nn.Conv2d(in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False) + self.bn1 = norm_layer(inplanes) + self.act1 = act_layer(inplace=True) + self.feature_info = [dict(num_chs=inplanes, reduction=2, module='act1')] + + # Stem Pooling + if replace_stem_pool: + self.maxpool = nn.Sequential(*filter(None, [ + nn.Conv2d(inplanes, inplanes, 3, stride=1 if aa_layer else 2, padding=1, bias=False), + aa_layer(channels=inplanes, stride=2) if aa_layer else None, + norm_layer(inplanes), + act_layer(inplace=True) + ])) + else: + if aa_layer is not None: + self.maxpool = nn.Sequential(*[ + nn.MaxPool2d(kernel_size=3, stride=1, padding=1), + aa_layer(channels=inplanes, stride=2)]) + else: + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + # Feature Blocks + channels = [64, 128, 256, 512] + stage_modules, stage_feature_info = make_blocks( + block, channels, layers, inplanes, cardinality=cardinality, base_width=base_width, + output_stride=output_stride, reduce_first=block_reduce_first, avg_down=avg_down, + down_kernel_size=down_kernel_size, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, + drop_block_rate=drop_block_rate, drop_path_rate=drop_path_rate, **block_args) + for stage in stage_modules: + self.add_module(*stage) # layer1, layer2, etc + self.feature_info.extend(stage_feature_info) + + # Head (Pooling and Classifier) + self.num_features = 512 * block.expansion + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + self.init_weights(zero_init_last_bn=zero_init_last_bn) + + def init_weights(self, zero_init_last_bn=True): + for n, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + if zero_init_last_bn: + for m in self.modules(): + if hasattr(m, 'zero_init_last_bn'): + m.zero_init_last_bn() + + def get_classifier(self): + return self.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = 
self.act1(x) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate: + x = F.dropout(x, p=float(self.drop_rate), training=self.training) + x = self.fc(x) + return x + + +def _create_resnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + ResNet, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + + +@register_model +def resnet18(pretrained=False, **kwargs): + """Constructs a ResNet-18 model. + """ + model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs) + return _create_resnet('resnet18', pretrained, **model_args) + + +@register_model +def resnet18d(pretrained=False, **kwargs): + """Constructs a ResNet-18-D model. + """ + model_args = dict( + block=BasicBlock, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet18d', pretrained, **model_args) + + +@register_model +def resnet34(pretrained=False, **kwargs): + """Constructs a ResNet-34 model. + """ + model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('resnet34', pretrained, **model_args) + + +@register_model +def resnet34d(pretrained=False, **kwargs): + """Constructs a ResNet-34-D model. + """ + model_args = dict( + block=BasicBlock, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet34d', pretrained, **model_args) + + +@register_model +def resnet26(pretrained=False, **kwargs): + """Constructs a ResNet-26 model. + """ + model_args = dict(block=Bottleneck, layers=[2, 2, 2, 2], **kwargs) + return _create_resnet('resnet26', pretrained, **model_args) + + +@register_model +def resnet26t(pretrained=False, **kwargs): + """Constructs a ResNet-26-T model. + """ + model_args = dict( + block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep_tiered', avg_down=True, **kwargs) + return _create_resnet('resnet26t', pretrained, **model_args) + + +@register_model +def resnet26d(pretrained=False, **kwargs): + """Constructs a ResNet-26-D model. + """ + model_args = dict(block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet26d', pretrained, **model_args) + + +@register_model +def resnet50(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('resnet50', pretrained, **model_args) + + +@register_model +def resnet50d(pretrained=False, **kwargs): + """Constructs a ResNet-50-D model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet50d', pretrained, **model_args) + + +@register_model +def resnet50t(pretrained=False, **kwargs): + """Constructs a ResNet-50-T model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep_tiered', avg_down=True, **kwargs) + return _create_resnet('resnet50t', pretrained, **model_args) + + +@register_model +def resnet101(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], **kwargs) + return _create_resnet('resnet101', pretrained, **model_args) + + +@register_model +def resnet101d(pretrained=False, **kwargs): + """Constructs a ResNet-101-D model. 
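+
+    Example (editorial sketch; assumes the bundled timm factory, timm.create_model):
+        >>> import torch, timm
+        >>> model = timm.create_model('resnet101d', pretrained=False)
+        >>> model(torch.randn(1, 3, 224, 224)).shape
+        torch.Size([1, 1000])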
+ """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet101d', pretrained, **model_args) + + +@register_model +def resnet152(pretrained=False, **kwargs): + """Constructs a ResNet-152 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], **kwargs) + return _create_resnet('resnet152', pretrained, **model_args) + + +@register_model +def resnet152d(pretrained=False, **kwargs): + """Constructs a ResNet-152-D model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet152d', pretrained, **model_args) + + +@register_model +def resnet200(pretrained=False, **kwargs): + """Constructs a ResNet-200 model. + """ + model_args = dict(block=Bottleneck, layers=[3, 24, 36, 3], **kwargs) + return _create_resnet('resnet200', pretrained, **model_args) + + +@register_model +def resnet200d(pretrained=False, **kwargs): + """Constructs a ResNet-200-D model. + """ + model_args = dict( + block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnet200d', pretrained, **model_args) + + +@register_model +def tv_resnet34(pretrained=False, **kwargs): + """Constructs a ResNet-34 model with original Torchvision weights. + """ + model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('tv_resnet34', pretrained, **model_args) + + +@register_model +def tv_resnet50(pretrained=False, **kwargs): + """Constructs a ResNet-50 model with original Torchvision weights. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('tv_resnet50', pretrained, **model_args) + + +@register_model +def tv_resnet101(pretrained=False, **kwargs): + """Constructs a ResNet-101 model w/ Torchvision pretrained weights. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], **kwargs) + return _create_resnet('tv_resnet101', pretrained, **model_args) + + +@register_model +def tv_resnet152(pretrained=False, **kwargs): + """Constructs a ResNet-152 model w/ Torchvision pretrained weights. + """ + model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], **kwargs) + return _create_resnet('tv_resnet152', pretrained, **model_args) + + +@register_model +def wide_resnet50_2(pretrained=False, **kwargs): + """Constructs a Wide ResNet-50-2 model. + The model is the same as ResNet except for the bottleneck number of channels + which is twice larger in every block. The number of channels in outer 1x1 + convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 + channels, and in Wide ResNet-50-2 has 2048-1024-2048. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], base_width=128, **kwargs) + return _create_resnet('wide_resnet50_2', pretrained, **model_args) + + +@register_model +def wide_resnet101_2(pretrained=False, **kwargs): + """Constructs a Wide ResNet-101-2 model. + The model is the same as ResNet except for the bottleneck number of channels + which is twice larger in every block. The number of channels in outer 1x1 + convolutions is the same. 
+ """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], base_width=128, **kwargs) + return _create_resnet('wide_resnet101_2', pretrained, **model_args) + + +@register_model +def resnet50_gn(pretrained=False, **kwargs): + """Constructs a ResNet-50 model w/ GroupNorm + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('resnet50_gn', pretrained, norm_layer=GroupNorm, **model_args) + + +@register_model +def resnext50_32x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt50-32x4d model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('resnext50_32x4d', pretrained, **model_args) + + +@register_model +def resnext50d_32x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt50d-32x4d model. ResNext50 w/ deep stem & avg pool downsample + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, + stem_width=32, stem_type='deep', avg_down=True, **kwargs) + return _create_resnet('resnext50d_32x4d', pretrained, **model_args) + + +@register_model +def resnext101_32x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt-101 32x4d model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('resnext101_32x4d', pretrained, **model_args) + + +@register_model +def resnext101_32x8d(pretrained=False, **kwargs): + """Constructs a ResNeXt-101 32x8d model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs) + return _create_resnet('resnext101_32x8d', pretrained, **model_args) + + +@register_model +def resnext101_64x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt101-64x4d model. + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=64, base_width=4, **kwargs) + return _create_resnet('resnext101_64x4d', pretrained, **model_args) + + +@register_model +def tv_resnext50_32x4d(pretrained=False, **kwargs): + """Constructs a ResNeXt50-32x4d model with original Torchvision weights. 
+ """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('tv_resnext50_32x4d', pretrained, **model_args) + + +@register_model +def ig_resnext101_32x8d(pretrained=True, **kwargs): + """Constructs a ResNeXt-101 32x8 model pre-trained on weakly-supervised data + and finetuned on ImageNet from Figure 5 in + `"Exploring the Limits of Weakly Supervised Pretraining" `_ + Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs) + return _create_resnet('ig_resnext101_32x8d', pretrained, **model_args) + + +@register_model +def ig_resnext101_32x16d(pretrained=True, **kwargs): + """Constructs a ResNeXt-101 32x16 model pre-trained on weakly-supervised data + and finetuned on ImageNet from Figure 5 in + `"Exploring the Limits of Weakly Supervised Pretraining" `_ + Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=16, **kwargs) + return _create_resnet('ig_resnext101_32x16d', pretrained, **model_args) + + +@register_model +def ig_resnext101_32x32d(pretrained=True, **kwargs): + """Constructs a ResNeXt-101 32x32 model pre-trained on weakly-supervised data + and finetuned on ImageNet from Figure 5 in + `"Exploring the Limits of Weakly Supervised Pretraining" `_ + Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=32, **kwargs) + return _create_resnet('ig_resnext101_32x32d', pretrained, **model_args) + + +@register_model +def ig_resnext101_32x48d(pretrained=True, **kwargs): + """Constructs a ResNeXt-101 32x48 model pre-trained on weakly-supervised data + and finetuned on ImageNet from Figure 5 in + `"Exploring the Limits of Weakly Supervised Pretraining" `_ + Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=48, **kwargs) + return _create_resnet('ig_resnext101_32x48d', pretrained, **model_args) + + +@register_model +def ssl_resnet18(pretrained=True, **kwargs): + """Constructs a semi-supervised ResNet-18 model pre-trained on YFCC100M dataset and finetuned on ImageNet + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs) + return _create_resnet('ssl_resnet18', pretrained, **model_args) + + +@register_model +def ssl_resnet50(pretrained=True, **kwargs): + """Constructs a semi-supervised ResNet-50 model pre-trained on YFCC100M dataset and finetuned on ImageNet + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) + return _create_resnet('ssl_resnet50', pretrained, **model_args) + + +@register_model +def ssl_resnext50_32x4d(pretrained=True, **kwargs): + """Constructs a semi-supervised ResNeXt-50 32x4 model pre-trained on YFCC100M dataset and finetuned on ImageNet + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + 
+    model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs)
+    return _create_resnet('ssl_resnext50_32x4d', pretrained, **model_args)
+
+
+@register_model
+def ssl_resnext101_32x4d(pretrained=True, **kwargs):
+    """Constructs a semi-supervised ResNeXt-101 32x4 model pre-trained on YFCC100M dataset and finetuned on ImageNet
+    `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
+    Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
+    """
+    model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs)
+    return _create_resnet('ssl_resnext101_32x4d', pretrained, **model_args)
+
+
+@register_model
+def ssl_resnext101_32x8d(pretrained=True, **kwargs):
+    """Constructs a semi-supervised ResNeXt-101 32x8 model pre-trained on YFCC100M dataset and finetuned on ImageNet
+    `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
+    Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
+    """
+    model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs)
+    return _create_resnet('ssl_resnext101_32x8d', pretrained, **model_args)
+
+
+@register_model
+def ssl_resnext101_32x16d(pretrained=True, **kwargs):
+    """Constructs a semi-supervised ResNeXt-101 32x16 model pre-trained on YFCC100M dataset and finetuned on ImageNet
+    `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
+    Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
+    """
+    model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=16, **kwargs)
+    return _create_resnet('ssl_resnext101_32x16d', pretrained, **model_args)
+
+
+@register_model
+def swsl_resnet18(pretrained=True, **kwargs):
+    """Constructs a semi-weakly supervised ResNet-18 model pre-trained on 1B weakly supervised
+    image dataset and finetuned on ImageNet.
+    `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
+    Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
+    """
+    model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs)
+    return _create_resnet('swsl_resnet18', pretrained, **model_args)
+
+
+@register_model
+def swsl_resnet50(pretrained=True, **kwargs):
+    """Constructs a semi-weakly supervised ResNet-50 model pre-trained on 1B weakly supervised
+    image dataset and finetuned on ImageNet.
+    `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
+    Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
+    """
+    model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs)
+    return _create_resnet('swsl_resnet50', pretrained, **model_args)
+
+
+@register_model
+def swsl_resnext50_32x4d(pretrained=True, **kwargs):
+    """Constructs a semi-weakly supervised ResNeXt-50 32x4 model pre-trained on 1B weakly supervised
+    image dataset and finetuned on ImageNet.
+    `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
+    Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
+    """
+    model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs)
+    return _create_resnet('swsl_resnext50_32x4d', pretrained, **model_args)
+
+
+@register_model
+def swsl_resnext101_32x4d(pretrained=True, **kwargs):
+    """Constructs a semi-weakly supervised ResNeXt-101 32x4 model pre-trained on 1B weakly supervised
+    image dataset and finetuned on ImageNet.
+ `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs) + return _create_resnet('swsl_resnext101_32x4d', pretrained, **model_args) + + +@register_model +def swsl_resnext101_32x8d(pretrained=True, **kwargs): + """Constructs a semi-weakly supervised ResNeXt-101 32x8 model pre-trained on 1B weakly supervised + image dataset and finetuned on ImageNet. + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs) + return _create_resnet('swsl_resnext101_32x8d', pretrained, **model_args) + + +@register_model +def swsl_resnext101_32x16d(pretrained=True, **kwargs): + """Constructs a semi-weakly supervised ResNeXt-101 32x16 model pre-trained on 1B weakly supervised + image dataset and finetuned on ImageNet. + `"Billion-scale Semi-Supervised Learning for Image Classification" `_ + Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=16, **kwargs) + return _create_resnet('swsl_resnext101_32x16d', pretrained, **model_args) + + +@register_model +def ecaresnet26t(pretrained=False, **kwargs): + """Constructs an ECA-ResNeXt-26-T model. + This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels + in the deep stem and ECA attn. + """ + model_args = dict( + block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32, + stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet26t', pretrained, **model_args) + + +@register_model +def ecaresnet50d(pretrained=False, **kwargs): + """Constructs a ResNet-50-D model with eca. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet50d', pretrained, **model_args) + + +@register_model +def resnetrs50(pretrained=False, **kwargs): + """Constructs a ResNet-RS-50 model. + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs50', pretrained, **model_args) + + +@register_model +def resnetrs101(pretrained=False, **kwargs): + """Constructs a ResNet-RS-101 model. 
+ Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs101', pretrained, **model_args) + + +@register_model +def resnetrs152(pretrained=False, **kwargs): + """Constructs a ResNet-RS-152 model. + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs152', pretrained, **model_args) + + +@register_model +def resnetrs200(pretrained=False, **kwargs): + """Constructs a ResNet-RS-200 model. + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs200', pretrained, **model_args) + + +@register_model +def resnetrs270(pretrained=False, **kwargs): + """Constructs a ResNet-RS-270 model. + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[4, 29, 53, 4], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs270', pretrained, **model_args) + + + +@register_model +def resnetrs350(pretrained=False, **kwargs): + """Constructs a ResNet-RS-350 model. + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[4, 36, 72, 4], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs350', pretrained, **model_args) + + +@register_model +def resnetrs420(pretrained=False, **kwargs): + """Constructs a ResNet-RS-420 model + Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 + Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs + """ + attn_layer = partial(get_attn('se'), rd_ratio=0.25) + model_args = dict( + block=Bottleneck, layers=[4, 44, 87, 4], stem_width=32, stem_type='deep', replace_stem_pool=True, + avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) + return _create_resnet('resnetrs420', pretrained, **model_args) + + +@register_model +def ecaresnet50d_pruned(pretrained=False, **kwargs): + """Constructs a ResNet-50-D model pruned with eca. 
+ The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet50d_pruned', pretrained, pruned=True, **model_args) + + +@register_model +def ecaresnet50t(pretrained=False, **kwargs): + """Constructs an ECA-ResNet-50-T model. + Like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels in the deep stem and ECA attn. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, + stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet50t', pretrained, **model_args) + + +@register_model +def ecaresnetlight(pretrained=False, **kwargs): + """Constructs a ResNet-50-D light model with eca. + """ + model_args = dict( + block=Bottleneck, layers=[1, 1, 11, 3], stem_width=32, avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnetlight', pretrained, **model_args) + + +@register_model +def ecaresnet101d(pretrained=False, **kwargs): + """Constructs a ResNet-101-D model with eca. + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet101d', pretrained, **model_args) + + +@register_model +def ecaresnet101d_pruned(pretrained=False, **kwargs): + """Constructs a ResNet-101-D model pruned with eca. + The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf + """ + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet101d_pruned', pretrained, pruned=True, **model_args) + + +@register_model +def ecaresnet200d(pretrained=False, **kwargs): + """Constructs a ResNet-200-D model with ECA. + """ + model_args = dict( + block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet200d', pretrained, **model_args) + + +@register_model +def ecaresnet269d(pretrained=False, **kwargs): + """Constructs a ResNet-269-D model with ECA. + """ + model_args = dict( + block=Bottleneck, layers=[3, 30, 48, 8], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnet269d', pretrained, **model_args) + + +@register_model +def ecaresnext26t_32x4d(pretrained=False, **kwargs): + """Constructs an ECA-ResNeXt-26-T model. + This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels + in the deep stem. This model replaces SE module with the ECA module + """ + model_args = dict( + block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32, + stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnext26t_32x4d', pretrained, **model_args) + + +@register_model +def ecaresnext50t_32x4d(pretrained=False, **kwargs): + """Constructs an ECA-ResNeXt-50-T model. + This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels + in the deep stem. 
This model replaces SE module with the ECA module + """ + model_args = dict( + block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32, + stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs) + return _create_resnet('ecaresnext50t_32x4d', pretrained, **model_args) + + +@register_model +def resnetblur18(pretrained=False, **kwargs): + """Constructs a ResNet-18 model with blur anti-aliasing + """ + model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], aa_layer=BlurPool2d, **kwargs) + return _create_resnet('resnetblur18', pretrained, **model_args) + + +@register_model +def resnetblur50(pretrained=False, **kwargs): + """Constructs a ResNet-50 model with blur anti-aliasing + """ + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], aa_layer=BlurPool2d, **kwargs) + return _create_resnet('resnetblur50', pretrained, **model_args) + + +@register_model +def seresnet18(pretrained=False, **kwargs): + model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet18', pretrained, **model_args) + + +@register_model +def seresnet34(pretrained=False, **kwargs): + model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet34', pretrained, **model_args) + + +@register_model +def seresnet50(pretrained=False, **kwargs): + model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet50', pretrained, **model_args) + + +@register_model +def seresnet50t(pretrained=False, **kwargs): + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep_tiered', avg_down=True, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet50t', pretrained, **model_args) + + +@register_model +def seresnet101(pretrained=False, **kwargs): + model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet101', pretrained, **model_args) + + +@register_model +def seresnet152(pretrained=False, **kwargs): + model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet152', pretrained, **model_args) + + +@register_model +def seresnet152d(pretrained=False, **kwargs): + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet152d', pretrained, **model_args) + + +@register_model +def seresnet200d(pretrained=False, **kwargs): + """Constructs a ResNet-200-D model with SE attn. + """ + model_args = dict( + block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet200d', pretrained, **model_args) + + +@register_model +def seresnet269d(pretrained=False, **kwargs): + """Constructs a ResNet-269-D model with SE attn. 
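+
+    Example (editorial note; reset_classifier is defined on the ResNet base class above):
+        >>> model = seresnet269d(pretrained=False, num_classes=10)
+        >>> model.reset_classifier(2)  # swap in a 2-class head, keeping the trunk weights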
+ """ + model_args = dict( + block=Bottleneck, layers=[3, 30, 48, 8], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnet269d', pretrained, **model_args) + + +@register_model +def seresnext26d_32x4d(pretrained=False, **kwargs): + """Constructs a SE-ResNeXt-26-D model.` + This is technically a 28 layer ResNet, using the 'D' modifier from Gluon / bag-of-tricks for + combination of deep stem and avg_pool in downsample. + """ + model_args = dict( + block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32, + stem_type='deep', avg_down=True, block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnext26d_32x4d', pretrained, **model_args) + + +@register_model +def seresnext26t_32x4d(pretrained=False, **kwargs): + """Constructs a SE-ResNet-26-T model. + This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels + in the deep stem. + """ + model_args = dict( + block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32, + stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnext26t_32x4d', pretrained, **model_args) + + +@register_model +def seresnext26tn_32x4d(pretrained=False, **kwargs): + """Constructs a SE-ResNeXt-26-T model. + NOTE I deprecated previous 't' model defs and replaced 't' with 'tn', this was the only tn model of note + so keeping this def for backwards compat with any uses out there. Old 't' model is lost. + """ + return seresnext26t_32x4d(pretrained=pretrained, **kwargs) + + +@register_model +def seresnext50_32x4d(pretrained=False, **kwargs): + model_args = dict( + block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnext50_32x4d', pretrained, **model_args) + + +@register_model +def seresnext101_32x4d(pretrained=False, **kwargs): + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnext101_32x4d', pretrained, **model_args) + + +@register_model +def seresnext101_32x8d(pretrained=False, **kwargs): + model_args = dict( + block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, + block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('seresnext101_32x8d', pretrained, **model_args) + + +@register_model +def senet154(pretrained=False, **kwargs): + model_args = dict( + block=Bottleneck, layers=[3, 8, 36, 3], cardinality=64, base_width=4, stem_type='deep', + down_kernel_size=3, block_reduce_first=2, block_args=dict(attn_layer='se'), **kwargs) + return _create_resnet('senet154', pretrained, **model_args) diff --git a/timm/models/resnetv2.py b/timm/models/resnetv2.py new file mode 100644 index 0000000..e38eaf5 --- /dev/null +++ b/timm/models/resnetv2.py @@ -0,0 +1,672 @@ +"""Pre-Activation ResNet v2 with GroupNorm and Weight Standardization. + +A PyTorch implementation of ResNetV2 adapted from the Google Big-Transfoer (BiT) source code +at https://github.com/google-research/big_transfer to match timm interfaces. The BiT weights have +been included here as pretrained models from their original .NPZ checkpoints. 
+ +Additionally, supports non pre-activation bottleneck for use as a backbone for Vision Transfomers (ViT) and +extra padding support to allow porting of official Hybrid ResNet pretrained weights from +https://github.com/google-research/vision_transformer + +Thanks to the Google team for the above two repositories and associated papers: +* Big Transfer (BiT): General Visual Representation Learning - https://arxiv.org/abs/1912.11370 +* An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale - https://arxiv.org/abs/2010.11929 +* Knowledge distillation: A good teacher is patient and consistent - https://arxiv.org/abs/2106.05237 + +Original copyright of Google code below, modifications by Ross Wightman, Copyright 2020. +""" +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from collections import OrderedDict # pylint: disable=g-importing-member + +import torch +import torch.nn as nn +from functools import partial + +from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .helpers import build_model_with_cfg, named_apply, adapt_input_conv +from .registry import register_model +from .layers import GroupNormAct, BatchNormAct2d, EvoNormBatch2d, EvoNormSample2d,\ + ClassifierHead, DropPath, AvgPool2dSame, create_pool2d, StdConv2d, create_conv2d + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'stem.conv', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = { + # pretrained on imagenet21k, finetuned on imagenet1k + 'resnetv2_50x1_bitm': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R50x1-ILSVRC2012.npz', + input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0), + 'resnetv2_50x3_bitm': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R50x3-ILSVRC2012.npz', + input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0), + 'resnetv2_101x1_bitm': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R101x1-ILSVRC2012.npz', + input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0), + 'resnetv2_101x3_bitm': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R101x3-ILSVRC2012.npz', + input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0), + 'resnetv2_152x2_bitm': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R152x2-ILSVRC2012.npz', + input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0), + 'resnetv2_152x4_bitm': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R152x4-ILSVRC2012.npz', + input_size=(3, 480, 480), pool_size=(15, 15), crop_pct=1.0), # only one at 480x480? 
+ + # trained on imagenet-21k + 'resnetv2_50x1_bitm_in21k': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R50x1.npz', + num_classes=21843), + 'resnetv2_50x3_bitm_in21k': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R50x3.npz', + num_classes=21843), + 'resnetv2_101x1_bitm_in21k': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R101x1.npz', + num_classes=21843), + 'resnetv2_101x3_bitm_in21k': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R101x3.npz', + num_classes=21843), + 'resnetv2_152x2_bitm_in21k': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R152x2.npz', + num_classes=21843), + 'resnetv2_152x4_bitm_in21k': _cfg( + url='https://storage.googleapis.com/bit_models/BiT-M-R152x4.npz', + num_classes=21843), + + 'resnetv2_50x1_bit_distilled': _cfg( + url='https://storage.googleapis.com/bit_models/distill/R50x1_224.npz', + interpolation='bicubic'), + 'resnetv2_152x2_bit_teacher': _cfg( + url='https://storage.googleapis.com/bit_models/distill/R152x2_T_224.npz', + interpolation='bicubic'), + 'resnetv2_152x2_bit_teacher_384': _cfg( + url='https://storage.googleapis.com/bit_models/distill/R152x2_T_384.npz', + input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, interpolation='bicubic'), + + 'resnetv2_50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnetv2_50_a1h-000cdf49.pth', + interpolation='bicubic', crop_pct=0.95), + 'resnetv2_50d': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), + 'resnetv2_50t': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), + 'resnetv2_101': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnetv2_101_a1h-5d01f016.pth', + interpolation='bicubic', crop_pct=0.95), + 'resnetv2_101d': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), + 'resnetv2_152': _cfg( + interpolation='bicubic'), + 'resnetv2_152d': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), + + 'resnetv2_50d_gn': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), + 'resnetv2_50d_evob': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), + 'resnetv2_50d_evos': _cfg( + interpolation='bicubic', first_conv='stem.conv1'), +} + + +def make_div(v, divisor=8): + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + if new_v < 0.9 * v: + new_v += divisor + return new_v + + +class PreActBottleneck(nn.Module): + """Pre-activation (v2) bottleneck block. + + Follows the implementation of "Identity Mappings in Deep Residual Networks": + https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua + + Except it puts the stride on 3x3 conv when available. 
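+
+    Each unit computes norm -> act -> conv (the default GroupNormAct fuses the norm
+    and its activation). When a projection shortcut is present it consumes the
+    pre-activated input, while the identity shortcut takes the raw input (see forward()).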
+ """ + + def __init__( + self, in_chs, out_chs=None, bottle_ratio=0.25, stride=1, dilation=1, first_dilation=None, groups=1, + act_layer=None, conv_layer=None, norm_layer=None, proj_layer=None, drop_path_rate=0.): + super().__init__() + first_dilation = first_dilation or dilation + conv_layer = conv_layer or StdConv2d + norm_layer = norm_layer or partial(GroupNormAct, num_groups=32) + out_chs = out_chs or in_chs + mid_chs = make_div(out_chs * bottle_ratio) + + if proj_layer is not None: + self.downsample = proj_layer( + in_chs, out_chs, stride=stride, dilation=dilation, first_dilation=first_dilation, preact=True, + conv_layer=conv_layer, norm_layer=norm_layer) + else: + self.downsample = None + + self.norm1 = norm_layer(in_chs) + self.conv1 = conv_layer(in_chs, mid_chs, 1) + self.norm2 = norm_layer(mid_chs) + self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups) + self.norm3 = norm_layer(mid_chs) + self.conv3 = conv_layer(mid_chs, out_chs, 1) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() + + def zero_init_last(self): + nn.init.zeros_(self.conv3.weight) + + def forward(self, x): + x_preact = self.norm1(x) + + # shortcut branch + shortcut = x + if self.downsample is not None: + shortcut = self.downsample(x_preact) + + # residual branch + x = self.conv1(x_preact) + x = self.conv2(self.norm2(x)) + x = self.conv3(self.norm3(x)) + x = self.drop_path(x) + return x + shortcut + + +class Bottleneck(nn.Module): + """Non Pre-activation bottleneck block, equiv to V1.5/V1b Bottleneck. Used for ViT. + """ + def __init__( + self, in_chs, out_chs=None, bottle_ratio=0.25, stride=1, dilation=1, first_dilation=None, groups=1, + act_layer=None, conv_layer=None, norm_layer=None, proj_layer=None, drop_path_rate=0.): + super().__init__() + first_dilation = first_dilation or dilation + act_layer = act_layer or nn.ReLU + conv_layer = conv_layer or StdConv2d + norm_layer = norm_layer or partial(GroupNormAct, num_groups=32) + out_chs = out_chs or in_chs + mid_chs = make_div(out_chs * bottle_ratio) + + if proj_layer is not None: + self.downsample = proj_layer( + in_chs, out_chs, stride=stride, dilation=dilation, preact=False, + conv_layer=conv_layer, norm_layer=norm_layer) + else: + self.downsample = None + + self.conv1 = conv_layer(in_chs, mid_chs, 1) + self.norm1 = norm_layer(mid_chs) + self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups) + self.norm2 = norm_layer(mid_chs) + self.conv3 = conv_layer(mid_chs, out_chs, 1) + self.norm3 = norm_layer(out_chs, apply_act=False) + self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() + self.act3 = act_layer(inplace=True) + + def zero_init_last(self): + nn.init.zeros_(self.norm3.weight) + + def forward(self, x): + # shortcut branch + shortcut = x + if self.downsample is not None: + shortcut = self.downsample(x) + + # residual + x = self.conv1(x) + x = self.norm1(x) + x = self.conv2(x) + x = self.norm2(x) + x = self.conv3(x) + x = self.norm3(x) + x = self.drop_path(x) + x = self.act3(x + shortcut) + return x + + +class DownsampleConv(nn.Module): + def __init__( + self, in_chs, out_chs, stride=1, dilation=1, first_dilation=None, preact=True, + conv_layer=None, norm_layer=None): + super(DownsampleConv, self).__init__() + self.conv = conv_layer(in_chs, out_chs, 1, stride=stride) + self.norm = nn.Identity() if preact else norm_layer(out_chs, apply_act=False) + + def forward(self, x): + return self.norm(self.conv(x)) + + 
+class DownsampleAvg(nn.Module): + def __init__( + self, in_chs, out_chs, stride=1, dilation=1, first_dilation=None, + preact=True, conv_layer=None, norm_layer=None): + """ AvgPool Downsampling as in 'D' ResNet variants. This is not in RegNet space but I might experiment.""" + super(DownsampleAvg, self).__init__() + avg_stride = stride if dilation == 1 else 1 + if stride > 1 or dilation > 1: + avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d + self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) + else: + self.pool = nn.Identity() + self.conv = conv_layer(in_chs, out_chs, 1, stride=1) + self.norm = nn.Identity() if preact else norm_layer(out_chs, apply_act=False) + + def forward(self, x): + return self.norm(self.conv(self.pool(x))) + + +class ResNetStage(nn.Module): + """ResNet Stage.""" + def __init__(self, in_chs, out_chs, stride, dilation, depth, bottle_ratio=0.25, groups=1, + avg_down=False, block_dpr=None, block_fn=PreActBottleneck, + act_layer=None, conv_layer=None, norm_layer=None, **block_kwargs): + super(ResNetStage, self).__init__() + first_dilation = 1 if dilation in (1, 2) else 2 + layer_kwargs = dict(act_layer=act_layer, conv_layer=conv_layer, norm_layer=norm_layer) + proj_layer = DownsampleAvg if avg_down else DownsampleConv + prev_chs = in_chs + self.blocks = nn.Sequential() + for block_idx in range(depth): + drop_path_rate = block_dpr[block_idx] if block_dpr else 0. + stride = stride if block_idx == 0 else 1 + self.blocks.add_module(str(block_idx), block_fn( + prev_chs, out_chs, stride=stride, dilation=dilation, bottle_ratio=bottle_ratio, groups=groups, + first_dilation=first_dilation, proj_layer=proj_layer, drop_path_rate=drop_path_rate, + **layer_kwargs, **block_kwargs)) + prev_chs = out_chs + first_dilation = dilation + proj_layer = None + + def forward(self, x): + x = self.blocks(x) + return x + + +def is_stem_deep(stem_type): + return any([s in stem_type for s in ('deep', 'tiered')]) + + +def create_resnetv2_stem( + in_chs, out_chs=64, stem_type='', preact=True, + conv_layer=StdConv2d, norm_layer=partial(GroupNormAct, num_groups=32)): + stem = OrderedDict() + assert stem_type in ('', 'fixed', 'same', 'deep', 'deep_fixed', 'deep_same', 'tiered') + + # NOTE conv padding mode can be changed by overriding the conv_layer def + if is_stem_deep(stem_type): + # A 3 deep 3x3 conv stack as in ResNet V1D models + if 'tiered' in stem_type: + stem_chs = (3 * out_chs // 8, out_chs // 2) # 'T' resnets in resnet.py + else: + stem_chs = (out_chs // 2, out_chs // 2) # 'D' ResNets + stem['conv1'] = conv_layer(in_chs, stem_chs[0], kernel_size=3, stride=2) + stem['norm1'] = norm_layer(stem_chs[0]) + stem['conv2'] = conv_layer(stem_chs[0], stem_chs[1], kernel_size=3, stride=1) + stem['norm2'] = norm_layer(stem_chs[1]) + stem['conv3'] = conv_layer(stem_chs[1], out_chs, kernel_size=3, stride=1) + if not preact: + stem['norm3'] = norm_layer(out_chs) + else: + # The usual 7x7 stem conv + stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=7, stride=2) + if not preact: + stem['norm'] = norm_layer(out_chs) + + if 'fixed' in stem_type: + # 'fixed' SAME padding approximation that is used in BiT models + stem['pad'] = nn.ConstantPad2d(1, 0.) 
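+        # the explicit 1-pixel zero-pad plus the padding=0 pool below approximate
+        # TF 'SAME' padding for the stride-2 3x3 max-pool, matching the BiT weights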
+        stem['pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=0)
+    elif 'same' in stem_type:
+        # full, input size based 'SAME' padding, used in ViT Hybrid model
+        stem['pool'] = create_pool2d('max', kernel_size=3, stride=2, padding='same')
+    else:
+        # the usual PyTorch symmetric padding
+        stem['pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+
+    return nn.Sequential(stem)
+
+
+class ResNetV2(nn.Module):
+    """Implementation of Pre-activation (v2) ResNet models.
+    """
+
+    def __init__(
+            self, layers, channels=(256, 512, 1024, 2048),
+            num_classes=1000, in_chans=3, global_pool='avg', output_stride=32,
+            width_factor=1, stem_chs=64, stem_type='', avg_down=False, preact=True,
+            act_layer=nn.ReLU, conv_layer=StdConv2d, norm_layer=partial(GroupNormAct, num_groups=32),
+            drop_rate=0., drop_path_rate=0., zero_init_last=False):
+        super().__init__()
+        self.num_classes = num_classes
+        self.drop_rate = drop_rate
+        wf = width_factor
+
+        self.feature_info = []
+        stem_chs = make_div(stem_chs * wf)
+        self.stem = create_resnetv2_stem(
+            in_chans, stem_chs, stem_type, preact, conv_layer=conv_layer, norm_layer=norm_layer)
+        stem_feat = ('stem.conv3' if is_stem_deep(stem_type) else 'stem.conv') if preact else 'stem.norm'
+        self.feature_info.append(dict(num_chs=stem_chs, reduction=2, module=stem_feat))
+
+        prev_chs = stem_chs
+        curr_stride = 4
+        dilation = 1
+        block_dprs = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(layers)).split(layers)]
+        block_fn = PreActBottleneck if preact else Bottleneck
+        self.stages = nn.Sequential()
+        for stage_idx, (d, c, bdpr) in enumerate(zip(layers, channels, block_dprs)):
+            out_chs = make_div(c * wf)
+            stride = 1 if stage_idx == 0 else 2
+            if curr_stride >= output_stride:
+                dilation *= stride
+                stride = 1
+            stage = ResNetStage(
+                prev_chs, out_chs, stride=stride, dilation=dilation, depth=d, avg_down=avg_down,
+                act_layer=act_layer, conv_layer=conv_layer, norm_layer=norm_layer, block_dpr=bdpr, block_fn=block_fn)
+            prev_chs = out_chs
+            curr_stride *= stride
+            self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{stage_idx}')]
+            self.stages.add_module(str(stage_idx), stage)
+
+        self.num_features = prev_chs
+        self.norm = norm_layer(self.num_features) if preact else nn.Identity()
+        self.head = ClassifierHead(
+            self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, use_conv=True)
+
+        self.init_weights(zero_init_last=zero_init_last)
+
+    def init_weights(self, zero_init_last=True):
+        named_apply(partial(_init_weights, zero_init_last=zero_init_last), self)
+
+    @torch.jit.ignore()
+    def load_pretrained(self, checkpoint_path, prefix='resnet/'):
+        _load_weights(self, checkpoint_path, prefix)
+
+    def get_classifier(self):
+        return self.head.fc
+
+    def reset_classifier(self, num_classes, global_pool='avg'):
+        self.num_classes = num_classes
+        self.head = ClassifierHead(
+            self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, use_conv=True)
+
+    def forward_features(self, x):
+        x = self.stem(x)
+        x = self.stages(x)
+        x = self.norm(x)
+        return x
+
+    def forward(self, x):
+        x = self.forward_features(x)
+        x = self.head(x)
+        return x
+
+
+def _init_weights(module: nn.Module, name: str = '', zero_init_last=True):
+    if isinstance(module, nn.Linear) or ('head.fc' in name and isinstance(module, nn.Conv2d)):
+        nn.init.normal_(module.weight, mean=0.0, std=0.01)
+        nn.init.zeros_(module.bias)
+    elif isinstance(module, nn.Conv2d):
+        nn.init.kaiming_normal_(module.weight, mode='fan_out',
nonlinearity='relu') + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, (nn.BatchNorm2d, nn.LayerNorm, nn.GroupNorm)): + nn.init.ones_(module.weight) + nn.init.zeros_(module.bias) + elif zero_init_last and hasattr(module, 'zero_init_last'): + module.zero_init_last() + + +@torch.no_grad() +def _load_weights(model: nn.Module, checkpoint_path: str, prefix: str = 'resnet/'): + import numpy as np + + def t2p(conv_weights): + """Possibly convert HWIO to OIHW.""" + if conv_weights.ndim == 4: + conv_weights = conv_weights.transpose([3, 2, 0, 1]) + return torch.from_numpy(conv_weights) + + weights = np.load(checkpoint_path) + stem_conv_w = adapt_input_conv( + model.stem.conv.weight.shape[1], t2p(weights[f'{prefix}root_block/standardized_conv2d/kernel'])) + model.stem.conv.weight.copy_(stem_conv_w) + model.norm.weight.copy_(t2p(weights[f'{prefix}group_norm/gamma'])) + model.norm.bias.copy_(t2p(weights[f'{prefix}group_norm/beta'])) + if isinstance(getattr(model.head, 'fc', None), nn.Conv2d) and \ + model.head.fc.weight.shape[0] == weights[f'{prefix}head/conv2d/kernel'].shape[-1]: + model.head.fc.weight.copy_(t2p(weights[f'{prefix}head/conv2d/kernel'])) + model.head.fc.bias.copy_(t2p(weights[f'{prefix}head/conv2d/bias'])) + for i, (sname, stage) in enumerate(model.stages.named_children()): + for j, (bname, block) in enumerate(stage.blocks.named_children()): + cname = 'standardized_conv2d' + block_prefix = f'{prefix}block{i + 1}/unit{j + 1:02d}/' + block.conv1.weight.copy_(t2p(weights[f'{block_prefix}a/{cname}/kernel'])) + block.conv2.weight.copy_(t2p(weights[f'{block_prefix}b/{cname}/kernel'])) + block.conv3.weight.copy_(t2p(weights[f'{block_prefix}c/{cname}/kernel'])) + block.norm1.weight.copy_(t2p(weights[f'{block_prefix}a/group_norm/gamma'])) + block.norm2.weight.copy_(t2p(weights[f'{block_prefix}b/group_norm/gamma'])) + block.norm3.weight.copy_(t2p(weights[f'{block_prefix}c/group_norm/gamma'])) + block.norm1.bias.copy_(t2p(weights[f'{block_prefix}a/group_norm/beta'])) + block.norm2.bias.copy_(t2p(weights[f'{block_prefix}b/group_norm/beta'])) + block.norm3.bias.copy_(t2p(weights[f'{block_prefix}c/group_norm/beta'])) + if block.downsample is not None: + w = weights[f'{block_prefix}a/proj/{cname}/kernel'] + block.downsample.conv.weight.copy_(t2p(w)) + + +def _create_resnetv2(variant, pretrained=False, **kwargs): + feature_cfg = dict(flatten_sequential=True) + return build_model_with_cfg( + ResNetV2, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=feature_cfg, + pretrained_custom_load='_bit' in variant, + **kwargs) + + +def _create_resnetv2_bit(variant, pretrained=False, **kwargs): + return _create_resnetv2( + variant, pretrained=pretrained, stem_type='fixed', conv_layer=partial(StdConv2d, eps=1e-8), **kwargs) + + +@register_model +def resnetv2_50x1_bitm(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_50x1_bitm', pretrained=pretrained, layers=[3, 4, 6, 3], width_factor=1, **kwargs) + + +@register_model +def resnetv2_50x3_bitm(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_50x3_bitm', pretrained=pretrained, layers=[3, 4, 6, 3], width_factor=3, **kwargs) + + +@register_model +def resnetv2_101x1_bitm(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_101x1_bitm', pretrained=pretrained, layers=[3, 4, 23, 3], width_factor=1, **kwargs) + + +@register_model +def resnetv2_101x3_bitm(pretrained=False, **kwargs): + return _create_resnetv2_bit( + 'resnetv2_101x3_bitm', 
pretrained=pretrained, layers=[3, 4, 23, 3], width_factor=3, **kwargs)
+
+
+@register_model
+def resnetv2_152x2_bitm(pretrained=False, **kwargs):
+    return _create_resnetv2_bit(
+        'resnetv2_152x2_bitm', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=2, **kwargs)
+
+
+@register_model
+def resnetv2_152x4_bitm(pretrained=False, **kwargs):
+    return _create_resnetv2_bit(
+        'resnetv2_152x4_bitm', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=4, **kwargs)
+
+
+@register_model
+def resnetv2_50x1_bitm_in21k(pretrained=False, **kwargs):
+    return _create_resnetv2_bit(
+        'resnetv2_50x1_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843),
+        layers=[3, 4, 6, 3], width_factor=1, **kwargs)
+
+
+@register_model
+def resnetv2_50x3_bitm_in21k(pretrained=False, **kwargs):
+    return _create_resnetv2_bit(
+        'resnetv2_50x3_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843),
+        layers=[3, 4, 6, 3], width_factor=3, **kwargs)
+
+
+@register_model
+def resnetv2_101x1_bitm_in21k(pretrained=False, **kwargs):
+    return _create_resnetv2_bit(
+        'resnetv2_101x1_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843),
+        layers=[3, 4, 23, 3], width_factor=1, **kwargs)
+
+
+@register_model
+def resnetv2_101x3_bitm_in21k(pretrained=False, **kwargs):
+    return _create_resnetv2_bit(
+        'resnetv2_101x3_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843),
+        layers=[3, 4, 23, 3], width_factor=3, **kwargs)
+
+
+@register_model
+def resnetv2_152x2_bitm_in21k(pretrained=False, **kwargs):
+    return _create_resnetv2_bit(
+        'resnetv2_152x2_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843),
+        layers=[3, 8, 36, 3], width_factor=2, **kwargs)
+
+
+@register_model
+def resnetv2_152x4_bitm_in21k(pretrained=False, **kwargs):
+    return _create_resnetv2_bit(
+        'resnetv2_152x4_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843),
+        layers=[3, 8, 36, 3], width_factor=4, **kwargs)
+
+
+@register_model
+def resnetv2_50x1_bit_distilled(pretrained=False, **kwargs):
+    """ ResNetV2-50x1-BiT Distilled
+    Paper: Knowledge distillation: A good teacher is patient and consistent - https://arxiv.org/abs/2106.05237
+    """
+    return _create_resnetv2_bit(
+        'resnetv2_50x1_bit_distilled', pretrained=pretrained, layers=[3, 4, 6, 3], width_factor=1, **kwargs)
+
+
+@register_model
+def resnetv2_152x2_bit_teacher(pretrained=False, **kwargs):
+    """ ResNetV2-152x2-BiT Teacher
+    Paper: Knowledge distillation: A good teacher is patient and consistent - https://arxiv.org/abs/2106.05237
+    """
+    return _create_resnetv2_bit(
+        'resnetv2_152x2_bit_teacher', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=2, **kwargs)
+
+
+@register_model
+def resnetv2_152x2_bit_teacher_384(pretrained=False, **kwargs):
+    """ ResNetV2-152x2-BiT Teacher @ 384x384
+    Paper: Knowledge distillation: A good teacher is patient and consistent - https://arxiv.org/abs/2106.05237
+    """
+    return _create_resnetv2_bit(
+        'resnetv2_152x2_bit_teacher_384', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=2, **kwargs)
+
+
+@register_model
+def resnetv2_50(pretrained=False, **kwargs):
+    return _create_resnetv2(
+        'resnetv2_50', pretrained=pretrained,
+        layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, **kwargs)
+
+
+@register_model
+def resnetv2_50d(pretrained=False, **kwargs):
+    return _create_resnetv2(
+        'resnetv2_50d', pretrained=pretrained,
+        layers=[3, 4, 6, 3], conv_layer=create_conv2d,
norm_layer=BatchNormAct2d, + stem_type='deep', avg_down=True, **kwargs) + + +@register_model +def resnetv2_50t(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_50t', pretrained=pretrained, + layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, + stem_type='tiered', avg_down=True, **kwargs) + + +@register_model +def resnetv2_101(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_101', pretrained=pretrained, + layers=[3, 4, 23, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, **kwargs) + + +@register_model +def resnetv2_101d(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_101d', pretrained=pretrained, + layers=[3, 4, 23, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, + stem_type='deep', avg_down=True, **kwargs) + + +@register_model +def resnetv2_152(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_152', pretrained=pretrained, + layers=[3, 8, 36, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, **kwargs) + + +@register_model +def resnetv2_152d(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_152d', pretrained=pretrained, + layers=[3, 8, 36, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, + stem_type='deep', avg_down=True, **kwargs) + + +# Experimental configs (may change / be removed) + +@register_model +def resnetv2_50d_gn(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_50d_gn', pretrained=pretrained, + layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=GroupNormAct, + stem_type='deep', avg_down=True, **kwargs) + + +@register_model +def resnetv2_50d_evob(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_50d_evob', pretrained=pretrained, + layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=EvoNormBatch2d, + stem_type='deep', avg_down=True, **kwargs) + + +@register_model +def resnetv2_50d_evos(pretrained=False, **kwargs): + return _create_resnetv2( + 'resnetv2_50d_evos', pretrained=pretrained, + layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=EvoNormSample2d, + stem_type='deep', avg_down=True, **kwargs) diff --git a/timm/models/rexnet.py b/timm/models/rexnet.py new file mode 100644 index 0000000..f27ce5d --- /dev/null +++ b/timm/models/rexnet.py @@ -0,0 +1,239 @@ +""" ReXNet + +A PyTorch impl of `ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network` - +https://arxiv.org/abs/2007.00992 + +Adapted from original impl at https://github.com/clovaai/rexnet +Copyright (c) 2020-present NAVER Corp. 
MIT license + +Changes for timm, feature extraction, and rounded channel variant hacked together by Ross Wightman +Copyright 2020 Ross Wightman +""" + +import torch +import torch.nn as nn +from functools import partial +from math import ceil + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import ClassifierHead, create_act_layer, ConvBnAct, DropPath, make_divisible, SEModule +from .registry import register_model +from .efficientnet_builder import efficientnet_init_weights + + +def _cfg(url=''): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.conv', 'classifier': 'head.fc', + } + + +default_cfgs = dict( + rexnet_100=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_100-1b4dddf4.pth'), + rexnet_130=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_130-590d768e.pth'), + rexnet_150=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_150-bd1a6aa8.pth'), + rexnet_200=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rexnet/rexnetv1_200-8c0b7f2d.pth'), + rexnetr_100=_cfg( + url=''), + rexnetr_130=_cfg( + url=''), + rexnetr_150=_cfg( + url=''), + rexnetr_200=_cfg( + url=''), +) + +SEWithNorm = partial(SEModule, norm_layer=nn.BatchNorm2d) + + +class LinearBottleneck(nn.Module): + def __init__(self, in_chs, out_chs, stride, exp_ratio=1.0, se_ratio=0., ch_div=1, + act_layer='swish', dw_act_layer='relu6', drop_path=None): + super(LinearBottleneck, self).__init__() + self.use_shortcut = stride == 1 and in_chs <= out_chs + self.in_channels = in_chs + self.out_channels = out_chs + + if exp_ratio != 1.: + dw_chs = make_divisible(round(in_chs * exp_ratio), divisor=ch_div) + self.conv_exp = ConvBnAct(in_chs, dw_chs, act_layer=act_layer) + else: + dw_chs = in_chs + self.conv_exp = None + + self.conv_dw = ConvBnAct(dw_chs, dw_chs, 3, stride=stride, groups=dw_chs, apply_act=False) + if se_ratio > 0: + self.se = SEWithNorm(dw_chs, rd_channels=make_divisible(int(dw_chs * se_ratio), ch_div)) + else: + self.se = None + self.act_dw = create_act_layer(dw_act_layer) + + self.conv_pwl = ConvBnAct(dw_chs, out_chs, 1, apply_act=False) + self.drop_path = drop_path + + def feat_channels(self, exp=False): + return self.conv_dw.out_channels if exp else self.out_channels + + def forward(self, x): + shortcut = x + if self.conv_exp is not None: + x = self.conv_exp(x) + x = self.conv_dw(x) + if self.se is not None: + x = self.se(x) + x = self.act_dw(x) + x = self.conv_pwl(x) + if self.use_shortcut: + if self.drop_path is not None: + x = self.drop_path(x) + x = torch.cat([x[:, 0:self.in_channels] + shortcut, x[:, self.in_channels:]], dim=1) + return x + + +def _block_cfg(width_mult=1.0, depth_mult=1.0, initial_chs=16, final_chs=180, se_ratio=0., ch_div=1): + layers = [1, 2, 2, 3, 3, 5] + strides = [1, 2, 2, 2, 1, 2] + layers = [ceil(element * depth_mult) for element in layers] + strides = sum([[element] + [1] * (layers[idx] - 1) for idx, element in enumerate(strides)], []) + exp_ratios = [1] * layers[0] + [6] * sum(layers[1:]) + depth = sum(layers[:]) * 3 + base_chs = initial_chs / width_mult if width_mult < 1.0 else initial_chs + + # The following channel configuration is a simple 
instance to make each layer become an expand layer. + out_chs_list = [] + for i in range(depth // 3): + out_chs_list.append(make_divisible(round(base_chs * width_mult), divisor=ch_div)) + base_chs += final_chs / (depth // 3 * 1.0) + + se_ratios = [0.] * (layers[0] + layers[1]) + [se_ratio] * sum(layers[2:]) + + return list(zip(out_chs_list, exp_ratios, strides, se_ratios)) + + +def _build_blocks( + block_cfg, prev_chs, width_mult, ch_div=1, act_layer='swish', dw_act_layer='relu6', drop_path_rate=0.): + feat_chs = [prev_chs] + feature_info = [] + curr_stride = 2 + features = [] + num_blocks = len(block_cfg) + for block_idx, (chs, exp_ratio, stride, se_ratio) in enumerate(block_cfg): + if stride > 1: + fname = 'stem' if block_idx == 0 else f'features.{block_idx - 1}' + feature_info += [dict(num_chs=feat_chs[-1], reduction=curr_stride, module=fname)] + curr_stride *= stride + block_dpr = drop_path_rate * block_idx / (num_blocks - 1) # stochastic depth linear decay rule + drop_path = DropPath(block_dpr) if block_dpr > 0. else None + features.append(LinearBottleneck( + in_chs=prev_chs, out_chs=chs, exp_ratio=exp_ratio, stride=stride, se_ratio=se_ratio, + ch_div=ch_div, act_layer=act_layer, dw_act_layer=dw_act_layer, drop_path=drop_path)) + prev_chs = chs + feat_chs += [features[-1].feat_channels()] + pen_chs = make_divisible(1280 * width_mult, divisor=ch_div) + feature_info += [dict(num_chs=feat_chs[-1], reduction=curr_stride, module=f'features.{len(features) - 1}')] + features.append(ConvBnAct(prev_chs, pen_chs, act_layer=act_layer)) + return features, feature_info + + +class ReXNetV1(nn.Module): + def __init__(self, in_chans=3, num_classes=1000, global_pool='avg', output_stride=32, + initial_chs=16, final_chs=180, width_mult=1.0, depth_mult=1.0, se_ratio=1/12., + ch_div=1, act_layer='swish', dw_act_layer='relu6', drop_rate=0.2, drop_path_rate=0.): + super(ReXNetV1, self).__init__() + self.drop_rate = drop_rate + self.num_classes = num_classes + + assert output_stride == 32 # FIXME support dilation + stem_base_chs = 32 / width_mult if width_mult < 1.0 else 32 + stem_chs = make_divisible(round(stem_base_chs * width_mult), divisor=ch_div) + self.stem = ConvBnAct(in_chans, stem_chs, 3, stride=2, act_layer=act_layer) + + block_cfg = _block_cfg(width_mult, depth_mult, initial_chs, final_chs, se_ratio, ch_div) + features, self.feature_info = _build_blocks( + block_cfg, stem_chs, width_mult, ch_div, act_layer, dw_act_layer, drop_path_rate) + self.num_features = features[-1].out_channels + self.features = nn.Sequential(*features) + + self.head = ClassifierHead(self.num_features, num_classes, global_pool, drop_rate) + + efficientnet_init_weights(self) + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + x = self.stem(x) + x = self.features(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _create_rexnet(variant, pretrained, **kwargs): + feature_cfg = dict(flatten_sequential=True) + return build_model_with_cfg( + ReXNetV1, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=feature_cfg, + **kwargs) + + +@register_model +def rexnet_100(pretrained=False, **kwargs): + """ReXNet V1 1.0x""" + return _create_rexnet('rexnet_100', pretrained, **kwargs) + + +@register_model +def rexnet_130(pretrained=False, **kwargs): + 
"""ReXNet V1 1.3x""" + return _create_rexnet('rexnet_130', pretrained, width_mult=1.3, **kwargs) + + +@register_model +def rexnet_150(pretrained=False, **kwargs): + """ReXNet V1 1.5x""" + return _create_rexnet('rexnet_150', pretrained, width_mult=1.5, **kwargs) + + +@register_model +def rexnet_200(pretrained=False, **kwargs): + """ReXNet V1 2.0x""" + return _create_rexnet('rexnet_200', pretrained, width_mult=2.0, **kwargs) + + +@register_model +def rexnetr_100(pretrained=False, **kwargs): + """ReXNet V1 1.0x w/ rounded (mod 8) channels""" + return _create_rexnet('rexnetr_100', pretrained, ch_div=8, **kwargs) + + +@register_model +def rexnetr_130(pretrained=False, **kwargs): + """ReXNet V1 1.3x w/ rounded (mod 8) channels""" + return _create_rexnet('rexnetr_130', pretrained, width_mult=1.3, ch_div=8, **kwargs) + + +@register_model +def rexnetr_150(pretrained=False, **kwargs): + """ReXNet V1 1.5x w/ rounded (mod 8) channels""" + return _create_rexnet('rexnetr_150', pretrained, width_mult=1.5, ch_div=8, **kwargs) + + +@register_model +def rexnetr_200(pretrained=False, **kwargs): + """ReXNet V1 2.0x w/ rounded (mod 8) channels""" + return _create_rexnet('rexnetr_200', pretrained, width_mult=2.0, ch_div=8, **kwargs) diff --git a/timm/models/selecsls.py b/timm/models/selecsls.py new file mode 100644 index 0000000..1f3379d --- /dev/null +++ b/timm/models/selecsls.py @@ -0,0 +1,362 @@ +"""PyTorch SelecSLS Net example for ImageNet Classification +License: CC BY 4.0 (https://creativecommons.org/licenses/by/4.0/legalcode) +Author: Dushyant Mehta (@mehtadushy) + +SelecSLS (core) Network Architecture as proposed in "XNect: Real-time Multi-person 3D +Human Pose Estimation with a Single RGB Camera, Mehta et al." +https://arxiv.org/abs/1907.00837 + +Based on ResNet implementation in https://github.com/rwightman/pytorch-image-models +and SelecSLS Net implementation in https://github.com/mehtadushy/SelecSLS-Pytorch +""" +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import create_classifier +from .registry import register_model + +__all__ = ['SelecSLS'] # model_registry will add each entrypoint fn to this + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (4, 4), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.0', 'classifier': 'fc', + **kwargs + } + + +default_cfgs = { + 'selecsls42': _cfg( + url='', + interpolation='bicubic'), + 'selecsls42b': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls42b-8af30141.pth', + interpolation='bicubic'), + 'selecsls60': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls60-bbf87526.pth', + interpolation='bicubic'), + 'selecsls60b': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls60b-94e619b5.pth', + interpolation='bicubic'), + 'selecsls84': _cfg( + url='', + interpolation='bicubic'), +} + + +class SequentialList(nn.Sequential): + + def __init__(self, *args): + super(SequentialList, self).__init__(*args) + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (List[torch.Tensor]) -> (List[torch.Tensor]) + pass + + @torch.jit._overload_method # noqa: F811 + def 
forward(self, x): + # type: (torch.Tensor) -> (List[torch.Tensor]) + pass + + def forward(self, x) -> List[torch.Tensor]: + for module in self: + x = module(x) + return x + + +class SelectSeq(nn.Module): + def __init__(self, mode='index', index=0): + super(SelectSeq, self).__init__() + self.mode = mode + self.index = index + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (List[torch.Tensor]) -> (torch.Tensor) + pass + + @torch.jit._overload_method # noqa: F811 + def forward(self, x): + # type: (Tuple[torch.Tensor]) -> (torch.Tensor) + pass + + def forward(self, x) -> torch.Tensor: + if self.mode == 'index': + return x[self.index] + else: + return torch.cat(x, dim=1) + + +def conv_bn(in_chs, out_chs, k=3, stride=1, padding=None, dilation=1): + if padding is None: + padding = ((stride - 1) + dilation * (k - 1)) // 2 + return nn.Sequential( + nn.Conv2d(in_chs, out_chs, k, stride, padding=padding, dilation=dilation, bias=False), + nn.BatchNorm2d(out_chs), + nn.ReLU(inplace=True) + ) + + +class SelecSLSBlock(nn.Module): + def __init__(self, in_chs, skip_chs, mid_chs, out_chs, is_first, stride, dilation=1): + super(SelecSLSBlock, self).__init__() + self.stride = stride + self.is_first = is_first + assert stride in [1, 2] + + # Process input with 4 conv blocks with the same number of input and output channels + self.conv1 = conv_bn(in_chs, mid_chs, 3, stride, dilation=dilation) + self.conv2 = conv_bn(mid_chs, mid_chs, 1) + self.conv3 = conv_bn(mid_chs, mid_chs // 2, 3) + self.conv4 = conv_bn(mid_chs // 2, mid_chs, 1) + self.conv5 = conv_bn(mid_chs, mid_chs // 2, 3) + self.conv6 = conv_bn(2 * mid_chs + (0 if is_first else skip_chs), out_chs, 1) + + def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]: + if not isinstance(x, list): + x = [x] + assert len(x) in [1, 2] + + d1 = self.conv1(x[0]) + d2 = self.conv3(self.conv2(d1)) + d3 = self.conv5(self.conv4(d2)) + if self.is_first: + out = self.conv6(torch.cat([d1, d2, d3], 1)) + return [out, out] + else: + return [self.conv6(torch.cat([d1, d2, d3, x[1]], 1)), x[1]] + + +class SelecSLS(nn.Module): + """SelecSLS42 / SelecSLS60 / SelecSLS84 + + Parameters + ---------- + cfg : network config dictionary specifying block type, feature, and head args + num_classes : int, default 1000 + Number of classification classes. + in_chans : int, default 3 + Number of input (color) channels. + drop_rate : float, default 0. + Dropout probability before classifier, for training + global_pool : str, default 'avg' + Global pooling type. One of 'avg', 'max', 'avgmax', 'catavgmax' + """ + + def __init__(self, cfg, num_classes=1000, in_chans=3, drop_rate=0.0, global_pool='avg'): + self.num_classes = num_classes + self.drop_rate = drop_rate + super(SelecSLS, self).__init__() + + self.stem = conv_bn(in_chans, 32, stride=2) + self.features = SequentialList(*[cfg['block'](*block_args) for block_args in cfg['features']]) + self.from_seq = SelectSeq() # from List[tensor] -> Tensor in module compatible way + self.head = nn.Sequential(*[conv_bn(*conv_args) for conv_args in cfg['head']]) + self.num_features = cfg['num_features'] + self.feature_info = cfg['feature_info'] + + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + for n, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1.) + nn.init.constant_(m.bias, 0.) 
+ + def get_classifier(self): + return self.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.stem(x) + x = self.features(x) + x = self.head(self.from_seq(x)) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.fc(x) + return x + + +def _create_selecsls(variant, pretrained, **kwargs): + cfg = {} + feature_info = [dict(num_chs=32, reduction=2, module='stem.2')] + if variant.startswith('selecsls42'): + cfg['block'] = SelecSLSBlock + # Define configuration of the network after the initial neck + cfg['features'] = [ + # in_chs, skip_chs, mid_chs, out_chs, is_first, stride + (32, 0, 64, 64, True, 2), + (64, 64, 64, 128, False, 1), + (128, 0, 144, 144, True, 2), + (144, 144, 144, 288, False, 1), + (288, 0, 304, 304, True, 2), + (304, 304, 304, 480, False, 1), + ] + feature_info.extend([ + dict(num_chs=128, reduction=4, module='features.1'), + dict(num_chs=288, reduction=8, module='features.3'), + dict(num_chs=480, reduction=16, module='features.5'), + ]) + # Head can be replaced with alternative configurations depending on the problem + feature_info.append(dict(num_chs=1024, reduction=32, module='head.1')) + if variant == 'selecsls42b': + cfg['head'] = [ + (480, 960, 3, 2), + (960, 1024, 3, 1), + (1024, 1280, 3, 2), + (1280, 1024, 1, 1), + ] + feature_info.append(dict(num_chs=1024, reduction=64, module='head.3')) + cfg['num_features'] = 1024 + else: + cfg['head'] = [ + (480, 960, 3, 2), + (960, 1024, 3, 1), + (1024, 1024, 3, 2), + (1024, 1280, 1, 1), + ] + feature_info.append(dict(num_chs=1280, reduction=64, module='head.3')) + cfg['num_features'] = 1280 + + elif variant.startswith('selecsls60'): + cfg['block'] = SelecSLSBlock + # Define configuration of the network after the initial neck + cfg['features'] = [ + # in_chs, skip_chs, mid_chs, out_chs, is_first, stride + (32, 0, 64, 64, True, 2), + (64, 64, 64, 128, False, 1), + (128, 0, 128, 128, True, 2), + (128, 128, 128, 128, False, 1), + (128, 128, 128, 288, False, 1), + (288, 0, 288, 288, True, 2), + (288, 288, 288, 288, False, 1), + (288, 288, 288, 288, False, 1), + (288, 288, 288, 416, False, 1), + ] + feature_info.extend([ + dict(num_chs=128, reduction=4, module='features.1'), + dict(num_chs=288, reduction=8, module='features.4'), + dict(num_chs=416, reduction=16, module='features.8'), + ]) + # Head can be replaced with alternative configurations depending on the problem + feature_info.append(dict(num_chs=1024, reduction=32, module='head.1')) + if variant == 'selecsls60b': + cfg['head'] = [ + (416, 756, 3, 2), + (756, 1024, 3, 1), + (1024, 1280, 3, 2), + (1280, 1024, 1, 1), + ] + feature_info.append(dict(num_chs=1024, reduction=64, module='head.3')) + cfg['num_features'] = 1024 + else: + cfg['head'] = [ + (416, 756, 3, 2), + (756, 1024, 3, 1), + (1024, 1024, 3, 2), + (1024, 1280, 1, 1), + ] + feature_info.append(dict(num_chs=1280, reduction=64, module='head.3')) + cfg['num_features'] = 1280 + + elif variant == 'selecsls84': + cfg['block'] = SelecSLSBlock + # Define configuration of the network after the initial neck + cfg['features'] = [ + # in_chs, skip_chs, mid_chs, out_chs, is_first, stride + (32, 0, 64, 64, True, 2), + (64, 64, 64, 144, False, 1), + (144, 0, 144, 144, True, 2), + (144, 144, 144, 
144, False, 1), + (144, 144, 144, 144, False, 1), + (144, 144, 144, 144, False, 1), + (144, 144, 144, 304, False, 1), + (304, 0, 304, 304, True, 2), + (304, 304, 304, 304, False, 1), + (304, 304, 304, 304, False, 1), + (304, 304, 304, 304, False, 1), + (304, 304, 304, 304, False, 1), + (304, 304, 304, 512, False, 1), + ] + feature_info.extend([ + dict(num_chs=144, reduction=4, module='features.1'), + dict(num_chs=304, reduction=8, module='features.6'), + dict(num_chs=512, reduction=16, module='features.12'), + ]) + # Head can be replaced with alternative configurations depending on the problem + cfg['head'] = [ + (512, 960, 3, 2), + (960, 1024, 3, 1), + (1024, 1024, 3, 2), + (1024, 1280, 3, 1), + ] + cfg['num_features'] = 1280 + feature_info.extend([ + dict(num_chs=1024, reduction=32, module='head.1'), + dict(num_chs=1280, reduction=64, module='head.3') + ]) + else: + raise ValueError('Invalid net configuration ' + variant + ' !!!') + cfg['feature_info'] = feature_info + + # this model can do 6 feature levels by default, unlike most others, leave as 0-4 to avoid surprises? + return build_model_with_cfg( + SelecSLS, variant, pretrained, + default_cfg=default_cfgs[variant], + model_cfg=cfg, + feature_cfg=dict(out_indices=(0, 1, 2, 3, 4), flatten_sequential=True), + **kwargs) + + +@register_model +def selecsls42(pretrained=False, **kwargs): + """Constructs a SelecSLS42 model. + """ + return _create_selecsls('selecsls42', pretrained, **kwargs) + + +@register_model +def selecsls42b(pretrained=False, **kwargs): + """Constructs a SelecSLS42_B model. + """ + return _create_selecsls('selecsls42b', pretrained, **kwargs) + + +@register_model +def selecsls60(pretrained=False, **kwargs): + """Constructs a SelecSLS60 model. + """ + return _create_selecsls('selecsls60', pretrained, **kwargs) + + +@register_model +def selecsls60b(pretrained=False, **kwargs): + """Constructs a SelecSLS60_B model. + """ + return _create_selecsls('selecsls60b', pretrained, **kwargs) + + +@register_model +def selecsls84(pretrained=False, **kwargs): + """Constructs a SelecSLS84 model. + """ + return _create_selecsls('selecsls84', pretrained, **kwargs) diff --git a/timm/models/senet.py b/timm/models/senet.py new file mode 100644 index 0000000..3d0ba7b --- /dev/null +++ b/timm/models/senet.py @@ -0,0 +1,467 @@ +""" +SEResNet implementation from Cadene's pretrained models +https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/senet.py +Additional credit to https://github.com/creafz + +Original model: https://github.com/hujie-frank/SENet + +ResNet code gently borrowed from +https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py + +FIXME I'm deprecating this model and moving them to ResNet as I don't want to maintain duplicate +support for extras like dilation, switchable BN/activations, feature extraction, etc that don't exist here. 
+""" +import math +from collections import OrderedDict + +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .layers import create_classifier +from .registry import register_model + +__all__ = ['SENet'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'layer0.conv1', 'classifier': 'last_linear', + **kwargs + } + + +default_cfgs = { + 'legacy_senet154': + _cfg(url='http://data.lip6.fr/cadene/pretrainedmodels/senet154-c7b49a05.pth'), + 'legacy_seresnet18': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet18-4bb0ce65.pth', + interpolation='bicubic'), + 'legacy_seresnet34': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet34-a4004e63.pth'), + 'legacy_seresnet50': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet50-ce0d4300.pth'), + 'legacy_seresnet101': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet101-7e38fcc6.pth'), + 'legacy_seresnet152': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet152-d17c99b7.pth'), + 'legacy_seresnext26_32x4d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26_32x4d-65ebdb501.pth', + interpolation='bicubic'), + 'legacy_seresnext50_32x4d': + _cfg(url='http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth'), + 'legacy_seresnext101_32x4d': + _cfg(url='http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth'), +} + + +def _weight_init(m): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1.) + nn.init.constant_(m.bias, 0.) + + +class SEModule(nn.Module): + + def __init__(self, channels, reduction): + super(SEModule, self).__init__() + self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1) + self.relu = nn.ReLU(inplace=True) + self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1) + self.sigmoid = nn.Sigmoid() + + def forward(self, x): + module_input = x + x = x.mean((2, 3), keepdim=True) + x = self.fc1(x) + x = self.relu(x) + x = self.fc2(x) + x = self.sigmoid(x) + return module_input * x + + +class Bottleneck(nn.Module): + """ + Base class for bottlenecks that implements `forward()` method. + """ + + def forward(self, x): + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + shortcut = self.downsample(x) + + out = self.se_module(out) + shortcut + out = self.relu(out) + + return out + + +class SEBottleneck(Bottleneck): + """ + Bottleneck for SENet154. 
+ """ + expansion = 4 + + def __init__(self, inplanes, planes, groups, reduction, stride=1, + downsample=None): + super(SEBottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(planes * 2) + self.conv2 = nn.Conv2d( + planes * 2, planes * 4, kernel_size=3, stride=stride, + padding=1, groups=groups, bias=False) + self.bn2 = nn.BatchNorm2d(planes * 4) + self.conv3 = nn.Conv2d( + planes * 4, planes * 4, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.se_module = SEModule(planes * 4, reduction=reduction) + self.downsample = downsample + self.stride = stride + + +class SEResNetBottleneck(Bottleneck): + """ + ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe + implementation and uses `stride=stride` in `conv1` and not in `conv2` + (the latter is used in the torchvision implementation of ResNet). + """ + expansion = 4 + + def __init__(self, inplanes, planes, groups, reduction, stride=1, + downsample=None): + super(SEResNetBottleneck, self).__init__() + self.conv1 = nn.Conv2d( + inplanes, planes, kernel_size=1, bias=False, stride=stride) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d( + planes, planes, kernel_size=3, padding=1, groups=groups, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.se_module = SEModule(planes * 4, reduction=reduction) + self.downsample = downsample + self.stride = stride + + +class SEResNeXtBottleneck(Bottleneck): + """ + ResNeXt bottleneck type C with a Squeeze-and-Excitation module. + """ + expansion = 4 + + def __init__(self, inplanes, planes, groups, reduction, stride=1, + downsample=None, base_width=4): + super(SEResNeXtBottleneck, self).__init__() + width = math.floor(planes * (base_width / 64)) * groups + self.conv1 = nn.Conv2d( + inplanes, width, kernel_size=1, bias=False, stride=1) + self.bn1 = nn.BatchNorm2d(width) + self.conv2 = nn.Conv2d( + width, width, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False) + self.bn2 = nn.BatchNorm2d(width) + self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.se_module = SEModule(planes * 4, reduction=reduction) + self.downsample = downsample + self.stride = stride + + +class SEResNetBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None): + super(SEResNetBlock, self).__init__() + self.conv1 = nn.Conv2d( + inplanes, planes, kernel_size=3, padding=1, stride=stride, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d( + planes, planes, kernel_size=3, padding=1, groups=groups, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.se_module = SEModule(planes, reduction=reduction) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + shortcut = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + if self.downsample is not None: + shortcut = self.downsample(x) + + out = self.se_module(out) + shortcut + out = self.relu(out) + + return out + + +class SENet(nn.Module): + + def __init__(self, block, layers, groups, reduction, drop_rate=0.2, + in_chans=3, 
inplanes=64, input_3x3=False, downsample_kernel_size=1,
+                 downsample_padding=0, num_classes=1000, global_pool='avg'):
+        """
+        Parameters
+        ----------
+        block (nn.Module): Bottleneck class.
+            - For SENet154: SEBottleneck
+            - For SE-ResNet models: SEResNetBottleneck
+            - For SE-ResNeXt models: SEResNeXtBottleneck
+        layers (list of ints): Number of residual blocks for 4 layers of the
+            network (layer1...layer4).
+        groups (int): Number of groups for the 3x3 convolution in each
+            bottleneck block.
+            - For SENet154: 64
+            - For SE-ResNet models: 1
+            - For SE-ResNeXt models: 32
+        reduction (int): Reduction ratio for Squeeze-and-Excitation modules.
+            - For all models: 16
+        drop_rate (float): Drop probability for the Dropout layer.
+            If `0.` the Dropout layer is not used.
+            - For SENet154: 0.2
+            - For SE-ResNet models: 0.
+            - For SE-ResNeXt models: 0.
+        inplanes (int): Number of input channels for layer1.
+            - For SENet154: 128
+            - For SE-ResNet models: 64
+            - For SE-ResNeXt models: 64
+        input_3x3 (bool): If `True`, use three 3x3 convolutions instead of
+            a single 7x7 convolution in layer0.
+            - For SENet154: True
+            - For SE-ResNet models: False
+            - For SE-ResNeXt models: False
+        downsample_kernel_size (int): Kernel size for downsampling convolutions
+            in layer2, layer3 and layer4.
+            - For SENet154: 3
+            - For SE-ResNet models: 1
+            - For SE-ResNeXt models: 1
+        downsample_padding (int): Padding for downsampling convolutions in
+            layer2, layer3 and layer4.
+            - For SENet154: 1
+            - For SE-ResNet models: 0
+            - For SE-ResNeXt models: 0
+        num_classes (int): Number of outputs in `last_linear` layer.
+            - For all models: 1000
+        """
+        super(SENet, self).__init__()
+        self.inplanes = inplanes
+        self.num_classes = num_classes
+        self.drop_rate = drop_rate
+        if input_3x3:
+            layer0_modules = [
+                ('conv1', nn.Conv2d(in_chans, 64, 3, stride=2, padding=1, bias=False)),
+                ('bn1', nn.BatchNorm2d(64)),
+                ('relu1', nn.ReLU(inplace=True)),
+                ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False)),
+                ('bn2', nn.BatchNorm2d(64)),
+                ('relu2', nn.ReLU(inplace=True)),
+                ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1, bias=False)),
+                ('bn3', nn.BatchNorm2d(inplanes)),
+                ('relu3', nn.ReLU(inplace=True)),
+            ]
+        else:
+            layer0_modules = [
+                ('conv1', nn.Conv2d(
+                    in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False)),
+                ('bn1', nn.BatchNorm2d(inplanes)),
+                ('relu1', nn.ReLU(inplace=True)),
+            ]
+        self.layer0 = nn.Sequential(OrderedDict(layer0_modules))
+        # To preserve compatibility with Caffe weights `ceil_mode=True` is used instead of `padding=1`.
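+        # (`ceil_mode` rounds the pooled size up, matching Caffe's pooling arithmetic;
+        # for the even-sized feature maps seen here it yields the same output shape as `padding=1`.)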
+ self.pool0 = nn.MaxPool2d(3, stride=2, ceil_mode=True) + self.feature_info = [dict(num_chs=inplanes, reduction=2, module='layer0')] + self.layer1 = self._make_layer( + block, + planes=64, + blocks=layers[0], + groups=groups, + reduction=reduction, + downsample_kernel_size=1, + downsample_padding=0 + ) + self.feature_info += [dict(num_chs=64 * block.expansion, reduction=4, module='layer1')] + self.layer2 = self._make_layer( + block, + planes=128, + blocks=layers[1], + stride=2, + groups=groups, + reduction=reduction, + downsample_kernel_size=downsample_kernel_size, + downsample_padding=downsample_padding + ) + self.feature_info += [dict(num_chs=128 * block.expansion, reduction=8, module='layer2')] + self.layer3 = self._make_layer( + block, + planes=256, + blocks=layers[2], + stride=2, + groups=groups, + reduction=reduction, + downsample_kernel_size=downsample_kernel_size, + downsample_padding=downsample_padding + ) + self.feature_info += [dict(num_chs=256 * block.expansion, reduction=16, module='layer3')] + self.layer4 = self._make_layer( + block, + planes=512, + blocks=layers[3], + stride=2, + groups=groups, + reduction=reduction, + downsample_kernel_size=downsample_kernel_size, + downsample_padding=downsample_padding + ) + self.feature_info += [dict(num_chs=512 * block.expansion, reduction=32, module='layer4')] + self.num_features = 512 * block.expansion + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + for m in self.modules(): + _weight_init(m) + + def _make_layer(self, block, planes, blocks, groups, reduction, stride=1, + downsample_kernel_size=1, downsample_padding=0): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d( + self.inplanes, planes * block.expansion, kernel_size=downsample_kernel_size, + stride=stride, padding=downsample_padding, bias=False), + nn.BatchNorm2d(planes * block.expansion), + ) + + layers = [block(self.inplanes, planes, groups, reduction, stride, downsample)] + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(self.inplanes, planes, groups, reduction)) + + return nn.Sequential(*layers) + + def get_classifier(self): + return self.last_linear + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.last_linear = create_classifier( + self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + x = self.layer0(x) + x = self.pool0(x) + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + return x + + def logits(self, x): + x = self.global_pool(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + x = self.last_linear(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.logits(x) + return x + + +def _create_senet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + SENet, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + + +@register_model +def legacy_seresnet18(pretrained=False, **kwargs): + model_args = dict( + block=SEResNetBlock, layers=[2, 2, 2, 2], groups=1, reduction=16, **kwargs) + return _create_senet('legacy_seresnet18', pretrained, **model_args) + + +@register_model +def legacy_seresnet34(pretrained=False, **kwargs): + model_args = dict( + block=SEResNetBlock, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs) + return 
_create_senet('legacy_seresnet34', pretrained, **model_args) + + +@register_model +def legacy_seresnet50(pretrained=False, **kwargs): + model_args = dict( + block=SEResNetBottleneck, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs) + return _create_senet('legacy_seresnet50', pretrained, **model_args) + + +@register_model +def legacy_seresnet101(pretrained=False, **kwargs): + model_args = dict( + block=SEResNetBottleneck, layers=[3, 4, 23, 3], groups=1, reduction=16, **kwargs) + return _create_senet('legacy_seresnet101', pretrained, **model_args) + + +@register_model +def legacy_seresnet152(pretrained=False, **kwargs): + model_args = dict( + block=SEResNetBottleneck, layers=[3, 8, 36, 3], groups=1, reduction=16, **kwargs) + return _create_senet('legacy_seresnet152', pretrained, **model_args) + + +@register_model +def legacy_senet154(pretrained=False, **kwargs): + model_args = dict( + block=SEBottleneck, layers=[3, 8, 36, 3], groups=64, reduction=16, + downsample_kernel_size=3, downsample_padding=1, inplanes=128, input_3x3=True, **kwargs) + return _create_senet('legacy_senet154', pretrained, **model_args) + + +@register_model +def legacy_seresnext26_32x4d(pretrained=False, **kwargs): + model_args = dict( + block=SEResNeXtBottleneck, layers=[2, 2, 2, 2], groups=32, reduction=16, **kwargs) + return _create_senet('legacy_seresnext26_32x4d', pretrained, **model_args) + + +@register_model +def legacy_seresnext50_32x4d(pretrained=False, **kwargs): + model_args = dict( + block=SEResNeXtBottleneck, layers=[3, 4, 6, 3], groups=32, reduction=16, **kwargs) + return _create_senet('legacy_seresnext50_32x4d', pretrained, **model_args) + + +@register_model +def legacy_seresnext101_32x4d(pretrained=False, **kwargs): + model_args = dict( + block=SEResNeXtBottleneck, layers=[3, 4, 23, 3], groups=32, reduction=16, **kwargs) + return _create_senet('legacy_seresnext101_32x4d', pretrained, **model_args) diff --git a/timm/models/sknet.py b/timm/models/sknet.py new file mode 100644 index 0000000..4dc2aa5 --- /dev/null +++ b/timm/models/sknet.py @@ -0,0 +1,215 @@ +""" Selective Kernel Networks (ResNet base) + +Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586) + +This was inspired by reading 'Compounding the Performance Improvements...' (https://arxiv.org/abs/2001.06268) +and a streamlined impl at https://github.com/clovaai/assembled-cnn but I ended up building something closer +to the original paper with some modifications of my own to better balance param count vs accuracy. 
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+import math
+
+from torch import nn as nn
+
+from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
+from .helpers import build_model_with_cfg
+from .layers import SelectiveKernel, ConvBnAct, create_attn
+from .registry import register_model
+from .resnet import ResNet
+
+
+def _cfg(url='', **kwargs):
+    return {
+        'url': url,
+        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
+        'crop_pct': 0.875, 'interpolation': 'bicubic',
+        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
+        'first_conv': 'conv1', 'classifier': 'fc',
+        **kwargs
+    }
+
+
+default_cfgs = {
+    'skresnet18': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnet18_ra-4eec2804.pth'),
+    'skresnet34': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnet34_ra-bdc0ccde.pth'),
+    'skresnet50': _cfg(),
+    'skresnet50d': _cfg(
+        first_conv='conv1.0'),
+    'skresnext50_32x4d': _cfg(
+        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnext50_ra-f40e40bf.pth'),
+}
+
+
+class SelectiveKernelBasic(nn.Module):
+    expansion = 1
+
+    def __init__(self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64,
+                 sk_kwargs=None, reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU,
+                 norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, drop_block=None, drop_path=None):
+        super(SelectiveKernelBasic, self).__init__()
+
+        sk_kwargs = sk_kwargs or {}
+        conv_kwargs = dict(drop_block=drop_block, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer)
+        assert cardinality == 1, 'BasicBlock only supports cardinality of 1'
+        assert base_width == 64, 'BasicBlock does not support changing base width'
+        first_planes = planes // reduce_first
+        outplanes = planes * self.expansion
+        first_dilation = first_dilation or dilation
+
+        self.conv1 = SelectiveKernel(
+            inplanes, first_planes, stride=stride, dilation=first_dilation, **conv_kwargs, **sk_kwargs)
+        conv_kwargs['act_layer'] = None
+        self.conv2 = ConvBnAct(
+            first_planes, outplanes, kernel_size=3, dilation=dilation, **conv_kwargs)
+        self.se = create_attn(attn_layer, outplanes)
+        self.act = act_layer(inplace=True)
+        self.downsample = downsample
+        self.stride = stride
+        self.dilation = dilation
+        self.drop_block = drop_block
+        self.drop_path = drop_path
+
+    def zero_init_last_bn(self):
+        nn.init.zeros_(self.conv2.bn.weight)
+
+    def forward(self, x):
+        shortcut = x
+        x = self.conv1(x)
+        x = self.conv2(x)
+        if self.se is not None:
+            x = self.se(x)
+        if self.drop_path is not None:
+            x = self.drop_path(x)
+        if self.downsample is not None:
+            shortcut = self.downsample(shortcut)
+        x += shortcut
+        x = self.act(x)
+        return x
+
+
+class SelectiveKernelBottleneck(nn.Module):
+    expansion = 4
+
+    def __init__(self, inplanes, planes, stride=1, downsample=None,
+                 cardinality=1, base_width=64, sk_kwargs=None, reduce_first=1, dilation=1, first_dilation=None,
+                 act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None,
+                 drop_block=None, drop_path=None):
+        super(SelectiveKernelBottleneck, self).__init__()
+
+        sk_kwargs = sk_kwargs or {}
+        conv_kwargs = dict(drop_block=drop_block, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer)
+        width = int(math.floor(planes * (base_width / 64)) * cardinality)
+        first_planes = width // reduce_first
+        outplanes = planes * self.expansion
+        first_dilation = first_dilation or dilation
+
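+        # e.g. with planes=64, base_width=4, cardinality=32 (the 32x4d config used by
+        # skresnext50_32x4d below): width = floor(64 * 4 / 64) * 32 = 128, so the
+        # selective-kernel 3x3 runs on 128 grouped channels before conv3 expands
+        # to outplanes = 64 * 4 = 256.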
+ self.conv1 = ConvBnAct(inplanes, first_planes, kernel_size=1, **conv_kwargs) + self.conv2 = SelectiveKernel( + first_planes, width, stride=stride, dilation=first_dilation, groups=cardinality, + **conv_kwargs, **sk_kwargs) + conv_kwargs['act_layer'] = None + self.conv3 = ConvBnAct(width, outplanes, kernel_size=1, **conv_kwargs) + self.se = create_attn(attn_layer, outplanes) + self.act = act_layer(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + self.drop_block = drop_block + self.drop_path = drop_path + + def zero_init_last_bn(self): + nn.init.zeros_(self.conv3.bn.weight) + + def forward(self, x): + shortcut = x + x = self.conv1(x) + x = self.conv2(x) + x = self.conv3(x) + if self.se is not None: + x = self.se(x) + if self.drop_path is not None: + x = self.drop_path(x) + if self.downsample is not None: + shortcut = self.downsample(shortcut) + x += shortcut + x = self.act(x) + return x + + +def _create_skresnet(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + ResNet, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + + +@register_model +def skresnet18(pretrained=False, **kwargs): + """Constructs a Selective Kernel ResNet-18 model. + + Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this + variation splits the input channels to the selective convolutions to keep param count down. + """ + sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True) + model_args = dict( + block=SelectiveKernelBasic, layers=[2, 2, 2, 2], block_args=dict(sk_kwargs=sk_kwargs), + zero_init_last_bn=False, **kwargs) + return _create_skresnet('skresnet18', pretrained, **model_args) + + +@register_model +def skresnet34(pretrained=False, **kwargs): + """Constructs a Selective Kernel ResNet-34 model. + + Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this + variation splits the input channels to the selective convolutions to keep param count down. + """ + sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True) + model_args = dict( + block=SelectiveKernelBasic, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs), + zero_init_last_bn=False, **kwargs) + return _create_skresnet('skresnet34', pretrained, **model_args) + + +@register_model +def skresnet50(pretrained=False, **kwargs): + """Constructs a Select Kernel ResNet-50 model. + + Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this + variation splits the input channels to the selective convolutions to keep param count down. + """ + sk_kwargs = dict(split_input=True) + model_args = dict( + block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs), + zero_init_last_bn=False, **kwargs) + return _create_skresnet('skresnet50', pretrained, **model_args) + + +@register_model +def skresnet50d(pretrained=False, **kwargs): + """Constructs a Select Kernel ResNet-50-D model. + + Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this + variation splits the input channels to the selective convolutions to keep param count down. 
+ """ + sk_kwargs = dict(split_input=True) + model_args = dict( + block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, + block_args=dict(sk_kwargs=sk_kwargs), zero_init_last_bn=False, **kwargs) + return _create_skresnet('skresnet50d', pretrained, **model_args) + + +@register_model +def skresnext50_32x4d(pretrained=False, **kwargs): + """Constructs a Select Kernel ResNeXt50-32x4d model. This should be equivalent to + the SKNet-50 model in the Select Kernel Paper + """ + sk_kwargs = dict(rd_ratio=1/16, rd_divisor=32, split_input=False) + model_args = dict( + block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, + block_args=dict(sk_kwargs=sk_kwargs), zero_init_last_bn=False, **kwargs) + return _create_skresnet('skresnext50_32x4d', pretrained, **model_args) + diff --git a/timm/models/swin_transformer.py b/timm/models/swin_transformer.py new file mode 100644 index 0000000..9205790 --- /dev/null +++ b/timm/models/swin_transformer.py @@ -0,0 +1,656 @@ +""" Swin Transformer +A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` + - https://arxiv.org/pdf/2103.14030 + +Code/weights from https://github.com/microsoft/Swin-Transformer, original copyright/license info below + +""" +# -------------------------------------------------------- +# Swin Transformer +# Copyright (c) 2021 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Ze Liu +# -------------------------------------------------------- +import logging +import math +from copy import deepcopy +from typing import Optional + +import torch +import torch.nn as nn +import torch.utils.checkpoint as checkpoint + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .fx_features import register_notrace_function +from .helpers import build_model_with_cfg, overlay_external_default_cfg +from .layers import PatchEmbed, Mlp, DropPath, to_2tuple, trunc_normal_ +from .layers import _assert +from .registry import register_model +from .vision_transformer import checkpoint_filter_fn, _init_vit_weights + + +_logger = logging.getLogger(__name__) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + # patch models (my experiments) + 'swin_base_patch4_window12_384': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22kto1k.pth', + input_size=(3, 384, 384), crop_pct=1.0), + + 'swin_base_patch4_window7_224': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22kto1k.pth', + ), + + 'swin_large_patch4_window12_384': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22kto1k.pth', + input_size=(3, 384, 384), crop_pct=1.0), + + 'swin_large_patch4_window7_224': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22kto1k.pth', + ), + + 'swin_small_patch4_window7_224': _cfg( + url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth', + ), + + 'swin_tiny_patch4_window7_224': _cfg( + 
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth',
+    ),
+
+    'swin_base_patch4_window12_384_in22k': _cfg(
+        url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth',
+        input_size=(3, 384, 384), crop_pct=1.0, num_classes=21841),
+
+    'swin_base_patch4_window7_224_in22k': _cfg(
+        url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth',
+        num_classes=21841),
+
+    'swin_large_patch4_window12_384_in22k': _cfg(
+        url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth',
+        input_size=(3, 384, 384), crop_pct=1.0, num_classes=21841),
+
+    'swin_large_patch4_window7_224_in22k': _cfg(
+        url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth',
+        num_classes=21841),
+
+}
+
+
+def window_partition(x, window_size: int):
+    """
+    Args:
+        x: (B, H, W, C)
+        window_size (int): window size
+
+    Returns:
+        windows: (num_windows*B, window_size, window_size, C)
+    """
+    B, H, W, C = x.shape
+    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
+    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
+    return windows
+
+
+@register_notrace_function  # reason: int argument is a Proxy
+def window_reverse(windows, window_size: int, H: int, W: int):
+    """
+    Args:
+        windows: (num_windows*B, window_size, window_size, C)
+        window_size (int): Window size
+        H (int): Height of image
+        W (int): Width of image
+
+    Returns:
+        x: (B, H, W, C)
+    """
+    B = int(windows.shape[0] / (H * W / window_size / window_size))
+    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
+    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
+    return x
+
+
+class WindowAttention(nn.Module):
+    r""" Window based multi-head self attention (W-MSA) module with relative position bias.
+    It supports both shifted and non-shifted windows.
+
+    Args:
+        dim (int): Number of input channels.
+        window_size (tuple[int]): The height and width of the window.
+        num_heads (int): Number of attention heads.
+        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
+        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
+        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
+    """
+
+    def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.):
+
+        super().__init__()
+        self.dim = dim
+        self.window_size = window_size  # Wh, Ww
+        self.num_heads = num_heads
+        head_dim = dim // num_heads
+        self.scale = head_dim ** -0.5
+
+        # define a parameter table of relative position bias
+        self.relative_position_bias_table = nn.Parameter(
+            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads))  # 2*Wh-1 * 2*Ww-1, nH
+
+        # get pair-wise relative position index for each token inside the window
+        coords_h = torch.arange(self.window_size[0])
+        coords_w = torch.arange(self.window_size[1])
+        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
+        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
+        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
+        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
+        relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0
+        relative_coords[:, :, 1] += self.window_size[1] - 1
+        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
+        relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
+        self.register_buffer("relative_position_index", relative_position_index)
+
+        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
+        self.attn_drop = nn.Dropout(attn_drop)
+        self.proj = nn.Linear(dim, dim)
+        self.proj_drop = nn.Dropout(proj_drop)
+
+        trunc_normal_(self.relative_position_bias_table, std=.02)
+        self.softmax = nn.Softmax(dim=-1)
+
+    def forward(self, x, mask: Optional[torch.Tensor] = None):
+        """
+        Args:
+            x: input features with shape of (num_windows*B, N, C)
+            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
+        """
+        B_, N, C = x.shape
+        qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
+        q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)
+
+        q = q * self.scale
+        attn = (q @ k.transpose(-2, -1))
+
+        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
+            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)  # Wh*Ww,Wh*Ww,nH
+        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
+        attn = attn + relative_position_bias.unsqueeze(0)
+
+        if mask is not None:
+            nW = mask.shape[0]
+            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
+            attn = attn.view(-1, self.num_heads, N, N)
+            attn = self.softmax(attn)
+        else:
+            attn = self.softmax(attn)
+
+        attn = self.attn_drop(attn)
+
+        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
+        x = self.proj(x)
+        x = self.proj_drop(x)
+        return x
+
+
+class SwinTransformerBlock(nn.Module):
+    r""" Swin Transformer Block.
+
+    Args:
+        dim (int): Number of input channels.
+        input_resolution (tuple[int]): Input resolution.
+        num_heads (int): Number of attention heads.
+        window_size (int): Window size.
+        shift_size (int): Shift size for SW-MSA.
+        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
+        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
+        drop (float, optional): Dropout rate. Default: 0.0
+        attn_drop (float, optional): Attention dropout rate. Default: 0.0
+        drop_path (float, optional): Stochastic depth rate. Default: 0.0
+        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
+        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
+    """
+
+    def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
+                 mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0.,
+                 act_layer=nn.GELU, norm_layer=nn.LayerNorm):
+        super().__init__()
+        self.dim = dim
+        self.input_resolution = input_resolution
+        self.num_heads = num_heads
+        self.window_size = window_size
+        self.shift_size = shift_size
+        self.mlp_ratio = mlp_ratio
+        if min(self.input_resolution) <= self.window_size:
+            # if window size is larger than input resolution, we don't partition windows
+            self.shift_size = 0
+            self.window_size = min(self.input_resolution)
+        assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
+
+        self.norm1 = norm_layer(dim)
+        self.attn = WindowAttention(
+            dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, qkv_bias=qkv_bias,
+            attn_drop=attn_drop, proj_drop=drop)
+
+        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+        self.norm2 = norm_layer(dim)
+        mlp_hidden_dim = int(dim * mlp_ratio)
+        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
+
+        if self.shift_size > 0:
+            # calculate attention mask for SW-MSA
+            H, W = self.input_resolution
+            img_mask = torch.zeros((1, H, W, 1))  # 1 H W 1
+            h_slices = (slice(0, -self.window_size),
+                        slice(-self.window_size, -self.shift_size),
+                        slice(-self.shift_size, None))
+            w_slices = (slice(0, -self.window_size),
+                        slice(-self.window_size, -self.shift_size),
+                        slice(-self.shift_size, None))
+            cnt = 0
+            for h in h_slices:
+                for w in w_slices:
+                    img_mask[:, h, w, :] = cnt
+                    cnt += 1
+
+            mask_windows = window_partition(img_mask, self.window_size)  # nW, window_size, window_size, 1
+            mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
+            attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
+            attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
+        else:
+            attn_mask = None
+
+        self.register_buffer("attn_mask", attn_mask)
+
+    def forward(self, x):
+        H, W = self.input_resolution
+        B, L, C = x.shape
+        _assert(L == H * W, "input feature has wrong size")
+
+        shortcut = x
+        x = self.norm1(x)
+        x = x.view(B, H, W, C)
+
+        # cyclic shift
+        if self.shift_size > 0:
+            shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
+        else:
+            shifted_x = x
+
+        # partition windows
+        x_windows = window_partition(shifted_x, self.window_size)  # nW*B, window_size, window_size, C
+        x_windows = x_windows.view(-1, self.window_size * self.window_size, C)  # nW*B, window_size*window_size, C
+
+        # W-MSA/SW-MSA
+        attn_windows = self.attn(x_windows, mask=self.attn_mask)  # nW*B, window_size*window_size, C
+
+        # merge windows
+        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
+        shifted_x = window_reverse(attn_windows, self.window_size, H, W)  # B H' W' C
+
+        # reverse cyclic shift
+        if self.shift_size > 0:
+            x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
+        else:
+            x = shifted_x
+        x = x.view(B, H * W, C)
+
+        # FFN
+        x = shortcut + self.drop_path(x)
+        x = x + self.drop_path(self.mlp(self.norm2(x)))
+
+        return x
+
+
+class PatchMerging(nn.Module):
+    r""" Patch Merging Layer.
+
+    Args:
+        input_resolution (tuple[int]): Resolution of input feature.
+        dim (int): Number of input channels.
+        norm_layer (nn.Module, optional): Normalization layer.
Default: nn.LayerNorm + """ + + def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm): + super().__init__() + self.input_resolution = input_resolution + self.dim = dim + self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) + self.norm = norm_layer(4 * dim) + + def forward(self, x): + """ + x: B, H*W, C + """ + H, W = self.input_resolution + B, L, C = x.shape + _assert(L == H * W, "input feature has wrong size") + _assert(H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even.") + + x = x.view(B, H, W, C) + + x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C + x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C + x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C + x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C + x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C + x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C + + x = self.norm(x) + x = self.reduction(x) + + return x + + def extra_repr(self) -> str: + return f"input_resolution={self.input_resolution}, dim={self.dim}" + + def flops(self): + H, W = self.input_resolution + flops = H * W * self.dim + flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim + return flops + + +class BasicLayer(nn.Module): + """ A basic Swin Transformer layer for one stage. + + Args: + dim (int): Number of input channels. + input_resolution (tuple[int]): Input resolution. + depth (int): Number of blocks. + num_heads (int): Number of attention heads. + window_size (int): Local window size. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. 
+ """ + + def __init__(self, dim, input_resolution, depth, num_heads, window_size, + mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., + drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False): + + super().__init__() + self.dim = dim + self.input_resolution = input_resolution + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.ModuleList([ + SwinTransformerBlock( + dim=dim, input_resolution=input_resolution, num_heads=num_heads, window_size=window_size, + shift_size=0 if (i % 2 == 0) else window_size // 2, mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, drop=drop, attn_drop=attn_drop, + drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, norm_layer=norm_layer) + for i in range(depth)]) + + # patch merging layer + if downsample is not None: + self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer) + else: + self.downsample = None + + def forward(self, x): + for blk in self.blocks: + if not torch.jit.is_scripting() and self.use_checkpoint: + x = checkpoint.checkpoint(blk, x) + else: + x = blk(x) + if self.downsample is not None: + x = self.downsample(x) + return x + + def extra_repr(self) -> str: + return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}" + + +class SwinTransformer(nn.Module): + r""" Swin Transformer + A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - + https://arxiv.org/pdf/2103.14030 + + Args: + img_size (int | tuple(int)): Input image size. Default 224 + patch_size (int | tuple(int)): Patch size. Default: 4 + in_chans (int): Number of input image channels. Default: 3 + num_classes (int): Number of classes for classification head. Default: 1000 + embed_dim (int): Patch embedding dimension. Default: 96 + depths (tuple(int)): Depth of each Swin Transformer layer. + num_heads (tuple(int)): Number of attention heads in different layers. + window_size (int): Window size. Default: 7 + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4 + qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True + drop_rate (float): Dropout rate. Default: 0 + attn_drop_rate (float): Attention dropout rate. Default: 0 + drop_path_rate (float): Stochastic depth rate. Default: 0.1 + norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. + ape (bool): If True, add absolute position embedding to the patch embedding. Default: False + patch_norm (bool): If True, add normalization after patch embedding. Default: True + use_checkpoint (bool): Whether to use checkpointing to save memory. 
Default: False + """ + + def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000, + embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), + window_size=7, mlp_ratio=4., qkv_bias=True, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, + norm_layer=nn.LayerNorm, ape=False, patch_norm=True, + use_checkpoint=False, weight_init='', **kwargs): + super().__init__() + + self.num_classes = num_classes + self.num_layers = len(depths) + self.embed_dim = embed_dim + self.ape = ape + self.patch_norm = patch_norm + self.num_features = int(embed_dim * 2 ** (self.num_layers - 1)) + self.mlp_ratio = mlp_ratio + + # split image into non-overlapping patches + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, + norm_layer=norm_layer if self.patch_norm else None) + num_patches = self.patch_embed.num_patches + self.patch_grid = self.patch_embed.grid_size + + # absolute position embedding + if self.ape: + self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) + trunc_normal_(self.absolute_pos_embed, std=.02) + else: + self.absolute_pos_embed = None + + self.pos_drop = nn.Dropout(p=drop_rate) + + # stochastic depth + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule + + # build layers + layers = [] + for i_layer in range(self.num_layers): + layers += [BasicLayer( + dim=int(embed_dim * 2 ** i_layer), + input_resolution=(self.patch_grid[0] // (2 ** i_layer), self.patch_grid[1] // (2 ** i_layer)), + depth=depths[i_layer], + num_heads=num_heads[i_layer], + window_size=window_size, + mlp_ratio=self.mlp_ratio, + qkv_bias=qkv_bias, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], + norm_layer=norm_layer, + downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, + use_checkpoint=use_checkpoint) + ] + self.layers = nn.Sequential(*layers) + + self.norm = norm_layer(self.num_features) + self.avgpool = nn.AdaptiveAvgPool1d(1) + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + assert weight_init in ('jax', 'jax_nlhb', 'nlhb', '') + head_bias = -math.log(self.num_classes) if 'nlhb' in weight_init else 0. 
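+        # 'nlhb' (negative log head bias) initializes the head bias to -ln(num_classes),
+        # about -6.91 for 1000 classes, so each class starts near probability
+        # 1/num_classes under a sigmoid/binary-cross-entropy style objective.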
+ if weight_init.startswith('jax'): + for n, m in self.named_modules(): + _init_vit_weights(m, n, head_bias=head_bias, jax_impl=True) + else: + self.apply(_init_vit_weights) + + @torch.jit.ignore + def no_weight_decay(self): + return {'absolute_pos_embed'} + + @torch.jit.ignore + def no_weight_decay_keywords(self): + return {'relative_position_bias_table'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.patch_embed(x) + if self.absolute_pos_embed is not None: + x = x + self.absolute_pos_embed + x = self.pos_drop(x) + x = self.layers(x) + x = self.norm(x) # B L C + x = self.avgpool(x.transpose(1, 2)) # B C 1 + x = torch.flatten(x, 1) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _create_swin_transformer(variant, pretrained=False, default_cfg=None, **kwargs): + if default_cfg is None: + default_cfg = deepcopy(default_cfgs[variant]) + overlay_external_default_cfg(default_cfg, kwargs) + default_num_classes = default_cfg['num_classes'] + default_img_size = default_cfg['input_size'][-2:] + + num_classes = kwargs.pop('num_classes', default_num_classes) + img_size = kwargs.pop('img_size', default_img_size) + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model = build_model_with_cfg( + SwinTransformer, variant, pretrained, + default_cfg=default_cfg, + img_size=img_size, + num_classes=num_classes, + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + + return model + + + +@register_model +def swin_base_patch4_window12_384(pretrained=False, **kwargs): + """ Swin-B @ 384x384, pretrained ImageNet-22k, fine tune 1k + """ + model_kwargs = dict( + patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs) + return _create_swin_transformer('swin_base_patch4_window12_384', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_base_patch4_window7_224(pretrained=False, **kwargs): + """ Swin-B @ 224x224, pretrained ImageNet-22k, fine tune 1k + """ + model_kwargs = dict( + patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs) + return _create_swin_transformer('swin_base_patch4_window7_224', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_large_patch4_window12_384(pretrained=False, **kwargs): + """ Swin-L @ 384x384, pretrained ImageNet-22k, fine tune 1k + """ + model_kwargs = dict( + patch_size=4, window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs) + return _create_swin_transformer('swin_large_patch4_window12_384', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_large_patch4_window7_224(pretrained=False, **kwargs): + """ Swin-L @ 224x224, pretrained ImageNet-22k, fine tune 1k + """ + model_kwargs = dict( + patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs) + return _create_swin_transformer('swin_large_patch4_window7_224', pretrained=pretrained, **model_kwargs) + + +@register_model +def swin_small_patch4_window7_224(pretrained=False, **kwargs): + """ Swin-S @ 224x224, trained ImageNet-1k + """ + model_kwargs = dict( + patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 
12, 24), **kwargs)
+    return _create_swin_transformer('swin_small_patch4_window7_224', pretrained=pretrained, **model_kwargs)
+
+
+@register_model
+def swin_tiny_patch4_window7_224(pretrained=False, **kwargs):
+    """ Swin-T @ 224x224, trained ImageNet-1k
+    """
+    model_kwargs = dict(
+        patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), **kwargs)
+    return _create_swin_transformer('swin_tiny_patch4_window7_224', pretrained=pretrained, **model_kwargs)
+
+
+@register_model
+def swin_base_patch4_window12_384_in22k(pretrained=False, **kwargs):
+    """ Swin-B @ 384x384, trained ImageNet-22k
+    """
+    model_kwargs = dict(
+        patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs)
+    return _create_swin_transformer('swin_base_patch4_window12_384_in22k', pretrained=pretrained, **model_kwargs)
+
+
+@register_model
+def swin_base_patch4_window7_224_in22k(pretrained=False, **kwargs):
+    """ Swin-B @ 224x224, trained ImageNet-22k
+    """
+    model_kwargs = dict(
+        patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs)
+    return _create_swin_transformer('swin_base_patch4_window7_224_in22k', pretrained=pretrained, **model_kwargs)
+
+
+@register_model
+def swin_large_patch4_window12_384_in22k(pretrained=False, **kwargs):
+    """ Swin-L @ 384x384, trained ImageNet-22k
+    """
+    model_kwargs = dict(
+        patch_size=4, window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs)
+    return _create_swin_transformer('swin_large_patch4_window12_384_in22k', pretrained=pretrained, **model_kwargs)
+
+
+@register_model
+def swin_large_patch4_window7_224_in22k(pretrained=False, **kwargs):
+    """ Swin-L @ 224x224, trained ImageNet-22k
+    """
+    model_kwargs = dict(
+        patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs)
+    return _create_swin_transformer('swin_large_patch4_window7_224_in22k', pretrained=pretrained, **model_kwargs)
diff --git a/timm/models/tnt.py b/timm/models/tnt.py
new file mode 100644
index 0000000..d52f9ce
--- /dev/null
+++ b/timm/models/tnt.py
@@ -0,0 +1,272 @@
+""" Transformer in Transformer (TNT) in PyTorch
+
+A PyTorch implementation of TNT as described in
+'Transformer in Transformer' - https://arxiv.org/abs/2103.00112
+
+The official mindspore code is released and available at
+https://gitee.com/mindspore/mindspore/tree/master/model_zoo/research/cv/TNT
+"""
+import math
+import torch
+import torch.nn as nn
+
+from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
+from timm.models.helpers import build_model_with_cfg
+from timm.models.layers import Mlp, DropPath, trunc_normal_
+from timm.models.layers.helpers import to_2tuple
+from timm.models.layers import _assert
+from timm.models.registry import register_model
+from timm.models.vision_transformer import resize_pos_embed
+
+
+def _cfg(url='', **kwargs):
+    return {
+        'url': url,
+        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
+        'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
+        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
+        'first_conv': 'pixel_embed.proj', 'classifier': 'head',
+        **kwargs
+    }
+
+
+default_cfgs = {
+    'tnt_s_patch16_224': _cfg(
+        url='https://github.com/contrastive/pytorch-image-models/releases/download/TNT/tnt_s_patch16_224.pth.tar',
+        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
+    ),
+    'tnt_b_patch16_224': _cfg(
+        mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
+    ),
+}
+
+
+class
Attention(nn.Module): + """ Multi-Head Attention + """ + def __init__(self, dim, hidden_dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.hidden_dim = hidden_dim + self.num_heads = num_heads + head_dim = hidden_dim // num_heads + self.head_dim = head_dim + self.scale = head_dim ** -0.5 + + self.qk = nn.Linear(dim, hidden_dim * 2, bias=qkv_bias) + self.v = nn.Linear(dim, dim, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop, inplace=True) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop, inplace=True) + + def forward(self, x): + B, N, C = x.shape + qk = self.qk(x).reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + q, k = qk.unbind(0) # make torchscript happy (cannot use tensor as tuple) + v = self.v(x).reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, -1) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + """ TNT Block + """ + def __init__(self, dim, in_dim, num_pixel, num_heads=12, in_num_head=4, mlp_ratio=4., + qkv_bias=False, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + # Inner transformer + self.norm_in = norm_layer(in_dim) + self.attn_in = Attention( + in_dim, in_dim, num_heads=in_num_head, qkv_bias=qkv_bias, + attn_drop=attn_drop, proj_drop=drop) + + self.norm_mlp_in = norm_layer(in_dim) + self.mlp_in = Mlp(in_features=in_dim, hidden_features=int(in_dim * 4), + out_features=in_dim, act_layer=act_layer, drop=drop) + + self.norm1_proj = norm_layer(in_dim) + self.proj = nn.Linear(in_dim * num_pixel, dim, bias=True) + # Outer transformer + self.norm_out = norm_layer(dim) + self.attn_out = Attention( + dim, dim, num_heads=num_heads, qkv_bias=qkv_bias, + attn_drop=attn_drop, proj_drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + + self.norm_mlp = norm_layer(dim) + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), + out_features=dim, act_layer=act_layer, drop=drop) + + def forward(self, pixel_embed, patch_embed): + # inner + pixel_embed = pixel_embed + self.drop_path(self.attn_in(self.norm_in(pixel_embed))) + pixel_embed = pixel_embed + self.drop_path(self.mlp_in(self.norm_mlp_in(pixel_embed))) + # outer + B, N, C = patch_embed.size() + patch_embed = torch.cat( + [patch_embed[:, 0:1], patch_embed[:, 1:] + self.proj(self.norm1_proj(pixel_embed).reshape(B, N - 1, -1))], + dim=1) + patch_embed = patch_embed + self.drop_path(self.attn_out(self.norm_out(patch_embed))) + patch_embed = patch_embed + self.drop_path(self.mlp(self.norm_mlp(patch_embed))) + return pixel_embed, patch_embed + + +class PixelEmbed(nn.Module): + """ Image to Pixel Embedding + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, in_dim=48, stride=4): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + # grid_size property necessary for resizing positional embedding + self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) + num_patches = (self.grid_size[0]) * (self.grid_size[1]) + self.img_size = img_size + self.num_patches = num_patches + self.in_dim = in_dim + new_patch_size = [math.ceil(ps / stride) for ps in patch_size] + self.new_patch_size = new_patch_size + + self.proj = nn.Conv2d(in_chans, self.in_dim, kernel_size=7, padding=3, stride=stride) + self.unfold = nn.Unfold(kernel_size=new_patch_size, stride=new_patch_size) + + def forward(self, x, pixel_pos): + B, C, H, W = x.shape + _assert(H == self.img_size[0], + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).") + _assert(W == self.img_size[1], + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).") + x = self.proj(x) + x = self.unfold(x) + x = x.transpose(1, 2).reshape(B * self.num_patches, self.in_dim, self.new_patch_size[0], self.new_patch_size[1]) + x = x + pixel_pos + x = x.reshape(B * self.num_patches, self.in_dim, -1).transpose(1, 2) + return x + + +class TNT(nn.Module): + """ Transformer in Transformer - https://arxiv.org/abs/2103.00112 + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, in_dim=48, depth=12, + num_heads=12, in_num_head=4, mlp_ratio=4., qkv_bias=False, drop_rate=0., attn_drop_rate=0., + drop_path_rate=0., norm_layer=nn.LayerNorm, first_stride=4): + super().__init__() + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + + self.pixel_embed = PixelEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, in_dim=in_dim, stride=first_stride) + num_patches = self.pixel_embed.num_patches + self.num_patches = num_patches + new_patch_size = self.pixel_embed.new_patch_size + num_pixel = new_patch_size[0] * new_patch_size[1] + + self.norm1_proj = norm_layer(num_pixel * in_dim) + self.proj = nn.Linear(num_pixel * in_dim, embed_dim) + self.norm2_proj = norm_layer(embed_dim) + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.patch_pos = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) + self.pixel_pos = nn.Parameter(torch.zeros(1, in_dim, new_patch_size[0], new_patch_size[1])) + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + 
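+        # linearly spaced drop-path rates: e.g. depth=12, drop_path_rate=0.1 gives
+        # dpr ≈ [0.0, 0.009, 0.018, ..., 0.1], so deeper blocks are dropped more often.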
blocks = [] + for i in range(depth): + blocks.append(Block( + dim=embed_dim, in_dim=in_dim, num_pixel=num_pixel, num_heads=num_heads, in_num_head=in_num_head, + mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, + drop_path=dpr[i], norm_layer=norm_layer)) + self.blocks = nn.ModuleList(blocks) + self.norm = norm_layer(embed_dim) + + self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + trunc_normal_(self.cls_token, std=.02) + trunc_normal_(self.patch_pos, std=.02) + trunc_normal_(self.pixel_pos, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'patch_pos', 'pixel_pos', 'cls_token'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + B = x.shape[0] + pixel_embed = self.pixel_embed(x, self.pixel_pos) + + patch_embed = self.norm2_proj(self.proj(self.norm1_proj(pixel_embed.reshape(B, self.num_patches, -1)))) + patch_embed = torch.cat((self.cls_token.expand(B, -1, -1), patch_embed), dim=1) + patch_embed = patch_embed + self.patch_pos + patch_embed = self.pos_drop(patch_embed) + + for blk in self.blocks: + pixel_embed, patch_embed = blk(pixel_embed, patch_embed) + + patch_embed = self.norm(patch_embed) + return patch_embed[:, 0] + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + """ convert patch embedding weight from manual patchify + linear proj to conv""" + if state_dict['patch_pos'].shape != model.patch_pos.shape: + state_dict['patch_pos'] = resize_pos_embed(state_dict['patch_pos'], + model.patch_pos, getattr(model, 'num_tokens', 1), model.pixel_embed.grid_size) + return state_dict + + +def _create_tnt(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model = build_model_with_cfg( + TNT, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + return model + + +@register_model +def tnt_s_patch16_224(pretrained=False, **kwargs): + model_cfg = dict( + patch_size=16, embed_dim=384, in_dim=24, depth=12, num_heads=6, in_num_head=4, + qkv_bias=False, **kwargs) + model = _create_tnt('tnt_s_patch16_224', pretrained=pretrained, **model_cfg) + return model + + +@register_model +def tnt_b_patch16_224(pretrained=False, **kwargs): + model_cfg = dict( + patch_size=16, embed_dim=640, in_dim=40, depth=12, num_heads=10, in_num_head=4, + qkv_bias=False, **kwargs) + model = _create_tnt('tnt_b_patch16_224', pretrained=pretrained, **model_cfg) + return model diff --git a/timm/models/tresnet.py b/timm/models/tresnet.py new file mode 100644 index 0000000..372bfb7 --- /dev/null +++ b/timm/models/tresnet.py @@ -0,0 +1,297 @@ +""" +TResNet: High Performance GPU-Dedicated Architecture +https://arxiv.org/pdf/2003.13630.pdf + +Original model: https://github.com/mrT23/TResNet + +""" +from collections import OrderedDict + +import torch +import torch.nn 
as nn + +from .helpers import build_model_with_cfg +from .layers import SpaceToDepthModule, BlurPool2d, InplaceAbn, ClassifierHead, SEModule +from .registry import register_model + +__all__ = ['tresnet_m', 'tresnet_l', 'tresnet_xl'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': (0, 0, 0), 'std': (1, 1, 1), + 'first_conv': 'body.conv1.0', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = { + 'tresnet_m': _cfg( + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/tresnet_m_1k_miil_83_1.pth'), + 'tresnet_m_miil_in21k': _cfg( + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/tresnet_m_miil_in21k.pth', num_classes=11221), + 'tresnet_l': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_81_5-235b486c.pth'), + 'tresnet_xl': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_xl_82_0-a2d51b00.pth'), + 'tresnet_m_448': _cfg( + input_size=(3, 448, 448), pool_size=(14, 14), + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_m_448-bc359d10.pth'), + 'tresnet_l_448': _cfg( + input_size=(3, 448, 448), pool_size=(14, 14), + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_448-940d0cd1.pth'), + 'tresnet_xl_448': _cfg( + input_size=(3, 448, 448), pool_size=(14, 14), + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_xl_448-8c1815de.pth') +} + + +def IABN2Float(module: nn.Module) -> nn.Module: + """If `module` is IABN don't use half precision.""" + if isinstance(module, InplaceAbn): + module.float() + for child in module.children(): + IABN2Float(child) + return module + + +def conv2d_iabn(ni, nf, stride, kernel_size=3, groups=1, act_layer="leaky_relu", act_param=1e-2): + return nn.Sequential( + nn.Conv2d( + ni, nf, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, groups=groups, bias=False), + InplaceAbn(nf, act_layer=act_layer, act_param=act_param) + ) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True, aa_layer=None): + super(BasicBlock, self).__init__() + if stride == 1: + self.conv1 = conv2d_iabn(inplanes, planes, stride=1, act_param=1e-3) + else: + if aa_layer is None: + self.conv1 = conv2d_iabn(inplanes, planes, stride=2, act_param=1e-3) + else: + self.conv1 = nn.Sequential( + conv2d_iabn(inplanes, planes, stride=1, act_param=1e-3), + aa_layer(channels=planes, filt_size=3, stride=2)) + + self.conv2 = conv2d_iabn(planes, planes, stride=1, act_layer="identity") + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + rd_chs = max(planes * self.expansion // 4, 64) + self.se = SEModule(planes * self.expansion, rd_channels=rd_chs) if use_se else None + + def forward(self, x): + if self.downsample is not None: + shortcut = self.downsample(x) + else: + shortcut = x + + out = self.conv1(x) + out = self.conv2(out) + + if self.se is not None: + out = self.se(out) + + out += shortcut + out = self.relu(out) + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True, + act_layer="leaky_relu", aa_layer=None): + super(Bottleneck, 
self).__init__() + self.conv1 = conv2d_iabn( + inplanes, planes, kernel_size=1, stride=1, act_layer=act_layer, act_param=1e-3) + if stride == 1: + self.conv2 = conv2d_iabn( + planes, planes, kernel_size=3, stride=1, act_layer=act_layer, act_param=1e-3) + else: + if aa_layer is None: + self.conv2 = conv2d_iabn( + planes, planes, kernel_size=3, stride=2, act_layer=act_layer, act_param=1e-3) + else: + self.conv2 = nn.Sequential( + conv2d_iabn(planes, planes, kernel_size=3, stride=1, act_layer=act_layer, act_param=1e-3), + aa_layer(channels=planes, filt_size=3, stride=2)) + + reduction_chs = max(planes * self.expansion // 8, 64) + self.se = SEModule(planes, rd_channels=reduction_chs) if use_se else None + + self.conv3 = conv2d_iabn( + planes, planes * self.expansion, kernel_size=1, stride=1, act_layer="identity") + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + if self.downsample is not None: + shortcut = self.downsample(x) + else: + shortcut = x + + out = self.conv1(x) + out = self.conv2(out) + if self.se is not None: + out = self.se(out) + + out = self.conv3(out) + out = out + shortcut # no inplace + out = self.relu(out) + + return out + + +class TResNet(nn.Module): + def __init__(self, layers, in_chans=3, num_classes=1000, width_factor=1.0, global_pool='fast', drop_rate=0.): + self.num_classes = num_classes + self.drop_rate = drop_rate + super(TResNet, self).__init__() + + aa_layer = BlurPool2d + + # TResnet stages + self.inplanes = int(64 * width_factor) + self.planes = int(64 * width_factor) + conv1 = conv2d_iabn(in_chans * 16, self.planes, stride=1, kernel_size=3) + layer1 = self._make_layer( + BasicBlock, self.planes, layers[0], stride=1, use_se=True, aa_layer=aa_layer) # 56x56 + layer2 = self._make_layer( + BasicBlock, self.planes * 2, layers[1], stride=2, use_se=True, aa_layer=aa_layer) # 28x28 + layer3 = self._make_layer( + Bottleneck, self.planes * 4, layers[2], stride=2, use_se=True, aa_layer=aa_layer) # 14x14 + layer4 = self._make_layer( + Bottleneck, self.planes * 8, layers[3], stride=2, use_se=False, aa_layer=aa_layer) # 7x7 + + # body + self.body = nn.Sequential(OrderedDict([ + ('SpaceToDepth', SpaceToDepthModule()), + ('conv1', conv1), + ('layer1', layer1), + ('layer2', layer2), + ('layer3', layer3), + ('layer4', layer4)])) + + self.feature_info = [ + dict(num_chs=self.planes, reduction=2, module=''), # Not with S2D? 
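+            # NOTE: the SpaceToDepth stem rearranges 4x4 pixel blocks (hence conv1's
+            # in_chans * 16 input channels), so the effective reduction at conv1 appears
+            # to be 4 rather than 2; the entry above seems to be a placeholder, as the
+            # trailing question in its comment hints.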
+            dict(num_chs=self.planes, reduction=4, module='body.layer1'),
+            dict(num_chs=self.planes * 2, reduction=8, module='body.layer2'),
+            dict(num_chs=self.planes * 4 * Bottleneck.expansion, reduction=16, module='body.layer3'),
+            dict(num_chs=self.planes * 8 * Bottleneck.expansion, reduction=32, module='body.layer4'),
+        ]
+
+        # head
+        self.num_features = (self.planes * 8) * Bottleneck.expansion
+        self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate)
+
+        # model initialization
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu')
+            elif isinstance(m, nn.BatchNorm2d) or isinstance(m, InplaceAbn):
+                nn.init.constant_(m.weight, 1)
+                nn.init.constant_(m.bias, 0)
+
+        # residual connections special initialization
+        for m in self.modules():
+            if isinstance(m, BasicBlock):
+                m.conv2[1].weight = nn.Parameter(torch.zeros_like(m.conv2[1].weight))  # BN to zero
+            if isinstance(m, Bottleneck):
+                m.conv3[1].weight = nn.Parameter(torch.zeros_like(m.conv3[1].weight))  # BN to zero
+            if isinstance(m, nn.Linear):
+                m.weight.data.normal_(0, 0.01)
+
+    def _make_layer(self, block, planes, blocks, stride=1, use_se=True, aa_layer=None):
+        downsample = None
+        if stride != 1 or self.inplanes != planes * block.expansion:
+            layers = []
+            if stride == 2:
+                # avg pooling before 1x1 conv
+                layers.append(nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True, count_include_pad=False))
+            layers += [conv2d_iabn(
+                self.inplanes, planes * block.expansion, kernel_size=1, stride=1, act_layer="identity")]
+            downsample = nn.Sequential(*layers)
+
+        layers = []
+        layers.append(block(
+            self.inplanes, planes, stride, downsample, use_se=use_se, aa_layer=aa_layer))
+        self.inplanes = planes * block.expansion
+        for i in range(1, blocks):
+            layers.append(
+                block(self.inplanes, planes, use_se=use_se, aa_layer=aa_layer))
+        return nn.Sequential(*layers)
+
+    def get_classifier(self):
+        return self.head.fc
+
+    def reset_classifier(self, num_classes, global_pool='fast'):
+        self.head = ClassifierHead(
+            self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
+
+    def forward_features(self, x):
+        return self.body(x)
+
+    def forward(self, x):
+        x = self.forward_features(x)
+        x = self.head(x)
+        return x
+
+
+def _create_tresnet(variant, pretrained=False, **kwargs):
+    return build_model_with_cfg(
+        TResNet, variant, pretrained,
+        default_cfg=default_cfgs[variant],
+        feature_cfg=dict(out_indices=(1, 2, 3, 4), flatten_sequential=True),
+        **kwargs)
+
+
+@register_model
+def tresnet_m(pretrained=False, **kwargs):
+    model_kwargs = dict(layers=[3, 4, 11, 3], **kwargs)
+    return _create_tresnet('tresnet_m', pretrained=pretrained, **model_kwargs)
+
+
+@register_model
+def tresnet_m_miil_in21k(pretrained=False, **kwargs):
+    model_kwargs = dict(layers=[3, 4, 11, 3], **kwargs)
+    return _create_tresnet('tresnet_m_miil_in21k', pretrained=pretrained, **model_kwargs)
+
+
+@register_model
+def tresnet_l(pretrained=False, **kwargs):
+    model_kwargs = dict(layers=[4, 5, 18, 3], width_factor=1.2, **kwargs)
+    return _create_tresnet('tresnet_l', pretrained=pretrained, **model_kwargs)
+
+
+@register_model
+def tresnet_xl(pretrained=False, **kwargs):
+    model_kwargs = dict(layers=[4, 5, 24, 3], width_factor=1.3, **kwargs)
+    return _create_tresnet('tresnet_xl', pretrained=pretrained, **model_kwargs)
+
+
+@register_model
+def tresnet_m_448(pretrained=False, **kwargs):
+    model_kwargs = dict(layers=[3, 4, 11, 3], **kwargs)
+
return _create_tresnet('tresnet_m_448', pretrained=pretrained, **model_kwargs) + + +@register_model +def tresnet_l_448(pretrained=False, **kwargs): + model_kwargs = dict(layers=[4, 5, 18, 3], width_factor=1.2, **kwargs) + return _create_tresnet('tresnet_l_448', pretrained=pretrained, **model_kwargs) + + +@register_model +def tresnet_xl_448(pretrained=False, **kwargs): + model_kwargs = dict(layers=[4, 5, 24, 3], width_factor=1.3, **kwargs) + return _create_tresnet('tresnet_xl_448', pretrained=pretrained, **model_kwargs) diff --git a/timm/models/twins.py b/timm/models/twins.py new file mode 100644 index 0000000..67a939d --- /dev/null +++ b/timm/models/twins.py @@ -0,0 +1,424 @@ +""" Twins +A PyTorch impl of : `Twins: Revisiting the Design of Spatial Attention in Vision Transformers` + - https://arxiv.org/pdf/2104.13840.pdf + +Code/weights from https://github.com/Meituan-AutoML/Twins, original copyright/license info below + +""" +# -------------------------------------------------------- +# Twins +# Copyright (c) 2021 Meituan +# Licensed under The Apache 2.0 License [see LICENSE for details] +# Written by Xinjie Li, Xiangxiang Chu +# -------------------------------------------------------- +import math +from copy import deepcopy +from typing import Optional, Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F +from functools import partial + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .layers import Mlp, DropPath, to_2tuple, trunc_normal_ +from .fx_features import register_notrace_module +from .registry import register_model +from .vision_transformer import Attention +from .helpers import build_model_with_cfg + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embeds.0.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + 'twins_pcpvt_small': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_small-e70e7e7a.pth', + ), + 'twins_pcpvt_base': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_base-e5ecb09b.pth', + ), + 'twins_pcpvt_large': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_large-d273f802.pth', + ), + 'twins_svt_small': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_svt_small-42e5f78c.pth', + ), + 'twins_svt_base': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_svt_base-c2265010.pth', + ), + 'twins_svt_large': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_svt_large-90f6aaa9.pth', + ), +} + +Size_ = Tuple[int, int] + + +@register_notrace_module # reason: FX can't symbolically trace control flow in forward method +class LocallyGroupedAttn(nn.Module): + """ LSA: self attention within a group + """ + def __init__(self, dim, num_heads=8, attn_drop=0., proj_drop=0., ws=1): + assert ws != 1 + super(LocallyGroupedAttn, self).__init__() + assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}." 
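+
+ # Illustrative shape sketch (not part of the upstream Twins code; numbers assumed):
+ # with dim=64, num_heads=8, ws=7 and a 56x56 token grid, the forward pass below pads
+ # H and W up to multiples of ws, reshapes (B, 56, 56, C) -> (B, 8, 7, 8, 7, C), and
+ # transposes to (B, 8, 8, 7, 7, C), so attention runs independently inside 64 windows
+ # of 7x7 = 49 tokens each, i.e. cost O(N * ws^2) rather than O(N^2) in token count N:
+ #
+ #   attn = LocallyGroupedAttn(dim=64, num_heads=8, ws=7)
+ #   y = attn(torch.randn(2, 56 * 56, 64), size=(56, 56))  # y.shape == (2, 3136, 64)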
+
+ self.dim = dim
+ self.num_heads = num_heads
+ head_dim = dim // num_heads
+ self.scale = head_dim ** -0.5
+
+ self.qkv = nn.Linear(dim, dim * 3, bias=True)
+ self.attn_drop = nn.Dropout(attn_drop)
+ self.proj = nn.Linear(dim, dim)
+ self.proj_drop = nn.Dropout(proj_drop)
+ self.ws = ws
+
+ def forward(self, x, size: Size_):
+ # There are two implementations for this function: zero padding or masking. We observed no obvious
+ # difference between the two. Either can be used; we recommend the padding version (this forward)
+ # because it is neat. However, the masking implementation (forward_mask, kept below for reference)
+ # is more principled and accurate.
+ B, N, C = x.shape
+ H, W = size
+ x = x.view(B, H, W, C)
+ pad_l = pad_t = 0
+ pad_r = (self.ws - W % self.ws) % self.ws
+ pad_b = (self.ws - H % self.ws) % self.ws
+ x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
+ _, Hp, Wp, _ = x.shape
+ _h, _w = Hp // self.ws, Wp // self.ws
+ x = x.reshape(B, _h, self.ws, _w, self.ws, C).transpose(2, 3)
+ qkv = self.qkv(x).reshape(
+ B, _h * _w, self.ws * self.ws, 3, self.num_heads, C // self.num_heads).permute(3, 0, 1, 4, 2, 5)
+ q, k, v = qkv[0], qkv[1], qkv[2]
+ attn = (q @ k.transpose(-2, -1)) * self.scale
+ attn = attn.softmax(dim=-1)
+ attn = self.attn_drop(attn)
+ attn = (attn @ v).transpose(2, 3).reshape(B, _h, _w, self.ws, self.ws, C)
+ x = attn.transpose(2, 3).reshape(B, _h * self.ws, _w * self.ws, C)
+ if pad_r > 0 or pad_b > 0:
+ x = x[:, :H, :W, :].contiguous()
+ x = x.reshape(B, N, C)
+ x = self.proj(x)
+ x = self.proj_drop(x)
+ return x
+
+ # def forward_mask(self, x, size: Size_):
+ # B, N, C = x.shape
+ # H, W = size
+ # x = x.view(B, H, W, C)
+ # pad_l = pad_t = 0
+ # pad_r = (self.ws - W % self.ws) % self.ws
+ # pad_b = (self.ws - H % self.ws) % self.ws
+ # x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
+ # _, Hp, Wp, _ = x.shape
+ # _h, _w = Hp // self.ws, Wp // self.ws
+ # mask = torch.zeros((1, Hp, Wp), device=x.device)
+ # mask[:, -pad_b:, :].fill_(1)
+ # mask[:, :, -pad_r:].fill_(1)
+ #
+ # x = x.reshape(B, _h, self.ws, _w, self.ws, C).transpose(2, 3)  # B, _h, _w, ws, ws, C
+ # mask = mask.reshape(1, _h, self.ws, _w, self.ws).transpose(2, 3).reshape(1, _h * _w, self.ws * self.ws)
+ # attn_mask = mask.unsqueeze(2) - mask.unsqueeze(3)  # 1, _h*_w, ws*ws, ws*ws
+ # attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-1000.0)).masked_fill(attn_mask == 0, float(0.0))
+ # qkv = self.qkv(x).reshape(
+ # B, _h * _w, self.ws * self.ws, 3, self.num_heads, C // self.num_heads).permute(3, 0, 1, 4, 2, 5)
+ # # n_h, B, _w*_h, nhead, ws*ws, dim
+ # q, k, v = qkv[0], qkv[1], qkv[2]  # B, _h*_w, n_head, ws*ws, dim_head
+ # attn = (q @ k.transpose(-2, -1)) * self.scale  # B, _h*_w, n_head, ws*ws, ws*ws
+ # attn = attn + attn_mask.unsqueeze(2)
+ # attn = attn.softmax(dim=-1)
+ # attn = self.attn_drop(attn)  # attn @v -> B, _h*_w, n_head, ws*ws, dim_head
+ # attn = (attn @ v).transpose(2, 3).reshape(B, _h, _w, self.ws, self.ws, C)
+ # x = attn.transpose(2, 3).reshape(B, _h * self.ws, _w * self.ws, C)
+ # if pad_r > 0 or pad_b > 0:
+ # x = x[:, :H, :W, :].contiguous()
+ # x = x.reshape(B, N, C)
+ # x = self.proj(x)
+ # x = self.proj_drop(x)
+ # return x
+
+
+class GlobalSubSampleAttn(nn.Module):
+ """ GSA: one sub-sampled key/value token summarizes each spatial group, for efficiency.
+ """
+ def __init__(self, dim, num_heads=8, attn_drop=0., proj_drop=0., sr_ratio=1):
+ super().__init__()
+ assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
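+
+ # Illustrative numbers (assumed, not from the original code): with sr_ratio=8 on a
+ # 56x56 grid, the sr conv below shrinks the key/value grid to 7x7, so every query
+ # attends to 49 summary tokens instead of all 3136, roughly an sr_ratio^2 = 64x
+ # reduction in attention cost.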
+ + self.dim = dim + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.q = nn.Linear(dim, dim, bias=True) + self.kv = nn.Linear(dim, dim * 2, bias=True) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + self.sr_ratio = sr_ratio + if sr_ratio > 1: + self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio) + self.norm = nn.LayerNorm(dim) + else: + self.sr = None + self.norm = None + + def forward(self, x, size: Size_): + B, N, C = x.shape + q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + + if self.sr is not None: + x = x.permute(0, 2, 1).reshape(B, C, *size) + x = self.sr(x).reshape(B, C, -1).permute(0, 2, 1) + x = self.norm(x) + kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + k, v = kv[0], kv[1] + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + + return x + + +class Block(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., drop=0., attn_drop=0., drop_path=0., + act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1, ws=None): + super().__init__() + self.norm1 = norm_layer(dim) + if ws is None: + self.attn = Attention(dim, num_heads, False, None, attn_drop, drop) + elif ws == 1: + self.attn = GlobalSubSampleAttn(dim, num_heads, attn_drop, drop, sr_ratio) + else: + self.attn = LocallyGroupedAttn(dim, num_heads, attn_drop, drop, ws) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def forward(self, x, size: Size_): + x = x + self.drop_path(self.attn(self.norm1(x), size)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class PosConv(nn.Module): + # PEG from https://arxiv.org/abs/2102.10882 + def __init__(self, in_chans, embed_dim=768, stride=1): + super(PosConv, self).__init__() + self.proj = nn.Sequential(nn.Conv2d(in_chans, embed_dim, 3, stride, 1, bias=True, groups=embed_dim), ) + self.stride = stride + + def forward(self, x, size: Size_): + B, N, C = x.shape + cnn_feat_token = x.transpose(1, 2).view(B, C, *size) + x = self.proj(cnn_feat_token) + if self.stride == 1: + x += cnn_feat_token + x = x.flatten(2).transpose(1, 2) + return x + + def no_weight_decay(self): + return ['proj.%d.weight' % i for i in range(4)] + + +class PatchEmbed(nn.Module): + """ Image to Patch Embedding + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + + self.img_size = img_size + self.patch_size = patch_size + assert img_size[0] % patch_size[0] == 0 and img_size[1] % patch_size[1] == 0, \ + f"img_size {img_size} should be divided by patch_size {patch_size}." 
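+
+ # Example (illustrative, assuming the Twins defaults): the first stage uses
+ # img_size=224 and patch_size=4, so H = W = 224 // 4 = 56 and num_patches = 3136;
+ # each later stage re-embeds with patch_size=2, halving the grid to 28, 14, then 7.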
+ self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1]
+ self.num_patches = self.H * self.W
+ self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
+ self.norm = nn.LayerNorm(embed_dim)
+
+ def forward(self, x) -> Tuple[torch.Tensor, Size_]:
+ B, C, H, W = x.shape
+
+ x = self.proj(x).flatten(2).transpose(1, 2)
+ x = self.norm(x)
+ out_size = (H // self.patch_size[0], W // self.patch_size[1])
+
+ return x, out_size
+
+
+class Twins(nn.Module):
+ """ Twins Vision Transformer (Revisiting Spatial Attention)
+
+ Adapted from PVT (PyramidVisionTransformer) class at https://github.com/whai362/PVT.git
+ """
+ def __init__(
+ self, img_size=224, patch_size=4, in_chans=3, num_classes=1000, embed_dims=(64, 128, 256, 512),
+ num_heads=(1, 2, 4, 8), mlp_ratios=(4, 4, 4, 4), drop_rate=0., attn_drop_rate=0., drop_path_rate=0.,
+ norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=(3, 4, 6, 3), sr_ratios=(8, 4, 2, 1), wss=None,
+ block_cls=Block):
+ super().__init__()
+ self.num_classes = num_classes
+ self.depths = depths
+ self.embed_dims = embed_dims
+ self.num_features = embed_dims[-1]
+
+ img_size = to_2tuple(img_size)
+ prev_chs = in_chans
+ self.patch_embeds = nn.ModuleList()
+ self.pos_drops = nn.ModuleList()
+ for i in range(len(depths)):
+ self.patch_embeds.append(PatchEmbed(img_size, patch_size, prev_chs, embed_dims[i]))
+ self.pos_drops.append(nn.Dropout(p=drop_rate))
+ prev_chs = embed_dims[i]
+ img_size = tuple(t // patch_size for t in img_size)
+ patch_size = 2
+
+ self.blocks = nn.ModuleList()
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule
+ cur = 0
+ for k in range(len(depths)):
+ _block = nn.ModuleList([block_cls(
+ dim=embed_dims[k], num_heads=num_heads[k], mlp_ratio=mlp_ratios[k], drop=drop_rate,
+ attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer, sr_ratio=sr_ratios[k],
+ ws=1 if wss is None or i % 2 == 1 else wss[k]) for i in range(depths[k])])
+ self.blocks.append(_block)
+ cur += depths[k]
+
+ self.pos_block = nn.ModuleList([PosConv(embed_dim, embed_dim) for embed_dim in embed_dims])
+
+ self.norm = norm_layer(self.num_features)
+
+ # classification head
+ self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
+
+ # init weights
+ self.apply(self._init_weights)
+
+ @torch.jit.ignore
+ def no_weight_decay(self):
+ return set(['pos_block.'
+ n for n, p in self.pos_block.named_parameters()]) + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + fan_out //= m.groups + m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1.0) + m.bias.data.zero_() + + def forward_features(self, x): + B = x.shape[0] + for i, (embed, drop, blocks, pos_blk) in enumerate( + zip(self.patch_embeds, self.pos_drops, self.blocks, self.pos_block)): + x, size = embed(x) + x = drop(x) + for j, blk in enumerate(blocks): + x = blk(x, size) + if j == 0: + x = pos_blk(x, size) # PEG here + if i < len(self.depths) - 1: + x = x.reshape(B, *size, -1).permute(0, 3, 1, 2).contiguous() + x = self.norm(x) + return x.mean(dim=1) # GAP here + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _create_twins(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + model = build_model_with_cfg( + Twins, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + return model + + +@register_model +def twins_pcpvt_small(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], + depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1], **kwargs) + return _create_twins('twins_pcpvt_small', pretrained=pretrained, **model_kwargs) + + +@register_model +def twins_pcpvt_base(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], + depths=[3, 4, 18, 3], sr_ratios=[8, 4, 2, 1], **kwargs) + return _create_twins('twins_pcpvt_base', pretrained=pretrained, **model_kwargs) + + +@register_model +def twins_pcpvt_large(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], + depths=[3, 8, 27, 3], sr_ratios=[8, 4, 2, 1], **kwargs) + return _create_twins('twins_pcpvt_large', pretrained=pretrained, **model_kwargs) + + +@register_model +def twins_svt_small(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=4, embed_dims=[64, 128, 256, 512], num_heads=[2, 4, 8, 16], mlp_ratios=[4, 4, 4, 4], + depths=[2, 2, 10, 4], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1], **kwargs) + return _create_twins('twins_svt_small', pretrained=pretrained, **model_kwargs) + + +@register_model +def twins_svt_base(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=4, embed_dims=[96, 192, 384, 768], num_heads=[3, 6, 12, 24], mlp_ratios=[4, 4, 4, 4], + depths=[2, 2, 18, 2], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1], **kwargs) + return _create_twins('twins_svt_base', pretrained=pretrained, **model_kwargs) + + +@register_model +def twins_svt_large(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=4, 
embed_dims=[128, 256, 512, 1024], num_heads=[4, 8, 16, 32], mlp_ratios=[4, 4, 4, 4], + depths=[2, 2, 18, 2], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1], **kwargs) + return _create_twins('twins_svt_large', pretrained=pretrained, **model_kwargs) diff --git a/timm/models/vgg.py b/timm/models/vgg.py new file mode 100644 index 0000000..11f6d0e --- /dev/null +++ b/timm/models/vgg.py @@ -0,0 +1,263 @@ +"""VGG + +Adapted from https://github.com/pytorch/vision 'vgg.py' (BSD-3-Clause) with a few changes for +timm functionality. + +Copyright 2021 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import Union, List, Dict, Any, cast + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .fx_features import register_notrace_module +from .layers import ClassifierHead +from .registry import register_model + +__all__ = [ + 'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn', + 'vgg19_bn', 'vgg19', +] + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (1, 1), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'features.0', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = { + 'vgg11': _cfg(url='https://download.pytorch.org/models/vgg11-bbd30ac9.pth'), + 'vgg13': _cfg(url='https://download.pytorch.org/models/vgg13-c768596a.pth'), + 'vgg16': _cfg(url='https://download.pytorch.org/models/vgg16-397923af.pth'), + 'vgg19': _cfg(url='https://download.pytorch.org/models/vgg19-dcbb9e9d.pth'), + 'vgg11_bn': _cfg(url='https://download.pytorch.org/models/vgg11_bn-6002323d.pth'), + 'vgg13_bn': _cfg(url='https://download.pytorch.org/models/vgg13_bn-abd245e5.pth'), + 'vgg16_bn': _cfg(url='https://download.pytorch.org/models/vgg16_bn-6c64b313.pth'), + 'vgg19_bn': _cfg(url='https://download.pytorch.org/models/vgg19_bn-c79401a0.pth'), +} + + +cfgs: Dict[str, List[Union[str, int]]] = { + 'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], + 'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], + 'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], + 'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'], +} + + +@register_notrace_module # reason: FX can't symbolically trace control flow in forward method +class ConvMlp(nn.Module): + + def __init__(self, in_features=512, out_features=4096, kernel_size=7, mlp_ratio=1.0, + drop_rate: float = 0.2, act_layer: nn.Module = None, conv_layer: nn.Module = None): + super(ConvMlp, self).__init__() + self.input_kernel_size = kernel_size + mid_features = int(out_features * mlp_ratio) + self.fc1 = conv_layer(in_features, mid_features, kernel_size, bias=True) + self.act1 = act_layer(True) + self.drop = nn.Dropout(drop_rate) + self.fc2 = conv_layer(mid_features, out_features, 1, bias=True) + self.act2 = act_layer(True) + + def forward(self, x): + if x.shape[-2] < self.input_kernel_size or x.shape[-1] < self.input_kernel_size: + # keep the input size >= 7x7 + output_size = (max(self.input_kernel_size, x.shape[-2]), max(self.input_kernel_size, x.shape[-1])) + x = F.adaptive_avg_pool2d(x, output_size) + x = self.fc1(x) + x = self.act1(x) + x = self.drop(x) + x = self.fc2(x) + x = self.act2(x) + return x + + +class VGG(nn.Module): + + def __init__( + self, + 
cfg: List[Any],
+ num_classes: int = 1000,
+ in_chans: int = 3,
+ output_stride: int = 32,
+ mlp_ratio: float = 1.0,
+ act_layer: nn.Module = nn.ReLU,
+ conv_layer: nn.Module = nn.Conv2d,
+ norm_layer: nn.Module = None,
+ global_pool: str = 'avg',
+ drop_rate: float = 0.,
+ ) -> None:
+ super(VGG, self).__init__()
+ assert output_stride == 32
+ self.num_classes = num_classes
+ self.num_features = 4096
+ self.drop_rate = drop_rate
+ self.feature_info = []
+ prev_chs = in_chans
+ net_stride = 1
+ pool_layer = nn.MaxPool2d
+ layers: List[nn.Module] = []
+ for v in cfg:
+ last_idx = len(layers) - 1
+ if v == 'M':
+ self.feature_info.append(dict(num_chs=prev_chs, reduction=net_stride, module=f'features.{last_idx}'))
+ layers += [pool_layer(kernel_size=2, stride=2)]
+ net_stride *= 2
+ else:
+ v = cast(int, v)
+ conv2d = conv_layer(prev_chs, v, kernel_size=3, padding=1)
+ if norm_layer is not None:
+ layers += [conv2d, norm_layer(v), act_layer(inplace=True)]
+ else:
+ layers += [conv2d, act_layer(inplace=True)]
+ prev_chs = v
+ self.features = nn.Sequential(*layers)
+ self.feature_info.append(dict(num_chs=prev_chs, reduction=net_stride, module=f'features.{len(layers) - 1}'))
+ self.pre_logits = ConvMlp(
+ prev_chs, self.num_features, 7, mlp_ratio=mlp_ratio,
+ drop_rate=drop_rate, act_layer=act_layer, conv_layer=conv_layer)
+ self.head = ClassifierHead(
+ self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate)
+
+ self._initialize_weights()
+
+ def get_classifier(self):
+ return self.head.fc
+
+ def reset_classifier(self, num_classes, global_pool='avg'):
+ self.num_classes = num_classes
+ self.head = ClassifierHead(
+ self.num_features, self.num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
+
+ def forward_features(self, x: torch.Tensor) -> torch.Tensor:
+ x = self.features(x)
+ x = self.pre_logits(x)
+ return x
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ x = self.forward_features(x)
+ x = self.head(x)
+ return x
+
+ def _initialize_weights(self) -> None:
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
+ if m.bias is not None:
+ nn.init.constant_(m.bias, 0)
+ elif isinstance(m, nn.BatchNorm2d):
+ nn.init.constant_(m.weight, 1)
+ nn.init.constant_(m.bias, 0)
+ elif isinstance(m, nn.Linear):
+ nn.init.normal_(m.weight, 0, 0.01)
+ nn.init.constant_(m.bias, 0)
+
+
+def _filter_fn(state_dict):
+ """ convert torchvision classifier (Linear) weights to this impl's ConvMlp pre-logits + head layout"""
+ out_dict = {}
+ for k, v in state_dict.items():
+ k_r = k
+ k_r = k_r.replace('classifier.0', 'pre_logits.fc1')
+ k_r = k_r.replace('classifier.3', 'pre_logits.fc2')
+ k_r = k_r.replace('classifier.6', 'head.fc')
+ if 'classifier.0.weight' in k:
+ v = v.reshape(-1, 512, 7, 7)
+ if 'classifier.3.weight' in k:
+ v = v.reshape(-1, 4096, 1, 1)
+ out_dict[k_r] = v
+ return out_dict
+
+
+def _create_vgg(variant: str, pretrained: bool, **kwargs: Any) -> VGG:
+ cfg = variant.split('_')[0]
+ # NOTE: VGG is one of the only models with stride==1 features, so indices are offset from other models
+ out_indices = kwargs.get('out_indices', (0, 1, 2, 3, 4, 5))
+ model = build_model_with_cfg(
+ VGG, variant, pretrained,
+ default_cfg=default_cfgs[variant],
+ model_cfg=cfgs[cfg],
+ feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
+ pretrained_filter_fn=_filter_fn,
+ **kwargs)
+ return model
+
+
+@register_model
+def vgg11(pretrained: bool = False, **kwargs: Any) -> VGG:
+ r"""VGG 11-layer model (configuration "A") from
(configuration "A") from + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(**kwargs) + return _create_vgg('vgg11', pretrained=pretrained, **model_args) + + +@register_model +def vgg11_bn(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 11-layer model (configuration "A") with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) + return _create_vgg('vgg11_bn', pretrained=pretrained, **model_args) + + +@register_model +def vgg13(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 13-layer model (configuration "B") + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(**kwargs) + return _create_vgg('vgg13', pretrained=pretrained, **model_args) + + +@register_model +def vgg13_bn(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 13-layer model (configuration "B") with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) + return _create_vgg('vgg13_bn', pretrained=pretrained, **model_args) + + +@register_model +def vgg16(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 16-layer model (configuration "D") + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(**kwargs) + return _create_vgg('vgg16', pretrained=pretrained, **model_args) + + +@register_model +def vgg16_bn(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 16-layer model (configuration "D") with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) + return _create_vgg('vgg16_bn', pretrained=pretrained, **model_args) + + +@register_model +def vgg19(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 19-layer model (configuration "E") + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(**kwargs) + return _create_vgg('vgg19', pretrained=pretrained, **model_args) + + +@register_model +def vgg19_bn(pretrained: bool = False, **kwargs: Any) -> VGG: + r"""VGG 19-layer model (configuration 'E') with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `._ + """ + model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) + return _create_vgg('vgg19_bn', pretrained=pretrained, **model_args) \ No newline at end of file diff --git a/timm/models/visformer.py b/timm/models/visformer.py new file mode 100644 index 0000000..37284c9 --- /dev/null +++ b/timm/models/visformer.py @@ -0,0 +1,412 @@ +""" Visformer + +Paper: Visformer: The Vision-friendly Transformer - https://arxiv.org/abs/2104.12533 + +From original at https://github.com/danczs/Visformer + +""" +from copy import deepcopy + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg, overlay_external_default_cfg +from .layers import to_2tuple, trunc_normal_, DropPath, PatchEmbed, LayerNorm2d, create_classifier +from .registry import register_model + + +__all__ = ['Visformer'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.0', 'classifier': 'head', + **kwargs + } + + +default_cfgs = dict( + visformer_tiny=_cfg(), + visformer_small=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/visformer_small-839e1f5b.pth' + ), +) + + +class SpatialMlp(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, + act_layer=nn.GELU, drop=0., group=8, spatial_conv=False): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + drop_probs = to_2tuple(drop) + + self.in_features = in_features + self.out_features = out_features + self.spatial_conv = spatial_conv + if self.spatial_conv: + if group < 2: # net setting + hidden_features = in_features * 5 // 6 + else: + hidden_features = in_features * 2 + self.hidden_features = hidden_features + self.group = group + self.conv1 = nn.Conv2d(in_features, hidden_features, 1, stride=1, padding=0, bias=False) + self.act1 = act_layer() + self.drop1 = nn.Dropout(drop_probs[0]) + if self.spatial_conv: + self.conv2 = nn.Conv2d( + hidden_features, hidden_features, 3, stride=1, padding=1, groups=self.group, bias=False) + self.act2 = act_layer() + else: + self.conv2 = None + self.act2 = None + self.conv3 = nn.Conv2d(hidden_features, out_features, 1, stride=1, padding=0, bias=False) + self.drop3 = nn.Dropout(drop_probs[1]) + + def forward(self, x): + x = self.conv1(x) + x = self.act1(x) + x = self.drop1(x) + if self.conv2 is not None: + x = self.conv2(x) + x = self.act2(x) + x = self.conv3(x) + x = self.drop3(x) + return x + + +class Attention(nn.Module): + def __init__(self, dim, num_heads=8, head_dim_ratio=1., attn_drop=0., proj_drop=0.): + super().__init__() + self.dim = dim + self.num_heads = num_heads + head_dim = round(dim // num_heads * head_dim_ratio) + self.head_dim = head_dim + self.scale = head_dim ** -0.5 + self.qkv = nn.Conv2d(dim, head_dim * num_heads * 3, 1, stride=1, padding=0, bias=False) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Conv2d(self.head_dim * self.num_heads, dim, 1, stride=1, padding=0, bias=False) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, C, H, W = x.shape + x = self.qkv(x).reshape(B, 3, self.num_heads, self.head_dim, -1).permute(1, 0, 2, 4, 3) + q, k, v = x[0], x[1], x[2] + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x = attn @ v + + x = x.permute(0, 1, 3, 2).reshape(B, -1, H, W) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + def __init__(self, dim, num_heads, head_dim_ratio=1., mlp_ratio=4., + drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=LayerNorm2d, + group=8, attn_disabled=False, spatial_conv=False): + super().__init__() + self.spatial_conv = spatial_conv + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + if attn_disabled: + self.norm1 = None + self.attn = None + else: + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, num_heads=num_heads, head_dim_ratio=head_dim_ratio, attn_drop=attn_drop, proj_drop=drop) + + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = SpatialMlp( + in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop, + group=group, spatial_conv=spatial_conv) # new setting + + def forward(self, x): + if self.attn is not None: + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class Visformer(nn.Module): + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, init_channels=32, embed_dim=384, + depth=12, num_heads=6, mlp_ratio=4., drop_rate=0., attn_drop_rate=0., drop_path_rate=0., + norm_layer=LayerNorm2d, attn_stage='111', pos_embed=True, spatial_conv='111', + vit_stem=False, group=8, global_pool='avg', conv_init=False, embed_norm=None): + super().__init__() + img_size = to_2tuple(img_size) + self.num_classes = num_classes + self.embed_dim = embed_dim + self.init_channels = init_channels + self.img_size = img_size + self.vit_stem = vit_stem + self.conv_init = conv_init + if isinstance(depth, (list, tuple)): + self.stage_num1, self.stage_num2, self.stage_num3 = depth + depth = sum(depth) + else: + self.stage_num1 = self.stage_num3 = depth // 3 + self.stage_num2 = depth - self.stage_num1 - self.stage_num3 + self.pos_embed = pos_embed + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] + + # stage 1 + if self.vit_stem: + self.stem = None + self.patch_embed1 = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, + embed_dim=embed_dim, norm_layer=embed_norm, flatten=False) + img_size = [x // patch_size for x in img_size] + else: + if self.init_channels is None: + self.stem = None + self.patch_embed1 = PatchEmbed( + img_size=img_size, patch_size=patch_size // 2, in_chans=in_chans, + embed_dim=embed_dim // 2, norm_layer=embed_norm, flatten=False) + img_size = [x // (patch_size // 2) for x in img_size] + else: + self.stem = nn.Sequential( + nn.Conv2d(in_chans, self.init_channels, 7, stride=2, padding=3, bias=False), + nn.BatchNorm2d(self.init_channels), + nn.ReLU(inplace=True) + ) + img_size = [x // 2 for x in img_size] + self.patch_embed1 = PatchEmbed( + img_size=img_size, patch_size=patch_size // 4, in_chans=self.init_channels, + embed_dim=embed_dim // 2, norm_layer=embed_norm, flatten=False) + img_size = [x // (patch_size // 4) for x in img_size] + + if self.pos_embed: + if self.vit_stem: + self.pos_embed1 = nn.Parameter(torch.zeros(1, embed_dim, *img_size)) + else: + self.pos_embed1 = nn.Parameter(torch.zeros(1, embed_dim//2, *img_size)) + self.pos_drop = nn.Dropout(p=drop_rate) + self.stage1 = nn.ModuleList([ + Block( + dim=embed_dim//2, num_heads=num_heads, head_dim_ratio=0.5, mlp_ratio=mlp_ratio, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + group=group, attn_disabled=(attn_stage[0] == '0'), spatial_conv=(spatial_conv[0] == '1') + ) + for i in range(self.stage_num1) + ]) + + # stage2 + if not self.vit_stem: + self.patch_embed2 = PatchEmbed( + img_size=img_size, patch_size=patch_size // 8, in_chans=embed_dim // 2, + embed_dim=embed_dim, norm_layer=embed_norm, flatten=False) + img_size = [x // (patch_size // 8) for x in img_size] + if self.pos_embed: + self.pos_embed2 = nn.Parameter(torch.zeros(1, embed_dim, *img_size)) + 
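+
+ # (Illustrative note, assuming the visformer_small defaults.) Unlike ViT's single
+ # sequence embedding, Visformer keeps one 2D positional embedding per stage, shaped
+ # to that stage's feature map: pos_embed2 above is (1, 384, 14, 14) for a 224 input
+ # with patch_size=16 and the conv stem, since 224 / 2 / 4 / 2 = 14.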
self.stage2 = nn.ModuleList([ + Block( + dim=embed_dim, num_heads=num_heads, head_dim_ratio=1.0, mlp_ratio=mlp_ratio, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + group=group, attn_disabled=(attn_stage[1] == '0'), spatial_conv=(spatial_conv[1] == '1') + ) + for i in range(self.stage_num1, self.stage_num1+self.stage_num2) + ]) + + # stage 3 + if not self.vit_stem: + self.patch_embed3 = PatchEmbed( + img_size=img_size, patch_size=patch_size // 8, in_chans=embed_dim, + embed_dim=embed_dim * 2, norm_layer=embed_norm, flatten=False) + img_size = [x // (patch_size // 8) for x in img_size] + if self.pos_embed: + self.pos_embed3 = nn.Parameter(torch.zeros(1, embed_dim*2, *img_size)) + self.stage3 = nn.ModuleList([ + Block( + dim=embed_dim*2, num_heads=num_heads, head_dim_ratio=1.0, mlp_ratio=mlp_ratio, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + group=group, attn_disabled=(attn_stage[2] == '0'), spatial_conv=(spatial_conv[2] == '1') + ) + for i in range(self.stage_num1+self.stage_num2, depth) + ]) + + # head + self.num_features = embed_dim if self.vit_stem else embed_dim * 2 + self.norm = norm_layer(self.num_features) + self.global_pool, self.head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + # weights init + if self.pos_embed: + trunc_normal_(self.pos_embed1, std=0.02) + if not self.vit_stem: + trunc_normal_(self.pos_embed2, std=0.02) + trunc_normal_(self.pos_embed3, std=0.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=0.02) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.Conv2d): + if self.conv_init: + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + else: + trunc_normal_(m.weight, std=0.02) + if m.bias is not None: + nn.init.constant_(m.bias, 0.) 
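+
+ # (Illustrative note, not part of the original code.) attn_stage and spatial_conv
+ # are per-stage '0'/'1' flag strings: with the visformer_small defaults
+ # attn_stage='011', spatial_conv='100', stage 1 uses grouped 3x3 spatial-conv MLP
+ # blocks with attention disabled, while stages 2 and 3 use self-attention blocks
+ # with plain 1x1-conv MLPs.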
+ + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool, self.head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) + + def forward_features(self, x): + if self.stem is not None: + x = self.stem(x) + + # stage 1 + x = self.patch_embed1(x) + if self.pos_embed: + x = x + self.pos_embed1 + x = self.pos_drop(x) + for b in self.stage1: + x = b(x) + + # stage 2 + if not self.vit_stem: + x = self.patch_embed2(x) + if self.pos_embed: + x = x + self.pos_embed2 + x = self.pos_drop(x) + for b in self.stage2: + x = b(x) + + # stage3 + if not self.vit_stem: + x = self.patch_embed3(x) + if self.pos_embed: + x = x + self.pos_embed3 + x = self.pos_drop(x) + for b in self.stage3: + x = b(x) + + x = self.norm(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + x = self.head(x) + return x + + +def _create_visformer(variant, pretrained=False, default_cfg=None, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + model = build_model_with_cfg( + Visformer, variant, pretrained, + default_cfg=default_cfgs[variant], + **kwargs) + return model + + +@register_model +def visformer_tiny(pretrained=False, **kwargs): + model_cfg = dict( + init_channels=16, embed_dim=192, depth=(7, 4, 4), num_heads=3, mlp_ratio=4., group=8, + attn_stage='011', spatial_conv='100', norm_layer=nn.BatchNorm2d, conv_init=True, + embed_norm=nn.BatchNorm2d, **kwargs) + model = _create_visformer('visformer_tiny', pretrained=pretrained, **model_cfg) + return model + + +@register_model +def visformer_small(pretrained=False, **kwargs): + model_cfg = dict( + init_channels=32, embed_dim=384, depth=(7, 4, 4), num_heads=6, mlp_ratio=4., group=8, + attn_stage='011', spatial_conv='100', norm_layer=nn.BatchNorm2d, conv_init=True, + embed_norm=nn.BatchNorm2d, **kwargs) + model = _create_visformer('visformer_small', pretrained=pretrained, **model_cfg) + return model + + +# @register_model +# def visformer_net1(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=None, embed_dim=384, depth=(0, 12, 0), num_heads=6, mlp_ratio=4., attn_stage='111', +# spatial_conv='000', vit_stem=True, conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net2(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=(0, 12, 0), num_heads=6, mlp_ratio=4., attn_stage='111', +# spatial_conv='000', vit_stem=False, conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net3(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., attn_stage='111', +# spatial_conv='000', vit_stem=False, conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net4(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., attn_stage='111', +# spatial_conv='000', vit_stem=False, conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net5(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., group=1, attn_stage='111', +# spatial_conv='111', vit_stem=False, 
conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net6(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., group=1, attn_stage='111', +# pos_embed=False, spatial_conv='111', conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model +# +# +# @register_model +# def visformer_net7(pretrained=False, **kwargs): +# model = Visformer( +# init_channels=32, embed_dim=384, depth=(6, 7, 7), num_heads=6, group=1, attn_stage='000', +# pos_embed=False, spatial_conv='111', conv_init=True, **kwargs) +# model.default_cfg = _cfg() +# return model + + + + diff --git a/timm/models/vision_transformer.py b/timm/models/vision_transformer.py new file mode 100644 index 0000000..3db6364 --- /dev/null +++ b/timm/models/vision_transformer.py @@ -0,0 +1,989 @@ +""" Vision Transformer (ViT) in PyTorch + +A PyTorch implement of Vision Transformers as described in: + +'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' + - https://arxiv.org/abs/2010.11929 + +`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers` + - https://arxiv.org/abs/2106.10270 + +The official jax code is released and available at https://github.com/google-research/vision_transformer + +DeiT model defs and weights from https://github.com/facebookresearch/deit, +paper `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877 + +Acknowledgments: +* The paper authors for releasing code and weights, thanks! +* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... check it out +for some einops/einsum fun +* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT +* Bert reference code checks against Huggingface Transformers and Tensorflow Bert + +Hacked together by / Copyright 2021 Ross Wightman +""" +import math +import logging +from functools import partial +from collections import OrderedDict +from copy import deepcopy + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD +from .helpers import build_model_with_cfg, named_apply, adapt_input_conv +from .layers import PatchEmbed, Mlp, DropPath, trunc_normal_, lecun_normal_ +from .registry import register_model + +_logger = logging.getLogger(__name__) + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'patch_embed.proj', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + # patch models (weights from official Google JAX impl) + 'vit_tiny_patch16_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz'), + 'vit_tiny_patch16_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_small_patch32_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 
'S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz'), + 'vit_small_patch32_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_small_patch16_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz'), + 'vit_small_patch16_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_base_patch32_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'B_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz'), + 'vit_base_patch32_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'B_32-i21k-300ep-lr_0.001-aug_light1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_base_patch16_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz'), + 'vit_base_patch16_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_base_patch8_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'B_8-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz'), + 'vit_large_patch32_224': _cfg( + url='', # no official model weights for this combo, only for in21k + ), + 'vit_large_patch32_384': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p32_384-9b920ba8.pth', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_large_patch16_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz'), + 'vit_large_patch16_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + + # patch models, imagenet21k (weights from official Google JAX impl) + 'vit_tiny_patch16_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0.npz', + num_classes=21843), + 'vit_small_patch32_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0.npz', + num_classes=21843), + 'vit_small_patch16_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0.npz', + num_classes=21843), + 'vit_base_patch32_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.03-do_0.0-sd_0.0.npz', + num_classes=21843), + 'vit_base_patch16_224_in21k': _cfg( + 
url='https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz', + num_classes=21843), + 'vit_base_patch8_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/B_8-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz', + num_classes=21843), + 'vit_large_patch32_224_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch32_224_in21k-9046d2e7.pth', + num_classes=21843), + 'vit_large_patch16_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1.npz', + num_classes=21843), + 'vit_huge_patch14_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/imagenet21k/ViT-H_14.npz', + hf_hub='timm/vit_huge_patch14_224_in21k', + num_classes=21843), + + # SAM trained models (https://arxiv.org/abs/2106.01548) + 'vit_base_patch32_sam_224': _cfg( + url='https://storage.googleapis.com/vit_models/sam/ViT-B_32.npz'), + 'vit_base_patch16_sam_224': _cfg( + url='https://storage.googleapis.com/vit_models/sam/ViT-B_16.npz'), + + # deit models (FB weights) + 'deit_tiny_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + 'deit_small_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + 'deit_base_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), + 'deit_base_patch16_384': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, input_size=(3, 384, 384), crop_pct=1.0), + 'deit_tiny_distilled_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, classifier=('head', 'head_dist')), + 'deit_small_distilled_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, classifier=('head', 'head_dist')), + 'deit_base_distilled_patch16_224': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, classifier=('head', 'head_dist')), + 'deit_base_distilled_patch16_384': _cfg( + url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth', + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, input_size=(3, 384, 384), crop_pct=1.0, + classifier=('head', 'head_dist')), + + # ViT ImageNet-21K-P pretraining by MILL + 'vit_base_patch16_224_miil_in21k': _cfg( + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/vit_base_patch16_224_in21k_miil.pth', + mean=(0, 0, 0), std=(1, 1, 1), crop_pct=0.875, interpolation='bilinear', num_classes=11221, + ), + 'vit_base_patch16_224_miil': _cfg( + url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm' + '/vit_base_patch16_224_1k_miil_84_4.pth', + mean=(0, 0, 0), std=(1, 1, 1), crop_pct=0.875, interpolation='bilinear', + ), +} + + +class Attention(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + 
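+
+ # (Illustrative numbers, not from the original code.) Standard multi-head
+ # self-attention: e.g. for ViT-Base with embed_dim=768 and num_heads=12,
+ # head_dim = 64 and scale = 64 ** -0.5 = 0.125, so the logits below are
+ # q @ k^T / sqrt(head_dim) before the row-wise softmax.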
super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def forward(self, x): + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class VisionTransformer(nn.Module): + """ Vision Transformer + + A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` + - https://arxiv.org/abs/2010.11929 + + Includes distillation token & head support for `DeiT: Data-efficient Image Transformers` + - https://arxiv.org/abs/2012.12877 + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, + num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None, + act_layer=None, weight_init=''): + """ + Args: + img_size (int, tuple): input image size + patch_size (int, tuple): patch size + in_chans (int): number of input channels + num_classes (int): number of classes for classification head + embed_dim (int): embedding dimension + depth (int): depth of transformer + num_heads (int): number of attention heads + mlp_ratio (int): ratio of mlp hidden dim to embedding dim + qkv_bias (bool): enable bias for qkv if True + representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set + distilled (bool): model includes a distillation token and head as in DeiT models + drop_rate (float): dropout rate + attn_drop_rate (float): attention dropout rate + drop_path_rate (float): stochastic depth rate + embed_layer (nn.Module): patch embedding layer + norm_layer: (nn.Module): normalization layer + weight_init: (str): weight init scheme + """ + super().__init__() + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + self.num_tokens = 2 if distilled else 1 + norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) + act_layer = act_layer or nn.GELU + + self.patch_embed = embed_layer( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + num_patches = self.patch_embed.num_patches + + 
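+
+ # Example (illustrative): with img_size=224 and patch_size=16 this gives
+ # num_patches = (224 // 16) ** 2 = 196, so the positional embedding created
+ # below has length 196 + num_tokens (197 with just a class token).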
+ self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
+ self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None
+ self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))
+ self.pos_drop = nn.Dropout(p=drop_rate)
+
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
+ self.blocks = nn.Sequential(*[
+ Block(
+ dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,
+ attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer)
+ for i in range(depth)])
+ self.norm = norm_layer(embed_dim)
+
+ # Representation layer
+ if representation_size and not distilled:
+ self.num_features = representation_size
+ self.pre_logits = nn.Sequential(OrderedDict([
+ ('fc', nn.Linear(embed_dim, representation_size)),
+ ('act', nn.Tanh())
+ ]))
+ else:
+ self.pre_logits = nn.Identity()
+
+ # Classifier head(s)
+ self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
+ self.head_dist = None
+ if distilled:
+ self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
+
+ self.init_weights(weight_init)
+
+ def init_weights(self, mode=''):
+ assert mode in ('jax', 'jax_nlhb', 'nlhb', '')
+ head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0.
+ trunc_normal_(self.pos_embed, std=.02)
+ if self.dist_token is not None:
+ trunc_normal_(self.dist_token, std=.02)
+ if mode.startswith('jax'):
+ # leave cls token as zeros to match jax impl
+ named_apply(partial(_init_vit_weights, head_bias=head_bias, jax_impl=True), self)
+ else:
+ trunc_normal_(self.cls_token, std=.02)
+ self.apply(_init_vit_weights)
+
+ def _init_weights(self, m):
+ # this fn left here for compat with downstream users
+ _init_vit_weights(m)
+
+ @torch.jit.ignore()
+ def load_pretrained(self, checkpoint_path, prefix=''):
+ _load_weights(self, checkpoint_path, prefix)
+
+ @torch.jit.ignore
+ def no_weight_decay(self):
+ return {'pos_embed', 'cls_token', 'dist_token'}
+
+ def get_classifier(self):
+ if self.dist_token is None:
+ return self.head
+ else:
+ return self.head, self.head_dist
+
+ def reset_classifier(self, num_classes, global_pool=''):
+ self.num_classes = num_classes
+ self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
+ if self.num_tokens == 2:
+ self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
+
+ def forward_features(self, x, block_layers=[], get_tokens=False, local_id=[], side_length=7):
+ x = self.patch_embed(x)
+ cls_token = self.cls_token.expand(x.shape[0], -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
+ if self.dist_token is None:
+ x = torch.cat((cls_token, x), dim=1)
+ else:
+ x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1)
+ x = self.pos_drop(x + self.pos_embed)
+
+ if local_id != []:
+ if x.shape[0] != 1:
+ print('Please enter one image at a time!')
+ x = x[:, 1:, :]  # remove the class token
+ B, L, C = x.shape[0], x.shape[1], x.shape[2]
+ S = int(math.sqrt(L))
+ if S * S != L:
+ print('Not a square!')
+ x = x.reshape(B, S, S, C)
+
+ h_S = int(side_length / 2)
+ # print('h_S', h_S)
+ local_x_list = []
+ for id in local_id:
+ row_id = int(id / S)
+ column_id = id % S
+ # take a side_length x side_length window centred on (row_id, column_id),
+ # shifting it back inside the S x S grid at the borders
+ row_1, row_2 = row_id - h_S, row_id + h_S + 1
+ column_1, column_2 = column_id - h_S, column_id + h_S + 1
+ if row_1 < 0:
+ row_2 = row_2 - row_1
+ row_1 = 0
+ if row_2 > S:
+ row_1 = row_1 + S - row_2
+ row_2 = S
+ if column_1 < 0:
+ column_2 = column_2 - column_1
+ column_1 = 0
+ if column_2 > S:
+ column_1 = column_1 + S - column_2
+ column_2 = S
+ local_x = x[:, row_1:row_2, column_1:column_2, :]
+
+ # print((row_id, column_id), local_x.shape)
+
+ local_x = local_x.flatten(1, 2)
+ local_x_list.append(local_x)
+
+ local_x_list = torch.cat(local_x_list, 0)
+ x = local_x_list
+
+ # print('test:', x.shape)
+ # x = x[:, 50:, :]
+ if get_tokens:
+ # print(len(self.blocks))
+ x_list = []
+ for block_id, block in enumerate(self.blocks):
+ x = block(x)
+ if block_id in block_layers:
+ if local_id == []:
+ # print(self.norm)
+ x_list.append(self.norm(x[:, 1:, :]))
+ else:
+ x_list.append(self.norm(x))
+ if block_id == block_layers[-1]:
+ return x_list
+ else:
+ x = self.blocks(x)
+ x = self.norm(x)
+ if self.dist_token is None:
+ return self.pre_logits(x[:, 0])
+ else:
+ return x[:, 0], x[:, 1]
+
+ def forward(self, x, block_layers=[], get_tokens=False, local_id=[], side_length=7):
+ x = self.forward_features(x, get_tokens=get_tokens, block_layers=block_layers, local_id=local_id, side_length=side_length)
+ if not get_tokens:
+ if self.head_dist is not None:
+ x, x_dist = self.head(x[0]), self.head_dist(x[1])  # x must be a tuple
+ if self.training and not torch.jit.is_scripting():
+ return x, x_dist
+ else:
+ # during inference, return the average of both classifier predictions
+ return (x + x_dist) / 2
+ else:
+ x = self.head(x)
+ return x
+
+
+def _init_vit_weights(module: nn.Module, name: str = '', head_bias: float = 0., jax_impl: bool = False):
+ """ ViT weight initialization
+ * When called without name, head_bias, jax_impl args it will behave exactly the same
+ as my original init for compatibility with prev hparam / downstream use cases (i.e. DeiT).
+ * When called w/ valid name (module name) and jax_impl=True, will (hopefully) match JAX impl
+ """
+ if isinstance(module, nn.Linear):
+ if name.startswith('head'):
+ nn.init.zeros_(module.weight)
+ nn.init.constant_(module.bias, head_bias)
+ elif name.startswith('pre_logits'):
+ lecun_normal_(module.weight)
+ nn.init.zeros_(module.bias)
+ else:
+ if jax_impl:
+ nn.init.xavier_uniform_(module.weight)
+ if module.bias is not None:
+ if 'mlp' in name:
+ nn.init.normal_(module.bias, std=1e-6)
+ else:
+ nn.init.zeros_(module.bias)
+ else:
+ trunc_normal_(module.weight, std=.02)
+ if module.bias is not None:
+ nn.init.zeros_(module.bias)
+ elif jax_impl and isinstance(module, nn.Conv2d):
+ # NOTE conv was left to pytorch default in my original init
+ lecun_normal_(module.weight)
+ if module.bias is not None:
+ nn.init.zeros_(module.bias)
+ elif isinstance(module, (nn.LayerNorm, nn.GroupNorm, nn.BatchNorm2d)):
+ nn.init.zeros_(module.bias)
+ nn.init.ones_(module.weight)
+
+
+@torch.no_grad()
+def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''):
+ """ Load weights from .npz checkpoints for official Google Brain Flax implementation
+ """
+ import numpy as np
+
+ def _n2p(w, t=True):
+ if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1:
+ w = w.flatten()
+ if t:
+ if w.ndim == 4:
+ w = w.transpose([3, 2, 0, 1])
+ elif w.ndim == 3:
+ w = w.transpose([2, 0, 1])
+ elif w.ndim == 2:
+ w = w.transpose([1, 0])
+ return torch.from_numpy(w)
+
+ w = np.load(checkpoint_path)
+ if not prefix and 'opt/target/embedding/kernel' in w:
+ prefix = 'opt/target/'
+
+ if hasattr(model.patch_embed, 'backbone'):
+ # hybrid
+ backbone = model.patch_embed.backbone
+ stem_only = not hasattr(backbone, 'stem')
+ stem = backbone if stem_only else backbone.stem
+
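+ # (Illustrative note, not from the original code.) _n2p above adapts JAX/Flax
+ # layouts to PyTorch: 4-d conv kernels stored as HWIO are transposed to OIHW
+ # (e.g. a (3, 3, 3, 64) stem kernel becomes (64, 3, 3, 3)), and 2-d dense
+ # kernels stored as (in, out) become (out, in).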
stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel']))) + stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale'])) + stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias'])) + if not stem_only: + for i, stage in enumerate(backbone.stages): + for j, block in enumerate(stage.blocks): + bp = f'{prefix}block{i + 1}/unit{j + 1}/' + for r in range(3): + getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel'])) + getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale'])) + getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias'])) + if block.downsample is not None: + block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel'])) + block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale'])) + block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias'])) + embed_conv_w = _n2p(w[f'{prefix}embedding/kernel']) + else: + embed_conv_w = adapt_input_conv( + model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel'])) + model.patch_embed.proj.weight.copy_(embed_conv_w) + model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias'])) + model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False)) + pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False) + if pos_embed_w.shape != model.pos_embed.shape: + pos_embed_w = resize_pos_embed( # resize pos embedding when different size from pretrained weights + pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size) + model.pos_embed.copy_(pos_embed_w) + model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale'])) + model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias'])) + if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]: + model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel'])) + model.head.bias.copy_(_n2p(w[f'{prefix}head/bias'])) + if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w: + model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel'])) + model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias'])) + for i, block in enumerate(model.blocks.children()): + block_prefix = f'{prefix}Transformer/encoderblock_{i}/' + mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/' + block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale'])) + block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias'])) + block.attn.qkv.weight.copy_(torch.cat([ + _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')])) + block.attn.qkv.bias.copy_(torch.cat([ + _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')])) + block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1)) + block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias'])) + for r in range(2): + getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel'])) + getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias'])) + block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale'])) + block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias'])) + + +def resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()): + # Rescale the grid of position embeddings when loading from state_dict. 
Adapted from + # https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224 + _logger.info('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape) + ntok_new = posemb_new.shape[1] + if num_tokens: + posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:] + ntok_new -= num_tokens + else: + posemb_tok, posemb_grid = posemb[:, :0], posemb[0] + gs_old = int(math.sqrt(len(posemb_grid))) + if not len(gs_new): # backwards compatibility + gs_new = [int(math.sqrt(ntok_new))] * 2 + assert len(gs_new) >= 2 + _logger.info('Position embedding grid-size from %s to %s', [gs_old, gs_old], gs_new) + posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2) + posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode='bicubic', align_corners=False) + posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1) + posemb = torch.cat([posemb_tok, posemb_grid], dim=1) + return posemb + + +def checkpoint_filter_fn(state_dict, model): + """ convert patch embedding weight from manual patchify + linear proj to conv""" + out_dict = {} + if 'model' in state_dict: + # For deit models + state_dict = state_dict['model'] + for k, v in state_dict.items(): + if 'patch_embed.proj.weight' in k and len(v.shape) < 4: + # For old models that I trained prior to conv based patchification + O, I, H, W = model.patch_embed.proj.weight.shape + v = v.reshape(O, -1, H, W) + elif k == 'pos_embed' and v.shape != model.pos_embed.shape: + # To resize pos embedding when using model at different size from pretrained weights + v = resize_pos_embed( + v, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size) + out_dict[k] = v + return out_dict + + +def _create_vision_transformer(variant, pretrained=False, default_cfg=None, **kwargs): + default_cfg = default_cfg or default_cfgs[variant] + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Vision Transformer models.') + + # NOTE this extra code to support handling of repr size for in21k pretrained models + default_num_classes = default_cfg['num_classes'] + num_classes = kwargs.get('num_classes', default_num_classes) + repr_size = kwargs.pop('representation_size', None) + if repr_size is not None and num_classes != default_num_classes: + # Remove representation layer if fine-tuning. This may not always be the desired action, + # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? + _logger.warning("Removing representation layer for fine-tuning.") + repr_size = None + + model = build_model_with_cfg( + VisionTransformer, variant, pretrained, + default_cfg=default_cfg, + representation_size=repr_size, + pretrained_filter_fn=checkpoint_filter_fn, + pretrained_custom_load='npz' in default_cfg['url'], + **kwargs) + return model + + +@register_model +def vit_tiny_patch16_224(pretrained=False, **kwargs): + """ ViT-Tiny (Vit-Ti/16) + """ + model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer('vit_tiny_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_tiny_patch16_384(pretrained=False, **kwargs): + """ ViT-Tiny (Vit-Ti/16) @ 384x384. 
+ """ + model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer('vit_tiny_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch32_224(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/32) + """ + model_kwargs = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch32_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch32_384(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/32) at 384x384. + """ + model_kwargs = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch32_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch16_224(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/16) + NOTE I've replaced my previous 'small' model definition and weights with the small variant from the DeiT paper + """ + model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch16_384(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/16) + NOTE I've replaced my previous 'small' model definition and weights with the small variant from the DeiT paper + """ + model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch32_224(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch32_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch32_384(pretrained=False, **kwargs): + """ ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch32_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_224(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_384(pretrained=False, **kwargs): + """ ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. 
+ """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch8_224(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/8) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=8, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch32_224(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights. + """ + model_kwargs = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_large_patch32_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch32_384(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_large_patch32_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch16_224(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_large_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch16_384(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. + """ + model_kwargs = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_large_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_sam_224(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/16) w/ SAM pretrained weights. Paper: https://arxiv.org/abs/2106.01548 + """ + # NOTE original SAM weights release worked with representation_size=768 + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, representation_size=0, **kwargs) + model = _create_vision_transformer('vit_base_patch16_sam_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch32_sam_224(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/32) w/ SAM pretrained weights. 
Paper: https://arxiv.org/abs/2106.01548 + """ + # NOTE original SAM weights release worked with representation_size=768 + model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, representation_size=0, **kwargs) + model = _create_vision_transformer('vit_base_patch32_sam_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_tiny_patch16_224_in21k(pretrained=False, **kwargs): + """ ViT-Tiny (Vit-Ti/16). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer('vit_tiny_patch16_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch32_224_in21k(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/16) + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch32_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_patch16_224_in21k(pretrained=False, **kwargs): + """ ViT-Small (ViT-S/16) + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('vit_small_patch16_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch32_224_in21k(pretrained=False, **kwargs): + """ ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict( + patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch32_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_224_in21k(pretrained=False, **kwargs): + """ ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch16_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch8_224_in21k(pretrained=False, **kwargs): + """ ViT-Base model (ViT-B/8) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. 
+ NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict( + patch_size=8, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('vit_base_patch8_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch32_224_in21k(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has a representation layer but the 21k classifier head is zero'd out in original weights + """ + model_kwargs = dict( + patch_size=32, embed_dim=1024, depth=24, num_heads=16, representation_size=1024, **kwargs) + model = _create_vision_transformer('vit_large_patch32_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_patch16_224_in21k(pretrained=False, **kwargs): + """ ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has valid 21k classifier head and no representation (pre-logits) layer + """ + model_kwargs = dict( + patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer('vit_large_patch16_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_huge_patch14_224_in21k(pretrained=False, **kwargs): + """ ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + NOTE: this model has a representation layer but the 21k classifier head is zero'd out in original weights + """ + model_kwargs = dict( + patch_size=14, embed_dim=1280, depth=32, num_heads=16, representation_size=1280, **kwargs) + model = _create_vision_transformer('vit_huge_patch14_224_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def deit_tiny_patch16_224(pretrained=False, **kwargs): + """ DeiT-tiny model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer('deit_tiny_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def deit_small_patch16_224(pretrained=False, **kwargs): + """ DeiT-small model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer('deit_small_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def deit_base_patch16_224(pretrained=False, **kwargs): + """ DeiT base model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. 
+ """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('deit_base_patch16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def deit_base_patch16_384(pretrained=False, **kwargs): + """ DeiT base model @ 384x384 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer('deit_base_patch16_384', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def deit_tiny_distilled_patch16_224(pretrained=False, **kwargs): + """ DeiT-tiny distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer( + 'deit_tiny_distilled_patch16_224', pretrained=pretrained, distilled=True, **model_kwargs) + return model + + +@register_model +def deit_small_distilled_patch16_224(pretrained=False, **kwargs): + """ DeiT-small distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer( + 'deit_small_distilled_patch16_224', pretrained=pretrained, distilled=True, **model_kwargs) + return model + + +@register_model +def deit_base_distilled_patch16_224(pretrained=False, **kwargs): + """ DeiT-base distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer( + 'deit_base_distilled_patch16_224', pretrained=pretrained, distilled=True, **model_kwargs) + return model + + +@register_model +def deit_base_distilled_patch16_384(pretrained=False, **kwargs): + """ DeiT-base distilled model @ 384x384 from paper (https://arxiv.org/abs/2012.12877). + ImageNet-1k weights from https://github.com/facebookresearch/deit. + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer( + 'deit_base_distilled_patch16_384', pretrained=pretrained, distilled=True, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_224_miil_in21k(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). + Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, **kwargs) + model = _create_vision_transformer('vit_base_patch16_224_miil_in21k', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_patch16_224_miil(pretrained=False, **kwargs): + """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). 
+ Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K + """ + model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, **kwargs) + model = _create_vision_transformer('vit_base_patch16_224_miil', pretrained=pretrained, **model_kwargs) + return model diff --git a/timm/models/vision_transformer_hybrid.py b/timm/models/vision_transformer_hybrid.py new file mode 100644 index 0000000..d5f0a53 --- /dev/null +++ b/timm/models/vision_transformer_hybrid.py @@ -0,0 +1,363 @@ +""" Hybrid Vision Transformer (ViT) in PyTorch + +A PyTorch implement of the Hybrid Vision Transformers as described in: + +'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' + - https://arxiv.org/abs/2010.11929 + +`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers` + - https://arxiv.org/abs/2106.TODO + +NOTE These hybrid model definitions depend on code in vision_transformer.py. +They were moved here to keep file sizes sane. + +Hacked together by / Copyright 2021 Ross Wightman +""" +from copy import deepcopy +from functools import partial + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .layers import StdConv2dSame, StdConv2d, to_2tuple +from .resnet import resnet26d, resnet50d +from .resnetv2 import ResNetV2, create_resnetv2_stem +from .registry import register_model +from timm.models.vision_transformer import _create_vision_transformer + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), + 'first_conv': 'patch_embed.backbone.stem.conv', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + # hybrid in-1k models (weights from official JAX impl where they exist) + 'vit_tiny_r_s16_p8_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', + first_conv='patch_embed.backbone.conv'), + 'vit_tiny_r_s16_p8_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + first_conv='patch_embed.backbone.conv', input_size=(3, 384, 384), crop_pct=1.0), + 'vit_small_r26_s32_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'R26_S_32-i21k-300ep-lr_0.001-aug_light0-wd_0.03-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.03-res_224.npz', + ), + 'vit_small_r26_s32_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'R26_S_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_base_r26_s32_224': _cfg(), + 'vit_base_r50_s16_224': _cfg(), + 'vit_base_r50_s16_384': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_384-9fd3c705.pth', + input_size=(3, 384, 384), crop_pct=1.0), + 'vit_large_r50_s32_224': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 'R50_L_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz' + ), + 'vit_large_r50_s32_384': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/' + 
'R50_L_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz', + input_size=(3, 384, 384), crop_pct=1.0 + ), + + # hybrid in-21k models (weights from official Google JAX impl where they exist) + 'vit_tiny_r_s16_p8_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0.npz', + num_classes=21843, crop_pct=0.9, first_conv='patch_embed.backbone.conv'), + 'vit_small_r26_s32_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/R26_S_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.03-do_0.0-sd_0.0.npz', + num_classes=21843, crop_pct=0.9), + 'vit_base_r50_s16_224_in21k': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_224_in21k-6f7c7740.pth', + num_classes=21843, crop_pct=0.9), + 'vit_large_r50_s32_224_in21k': _cfg( + url='https://storage.googleapis.com/vit_models/augreg/R50_L_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0.npz', + num_classes=21843, crop_pct=0.9), + + # hybrid models (using timm resnet backbones) + 'vit_small_resnet26d_224': _cfg( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), + 'vit_small_resnet50d_s16_224': _cfg( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), + 'vit_base_resnet26d_224': _cfg( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), + 'vit_base_resnet50d_224': _cfg( + mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), +} + + +class HybridEmbed(nn.Module): + """ CNN Feature Map Embedding + Extract feature map from CNN, flatten, project to embedding dim. 
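+    E.g. with the default patch_size=1, a (B, C, Hf, Wf) backbone feature map becomes a (B, Hf * Wf, embed_dim) token sequence.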
+ """ + def __init__(self, backbone, img_size=224, patch_size=1, feature_size=None, in_chans=3, embed_dim=768): + super().__init__() + assert isinstance(backbone, nn.Module) + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + self.img_size = img_size + self.patch_size = patch_size + self.backbone = backbone + if feature_size is None: + with torch.no_grad(): + # NOTE Most reliable way of determining output dims is to run forward pass + training = backbone.training + if training: + backbone.eval() + o = self.backbone(torch.zeros(1, in_chans, img_size[0], img_size[1])) + if isinstance(o, (list, tuple)): + o = o[-1] # last feature if backbone outputs list/tuple of features + feature_size = o.shape[-2:] + feature_dim = o.shape[1] + backbone.train(training) + else: + feature_size = to_2tuple(feature_size) + if hasattr(self.backbone, 'feature_info'): + feature_dim = self.backbone.feature_info.channels()[-1] + else: + feature_dim = self.backbone.num_features + assert feature_size[0] % patch_size[0] == 0 and feature_size[1] % patch_size[1] == 0 + self.grid_size = (feature_size[0] // patch_size[0], feature_size[1] // patch_size[1]) + self.num_patches = self.grid_size[0] * self.grid_size[1] + self.proj = nn.Conv2d(feature_dim, embed_dim, kernel_size=patch_size, stride=patch_size) + + def forward(self, x): + x = self.backbone(x) + if isinstance(x, (list, tuple)): + x = x[-1] # last feature if backbone outputs list/tuple of features + x = self.proj(x).flatten(2).transpose(1, 2) + return x + + +def _create_vision_transformer_hybrid(variant, backbone, pretrained=False, **kwargs): + embed_layer = partial(HybridEmbed, backbone=backbone) + kwargs.setdefault('patch_size', 1) # default patch size for hybrid models if not set + return _create_vision_transformer( + variant, pretrained=pretrained, embed_layer=embed_layer, default_cfg=default_cfgs[variant], **kwargs) + + +def _resnetv2(layers=(3, 4, 9), **kwargs): + """ ResNet-V2 backbone helper""" + padding_same = kwargs.get('padding_same', True) + stem_type = 'same' if padding_same else '' + conv_layer = partial(StdConv2dSame, eps=1e-8) if padding_same else partial(StdConv2d, eps=1e-8) + if len(layers): + backbone = ResNetV2( + layers=layers, num_classes=0, global_pool='', in_chans=kwargs.get('in_chans', 3), + preact=False, stem_type=stem_type, conv_layer=conv_layer) + else: + backbone = create_resnetv2_stem( + kwargs.get('in_chans', 3), stem_type=stem_type, preact=False, conv_layer=conv_layer) + return backbone + + +@register_model +def vit_tiny_r_s16_p8_224(pretrained=False, **kwargs): + """ R+ViT-Ti/S16 w/ 8x8 patch hybrid @ 224 x 224. + """ + backbone = _resnetv2(layers=(), **kwargs) + model_kwargs = dict(patch_size=8, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_tiny_r_s16_p8_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_tiny_r_s16_p8_384(pretrained=False, **kwargs): + """ R+ViT-Ti/S16 w/ 8x8 patch hybrid @ 384 x 384. + """ + backbone = _resnetv2(layers=(), **kwargs) + model_kwargs = dict(patch_size=8, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_tiny_r_s16_p8_384', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_r26_s32_224(pretrained=False, **kwargs): + """ R26+ViT-S/S32 hybrid. 
+ """ + backbone = _resnetv2((2, 2, 2, 2), **kwargs) + model_kwargs = dict(embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_small_r26_s32_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_r26_s32_384(pretrained=False, **kwargs): + """ R26+ViT-S/S32 hybrid. + """ + backbone = _resnetv2((2, 2, 2, 2), **kwargs) + model_kwargs = dict(embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_small_r26_s32_384', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_r26_s32_224(pretrained=False, **kwargs): + """ R26+ViT-B/S32 hybrid. + """ + backbone = _resnetv2((2, 2, 2, 2), **kwargs) + model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_base_r26_s32_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_r50_s16_224(pretrained=False, **kwargs): + """ R50+ViT-B/S16 hybrid from original paper (https://arxiv.org/abs/2010.11929). + """ + backbone = _resnetv2((3, 4, 9), **kwargs) + model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_base_r50_s16_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_r50_s16_384(pretrained=False, **kwargs): + """ R50+ViT-B/16 hybrid from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. + """ + backbone = _resnetv2((3, 4, 9), **kwargs) + model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_base_r50_s16_384', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_resnet50_384(pretrained=False, **kwargs): + # DEPRECATED this is forwarding to model def above for backwards compatibility + return vit_base_r50_s16_384(pretrained=pretrained, **kwargs) + + +@register_model +def vit_large_r50_s32_224(pretrained=False, **kwargs): + """ R50+ViT-L/S32 hybrid. + """ + backbone = _resnetv2((3, 4, 6, 3), **kwargs) + model_kwargs = dict(embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_large_r50_s32_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_large_r50_s32_384(pretrained=False, **kwargs): + """ R50+ViT-L/S32 hybrid. + """ + backbone = _resnetv2((3, 4, 6, 3), **kwargs) + model_kwargs = dict(embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_large_r50_s32_384', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_tiny_r_s16_p8_224_in21k(pretrained=False, **kwargs): + """ R+ViT-Ti/S16 w/ 8x8 patch hybrid. ImageNet-21k. + """ + backbone = _resnetv2(layers=(), **kwargs) + model_kwargs = dict(patch_size=8, embed_dim=192, depth=12, num_heads=3, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_tiny_r_s16_p8_224_in21k', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_r26_s32_224_in21k(pretrained=False, **kwargs): + """ R26+ViT-S/S32 hybrid. ImageNet-21k. 
+ """ + backbone = _resnetv2((2, 2, 2, 2), **kwargs) + model_kwargs = dict(embed_dim=384, depth=12, num_heads=6, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_small_r26_s32_224_in21k', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_r50_s16_224_in21k(pretrained=False, **kwargs): + """ R50+ViT-B/16 hybrid model from original paper (https://arxiv.org/abs/2010.11929). + ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. + """ + backbone = _resnetv2(layers=(3, 4, 9), **kwargs) + model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, representation_size=768, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_base_r50_s16_224_in21k', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_resnet50_224_in21k(pretrained=False, **kwargs): + # DEPRECATED this is forwarding to model def above for backwards compatibility + return vit_base_r50_s16_224_in21k(pretrained=pretrained, **kwargs) + + +@register_model +def vit_large_r50_s32_224_in21k(pretrained=False, **kwargs): + """ R50+ViT-L/S32 hybrid. ImageNet-21k. + """ + backbone = _resnetv2((3, 4, 6, 3), **kwargs) + model_kwargs = dict(embed_dim=1024, depth=24, num_heads=16, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_large_r50_s32_224_in21k', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_resnet26d_224(pretrained=False, **kwargs): + """ Custom ViT small hybrid w/ ResNet26D stride 32. No pretrained weights. + """ + backbone = resnet26d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4]) + model_kwargs = dict(embed_dim=768, depth=8, num_heads=8, mlp_ratio=3, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_small_resnet26d_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_small_resnet50d_s16_224(pretrained=False, **kwargs): + """ Custom ViT small hybrid w/ ResNet50D 3-stages, stride 16. No pretrained weights. + """ + backbone = resnet50d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[3]) + model_kwargs = dict(embed_dim=768, depth=8, num_heads=8, mlp_ratio=3, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_small_resnet50d_s16_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_resnet26d_224(pretrained=False, **kwargs): + """ Custom ViT base hybrid w/ ResNet26D stride 32. No pretrained weights. + """ + backbone = resnet26d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4]) + model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_base_resnet26d_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def vit_base_resnet50d_224(pretrained=False, **kwargs): + """ Custom ViT base hybrid w/ ResNet50D stride 32. No pretrained weights. 
+ """ + backbone = resnet50d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4]) + model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, **kwargs) + model = _create_vision_transformer_hybrid( + 'vit_base_resnet50d_224', backbone=backbone, pretrained=pretrained, **model_kwargs) + return model \ No newline at end of file diff --git a/timm/models/vovnet.py b/timm/models/vovnet.py new file mode 100644 index 0000000..ec5b3e8 --- /dev/null +++ b/timm/models/vovnet.py @@ -0,0 +1,406 @@ +""" VoVNet (V1 & V2) + +Papers: +* `An Energy and GPU-Computation Efficient Backbone Network` - https://arxiv.org/abs/1904.09730 +* `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 + +Looked at https://github.com/youngwanLEE/vovnet-detectron2 & +https://github.com/stigma0617/VoVNet.pytorch/blob/master/models_vovnet/vovnet.py +for some reference, rewrote most of the code. + +Hacked together by / Copyright 2020 Ross Wightman +""" + +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .registry import register_model +from .helpers import build_model_with_cfg +from .layers import ConvBnAct, SeparableConvBnAct, BatchNormAct2d, ClassifierHead, DropPath,\ + create_attn, create_norm_act, get_norm_act_layer + + +# model cfgs adapted from https://github.com/youngwanLEE/vovnet-detectron2 & +# https://github.com/stigma0617/VoVNet.pytorch/blob/master/models_vovnet/vovnet.py +model_cfgs = dict( + vovnet39a=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 1, 2, 2], + residual=False, + depthwise=False, + attn='', + ), + vovnet57a=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 1, 4, 3], + residual=False, + depthwise=False, + attn='', + + ), + ese_vovnet19b_slim_dw=dict( + stem_chs=[64, 64, 64], + stage_conv_chs=[64, 80, 96, 112], + stage_out_chs=[112, 256, 384, 512], + layer_per_block=3, + block_per_stage=[1, 1, 1, 1], + residual=True, + depthwise=True, + attn='ese', + + ), + ese_vovnet19b_dw=dict( + stem_chs=[64, 64, 64], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=3, + block_per_stage=[1, 1, 1, 1], + residual=True, + depthwise=True, + attn='ese', + ), + ese_vovnet19b_slim=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[64, 80, 96, 112], + stage_out_chs=[112, 256, 384, 512], + layer_per_block=3, + block_per_stage=[1, 1, 1, 1], + residual=True, + depthwise=False, + attn='ese', + ), + ese_vovnet19b=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=3, + block_per_stage=[1, 1, 1, 1], + residual=True, + depthwise=False, + attn='ese', + + ), + ese_vovnet39b=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 1, 2, 2], + residual=True, + depthwise=False, + attn='ese', + ), + ese_vovnet57b=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 1, 4, 3], + residual=True, + depthwise=False, + attn='ese', + + ), + ese_vovnet99b=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + 
stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 3, 9, 3], + residual=True, + depthwise=False, + attn='ese', + ), + eca_vovnet39b=dict( + stem_chs=[64, 64, 128], + stage_conv_chs=[128, 160, 192, 224], + stage_out_chs=[256, 512, 768, 1024], + layer_per_block=5, + block_per_stage=[1, 1, 2, 2], + residual=True, + depthwise=False, + attn='eca', + ), +) +model_cfgs['ese_vovnet39b_evos'] = model_cfgs['ese_vovnet39b'] +model_cfgs['ese_vovnet99b_iabn'] = model_cfgs['ese_vovnet99b'] + + +def _cfg(url=''): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.0.conv', 'classifier': 'head.fc', + } + + +default_cfgs = dict( + vovnet39a=_cfg(url=''), + vovnet57a=_cfg(url=''), + ese_vovnet19b_slim_dw=_cfg(url=''), + ese_vovnet19b_dw=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ese_vovnet19b_dw-a8741004.pth'), + ese_vovnet19b_slim=_cfg(url=''), + ese_vovnet39b=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ese_vovnet39b-f912fe73.pth'), + ese_vovnet57b=_cfg(url=''), + ese_vovnet99b=_cfg(url=''), + eca_vovnet39b=_cfg(url=''), + ese_vovnet39b_evos=_cfg(url=''), + ese_vovnet99b_iabn=_cfg(url=''), +) + + +class SequentialAppendList(nn.Sequential): + def __init__(self, *args): + super(SequentialAppendList, self).__init__(*args) + + def forward(self, x: torch.Tensor, concat_list: List[torch.Tensor]) -> torch.Tensor: + for i, module in enumerate(self): + if i == 0: + concat_list.append(module(x)) + else: + concat_list.append(module(concat_list[-1])) + x = torch.cat(concat_list, dim=1) + return x + + +class OsaBlock(nn.Module): + + def __init__(self, in_chs, mid_chs, out_chs, layer_per_block, residual=False, + depthwise=False, attn='', norm_layer=BatchNormAct2d, act_layer=nn.ReLU, drop_path=None): + super(OsaBlock, self).__init__() + + self.residual = residual + self.depthwise = depthwise + conv_kwargs = dict(norm_layer=norm_layer, act_layer=act_layer) + + next_in_chs = in_chs + if self.depthwise and next_in_chs != mid_chs: + assert not residual + self.conv_reduction = ConvBnAct(next_in_chs, mid_chs, 1, **conv_kwargs) + else: + self.conv_reduction = None + + mid_convs = [] + for i in range(layer_per_block): + if self.depthwise: + conv = SeparableConvBnAct(mid_chs, mid_chs, **conv_kwargs) + else: + conv = ConvBnAct(next_in_chs, mid_chs, 3, **conv_kwargs) + next_in_chs = mid_chs + mid_convs.append(conv) + self.conv_mid = SequentialAppendList(*mid_convs) + + # feature aggregation + next_in_chs = in_chs + layer_per_block * mid_chs + self.conv_concat = ConvBnAct(next_in_chs, out_chs, **conv_kwargs) + + if attn: + self.attn = create_attn(attn, out_chs) + else: + self.attn = None + + self.drop_path = drop_path + + def forward(self, x): + output = [x] + if self.conv_reduction is not None: + x = self.conv_reduction(x) + x = self.conv_mid(x, output) + x = self.conv_concat(x) + if self.attn is not None: + x = self.attn(x) + if self.drop_path is not None: + x = self.drop_path(x) + if self.residual: + x = x + output[0] + return x + + +class OsaStage(nn.Module): + + def __init__(self, in_chs, mid_chs, out_chs, block_per_stage, layer_per_block, downsample=True, + residual=True, depthwise=False, attn='ese', norm_layer=BatchNormAct2d, act_layer=nn.ReLU, + drop_path_rates=None): + super(OsaStage, self).__init__() + + if downsample: + 
self.pool = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) + else: + self.pool = None + + blocks = [] + for i in range(block_per_stage): + last_block = i == block_per_stage - 1 + if drop_path_rates is not None and drop_path_rates[i] > 0.: + drop_path = DropPath(drop_path_rates[i]) + else: + drop_path = None + blocks += [OsaBlock( + in_chs, mid_chs, out_chs, layer_per_block, residual=residual and i > 0, depthwise=depthwise, + attn=attn if last_block else '', norm_layer=norm_layer, act_layer=act_layer, drop_path=drop_path) + ] + in_chs = out_chs + self.blocks = nn.Sequential(*blocks) + + def forward(self, x): + if self.pool is not None: + x = self.pool(x) + x = self.blocks(x) + return x + + +class VovNet(nn.Module): + + def __init__(self, cfg, in_chans=3, num_classes=1000, global_pool='avg', drop_rate=0., stem_stride=4, + output_stride=32, norm_layer=BatchNormAct2d, act_layer=nn.ReLU, drop_path_rate=0.): + """ VovNet (v2) + """ + super(VovNet, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + assert stem_stride in (4, 2) + assert output_stride == 32 # FIXME support dilation + + stem_chs = cfg["stem_chs"] + stage_conv_chs = cfg["stage_conv_chs"] + stage_out_chs = cfg["stage_out_chs"] + block_per_stage = cfg["block_per_stage"] + layer_per_block = cfg["layer_per_block"] + conv_kwargs = dict(norm_layer=norm_layer, act_layer=act_layer) + + # Stem module + last_stem_stride = stem_stride // 2 + conv_type = SeparableConvBnAct if cfg["depthwise"] else ConvBnAct + self.stem = nn.Sequential(*[ + ConvBnAct(in_chans, stem_chs[0], 3, stride=2, **conv_kwargs), + conv_type(stem_chs[0], stem_chs[1], 3, stride=1, **conv_kwargs), + conv_type(stem_chs[1], stem_chs[2], 3, stride=last_stem_stride, **conv_kwargs), + ]) + self.feature_info = [dict( + num_chs=stem_chs[1], reduction=2, module=f'stem.{1 if stem_stride == 4 else 2}')] + current_stride = stem_stride + + # OSA stages + stage_dpr = torch.split(torch.linspace(0, drop_path_rate, sum(block_per_stage)), block_per_stage) + in_ch_list = stem_chs[-1:] + stage_out_chs[:-1] + stage_args = dict(residual=cfg["residual"], depthwise=cfg["depthwise"], attn=cfg["attn"], **conv_kwargs) + stages = [] + for i in range(4): # num_stages + downsample = stem_stride == 2 or i > 0 # first stage has no stride/downsample if stem_stride is 4 + stages += [OsaStage( + in_ch_list[i], stage_conv_chs[i], stage_out_chs[i], block_per_stage[i], layer_per_block, + downsample=downsample, drop_path_rates=stage_dpr[i], **stage_args) + ] + self.num_features = stage_out_chs[i] + current_stride *= 2 if downsample else 1 + self.feature_info += [dict(num_chs=self.num_features, reduction=current_stride, module=f'stages.{i}')] + + self.stages = nn.Sequential(*stages) + + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) + + for n, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1.) + nn.init.constant_(m.bias, 0.) 
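+            # nn.Linear weights are left at the PyTorch default init here; only the bias is zeroed below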
+            elif isinstance(m, nn.Linear):
+                nn.init.zeros_(m.bias)
+
+    def get_classifier(self):
+        return self.head.fc
+
+    def reset_classifier(self, num_classes, global_pool='avg'):
+        self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
+
+    def forward_features(self, x):
+        x = self.stem(x)
+        return self.stages(x)
+
+    def forward(self, x):
+        x = self.forward_features(x)
+        return self.head(x)
+
+
+def _create_vovnet(variant, pretrained=False, **kwargs):
+    return build_model_with_cfg(
+        VovNet, variant, pretrained,
+        default_cfg=default_cfgs[variant],
+        model_cfg=model_cfgs[variant],
+        feature_cfg=dict(flatten_sequential=True),
+        **kwargs)
+
+
+@register_model
+def vovnet39a(pretrained=False, **kwargs):
+    return _create_vovnet('vovnet39a', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def vovnet57a(pretrained=False, **kwargs):
+    return _create_vovnet('vovnet57a', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def ese_vovnet19b_slim_dw(pretrained=False, **kwargs):
+    return _create_vovnet('ese_vovnet19b_slim_dw', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def ese_vovnet19b_dw(pretrained=False, **kwargs):
+    return _create_vovnet('ese_vovnet19b_dw', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def ese_vovnet19b_slim(pretrained=False, **kwargs):
+    return _create_vovnet('ese_vovnet19b_slim', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def ese_vovnet39b(pretrained=False, **kwargs):
+    return _create_vovnet('ese_vovnet39b', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def ese_vovnet57b(pretrained=False, **kwargs):
+    return _create_vovnet('ese_vovnet57b', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def ese_vovnet99b(pretrained=False, **kwargs):
+    return _create_vovnet('ese_vovnet99b', pretrained=pretrained, **kwargs)
+
+
+@register_model
+def eca_vovnet39b(pretrained=False, **kwargs):
+    return _create_vovnet('eca_vovnet39b', pretrained=pretrained, **kwargs)
+
+
+# Experimental Models
+
+@register_model
+def ese_vovnet39b_evos(pretrained=False, **kwargs):
+    def norm_act_fn(num_features, **nkwargs):
+        return create_norm_act('EvoNormSample', num_features, jit=False, **nkwargs)
+    return _create_vovnet('ese_vovnet39b_evos', pretrained=pretrained, norm_layer=norm_act_fn, **kwargs)
+
+
+@register_model
+def ese_vovnet99b_iabn(pretrained=False, **kwargs):
+    norm_layer = get_norm_act_layer('iabn')
+    return _create_vovnet(
+        'ese_vovnet99b_iabn', pretrained=pretrained, norm_layer=norm_layer, act_layer=nn.LeakyReLU, **kwargs)
diff --git a/timm/models/xception.py b/timm/models/xception.py
new file mode 100644
index 0000000..86f558c
--- /dev/null
+++ b/timm/models/xception.py
@@ -0,0 +1,232 @@
+"""
+Ported to PyTorch thanks to [tstandley](https://github.com/tstandley/Xception-PyTorch)
+
+@author: tstandley
+Adapted by cadene
+
+Creates an Xception Model as defined in:
+
+Francois Chollet
+Xception: Deep Learning with Depthwise Separable Convolutions
+https://arxiv.org/pdf/1610.02357.pdf
+
+These weights were ported from the Keras implementation.
Achieves the following performance on the validation set: + +Loss:0.9173 Prec@1:78.892 Prec@5:94.292 + +REMEMBER to set your image size to 3x299x299 for both test and validation + +normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], + std=[0.5, 0.5, 0.5]) + +The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299 +""" + +import torch.nn as nn +import torch.nn.functional as F + +from .helpers import build_model_with_cfg +from .layers import create_classifier +from .registry import register_model + +__all__ = ['Xception'] + +default_cfgs = { + 'xception': { + 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/xception-43020ad28.pth', + 'input_size': (3, 299, 299), + 'pool_size': (10, 10), + 'crop_pct': 0.8975, + 'interpolation': 'bicubic', + 'mean': (0.5, 0.5, 0.5), + 'std': (0.5, 0.5, 0.5), + 'num_classes': 1000, + 'first_conv': 'conv1', + 'classifier': 'fc' + # The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299 + } +} + + +class SeparableConv2d(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1): + super(SeparableConv2d, self).__init__() + + self.conv1 = nn.Conv2d( + in_channels, in_channels, kernel_size, stride, padding, dilation, groups=in_channels, bias=False) + self.pointwise = nn.Conv2d(in_channels, out_channels, 1, 1, 0, 1, 1, bias=False) + + def forward(self, x): + x = self.conv1(x) + x = self.pointwise(x) + return x + + +class Block(nn.Module): + def __init__(self, in_channels, out_channels, reps, strides=1, start_with_relu=True, grow_first=True): + super(Block, self).__init__() + + if out_channels != in_channels or strides != 1: + self.skip = nn.Conv2d(in_channels, out_channels, 1, stride=strides, bias=False) + self.skipbn = nn.BatchNorm2d(out_channels) + else: + self.skip = None + + rep = [] + for i in range(reps): + if grow_first: + inc = in_channels if i == 0 else out_channels + outc = out_channels + else: + inc = in_channels + outc = in_channels if i < (reps - 1) else out_channels + rep.append(nn.ReLU(inplace=True)) + rep.append(SeparableConv2d(inc, outc, 3, stride=1, padding=1)) + rep.append(nn.BatchNorm2d(outc)) + + if not start_with_relu: + rep = rep[1:] + else: + rep[0] = nn.ReLU(inplace=False) + + if strides != 1: + rep.append(nn.MaxPool2d(3, strides, 1)) + self.rep = nn.Sequential(*rep) + + def forward(self, inp): + x = self.rep(inp) + + if self.skip is not None: + skip = self.skip(inp) + skip = self.skipbn(skip) + else: + skip = inp + + x += skip + return x + + +class Xception(nn.Module): + """ + Xception optimized for the ImageNet dataset, as specified in + https://arxiv.org/pdf/1610.02357.pdf + """ + + def __init__(self, num_classes=1000, in_chans=3, drop_rate=0., global_pool='avg'): + """ Constructor + Args: + num_classes: number of classes + """ + super(Xception, self).__init__() + self.drop_rate = drop_rate + self.global_pool = global_pool + self.num_classes = num_classes + self.num_features = 2048 + + self.conv1 = nn.Conv2d(in_chans, 32, 3, 2, 0, bias=False) + self.bn1 = nn.BatchNorm2d(32) + self.act1 = nn.ReLU(inplace=True) + + self.conv2 = nn.Conv2d(32, 64, 3, bias=False) + self.bn2 = nn.BatchNorm2d(64) + self.act2 = nn.ReLU(inplace=True) + + self.block1 = Block(64, 128, 2, 2, start_with_relu=False) + self.block2 = Block(128, 256, 2, 2) + self.block3 = Block(256, 728, 2, 2) + + self.block4 = Block(728, 728, 3, 1) + self.block5 = Block(728, 728, 3, 1) + self.block6 = 
Block(728, 728, 3, 1)
+        self.block7 = Block(728, 728, 3, 1)
+
+        self.block8 = Block(728, 728, 3, 1)
+        self.block9 = Block(728, 728, 3, 1)
+        self.block10 = Block(728, 728, 3, 1)
+        self.block11 = Block(728, 728, 3, 1)
+
+        self.block12 = Block(728, 1024, 2, 2, grow_first=False)
+
+        self.conv3 = SeparableConv2d(1024, 1536, 3, 1, 1)
+        self.bn3 = nn.BatchNorm2d(1536)
+        self.act3 = nn.ReLU(inplace=True)
+
+        self.conv4 = SeparableConv2d(1536, self.num_features, 3, 1, 1)
+        self.bn4 = nn.BatchNorm2d(self.num_features)
+        self.act4 = nn.ReLU(inplace=True)
+        self.feature_info = [
+            dict(num_chs=64, reduction=2, module='act2'),
+            dict(num_chs=128, reduction=4, module='block2.rep.0'),
+            dict(num_chs=256, reduction=8, module='block3.rep.0'),
+            dict(num_chs=728, reduction=16, module='block12.rep.0'),
+            dict(num_chs=2048, reduction=32, module='act4'),
+        ]
+
+        self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
+
+        # ------- init weights --------
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
+            elif isinstance(m, nn.BatchNorm2d):
+                m.weight.data.fill_(1)
+                m.bias.data.zero_()
+
+    def get_classifier(self):
+        return self.fc
+
+    def reset_classifier(self, num_classes, global_pool='avg'):
+        self.num_classes = num_classes
+        self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
+
+    def forward_features(self, x):
+        x = self.conv1(x)
+        x = self.bn1(x)
+        x = self.act1(x)
+
+        x = self.conv2(x)
+        x = self.bn2(x)
+        x = self.act2(x)
+
+        x = self.block1(x)
+        x = self.block2(x)
+        x = self.block3(x)
+        x = self.block4(x)
+        x = self.block5(x)
+        x = self.block6(x)
+        x = self.block7(x)
+        x = self.block8(x)
+        x = self.block9(x)
+        x = self.block10(x)
+        x = self.block11(x)
+        x = self.block12(x)
+
+        x = self.conv3(x)
+        x = self.bn3(x)
+        x = self.act3(x)
+
+        x = self.conv4(x)
+        x = self.bn4(x)
+        x = self.act4(x)
+        return x
+
+    def forward(self, x):
+        x = self.forward_features(x)
+        x = self.global_pool(x)
+        if self.drop_rate:
+            x = F.dropout(x, self.drop_rate, training=self.training)
+        x = self.fc(x)
+        return x
+
+
+def _xception(variant, pretrained=False, **kwargs):
+    return build_model_with_cfg(
+        Xception, variant, pretrained,
+        default_cfg=default_cfgs[variant],
+        feature_cfg=dict(feature_cls='hook'),
+        **kwargs)
+
+
+@register_model
+def xception(pretrained=False, **kwargs):
+    return _xception('xception', pretrained=pretrained, **kwargs)
diff --git a/timm/models/xception_aligned.py b/timm/models/xception_aligned.py
new file mode 100644
index 0000000..ea7f5c0
--- /dev/null
+++ b/timm/models/xception_aligned.py
@@ -0,0 +1,238 @@
+"""Pytorch impl of Aligned Xception 41, 65, 71
+
+This is a correct, from-scratch impl of Aligned Xception (Deeplab) models compatible with TF weights at
+https://github.com/tensorflow/models/blob/master/research/deeplab/g3doc/model_zoo.md
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+from functools import partial
+
+import torch.nn as nn
+import torch.nn.functional as F
+
+from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
+from .helpers import build_model_with_cfg
+from .layers import ClassifierHead, ConvBnAct, create_conv2d
+from .layers.helpers import to_3tuple
+from .registry import register_model
+
+__all__ = ['XceptionAligned']
+
+
+def _cfg(url='', **kwargs):
+    return {
+        'url': url,
+        'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (10, 10),
+        'crop_pct': 0.903,
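+        # crop_pct 0.903: eval images are resized so the short side is about 299 / 0.903 ≈ 331, then center-cropped to 299x299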
'interpolation': 'bicubic', + 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, + 'first_conv': 'stem.0.conv', 'classifier': 'head.fc', + **kwargs + } + + +default_cfgs = dict( + xception41=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_xception_41-e6439c97.pth'), + xception65=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_xception_65-c9ae96e8.pth'), + xception71=_cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_xception_71-8eec7df1.pth'), +) + + +class SeparableConv2d(nn.Module): + def __init__( + self, inplanes, planes, kernel_size=3, stride=1, dilation=1, padding='', + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d): + super(SeparableConv2d, self).__init__() + self.kernel_size = kernel_size + self.dilation = dilation + + # depthwise convolution + self.conv_dw = create_conv2d( + inplanes, inplanes, kernel_size, stride=stride, + padding=padding, dilation=dilation, depthwise=True) + self.bn_dw = norm_layer(inplanes) + if act_layer is not None: + self.act_dw = act_layer(inplace=True) + else: + self.act_dw = None + + # pointwise convolution + self.conv_pw = create_conv2d(inplanes, planes, kernel_size=1) + self.bn_pw = norm_layer(planes) + if act_layer is not None: + self.act_pw = act_layer(inplace=True) + else: + self.act_pw = None + + def forward(self, x): + x = self.conv_dw(x) + x = self.bn_dw(x) + if self.act_dw is not None: + x = self.act_dw(x) + x = self.conv_pw(x) + x = self.bn_pw(x) + if self.act_pw is not None: + x = self.act_pw(x) + return x + + +class XceptionModule(nn.Module): + def __init__( + self, in_chs, out_chs, stride=1, dilation=1, pad_type='', + start_with_relu=True, no_skip=False, act_layer=nn.ReLU, norm_layer=None): + super(XceptionModule, self).__init__() + out_chs = to_3tuple(out_chs) + self.in_channels = in_chs + self.out_channels = out_chs[-1] + self.no_skip = no_skip + if not no_skip and (self.out_channels != self.in_channels or stride != 1): + self.shortcut = ConvBnAct( + in_chs, self.out_channels, 1, stride=stride, norm_layer=norm_layer, act_layer=None) + else: + self.shortcut = None + + separable_act_layer = None if start_with_relu else act_layer + self.stack = nn.Sequential() + for i in range(3): + if start_with_relu: + self.stack.add_module(f'act{i + 1}', nn.ReLU(inplace=i > 0)) + self.stack.add_module(f'conv{i + 1}', SeparableConv2d( + in_chs, out_chs[i], 3, stride=stride if i == 2 else 1, dilation=dilation, padding=pad_type, + act_layer=separable_act_layer, norm_layer=norm_layer)) + in_chs = out_chs[i] + + def forward(self, x): + skip = x + x = self.stack(x) + if self.shortcut is not None: + skip = self.shortcut(skip) + if not self.no_skip: + x = x + skip + return x + + +class XceptionAligned(nn.Module): + """Modified Aligned Xception + """ + + def __init__(self, block_cfg, num_classes=1000, in_chans=3, output_stride=32, + act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, drop_rate=0., global_pool='avg'): + super(XceptionAligned, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + assert output_stride in (8, 16, 32) + + layer_args = dict(act_layer=act_layer, norm_layer=norm_layer) + self.stem = nn.Sequential(*[ + ConvBnAct(in_chans, 32, kernel_size=3, stride=2, **layer_args), + ConvBnAct(32, 64, kernel_size=3, stride=1, **layer_args) + ]) + + curr_dilation = 1 + curr_stride = 2 + self.feature_info = [] + self.blocks = nn.Sequential() + for i, b in enumerate(block_cfg): + 
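+            # Once the running stride would exceed `output_stride`, further downsampling is
+            # traded for dilation (the block's stride is forced back to 1), capping feature
+            # resolution at the requested output stride -- the DeepLab-style atrous substitution.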
b['dilation'] = curr_dilation + if b['stride'] > 1: + self.feature_info += [dict( + num_chs=to_3tuple(b['out_chs'])[-2], reduction=curr_stride, module=f'blocks.{i}.stack.act3')] + next_stride = curr_stride * b['stride'] + if next_stride > output_stride: + curr_dilation *= b['stride'] + b['stride'] = 1 + else: + curr_stride = next_stride + self.blocks.add_module(str(i), XceptionModule(**b, **layer_args)) + self.num_features = self.blocks[-1].out_channels + + self.feature_info += [dict( + num_chs=self.num_features, reduction=curr_stride, module='blocks.' + str(len(self.blocks) - 1))] + + self.head = ClassifierHead( + in_chs=self.num_features, num_classes=num_classes, pool_type=global_pool, drop_rate=drop_rate) + + def get_classifier(self): + return self.head.fc + + def reset_classifier(self, num_classes, global_pool='avg'): + self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) + + def forward_features(self, x): + x = self.stem(x) + x = self.blocks(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def _xception(variant, pretrained=False, **kwargs): + return build_model_with_cfg( + XceptionAligned, variant, pretrained, + default_cfg=default_cfgs[variant], + feature_cfg=dict(flatten_sequential=True, feature_cls='hook'), + **kwargs) + + +@register_model +def xception41(pretrained=False, **kwargs): + """ Modified Aligned Xception-41 + """ + block_cfg = [ + # entry flow + dict(in_chs=64, out_chs=128, stride=2), + dict(in_chs=128, out_chs=256, stride=2), + dict(in_chs=256, out_chs=728, stride=2), + # middle flow + *([dict(in_chs=728, out_chs=728, stride=1)] * 8), + # exit flow + dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), + dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True, start_with_relu=False), + ] + model_args = dict(block_cfg=block_cfg, norm_layer=partial(nn.BatchNorm2d, eps=.001, momentum=.1), **kwargs) + return _xception('xception41', pretrained=pretrained, **model_args) + + +@register_model +def xception65(pretrained=False, **kwargs): + """ Modified Aligned Xception-65 + """ + block_cfg = [ + # entry flow + dict(in_chs=64, out_chs=128, stride=2), + dict(in_chs=128, out_chs=256, stride=2), + dict(in_chs=256, out_chs=728, stride=2), + # middle flow + *([dict(in_chs=728, out_chs=728, stride=1)] * 16), + # exit flow + dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), + dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True, start_with_relu=False), + ] + model_args = dict(block_cfg=block_cfg, norm_layer=partial(nn.BatchNorm2d, eps=.001, momentum=.1), **kwargs) + return _xception('xception65', pretrained=pretrained, **model_args) + + +@register_model +def xception71(pretrained=False, **kwargs): + """ Modified Aligned Xception-71 + """ + block_cfg = [ + # entry flow + dict(in_chs=64, out_chs=128, stride=2), + dict(in_chs=128, out_chs=256, stride=1), + dict(in_chs=256, out_chs=256, stride=2), + dict(in_chs=256, out_chs=728, stride=1), + dict(in_chs=728, out_chs=728, stride=2), + # middle flow + *([dict(in_chs=728, out_chs=728, stride=1)] * 16), + # exit flow + dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), + dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True, start_with_relu=False), + ] + model_args = dict(block_cfg=block_cfg, norm_layer=partial(nn.BatchNorm2d, eps=.001, momentum=.1), **kwargs) + return _xception('xception71', pretrained=pretrained, **model_args) diff --git a/timm/models/xcit.py 
b/timm/models/xcit.py new file mode 100644 index 0000000..ac5e802 --- /dev/null +++ b/timm/models/xcit.py @@ -0,0 +1,812 @@ +""" Cross-Covariance Image Transformer (XCiT) in PyTorch + +Same as the official implementation, with some minor adaptations. + - https://github.com/facebookresearch/xcit/blob/master/xcit.py + +Paper: + - https://arxiv.org/abs/2106.09681 +""" +# Copyright (c) 2015-present, Facebook, Inc. +# All rights reserved. + +import math +from functools import partial + +import torch +import torch.nn as nn + +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .helpers import build_model_with_cfg +from .vision_transformer import _cfg, Mlp +from .registry import register_model +from .layers import DropPath, trunc_normal_, to_2tuple +from .cait import ClassAttn +from .fx_features import register_notrace_module + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': 1.0, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'patch_embed.proj.0.0', 'classifier': 'head', + **kwargs + } + + +default_cfgs = { + # Patch size 16 + 'xcit_nano_12_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_224.pth'), + 'xcit_nano_12_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_224_dist.pth'), + 'xcit_nano_12_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_tiny_12_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_224.pth'), + 'xcit_tiny_12_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_224_dist.pth'), + 'xcit_tiny_12_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_tiny_24_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_224.pth'), + 'xcit_tiny_24_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_224_dist.pth'), + 'xcit_tiny_24_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_small_12_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_224.pth'), + 'xcit_small_12_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_224_dist.pth'), + 'xcit_small_12_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_small_24_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_224.pth'), + 'xcit_small_24_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_224_dist.pth'), + 'xcit_small_24_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_medium_24_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_224.pth'), + 'xcit_medium_24_p16_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_224_dist.pth'), + 'xcit_medium_24_p16_384_dist': _cfg( + url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_384_dist.pth', input_size=(3, 384, 384)), + 'xcit_large_24_p16_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_224.pth'), + 'xcit_large_24_p16_224_dist': 
_cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_224_dist.pth'),
+    'xcit_large_24_p16_384_dist': _cfg(
+        url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_384_dist.pth', input_size=(3, 384, 384)),
+
+    # Patch size 8
+    'xcit_nano_12_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_224.pth'),
+    'xcit_nano_12_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_224_dist.pth'),
+    'xcit_nano_12_p8_384_dist': _cfg(
+        url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_384_dist.pth', input_size=(3, 384, 384)),
+    'xcit_tiny_12_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_224.pth'),
+    'xcit_tiny_12_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_224_dist.pth'),
+    'xcit_tiny_12_p8_384_dist': _cfg(
+        url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_384_dist.pth', input_size=(3, 384, 384)),
+    'xcit_tiny_24_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_224.pth'),
+    'xcit_tiny_24_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_224_dist.pth'),
+    'xcit_tiny_24_p8_384_dist': _cfg(
+        url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_384_dist.pth', input_size=(3, 384, 384)),
+    'xcit_small_12_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_224.pth'),
+    'xcit_small_12_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_224_dist.pth'),
+    'xcit_small_12_p8_384_dist': _cfg(
+        url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_384_dist.pth', input_size=(3, 384, 384)),
+    'xcit_small_24_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_224.pth'),
+    'xcit_small_24_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_224_dist.pth'),
+    'xcit_small_24_p8_384_dist': _cfg(
+        url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_384_dist.pth', input_size=(3, 384, 384)),
+    'xcit_medium_24_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_224.pth'),
+    'xcit_medium_24_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_224_dist.pth'),
+    'xcit_medium_24_p8_384_dist': _cfg(
+        url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_384_dist.pth', input_size=(3, 384, 384)),
+    'xcit_large_24_p8_224': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_224.pth'),
+    'xcit_large_24_p8_224_dist': _cfg(url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_224_dist.pth'),
+    'xcit_large_24_p8_384_dist': _cfg(
+        url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_384_dist.pth', input_size=(3, 384, 384)),
+}
+
+
+@register_notrace_module  # reason: FX can't symbolically trace torch.arange in forward method
+class PositionalEncodingFourier(nn.Module):
+    """
+    Positional encoding relying on a Fourier kernel matching the one used in the "Attention Is All You Need" paper.
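+    For each spatial coordinate (normalized to (0, 2*pi]) and channel pair (2i, 2i + 1), the encoding is the
+    standard sinusoid sin(t / temperature^(2i / hidden_dim)) and cos(t / temperature^(2i / hidden_dim)),
+    computed separately for the y and x axes and concatenated before the 1x1 token projection below.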
+    Based on the official XCiT code
+        - https://github.com/facebookresearch/xcit/blob/master/xcit.py
+    """
+
+    def __init__(self, hidden_dim=32, dim=768, temperature=10000):
+        super().__init__()
+        self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1)
+        self.scale = 2 * math.pi
+        self.temperature = temperature
+        self.hidden_dim = hidden_dim
+        self.dim = dim
+        self.eps = 1e-6
+
+    def forward(self, B: int, H: int, W: int):
+        device = self.token_projection.weight.device
+        y_embed = torch.arange(1, H+1, dtype=torch.float32, device=device).unsqueeze(1).repeat(1, 1, W)
+        x_embed = torch.arange(1, W+1, dtype=torch.float32, device=device).repeat(1, H, 1)
+        y_embed = y_embed / (y_embed[:, -1:, :] + self.eps) * self.scale
+        x_embed = x_embed / (x_embed[:, :, -1:] + self.eps) * self.scale
+        dim_t = torch.arange(self.hidden_dim, dtype=torch.float32, device=device)
+        dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.hidden_dim)
+        pos_x = x_embed[:, :, :, None] / dim_t
+        pos_y = y_embed[:, :, :, None] / dim_t
+        pos_x = torch.stack([pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()], dim=4).flatten(3)
+        pos_y = torch.stack([pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()], dim=4).flatten(3)
+        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
+        pos = self.token_projection(pos)
+        return pos.repeat(B, 1, 1, 1)  # (B, C, H, W)
+
+
+def conv3x3(in_planes, out_planes, stride=1):
+    """3x3 convolution + batch norm"""
+    return torch.nn.Sequential(
+        nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False),
+        nn.BatchNorm2d(out_planes)
+    )
+
+
+class ConvPatchEmbed(nn.Module):
+    """Image to Patch Embedding using multiple convolutional layers"""
+
+    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, act_layer=nn.GELU):
+        super().__init__()
+        img_size = to_2tuple(img_size)
+        num_patches = (img_size[1] // patch_size) * (img_size[0] // patch_size)
+        self.img_size = img_size
+        self.patch_size = patch_size
+        self.num_patches = num_patches
+
+        if patch_size == 16:
+            self.proj = torch.nn.Sequential(
+                conv3x3(in_chans, embed_dim // 8, 2),
+                act_layer(),
+                conv3x3(embed_dim // 8, embed_dim // 4, 2),
+                act_layer(),
+                conv3x3(embed_dim // 4, embed_dim // 2, 2),
+                act_layer(),
+                conv3x3(embed_dim // 2, embed_dim, 2),
+            )
+        elif patch_size == 8:
+            self.proj = torch.nn.Sequential(
+                conv3x3(in_chans, embed_dim // 4, 2),
+                act_layer(),
+                conv3x3(embed_dim // 4, embed_dim // 2, 2),
+                act_layer(),
+                conv3x3(embed_dim // 2, embed_dim, 2),
+            )
+        else:
+            # raising a bare string is itself a TypeError at runtime; raise a proper exception
+            raise ValueError('For convolutional projection, patch size has to be in [8, 16]')
+
+    def forward(self, x):
+        x = self.proj(x)
+        Hp, Wp = x.shape[2], x.shape[3]
+        x = x.flatten(2).transpose(1, 2)  # (B, N, C)
+        return x, (Hp, Wp)
+
+
+class LPI(nn.Module):
+    """
+    Local Patch Interaction module that allows explicit communication between tokens in 3x3 windows to augment the
+    implicit communication performed by the block diagonal scatter attention.
Implemented using 2 layers of separable + 3x3 convolutions with GeLU and BatchNorm2d + """ + + def __init__(self, in_features, out_features=None, act_layer=nn.GELU, kernel_size=3): + super().__init__() + out_features = out_features or in_features + + padding = kernel_size // 2 + + self.conv1 = torch.nn.Conv2d( + in_features, in_features, kernel_size=kernel_size, padding=padding, groups=in_features) + self.act = act_layer() + self.bn = nn.BatchNorm2d(in_features) + self.conv2 = torch.nn.Conv2d( + in_features, out_features, kernel_size=kernel_size, padding=padding, groups=out_features) + + def forward(self, x, H: int, W: int): + B, N, C = x.shape + x = x.permute(0, 2, 1).reshape(B, C, H, W) + x = self.conv1(x) + x = self.act(x) + x = self.bn(x) + x = self.conv2(x) + x = x.reshape(B, C, N).permute(0, 2, 1) + return x + + +class ClassAttentionBlock(nn.Module): + """Class Attention Layer as in CaiT https://arxiv.org/abs/2103.17239""" + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., drop_path=0., + act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=1., tokens_norm=False): + super().__init__() + self.norm1 = norm_layer(dim) + + self.attn = ClassAttn( + dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop) + + if eta is not None: # LayerScale Initialization (no layerscale when None) + self.gamma1 = nn.Parameter(eta * torch.ones(dim), requires_grad=True) + self.gamma2 = nn.Parameter(eta * torch.ones(dim), requires_grad=True) + else: + self.gamma1, self.gamma2 = 1.0, 1.0 + + # See https://github.com/rwightman/pytorch-image-models/pull/747#issuecomment-877795721 + self.tokens_norm = tokens_norm + + def forward(self, x): + x_norm1 = self.norm1(x) + x_attn = torch.cat([self.attn(x_norm1), x_norm1[:, 1:]], dim=1) + x = x + self.drop_path(self.gamma1 * x_attn) + if self.tokens_norm: + x = self.norm2(x) + else: + x = torch.cat([self.norm2(x[:, 0:1]), x[:, 1:]], dim=1) + x_res = x + cls_token = x[:, 0:1] + cls_token = self.gamma2 * self.mlp(cls_token) + x = torch.cat([cls_token, x[:, 1:]], dim=1) + x = x_res + self.drop_path(x) + return x + + +class XCA(nn.Module): + """ Cross-Covariance Attention (XCA) + Operation where the channels are updated using a weighted sum. 
The weights are obtained from the (softmax + normalized) Cross-covariance matrix (Q^T \\cdot K \\in d_h \\times d_h) + """ + + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1)) + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + # Result of next line is (qkv, B, num (H)eads, (C')hannels per head, N) + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 4, 1) + q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) + + # Paper section 3.2 l2-Normalization and temperature scaling + q = torch.nn.functional.normalize(q, dim=-1) + k = torch.nn.functional.normalize(k, dim=-1) + attn = (q @ k.transpose(-2, -1)) * self.temperature + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + # (B, H, C', N), permute -> (B, N, H, C') + x = (attn @ v).permute(0, 3, 1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + @torch.jit.ignore + def no_weight_decay(self): + return {'temperature'} + + +class XCABlock(nn.Module): + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=1.): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = XCA(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + + self.norm3 = norm_layer(dim) + self.local_mp = LPI(in_features=dim, act_layer=act_layer) + + self.norm2 = norm_layer(dim) + self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop) + + self.gamma1 = nn.Parameter(eta * torch.ones(dim), requires_grad=True) + self.gamma3 = nn.Parameter(eta * torch.ones(dim), requires_grad=True) + self.gamma2 = nn.Parameter(eta * torch.ones(dim), requires_grad=True) + + def forward(self, x, H: int, W: int): + x = x + self.drop_path(self.gamma1 * self.attn(self.norm1(x))) + # NOTE official code has 3 then 2, so keeping it the same to be consistent with loaded weights + # See https://github.com/rwightman/pytorch-image-models/pull/747#issuecomment-877795721 + x = x + self.drop_path(self.gamma3 * self.local_mp(self.norm3(x), H, W)) + x = x + self.drop_path(self.gamma2 * self.mlp(self.norm2(x))) + return x + + +class XCiT(nn.Module): + """ + Based on timm and DeiT code bases + https://github.com/rwightman/pytorch-image-models/tree/master/timm + https://github.com/facebookresearch/deit/ + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, + num_heads=12, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., + act_layer=None, norm_layer=None, cls_attn_layers=2, use_pos_embed=True, eta=1., tokens_norm=False): + """ + Args: + img_size (int, tuple): input image size + patch_size (int): patch size + in_chans (int): number of input channels + num_classes (int): number of classes for classification head + embed_dim (int): embedding dimension + depth (int): depth of transformer + num_heads (int): number of attention heads + mlp_ratio (int): ratio of mlp hidden dim to embedding dim + qkv_bias (bool): enable bias for qkv if True + drop_rate (float): dropout rate 
after positional embedding, and in XCA/CA projection + MLP
+            attn_drop_rate (float): attention dropout rate
+            drop_path_rate (float): stochastic depth rate (constant across all layers)
+            norm_layer (nn.Module): normalization layer
+            cls_attn_layers (int): depth of Class attention layers
+            use_pos_embed (bool): whether to use positional encoding
+            eta (float): layerscale initialization value
+            tokens_norm (bool): whether to normalize all tokens or just the cls_token in the CA
+
+        Notes:
+            - Although `norm_layer` is user specifiable, there are hard-coded `BatchNorm2d`s in the local patch
+              interaction (class LPI) and the patch embedding (class ConvPatchEmbed)
+        """
+        super().__init__()
+        img_size = to_2tuple(img_size)
+        # check both height and width (the original asserted on img_size[0] twice)
+        assert (img_size[0] % patch_size == 0) and (img_size[1] % patch_size == 0), \
+            '`patch_size` should divide image dimensions evenly'
+
+        self.num_classes = num_classes
+        self.num_features = self.embed_dim = embed_dim
+        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
+        act_layer = act_layer or nn.GELU
+
+        self.patch_embed = ConvPatchEmbed(
+            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, act_layer=act_layer)
+
+        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
+        self.use_pos_embed = use_pos_embed
+        if use_pos_embed:
+            self.pos_embed = PositionalEncodingFourier(dim=embed_dim)
+        self.pos_drop = nn.Dropout(p=drop_rate)
+
+        self.blocks = nn.ModuleList([
+            XCABlock(
+                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,
+                attn_drop=attn_drop_rate, drop_path=drop_path_rate, act_layer=act_layer, norm_layer=norm_layer, eta=eta)
+            for _ in range(depth)])
+
+        self.cls_attn_blocks = nn.ModuleList([
+            ClassAttentionBlock(
+                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,
+                attn_drop=attn_drop_rate, act_layer=act_layer, norm_layer=norm_layer, eta=eta, tokens_norm=tokens_norm)
+            for _ in range(cls_attn_layers)])
+
+        # Classifier head
+        self.norm = norm_layer(embed_dim)
+        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
+
+        # Init weights
+        trunc_normal_(self.cls_token, std=.02)
+        self.apply(self._init_weights)
+
+    def _init_weights(self, m):
+        if isinstance(m, nn.Linear):
+            trunc_normal_(m.weight, std=.02)
+            if isinstance(m, nn.Linear) and m.bias is not None:
+                nn.init.constant_(m.bias, 0)
+        elif isinstance(m, nn.LayerNorm):
+            nn.init.constant_(m.bias, 0)
+            nn.init.constant_(m.weight, 1.0)
+
+    @torch.jit.ignore
+    def no_weight_decay(self):
+        return {'pos_embed', 'cls_token'}
+
+    def get_classifier(self):
+        return self.head
+
+    def reset_classifier(self, num_classes, global_pool=''):
+        self.num_classes = num_classes
+        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
+
+    def forward_features(self, x):
+        B = x.shape[0]
+        # x is (B, N, C).
(Hp, Hw) is (height in units of patches, width in units of patches) + x, (Hp, Wp) = self.patch_embed(x) + + if self.use_pos_embed: + # `pos_embed` (B, C, Hp, Wp), reshape -> (B, C, N), permute -> (B, N, C) + pos_encoding = self.pos_embed(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1) + x = x + pos_encoding + + x = self.pos_drop(x) + + for blk in self.blocks: + x = blk(x, Hp, Wp) + + cls_tokens = self.cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + + for blk in self.cls_attn_blocks: + x = blk(x) + + x = self.norm(x)[:, 0] + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + if 'model' in state_dict: + state_dict = state_dict['model'] + # For consistency with timm's transformer models while being compatible with official weights source we rename + # pos_embeder to pos_embed. Also account for use_pos_embed == False + use_pos_embed = getattr(model, 'pos_embed', None) is not None + pos_embed_keys = [k for k in state_dict if k.startswith('pos_embed')] + for k in pos_embed_keys: + if use_pos_embed: + state_dict[k.replace('pos_embeder.', 'pos_embed.')] = state_dict.pop(k) + else: + del state_dict[k] + # timm's implementation of class attention in CaiT is slightly more efficient as it does not compute query vectors + # for all tokens, just the class token. To use official weights source we must split qkv into q, k, v + if 'cls_attn_blocks.0.attn.qkv.weight' in state_dict and 'cls_attn_blocks.0.attn.q.weight' in model.state_dict(): + num_ca_blocks = len(model.cls_attn_blocks) + for i in range(num_ca_blocks): + qkv_weight = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.weight') + qkv_weight = qkv_weight.reshape(3, -1, qkv_weight.shape[-1]) + for j, subscript in enumerate('qkv'): + state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.weight'] = qkv_weight[j] + qkv_bias = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.bias', None) + if qkv_bias is not None: + qkv_bias = qkv_bias.reshape(3, -1) + for j, subscript in enumerate('qkv'): + state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.bias'] = qkv_bias[j] + return state_dict + + +def _create_xcit(variant, pretrained=False, default_cfg=None, **kwargs): + default_cfg = default_cfg or default_cfgs[variant] + model = build_model_with_cfg( + XCiT, variant, pretrained, default_cfg=default_cfg, pretrained_filter_fn=checkpoint_filter_fn, **kwargs) + return model + + +@register_model +def xcit_nano_12_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, **kwargs) + model = _create_xcit('xcit_nano_12_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_nano_12_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, **kwargs) + model = _create_xcit('xcit_nano_12_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_nano_12_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, img_size=384, **kwargs) + model = _create_xcit('xcit_nano_12_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_12_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, 
**kwargs) + model = _create_xcit('xcit_tiny_12_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_12_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_12_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_12_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_12_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_12_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_12_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_12_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_12_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_12_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_12_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_24_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_24_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_24_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_24_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_24_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_24_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_24_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_24_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_24_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_24_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_24_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_24_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_medium_24_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=512, 
depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_medium_24_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_medium_24_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_medium_24_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_medium_24_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_medium_24_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_large_24_p16_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_large_24_p16_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_large_24_p16_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_large_24_p16_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_large_24_p16_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_large_24_p16_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +# Patch size 8x8 models +@register_model +def xcit_nano_12_p8_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, **kwargs) + model = _create_xcit('xcit_nano_12_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_nano_12_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, **kwargs) + model = _create_xcit('xcit_nano_12_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_nano_12_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, **kwargs) + model = _create_xcit('xcit_nano_12_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_12_p8_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_12_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_12_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_12_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_12_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_12_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_12_p8_224(pretrained=False, 
**kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_12_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_12_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_12_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_12_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_12_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_24_p8_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_24_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_24_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_24_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_tiny_24_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_tiny_24_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_24_p8_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_24_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_24_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_24_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_small_24_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_small_24_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_medium_24_p8_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_medium_24_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_medium_24_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_medium_24_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_medium_24_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_medium_24_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def 
xcit_large_24_p8_224(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_large_24_p8_224', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_large_24_p8_224_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_large_24_p8_224_dist', pretrained=pretrained, **model_kwargs) + return model + + +@register_model +def xcit_large_24_p8_384_dist(pretrained=False, **kwargs): + model_kwargs = dict( + patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True, **kwargs) + model = _create_xcit('xcit_large_24_p8_384_dist', pretrained=pretrained, **model_kwargs) + return model diff --git a/timm/optim/__init__.py b/timm/optim/__init__.py new file mode 100644 index 0000000..7ee4958 --- /dev/null +++ b/timm/optim/__init__.py @@ -0,0 +1,15 @@ +from .adabelief import AdaBelief +from .adafactor import Adafactor +from .adahessian import Adahessian +from .adamp import AdamP +from .adamw import AdamW +from .lamb import Lamb +from .lars import Lars +from .lookahead import Lookahead +from .madgrad import MADGRAD +from .nadam import Nadam +from .nvnovograd import NvNovoGrad +from .radam import RAdam +from .rmsprop_tf import RMSpropTF +from .sgdp import SGDP +from .optim_factory import create_optimizer, create_optimizer_v2, optimizer_kwargs diff --git a/timm/optim/adabelief.py b/timm/optim/adabelief.py new file mode 100644 index 0000000..951d715 --- /dev/null +++ b/timm/optim/adabelief.py @@ -0,0 +1,201 @@ +import math +import torch +from torch.optim.optimizer import Optimizer + + +class AdaBelief(Optimizer): + r"""Implements AdaBelief algorithm. Modified from Adam in PyTorch + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-16) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + amsgrad (boolean, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) + decoupled_decay (boolean, optional): (default: True) If set as True, then + the optimizer uses decoupled weight decay as in AdamW + fixed_decay (boolean, optional): (default: False) This is used when weight_decouple + is set as True. + When fixed_decay == True, the weight decay is performed as + $W_{new} = W_{old} - W_{old} \times decay$. + When fixed_decay == False, the weight decay is performed as + $W_{new} = W_{old} - W_{old} \times decay \times lr$. Note that in this case, the + weight decay ratio decreases with learning rate (lr). 
+        rectify (boolean, optional): (default: True) If set as True, then perform the rectified
+            update similar to RAdam
+        degenerated_to_sgd (boolean, optional): (default: True) If set as True, then perform SGD update
+            when variance of gradient is high
+
+    reference: AdaBelief Optimizer, adapting stepsizes by the belief in observed gradients, NeurIPS 2020
+
+    For a complete table of recommended hyperparameters, see https://github.com/juntang-zhuang/Adabelief-Optimizer
+    For example train/args for EfficientNet, see these gists:
+        - link to train script: https://gist.github.com/juntang-zhuang/0a501dd51c02278d952cf159bc233037
+        - link to args.yaml: https://gist.github.com/juntang-zhuang/517ce3c27022b908bb93f78e4f786dc3
+    """
+
+    def __init__(
+            self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-16, weight_decay=0, amsgrad=False,
+            decoupled_decay=True, fixed_decay=False, rectify=True, degenerated_to_sgd=True):
+
+        if not 0.0 <= lr:
+            raise ValueError("Invalid learning rate: {}".format(lr))
+        if not 0.0 <= eps:
+            raise ValueError("Invalid epsilon value: {}".format(eps))
+        if not 0.0 <= betas[0] < 1.0:
+            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
+        if not 0.0 <= betas[1] < 1.0:
+            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
+
+        if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict):
+            for param in params:
+                if 'betas' in param and (param['betas'][0] != betas[0] or param['betas'][1] != betas[1]):
+                    param['buffer'] = [[None, None, None] for _ in range(10)]
+
+        defaults = dict(
+            lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad,
+            degenerated_to_sgd=degenerated_to_sgd, decoupled_decay=decoupled_decay, rectify=rectify,
+            fixed_decay=fixed_decay, buffer=[[None, None, None] for _ in range(10)])
+        super(AdaBelief, self).__init__(params, defaults)
+
+    def __setstate__(self, state):
+        super(AdaBelief, self).__setstate__(state)
+        for group in self.param_groups:
+            group.setdefault('amsgrad', False)
+
+    @torch.no_grad()
+    def reset(self):
+        for group in self.param_groups:
+            for p in group['params']:
+                state = self.state[p]
+                amsgrad = group['amsgrad']
+
+                # State initialization
+                state['step'] = 0
+                # Exponential moving average of gradient values
+                state['exp_avg'] = torch.zeros_like(p)
+
+                # Exponential moving average of squared gradient values
+                state['exp_avg_var'] = torch.zeros_like(p)
+                if amsgrad:
+                    # Maintains max of all exp. moving avg. of sq. grad. values
+                    state['max_exp_avg_var'] = torch.zeros_like(p)
+
+    @torch.no_grad()
+    def step(self, closure=None):
+        """Performs a single optimization step.
+        Arguments:
+            closure (callable, optional): A closure that reevaluates the model
+                and returns the loss.
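+
+        Example (illustrative sketch; `model`, `inputs`, `targets` and `loss_fn` are placeholders):
+            optimizer = AdaBelief(model.parameters(), lr=1e-3)
+            def closure():
+                optimizer.zero_grad()
+                loss = loss_fn(model(inputs), targets)
+                loss.backward()
+                return loss
+            loss = optimizer.step(closure)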
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + if grad.dtype in {torch.float16, torch.bfloat16}: + grad = grad.float() + if grad.is_sparse: + raise RuntimeError( + 'AdaBelief does not support sparse gradients, please consider SparseAdam instead') + + p_fp32 = p + if p.dtype in {torch.float16, torch.bfloat16}: + p_fp32 = p_fp32.float() + + amsgrad = group['amsgrad'] + beta1, beta2 = group['betas'] + state = self.state[p] + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p_fp32) + # Exponential moving average of squared gradient values + state['exp_avg_var'] = torch.zeros_like(p_fp32) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. values + state['max_exp_avg_var'] = torch.zeros_like(p_fp32) + + # perform weight decay, check if decoupled weight decay + if group['decoupled_decay']: + if not group['fixed_decay']: + p_fp32.mul_(1.0 - group['lr'] * group['weight_decay']) + else: + p_fp32.mul_(1.0 - group['weight_decay']) + else: + if group['weight_decay'] != 0: + grad.add_(p_fp32, alpha=group['weight_decay']) + + # get current state variable + exp_avg, exp_avg_var = state['exp_avg'], state['exp_avg_var'] + + state['step'] += 1 + bias_correction1 = 1 - beta1 ** state['step'] + bias_correction2 = 1 - beta2 ** state['step'] + + # Update first and second moment running average + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + grad_residual = grad - exp_avg + exp_avg_var.mul_(beta2).addcmul_(grad_residual, grad_residual, value=1 - beta2) + + if amsgrad: + max_exp_avg_var = state['max_exp_avg_var'] + # Maintains the maximum of all 2nd moment running avg. till now + torch.max(max_exp_avg_var, exp_avg_var.add_(group['eps']), out=max_exp_avg_var) + + # Use the max. for normalizing running avg. 
of gradient + denom = (max_exp_avg_var.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + else: + denom = (exp_avg_var.add_(group['eps']).sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + + # update + if not group['rectify']: + # Default update + step_size = group['lr'] / bias_correction1 + p_fp32.addcdiv_(exp_avg, denom, value=-step_size) + else: + # Rectified update, forked from RAdam + buffered = group['buffer'][int(state['step'] % 10)] + if state['step'] == buffered[0]: + num_sma, step_size = buffered[1], buffered[2] + else: + buffered[0] = state['step'] + beta2_t = beta2 ** state['step'] + num_sma_max = 2 / (1 - beta2) - 1 + num_sma = num_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) + buffered[1] = num_sma + + # more conservative since it's an approximated value + if num_sma >= 5: + step_size = math.sqrt( + (1 - beta2_t) * + (num_sma - 4) / (num_sma_max - 4) * + (num_sma - 2) / num_sma * + num_sma_max / (num_sma_max - 2)) / (1 - beta1 ** state['step']) + elif group['degenerated_to_sgd']: + step_size = 1.0 / (1 - beta1 ** state['step']) + else: + step_size = -1 + buffered[2] = step_size + + if num_sma >= 5: + denom = exp_avg_var.sqrt().add_(group['eps']) + p_fp32.addcdiv_(exp_avg, denom, value=-step_size * group['lr']) + elif step_size > 0: + p_fp32.add_(exp_avg, alpha=-step_size * group['lr']) + + if p.dtype in {torch.float16, torch.bfloat16}: + p.copy_(p_fp32) + + return loss diff --git a/timm/optim/adafactor.py b/timm/optim/adafactor.py new file mode 100644 index 0000000..0605743 --- /dev/null +++ b/timm/optim/adafactor.py @@ -0,0 +1,167 @@ +""" Adafactor Optimizer + +Lifted from https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py + +Original header/copyright below. + +""" +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +import torch +import math + + +class Adafactor(torch.optim.Optimizer): + """Implements Adafactor algorithm. + This implementation is based on: `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost` + (see https://arxiv.org/abs/1804.04235) + + Note that this optimizer internally adjusts the learning rate depending on the + *scale_parameter*, *relative_step* and *warmup_init* options. + + To use a manual (external) learning rate schedule you should set `scale_parameter=False` and + `relative_step=False`. 
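+
+    Example (illustrative; `model` is a placeholder):
+        opt = Adafactor(model.parameters())  # lr=None -> internally-managed relative step sizes
+        opt = Adafactor(model.parameters(), lr=1e-3, scale_parameter=False)  # external LR schedule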
+ + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining parameter groups + lr (float, optional): external learning rate (default: None) + eps (tuple[float, float]): regularization constants for square gradient + and parameter scale respectively (default: (1e-30, 1e-3)) + clip_threshold (float): threshold of root mean square of final gradient update (default: 1.0) + decay_rate (float): coefficient used to compute running averages of square gradient (default: -0.8) + beta1 (float): coefficient used for computing running averages of gradient (default: None) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + scale_parameter (bool): if True, learning rate is scaled by root mean square of parameter (default: True) + warmup_init (bool): time-dependent learning rate computation depends on + whether warm-up initialization is being used (default: False) + """ + + def __init__(self, params, lr=None, eps=1e-30, eps_scale=1e-3, clip_threshold=1.0, + decay_rate=-0.8, betas=None, weight_decay=0.0, scale_parameter=True, warmup_init=False): + relative_step = not lr + if warmup_init and not relative_step: + raise ValueError('warmup_init requires relative_step=True') + + beta1 = None if betas is None else betas[0] # make it compat with standard betas arg + defaults = dict(lr=lr, eps=eps, eps_scale=eps_scale, clip_threshold=clip_threshold, decay_rate=decay_rate, + beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter, + relative_step=relative_step, warmup_init=warmup_init) + super(Adafactor, self).__init__(params, defaults) + + @staticmethod + def _get_lr(param_group, param_state): + if param_group['relative_step']: + min_step = 1e-6 * param_state['step'] if param_group['warmup_init'] else 1e-2 + lr_t = min(min_step, 1.0 / math.sqrt(param_state['step'])) + param_scale = 1.0 + if param_group['scale_parameter']: + param_scale = max(param_group['eps_scale'], param_state['RMS']) + param_group['lr'] = lr_t * param_scale + return param_group['lr'] + + @staticmethod + def _get_options(param_group, param_shape): + factored = len(param_shape) >= 2 + use_first_moment = param_group['beta1'] is not None + return factored, use_first_moment + + @staticmethod + def _rms(tensor): + return tensor.norm(2) / (tensor.numel() ** 0.5) + + def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col): + r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1) + c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt() + return torch.mul(r_factor, c_factor) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + Arguments: + closure (callable, optional): A closure that reevaluates the model and returns the loss. 
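+
+        Note: for parameters with 2+ dims the second moment is kept in factored form
+        (`exp_avg_sq_row` / `exp_avg_sq_col` means), giving sublinear memory cost relative to Adam.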
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + if grad.dtype in {torch.float16, torch.bfloat16}: + grad = grad.float() + if grad.is_sparse: + raise RuntimeError('Adafactor does not support sparse gradients.') + + state = self.state[p] + + factored, use_first_moment = self._get_options(group, grad.shape) + # State Initialization + if len(state) == 0: + state['step'] = 0 + + if use_first_moment: + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(grad) + if factored: + state['exp_avg_sq_row'] = torch.zeros(grad.shape[:-1]).to(grad) + state['exp_avg_sq_col'] = torch.zeros(grad.shape[:-2] + grad.shape[-1:]).to(grad) + else: + state['exp_avg_sq'] = torch.zeros_like(grad) + + state['RMS'] = 0 + else: + if use_first_moment: + state['exp_avg'] = state['exp_avg'].to(grad) + if factored: + state['exp_avg_sq_row'] = state['exp_avg_sq_row'].to(grad) + state['exp_avg_sq_col'] = state['exp_avg_sq_col'].to(grad) + else: + state['exp_avg_sq'] = state['exp_avg_sq'].to(grad) + + p_fp32 = p + if p.dtype in {torch.float16, torch.bfloat16}: + p_fp32 = p_fp32.float() + + state['step'] += 1 + state['RMS'] = self._rms(p_fp32) + lr_t = self._get_lr(group, state) + + beta2t = 1.0 - math.pow(state['step'], group['decay_rate']) + update = grad ** 2 + group['eps'] + if factored: + exp_avg_sq_row = state['exp_avg_sq_row'] + exp_avg_sq_col = state['exp_avg_sq_col'] + + exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=1.0 - beta2t) + exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=1.0 - beta2t) + + # Approximation of exponential moving average of square of gradient + update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) + update.mul_(grad) + else: + exp_avg_sq = state['exp_avg_sq'] + + exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t) + update = exp_avg_sq.rsqrt().mul_(grad) + + update.div_((self._rms(update) / group['clip_threshold']).clamp_(min=1.0)) + update.mul_(lr_t) + + if use_first_moment: + exp_avg = state['exp_avg'] + exp_avg.mul_(group['beta1']).add_(update, alpha=1 - group['beta1']) + update = exp_avg + + if group['weight_decay'] != 0: + p_fp32.add_(p_fp32, alpha=-group['weight_decay'] * lr_t) + + p_fp32.add_(-update) + if p.dtype in {torch.float16, torch.bfloat16}: + p.copy_(p_fp32) + + return loss diff --git a/timm/optim/adahessian.py b/timm/optim/adahessian.py new file mode 100644 index 0000000..985c67c --- /dev/null +++ b/timm/optim/adahessian.py @@ -0,0 +1,156 @@ +""" AdaHessian Optimizer + +Lifted from https://github.com/davda54/ada-hessian/blob/master/ada_hessian.py +Originally licensed MIT, Copyright 2020, David Samuel +""" +import torch + + +class Adahessian(torch.optim.Optimizer): + """ + Implements the AdaHessian algorithm from "ADAHESSIAN: An Adaptive Second OrderOptimizer for Machine Learning" + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining parameter groups + lr (float, optional): learning rate (default: 0.1) + betas ((float, float), optional): coefficients used for computing running averages of gradient and the + squared hessian trace (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0.0) + hessian_power (float, optional): exponent of the hessian trace (default: 1.0) + update_each 
(int, optional): compute the hessian trace approximation only after *this* number of steps
+            (to save time) (default: 1)
+        n_samples (int, optional): how many times to sample `z` for the approximation of the hessian trace (default: 1)
+    """
+
+    def __init__(self, params, lr=0.1, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0,
+                 hessian_power=1.0, update_each=1, n_samples=1, avg_conv_kernel=False):
+        if not 0.0 <= lr:
+            raise ValueError(f"Invalid learning rate: {lr}")
+        if not 0.0 <= eps:
+            raise ValueError(f"Invalid epsilon value: {eps}")
+        if not 0.0 <= betas[0] < 1.0:
+            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
+        if not 0.0 <= betas[1] < 1.0:
+            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
+        if not 0.0 <= hessian_power <= 1.0:
+            raise ValueError(f"Invalid Hessian power value: {hessian_power}")
+
+        self.n_samples = n_samples
+        self.update_each = update_each
+        self.avg_conv_kernel = avg_conv_kernel
+
+        # use a separate generator that deterministically generates the same `z`s across all GPUs
+        # in case of distributed training
+        self.seed = 2147483647
+        self.generator = torch.Generator().manual_seed(self.seed)
+
+        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, hessian_power=hessian_power)
+        super(Adahessian, self).__init__(params, defaults)
+
+        for p in self.get_params():
+            p.hess = 0.0
+            self.state[p]["hessian step"] = 0
+
+    @property
+    def is_second_order(self):
+        return True
+
+    def get_params(self):
+        """
+        Gets all parameters in all param_groups with gradients
+        """
+
+        return (p for group in self.param_groups for p in group['params'] if p.requires_grad)
+
+    def zero_hessian(self):
+        """
+        Zeros out the accumulated hessian traces.
+        """
+
+        for p in self.get_params():
+            if not isinstance(p.hess, float) and self.state[p]["hessian step"] % self.update_each == 0:
+                p.hess.zero_()
+
+    @torch.no_grad()
+    def set_hessian(self):
+        """
+        Computes the Hutchinson approximation of the hessian trace and accumulates it for each trainable parameter.
+        """
+
+        params = []
+        for p in filter(lambda p: p.grad is not None, self.get_params()):
+            if self.state[p]["hessian step"] % self.update_each == 0:  # compute the trace only each `update_each` step
+                params.append(p)
+            self.state[p]["hessian step"] += 1
+
+        if len(params) == 0:
+            return
+
+        if self.generator.device != params[0].device:  # hackish way of casting the generator to the right device
+            self.generator = torch.Generator(params[0].device).manual_seed(self.seed)
+
+        grads = [p.grad for p in params]
+
+        for i in range(self.n_samples):
+            # Rademacher distribution {-1.0, 1.0}
+            zs = [torch.randint(0, 2, p.size(), generator=self.generator, device=p.device) * 2.0 - 1.0 for p in params]
+            h_zs = torch.autograd.grad(
+                grads, params, grad_outputs=zs, only_inputs=True, retain_graph=i < self.n_samples - 1)
+            for h_z, z, p in zip(h_zs, zs, params):
+                p.hess += h_z * z / self.n_samples  # approximate the expected values of z*(H@z)
+
+    @torch.no_grad()
+    def step(self, closure=None):
+        """
+        Performs a single optimization step.
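+        Note: the Hutchinson estimate in `set_hessian` differentiates through the existing gradients,
+        so the preceding backward pass must be run with `loss.backward(create_graph=True)`.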
+ Arguments: + closure (callable, optional) -- a closure that reevaluates the model and returns the loss (default: None) + """ + + loss = None + if closure is not None: + loss = closure() + + self.zero_hessian() + self.set_hessian() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None or p.hess is None: + continue + + if self.avg_conv_kernel and p.dim() == 4: + p.hess = torch.abs(p.hess).mean(dim=[2, 3], keepdim=True).expand_as(p.hess).clone() + + # Perform correct stepweight decay as in AdamW + p.mul_(1 - group['lr'] * group['weight_decay']) + + state = self.state[p] + + # State initialization + if len(state) == 1: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p) + # Exponential moving average of Hessian diagonal square values + state['exp_hessian_diag_sq'] = torch.zeros_like(p) + + exp_avg, exp_hessian_diag_sq = state['exp_avg'], state['exp_hessian_diag_sq'] + beta1, beta2 = group['betas'] + state['step'] += 1 + + # Decay the first and second moment running average coefficient + exp_avg.mul_(beta1).add_(p.grad, alpha=1 - beta1) + exp_hessian_diag_sq.mul_(beta2).addcmul_(p.hess, p.hess, value=1 - beta2) + + bias_correction1 = 1 - beta1 ** state['step'] + bias_correction2 = 1 - beta2 ** state['step'] + + k = group['hessian_power'] + denom = (exp_hessian_diag_sq / bias_correction2).pow_(k / 2).add_(group['eps']) + + # make update + step_size = group['lr'] / bias_correction1 + p.addcdiv_(exp_avg, denom, value=-step_size) + + return loss diff --git a/timm/optim/adamp.py b/timm/optim/adamp.py new file mode 100644 index 0000000..ee18763 --- /dev/null +++ b/timm/optim/adamp.py @@ -0,0 +1,105 @@ +""" +AdamP Optimizer Implementation copied from https://github.com/clovaai/AdamP/blob/master/adamp/adamp.py + +Paper: `Slowing Down the Weight Norm Increase in Momentum-based Optimizers` - https://arxiv.org/abs/2006.08217 +Code: https://github.com/clovaai/AdamP + +Copyright (c) 2020-present NAVER Corp. +MIT license +""" + +import torch +import torch.nn.functional as F +from torch.optim.optimizer import Optimizer +import math + + +def _channel_view(x) -> torch.Tensor: + return x.reshape(x.size(0), -1) + + +def _layer_view(x) -> torch.Tensor: + return x.reshape(1, -1) + + +def projection(p, grad, perturb, delta: float, wd_ratio: float, eps: float): + wd = 1. 
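+    # Editor's note: for scale-invariant weights (gradient nearly orthogonal to the
+    # weight vector, detected via the cosine-similarity test below), AdamP removes the
+    # radial component of the update: perturb -= w_hat * <w_hat, perturb>, tried first
+    # per-channel and then per-layer, and signals damped weight decay via wd = wd_ratio.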
+ expand_size = (-1,) + (1,) * (len(p.shape) - 1) + for view_func in [_channel_view, _layer_view]: + param_view = view_func(p) + grad_view = view_func(grad) + cosine_sim = F.cosine_similarity(grad_view, param_view, dim=1, eps=eps).abs_() + + # FIXME this is a problem for PyTorch XLA + if cosine_sim.max() < delta / math.sqrt(param_view.size(1)): + p_n = p / param_view.norm(p=2, dim=1).add_(eps).reshape(expand_size) + perturb -= p_n * view_func(p_n * perturb).sum(dim=1).reshape(expand_size) + wd = wd_ratio + return perturb, wd + + return perturb, wd + + +class AdamP(Optimizer): + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, + weight_decay=0, delta=0.1, wd_ratio=0.1, nesterov=False): + defaults = dict( + lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, + delta=delta, wd_ratio=wd_ratio, nesterov=nesterov) + super(AdamP, self).__init__(params, defaults) + + @torch.no_grad() + def step(self, closure=None): + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + + grad = p.grad + beta1, beta2 = group['betas'] + nesterov = group['nesterov'] + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + state['exp_avg'] = torch.zeros_like(p) + state['exp_avg_sq'] = torch.zeros_like(p) + + # Adam + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + + state['step'] += 1 + bias_correction1 = 1 - beta1 ** state['step'] + bias_correction2 = 1 - beta2 ** state['step'] + + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) + + denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + step_size = group['lr'] / bias_correction1 + + if nesterov: + perturb = (beta1 * exp_avg + (1 - beta1) * grad) / denom + else: + perturb = exp_avg / denom + + # Projection + wd_ratio = 1. + if len(p.shape) > 1: + perturb, wd_ratio = projection(p, grad, perturb, group['delta'], group['wd_ratio'], group['eps']) + + # Weight decay + if group['weight_decay'] > 0: + p.mul_(1. - group['lr'] * group['weight_decay'] * wd_ratio) + + # Step + p.add_(perturb, alpha=-step_size) + + return loss diff --git a/timm/optim/adamw.py b/timm/optim/adamw.py new file mode 100644 index 0000000..66478bc --- /dev/null +++ b/timm/optim/adamw.py @@ -0,0 +1,122 @@ +""" AdamW Optimizer +Impl copied from PyTorch master + +NOTE: Builtin optim.AdamW is used by the factory, this impl only serves as a Python based reference, will be removed +someday +""" +import math +import torch +from torch.optim.optimizer import Optimizer + + +class AdamW(Optimizer): + r"""Implements AdamW algorithm. + + The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_. + The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_. 
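+
+    Sketch of the decoupled update performed in `step` below (editor's summary)::
+
+        p = p * (1 - lr * weight_decay)              # decay applied directly to weights
+        p = p - lr * m_hat / (sqrt(v_hat) + eps)     # Adam step from bias-corrected moments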
+ + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay coefficient (default: 1e-2) + amsgrad (boolean, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) + + .. _Adam\: A Method for Stochastic Optimization: + https://arxiv.org/abs/1412.6980 + .. _Decoupled Weight Decay Regularization: + https://arxiv.org/abs/1711.05101 + .. _On the Convergence of Adam and Beyond: + https://openreview.net/forum?id=ryQu7f-RZ + """ + + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, + weight_decay=1e-2, amsgrad=False): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) + defaults = dict(lr=lr, betas=betas, eps=eps, + weight_decay=weight_decay, amsgrad=amsgrad) + super(AdamW, self).__init__(params, defaults) + + def __setstate__(self, state): + super(AdamW, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('amsgrad', False) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + + # Perform stepweight decay + p.data.mul_(1 - group['lr'] * group['weight_decay']) + + # Perform optimization step + grad = p.grad + if grad.is_sparse: + raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') + amsgrad = group['amsgrad'] + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. values + state['max_exp_avg_sq'] = torch.zeros_like(p) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + if amsgrad: + max_exp_avg_sq = state['max_exp_avg_sq'] + beta1, beta2 = group['betas'] + + state['step'] += 1 + bias_correction1 = 1 - beta1 ** state['step'] + bias_correction2 = 1 - beta2 ** state['step'] + + # Decay the first and second moment running average coefficient + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) + # Use the max. for normalizing running avg. 
of gradient + denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + else: + denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + + step_size = group['lr'] / bias_correction1 + + p.addcdiv_(exp_avg, denom, value=-step_size) + + return loss diff --git a/timm/optim/lamb.py b/timm/optim/lamb.py new file mode 100644 index 0000000..12c7c49 --- /dev/null +++ b/timm/optim/lamb.py @@ -0,0 +1,192 @@ +""" PyTorch Lamb optimizer w/ behaviour similar to NVIDIA FusedLamb + +This optimizer code was adapted from the following (starting with latest) +* https://github.com/HabanaAI/Model-References/blob/2b435114fe8e31f159b1d3063b8280ae37af7423/PyTorch/nlp/bert/pretraining/lamb.py +* https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py +* https://github.com/cybertronai/pytorch-lamb + +Use FusedLamb if you can (GPU). The reason for including this variant of Lamb is to have a version that is +similar in behaviour to APEX FusedLamb if you aren't using NVIDIA GPUs or cannot install/use APEX. + +In addition to some cleanup, this Lamb impl has been modified to support PyTorch XLA and has been tested on TPU. + +Original copyrights for above sources are below. + +Modifications Copyright 2021 Ross Wightman +""" +# Copyright (c) 2021, Habana Labs Ltd. All rights reserved. + +# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# MIT License +# +# Copyright (c) 2019 cybertronai +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
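+# A minimal usage sketch (editor's illustration; `model`, `criterion`, `x` and `y`
+# are hypothetical stand-ins, not defined in this file):
+#
+#   optimizer = Lamb(model.parameters(), lr=1e-3, weight_decay=0.01)
+#   criterion(model(x), y).backward()
+#   optimizer.step()
+#   optimizer.zero_grad()
+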
+import math + +import torch +from torch.optim import Optimizer + + +class Lamb(Optimizer): + """Implements a pure pytorch variant of FuseLAMB (NvLamb variant) optimizer from apex.optimizers.FusedLAMB + reference: https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py + + LAMB was proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_. + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining parameter groups. + lr (float, optional): learning rate. (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its norm. (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability. (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + grad_averaging (bool, optional): whether apply (1-beta2) to grad when + calculating running averages of gradient. (default: True) + max_grad_norm (float, optional): value used to clip global grad norm (default: 1.0) + trust_clip (bool): enable LAMBC trust ratio clipping (default: False) + always_adapt (boolean, optional): Apply adaptive learning rate to 0.0 + weight decay parameter (default: False) + + .. _Large Batch Optimization for Deep Learning - Training BERT in 76 minutes: + https://arxiv.org/abs/1904.00962 + .. _On the Convergence of Adam and Beyond: + https://openreview.net/forum?id=ryQu7f-RZ + """ + + def __init__( + self, params, lr=1e-3, bias_correction=True, betas=(0.9, 0.999), eps=1e-6, + weight_decay=0.01, grad_averaging=True, max_grad_norm=1.0, trust_clip=False, always_adapt=False): + defaults = dict( + lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay, + grad_averaging=grad_averaging, max_grad_norm=max_grad_norm, + trust_clip=trust_clip, always_adapt=always_adapt) + super().__init__(params, defaults) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. 
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + device = self.param_groups[0]['params'][0].device + one_tensor = torch.tensor(1.0, device=device) # because torch.where doesn't handle scalars correctly + global_grad_norm = torch.zeros(1, device=device) + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + if grad.is_sparse: + raise RuntimeError('Lamb does not support sparse gradients, consider SparseAdam instad.') + global_grad_norm.add_(grad.pow(2).sum()) + + global_grad_norm = torch.sqrt(global_grad_norm) + # FIXME it'd be nice to remove explicit tensor conversion of scalars when torch.where promotes + # scalar types properly https://github.com/pytorch/pytorch/issues/9190 + max_grad_norm = torch.tensor(self.defaults['max_grad_norm'], device=device) + clip_global_grad_norm = torch.where( + global_grad_norm > max_grad_norm, + global_grad_norm / max_grad_norm, + one_tensor) + + for group in self.param_groups: + bias_correction = 1 if group['bias_correction'] else 0 + beta1, beta2 = group['betas'] + grad_averaging = 1 if group['grad_averaging'] else 0 + beta3 = 1 - beta1 if grad_averaging else 1.0 + + # assume same step across group now to simplify things + # per parameter step can be easily support by making it tensor, or pass list into kernel + if 'step' in group: + group['step'] += 1 + else: + group['step'] = 1 + + if bias_correction: + bias_correction1 = 1 - beta1 ** group['step'] + bias_correction2 = 1 - beta2 ** group['step'] + else: + bias_correction1, bias_correction2 = 1.0, 1.0 + + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.div_(clip_global_grad_norm) + state = self.state[p] + + # State initialization + if len(state) == 0: + # Exponential moving average of gradient valuesa + state['exp_avg'] = torch.zeros_like(p) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + + # Decay the first and second moment running average coefficient + exp_avg.mul_(beta1).add_(grad, alpha=beta3) # m_t + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) # v_t + + denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + update = (exp_avg / bias_correction1).div_(denom) + + weight_decay = group['weight_decay'] + if weight_decay != 0: + update.add_(p, alpha=weight_decay) + + if weight_decay != 0 or group['always_adapt']: + # Layer-wise LR adaptation. By default, skip adaptation on parameters that are + # excluded from weight decay, unless always_adapt == True, then always enabled. 
+ w_norm = p.norm(2.0) + g_norm = update.norm(2.0) + # FIXME nested where required since logical and/or not working in PT XLA + trust_ratio = torch.where( + w_norm > 0, + torch.where(g_norm > 0, w_norm / g_norm, one_tensor), + one_tensor, + ) + if group['trust_clip']: + # LAMBC trust clipping, upper bound fixed at one + trust_ratio = torch.minimum(trust_ratio, one_tensor) + update.mul_(trust_ratio) + + p.add_(update, alpha=-group['lr']) + + return loss diff --git a/timm/optim/lars.py b/timm/optim/lars.py new file mode 100644 index 0000000..98198e6 --- /dev/null +++ b/timm/optim/lars.py @@ -0,0 +1,135 @@ +""" PyTorch LARS / LARC Optimizer + +An implementation of LARS (SGD) + LARC in PyTorch + +Based on: + * PyTorch SGD: https://github.com/pytorch/pytorch/blob/1.7/torch/optim/sgd.py#L100 + * NVIDIA APEX LARC: https://github.com/NVIDIA/apex/blob/master/apex/parallel/LARC.py + +Additional cleanup and modifications to properly support PyTorch XLA. + +Copyright 2021 Ross Wightman +""" +import torch +from torch.optim.optimizer import Optimizer + + +class Lars(Optimizer): + """ LARS for PyTorch + + Paper: `Large batch training of Convolutional Networks` - https://arxiv.org/pdf/1708.03888.pdf + + Args: + params (iterable): iterable of parameters to optimize or dicts defining parameter groups. + lr (float, optional): learning rate (default: 1.0). + momentum (float, optional): momentum factor (default: 0) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + dampening (float, optional): dampening for momentum (default: 0) + nesterov (bool, optional): enables Nesterov momentum (default: False) + trust_coeff (float): trust coefficient for computing adaptive lr / trust_ratio (default: 0.001) + eps (float): eps for division denominator (default: 1e-8) + trust_clip (bool): enable LARC trust ratio clipping (default: False) + always_adapt (bool): always apply LARS LR adapt, otherwise only when group weight_decay != 0 (default: False) + """ + + def __init__( + self, + params, + lr=1.0, + momentum=0, + dampening=0, + weight_decay=0, + nesterov=False, + trust_coeff=0.001, + eps=1e-8, + trust_clip=False, + always_adapt=False, + ): + if lr < 0.0: + raise ValueError(f"Invalid learning rate: {lr}") + if momentum < 0.0: + raise ValueError(f"Invalid momentum value: {momentum}") + if weight_decay < 0.0: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + if nesterov and (momentum <= 0 or dampening != 0): + raise ValueError("Nesterov momentum requires a momentum and zero dampening") + + defaults = dict( + lr=lr, + momentum=momentum, + dampening=dampening, + weight_decay=weight_decay, + nesterov=nesterov, + trust_coeff=trust_coeff, + eps=eps, + trust_clip=trust_clip, + always_adapt=always_adapt, + ) + super().__init__(params, defaults) + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault("nesterov", False) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Args: + closure (callable, optional): A closure that reevaluates the model and returns the loss. 
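+
+        Editor's note: the scaling below follows the LARS paper,
+        trust_ratio = trust_coeff * ||w|| / (||g|| + weight_decay * ||w|| + eps),
+        and is applied only when weight_decay != 0 unless ``always_adapt`` is set.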
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + device = self.param_groups[0]['params'][0].device + one_tensor = torch.tensor(1.0, device=device) # because torch.where doesn't handle scalars correctly + + for group in self.param_groups: + weight_decay = group['weight_decay'] + momentum = group['momentum'] + dampening = group['dampening'] + nesterov = group['nesterov'] + trust_coeff = group['trust_coeff'] + eps = group['eps'] + + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + + # apply LARS LR adaptation, LARC clipping, weight decay + # ref: https://github.com/NVIDIA/apex/blob/master/apex/parallel/LARC.py + if weight_decay != 0 or group['always_adapt']: + w_norm = p.norm(2.0) + g_norm = grad.norm(2.0) + trust_ratio = trust_coeff * w_norm / (g_norm + w_norm * weight_decay + eps) + # FIXME nested where required since logical and/or not working in PT XLA + trust_ratio = torch.where( + w_norm > 0, + torch.where(g_norm > 0, trust_ratio, one_tensor), + one_tensor, + ) + if group['trust_clip']: + trust_ratio = torch.minimum(trust_ratio / group['lr'], one_tensor) + grad.add(p, alpha=weight_decay) + grad.mul_(trust_ratio) + + # apply SGD update https://github.com/pytorch/pytorch/blob/1.7/torch/optim/sgd.py#L100 + if momentum != 0: + param_state = self.state[p] + if 'momentum_buffer' not in param_state: + buf = param_state['momentum_buffer'] = torch.clone(grad).detach() + else: + buf = param_state['momentum_buffer'] + buf.mul_(momentum).add_(grad, alpha=1. - dampening) + if nesterov: + grad = grad.add(buf, alpha=momentum) + else: + grad = buf + + p.add_(grad, alpha=-group['lr']) + + return loss \ No newline at end of file diff --git a/timm/optim/lookahead.py b/timm/optim/lookahead.py new file mode 100644 index 0000000..462c3ac --- /dev/null +++ b/timm/optim/lookahead.py @@ -0,0 +1,61 @@ +""" Lookahead Optimizer Wrapper. 
+Implementation modified from: https://github.com/alphadl/lookahead.pytorch +Paper: `Lookahead Optimizer: k steps forward, 1 step back` - https://arxiv.org/abs/1907.08610 + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +from torch.optim.optimizer import Optimizer +from collections import defaultdict + + +class Lookahead(Optimizer): + def __init__(self, base_optimizer, alpha=0.5, k=6): + # NOTE super().__init__() not called on purpose + if not 0.0 <= alpha <= 1.0: + raise ValueError(f'Invalid slow update rate: {alpha}') + if not 1 <= k: + raise ValueError(f'Invalid lookahead steps: {k}') + defaults = dict(lookahead_alpha=alpha, lookahead_k=k, lookahead_step=0) + self._base_optimizer = base_optimizer + self.param_groups = base_optimizer.param_groups + self.defaults = base_optimizer.defaults + self.defaults.update(defaults) + self.state = defaultdict(dict) + # manually add our defaults to the param groups + for name, default in defaults.items(): + for group in self._base_optimizer.param_groups: + group.setdefault(name, default) + + @torch.no_grad() + def update_slow(self, group): + for fast_p in group["params"]: + if fast_p.grad is None: + continue + param_state = self._base_optimizer.state[fast_p] + if 'lookahead_slow_buff' not in param_state: + param_state['lookahead_slow_buff'] = torch.empty_like(fast_p) + param_state['lookahead_slow_buff'].copy_(fast_p) + slow = param_state['lookahead_slow_buff'] + slow.add_(fast_p - slow, alpha=group['lookahead_alpha']) + fast_p.copy_(slow) + + def sync_lookahead(self): + for group in self._base_optimizer.param_groups: + self.update_slow(group) + + @torch.no_grad() + def step(self, closure=None): + loss = self._base_optimizer.step(closure) + for group in self._base_optimizer.param_groups: + group['lookahead_step'] += 1 + if group['lookahead_step'] % group['lookahead_k'] == 0: + self.update_slow(group) + return loss + + def state_dict(self): + return self._base_optimizer.state_dict() + + def load_state_dict(self, state_dict): + self._base_optimizer.load_state_dict(state_dict) + self.param_groups = self._base_optimizer.param_groups diff --git a/timm/optim/madgrad.py b/timm/optim/madgrad.py new file mode 100644 index 0000000..a76713b --- /dev/null +++ b/timm/optim/madgrad.py @@ -0,0 +1,184 @@ +""" PyTorch MADGRAD optimizer + +MADGRAD: https://arxiv.org/abs/2101.11075 + +Code from: https://github.com/facebookresearch/madgrad +""" +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import math +from typing import TYPE_CHECKING, Any, Callable, Optional + +import torch +import torch.optim + +if TYPE_CHECKING: + from torch.optim.optimizer import _params_t +else: + _params_t = Any + + +class MADGRAD(torch.optim.Optimizer): + """ + MADGRAD_: A Momentumized, Adaptive, Dual Averaged Gradient Method for Stochastic + Optimization. + + .. _MADGRAD: https://arxiv.org/abs/2101.11075 + + MADGRAD is a general purpose optimizer that can be used in place of SGD or + Adam may converge faster and generalize better. Currently GPU-only. + Typically, the same learning rate schedule that is used for SGD or Adam may + be used. The overall learning rate is not comparable to either method and + should be determined by a hyper-parameter sweep. + + MADGRAD requires less weight decay than other methods, often as little as + zero. Momentum values used for SGD or Adam's beta1 should work here also. 
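+
+    Sketch of the dense update with momentum as implemented below (editor's summary)::
+
+        lamb = lr * sqrt(step)
+        grad_sum_sq += lamb * g * g;  s += lamb * g
+        z = x0 - s / (grad_sum_sq**(1/3) + eps)
+        p = (1 - ck) * p + ck * z                    # ck = 1 - momentum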
+ + On sparse problems both weight_decay and momentum should be set to 0. + + Arguments: + params (iterable): + Iterable of parameters to optimize or dicts defining parameter groups. + lr (float): + Learning rate (default: 1e-2). + momentum (float): + Momentum value in the range [0,1) (default: 0.9). + weight_decay (float): + Weight decay, i.e. a L2 penalty (default: 0). + eps (float): + Term added to the denominator outside of the root operation to improve numerical stability. (default: 1e-6). + """ + + def __init__( + self, + params: _params_t, + lr: float = 1e-2, + momentum: float = 0.9, + weight_decay: float = 0, + eps: float = 1e-6, + decoupled_decay: bool = False, + ): + if momentum < 0 or momentum >= 1: + raise ValueError(f"Momentum {momentum} must be in the range [0,1]") + if lr <= 0: + raise ValueError(f"Learning rate {lr} must be positive") + if weight_decay < 0: + raise ValueError(f"Weight decay {weight_decay} must be non-negative") + if eps < 0: + raise ValueError(f"Eps must be non-negative") + + defaults = dict( + lr=lr, eps=eps, momentum=momentum, weight_decay=weight_decay, decoupled_decay=decoupled_decay) + super().__init__(params, defaults) + + @property + def supports_memory_efficient_fp16(self) -> bool: + return False + + @property + def supports_flat_params(self) -> bool: + return True + + @torch.no_grad() + def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]: + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + eps = group['eps'] + lr = group['lr'] + eps + weight_decay = group['weight_decay'] + momentum = group['momentum'] + ck = 1 - momentum + + for p in group["params"]: + if p.grad is None: + continue + grad = p.grad + if momentum != 0.0 and grad.is_sparse: + raise RuntimeError("momentum != 0 is not compatible with sparse gradients") + + state = self.state[p] + if len(state) == 0: + state['step'] = 0 + state['grad_sum_sq'] = torch.zeros_like(p) + state['s'] = torch.zeros_like(p) + if momentum != 0: + state['x0'] = torch.clone(p).detach() + + state['step'] += 1 + grad_sum_sq = state['grad_sum_sq'] + s = state['s'] + lamb = lr * math.sqrt(state['step']) + + # Apply weight decay + if weight_decay != 0: + if group['decoupled_decay']: + p.mul_(1.0 - group['lr'] * weight_decay) + else: + if grad.is_sparse: + raise RuntimeError("weight_decay option is not compatible with sparse gradients") + grad.add_(p, alpha=weight_decay) + + if grad.is_sparse: + grad = grad.coalesce() + grad_val = grad._values() + + p_masked = p.sparse_mask(grad) + grad_sum_sq_masked = grad_sum_sq.sparse_mask(grad) + s_masked = s.sparse_mask(grad) + + # Compute x_0 from other known quantities + rms_masked_vals = grad_sum_sq_masked._values().pow(1 / 3).add_(eps) + x0_masked_vals = p_masked._values().addcdiv(s_masked._values(), rms_masked_vals, value=1) + + # Dense + sparse op + grad_sq = grad * grad + grad_sum_sq.add_(grad_sq, alpha=lamb) + grad_sum_sq_masked.add_(grad_sq, alpha=lamb) + + rms_masked_vals = grad_sum_sq_masked._values().pow_(1 / 3).add_(eps) + + s.add_(grad, alpha=lamb) + s_masked._values().add_(grad_val, alpha=lamb) + + # update masked copy of p + p_kp1_masked_vals = x0_masked_vals.addcdiv(s_masked._values(), rms_masked_vals, value=-1) + # Copy updated masked p to dense p using an add operation + p_masked._values().add_(p_kp1_masked_vals, 
alpha=-1) + p.add_(p_masked, alpha=-1) + else: + if momentum == 0: + # Compute x_0 from other known quantities + rms = grad_sum_sq.pow(1 / 3).add_(eps) + x0 = p.addcdiv(s, rms, value=1) + else: + x0 = state['x0'] + + # Accumulate second moments + grad_sum_sq.addcmul_(grad, grad, value=lamb) + rms = grad_sum_sq.pow(1 / 3).add_(eps) + + # Update s + s.add_(grad, alpha=lamb) + + # Step + if momentum == 0: + p.copy_(x0.addcdiv(s, rms, value=-1)) + else: + z = x0.addcdiv(s, rms, value=-1) + + # p is a moving average of z + p.mul_(1 - ck).add_(z, alpha=ck) + + return loss diff --git a/timm/optim/nadam.py b/timm/optim/nadam.py new file mode 100644 index 0000000..6268d5d --- /dev/null +++ b/timm/optim/nadam.py @@ -0,0 +1,92 @@ +import math + +import torch +from torch.optim.optimizer import Optimizer + + +class Nadam(Optimizer): + """Implements Nadam algorithm (a variant of Adam based on Nesterov momentum). + + It has been proposed in `Incorporating Nesterov Momentum into Adam`__. + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 2e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + schedule_decay (float, optional): momentum schedule decay (default: 4e-3) + + __ http://cs229.stanford.edu/proj2015/054_report.pdf + __ http://www.cs.toronto.edu/~fritz/absps/momentum.pdf + + Originally taken from: https://github.com/pytorch/pytorch/pull/1408 + NOTE: Has potential issues but does work well on some problems. + """ + + def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8, + weight_decay=0, schedule_decay=4e-3): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + defaults = dict( + lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, schedule_decay=schedule_decay) + super(Nadam, self).__init__(params, defaults) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + state['m_schedule'] = 1. + state['exp_avg'] = torch.zeros_like(p) + state['exp_avg_sq'] = torch.zeros_like(p) + + # Warming momentum schedule + m_schedule = state['m_schedule'] + schedule_decay = group['schedule_decay'] + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + beta1, beta2 = group['betas'] + eps = group['eps'] + state['step'] += 1 + t = state['step'] + bias_correction2 = 1 - beta2 ** t + + if group['weight_decay'] != 0: + grad = grad.add(p, alpha=group['weight_decay']) + + momentum_cache_t = beta1 * (1. - 0.5 * (0.96 ** (t * schedule_decay))) + momentum_cache_t_1 = beta1 * (1. - 0.5 * (0.96 ** ((t + 1) * schedule_decay))) + m_schedule_new = m_schedule * momentum_cache_t + m_schedule_next = m_schedule * momentum_cache_t * momentum_cache_t_1 + state['m_schedule'] = m_schedule_new + + # Decay the first and second moment running average coefficient + exp_avg.mul_(beta1).add_(grad, alpha=1. 
- beta1) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1. - beta2) + + denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps) + p.addcdiv_(grad, denom, value=-group['lr'] * (1. - momentum_cache_t) / (1. - m_schedule_new)) + p.addcdiv_(exp_avg, denom, value=-group['lr'] * momentum_cache_t_1 / (1. - m_schedule_next)) + + return loss diff --git a/timm/optim/nvnovograd.py b/timm/optim/nvnovograd.py new file mode 100644 index 0000000..fda3f4a --- /dev/null +++ b/timm/optim/nvnovograd.py @@ -0,0 +1,120 @@ +""" Nvidia NovoGrad Optimizer. +Original impl by Nvidia from Jasper example: + - https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechRecognition/Jasper +Paper: `Stochastic Gradient Methods with Layer-wise Adaptive Moments for Training of Deep Networks` + - https://arxiv.org/abs/1905.11286 +""" + +import torch +from torch.optim.optimizer import Optimizer +import math + + +class NvNovoGrad(Optimizer): + """ + Implements Novograd algorithm. + + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.95, 0.98)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + grad_averaging: gradient averaging + amsgrad (boolean, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) + """ + + def __init__(self, params, lr=1e-3, betas=(0.95, 0.98), eps=1e-8, + weight_decay=0, grad_averaging=False, amsgrad=False): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) + defaults = dict(lr=lr, betas=betas, eps=eps, + weight_decay=weight_decay, + grad_averaging=grad_averaging, + amsgrad=amsgrad) + + super(NvNovoGrad, self).__init__(params, defaults) + + def __setstate__(self, state): + super(NvNovoGrad, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('amsgrad', False) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + if grad.is_sparse: + raise RuntimeError('Sparse gradients are not supported.') + amsgrad = group['amsgrad'] + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. 
values + state['max_exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + if amsgrad: + max_exp_avg_sq = state['max_exp_avg_sq'] + beta1, beta2 = group['betas'] + + state['step'] += 1 + + norm = torch.sum(torch.pow(grad, 2)) + + if exp_avg_sq == 0: + exp_avg_sq.copy_(norm) + else: + exp_avg_sq.mul_(beta2).add_(norm, alpha=1 - beta2) + + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) + # Use the max. for normalizing running avg. of gradient + denom = max_exp_avg_sq.sqrt().add_(group['eps']) + else: + denom = exp_avg_sq.sqrt().add_(group['eps']) + + grad.div_(denom) + if group['weight_decay'] != 0: + grad.add_(p, alpha=group['weight_decay']) + if group['grad_averaging']: + grad.mul_(1 - beta1) + exp_avg.mul_(beta1).add_(grad) + + p.add_(exp_avg, alpha=-group['lr']) + + return loss diff --git a/timm/optim/optim_factory.py b/timm/optim/optim_factory.py new file mode 100644 index 0000000..e174915 --- /dev/null +++ b/timm/optim/optim_factory.py @@ -0,0 +1,217 @@ +""" Optimizer Factory w/ Custom Weight Decay +Hacked together by / Copyright 2021 Ross Wightman +""" +from typing import Optional + +import torch +import torch.nn as nn +import torch.optim as optim + +from .adabelief import AdaBelief +from .adafactor import Adafactor +from .adahessian import Adahessian +from .adamp import AdamP +from .lamb import Lamb +from .lars import Lars +from .lookahead import Lookahead +from .madgrad import MADGRAD +from .nadam import Nadam +from .nvnovograd import NvNovoGrad +from .radam import RAdam +from .rmsprop_tf import RMSpropTF +from .sgdp import SGDP + +try: + from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD + has_apex = True +except ImportError: + has_apex = False + + +def add_weight_decay(model, weight_decay=1e-5, skip_list=()): + decay = [] + no_decay = [] + for name, param in model.named_parameters(): + if not param.requires_grad: + continue # frozen weights + if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: + no_decay.append(param) + else: + decay.append(param) + return [ + {'params': no_decay, 'weight_decay': 0.}, + {'params': decay, 'weight_decay': weight_decay}] + + +def optimizer_kwargs(cfg): + """ cfg/argparse to kwargs helper + Convert optimizer args in argparse args or cfg like object to keyword args for updated create fn. + """ + kwargs = dict( + opt=cfg.opt, + lr=cfg.lr, + weight_decay=cfg.weight_decay, + momentum=cfg.momentum) + if getattr(cfg, 'opt_eps', None) is not None: + kwargs['eps'] = cfg.opt_eps + if getattr(cfg, 'opt_betas', None) is not None: + kwargs['betas'] = cfg.opt_betas + if getattr(cfg, 'opt_args', None) is not None: + kwargs.update(cfg.opt_args) + return kwargs + + +def create_optimizer(args, model, filter_bias_and_bn=True): + """ Legacy optimizer factory for backwards compatibility. + NOTE: Use create_optimizer_v2 for new code. + """ + return create_optimizer_v2( + model, + **optimizer_kwargs(cfg=args), + filter_bias_and_bn=filter_bias_and_bn, + ) + + +def create_optimizer_v2( + model_or_params, + opt: str = 'sgd', + lr: Optional[float] = None, + weight_decay: float = 0., + momentum: float = 0.9, + filter_bias_and_bn: bool = True, + **kwargs): + """ Create an optimizer. + + TODO currently the model is passed in and all parameters are selected for optimization. 
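+
+    A minimal usage sketch (editor's illustration; ``MyModel`` is a hypothetical module)::
+
+        model = MyModel()
+        optimizer = create_optimizer_v2(model, opt='adamw', lr=5e-4, weight_decay=0.05)
+        # a name prefix, e.g. opt='lookahead_adamw', wraps the result in Lookahead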
+
+    For more general use an interface that allows selection of parameters to optimize and lr groups, one of:
+      * a filter fn interface that further breaks params into groups in a weight_decay compatible fashion
+      * expose the parameters interface and leave it up to caller
+
+    Args:
+        model_or_params (nn.Module): model containing parameters to optimize
+        opt: name of optimizer to create
+        lr: initial learning rate
+        weight_decay: weight decay to apply in optimizer
+        momentum: momentum for momentum based optimizers (others may use betas via kwargs)
+        filter_bias_and_bn: filter out bias, bn and other 1d params from weight decay
+        **kwargs: extra optimizer specific kwargs to pass through
+
+    Returns:
+        Optimizer
+    """
+    if isinstance(model_or_params, nn.Module):
+        # a model was passed in, extract parameters and add weight decays to appropriate layers
+        if weight_decay and filter_bias_and_bn:
+            skip = {}
+            if hasattr(model_or_params, 'no_weight_decay'):
+                skip = model_or_params.no_weight_decay()
+            parameters = add_weight_decay(model_or_params, weight_decay, skip)
+            weight_decay = 0.
+        else:
+            parameters = model_or_params.parameters()
+    else:
+        # iterable of parameters or param groups passed in
+        parameters = model_or_params
+
+    opt_lower = opt.lower()
+    opt_split = opt_lower.split('_')
+    opt_lower = opt_split[-1]
+    if 'fused' in opt_lower:
+        assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'
+
+    opt_args = dict(weight_decay=weight_decay, **kwargs)
+    if lr is not None:
+        opt_args.setdefault('lr', lr)
+
+    # basic SGD & related
+    if opt_lower == 'sgd' or opt_lower == 'nesterov':
+        # NOTE 'sgd' refers to SGD + nesterov momentum for legacy / backwards compat reasons
+        opt_args.pop('eps', None)
+        optimizer = optim.SGD(parameters, momentum=momentum, nesterov=True, **opt_args)
+    elif opt_lower == 'momentum':
+        opt_args.pop('eps', None)
+        optimizer = optim.SGD(parameters, momentum=momentum, nesterov=False, **opt_args)
+    elif opt_lower == 'sgdp':
+        optimizer = SGDP(parameters, momentum=momentum, nesterov=True, **opt_args)
+
+    # adaptive
+    elif opt_lower == 'adam':
+        optimizer = optim.Adam(parameters, **opt_args)
+    elif opt_lower == 'adamw':
+        optimizer = optim.AdamW(parameters, **opt_args)
+    elif opt_lower == 'adamp':
+        optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)
+    elif opt_lower == 'nadam':
+        try:
+            # NOTE PyTorch >= 1.10 has a native NAdam (note the capitalization)
+            optimizer = optim.NAdam(parameters, **opt_args)
+        except AttributeError:
+            optimizer = Nadam(parameters, **opt_args)
+    elif opt_lower == 'radam':
+        optimizer = RAdam(parameters, **opt_args)
+    elif opt_lower == 'adamax':
+        optimizer = optim.Adamax(parameters, **opt_args)
+    elif opt_lower == 'adabelief':
+        optimizer = AdaBelief(parameters, rectify=False, **opt_args)
+    elif opt_lower == 'radabelief':
+        optimizer = AdaBelief(parameters, rectify=True, **opt_args)
+    elif opt_lower == 'adadelta':
+        optimizer = optim.Adadelta(parameters, **opt_args)
+    elif opt_lower == 'adagrad':
+        opt_args.setdefault('eps', 1e-8)
+        optimizer = optim.Adagrad(parameters, **opt_args)
+    elif opt_lower == 'adafactor':
+        optimizer = Adafactor(parameters, **opt_args)
+    elif opt_lower == 'lamb':
+        optimizer = Lamb(parameters, **opt_args)
+    elif opt_lower == 'lambc':
+        optimizer = Lamb(parameters, trust_clip=True, **opt_args)
+    elif opt_lower == 'larc':
+        optimizer = Lars(parameters, momentum=momentum, trust_clip=True, **opt_args)
+    elif opt_lower == 'lars':
+        optimizer = Lars(parameters, momentum=momentum, **opt_args)
+    elif opt_lower == 'nlarc':
+        optimizer = Lars(parameters, momentum=momentum, trust_clip=True, nesterov=True, **opt_args)
+    elif opt_lower == 'nlars':
+        optimizer = Lars(parameters, momentum=momentum, nesterov=True, **opt_args)
+    elif opt_lower == 'madgrad':
+        optimizer = MADGRAD(parameters, momentum=momentum, **opt_args)
+    elif opt_lower == 'madgradw':
+        optimizer = MADGRAD(parameters, momentum=momentum, decoupled_decay=True, **opt_args)
+    elif opt_lower == 'novograd' or opt_lower == 'nvnovograd':
+        optimizer = NvNovoGrad(parameters, **opt_args)
+    elif opt_lower == 'rmsprop':
+        optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=momentum, **opt_args)
+    elif opt_lower == 'rmsproptf':
+        optimizer = RMSpropTF(parameters, alpha=0.9, momentum=momentum, **opt_args)
+
+    # second order
+    elif opt_lower == 'adahessian':
+        optimizer = Adahessian(parameters, **opt_args)
+
+    # NVIDIA fused optimizers, require APEX to be installed
+    elif opt_lower == 'fusedsgd':
+        opt_args.pop('eps', None)
+        optimizer = FusedSGD(parameters, momentum=momentum, nesterov=True, **opt_args)
+    elif opt_lower == 'fusedmomentum':
+        opt_args.pop('eps', None)
+        optimizer = FusedSGD(parameters, momentum=momentum, nesterov=False, **opt_args)
+    elif opt_lower == 'fusedadam':
+        optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)
+    elif opt_lower == 'fusedadamw':
+        optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)
+    elif opt_lower == 'fusedlamb':
+        optimizer = FusedLAMB(parameters, **opt_args)
+    elif opt_lower == 'fusednovograd':
+        opt_args.setdefault('betas', (0.95, 0.98))
+        optimizer = FusedNovoGrad(parameters, **opt_args)
+
+    else:
+        raise ValueError(f'Invalid optimizer: {opt_lower}')
+
+    if len(opt_split) > 1:
+        if opt_split[0] == 'lookahead':
+            optimizer = Lookahead(optimizer)
+
+    return optimizer
diff --git a/timm/optim/radam.py b/timm/optim/radam.py
new file mode 100644
index 0000000..eb8d22e
--- /dev/null
+++ b/timm/optim/radam.py
@@ -0,0 +1,89 @@
+"""RAdam Optimizer.
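+
+Editor's note: RAdam takes plain SGD-style momentum steps while the variance of the
+adaptive learning rate is intractable (num_sma < 5), then switches to the
+variance-rectified Adam step; see the buffered `step_size` computation below.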
+Implementation lifted from: https://github.com/LiyuanLucasLiu/RAdam +Paper: `On the Variance of the Adaptive Learning Rate and Beyond` - https://arxiv.org/abs/1908.03265 +""" +import math +import torch +from torch.optim.optimizer import Optimizer + + +class RAdam(Optimizer): + + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0): + defaults = dict( + lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, + buffer=[[None, None, None] for _ in range(10)]) + super(RAdam, self).__init__(params, defaults) + + def __setstate__(self, state): + super(RAdam, self).__setstate__(state) + + @torch.no_grad() + def step(self, closure=None): + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.float() + if grad.is_sparse: + raise RuntimeError('RAdam does not support sparse gradients') + + p_fp32 = p.float() + + state = self.state[p] + + if len(state) == 0: + state['step'] = 0 + state['exp_avg'] = torch.zeros_like(p_fp32) + state['exp_avg_sq'] = torch.zeros_like(p_fp32) + else: + state['exp_avg'] = state['exp_avg'].type_as(p_fp32) + state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_fp32) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + beta1, beta2 = group['betas'] + + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + + state['step'] += 1 + buffered = group['buffer'][int(state['step'] % 10)] + if state['step'] == buffered[0]: + num_sma, step_size = buffered[1], buffered[2] + else: + buffered[0] = state['step'] + beta2_t = beta2 ** state['step'] + num_sma_max = 2 / (1 - beta2) - 1 + num_sma = num_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) + buffered[1] = num_sma + + # more conservative since it's an approximated value + if num_sma >= 5: + step_size = group['lr'] * math.sqrt( + (1 - beta2_t) * + (num_sma - 4) / (num_sma_max - 4) * + (num_sma - 2) / num_sma * + num_sma_max / (num_sma_max - 2)) / (1 - beta1 ** state['step']) + else: + step_size = group['lr'] / (1 - beta1 ** state['step']) + buffered[2] = step_size + + if group['weight_decay'] != 0: + p_fp32.add_(p_fp32, alpha=-group['weight_decay'] * group['lr']) + + # more conservative since it's an approximated value + if num_sma >= 5: + denom = exp_avg_sq.sqrt().add_(group['eps']) + p_fp32.addcdiv_(exp_avg, denom, value=-step_size) + else: + p_fp32.add_(exp_avg, alpha=-step_size) + + p.copy_(p_fp32) + + return loss diff --git a/timm/optim/rmsprop_tf.py b/timm/optim/rmsprop_tf.py new file mode 100644 index 0000000..0817887 --- /dev/null +++ b/timm/optim/rmsprop_tf.py @@ -0,0 +1,139 @@ +""" RMSProp modified to behave like Tensorflow impl + +Originally cut & paste from PyTorch RMSProp +https://github.com/pytorch/pytorch/blob/063946d2b3f3f1e953a2a3b54e0b34f1393de295/torch/optim/rmsprop.py +Licensed under BSD-Clause 3 (ish), https://github.com/pytorch/pytorch/blob/master/LICENSE + +Modifications Copyright 2021 Ross Wightman +""" + +import torch +from torch.optim import Optimizer + + +class RMSpropTF(Optimizer): + """Implements RMSprop algorithm (TensorFlow style epsilon) + + NOTE: This is a direct cut-and-paste of PyTorch RMSprop with eps applied before sqrt + and a few other modifications to closer match Tensorflow for matching hyper-params. + + Noteworthy changes include: + 1. Epsilon applied inside square-root + 2. square_avg initialized to ones + 3. 
LR scaling of update accumulated in momentum buffer + + Proposed by G. Hinton in his + `course `_. + + The centered version first appears in `Generating Sequences + With Recurrent Neural Networks `_. + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-2) + momentum (float, optional): momentum factor (default: 0) + alpha (float, optional): smoothing (decay) constant (default: 0.9) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-10) + centered (bool, optional) : if ``True``, compute the centered RMSProp, + the gradient is normalized by an estimation of its variance + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + decoupled_decay (bool, optional): decoupled weight decay as per https://arxiv.org/abs/1711.05101 + lr_in_momentum (bool, optional): learning rate scaling is included in the momentum buffer + update as per defaults in Tensorflow + + """ + + def __init__(self, params, lr=1e-2, alpha=0.9, eps=1e-10, weight_decay=0, momentum=0., centered=False, + decoupled_decay=False, lr_in_momentum=True): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= momentum: + raise ValueError("Invalid momentum value: {}".format(momentum)) + if not 0.0 <= weight_decay: + raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) + if not 0.0 <= alpha: + raise ValueError("Invalid alpha value: {}".format(alpha)) + + defaults = dict( + lr=lr, momentum=momentum, alpha=alpha, eps=eps, centered=centered, weight_decay=weight_decay, + decoupled_decay=decoupled_decay, lr_in_momentum=lr_in_momentum) + super(RMSpropTF, self).__init__(params, defaults) + + def __setstate__(self, state): + super(RMSpropTF, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('momentum', 0) + group.setdefault('centered', False) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + if grad.is_sparse: + raise RuntimeError('RMSprop does not support sparse gradients') + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + state['square_avg'] = torch.ones_like(p) # PyTorch inits to zero + if group['momentum'] > 0: + state['momentum_buffer'] = torch.zeros_like(p) + if group['centered']: + state['grad_avg'] = torch.zeros_like(p) + + square_avg = state['square_avg'] + one_minus_alpha = 1. - group['alpha'] + + state['step'] += 1 + + if group['weight_decay'] != 0: + if group['decoupled_decay']: + p.mul_(1. 
- group['lr'] * group['weight_decay']) + else: + grad = grad.add(p, alpha=group['weight_decay']) + + # Tensorflow order of ops for updating squared avg + square_avg.add_(grad.pow(2) - square_avg, alpha=one_minus_alpha) + # square_avg.mul_(alpha).addcmul_(grad, grad, value=1 - alpha) # PyTorch original + + if group['centered']: + grad_avg = state['grad_avg'] + grad_avg.add_(grad - grad_avg, alpha=one_minus_alpha) + avg = square_avg.addcmul(grad_avg, grad_avg, value=-1).add(group['eps']).sqrt_() # eps in sqrt + # grad_avg.mul_(alpha).add_(grad, alpha=1 - alpha) # PyTorch original + else: + avg = square_avg.add(group['eps']).sqrt_() # eps moved in sqrt + + if group['momentum'] > 0: + buf = state['momentum_buffer'] + # Tensorflow accumulates the LR scaling in the momentum buffer + if group['lr_in_momentum']: + buf.mul_(group['momentum']).addcdiv_(grad, avg, value=group['lr']) + p.add_(-buf) + else: + # PyTorch scales the param update by LR + buf.mul_(group['momentum']).addcdiv_(grad, avg) + p.add_(buf, alpha=-group['lr']) + else: + p.addcdiv_(grad, avg, value=-group['lr']) + + return loss diff --git a/timm/optim/sgdp.py b/timm/optim/sgdp.py new file mode 100644 index 0000000..baf05fa --- /dev/null +++ b/timm/optim/sgdp.py @@ -0,0 +1,70 @@ +""" +SGDP Optimizer Implementation copied from https://github.com/clovaai/AdamP/blob/master/adamp/sgdp.py + +Paper: `Slowing Down the Weight Norm Increase in Momentum-based Optimizers` - https://arxiv.org/abs/2006.08217 +Code: https://github.com/clovaai/AdamP + +Copyright (c) 2020-present NAVER Corp. +MIT license +""" + +import torch +import torch.nn.functional as F +from torch.optim.optimizer import Optimizer, required +import math + +from .adamp import projection + + +class SGDP(Optimizer): + def __init__(self, params, lr=required, momentum=0, dampening=0, + weight_decay=0, nesterov=False, eps=1e-8, delta=0.1, wd_ratio=0.1): + defaults = dict( + lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, + nesterov=nesterov, eps=eps, delta=delta, wd_ratio=wd_ratio) + super(SGDP, self).__init__(params, defaults) + + @torch.no_grad() + def step(self, closure=None): + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + weight_decay = group['weight_decay'] + momentum = group['momentum'] + dampening = group['dampening'] + nesterov = group['nesterov'] + + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + state = self.state[p] + + # State initialization + if len(state) == 0: + state['momentum'] = torch.zeros_like(p) + + # SGD + buf = state['momentum'] + buf.mul_(momentum).add_(grad, alpha=1. - dampening) + if nesterov: + d_p = grad + momentum * buf + else: + d_p = buf + + # Projection + wd_ratio = 1. + if len(p.shape) > 1: + d_p, wd_ratio = projection(p, grad, d_p, group['delta'], group['wd_ratio'], group['eps']) + + # Weight decay + if weight_decay != 0: + p.mul_(1. 
- group['lr'] * group['weight_decay'] * wd_ratio / (1-momentum)) + + # Step + p.add_(d_p, alpha=-group['lr']) + + return loss diff --git a/timm/scheduler/__init__.py b/timm/scheduler/__init__.py new file mode 100644 index 0000000..f1961b8 --- /dev/null +++ b/timm/scheduler/__init__.py @@ -0,0 +1,8 @@ +from .cosine_lr import CosineLRScheduler +from .multistep_lr import MultiStepLRScheduler +from .plateau_lr import PlateauLRScheduler +from .poly_lr import PolyLRScheduler +from .step_lr import StepLRScheduler +from .tanh_lr import TanhLRScheduler + +from .scheduler_factory import create_scheduler diff --git a/timm/scheduler/cosine_lr.py b/timm/scheduler/cosine_lr.py new file mode 100644 index 0000000..84ee349 --- /dev/null +++ b/timm/scheduler/cosine_lr.py @@ -0,0 +1,119 @@ +""" Cosine Scheduler + +Cosine LR schedule with warmup, cycle/restarts, noise, k-decay. + +Hacked together by / Copyright 2021 Ross Wightman +""" +import logging +import math +import numpy as np +import torch + +from .scheduler import Scheduler + + +_logger = logging.getLogger(__name__) + + +class CosineLRScheduler(Scheduler): + """ + Cosine decay with restarts. + This is described in the paper https://arxiv.org/abs/1608.03983. + + Inspiration from + https://github.com/allenai/allennlp/blob/master/allennlp/training/learning_rate_schedulers/cosine.py + + k-decay option based on `k-decay: A New Method For Learning Rate Schedule` - https://arxiv.org/abs/2004.05909 + """ + + def __init__(self, + optimizer: torch.optim.Optimizer, + t_initial: int, + lr_min: float = 0., + cycle_mul: float = 1., + cycle_decay: float = 1., + cycle_limit: int = 1, + warmup_t=0, + warmup_lr_init=0, + warmup_prefix=False, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + k_decay=1.0, + initialize=True) -> None: + super().__init__( + optimizer, param_group_field="lr", + noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, + initialize=initialize) + + assert t_initial > 0 + assert lr_min >= 0 + if t_initial == 1 and cycle_mul == 1 and cycle_decay == 1: + _logger.warning("Cosine annealing scheduler will have no effect on the learning " + "rate since t_initial = t_mul = eta_mul = 1.") + self.t_initial = t_initial + self.lr_min = lr_min + self.cycle_mul = cycle_mul + self.cycle_decay = cycle_decay + self.cycle_limit = cycle_limit + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.warmup_prefix = warmup_prefix + self.t_in_epochs = t_in_epochs + self.k_decay = k_decay + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def _get_lr(self, t): + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + if self.warmup_prefix: + t = t - self.warmup_t + + if self.cycle_mul != 1: + i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul)) + t_i = self.cycle_mul ** i * self.t_initial + t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial + else: + i = t // self.t_initial + t_i = self.t_initial + t_curr = t - (self.t_initial * i) + + gamma = self.cycle_decay ** i + lr_max_values = [v * gamma for v in self.base_values] + k = self.k_decay + + if i < self.cycle_limit: + lrs = [ + self.lr_min + 0.5 * (lr_max - self.lr_min) * (1 + math.cos(math.pi * t_curr ** k / t_i ** k)) + for lr_max in lr_max_values + ] + 
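+            # editor's note: past the cycle limit, every param group is held at lr_min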
else: + lrs = [self.lr_min for _ in self.base_values] + + return lrs + + def get_epoch_values(self, epoch: int): + if self.t_in_epochs: + return self._get_lr(epoch) + else: + return None + + def get_update_values(self, num_updates: int): + if not self.t_in_epochs: + return self._get_lr(num_updates) + else: + return None + + def get_cycle_length(self, cycles=0): + cycles = max(1, cycles or self.cycle_limit) + if self.cycle_mul == 1.0: + return self.t_initial * cycles + else: + return int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul))) diff --git a/timm/scheduler/multistep_lr.py b/timm/scheduler/multistep_lr.py new file mode 100644 index 0000000..a5d5fe1 --- /dev/null +++ b/timm/scheduler/multistep_lr.py @@ -0,0 +1,65 @@ +""" MultiStep LR Scheduler + +Basic multi step LR schedule with warmup, noise. +""" +import torch +import bisect +from timm.scheduler.scheduler import Scheduler +from typing import List + +class MultiStepLRScheduler(Scheduler): + """ + """ + + def __init__(self, + optimizer: torch.optim.Optimizer, + decay_t: List[int], + decay_rate: float = 1., + warmup_t=0, + warmup_lr_init=0, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + initialize=True, + ) -> None: + super().__init__( + optimizer, param_group_field="lr", + noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, + initialize=initialize) + + self.decay_t = decay_t + self.decay_rate = decay_rate + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.t_in_epochs = t_in_epochs + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def get_curr_decay_steps(self, t): + # find where in the array t goes, + # assumes self.decay_t is sorted + return bisect.bisect_right(self.decay_t, t+1) + + def _get_lr(self, t): + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + lrs = [v * (self.decay_rate ** self.get_curr_decay_steps(t)) for v in self.base_values] + return lrs + + def get_epoch_values(self, epoch: int): + if self.t_in_epochs: + return self._get_lr(epoch) + else: + return None + + def get_update_values(self, num_updates: int): + if not self.t_in_epochs: + return self._get_lr(num_updates) + else: + return None diff --git a/timm/scheduler/plateau_lr.py b/timm/scheduler/plateau_lr.py new file mode 100644 index 0000000..4f2cacb --- /dev/null +++ b/timm/scheduler/plateau_lr.py @@ -0,0 +1,113 @@ +""" Plateau Scheduler + +Adapts PyTorch plateau scheduler and allows application of noise, warmup. 
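+
+Editor's note (illustrative sketch, not upstream text): this class wraps
+torch.optim.lr_scheduler.ReduceLROnPlateau, so the training loop is expected to pass
+the monitored metric into each epoch step. `train_one_epoch` and `evaluate` below are
+hypothetical placeholders:
+
+    scheduler = PlateauLRScheduler(optimizer, decay_rate=0.5, patience_t=5, mode='max')
+    for epoch in range(num_epochs):
+        train_one_epoch(model, loader, optimizer)
+        val_metric = evaluate(model, val_loader)
+        scheduler.step(epoch + 1, metric=val_metric)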
+ +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch + +from .scheduler import Scheduler + + +class PlateauLRScheduler(Scheduler): + """Decay the LR by a factor every time the validation loss plateaus.""" + + def __init__(self, + optimizer, + decay_rate=0.1, + patience_t=10, + verbose=True, + threshold=1e-4, + cooldown_t=0, + warmup_t=0, + warmup_lr_init=0, + lr_min=0, + mode='max', + noise_range_t=None, + noise_type='normal', + noise_pct=0.67, + noise_std=1.0, + noise_seed=None, + initialize=True, + ): + super().__init__(optimizer, 'lr', initialize=initialize) + + self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( + self.optimizer, + patience=patience_t, + factor=decay_rate, + verbose=verbose, + threshold=threshold, + cooldown=cooldown_t, + mode=mode, + min_lr=lr_min + ) + + self.noise_range = noise_range_t + self.noise_pct = noise_pct + self.noise_type = noise_type + self.noise_std = noise_std + self.noise_seed = noise_seed if noise_seed is not None else 42 + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + self.restore_lr = None + + def state_dict(self): + return { + 'best': self.lr_scheduler.best, + 'last_epoch': self.lr_scheduler.last_epoch, + } + + def load_state_dict(self, state_dict): + self.lr_scheduler.best = state_dict['best'] + if 'last_epoch' in state_dict: + self.lr_scheduler.last_epoch = state_dict['last_epoch'] + + # override the base class step fn completely + def step(self, epoch, metric=None): + if epoch <= self.warmup_t: + lrs = [self.warmup_lr_init + epoch * s for s in self.warmup_steps] + super().update_groups(lrs) + else: + if self.restore_lr is not None: + # restore actual LR from before our last noise perturbation before stepping base + for i, param_group in enumerate(self.optimizer.param_groups): + param_group['lr'] = self.restore_lr[i] + self.restore_lr = None + + self.lr_scheduler.step(metric, epoch) # step the base scheduler + + if self.noise_range is not None: + if isinstance(self.noise_range, (list, tuple)): + apply_noise = self.noise_range[0] <= epoch < self.noise_range[1] + else: + apply_noise = epoch >= self.noise_range + if apply_noise: + self._apply_noise(epoch) + + def _apply_noise(self, epoch): + g = torch.Generator() + g.manual_seed(self.noise_seed + epoch) + if self.noise_type == 'normal': + while True: + # resample if noise out of percent limit, brute force but shouldn't spin much + noise = torch.randn(1, generator=g).item() + if abs(noise) < self.noise_pct: + break + else: + noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct + + # apply the noise on top of previous LR, cache the old value so we can restore for normal + # stepping of base scheduler + restore_lr = [] + for i, param_group in enumerate(self.optimizer.param_groups): + old_lr = float(param_group['lr']) + restore_lr.append(old_lr) + new_lr = old_lr + old_lr * noise + param_group['lr'] = new_lr + self.restore_lr = restore_lr diff --git a/timm/scheduler/poly_lr.py b/timm/scheduler/poly_lr.py new file mode 100644 index 0000000..9c351be --- /dev/null +++ b/timm/scheduler/poly_lr.py @@ -0,0 +1,116 @@ +""" Polynomial Scheduler + +Polynomial LR schedule with warmup, noise. 
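+
+Editor's note (illustrative, not upstream text): within a cycle the schedule follows
+lr = lr_min + (lr_max - lr_min) * (1 - t^k / t_i^k) ** power. With the defaults
+power=0.5 and k=1, values lr_max=0.1, lr_min=0 and t_i=100 give lr=0.1 at t=0,
+lr=0.05 at t=75 (since (1 - 0.75)^0.5 = 0.5) and lr=0 at t=100.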
+
+Hacked together by / Copyright 2021 Ross Wightman
+"""
+import math
+import logging
+
+import torch
+
+from .scheduler import Scheduler
+
+
+_logger = logging.getLogger(__name__)
+
+
+class PolyLRScheduler(Scheduler):
+    """ Polynomial LR Scheduler w/ warmup, noise, and k-decay
+
+    k-decay option based on `k-decay: A New Method For Learning Rate Schedule` - https://arxiv.org/abs/2004.05909
+    """
+
+    def __init__(self,
+                 optimizer: torch.optim.Optimizer,
+                 t_initial: int,
+                 power: float = 0.5,
+                 lr_min: float = 0.,
+                 cycle_mul: float = 1.,
+                 cycle_decay: float = 1.,
+                 cycle_limit: int = 1,
+                 warmup_t=0,
+                 warmup_lr_init=0,
+                 warmup_prefix=False,
+                 t_in_epochs=True,
+                 noise_range_t=None,
+                 noise_pct=0.67,
+                 noise_std=1.0,
+                 noise_seed=42,
+                 k_decay=1.0,
+                 initialize=True) -> None:
+        super().__init__(
+            optimizer, param_group_field="lr",
+            noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed,
+            initialize=initialize)
+
+        assert t_initial > 0
+        assert lr_min >= 0
+        if t_initial == 1 and cycle_mul == 1 and cycle_decay == 1:
+            _logger.warning("Polynomial scheduler will have no effect on the learning "
+                            "rate since t_initial = cycle_mul = cycle_decay = 1.")
+        self.t_initial = t_initial
+        self.power = power
+        self.lr_min = lr_min
+        self.cycle_mul = cycle_mul
+        self.cycle_decay = cycle_decay
+        self.cycle_limit = cycle_limit
+        self.warmup_t = warmup_t
+        self.warmup_lr_init = warmup_lr_init
+        self.warmup_prefix = warmup_prefix
+        self.t_in_epochs = t_in_epochs
+        self.k_decay = k_decay
+        if self.warmup_t:
+            self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values]
+            super().update_groups(self.warmup_lr_init)
+        else:
+            self.warmup_steps = [1 for _ in self.base_values]
+
+    def _get_lr(self, t):
+        if t < self.warmup_t:
+            lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps]
+        else:
+            if self.warmup_prefix:
+                t = t - self.warmup_t
+
+            if self.cycle_mul != 1:
+                i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul))
+                t_i = self.cycle_mul ** i * self.t_initial
+                t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial
+            else:
+                i = t // self.t_initial
+                t_i = self.t_initial
+                t_curr = t - (self.t_initial * i)
+
+            gamma = self.cycle_decay ** i
+            lr_max_values = [v * gamma for v in self.base_values]
+            k = self.k_decay
+
+            if i < self.cycle_limit:
+                lrs = [
+                    self.lr_min + (lr_max - self.lr_min) * (1 - t_curr ** k / t_i ** k) ** self.power
+                    for lr_max in lr_max_values
+                ]
+            else:
+                lrs = [self.lr_min for _ in self.base_values]
+
+        return lrs
+
+    def get_epoch_values(self, epoch: int):
+        if self.t_in_epochs:
+            return self._get_lr(epoch)
+        else:
+            return None
+
+    def get_update_values(self, num_updates: int):
+        if not self.t_in_epochs:
+            return self._get_lr(num_updates)
+        else:
+            return None
+
+    def get_cycle_length(self, cycles=0):
+        cycles = max(1, cycles or self.cycle_limit)
+        if self.cycle_mul == 1.0:
+            return self.t_initial * cycles
+        else:
+            return int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul)))
diff --git a/timm/scheduler/scheduler.py b/timm/scheduler/scheduler.py
new file mode 100644
index 0000000..21d5150
--- /dev/null
+++ b/timm/scheduler/scheduler.py
@@ -0,0 +1,105 @@
+from typing import Dict, Any
+
+import torch
+
+
+class Scheduler:
+    """ Parameter Scheduler Base Class
+    A scheduler base class that can be used to schedule any optimizer parameter groups.
+ + Unlike the builtin PyTorch schedulers, this is intended to be consistently called + * At the END of each epoch, before incrementing the epoch count, to calculate next epoch's value + * At the END of each optimizer update, after incrementing the update count, to calculate next update's value + + The schedulers built on this should try to remain as stateless as possible (for simplicity). + + This family of schedulers is attempting to avoid the confusion of the meaning of 'last_epoch' + and -1 values for special behaviour. All epoch and update counts must be tracked in the training + code and explicitly passed in to the schedulers on the corresponding step or step_update call. + + Based on ideas from: + * https://github.com/pytorch/fairseq/tree/master/fairseq/optim/lr_scheduler + * https://github.com/allenai/allennlp/tree/master/allennlp/training/learning_rate_schedulers + """ + + def __init__(self, + optimizer: torch.optim.Optimizer, + param_group_field: str, + noise_range_t=None, + noise_type='normal', + noise_pct=0.67, + noise_std=1.0, + noise_seed=None, + initialize: bool = True) -> None: + self.optimizer = optimizer + self.param_group_field = param_group_field + self._initial_param_group_field = f"initial_{param_group_field}" + if initialize: + for i, group in enumerate(self.optimizer.param_groups): + if param_group_field not in group: + raise KeyError(f"{param_group_field} missing from param_groups[{i}]") + group.setdefault(self._initial_param_group_field, group[param_group_field]) + else: + for i, group in enumerate(self.optimizer.param_groups): + if self._initial_param_group_field not in group: + raise KeyError(f"{self._initial_param_group_field} missing from param_groups[{i}]") + self.base_values = [group[self._initial_param_group_field] for group in self.optimizer.param_groups] + self.metric = None # any point to having this for all? 
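+        # Editor's note (illustrative sketch, not upstream code): per the class docstring
+        # above, the training loop owns all epoch/update counters and drives a subclass
+        # explicitly, e.g.
+        #     for epoch in range(num_epochs):
+        #         for batch in loader:
+        #             ...  # forward/backward/optimizer.step()
+        #             num_updates += 1
+        #             scheduler.step_update(num_updates=num_updates)
+        #         scheduler.step(epoch + 1, metric=eval_metric)
+        # In timm training scripts such a scheduler is usually built via
+        # create_scheduler(args, optimizer) from scheduler_factory.py below, which
+        # returns (lr_scheduler, num_epochs).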
+ self.noise_range_t = noise_range_t + self.noise_pct = noise_pct + self.noise_type = noise_type + self.noise_std = noise_std + self.noise_seed = noise_seed if noise_seed is not None else 42 + self.update_groups(self.base_values) + + def state_dict(self) -> Dict[str, Any]: + return {key: value for key, value in self.__dict__.items() if key != 'optimizer'} + + def load_state_dict(self, state_dict: Dict[str, Any]) -> None: + self.__dict__.update(state_dict) + + def get_epoch_values(self, epoch: int): + return None + + def get_update_values(self, num_updates: int): + return None + + def step(self, epoch: int, metric: float = None) -> None: + self.metric = metric + values = self.get_epoch_values(epoch) + if values is not None: + values = self._add_noise(values, epoch) + self.update_groups(values) + + def step_update(self, num_updates: int, metric: float = None): + self.metric = metric + values = self.get_update_values(num_updates) + if values is not None: + values = self._add_noise(values, num_updates) + self.update_groups(values) + + def update_groups(self, values): + if not isinstance(values, (list, tuple)): + values = [values] * len(self.optimizer.param_groups) + for param_group, value in zip(self.optimizer.param_groups, values): + param_group[self.param_group_field] = value + + def _add_noise(self, lrs, t): + if self.noise_range_t is not None: + if isinstance(self.noise_range_t, (list, tuple)): + apply_noise = self.noise_range_t[0] <= t < self.noise_range_t[1] + else: + apply_noise = t >= self.noise_range_t + if apply_noise: + g = torch.Generator() + g.manual_seed(self.noise_seed + t) + if self.noise_type == 'normal': + while True: + # resample if noise out of percent limit, brute force but shouldn't spin much + noise = torch.randn(1, generator=g).item() + if abs(noise) < self.noise_pct: + break + else: + noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct + lrs = [v + v * noise for v in lrs] + return lrs diff --git a/timm/scheduler/scheduler_factory.py b/timm/scheduler/scheduler_factory.py new file mode 100644 index 0000000..72a979c --- /dev/null +++ b/timm/scheduler/scheduler_factory.py @@ -0,0 +1,107 @@ +""" Scheduler Factory +Hacked together by / Copyright 2021 Ross Wightman +""" +from .cosine_lr import CosineLRScheduler +from .multistep_lr import MultiStepLRScheduler +from .plateau_lr import PlateauLRScheduler +from .poly_lr import PolyLRScheduler +from .step_lr import StepLRScheduler +from .tanh_lr import TanhLRScheduler + + +def create_scheduler(args, optimizer): + num_epochs = args.epochs + + if getattr(args, 'lr_noise', None) is not None: + lr_noise = getattr(args, 'lr_noise') + if isinstance(lr_noise, (list, tuple)): + noise_range = [n * num_epochs for n in lr_noise] + if len(noise_range) == 1: + noise_range = noise_range[0] + else: + noise_range = lr_noise * num_epochs + else: + noise_range = None + noise_args = dict( + noise_range_t=noise_range, + noise_pct=getattr(args, 'lr_noise_pct', 0.67), + noise_std=getattr(args, 'lr_noise_std', 1.), + noise_seed=getattr(args, 'seed', 42), + ) + cycle_args = dict( + cycle_mul=getattr(args, 'lr_cycle_mul', 1.), + cycle_decay=getattr(args, 'lr_cycle_decay', 0.1), + cycle_limit=getattr(args, 'lr_cycle_limit', 1), + ) + + lr_scheduler = None + if args.sched == 'cosine': + lr_scheduler = CosineLRScheduler( + optimizer, + t_initial=num_epochs, + lr_min=args.min_lr, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + k_decay=getattr(args, 'lr_k_decay', 1.0), + **cycle_args, + **noise_args, + ) + num_epochs = 
lr_scheduler.get_cycle_length() + args.cooldown_epochs + elif args.sched == 'tanh': + lr_scheduler = TanhLRScheduler( + optimizer, + t_initial=num_epochs, + lr_min=args.min_lr, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + t_in_epochs=True, + **cycle_args, + **noise_args, + ) + num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs + elif args.sched == 'step': + lr_scheduler = StepLRScheduler( + optimizer, + decay_t=args.decay_epochs, + decay_rate=args.decay_rate, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + **noise_args, + ) + elif args.sched == 'multistep': + lr_scheduler = MultiStepLRScheduler( + optimizer, + decay_t=args.decay_epochs, + decay_rate=args.decay_rate, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + **noise_args, + ) + elif args.sched == 'plateau': + mode = 'min' if 'loss' in getattr(args, 'eval_metric', '') else 'max' + lr_scheduler = PlateauLRScheduler( + optimizer, + decay_rate=args.decay_rate, + patience_t=args.patience_epochs, + lr_min=args.min_lr, + mode=mode, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + cooldown_t=0, + **noise_args, + ) + elif args.sched == 'poly': + lr_scheduler = PolyLRScheduler( + optimizer, + power=args.decay_rate, # overloading 'decay_rate' as polynomial power + t_initial=num_epochs, + lr_min=args.min_lr, + warmup_lr_init=args.warmup_lr, + warmup_t=args.warmup_epochs, + k_decay=getattr(args, 'lr_k_decay', 1.0), + **cycle_args, + **noise_args, + ) + num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs + + return lr_scheduler, num_epochs diff --git a/timm/scheduler/step_lr.py b/timm/scheduler/step_lr.py new file mode 100644 index 0000000..f797e1a --- /dev/null +++ b/timm/scheduler/step_lr.py @@ -0,0 +1,63 @@ +""" Step Scheduler + +Basic step LR schedule with warmup, noise. + +Hacked together by / Copyright 2020 Ross Wightman +""" +import math +import torch + +from .scheduler import Scheduler + + +class StepLRScheduler(Scheduler): + """ + """ + + def __init__(self, + optimizer: torch.optim.Optimizer, + decay_t: float, + decay_rate: float = 1., + warmup_t=0, + warmup_lr_init=0, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + initialize=True, + ) -> None: + super().__init__( + optimizer, param_group_field="lr", + noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, + initialize=initialize) + + self.decay_t = decay_t + self.decay_rate = decay_rate + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.t_in_epochs = t_in_epochs + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def _get_lr(self, t): + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + lrs = [v * (self.decay_rate ** (t // self.decay_t)) for v in self.base_values] + return lrs + + def get_epoch_values(self, epoch: int): + if self.t_in_epochs: + return self._get_lr(epoch) + else: + return None + + def get_update_values(self, num_updates: int): + if not self.t_in_epochs: + return self._get_lr(num_updates) + else: + return None diff --git a/timm/scheduler/tanh_lr.py b/timm/scheduler/tanh_lr.py new file mode 100644 index 0000000..f2d3c9c --- /dev/null +++ b/timm/scheduler/tanh_lr.py @@ -0,0 +1,117 @@ +""" TanH Scheduler + +TanH schedule with warmup, cycle/restarts, noise. 
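+
+Editor's note (illustrative, not upstream text): with the default bounds lb=-7 and
+ub=3, the in-cycle value is lr = lr_min + 0.5*(lr_max - lr_min)*(1 - tanh(lb*(1 - tr) + ub*tr))
+for cycle progress tr in [0, 1]; since tanh(-7) is roughly -1 and tanh(3) is roughly
+0.995, the schedule starts at approximately lr_max and decays smoothly to approximately lr_min.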
+
+Hacked together by / Copyright 2021 Ross Wightman
+"""
+import logging
+import math
+import numpy as np
+import torch
+
+from .scheduler import Scheduler
+
+
+_logger = logging.getLogger(__name__)
+
+
+class TanhLRScheduler(Scheduler):
+    """
+    Hyperbolic-Tangent decay with restarts.
+    This is described in the paper https://arxiv.org/abs/1806.01593
+    """
+
+    def __init__(self,
+                 optimizer: torch.optim.Optimizer,
+                 t_initial: int,
+                 lb: float = -7.,
+                 ub: float = 3.,
+                 lr_min: float = 0.,
+                 cycle_mul: float = 1.,
+                 cycle_decay: float = 1.,
+                 cycle_limit: int = 1,
+                 warmup_t=0,
+                 warmup_lr_init=0,
+                 warmup_prefix=False,
+                 t_in_epochs=True,
+                 noise_range_t=None,
+                 noise_pct=0.67,
+                 noise_std=1.0,
+                 noise_seed=42,
+                 initialize=True) -> None:
+        super().__init__(
+            optimizer, param_group_field="lr",
+            noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed,
+            initialize=initialize)
+
+        assert t_initial > 0
+        assert lr_min >= 0
+        assert lb < ub
+        assert cycle_limit >= 0
+        assert warmup_t >= 0
+        assert warmup_lr_init >= 0
+        self.lb = lb
+        self.ub = ub
+        self.t_initial = t_initial
+        self.lr_min = lr_min
+        self.cycle_mul = cycle_mul
+        self.cycle_decay = cycle_decay
+        self.cycle_limit = cycle_limit
+        self.warmup_t = warmup_t
+        self.warmup_lr_init = warmup_lr_init
+        self.warmup_prefix = warmup_prefix
+        self.t_in_epochs = t_in_epochs
+        if self.warmup_t:
+            t_v = self.base_values if self.warmup_prefix else self._get_lr(self.warmup_t)
+            self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in t_v]
+            super().update_groups(self.warmup_lr_init)
+        else:
+            self.warmup_steps = [1 for _ in self.base_values]
+
+    def _get_lr(self, t):
+        if t < self.warmup_t:
+            lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps]
+        else:
+            if self.warmup_prefix:
+                t = t - self.warmup_t
+
+            if self.cycle_mul != 1:
+                i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul))
+                t_i = self.cycle_mul ** i * self.t_initial
+                t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial
+            else:
+                i = t // self.t_initial
+                t_i = self.t_initial
+                t_curr = t - (self.t_initial * i)
+
+            if i < self.cycle_limit:
+                gamma = self.cycle_decay ** i
+                lr_max_values = [v * gamma for v in self.base_values]
+
+                tr = t_curr / t_i
+                lrs = [
+                    self.lr_min + 0.5 * (lr_max - self.lr_min) * (1 - math.tanh(self.lb * (1. - tr) + self.ub * tr))
+                    for lr_max in lr_max_values
+                ]
+            else:
+                lrs = [self.lr_min for _ in self.base_values]
+        return lrs
+
+    def get_epoch_values(self, epoch: int):
+        if self.t_in_epochs:
+            return self._get_lr(epoch)
+        else:
+            return None
+
+    def get_update_values(self, num_updates: int):
+        if not self.t_in_epochs:
+            return self._get_lr(num_updates)
+        else:
+            return None
+
+    def get_cycle_length(self, cycles=0):
+        cycles = max(1, cycles or self.cycle_limit)
+        if self.cycle_mul == 1.0:
+            return self.t_initial * cycles
+        else:
+            return int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul)))
diff --git a/timm/utils/__init__.py b/timm/utils/__init__.py
new file mode 100644
index 0000000..11de9c9
--- /dev/null
+++ b/timm/utils/__init__.py
@@ -0,0 +1,13 @@
+from .agc import adaptive_clip_grad
+from .checkpoint_saver import CheckpointSaver
+from .clip_grad import dispatch_clip_grad
+from .cuda import ApexScaler, NativeScaler
+from .distributed import distribute_bn, reduce_tensor
+from .jit import set_jit_legacy
+from .log import setup_default_logging, FormatterNoInfo
+from .metrics import AverageMeter, accuracy
+from .misc import natural_key, add_bool_arg
+from .model import unwrap_model, get_state_dict, freeze, unfreeze
+from .model_ema import ModelEma, ModelEmaV2
+from .random import random_seed
+from .summary import update_summary, get_outdir
[GIT binary patch hunks omitted: the original commit also adds compiled Python bytecode caches (timm/utils/__pycache__/*.cpython-36.pyc); the base85-encoded binary blobs are unreadable as text and carry no reviewable content]
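[editor's note, illustrative sketch only: the exports above form the usual training-script surface of timm.utils; assuming a configured training script, a minimal sketch looks like

    from timm.utils import setup_default_logging, random_seed, AverageMeter
    setup_default_logging()          # console logging via FormatterNoInfo
    random_seed(42)                  # seed RNGs for reproducibility
    loss_meter = AverageMeter()
    loss_meter.update(0.73, n=64)    # 0.73 and 64 are placeholder values

CheckpointSaver, NativeScaler and the remaining helpers appear in the per-file diffs below]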
diff --git a/timm/utils/agc.py b/timm/utils/agc.py
new file mode 100644
index 0000000..f514017
--- /dev/null
+++ b/timm/utils/agc.py
@@ -0,0 +1,42 @@
+""" Adaptive Gradient Clipping
+
+An impl of AGC, as per (https://arxiv.org/abs/2102.06171):
+
+@article{brock2021high,
+  author={Andrew Brock and Soham De and Samuel L. Smith and Karen Simonyan},
+  title={High-Performance Large-Scale Image Recognition Without Normalization},
+  journal={arXiv preprint arXiv:2102.06171},
+  year={2021}
+}
+
+Code references:
+  * Official JAX impl (paper authors): https://github.com/deepmind/deepmind-research/tree/master/nfnets
+  * Phil Wang's PyTorch gist: https://gist.github.com/lucidrains/0d6560077edac419ab5d3aa29e674d5c
+
+Hacked together by / Copyright 2021 Ross Wightman
+"""
+import torch
+
+
+def unitwise_norm(x, norm_type=2.0):
+    if x.ndim <= 1:
+        return x.norm(norm_type)
+    else:
+        # works for nn.ConvNd and nn.Linear where output dim is first in the kernel/weight tensor
+        # might need special cases for other weights (possibly MHA) where this may not be true
+        return x.norm(norm_type, dim=tuple(range(1, x.ndim)), keepdim=True)
+
+
+def adaptive_clip_grad(parameters, clip_factor=0.01, eps=1e-3, norm_type=2.0):
+    if isinstance(parameters, torch.Tensor):
+        parameters = [parameters]
+    for p in parameters:
+        if p.grad is None:
+            continue
+        p_data = p.detach()
+        g_data = p.grad.detach()
+        max_norm = unitwise_norm(p_data, norm_type=norm_type).clamp_(min=eps).mul_(clip_factor)
+        grad_norm = unitwise_norm(g_data, norm_type=norm_type)
+        clipped_grad = g_data * (max_norm / grad_norm.clamp(min=1e-6))
+        new_grads = torch.where(grad_norm < max_norm, g_data, clipped_grad)
+        p.grad.detach().copy_(new_grads)
diff --git a/timm/utils/checkpoint_saver.py b/timm/utils/checkpoint_saver.py
new file mode 100644
index 0000000..6aad74e
--- /dev/null
+++ b/timm/utils/checkpoint_saver.py
@@ -0,0 +1,150 @@
+""" Checkpoint Saver
+
+Track top-n training checkpoints and maintain recovery checkpoints on specified intervals.
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+
+import glob
+import operator
+import os
+import logging
+
+import torch
+
+from .model import unwrap_model, get_state_dict
+
+
+_logger = logging.getLogger(__name__)
+
+
+class CheckpointSaver:
+    def __init__(
+            self,
+            model,
+            optimizer,
+            args=None,
+            model_ema=None,
+            amp_scaler=None,
+            checkpoint_prefix='checkpoint',
+            recovery_prefix='recovery',
+            checkpoint_dir='',
+            recovery_dir='',
+            decreasing=False,
+            max_history=10,
+            unwrap_fn=unwrap_model):
+
+        # objects to save state_dicts of
+        self.model = model
+        self.optimizer = optimizer
+        self.args = args
+        self.model_ema = model_ema
+        self.amp_scaler = amp_scaler
+
+        # state
+        self.checkpoint_files = []  # (filename, metric) tuples in order of decreasing betterness
+        self.best_epoch = None
+        self.best_metric = None
+        self.curr_recovery_file = ''
+        self.last_recovery_file = ''
+
+        # config
+        self.checkpoint_dir = checkpoint_dir
+        self.recovery_dir = recovery_dir
+        self.save_prefix = checkpoint_prefix
+        self.recovery_prefix = recovery_prefix
+        self.extension = '.pth.tar'
+        self.decreasing = decreasing  # a lower metric is better if True
+        self.cmp = operator.lt if decreasing else operator.gt  # True if lhs better than rhs
+        self.max_history = max_history
+        self.unwrap_fn = unwrap_fn
+        assert self.max_history >= 1
+
+    def save_checkpoint(self, epoch, metric=None):
+        assert epoch >= 0
+        tmp_save_path = os.path.join(self.checkpoint_dir, 'tmp' + self.extension)
+        last_save_path = os.path.join(self.checkpoint_dir, 'last' + self.extension)
+        self._save(tmp_save_path, epoch, metric)
+        if os.path.exists(last_save_path):
+            os.unlink(last_save_path)  # required for Windows support.
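+        # Editor's note (illustrative, not upstream code): save_checkpoint always writes
+        # to a tmp file, renames it over 'last.pth.tar', then hard-links the best/top-n
+        # entries to that file below. A minimal usage sketch, assuming an existing
+        # model/optimizer and a validation metric `val_acc`:
+        #     saver = CheckpointSaver(model, optimizer, checkpoint_dir='./output', max_history=3)
+        #     best_metric, best_epoch = saver.save_checkpoint(epoch, metric=val_acc)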
+ os.rename(tmp_save_path, last_save_path) + worst_file = self.checkpoint_files[-1] if self.checkpoint_files else None + if (len(self.checkpoint_files) < self.max_history + or metric is None or self.cmp(metric, worst_file[1])): + if len(self.checkpoint_files) >= self.max_history: + self._cleanup_checkpoints(1) + filename = '-'.join([self.save_prefix, str(epoch)]) + self.extension + save_path = os.path.join(self.checkpoint_dir, filename) + os.link(last_save_path, save_path) + self.checkpoint_files.append((save_path, metric)) + self.checkpoint_files = sorted( + self.checkpoint_files, key=lambda x: x[1], + reverse=not self.decreasing) # sort in descending order if a lower metric is not better + + checkpoints_str = "Current checkpoints:\n" + for c in self.checkpoint_files: + checkpoints_str += ' {}\n'.format(c) + _logger.info(checkpoints_str) + + if metric is not None and (self.best_metric is None or self.cmp(metric, self.best_metric)): + self.best_epoch = epoch + self.best_metric = metric + best_save_path = os.path.join(self.checkpoint_dir, 'model_best' + self.extension) + if os.path.exists(best_save_path): + os.unlink(best_save_path) + os.link(last_save_path, best_save_path) + + return (None, None) if self.best_metric is None else (self.best_metric, self.best_epoch) + + def _save(self, save_path, epoch, metric=None): + save_state = { + 'epoch': epoch, + 'arch': type(self.model).__name__.lower(), + 'state_dict': get_state_dict(self.model, self.unwrap_fn), + 'optimizer': self.optimizer.state_dict(), + 'version': 2, # version < 2 increments epoch before save + } + if self.args is not None: + save_state['arch'] = self.args.model + save_state['args'] = self.args + if self.amp_scaler is not None: + save_state[self.amp_scaler.state_dict_key] = self.amp_scaler.state_dict() + if self.model_ema is not None: + save_state['state_dict_ema'] = get_state_dict(self.model_ema, self.unwrap_fn) + if metric is not None: + save_state['metric'] = metric + torch.save(save_state, save_path) + + def _cleanup_checkpoints(self, trim=0): + trim = min(len(self.checkpoint_files), trim) + delete_index = self.max_history - trim + if delete_index < 0 or len(self.checkpoint_files) <= delete_index: + return + to_delete = self.checkpoint_files[delete_index:] + for d in to_delete: + try: + _logger.debug("Cleaning checkpoint: {}".format(d)) + os.remove(d[0]) + except Exception as e: + _logger.error("Exception '{}' while deleting checkpoint".format(e)) + self.checkpoint_files = self.checkpoint_files[:delete_index] + + def save_recovery(self, epoch, batch_idx=0): + assert epoch >= 0 + filename = '-'.join([self.recovery_prefix, str(epoch), str(batch_idx)]) + self.extension + save_path = os.path.join(self.recovery_dir, filename) + self._save(save_path, epoch) + if os.path.exists(self.last_recovery_file): + try: + _logger.debug("Cleaning recovery: {}".format(self.last_recovery_file)) + os.remove(self.last_recovery_file) + except Exception as e: + _logger.error("Exception '{}' while removing {}".format(e, self.last_recovery_file)) + self.last_recovery_file = self.curr_recovery_file + self.curr_recovery_file = save_path + + def find_recovery(self): + recovery_path = os.path.join(self.recovery_dir, self.recovery_prefix) + files = glob.glob(recovery_path + '*' + self.extension) + files = sorted(files) + return files[0] if len(files) else '' diff --git a/timm/utils/clip_grad.py b/timm/utils/clip_grad.py new file mode 100644 index 0000000..7eb4069 --- /dev/null +++ b/timm/utils/clip_grad.py @@ -0,0 +1,23 @@ +import torch + +from 
timm.utils.agc import adaptive_clip_grad + + +def dispatch_clip_grad(parameters, value: float, mode: str = 'norm', norm_type: float = 2.0): + """ Dispatch to gradient clipping method + + Args: + parameters (Iterable): model parameters to clip + value (float): clipping value/factor/norm, mode dependant + mode (str): clipping mode, one of 'norm', 'value', 'agc' + norm_type (float): p-norm, default 2.0 + """ + if mode == 'norm': + torch.nn.utils.clip_grad_norm_(parameters, value, norm_type=norm_type) + elif mode == 'value': + torch.nn.utils.clip_grad_value_(parameters, value) + elif mode == 'agc': + adaptive_clip_grad(parameters, value, norm_type=norm_type) + else: + assert False, f"Unknown clip mode ({mode})." + diff --git a/timm/utils/cuda.py b/timm/utils/cuda.py new file mode 100644 index 0000000..9e7bddf --- /dev/null +++ b/timm/utils/cuda.py @@ -0,0 +1,55 @@ +""" CUDA / AMP utils + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch + +try: + from apex import amp + has_apex = True +except ImportError: + amp = None + has_apex = False + +from .clip_grad import dispatch_clip_grad + + +class ApexScaler: + state_dict_key = "amp" + + def __call__(self, loss, optimizer, clip_grad=None, clip_mode='norm', parameters=None, create_graph=False): + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward(create_graph=create_graph) + if clip_grad is not None: + dispatch_clip_grad(amp.master_params(optimizer), clip_grad, mode=clip_mode) + optimizer.step() + + def state_dict(self): + if 'state_dict' in amp.__dict__: + return amp.state_dict() + + def load_state_dict(self, state_dict): + if 'load_state_dict' in amp.__dict__: + amp.load_state_dict(state_dict) + + +class NativeScaler: + state_dict_key = "amp_scaler" + + def __init__(self): + self._scaler = torch.cuda.amp.GradScaler() + + def __call__(self, loss, optimizer, clip_grad=None, clip_mode='norm', parameters=None, create_graph=False): + self._scaler.scale(loss).backward(create_graph=create_graph) + if clip_grad is not None: + assert parameters is not None + self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place + dispatch_clip_grad(parameters, clip_grad, mode=clip_mode) + self._scaler.step(optimizer) + self._scaler.update() + + def state_dict(self): + return self._scaler.state_dict() + + def load_state_dict(self, state_dict): + self._scaler.load_state_dict(state_dict) diff --git a/timm/utils/distributed.py b/timm/utils/distributed.py new file mode 100644 index 0000000..3c5dba8 --- /dev/null +++ b/timm/utils/distributed.py @@ -0,0 +1,28 @@ +""" Distributed training/validation utils + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +from torch import distributed as dist + +from .model import unwrap_model + + +def reduce_tensor(tensor, n): + rt = tensor.clone() + dist.all_reduce(rt, op=dist.ReduceOp.SUM) + rt /= n + return rt + + +def distribute_bn(model, world_size, reduce=False): + # ensure every node has the same running bn stats + for bn_name, bn_buf in unwrap_model(model).named_buffers(recurse=True): + if ('running_mean' in bn_name) or ('running_var' in bn_name): + if reduce: + # average bn stats across whole group + torch.distributed.all_reduce(bn_buf, op=dist.ReduceOp.SUM) + bn_buf /= float(world_size) + else: + # broadcast bn stats from rank 0 to whole group + torch.distributed.broadcast(bn_buf, 0) diff --git a/timm/utils/jit.py b/timm/utils/jit.py new file mode 100644 index 0000000..185ab7a --- /dev/null +++ b/timm/utils/jit.py @@ -0,0 +1,18 
@@
+""" JIT scripting/tracing utils
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+import torch
+
+
+def set_jit_legacy():
+    """ Set JIT executor to legacy w/ support for op fusion
+    This is hopefully a temporary need in 1.5/1.5.1/1.6 to restore performance due to changes
+    in the JIT executor. These APIs are not supported, so they could change.
+    """
+    #
+    assert hasattr(torch._C, '_jit_set_profiling_executor'), "Old JIT behavior doesn't exist!"
+    torch._C._jit_set_profiling_executor(False)
+    torch._C._jit_set_profiling_mode(False)
+    torch._C._jit_override_can_fuse_on_gpu(True)
+    #torch._C._jit_set_texpr_fuser_enabled(True)
diff --git a/timm/utils/log.py b/timm/utils/log.py
new file mode 100644
index 0000000..c99469e
--- /dev/null
+++ b/timm/utils/log.py
@@ -0,0 +1,28 @@
+""" Logging helpers
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+import logging
+import logging.handlers
+
+
+class FormatterNoInfo(logging.Formatter):
+    def __init__(self, fmt='%(levelname)s: %(message)s'):
+        logging.Formatter.__init__(self, fmt)
+
+    def format(self, record):
+        if record.levelno == logging.INFO:
+            return str(record.getMessage())
+        return logging.Formatter.format(self, record)
+
+
+def setup_default_logging(default_level=logging.INFO, log_path=''):
+    console_handler = logging.StreamHandler()
+    console_handler.setFormatter(FormatterNoInfo())
+    logging.root.addHandler(console_handler)
+    logging.root.setLevel(default_level)
+    if log_path:
+        file_handler = logging.handlers.RotatingFileHandler(log_path, maxBytes=(1024 ** 2 * 2), backupCount=3)
+        file_formatter = logging.Formatter("%(asctime)s - %(name)20s: [%(levelname)8s] - %(message)s")
+        file_handler.setFormatter(file_formatter)
+        logging.root.addHandler(file_handler)
diff --git a/timm/utils/metrics.py b/timm/utils/metrics.py
new file mode 100644
index 0000000..9fdbe13
--- /dev/null
+++ b/timm/utils/metrics.py
@@ -0,0 +1,32 @@
+""" Eval metrics and related
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+
+
+class AverageMeter:
+    """Computes and stores the average and current value"""
+    def __init__(self):
+        self.reset()
+
+    def reset(self):
+        self.val = 0
+        self.avg = 0
+        self.sum = 0
+        self.count = 0
+
+    def update(self, val, n=1):
+        self.val = val
+        self.sum += val * n
+        self.count += n
+        self.avg = self.sum / self.count
+
+
+def accuracy(output, target, topk=(1,)):
+    """Computes the accuracy over the k top predictions for the specified values of k"""
+    maxk = min(max(topk), output.size()[1])
+    batch_size = target.size(0)
+    _, pred = output.topk(maxk, 1, True, True)
+    pred = pred.t()
+    correct = pred.eq(target.reshape(1, -1).expand_as(pred))
+    return [correct[:min(k, maxk)].reshape(-1).float().sum(0) * 100. / batch_size for k in topk]
diff --git a/timm/utils/misc.py b/timm/utils/misc.py
new file mode 100644
index 0000000..39c0097
--- /dev/null
+++ b/timm/utils/misc.py
@@ -0,0 +1,18 @@
+""" Misc utils
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+import re
+
+
+def natural_key(string_):
+    """See http://www.codinghorror.com/blog/archives/001018.html"""
+    return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
+
+
+def add_bool_arg(parser, name, default=False, help=''):
+    dest_name = name.replace('-', '_')
+    group = parser.add_mutually_exclusive_group(required=False)
+    group.add_argument('--' + name, dest=dest_name, action='store_true', help=help)
+    group.add_argument('--no-' + name, dest=dest_name, action='store_false', help=help)
+    parser.set_defaults(**{dest_name: default})
diff --git a/timm/utils/model.py b/timm/utils/model.py
new file mode 100644
index 0000000..b95c453
--- /dev/null
+++ b/timm/utils/model.py
@@ -0,0 +1,273 @@
+""" Model / state_dict utils
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+import fnmatch
+
+import torch
+from torchvision.ops.misc import FrozenBatchNorm2d
+
+from .model_ema import ModelEma
+
+
+def unwrap_model(model):
+    if isinstance(model, ModelEma):
+        return unwrap_model(model.ema)
+    else:
+        return model.module if hasattr(model, 'module') else model
+
+
+def get_state_dict(model, unwrap_fn=unwrap_model):
+    return unwrap_fn(model).state_dict()
+
+
+def avg_sq_ch_mean(model, input, output):
+    """ calculate average channel square mean of output activations
+    """
+    return torch.mean(output.mean(axis=[0, 2, 3]) ** 2).item()
+
+
+def avg_ch_var(model, input, output):
+    """ calculate average channel variance of output activations
+    """
+    return torch.mean(output.var(axis=[0, 2, 3])).item()
+
+
+def avg_ch_var_residual(model, input, output):
+    """ calculate average channel variance of output activations
+    """
+    return torch.mean(output.var(axis=[0, 2, 3])).item()
+
+
+class ActivationStatsHook:
+    """Iterates through each of `model`'s modules and matches modules using unix pattern
+    matching based on `hook_fn_locs` and registers `hook_fn` to the module if there is
+    a match.
+
+    Arguments:
+        model (nn.Module): model from which we will extract the activation stats
+        hook_fn_locs (List[str]): List of `hook_fn` locations based on Unix type string
+            matching with the name of model's modules.
+        hook_fns (List[Callable]): List of hook functions to be registered at every
+            module in `layer_names`.
+
+    Inspiration from https://docs.fast.ai/callback.hook.html.
+
+    Refer to https://gist.github.com/amaarora/6e56942fcb46e67ba203f3009b30d950 for an example
+    on how to plot Signal Propagation Plots using `ActivationStatsHook`.
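+
+    Editor's note (illustrative sketch, not upstream text): typical usage, assuming
+    `model` is a CNN whose first-stage blocks match the pattern 'layer1.*':
+
+        hook = ActivationStatsHook(
+            model, hook_fn_locs=['layer1.*', 'layer1.*'],
+            hook_fns=[avg_sq_ch_mean, avg_ch_var])
+        _ = model(torch.randn(8, 3, 224, 224))
+        stats = hook.stats  # {'avg_sq_ch_mean': [...], 'avg_ch_var': [...]}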
+ """ + + def __init__(self, model, hook_fn_locs, hook_fns): + self.model = model + self.hook_fn_locs = hook_fn_locs + self.hook_fns = hook_fns + if len(hook_fn_locs) != len(hook_fns): + raise ValueError("Please provide `hook_fns` for each `hook_fn_locs`, \ + their lengths are different.") + self.stats = dict((hook_fn.__name__, []) for hook_fn in hook_fns) + for hook_fn_loc, hook_fn in zip(hook_fn_locs, hook_fns): + self.register_hook(hook_fn_loc, hook_fn) + + def _create_hook(self, hook_fn): + def append_activation_stats(module, input, output): + out = hook_fn(module, input, output) + self.stats[hook_fn.__name__].append(out) + + return append_activation_stats + + def register_hook(self, hook_fn_loc, hook_fn): + for name, module in self.model.named_modules(): + if not fnmatch.fnmatch(name, hook_fn_loc): + continue + module.register_forward_hook(self._create_hook(hook_fn)) + + +def extract_spp_stats( + model, + hook_fn_locs, + hook_fns, + input_shape=[8, 3, 224, 224]): + """Extract average square channel mean and variance of activations during + forward pass to plot Signal Propogation Plots (SPP). + + Paper: https://arxiv.org/abs/2101.08692 + + Example Usage: https://gist.github.com/amaarora/6e56942fcb46e67ba203f3009b30d950 + """ + x = torch.normal(0., 1., input_shape) + hook = ActivationStatsHook(model, hook_fn_locs=hook_fn_locs, hook_fns=hook_fns) + _ = model(x) + return hook.stats + + +def freeze_batch_norm_2d(module): + """ + Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. If `module` is + itself an instance of either `BatchNorm2d` or `SyncBatchNorm`, it is converted into `FrozenBatchNorm2d` and + returned. Otherwise, the module is walked recursively and submodules are converted in place. + + Args: + module (torch.nn.Module): Any PyTorch module. + + Returns: + torch.nn.Module: Resulting module + + Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762 + """ + res = module + if isinstance(module, (torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.batchnorm.SyncBatchNorm)): + res = FrozenBatchNorm2d(module.num_features) + res.num_features = module.num_features + res.affine = module.affine + if module.affine: + res.weight.data = module.weight.data.clone().detach() + res.bias.data = module.bias.data.clone().detach() + res.running_mean.data = module.running_mean.data + res.running_var.data = module.running_var.data + res.eps = module.eps + else: + for name, child in module.named_children(): + new_child = freeze_batch_norm_2d(child) + if new_child is not child: + res.add_module(name, new_child) + return res + + +def unfreeze_batch_norm_2d(module): + """ + Converts all `FrozenBatchNorm2d` layers of provided module into `BatchNorm2d`. If `module` is itself and instance + of `FrozenBatchNorm2d`, it is converted into `BatchNorm2d` and returned. Otherwise, the module is walked + recursively and submodules are converted in place. + + Args: + module (torch.nn.Module): Any PyTorch module. 
+
+
+def freeze_batch_norm_2d(module):
+    """
+    Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. If `module` is
+    itself an instance of either `BatchNorm2d` or `SyncBatchNorm`, it is converted into `FrozenBatchNorm2d` and
+    returned. Otherwise, the module is walked recursively and submodules are converted in place.
+
+    Args:
+        module (torch.nn.Module): Any PyTorch module.
+
+    Returns:
+        torch.nn.Module: Resulting module
+
+    Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762
+    """
+    res = module
+    if isinstance(module, (torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.batchnorm.SyncBatchNorm)):
+        res = FrozenBatchNorm2d(module.num_features)
+        res.num_features = module.num_features
+        res.affine = module.affine
+        if module.affine:
+            res.weight.data = module.weight.data.clone().detach()
+            res.bias.data = module.bias.data.clone().detach()
+        res.running_mean.data = module.running_mean.data
+        res.running_var.data = module.running_var.data
+        res.eps = module.eps
+    else:
+        for name, child in module.named_children():
+            new_child = freeze_batch_norm_2d(child)
+            if new_child is not child:
+                res.add_module(name, new_child)
+    return res
+
+
+def unfreeze_batch_norm_2d(module):
+    """
+    Converts all `FrozenBatchNorm2d` layers of provided module into `BatchNorm2d`. If `module` is itself an instance
+    of `FrozenBatchNorm2d`, it is converted into `BatchNorm2d` and returned. Otherwise, the module is walked
+    recursively and submodules are converted in place.
+
+    Args:
+        module (torch.nn.Module): Any PyTorch module.
+
+    Returns:
+        torch.nn.Module: Resulting module
+
+    Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762
+    """
+    res = module
+    if isinstance(module, FrozenBatchNorm2d):
+        res = torch.nn.BatchNorm2d(module.num_features)
+        if module.affine:
+            res.weight.data = module.weight.data.clone().detach()
+            res.bias.data = module.bias.data.clone().detach()
+        res.running_mean.data = module.running_mean.data
+        res.running_var.data = module.running_var.data
+        res.eps = module.eps
+    else:
+        for name, child in module.named_children():
+            new_child = unfreeze_batch_norm_2d(child)
+            if new_child is not child:
+                res.add_module(name, new_child)
+    return res
+
+
+def _freeze_unfreeze(root_module, submodules=[], include_bn_running_stats=True, mode='freeze'):
+    """
+    Freeze or unfreeze parameters of the specified modules and those of all their hierarchical descendants. This is
+    done in place.
+    Args:
+        root_module (nn.Module, optional): Root module relative to which the `submodules` are referenced.
+        submodules (list[str]): List of modules for which the parameters will be (un)frozen. They are to be provided as
+            named modules relative to the root module (accessible via `root_module.named_modules()`). An empty list
+            means that the whole root module will be (un)frozen. Defaults to `[]`.
+        include_bn_running_stats (bool): Whether to also (un)freeze the running statistics of batch norm 2d layers.
+            Defaults to `True`.
+        mode (str): Whether to freeze ("freeze") or unfreeze ("unfreeze"). Defaults to `"freeze"`.
+    """
+    assert mode in ["freeze", "unfreeze"], '`mode` must be one of "freeze" or "unfreeze"'
+
+    if isinstance(root_module, (torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.batchnorm.SyncBatchNorm)):
+        # Raise assertion here because we can't convert it in place
+        raise AssertionError(
+            "You have provided a batch norm layer as the `root module`. Please use "
+            "`timm.utils.model.freeze_batch_norm_2d` or `timm.utils.model.unfreeze_batch_norm_2d` instead.")
+
+    if isinstance(submodules, str):
+        submodules = [submodules]
+
+    named_modules = submodules
+    submodules = [root_module.get_submodule(m) for m in submodules]
+
+    if not len(submodules):
+        named_modules, submodules = list(zip(*root_module.named_children()))
+
+    for n, m in zip(named_modules, submodules):
+        # (Un)freeze parameters
+        for p in m.parameters():
+            p.requires_grad = False if mode == 'freeze' else True
+        if include_bn_running_stats:
+            # Helper to add submodule specified as a named_module
+            def _add_submodule(module, name, submodule):
+                split = name.rsplit('.', 1)
+                if len(split) > 1:
+                    module.get_submodule(split[0]).add_module(split[1], submodule)
+                else:
+                    module.add_module(name, submodule)
+
+            # Freeze batch norm
+            if mode == 'freeze':
+                res = freeze_batch_norm_2d(m)
+                # It's possible that `m` is a type of BatchNorm in itself, in which case `freeze_batch_norm_2d` won't
+                # convert it in place, but will return the converted result. In this case `res` holds the converted
+                # result and we may try to re-assign the named module
+                if isinstance(m, (torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.batchnorm.SyncBatchNorm)):
+                    _add_submodule(root_module, n, res)
+            # Unfreeze batch norm
+            else:
+                res = unfreeze_batch_norm_2d(m)
+                # Ditto. 
See note above in the mode == 'freeze' branch
+                if isinstance(m, FrozenBatchNorm2d):
+                    _add_submodule(root_module, n, res)
+
+
+def freeze(root_module, submodules=[], include_bn_running_stats=True):
+    """
+    Freeze parameters of the specified modules and those of all their hierarchical descendants. This is done in place.
+    Args:
+        root_module (nn.Module): Root module relative to which `submodules` are referenced.
+        submodules (list[str]): List of modules for which the parameters will be frozen. They are to be provided as
+            named modules relative to the root module (accessible via `root_module.named_modules()`). An empty list
+            means that the whole root module will be frozen. Defaults to `[]`.
+        include_bn_running_stats (bool): Whether to also freeze the running statistics of `BatchNorm2d` and
+            `SyncBatchNorm` layers. These will be converted to `FrozenBatchNorm2d` in place. Hint: During fine tuning,
+            it's good practice to freeze batch norm stats. And note that these are different from the affine parameters
+            which are just normal PyTorch parameters. Defaults to `True`.
+
+    Hint: If you want to freeze batch norm ONLY, use `timm.utils.model.freeze_batch_norm_2d`.
+
+    Examples::
+
+        >>> model = timm.create_model('resnet18')
+        >>> # Freeze up to and including layer2
+        >>> submodules = [n for n, _ in model.named_children()]
+        >>> print(submodules)
+        ['conv1', 'bn1', 'act1', 'maxpool', 'layer1', 'layer2', 'layer3', 'layer4', 'global_pool', 'fc']
+        >>> freeze(model, submodules[:submodules.index('layer2') + 1])
+        >>> # Check for yourself that it works as expected
+        >>> print(model.layer2[0].conv1.weight.requires_grad)
+        False
+        >>> print(model.layer3[0].conv1.weight.requires_grad)
+        True
+        >>> # Unfreeze
+        >>> unfreeze(model)
+    """
+    _freeze_unfreeze(root_module, submodules, include_bn_running_stats=include_bn_running_stats, mode="freeze")
+
+
+def unfreeze(root_module, submodules=[], include_bn_running_stats=True):
+    """
+    Unfreeze parameters of the specified modules and those of all their hierarchical descendants. This is done in place.
+    Args:
+        root_module (nn.Module): Root module relative to which `submodules` are referenced.
+        submodules (list[str]): List of submodules for which the parameters will be (un)frozen. They are to be provided
+            as named modules relative to the root module (accessible via `root_module.named_modules()`). An empty
+            list means that the whole root module will be unfrozen. Defaults to `[]`.
+        include_bn_running_stats (bool): Whether to also unfreeze the running statistics of `FrozenBatchNorm2d` layers.
+            These will be converted to `BatchNorm2d` in place. Defaults to `True`.
+
+    See example in docstring for `freeze`.
+    """
+    _freeze_unfreeze(root_module, submodules, include_bn_running_stats=include_bn_running_stats, mode="unfreeze")
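
A sketch of the fine-tuning pattern the docstrings describe, freezing batch norm statistics while leaving everything else trainable; resnet18 is just an example model:

    import torchvision
    from timm.utils.model import freeze_batch_norm_2d

    model = torchvision.models.resnet18()
    model = freeze_batch_norm_2d(model)  # every (Sync)BatchNorm2d becomes FrozenBatchNorm2d
    # running_mean / running_var no longer update during fine-tuning;
    # the affine weights stay trainable unless frozen separately with freeze().
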
diff --git a/timm/utils/model_ema.py b/timm/utils/model_ema.py
new file mode 100644
index 0000000..073d5c5
--- /dev/null
+++ b/timm/utils/model_ema.py
@@ -0,0 +1,126 @@
+""" Exponential Moving Average (EMA) of model updates
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+import logging
+from collections import OrderedDict
+from copy import deepcopy
+
+import torch
+import torch.nn as nn
+
+_logger = logging.getLogger(__name__)
+
+
+class ModelEma:
+    """ Model Exponential Moving Average (DEPRECATED)
+
+    Keep a moving average of everything in the model state_dict (parameters and buffers).
+    This version is deprecated; it does not work with scripted models and will be removed eventually.
+
+    This is intended to allow functionality like
+    https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
+
+    A smoothed version of the weights is necessary for some training schemes to perform well.
+    E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc. that use
+    RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 require EMA
+    smoothing of weights to match results. Pay attention to the decay constant you are using
+    relative to your update count per epoch.
+
+    To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but
+    disable validation of the EMA weights. Validation will have to be done manually in a separate
+    process, or after the training stops converging.
+
+    This class is sensitive to where it is initialized in the sequence of model init,
+    GPU assignment and distributed training wrappers.
+    """
+    def __init__(self, model, decay=0.9999, device='', resume=''):
+        # make a copy of the model for accumulating moving average of weights
+        self.ema = deepcopy(model)
+        self.ema.eval()
+        self.decay = decay
+        self.device = device  # perform ema on different device from model if set
+        if device:
+            self.ema.to(device=device)
+        self.ema_has_module = hasattr(self.ema, 'module')
+        if resume:
+            self._load_checkpoint(resume)
+        for p in self.ema.parameters():
+            p.requires_grad_(False)
+
+    def _load_checkpoint(self, checkpoint_path):
+        checkpoint = torch.load(checkpoint_path, map_location='cpu')
+        assert isinstance(checkpoint, dict)
+        if 'state_dict_ema' in checkpoint:
+            new_state_dict = OrderedDict()
+            for k, v in checkpoint['state_dict_ema'].items():
+                # ema model may have been wrapped by DataParallel, and need module prefix
+                if self.ema_has_module:
+                    name = 'module.' + k if not k.startswith('module') else k
+                else:
+                    name = k
+                new_state_dict[name] = v
+            self.ema.load_state_dict(new_state_dict)
+            _logger.info("Loaded state_dict_ema")
+        else:
+            _logger.warning("Failed to find state_dict_ema, starting from loaded model weights")
+
+    def update(self, model):
+        # correct a mismatch in state dict keys
+        needs_module = hasattr(model, 'module') and not self.ema_has_module
+        with torch.no_grad():
+            msd = model.state_dict()
+            for k, ema_v in self.ema.state_dict().items():
+                if needs_module:
+                    k = 'module.' + k
+                model_v = msd[k].detach()
+                if self.device:
+                    model_v = model_v.to(device=self.device)
+                ema_v.copy_(ema_v * self.decay + (1. - self.decay) * model_v)
+
+
+class ModelEmaV2(nn.Module):
+    """ Model Exponential Moving Average V2
+
+    Keep a moving average of everything in the model state_dict (parameters and buffers).
+    V2 of this module is simpler; it does not match params/buffers based on name but simply
+    iterates in order. It works with torchscript (JIT of full model).
+
+    This is intended to allow functionality like
+    https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
+
+    A smoothed version of the weights is necessary for some training schemes to perform well.
+    E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc. that use
+    RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 require EMA
+    smoothing of weights to match results. Pay attention to the decay constant you are using
+    relative to your update count per epoch.
+
+    To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but
+    disable validation of the EMA weights. Validation will have to be done manually in a separate
+    process, or after the training stops converging.
+
+    This class is sensitive to where it is initialized in the sequence of model init,
+    GPU assignment and distributed training wrappers.
+    """
+    def __init__(self, model, decay=0.9999, device=None):
+        super(ModelEmaV2, self).__init__()
+        # make a copy of the model for accumulating moving average of weights
+        self.module = deepcopy(model)
+        self.module.eval()
+        self.decay = decay
+        self.device = device  # perform ema on different device from model if set
+        if self.device is not None:
+            self.module.to(device=device)
+
+    def _update(self, model, update_fn):
+        with torch.no_grad():
+            for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()):
+                if self.device is not None:
+                    model_v = model_v.to(device=self.device)
+                ema_v.copy_(update_fn(ema_v, model_v))
+
+    def update(self, model):
+        self._update(model, update_fn=lambda e, m: self.decay * e + (1. - self.decay) * m)
+
+    def set(self, model):
+        self._update(model, update_fn=lambda e, m: m)
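
A sketch of how ModelEmaV2 is typically threaded through a training loop; the toy model, data and optimizer are assumptions, and evaluation would use `ema.module` in place of `model`:

    import torch
    import torch.nn as nn
    from timm.utils.model_ema import ModelEmaV2

    model = nn.Linear(10, 2)
    ema = ModelEmaV2(model, decay=0.999)  # device=None keeps the copy on the same device
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    for _ in range(100):
        x, y = torch.randn(32, 10), torch.randn(32, 2)
        loss = nn.functional.mse_loss(model(x), y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        ema.update(model)  # fold the freshly updated weights into the moving average
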
diff --git a/timm/utils/random.py b/timm/utils/random.py
new file mode 100644
index 0000000..a967998
--- /dev/null
+++ b/timm/utils/random.py
@@ -0,0 +1,9 @@
+import random
+import numpy as np
+import torch
+
+
+def random_seed(seed=42, rank=0):
+    torch.manual_seed(seed + rank)
+    np.random.seed(seed + rank)
+    random.seed(seed + rank)
diff --git a/timm/utils/summary.py b/timm/utils/summary.py
new file mode 100644
index 0000000..9f5af9a
--- /dev/null
+++ b/timm/utils/summary.py
@@ -0,0 +1,39 @@
+""" Summary utilities
+
+Hacked together by / Copyright 2020 Ross Wightman
+"""
+import csv
+import os
+from collections import OrderedDict
+try:
+    import wandb
+except ImportError:
+    pass
+
+def get_outdir(path, *paths, inc=False):
+    outdir = os.path.join(path, *paths)
+    if not os.path.exists(outdir):
+        os.makedirs(outdir)
+    elif inc:
+        count = 1
+        outdir_inc = outdir + '-' + str(count)
+        while os.path.exists(outdir_inc):
+            count = count + 1
+            outdir_inc = outdir + '-' + str(count)
+            assert count < 100
+        outdir = outdir_inc
+        os.makedirs(outdir)
+    return outdir
+
+
+def update_summary(epoch, train_metrics, eval_metrics, filename, write_header=False, log_wandb=False):
+    rowd = OrderedDict(epoch=epoch)
+    rowd.update([('train_' + k, v) for k, v in train_metrics.items()])
+    rowd.update([('eval_' + k, v) for k, v in eval_metrics.items()])
+    if log_wandb:
+        wandb.log(rowd)
+    with open(filename, mode='a') as cf:
+        dw = csv.DictWriter(cf, fieldnames=rowd.keys())
+        if write_header:  # first iteration (epoch == 1 can't be used)
+            dw.writeheader()
+        dw.writerow(rowd)
diff --git a/timm/version.py b/timm/version.py
new file mode 100644
index 0000000..2b8877c
--- /dev/null
+++ b/timm/version.py
@@ -0,0 +1 @@
+__version__ = '0.5.0'
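
For reference, a sketch of how get_outdir and update_summary combine into per-epoch CSV logging; the directory layout and metric names are made up:

    import os
    from timm.utils.summary import get_outdir, update_summary

    outdir = get_outdir('./output', 'train', inc=True)  # ./output/train, then train-1, train-2, ...
    update_summary(
        epoch=1,
        train_metrics={'loss': 0.42},
        eval_metrics={'loss': 0.40, 'top1': 71.3},
        filename=os.path.join(outdir, 'summary.csv'),
        write_header=True)  # header only on the first call; later epochs append rows
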
diff --git a/train.py b/train.py
new file mode 100644
index 0000000..cfd728a
--- /dev/null
+++ b/train.py
@@ -0,0 +1,77 @@
+import time
+import torch
+from options.train_options import TrainOptions
+from data import create_dataset
+from models import create_model
+from util.visualizer import Visualizer
+
+
+if __name__ == '__main__':
+    opt = TrainOptions().parse()   # get training options
+    dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options
+    dataset_size = len(dataset)    # get the number of images in the dataset.
+
+    model = create_model(opt)      # create a model given opt.model and other options
+    print('The number of training images = %d' % dataset_size)
+
+    visualizer = Visualizer(opt)   # create a visualizer that displays/saves images and plots
+    opt.visualizer = visualizer
+    total_iters = 0                # the total number of training iterations
+
+    optimize_time = 0.1
+
+    times = []
+    for epoch in range(opt.epoch_count, opt.n_epochs + opt.n_epochs_decay + 1):    # outer loop for different epochs; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>
+
+        epoch_start_time = time.time()  # timer for entire epoch
+        iter_data_time = time.time()    # timer for data loading per iteration
+        epoch_iter = 0                  # the number of training iterations in current epoch, reset to 0 every epoch
+        visualizer.reset()              # reset the visualizer: make sure it saves the results to HTML at least once every epoch
+
+        dataset.set_epoch(epoch)
+        for i, data in enumerate(dataset):  # inner loop within one epoch
+            iter_start_time = time.time()  # timer for computation per iteration
+            if total_iters % opt.print_freq == 0:
+                t_data = iter_start_time - iter_data_time
+
+            batch_size = data["A0"].size(0)
+            total_iters += batch_size
+            epoch_iter += batch_size
+            if len(opt.gpu_ids) > 0:
+                torch.cuda.synchronize()
+            optimize_start_time = time.time()
+            if epoch == opt.epoch_count and i == 0:
+                # model.data_dependent_initialize(data)
+                model.setup(opt)         # regular setup: load and print networks; create schedulers
+                model.parallelize()
+            model.set_input(data)        # unpack data from dataset and apply preprocessing
+            model.optimize_parameters()  # calculate loss functions, get gradients, update network weights
+            if len(opt.gpu_ids) > 0:
+                torch.cuda.synchronize()
+            optimize_time = (time.time() - optimize_start_time) / batch_size * 0.005 + 0.995 * optimize_time
+
+            if total_iters % opt.display_freq == 0:  # display images on visdom and save images to a HTML file
+                save_result = total_iters % opt.update_html_freq == 0
+                model.compute_visuals()
+                visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)
+
+            if total_iters % opt.print_freq == 0:  # print training losses and save logging information to the disk
+                losses = model.get_current_losses()
+                visualizer.print_current_losses(epoch, epoch_iter, losses, optimize_time, t_data)
+                if opt.display_id is None or opt.display_id > 0:
+                    visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, losses)
+
+            if total_iters % opt.save_latest_freq == 0:  # cache our latest model every <save_latest_freq> iterations
+                print('saving the latest model (epoch %d, total_iters %d)' % (epoch, total_iters))
+                print(opt.name)  # it's useful to occasionally show the experiment name on console
+                save_suffix = 'iter_%d' % total_iters if opt.save_by_iter else 'latest'
+                model.save_networks(save_suffix)
+
+            iter_data_time = time.time()
+
+        if epoch % opt.save_epoch_freq == 0:  # cache our model every <save_epoch_freq> epochs
+            print('saving the model at the end of epoch %d, iters %d' % (epoch, total_iters))
+            model.save_networks('latest')
+            model.save_networks(epoch)
+
+        print('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, opt.n_epochs + opt.n_epochs_decay, time.time() - epoch_start_time))
+        model.update_learning_rate()  # update learning rates at the end of every epoch.
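
A side note on the timing line above: `optimize_time` is an exponential moving average of the per-sample optimization time, so each new measurement enters with weight 0.005 while the history keeps weight 0.995. The same update in isolation:

    def ema(prev, sample, alpha=0.005):
        # alpha weights the newest sample; 1 - alpha weights the running history
        return alpha * sample + (1 - alpha) * prev
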
diff --git a/util/__init__.py b/util/__init__.py
new file mode 100644
index 0000000..718f8f6
--- /dev/null
+++ b/util/__init__.py
@@ -0,0 +1,2 @@
+"""This package includes a miscellaneous collection of useful helper functions."""
+from util import *
zt+MH>v?`{owyG$-q&k>u%}vMDS~c$T(pRiC&&#}mvd*h~4&_4R^%uJqukrb>tkxwS z@CCkz+7kZ(U*eZhyUcI$WquX424CUVFy{)t&TpVx=BxY#lvkrte+6s2$X~*!YutIr znm;^wZKoH@V36@q5(S+sRbiaUK*ob1+Px?lMk45r(vFHV@V~{;S<}+>p@>t}KJC~B z%X=^g_aam(+u|)|EEkeb;_FRMc-V-bZHW?*GLZ@hx~f#4BT^$WPoxgg^mH|rkdO+~ zPNdykEUvvIJB)-bhhlFS3cUT&U9mSBM5)?29!B?s$OOb<9sHSzD_G;d?{4r=g_|2E z{o{>J#v{3LZ)@whx#k4C) zyJA+=a;;kN*y1WUGts+f&>26$BVPwmtP9cSY-0Vv8rw&!x%CnIyfJo^o!hySTe)+M zO&p6oX*^=N4RKrBO-Il&+C9MWbV)`kR7y16)*O$z;V4n zHpeZEAk8Fb2=yM+RKo_ zmpCy_>27PQt=Y+=2WjxKyd9`sB%(m}vQff=-6#lyP7+EP9QHC91u0ZK7=)_RizJ2z zaT*vU=!EDzjDg=s1*4QlLUu9{t+kuYk}ip8n7}N=B^*gtU}Bk2NxZA)i)y>DLS1gR zdE8O$w)SEb4WzE$l`;}!frT>ptpx>9N2=XPWSjP){bFv*2Zxq)P->UD3@h91Uj(Dk zjZ=;VaHw{iPT&0!b|x1=n6+q>ssXhs+bgzM18)WpfkW0~0QwS@!k%Hpwp6ALi-mOu z0L~lRscWp<=GHaV_u<+wu3wt4ei=^%fI7L;hdaE+xC6ji%IePntjffOT3Y=Y>eY$O zovV!dHyAH9FoV`K^X5qr0N~uoKW}bj=>Y^Kf!0ox%1i{I5aBW4Q5ksYXfQ;B{8Z7g zUabcBeHID`N&&35O`CXeIvlBXF?LOv#hSruuLYzSMdx&(qOVx*Z1zCG^n*2`Os7P0o2va$PfoMOJ=Irnh=Oo$kV{TKtGghh@t#qwbxR083-F7YUHl3uDM(QvV`!hu)C7mg14-HN}18Q~jHu)6e zG1kW}c?+msMJ(ku`4&8e2cH7vdXtUe8M%wnd9q~uh`sNM9rBkq(5HM-L5rcZsWg$* zJNe62m~=)-sGw{q4@|@~SPv*10rwDy8&%wuSu#@5+MWn`jIbe_&(}+w5Nbsj1=GVC zwcI(+nW>?1Q)`G9W(;Yr;Y@XII^T>wRir{QwvDF1WmOT-rlz4?+MZY^IlFWO?S#8h zP)O09IUItcgEhsgV2Sq(zmttp=!#k8BC?A-4);3qi{kS{5w4MWun3Vexae(*B%R?{^F+6*=@Z%=W4`hK${NQS!771_X; zzW52XeF5)u5@`oMQjd4C@JSa=kvOFy-)3Qos*Q1PJYQgTwKR%-@ zcV@G&ED{$qRbVS z3}Y1JyNg^G&cDcY%P7So@MpO$MMTmQqazd$X4i~$Q$yNOV`Ba2#hfz5+SpPii$t!w z`iP}vv^z3xDV|SvI+Y}$A$BY7Iiq@GT+B=Kmi{HYy^N@`l$Xf? zJ$dC3!-&gBJMbal_jbQQsRh*1-!`e+Stf^@rNiUFcays-+UL%KJP6P9K?O%ehktoZQr~5%X<%ZiuhlppwXUYfJNTu zCRw_Nv~M_!QjW9`Z`{|m%9@qpb{)}DyYvEFgXra4Te0@CG-|7sPX)5j3Y8{?=*o0d zTU|g|H*ljOTWl92+DW(@C4a=qk}jFd@{u#RxKl17dss%(3Ls+-WfGt`NX( zzkUwj1;7tA;~oIMlsn%K@L9yRM2NjMmy8%i2XzCf;8Zj6b*N@QBkyOB6Aw_+b0%Xv zz^98W)m4+GcQb*jW%cwEPMEYRDw}5HqEFK))f4+f5|HyiA_2sAi((%G8jupnhzyB5 zAwu6e3Sut*^=D!G6MBo#e*rLnb%;Kg_|J>t9u@Y^{pNLD!p%7A;9^{Dk0otE zzO2}*QDO>6E$6cbpB5_o1@#6*D9XHHst;drOGhrdToR??lBVijeur+F+&r#x3KmFMeMs+X&c>SB4mTrJoC E2a$IhQUCw| literal 0 HcmV?d00001 diff --git a/util/__pycache__/visualizer.cpython-36.pyc b/util/__pycache__/visualizer.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01a528d9a33b5b3b3fe060ef1674ea846adf3840 GIT binary patch literal 8660 zcmb_hO>o>scE;aeFyQb<6h%t%T7qR!($q?%EqSw!M04$xWUqD|tzu~{d9p4PI1LUl z!~mm4k4Vm9wo)}t;)^OuRgPP!9CAo5x#W^t?t4ut2ON__N>w@JnA*$cd)=7fke1e! zRE9)%qkms_zxTZ#+?btpZ~Wx^)<1k#)BatX`k5$yh$qXDFpcT4*3@NgG!2=XO;hGp z(~^0mSz!h z_u%eJ13mtOI&0d0{otjB0j-L9)fSN3cy8m#{uN2Cb+nN-tO&hhBH*{@MH#ahd^B7ytXY={oWR>TpuH{Z=HlL$)^BPJs zvgD5Ip8ohT(La|vc`bKyi`maD9Cj6lWQ7uV^Oxd=DHq! z4+0)^LlN@qrhKQmy>P3C?%oyZ`=dvH@Tu2|gDh)!*RILovTEE(HlaEYUM5mbi|mA% zTrq5C-jys2J-VN@-hsEBa<7X%9t%Yf#aUyr`v4Y4Wb2028oxd zS`gk6QJMt&;K+(s671pXc#~Jc2qbV%gh_@4@t^d&y@Q6=@OnYSv&o|JQ2bsXc0i`! 
diff --git a/util/get_data.py b/util/get_data.py
new file mode 100644
--- /dev/null
+++ b/util/get_data.py
+from __future__ import print_function
+import os
+import tarfile
+import requests
+from warnings import warn
+from zipfile import ZipFile
+from bs4 import BeautifulSoup
+from os.path import abspath, isdir, join, basename
+
+
+class GetData(object):
+    """A Python script for downloading CycleGAN or pix2pix datasets.
+
+    Parameters:
+        technique (str) -- One of: 'cyclegan' or 'pix2pix'.
+        verbose (bool)  -- If True, print additional information.
+
+    Examples:
+        >>> from util.get_data import GetData
+        >>> gd = GetData(technique='cyclegan')
+        >>> new_data_path = gd.get(save_path='./datasets')  # options will be displayed.
+
+    Alternatively, you can use bash scripts: 'scripts/download_pix2pix_model.sh'
+    and 'scripts/download_cyclegan_model.sh'.
+ """ + + def __init__(self, technique='cyclegan', verbose=True): + url_dict = { + 'pix2pix': 'http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/', + 'cyclegan': 'https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets' + } + self.url = url_dict.get(technique.lower()) + self._verbose = verbose + + def _print(self, text): + if self._verbose: + print(text) + + @staticmethod + def _get_options(r): + soup = BeautifulSoup(r.text, 'lxml') + options = [h.text for h in soup.find_all('a', href=True) + if h.text.endswith(('.zip', 'tar.gz'))] + return options + + def _present_options(self): + r = requests.get(self.url) + options = self._get_options(r) + print('Options:\n') + for i, o in enumerate(options): + print("{0}: {1}".format(i, o)) + choice = input("\nPlease enter the number of the " + "dataset above you wish to download:") + return options[int(choice)] + + def _download_data(self, dataset_url, save_path): + if not isdir(save_path): + os.makedirs(save_path) + + base = basename(dataset_url) + temp_save_path = join(save_path, base) + + with open(temp_save_path, "wb") as f: + r = requests.get(dataset_url) + f.write(r.content) + + if base.endswith('.tar.gz'): + obj = tarfile.open(temp_save_path) + elif base.endswith('.zip'): + obj = ZipFile(temp_save_path, 'r') + else: + raise ValueError("Unknown File Type: {0}.".format(base)) + + self._print("Unpacking Data...") + obj.extractall(save_path) + obj.close() + os.remove(temp_save_path) + + def get(self, save_path, dataset=None): + """ + + Download a dataset. + + Parameters: + save_path (str) -- A directory to save the data to. + dataset (str) -- (optional). A specific dataset to download. + Note: this must include the file extension. + If None, options will be presented for you + to choose from. + + Returns: + save_path_full (str) -- the absolute path to the downloaded data. + + """ + if dataset is None: + selected_dataset = self._present_options() + else: + selected_dataset = dataset + + save_path_full = join(save_path, selected_dataset.split('.')[0]) + + if isdir(save_path_full): + warn("\n'{0}' already exists. Voiding Download.".format( + save_path_full)) + else: + self._print('Downloading Data...') + url = "{0}/{1}".format(self.url, selected_dataset) + self._download_data(url, save_path=save_path) + + return abspath(save_path_full) diff --git a/util/html.py b/util/html.py new file mode 100644 index 0000000..cc3262a --- /dev/null +++ b/util/html.py @@ -0,0 +1,86 @@ +import dominate +from dominate.tags import meta, h3, table, tr, td, p, a, img, br +import os + + +class HTML: + """This HTML class allows us to save images and write texts into a single HTML file. + + It consists of functions such as (add a text header to the HTML file), + (add a row of images to the HTML file), and (save the HTML to the disk). + It is based on Python library 'dominate', a Python library for creating and manipulating HTML documents using a DOM API. + """ + + def __init__(self, web_dir, title, refresh=0): + """Initialize the HTML classes + + Parameters: + web_dir (str) -- a directory that stores the webpage. 
diff --git a/util/html.py b/util/html.py
new file mode 100644
index 0000000..cc3262a
--- /dev/null
+++ b/util/html.py
@@ -0,0 +1,86 @@
+import dominate
+from dominate.tags import meta, h3, table, tr, td, p, a, img, br
+import os
+
+
+class HTML:
+    """This HTML class allows us to save images and write texts into a single HTML file.
+
+    It consists of functions such as <add_header> (add a text header to the HTML file),
+    <add_images> (add a row of images to the HTML file), and <save> (save the HTML to the disk).
+    It is based on 'dominate', a Python library for creating and manipulating HTML documents using a DOM API.
+    """
+
+    def __init__(self, web_dir, title, refresh=0):
+        """Initialize the HTML class
+
+        Parameters:
+            web_dir (str) -- a directory that stores the webpage. The HTML file will be created at <web_dir>/index.html; images will be saved at <web_dir>/images/
+            title (str)   -- the webpage name
+            refresh (int) -- how often the website refreshes itself; if 0, no refreshing
+        """
+        self.title = title
+        self.web_dir = web_dir
+        self.img_dir = os.path.join(self.web_dir, 'images')
+        if not os.path.exists(self.web_dir):
+            os.makedirs(self.web_dir)
+        if not os.path.exists(self.img_dir):
+            os.makedirs(self.img_dir)
+
+        self.doc = dominate.document(title=title)
+        if refresh > 0:
+            with self.doc.head:
+                meta(http_equiv="refresh", content=str(refresh))
+
+    def get_image_dir(self):
+        """Return the directory that stores images"""
+        return self.img_dir
+
+    def add_header(self, text):
+        """Insert a header to the HTML file
+
+        Parameters:
+            text (str) -- the header text
+        """
+        with self.doc:
+            h3(text)
+
+    def add_images(self, ims, txts, links, width=400):
+        """add images to the HTML file
+
+        Parameters:
+            ims (str list)   -- a list of image paths
+            txts (str list)  -- a list of image names shown on the website
+            links (str list) -- a list of hyperref links; when you click an image, it will redirect you to a new page
+        """
+        self.t = table(border=1, style="table-layout: fixed;")  # Insert a table
+        self.doc.add(self.t)
+        with self.t:
+            with tr():
+                for im, txt, link in zip(ims, txts, links):
+                    with td(style="word-wrap: break-word;", halign="center", valign="top"):
+                        with p():
+                            with a(href=os.path.join('images', link)):
+                                img(style="width:%dpx" % width, src=os.path.join('images', im))
+                            br()
+                            p(txt)
+
+    def save(self):
+        """save the current content to the HTML file"""
+        html_file = '%s/index.html' % self.web_dir
+        f = open(html_file, 'wt')
+        f.write(self.doc.render())
+        f.close()
+
+
+if __name__ == '__main__':  # we show an example usage here.
+    html = HTML('web/', 'test_html')
+    html.add_header('hello world')
+
+    ims, txts, links = [], [], []
+    for n in range(4):
+        ims.append('image_%d.png' % n)
+        txts.append('text_%d' % n)
+        links.append('image_%d.png' % n)
+    html.add_images(ims, txts, links)
+    html.save()
+ """ + if self.pool_size == 0: # if the buffer size is 0, do nothing + return images + return_images = [] + for image in images: + image = torch.unsqueeze(image.data, 0) + if self.num_imgs < self.pool_size: # if the buffer is not full; keep inserting current images to the buffer + self.num_imgs = self.num_imgs + 1 + self.images.append(image) + return_images.append(image) + else: + p = random.uniform(0, 1) + if p > 0.5: # by 50% chance, the buffer will return a previously stored image, and insert the current image into the buffer + random_id = random.randint(0, self.pool_size - 1) # randint is inclusive + tmp = self.images[random_id].clone() + self.images[random_id] = image + return_images.append(tmp) + else: # by another 50% chance, the buffer will return the current image + return_images.append(image) + return_images = torch.cat(return_images, 0) # collect all the images and return + return return_images diff --git a/util/util.py b/util/util.py new file mode 100644 index 0000000..5702d37 --- /dev/null +++ b/util/util.py @@ -0,0 +1,166 @@ +"""This module contains simple helper functions """ +from __future__ import print_function +import torch +import numpy as np +from PIL import Image +import os +import importlib +import argparse +from argparse import Namespace +import torchvision + + +def str2bool(v): + if isinstance(v, bool): + return v + if v.lower() in ('yes', 'true', 't', 'y', '1'): + return True + elif v.lower() in ('no', 'false', 'f', 'n', '0'): + return False + else: + raise argparse.ArgumentTypeError('Boolean value expected.') + + +def copyconf(default_opt, **kwargs): + conf = Namespace(**vars(default_opt)) + for key in kwargs: + setattr(conf, key, kwargs[key]) + return conf + + +def find_class_in_module(target_cls_name, module): + target_cls_name = target_cls_name.replace('_', '').lower() + clslib = importlib.import_module(module) + cls = None + for name, clsobj in clslib.__dict__.items(): + if name.lower() == target_cls_name: + cls = clsobj + + assert cls is not None, "In %s, there should be a class whose name matches %s in lowercase without underscore(_)" % (module, target_cls_name) + + return cls + + +def tensor2im(input_image, imtype=np.uint8): + """"Converts a Tensor array into a numpy image array. 
diff --git a/util/util.py b/util/util.py
new file mode 100644
index 0000000..5702d37
--- /dev/null
+++ b/util/util.py
@@ -0,0 +1,166 @@
+"""This module contains simple helper functions """
+from __future__ import print_function
+import torch
+import numpy as np
+from PIL import Image
+import os
+import importlib
+import argparse
+from argparse import Namespace
+import torchvision
+
+
+def str2bool(v):
+    if isinstance(v, bool):
+        return v
+    if v.lower() in ('yes', 'true', 't', 'y', '1'):
+        return True
+    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
+        return False
+    else:
+        raise argparse.ArgumentTypeError('Boolean value expected.')
+
+
+def copyconf(default_opt, **kwargs):
+    conf = Namespace(**vars(default_opt))
+    for key in kwargs:
+        setattr(conf, key, kwargs[key])
+    return conf
+
+
+def find_class_in_module(target_cls_name, module):
+    target_cls_name = target_cls_name.replace('_', '').lower()
+    clslib = importlib.import_module(module)
+    cls = None
+    for name, clsobj in clslib.__dict__.items():
+        if name.lower() == target_cls_name:
+            cls = clsobj
+
+    assert cls is not None, "In %s, there should be a class whose name matches %s in lowercase without underscore(_)" % (module, target_cls_name)
+
+    return cls
+
+
+def tensor2im(input_image, imtype=np.uint8):
+    """Converts a Tensor array into a numpy image array.
+
+    Parameters:
+        input_image (tensor) -- the input image tensor array
+        imtype (type)        -- the desired type of the converted numpy array
+    """
+    if not isinstance(input_image, np.ndarray):
+        if isinstance(input_image, torch.Tensor):  # get the data from a variable
+            image_tensor = input_image.data
+        else:
+            return input_image
+        image_numpy = image_tensor[0].clamp(-1.0, 1.0).cpu().float().numpy()  # convert it into a numpy array
+        if image_numpy.shape[0] == 1:  # grayscale to RGB
+            image_numpy = np.tile(image_numpy, (3, 1, 1))
+        image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0  # post-processing: transpose and scaling
+    else:  # if it is a numpy array, do nothing
+        image_numpy = input_image
+    return image_numpy.astype(imtype)
+
+
+def diagnose_network(net, name='network'):
+    """Calculate and print the mean of average absolute(gradients)
+
+    Parameters:
+        net (torch network) -- Torch network
+        name (str)          -- the name of the network
+    """
+    mean = 0.0
+    count = 0
+    for param in net.parameters():
+        if param.grad is not None:
+            mean += torch.mean(torch.abs(param.grad.data))
+            count += 1
+    if count > 0:
+        mean = mean / count
+    print(name)
+    print(mean)
+
+
+def save_image(image_numpy, image_path, aspect_ratio=1.0):
+    """Save a numpy image to the disk
+
+    Parameters:
+        image_numpy (numpy array) -- input numpy array
+        image_path (str)          -- the path of the image
+    """
+
+    image_pil = Image.fromarray(image_numpy)
+    h, w, _ = image_numpy.shape
+
+    if aspect_ratio is None:
+        pass
+    elif aspect_ratio > 1.0:
+        image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
+    elif aspect_ratio < 1.0:
+        image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
+    image_pil.save(image_path)
+
+
+def print_numpy(x, val=True, shp=False):
+    """Print the mean, min, max, median, std, and size of a numpy array
+
+    Parameters:
+        val (bool) -- if print the values of the numpy array
+        shp (bool) -- if print the shape of the numpy array
+    """
+    x = x.astype(np.float64)
+    if shp:
+        print('shape,', x.shape)
+    if val:
+        x = x.flatten()
+        print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
+            np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
+
+
+def mkdirs(paths):
+    """create empty directories if they don't exist
+
+    Parameters:
+        paths (str list) -- a list of directory paths
+    """
+    if isinstance(paths, list) and not isinstance(paths, str):
+        for path in paths:
+            mkdir(path)
+    else:
+        mkdir(paths)
+
+
+def mkdir(path):
+    """create a single empty directory if it didn't exist
+
+    Parameters:
+        path (str) -- a single directory path
+    """
+    if not os.path.exists(path):
+        os.makedirs(path)
+
+
+def correct_resize_label(t, size):
+    device = t.device
+    t = t.detach().cpu()
+    resized = []
+    for i in range(t.size(0)):
+        one_t = t[i, :1]
+        one_np = np.transpose(one_t.numpy().astype(np.uint8), (1, 2, 0))
+        one_np = one_np[:, :, 0]
+        one_image = Image.fromarray(one_np).resize(size, Image.NEAREST)
+        resized_t = torch.from_numpy(np.array(one_image)).long()
+        resized.append(resized_t)
+    return torch.stack(resized, dim=0).to(device)
+
+
+def correct_resize(t, size, mode=Image.BICUBIC):
+    device = t.device
+    t = t.detach().cpu()
+    resized = []
+    for i in range(t.size(0)):
+        one_t = t[i:i + 1]
+        one_image = Image.fromarray(tensor2im(one_t)).resize(size, mode)
+        resized_t = torchvision.transforms.functional.to_tensor(one_image) * 2 - 1.0
+        resized.append(resized_t)
+    return torch.stack(resized, dim=0).to(device)
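
For reference, tensor2im and save_image round-trip a network output in [-1, 1] to a PNG on disk; the file name is illustrative:

    import torch
    from util import util

    t = torch.rand(1, 3, 64, 64) * 2 - 1  # fake (B, C, H, W) output in [-1, 1]
    arr = util.tensor2im(t)               # uint8 HWC array in [0, 255]
    util.save_image(arr, 'example.png')
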
diff --git a/util/visualizer.py b/util/visualizer.py
new file mode 100644
index 0000000..c17f2c6
--- /dev/null
+++ b/util/visualizer.py
@@ -0,0 +1,242 @@
+import numpy as np
+import os
+import sys
+import ntpath
+import time
+from . import util, html
+from subprocess import Popen, PIPE
+
+if sys.version_info[0] == 2:
+    VisdomExceptionBase = Exception
+else:
+    VisdomExceptionBase = ConnectionError
+
+
+def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256):
+    """Save images to the disk.
+
+    Parameters:
+        webpage (the HTML class)  -- the HTML webpage class that stores these images (see html.py for more details)
+        visuals (OrderedDict)     -- an ordered dictionary that stores (name, images (either tensor or numpy)) pairs
+        image_path (str)          -- the string is used to create image paths
+        aspect_ratio (float)      -- the aspect ratio of saved images
+        width (int)               -- the images will be resized to width x width
+
+    This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.
+    """
+    image_dir = webpage.get_image_dir()
+    short_path = ntpath.basename(image_path[0])
+    name = os.path.splitext(short_path)[0]
+
+    webpage.add_header(name)
+    ims, txts, links = [], [], []
+
+    for label, im_data in visuals.items():
+        im = util.tensor2im(im_data)
+        image_name = '%s/%s.png' % (label, name)
+        os.makedirs(os.path.join(image_dir, label), exist_ok=True)
+        save_path = os.path.join(image_dir, image_name)
+        util.save_image(im, save_path, aspect_ratio=aspect_ratio)
+        ims.append(image_name)
+        txts.append(label)
+        links.append(image_name)
+    webpage.add_images(ims, txts, links, width=width)
+
+
+class Visualizer():
+    """This class includes several functions that can display/save images and print/save logging information.
+
+    It uses the Python library 'visdom' for display, and the Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images.
+    """
+
+    def __init__(self, opt):
+        """Initialize the Visualizer class
+
+        Parameters:
+            opt -- stores all the experiment flags; needs to be a subclass of BaseOptions
+        Step 1: Cache the training/test options
+        Step 2: connect to a visdom server
+        Step 3: create an HTML object for saving HTML files
+        Step 4: create a logging file to store training losses
+        """
+        self.opt = opt  # cache the option
+        if opt.display_id is None:
+            self.display_id = np.random.randint(100000) * 10  # just a random display id
+        else:
+            self.display_id = opt.display_id
+        self.use_html = opt.isTrain and not opt.no_html
+        self.win_size = opt.display_winsize
+        self.name = opt.name
+        self.port = opt.display_port
+        self.saved = False
+        if self.display_id > 0:  # connect to a visdom server given <display_port> and <display_server>
+            import visdom
+            self.plot_data = {}
+            self.ncols = opt.display_ncols
+            if "tensorboard_base_url" not in os.environ:
+                self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port, env=opt.display_env)
+            else:
+                self.vis = visdom.Visdom(port=2004,
+                                         base_url=os.environ['tensorboard_base_url'] + '/visdom')
+            if not self.vis.check_connection():
+                self.create_visdom_connections()
+
+        if self.use_html:  # create an HTML object at <checkpoints_dir>/web/; images will be saved under <checkpoints_dir>/web/images/
+            self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
+            self.img_dir = os.path.join(self.web_dir, 'images')
+            print('create web directory %s...' % self.web_dir)
+            util.mkdirs([self.web_dir, self.img_dir])
+        # create a logging file to store training losses
+        self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
+        with open(self.log_name, "a") as log_file:
+            now = time.strftime("%c")
+            log_file.write('================ Training Loss (%s) ================\n' % now)
+
+    def reset(self):
+        """Reset the self.saved status"""
+        self.saved = False
+
+    def create_visdom_connections(self):
+        """If the program could not connect to Visdom server, this function will start a new server at port <self.port>"""
+        cmd = sys.executable + ' -m visdom.server -p %d &>/dev/null &' % self.port
+        print('\n\nCould not connect to Visdom server. \n Trying to start a server....')
+        print('Command: %s' % cmd)
+        Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
+
+    def display_current_results(self, visuals, epoch, save_result):
+        """Display current results on visdom; save current results to an HTML file.
+
+        Parameters:
+            visuals (OrderedDict) -- dictionary of images to display or save
+            epoch (int)           -- the current epoch
+            save_result (bool)    -- if save the current results to an HTML file
+        """
+        if self.display_id > 0:  # show images in the browser using visdom
+            ncols = self.ncols
+            if ncols > 0:  # show all the images in one visdom panel
+                ncols = min(ncols, len(visuals))
+                h, w = next(iter(visuals.values())).shape[:2]
+                table_css = """<style>
+                        table {border-collapse: separate; border-spacing: 4px; white-space: nowrap; text-align: center}
+                        table td {width: %dpx; height: %dpx; padding: 4px; outline: 4px solid black}
+                        </style>""" % (w, h)  # create a table css
+                # create a table of images.
+                title = self.name
+                label_html = ''
+                label_html_row = ''
+                images = []
+                idx = 0
+                for label, image in visuals.items():
+                    image_numpy = util.tensor2im(image)
+                    label_html_row += '<td>%s</td>' % label
+                    images.append(image_numpy.transpose([2, 0, 1]))
+                    idx += 1
+                    if idx % ncols == 0:
+                        label_html += '<tr>%s</tr>' % label_html_row
+                        label_html_row = ''
+                white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255
+                while idx % ncols != 0:
+                    images.append(white_image)
+                    label_html_row += '<td></td>'
+                    idx += 1
+                if label_html_row != '':
+                    label_html += '<tr>%s</tr>' % label_html_row
+                try:
+                    self.vis.images(images, ncols, 2, self.display_id + 1,
+                                    None, dict(title=title + ' images'))
+                    label_html = '<table>%s</table>' % label_html
+                    self.vis.text(table_css + label_html, win=self.display_id + 2,
+                                  opts=dict(title=title + ' labels'))
+                except VisdomExceptionBase:
+                    self.create_visdom_connections()
+
+            else:  # show each image in a separate visdom panel;
+                idx = 1
+                try:
+                    for label, image in visuals.items():
+                        image_numpy = util.tensor2im(image)
+                        self.vis.image(
+                            image_numpy.transpose([2, 0, 1]),
+                            self.display_id + idx,
+                            None,
+                            dict(title=label)
+                        )
+                        idx += 1
+                except VisdomExceptionBase:
+                    self.create_visdom_connections()
+
+        if self.use_html and (save_result or not self.saved):  # save images to an HTML file if they haven't been saved.
+            self.saved = True
+            # save images to the disk
+            for label, image in visuals.items():
+                image_numpy = util.tensor2im(image)
+                img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))
+                util.save_image(image_numpy, img_path)
+
+            # update website
+            webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=0)
+            for n in range(epoch, 0, -1):
+                webpage.add_header('epoch [%d]' % n)
+                ims, txts, links = [], [], []
+
+                for label, image_numpy in visuals.items():
+                    image_numpy = util.tensor2im(image_numpy)
+                    img_path = 'epoch%.3d_%s.png' % (n, label)
+                    ims.append(img_path)
+                    txts.append(label)
+                    links.append(img_path)
+                webpage.add_images(ims, txts, links, width=self.win_size)
+            webpage.save()
+
+    def plot_current_losses(self, epoch, counter_ratio, losses):
+        """display the current losses on visdom display: dictionary of error labels and values
+
+        Parameters:
+            epoch (int)           -- current epoch
+            counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1
+            losses (OrderedDict)  -- training losses stored in the format of (name, float) pairs
+        """
+        if len(losses) == 0:
+            return
+
+        plot_name = '_'.join(list(losses.keys()))
+
+        if plot_name not in self.plot_data:
+            self.plot_data[plot_name] = {'X': [], 'Y': [], 'legend': list(losses.keys())}
+
+        plot_data = self.plot_data[plot_name]
+        plot_id = list(self.plot_data.keys()).index(plot_name)
+
+        plot_data['X'].append(epoch + counter_ratio)
+        plot_data['Y'].append([losses[k] for k in plot_data['legend']])
+        try:
+            self.vis.line(
+                X=np.stack([np.array(plot_data['X'])] * len(plot_data['legend']), 1),
+                Y=np.array(plot_data['Y']),
+                opts={
+                    'title': self.name,
+                    'legend': plot_data['legend'],
+                    'xlabel': 'epoch',
+                    'ylabel': 'loss'},
+                win=self.display_id - plot_id)
+        except VisdomExceptionBase:
+            self.create_visdom_connections()
+
+    # losses: same format as |losses| of plot_current_losses
+    def print_current_losses(self, epoch, iters, losses, t_comp, t_data):
+        """print current losses on console; also save the losses to the disk
+
+        Parameters:
+            epoch (int)          -- current epoch
+            iters (int)          -- current training iteration during this epoch (reset to 0 at the end of every epoch)
+            losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
+            t_comp (float)       -- computational time per data point (normalized by batch_size)
+            t_data (float)       -- data loading time per data point (normalized by batch_size)
+        """
+        message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data)
+        for k, v in losses.items():
+            message += '%s: %.3f ' % (k, v)
+
+        print(message)  # print the message
+        with open(self.log_name, "a") as log_file:
+            log_file.write('%s\n' % message)  # save the message

zAc8XRQELOlZ>tshcreSanp0S(9ws*W(0=CBg>lnDt}*G7w;IW~_E5#2En>M_8q3(e z0gB)vq*8w>%qtqz!%;w}-dYBAE&>Gt`gh?{?ivq;r)Ht*Hd5x2;*$n&28#Dr{VjD% zFnWCS<)5(3%(a=*D^tEn&zLq+u8yvjH44FTMf2H&H7g2~lgqFm{5WZmoV49K+R`+M zBd5xd<_Wk{DOz@l8+{^lud4YsJJn@gw&U1@xgwe3#PW75n5`F)2I&gRO)5eFo)ret zMBtzufaN!W&n^fC*lC@;WK&sP&EB)#_^8=qK7qPLis#&W2N*pd%BDJ;dMrEnyS^Dx z*qK>?6j98PMHF)eXqLrPpL7Fb!e7p5W<+W`Oi{^9y97w0-`CGZIHp672*z^GL8#9Z zc!f1~6PGg%9fwh4IHps?{mJNOhFZ5S)U7NbUG82mZZ11}EqSG(Nl}iGFbFlf-CDsI zkA)0t>9?pkTP4)BR5f_Eeak{>n6tfRCq|P~M=YkG2x(%5C3eo?F}$g` zy^>cuNVxIgE_jxb%2a|vl5xhD*saB^g<~~XUyMywBTbfiYmwu^(+T<7>b%EQiqBu> zEpY)L2xl2m z?QWEV!(gdrD$G0f0gA9JlhK)|-kI>sN_-CoV?A~ug(V-74P252{{VE9-qt$yAn>6m zDWf9j(;R3F>laFTU61u+Eko4&n%Mn%;rNt!Z#YuZMDWs&EEa-dyS7OE(S@}g#|+)L zB?$)+JyemlbjcN7-m&_pnK~zzGRI8vekf_P&UlWW0KxMeLL*aQsXT(7T8U7z-q*i& zBY~^?;*yjn6)cb`YFPc5R{_R~r(1eaoc$(xe92f3tNCsg+SOy zp_OoqPH5wc=WB#XG9<`IPcp*W+>4tL#LGz6I;m0?3TLL06&OZwq`AwmrrnP_f<;{V8QD!$7-Ag zI);Z4iGw32mgqw4i)lM=bKAERC8Y$xFm@Vg4<$#gf{MJYnlavz<5}K{f@~76FH_=c zS43&$rFJxm&MG5z1xX=&)Ammi+aQI6j-PEPcf=1>V(7O@^L8VdGDbIsf1r?~D7>MuQ3X58lZ+%i*B z)#ciDH+p4}MZ|jCBU;uy?S4;NqS8Mkg$xKD-nx#roy95t04k~MKPSO13q56iH8hp- z+DI^QwlpDIV&2B%U)~%Wp`bw^nKTA^;YxK$hy#rx^S?=={##dFUyrL|bhPZz(lAnx zzzEt&eOZURFU3bH?^r5Ywo?|lj60`J2W=LtPXxj++)^qz@exx5kVP##_xv$^Zna;MgMt$^S@{hz&SI~`s4*P8^cc~qlNM}IiW2tSvQuTT z4RyV|PclCyZH7`92!R4RFsM^Ba^-IqHOMoEtt{)Z{61?nE;AxHStF5Z8T{68G}h+e zcyUXX$Xbq-)2uqN20be#TAvrdal9f*(lJM@SEpKdnH0$4{hX(3THuZ}87K)+RO3k8 zQ%aDN7~>UXK9M~vV;yV78S)Ntpvw5RDNRziwH#NHSfY6X%^cA{(K5cvy8EOZx)6CA zZ1mL@=N zG_5q1A2Ay|<(!?tWIB5X5BGVi8-|qPL?wGfMl_E#H>@ZjN|g>fXuEX#J9S$ohGbXij%H1TOj*g=d>#B|YiTb|qp zJZ+^xTX3rf%s#Md=FC{WFBd@*BWKyL$Z%{KaI6}Fu8$9;r9twOR7Fx2c%<_PCPi>g zgI?SKX4{>aF4?ff#-``RClk^|4(vCI%?_)+XzK$A6*nb%`}HlKGTtB4UZZqR0`*NC z6>?317ZO| z6S6kpOW#W#uyP(u%sJyH^;e-0J@I zG2Fp4)fjyS3RnnaHBzhz^HLB1j?3QnHnAiAU3q5lRF3X2>7KC@LOSr^Soz0^?3qu$ip zn#*@L^A|x&K_H3nsKk$WG?M!{4Y8&`2c$(&t!8P@_1T9N#Pd}1V%79m)kQ{Vt%$10 zh|5tljM5Ml3KG{O6&m|Fk{Na7;#*FkB@sO_P;PB!ZsOcZHmDe*36^sPRiVS9p`+?e z1w}I;35U%rZtzOy+(mP?#1mo(KTieLm-gz74q>#<4yp_}VzPp#S~Jo=3OfFkx@nB& zyvvAVzvkC4^0Y^{kSCq0})E?{$`S6L(;~i9tV_J#k zoTk;dumJoyr*4v!VxbhFC?t4NX&=>U%$Ghy&iyaMI;~D?`h|ek(_q+LA}X^>NlNYW z6tXELytWF)+KX{@mC6$1YZ!udboEw}!sis>U;oyv3Yha9LkvD9nPQQaNFXyxlGxu- z{T=7^2V0G-SOt{0R3Hf2i%+mV`py~v>Ax4^_VHcY7RaMsyZ~zADC{v*9 zPYj%}eI%SCrfF@eRHQ*9ka%pR(^X9_XUuC}Td%8$-fhMZkF^Wdbpzy46RXErQ9N=yZ6j_QhVIN1upLNUXxxty;6fC?<*4Dh%}>H^NOxYu>rw*hcRYw`Z=r5KV2Rd}{g z*wUa$+A9>fZl+3lwElfPV6wVODrR_PSJg;m4P*3nJ^AG@+EEf}z1yo8m;;|}cr9)g zXd$JosFpf)y=*c{^TtAJaAZ|m_*fo!w*oL2fz#7lLn{iAkFLSf(KQ~U9nWx8$5Bm7 zK}$;`14}aAXEB|1*n|f5>wYCFcf9dOrCConKHa0sAI7oN<8jo?lh+8^V#nrZrjWXr zz_48ha6EZ+C@2{e(`r#ANydHNbnionV$uHqFQmb6>Z<5*IZ}rWh9;hQYNeU>BvC}G zW@cvEp!nOBUC(xzBmf5;gRjz;Sz9=_5{AK2l%NM$o)vsu9qG4Ljt|om>YYAS!Lx2r zqeyXxY1!zj(uWX*NW*amX0eg%ZdyCru;)E@-#fQ<@#5NwkfL_<^p2|O&gR&$}Q#wu{wC4o)M%+z8tC~w>=ueXI?n_5O(o@%DlUkx(NqSFigdR)x5aQ0_!$26-^CPl%}o+Nr#-v6e{l`xNjwxko@ z1e6gQBi5GLxD-H0J3e)3*mu@19f^0sNqpN8%bxu-ij(&IU|JZ2mv@#@)@ zSt!L#cr=nbE39j6ucC_!00$US350im4y}OG=w&$0+C6UjY!*XRtJC!6NEIZ4WF_BqVj|LSCi#1hDMxFe|nkdnYfx8Qi66T0n zQ>YjNZ5r4q1zJ=#?DMMd=|^9>hBJ;Ij2@)7Qu-||Yv9sTR%G1g=s4$@+*)cv@zL~OBixa z;dLXmy{Cf|qZryKCP*AAl>Y!4e5sYIdIQsJSsi4}*=IDvmva8bF-Wu()RMhaGLi{v zn8SwfBkJVm^!0M@^T!%b*rv%Kbd8k(eMIHWIg%mOV5F=^uMb+*8m|nJqvmkvp@L?l zV>?ehU86}aeY!%$uj~Z#H<@6OfWU059C45or1A2nNX8{bzcpCURI;FoCBwEr$fZ`` zx07RSJB#r~k^m&*NmQWNh>$Cu7l@iks-3CK-dhRg#4#5|iZr$V0Nl`)J{)xlib~6= zN=S{oDiUIH)QMz|8>NTh8<^iq==WbDE)rnk(P6QtI}D~DOPYz2 zB5Fl7R590?F#Jtv@VJkcQ1x#Zt{`i4Qr-0J$C5<-!nLdyH3KAi)0K5y1#Gcz&9#j^ 
zSGpP~*p;yCb_;V$0cqNC=%od~g~r27;+7vJw`R)RPBf70rk*nyM(wppx3FL7=a9gU z6w@xPTCh7hDcU%(NTW3I;ew`l%18kL)vf!gHM4QlHg=}2hX{lmL8cr}1jlNts(h>- zDoT0(08K1)6Rp&Ps1bvqzlR$QFK!;%(QFkIk9}mr@Vv(%m_=1h1W*q-lvBo5Fa?1( z)HlbEA)-XhCqqjmM+^_XovJ8v3c!=KSvCsE1T3(_BA?$Nc$3Bwz!*&^ap#F9ctl3e zD&a_RS}`26VewT@B(7#zrZ5XPZ!F(lI6()6X$lffKG+^&xf&_WT2@ox^z=%f=`Bqx ztsu8&7G@(}=bl1|F)%8+5KNJ?-(A(?RoFDuEe0+s!)*%{BF3n?eT@Z;kA8brgGwEI zrgWXW;ZW-FI?PfEN|uJLD`C+tV@RZCKW9ZA?T_rR9Q8_>^M2jOGJ(lGVRFNcl3USg=fFyxfDzR!ggp$FPBaSy!638KCc?j-6 zXKkn2dF41ljO?Zkm8CjB2b_AU3fAFu6sZ*O{E|y3FZ`l4S(+aeAwj=y6Us<95KQz| z($=^noH*01T-6l?WQ!KAj)t8I$A-sDzH+A90<5eF`mN2nna3J^?-5KH=)+|v=%%i% z+d)T9i{f%u$gs5(?$OB7xd@6p#<05(G}DEq;V4NWLPrzBq|=rRyz0{0aVjw$eO0oz zqz_7&JJW7j%RNVrKO}yjYN@F|tT>KfrJ}2%%k-5?^HWe$C3=>Qe=Gp?D|=qUOWy{2 zQ|(pU;l*1w;bAHx36)1rS5 zD;kJU*R3@3%(V$Gb*Nb5kTR6isZgWa$Vt`6A}VEVqqc&IOwEZz`Ku{VWm@QPoU0^Q z_8E!GFnm82qOL`$k~oyHFuKT$sysW}TXW2~6xvAPUQicCtH2bYc6qAaSH#LNTrWRO*T@xzjmfxj#4M48el7HH=o#&bVvj zxeXzsiRr)L zy+ZX^L6E7X!dTJa-92ck7760Q*d6j>6>QsHrHZ6W+kz~kE-nD$$1_iko4HnyD3F37S$Ym@3yd~Paz2G5;fuWlL8yUvo#4J@WQaaSUTH?25&mxlwX zf|-~=&asoUcH9EH_T;EtOh%X$4~u}5$r5AIpD5wPs+nf5jebU@Ps`6LyGqt1fK}~p zj)$5-ka5{d8X-X?O4=upO*L;(hsQk7F@ow$j#13=Td-Y#uZEZ9opD-lJ{m~dZIvoP zoPE@hml>qM=w%QImO7~xX$@ZE?vHpY0yG!mZuKp3jx^HlQj~$FXJDrF%?m3nRW1%{ z>SjJv^q%yIa%>qcJUng2m2g~Cq)0T;eQ1>|K__6PZ9QW|3ROb|Czw=-W^J1R8mkf7 z{7{t74V1ChfRaq%O#K(i88tK2aN*}!!`XW+s@~nVP8viS*(cMdOKAN#=B!`(om^+gSjJ<-a*JWvg16*m zrK+niR#1N~R3J{Wd9p_7{t}V9gFyQK;YUP+PR&CbPxYxsh)G`6WjTGrA2d;``4iB3%uTZn* z1B~R{D<|cAvT$41KJVrKzW_pD*C=Jxk$LYefw|lU;_)p`xgP68jxa>-&AYv2Rkg*29T|J7@~b z9X!Ber<#Km#xVS)m|}=!be39LNfK&=oJ_Iocv)=3Zcs;l7$xKkG)aD)rImqX5IQbtYuXd4B~JHB~jE8pd!ew}wNC&*ib6m85!Qh&;p{j_wa_0O9`c z=8iVu)~!N{^NS=nj{;!%nj^k~I%OpvUa7HMrJ69TGM^NdyC&l}UNbE&9+->_iBSw9 zGRS6y%og%(Fwjf3n4+pl55oK)Ndjj>CEr&uLM4-eLZ z$uR0n$JYFn)!vn06qw!-)Ru=N^^~$acq3!d6JrFYGjU2n5GlMtGl7Qr($1p*uo ze3qt?n-YY}ERig!`YXns^LyHGlQYR2LKdJt(}hY3tdhO#Havj$MbX_i>xm1`u2Wla@ia)v6S+yd7pgwI>+vnv?;h&*WB ztCgnY6NVgk(U#8HJ3Z#^nBuuhDdTw0Oz6!!MHXVo*zG-RwDC<)BaUVH`5|N~q?SW} z5H4-P3mb$sw30OpWKpZh)(99;RqAG5>X)Z3pA5}OisLaqp*EUmP z_4N2oe5{g^rjKOeI(Z~iZ?UT)u~|X4eh%ysRG_j=2JTm;K}3<$TL*p+o}l{soUrb( z^v4;^@l|2@QE4%(8xF!~WTKv!loYJHRe#-n(Q)1NJc)PDD@C)&C{*g1<5Cvv-xpAI zrX!-R^8(B`_`tEbGfsNMDrw>(qNbWwW%+7)bt&h2pfqhFfX=7KfYA~-(5fWig;IY} z{c_|ilb!Q6d(6FJ#(JSX4_8$|ljt(H%~Iv67*v#Mc?(!cH@d7{yhpp^z;*8Rp{1!Q zNm_>vG-S?O-hIbNpkJi9cCXZSeZc)rbqA?<&R@*+6xDQDM+?9ON+l*0<{13R{{VEw zDvcC)^)Ngaf938ZCYlbdiCle4(3hOs$r1yaNi)a%LLnc(r`Lm|IJEp@ZR;pBzbBR)! zrKYKlC7Po1JPbe}GOq2Nmil;ELY~&GQGu#=K{!z+^-I@IxM4jbqUo+o$E&f-YpCr@ zf{JV|lAZCo>@vPX6Yh=dJxx>OWBNta7rhOwCy%(9_aKAf^;_>J}tH=0{)^K?EDzp8Uvsp2W1! 
zN=$^|QC@X?U394XD6C^N^>{U124ZoG(YY*Tu1R+~$0*yhB(gH?_KR>(gsCYcj3_$e zp<6v^Lk3uz8^EivY)=ua#p|12GLiB&ewnub)hX*0Ss6ujzf{0>?rR)xhPosRyj=qic(>s2=K*1Ro()O^H)g ze=WoU2%rz;7W0n2=vCZ7@xKick_?U%fTz7oU_qt6FOE>-6 znjHGK*Zdlb1M6m9!l;uGgQ&TKFypkCE+rCGNr%f*Txb{0nxo5=nm?B%JD+<|z&250 z)z)IefcQ}ajs^t{Gi%k_w)$cUu#vWkXHT%Wu+K7&8_AfiDII)G6+K=IsVI&;5z$&H zOQQ`A)))2*a(>J1rhFuho{EQ7*+>OS8jT3DVm&&d5Yf~@ieb2no8e#QH1dL>1DsDI=)TCmF*s_9GoaV0q#;s-@dihf_+*l0wb3fM&QL zlY5Rj`q^5*O7QEiTe!5&w&6WgX#Fu9Lj}&&c%@fRrpP&`EYZb}V9$osJP>1(qlpDG z%$D~K+c;}`r%Rp&x3f}`mX_AAt%O02gGQUa6D7niOmx;hr)TUhG-sS%STO3dLy5e& z-99aew2x65yItWd%-dNzSg_NV%HLaP2-AviJTVmRYy!zCUM7%uZg7hSu7@7V`G!oj zjnYi9Qr1zD8d%M@d&6+Gw0m&c=rH>wE+R)?O%XDe*fNe3VDjIt9=LRWGS%Sp7>*f> zN1iK8nTG?$usHEIt^Q{Kr4flq^9pFJss)sb@ByA=`Fn8TQBIM-z>caHhb4NfVF*t+tE%7bQcPefk5|6x+4xFjApt@F1p~@$51iK zs@F{0kmsd_o}z^}Nk@fOQ_?X3RM_q|U`CwLAD3}OG8P>hUtu<8jlg!i<)6^Z07%gq&_ABARm9rPN zESFFUwRog#iVNl!Hj0*?qRKrJvOB~d%M>?5l zs$-rna!)og31#fscyemXZ(6!aA5I}?j*LY`nf;5_(1EokL7k(9G#b+6OfH3En+Kzc zDoXY=d4mj;F4~G|JKFyMybibHj^!z6cZl29(Mg-k6yl-DkT5+4hQ5bnZnQ_8^2S5x zZg$5qjw6ZUa$;Fa3Cq}xL=ne|)=~^US@*=jhUgb-p4)r&;1_u}Gb>vwxqTxkGDIgMA@b(8OEN;HA7L*PYDcGuS-04U=}D-rZ3)1Im3EUk%k2R~z3R~pL| z^%y;7Q_FaRRbcqM14YN7zLBV7+;eR;phwh~gH4$NtLxr>AlHc`36`cauO zr}8Ynh0;>iWLz!aqNg;`GGd8?K>()|h2@drcU>GvW85un_B^q)LQ@DHRLWnTGHd_W zBvhSNf}*0BR?$U=I&Tk8hSR&q;9qaNv7daf7So+oPG*u$5_+qa32-UE2sm@bp79*r zNmqr_(czSMof(={`Q>r?Kv>?zS(SH-+wEH1vfu9(HpfM6dQJeuoDY`MT&9lBA~ z%ubzwR~NTqdvTVV)(8pVvYd1P0+MEMr@C~8f(Q+E0Yy(5AgO9hHjZ{;7qx?`mDcy= zmhW^y0~p~~ZIS@PX33_i%-=m@G&nw0FaoVpQ7sf`>;}`|P1uk-TT5{z!_SnNOaV+> zT{S6>N{NQ!IhLA+FDETxSj2Z}suHq4mpg1o3K#-Mj^0zfQd`szPr54V3K$=^j2chX z=FIan;=2gSQ9#wGqABMoEXZ%L-Fx2Sw}%>W`@Mb?jT6&I&>)}bpE{FORh)4uB69?z z30P!}K338fB}hB{wkFs1amAqRT7VvOr%6&U5U-s!e=VAM#MEsKIjY=@%_>I`7qzU{ zzZ5rW58DTgBylP!GBoYL)QIpLz|whXO488NzW8e>zsYV2Y+gSW*IqK4MV+a`8g-81Ikv2v_RfF`a+0SZVYcnMoM)IkiB~_=6N3pV~ zTLRYaZZPGVXHwjiQ<{hn1Y?I->7`W_F=}HRl0`eBJknLePSHj}jY}BjOWWEkc=-cK zH1Bo+iI5LPKH<$JBsBE#Rjwf{r68uJ!X&7pg(HzmZWfV>`!0?j=VP9{F>kLuDzl5@+Q?We->|tSDjB4S`bA*G$sY#XcEDPx5g2GO3aV@}-Z-l2o~|R$p$q6&G1< ztU~s4g;k5NQYM6at<@;-EK-2eW!PZKQ$tfvnR7K%kwUa`!l1(z=A9LRxeQ1IcD49{ z%Mw8aQ4s^CmG{Y+jkF`u;(2Zy%^gk)iN%8B*z)|I1B+n-f|{cds1)ky5SAG?$txog z5XcVIxm|dWGVYQI)f^8*)6JSbzX1xiZ_0yPeAeifyZ3{+#{Xw3_ewvKicipNn!O5|<)|wat!BN) z5~1)lB#AV+7R07&C#S3vIN~{zJ!e@mmOp^Si-_qe^5;-%Y9wZ-j_Aox1kt5BdA9eO zH*i7F>BrcGGo}VLd!Q2%W|Dc^(Y-bw*8CH$y*tNh^3E-n4J`O(P?{)e@Rb8dGt|>Inva@Xq+hU<@_GK=n_0mt?Kc**ZyHuS4irR z`Gt?@<4|v98$r{(_$ap!h@v-CHAvZ1zxb>5Z!6|cPnoS~XkH3@pPlebnOlouQLHn^ zR}`x})ijF!`y(yl+&}~~SetTU>;*sa@gYG_6oujGu=@O}8}0SPwY#LK0(B-2MmuX- z{7*VzZB;arVziX;mm|-X8?a-bWl{x+_V(s_vvQP-I93AOTqSENCJFY6V6U#Eshx-Q z!S+YxysJj3OBJq2xO-~aU%=ayO3=;JYmT{mp|EJlXhqHPi+m3!-8 zl0fS8ri#3!hf_w1xt=OYgk&VoM zPiGtax-isaSCA8{0qp!KzMlij5Y|0@ABjMWjkSeS7fY$N!4^00=b5x&Bf-X+Z3{^z zNb}iQ@gs=R$V7O(WL42xM4_VEft7UoRwmkZza4(rK!_*Ow~n*?QMN@hxyNg&e8v@1 zQzHDxNopfq)aX4NooJpxLt%>D9P_xgu z%IGZ03j_Rk;tQ>h+7anBcMy}t3FivIK5)Qdl!{u}mbI8kQB@pH0b$sFkg*{9Nacg6 z+K_6lXi|v;?c>!?U(PW)`e$XQUzL?NIhooJ2^ae)Q1~5nHj=_H8On$Fz=7)H<;f0-Puyq>ddw4=Q=4#9BDg96@D@F=-)4 z%{(n3A8nL6vmJES+<27ffw$vTNlH{T8Jcgc#W0*I30epgNmv>=nWTwImp9MS0 zNmRB61|F)+sQ86L0^hyu_-m!D%akalN%3roqD;6GrvtaDph1b_wUWtLBcqf_BSh5H z@Ks3dVt&a|qQ~3YjRZNU>NC|%D6KklfOg^!3YIf{P8701M}n%l%BdqyL}4~d-&el7 zk8dA`ETzF1#XoKV0t8OcQ>!uf<|3lL4bwfO--m7rpq*4OU_rNqxm+S3(}|L~kriN{ zhF?T!`mfhWGxjG91`pHTmmlX?rY)Xox6;ju$hM}VQ$ECL0B5a2-kM^{j~cdE9@_W+L%v)Z0_Eaq9A|jd48XOPO(rFd8njWs0g38{nDY@K|U} zkwju}1WXK!WsgsEj@Lb{%oHxUtHrWC^`4!|bnJ=`>+;Te{#_jvcdV)y>nSP?6td+^ zy@tg*(6)J4j_0PqLrbff-0Xs5MNULR;K7)c}|FWm5&(3rm4ZQ 
zrgZsQcwt-@3$311rhlAp395Mr3SuOx2PaKWwWfcF&!1j##CVf~D$Wsg4>+wlxei-Rd7PRi3`Ol<--*E(7Smbyux!fIeOaSBI-RuXoO`jg|wlFaSEbW=gG zsYN)Ro;9A6FVn)9jZ%|?vI``*g+RC-YyQqQlmW2*6#5G!5^y~6tTloPN?{#c zT_z&(evU~i1bP?o_k&oEZxklsu$Y*yEeM$@ky5Czdq`%37pIiKg_uyoPP-Hfd#(@* zk}rF9<7f&9nZVQSsgg*H1u;^?iL8~TD!Nytss?W^zN!Y52Jp9K*e~!G9Azm4;A3So zr1*lzfD9`SrTndm=dV_|MX{k9s`gw4qc#tIwoaCIj#RKqD7Ly{wd3p5Rt$v?XH{giq7V}V?4hEo749EXyD`3>Q zHytq4WqiY5oM47(s+EBo$n#=?X_Uky0D_Kgy4!_Jtdj*^w7G4x2Fg$8e7~PMk%whz z@yr7amlg!kVz_1>JR*8Ub=guF6e>d%46zj+>Cp1+DaOK*2!qC`;B3Us6ejemC+F-3 zGUD{AwA9dH^jLi*1~DsBFoqv4Sm9C??5P>t6m8JAayx9G;U>IXY=ebz>Shm`lRRMc zRmTpdN!Fd4GYYzG^Gv4FG_;Z{Dug|VYuj6nJk7%NpA|CWZO_|C>{F*1_Olh3OPT1Y z+cDABV7QGGv$V8TK4eP-PYFjT$UMEmz%uR}N`m4}+I2y_Fa-o2j&UBYH6}IF`7?eJ zl{rGTr!7`cM&@0=Az8c5j!K~nrA%@ZKWJ|3d^ls8CD5RF9BDO=bVL!22wtvw9o0^# zbT2tz*mh)|hd$-Hh%x*h1Ho(K6qUxD-C3qB6BLm}?V$(2@yw3-Ej$XK&K0cHu(mXh z4N?C9rrd*?^Zx)+I%PwMCJBRM8Fv$!lcyB8lQ^xL9fG0i-_B}%=gW+v+_i{i16%NS zonEOn>KbGpbpvv%WcQ%SkakU824}7H2i0DmW|=bXB{ovcIdUno?qw2S@>SMSJWA0& zlT0O7a^T$6~&H0}#N`kKv zslqBFj<$-foJ8`^C>ux?_t2`4M>OC;LW*!6b5&_xFcfj2_o&$ZYOTbwo(~pS2Aj|F zD&UC`A=^w_$*878A+2w82DQ0Mc?qkKkgRr50`;?&bAC0S@J_5zVHkxzVCdyNCO4RI z2%xX?Mi7oRPJ$#mso|2wWKvj*>24fluPC;r){RM4!PiCEf)EHPR?1##jc+1M%Ie2b82EBP-0SBY z1xsG8qt8VKH>$y)2ufqFiMOY3Q8}lgIQ>OU1}BMOd0RGCip=qcO6GWHNaO`&j5Ww; z$8DX51li_Rn}nA;vRvT@Z|gz4w)t$RAxeagZCzhl{a)ASo|WXjwPgIwhvPj;=#5gQ zd!y;CWK|T{?pv#oXAaSev}}l}CdmcVvs;lpn+YyB(FYxqhRziuys%J}jAP+PW`9wc zS2|^^7axNYnY97iB{PoM`CyuXqt;K1x6)2h%V=5WlkyqdH&B*#M!(5n$ZuDb_IErTV zqT5n&rT#(nMVIs5T+7+Ie8pRsaZ21ejtoMdAkYc0m8gakb&@o7VB6VPsrZ%dya;E3 z)usf9KMFI;(t-gXMQiu?uV;)u8ubYkRuO|#VD(ivJrtPq6_6SRj-)$n4;{7Krssxx zoOIhlw#lO1;z92MhQUcLkM#$pxc>mA%xk2%<1a;)s8-^YXdH;b$-u&`2a1a0b|ZQCeq)Vid_UN5A6 zr}Q^HdXwpoS#oYICQZxO_6mAeoVt5Wgw#{fRpS*1C0VAbjnIiBboVY9d1j@rA5EE}iv7@b{h9b}cum?ac7baDl3>*ubwlvm^!d~NxW7=U^921u!m4pevo#-2vqZ+4hLkfkKxg?%a4J?A zFSZx^Lf2z!6i;N<$yr)c5@xJ!R!}j-QHsNAs_@JmaIsB=Pf3J0z^Y`Vr9jcqs&7cC zimLmhXuw>LZxxtM!x2p;RUq-ArPqF{WUR5*u8<}_(^_0^Y*PcP!1Cq^O0v^obE{V) ztuhKzADLScqs!&g4=kb3?$?UieecT`I88Tjf!-uZniA*g=TEWBzldQOgQwUg7fqPF z+_^)WGQ{zXG9^H?WxfF2NJUP00*ro6SUA>>PIs4i=lZIw>rUqeBM~e zR5cie0Y?@)Jd!RgQlbiadEsc7&*nJ@6gugz7HBREFzM=wY%M8?JZKH*jBs_c6X|cL z-lO3(7!Oe}*s3yX;WRasD;^a^D({MsV7L*v)AY@_DfW&bwtKu}2SD(d1}jetNJ+rd zZ^WWmaGy>+N5-fogD+uGRZ(S}Zv?HYM@r9_H+K>y?aJW+J4yZ^4R=AkSCTLwO%!Kj z3v*hk*XDdNaePB|C+AV`K}UySyUcXxs7JfI+ih-nFW5YwsGYSH-4p^N-%MGPGSg;E zHKK~S{z^3fl0a?0G5Hk~=Iddhx{WQtGM`C=jg&}&K#`3jaSX*piqc`U5aZQ2C(2RF zg;dnxamet4zugbDTS3J)E(i%GBcio#6rU6WMan~qOfVrHHjVwJdV0I(%Q#Q-IV)DmRPmOtD9 z8JxfF?c_n*Q>~1*!GKbLosm)xXG}V)!K_Y^K6H}xg_C zk&+o&MRLpTs4ezEJ}18xuEO<;*lz0h=9Nw0`Y*t z-rMo}WSG>j3E@;)fI3r!KlJnZ?b2MyMUG^wN?fms;4J)wB`61)L6|FHMv#ehMNt< zsbY>9<7loS0@rmCeXY8Iz_N3zmhaNgQPM=>3C53&ox1JHiXxjjQG#K8RnByI7LTPB zxrZLbA!z6;a-@m`bxaJ3W;59%g68Fm4;~oj``P(M&=!P{ubma-*(c^4oo55GlsQwW zT|MYcc5Y0on`FVdw)EAxUaBll3W~5&)%pJbDN{pBTfNbXrs9%>+@yP~4UYnsEO)9( zE^SYCRe=ZJLNr55CAWZ784<>;d(tLs&wWP0G0ef$-%yn~Epw7#bzLx~ml&*isHUf@ zXzLb5o=IX7y{{mVOZcfAjkBv9wY&Xnw5UXdU}AWF`Wj_-S1PsrqLYrpwyi==o?+Qz z2g9Q4pIqe6xIS3LaXz8oIEu+oBKaBMmX0W1SQE`Gfq>t>?)SMoK-J4S&vpn(;z7iS z+eZm)!jLeYtSYAKGT&D623s9b%Xud*<~%c~(ADAb?1m0DBER(R{F zuVX6iCjRy9!73cfu3uV3!3ihBs9-RNpFNaOaNW`dq_!kc3Cx++o2mU-tMO0?Nc9%jr&n`MTY~_`vVA2EM8fdw8D*vvSauaB$qmrG_8&3W z$SwxrbhiaNzQWqeic&tpaXWE}BF^qZ_ubmY5HR6|A!xdRi(vS5H}f1Ww-WMW_u7?X zsD?=r*EdxJUxtk52fF>D3BzcjRAp`JA*JaDg$embuXz3ijA5AOR>W#@hHJ|&6;>IH z;LlMFWHKuI#eAj&TeA!5FT(8aG`6|7EIOYF!`7GOnNzJe7TToktXu_P#+#|wWc703bK!OIG;oPjVi45DQqD+aRj5*R?bu>%>uR*RTO=8o9b?ah 
zWIk1JAGS))-G+^pVf8)JybnF({+{NZnpEPI*^ZLG7s2!PP{b*53@;CQP)f%;?v6;E zfYcQQc-6(xWxZz<%pQ@~6lj}$TMawnooC1Ks+^aGQsXpqAJ)nV@hmpBhDqyhB@{^W zM+`>Nu-e;M*;PZ1HT=;xjfMLx@8`68Uu0u+yV{km+#j2vB{4jwmfMdT^&1xkzk4*<7&9 zTCf>mN;;%=znxQ9^vbd;4R@9d{rer<^Jl}S=T%RL>D7l`+T%-6iBZi{0EV)El?x>` z4jqU)sAeGDkq}&aE>Fjv8sbth>!whS?sk40@TUWh<|;U#hQAGx7$;XRODUA9I}Xli z{JS3>{B=(gf{(3POVW)z4^+=xOT%gM^3WP;r;4r`Rf?hN=ccHQQb|4UenT@mTK&KW zAER#=+RfO!xSLLHO}Oxk{D- z2-&lun^SGU1tv$ouDw1zK@}Ns6g3PGtdQaLY{EUF*AYJOz>ReuP`5U#kveDF`cqy0 zltdWlnoiG+;pr8OXf8^GAv&(D2%`(sIl2lkuVq3YIvbeC=dwS&s{a$sU#@j!={_-a^y7zdGX9@ zS*W8S=w!rc>8Y>&D{Az#4^oo5R#(_#;GHqttJSf zF4EK@u+!ZqX!x6U;#AtWu``EO%CbpF01P2^P|CtLp?zX1x-zP01I5KX-*W6 ztbH6zGSCuAN%NXWw$xQ5;CRvK^jFl{KAB+sRmnKcCyAyroFR)G$+-RnK-0b#RfDvW z)V$`sgeC@skCAw+vPfZOAW`XV#69b3RlK5^w?zr;#Crbfg^bXJjl#Nh&Cfqd5m}YN}3@t2hOs- z&iFdAw2(ohh8Ihd^JlE@R5`0M=0?FW94crsMI9WnK@74%g<;h6Rh5u37DkFTj>6Nz&Z6ma_zyP6wD_ zrNXhQnk+)9nhBP9XlF+-Lp*IGSsb|v8ZvARj~heoD+t6+&b|k#o|NDwj5RJ5E=$aK zhAo_P`cqNh)Xh~xQ(P(I^ChQE>TJzEyj9Xm}z96 z0`H3nXt;pIWgpGG84z?B{f$Vr%^A41PaOkDUUNAPAGk6Ye%Gr5o zrfMuf=OM)!JtXL(^nw8yW&i=d?2g*}IkFT?4Yb#HfhLR&8%tY=(BaishF&yt)$Njc zS*MIi0KL_i-sjt4%@z}@rnDU(VX~6lN9xy3Iw673;~hrE*C}QCy0pQY8Znlnr<$e6 zj7avd)O+3+;w#(cE!2>+8jKG*QE6)H)1w&ar0!<*BhfsWntFYdF@B@KGY3&iPnM#M zqhnJ}$UL@vqRA7M0bJN$PB3#TWZZW;ym7Lq8gPx2w9J`PEcGi3h73|VY}JHP)Wu&6 zK62DktwlSD6}BNOy%={o@p73$;O(j*)uCM#GM5F+9#$=f(Bib2YYv#BVNy~?46z49 zTd9!QZ?V16RRFk@pCuUi&_lFa0eO+!AxWPJBdRg2FR=>D z7Ou6Rf=WhTEVPu>+ryF%?wFV?@vg>z!#N_8j<4I^N<{#g*hC3ML8R3OzLJXlW^FX>l5XJya^jStO8b4oUk?ib?$WqS{dein5ZVkWwQL zI?haW9%SoO_;ad>#YHqs?6DKGM4xy+x0lkIZ%V)cMmhG=LSdAZvd2s_ZF*DGmY(jD zEO998cE-V0{{RujjfSve5ltzeQ;8r>-ZbMC1_6XbqPrNLVt`Fe6p9^Wi^4!3?31pS z<4**Ma~g_N9ib%Bg@k%c%`1e*yswixs+PiEaqJV3Cq8pyIPp4*G@=MYHLHqu+C zAscY#g+pA)kC-VHE2(gxfudrR##RhX$G#(dzke#J{*j)FtG5dx1QEp781YMV zW{2uLR$VwittH7yWa2sm)N1OAkg`-(VAN2{A!JZ0C{&oi(KLJR`njUvP8dx-NzfYWjM+Qc}XR1O_U)n9Qje1cmntUs2#b{%H*eGDOozT8w}NC&eTZ)(9l5st&v2 znyQ{UfgC^r30taMe0(`r7^Kyaq;a2gS9F+@KHM80}cQ^Iz6gHQ`7+`xStaL5FK#;lYwm=l1mCl;^rB50wKI*HK9 z3{lc7U@7$XOaah13hB1w?ya^@VBM=i0!H4G#M8DNpAJeP`jyh$FQj;FO)QnPxTnml zeq?)B-Se0s5(v+~{t~2c92+y`LdX#_P8~_>22#s3G+CBx4e?BkTTi+S0FT`qD0K!f;nPN7qCuJ< z-n)8ZQHEf6mr(L0Zb|8fS2Bcj7|un+Gky_8QGijsY_%(h;*!NIO&hc@KtwMhy^rqQ zuf;iaz)JM0be*xc)6DIpcgzJMb`e@!buL=0M-0tU}Mm;wz)cc>(lD<6#wzLPKsaGax2nQ@#gYK~(u z&4yJ@DiS()BT;`PBXCX6d-F7VAjy|&m#M|3JHsHD#C{cpy}9S8xy(?@YLM84bsxr! 
zbtI;?ea#gH3rqoyIpvN)ARu1tx@EQdk7p(3R7jZup_`xDE>t#p58q8VtyUh1JT+MC z6|@lrLsdy2hDDRO2%~X6&3M9`WElz}CabSZg<>PlDq~mR^3kl6n2eD^01Z`&l9B;$ zcmlAV{@dS=YEF@~R_%cc)C`YxbwzD?y^_mU5pXKkgdOYaD`{{Rn<9%MGS zF~YR-ODV*Hl6F(AY&ml-NfdD5(McS|C4y-~R1%rwR@_PzxAdg- zPBDX5mc!v{iZIT_CW4+wm8^AC(&OtKPT>j|3Ts#%9XQO?2d1Qswt4AFP{S6_etHS2 zXL1K;3!n$v#@_`Xa5VDTM5_!)>Elm6o-$4&n0-w2;;IgvN1Lak%ur%0hv5|`4w`g@ z{L?GQrF@_VbGw+2+6~KL7YjQTB6|A8XI9`L(2I28fEaXjR?%3eO|k}eq`+$Ga9qt3 z!lX$~mr& zs5uut^xG^};~ z-;T3t(}^;CKznMv`qD&fG>Vg=c_j6t2d&RN6~%r>sZ}-cQD+s!1ssit(Zx?!41g=H z$C@z0PZCu4@k@(8mj*^33ae5b>oj%iGJXY%)5%SQtQZAEF`O7+)iX;`2@5vmsz4gX zpmtV0kA4&;AYtsIKKRKJDw?xbPr7x`FHr9;cIfsr{W8Syd^-#ha9C^ zh-+%`3j98LiXk$wiWxk>z$uI{L-8IwkFHdy(r}6vw;)K0NWdwp^T$naJjs$VyuF*U zgtZv!(^J=0rW03BPa{CpFc{?v=KH&zHn=QL7ZiTXfMgm@mCEKksMun79vh0u1lUGv z#AvFqY%)w*nx?bngiSk6rIvI_5TJ5|F471WvF~)uv=TAIihWL%nE*uwSavChON-*& zHqISjtER2271&N)ptV%BwKzp(Io||l-B*$qx0cLF+o8W6b+REzBvl$*U{VRH`jU%1 zO_DP!bRX1qGT5h2a}8}~ErVB4)K^DSO@r1oA*74u8KtL{QX81dIx}9t4LGR@NOXrm z@eQ<_5+NfILSLw;s`~rWOt~%()~YI;(MyJ681-%uoFk5|kz%OBP3EMh-4u{jC7df3 zbVmdg7aV^ixEAgw5(c4&`SjCQ><5}qjuo#a9Y>w8T-TNx1kMzBwQ3$;l)`f+96m}$ zjqJ$H8H{QNgN{uXPo+pO=|`90QEP0a4u5l+yzZBE69b2)y*|v5OOmO2gGq<~04VCF zS*Od`RvxvuG)`jko{_6)GKcdFHd(dxA6 z08sgusdElDlg(xwHeby6eKsFHAJlBUis9)h*Haa+Ew$96FYkT7qlm5DT3!%f?WJzE zg5Dx#gWii^x$hte@)dS?>Hh#v=4~RB&w2I;nwY$U3Wp=lmxd!_FAhINlc~8(Z@j6;eFQ%myn{&AK?iJB>twWV&%zFyBBNVx`ar7p?@1CI&~ zXE}07Of2GgikstJyXrS3bXP6rj-h6}r;3cHLr)$v zg2jYVwOE>3mfH+t$zo8m5$_Nb3tWrxGh52gN>ee6RDjcAPyyRSOCslN zjfP^eVi=u$HFhC_Nk;WXh319BM&c!H#zCk)HrIs?sHhcTvX->iN%Nz_*{j1<2uRi3dmn0ZLj)z|9b^#48fWHQtjk zdX>X!-x$w$z7}yTFBDQMp`0O|j4Lgq`Fp>*;8=Fx({a|Ja^WLlW73Lug&jV#j=9d zUZN8dhCIb0?Kbc`bd-bkiVCp2TCWfkxrUyCqkUX|DN{8p6!R?7!b-n{e(FR4w$N|f zk~t`qXaQVC5!3LXJ*q(w1KMfUi!fnWmzLG^2CEFNEi2>ePGXf}iLK?iL|wKQ0f8fm zZ7!IHE|LZd6;TXL2>T(0GtI2)lU>W&tTz?GC|0<}DVc2r2XG zq5GxYs=ABPti9AcpO!Evf2sNY2ElV@n}$U_O=>zcDq5fhkx@gEs~KA#6R#_sZlHbU z-ca!)uL{;po2Jw$v_?2h0aUdaM-`ngujB^}#PHastDd%^XkbN#B?VbL`2jK#bP5>o z&HK_+5}_b;{OGwh>%~CK@T0TPUZwQ1k0D{11E_dDC7N)2Ga82w%h+Z|qAQsuGs(Jm z%}lJ2&1>C%e0IIK49)WUj{V0NB1&=8d_Rcwp-x?V+FK2;U{?W%tKZ4k_ooaqEXCHW z)7NZqdVi5Hx_pZkO{t!auBwh2>FO%wt*=^GXZ=D9=w~I4H zlVt!wJs%pNoWYkQ&AE3GX&oslW~Hg06;_@_c^&@%)1?j-efHd>l6&{&OlBEaeJPIy z57XMIBHQleiaTJ8!cAl$|||ytHLUyhB?}!4_S-_KpKlF1U0qcS}ocv2}uw( zjy2|%>Q<;BM3c`|PlGGrPgP%v&{RL0iJj=x1N2+0qjHM^4xCK=lPZ-UbbKh$o24g6 zR{@Mw_kJ813#qwh3+Y!>dTG~+43Cs+vnc8BRJwT*!!8nFZP zV%Iuz9{1@CspnE#CDcNJAJLpasc&@MC6uWO!eEY?*bAjOdoanIv$YO#&Y3SX;exgb zj4!D9ZKqt>i9nErEs8?~L1C$kjkg*V@Z@cDg%yLYQb!T9tIDL*ypk{wXcqMu>Qgav z=cN5a#PWwyYBHBmdS?Fs%cqA9H1)I*K^;n^Jv^~UzH9vTJEoD2=h`@}qyXZOp}@iU z*3{5lfB(@Xn9Mm;7MuA+eq*hyV5>Zsn#$0|$5P@_g4ed6P5qqevXGQSB0(}w3gx@Z zx8Vep$GgfYi2nd1qL@#H(O|UExG7ZyMj}MugMFpo3@&>M5_v|RX}lAr4$gSf4%(;! 
zNIf`w=A5ZlFV8%aQr1z@N>lWdl&gzYy4+bnHbf@+{{Tl8Hquk}y9x%ZivR7&=-H@LqVb@ZgdkX7q|tK7E$kYIr^6p5EP z<%klMPFFNeviXu$M3PjY9lhYvAk*Lv5T@-ZQ1B2bGvx&-l^HnG^XKd`wG9f@$rT`G zm(8Py)5!7=x|WC%Be3y3{CKzXC4NVoYRJ1t81U(?6Xr-OCiy%GDynL3jWsoGHC0lx z+!OXBP1<{V@t*y_A~cQ$DX_ITMup0!9;|8`ejsxxQI#oXXVcD}al$+x;CA8o(whv+wK0ZPlvW2;ig}iyo{~+gdlzD;H3Hy) z@Z#k#5Q9v*H9B;d9USql6iI^NlT<;7V)PMIVmGIb942}>yv4PyvKI&npAFaMns(ho z90$YJom$j@mf}@h*9gJk`RZIR%Ejxc?IH?_`ZBH@e(%f3$J_LCO@N^Z5+`p>G_W*~ zk^zr>ajKxqNR;Ywtvv-I5Mg;Gl3>y%inqVL<8F8`_NgFsMQYptiJhDy+v!da;Q6MS zN$M$ZgCvZ6ua&83AdNxQtU)?o+sA`t160ZA=&d0xDnJ`{8qT?IFi#}Yz7vNG4~0p}^W?gm-GEPCoj z$X?7WbTFMoCr}gBW91ZRyt@=>+*Ej;Eq|LpJAX%)M0D zZUv3!d?Jz>v}Rv6NavPFf`~!_ts(c6Xbt&=x%#gA!`~nwSm9aWmiHx;lrK_t%>ZOKTB?e%42Y+7INChYHB@D}(}L!61{urP zWho^o9}zR{_)!XMmaC<@4%qil=ar}F<}aA$IL1GODdudHk<$jBvu;PuRTcSv4#(iY zb0!59bT~a;A(in;WUHcj6vJuuNr4CXdeoo_0!5>M4S%Dp|hu0 zZa*GkrNMfMLYZGIQ&v^tGUnx6Ev88+V^yb>>nZK;-9@A^d7FLP>Uf7R2v`Lsl~|!b z#To3UnXoLMh|*;YGpU%5QYK1?X{Y(^zS?L(-Oz1sdXra#qfl9*enJ*_*SA3=4xYWq0oXyZUgs% z(OHX1Z6Qh124H)rLzXfvRo==~)g)C&bqlB6YNYGNN62`mPIDG_iaJNZY3s8EEI4&0 zJ%UA2kcxz?Mfo&ZM`ZJoa;D4*2lsda%eP+bcS~QbHh#vnI*<>94igRaEwz7tc7Lyd7sRnk>Y7X9nEE3JU4K8xw%En zsuUhqmOMIWuQtC+%>xp@DzJQGrg_7unYS=tSe|_p*j`g=N}3^uO)Jc^vb97FG*B2y z5RwC{9oM~pwZx*-tm-Pn8YoNp5-`BfbLwl;z9pW0KK0W9{<&f?)#j|bSrcJ0qbgP6 zcyu2#48B;A%SSPYMq>nA6Mi0U{p)#tV8z0qgsd1lIz8J*SfU;2vsLz7^8*5;n5`f& zD5>cxJx+$EY1a`MNabo&bl$n%~BMq}$$jz<0rxqi`3?ELT zQv&1o%ur85h|NYMRgPRc!*$5CLCFdrUNOn^*N4zc2bC!)QGU@bJV&{NedL5x~zYWlB~ zR4bt+HTvLbNy~K$@vz!ZmF@ zb%tykG*-s5wQE#Ngny$x+bLhN8M_PLPE+zO>C{wDoi^^3DP9;L#@d&K;d#cYpg|@Y zkw+0OQxK6}F@E+`S8+&LlaEC< z;+c~=RAdGRaJP3MdU0Pek|)4np$Y&nm_oJ&Wsv?PLg*GEk5HYbJl8iw&_VGL2>W|$N;;w=~ z!g{8&(?v4jv(v&U<)@W=^F0iYO(6}X%yF`#>7ls46d@}K6ORh7dZGv%YrHEC!sKHE zQ8Ldai!rFH@{?-Zdy6OpG5fE_3Swt%I*CvTBOa=J!EoAhOEgsq)H0&GtgQ1WNk7C# zn{o#nEvgDn3U<^oQ_y}C$yp8}WT&XZGYxIrw5;?u5d;HZ2YVBxw6_}WE?sUs>0f!&@%kf&h;Ukwi9-3ZcpXQIHMD`DPy2$0Enii#xjAXohZ#_9${fD zwDj@IAZZSs>8K>1*G_1-h=@2+Wwwzdq;u6Y#Rf5eLt9WM!mCvgerBR)La!kw-Cz?# zaok^CG8{=hAp8JZ^Qi&tgp3|AS$8mBJHp6RE=>S$nVsdS2( zsTKn%8yj#g0niT|Mf!-}MJY+MWke+>#~TUZ?5x%LZQOR4VBb2BRMeFfru7l(%MYWiro^Jnm?bY!Dl)boS>?qcXn)b?^I~NTS|>pm5+iDN z1w8Ww#kf?Gc5EE&s=U)mL=@IenDmYwgvXn5b`k1wpB?EwHC0oKQeark23EuH>_v%h*>L*aT zE7qJ7GUs~ssKxVURLofRA5WHRnNQ7ZTs9b%N^|-)@)^S_opl^p`Mbn65L7@IOj8T9 zU?`m)f`?y8`P0%CD}mEtK9u^)MU(5UGbd8=J`0V+YLca!%c!KNrIII#ofWiZjH;i{ zj9)Ww*agYc(;I2qa)p3E_E8G_TsoK1yceUDy)Db>isE=bP->yBY|zuZ(b1eHXP&of z&fBD=n7W}z?xMtKJMSX3D!A5@0Ce%BnZvN$9yO+=Q&Dx-thi=U${196dkn9|qK-#n-u{A)VB zO=X^jhK@BAYdlvM{3OP|`v|PNiY*H9P(DRZ%-W zQ1WUZJH=&0e(jK5nJwTsRJ0U4N!hZMAOK-7K`B#f(=I%@L`Zd^I~8K6GX#}ER7s?EVfW?Nj?_iPIqa*nKbhL z*xT}~U(azgu8=`lQn+6@-K-=;boH8zM~Pz96Uy?6 zznZ1<87e8sX^qGwu5<+Hergq{z=29F(y1yc@dHj3{{YCDg4s3OO*E8} zCzkG>Zi^#$9}Z!L_&033br)O-6F-G#mh6Yxc_~9)D1%!;^wSFJ#ZOG|nn<&L9gS7t zIA$A(Ve-L=EDJSQQw>ygEQan$OR*l%{;>ksbVc1?RjML$Ggf$^Y=-or2vs@^2AlL@8Ufip0B@wAdNsX074-9F>X{V#u zuU>IJo8a_0sW9xdi{Ud0{6i3$y#rSm*esGPDoP@$VYO3B>P?R-?Xp6M3!jxZmr}3b ziZGJpyjs4L;?*5=rCBj|Sumt?MME_d%IY>Ohz9V7_Z>(*j-2w)5-~AXN|K{GHhbwA zpYa^aNz<&_v|UN2{(qJwiy5WFX{q9tHL0l!HK!0_iZ@)12E0&)00kji zdVSQ8>L4hMlp^A|jUFqIsdMg97-lDlW7UsIf=OG6M*ye9r(brNk)vi;l23FYx4e0_ z5VWiXC{>(V6r48EMD<63_0KZ&VyCE?GZc>m&iQi%!Dg@Ny;U_HCb)$xn2XgzS4tou zmU?Lnkhs+Qz8)rdC~v(fQosgr=Vc_mNzo}uo`Xgo)Avz&yn-iv$AxV$hLrYT(&tn6zW({%MK=xr~P_jb79aL#$aN3a&L{t9&T{7=ba<5mq z6P%!}p1&x{wfQ#_rG}9_s#>f{@v8b_JU0DVAC}D>1rym-6bMi5XOU)N8$5 zvutMJ99&dXuozUTx|RO>k=7pLH`m^pc{P@+KQsg6DO>@Cxaw6Ar|w zmE@6@njNebRdsSl`9W7H|!26*Fn=EM+8%d)6#l9 
z$0C%lQzUN62aBz{k#Yu_bxsIO(Zh;wM%ffDdT{GT3DmBfnH>pi|{|1q%EZYboJ4lvVd?&pDNe0@nPx~c*ZlHMaex( z^w(39Dlj^HM=-}(gw|2w6xDcrTOwb~fwh%cM&8{2& zkGx1f4heF1>|JHZNd%Bc`O$6bPoqZ2puhUJpYblSWGtgY)eIgUs^%P56Rxh08KQ(5 zRh~9Vf0&Y0JC<-tgl^myT)SvlD7ufpgtFC6%RUB+lN-ZD&zRw$6 z_}`UNNiONr)+@c_)qR!q?tKt>8GY6rHq@`6f;f zDgOWmjRhi8VmTdZ@VXf4U2Cu!o7Xp)BR1(_8?Ef^;lm4MN?;~<_0ks>BtnSQ?4t3F z)2XSBvYDsKu{`YzQe#y6rC7nUY!1MiTwn6x1;PO`JS3hXj@mFF_^_oe6NgtNL@aHEiO}7O;j%(Y;{pd`b3gtxQwA_-FEKWO@ZWB+TKd*c0uGBD4ChqePV|_p62YT zhWtpMKxpO*TF~vhCmq%)LH4!tiw3k~GadW}9 zXEql$*;87ON>zwFMD)=v{=IDN;=&x#P#}r|X#R5ytc@j}VV5G2%dAnVOeAJN@_-07 zByDej;L|nB(5P4*e~kvR%fR1&kY>2n;`wI|#6DXSmbVL~sBmJ8B$V*8zhw(D09Ct? zq~F7V;Dj^|CmJ(Z)CQQEP}I=nXfY*{N+`1(9wtO}RWZxu%DX@c?FIWGzZLhPDg_X! z+B&JV>rzaZil3CSRXmbn75@M}xK3Wn^>WFKncg|P$fieilt;KQjBT+Nu)h)JcR_^| zAuv>D3`bonvScg*NQm+@aB_Ahn`u2S;<#r?>Z;M5&Ez$)@3B@R-WwwgGz zj$US~tgc#!yvs~tznG{_){SmK2a7Cour{{Sluu^J+F8hgmzkuluC zo%2<1P;m;36A!?a9APql$W?sSBTqM)^yvzeylXehl~!w5u)j5$?-X32D2P}%$d7G5 zmgM10%CL$YI*~s^^3EBPCd;{3Fy-unn(6DH#Bzm2B{e1~OJ9bujN%kxYk6`^i@lfa z6Q>en&ON2gK_zKa9)%9NcKX@B2|+qf#+4qOJ#_jT>8JI*Ba=5|JW32ON@(#qXs3@H zl98Dsjw7|anaOkJD`?jz!+_o6%kM5OTR}*P`-aia3K8WGu)|6x?u=)R70f@>Z}s!k zES*Hb@mYFJma|mCgyrlTWeycV9MPIs>x?k%Ln1VbERLe&jtlN`HW{*wB}z`xHmGs+ zp!2!Qj@$wWN=D2mqXMm^MjSeerh#^uEG9Zp9|T(?n&t$cMfl_w6QN_l61(}hne zNf65z0`?#qo*v}QHp*E;K$L(Cbn&9?=elvpO4J14DGh~VOPTQ+oGS#I=0#9;uE4Q4 zL=#T_U%R$rVhHbV9!V!PL(W5L(n!P+g-63LRj>~Fbq5(LG!j(&ya`j@BhD*~2T107LWS%(+ z+^m51KMpEg>pGLRIP}qLb}y+VN+(yl(ybrRUJF4zEOx(2LQ-J>6F7+=j6Eu5+*X&}Yf&+( zD`Q$ts^%>94OUso{UO4a`l(iwLonuyUPvWr)xyD3OG^we$jAn)e9}Pn@)ULOfuNZ& zQp#F7v8j*fw;bv`a%UWWsF*wTCmb==;_rb|(qb{?$rjg2N?ctkYGNK zO(l4!$Q&lG-yi}b3jfj~_#Dwx$yW{^9wS4A>>o3TVw4g|X^V};jB3m~ou=LS)wW#= z3Iu5rfHliB037$K6b<>S5Tg((NMJ0Y@HB|KslR$9Bq9ixk1SasqU-1qoZ7@hu{8ZzthLlO_ z^sIGRDthmiu3U1b6Qg-qr)p@Xj%%BS(g546dy%It`BcbJIFZwZd0Lz>V@y=445-x3 zl(R9GN=TNWoWf>6u1$&afETxi9Ss7KMB#;LwL-#30Mo=-Hl4yYBTqck;TlHY4K#5L z&2{}E_HM`MzbtfxTP>^SbJSz=#P9vA7rl|f;Bfypv64E0*KF}|JGxv_2 z&no@s7)PBUSpb4C9YQINqcEj)EOqeE(?*l|t9q$KwJgX7=>n;}!PeYH^Zpzl0xF6D z3M6eNW1W=TE9%rkG?Z9wHx?ozN#Mk3HVN1RVq+fau+sJy<7-x=_htz4_S4OfRVf(! 
zDW&m>_F9#qqot&dH?q>Pw?Z*mM(dN3CA&sgqDXUfG`MDrU zdSOt3La8LOg*O9M+i}GRbzq4TR%D$ctJ@!qI^$XU0xv}d7nNEHzc0y#vMNI?(7c}W z0O;dw#+ngsCj8=%zqIYA-Eko$0|cKsZ#kP2tQ8Rd03wo(8mYngY)PTm?Gk{c<(lrc z7wmN$@OjXnb`i3)E+w!A&bsA^Vlq-wOJ9n$9v@2tVx8$}q!POlrp_Z*k8lUVeO>qSTgNa*24xpln0 zfRnKC#BJ!ShxG57{{RwP&yr%s>9A~0Y_lN z0X=k{-sx#1I56rS>W7W>&!LT#i7EF{Btqx9`#4@*x=<7KAjmSUCQ3|dXsR%*XA+fC7lMkRs?3$n{&mX8 ztiW85rF1AR6%w!vc}5;wDBSA6lZp1xrNTW+;WOo0Y|nyp_G<6YbIco>0tCEr!F={f2SsgM) zWvYr`!)Pkog0ia<*dF{X(&X1Pg@TU;dV1f^wbEE3VtL4D5rd;W!M0y!=tE%$QCe2iM-re|LcDub$os?jKY`H4<*F4+PKUp1L!t#{3 zz9C(P(&Fh^h#VltM*~yQ;;{@r6T)!`WMp)JCzm}nmw1spKo?JQYjWVyqk+KrQ|Wfq zE7Sx>ywIJH{S{{Xy+bVyY3gbITZTaY03OKBmGdQEgtFUs=%&jF=D@qwDC7qHv_3=@( zwY)&ni|*1&O&1`h2$-z{y-Cn#42#yjuw~3jiR$9Qa~>;3l+_gQ(YKZ>8JEkAz$0+& zZgh*Y``4Yx?UaxeC>(mWS2DXzZ@JBG5J5=_JSY;V%-P1enE4n=;Z(4Vv7)lGAZ>RQ zPZ9?QPIvlbh(2{1rRNnVr>YOFVWFA3cN$6o2#+>UK;pL6;Ta#A6W{`510&%2`jYumSr|N+$rO3+_|XZ zg)`h4K2;oMtY}WLO^!h##@0mLB11Dduo0@pZ*Jc6ckjkPQ6LI*yr_wR!)M`H8wkba zGDNwM!7WbYkzdc10VEBiZvj|Z))pLBpg0K%ie)S%w1Kv1ilYyboHA7AIqQC1zFum2 zWe_>r_IB^{3k}EH@$lnK+R2hk8uP88E7cf|o-?!AOn8nk2Ff}b`svn~2d*mQp$sUz!J!9H~6wTbQRZ5UQfV$uh`T{R5QL0f3w*Z78_b zykHpkR3@#(q8rPIaOq?Die!Vxv{vp3edLdTztP9sO4EkWAnoFMDYnewPv|NoW^cqG z!cs4nlr2HrYSN)2^CAv5lq<@?l}2TfhV0!cVX<5A?q za>XjDvg4ugdt)9omhVEcF-Az;o010EzN+D z6a^Za+;bRMLRh;hQ=p7+0)I-#=__arimONH&qR8+nEGpnWnWZ&g|c2XkI7997AKi0 zGgb|YMNrYx4XRdYe8|8XcL94aByk(9wuO~8x%X44T8cEBDOTQ{df1Co{ZHtRO)z?C zYSJ8E2k*aQ*B}AVhW(VZ?FrI*bDNO$hks+OR7j69BR)l*zHLtu9O`u z^gruA8HQR8vS*%^P+;{iM}TI0qmr`Ojq2%zER@jgsz9MkDryS^iPI4w|$(<+Z-%~nbxZg)(l9M;)9IH=V^E6oFu_@)y)476qyk+xlS5x1W zxy&U*xK2L`qZXq~wlJze{BU}|f#BJHC3L?G#3(anGd>h(v7Et|X}&^CKDjGrmb6j8 zmhw`e&%b}P0-qip?t39>E5;`dgF&t3aHItFRVsQ<%NTtQB~P4@XrVL;YgSofGFqX* zMMA^5u_TXgGXDUvESN6wcQgIWN2#nz?dv%#*do;<7gP!V02L;FF;+&Ho_DThk_<|Q zsychLNlbz_j9cv}{{RX0o@FXqxPoVkWR(R^OgtU$$s)Dkj-b9o*TwYs+ zR^Ax8ZQc)!`E@12lO&#+b-Q4Lt^|>@-BFD$H&IJlFv~EtKbPf2Otn)-Dm}qwP+Rw` zdlAom(4Vv!rki0=fQ`E&ZDFUx<$|sn390FDfhdkS(p9KvLE1g_C7a&Aamu4ubEC?h zWz`^nAQ1QL&_Jz?H3B1VG)!8uXqrFqB&D{mjk|FPY>-u}(w_=mogp*QYa>mM)X_+B7Md5I zNO2TsQb$YJF3@eX`z_6>YbX=4yi!YQmdxK|{I-)?~j7Bs#?DHrW8PgyPM_&!UD$kK+%J>(kY`U4pEL2zE zxV!@gsHLNZtL6n%qsd2Ap{S&G3dcm=FYV?ZXUAQN;WHaJ&&IQRCP`}T1mJd7z&@Y) z3G$en`A-(4#N@;BRYoq@F%#l2(O2PklnE@qFruPHTTJfd8*xHT(sew5a|9u3)PuKg zI8?u4;=*BDaidYw?vdoqoy(e_!17)*nDX9hmX{NJBt3lCdN_%x%EK=7djYweOM8n8 zi`$JY5|OA==u)^EY|LwsFpOs);?$i#t<5sz%9@Ej2upwV0DdJv)UnKFX%q z6pD8#xv{m^jxEBKK{5!fHH_nk_E6)LaU8#2MO}(v88)_R9C=z?Nh+ycC~D+bmL@6Z zKG_KgW?i88i;Z}cIth)0Q~lW54hD-(XO6D}>qkvq#5-S$x4cYkR*AC3OZ$ zQkW4m`zlc#d}v6=m2fQA(p+QIy=EDlC@Pk|hXbv|y!z@oN=9j!yyJF7a~Td8 z+yX7i+*>>n6fp;)td%6Fr9vn}&iH&e!#rn<0~6|QBZCZI1f|6AS>cxQ(ay3tN#SSr zkfV(cvxu5-%2l>0NlHkDx{{F zKxv~{7)Y^_P}@PT1o<_~0o^X0An_j#svykm+$fZZG*mhn>Z>*8+)|G!^*26I%TJYY zp;{<%;RK)8fVNp$R(UX98S~cMfqc>bHeIjK zKSz3z(GIM2W|F5d5fql_wsPub6^Hd33Bm9@5*myvHC5vC<*YIa7NQij z(s_^M`AV$}_iL4Fi}P8H>#JU)#CvOYjI;)jHc^bO#&P^}FJ{V$T=N!AtB#r&Dys>g zrxdftBQm>9Qj!#kKnCR_Lwj%`b#QeAsCzM^#t?ys;abZ6DZNY1xu>i7M-Qj#HXE5> zz%VtWtCuXzM>KTv%T@YN@}k_Z^3}l553hoK$3tzhpj$Yqi8rZwiaqoE><_n+zE-8Ekk|zzV>}HxY_&6}q`~VH2Krz1Vd=l5IOkQeja6o2$=wLWaaw%O(;PxN zb*jYh8itfayP&ADOdbXi&A4h&Pqyo~pCva5L#a%;r^V4o@{HKBr6o$h?8k)|%+cz< zqc}bpnkDGRTk{oWE^)cBEZL6bjH6Fc1JRWW8LN(_XNi)aHqh{U>h?Ph)63X$8mbh0OB}i zTFeqfRIemeuquh9hLK7z-?%F}lC5SI2K*)tv`l$Xxk|RsW02}_x^=F{FvPL|@BO*SP@ow0mY zqP_}D?OjLm?TA)ylAuXZOHCyrpFS6cM~&bJ*4jHT=9^Tgf;P_GlT|~3Vnt8N*xyfT za!bR5O^oApxHc}yl=99mh72OB`9jnrj8Y6`X^X(1G8Y4I>@I8yOOS-Dh>nhl^Q8=G zjkr-L{7`zMmGJLMb-0!Vl`Hbp*|NSYn?r-EM(c`UNjpr;L@etYJ2AfFdv^nZ?#}~% 
zn=2{BB8qpF;)_G#_g1L5)*(+(dWt-rQjrt~IE40gWgr7(z3sU2cL@qlhM|?mdSu~9 z+RSRHXlf~>mJBE({Lh<55#F{Rx=8kSw;qrOK%~iREqZ_?)8<`s28H_Zw*0MM5 zkb&$j$G;0MUt6msIDzk@cAHCR2~wh;=xZ?wN`odZfmGp-P5@@6I<#-3ir{|eQ^N~) z6q5x$v`*?0uv7@HSXLVJS%SVC3oy3$on^u5FzJ}x7+>Gp_IBS;c%lGY0%<$zPJl?; zgv;7;pMl zahb5n+>)^eqBvpGg$(yCC~Douf^bav{_3f?>+_X9FD_Z?g=F=(1}#w4^;m?=@)+J{ zEx1TO);BzxY?yY<9m}6&VbxKtWRyJNeV_}U9=aCe$(3T1N7MZB)fAOh5m92UDxeE% zotHbVNVn0!19oXtcPq!PiC1`($aS+jaKoyWnST$j%z0N2Y~hX3QsQ`gvwW2$Ln4`_ zjO{Ao_p$Qe;ljIMLvf*9SrI?2CnT%QkeEHsqQPo$aOQwrR>4f(qPk z4M~^GbBP+r2hDAALaG$npaIDK$969*TrR>FukJ~}PZB6(?t5(FJCh&|G@8i0TEO~) zopM%g&z)PVp?dsUi!Ck_M~Br`;~1py)g>Gi7`s(gENl#Q3aNJ~U_nu9l{ubPqiW)R zE_@Fg^hlfxxO!0^Ghx)IG znhGgt*A&JoQx%r09Kt4lJ`d&9;K*VY+R`f(VAlkJ&70fHciT4$QdD;EfOd1jteZ<$ z5>S96s;dX9-k0KBiC5H|3#Qb$inlL9CC*fM79~p_4}#)f&6Y_iB&MR3rk*CHDt*n2 zg8s{i64!Zyi!H6ur7}U-VlXqarvpJtyn!f^kabSVA-U@prOcg6$0}mWw0UXp%4|0! zgMd=UO;ZfC^r(`@Q5)Uaw*ohxwcPSg?A$ixxF;ABs$`k@RBgV??%oNG0!BM}sTrCw z?FD5WG!>M<)d>d5>@=)3bnFTD!vds`G~nMf%h*~;BZk@yWjD^HQVAHGDJM~uu^LyW z#ju>qh|=PiQCMl|>g0Ne>QTw~M zbEQ2VBU?!%6rv&K`MHuWnU;Bh*d=0-pi%z-Arae&EnK}?lvT8G@~gf1RYU_FA1b&n z!VjnXRYb&DrCsQqbc41zwk98ODYqh}X}Y zjO?<;P?-JPJ=&s{P5Kph3ATlK6g*1F2L%o_Dj`gSnbdr!RHnc^LE$|^$u(2dWo$nK z%~Y*~W?HPDMP|XOAa6Mdma1qTSmjnKHpOtmT@M>})xeaJMxt@@s)yJqFbUIN|I;8- zoXJZK9YodAs#36YsFh-jm0`PRSkP{;Ud?mPt6VneAd@D!T!w-{0N{PQDKi>CvpsCM zRYoaT^7)J_-POJS0Nw`sf%spVQ(!27fjxAd(Q7GClRG@-rO5Ns)V#G>OwCadXx1}T zQszVg-uOTcgo_Y+afGd4lLH=(9dx0G#+{@Swm{*+v&EWmn4XGd%h5{o!J&pJq^w0H zPU`~m*n#ZYc%fb3SWp< zZG_)lRFTaG+?myc<4YVtTEyUaiKuY(12)m6a>t7b-!fk&C?Th&gs@UeZ3W{1*}rC% z<$J|w0Frk3(l>73Dk%{?LYd^n^9Cs-ikN+VDL8MKqxs3{-6Brqc1F^!q-Z$HZ8)(y zOjAy)LIKUok0PFrHmfMwy`Y*?tWQA-VyD78>XH-Ex zWR94ng$#%u0WK9uPZcY969HaJ#cjkCj9$Q8``0(qio#Blr~o>tY67(ZCNZToH8vA1 zEY;S(M0+SK)s}v~-1tvX)Ytp2!#P0YWAx_c{GCKCW zXUzj?5EeUaU@y4+9Mhyk1L;rqL^&i3aq0O|OA*Ag2zOX$p=v||nG&kL51Usjxru6V zIX(b@ykI5e@Db}&?=d8a=#8JgnhY7Tt|()o#OI|fzHL=58&4pbH~Tw@k~p+J-Mp)e zt-<$U9T7||rKKe)f(|3jnkzbejn~tPiuvm3Ya?bT_|-%Fzxc$b%-99{>JPAS8@XbL z)D;JXGjzgE3O|RfFjC|ihkCiOJlP#hQLBd^D!sdgeg9?JP5TU@E=Z*_44NMf2)JZ)GGD#s0WIkgq zfRZdZgBC9`L#tYWStrPoT@rC5s3c4PBA|7nH0Jp!>M(r8hEJ3;9Y!%7231L_YcS@B z!6A~KSq#ym%&Z6uvD{dZ#Cc|7w1A|5tPJ!=N1bahu~UXN5NT_XeL&UX6c}z7oiawu zZztA4T5%YwFsNXG<(gP(;f^Y~QdnuojGe`7H;d?a-RpWnoJPVU_w~}I0a$`D2Z-T9 z9WHUo)Y!fkh~gbIpD5SXs9Xe4($G{287btl%`PEKSr(nmm27^|2ZX4&aS1X{UzIKH z5>!B-Gp(4nN;-^ku4L*jNNcgYP8p@kje=9*&`K*tDI3j8Sd-C$8YvPP2b!ndWh>lq zMWyoFO45(wgO3QLcQ%MyiBURZqK9sebpxc&)6BV-vSv@rU&`?63`-vA_FluVm!+qs zsKR89m?PD?4=z`pU8uzFHz&6Xuby>szA^4JZrfr+QJkXd)+3j&>?TZmCr|S=^#&Y9 zj|~w}%M299-)Uo;M;I9aK}qGFW>)5Or7hWeV$*rqj>XfSNO767tP{{UGrT11*$ zNhjWRh=ulmzB_=_^TSd}JOx|;e5iKCGxlf4aBMRa%#^WVX+>2QElY>sBB<92Xs2ie zSOM>`fg#X?ZFs|}P$2QEwo}5f<<~o>6d5Za^y)eqT+)=z_>_t%@j9A#qY~A^gJf|HSrjPu5E~0| zN=xQj$l|GHHcJW*f;sTr^RXNhw|!AHxPz&Wu;jPdCzTJ z3;Alo<3LN?ace@-pE^{5-6Rrm94ky;Q@Q64>knUgiPT)%MEE{w%{Zk>)}|2gK}SyX z?rH84V(xyIwK{1KIPCnX!{Idq75iTJ~8l((m zt%D9_jRT!kRQbEKweC+gTx@u`^`=2O0Kgt5qP91cF!`$Fq{Hc`V3`PyeM4_&2U1<# zwfF(beqkqwrV^y25+)AbkJ6I=0L|iAtLkZU4J|mV1!VaFQe==Yw%>9MzgZ*m=8~YY zM^8SLN0qN{4V0M_t%y5QT3kX{R8&P(h%B)rSGc$?6gR@$pkOL|FnZ||$Z=sf<2_h< zitps}1w^!%fAdNTnQ7zw9v>^CIBQz>R$l)AN#>Lm3<5l5GIg941 z49Y5MBLzUhzHUT_+f}?i?!OLMrj-zWbd}az8r7(%z~}&VRO)k>pz?4^2qu*&H&xX~ zbPKfT4(fuIxjeR_2^}0Mb6}_>Y#!Qhs>^uPHBvY88jqN&RFUYStboXlS9ZG`uI}Br zcG?P+6IQrvq`(q(#x;ixq|9kr)KTXTI!a;ZO-Z&lm^1cWZgd_aPI;7_I5{ShvZoa0 zl>!sb8du@iUU4$Yl=$sjlhT0n%^F1_i=B5c7VY1UBp4nj^QGuo6r}|SjhN5BjY&)+ zH5pb)>_T~I=3xxZW~KL3t@f2>4863|juE0k<4z=@M2t#$HdM;&sSHp<0Hdg{nr&l| 
zmAs&=!@)(rrrc1c6FNwN(@%F=03#9x%7s1`lIY_3Y)=Z84yaIFE9uxpPQ?4Z1fLIQ zHQz8N2*Q&h-AluU*rd!$g3>!nPlZxs z9Q0NwvDu{gswi@lkt4RmrqSeyI}^C}{cXz}!AeLPHrA5bd%{(--{n&vf=S&cs){To zNdEvs8bI?hF*@xa_an!T$A}diDJx{HQs2J{_9IypEOXRU4fGL_Pf-;V?{<{i-ft17 zHTjN6&EN7beqhV?%fj|0lt(hur8wSLBnHO@f}ss9?vtz+)FOtQnZYE;<&?c?iNsSOL~IlUVtsnVA;R#~%6S@|s|~>B@~QEhMmmWNOlU&+ zaK_Jd1xc}gW`l{HQlL}mR0{Sf*GxYP$1ofMt|ZCT8NRMcY#H#1%5*frlg#}xu~QnB zxJGXMx$AZ{NG7$Y4mkDEP4#{Lcr*S7)Gm==U21-kblW=M_*NxT>Kd~fOda4#Tt{Xr zgwY5c8LB3x+iH;3Qu}Osg*Iz#3JQ%x9iNUBM^aRnCMKceY%isq63UrREp(SIVfc0j zQAoH}7l37GO;gEP1XnZFR&rgV&$%lVwn`9)PQhB9{yMo5w^#JhEzq2;*?+39Ah8fERBgxja5l3HWDiADvYt%X<$5W$q#U>-F)|8Nh*K^0k^5t zv48 zjhLPtJyjTDnA{6fO^adk;?)&JUW~%fM-*s#m#|BH{B1WXM$_w6a_M0*l#pX*MRi?M zHEE0Hv3hDX!+%Fq9!Q)98h}y-b~pHPs(danfyC48v=OKgsLwqWgA<;%x|%5)97-8! zSX5C#N)(A$3kH4bdkS|iR-6o+Gl;P8dU1nZqm4B5ssENeY)%4;_sZIR1 z7s2|2TY^$!bViLaHh5j7o=DM+JjzQGBub(-VWrgaHXwwOXkG`lg-jn6smqlyR>#$5 zgDBP1G;N2eR-7}SgK7h*CPoKub8&7mp{Q-AQ^J*bizIaO5QazwV5zI8K^8AxgHe+S ztek+tM`3T$-%x$LK=TDS&e}0T%CL$o`RAd1CB|tndTb{b#BjWC=LTxab@Y|BRI<5@4X`+M`vyjn4;sN-1dtZ?;Xqqt5DJ`tR=HYb*`j6gsi6q>egI%o41nnNH4 z(xR~euW@6?JAy$_GfbsLD$^KOJS#3_>@t@v;kb@##UP@Vvac0`8?I{MhvLanK5CYD z*L9$hG;po9+Kx8sf&!*$=tA{ufmm=H&Czb5R#V|(zP}Kx$k?>>8Jdn7+K45mm{GL! z^|1#b*gAt^4a0cp#YMPA-L#mtqNCx*rn{iYeKP6>WyCWzCTv_zmpxFv0huv;W?NSp zcazC3<^=U>HYK^VdxAZnsy^ zVcDjxlREV7S_Q+P65!a9R6|h`LTSkpQ%o%-t?saGFU{OEp6EHGV~42yDTE&x90fMw z`7XN!s_NbgFIRD>vFrt9!SMXo1vVDglhPPV)BMYr36?S1%rEifT1tDwj3S(LDM>NL zqIx;%HD)mmFPA9#rw%KMVU^I=)?gJ_CJ4&2)ghVWf--~3R$$trYJ2O==eDX!l1JKW zSyjSJkK(Hv&6z_a=jwdPf@3({0c_!iO@&w2Q()8vt|^`ph+Wlv?=9@fy7nX4!bV+E zV4OhfqlKY6Xt-e61FA6QN_;!1dD|%FY*vOj4^6YC9fV6v*o_>hK#H&m4DnWNSDN zw5{w^_uw-#%a+5PDONIZnm^0$65=-ag*SBD)8;Q0d-YL*V6d8eO(upZGX@`Bh*Q*4 zVze~=dI>1QP2O1puHG@yq9F30W+RT@d@JejX ztxJf_nDIH0m<{DytPSr7@4ha zzY$MOxRzM;0gpBjg=EB`$ME`0T36}M^9XRqvb0hviPSPS`0ha@Soh$EcC+wM6s=J) zvWj316Q>5drt_-IUO`vii1IWS&zWplr9fhu$DCWTCTXX} zYw)VfQdW9@ldfo^T7;3Iks8(!+zmU6@ObM@sHWZJ9ctMdI;bYwN?+|8NE&?crH?@~ z?t98vqJu8xIq@nCu4Ad|W<{x|%hF@Dlrx%nLQl4hnqda;cZ61O1@`H}-QuuPnk6dx zp(Jb+W0c(^^AQ6HkDW34&h>4Qx|h>>sCo~XX>jaIt2nGt^;f4Djthv<#Vu7NXcrTv zhG<8VBbD1A0d7Ub@5I>-RdU0_zS^CHY>tXsonJCpAp!}ixy%QtdAhP;ic3k2Pe8F( z1dfWLF(gqa7L+Otg#fTw-V~62DYkdO{lS6MqJwRqYg6A0MDyVR*bEf&X z4*5Lemm}R2HIT@qb!k*v$|mY=HSNN@&n$N3qNxdSCj%JydeN<&+ji~(0V!7vwP_hI zK4eTMHDr9PJ_(j`PfGgRfKX=Kk&2qNqp5bF#VV$Q`HB)4RK>qxb7IS4G~{`k-Ms50 zfK;TYg+~eDLpw&=L738$@vCvq{;~Q~lCT`VTaI;KCg9aLNTa34=xTA8RG;Pent3=4xCk0?Sx_EiP%o@XW1-2pU=Bh(mc^+uf`?lf&$XciPLz?#$|p zXty-aRdn1DvWY$+E;))(zs=%pM)3KO!~*$VQ!7}4y~yD0v4In^f{qlWN5eGAvSqlb z$Crz=^A-6@hNoSnB=I_fq4DFX?!d&-QeM~g(C0~Aj$qUO05rxjtsZQmtEQGbI%rZb z%L^z9rIcG`yI)bmHeAqYracyGz zn{?jq9-=%grwF0mT;i>ArGI>Scu^|Knac~!SakS4dZ!fiX%Ci|j7lzN`z4pbNIo3M zD?8-6)H~A5Mf={?(Had&kj#0dg zWcQ7$6T{(8CptovrPPjar-dCWtD%CY8=r(mjuHz&TyK)Amddq z(PL8N`WjWQqJ>KCmKHJvb-uExU`Q5Sc;@n1-~LRNQXE(!MEg&zHM`f=iGNCoY#cUC z7>YWdNjN=qHUzngGGS*6jM5xm5X)6Hbg!^kWb-KIos3L7HLj<)w!8=&&oyAA8!bX7 z1q?kRh2P6E^`xK74`l{jX3GAWAj?afG6!6+jP*@4>qqjm5!OS9Q&hBTFcZ>MM+~T% z?cBSDhhASR-h|yeS$U;2V~CJEj~dL0*0Jw@+P`GHyr6`6DM_v(YP6 zW^BopuzW_B7NV$xr5tg2)$++4fz?E(Vx4u;++x*-Sh!d#Qh~rp!a2gLaHf}RfK!bM zo|)@9h3iwV8Q|vby4T_TLV}r6c9JT^{%eN_eqKs+mj|S%K;?bXUNNVL;Vx%%mYqUW zl2A4n7*VBzigYAMN%+v4js73qVd=IjkZN#g{(4;dO!DOXb0*ZZRLzI1rjv75%3%@8 zWJipomLkIAQFTvemM*lVxI76lh?6{U+w!fsliZ-ELbjfoclCRkF@HZJ;22(FhYrn8 zgHvU0qETcC$eHTt=KfsGS0p4uHCrGFxP5*3T!z7gWy{oo1ehdn;A;7$^T@eCLX3^Q zbzJ_T^9FXp{X%B!V!H;f$}(m-;g*vP#U_ob)JGDeK6LQ=jKlz2hugluwXBN+0{y_JmZm<7RE68ir?k+G#jO@si~%Pr|j^*sjgaG3L8sOjO+&Yy}R=sGl!Ih`^-;S_tf8IY@AcVjo()D{U%81 
z&SmPhP0pBYO-2)jRAUtM82%v5MO-0IGi*n3E2+@iP8wJ?E|!F9i7`prz#T%r|J1MM zk2_UiwDi>U=9ZpCEEg86vPjZhcOhtFZN#4Uu=@Gcw{6_iU<^9qxn8@K=nB#XJ2;=v z)G2WOs@9r$KYV~P6nJfuEE&|EC-%X{*)5PJzRi}s)wFV_Z3~=b{q|PmfV9i-l2!jZL z5VK;we)%2G_+kt_>-PX$dpO^Bs1dNzTZJVmCLnqAel*=x6(ls2KO-w*sH);g9}k(M zSmSH44B=0_d!AWu9aE zj1P;ag;fp{r9nUe*NzmMrOdTBbRVR~p`NatS_vsP)2EvlvD3?EZsUnu%nAkzk_V-E zcMdHDOHZ>NPBj{zJ;-Wes+J>9O-;F3DW#GancqtRARF!N;%hs}0c6DNKfbKDloz57 z&LU&kPYaD=Sgg^{h2phIGyockrIKfxVPf9x#=);{DO!}|5MezvR*BUlnG~~zOw(Oe z0fR8jLp3^=dfA;Ilx`mIaJ|V<^`0#5HJwE4_tM7DtvW$~J(EvZg&r}3VK7TwndPBa zztUv+Si{Y^+rh@Ie){tFAOwV{kPRn$ya!UxpyzERX+NuTJ#odc+(R0SpP80g=v|^h z`kMi%IswFuDWoLKojVOT-vvln7@d_iTI{1AnNb!mO*oLnO%4}n>Ewnk#FCaLcVm8O z+xE!`2gGUK>IfqQ)XJLNIV7gT)+wn%C!O<{e9VG2KWkIq-Ztjk#A%N{y4z0`E>f%` z9+{yRqqxQhy-UbcSWv_`JSyJuuTni$bf2vl&O29xc|9n~!d#J?sd1U-ic0w@ z;Q|_%mI%lprITz7Zl|#xJj7dcF#VcPunN{>9w78qZTF70>WG~?B8^6E^%D4QY=RG6=b}vtdW{ULb;#I|#=&Gsl32Lb3G)c68%?xo83yWubHqv%m zPk#v`3Lo*#v2?pFVOSPbmjjmqYBe}1${3y=URx>iLMybJ8YTdhEJ*q|K!d47MjL3^ zQk-ZY&G?5xb0oA_-3#N{R~X9}ttB-LMn+2NN?c88QMOP?2;CDMr*Ux@HY8s6;)`V^ zE+pv(P%=9DX*p#?V4@(<@k?3J+^b1JRxtW3xrEi?G4&Qu^%d07VNyp$E6Y$RqZ>Y1 z)f?txKm!8IMajAwkP|X?NcPgU3bhIqaqg(}UGU5cGG==G4n#EVNkdlHZF!`6Nt3i_ zV_@;HJ|OABklN6qNP)(mQj$y!wEqBv&*_gA%XI$$)vA0xg9pnrZ&8L~F(kNV9|Exz zK2(x>jVf+Ss9-=|)(4L`7X?yeRaPLyFR+fM^pcF@P5Lny}T@ViP;863VE?Ul(fn{VanK!HrdxJO2+dHqYuki3^LD5q3iJadgzT^ z9N~r3M2WOo_p>ni4;n8mTPnz80VbO<43;fjh~^ABzpB{GX;Dvt;J75v+J>#+FC#RS zQo$NAX4fG22`F_p?sCmJg-tJ>F_t8 zsi=}Vs@y<;C90WZK`NsdmuLZ)lXg>~w8qsQKPtP6Q-%gLtLNgc(P~VG>gzM&xD|dM zE)UeYDsH9xy)8UR6)g3wvUpG-3jXoF;CY;XPubJAnItU9Wm;#6{nddTzvNBXXDTBE zyftCfRgFi2;xtQAaSHg(8&V0Rrjif~lkWnrNMt%2iyls`wE*Ka07CVhSVqmX&xd6^ z3l^iqaY~9fu*zz1z^!FnTnx{yzFVfGp5ERlLIF&uVYm0vCz)<_84@@iJyp#zUR-!7 zu?k7yNE8!39Dhj>W&Q2)Sbbc-bjTp#=}ng*IFlU&Q-oEU5XXbzUVSp4iEEZ1jaF{qqC!wKVE{{TA9txpor~r6?jJ6ICj3jR&GnT|dK@ zGc9c=pI?_?d`^)Xc9#^K{{WI!xlk3abK2eb+KPajCZ0oNC>%hfmM-+T1yv-qm^BQ6 zZzd!Bp2m84U2Fq0+=9Oj+}4#yKKgFr>$@aQ8#d#%k<^&(60*-O9<_CnFk?JbCRsx% zy0N)7J;xHJ?m+@b6p^+Pg@T?tai@gF@S!TA(nRq%_i*jAtikQ6P+G_1rxc_lVk64D zZpl`hXQzoB0<%e3^MPQeqDn~8)5%RbOC#ApWgDHXsrC*!P@*a6*1R?W$8Arj&v?EA z7nz9Rzb{ECQySBmR^h$F+3W?6ZZfOYHke-ucIx3#uz0YhH0rHQL()@E8shZ0eA}Xl zL`vmLc-lLgdpN^ci;gWIfN@bo8aJS?!Rn+#IhH9GYaa$T zT^8o{;?~c5Bnau@TQ2_43}nd|QbH`jPlnG{xQf@}bGz=EcUbB)4YUpAbicEe3ravh z5@~^Wv`Undf#(X0*q$nhBovu`s+I{zEAy0voso&L-i)jXKOQg~a&b&X6x)k4fs-3C z_I{O?mMr?kX{gb#`d2EhlfbBVSoYrJxi{yJe~zU<^8IN$Zm$%fFbT$LBVc&MzdDuk z)k{}0w&y?|6t!1e+{SktjSD`9Zg^QJYHnaCzRV`eZKXJZUb9z(F#J7BVncr z7B=HV$JNKBpG{t;x&@oFcS0+^Z>c#q4s6?iXRNbVh}KJpr6QV@q1i=IBg&G7vH+LA zZW*zBt9K$WI1oooA-uFx=<6FQxW5ixMEz94vH5zx>Nf<<*`pT4XyqL+>1R;|RYo6x z!(C%hJhXU};y4|WHAIcUdlGGMIJY^@n}FM3PZBz-S$yO7h7m8rvoc}!X#0*c7Sm;tcT-WL4eEZbD*{L^_#JJI!48f zPn23ZidZbMg?8>yucf>EIXnG4Y5s17FJM0^fxCZk#_}h$7$ir&h$VI()YQhZJ{ZqWJyg)oP`8qjo;QXiaBKqZ141=CuziNlc6QZE zT7gmoj=iFj_`OD76s|Bzd8ZXEx@vij>jDWjB-r-t&8#I!*)+rG0F5GgWYaYOqMcP$&_l6j1GSZEWCbN$ti`lA)(bk?o`@Y$ZC$GHNW4Vc4Z?7~c$dr=LfUR=OZ^X(xyo6!$u> zg@=A9O`=qS3W=KAmg`BBU)fg*HOeyGAVHcfIECdyWo>fLY zu0{C_cJ%OXDJfa|raa=Z7wPLSTC-;~j6y`7so7S`)m+n6)Zbnnnfhy%FzJOiPwS-3 z*u55Hsg}AC42o(hSp4HU%NU6c&^5aHy zKrL%HxgSW=jFwTS#WeVJM2Oo+eB@%qX=&?LDx4n)l2-Xzfix(yMpSJ8!viu}`gq@o z6vIafQXXF8`E@i?Ami+$FEHPGu1JbCI7VRU zwt9y*Omf>{7|s`4Qx0Q;6KN_{5ns!7N$D6#EPg7ak_QRyKQh{cju=s+0jB{V&=p1L z8>D!?XqpO)xcOqe%hli+gQs}jA&1q=hEC#WMC#C#8)jEk)QfoD;2D27xYGeTl6rVj z6PLD^FjQ5A<^GxBI2K-9BQIk3m2NwiVW+RevVC1;JeZYSNn)f;BBHu67Ty>O_qPb| zZX7~@jlC3YYLL8Uq32AmhSKE0o2W4A^@k3IIr8NYti#0<)V_-YyE_|;*p)UO-d^36 z1j+k$({4Hljx-Zw?w!}=TOC%8JO 
zV142TpSuAdfwLO$LX@Qg#*3Cn=}vLW9;b0`l~i@^qdC#U?L&uQ_@-c>hQ9`)il#c+ zHHsN0^O{?b4dXk~;2n5tae9T@_lZGC+py#EqV(ITE6_}it`uAP>FKM~=TG{T)ZB*H z)+yBPn4B($7s`3_9huEW9Y=*Jm<3pOv1o2pbhx$rczVs-4<)6A1SsJhGex%zxZ2h< z$Rlk89+<&z zUYV($ItuE#bzLRyvK6@}>fkNM0z*2LDLi^8T9qYQfK4F%8T2ncV3d7A%l&oh6$WR_ z7?v#A#|ow}wji`K)J7JjjYQ*MSm#zMJK3&y8{f<3MZj?hAQ6o9QHDgzg|d|yHBbKl zi(f^s{IAu#y;()nxkeX_(LHTu4Ta&f^bJo_9M^3i05mKtK|0?2*5c=Ft)=)j9y=@B z&zGyhLBP@C^mRu`(e8kB2d25FHPus4X3E)<60gLm5isvR7Y$Vcmy{I-%mOg(c;mUa zd2YDaG6>njg(J7i`=o>{f|JuhuciFo(agbJ)EqYo$ygM${{ZjjCG{q+6j&D+#3}Jy zUQ-Oz3s@N=%XOdm@UR01^M;pdf^Au7~cAjlK6bkJVigjkcLYDwC9 zI;yklb3at^s#c(Ep+sJ;cdm*$xh3- zx(m@Rv--ElIBs3XsIat;qZoZxQF7K9oGI2y+PYSMDyD{ld0kncCIUhh+8CPwz(w|Q z)s@Lkhz{W*B6@vjPnXy#>8&BzBM~!)RRo=V>YqyblUtPH!#y`rWDLjCd^s}KDT>kg zHB#b>6x-sOXAv)$$QZ0;i#n~5n1vQN-OG0^(xA0#8+^K^kmsvIpa3+H#(q=}!FDenLt`Yi=8-atEsfW>7+&}iuDGTSF6NpD5_8m^y@;mjH7mCC9i)12<6bAt)O8{EvD2X z8KgLQ>+uAs##iPfu5F^ASyD0=L2z{V@$_mcnoju}0fSG_VbpM9bf_bysHK>ckxG@j z6e9W)y50DpY$zyWRcXmdBWJ>dnDWgG8EjS4*Vj?fBFQrRjP2#b7z6%I#kQYk-N!>J zN?^@43JUN7O#m3SAC#-=C&n6yc+Ml;*GyZILC`QeCy1ka)qMG1 zUj{;^&(#pYhEw1axqmKAONPTnwD89!{q~G|`_zldb-#WGcQ+_z*5TzSgR5bsncdQ^ zEm{qx0O3LYA<{@n#r<5WhHN&yMRW;V#l8OItH*nWFwrLVeg_K3=?WTpb|yL!11(;h9K~aeJTFsms2y33^Ad$njVH?_$4kM zibE}4IXkO@!gZBuXZJ{AS8FPF2KM1sduHRXz9x*-`KHF3R^CKi;>5J-|G zR@$-~_GV$irCvA;trBR}+ccEm;Q({&rf#HXd^(2?tEI&9g(W3Y#q&_lCy-=GQ9u_K z`(P7pYcS6cuz#p@*ZrBI*xUFs{5; zbjuiA=B}FIby=@9;?Y;t(%?a`+&YSaIOGf=ea1v)kOkTetS@Wu z($jM@B%~W_r~n8ANIu#xx!#*M3WLn_i5qrNUUG8=WsaF}*a}js;xssREijd-LHjKn z_8wx9p&Xd(?Uc=`8(LCH9YY?GQH9&huHdNzf$hV9J#{A%z;L`ykxnOt%ZAj`#_0`C z5P(MvY6{#megq!YzdTqPNy)gq9WEn5mulsfww?T~=1AHlNX3dO zI-cj2UQ=?AP?NCMlYGL6Qh}KA#-BYp;~iD$r&;n=a>DR@OfCaU zTKJkuAZf~j=It9xTn4eWoR9avC3fc3(CR?a2?`_Mg$VL{*4T9&(vIA5t;O>?WZtP~ z-iY<0m!PLAW-gyq&_`XEsIXXS^G;k+yTllkJyg{&%L_08kC--+Bo?)~jBn-&TO!=z z5jLt|@c&Z;fbBu>*@vU5_G3E{KLjPz1h`NnR;VTuY|*99qH z_KiXC;&r-GKI}ZHBEK*eMhfWasIueqZHGbg@zrBB3m7$(fn)+nBh^;g5o_4r$CrKm z2r5EJ!_I#ScsPV81rKflq~UqVB92JWYjE)*ooeHHpc3G!D1OIMKE7EugdiMe-&RGz zUa0_S{*YUC9;hv(~R_CY0I$kjR+*|bM++Z$2bjyiQ82uTyS)1cE0f+9&&?BO1C>mC(H z4GlyxJe1XL#KM$QQ^f?agSECJEz6tgILpaNPL%B%M@Xf;!BnSA$E9;P4J{okaLt68 z=+Y&Yo>^z}V%!eCV0(+3TEr2}sj!rp7|zO0wUn<(O!e2Wj5V>D8NaFY(3((^!z&mm z7h$>FWu}P>UqPW3<=AMH9}F6@`>?!GG63@9=SuoYS*WSZuu@S|Le5~8I=YGKe70=~ zSm!pshMZEVzz`$KpK94CY?1`??5Wk2)thP8{SF8bgHzB)6-}Lrjm%yLj5%)BCf^P= zqOfO&M@3z40|6hpm}oO1VRa$uY3Z=6za*_1wiQz}Z1WMkhl|Z1?e?4eIGq=WLXNyT zIr-BpuTnrK&Y5v~_^Y0x8eC3%LW&^j!w#s41P;31Q!2X^KGfrHEGbxHOt|6LM}%{Y zC8osj3_`vf7K)N3P_eVq#37n%B5P*aEKhrtH#|$<7Z#+>9b0KQTGp=$K|bo5l+`f) zixsY)4oKwOY9!rp86$1HsFK7U*BV<+DGh~0lRY)CumZKGWn!$s=pd`4LzwX_K}how zIbmp-n)d-EWhyV;{AENXMhdy>tF>bz#S1>qbq$`MGUWzh>J|ZAm}&E6K2Xc@)ZJ=(>N4d`C zS&~$_Lrr2x!yW*6)!2G}>NZ!CD&U#J0`+5#{PRR<8_c> zmG~T3y-c1=>qwInmh{+xj!_ozwU`1l)bK9V!9?SMAkmf0xgec23LFA@%;l77;hQbu zm?a(*(A3gn*r~%>g1o~lP}ES07^O$N4nYh>fClE=GPrpQVaWz(2-*RG`)xh`dn@UfugjRR;*yh z57v$jpq{!8u+E0#dorZhJw_{t)IBut=KMk_D3Pja8a49g4J6_y=4akRpd!ZB@#3Wh zflvbgFs7SPk2(-?k5BO|B1s^`x&u*FC1i&SmmlgLVZ*nwv>_P7PYchmJ&Lma@NcuP z6J6XiKp|f4G@+eSvV=@`qghiuR(~?g8D6#rngpO26-87sQXo~ZSpHPBV`Z!WH};Mr z3hSD*ox3PZ!8&`8aLRlv9H~+%D3j#V1C3GW5=VI?l20i6%+h(JF|h{1-MG{0Dkf`b z*@ZnGmEhF)z8O=8<|@h@1uCj*F^s43XwgXxO$*Y*o16xW#Oxh5j^J?#0tr1H`cg2! 
zQ^f~O`gu)^;rN~nlolqUXsamTu9+Z)8knDM^BHybt>J5NmsUyXr(0R{qcxQ>E?UMg zs+t)x#U@_FerYji@w__{SE(+tJi)^&T?sNUWja{fiKQVzDem;r^&`tUQyf`FB|}k; z#3aV5FkCHBu?nYn{D+cOU2Ig_UL;DCm37m&s0k1%T{T`YoiVIJ3==Y(p-F^Osgo_q z7nLlsNSEI^YioAAh{ ztC=HmWwn`w(ab>TrsqyF*hmpGPc3SR0xFIF035!ZVOe`HbQeG3c(Tz=iBVSMDJtlo z-1EUshN>1}5E~d2wTE%cp89IeidA=Ny_hHg?5s=royroVTU;c(Bu!zP(3M2ZW~Z8txoP8%7ObcFW>-ooBOj1r zR=KlDsU8B{T*AVDBMNQ8H9$}sBi~LGwb&IcJw%dXB0($f*(@m}lD~r}H`qPAah0Jd z>ZcrSGb`cjrn&PDATbF|EkwU9OxC7^DW-?Z+jeqNK%2eq1>-1nQRPUjiB1uK$Ir{7 z%9<*199C)Ks;!xu9Sj97AIsU3j z+B(Wv>s#eOiYOYj!Ys+6xmS+bdo9H8+^f9r6D0H;F!HJ@{D?}95s8jH6ze8(&bYh6 z;~uJrh@L67e3Xu{wy(2vckX_kDj{wY8!F8$z+kRSZH=5_l9jbpPaQohQ`T2gljiT5 zqMnJG$KGZrHP_pIY4s&0K*y-keQIBaQBp@#Q{k1vJ$W27!JBr)9vUbLv9!iwcK)GGR<98OZ#S)7aXgqv^a%yLO9e@RU^V;UgX_Pm=ZgG9w)iWb(7+Q)imrCVh9S_Nu<+di7BRn zpA(M}%s(Po*&1jgryGIWxU{Fl4LRXxHB1#Kc||noDgYQG+sUq~@d1NUM;0HGH&*+^ zRY@dZp$E6Rj0pJOo^jWNrKZHkl-AwS)JZt-jD4b_Q|FA=Q1Q3&s$3nYP%E&GS(b1$ zJH6iDMx02^)zK0_9@FoNcV40hnHUTzN@whC*uTzGO|Wl<3TR`BD9^{aE$q1Bq`XET zf_>DSIO0_b5sn1>`s*bwbj=A@HFhd0Vx(fNG{!b{3_#iyfC?|o2i}>}9|}%gQb`VC zj;*u$(xQC#Bf=_iS)itj26aV{O(OyVhYE(_y@QUIp$48q`oCzcgNGX8xwZ_&Qj}SH z3axt%W^&Dd)SgOc5uAbUKAwmuEN>^;_ViL%OC2VwJ2; zyX@8td;MIxZqk5BG6zLeU8t=sB{-kmMeC^b!!+i>j!|Yfu_~e=N=j+GwO1ufb~`RC zHrR6-H=8y^Yyn+W-4>sOx4u8pm>sNW*<%2-v-BUtVaXQ zv^bDD)&s#8DZ>Eq({Mx@O~9rokqenW7Z~3XwL6B>w;@ z1^$u8Go0Gkwm?Y+HkxxPu(&EEYTH+pmvbH|LxECY8REWtUpQsBlB%N^!f235EQl3% zCyI;&}30Qbxf{t)%MP6|N`aV}n*y=l+3pVvh(JN}7gDLnoXW>KdX2DKDOr ze}z2NlzV__ZFx5K6`PxU*|KBq<54GUSy{Tt-as?qKI*HBW)CI_W}~C1YMDpMMIAgU zx{}+AIQw6-oiIw42tUMqmCMz4p}0E%*~WO|Ss=mjd`hlp_-x~q@+n0 zjgeGCK)VBB+dF!VFDmdVX)tO5I54F_O0iFd(l?m#EzZ_vVApQe;`C_-Q_)Y0H7~$S z#wW|po2JF^>^*2>!4t|>_n`TxToyO87I&}(djZN5CPjI|(n?MwPfat{B37788QCFP zrgW^se6T#VBXb8Tgth*jCDM{kC#-C*I+9?vsFQ)>Cv0O`62C5rRHmV>sA~K>bU1Wa zddn0}&7s`#Fwma>4-{%?WE6-M@64g3=_gblZ>>B&W}|`zYI>T*#h?oeQPd%tD436V zUA6+_j4kCQQ?#CNr;~NUG???F>-ccv`L7rC2R>E8vFtMpe9?ecVfD}@9FqxrsbQ61 z{{Sq3LkpX=*6Z8EW5?8U!C7-UjKYtm6_Y#RlyPYQ8TQ2QDq};;G*qdPNSP; zw}?3G=L+)ONP>1z%E~e2{PBfiIJ99Znna2ksvL4jX=kUMY~EW3RA2|UZoEmLpSqt) z$MDT4SB@6$YGB$}!Qp_Plp6zHHwbvvLpvoB-A0cenvq{syA;AWEx zvI)j%d6c@Ll6tw8Y4g@lj*h<&!|=0&X1p^6s*Q0>K9;Qx!7x{1WR65orq;g_+*y`< z8!`6NtXy}PgMjx@i@+;!{A(cP{8J)O)Z$obD^piu^m9Q?RZUMI+GHD;EP+dIVSPAE zEsnBPuyCU#I&_59VRYLO#4`s}I+M}vq2@|F`JQsEHekvLn6otE8hm35sIQ9=!`+Dc zH577{A;y}6ehD)hRknmJBfzLWU%ra1?oziDg>4jTI#P3nT2*N~xzpwnYfll~Q!CHf1iL)8T9~)RWWUGGfuR zvNW`~tkKe#WCrFl9Y9T_`+htxQ(u&D1w?OwD>aC^qUIBVx+}JABRi) z$)6ggj%|xi8;>qX*=z%>TU&6raiy=yAt+R)0O3tI7LhZuD7JdHlPut!S@gx~UoYUT zj8kV|`7<-s(o+SNN}6~T=_iQTFrFYB1_t^L9A&JBtlnhpMIya)A@nR94I0l<`R@eE z8Rw!tqG>T_#5!-Aa5?hEZ!t*l(xp(7dQ?<_U>I*9wC*@xl`tM)qC9CcORk^Ltzh74 zo4s%3I(!4vzEOiHPBHbWqrkI%U#q5yI!Bl0v0AE%e|`cy)em4~zQ0bqkn$esc~a7> zCTH0|%+)S_akVECR-=OE?B+duWqdkp27DT`K6I*!49pYE(<+&$$C?X`!*{#_mQ%O` zamd2wEooU>N}sh`Xll*lpP0npMHtUb`DX&leNF4835jKqg0?u=W^1CS#H(k-Dk`I` zr2y2%)e3f$b!GD}xvj5mN1O9=qqrQNjh8|LDPlXjc}-NYVd=z(x2Xj$AW(EbqN{Qqkf^X zZ%ZnxI$M}940ETLK3c(ZPA`(bp21aJhEGFEvo%dUB}6C+uwB7zx7mISw^6%C)|9)c zDpHK}A7uvfw=NLUl++wOl~5E}f=m|`%^1dK$kJo@zBRFz6~HTJz}}hUm(8fz9vGr0 z5~<2c|Rbkox01l?a&3*+c)Lt3QB?{4`s*XMO0!1oMyM7WY za<5PA5CW8(MiEBV`_XN$yDETvXb{dmv;8|^y)@0aPt%@Gqs8(@IZ-B7#PG~(3W}2y zrk)B&Ql7GrYo(TIsF?0Ix)6LWHES>3yMuO>J0W8nHXqilb(%1iRuDlE6;`>68Jir# zD~y<2)RU(6f>#YmDgZ9mAhA<@K-ZG`EoCGCVAOD@lqPGAPwMV*${P+Ro@=nh!s$Pg z;jzPABo1h5n7gE5K_G4>{u-VYzq|I!SBV^El(U&W6N0!zj;c{&mAO|D!na3Q@QTLY z4OBHu!Q_+QVZ3N8Z@bxkZ8a;h8b z?!B(Q!Z>W+R?<`y4&Ag&o7W0);Akn%wAj5BdcG50^wP>VSEVd~-N_)IppFvVW$8D39u?4g=r*mHJ zF&rNdS`5=nt%c(>6*eNWDs!{UW^D;70X#5u)F~Jr5ci5it9KJ2W5YNRLS_$w{{Wnt 
zY;1_WQd|||q{Hapo<|>~Qqm86ovQn9?BhGLW-ecLcti@ed32VNgmH-`obXJupYr)M z)DzE!!xIrl7o2J!x(oe=><;$XfyslQ>x{{WmW1tG{2gwjK3v?(e{F-sgNQAtk<;@E6;fKyREB{ciC z19aMn_O|xeY!47L)*7Jtno{WsF(93^$5D%BV_6Im<~b{CDj3n!QsOGm$mto_P(pMr z2=|XHZeH6YotV`fuxS0snjLb-RVbsRtZYvSsiu567K;ajHs_i}_Zf;A7h?DV3HmL> zNX;D!(xid~EpEY}MwJnV8Vj>eQJXJfRTz#TfeOlb-dN_Kqfsf5N!+b3m`fJ9z58&w z=P*%psQ@0MO8)>S4J9g69-7OMu}+X?d1uA*c3JZv#KT30VVR|54?KX9Wg7^(DYypw zZpPl&{?Y6s%+q&n+@(PY$F6AldUf=<&J147*_0hKNp-OxQ5rF8LGCf%IE9mo~eO{o% zb9YuTT%Cx~VbEvpp5);{3dYp*a(uKHeD$ho8)-qKv{xVlz=L*Imw9jvwmY*TXNbg~ zDi&QX*_(|^Q3UL$y=L?+(2k~LzQ1)!Zn9xx7{IVddWVwf@;)Z8)5}CDOG!(J)3jD< zlEos0q6nv3#93t2>1#Q4uKuO;^LmzYO#^CdwffO@Jjb0}%Yira7l(L#n>>K9D0 zkxT|RU6xW!@+#eyGAGgo-`pr994C&NkfWZ zd6I&E^lGJ{niEi^g_YDqL#Y7xjeWc~7fmfXkm6K2DFeaA)C!VPW729JE?%s_erlHp zTHG;Yl#0rkO31PT*K(~b`(D~{5?E47qYk~IDe~qhm{~K`6=9x^{WaC~D>`&@J7(PX zhGl#LT6O+!rhkdbP?$pi*9457FC2P4h#&&zB+(Ir9#F~FJ^WH}*q zZMZOlv&yy_^qcDnpFCzv%hOzABP<I3i%`8<(2)vI1MuYUS z+T{1QF_U<#U9ilM$xfuG0-qz(*HhhzO4v{*5yGfT@e}nwKdFAXXA_EMCZmt1a6yx@ z{v(#~;YEkkEH^UC5U82%?ntwAf2dL)lMl-<4l<yhm1DjFx?_z9VnO{Jp)(wyVYE zNlUP^-XbTX%Z)!rTZcc-@29J#k3f}p*nkOcF3zm(wjbg5LBF?`RnPHYD$PA9f;Q|u zG>FABwhuhjbYIM&quL$_BB+|Ot7WuSRi~#&@9eYisad0L5(2S30;;04^$w0SkEhGH zb$nknU6>7AszP~D(z@74w|72iiEg8{i9B!ejkp0Ml6q+#D31&(c{SZ8Rl{MefuqlY zjIgprMM+T2<@P)Lw2?=D6R$RlLEb_ZXQ*NCt7&Wlty4Wc6t=|j&-HmB#!e$Fu@)57 z6*#C^rIAScahQhY0QMFfS`Zn)98Er6NtH|_VbN0Xyx~hh6l*3t_;iSoZea2)-S^X&LuZO(O{&1A{djYSm|T<+~l zN_JP2^2$?29d-AE5-)MgO6_|xP1ba#kOGi_6N!z4?CZj_kFo5KS>%nM>G+I}ou7>k z*_$m=WemZE(Pj*-m$Sxv&Gvk)23(*@YLC)gy#D|#32n6UjqCqoC5>O`3D&3)R=WEc-|ilDMWXN;9+ z0?fQFX~5p4m3UHMj*&(#yD`^HG&wJ)nd-kEn=4e-QPktqG+1^MgV0s2aiM~px0l02 zU!s-KSTQ=<=KdVo6i`HCjL%nHX-%k5@tCm1>MlvCp{~X8XzOSx;>h%qK+;fSb+ok7 zEjv_A7M}cW>nu{n6oKPnM+#qsz7@ckJs-I7qZAlG>!gNz$T%xz3^nsFQnAxZ1{q2! 
z#+R8%>MCJsstFn4^IPUejC9FqtRAOnpB!G(^INm%LXaG7GmW~K9YNE(V!?<%U;jf^jG>>eBr z+*=e(2cu^+pUx$2*CU) z!lIk!stZjwG0c3LQ|E)B%5o1NK&FdDjZCiZr{qEhQRu5GW_f89C;}F-&DE z8kT}AKz}c(R&dnprshbVFba}%(}dO;S_{iFDUlSi?(tzED8R(kf%<;w&qDf*nCP(1 zu;Nt_(c!Tw)0isZr1`m(nlZL_icO^yr|z4R^zpsoy1ljmEQv|u6Z+EZj@gIEDpVM& zE9pPs^U$1;O+Q_7)(waC4;8>G>M5uxI*UCWP6tB(Ri}~(7Y$cxnWT!L&D&@jLD2Fv z&U4$lrIwZ9fFJ<|eQ}}AU6iueDom4C6O<&$JwEB|ICo6w^6nUo6+}2D8<$#IN*ajD z7i98T0S*qP;^W$R8!1|y)Urya8XMaqOJOM(RUUpOm_9Rx^`kA+W{L_3aeP}YP)QCQ zhQG{147-_KR$#Hbi0^ko=(Y`O3vy}Pn}nvuPXcN@-nNH$I_jVLgfdKGnx8-6^vD)f zfhwv_@UH&tiCq;<{CO*7Z7@hKiB8i|eqx7`pV<;Tw$Lw*bb@>ulD`q@US@n68BwI0 z3Z1@YmQ-Q;t*yQqaP|C!Y!bU=M2U@}iR`n~zI5CsK)@Y_nJBOdcFVX9NzU-iNth{& zwb*@D1sg0Cs$vobM>kUU@7rE9##WWE8GPwdvBQt8Tk~p5rPgsy%y?t6o2Scoz7S?NJooX=ba2Tgq5PPANpLpC}{0_Tj0Fgj$zvBxfkVqU295-;%{0;~+giuL| z;lR}2G1p5CZEO(J(anf}7j%Z~%iBfpzYw^TsUc1nJ#=K!Rk11Xr0_o)Zm7r;e8fV6 zNb3Y%Y>!tA$oGr@1g14N02AZF#pT9fCQQvOMc;=J9RiC6bm^8O{bR45ol4QXv8)xU ztgy;kRv-{9z_xFfFTB8E)7M89mr2u(6hCw;H((i81jVwoeiAT>*>Ce1v?yaU`G)FS zxE2;16YgEZHsqni;&{++cx_#EMQB#8suJ;xlZIy+sIfe=n1&Y3{`LO=E@NqwDYoD| zLD!NC`C4uWQ%lnnM9ySbNkfR$U;osj3^S^@p@|ZoBh=z%Bjrp@cbk`1orn1!)BOGeF_K8$& z4q|3hB}Z!|xJjB{<|HW%GBDyy{Dg>3E#T~U(+`!UB&Ayc+3C0AM(*zE zQ$e+`eo4Zqk3$736O3X#Bm)oVga?fSAUEl4TKNiEuqt1V?fUo>H-yvPA8_a0jpNflca!pIEpg(MtjRx)KgI9Oj5HG zsHUoxz8;@uXrz(Sl^H4(;;LO^DYE67*0!7~wzzc)4kVL-9#rkYMwn2;riLniQ?wIP z%M|%ys!x@tlM06$%pq%IDafUfht`d6;eRZf)9rcd)8iHjXeXeXUNkC zZEUF|D`i<%OR~3D@ob%h)8ly78&MuDM%2i0sJFQrnGh=4;Hz1*)Q;Q<-sR1-qoA1` z6d_H@)C7SvBjG(8>)tIE8HwWg)o?iF5WyxGDRxp*K@cKIV2V&kk#5EP2XmnCycUj5PBadrYkys0WiC``*M|kH?;N z$*`y?6=dBw69C0LVfiEGA#!MD!LcZ&mK+{sh8d+X_fb&oWSlQ$-lTGl+O&_dE6KZA z7@f4;h~)gqhSk$!m|sv|Gh!`I@_6IaG$uI=1(GRzx_yd|7zJ2?s5a~cmRUNKq^D;` zSFI&@6A?&U6Df7NyA`8s+nTcFc3g?u<%SjW@K!z>G>XzwIcXS~3Wq3k7Xsk&&bY#a zl%0{)T3{5ec2J>Bk@|U4gGolxV)XPFa?K3|9v@ph4DC$BmfUIHAv^m_fRn^U!brqZ z>9;||i8SYg^lvIH6T=XcCmz{d4@)VSypIs$YLd+i?X*xYyguD(xX86+!n? 
zYEOs5(?u(+{VD0DQl-TDm4{}EI!p#4Bd^QYAgS{!e8pf9W1^BEp~o6+*j96ux`P@{n0jITkj* zJ#wU=$yW}Enb}JjxJ(d6G2 z=_R4Bf=S~oBoIU`y?nC4YzP4N;Rtm_5}7hpx2lhp%0jw^6cc_WT}i~KJvn6Tzlu*T zTCUC2IL%gJ#bt^p<)_1fH6$^ZM2B)t&Z}ZUw!Ou;>lX+ag}$Mvh$M95_)%5kDpM{5 z7z7bo)#BN9OeVH$G774iY4G$%h8kK`o>^gIte$q|8&8KjMY*}4>Yk%!f03vg5auv7us~;T5{Krqoe?% z#}ii$%0{wEa2Nqj;|9!e{)-aCC8Psv6tTq`%N_g$(2{tmn@0#b)g<*&N8L(Rq$L9# zn|5{KP^xKgY&N8SF{_pF1!QJ4@JNSnHYx!C1OEUL<0UP(IGtTpV9G!Zl)?bwY0c{s z3zN*j*u50URj0&dS!zY~Af?GqqnikIKVgv^cGEXcq7odCHf+L^&w*k%ja*AYYUYk+ zEmIvjFcGlTwO|S0VR?IsSkScr&e}9f>1>wDQbq(5i5^3ultU{}Qu%F(W~ngf9_S;K z5MJZn3w?C9ym{^4X-EN1s~!`Ddfzp8Ijt?Ejy@iC)9nskz_7V~ZW)F(O*)oYD`oQ8 zP4p{cYjJg!P|dL89VFMBzkT(RQb$A$l=oMU;8L|Lvfwb+fw(HeB05MA0=kVr2e=$C zd1wjP4HY)xl(D2q2iw)9Cxp&s?Sl#dC#sAd zZN)5x(n$km!6%@aV}ltOoQ>x#MD+3#1fL-oRJhPwE~FlLn}obT0Fks|#*m?vvSE5k zwn;wfmnIvTK#60+?}xh$f~8tf69yoaEJ+8ju<+x~TR4~;I8OsgL&Z*Nl1Re?7<5yo z@@&;Vl{Gx~EA^6#$l_7ijAz z!hkg}Dn!wtYmssP09{?(1$ zX%6WU6dQFO45fki>%^B9Kmggsex<&3++f z*8rjtN1ZHtH32{*n((EXz9}f9s;iDztsqY(D9pO}2JCq4t-4a6tNFhq==qiX0{)(d`3OE`KA#1~aq-c+4I!4N*)x-gpe6IMO>A zgA2yzcczxEdKg>ulGm{^J(l`>FT&fmn!5!=5;#(nGNu6yKqO<01$ey$T|kTFsjW{) z9`IXeBKk1Cr{ll|@Y*TUDNlrOp*B-=+7fXxel#D}P&G!@n)Is}&@u5-+vwz7njr}@ z8hR*WZ1j)@IAdJsX(_=kB#z?YQl2riHkupo8+x_G!1mCB-APh(j0GvyB~6HeMRfou zIucK}fVUbdl^v#qEvrZg;Z9M(D+00w?Q|sHXdd1pQc`wPh6d3+Vy4qjm1y2s10xNs zZ~}rofbq8(Js67ExKUUUg-e<>+q5c`9f(k;aXg3J<>^HscEp8}jxG<4Db?bzjbr_(|qJo-4aT7c;?y(1C zfZtsL072sZ;EsrjN~dS-6x}8cR&*1<)r~Tf7nGn2dm71m^2lva1uS7o7!yfsVTmnGGD^1%hoG~JV zT#wMt>uzJGz-s!1f@PnY1%@@4>WwuJZv`^4*-!jR!;YU8%CRJHt3C9$_nvmXEsq>oUY~N^|4+L3j zjj&;RdT9;wZY-S}D57P*N|-($iso!55y9cYu_#zzxYSsyvQHeaN#+?O+H4B;*T;j~ zB&BKajZ;B8mloTYotVW`Pp%x{TrAo$) zV4Nt=EFKMF1n}cTUVJ)Qs#KDmPdUNbqALd7>>%sFJEOt~qeP@A5>7Oo`MgICa|v@! z$bQAFZV$J^iTm{@3aLmUYAC6M%4Vp9voS3t$+LYn<-6cTc#j{IX7bI^F$G@cj)%A!S-suQ(CjYfp) zuO03LKsZ*?JHb0Y8o)4V*fS#+Yi`@N+Wq`@^7DYL2Ej=ajd?J60+*?MsI~-sl6-CB z$A2thnpq&>SY^VuOsKPLMkC8<1?_)nzZq4j20^Db5z#^~O6k^zHHwKUyqAtQRWYT( z0@pXcJ=~Om(@EAzF%l@_wq?N3LIki$QB!D`Ol-0z3fKfE)U(oE>k75j2b4sfUTQo zTItpy2!WlZYHEmQG@es7)nfkWEw8utm+KrAZrYGRGqCeCLz@B|AaqcTUp^cez=T64 z=+Ty9S~4x;b8t2Ga9D43IshUltXhZ>}z*|j|y_?*;0?_;f)A+dke!Ta>W#JzzsEW zdD6uwwTo)kx5rK|V%Q5nc+w*1@6|KmpgT9oS5JsbNk=nM;dDQwr}q#QOk8`!>Bu70^)1CX+I0{nJgQj@X-3$pJM~eUa#vKaY-*u# znJTkSN@<~@%h}fumK$R@d{fClX^Nru<&CMxAbpYz`7G{xmq>HEQe_IC^GyJ^j544` zuM%pSI-7%1brS}~vh_^g%bvXvDC^@llg~*5i|*3b15alq3k$O0?q_ZjKr5JOvam21 z(Ke*P`tOygD&xZ{D(IHmDOyiMUt}yAk-P2ff4|P{F#kJKrZ-=$J)1U3Nqa4+Ubtv)k4M}sG$4VeZ`&0 zbplX!(z-0S)V!HMwHQ_}{cppAh+huGqrE~SFacM>z<1``MfIC{hmAc4Z8!PLJEvh% zg%4#VCc|>}WNGr=A%aPTW(k8=OM=$0LmL6F%kJaF)zF^+ZLPT<-r6|JgrJ^^6xv;(#Enr% ztUm(Jm`)fejIE8(?HHyXKif3HPU4pR>)WxoL#SD2a8L_H(WN8gu6tR`qqUr9N*y?Y_m2Ca8idrQ?l#9D{kK1@mSO3+Z{6{}j z;1a08@fumpb3JU-vQ$7+jk+DMyRc*QYtFtuoN2+eB>A6!#Md{ivdAS)wy=0W;nt&u z6dYnz`GX$B>T99Q&4#^8tZhSwE39b!$UbTU05{-uR?oV#=Q4*dWxv!cvj>B+P30uLg;PA{{XTDM5#`d95663nspn_ z8xV$4kT}VR>Jd@~K&Nb0p9}Np4ONOgMQn{BM;-+&wY1{4H+_=!pp=;6ai2|;g?XjU zojQ~W#HBC7Z0JDC?ijsS!(ve9eXnbdlPQNop_Di-?umfMxH!9l)b#( z<5n&LP5?&UplcOQ3;eeYsG6S!poJhIl1YOX^8Jc?H^-0MT6YcWODGe^Z6j|oO~awL zpIW^3ew5XKOB6FfQn;iu%Td0isE3sjM4_z7U_H`6@5VCc5X!Jtt~>z8=Ss6XUho}g z(l$tvPZ8--YAZ06#0KmdI#;ZjQ_PyKpu9|2ZIL{-0>ks>n|TF62?A7joM)z%7L=S+ zRiq;V5+XQ)Bi%w3Tp4!{UzW$K7Pl2A=O)9gD@zFX>uA@Zu zcI9dhHz`2dgbzBImoN;mnd|UuAmhuH=xZxfOCBV~Kq=+-ND;;fCg-{AK8^zWL*ACM zESXnccA`&t8oDCkBeK=)KqT!Qg9Hoga4~xwarNZ&L zcWEf!$X_XAcv-Inz|l~tKHhS>cr%NK-&^h|sZJR)#(392tTI5uX=%6#NQ&Tfj#Zx0Hqf^u{@|i&oAX02C?BQy6ky=Y?PCpL0tK8(; zh*lHR@}#yW&`z>bqSNB|CKFEv9fwrZVOh(jl}j2=GsgAMQOwl)eqf_hw*LSz8))oD 
z4y^O!%CUlYc~X0u4ImJoIu5C2>q}txo+Xs=DKNNAJW?hd=2EhsEbMZ3s_$kX`^1Cs zJXpNZ1c}G5s;DhalcZrm8P2of*?XvYn>k^X7!Omj>@ecFe>VX1^5Ow&#SBW#w8rs7 zP~Y9$*YCq1q=yi;)e>^i13U-x5p&@c>xQv z7^f}>7rKiN9u?mo6r|v2!7sE<%5RU?mr*gwEN(2ngVg-KTrVyqf#Q`gQwpeLRtQnm zq9r%`;AwAZI&lN2SWuNr@ui{Er3BN?6Y7VlSpEerH&KJpLro0pgHlb8V{aslR#$MP zQv~uh@;g4zw(;iOvm=dr^UbFWXfMtFu=DmnnXw~5m+Nua*wWGE{2HdVbq-`G&Hjrp z_Y-dU$#NFr*F0&2e-5J*nBLFYHhbtSKD7GDM476hZnWUpZxzF7&S4m3TGUh~Cnffp zdKsOSiz>UH?WrZT8UcP@_bN)&s2n`&$jtB_A3727KUzIQ#xM+L1;uk_2Zcq6 z%^ne#TMDkNuBb5}bTUC55SiJY>$x5^+Qb8=2(K+<0#tV6G`WkUDELHq&~KX-48wrs z{N;~kitNi@fMRe|W?UW$?~{_Yik;(UGvV_`NZ>}QNoco#MUUF!A@-5AP?ZcijcVyH z6A7R-ahyY@c=aY=$K=H#!eXYHfZ;Tikr`ovu4ZWW{{W;Awn8*(5$v|ST8oy6C75lq+z$4@8Yvwm>Q*gLhhg}RATd6p zWenj@CLw~+RP6JpE0O~eOT3-vBYhn4c5v%G=7OMwrZMRh?V{9D6r56M*7TpN8LtNG zZ6;>vb{UA_kke)QRO&5uYe7`v3NyIV)1klVl2G<>-B=#ufm^HgTeetIglaltujLeR zVyAoQf<$99C3Roa7A4ebUZ&D!{4QAVyvKk~CPu?+bp>i04DtC2bvWgACehX=YJ!rsbL6aumVrrKgn3e2T#p-ZQBYX$t_?h` zO^|eI_4n39!Lgc*MAO%2>S}-+Y(FPVk~$lZF2cZZhS_|mq`4!@-Bfigs7g{omA4b_ ztW^{eLbWShJ}sK-70n7eMnyWS8-cu7i}x1o##Y%#Q+SYTYGJ)0WB^AFn#k2Te=pF* z7Aci!(-WDk^R;rYSna53Hg_Ht=G<+T%0fgMX~xv0c)Up>W3S~+V>(dPEE7wKJT+k3W<0E6irLZA#@m`@m_#V4q)R%&`mN(i8r z%tjY$Fof8UdN4i8Kjnh5a7O0bcR+Z~O!>pW`W-DSv03FoS}X`~>R5=?Nz z@SlA^Y{i@D%(ZPml*P?dlukKslE#VC@5m2^PE zuNe5#UL}g-c&+NOH(Xj;f*6A$!lvf>`AQUgFT`1zoKW!ikFb9K06O+ow-y0#1dMoN z@8L~9HHpJBEj}2AN$whCs74`3wu4~@={zn{uR;lJp^qI??xmM%vs-}Pn28_8;0@tUzcr*mmLUYzU@6Gow!>FY^~)L5wrkeX-y6*jIb2-7-#bt z4dxY8@g~AZ1Kwcv@a^A*Z5PD0Mx_osKU&j>WUVS56rLP^N{?TMiiCM(!|GCDb5m00 z5~9R6x`lyJZ8aPXXM3>Hn=Uv>RP*UZnI?AUdK*XDCuE-~Bn)aKG?ZRZs7fiJ62tG) z)m?nTecM@gSaK-L>=|m|DML$f0Qx7{Qf^+QxWQVM5VdgPYbw#Z1ch*EBt>1d5>qfL zfY{g$`fxYA7xsb8X+9u6)3T2;dk2CN6qBU!no&~Ww5A9eIO6iB%&N!tIG24X?S<6zETEk$U*o~PT>buJN}Rz5VCkTiWUEU7 zrL+=wsQf0m(#o+)w6%2eRj595nt2%P)3)c7p}auOpHgngw{!` za+M^{{)vehCE6L{+>Ew2Bpokq9^JcYg=#Ih2^=PhZIa{^g`x*da8MEkUwM=`15DtSvELFLd7QHTGMcatzgs05FqWlAd%M zo(nJ`h2#te)K3_I0h-ZCB$LC3MK~oZ$#-jNz;;CuS142z0mPcQw^YCt!7%w8uaxFyDgy#SfCrDaji1^Bgl?S{N_l334^T~Mzm9+lOj+#qjcm^917OG^Z*1_c$ zZ9Smz)PMwz0*`IOrxfv^%Cj$JTKSp&K_?*VCc;O@PiW@KRi_#?LR37(CUXu-!)1dv@<|Gq z#=J64S*M6gS1Znqakeoa*WxeF1-Ycg+Ob-MM;cn?5tn4Ero*Umy(Dl_?v`9irdEPL zMxle7+pyyHR^)>?^wrdb@D*Vl55&DuXY4|meK==4Qfx;luk%b#F2jvf3NcSiiiN~( zSV(MbpuLX`hGB4|;$(SJSWY#uS5LDZdat0#c*m#=i-z?3GA%4r_>N%1alu;H+bIpQ zIADj(1&F+eU*LGW5~zvmrZotlzt=yf4vb+v5YPUimRUM|n^ulYZxF(sR;0u8GOml| z(8aPy9h$+KGB5kr;Nx}g9S+9P8+s^{GqXdHkF99&o~v-^@p_4|Y&JY19LDd7;qg#N z(i%}Cn}g;tI$yEjHT$7Y60S8ym$Od{C@I8faB6pyt{~MS5(>z$*}juwZ?A=F(hk`a z`rS}S#tkKL7;u_|3kuCqJ?|w%wg3;?%B?Bewy2iMql{BiVU+I{=w`HvY-#1v=jq~Q zkuWK}M9R7;j#^fPsx*FBZ)wx-w>(wE<6hK$=)lvx1XKi=Yb6Rn0Y|-S9WF@bwMwUj zS;vfFOKJ=rhBsGAlx@R$l>w{(@IM|t+cCD9DkQ)YLLD6?79fI^>*7e^PcT!dMQIM} z5r1gq!h#416pbrXgB24R%J^k9D^yWp5lJciBEs^n8#&>W@JV^(COG0`W(-(%vV*S^y5LZN zVzleX0FW@Fmp)dds7ha(rKYNhMa+@iU0d0-fn$Cdx0e-^gA`hB$RkNKovF&k3ZT`P zZXh8arB}n;Fj&`H+s981BX=uNaD?GYn#>cYgdUne_NIw8LrOS9CF((>j4-N#=<~|MOf(@f;`n-`^vdEzZ*hnu zk*=C>gS89gdXDr~2lIyUXTLq(|O5`bu;n<7; z;I1m0nmInvkT_i&kU`hOn3I<86HPze>e8u!*Z9^*zmz(r^=%ycD^8xaA24_aN9lB`nN{{WTj=| zplX~pgK2qENRToD>~2Z1{J0;v+>p2Rwz$IzH?!V__i0MfHpKArrXHMi%N)s)e<04& zRAN-plEzBaWUHoR?|(aO)Z5#_Ot*S2E!W-Ll0hU%+9PEeW;?crlio7``>9W!>oVV0 zC@{>+mvMjPIQAK6nVgX+ja#tP5=XQjtB0A|*}t_)+F90vh7?beH*sUBwZTdF&;!&= z1F6`i4IKqWEZ@rTPcc$z2K_wZU*2-IW&+ke&H%GvdZg&u1~bNm*=5DB1o1Q)f-sFV4jBVpRf1CY_CpY9!hU+g|tLTP*JK*|5fj zVD%=84#Q)fmOk>~Gl8%NCfi8|a|6P+h06;IoB(DYX%``8+P%YnXYuXd3x~f$xnCX5z&n-AgRbEf$DJT zbirh;g`lg9&m}l{TlXEpmcE*RY&&uLm)Hm)q7yp=d%9^m*2^jda^!W7Z%3UXr_Okq z(P8)$R8-%~DZXJUDd4V^!y~1WYDh@|ZGQK^3K5vP;Y!oM9 
z2OlhJE8^6+Br97@SuG3H?S@QBq5fV?rH{N6D;pE1bH!IT$+r*{r%~E^x}%M5&ei8! zFg0o5G26mP;pI_3teB;A&+}OHSggvjNlP^|K!P$^eZhDb-GK1oRxb*cLY5Y;kr5pm zDNF37v;rF_Oo5D(#~ch-Q%#HFf-0EkOBhIv7=OI!xZTSMcCG#ac=CgeI22G7haH|& zn&?}AQ>=hbK*yp86Yun_)mTO+PPDV)IYx;vwzSepDVV&^U@qD#hq&P_G0DL|F7)z6ou#Uy(~(FGA)D%f44xk3f6 zw~Tk}C1mK8D~8W}ZStip*j%`fg*pOaW@p*Jk2-L}vyKUo>SL=od*O9;P(4D;hBUHG zOl5y58Cqp0P))8rJj0Kv9Q>)~vi8V?_>?g?o(6iWD0>dd&vfP6gpzzIQ2<~z#B|Z@ z>3%!%cqKhP8JX(dnA5|VD(EStNLI;Wrb!qgJcL{eYkv-5)$P)od&D+iIQy$3TWj`8 zlP7MH&JP}-(FHo*fEBTrbJKT7Y%e#Du!9R_ALUGh& zjkJrV>W)y(7>o@DUWOV91vOMuxN5|vO|otZ!BhvoU;y~(#cH`zV^X98?xtBlSA{gi zi{>t!^+KmLVVDL)#4(t1#HN=K!A28O&_Oj+UST)b3&xEha0iIcXn2U+-652^r3`i+ zicY&^IpK(+OP+d5o3Lt%Djb&7b8AJVkTzQr7ZZZ#vn5HC4(l?4Da zoue5v<96j+Mkn1tPIKsXOwaU+xtlV}gkn@}S5Jml;7qbZQ9KCJ7l{U_N`e_r)5qA& zuG&~rc!LrT0BWw-h4__$97O||mqhb*47g<2UOf1^$rDmdM$@WH^pP+5RD0}F%lB~c zjO|iVu?CPmYNUW>pEBP|u^g?8K>3dW#^~^R_?j9FOAW54aSJT^uvI5v-ERK?1H^Zk zv#uf#;M5Mt_fwYblo>Jw9KMtEyB*89l7kiz)nLZ92a6NT2M}pwt)!X3KnS^ppt6M} zYzQX&4BzK(xYCdnvxICTF=Z+JjF2#cNxQJFqFd> zCM!WnJYuG}N}%njM>|BJt+YpANz`#Q!mln85*(S?)z2DHZDs+Tf{uL#LPz1Woj2BTvSdcwq>1#a?NfdvLmGdlsJyDse`9AdFyT~^5* zG{v(&^7CL{iE^QUR}Zi zh7pe139wF{bXE+X)Lx`y?BjvtTssGrmp$j23=VngaA=^%6`kg%i2Lxza6`tT!H;e! zzRT}c;u5J)9u&{rJp}di()Lo)8zle$J)fmND3fr}6o_Obvwj#URqNPI_7}IQ6F==a+Nw8X~7knNYW4l(hy2@DmLt@fT+PL*E(YH zS7NYNP~uV=+#0LTaWRHNy;49B>bRWFdMgC}pcCBQl1WmII1D6rvjyM%DRaDc`CCIG)T}dH~>Rn!FwN)D1aXeUU8;MulJAZW}I>reaM?G}%oi@S6 zMAeaF_=5^IFh>+pVU*C$M`co7LyLB9Cj77q?==`aFsB<&gd{*cBpHsZY0|2n8li~E zgw@N3Q$W_0TBe^9EUddg*-L$`b<+L5jy$xfM*|%CQ!99CJ7PKM_){WbIICh4*1Qz8 z^U8o#(qpt!O%p{YyS)uC(U-oXzY!$`K+hVo*3lpn5zu&7vkhid!mz4{acJ<0-!Tm| zRFso09aJrAt8R`$Y;-zn#F>TLEhEJq(Mm1tiBKs)I*iUTIqIZ(;&@$j6tZEo^jTVh zi~(A-c&uwG#zl!u^x=s;v_1Hr^MOgggpQ&xrWDc=@`*~6uC$+UnsLP{vTD@DJw7{# zVuQ;POdX8-R4-*<0BiV>$NvDCHWvw0kUagdTDxv_DLXh}5!KU8FN$C^(Z`A5zcD3T z4ps~=5m{Xp`uT@q``!Crh^{j*gq=K2+-i+mYb2aN7y?97N@7vXBjQGk!!MW~NigbJ zf!*KiNP81vK;sRppAaWd>cX@xP|-+M&KQnxrwMT^$yQ{eK4ntv^66<4&t1`-bQ>-A zoAJfnIJG23JOGi8T-KXPNme8Sw_!e*)5S&|Syp26FiKjfR?R4iDH=uv6alyk8+e~@ z99mFdl!Nc48F(`Z3OyKMhm}XgFjhs4+LBBOW&tVUkR*^W@dZKcq1TC>Lu8F7K|Wm6 z9u{U73R(d{)8|TTY{N%V#yv1UeIum7mPsI&&GQx{v%JiOi6-_87Uxa`a~sQaLbXic z8)#2qYWY#v0OL~Q!|Kv4?GO4gXy&3l_D1YA*MMy5)%LB5hk*p`Cllqyg*j#GF1i6a zmN;z{%99MEqMAWZTp^sVoRlOs;9T5TkaX7kif&Tc%2GjIF(ZaMI_f}a)i)tNA3q_F zk*CTW6B(L6)5f!vJCY#BRb#D*9gVwj7H@55Y*ba@QP?R~UvqJAN!kJCDb&%yBfyg^ zO%z0|9$sBY z6v`14TfWG^K04pO1=$6iqbJAr9uYJZ%-(VE0%Rl|wZvg^z&yp3rIt`ca7N6rn+x9Z zc0UeC7MW_)=j@K3Gq>R$+6Q0VAt)h3RPBLJ6w_8m5HexvtnvPeqB+%DeY6|i;C%;! 
zhC|Lal$K>fh&w1CSz)rWmB8yB?z)lu+8AP@Tvrj28KYC?Ls4mDRxB3kRSn0$_;66= z<96D6)e|FRarRM5HcPn0DNls(r|RF3#O0)#I=XsD5rnSwG^R=1TK5JYEp;BwDel<1 zf{4SOaHQle-B^R^v)NM`1V}0>X{r(uK{V9SKn(H)8Ws|{+kez>!AuaHPzeII+Pp#) z0-7Vj@#@N0s)inmDOMEl%oKwmxdUib_FUhLEel&SrVmEiyJ4iL=~f(gQX3t>W5W`9 zynh#tixF7-&*ju%y!7AU{tZr~Urq!wD_-d(Pq;On0)!cJ^0J28@DFtd&XiS%>bQRT zfD-jJEJpne*>thE8tcf1dULi$xK4tR(3ds7I$M!l)s4-|LDx(Ul5zG>*3#rK?t~0aR?3+OYNHehPKY1h4KXarJ|KATiuWZ$ zK&>nEqsFs!Ii<3KXWdT0CS@rJ5=jUE2!T*7p}wCU2b^s|GqPymx7o%)@-@LQ>ekxz z3Q{0J-p_w`>h^IlaYG22T$dWr0OL-yv@P5bG_iboo0?-0ac#-CxO zs&i>21yo$H0?TbYc*?|?rc|yVO;4d(Km~x7CHJH)_V;*c#E&+hbq%zokeKldDoiU+ zAehHO(b(-1l?0oM{{T-IP%0pFRoYS#pAOpR@=@*I<+khY^CcHOyMTFq?J62b#;oPS z;(BUSYLU3}RbodVOCui2dl7GM8A1>VF)AD>rYk;6<%|{vS^$LbYHprH$GW zQvy#OifqJjxT=QOtD>qyapi_8Ng4sC?d4iXK#jC_YivN(r)4Fp=p?CBb!e#M-S(LT z%-;`qTes20XmSaoL^hEaNv0X_%4$%LENJqsjL{-$JZ=|h=A3hFEN!P0I_ZkOd^Vk= z+vUeE*-M2a-~Cc_&z4qR$J7@<7Wz0nLCJ7EpfX)Cjq2T02{CzbN}`B`fV zAhcqjvxKqnBa1EV$WYdBs<`1=AvhkIJ{fC1^t&%-JQez9j?3xO2g@raV@z)!tEM=M z*?hl|hO-Mwh)I=X+q!sI4-=;Zh899w9C+bKh#+i}?yCUlcdZ_S3Y;et!te~$m@teg zu6m4XG}XgNiP8$%k+jbxX!%tP#NT8w52KDcSBiB>9Ps>Vr2vUggF%nu6G2N?(cHDy z90RO*f+|eMhe3qlI3+TiV=qlDfLNMPn<*HeyC;~$oo(NO%$;R+s~~}-duXcc5U`Li ztxfB&Y+o0q#j!e?x=c>H4~*5c6QeV5B! z?$TdNYvau+BpDkkNj@A#G`zq?WVA|an{zkV+iQ?KPYrDpV9v_cwF6VCa%@=InQ2mQ z<}OtEz2j@xZBcCvxp(aXgz=_()h0&KS(l`BfKM%+p()%#e2ea8+qkF)wY*Oeeo4X% z4V6`0B`OrW%UN7yNU$8yNk-A)Wtc}MB1e}hu7_a&kVlE)^OqGelNi%$x+IWJ0*$6# z#3}KLYIdrYB4r8Wh@5$h@Aj{8*c0{e?$0fdB++ZvkYPJ1=N+Ph2&SGTr&^dRcSc#s z8yKmxSo=Mrhil6mX&xm^(o0;xIPM|{G+o~UpscBmD(lB1p;^#|vN!xoT?YUzvJ~kA zV?uEAnk6QbxO~-^H87)t%EU0RJfud^l~@3RKz+a1h4CHvcAQg0h>=uWEY8D5OCxT< z+8LG^K1!4WGQ0Vb!lic~bn3`_HkArZ%Y>|`Jd_R`$*sshnc zWYd<-tlWbhl(VUllN-irU`@3&_(zf=Hu7324YwG|YAkfI@508elIpvpfC6aEZK^DwmRr|^4w|jALDlHmVuE^H`H5%k zuLW8%#uX|Ao^%B7@Y?I_93r;OUbtScWji95yJ(ggDNy;)otJQ0T(XT-i2(S7;Lj^7 z2@}h0ZScR*!A?__h8rq4^ic)#tGG}J9vaUlR&y>_tf0&}V;88)n9c(-{{SP!uqiyb z>BjKN2|m-I_#P5(g{_`xXjpj?j*hX$k1`8~>~3|mpa|Reb=J#1kNsz5UsPQM&KN#! z&9u1#so5_Cn-a$xHA;}d6pIOlJVpxHm12@D;{L*PB=F6b)>a9_8T6u? zfRG4IG+(Q7M^Cz<_~t2sq?vE1*nDOG0F%@&%gqI3BKwsH8-XLj{1z(0()vg`Tu7k5 z=C17ymwZi{Y{0s~o$_`oS%q~o0%SOHq9t6`S13RVRlducYir+|wa=~?QA_E-4Yg`xvb?WJE#x`~NZWAzwTdBoC#1EivHPghc&^i?r3l9Oq2SxEzlvW(Sgd^Yl- zCu!qMX8CXpg)$`0$`5L~yIYw$haO!SavI#dS6Ln%hsMjP1-6G<>#g|h-gbpucS#a} zV!d^ht*cVzs8AGau`ZeB9JNaxEs-&%hMrYcslgqYfFNGt{l)nsCdT5~Ahvex6c=v! z52yC25k!Mc;1a5*HU&3Ih6i&e;4D)=HQz(%(#@c>%fV$*osLez%@gT|k_%34~I zw$-HcSO3@63l+_{&NU=Bgmf@rSc#A9Rx1^7J2Is*AW%Lw9O-spon5&OHWJeMQGN$JPVp{@&4h+->? 
zt~*;Nn!*_u52ul1jSiM=n-u`~l6YBdke>HapOCJZkC61EZ|1PjPvyc2c48)aq|QA% zDNr(=9f!h|lV)i#8hJNE6ca`6k1U()dXNT(O*m?dxt5#M2X-UNm!LHxJ?l~l%#juZQwNLjn#v15RE#A@8&5(EZsWX z!irB%O?8Xo_{3DIGM)(#ilnHXqL$JsC^!36E}gx*M;g7lxIjp05|gyy^CFyVT9v3i z=9MR6O#c9&3i#J!565u`3eA{Wk?{rJQ-{{pQYd!p7=Yw0yNhvRgN~4r?v(+=iJu`f zo$G`&3j3h8%tQrj^#UqAJ|BVMHD-?(%`{boqH2jD!VJ}Q0pn7_(goVBa0Z%iwVUgE zeO>dZ2~UWb#(HPvMnf)f^o1hbM4SMC1aw6E%{EqbCW8W~-GJfq*2x2fo zl_WRz(yJf!aa~D@Jv9ypm|8qcaU3fZaq`nzMs<=FT|+54?&ECk`)h20)9Y1MV3dgA zL9S%=FI$}{=x`XZ3M@Hpt%}vsv>~Q_-eHVy(SZWsoevf#BPmf)1l3LA7MyA41M9vu zhGq<_RfQ~gHfD-g)iKHw97duzTzPRjMP&$Wa=s^zeoI0XrxcNhrkZg9MmErp-mm(- zhS$(zxJEZmLxGc&a z6ZMswF?#4E%s5ItRRz9oSP)B3TLMlQK4K&bxsWkXz=j^%a)T~FP}2Za{{T8C5sgo) z^`)7yEX5|psiKY+T52gMS-|p?*#oC%e`|R0rmPSVff0#=@~i%KOaNw?>wQ_~y4u`D z=f|;JH{~6pk_eoMYAMjQq>}eKw&AdQ2QT?$6bK>@RcWWw1QCd+kLqs{%n`Sjn=xD` zFsnrs9Mn}!6H5e)714qBR0O&0#O>vZX+cm3@~2SyYXl9ntB1$GaIV3awHaSFUX zo=G5w5*T|_c}=i}MwUImmqu40i|MBeyM>hn=z+$pqS5e^Ca__+#!;qR5!5QIkGFdZ)KDS@+YAA5` z%TZ!kQaUpRCY00|+loY~dZaQTixG;a^m4`SDONzFf=$OM-2i1^5I9CLw}&1y+m?tQ zZqAxq8U8{DC1({nM=Ous-2dh#jcE^Jvi;gR`+%lsU&+N zqYf17F0@JE9U_xB->Hhs*#wwoTf?idQ4QdaA4-U+rlm5S%qmhuQzVE2t(2VrJH4g% z7K)!5qCJP^DN|}pz(^*7eBv`6Ekz2hX-s1WfF>mrH8hJIPo9mx%#r6Lw$@tV z-O5m9DTR5~+_hTq+9@k8XUke%Bswi6pmT71dQ+YDVD%PXQTO@6uTgKPX@1=mYyiV5#Z;Q00%LK|g#Az9uzXyPurN|L76weCHfUX`+@yQQxX zK^;ednn1ReRWadEj&P>#h-Z9{((3#}BVm&@76+a-Pfa~+m17gat7Bpn>6j>1jQy4t z+%>-pvqmnike4UKk0>Lc_0oGZHp_(rVfN99&KRvdHAs^cm2r8alsmyAnLco>Fp*TV zTXuupy}N7MfvYCevT%+xe}3pnK%JCRGj=myissp*!&#~)*czq*8bnpR5N#R|x^tkv zPF5F|*;z>OB%B9V9BU2xY`(R2PIJv9|2RX>_lrHIA4GRmg2gEcnu{uzkQ+3jAtJL8@viY3tyWhN5qfjj7R@2-42h9~0xv z3$(Zi@j>Vm&CRlr2~kc2K_s7Or1#8Hz|lurjpDH^mkEbYBvOQ5`3}Q`h)88b6|cp> z#uTw-S13K$LDRH}6XjVcvt>?Ls`;EF5uXofPnPn{Q5y*qfW(yCb-!*jGlOclnbpE( zeJbNGUD`CEI45b+N573K=reY5#M$aFn%Yu%giY_oOZw-M~Cr8#QAUv{mX zIQ(hmmjTTNpLR1DSk#GCFtkQ+2sa`#-%k!1BF*0H>%ephF1Shw@M33bu=mu=J!v2n zRWx z{Mg#cE$z3%mS*!(U_nsos^3~@QBVXV#~BqGT*Xl(V4*XbI*O?8<}wD2K(dc41v;B+ z8fa_N#>Aa#-&JToJUUv-M{hLt6%6h;?~+I z8+A_2f|I!1t6G3b8)ALcz{gh6v;P2BW2c5;`U!B_!j%!7-Xc+B_1}qHZPcF24Unu+lXhUBZ;NRl-(5LZTv}Q<#Z@bn1nLE2 z&l>g}kK*#uM@JrrbyEbt*ad*Hra6qJpX3 zJU~b%_n?}nLeB`@a=Yaw$~zl(9FB*_R1^JwSC~G0DyZqGuv0vai8Vl}m8Y{P zAp5dg^5JXH%phsUu40!skY!S$BWF!PhMgWb>g!UKHcPRn=;ac-?YS*$D7YGd!1=I4 zcL;q;8bCO0^`kZ}mRasX4z)%k;5^MmYHGTb`Q((Wer1|kw^WT4E>vtyZU+f-wK00% zdgEtE**gjHot~6$mp1L9l7`d?J7Rt{hkV~E^$A%CZ5oy#w1Nl*!Q2b|9vleYKVX}5 zq%TW^{h=`fo-`t{YVW^Npp?hlOv&mPK*F(%MaUI1)(R?y{Tro2I7r)Xakk*{GIGf% zE7moR7~?|mezH;$V!4b~SwlxlQ$;ipS-kgoH8;5+1w18k*|oWXND)Sey~FMkN+Sjx zEj%?1Mt05r03$IHRZYdfyQbUQLU>rp7TcsC%yjd@l`Z5gv8q5l6yj>I$~C7%A~{)D zBZ&gCgQbCMgWtgMg)rlQ;XL$pQ>AUT02p($(*?sO6d1)`B^=S+QX<8X;FOdJ{w~)X5e(Uuk^-cu9k8-!4aUORq{YXrIWi+G+!xy8%D2~Lm@4iUD03La%)qDq2^By6YX z>DHZuQooR1#UrN|Hagno{0Yf(#sq=W_)*FNKnXKX)U@eNs{4aR$8$$7yvj)S{C%7z z%x)010F;eGqn;FE)xMwO!1V2`>l;Wz%7jD(;fN1(-{EiL#A-rD0-aUBNR?oEMLE+? 
z8^Gu#M2SF+i1!?k+premSJ_ZIX;Lpx1RQ$nH5yVKvCbXVQa{SMCn+rL>ecWHHpFU1Zcnk z{{RAgyl1>ZK!J@q({3p^oGXgW6Z^k1-!j7N>V=JmjqhMM>f9iM6x%j|Sn7)PLZ;Z` zhiqtAO43vk<&C@RYXS80$WX+QCa(3FA~qj&bu85IPY;L z0DD)r-N!q{jiR=^!4ZIyQCBR%R?QpN>|M4FRBhAa;eU@E14x>2tnppo6!}A!Fxp8x zw24|`30WKl2ik52C}~PFRSbBF%Ts~WHRXe2Mw&50#DFO=SC(48 zbgLW2%XPb0oxoeLwug@t^qeV8wtPv)Z6&HYd0N#jsOdX0+VWU{JNMF#m;V5e zJ|dZ~2CK~Ml=40SCzxW63ef6Rq+9#xZP#C;hPL-fKX8hot^p?;C{Fa(j^ zNPnncI^r2xoI$ogRI0hAsykn4Hz13i9AyeiiqkMD9n1Pt2~7N|$bD$~vgl4~^xu2_G^-$?qQg=WaJqE*w^WhJU37Qy9?;gYJJLwLoFl=zM!%dA{% zcZp1=@uqGaLyCe-imv)RjfKP^_5l6K4OD7{oC`-D6U0P z0&x^2Vbp1cVe|=Agcv4uGfJ|}_SB6Cv1@SB$*_Pm1C3D$E&~{zihIlX?-#)$n+Bn# zih6nFT9G867LCBMD#~syJB}H*bEK5%!nbo|k^+f}6!2`-MTp_}gs()z zeVa&W0d?9Ck}f{(Eoe3pOk+it4N8C zIYnJEmckXboi@t^n81yckjC@Ob=c%oxjv}VVHENTAD*2`>gQ_6pd0NzH{n)gn;bI8)8QD#H9b*{Edw;Js{5DSDje9~+7B!Fi-MqqsEMt9&$}ci z*UE;tXeX9xDtdahO1T0?%P1Dm1FeAu{{UAF6Kv@K4-ur_mfFA2U+wJyy zI4;R2x$B_*W2;Y!dE`F&Fko&CfGd9<2lryN-dmLmzhz#6H(n)nV* z-RLMFDn&;qws0B@r^K3XJ8W^(mEFDuOR*;88;v*;xqVvnqzR*@Qe#oZj~}M)uw@J< zq5XL1&U?ctmle)96G{B52pv0szS6fU=KcG3&^0Bc zf}zpSM_ox4l~f)?A#G8FLap%O4!1_l!>o+8uz^K?322FW#-R6x=r`h@YKN7NKj^Zn#=;i819t zeMU&k*$%X4ewSu>a~?|$YNWw%_Y6fmV2~sSMqAu9#qGietS+ryOSY@j*x^R)-r8J% z-NJOaI3B*LQ(>J=%lYn-pkcmV8(g4CiD6Z{Oe;{M{mq}-;0G2zow1lTq+qeM8cv+O z%Oy&NsrJ(zHPMVSG-0*1_(X<_HB|v#@k~#bFwsJ~oh_z>Yr<4++HM*v*yj%_UHza^ z76Ollpil>j^h-Qt%ufx+s&ICyte_vvY3eYtW!TZSgsp)kNFF>PR_on1d%~Y##|k-X zJGXhViGXm&u7r%Y)okmBRZ94?$%xWMtvoPd5PYk%9ZiW*p!&xId4BL+p~RJpB8zVF zn{_t{@Q`?XX>Et~!zg31d6Qw!NmEBCNonWURf}jD75ddlcb7bc z1pUPEum8~3_LHU9B@Dt!>Nzo3BrAC;7D!ZpLA!1j1K>H=2KRo?Occr5W^t}@Kb&2x zgR4S7*aknolyhZ@+yKG+x|%wAYPkto{3eh@(Y$^P(*f7SY(M$RR5e6po(hV2Q6pdu)+9x)qr~ww!|iv#yD1}PIAg~OQE6`B zY=#O**he0%w8@EJxPCWRO;#}pq`2}-lcZ8Yv4Ld^xEmh}aGvicVb@CasDtdK`F>+? z%Va5_4%h_EHw>YL(Mv64d1T_zqVAST)|NX+q#nvZ`T*l8-TPJvT3RYSB%QS5dHv(5 zhaA$GA`$_>)Z98uLlSnLJOGGk!G-YX>Dghj_hxt0+v(!gu3UQ<)HZCS?^~wa=?a50 zIoEpHXM|d-e{(%cd7E%Yqh^dfL~6;-%b~oGRDrOBS{-^Gd&U4M+jpw zbZBj5X;v{3BdpMI*IYv#$~jhgJRcih$mk`anG3Vfv=1%NozB8dg693X0CwY_ zCHpq5*^m@ujyP-;E%sHC7Vp;U(FjZkI8Rj+8Vu!`aI-)2>?*#Nj}RfHGDAHve=KbQ z9#v7G_8wx5)?WYtfvl9azX})`r)u28Ly6(SXrhujbq^#_y-LX>TSDBAHuksYeio+P z#Hv12xlSaEcGoZK#vA;ug*>{KofK1}z^w}!jc%aS*6F4b;Bs>@VOvhW=EMy zrqR0LyO2j=#VuW}t+Hy0XIznlP=EZV6Qi1B`PnJ|0C^goqzmow_grhkb99oBVv0*; zCmNrJV)?d?qDbMz=W}xLdGf5oa3zMW#>Z2}oNcASQfbsW;&BF*l-avKH5im*QhCho z5*Z_muHY^9ybiobt6O>daPy@}eZV&3PL#bz&5r~NC2)?SiIpl!rZFZwpRZt%3lh<1=iD zQO23^o~lqw4D}RIByvkHn^8*JBbh)ttWFG_{ilj7@}igu;0$%sPG*xN0gXVLuDP=RuwyHZs=GUeC@W{MURco z3-hZ-P<$!F%8fGHq%06f^`V#g*7%JK3q?@|As!PTL~{v3U04=YL+?I{Z*gP72dxTK zs}VG6o0No(stR#z)tE4RZcJibl$2OZ?eaABMZ|N#Db>uWakTgL0mC+}8gZ~AQiu|0 zrTc}UdWI43tZ$p?=R4@}OfDgbPAKMzj zn{Ept5;jL3iqBbzWYCo~HQ5rU=G`X#XmK_w!U=U(g{KnP#f7c$s}~zR>{Il`THpnk#a-;2UHAcs;4AiRK^pA z;uw}AhR3hW;Pvvtk}d6YA`)y5i5zs;ce-We1Gf#7${%qUme4@M5GUDCDsrwB6w$>V zYOjIpwE}Ed)U(E|auiB@yPJ6J*8H;V+nqh>)HZPa4HPK~AgDBw6mh_EDtxfx*+#Dk ztf8-m9Hnq#S*M6Kl0Zh~L^onQ8M$TaX-Pm5l~W8f*$UJkr({yq9a2SpX%Tu-k4SepBRMgNI7)GUocLUwi zPjzj?-Q#LXl#!1<%2NH3QU{9;zCL|5BNEB&QYxI|bhu48kNK2TwNkupQAoAff%m<; zey%k2x^xM{%bX~QM5Oo(`O;MM=N5JqbahnquP_1}LaP#46Z~PLh$%MLu^cOM(3Q$$ z7{W~`H*PkRrvr|_I7b|7ygvr6jE<1Q-UE7}Tau$ESxAuz57 z_taN~>MBEGkVqs8V8YbunrZ$}Sns{Z(aja5#Q!fdiwUsq}~;SSK^g2BsdJ`<=LZp*`hKw z&jP^>@4_bXfT5`^kWUjv#`S4QNnF6#I)4gO)M54U(#;$*&m@#ZZwz$n0TS<*a_z?4txF(2(`$E>Z@Zd`7^=!D1mpmii_|Sot67y$LF|0J00#i21YM9}QOUL9@ zFmziB_BwqBlzTK;M~F%4ry0uuScP!eHHusaR8+{-n5`XTEwm*eF|2Y}{{V;$E%4&| zJjK55JGDvTHjm%Nimq~{7c~`+DZ)6`Y4WyS3MQ78hACpR5exZ@;2mrY^x{h^)Dxj| z=g~?oY*G`gAQfR!o{KLilWd5Hpa|(3aSB1#Xuo@F+k&fX+V{f*$k<8v(M|qeVyg 
zikb#*(k;6k0Jjvec-mLs60Rnbx`lUSIH*bB0FHX=1~X4gX<%6@5w`x4MUE>;9_?5b zVlQLch%>DVN|aO}^-|2WKmY)eJ@k-VE;L)L;iGmkM$AdDy@9d00RFb*IlTG9cJQ4= zMEkvHbCR&9WWZ0)&~d5p;dE&x)-lCvUP$gsH-NDxzapozM-p}Y$R}k;Y_i!J4~0>; z^r;Dl$pnr12dPO8_Nqn~LVO5vI7a?pm3^mARV`_k5=R;8sc}=0V3RAzjkJlQw!*sj z9Y@E7>wEwR5j0?ncNC+JG~@F!!=$w_Iw&fFFn-^&$G;M()SZHtHEv2oflcPR8PoKL zvbno&RRjXQ8WFG6%Q$0YRd(oK+&0%5YUXI&L~9z}?BtduMw*Lh#@2|&DQXbf0P3#S z8Jo#;^N^zu7&!vN;05@|wa0Bdn}Hq_NURl=vO6qsIcVG!c>x*_ds~g6t(`QZz3`fB zYTAleV~z1tq)7}9u{Boj8%hZSWp`1I)=6U_FsLDt zHyb;9eW#u%Z3hEMmZIh()1sUg5l8?MpDyEm;6V3}VaAfyqtjLl5STbqt{;lS0rMr7 zU8T0Sdw5-bHszMsFd~^ntU{^HGa4jOOD5WNxke5{clL2OU>Orj^`JyzYE$A4=^T(a z3oBeYAOMd2hhxnpy()rco=TD{!x3h=cMbp{bpqnr0z3%i<85tHP6E04u>hkHEk=!8&uB>a>?$?A`--lcDUE+=H_swTuPR9ijN78<0WfBDnlhBLb~mWjfY}AfNOqIaKe=#N?9gx>ZFD+ z_&iu`PA7}i(?Kk2Wi0LI5HG5$3td3g+*xgGRtW~27Xh`2PBc(+Fvk|V`OXQ6LtjiK z4Mw|c>#gl@2Axm1HHqnNnus}Y_VwlGx28b0s$Z{KLI-bLcl)jCWaOT4}NI(h`6prI8+oxfpd(*1=e94#aOkopzm2~P%MebNBwa+As zp~jTbRTxk*`D;m1MiEGy_aLaRuK8KnYRd>?RKJ+2Yzm*E#@r2O>k7Cj9Ta<)qf00; zLw-%hTN9z8jiE@f>KGFaXxwRE5L@n#!_6KehuIUsY>hZ6KKd@ZWeHgaVaLj|;y7%W zm3(odG*e2Uqq7@DBArI3Lwo7phR>a6XN#hdvHLNOBc`74+(#SA*o{=!gvOr^^4aP! z#ffSa*hF?+SP@`!HykRvdE_DpqYF0KQiN?a*Qeac79);l>>9Q;nmCBATvVuyq6_jFr>k zIc@sOg4+;e0Z(q#@|wz&qlQ?4-UvI(sTzT0JcccDu+z!X zOvs_N-O7~8un`nIMO8FZ)4@dqQb9_0`RQ>b&X=(|^Cs=xQUD3r9<>ai0G}V-NX#P! ztE@??s_9iK43ksF)bD4u;z8F#Yj@&HdtUWwQlt?}-+UoK0|86CrG=HucugZfj87kzc7_8A%sY}H0#$k}Yqk9!&+m1A_ zkp~bkG~%9YV3%DdhkXr1&u$6foHU3DT&Jst>wQvZ+W?XC9C! zNy2)sNk}KaI+KJw62lV3S6?bLl+K}j?=Uirej?-9!)r|8X(bl|aOlE{p2{`&nOQT! zX~wq#S|p>+9U_)0x*wM-Rg_|KDJyAZzUGWRyT@xD{8-8e9p$*${5?H1!>BW6$$6$Dv-+)y5l*sH4|{O)2qx#*z(#Jxld`sDw>XZv7TDZf zHbP#7r2haA;Z6Bdulb(@#i_82^F~G=13`vKUeI|{Q>qPv-XCmOcJJ}xJhOZqVPUru z;U5l~renKES_;}Qq3@%e{dmc>>4n!}nW~EtqKtgCPnSGs>*hws-d8H#7VXJF%jVQU zO43q!#(~Y+INNI0k`ytX7*q_&fMvYJhCMxY7?_P7-SqB-Rn+&eVXn9B#{U46FzCcc z;Tvh><}RgsV4j*OdGn*sLs>^vf-6$cI!7c?Og6k`P}dD_07n7W*;z-40!Kli1(s#0 z9xP5Z|Iwn1PZO=crCMr`s^&n)4Z~$g=bB9o#gK(n1omOhyj)&GfEj_q@vd#M)1D=u z4SY$!c?t>h9y6LT%w;NTvn0553SH@_3r55-Vs6_(1fPxg7hSdLt^$Y!kMjq-j zW$QGwKw!F~w3+$v>K@7vvi&YL)Ulv*IN_LSXrIQ-xF=Ir6LH#p-J4u~%TlzG7OcctG%pY1lIX@fL3y@TH0PSfcx&!SkuM`qp zQc{cuP>J=5MAAssrO6mzRU_F_+Mh3AQ&X$vs$2)ndF7N>nwpA`t#&R<%<8VhTGrEE zG`Co`R)sDHS)YhB`o+c5Wl2Pxm;o4U*&gao<_z(c4Fvgmi#I+uKv7v*IAN!@y6xWg zBz=EJ3$OFbC9( zgR{vN3u|{5;Bj}BfwEB6wGJvGdGe#xGj*>)dqNNJwg}nQW7lm4Iqwq2YpH1}aN63G zo}7rp)fE*Jv^E44Lq36RU;r9&72o?_@kmo*6*DHKEz!)D-k`Dw^!ZS0pE`}z9-!rn zGbQJ2()j*f!N`0%mZFk@9yd+ktDWzeh=!Pc5k-dU6>5-LutL9w_7#A)r} za@Y#M()P1;0jgvgaGH{8+LJ%aq7Y1;O**)Z0q_Fn>fh)yL2HH} z_wVi3n_EEfs|@6)g=CipoR+W6E@~KJrU?T%%_0yLNNGGn6t1>e)#+NeYEY&^{1`|Zh)&5$aico!y3 zHdEkJ!X2W5D4yk%m?Lo40{09J#m8gILt+dHySdATal*Qw$>{{)c(9e%YLs?HWX*9LAY3kxdhDIRhL5Knmx7);j%GwGM!i_(*i-`HsfM)E|6;>Hk z@GP}hY=0~yX=hZO6j?;68>+RwBVIVpRcYOj6SkUjdLAm39BN(>p7Smn{NNlX2*e`B zsoxK(rpB<^dWh9r4|*jbe70ZRWw8gh3@xqLZM`}H_Q;~^&ERpxVo1k@DHExAsd7_f zYJ3|AhYK@`<6QJUePx7Y1WPST$+kP4)&lnzzZ2YBu(tw|*aUGCR~fT~1scp#di4vc zFO?w3@aL>f45>^RS1wT0yc9NLxZXI5gK}@L9z%W@JrPN+vtz1Aiiv?`>ENeox#|{4 zqXtr~y`A*8y=&>ObHPSmVx%-kj5tts{{S@#SiqhW(L=Ufrjmvl39$-Ts%U~RmIZ+v z1u94<-BaJWz4<=wq@|QC-~tHt8i%)y)UCxOzq}kV;l_kKGv%l}kCeql2h3jn^wjkC z-b&wZmM%Vzuagy}!&cf-g@km1NScluV&&B-OF>vWA~5&Rv4vx)jM2*Vxrs4aigL7q z3dV6DAd_^G>L+#+++U*N$9bsNn8CJY?Af!*NaE7-Y!RunK!Pg3;mlB(0E*Y6r zypl{#jd+1jeV5};7gBsWWaH6ITU{e(h^MMQO79k}%rY`nR^_;4@(2>X8BrF(#mhID z{!9UWz=OsVRjNuo0o(hj)~~`f4RE2v@EuV!uEXG}#wcmtv!^uh;Q`0#D5|3p$!`Ww z9B57M4;41j6hKpP&lshs%vA|-3MbI>?yR`YK1jmSI=XtQ+^oAz9Ml*r%~R%X*J(i+ 
zRk8SvG+Sjbb-?Jxn>S9ZoMv&y)Ttyfcq(|Hkl$#AUB0yVoE6UUJ1bLAUYnxVFGT?wK!r-R_k|%{GYjWOHp{0Tf3kd2J zMM6OvPVq3;^jp2{J{(Bu8%YTR4iuRRbtOco%}0w3S^-+vHA~`^H6c96>!Dx*?^|9s z`0|J)2pTq>wQUJ55@I6>t`${vaWXXH`E6XAj8f1+8C#Qf2TynFMf@jcQMrQN)v|MHdD5Cg)I$a4PJ-$);gS5Fx7dm#UGNg0TaV2%eiAF_Y91@0cPdr z-X?UWah;Sav1FkEFba)N!tuAGa}3ij%*MOKPO_HN4{r$W^5itp14^Xu>7!dr z#nSeYjS3u2!}`ZGLXn>fsI)7eE680-wFKw^Kn$Zcf^4St_vq04-(9^mq+E z{VELNp%e`WUvLrh9xZ*=6ta}8tbx(T;Yb>3X-1-ukv`gSfK*jhd6;z!7?Q=XoJLu0 zTYbPP1Zp;mYjRGUal53U3QNXy4jd^e?A%j;p+9Np+rp<$x{S#VBag(MSLw|(5i2`L zdnh4x?Y*_X6|?}S$AaHoo_l&e%MhX}b2M($VyKcjTHzE!o9ka%8=&Taw{OBQ4kS-IxK{@%U>BC62 zu@7y6-$5sW9Ituv7e^uvr>5N81?nkIrEKmbZtuoN@3KbnPsVwqKyj;EXPCl z4kNw2x>CYW0GvVCKU%%EUdT$x1Y-nkrWD1EYShJ9T@YBVjL{!|aMu0xUJ_qimrBEq zB*)qT+eWroY8fdlpy}dx)NkgL@TbgPMo>&ph}!L8Z9w*G$j>)#^G>M%V}y14C|{NY zG&{m}a34DKbu|WK=T9LvEaYu60K-B6J^=8OHOdg!N#@CkckpWU)^|RxFv0U*E}g(5FnkDY2T`F1JmbO+M!`W z?*a0c-eQb4lHSA0ArhmFDN}Sjmnk9w4_yQb4n_FNoK7bMfY+%uGkVo-Krl z8%-wX#xUt5Ff}bjJiA14R2!Y&5pLH5j*E3f$ckZCE#rZnnrr@ZgG;_BC}y1_{U4bq zdmp<_I9hNhl0|8^TOtZ(qf3ZW$Q5X)%wj!5!W(yM5p$;;O8^g)6F(56&ZktmPBPQ8F;r8KUw1o zY!QuICJqC`@2N|U2Z+=phuk+|D{WFwu~c8U_VdnK} zCUhS2Wgzo5c$mixO(khE(@B4rgf-m>zS2Oq)yGnirx8t})x|Gbcv4#&d_yycrmPC8 zR8QwS*r~RbPp|9c*l;6dEN0Ee6ioThWsJj&Qd3PyPd}RsJDukV!;ijXyU8| z-Jy!(-i&*_ym<2tX;?E?P)d^{Z5iI0P;~)oDC;w}od^-P3QfI2 z!-3(nl`svaUz}QiF6bM;dq+H#2_|u;lt!We;Y?N2)4N0|qBLtye=<2qyElE;`0~oq za5bbP1dMT|r7d(A)HJb~0?iTykxhd&xLlC}DR{ZU2*QrONo+2IAZ4ebno2sFh=nYE zMbVwXu>g^4+;UFI$xWb8IGPAv34Dl=HQgfBVp7M7)~}sg8k%_}Dyt-cbiKv55G`@z z!hFT0sm)5G+W>Juj=w8}gpiR!K3l1(r+DaLNRmS`S5Q_@_MoUj8BMkKPh-XT63SFS z*|v*w6d`b~J19!Q1R1LlsA~FOlWNBDF`-DtGbKuB9wH5ffFF+srR2OoCJEYTi7l;_ zVaTC7`6XTz8+p@=YYVA`)Xv*?zq(OYbppkOj-Y)!V*IVPR5qm3wmsX90+X=gL9a*f zS}1C>t|+j4t!+f@hRqD1l#&$(?2)$C-~)473R9|~F7mjL;s-#&jFc?chFG5kSNTby zSn6QI>k-`5s0VtZcruF>x#8CN>NcnV7~@HIY2=4gOk?Rl#%-VZbaXP%tKk@o3}mIP zsZg=RV_m~>EC>g&)7!{C?(DQevD7L>X4wv;gRs)a10=HGpE#icSmL*U;o9BUB4UHhH3eEz6tAnrY3lIP zh*3&jmuQ8m2rB9?s+;)VgcoWpsX!bJ2yL7yND3amr3G1%w-1Ld;<%kW^V2mPEY)~= zub6f*DsA+RD9Y{tf$K=~6sk_nqeX`&V;BYvSNz0MPl!}uRi)|SrBd@%K`1P$$iq^d z&krqb?Ubb{V5ou!{33|%tX?F5K-4ibqRx0$DMcF^EHSa{GZuM#{2HS2${?1^B!~b? z0qj0Jb2C!Jts+9WbyM~WX-5;Jfrkuf9f$nZUaG)xyp@Pz7+xVDYGlFj_=uvbNLcQR zf7wPlfDexfGPj&WEeas){VD78CDGlw0*6-6dTDE#bAL>Fh(DKN)MAergeUq0qpeGX>97pQqK0|fYu5&>CG zBm|E%+UCIM!-!L08)s6{tsPrw_O65O9_?Bj_fin_FD=i2{{UDrPH$LH!?%`;Ii;Mv@fc3-IlOONMZv;c>I8jB)Fz7`$Ce$`jI4)792vQPBj%Uat<` z(6nx$g~_<>;l~wwcE|}5e5*)e+36~Y>!-R7xmDxT)6wD>ca=Rdh#L`$?EA`YOlM00 z?BR?ihLds}f`k9!xHa62mZLtd3SIJ~1_I6%)IIsPZah zxMA25LZ5FLe$k+#r4TmAi9XX$hn_A8c=SXP2eOmdkEU2fT0E7M5@3w6inCKuRTWG7 z&PTpz+sj2g^*U+o;bq?MTRxCnl#$kb_{BWVd3wV1vY@bfzZ`xuLDg0bmU8tWmkGy? 
zCp(b@uu)E2!{SM37H@Q)8}PYS)@-CD4iYCFCP%tx*-vIJx#LPeJV^XV6_y&@VQGw5 zP9Z%78jxx#Dg(XA&;xLmA5RS-_fVec%_torN4E+oFQKIn`(a8u5(r5>drY38O;uEX zFpQNL$;BsNURI`-DZIOrdxhO|@V^Ug?b~#nP8AcjAp0p*seLE}ARz$ZG7da?ebo^% zjt?{qSyzN&Sdu~SPdyzWjxa{H_pQRj?(L4A@lCj>=tKkSOBJ5pC1oM@k`B)tkSrDJkhRY4Y$D7AN!X4@&hGk^}YrdLo!&HOJg}ZrpGoqR*;#eihLSEo@B9@fz?%1*!cedhmEbp6BOfzVW|<} zqyzhh!s*}J+;c(G72<++ZK)LaHSS9Q6|AgJjrH)iE3ByDj8ow%oJ~GOfo{I}7CpZE zSQgUX0pFD^>W>v|HwjE&PeHCNfI};|NyD^XXKImu%Z-N0K;Z6uD0x>k@#os|}YFU<2v3?LPb>%7=q zj<&m4ab{k@T9gxk^Q+vv5|B)eu0F~Ya9q1QbhNS4QmoP9Dda2EDb>llBG_sH*8G*S zOrGKr19Fv!lZ3|5I_L$N=8Pz-Ql&}6#}0yGg`6Y(_F$6IRNxY;OCr-lG+Kt)a8&yb z!1ExtZC&Mx0+}Nnks<{|8R`@cH4c+^qF8g!e|bU4yVD-ySgS z`z(-zqe+_4vT;c&D&f<{p;1;WsIt-1ykT}o-c=(Hcz5@$pdSt?YT-KY2iZlh+xKYH zqabvuvO$XCDN^vzR#z*{`gTe|DON9Iw$1G3(RQs;34zpol)+#G!Ni_FjVnK_dAXtk z5X358WwdH)6f32Lf!X#*@YjZHso5mRn)2>Y5C~W^>G{+44WA*!8CsJq;}BJ0@hlTpVhI$mg1P|P1b*%N9&XP3EWm7o(sonCw2)Qa zB%Tp8(O;S#V=YtGRpC`MTb>H~YIKoHG0?DSEq|bLmo5MdN>x-~cz^&&^Y{KViLJ!( zs%8wld8r4MR+A$lH-1y&)x~F<)rUo9h=F4Szbikd2i&-A338N8x=YhpqR&LawpO%V`iK(l_rY_!)vCg zf6-HURIduBh-JI5H}(<6?=yKuhESsC_`54s))KI%RG#R6eKK*{aHT#GjhaMRahIuB zwF}Oc?QONC&xqIF{)R60`8pebk)IPC^&B`z%uY=W9hG*pJMW~QitNwD{b z+HKpnZZfnyTT_Z$8gvHKzU?Rwb^%Uwv=K*H1JPmTuAUpt@K)9#+Js+imv*h;Yw)HV z7NskIrEJ6EXxH+9JRC|&fDwtpX;DigdpFb5L5D=lDiW4wmAu_-0-dB-+fm`bre@hm zAxBZ^z(?!VM|Li!U)!V(70lGL*NVf8OHEZL=whUR7~zdK0M-sIZFN6i6Pps#Ka2`=L z0!-6}%T)AmElG+fuQdp#5kLn0r9_6p!GRnc#r=Gn>ei^L%(_Hod9zBI%4d4;Q* z{{Xrq?2(N$PlRQUmUA9&j!KBy@vT{nf&kj>+oAYztgCb^++M!^5d;*IPUacK${PT& zD<(vI>(5ID6Et(+c&#omEkQ>Jq2(%DQ@IprZyq0iW-e{sn^zLzP@F$nEM94D<8g3; zg0%@1Dyj^}Stp&2)}#!~R-D666*?}|+^RMJZf(MpH+OQ9Zw`^O0kfTyVr`hWF>H{o zCYopYb372mOHWd*v{qt|8y4Mrk!yfHCyBG1zTL2tG!TUU07&Cb<&G?pwdzu%05BB^ z>S?VsaH7eu`Dpt#mIRU59`@iLI%4E3cOV3T!;e|{P@gSw*d(cA6&bFmsomL{nY@Xi zKJzv1Py&2zbl_t)y+T_Aw~6WLqkOu=_mGwB_*BMxLYZc7mJ_v2m1HGYZ1#ci@!(qV z=GK4$ieqRUGO=x)FPQ93**yIL#U%=FEtgH1Fi3aK*Lac!HOBoR6&A zTk(OUotr7}-k}OGDXl8%CVRoKoetp_0ZMm2`IJW5n1 zDvEj|DxWfCxoy__MOawzrqq<}#8awD)x=FjrHP`Y-(QXV(NGIhq*3o?I%-nn4sA=2 z2u)SCl%L{}Q4Dg!D23KI;;}69Ff2j#1zu6agR>fa3Oa&g4~=rImZF}JEhRenBUo5~ zn%`5~U#FXELBO18L7k}>QbtK}_jy_;r&vv+iJ2tZZCc*wI`QP8tYG8kOhct$jg^v_ zYeaII%B|8MFA_r$)cXeG`SQw8B_JlMS|Vn$!{y2nC;c;z(+42vPwtQh98$F6Fs-Gu zEP!@ac8W-xK*il;ZG5{OfZw-|hZCo7m089k0gX6(t>LhH$eor(_pN^cZg~pYc-2%S z2{i!x+*NZ?Q_#uhFbZWUz9FF2+IHiJBTXYhpqLRf=^hDt_2k7ZpYJ0oQBbVE0g`5oAS4K+@uPzdq?%`} zih8L1#fqB#P(6SE_>XTY8dTD&TeP7OAes(w!-&;~sHw!#lAa;vNktWfM7o^^!|3A1 zTO$ODbMt7^jRTnLS2IafhQ!f+@k*JbC5LXr_u|%Msts%NX;OGa1G8rdGYxmi(?<>+ zR$ItvWjj@webc`8?`|tYI!qBXk;}jpf@$UqDqI{4Mtc7MCbE*!O-NI8TaE9j(1Ja@ zH@9!PIBl&w>0AiF)(pjtVVJbgRa4-GTF6vMP|>kOxbXpd4m_vAqY7d403<7AG5-KQ zqlIQjs46Jtm|8_<6Da`d1;xkT#SOmF5g=nY%~`ey8)Z9bbyHuLGjahlqysZOI@9msrVb734c9q`vJERf#N8P<>j{&;ui;^i=S993_IEo5Pmvv3zJ;uZX+>yc{kTz15$r5Iy z1dLcP4`O}YZ?7~~p;c1B7#Xetib+;kHi+rrKJy(-`|-w=BMr3Sj+qq-Nn@sKNf~8p zUGJs*E%5g9MReOpQo$2g{{WjvNk|s#zT(^X*mABkB6?}?z^w4%c=UG^)ictpzoT}h zX(B?WLw%$J_44gG-ID~Rdk)Hs!Ja+O~%V=E_)mNIOee!)d`vCqoR_x(m%6JeCa|&BF%x z`*BpF=`D1QoF}N-E?L z$tRsEOhlK6sL*&^`$r!xf;A^$uUnu42X9q5WqdU->Ks;d7TlWTS8vJFmOgIRbABKsT(CAk?9hJPXhc%1q?E zVz$2wTp~O&NzD)%AXZ1FEnopr+;-r@b91KDz-kPMpvLiK{oC57155d6@a!grpAn_P zWx}iK11L!Y31ef~t>O>m$rCJTDgB_^&}bui;U+j!EX!4h%Be?K@=qqyI?Tm@u+)Hi zeO}%zxN*foM%ludY@`D?kwT_PlCFeS;h4Ysx_sPhq?H5rihz7Mx60Cw6>8820W-Fm zv%Hv-&^loC2~SxujySO@z@{`(z*<1S*f6o;tgHK?M;;Yi#VBo~BLU3Ocd4VpC&cP$ zvmFy)*sQgX$%kSXRB|*G69Kdr3T&Vo8-fTNJi1GI%3Tn8MF-hYQ(;I{{uPmPMs3RR ze9kir*v1tO+&;d#l2n$ql(D#YB?=jkY4(Bm@}l+TPm9C{M@3um=x~(mJSSlkyUjC8 zobV_pXns>Q;EhPhRF#q%de53;y(LtKYVJR-1=Km;AU4AD~nTe_5zLV@5U 
zM_Hf_hbQJ7*@YZZcVv>?rqyuxQF>O=Z>G*KPlVtN`nMN3n zx@ng0T74*;NjQUCDDX_t24ALZiuq`mbg3NS zNv>XH4y(Noqw(pW^A?u@pAHyKCz0jI&p+r+I3n|*N0F>Q&D(WS z7s577^?|aJxs<#U;%@?ac6#mGLXIDfbi)9ysH??laU4l%)jv%w2;CLf6JVfyTv=%rhy}6Hpxu#(;swQ<>Vo z;U+}vjlSA_c}>dzC=3rr#%a~F#R5`M&|z6QV5fzeVzS22z~HDGohexqk*9}#+%jtC zNYIpN>WBh~?h8sf%Jl*2X|AUpude{n;WV;1A1<{k9B8T=?K!&>xON2V%0G6cDgcqs zT{g>Yi8CB_aQ0&~)|gfkSLU`UUrUEXNl{%i^SC6n?iX8q9AzxD0Z}lc)=emMyx~a4 zNW@?V!jl;G6M#RNQ3fS0DNzwIKsL|i6$3^BcX5AZxPs#@K_NkUl^u9fsl9bF+yn!R z?D$Y;QG}q862@r-*m<$7we9-bfakKn4&88giWFMjm84IEotjN2u~k(RY#azY<4rTh7y?5Wwc6k=r`j#wfV{;BQBcx)s6@b2i69!pDFmn!VU6Qm>?aGT z?lcF#30`SMFcab(RAPmR)J%@5ij=OTnGgV=oy%(x!Zo(BB&MJ529O6D!;EP+li?OK>=luMr;5qN`rrA-^9TXaiuOYNd%mHxKzln%44ir z2KSr4U_38wXoVSyqExXkCZ4J2BXk~9yCD_=Jq`XB!xZAD{%9#xh)Nfc}qtZpu)ReSt(zbYh+wP#RLMgwNijbfIbtswHG zsc1x=`jUpm=Ta;=wMiqUn{l)xM9oT~!FPoHJF?&1k%faDE(eD^3Z`coY1HGgo<<)e zZWqaElGoZ-Qb(}(@~#sVMI^^TsV=d+aruYJWordy+Cc0yJZV4^g*l-@3{6Xpk>p^} z$+(|&WSK5>9r)N7^;XJBR1QAs%VkOSd$les&+b`o^>f-lG~#WH&+x@Mc?l)Mz@xU} z0Mz#Ya_Ufs74GFgZICK@K^#iHK@iBp>_&sXk17l!3iD`{Pr9=hT0m||j>B=?($h7*RVy4^q*DM0Zo=Nv z!ItfJEd@dXim=CrSfYI4PC2GjQU|`3RP@om%SSWR(-@y^F;ln)y~)}RmiGHOHSV_W zi>DH|l%P^D{*6Z4#hQj2(#%OX$i(xql+=lgPXyElH5yuYfp)22o0%=z(uUA-U&&m! z(56d)AsaB6_Rs@1w`{hTd$OgjBpx17vYmgbwNg^N)m3m~LS-KGb2><@zUghdwt_8f zID2E0D4>U+ldy~^rsFeFNK+usUOy2~rptn$stjlOY)}A+RtabN7?of~m*w2blQ3M< z#6)fD!kcF0)io!!(hFc5h$jNK_4>pqKvXJ6R zwt>dI`82>+az_af9`I^J&|`R9EVa3rVschECZwf#%!%7Z20^dHgUvkk!V){h=`+zk zA}NJh+nLafP(P#xyAO3>j}B7lJ#IZ$Qw)emY2%S$Ko}jQGJ**2+zvV1r6*BvsC9^> z>?O<+meaGQO*~aDA4LQ|$xa^aJ;PJd$ot$8a%E-~#s-Rs;kZizpcIiv8uuXaBU)ZbQ>fdvBk`p72}%?@tw5)wk?xFX zvJ5jUe9?+*H{~hf471*%J?(D~^n#k-O*o#~!o-6u$n^QqimqFl+Q9I`sZ(Jz)3ww(D=lh}d)?cdqNEn4O4e8I!0?4{=N>^I^HOmNe*9yF=LI#q#B zQ58gahPUOm8L6PdQ9faT_fCr`0e(C;Zz<9|E|8wCiYr~hkt$LFAPurqho`5l?kAx#=n_Q50xVn%Z5XxVS#v zRok6j1cfKl4?0lAvuY*XNL1nk<33c)h~yfXE1{X-%oR9_0(Qw9Zp;DOxRp9Dw)|?( zD=n2m)=BnLmyVYZ0YV8IMl|6jTg@Vz`85#H(o+B#E0Q;qTw2T&=x%MV44v z6R_6-fRqB7n;xme3~G!#$)ScDfv(iO&%@ioTd{8c0PPTUgyLw*%s$73egJR~B+0Dx zI1<_n*wbp3pd1Q@U;-K`UG&8r#+wM>YhBkGorA1GO z&_H0CAD*HcY9pnEcOY*5>wda(+4C7{LKMQtcT7)6rFmob(Bp0(7IjQ?Ryv=TsFGG} z5v2uIcz#hDB9HslwXQxM91k~cN)jAdiNtU-vW^^cQkJAQ3Xcl9BW+`%#Nv)9V=UN9 zG=_PEPU1-|;dZ{K;l{MJ^ zz>fSGB}}fwIuZ#p;g~+ZDhvx}(RiT-1Y;Ou!k;9{d6EaGq{8c#zDOfAi9uN)MUhXq zzz0@3mhn6gZd3QqIo+xPB0>~){Ajm&EiF)v@FP-zf}x2OGAyx}rLLLrthq&ld62VH zO3;Zc-Tu+AvFt{i8|Jxt<+5C101hLou7bHu(`s^1;`JT}@S5uo$Wzqel*1E@Vlfc2 zOF@IcL;1EUp&7Rg6b|6)z}8c{i*W_Q&?r&Eff&*4{$$b=5Eep{w8@WUCu(x03rA9` zPmr;C_~Hm^R}`WGMNc0rq3}K+eOwfbs>Pj1Qz}||W5hjZh}~VSA!&6fRyz(6>r*DH z%9uEsJT)=uD5O9hp9-T>qzX2&w`KSeV}{y|Bv`C-D~T2{Xi&~X*+SKPK1RrheC7H?Fg6cof7bcHErl4?eX ztd}h#DjhV{Lv}nPbhw`sVMj;}sNiZubWo~Fw9%G#u(10iTU(thc*@jd6ITF~k;0o~ zz;<3*++Ih5(5>x{m(MM4l4=nDu+}{E@ zpshP9st75>Y>Jr%5h}&yVbb|mzW52=iDE^GBVJRy#DXTSnAUVx5r@k(LMNFi-(BfU z;cmlAd%%HzqrW_-06QkMghEI-ADv*ShPpWlsz~rcfkAlU3S5rY+kJUiO0lPWW5Xbb zrgXs!axGm|Jv|)G2`x;4g@LdaHW%dz_z9Z25v3wW&ZA;q%0z6nD$uUqDHRJ{i5|o{ za=;VDl_6jPCMY$;+MZiwLHgLN|pv+yQb26nD$iAWb&o03Lxz+)FHB^oEvI z7nVm^K4F=pR=GA7ATKVfr6|?Kj*uturW{KSp%cs}rG=p`u&k3w?_R`{VWyrOx)ik1 z&&jDgaG-LxC}lRTpBGFeHVFWC0heEHosZMQ7WK~Bcw0(w6rRWDq?SjU5~xa$avQ+#pwf!EN=Y#WT7NOFLU~1Pw)zqna`x9?E2@$Qbt+Y| zxYyBUcfv6msvIs!sw6^?#)zS#Bx|tyey#|bUwBLbPA|iOGCcTEfBtoiQb{Z|)m1dr z5s?I{S)(@>`#@WT&Z+d6*-0fSCNUKbp%$-e)Vog2*`$5PXz;o4;x}GekBH4%WJVPh zN=jf$d3q=tE#Fbb(K8ig=>mAyo)I3_M)HFT6MLK6$JxszQMVdsqpB+uVqY<>Fu6N@ z;x#AX&CKByWhF@+6~<_oHR=E+GyYG)tFC;0~A4+$&0oR7X{KDMZIJQ63~Hg?-5d#^0mp=A)-&IJN0A zF{yMI%~lU5=r*8T9^(6lu>BnIDI;ZguzD%Y@%5>u`FK?MZyjV>^(`%e=zb+JoL6r?$NZr1EBruyHv4e}h-C|LneIMIG(V**cz(6N^> 
z94fxA52>ZZ3sFrnHZ1hEH3*x)k(FGLxC`3)UxKyXhtRMOiZJM+L2g)5dM36X$$pPL zDEgB0ozyI;(!5e^V;7s{YVxLc#p&dy#%M94hO;D6GAIvEB%@=;s`!I&Erq?NZw2;W zn6w=*n1VP#CbBbda@$LWR7NU1c-FO@Kgt=yC}ynTm+^lxpAyTnH8pk%DzZ;B)Uz0$ z%G|6jE!d7reV<(FP=W@`Xal~MrKce9np#hup{s^kIk4BNf|5p(lBLeqg6*traBuPW z@^k#e5O#`-cKU0YoTMP0O_Z+aK7%z<$n;faEf?k|v9Co2` z3<7L^inkcw6wv(nSB6?!?PY|Lr_2D@kaYUEZEEwH)RI9bWYHbnvQy%SOmLx2Y%c{U z8c5#-#{|F2=xHNiH8Q(>o&Nv^`0&9kB1j;E(MZRWng$X^(!&upYmc-}ynp$gj2E+{ar3JFO(t{^LXBLxTNla{IhJ&!7@bRe z?WL Q})4210Mr?BwO+%)4wSOY}MamLnDiBDD}3JS9xEF!@qix$gJ);%?U$mR&6 zl@d=bXt&eI9d&TP*qt(E`O*EMp67Zf+6tD{M23Ge%qqX7Q^3QwupMF|*ul&yuW6;);* zSn?q9Xc4BTD$X}rNg`5Pzku(-1I;Pc@B_+zG+O6$!3rdmPS_s}l?sn6txGem6-7-o z7-f>93zk_F8(z+{n5+-bPZGCksMVn&I_L1GF2FLBsYg~g$FfCEnvGbj6;-KtTgi%& zksxAChSt#>z3<1HX(|MeC!cL^#FL-`5;jt9x{h7b2{5Y6Vo2D`8Q^GMOIN{~jNjmyB8%+AuwZCNA9CmlId_E0ECmY1Mwh5B#k?wbrxO`zg5MqY;PHx&rMzJdP zQv|qTMq&i8XC;-RRv|#}(0h0Z%r|^CqYCj1XhCC~DQCnm9aRnBskYaB7(!-I}f{W9yT2Y zY4=zl!NRkO80NO(BUcw!*81zA9H>A$Oj1Y)APgp^!jzDm=&dl0@t-p7ueV-ZDe%p0 z=3G*-`*_zN(>#i^N$hM7kJH9WMMjkckR+Kko+=`)%EI?w(lXqZ zWB&jW_IUAYHmTF%>7=W0o-r{?YFe12 zk)C`Ss}B_DqM~@-IRdfOh&NJDokhpDzbA*crrb1!!c;ID1p8v68$5+Y<+!D!kvnjn zta>VJ^*Amq{){R|nwBw5Byno0rr_R1I_Nkl%3u8Jg*t)=9C(j8qI}v}Y$+%vP9}Co zMiEOa4Q)TdNfC`h*Ts!t=9Wh&LEM)uVtfVf#rC%r@2DRMBdQG-xn}c8CKaUh$fc6y9H~6$ z{{YV+DN>>Qsz#8nng)a-U-Py2sIr#^jZ!v_G=cD>t6GTF*YK}JgUBYS!ztpHk|KPe zQX-tI*$WLMO``U`r;RPX@&+JzSHCjQGC=$(HHzdF-aNc=pB9#(lr1DLN?tJfZXsj^ zNa1zHR+kbGNFr+An=Qi7B04D)*;afOyftPRsA-?bfAq-FWDWpQts5OjayX9LE)6;M zMB!V1Q_3X{l1|@d%3n*8b1UJupst}+sscGFsub0>(05!p8%CY=;f3w3$wW311tqfX z65%dtQHo-q%4UPl#BqGYvBB=cQ3hSKgt0$hDqVs0@Ub~ESze+}JsTRl1g>O(8&bsF%g7cV@$EvV}0jkM~&VFkp9#&MI?6u5&o=Q>KS zHxtAoc|5deYa_BTTUzJJu=sHGS2qX*EOd^PklAzTe>r)Niow-x7~%xqDiP#*uD)+Tlp<8X}}24Q>+own1TC6 z3WLA%=9PRT>BO8o>DMhuSW2)=?HmVRU3!nsl+N-=1qwVR)<2Xm&9`j(`49ty%f5l6Wl(Y8x^iCaJ&zm0IEpAf31sG1xo z)f$H-WlHvqm1EP~O#rviz{S$m>*)@tq>Y$hXh~+IA%zrzWboliiYTfos2Z*4UTUm8 zi#0pX8^}n1^lM-t9NR8?6LWhcG^3}QYMtIPRQF2OA|Rw0~|5sEex z_=P}}(}lWQ-gLdeJ%Aniab=Ss^-`ltA?ZH*H@GH9^C{*zTV0@;EpUG9M65>xr zoukjnllhYoq>`32{->@SV5t2iO-M$OuW_fA#CvWJ0y*wr8&-zd(Igo=Jt*qJ)GNlP z3bu#`96CnYg;`yORZ$tLskYL_5osc#M~OnOa6?+>mCkc3MYa;+h*2T{#z)~>%k3RP z%FvxaOv(7uIeZSLNW~pK%BN7#Nmg8oUhJxSTz2?y9lV*vi&!C&soHVEgjp@6%Y=ow zvk@G8{Hh%F(MV*cuAp~NchgG}GZIt-eaq|`@;bU@2v)X~MDRQ(s)ejvNeOW{@HF>? 
zQbY2sJtDknqjf@7v#aj!xO_QG)zYP*g`@oLnpu^t0kt@jj}bKK4HPodZ;p;hTAUx6 zPc?|SCdF)fpAo_v^(ijtQlo|)9duyGN{|*db<~PFS}0{khN`BPvShohLXf&2@wW)B z0)s_3C=YEJ+NC8!6RIgwNek3S)5`z^pD@aQEaZZ(19meJDGGraIN;pMW6j^Tk&Eq-k!AmA{owW%rS>#hxl zMO7JUn)BzeHcS125BRj!+}Znv6=s0IlQh9K5tc!Zq^V=J!s6iG_XP2@K9hi^oN^R!!kH^^ z3PzD2q>_5c0+x{}`-Ab^zq6D|G|ILPiofS1KopUyT54G3s;a7#%Tc=2O)@sycRLi^ z5%gP+XiHL01tUW_3d4{Lz8 zz5f6%DhLufD*phQMBtf$K`mZY`A8}%aTy~@s@D06Dp^>QrOkl9d+DzX5{+xaWj5{3 zkO4T*Pn=|n2+MVt z)y*{O!03D5#2zbt$Qms|H4;^cp}VA&xqiKB@%%y$CZ!!JaL1^PXQwWNZjNYQLX5Z9 z!12o@Gzm+CXBxldRHWfXR==oSAjQ9!t{H}66*Y8~9&6V@4LnGtz4zJtx+B zvOeL&QF1&A0*d}O)4b7^@W}BjS{QKs>K-gUSyihYPyBQ>w*gAGD3YX+Mgg@ba5RsG zrCV8T!iOhU~UoW%KLu9v^Sb7g4?*-fFcRnlrjb`?DYa7`jCD^Pz&3mZP) zkH=nJK_G0JPU2Ovo11EjJCWeQNxyzDowy3SKocP2Q_q(mP{>(rV$u=r`#qzFYl*MaH0y4h;8OFiQnUUc<7Avs)!Ppmq^Pk3UeX z*d%zS8B$%UQGLk>JEC^l3WKe|KX(uFmIx9&=`Dk}kRp@V8VXpR322iQR8>ln@or?e z?xRm01ap_PAtG_123DO)B;Yz|QlzKC=%}9(q^$idN+fmkSMyV9FWL)xf;2jKe0W$c zh)|J)QKpST3}aa7PoFG}7RvaUM1op3R|Kdm0;$?YgR$Z}dyOu{QoEL=BB#=>A%_?$ zu>Sxv38<8W2|w7i&$?_#)ckmE&FMjupJf-W)i{DDuCIsi?)7h5({E2bXO6c5#POq( z)(l4xs>5R+ElVgB62^-=?L{Ol_LjN6gP6J7^}^rG8EXKg#B|_BF;l-|)ey`eLN zbyYlJom@=e)av}xj#X7qNmngiAz3;Q?Y?;#{{TqF!%O>lB<^#KwzQIV2=kym_65DF z5-~GN%Bn0TzG}IQzbi{#MMV`{AxaYCcF6)0V|`8eIb|89vkEYt6&Gf91VjNl3{p0( z8dF!~R@BBLfUL7gNcMICtSlPVzXvY76a*bYC@_YK1Q7lY2KtEQhAq@O6`Gy}`T1{AwxV><;iqIlTYkUTHOR@w*w zeg%Fs+7#jvqKH@{kHUmZD>>uKh2l6&IEv=H46?0WCpw5~*<$xVLUlI+;PAzZ#h_`O zVDadRO>t_1prSOYA~;atlyfc@nJMZMFJkZt3Mdj6!|_F3%tGCj$J%R+J_m*t4yEr3 zNS-uXdu&-y3I|cg4iz64%G7w}U0xpyi$@@VYUpsuRD7NP0DVIa_Z|m|F7ouP$l=jR z*}hU!s~kA-psN_lCoihuxJ0ty6^SzWQo%E=6-aOx?Ie(!N$?yXQvr!i!cN*aX6U}1 zN;CmR)4}08Vp`2DW}I^R9P9RB4V>Fi(=pa1^v2lcA!4JzyD^Co~keE(cz?g%M?>f&(wTc|S&7o?XRFj-+tOE42s-L71dU4<*90 zGsIGQzW~d46f_igY&9ucRaGPq!$oR{=XfHwm|E+!SlIi0oGx9G5hhL~Ql9%+@kSN@ z)u0dMcuXhc<*aIaTAK53c+Zlgi)~2bQQvj7I`geY)wap*$qF7-&9&XW{Obcw0(xVl z^AyQ6xLpJc_~u%x%ym19OD%a6FvS~bqBNVnKWBxPPqq^ZX-Ygv!{MCV|d>R@o3{!xsP9U0Ht9z+BkaX^N>ulXA!QE*<9%h>tZKVqDlBZ{^ z3Q$&KIJuFj@mku+QbiRqVi;urh9-S15mBQ9z}nnHH{x}^Ho zEK3i=m;EdGCN5-cv%@5j#}H>-36bP}-vP=zhSKPSD-Ixh>C0P%siG9(C0GQL@)Z*h zuEJ<(=_=~7{Y)qtEuCF^DDj=$%rxqC&jPp& zqPoNSpPSRm!*IxOsyca;q@tdbO6&=+8;H0z_Hm1R^7YpOVGuTeG4|4hWi}{fOHf%R zF(x?`ki(2(B)P$pv!AGT#$OYpBe2b^_N z{Z?15mYNE>oF0n@ra&TA#ASu1bKiJX()?2PR_W5d?FjQCX|tN7Ecn#m;1UQFk*KLk ze>x_`Dd(sJMNLnKss#(nzRx6K=x&uYMxVZ<%CinURR&Oy%}Zx}_>G1_$Fvxn5eiohaifj>VUHmwkLM z+mYSVD{&+xFh>#dp}n(fY$%ms#2Ugdj6$XDS0aye#@=8GKvSaJeH;imlUyw&PMIAP zBWE#XB_S#DHG-!Gf&m=9^lZec#x?gyBe}i)?glga3u`qbgS2}n*Dbwy&{9*d)OacG z5uP+Jd#2qxdu?t6uAM}rz%+V-=FfFIQh6fxkIQEkykw9?@7wC(Ez4qMCk`~W)N1pr zo=oi}tqX1;LRp1?KM~{c=F(czi6WYHx0HHmMFNFF!URV_vh8hc>=y09hg7J6Mr_2A zM$WqF7I{|U6mzI+SSe9{+eU;QRHY^c+VRqHBDhIKIr2cI2!Ls6SZYUq4ppIv#wzPZ z%5#bqg_V%3(YetqY)af8jrmDYOaoh3;&xQXQ5cgT4X_dffZPuq0PW3aQB+MbmgHm8 zPEbHg5fLP^-BvX>`^X&Iji`y+72?}-q+vMMCRKzvF_9XDv$~K+eGRUDj%YS;=`~W) z)RHSCZy8_F%80UnBMjpH?xc+e9R%sa16LNxRD)8ZD>DR&W@8WwC9QBzv-^0;6e40x zT{fdxgXvu7q?&sOYuL!A%<|(+#M`hS9d0>fqylFeyUaT&(x3%%r=*o6Dk$1TAWYJ` zd1B+(r(1F7-I0L;k_%-65hsb+Qsk0)W4KeitNXFWLlJY|;ePi#^9`i^#ysiQ32e+5 zu2Dx=S~g@9s_Z{VzWD%?-{Z&oCiUU?2bpHSon86b6odHs&5PagyYyh`` z;OBSjQuARdOt?=2j8K0wb!`&hPy#8+mS2^xqn#=$;%M(PQqKXEM_>VEy@t2spONMb z<%*kQJPx5Cbo=N7n%FA$!*d!`Bte5pf97nLh9# z)CB~fyiU=uamIo-`6`)E#HF~|GwkWC@upX*KQ%QQNL=lNWxAKBorxD!D&+0pysDKDiJqD+trL0KeuEwzZVTb9Xj}9LrI*Ws}`FlZNM`O=gxLh!zl^(1O zQssoSl$0k)>HZ$7d79lmDk__;IX)>QBKS!7S%eqa1{AZR|It) zdSX>6O&;oZ!laU9RUd2Yv}D{9sJ^^1ZDDFE)J_{I&geFcIAC_v$g`$AD+9q(O;1m_ z2Zx%kuO(#IIb=}D zsMS|e-ZnPlPF%1Qbpi}<6}DQ|+H@)>%L-=xYnJi)*%)G2rUeS^6fw}jG_1}++!&W) 
z2`AfQ#Oy5&lC5e_KTT!bJ7qP^vJhGzO0i$0~NY+w(Zh9b5_r?*dg> zcWc{Tc%j9ODNv6u(wqE~Z9|$)pkicH+I+Qw!#zzsPArk2kIbcxYL-+M?pX@P5Bv`k zx0W=7kflU*@u%E*@bX}iHW&lhJ8KwR2APC(IifjZHVLt$Rf<4vN-0MKTTM0Q{{Sl5 z%y+U4IuPOxy#oWx*E~ZX!0=BhqO%N4Ov&z3M?Eq;5;5-nV#ym^chn!FiLTVc2?+@* zSC`J7u|l12t!l%F$J{AL^SCWVEj}d$6(Ydm^T$myLPHP@fdRF@g^A<+`c$F}0qM8r zR$B;gVJ0Af)lG?tWZ9XiV48Y-O~vH$kDgja&;>0SZ)No3D!OGuN)#|2wPTmLGNI`> zdLEi(t-}1K5y6*V5uC9CqI-t9{{V=n;bWG~t+14}B=q51N*0Ipy7T}r07(_jn=RH& zI*4!q1|Y*~y2l)gsV3GE3m>fTD&|(Et_W?Rr254Pn{^J`4NeljWYV_>%Q%clB)E+w zDpS*4W|DHDiLtl^80+-%nWpWl`>IvqPBJM)mT&>?)-X6n&a=|xEVk_pbt=aZk-Ww8 zt9Amu#XGI9j|lGUT46~SGYS|gIMLO+j=YtAT)_tv%u>dK3d^k3Qyf0tR>fM3GN_3n zdw0;A2GD9a8s3Ih!y~zLD-DEv=<_Hjb;6g?#B4OJqsn-3kI$!^r1c-95!EsCt=OC2 zX&=XtUH0&$OF}!LBWW8A51Yxf%97!O!0@Rx5LP{9WUVzUu{im8qw-jJWN0;CbnfvN z@5|ds!=c41s_x$#OjlAfx*jtn|7WPCFer?ioQsVn1an}O`% zt=;X0gp>sZVkJL1E3>w}1f;1tW?+nBxz^R;wUNbD4O-U67%2Y$$|Z`K#gDsXVhdk{ z)^`_-$|e*2@Sj>$c#C()GmLPq@QG=0Y)YDiuv&Rx^AuA%E$Sp20AVC&_kkA&$AUJL zq@6`lqCEuh9du0oli>-31LRD6XmtKfgJH18RFvr?(IJItj6OgTF#w&!Yh}6k@mOX6 z(6X+xP6vXPb%^PTi;v(PD9cdO)#l7fTFQ!r^Xa6-05L{~Qp+EY4%jVsi6w9-weYaf z7%2I0qSu*vwS~rhOAD%sEGBl651Edj(@cgoupwc!(2HrwD=Oaa$~E1sX;kbpuyxQ5 z{{Sc#CQu0Z(=8@%%kI$PKh|1mf0!zUDu*6(?e0TcaKE&h@~!6$E%cJ=M+4JLeoaV0 z8VO8!duhuW%(z5|&{AirIw(qoN1}lzFL81irB)yVp*%ak&FoxHc5ae7;7_d_#F92HZ`rmBpRb5hySK&EYzl}3#Kk@Q>eot!Byo