From f8ac368ab39fe2488bc39628a40fced02c7ee95d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Steinsberger-D=C3=BChr=C3=9Fen?= Date: Wed, 12 Aug 2020 06:03:06 +0200 Subject: [PATCH 1/8] LayerPathParameter tagging --- pom.xml | 2 +- .../emadl/generator/EMADLAbstractSymtab.java | 2 + .../emadl/generator/EMADLGenerator.java | 38 ++++++++++++++++++- 3 files changed, 40 insertions(+), 2 deletions(-) diff --git a/pom.xml b/pom.xml index 06b0cb48..dd8ee7b4 100644 --- a/pom.xml +++ b/pom.xml @@ -17,7 +17,7 @@ 0.2.11-SNAPSHOT - 0.3.10-SNAPSHOT + 0.3.11-SNAPSHOT 0.0.6-SNAPSHOT 0.2.17-SNAPSHOT 0.2.14-SNAPSHOT diff --git a/src/main/java/de/monticore/lang/monticar/emadl/generator/EMADLAbstractSymtab.java b/src/main/java/de/monticore/lang/monticar/emadl/generator/EMADLAbstractSymtab.java index dbc5cf31..5a35d39f 100644 --- a/src/main/java/de/monticore/lang/monticar/emadl/generator/EMADLAbstractSymtab.java +++ b/src/main/java/de/monticore/lang/monticar/emadl/generator/EMADLAbstractSymtab.java @@ -7,6 +7,7 @@ import de.monticore.lang.embeddedmontiarc.LogConfig; import de.monticore.lang.embeddedmontiarc.helper.ConstantPortHelper; import de.monticore.lang.monticar.emadl._symboltable.EMADLLanguage; import de.monticore.lang.monticar.emadl.tagging.dltag.DataPathTagSchema; +import de.monticore.lang.monticar.emadl.tagging.dltag.LayerPathParameterTagSchema; import de.monticore.lang.monticar.enumlang._symboltable.EnumLangLanguage; import de.monticore.lang.monticar.generator.cpp.converter.MathConverter; import de.monticore.lang.monticar.generator.optimization.ThreadingOptimizer; @@ -41,6 +42,7 @@ public class EMADLAbstractSymtab { TagThresholdTagSchema.registerTagTypes(tagging); TagDelayTagSchema.registerTagTypes(tagging); DataPathTagSchema.registerTagTypes(tagging); + LayerPathParameterTagSchema.registerTagTypes(tagging); return tagging; } diff --git a/src/main/java/de/monticore/lang/monticar/emadl/generator/EMADLGenerator.java 
b/src/main/java/de/monticore/lang/monticar/emadl/generator/EMADLGenerator.java index 8fecb447..2bc671da 100644 --- a/src/main/java/de/monticore/lang/monticar/emadl/generator/EMADLGenerator.java +++ b/src/main/java/de/monticore/lang/monticar/emadl/generator/EMADLGenerator.java @@ -11,6 +11,7 @@ import de.monticore.lang.embeddedmontiarc.embeddedmontiarc._symboltable.instance import de.monticore.lang.math._symboltable.MathStatementsSymbol; import de.monticore.lang.monticar.cnnarch._symboltable.ArchitectureSymbol; import de.monticore.lang.monticar.cnnarch._symboltable.NetworkInstructionSymbol; +import de.monticore.lang.monticar.cnnarch._symboltable.LayerPathParameterTagSymbol; import de.monticore.lang.monticar.cnnarch.generator.CNNArchGenerator; import de.monticore.lang.monticar.cnnarch.generator.CNNTrainGenerator; import de.monticore.lang.monticar.cnnarch.generator.DataPathConfigParser; @@ -26,6 +27,7 @@ import de.monticore.lang.monticar.cnntrain._symboltable.PreprocessingComponentSy import de.monticore.lang.monticar.emadl._cocos.DataPathCocos; import de.monticore.lang.monticar.emadl._cocos.EMADLCocos; import de.monticore.lang.monticar.emadl.tagging.dltag.DataPathSymbol; +import de.monticore.lang.monticar.emadl.tagging.dltag.LayerPathParameterSymbol; import de.monticore.lang.monticar.generator.FileContent; import de.monticore.lang.monticar.generator.cpp.ArmadilloHelper; import de.monticore.lang.monticar.generator.cpp.GeneratorEMAMOpt2CPP; @@ -394,7 +396,7 @@ public class EMADLGenerator { // TODO: Replace warinings with errors, until then use this method stopGeneratorIfWarning(); - Log.warn("Tagging info for symbol was found, ignoring data_paths.txt: " + dataPath); + Log.warn("Tagging info for DataPath symbol was found, ignoring data_paths.txt: " + dataPath); } else { Path dataPathDefinition = Paths.get(getModelsPath(), "data_paths.txt"); @@ -426,6 +428,38 @@ public class EMADLGenerator { return weightsPath; } + protected List 
getLayerPathParameterTagSymbols(TaggingResolver taggingResolver, EMAComponentSymbol component, EMAComponentInstanceSymbol instance){ + List instanceTags = new LinkedList<>(); + + boolean isChildComponent = instance.getEnclosingComponent().isPresent(); + + if (isChildComponent) { + // get all instantiated components of parent + List instantiationSymbols = (List) instance + .getEnclosingComponent().get().getComponentType().getReferencedSymbol().getSubComponents(); + + // filter corresponding instantiation of instance and add tags + instantiationSymbols.stream().filter(e -> e.getName().equals(instance.getName())).findFirst() + .ifPresent(symbol -> instanceTags.addAll(taggingResolver.getTags(symbol, LayerPathParameterSymbol.KIND))); + } + + List tags = !instanceTags.isEmpty() ? instanceTags + : (List) taggingResolver.getTags(component, LayerPathParameterSymbol.KIND); + + List layerPathParameterTagSymbols = new ArrayList(); + if (!tags.isEmpty()) { + for(TagSymbol tag: tags) { + //LayerPathParameterTagSymbol layerPathParameterTagSymbol = (LayerPathParameterSymbol) tag; + //layerPathParameterTagSymbols.add(layerPathParameterTagSymbol); + //layerPathParameterCocos.check(layerPathParameterSymbol); TODO: Implement this Coco + } + // TODO: Replace warinings with errors, until then use this method + stopGeneratorIfWarning(); + Log.warn("Tagging info for LayerPathParameter symbols was found."); + } + return layerPathParameterTagSymbols; + } + protected void generateComponent(List fileContents, Set allInstances, TaggingResolver taggingResolver, @@ -448,8 +482,10 @@ public class EMADLGenerator { cnnArchGenerator.check(architecture.get()); String dPath = getDataPath(taggingResolver, EMAComponentSymbol, componentInstanceSymbol); String wPath = getWeightsPath(EMAComponentSymbol, componentInstanceSymbol); + List layerPathParameterTagSymbols = getLayerPathParameterTagSymbols(taggingResolver, EMAComponentSymbol, componentInstanceSymbol); architecture.get().setDataPath(dPath); 
architecture.get().setWeightsPath(wPath); + architecture.get().setLayerPathParameterTagSymbols(layerPathParameterTagSymbols); architecture.get().setComponentName(EMAComponentSymbol.getFullName()); generateCNN(fileContents, taggingResolver, componentInstanceSymbol, architecture.get()); if (processedArchitecture != null) { -- GitLab From 7e9f1b0ce38830a6a23b667a3c8ed55eed9d1a17 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Steinsberger-D=C3=BChr=C3=9Fen?= Date: Thu, 13 Aug 2020 02:30:55 +0200 Subject: [PATCH 2/8] Finished LayerPathParameter tagging --- .../monticar/emadl/generator/EMADLGenerator.java | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/src/main/java/de/monticore/lang/monticar/emadl/generator/EMADLGenerator.java b/src/main/java/de/monticore/lang/monticar/emadl/generator/EMADLGenerator.java index 2bc671da..c3a465ba 100644 --- a/src/main/java/de/monticore/lang/monticar/emadl/generator/EMADLGenerator.java +++ b/src/main/java/de/monticore/lang/monticar/emadl/generator/EMADLGenerator.java @@ -11,7 +11,6 @@ import de.monticore.lang.embeddedmontiarc.embeddedmontiarc._symboltable.instance import de.monticore.lang.math._symboltable.MathStatementsSymbol; import de.monticore.lang.monticar.cnnarch._symboltable.ArchitectureSymbol; import de.monticore.lang.monticar.cnnarch._symboltable.NetworkInstructionSymbol; -import de.monticore.lang.monticar.cnnarch._symboltable.LayerPathParameterTagSymbol; import de.monticore.lang.monticar.cnnarch.generator.CNNArchGenerator; import de.monticore.lang.monticar.cnnarch.generator.CNNTrainGenerator; import de.monticore.lang.monticar.cnnarch.generator.DataPathConfigParser; @@ -428,7 +427,7 @@ public class EMADLGenerator { return weightsPath; } - protected List getLayerPathParameterTagSymbols(TaggingResolver taggingResolver, EMAComponentSymbol component, EMAComponentInstanceSymbol instance){ + protected HashMap getLayerPathParameterTags(TaggingResolver taggingResolver, EMAComponentSymbol component, 
EMAComponentInstanceSymbol instance){ List instanceTags = new LinkedList<>(); boolean isChildComponent = instance.getEnclosingComponent().isPresent(); @@ -446,18 +445,17 @@ public class EMADLGenerator { List tags = !instanceTags.isEmpty() ? instanceTags : (List) taggingResolver.getTags(component, LayerPathParameterSymbol.KIND); - List layerPathParameterTagSymbols = new ArrayList(); + HashMap layerPathParameterTags = new HashMap(); if (!tags.isEmpty()) { for(TagSymbol tag: tags) { - //LayerPathParameterTagSymbol layerPathParameterTagSymbol = (LayerPathParameterSymbol) tag; - //layerPathParameterTagSymbols.add(layerPathParameterTagSymbol); - //layerPathParameterCocos.check(layerPathParameterSymbol); TODO: Implement this Coco + LayerPathParameterSymbol layerPathParameterSymbol = (LayerPathParameterSymbol) tag; + layerPathParameterTags.put(layerPathParameterSymbol.getId(), layerPathParameterSymbol.getPath()); } // TODO: Replace warinings with errors, until then use this method stopGeneratorIfWarning(); Log.warn("Tagging info for LayerPathParameter symbols was found."); } - return layerPathParameterTagSymbols; + return layerPathParameterTags; } protected void generateComponent(List fileContents, @@ -482,10 +480,10 @@ public class EMADLGenerator { cnnArchGenerator.check(architecture.get()); String dPath = getDataPath(taggingResolver, EMAComponentSymbol, componentInstanceSymbol); String wPath = getWeightsPath(EMAComponentSymbol, componentInstanceSymbol); - List layerPathParameterTagSymbols = getLayerPathParameterTagSymbols(taggingResolver, EMAComponentSymbol, componentInstanceSymbol); + HashMap layerPathParameterTags = getLayerPathParameterTags(taggingResolver, EMAComponentSymbol, componentInstanceSymbol); architecture.get().setDataPath(dPath); architecture.get().setWeightsPath(wPath); - architecture.get().setLayerPathParameterTagSymbols(layerPathParameterTagSymbols); + architecture.get().processLayerPathParameterTags(layerPathParameterTags); 
architecture.get().setComponentName(EMAComponentSymbol.getFullName()); generateCNN(fileContents, taggingResolver, componentInstanceSymbol, architecture.get()); if (processedArchitecture != null) { -- GitLab From 2102b34d60b371713992f5933cf9598eca25ad0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Steinsberger-D=C3=BChr=C3=9Fen?= Date: Tue, 25 Aug 2020 03:48:59 +0200 Subject: [PATCH 3/8] bug fixes, added tests for EpisodicMemory, increased version number --- pom.xml | 10 +- .../lang/monticar/emadl/GenerationTest.java | 13 +- .../monticar/emadl/IntegrationGluonTest.java | 10 + .../models/episodicMemorySimple/Network.cnnt | 13 + .../models/episodicMemorySimple/Network.emadl | 16 + .../episodicMemorySimple.tag | 8 + .../simple_embedding-0000.params | Bin 0 -> 32 bytes .../simple_embedding-symbol.json | 18 + .../CNNCreator_mnist_mnistClassifier_net.py | 106 ++++- .../gluon/CNNNet_mnist_mnistClassifier_net.py | 419 +++++++++++++++++- .../CNNPredictor_mnist_mnistClassifier_net.h | 186 +++++--- ...rvisedTrainer_mnist_mnistClassifier_net.py | 126 ++++-- ...efaultGAN_defaultGANConnector_predictor.py | 106 ++++- ...efaultGAN_defaultGANConnector_predictor.py | 26 +- ...efaultGAN_defaultGANConnector_predictor.py | 419 +++++++++++++++++- ...defaultGAN_defaultGANConnector_predictor.h | 187 +++++--- ...efaultGAN_defaultGANConnector_predictor.py | 2 - ...ator_defaultGAN_defaultGANDiscriminator.py | 106 ++++- ...NNet_defaultGAN_defaultGANDiscriminator.py | 419 +++++++++++++++++- ...ator_infoGAN_infoGANConnector_predictor.py | 106 ++++- ...iner_infoGAN_infoGANConnector_predictor.py | 26 +- ...NNet_infoGAN_infoGANConnector_predictor.py | 419 +++++++++++++++++- ...ictor_infoGAN_infoGANConnector_predictor.h | 191 ++++---- ...iner_infoGAN_infoGANConnector_predictor.py | 2 - ...CNNCreator_infoGAN_infoGANDiscriminator.py | 106 ++++- .../gan/CNNCreator_infoGAN_infoGANQNetwork.py | 106 ++++- .../CNNNet_infoGAN_infoGANDiscriminator.py | 419 +++++++++++++++++- 
.../gan/CNNNet_infoGAN_infoGANQNetwork.py | 419 +++++++++++++++++- .../gluon/mnist_mnistClassifier_net.h | 2 + .../CNNCreator_cartpole_master_dqn.py | 106 ++++- .../cartpole/CNNNet_cartpole_master_dqn.py | 419 +++++++++++++++++- .../CNNPredictor_cartpole_master_dqn.h | 186 +++++--- .../CNNTrainer_cartpole_master_dqn.py | 1 + .../cartpole/cartpole_master_dqn.h | 2 + .../CNNCreator_mountaincar_master_actor.py | 106 ++++- .../CNNNet_mountaincar_master_actor.py | 419 +++++++++++++++++- .../CNNPredictor_mountaincar_master_actor.h | 186 +++++--- .../CNNTrainer_mountaincar_master_actor.py | 1 + .../mountaincar/mountaincar_master_actor.h | 2 + ...tor_mountaincar_agent_mountaincarCritic.py | 106 ++++- ...Net_mountaincar_agent_mountaincarCritic.py | 419 +++++++++++++++++- .../CNNCreator_torcs_agent_torcsAgent_dqn.py | 106 ++++- .../CNNNet_torcs_agent_torcsAgent_dqn.py | 419 +++++++++++++++++- .../CNNPredictor_torcs_agent_torcsAgent_dqn.h | 186 +++++--- .../CNNTrainer_torcs_agent_torcsAgent_dqn.py | 1 + .../torcs/torcs_agent_torcsAgent_dqn.h | 2 + ...CNNCreator_torcs_agent_torcsAgent_actor.py | 106 ++++- .../CNNNet_torcs_agent_torcsAgent_actor.py | 419 +++++++++++++++++- ...NNPredictor_torcs_agent_torcsAgent_actor.h | 187 +++++--- ...CNNTrainer_torcs_agent_torcsAgent_actor.py | 1 + ...Creator_torcs_agent_network_torcsCritic.py | 106 ++++- .../CNNNet_torcs_agent_network_torcsCritic.py | 419 +++++++++++++++++- .../torcs_agent_network_reward_executor.cpp | 2 +- .../torcs_agent_network_reward_executor.h | 2 +- .../torcs_agent_network_reward_executor.i | 2 +- .../torcs_td3/torcs_agent_torcsAgent_actor.h | 2 + .../episodicMemorySimple/test.h5 | Bin 0 -> 90048 bytes .../episodicMemorySimple/train.h5 | Bin 0 -> 90048 bytes 58 files changed, 7153 insertions(+), 746 deletions(-) create mode 100644 src/test/resources/models/episodicMemorySimple/Network.cnnt create mode 100644 src/test/resources/models/episodicMemorySimple/Network.emadl create mode 100644 
src/test/resources/models/episodicMemorySimple/episodicMemorySimple.tag create mode 100644 src/test/resources/pretrained/episodicMemorySimple/simple_embedding-0000.params create mode 100644 src/test/resources/pretrained/episodicMemorySimple/simple_embedding-symbol.json create mode 100644 src/test/resources/training_data/episodicMemorySimple/test.h5 create mode 100644 src/test/resources/training_data/episodicMemorySimple/train.h5 diff --git a/pom.xml b/pom.xml index dd8ee7b4..3011a371 100644 --- a/pom.xml +++ b/pom.xml @@ -9,19 +9,19 @@ de.monticore.lang.monticar embedded-montiarc-emadl-generator - 0.4.0 + 0.4.1 - 0.2.11-SNAPSHOT - 0.3.11-SNAPSHOT - 0.0.6-SNAPSHOT + 0.2.12-SNAPSHOT + 0.3.12-SNAPSHOT + 0.0.7-SNAPSHOT 0.2.17-SNAPSHOT 0.2.14-SNAPSHOT - 0.2.11-SNAPSHOT + 0.2.12-SNAPSHOT 0.1.0-SNAPSHOT 0.0.19-SNAPSHOT 0.1.6 diff --git a/src/test/java/de/monticore/lang/monticar/emadl/GenerationTest.java b/src/test/java/de/monticore/lang/monticar/emadl/GenerationTest.java index 6c988f46..36821ea7 100644 --- a/src/test/java/de/monticore/lang/monticar/emadl/GenerationTest.java +++ b/src/test/java/de/monticore/lang/monticar/emadl/GenerationTest.java @@ -106,6 +106,13 @@ public class GenerationTest extends AbstractSymtabTest { assertTrue(Log.getFindings().isEmpty()); } + @Test + public void testEpisodicMemorySimpleGeneration() throws IOException, TemplateException { + Log.getFindings().clear(); + String[] args = {"-m", "src/test/resources/models", "-r", "episodicMemorySimple.Network", "-b", "GLUON", "-f", "n", "-c", "n"}; + EMADLGeneratorCli.main(args); + } + @Test public void testMultipleInstances() throws IOException, TemplateException { try { @@ -183,7 +190,6 @@ public class GenerationTest extends AbstractSymtabTest { "CNNPredictor_mnist_mnistClassifier_net.h", "CNNDataLoader_mnist_mnistClassifier_net.py", "CNNSupervisedTrainer_mnist_mnistClassifier_net.py", - "mnist_mnistClassifier_net.h", "HelperA.h", "CNNTranslator.h", "mnist_mnistClassifier_calculateClass.h", @@ -300,9 
+306,6 @@ public class GenerationTest extends AbstractSymtabTest { "CNNTrainer_defaultGAN_defaultGANConnector_predictor.py", "defaultGAN_defaultGANConnector.cpp", "defaultGAN_defaultGANConnector.h", - "defaultGAN_defaultGANConnector_predictor.h", - "defaultGAN_defaultGANConnector.cpp", - "defaultGAN_defaultGANConnector.h", "defaultGAN_defaultGANConnector_predictor.h" ) ); @@ -361,7 +364,7 @@ public class GenerationTest extends AbstractSymtabTest { EMADLGeneratorCli.main(args); assertEquals(Log.getFindings().size(), 1); assertEquals(Log.getFindings().get(0).toString(), - "Tagging info for symbol was found, ignoring data_paths.txt: src/test/resources/models"); + "Tagging info for DataPath symbol was found, ignoring data_paths.txt: src/test/resources/models"); assertTrue(Log.getErrorCount() == 0); } diff --git a/src/test/java/de/monticore/lang/monticar/emadl/IntegrationGluonTest.java b/src/test/java/de/monticore/lang/monticar/emadl/IntegrationGluonTest.java index 1a64af83..91afa746 100644 --- a/src/test/java/de/monticore/lang/monticar/emadl/IntegrationGluonTest.java +++ b/src/test/java/de/monticore/lang/monticar/emadl/IntegrationGluonTest.java @@ -70,6 +70,16 @@ public class IntegrationGluonTest extends IntegrationTest { assertTrue(Log.getFindings().isEmpty()); } + @Test + public void testEpisodicMemorySimple() { + Log.getFindings().clear(); + + deleteHashFile(Paths.get("./target/generated-sources-emadl/episodicMemorySimple/episodicMemorySimple.training_hash")); + + String[] args = {"-m", "src/test/resources/models", "-r", "episodicMemorySimple.Network", "-b", "GLUON"}; + EMADLGeneratorCli.main(args); + } + @Test public void testGluonPreprocessingWithSupervised() { Log.getFindings().clear(); diff --git a/src/test/resources/models/episodicMemorySimple/Network.cnnt b/src/test/resources/models/episodicMemorySimple/Network.cnnt new file mode 100644 index 00000000..af61f458 --- /dev/null +++ b/src/test/resources/models/episodicMemorySimple/Network.cnnt @@ -0,0 +1,13 @@ +/* 
(c) https://github.com/MontiCore/monticore */ +configuration Network{ + num_epoch:1 + batch_size:5 + normalize:false + context:cpu + load_checkpoint:false + loss:cross_entropy + optimizer:adam{ + learning_rate:0.00003 + weight_decay:0.01 + } +} diff --git a/src/test/resources/models/episodicMemorySimple/Network.emadl b/src/test/resources/models/episodicMemorySimple/Network.emadl new file mode 100644 index 00000000..17341b27 --- /dev/null +++ b/src/test/resources/models/episodicMemorySimple/Network.emadl @@ -0,0 +1,16 @@ +/* (c) https://github.com/MontiCore/monticore */ +package episodicMemorySimple; + +component Network{ + ports in Z(0:oo)^{10} data, + out Q(0:1)^{33} softmax; + + implementation CNN { + data -> + EpisodicMemory(replayInterval=10, replayBatchSize=100, replaySteps=1, replayGradientSteps=1, replayMemoryStoreProb=0.5, localAdaptionGradientSteps=30, maxStoredSamples=-1, localAdaptionK=32, queryNetDir="tag:simple", queryNetPrefix="simple_embedding-", queryNetNumInputs=1) -> + LoadNetwork(networkDir="tag:simple", networkPrefix="simple_embedding-", numInputs=1, outputShape=(1,768)) -> + FullyConnected(units=33) -> + Softmax() -> + softmax; + } +} diff --git a/src/test/resources/models/episodicMemorySimple/episodicMemorySimple.tag b/src/test/resources/models/episodicMemorySimple/episodicMemorySimple.tag new file mode 100644 index 00000000..0b5b8796 --- /dev/null +++ b/src/test/resources/models/episodicMemorySimple/episodicMemorySimple.tag @@ -0,0 +1,8 @@ +/* (c) https://github.com/MontiCore/monticore */ +package episodicMemorySimple; +conforms to dltag.DataPathTagSchema, dltag.LayerPathParameterTagSchema; + +tags episodic { +tag Network with DataPath = {path = src/test/resources/training_data/episodicMemorySimple, type = HDF5}; +tag Network with LayerPathParameter = {path = src/test/resources/pretrained/episodicMemorySimple, id = simple}; +} diff --git a/src/test/resources/pretrained/episodicMemorySimple/simple_embedding-0000.params 
b/src/test/resources/pretrained/episodicMemorySimple/simple_embedding-0000.params new file mode 100644 index 0000000000000000000000000000000000000000..3288444a7f261cd83beb3be9c9655c2e5a376e3f GIT binary patch literal 32 LcmWe)WWWmm0)GG$ literal 0 HcmV?d00001 diff --git a/src/test/resources/pretrained/episodicMemorySimple/simple_embedding-symbol.json b/src/test/resources/pretrained/episodicMemorySimple/simple_embedding-symbol.json new file mode 100644 index 00000000..31c823ad --- /dev/null +++ b/src/test/resources/pretrained/episodicMemorySimple/simple_embedding-symbol.json @@ -0,0 +1,18 @@ +{ + "nodes": [ + { + "op": "null", + "name": "data", + "inputs": [] + }, + { + "op": "_copy", + "name": "simpleembedding0_identity0", + "inputs": [[0, 0, 0]] + } + ], + "arg_nodes": [0], + "node_row_ptr": [0, 1, 2], + "heads": [[1, 0, 0]], + "attrs": {"mxnet_version": ["int", 10501]} +} \ No newline at end of file diff --git a/src/test/resources/target_code/gluon/CNNCreator_mnist_mnistClassifier_net.py b/src/test/resources/target_code/gluon/CNNCreator_mnist_mnistClassifier_net.py index 21ce4267..814f8eba 100644 --- a/src/test/resources/target_code/gluon/CNNCreator_mnist_mnistClassifier_net.py +++ b/src/test/resources/target_code/gluon/CNNCreator_mnist_mnistClassifier_net.py @@ -2,6 +2,8 @@ import mxnet as mx import logging import os import shutil +import warnings +import inspect from CNNNet_mnist_mnistClassifier_net import Net_0 @@ -20,6 +22,10 @@ class CNNCreator_mnist_mnistClassifier_net: for i, network in self.networks.items(): lastEpoch = 0 param_file = None + if hasattr(network, 'episodic_sub_nets'): + num_episodic_sub_nets = len(network.episodic_sub_nets) + lastMemEpoch = [0]*num_episodic_sub_nets + mem_files = [None]*num_episodic_sub_nets try: os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + "_newest-0000.params") @@ -30,22 +36,77 @@ class CNNCreator_mnist_mnistClassifier_net: except OSError: pass + if hasattr(network, 'episodic_sub_nets'): + try: + 
os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(0) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(0) + "-symbol.json") + except OSError: + pass + + for j in range(len(network.episodic_sub_nets)): + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(j+1) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(j+1) + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_query_net_' + str(j+1) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_query_net_' + str(j+1) + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_loss' + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_loss' + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + "_newest_episodic_memory_sub_net_" + str(j + 1) + "-0000") + except OSError: + pass + if os.path.isdir(self._model_dir_): for file in os.listdir(self._model_dir_): - if ".params" in file and self._model_prefix_ + "_" + str(i) in file: - epochStr = file.replace(".params","").replace(self._model_prefix_ + "_" + str(i) + "-","") + if ".params" in file and self._model_prefix_ + "_" + str(i) in file and not "loss" in file: + epochStr = file.replace(".params", "").replace(self._model_prefix_ + "_" + str(i) + "-", "") epoch = int(epochStr) - if epoch > lastEpoch: + if epoch >= lastEpoch: lastEpoch = epoch param_file = file + elif hasattr(network, 'episodic_sub_nets') and 
self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_" in file: + relMemPathInfo = file.replace(self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_", "").split("-") + memSubNet = int(relMemPathInfo[0]) + memEpochStr = relMemPathInfo[1] + memEpoch = int(memEpochStr) + if memEpoch >= lastMemEpoch[memSubNet-1]: + lastMemEpoch[memSubNet-1] = memEpoch + mem_files[memSubNet-1] = file + if param_file is None: earliestLastEpoch = 0 else: logging.info("Loading checkpoint: " + param_file) network.load_parameters(self._model_dir_ + param_file) + if hasattr(network, 'episodic_sub_nets'): + for j, sub_net in enumerate(network.episodic_sub_nets): + if mem_files[j] != None: + logging.info("Loading Replay Memory: " + mem_files[j]) + mem_layer = [param for param in inspect.getmembers(sub_net, lambda x: not(inspect.isroutine(x))) if param[0].startswith("memory")][0][1] + mem_layer.load_memory(self._model_dir_ + mem_files[j]) - if earliestLastEpoch == None or lastEpoch < earliestLastEpoch: - earliestLastEpoch = lastEpoch + if earliestLastEpoch == None or lastEpoch + 1 < earliestLastEpoch: + earliestLastEpoch = lastEpoch + 1 return earliestLastEpoch @@ -56,27 +117,52 @@ class CNNCreator_mnist_mnistClassifier_net: for i, network in self.networks.items(): # param_file = self._model_prefix_ + "_" + str(i) + "_newest-0000.params" param_file = None + if hasattr(network, 'episodic_sub_nets'): + num_episodic_sub_nets = len(network.episodic_sub_nets) + lastMemEpoch = [0] * num_episodic_sub_nets + mem_files = [None] * num_episodic_sub_nets + if os.path.isdir(self._weights_dir_): lastEpoch = 0 for file in os.listdir(self._weights_dir_): - if ".params" in file and self._model_prefix_ + "_" + str(i) in file: + if ".params" in file and self._model_prefix_ + "_" + str(i) in file and not "loss" in file: epochStr = file.replace(".params","").replace(self._model_prefix_ + "_" + str(i) + "-","") epoch = int(epochStr) - if epoch > lastEpoch: + if epoch >= lastEpoch: lastEpoch = 
epoch param_file = file + elif hasattr(network, 'episodic_sub_nets') and self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_" in file: + relMemPathInfo = file.replace(self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_").split("-") + memSubNet = int(relMemPathInfo[0]) + memEpochStr = relMemPathInfo[1] + memEpoch = int(memEpochStr) + if memEpoch >= lastMemEpoch[memSubNet-1]: + lastMemEpoch[memSubNet-1] = memEpoch + mem_files[memSubNet-1] = file + logging.info("Loading pretrained weights: " + self._weights_dir_ + param_file) network.load_parameters(self._weights_dir_ + param_file, allow_missing=True, ignore_extra=True) + if hasattr(network, 'episodic_sub_nets'): + assert lastEpoch == lastMemEpoch + for j, sub_net in enumerate(network.episodic_sub_nets): + if mem_files[j] != None: + logging.info("Loading pretrained Replay Memory: " + mem_files[j]) + mem_layer = \ + [param for param in inspect.getmembers(sub_net, lambda x: not (inspect.isroutine(x))) if + param[0].startswith("memory")][0][1] + mem_layer.load_memory(self._model_dir_ + mem_files[j]) else: logging.info("No pretrained weights available at: " + self._weights_dir_ + param_file) def construct(self, context, data_mean=None, data_std=None): - self.networks[0] = Net_0(data_mean=data_mean, data_std=data_std) - self.networks[0].collect_params().initialize(self.weight_initializer, ctx=context) + self.networks[0] = Net_0(data_mean=data_mean, data_std=data_std, mx_context=context, prefix="") + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + self.networks[0].collect_params().initialize(self.weight_initializer, force_reinit=False, ctx=context) self.networks[0].hybridize() - self.networks[0](mx.nd.zeros((1, 1,28,28,), ctx=context)) + self.networks[0](mx.nd.zeros((1, 1,28,28,), ctx=context[0])) if not os.path.exists(self._model_dir_): os.makedirs(self._model_dir_) diff --git a/src/test/resources/target_code/gluon/CNNNet_mnist_mnistClassifier_net.py 
b/src/test/resources/target_code/gluon/CNNNet_mnist_mnistClassifier_net.py index e376a095..a402a77e 100644 --- a/src/test/resources/target_code/gluon/CNNNet_mnist_mnistClassifier_net.py +++ b/src/test/resources/target_code/gluon/CNNNet_mnist_mnistClassifier_net.py @@ -1,7 +1,10 @@ import mxnet as mx import numpy as np import math -from mxnet import gluon +import os +import abc +import warnings +from mxnet import gluon, nd class ZScoreNormalization(gluon.HybridBlock): @@ -86,9 +89,419 @@ class CustomGRU(gluon.HybridBlock): output, [state0] = self.gru(data, [F.swapaxes(state0, 0, 1)]) return output, F.swapaxes(state0, 0, 1) + +class DotProductSelfAttention(gluon.HybridBlock): + def __init__(self, + scale_factor, + num_heads, + dim_model, + dim_keys, + dim_values, + use_proj_bias, + use_mask, + **kwargs): + super(DotProductSelfAttention, self).__init__(**kwargs) + with self.name_scope(): + self.num_heads = num_heads + self.dim_model = dim_model + self.use_proj_bias = use_proj_bias + self.use_mask = use_mask + + if dim_keys == -1: + self.dim_keys = int(dim_model / self.num_heads) + else: + self.dim_keys = dim_keys + if dim_values == -1: + self.dim_values = int(dim_model / self.num_heads) + else: + self.dim_values = dim_values + + if scale_factor == -1: + self.scale_factor = math.sqrt(self.dim_keys) + else: + self.scale_factor = scale_factor + + self.proj_q = gluon.nn.Dense(self.num_heads*self.dim_keys, use_bias=self.use_proj_bias, flatten=False) + self.proj_k = gluon.nn.Dense(self.num_heads*self.dim_keys, use_bias=self.use_proj_bias, flatten=False) + self.proj_v = gluon.nn.Dense(self.num_heads*self.dim_values, use_bias=self.use_proj_bias, flatten=False) + self.proj_o = gluon.nn.Dense(self.dim_model, use_bias=self.use_proj_bias, flatten=False) + + def hybrid_forward(self, F, queries, keys, values, *args, **kwargs): + + queries = F.Reshape(queries, shape=(0, 0,-1)) + keys = F.Reshape(queries, shape=(0, 0, -1)) + values = F.Reshape(queries, shape=(0, 0, -1)) + + 
head_queries = self.proj_q(queries) + head_keys = self.proj_k(keys) + head_values = self.proj_v(values) + + head_queries = F.reshape(head_queries, shape=(0, 0, self.num_heads, -1)) + head_queries = F.transpose(head_queries, axes=(0,2,1,3)) + head_queries = F.reshape(head_queries, shape=(-1, 0, 0), reverse=True) + + head_keys = F.reshape(head_keys, shape=(0, 0, self.num_heads, -1)) + head_keys = F.transpose(head_keys, axes=(0,2,1,3)) + head_keys = F.reshape(head_keys, shape=(-1, 0, 0), reverse=True) + + score = F.batch_dot(head_queries, head_keys, transpose_b=True) + score = score * self.scale_factor + if self.use_mask: + mask = F.tile(mask, self.num_heads) + mask = F.repeat(mask, self.dim_model) + mask = F.reshape(mask, shape=(-1, self.dim_model)) + weights = F.softmax(score, mask, use_length=self.use_mask) + + head_values = F.reshape(head_values, shape=(0, 0, self.num_heads, -1)) + head_values = F.transpose(head_values, axes=(0,2,1,3)) + head_values = F.reshape(head_values, shape=(-1, 0, 0), reverse=True) + + ret = F.batch_dot(weights, head_values) + ret = F.reshape(ret, shape=(-1, self.num_heads, 0, 0), reverse=True) + ret = F.transpose(ret, axes=(0, 2, 1, 3)) + ret = F.reshape(ret, shape=(0, 0, -1)) + + ret = self.proj_o(ret) + + return ret + + +class EpisodicReplayMemoryInterface(gluon.HybridBlock): + __metaclass__ = abc.ABCMeta + + def __init__(self, use_replay, replay_interval, replay_batch_size, replay_steps, replay_gradient_steps, num_heads, **kwargs): + super(EpisodicReplayMemoryInterface, self).__init__(**kwargs) + + self.use_replay = use_replay + self.replay_interval = replay_interval + self.replay_batch_size = replay_batch_size + self.replay_steps = replay_steps + self.replay_gradient_steps = replay_gradient_steps + self.num_heads = num_heads + + @abc.abstractmethod + def store_samples(self, data, y, query_network, store_prob, mx_context): + pass + + @abc.abstractmethod + def sample_memory(self, batch_size, mx_context): + pass + + @abc.abstractmethod + 
def get_query_network(self, mx_context): + pass + + @abc.abstractmethod + def save_memory(self, path): + pass + + @abc.abstractmethod + def load_memory(self, path): + pass + +#Memory layer +class LargeMemory(gluon.HybridBlock): + def __init__(self, + sub_key_size, + query_size, + query_act, + dist_measure, + k, + num_heads, + values_dim, + **kwargs): + super(LargeMemory, self).__init__(**kwargs) + with self.name_scope(): + #Memory parameters + self.dist_measure = dist_measure + self.k = k + self.num_heads = num_heads + self.query_act = query_act + self.query_size = query_size + self.num_heads = num_heads + + #Batch norm sub-layer + self.batch_norm = gluon.nn.BatchNorm() + + #Memory sub-layer + self.sub_key_size = sub_key_size + sub_key_shape = (self.num_heads, self.sub_key_size, int(query_size[-1] / 2)) + + if values_dim == -1: + values_shape = (self.sub_key_size * self.sub_key_size, self.query_size[-1]) + else: + values_shape = (self.sub_key_size*self.sub_key_size, values_dim) + + self.sub_keys1 = self.params.get("sub_keys1", shape=sub_key_shape, differentiable=True) + self.sub_keys2 = self.params.get("sub_keys2", shape=sub_key_shape, differentiable=True) + self.values = self.params.get("values", shape=values_shape, differentiable=True) + self.label_memory = nd.array([]) + + self.get_query_network() + + def hybrid_forward(self, F, x, sub_keys1, sub_keys2, values): + x = self.batch_norm(x) + + x = F.reshape(x, shape=(0, -1)) + + q = self.query_network(x) + + q = F.reshape(q, shape=(0, self.num_heads, -1)) + + q_split = F.split(q, num_outputs=2, axis=-1) + + if self.dist_measure == "l2": + q_split_resh = F.reshape(q_split[0], shape=(0,0,1,-1)) + sub_keys1_resh = F.reshape(sub_keys1, shape=(1,0,0,-1), reverse=True) + q1_diff = F.broadcast_sub(q_split_resh, sub_keys1_resh) + q1_dist = F.norm(q1_diff, axis=-1) + q_split_resh = F.reshape(q_split[1], shape=(0,0,1,-1)) + sub_keys2_resh = F.reshape(sub_keys2, shape=(1,0,0,-1), reverse=True) + q2_diff = 
F.broadcast_sub(q_split_resh, sub_keys2_resh) + q2_dist = F.norm(q2_diff, axis=-1) + else: + q1 = F.split(q_split[0], num_outputs=self.num_heads, axis=1) + q2 = F.split(q_split[1], num_outputs=self.num_heads, axis=1) + sub_keys1_resh = F.split(sub_keys1, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + sub_keys2_resh = F.split(sub_keys2, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + if self.num_heads == 1: + q1 = [q1] + q2 = [q2] + sub_keys1_resh = [sub_keys1_resh ] + sub_keys2_resh = [sub_keys2_resh ] + + q1_dist = F.dot(q1[0], sub_keys1_resh[0], transpose_b=True) + q2_dist = F.dot(q2[0], sub_keys2_resh[0], transpose_b=True) + for h in range(1, self.num_heads): + q1_dist = F.concat(q1_dist, F.dot(q1[0], sub_keys1_resh[h], transpose_b=True), dim=1) + q2_dist = F.concat(q2_dist, F.dot(q2[0], sub_keys1_resh[h], transpose_b=True), dim=1) + + i1 = F.topk(q1_dist, k=self.k, ret_typ="indices") + i2 = F.topk(q2_dist, k=self.k, ret_typ="indices") + + # Calculate cross product for keys at indices I1 and I2 + + # def head_take(data, state): + # return [F.take(data[0], data[2]), F.take(data[1], data[3])], state, + # + # i1 = F.transpose(i1, axes=(1,0,2)) + # i2 = F.transpose(i2, axes=(1, 0, 2)) + # st = F.zeros(1) + # (k1, k2), _ = F.contrib.foreach(head_take, [sub_keys1, sub_keys2,i1,i2], st) + # k1 = F.reshape(k1, shape=(-1, 0, 0), reverse=True) + # k2 = F.reshape(k2, shape=(-1, 0, 0), reverse=True) + i1 = F.split(i1, num_outputs=self.num_heads, axis=1) + i2 = F.split(i2, num_outputs=self.num_heads, axis=1) + sub_keys1 = F.split(sub_keys1, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + sub_keys2 = F.split(sub_keys2, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + if self.num_heads == 1: + i1 = [i1] + i2 = [i2] + sub_keys1 = [sub_keys1] + sub_keys2 = [sub_keys2] + + k1 = F.take(sub_keys1[0], i1[0]) + k2 = F.take(sub_keys2[0], i2[0]) + for h in range(1, self.num_heads): + k1 = F.concat(k1, F.take(sub_keys1[h], i1[h]), dim=1) + k2 = 
F.concat(k2, F.take(sub_keys2[h], i2[h]), dim=1) + + k1 = F.tile(k1, (1, 1, self.k, 1)) + k2 = F.repeat(k2, self.k, 2) + c_cart = F.concat(k1, k2, dim=3) + + q = F.reshape(q, shape=(-1,0), reverse=True) + q = F.reshape(q, shape=(0, 1, -1)) + c_cart = F.reshape(c_cart, shape=(-1, 0, 0), reverse=True) + if self.dist_measure == "l2": + k_diff = F.broadcast_sub(q, c_cart) + k_dist = F.norm(k_diff, axis=-1) + else: + k_dist = F.batch_dot(q, c_cart, transpose_b=True) #F.contrib.foreach(loop_batch_dot, [q, c_cart], init_states=state_batch_dist) + k_dist = F.reshape(k_dist, shape=(0, -1)) + + i = F.topk(k_dist, k=self.k, ret_typ="both") + + w = F.softmax(i[0]) + w = F.reshape(w, shape=(0,1,-1)) + vi = F.take(values, i[1]) + aggr_value = F.batch_dot(w, vi) #F.contrib.foreach(loop_batch_dot, [w, vi], init_states=state_batch_dist) + + ret = F.reshape(aggr_value, shape=(-1, self.num_heads, 0), reverse=True) + one_vec = F.ones((1, 1, self.num_heads)) + one_vec = F.broadcast_like(one_vec, ret, lhs_axes=0, rhs_axes=0) + ret = F.batch_dot(one_vec, ret) + ret = F.reshape(ret, shape=(-1, 0), reverse=True) + + return ret + + def get_query_network(self): + if hasattr(self, 'query_network'): + return self.query_network + else: + self.query_network = gluon.nn.HybridSequential() + for size in self.query_size: + if self.query_act == "linear": + self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, flatten=False)) + else: + self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, activation=self.query_act, flatten=False)) + return self.query_network + + +#EpisodicMemory layer +class EpisodicMemory(EpisodicReplayMemoryInterface): + def __init__(self, + replay_interval, + replay_batch_size, + replay_steps, + replay_gradient_steps, + store_prob, + max_stored_samples, + use_replay, + query_net_dir, + query_net_prefix, + query_net_num_inputs, + **kwargs): + super(EpisodicMemory, self).__init__(use_replay, replay_interval, replay_batch_size, replay_steps, 
replay_gradient_steps, 1, **kwargs) + with self.name_scope(): + #Replay parameters + self.store_prob = store_prob + self.max_stored_samples = max_stored_samples + + self.query_net_dir = query_net_dir + self.query_net_prefix = query_net_prefix + self.query_net_num_inputs = query_net_num_inputs + + #Memory + self.key_memory = nd.array([]) + self.value_memory = nd.array([]) + self.label_memory = nd.array([]) + + def hybrid_forward(self, F, *args): + #propagate the input as the rest is only used for replay + return [args, []] + + def store_samples(self, data, y, query_network, store_prob, context): + num_pus = len(data) + sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] + num_inputs = len(data[0][0]) + num_outputs = len(y) + mx_context = context[0] + + if len(self.key_memory) == 0: + self.key_memory = nd.empty(0, ctx=mx.cpu()) + self.value_memory = [] + self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) + + ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] + + max_inds = [nd.max(ind[i]) for i in range(num_pus)] + if any(max_inds): + to_store_values = [] + for i in range(num_inputs): + tmp_values = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_values, list): + tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) + else: + tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) + to_store_values.append(tmp_values) + + to_store_labels = [] + for i in range(num_outputs): + tmp_labels = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_labels, list): + tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) + else: + tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) + to_store_labels.append(tmp_labels) + + to_store_keys = 
query_network(*to_store_values[0:self.query_net_num_inputs]) + + if self.key_memory.shape[0] == 0: + self.key_memory = to_store_keys.as_in_context(mx.cpu()) + for i in range(num_inputs): + self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + for i in range(num_outputs): + self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) + elif self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: + num_to_store = to_store_keys.shape[0] + self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=1) + else: + self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + + def sample_memory(self, batch_size): + num_stored_samples = self.key_memory.shape[0] + if self.replay_batch_size == -1: + sample_ind = nd.random.randint(0, num_stored_samples, (self.replay_steps, batch_size), ctx=mx.cpu()) + else: + sample_ind = nd.random.randint(0, num_stored_samples, (self.replay_steps, self.replay_batch_size), ctx=mx.cpu()) + + num_outputs = len(self.label_memory) + + sample_labels = [[self.label_memory[i][ind] for i in range(num_outputs)] for ind in sample_ind] + sample_batches = [[[self.value_memory[j][ind] for j in range(len(self.value_memory))], sample_labels[i]] for i, ind in enumerate(sample_ind)] + + return sample_batches + + def get_query_network(self, context): + 
lastEpoch = 0 + for file in os.listdir(self.query_net_dir): + if self.query_net_prefix in file and ".json" in file: + symbolFile = file + + if self.query_net_prefix in file and ".param" in file: + epochStr = file.replace(".params", "").replace(self.query_net_prefix, "") + epoch = int(epochStr) + if epoch >= lastEpoch: + lastEpoch = epoch + weightFile = file + + inputNames = [] + if self.query_net_num_inputs == 1: + inputNames.append("data") + else: + for i in range(self.query_net_num_inputs): + inputNames.append("data" + str(i)) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + net = mx.gluon.nn.SymbolBlock.imports(self.query_net_dir + symbolFile, inputNames, self.query_net_dir + weightFile, ctx=context[0]) + net.hybridize() + return net + + def save_memory(self, path): + mem_arr = [("keys", self.key_memory)] + [("values_"+str(k),v) for (k,v) in enumerate(self.value_memory)] + [("labels_"+str(k),v) for (k,v) in enumerate(self.label_memory)] + mem_dict = {entry[0]:entry[1] for entry in mem_arr} + nd.save(path, mem_dict) + + def load_memory(self, path): + mem_dict = nd.load(path) + self.value_memory = [] + self.label_memory = [] + for key in sorted(mem_dict.keys()): + if key == "keys": + self.key_memory = mem_dict[key] + elif key.startswith("values_"): + self.value_memory.append(mem_dict[key]) + elif key.startswith("labels_"): + self.label_memory.append(mem_dict[key]) + + +#Stream 0 class Net_0(gluon.HybridBlock): - def __init__(self, data_mean=None, data_std=None, **kwargs): + def __init__(self, data_mean=None, data_std=None, mx_context=None, **kwargs): super(Net_0, self).__init__(**kwargs) with self.name_scope(): if data_mean: @@ -146,5 +559,5 @@ class Net_0(gluon.HybridBlock): softmax3_ = F.softmax(fc3_, axis=-1) predictions_ = F.identity(softmax3_) - return predictions_ + return [[predictions_]] diff --git a/src/test/resources/target_code/gluon/CNNPredictor_mnist_mnistClassifier_net.h 
b/src/test/resources/target_code/gluon/CNNPredictor_mnist_mnistClassifier_net.h index 75d7c61e..55a9d632 100644 --- a/src/test/resources/target_code/gluon/CNNPredictor_mnist_mnistClassifier_net.h +++ b/src/test/resources/target_code/gluon/CNNPredictor_mnist_mnistClassifier_net.h @@ -1,107 +1,149 @@ #ifndef CNNPREDICTOR_MNIST_MNISTCLASSIFIER_NET #define CNNPREDICTOR_MNIST_MNISTCLASSIFIER_NET -#include +#include #include #include #include + +#include +#include -#include - +using namespace mxnet::cpp; + class CNNPredictor_mnist_mnistClassifier_net_0{ public: - const std::string json_file = "model/mnist.LeNetNetwork/model_0_newest-symbol.json"; - const std::string param_file = "model/mnist.LeNetNetwork/model_0_newest-0000.params"; - const std::vector input_keys = { + const std::string file_prefix = "model/mnist.LeNetNetwork/model_0_newest"; + + //network + const std::vector network_input_keys = { "data" }; - const std::vector> input_shapes = {{1, 1, 28, 28}}; - const bool use_gpu = false; - - PredictorHandle handle; - + const std::vector> network_input_shapes = {{1, 1, 28, 28}}; + std::vector network_input_sizes; + std::vector> network_arg_names; + std::vector network_handles; + + + //misc + Context ctx = Context::cpu(); //Will be updated later in init according to use_gpu + int dtype = 0; //use data type (float32=0 float64=1 ...) 
+ + explicit CNNPredictor_mnist_mnistClassifier_net_0(){ - init(json_file, param_file, input_keys, input_shapes, use_gpu); + init(file_prefix, network_input_keys, network_input_shapes); } ~CNNPredictor_mnist_mnistClassifier_net_0(){ - if(handle) MXPredFree(handle); + for(Executor * handle : network_handles){ + delete handle; + } + MXNotifyShutdown(); } void predict(const std::vector &in_image_, std::vector &out_predictions_){ - MXPredSetInput(handle, input_keys[0].c_str(), in_image_.data(), static_cast(in_image_.size())); - - MXPredForward(handle); - mx_uint output_index; - mx_uint *shape = 0; - mx_uint shape_len; - size_t size; - - output_index = 0; - MXPredGetOutputShape(handle, output_index, &shape, &shape_len); - size = 1; - for (mx_uint i = 0; i < shape_len; ++i) size *= shape[i]; - assert(size == out_predictions_.size()); - MXPredGetOutput(handle, output_index, &(out_predictions_[0]), out_predictions_.size()); + NDArray input_temp; + input_temp = NDArray(network_input_shapes[0], ctx, false, dtype); + input_temp.SyncCopyFromCPU(in_image_.data(), network_input_sizes[0]); + input_temp.CopyTo(&(network_handles[0]->arg_dict()[network_input_keys[0]])); + NDArray::WaitAll(); + + network_handles[0]->Forward(false); + CheckMXNetError("Forward, predict, handle ind. 
0"); + + + std::vector output = network_handles.back()->outputs; + std::vector curr_output_shape; + size_t curr_output_size; + curr_output_shape = output[0].GetShape(); + curr_output_size = 1; + for (mx_uint i : curr_output_shape) curr_output_size *= i; + //Fix due to a bug in the in how the output arrays are initialized when there are multiple outputs + assert((curr_output_size == out_predictions_.size()) || (curr_output_size == out_predictions_[0])); + output[0].SyncCopyToCPU(&out_predictions_); + } + + + + Executor* initExecutor(Symbol &sym, + std::map ¶m_map, + const std::vector &exec_input_keys, + const std::vector> &exec_input_shapes){ + + const mx_uint num_exec_input_nodes = exec_input_keys.size(); + for(mx_uint i = 0; i < num_exec_input_nodes; i++){ + param_map[exec_input_keys[i]] = NDArray(exec_input_shapes[i], ctx, false, dtype); + } - void init(const std::string &json_file, - const std::string ¶m_file, - const std::vector &input_keys, - const std::vector> &input_shapes, - const bool &use_gpu){ + std::vector param_arrays; + std::vector grad_array; + std::vector grad_reqs; + std::vector aux_arrays; + std::map< std::string, NDArray> aux_map; - BufferFile json_data(json_file); - BufferFile param_data(param_file); + sym.InferExecutorArrays(ctx, ¶m_arrays, &grad_array, &grad_reqs, + &aux_arrays, param_map, std::map(), + std::map(), aux_map); - int dev_type = use_gpu ? 
2 : 1; - int dev_id = 0; + Executor *handle = new Executor(sym, ctx, param_arrays, grad_array, grad_reqs, aux_arrays); + assert(handle); + return handle; + } - if (json_data.GetLength() == 0 || - param_data.GetLength() == 0) { - std::exit(-1); + std::vector getSizesOfShapes(const std::vector> shapes){ + std::vector sizes; + for(std::vector shape : shapes){ + mx_uint val = 1; + for(mx_uint i: shape){ + val *= i; + } + sizes.push_back(val); } + return sizes; + } - const mx_uint num_input_nodes = input_keys.size(); - - const char* input_keys_ptr[num_input_nodes]; - for(mx_uint i = 0; i < num_input_nodes; i++){ - input_keys_ptr[i] = input_keys[i].c_str(); + void CheckMXNetError(std::string loc){ + const char* err = MXGetLastError(); + if (err && err[0] != 0) { + std::cout << "MXNet error at " << loc << err << std::endl; + exit(-1); } - - mx_uint shape_data_size = 0; - mx_uint input_shape_indptr[input_shapes.size() + 1]; - input_shape_indptr[0] = 0; - for(mx_uint i = 0; i < input_shapes.size(); i++){ - shape_data_size += input_shapes[i].size(); - input_shape_indptr[i+1] = shape_data_size; + } + + void init(const std::string &file_prefix, + const std::vector &network_input_keys, + const std::vector> &network_input_shapes){ + + CNNLAOptimizer_mnist_mnistClassifier_net optimizer_creator = CNNLAOptimizer_mnist_mnistClassifier_net(); + + if(optimizer_creator.getContextName() == "gpu"){ + ctx = Context::gpu(); } - - mx_uint input_shape_data[shape_data_size]; - mx_uint index = 0; - for(mx_uint i = 0; i < input_shapes.size(); i++){ - for(mx_uint j = 0; j < input_shapes[i].size(); j++){ - input_shape_data[index] = input_shapes[i][j]; - index++; - } + + network_input_sizes = getSizesOfShapes(network_input_shapes); + + ModelLoader model_loader(file_prefix, 0, ctx); + + std::vector network_symbols = model_loader.GetNetworkSymbols(); + std::vector> network_param_maps; + network_param_maps = model_loader.GetNetworkParamMaps(); + + //Init handles + std::map> in_shape_map; + 
for(mx_uint i=0; i < network_input_keys.size(); i++){ + in_shape_map[network_input_keys[i]] = network_input_shapes[i]; } - - MXPredCreate(static_cast(json_data.GetBuffer()), - static_cast(param_data.GetBuffer()), - static_cast(param_data.GetLength()), - dev_type, - dev_id, - num_input_nodes, - input_keys_ptr, - input_shape_indptr, - input_shape_data, - &handle); - assert(handle); + std::vector> in_shapes; + std::vector> aux_shapes; + std::vector> out_shapes; + network_symbols[0].InferShape(in_shape_map, &in_shapes, &aux_shapes, &out_shapes); + network_handles.push_back(initExecutor(network_symbols[0], network_param_maps[0], network_input_keys, network_input_shapes)); + } }; - #endif // CNNPREDICTOR_MNIST_MNISTCLASSIFIER_NET diff --git a/src/test/resources/target_code/gluon/CNNSupervisedTrainer_mnist_mnistClassifier_net.py b/src/test/resources/target_code/gluon/CNNSupervisedTrainer_mnist_mnistClassifier_net.py index b6f1a593..7403768f 100644 --- a/src/test/resources/target_code/gluon/CNNSupervisedTrainer_mnist_mnistClassifier_net.py +++ b/src/test/resources/target_code/gluon/CNNSupervisedTrainer_mnist_mnistClassifier_net.py @@ -7,7 +7,13 @@ import shutil import pickle import math import sys +import inspect from mxnet import gluon, autograd, nd +try: + import AdamW +except: + pass + class CrossEntropyLoss(gluon.loss.Loss): def __init__(self, axis=-1, sparse_label=True, weight=None, batch_axis=0, **kwargs): @@ -54,7 +60,7 @@ class SoftmaxCrossEntropyLossIgnoreIndices(gluon.loss.Loss): loss = -(pred * label).sum(axis=self._axis, keepdims=True) # ignore some indices for loss, e.g. 
tokens in NLP applications for i in self._ignore_indices: - loss = loss * mx.nd.logical_not(mx.nd.equal(mx.nd.argmax(pred, axis=1), mx.nd.ones_like(mx.nd.argmax(pred, axis=1))*i) * mx.nd.equal(mx.nd.argmax(pred, axis=1), label)) + loss = F.broadcast_mul(loss, F.logical_not(F.broadcast_equal(F.argmax(pred, axis=1), F.ones_like(F.argmax(pred, axis=1))*i) * F.broadcast_equal(F.argmax(pred, axis=1), label))) return loss.mean(axis=self._batch_axis, exclude=True) class DiceLoss(gluon.loss.Loss): @@ -277,12 +283,21 @@ class CNNSupervisedTrainer_mnist_mnistClassifier_net: shuffle_data=False, clip_global_grad_norm=None, preprocessing = False): + num_pus = 1 if context == 'gpu': - mx_context = mx.gpu() + num_pus = mx.context.num_gpus() + if num_pus >= 1: + if num_pus == 1: + mx_context = [mx.gpu(0)] + else: + mx_context = [mx.gpu(i) for i in range(num_pus)] + else: + logging.error("Context argument is '" + context + "'. But no gpu is present in the system.") elif context == 'cpu': - mx_context = mx.cpu() + mx_context = [mx.cpu()] else: logging.error("Context argument is '" + context + "'. 
Only 'cpu' and 'gpu are valid arguments'.") + single_pu_batch_size = int(batch_size/num_pus) if preprocessing: preproc_lib = "CNNPreprocessor_mnist_mnistClassifier_net_executor" @@ -327,7 +342,10 @@ class CNNSupervisedTrainer_mnist_mnistClassifier_net: if not os.path.isdir(self._net_creator._model_dir_): raise - trainers = [mx.gluon.Trainer(network.collect_params(), optimizer, optimizer_params) for network in self._networks.values() if len(network.collect_params().values()) != 0] + if optimizer == "adamw": + trainers = [mx.gluon.Trainer(network.collect_params(), AdamW.AdamW(**optimizer_params)) for network in self._networks.values() if len(network.collect_params().values()) != 0] + else: + trainers = [mx.gluon.Trainer(network.collect_params(), optimizer, optimizer_params) for network in self._networks.values() if len(network.collect_params().values()) != 0] margin = loss_params['margin'] if 'margin' in loss_params else 1.0 sparseLabel = loss_params['sparse_label'] if 'sparse_label' in loss_params else True @@ -372,9 +390,16 @@ class CNNSupervisedTrainer_mnist_mnistClassifier_net: loss_function = LogCoshLoss() else: logging.error("Invalid loss parameter.") + + loss_function.hybridize() + + tic = None + avg_speed = 0 + n = 0 + for epoch in range(begin_epoch, begin_epoch + num_epoch): if shuffle_data: if preprocessing: @@ -389,31 +414,36 @@ class CNNSupervisedTrainer_mnist_mnistClassifier_net: loss_total = 0 train_iter.reset() for batch_i, batch in enumerate(train_iter): + + with autograd.record(): - labels = [batch.label[i].as_in_context(mx_context) for i in range(1)] + labels = [gluon.utils.split_and_load(batch.label[i], ctx_list=mx_context, even_split=False) for i in range(1)] - image_ = batch.data[0].as_in_context(mx_context) + image_ = gluon.utils.split_and_load(batch.data[0], ctx_list=mx_context, even_split=False) - predictions_ = mx.nd.zeros((batch_size, 10,), ctx=mx_context) + predictions_ = [mx.nd.zeros((single_pu_batch_size, 10,), ctx=context) for context in 
mx_context] nd.waitall() - lossList = [] + for i in range(num_pus): + lossList.append([]) - predictions_ = self._networks[0](image_) + net_ret = [self._networks[0](image_[i]) for i in range(num_pus)] + predictions_ = [net_ret[i][0][0] for i in range(num_pus)] + [lossList[i].append(loss_function(predictions_[i], labels[0][i])) for i in range(num_pus)] - lossList.append(loss_function(predictions_, labels[0])) - - loss = 0 - for element in lossList: - loss = loss + element - loss.backward() + losses = [0]*num_pus + for i in range(num_pus): + for element in lossList[i]: + losses[i] = losses[i] + element - loss_total += loss.sum().asscalar() + for loss in losses: + loss.backward() + loss_total += loss.sum().asscalar() + global_loss_train += loss.sum().asscalar() - global_loss_train += loss.sum().asscalar() train_batches += 1 if clip_global_grad_norm: @@ -426,7 +456,7 @@ class CNNSupervisedTrainer_mnist_mnistClassifier_net: for trainer in trainers: trainer.step(batch_size) - + if tic is None: tic = time.time() else: @@ -440,36 +470,39 @@ class CNNSupervisedTrainer_mnist_mnistClassifier_net: loss_total = 0 logging.info("Epoch[%d] Batch[%d] Speed: %.2f samples/sec Loss: %.5f" % (epoch, batch_i, speed, loss_avg)) - + + avg_speed += speed + n += 1 + tic = time.time() global_loss_train /= (train_batches * batch_size) tic = None - if eval_train: train_iter.reset() metric = mx.metric.create(eval_metric, **eval_metric_params) for batch_i, batch in enumerate(train_iter): - labels = [batch.label[i].as_in_context(mx_context) for i in range(1)] - - image_ = batch.data[0].as_in_context(mx_context) + labels = [gluon.utils.split_and_load(batch.label[i], ctx_list=mx_context, even_split=False)[0] for i in range(1)] + image_ = gluon.utils.split_and_load(batch.data[0], ctx_list=mx_context, even_split=False)[0] - predictions_ = mx.nd.zeros((batch_size, 10,), ctx=mx_context) + predictions_ = mx.nd.zeros((single_pu_batch_size, 10,), ctx=mx_context[0]) nd.waitall() - outputs = [] lossList = [] 
+ outputs = [] attentionList = [] - predictions_ = self._networks[0](image_) + net_ret = self._networks[0](image_) + predictions_ = net_ret[0][0] outputs.append(predictions_) lossList.append(loss_function(predictions_, labels[0])) + if save_attention_image == "True": import matplotlib matplotlib.use('Agg') @@ -510,7 +543,6 @@ class CNNSupervisedTrainer_mnist_mnistClassifier_net: os.makedirs(target_dir) plt.savefig(target_dir + '/attention_train.png') plt.close() - predictions = [] for output_name in outputs: if mx.nd.shape_array(mx.nd.squeeze(output_name)).size > 1: @@ -518,7 +550,8 @@ class CNNSupervisedTrainer_mnist_mnistClassifier_net: else: predictions.append(output_name) - metric.update(preds=predictions, labels=labels) + metric.update(preds=predictions, labels=[labels[j] for j in range(len(labels))]) + train_metric_score = metric.get()[1] else: train_metric_score = 0 @@ -529,25 +562,26 @@ class CNNSupervisedTrainer_mnist_mnistClassifier_net: test_iter.reset() metric = mx.metric.create(eval_metric, **eval_metric_params) for batch_i, batch in enumerate(test_iter): - if True: - labels = [batch.label[i].as_in_context(mx_context) for i in range(1)] - - image_ = batch.data[0].as_in_context(mx_context) + if True: + labels = [gluon.utils.split_and_load(batch.label[i], ctx_list=mx_context, even_split=False)[0] for i in range(1)] + image_ = gluon.utils.split_and_load(batch.data[0], ctx_list=mx_context, even_split=False)[0] - predictions_ = mx.nd.zeros((batch_size, 10,), ctx=mx_context) + predictions_ = mx.nd.zeros((single_pu_batch_size, 10,), ctx=mx_context[0]) nd.waitall() - outputs = [] lossList = [] + outputs = [] attentionList = [] - predictions_ = self._networks[0](image_) + net_ret = self._networks[0](image_) + predictions_ = net_ret[0][0] outputs.append(predictions_) lossList.append(loss_function(predictions_, labels[0])) + if save_attention_image == "True": if not eval_train: import matplotlib @@ -594,26 +628,40 @@ class 
CNNSupervisedTrainer_mnist_mnistClassifier_net: loss = loss + element global_loss_test += loss.sum().asscalar() + test_batches += 1 predictions = [] for output_name in outputs: predictions.append(output_name) - metric.update(preds=predictions, labels=labels) + metric.update(preds=predictions, labels=[labels[j] for j in range(len(labels))]) + test_metric_score = metric.get()[1] - global_loss_test /= (test_batches * batch_size) + global_loss_test /= (test_batches * single_pu_batch_size) logging.info("Epoch[%d] Train metric: %f, Test metric: %f, Train loss: %f, Test loss: %f" % (epoch, train_metric_score, test_metric_score, global_loss_train, global_loss_test)) - if (epoch - begin_epoch) % checkpoint_period == 0: + if (epoch+1) % checkpoint_period == 0: for i, network in self._networks.items(): network.save_parameters(self.parameter_path(i) + '-' + str(epoch).zfill(4) + '.params') + if hasattr(network, 'episodic_sub_nets'): + for j, net in enumerate(network.episodic_sub_nets): + episodic_layers[i][j].save_memory(self.parameter_path(i) + "_episodic_memory_sub_net_" + str(j + 1) + "-" + str(epoch).zfill(4)) for i, network in self._networks.items(): - network.save_parameters(self.parameter_path(i) + '-' + str(num_epoch + begin_epoch + 1).zfill(4) + '.params') + network.save_parameters(self.parameter_path(i) + '-' + str((num_epoch-1) + begin_epoch).zfill(4) + '.params') network.export(self.parameter_path(i) + '_newest', epoch=0) + + if hasattr(network, 'episodic_sub_nets'): + network.episodicsubnet0_.export(self.parameter_path(i) + '_newest_episodic_sub_net_' + str(0), epoch=0) + for j, net in enumerate(network.episodic_sub_nets): + net.export(self.parameter_path(i) + '_newest_episodic_sub_net_' + str(j+1), epoch=0) + episodic_query_networks[i][j].export(self.parameter_path(i) + '_newest_episodic_query_net_' + str(j+1), epoch=0) + episodic_layers[i][j].save_memory(self.parameter_path(i) + "_episodic_memory_sub_net_" + str(j + 1) + "-" + str((num_epoch - 1) + 
begin_epoch).zfill(4)) + episodic_layers[i][j].save_memory(self.parameter_path(i) + "_newest_episodic_memory_sub_net_" + str(j + 1) + "-0000") + loss_function.export(self.parameter_path(i) + '_newest_loss', epoch=0) def parameter_path(self, index): return self._net_creator._model_dir_ + self._net_creator._model_prefix_ + '_' + str(index) diff --git a/src/test/resources/target_code/gluon/ganModel/defaultGAN/CNNCreator_defaultGAN_defaultGANConnector_predictor.py b/src/test/resources/target_code/gluon/ganModel/defaultGAN/CNNCreator_defaultGAN_defaultGANConnector_predictor.py index 9e5e33e6..fafc619d 100644 --- a/src/test/resources/target_code/gluon/ganModel/defaultGAN/CNNCreator_defaultGAN_defaultGANConnector_predictor.py +++ b/src/test/resources/target_code/gluon/ganModel/defaultGAN/CNNCreator_defaultGAN_defaultGANConnector_predictor.py @@ -2,6 +2,8 @@ import mxnet as mx import logging import os import shutil +import warnings +import inspect from CNNNet_defaultGAN_defaultGANConnector_predictor import Net_0 @@ -20,6 +22,10 @@ class CNNCreator_defaultGAN_defaultGANConnector_predictor: for i, network in self.networks.items(): lastEpoch = 0 param_file = None + if hasattr(network, 'episodic_sub_nets'): + num_episodic_sub_nets = len(network.episodic_sub_nets) + lastMemEpoch = [0]*num_episodic_sub_nets + mem_files = [None]*num_episodic_sub_nets try: os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + "_newest-0000.params") @@ -30,22 +36,77 @@ class CNNCreator_defaultGAN_defaultGANConnector_predictor: except OSError: pass + if hasattr(network, 'episodic_sub_nets'): + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(0) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(0) + "-symbol.json") + except OSError: + pass + + for j in range(len(network.episodic_sub_nets)): + try: + os.remove(self._model_dir_ + 
self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(j+1) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(j+1) + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_query_net_' + str(j+1) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_query_net_' + str(j+1) + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_loss' + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_loss' + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + "_newest_episodic_memory_sub_net_" + str(j + 1) + "-0000") + except OSError: + pass + if os.path.isdir(self._model_dir_): for file in os.listdir(self._model_dir_): - if ".params" in file and self._model_prefix_ + "_" + str(i) in file: - epochStr = file.replace(".params","").replace(self._model_prefix_ + "_" + str(i) + "-","") + if ".params" in file and self._model_prefix_ + "_" + str(i) in file and not "loss" in file: + epochStr = file.replace(".params", "").replace(self._model_prefix_ + "_" + str(i) + "-", "") epoch = int(epochStr) - if epoch > lastEpoch: + if epoch >= lastEpoch: lastEpoch = epoch param_file = file + elif hasattr(network, 'episodic_sub_nets') and self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_" in file: + relMemPathInfo = file.replace(self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_", "").split("-") + memSubNet = int(relMemPathInfo[0]) + memEpochStr = relMemPathInfo[1] + memEpoch = int(memEpochStr) + if memEpoch >= lastMemEpoch[memSubNet-1]: + lastMemEpoch[memSubNet-1] = memEpoch + 
mem_files[memSubNet-1] = file + if param_file is None: earliestLastEpoch = 0 else: logging.info("Loading checkpoint: " + param_file) network.load_parameters(self._model_dir_ + param_file) + if hasattr(network, 'episodic_sub_nets'): + for j, sub_net in enumerate(network.episodic_sub_nets): + if mem_files[j] != None: + logging.info("Loading Replay Memory: " + mem_files[j]) + mem_layer = [param for param in inspect.getmembers(sub_net, lambda x: not(inspect.isroutine(x))) if param[0].startswith("memory")][0][1] + mem_layer.load_memory(self._model_dir_ + mem_files[j]) - if earliestLastEpoch == None or lastEpoch < earliestLastEpoch: - earliestLastEpoch = lastEpoch + if earliestLastEpoch == None or lastEpoch + 1 < earliestLastEpoch: + earliestLastEpoch = lastEpoch + 1 return earliestLastEpoch @@ -56,27 +117,52 @@ class CNNCreator_defaultGAN_defaultGANConnector_predictor: for i, network in self.networks.items(): # param_file = self._model_prefix_ + "_" + str(i) + "_newest-0000.params" param_file = None + if hasattr(network, 'episodic_sub_nets'): + num_episodic_sub_nets = len(network.episodic_sub_nets) + lastMemEpoch = [0] * num_episodic_sub_nets + mem_files = [None] * num_episodic_sub_nets + if os.path.isdir(self._weights_dir_): lastEpoch = 0 for file in os.listdir(self._weights_dir_): - if ".params" in file and self._model_prefix_ + "_" + str(i) in file: + if ".params" in file and self._model_prefix_ + "_" + str(i) in file and not "loss" in file: epochStr = file.replace(".params","").replace(self._model_prefix_ + "_" + str(i) + "-","") epoch = int(epochStr) - if epoch > lastEpoch: + if epoch >= lastEpoch: lastEpoch = epoch param_file = file + elif hasattr(network, 'episodic_sub_nets') and self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_" in file: + relMemPathInfo = file.replace(self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_").split("-") + memSubNet = int(relMemPathInfo[0]) + memEpochStr = relMemPathInfo[1] + memEpoch = int(memEpochStr) + if 
memEpoch >= lastMemEpoch[memSubNet-1]: + lastMemEpoch[memSubNet-1] = memEpoch + mem_files[memSubNet-1] = file + logging.info("Loading pretrained weights: " + self._weights_dir_ + param_file) network.load_parameters(self._weights_dir_ + param_file, allow_missing=True, ignore_extra=True) + if hasattr(network, 'episodic_sub_nets'): + assert lastEpoch == lastMemEpoch + for j, sub_net in enumerate(network.episodic_sub_nets): + if mem_files[j] != None: + logging.info("Loading pretrained Replay Memory: " + mem_files[j]) + mem_layer = \ + [param for param in inspect.getmembers(sub_net, lambda x: not (inspect.isroutine(x))) if + param[0].startswith("memory")][0][1] + mem_layer.load_memory(self._model_dir_ + mem_files[j]) else: logging.info("No pretrained weights available at: " + self._weights_dir_ + param_file) def construct(self, context, data_mean=None, data_std=None): - self.networks[0] = Net_0(data_mean=data_mean, data_std=data_std) - self.networks[0].collect_params().initialize(self.weight_initializer, ctx=context) + self.networks[0] = Net_0(data_mean=data_mean, data_std=data_std, mx_context=context, prefix="") + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + self.networks[0].collect_params().initialize(self.weight_initializer, force_reinit=False, ctx=context) self.networks[0].hybridize() - self.networks[0](mx.nd.zeros((1, 100,), ctx=context)) + self.networks[0](mx.nd.zeros((1, 100,), ctx=context[0])) if not os.path.exists(self._model_dir_): os.makedirs(self._model_dir_) diff --git a/src/test/resources/target_code/gluon/ganModel/defaultGAN/CNNGanTrainer_defaultGAN_defaultGANConnector_predictor.py b/src/test/resources/target_code/gluon/ganModel/defaultGAN/CNNGanTrainer_defaultGAN_defaultGANConnector_predictor.py index 42fa27f0..9cfe8224 100644 --- a/src/test/resources/target_code/gluon/ganModel/defaultGAN/CNNGanTrainer_defaultGAN_defaultGANConnector_predictor.py +++ 
b/src/test/resources/target_code/gluon/ganModel/defaultGAN/CNNGanTrainer_defaultGAN_defaultGANConnector_predictor.py @@ -184,16 +184,16 @@ class CNNGanTrainer_defaultGAN_defaultGANConnector_predictor: del discriminator_optimizer_params['learning_rate_decay'] if normalize: - self._net_creator_dis.construct(mx_context, data_mean=data_mean, data_std=data_std) + self._net_creator_dis.construct([mx_context], data_mean=data_mean, data_std=data_std) else: - self._net_creator_dis.construct(mx_context) + self._net_creator_dis.construct([mx_context]) - self._net_creator_gen.construct(mx_context) + self._net_creator_gen.construct([mx_context]) if self.use_qnet: - self._net_creator_qnet.construct(mx_context) + self._net_creator_qnet.construct([mx_context]) if load_checkpoint: - self._net_creator_qnet.load(mx_context) + self._net_creator_qnet.load([mx_context]) else: if os.path.isdir(self._net_creator_qnet._model_dir_): shutil.rmtree(self._net_creator_qnet._model_dir_) @@ -206,8 +206,8 @@ class CNNGanTrainer_defaultGAN_defaultGANConnector_predictor: begin_epoch = 0 if load_checkpoint: - begin_epoch = self._net_creator_dis.load(mx_context) - self._net_creator_gen.load(mx_context) + begin_epoch = self._net_creator_dis.load([mx_context]) + self._net_creator_gen.load([mx_context]) else: if os.path.isdir(self._net_creator_dis._model_dir_): shutil.rmtree(self._net_creator_dis._model_dir_) @@ -351,9 +351,9 @@ class CNNGanTrainer_defaultGAN_defaultGANConnector_predictor: gen_input, exp_qnet_output = create_generator_input(batch) with autograd.record(): - fake_data = gen_net(*gen_input) + fake_data = gen_net(*gen_input)[0][0] fake_data.detach() - discriminated_fake_dis = dis_net(fake_data, *dis_conditional_input) + discriminated_fake_dis = dis_net(fake_data, *dis_conditional_input)[0][0] if self.use_qnet: discriminated_fake_dis, _ = discriminated_fake_dis @@ -361,7 +361,7 @@ class CNNGanTrainer_defaultGAN_defaultGANConnector_predictor: real_labels = 
mx.nd.ones(discriminated_fake_dis.shape, ctx=mx_context) loss_resultF = dis_loss(discriminated_fake_dis, fake_labels) - discriminated_real_dis = dis_net(real_data, *dis_conditional_input) + discriminated_real_dis = dis_net(real_data, *dis_conditional_input)[0][0] if self.use_qnet: discriminated_real_dis, _ = discriminated_real_dis loss_resultR = dis_loss(discriminated_real_dis, real_labels) @@ -372,8 +372,8 @@ class CNNGanTrainer_defaultGAN_defaultGANConnector_predictor: if batch_i % k_value == 0: with autograd.record(): - fake_data = gen_net(*gen_input) - discriminated_fake_gen = dis_net(fake_data, *dis_conditional_input) + fake_data = gen_net(*gen_input)[0][0] + discriminated_fake_gen = dis_net(fake_data, *dis_conditional_input)[0][0] if self.use_qnet: discriminated_fake_gen, features = discriminated_fake_gen loss_resultG = dis_loss(discriminated_fake_gen, real_labels) @@ -381,7 +381,7 @@ class CNNGanTrainer_defaultGAN_defaultGANConnector_predictor: condition = batch.data[traindata_to_index[generator_target_name + "_"]] loss_resultG = loss_resultG + gen_loss_weight * generator_loss_func(fake_data, condition) if self.use_qnet: - qnet_discriminated = [q_net(features)] + qnet_discriminated = [q_net(features)[0][0]] for i, qnet_out in enumerate(qnet_discriminated): loss_resultG = loss_resultG + qnet_losses[i](qnet_out, exp_qnet_output[i]) loss_resultG.backward() diff --git a/src/test/resources/target_code/gluon/ganModel/defaultGAN/CNNNet_defaultGAN_defaultGANConnector_predictor.py b/src/test/resources/target_code/gluon/ganModel/defaultGAN/CNNNet_defaultGAN_defaultGANConnector_predictor.py index 32d8a55f..a83e0589 100644 --- a/src/test/resources/target_code/gluon/ganModel/defaultGAN/CNNNet_defaultGAN_defaultGANConnector_predictor.py +++ b/src/test/resources/target_code/gluon/ganModel/defaultGAN/CNNNet_defaultGAN_defaultGANConnector_predictor.py @@ -1,7 +1,10 @@ import mxnet as mx import numpy as np import math -from mxnet import gluon +import os +import abc +import 
warnings +from mxnet import gluon, nd class ZScoreNormalization(gluon.HybridBlock): @@ -86,9 +89,419 @@ class CustomGRU(gluon.HybridBlock): output, [state0] = self.gru(data, [F.swapaxes(state0, 0, 1)]) return output, F.swapaxes(state0, 0, 1) + +class DotProductSelfAttention(gluon.HybridBlock): + def __init__(self, + scale_factor, + num_heads, + dim_model, + dim_keys, + dim_values, + use_proj_bias, + use_mask, + **kwargs): + super(DotProductSelfAttention, self).__init__(**kwargs) + with self.name_scope(): + self.num_heads = num_heads + self.dim_model = dim_model + self.use_proj_bias = use_proj_bias + self.use_mask = use_mask + + if dim_keys == -1: + self.dim_keys = int(dim_model / self.num_heads) + else: + self.dim_keys = dim_keys + if dim_values == -1: + self.dim_values = int(dim_model / self.num_heads) + else: + self.dim_values = dim_values + + if scale_factor == -1: + self.scale_factor = math.sqrt(self.dim_keys) + else: + self.scale_factor = scale_factor + + self.proj_q = gluon.nn.Dense(self.num_heads*self.dim_keys, use_bias=self.use_proj_bias, flatten=False) + self.proj_k = gluon.nn.Dense(self.num_heads*self.dim_keys, use_bias=self.use_proj_bias, flatten=False) + self.proj_v = gluon.nn.Dense(self.num_heads*self.dim_values, use_bias=self.use_proj_bias, flatten=False) + self.proj_o = gluon.nn.Dense(self.dim_model, use_bias=self.use_proj_bias, flatten=False) + + def hybrid_forward(self, F, queries, keys, values, *args, **kwargs): + + queries = F.Reshape(queries, shape=(0, 0,-1)) + keys = F.Reshape(queries, shape=(0, 0, -1)) + values = F.Reshape(queries, shape=(0, 0, -1)) + + head_queries = self.proj_q(queries) + head_keys = self.proj_k(keys) + head_values = self.proj_v(values) + + head_queries = F.reshape(head_queries, shape=(0, 0, self.num_heads, -1)) + head_queries = F.transpose(head_queries, axes=(0,2,1,3)) + head_queries = F.reshape(head_queries, shape=(-1, 0, 0), reverse=True) + + head_keys = F.reshape(head_keys, shape=(0, 0, self.num_heads, -1)) + head_keys = 
F.transpose(head_keys, axes=(0,2,1,3)) + head_keys = F.reshape(head_keys, shape=(-1, 0, 0), reverse=True) + + score = F.batch_dot(head_queries, head_keys, transpose_b=True) + score = score * self.scale_factor + if self.use_mask: + mask = F.tile(mask, self.num_heads) + mask = F.repeat(mask, self.dim_model) + mask = F.reshape(mask, shape=(-1, self.dim_model)) + weights = F.softmax(score, mask, use_length=self.use_mask) + + head_values = F.reshape(head_values, shape=(0, 0, self.num_heads, -1)) + head_values = F.transpose(head_values, axes=(0,2,1,3)) + head_values = F.reshape(head_values, shape=(-1, 0, 0), reverse=True) + + ret = F.batch_dot(weights, head_values) + ret = F.reshape(ret, shape=(-1, self.num_heads, 0, 0), reverse=True) + ret = F.transpose(ret, axes=(0, 2, 1, 3)) + ret = F.reshape(ret, shape=(0, 0, -1)) + + ret = self.proj_o(ret) + + return ret + + +class EpisodicReplayMemoryInterface(gluon.HybridBlock): + __metaclass__ = abc.ABCMeta + + def __init__(self, use_replay, replay_interval, replay_batch_size, replay_steps, replay_gradient_steps, num_heads, **kwargs): + super(EpisodicReplayMemoryInterface, self).__init__(**kwargs) + + self.use_replay = use_replay + self.replay_interval = replay_interval + self.replay_batch_size = replay_batch_size + self.replay_steps = replay_steps + self.replay_gradient_steps = replay_gradient_steps + self.num_heads = num_heads + + @abc.abstractmethod + def store_samples(self, data, y, query_network, store_prob, mx_context): + pass + + @abc.abstractmethod + def sample_memory(self, batch_size, mx_context): + pass + + @abc.abstractmethod + def get_query_network(self, mx_context): + pass + + @abc.abstractmethod + def save_memory(self, path): + pass + + @abc.abstractmethod + def load_memory(self, path): + pass + +#Memory layer +class LargeMemory(gluon.HybridBlock): + def __init__(self, + sub_key_size, + query_size, + query_act, + dist_measure, + k, + num_heads, + values_dim, + **kwargs): + super(LargeMemory, self).__init__(**kwargs) 
+ with self.name_scope(): + #Memory parameters + self.dist_measure = dist_measure + self.k = k + self.num_heads = num_heads + self.query_act = query_act + self.query_size = query_size + self.num_heads = num_heads + + #Batch norm sub-layer + self.batch_norm = gluon.nn.BatchNorm() + + #Memory sub-layer + self.sub_key_size = sub_key_size + sub_key_shape = (self.num_heads, self.sub_key_size, int(query_size[-1] / 2)) + + if values_dim == -1: + values_shape = (self.sub_key_size * self.sub_key_size, self.query_size[-1]) + else: + values_shape = (self.sub_key_size*self.sub_key_size, values_dim) + + self.sub_keys1 = self.params.get("sub_keys1", shape=sub_key_shape, differentiable=True) + self.sub_keys2 = self.params.get("sub_keys2", shape=sub_key_shape, differentiable=True) + self.values = self.params.get("values", shape=values_shape, differentiable=True) + self.label_memory = nd.array([]) + + self.get_query_network() + + def hybrid_forward(self, F, x, sub_keys1, sub_keys2, values): + x = self.batch_norm(x) + + x = F.reshape(x, shape=(0, -1)) + + q = self.query_network(x) + + q = F.reshape(q, shape=(0, self.num_heads, -1)) + + q_split = F.split(q, num_outputs=2, axis=-1) + + if self.dist_measure == "l2": + q_split_resh = F.reshape(q_split[0], shape=(0,0,1,-1)) + sub_keys1_resh = F.reshape(sub_keys1, shape=(1,0,0,-1), reverse=True) + q1_diff = F.broadcast_sub(q_split_resh, sub_keys1_resh) + q1_dist = F.norm(q1_diff, axis=-1) + q_split_resh = F.reshape(q_split[1], shape=(0,0,1,-1)) + sub_keys2_resh = F.reshape(sub_keys2, shape=(1,0,0,-1), reverse=True) + q2_diff = F.broadcast_sub(q_split_resh, sub_keys2_resh) + q2_dist = F.norm(q2_diff, axis=-1) + else: + q1 = F.split(q_split[0], num_outputs=self.num_heads, axis=1) + q2 = F.split(q_split[1], num_outputs=self.num_heads, axis=1) + sub_keys1_resh = F.split(sub_keys1, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + sub_keys2_resh = F.split(sub_keys2, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + if 
self.num_heads == 1: + q1 = [q1] + q2 = [q2] + sub_keys1_resh = [sub_keys1_resh ] + sub_keys2_resh = [sub_keys2_resh ] + + q1_dist = F.dot(q1[0], sub_keys1_resh[0], transpose_b=True) + q2_dist = F.dot(q2[0], sub_keys2_resh[0], transpose_b=True) + for h in range(1, self.num_heads): + q1_dist = F.concat(q1_dist, F.dot(q1[0], sub_keys1_resh[h], transpose_b=True), dim=1) + q2_dist = F.concat(q2_dist, F.dot(q2[0], sub_keys1_resh[h], transpose_b=True), dim=1) + + i1 = F.topk(q1_dist, k=self.k, ret_typ="indices") + i2 = F.topk(q2_dist, k=self.k, ret_typ="indices") + + # Calculate cross product for keys at indices I1 and I2 + + # def head_take(data, state): + # return [F.take(data[0], data[2]), F.take(data[1], data[3])], state, + # + # i1 = F.transpose(i1, axes=(1,0,2)) + # i2 = F.transpose(i2, axes=(1, 0, 2)) + # st = F.zeros(1) + # (k1, k2), _ = F.contrib.foreach(head_take, [sub_keys1, sub_keys2,i1,i2], st) + # k1 = F.reshape(k1, shape=(-1, 0, 0), reverse=True) + # k2 = F.reshape(k2, shape=(-1, 0, 0), reverse=True) + i1 = F.split(i1, num_outputs=self.num_heads, axis=1) + i2 = F.split(i2, num_outputs=self.num_heads, axis=1) + sub_keys1 = F.split(sub_keys1, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + sub_keys2 = F.split(sub_keys2, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + if self.num_heads == 1: + i1 = [i1] + i2 = [i2] + sub_keys1 = [sub_keys1] + sub_keys2 = [sub_keys2] + + k1 = F.take(sub_keys1[0], i1[0]) + k2 = F.take(sub_keys2[0], i2[0]) + for h in range(1, self.num_heads): + k1 = F.concat(k1, F.take(sub_keys1[h], i1[h]), dim=1) + k2 = F.concat(k2, F.take(sub_keys2[h], i2[h]), dim=1) + + k1 = F.tile(k1, (1, 1, self.k, 1)) + k2 = F.repeat(k2, self.k, 2) + c_cart = F.concat(k1, k2, dim=3) + + q = F.reshape(q, shape=(-1,0), reverse=True) + q = F.reshape(q, shape=(0, 1, -1)) + c_cart = F.reshape(c_cart, shape=(-1, 0, 0), reverse=True) + if self.dist_measure == "l2": + k_diff = F.broadcast_sub(q, c_cart) + k_dist = F.norm(k_diff, axis=-1) + else: 
+ k_dist = F.batch_dot(q, c_cart, transpose_b=True) #F.contrib.foreach(loop_batch_dot, [q, c_cart], init_states=state_batch_dist) + k_dist = F.reshape(k_dist, shape=(0, -1)) + + i = F.topk(k_dist, k=self.k, ret_typ="both") + + w = F.softmax(i[0]) + w = F.reshape(w, shape=(0,1,-1)) + vi = F.take(values, i[1]) + aggr_value = F.batch_dot(w, vi) #F.contrib.foreach(loop_batch_dot, [w, vi], init_states=state_batch_dist) + + ret = F.reshape(aggr_value, shape=(-1, self.num_heads, 0), reverse=True) + one_vec = F.ones((1, 1, self.num_heads)) + one_vec = F.broadcast_like(one_vec, ret, lhs_axes=0, rhs_axes=0) + ret = F.batch_dot(one_vec, ret) + ret = F.reshape(ret, shape=(-1, 0), reverse=True) + + return ret + + def get_query_network(self): + if hasattr(self, 'query_network'): + return self.query_network + else: + self.query_network = gluon.nn.HybridSequential() + for size in self.query_size: + if self.query_act == "linear": + self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, flatten=False)) + else: + self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, activation=self.query_act, flatten=False)) + return self.query_network + + +#EpisodicMemory layer +class EpisodicMemory(EpisodicReplayMemoryInterface): + def __init__(self, + replay_interval, + replay_batch_size, + replay_steps, + replay_gradient_steps, + store_prob, + max_stored_samples, + use_replay, + query_net_dir, + query_net_prefix, + query_net_num_inputs, + **kwargs): + super(EpisodicMemory, self).__init__(use_replay, replay_interval, replay_batch_size, replay_steps, replay_gradient_steps, 1, **kwargs) + with self.name_scope(): + #Replay parameters + self.store_prob = store_prob + self.max_stored_samples = max_stored_samples + + self.query_net_dir = query_net_dir + self.query_net_prefix = query_net_prefix + self.query_net_num_inputs = query_net_num_inputs + + #Memory + self.key_memory = nd.array([]) + self.value_memory = nd.array([]) + self.label_memory = nd.array([]) + + def 
hybrid_forward(self, F, *args): + #propagate the input as the rest is only used for replay + return [args, []] + + def store_samples(self, data, y, query_network, store_prob, context): + num_pus = len(data) + sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] + num_inputs = len(data[0][0]) + num_outputs = len(y) + mx_context = context[0] + + if len(self.key_memory) == 0: + self.key_memory = nd.empty(0, ctx=mx.cpu()) + self.value_memory = [] + self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) + + ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] + + max_inds = [nd.max(ind[i]) for i in range(num_pus)] + if any(max_inds): + to_store_values = [] + for i in range(num_inputs): + tmp_values = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_values, list): + tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) + else: + tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) + to_store_values.append(tmp_values) + + to_store_labels = [] + for i in range(num_outputs): + tmp_labels = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_labels, list): + tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) + else: + tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) + to_store_labels.append(tmp_labels) + + to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) + + if self.key_memory.shape[0] == 0: + self.key_memory = to_store_keys.as_in_context(mx.cpu()) + for i in range(num_inputs): + self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + for i in range(num_outputs): + self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) + elif self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: + 
num_to_store = to_store_keys.shape[0] + self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=1) + else: + self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + + def sample_memory(self, batch_size): + num_stored_samples = self.key_memory.shape[0] + if self.replay_batch_size == -1: + sample_ind = nd.random.randint(0, num_stored_samples, (self.replay_steps, batch_size), ctx=mx.cpu()) + else: + sample_ind = nd.random.randint(0, num_stored_samples, (self.replay_steps, self.replay_batch_size), ctx=mx.cpu()) + + num_outputs = len(self.label_memory) + + sample_labels = [[self.label_memory[i][ind] for i in range(num_outputs)] for ind in sample_ind] + sample_batches = [[[self.value_memory[j][ind] for j in range(len(self.value_memory))], sample_labels[i]] for i, ind in enumerate(sample_ind)] + + return sample_batches + + def get_query_network(self, context): + lastEpoch = 0 + for file in os.listdir(self.query_net_dir): + if self.query_net_prefix in file and ".json" in file: + symbolFile = file + + if self.query_net_prefix in file and ".param" in file: + epochStr = file.replace(".params", "").replace(self.query_net_prefix, "") + epoch = int(epochStr) + if epoch >= lastEpoch: + lastEpoch = epoch + weightFile = file + + inputNames = [] + if self.query_net_num_inputs == 1: + inputNames.append("data") + else: + for i in 
range(self.query_net_num_inputs): + inputNames.append("data" + str(i)) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + net = mx.gluon.nn.SymbolBlock.imports(self.query_net_dir + symbolFile, inputNames, self.query_net_dir + weightFile, ctx=context[0]) + net.hybridize() + return net + + def save_memory(self, path): + mem_arr = [("keys", self.key_memory)] + [("values_"+str(k),v) for (k,v) in enumerate(self.value_memory)] + [("labels_"+str(k),v) for (k,v) in enumerate(self.label_memory)] + mem_dict = {entry[0]:entry[1] for entry in mem_arr} + nd.save(path, mem_dict) + + def load_memory(self, path): + mem_dict = nd.load(path) + self.value_memory = [] + self.label_memory = [] + for key in sorted(mem_dict.keys()): + if key == "keys": + self.key_memory = mem_dict[key] + elif key.startswith("values_"): + self.value_memory.append(mem_dict[key]) + elif key.startswith("labels_"): + self.label_memory.append(mem_dict[key]) + + +#Stream 0 class Net_0(gluon.HybridBlock): - def __init__(self, data_mean=None, data_std=None, **kwargs): + def __init__(self, data_mean=None, data_std=None, mx_context=None, **kwargs): super(Net_0, self).__init__(**kwargs) with self.name_scope(): if data_mean: @@ -177,5 +590,5 @@ class Net_0(gluon.HybridBlock): tanh5_ = self.tanh5_(upconvolution5_) data_ = F.identity(tanh5_) - return data_ + return [[data_]] diff --git a/src/test/resources/target_code/gluon/ganModel/defaultGAN/CNNPredictor_defaultGAN_defaultGANConnector_predictor.h b/src/test/resources/target_code/gluon/ganModel/defaultGAN/CNNPredictor_defaultGAN_defaultGANConnector_predictor.h index a99f9c1b..d7ad1aab 100644 --- a/src/test/resources/target_code/gluon/ganModel/defaultGAN/CNNPredictor_defaultGAN_defaultGANConnector_predictor.h +++ b/src/test/resources/target_code/gluon/ganModel/defaultGAN/CNNPredictor_defaultGAN_defaultGANConnector_predictor.h @@ -1,108 +1,149 @@ #ifndef CNNPREDICTOR_DEFAULTGAN_DEFAULTGANCONNECTOR_PREDICTOR #define 
CNNPREDICTOR_DEFAULTGAN_DEFAULTGANCONNECTOR_PREDICTOR -#include +#include #include #include #include + +#include +#include -#include - +using namespace mxnet::cpp; + class CNNPredictor_defaultGAN_defaultGANConnector_predictor_0{ public: - const std::string json_file = "model/defaultGAN.DefaultGANGenerator/model_0_newest-symbol.json"; - const std::string param_file = "model/defaultGAN.DefaultGANGenerator/model_0_newest-0000.params"; - const std::vector input_keys = { + const std::string file_prefix = "model/defaultGAN.DefaultGANGenerator/model_0_newest"; + + //network + const std::vector network_input_keys = { "data" }; - const std::vector> input_shapes = {{1, 100}}; - const bool use_gpu = false; - - PredictorHandle handle; - + const std::vector> network_input_shapes = {{1, 100}}; + std::vector network_input_sizes; + std::vector> network_arg_names; + std::vector network_handles; + + + //misc + Context ctx = Context::cpu(); //Will be updated later in init according to use_gpu + int dtype = 0; //use data type (float32=0 float64=1 ...) 
+ + explicit CNNPredictor_defaultGAN_defaultGANConnector_predictor_0(){ - init(json_file, param_file, input_keys, input_shapes, use_gpu); + init(file_prefix, network_input_keys, network_input_shapes); } ~CNNPredictor_defaultGAN_defaultGANConnector_predictor_0(){ - if(handle) MXPredFree(handle); + for(Executor * handle : network_handles){ + delete handle; + } + MXNotifyShutdown(); } void predict(const std::vector &in_noise_, std::vector &out_data_){ - MXPredSetInput(handle, input_keys[0].c_str(), in_noise_.data(), static_cast(in_noise_.size())); - - MXPredForward(handle); - mx_uint output_index; - mx_uint *shape = 0; - mx_uint shape_len; - size_t size; - - output_index = 0; - MXPredGetOutputShape(handle, output_index, &shape, &shape_len); - size = 1; - for (mx_uint i = 0; i < shape_len; ++i) size *= shape[i]; - - assert(size == out_data_.size()); - MXPredGetOutput(handle, output_index, &(out_data_[0]), out_data_.size()); + NDArray input_temp; + input_temp = NDArray(network_input_shapes[0], ctx, false, dtype); + input_temp.SyncCopyFromCPU(in_noise_.data(), network_input_sizes[0]); + input_temp.CopyTo(&(network_handles[0]->arg_dict()[network_input_keys[0]])); + NDArray::WaitAll(); + + network_handles[0]->Forward(false); + CheckMXNetError("Forward, predict, handle ind. 
0"); + + + std::vector output = network_handles.back()->outputs; + std::vector curr_output_shape; + size_t curr_output_size; + curr_output_shape = output[0].GetShape(); + curr_output_size = 1; + for (mx_uint i : curr_output_shape) curr_output_size *= i; + //Fix due to a bug in the in how the output arrays are initialized when there are multiple outputs + assert((curr_output_size == out_data_.size()) || (curr_output_size == out_data_[0])); + output[0].SyncCopyToCPU(&out_data_); + } + + + + Executor* initExecutor(Symbol &sym, + std::map ¶m_map, + const std::vector &exec_input_keys, + const std::vector> &exec_input_shapes){ + + const mx_uint num_exec_input_nodes = exec_input_keys.size(); + for(mx_uint i = 0; i < num_exec_input_nodes; i++){ + param_map[exec_input_keys[i]] = NDArray(exec_input_shapes[i], ctx, false, dtype); + } - void init(const std::string &json_file, - const std::string ¶m_file, - const std::vector &input_keys, - const std::vector> &input_shapes, - const bool &use_gpu){ + std::vector param_arrays; + std::vector grad_array; + std::vector grad_reqs; + std::vector aux_arrays; + std::map< std::string, NDArray> aux_map; - BufferFile json_data(json_file); - BufferFile param_data(param_file); + sym.InferExecutorArrays(ctx, ¶m_arrays, &grad_array, &grad_reqs, + &aux_arrays, param_map, std::map(), + std::map(), aux_map); - int dev_type = use_gpu ? 
2 : 1; - int dev_id = 0; + Executor *handle = new Executor(sym, ctx, param_arrays, grad_array, grad_reqs, aux_arrays); + assert(handle); + return handle; + } - if (json_data.GetLength() == 0 || - param_data.GetLength() == 0) { - std::exit(-1); + std::vector getSizesOfShapes(const std::vector> shapes){ + std::vector sizes; + for(std::vector shape : shapes){ + mx_uint val = 1; + for(mx_uint i: shape){ + val *= i; + } + sizes.push_back(val); } + return sizes; + } - const mx_uint num_input_nodes = input_keys.size(); - - const char* input_keys_ptr[num_input_nodes]; - for(mx_uint i = 0; i < num_input_nodes; i++){ - input_keys_ptr[i] = input_keys[i].c_str(); + void CheckMXNetError(std::string loc){ + const char* err = MXGetLastError(); + if (err && err[0] != 0) { + std::cout << "MXNet error at " << loc << err << std::endl; + exit(-1); } - - mx_uint shape_data_size = 0; - mx_uint input_shape_indptr[input_shapes.size() + 1]; - input_shape_indptr[0] = 0; - for(mx_uint i = 0; i < input_shapes.size(); i++){ - shape_data_size += input_shapes[i].size(); - input_shape_indptr[i+1] = shape_data_size; + } + + void init(const std::string &file_prefix, + const std::vector &network_input_keys, + const std::vector> &network_input_shapes){ + + CNNLAOptimizer_defaultGAN_defaultGANConnector_predictor optimizer_creator = CNNLAOptimizer_defaultGAN_defaultGANConnector_predictor(); + + if(optimizer_creator.getContextName() == "gpu"){ + ctx = Context::gpu(); } - - mx_uint input_shape_data[shape_data_size]; - mx_uint index = 0; - for(mx_uint i = 0; i < input_shapes.size(); i++){ - for(mx_uint j = 0; j < input_shapes[i].size(); j++){ - input_shape_data[index] = input_shapes[i][j]; - index++; - } + + network_input_sizes = getSizesOfShapes(network_input_shapes); + + ModelLoader model_loader(file_prefix, 0, ctx); + + std::vector network_symbols = model_loader.GetNetworkSymbols(); + std::vector> network_param_maps; + network_param_maps = model_loader.GetNetworkParamMaps(); + + //Init handles + 
std::map> in_shape_map; + for(mx_uint i=0; i < network_input_keys.size(); i++){ + in_shape_map[network_input_keys[i]] = network_input_shapes[i]; } - - MXPredCreate(static_cast(json_data.GetBuffer()), - static_cast(param_data.GetBuffer()), - static_cast(param_data.GetLength()), - dev_type, - dev_id, - num_input_nodes, - input_keys_ptr, - input_shape_indptr, - input_shape_data, - &handle); - assert(handle); + std::vector> in_shapes; + std::vector> aux_shapes; + std::vector> out_shapes; + network_symbols[0].InferShape(in_shape_map, &in_shapes, &aux_shapes, &out_shapes); + network_handles.push_back(initExecutor(network_symbols[0], network_param_maps[0], network_input_keys, network_input_shapes)); + } }; - #endif // CNNPREDICTOR_DEFAULTGAN_DEFAULTGANCONNECTOR_PREDICTOR diff --git a/src/test/resources/target_code/gluon/ganModel/defaultGAN/CNNTrainer_defaultGAN_defaultGANConnector_predictor.py b/src/test/resources/target_code/gluon/ganModel/defaultGAN/CNNTrainer_defaultGAN_defaultGANConnector_predictor.py index 82954430..7119c688 100644 --- a/src/test/resources/target_code/gluon/ganModel/defaultGAN/CNNTrainer_defaultGAN_defaultGANConnector_predictor.py +++ b/src/test/resources/target_code/gluon/ganModel/defaultGAN/CNNTrainer_defaultGAN_defaultGANConnector_predictor.py @@ -55,5 +55,3 @@ if __name__ == "__main__": log_period=10, print_images=True, ) - - diff --git a/src/test/resources/target_code/gluon/ganModel/defaultGAN/gan/CNNCreator_defaultGAN_defaultGANDiscriminator.py b/src/test/resources/target_code/gluon/ganModel/defaultGAN/gan/CNNCreator_defaultGAN_defaultGANDiscriminator.py index 0e750160..1920ecee 100644 --- a/src/test/resources/target_code/gluon/ganModel/defaultGAN/gan/CNNCreator_defaultGAN_defaultGANDiscriminator.py +++ b/src/test/resources/target_code/gluon/ganModel/defaultGAN/gan/CNNCreator_defaultGAN_defaultGANDiscriminator.py @@ -2,6 +2,8 @@ import mxnet as mx import logging import os import shutil +import warnings +import inspect from 
CNNNet_defaultGAN_defaultGANDiscriminator import Net_0 @@ -20,6 +22,10 @@ class CNNCreator_defaultGAN_defaultGANDiscriminator: for i, network in self.networks.items(): lastEpoch = 0 param_file = None + if hasattr(network, 'episodic_sub_nets'): + num_episodic_sub_nets = len(network.episodic_sub_nets) + lastMemEpoch = [0]*num_episodic_sub_nets + mem_files = [None]*num_episodic_sub_nets try: os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + "_newest-0000.params") @@ -30,22 +36,77 @@ class CNNCreator_defaultGAN_defaultGANDiscriminator: except OSError: pass + if hasattr(network, 'episodic_sub_nets'): + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(0) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(0) + "-symbol.json") + except OSError: + pass + + for j in range(len(network.episodic_sub_nets)): + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(j+1) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(j+1) + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_query_net_' + str(j+1) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_query_net_' + str(j+1) + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_loss' + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_loss' + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + "_newest_episodic_memory_sub_net_" + str(j + 
1) + "-0000") + except OSError: + pass + if os.path.isdir(self._model_dir_): for file in os.listdir(self._model_dir_): - if ".params" in file and self._model_prefix_ + "_" + str(i) in file: - epochStr = file.replace(".params","").replace(self._model_prefix_ + "_" + str(i) + "-","") + if ".params" in file and self._model_prefix_ + "_" + str(i) in file and not "loss" in file: + epochStr = file.replace(".params", "").replace(self._model_prefix_ + "_" + str(i) + "-", "") epoch = int(epochStr) - if epoch > lastEpoch: + if epoch >= lastEpoch: lastEpoch = epoch param_file = file + elif hasattr(network, 'episodic_sub_nets') and self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_" in file: + relMemPathInfo = file.replace(self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_", "").split("-") + memSubNet = int(relMemPathInfo[0]) + memEpochStr = relMemPathInfo[1] + memEpoch = int(memEpochStr) + if memEpoch >= lastMemEpoch[memSubNet-1]: + lastMemEpoch[memSubNet-1] = memEpoch + mem_files[memSubNet-1] = file + if param_file is None: earliestLastEpoch = 0 else: logging.info("Loading checkpoint: " + param_file) network.load_parameters(self._model_dir_ + param_file) + if hasattr(network, 'episodic_sub_nets'): + for j, sub_net in enumerate(network.episodic_sub_nets): + if mem_files[j] != None: + logging.info("Loading Replay Memory: " + mem_files[j]) + mem_layer = [param for param in inspect.getmembers(sub_net, lambda x: not(inspect.isroutine(x))) if param[0].startswith("memory")][0][1] + mem_layer.load_memory(self._model_dir_ + mem_files[j]) - if earliestLastEpoch == None or lastEpoch < earliestLastEpoch: - earliestLastEpoch = lastEpoch + if earliestLastEpoch == None or lastEpoch + 1 < earliestLastEpoch: + earliestLastEpoch = lastEpoch + 1 return earliestLastEpoch @@ -56,27 +117,52 @@ class CNNCreator_defaultGAN_defaultGANDiscriminator: for i, network in self.networks.items(): # param_file = self._model_prefix_ + "_" + str(i) + "_newest-0000.params" param_file = 
None + if hasattr(network, 'episodic_sub_nets'): + num_episodic_sub_nets = len(network.episodic_sub_nets) + lastMemEpoch = [0] * num_episodic_sub_nets + mem_files = [None] * num_episodic_sub_nets + if os.path.isdir(self._weights_dir_): lastEpoch = 0 for file in os.listdir(self._weights_dir_): - if ".params" in file and self._model_prefix_ + "_" + str(i) in file: + if ".params" in file and self._model_prefix_ + "_" + str(i) in file and not "loss" in file: epochStr = file.replace(".params","").replace(self._model_prefix_ + "_" + str(i) + "-","") epoch = int(epochStr) - if epoch > lastEpoch: + if epoch >= lastEpoch: lastEpoch = epoch param_file = file + elif hasattr(network, 'episodic_sub_nets') and self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_" in file: + relMemPathInfo = file.replace(self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_", "").split("-") + memSubNet = int(relMemPathInfo[0]) + memEpochStr = relMemPathInfo[1] + memEpoch = int(memEpochStr) + if memEpoch >= lastMemEpoch[memSubNet-1]: + lastMemEpoch[memSubNet-1] = memEpoch + mem_files[memSubNet-1] = file + logging.info("Loading pretrained weights: " + self._weights_dir_ + param_file) network.load_parameters(self._weights_dir_ + param_file, allow_missing=True, ignore_extra=True) + if hasattr(network, 'episodic_sub_nets'): + assert lastEpoch == lastMemEpoch + for j, sub_net in enumerate(network.episodic_sub_nets): + if mem_files[j] != None: + logging.info("Loading pretrained Replay Memory: " + mem_files[j]) + mem_layer = \ + [param for param in inspect.getmembers(sub_net, lambda x: not (inspect.isroutine(x))) if + param[0].startswith("memory")][0][1] + mem_layer.load_memory(self._model_dir_ + mem_files[j]) else: logging.info("No pretrained weights available at: " + self._weights_dir_ + param_file) def construct(self, context, data_mean=None, data_std=None): - self.networks[0] = Net_0(data_mean=data_mean, data_std=data_std) -
self.networks[0].collect_params().initialize(self.weight_initializer, ctx=context) + self.networks[0] = Net_0(data_mean=data_mean, data_std=data_std, mx_context=context, prefix="") + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + self.networks[0].collect_params().initialize(self.weight_initializer, force_reinit=False, ctx=context) self.networks[0].hybridize() - self.networks[0](mx.nd.zeros((1, 1,64,64,), ctx=context)) + self.networks[0](mx.nd.zeros((1, 1,64,64,), ctx=context[0])) if not os.path.exists(self._model_dir_): os.makedirs(self._model_dir_) diff --git a/src/test/resources/target_code/gluon/ganModel/defaultGAN/gan/CNNNet_defaultGAN_defaultGANDiscriminator.py b/src/test/resources/target_code/gluon/ganModel/defaultGAN/gan/CNNNet_defaultGAN_defaultGANDiscriminator.py index 027ed194..8e862bb3 100644 --- a/src/test/resources/target_code/gluon/ganModel/defaultGAN/gan/CNNNet_defaultGAN_defaultGANDiscriminator.py +++ b/src/test/resources/target_code/gluon/ganModel/defaultGAN/gan/CNNNet_defaultGAN_defaultGANDiscriminator.py @@ -1,7 +1,10 @@ import mxnet as mx import numpy as np import math -from mxnet import gluon +import os +import abc +import warnings +from mxnet import gluon, nd class ZScoreNormalization(gluon.HybridBlock): @@ -86,9 +89,419 @@ class CustomGRU(gluon.HybridBlock): output, [state0] = self.gru(data, [F.swapaxes(state0, 0, 1)]) return output, F.swapaxes(state0, 0, 1) + +class DotProductSelfAttention(gluon.HybridBlock): + def __init__(self, + scale_factor, + num_heads, + dim_model, + dim_keys, + dim_values, + use_proj_bias, + use_mask, + **kwargs): + super(DotProductSelfAttention, self).__init__(**kwargs) + with self.name_scope(): + self.num_heads = num_heads + self.dim_model = dim_model + self.use_proj_bias = use_proj_bias + self.use_mask = use_mask + + if dim_keys == -1: + self.dim_keys = int(dim_model / self.num_heads) + else: + self.dim_keys = dim_keys + if dim_values == -1: + self.dim_values = int(dim_model / self.num_heads) + 
else: + self.dim_values = dim_values + + if scale_factor == -1: + self.scale_factor = math.sqrt(self.dim_keys) + else: + self.scale_factor = scale_factor + + self.proj_q = gluon.nn.Dense(self.num_heads*self.dim_keys, use_bias=self.use_proj_bias, flatten=False) + self.proj_k = gluon.nn.Dense(self.num_heads*self.dim_keys, use_bias=self.use_proj_bias, flatten=False) + self.proj_v = gluon.nn.Dense(self.num_heads*self.dim_values, use_bias=self.use_proj_bias, flatten=False) + self.proj_o = gluon.nn.Dense(self.dim_model, use_bias=self.use_proj_bias, flatten=False) + + def hybrid_forward(self, F, queries, keys, values, *args, **kwargs): + + queries = F.Reshape(queries, shape=(0, 0,-1)) + keys = F.Reshape(queries, shape=(0, 0, -1)) + values = F.Reshape(queries, shape=(0, 0, -1)) + + head_queries = self.proj_q(queries) + head_keys = self.proj_k(keys) + head_values = self.proj_v(values) + + head_queries = F.reshape(head_queries, shape=(0, 0, self.num_heads, -1)) + head_queries = F.transpose(head_queries, axes=(0,2,1,3)) + head_queries = F.reshape(head_queries, shape=(-1, 0, 0), reverse=True) + + head_keys = F.reshape(head_keys, shape=(0, 0, self.num_heads, -1)) + head_keys = F.transpose(head_keys, axes=(0,2,1,3)) + head_keys = F.reshape(head_keys, shape=(-1, 0, 0), reverse=True) + + score = F.batch_dot(head_queries, head_keys, transpose_b=True) + score = score * self.scale_factor + if self.use_mask: + mask = F.tile(mask, self.num_heads) + mask = F.repeat(mask, self.dim_model) + mask = F.reshape(mask, shape=(-1, self.dim_model)) + weights = F.softmax(score, mask, use_length=self.use_mask) + + head_values = F.reshape(head_values, shape=(0, 0, self.num_heads, -1)) + head_values = F.transpose(head_values, axes=(0,2,1,3)) + head_values = F.reshape(head_values, shape=(-1, 0, 0), reverse=True) + + ret = F.batch_dot(weights, head_values) + ret = F.reshape(ret, shape=(-1, self.num_heads, 0, 0), reverse=True) + ret = F.transpose(ret, axes=(0, 2, 1, 3)) + ret = F.reshape(ret, shape=(0, 
0, -1)) + + ret = self.proj_o(ret) + + return ret + + +class EpisodicReplayMemoryInterface(gluon.HybridBlock): + __metaclass__ = abc.ABCMeta + + def __init__(self, use_replay, replay_interval, replay_batch_size, replay_steps, replay_gradient_steps, num_heads, **kwargs): + super(EpisodicReplayMemoryInterface, self).__init__(**kwargs) + + self.use_replay = use_replay + self.replay_interval = replay_interval + self.replay_batch_size = replay_batch_size + self.replay_steps = replay_steps + self.replay_gradient_steps = replay_gradient_steps + self.num_heads = num_heads + + @abc.abstractmethod + def store_samples(self, data, y, query_network, store_prob, mx_context): + pass + + @abc.abstractmethod + def sample_memory(self, batch_size, mx_context): + pass + + @abc.abstractmethod + def get_query_network(self, mx_context): + pass + + @abc.abstractmethod + def save_memory(self, path): + pass + + @abc.abstractmethod + def load_memory(self, path): + pass + +#Memory layer +class LargeMemory(gluon.HybridBlock): + def __init__(self, + sub_key_size, + query_size, + query_act, + dist_measure, + k, + num_heads, + values_dim, + **kwargs): + super(LargeMemory, self).__init__(**kwargs) + with self.name_scope(): + #Memory parameters + self.dist_measure = dist_measure + self.k = k + self.num_heads = num_heads + self.query_act = query_act + self.query_size = query_size + self.num_heads = num_heads + + #Batch norm sub-layer + self.batch_norm = gluon.nn.BatchNorm() + + #Memory sub-layer + self.sub_key_size = sub_key_size + sub_key_shape = (self.num_heads, self.sub_key_size, int(query_size[-1] / 2)) + + if values_dim == -1: + values_shape = (self.sub_key_size * self.sub_key_size, self.query_size[-1]) + else: + values_shape = (self.sub_key_size*self.sub_key_size, values_dim) + + self.sub_keys1 = self.params.get("sub_keys1", shape=sub_key_shape, differentiable=True) + self.sub_keys2 = self.params.get("sub_keys2", shape=sub_key_shape, differentiable=True) + self.values = 
self.params.get("values", shape=values_shape, differentiable=True) + self.label_memory = nd.array([]) + + self.get_query_network() + + def hybrid_forward(self, F, x, sub_keys1, sub_keys2, values): + x = self.batch_norm(x) + + x = F.reshape(x, shape=(0, -1)) + + q = self.query_network(x) + + q = F.reshape(q, shape=(0, self.num_heads, -1)) + + q_split = F.split(q, num_outputs=2, axis=-1) + + if self.dist_measure == "l2": + q_split_resh = F.reshape(q_split[0], shape=(0,0,1,-1)) + sub_keys1_resh = F.reshape(sub_keys1, shape=(1,0,0,-1), reverse=True) + q1_diff = F.broadcast_sub(q_split_resh, sub_keys1_resh) + q1_dist = F.norm(q1_diff, axis=-1) + q_split_resh = F.reshape(q_split[1], shape=(0,0,1,-1)) + sub_keys2_resh = F.reshape(sub_keys2, shape=(1,0,0,-1), reverse=True) + q2_diff = F.broadcast_sub(q_split_resh, sub_keys2_resh) + q2_dist = F.norm(q2_diff, axis=-1) + else: + q1 = F.split(q_split[0], num_outputs=self.num_heads, axis=1) + q2 = F.split(q_split[1], num_outputs=self.num_heads, axis=1) + sub_keys1_resh = F.split(sub_keys1, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + sub_keys2_resh = F.split(sub_keys2, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + if self.num_heads == 1: + q1 = [q1] + q2 = [q2] + sub_keys1_resh = [sub_keys1_resh ] + sub_keys2_resh = [sub_keys2_resh ] + + q1_dist = F.dot(q1[0], sub_keys1_resh[0], transpose_b=True) + q2_dist = F.dot(q2[0], sub_keys2_resh[0], transpose_b=True) + for h in range(1, self.num_heads): + q1_dist = F.concat(q1_dist, F.dot(q1[0], sub_keys1_resh[h], transpose_b=True), dim=1) + q2_dist = F.concat(q2_dist, F.dot(q2[0], sub_keys1_resh[h], transpose_b=True), dim=1) + + i1 = F.topk(q1_dist, k=self.k, ret_typ="indices") + i2 = F.topk(q2_dist, k=self.k, ret_typ="indices") + + # Calculate cross product for keys at indices I1 and I2 + + # def head_take(data, state): + # return [F.take(data[0], data[2]), F.take(data[1], data[3])], state, + # + # i1 = F.transpose(i1, axes=(1,0,2)) + # i2 = F.transpose(i2, 
axes=(1, 0, 2)) + # st = F.zeros(1) + # (k1, k2), _ = F.contrib.foreach(head_take, [sub_keys1, sub_keys2,i1,i2], st) + # k1 = F.reshape(k1, shape=(-1, 0, 0), reverse=True) + # k2 = F.reshape(k2, shape=(-1, 0, 0), reverse=True) + i1 = F.split(i1, num_outputs=self.num_heads, axis=1) + i2 = F.split(i2, num_outputs=self.num_heads, axis=1) + sub_keys1 = F.split(sub_keys1, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + sub_keys2 = F.split(sub_keys2, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + if self.num_heads == 1: + i1 = [i1] + i2 = [i2] + sub_keys1 = [sub_keys1] + sub_keys2 = [sub_keys2] + + k1 = F.take(sub_keys1[0], i1[0]) + k2 = F.take(sub_keys2[0], i2[0]) + for h in range(1, self.num_heads): + k1 = F.concat(k1, F.take(sub_keys1[h], i1[h]), dim=1) + k2 = F.concat(k2, F.take(sub_keys2[h], i2[h]), dim=1) + + k1 = F.tile(k1, (1, 1, self.k, 1)) + k2 = F.repeat(k2, self.k, 2) + c_cart = F.concat(k1, k2, dim=3) + + q = F.reshape(q, shape=(-1,0), reverse=True) + q = F.reshape(q, shape=(0, 1, -1)) + c_cart = F.reshape(c_cart, shape=(-1, 0, 0), reverse=True) + if self.dist_measure == "l2": + k_diff = F.broadcast_sub(q, c_cart) + k_dist = F.norm(k_diff, axis=-1) + else: + k_dist = F.batch_dot(q, c_cart, transpose_b=True) #F.contrib.foreach(loop_batch_dot, [q, c_cart], init_states=state_batch_dist) + k_dist = F.reshape(k_dist, shape=(0, -1)) + + i = F.topk(k_dist, k=self.k, ret_typ="both") + + w = F.softmax(i[0]) + w = F.reshape(w, shape=(0,1,-1)) + vi = F.take(values, i[1]) + aggr_value = F.batch_dot(w, vi) #F.contrib.foreach(loop_batch_dot, [w, vi], init_states=state_batch_dist) + + ret = F.reshape(aggr_value, shape=(-1, self.num_heads, 0), reverse=True) + one_vec = F.ones((1, 1, self.num_heads)) + one_vec = F.broadcast_like(one_vec, ret, lhs_axes=0, rhs_axes=0) + ret = F.batch_dot(one_vec, ret) + ret = F.reshape(ret, shape=(-1, 0), reverse=True) + + return ret + + def get_query_network(self): + if hasattr(self, 'query_network'): + return 
self.query_network + else: + self.query_network = gluon.nn.HybridSequential() + for size in self.query_size: + if self.query_act == "linear": + self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, flatten=False)) + else: + self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, activation=self.query_act, flatten=False)) + return self.query_network + + +#EpisodicMemory layer +class EpisodicMemory(EpisodicReplayMemoryInterface): + def __init__(self, + replay_interval, + replay_batch_size, + replay_steps, + replay_gradient_steps, + store_prob, + max_stored_samples, + use_replay, + query_net_dir, + query_net_prefix, + query_net_num_inputs, + **kwargs): + super(EpisodicMemory, self).__init__(use_replay, replay_interval, replay_batch_size, replay_steps, replay_gradient_steps, 1, **kwargs) + with self.name_scope(): + #Replay parameters + self.store_prob = store_prob + self.max_stored_samples = max_stored_samples + + self.query_net_dir = query_net_dir + self.query_net_prefix = query_net_prefix + self.query_net_num_inputs = query_net_num_inputs + + #Memory + self.key_memory = nd.array([]) + self.value_memory = nd.array([]) + self.label_memory = nd.array([]) + + def hybrid_forward(self, F, *args): + #propagate the input as the rest is only used for replay + return [args, []] + + def store_samples(self, data, y, query_network, store_prob, context): + num_pus = len(data) + sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] + num_inputs = len(data[0][0]) + num_outputs = len(y) + mx_context = context[0] + + if len(self.key_memory) == 0: + self.key_memory = nd.empty(0, ctx=mx.cpu()) + self.value_memory = [] + self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) + + ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] + + max_inds = [nd.max(ind[i]) for i in range(num_pus)] + if any(max_inds): + to_store_values = [] + for i in range(num_inputs): + tmp_values = [] + for j in 
range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_values, list): + tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) + else: + tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) + to_store_values.append(tmp_values) + + to_store_labels = [] + for i in range(num_outputs): + tmp_labels = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_labels, list): + tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) + else: + tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) + to_store_labels.append(tmp_labels) + + to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) + + if self.key_memory.shape[0] == 0: + self.key_memory = to_store_keys.as_in_context(mx.cpu()) + for i in range(num_inputs): + self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + for i in range(num_outputs): + self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) + elif self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: + num_to_store = to_store_keys.shape[0] + self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=1) + else: + self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i], 
to_store_labels[i].as_in_context(mx.cpu()), dim=0) + + def sample_memory(self, batch_size): + num_stored_samples = self.key_memory.shape[0] + if self.replay_batch_size == -1: + sample_ind = nd.random.randint(0, num_stored_samples, (self.replay_steps, batch_size), ctx=mx.cpu()) + else: + sample_ind = nd.random.randint(0, num_stored_samples, (self.replay_steps, self.replay_batch_size), ctx=mx.cpu()) + + num_outputs = len(self.label_memory) + + sample_labels = [[self.label_memory[i][ind] for i in range(num_outputs)] for ind in sample_ind] + sample_batches = [[[self.value_memory[j][ind] for j in range(len(self.value_memory))], sample_labels[i]] for i, ind in enumerate(sample_ind)] + + return sample_batches + + def get_query_network(self, context): + lastEpoch = 0 + for file in os.listdir(self.query_net_dir): + if self.query_net_prefix in file and ".json" in file: + symbolFile = file + + if self.query_net_prefix in file and ".param" in file: + epochStr = file.replace(".params", "").replace(self.query_net_prefix, "") + epoch = int(epochStr) + if epoch >= lastEpoch: + lastEpoch = epoch + weightFile = file + + inputNames = [] + if self.query_net_num_inputs == 1: + inputNames.append("data") + else: + for i in range(self.query_net_num_inputs): + inputNames.append("data" + str(i)) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + net = mx.gluon.nn.SymbolBlock.imports(self.query_net_dir + symbolFile, inputNames, self.query_net_dir + weightFile, ctx=context[0]) + net.hybridize() + return net + + def save_memory(self, path): + mem_arr = [("keys", self.key_memory)] + [("values_"+str(k),v) for (k,v) in enumerate(self.value_memory)] + [("labels_"+str(k),v) for (k,v) in enumerate(self.label_memory)] + mem_dict = {entry[0]:entry[1] for entry in mem_arr} + nd.save(path, mem_dict) + + def load_memory(self, path): + mem_dict = nd.load(path) + self.value_memory = [] + self.label_memory = [] + for key in sorted(mem_dict.keys()): + if key == "keys": + self.key_memory = 
mem_dict[key] + elif key.startswith("values_"): + self.value_memory.append(mem_dict[key]) + elif key.startswith("labels_"): + self.label_memory.append(mem_dict[key]) + + +#Stream 0 class Net_0(gluon.HybridBlock): - def __init__(self, data_mean=None, data_std=None, **kwargs): + def __init__(self, data_mean=None, data_std=None, mx_context=None, **kwargs): super(Net_0, self).__init__(**kwargs) with self.name_scope(): if data_mean: @@ -172,5 +585,5 @@ class Net_0(gluon.HybridBlock): sigmoid5_ = self.sigmoid5_(conv5_) dis_ = F.identity(sigmoid5_) - return dis_ + return [[dis_]] diff --git a/src/test/resources/target_code/gluon/ganModel/infoGAN/CNNCreator_infoGAN_infoGANConnector_predictor.py b/src/test/resources/target_code/gluon/ganModel/infoGAN/CNNCreator_infoGAN_infoGANConnector_predictor.py index ba115b47..c84c36bf 100644 --- a/src/test/resources/target_code/gluon/ganModel/infoGAN/CNNCreator_infoGAN_infoGANConnector_predictor.py +++ b/src/test/resources/target_code/gluon/ganModel/infoGAN/CNNCreator_infoGAN_infoGANConnector_predictor.py @@ -2,6 +2,8 @@ import mxnet as mx import logging import os import shutil +import warnings +import inspect from CNNNet_infoGAN_infoGANConnector_predictor import Net_0 @@ -20,6 +22,10 @@ class CNNCreator_infoGAN_infoGANConnector_predictor: for i, network in self.networks.items(): lastEpoch = 0 param_file = None + if hasattr(network, 'episodic_sub_nets'): + num_episodic_sub_nets = len(network.episodic_sub_nets) + lastMemEpoch = [0]*num_episodic_sub_nets + mem_files = [None]*num_episodic_sub_nets try: os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + "_newest-0000.params") @@ -30,22 +36,77 @@ class CNNCreator_infoGAN_infoGANConnector_predictor: except OSError: pass + if hasattr(network, 'episodic_sub_nets'): + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(0) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + 
"_" + str(i) + '_newest_episodic_sub_net_' + str(0) + "-symbol.json") + except OSError: + pass + + for j in range(len(network.episodic_sub_nets)): + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(j+1) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(j+1) + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_query_net_' + str(j+1) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_query_net_' + str(j+1) + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_loss' + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_loss' + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + "_newest_episodic_memory_sub_net_" + str(j + 1) + "-0000") + except OSError: + pass + if os.path.isdir(self._model_dir_): for file in os.listdir(self._model_dir_): - if ".params" in file and self._model_prefix_ + "_" + str(i) in file: - epochStr = file.replace(".params","").replace(self._model_prefix_ + "_" + str(i) + "-","") + if ".params" in file and self._model_prefix_ + "_" + str(i) in file and not "loss" in file: + epochStr = file.replace(".params", "").replace(self._model_prefix_ + "_" + str(i) + "-", "") epoch = int(epochStr) - if epoch > lastEpoch: + if epoch >= lastEpoch: lastEpoch = epoch param_file = file + elif hasattr(network, 'episodic_sub_nets') and self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_" in file: + relMemPathInfo = file.replace(self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_", "").split("-") + memSubNet = 
int(relMemPathInfo[0]) + memEpochStr = relMemPathInfo[1] + memEpoch = int(memEpochStr) + if memEpoch >= lastMemEpoch[memSubNet-1]: + lastMemEpoch[memSubNet-1] = memEpoch + mem_files[memSubNet-1] = file + if param_file is None: earliestLastEpoch = 0 else: logging.info("Loading checkpoint: " + param_file) network.load_parameters(self._model_dir_ + param_file) + if hasattr(network, 'episodic_sub_nets'): + for j, sub_net in enumerate(network.episodic_sub_nets): + if mem_files[j] != None: + logging.info("Loading Replay Memory: " + mem_files[j]) + mem_layer = [param for param in inspect.getmembers(sub_net, lambda x: not(inspect.isroutine(x))) if param[0].startswith("memory")][0][1] + mem_layer.load_memory(self._model_dir_ + mem_files[j]) - if earliestLastEpoch == None or lastEpoch < earliestLastEpoch: - earliestLastEpoch = lastEpoch + if earliestLastEpoch == None or lastEpoch + 1 < earliestLastEpoch: + earliestLastEpoch = lastEpoch + 1 return earliestLastEpoch @@ -56,27 +117,52 @@ class CNNCreator_infoGAN_infoGANConnector_predictor: for i, network in self.networks.items(): # param_file = self._model_prefix_ + "_" + str(i) + "_newest-0000.params" param_file = None + if hasattr(network, 'episodic_sub_nets'): + num_episodic_sub_nets = len(network.episodic_sub_nets) + lastMemEpoch = [0] * num_episodic_sub_nets + mem_files = [None] * num_episodic_sub_nets + if os.path.isdir(self._weights_dir_): lastEpoch = 0 for file in os.listdir(self._weights_dir_): - if ".params" in file and self._model_prefix_ + "_" + str(i) in file: + if ".params" in file and self._model_prefix_ + "_" + str(i) in file and not "loss" in file: epochStr = file.replace(".params","").replace(self._model_prefix_ + "_" + str(i) + "-","") epoch = int(epochStr) - if epoch > lastEpoch: + if epoch >= lastEpoch: lastEpoch = epoch param_file = file + elif hasattr(network, 'episodic_sub_nets') and self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_" in file: + relMemPathInfo = 
file.replace(self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_", "").split("-") + memSubNet = int(relMemPathInfo[0]) + memEpochStr = relMemPathInfo[1] + memEpoch = int(memEpochStr) + if memEpoch >= lastMemEpoch[memSubNet-1]: + lastMemEpoch[memSubNet-1] = memEpoch + mem_files[memSubNet-1] = file + logging.info("Loading pretrained weights: " + self._weights_dir_ + param_file) network.load_parameters(self._weights_dir_ + param_file, allow_missing=True, ignore_extra=True) + if hasattr(network, 'episodic_sub_nets'): + assert lastEpoch == lastMemEpoch + for j, sub_net in enumerate(network.episodic_sub_nets): + if mem_files[j] != None: + logging.info("Loading pretrained Replay Memory: " + mem_files[j]) + mem_layer = \ + [param for param in inspect.getmembers(sub_net, lambda x: not (inspect.isroutine(x))) if + param[0].startswith("memory")][0][1] + mem_layer.load_memory(self._model_dir_ + mem_files[j]) else: logging.info("No pretrained weights available at: " + self._weights_dir_ + param_file) def construct(self, context, data_mean=None, data_std=None): - self.networks[0] = Net_0(data_mean=data_mean, data_std=data_std) - self.networks[0].collect_params().initialize(self.weight_initializer, ctx=context) + self.networks[0] = Net_0(data_mean=data_mean, data_std=data_std, mx_context=context, prefix="") + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + self.networks[0].collect_params().initialize(self.weight_initializer, force_reinit=False, ctx=context) + self.networks[0].hybridize() - self.networks[0](mx.nd.zeros((1, 62,), ctx=context), mx.nd.zeros((1, 10,), ctx=context)) + self.networks[0](mx.nd.zeros((1, 62,), ctx=context[0]), mx.nd.zeros((1, 10,), ctx=context[0])) if not os.path.exists(self._model_dir_): os.makedirs(self._model_dir_) diff --git a/src/test/resources/target_code/gluon/ganModel/infoGAN/CNNGanTrainer_infoGAN_infoGANConnector_predictor.py
b/src/test/resources/target_code/gluon/ganModel/infoGAN/CNNGanTrainer_infoGAN_infoGANConnector_predictor.py index b49f11dc..426cc112 100644 --- a/src/test/resources/target_code/gluon/ganModel/infoGAN/CNNGanTrainer_infoGAN_infoGANConnector_predictor.py +++ b/src/test/resources/target_code/gluon/ganModel/infoGAN/CNNGanTrainer_infoGAN_infoGANConnector_predictor.py @@ -184,16 +184,16 @@ class CNNGanTrainer_infoGAN_infoGANConnector_predictor: del discriminator_optimizer_params['learning_rate_decay'] if normalize: - self._net_creator_dis.construct(mx_context, data_mean=data_mean, data_std=data_std) + self._net_creator_dis.construct([mx_context], data_mean=data_mean, data_std=data_std) else: - self._net_creator_dis.construct(mx_context) + self._net_creator_dis.construct([mx_context]) - self._net_creator_gen.construct(mx_context) + self._net_creator_gen.construct([mx_context]) if self.use_qnet: - self._net_creator_qnet.construct(mx_context) + self._net_creator_qnet.construct([mx_context]) if load_checkpoint: - self._net_creator_qnet.load(mx_context) + self._net_creator_qnet.load([mx_context]) else: if os.path.isdir(self._net_creator_qnet._model_dir_): shutil.rmtree(self._net_creator_qnet._model_dir_) @@ -206,8 +206,8 @@ class CNNGanTrainer_infoGAN_infoGANConnector_predictor: begin_epoch = 0 if load_checkpoint: - begin_epoch = self._net_creator_dis.load(mx_context) - self._net_creator_gen.load(mx_context) + begin_epoch = self._net_creator_dis.load([mx_context]) + self._net_creator_gen.load([mx_context]) else: if os.path.isdir(self._net_creator_dis._model_dir_): shutil.rmtree(self._net_creator_dis._model_dir_) @@ -351,9 +351,9 @@ class CNNGanTrainer_infoGAN_infoGANConnector_predictor: gen_input, exp_qnet_output = create_generator_input(batch) with autograd.record(): - fake_data = gen_net(*gen_input) + fake_data = gen_net(*gen_input)[0][0] fake_data.detach() - discriminated_fake_dis = dis_net(fake_data, *dis_conditional_input) + discriminated_fake_dis = dis_net(fake_data, 
*dis_conditional_input)[0][0] if self.use_qnet: discriminated_fake_dis, _ = discriminated_fake_dis @@ -361,7 +361,7 @@ class CNNGanTrainer_infoGAN_infoGANConnector_predictor: real_labels = mx.nd.ones(discriminated_fake_dis.shape, ctx=mx_context) loss_resultF = dis_loss(discriminated_fake_dis, fake_labels) - discriminated_real_dis = dis_net(real_data, *dis_conditional_input) + discriminated_real_dis = dis_net(real_data, *dis_conditional_input)[0][0] if self.use_qnet: discriminated_real_dis, _ = discriminated_real_dis loss_resultR = dis_loss(discriminated_real_dis, real_labels) @@ -372,8 +372,8 @@ class CNNGanTrainer_infoGAN_infoGANConnector_predictor: if batch_i % k_value == 0: with autograd.record(): - fake_data = gen_net(*gen_input) - discriminated_fake_gen = dis_net(fake_data, *dis_conditional_input) + fake_data = gen_net(*gen_input)[0][0] + discriminated_fake_gen = dis_net(fake_data, *dis_conditional_input)[0][0] if self.use_qnet: discriminated_fake_gen, features = discriminated_fake_gen loss_resultG = dis_loss(discriminated_fake_gen, real_labels) @@ -381,7 +381,7 @@ class CNNGanTrainer_infoGAN_infoGANConnector_predictor: condition = batch.data[traindata_to_index[generator_target_name + "_"]] loss_resultG = loss_resultG + gen_loss_weight * generator_loss_func(fake_data, condition) if self.use_qnet: - qnet_discriminated = [q_net(features)] + qnet_discriminated = [q_net(features)[0][0]] for i, qnet_out in enumerate(qnet_discriminated): loss_resultG = loss_resultG + qnet_losses[i](qnet_out, exp_qnet_output[i]) loss_resultG.backward() diff --git a/src/test/resources/target_code/gluon/ganModel/infoGAN/CNNNet_infoGAN_infoGANConnector_predictor.py b/src/test/resources/target_code/gluon/ganModel/infoGAN/CNNNet_infoGAN_infoGANConnector_predictor.py index 4aeb33b3..dedd0090 100644 --- a/src/test/resources/target_code/gluon/ganModel/infoGAN/CNNNet_infoGAN_infoGANConnector_predictor.py +++ 
b/src/test/resources/target_code/gluon/ganModel/infoGAN/CNNNet_infoGAN_infoGANConnector_predictor.py @@ -1,7 +1,10 @@ import mxnet as mx import numpy as np import math -from mxnet import gluon +import os +import abc +import warnings +from mxnet import gluon, nd class ZScoreNormalization(gluon.HybridBlock): @@ -86,9 +89,419 @@ class CustomGRU(gluon.HybridBlock): output, [state0] = self.gru(data, [F.swapaxes(state0, 0, 1)]) return output, F.swapaxes(state0, 0, 1) + +class DotProductSelfAttention(gluon.HybridBlock): + def __init__(self, + scale_factor, + num_heads, + dim_model, + dim_keys, + dim_values, + use_proj_bias, + use_mask, + **kwargs): + super(DotProductSelfAttention, self).__init__(**kwargs) + with self.name_scope(): + self.num_heads = num_heads + self.dim_model = dim_model + self.use_proj_bias = use_proj_bias + self.use_mask = use_mask + + if dim_keys == -1: + self.dim_keys = int(dim_model / self.num_heads) + else: + self.dim_keys = dim_keys + if dim_values == -1: + self.dim_values = int(dim_model / self.num_heads) + else: + self.dim_values = dim_values + + if scale_factor == -1: + self.scale_factor = math.sqrt(self.dim_keys) + else: + self.scale_factor = scale_factor + + self.proj_q = gluon.nn.Dense(self.num_heads*self.dim_keys, use_bias=self.use_proj_bias, flatten=False) + self.proj_k = gluon.nn.Dense(self.num_heads*self.dim_keys, use_bias=self.use_proj_bias, flatten=False) + self.proj_v = gluon.nn.Dense(self.num_heads*self.dim_values, use_bias=self.use_proj_bias, flatten=False) + self.proj_o = gluon.nn.Dense(self.dim_model, use_bias=self.use_proj_bias, flatten=False) + + def hybrid_forward(self, F, queries, keys, values, *args, **kwargs): + + queries = F.Reshape(queries, shape=(0, 0,-1)) + keys = F.Reshape(queries, shape=(0, 0, -1)) + values = F.Reshape(queries, shape=(0, 0, -1)) + + head_queries = self.proj_q(queries) + head_keys = self.proj_k(keys) + head_values = self.proj_v(values) + + head_queries = F.reshape(head_queries, shape=(0, 0, 
self.num_heads, -1)) + head_queries = F.transpose(head_queries, axes=(0,2,1,3)) + head_queries = F.reshape(head_queries, shape=(-1, 0, 0), reverse=True) + + head_keys = F.reshape(head_keys, shape=(0, 0, self.num_heads, -1)) + head_keys = F.transpose(head_keys, axes=(0,2,1,3)) + head_keys = F.reshape(head_keys, shape=(-1, 0, 0), reverse=True) + + score = F.batch_dot(head_queries, head_keys, transpose_b=True) + score = score * self.scale_factor + if self.use_mask: + mask = F.tile(mask, self.num_heads) + mask = F.repeat(mask, self.dim_model) + mask = F.reshape(mask, shape=(-1, self.dim_model)) + weights = F.softmax(score, mask, use_length=self.use_mask) + + head_values = F.reshape(head_values, shape=(0, 0, self.num_heads, -1)) + head_values = F.transpose(head_values, axes=(0,2,1,3)) + head_values = F.reshape(head_values, shape=(-1, 0, 0), reverse=True) + + ret = F.batch_dot(weights, head_values) + ret = F.reshape(ret, shape=(-1, self.num_heads, 0, 0), reverse=True) + ret = F.transpose(ret, axes=(0, 2, 1, 3)) + ret = F.reshape(ret, shape=(0, 0, -1)) + + ret = self.proj_o(ret) + + return ret + + +class EpisodicReplayMemoryInterface(gluon.HybridBlock): + __metaclass__ = abc.ABCMeta + + def __init__(self, use_replay, replay_interval, replay_batch_size, replay_steps, replay_gradient_steps, num_heads, **kwargs): + super(EpisodicReplayMemoryInterface, self).__init__(**kwargs) + + self.use_replay = use_replay + self.replay_interval = replay_interval + self.replay_batch_size = replay_batch_size + self.replay_steps = replay_steps + self.replay_gradient_steps = replay_gradient_steps + self.num_heads = num_heads + + @abc.abstractmethod + def store_samples(self, data, y, query_network, store_prob, mx_context): + pass + + @abc.abstractmethod + def sample_memory(self, batch_size, mx_context): + pass + + @abc.abstractmethod + def get_query_network(self, mx_context): + pass + + @abc.abstractmethod + def save_memory(self, path): + pass + + @abc.abstractmethod + def load_memory(self, 
path): + pass + +#Memory layer +class LargeMemory(gluon.HybridBlock): + def __init__(self, + sub_key_size, + query_size, + query_act, + dist_measure, + k, + num_heads, + values_dim, + **kwargs): + super(LargeMemory, self).__init__(**kwargs) + with self.name_scope(): + #Memory parameters + self.dist_measure = dist_measure + self.k = k + self.num_heads = num_heads + self.query_act = query_act + self.query_size = query_size + self.num_heads = num_heads + + #Batch norm sub-layer + self.batch_norm = gluon.nn.BatchNorm() + + #Memory sub-layer + self.sub_key_size = sub_key_size + sub_key_shape = (self.num_heads, self.sub_key_size, int(query_size[-1] / 2)) + + if values_dim == -1: + values_shape = (self.sub_key_size * self.sub_key_size, self.query_size[-1]) + else: + values_shape = (self.sub_key_size*self.sub_key_size, values_dim) + + self.sub_keys1 = self.params.get("sub_keys1", shape=sub_key_shape, differentiable=True) + self.sub_keys2 = self.params.get("sub_keys2", shape=sub_key_shape, differentiable=True) + self.values = self.params.get("values", shape=values_shape, differentiable=True) + self.label_memory = nd.array([]) + + self.get_query_network() + + def hybrid_forward(self, F, x, sub_keys1, sub_keys2, values): + x = self.batch_norm(x) + + x = F.reshape(x, shape=(0, -1)) + + q = self.query_network(x) + + q = F.reshape(q, shape=(0, self.num_heads, -1)) + + q_split = F.split(q, num_outputs=2, axis=-1) + + if self.dist_measure == "l2": + q_split_resh = F.reshape(q_split[0], shape=(0,0,1,-1)) + sub_keys1_resh = F.reshape(sub_keys1, shape=(1,0,0,-1), reverse=True) + q1_diff = F.broadcast_sub(q_split_resh, sub_keys1_resh) + q1_dist = F.norm(q1_diff, axis=-1) + q_split_resh = F.reshape(q_split[1], shape=(0,0,1,-1)) + sub_keys2_resh = F.reshape(sub_keys2, shape=(1,0,0,-1), reverse=True) + q2_diff = F.broadcast_sub(q_split_resh, sub_keys2_resh) + q2_dist = F.norm(q2_diff, axis=-1) + else: + q1 = F.split(q_split[0], num_outputs=self.num_heads, axis=1) + q2 = 
F.split(q_split[1], num_outputs=self.num_heads, axis=1) + sub_keys1_resh = F.split(sub_keys1, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + sub_keys2_resh = F.split(sub_keys2, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + if self.num_heads == 1: + q1 = [q1] + q2 = [q2] + sub_keys1_resh = [sub_keys1_resh ] + sub_keys2_resh = [sub_keys2_resh ] + + q1_dist = F.dot(q1[0], sub_keys1_resh[0], transpose_b=True) + q2_dist = F.dot(q2[0], sub_keys2_resh[0], transpose_b=True) + for h in range(1, self.num_heads): + q1_dist = F.concat(q1_dist, F.dot(q1[0], sub_keys1_resh[h], transpose_b=True), dim=1) + q2_dist = F.concat(q2_dist, F.dot(q2[0], sub_keys1_resh[h], transpose_b=True), dim=1) + + i1 = F.topk(q1_dist, k=self.k, ret_typ="indices") + i2 = F.topk(q2_dist, k=self.k, ret_typ="indices") + + # Calculate cross product for keys at indices I1 and I2 + + # def head_take(data, state): + # return [F.take(data[0], data[2]), F.take(data[1], data[3])], state, + # + # i1 = F.transpose(i1, axes=(1,0,2)) + # i2 = F.transpose(i2, axes=(1, 0, 2)) + # st = F.zeros(1) + # (k1, k2), _ = F.contrib.foreach(head_take, [sub_keys1, sub_keys2,i1,i2], st) + # k1 = F.reshape(k1, shape=(-1, 0, 0), reverse=True) + # k2 = F.reshape(k2, shape=(-1, 0, 0), reverse=True) + i1 = F.split(i1, num_outputs=self.num_heads, axis=1) + i2 = F.split(i2, num_outputs=self.num_heads, axis=1) + sub_keys1 = F.split(sub_keys1, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + sub_keys2 = F.split(sub_keys2, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + if self.num_heads == 1: + i1 = [i1] + i2 = [i2] + sub_keys1 = [sub_keys1] + sub_keys2 = [sub_keys2] + + k1 = F.take(sub_keys1[0], i1[0]) + k2 = F.take(sub_keys2[0], i2[0]) + for h in range(1, self.num_heads): + k1 = F.concat(k1, F.take(sub_keys1[h], i1[h]), dim=1) + k2 = F.concat(k2, F.take(sub_keys2[h], i2[h]), dim=1) + + k1 = F.tile(k1, (1, 1, self.k, 1)) + k2 = F.repeat(k2, self.k, 2) + c_cart = F.concat(k1, k2, dim=3) + + q = 
F.reshape(q, shape=(-1,0), reverse=True) + q = F.reshape(q, shape=(0, 1, -1)) + c_cart = F.reshape(c_cart, shape=(-1, 0, 0), reverse=True) + if self.dist_measure == "l2": + k_diff = F.broadcast_sub(q, c_cart) + k_dist = F.norm(k_diff, axis=-1) + else: + k_dist = F.batch_dot(q, c_cart, transpose_b=True) #F.contrib.foreach(loop_batch_dot, [q, c_cart], init_states=state_batch_dist) + k_dist = F.reshape(k_dist, shape=(0, -1)) + + i = F.topk(k_dist, k=self.k, ret_typ="both") + + w = F.softmax(i[0]) + w = F.reshape(w, shape=(0,1,-1)) + vi = F.take(values, i[1]) + aggr_value = F.batch_dot(w, vi) #F.contrib.foreach(loop_batch_dot, [w, vi], init_states=state_batch_dist) + + ret = F.reshape(aggr_value, shape=(-1, self.num_heads, 0), reverse=True) + one_vec = F.ones((1, 1, self.num_heads)) + one_vec = F.broadcast_like(one_vec, ret, lhs_axes=0, rhs_axes=0) + ret = F.batch_dot(one_vec, ret) + ret = F.reshape(ret, shape=(-1, 0), reverse=True) + + return ret + + def get_query_network(self): + if hasattr(self, 'query_network'): + return self.query_network + else: + self.query_network = gluon.nn.HybridSequential() + for size in self.query_size: + if self.query_act == "linear": + self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, flatten=False)) + else: + self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, activation=self.query_act, flatten=False)) + return self.query_network + + +#EpisodicMemory layer +class EpisodicMemory(EpisodicReplayMemoryInterface): + def __init__(self, + replay_interval, + replay_batch_size, + replay_steps, + replay_gradient_steps, + store_prob, + max_stored_samples, + use_replay, + query_net_dir, + query_net_prefix, + query_net_num_inputs, + **kwargs): + super(EpisodicMemory, self).__init__(use_replay, replay_interval, replay_batch_size, replay_steps, replay_gradient_steps, 1, **kwargs) + with self.name_scope(): + #Replay parameters + self.store_prob = store_prob + self.max_stored_samples = max_stored_samples + + self.query_net_dir 
= query_net_dir + self.query_net_prefix = query_net_prefix + self.query_net_num_inputs = query_net_num_inputs + + #Memory + self.key_memory = nd.array([]) + self.value_memory = nd.array([]) + self.label_memory = nd.array([]) + + def hybrid_forward(self, F, *args): + #propagate the input as the rest is only used for replay + return [args, []] + + def store_samples(self, data, y, query_network, store_prob, context): + num_pus = len(data) + sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] + num_inputs = len(data[0][0]) + num_outputs = len(y) + mx_context = context[0] + + if len(self.key_memory) == 0: + self.key_memory = nd.empty(0, ctx=mx.cpu()) + self.value_memory = [] + self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) + + ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] + + max_inds = [nd.max(ind[i]) for i in range(num_pus)] + if any(max_inds): + to_store_values = [] + for i in range(num_inputs): + tmp_values = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_values, list): + tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) + else: + tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) + to_store_values.append(tmp_values) + + to_store_labels = [] + for i in range(num_outputs): + tmp_labels = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_labels, list): + tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) + else: + tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) + to_store_labels.append(tmp_labels) + + to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) + + if self.key_memory.shape[0] == 0: + self.key_memory = to_store_keys.as_in_context(mx.cpu()) + for i in range(num_inputs): + 
self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + for i in range(num_outputs): + self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) + elif self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: + num_to_store = to_store_keys.shape[0] + self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=1) + else: + self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + + def sample_memory(self, batch_size): + num_stored_samples = self.key_memory.shape[0] + if self.replay_batch_size == -1: + sample_ind = nd.random.randint(0, num_stored_samples, (self.replay_steps, batch_size), ctx=mx.cpu()) + else: + sample_ind = nd.random.randint(0, num_stored_samples, (self.replay_steps, self.replay_batch_size), ctx=mx.cpu()) + + num_outputs = len(self.label_memory) + + sample_labels = [[self.label_memory[i][ind] for i in range(num_outputs)] for ind in sample_ind] + sample_batches = [[[self.value_memory[j][ind] for j in range(len(self.value_memory))], sample_labels[i]] for i, ind in enumerate(sample_ind)] + + return sample_batches + + def get_query_network(self, context): + lastEpoch = 0 + for file in os.listdir(self.query_net_dir): + if self.query_net_prefix in file and ".json" in file: + symbolFile = file + + if self.query_net_prefix in file and ".param" in file: + 
epochStr = file.replace(".params", "").replace(self.query_net_prefix, "") + epoch = int(epochStr) + if epoch >= lastEpoch: + lastEpoch = epoch + weightFile = file + + inputNames = [] + if self.query_net_num_inputs == 1: + inputNames.append("data") + else: + for i in range(self.query_net_num_inputs): + inputNames.append("data" + str(i)) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + net = mx.gluon.nn.SymbolBlock.imports(self.query_net_dir + symbolFile, inputNames, self.query_net_dir + weightFile, ctx=context[0]) + net.hybridize() + return net + + def save_memory(self, path): + mem_arr = [("keys", self.key_memory)] + [("values_"+str(k),v) for (k,v) in enumerate(self.value_memory)] + [("labels_"+str(k),v) for (k,v) in enumerate(self.label_memory)] + mem_dict = {entry[0]:entry[1] for entry in mem_arr} + nd.save(path, mem_dict) + + def load_memory(self, path): + mem_dict = nd.load(path) + self.value_memory = [] + self.label_memory = [] + for key in sorted(mem_dict.keys()): + if key == "keys": + self.key_memory = mem_dict[key] + elif key.startswith("values_"): + self.value_memory.append(mem_dict[key]) + elif key.startswith("labels_"): + self.label_memory.append(mem_dict[key]) + + +#Stream 0 class Net_0(gluon.HybridBlock): - def __init__(self, data_mean=None, data_std=None, **kwargs): + def __init__(self, data_mean=None, data_std=None, mx_context=None, **kwargs): super(Net_0, self).__init__(**kwargs) with self.name_scope(): if data_mean: @@ -186,5 +599,5 @@ class Net_0(gluon.HybridBlock): tanh7_ = self.tanh7_(upconvolution7_) data_ = F.identity(tanh7_) - return data_ + return [[data_]] diff --git a/src/test/resources/target_code/gluon/ganModel/infoGAN/CNNPredictor_infoGAN_infoGANConnector_predictor.h b/src/test/resources/target_code/gluon/ganModel/infoGAN/CNNPredictor_infoGAN_infoGANConnector_predictor.h index 76240ebe..e0968814 100644 --- a/src/test/resources/target_code/gluon/ganModel/infoGAN/CNNPredictor_infoGAN_infoGANConnector_predictor.h +++ 
b/src/test/resources/target_code/gluon/ganModel/infoGAN/CNNPredictor_infoGAN_infoGANConnector_predictor.h @@ -1,109 +1,152 @@ #ifndef CNNPREDICTOR_INFOGAN_INFOGANCONNECTOR_PREDICTOR #define CNNPREDICTOR_INFOGAN_INFOGANCONNECTOR_PREDICTOR -#include +#include #include #include #include + +#include +#include -#include - +using namespace mxnet::cpp; + class CNNPredictor_infoGAN_infoGANConnector_predictor_0{ public: - const std::string json_file = "model/infoGAN.InfoGANGenerator/model_0_newest-symbol.json"; - const std::string param_file = "model/infoGAN.InfoGANGenerator/model_0_newest-0000.params"; - const std::vector input_keys = { + const std::string file_prefix = "model/infoGAN.InfoGANGenerator/model_0_newest"; + + //network + const std::vector network_input_keys = { "data0", "data1" }; - const std::vector> input_shapes = {{1, 62}, {1, 10}}; - const bool use_gpu = false; - - PredictorHandle handle; - + const std::vector> network_input_shapes = {{1, 62}, {1, 10}}; + std::vector network_input_sizes; + std::vector> network_arg_names; + std::vector network_handles; + + + //misc + Context ctx = Context::cpu(); //Will be updated later in init according to use_gpu + int dtype = 0; //use data type (float32=0 float64=1 ...) 
+ + explicit CNNPredictor_infoGAN_infoGANConnector_predictor_0(){ - init(json_file, param_file, input_keys, input_shapes, use_gpu); + init(file_prefix, network_input_keys, network_input_shapes); } ~CNNPredictor_infoGAN_infoGANConnector_predictor_0(){ - if(handle) MXPredFree(handle); + for(Executor * handle : network_handles){ + delete handle; + } + MXNotifyShutdown(); } void predict(const std::vector &in_noise_, const std::vector &in_c1_, std::vector &out_data_){ - MXPredSetInput(handle, input_keys[0].c_str(), in_noise_.data(), static_cast(in_noise_.size())); - MXPredSetInput(handle, input_keys[1].c_str(), in_c1_.data(), static_cast(in_c1_.size())); - - MXPredForward(handle); - mx_uint output_index; - mx_uint *shape = 0; - mx_uint shape_len; - size_t size; - - output_index = 0; - MXPredGetOutputShape(handle, output_index, &shape, &shape_len); - size = 1; - for (mx_uint i = 0; i < shape_len; ++i) size *= shape[i]; - - assert(size == out_data_.size()); - MXPredGetOutput(handle, output_index, &(out_data_[0]), out_data_.size()); + NDArray input_temp; + input_temp = NDArray(network_input_shapes[0], ctx, false, dtype); + input_temp.SyncCopyFromCPU(in_noise_.data(), network_input_sizes[0]); + input_temp.CopyTo(&(network_handles[0]->arg_dict()[network_input_keys[0]])); + input_temp = NDArray(network_input_shapes[1], ctx, false, dtype); + input_temp.SyncCopyFromCPU(in_c1_.data(), network_input_sizes[1]); + input_temp.CopyTo(&(network_handles[0]->arg_dict()[network_input_keys[1]])); + NDArray::WaitAll(); + + network_handles[0]->Forward(false); + CheckMXNetError("Forward, predict, handle ind. 
0"); + + + std::vector output = network_handles.back()->outputs; + std::vector curr_output_shape; + size_t curr_output_size; + curr_output_shape = output[0].GetShape(); + curr_output_size = 1; + for (mx_uint i : curr_output_shape) curr_output_size *= i; + //Fix due to a bug in the in how the output arrays are initialized when there are multiple outputs + assert((curr_output_size == out_data_.size()) || (curr_output_size == out_data_[0])); + output[0].SyncCopyToCPU(&out_data_); + } + + + + Executor* initExecutor(Symbol &sym, + std::map ¶m_map, + const std::vector &exec_input_keys, + const std::vector> &exec_input_shapes){ + + const mx_uint num_exec_input_nodes = exec_input_keys.size(); + for(mx_uint i = 0; i < num_exec_input_nodes; i++){ + param_map[exec_input_keys[i]] = NDArray(exec_input_shapes[i], ctx, false, dtype); + } - void init(const std::string &json_file, - const std::string ¶m_file, - const std::vector &input_keys, - const std::vector> &input_shapes, - const bool &use_gpu){ + std::vector param_arrays; + std::vector grad_array; + std::vector grad_reqs; + std::vector aux_arrays; + std::map< std::string, NDArray> aux_map; - BufferFile json_data(json_file); - BufferFile param_data(param_file); + sym.InferExecutorArrays(ctx, ¶m_arrays, &grad_array, &grad_reqs, + &aux_arrays, param_map, std::map(), + std::map(), aux_map); - int dev_type = use_gpu ? 
2 : 1; - int dev_id = 0; + Executor *handle = new Executor(sym, ctx, param_arrays, grad_array, grad_reqs, aux_arrays); + assert(handle); + return handle; + } - if (json_data.GetLength() == 0 || - param_data.GetLength() == 0) { - std::exit(-1); + std::vector getSizesOfShapes(const std::vector> shapes){ + std::vector sizes; + for(std::vector shape : shapes){ + mx_uint val = 1; + for(mx_uint i: shape){ + val *= i; + } + sizes.push_back(val); } + return sizes; + } - const mx_uint num_input_nodes = input_keys.size(); - - const char* input_keys_ptr[num_input_nodes]; - for(mx_uint i = 0; i < num_input_nodes; i++){ - input_keys_ptr[i] = input_keys[i].c_str(); + void CheckMXNetError(std::string loc){ + const char* err = MXGetLastError(); + if (err && err[0] != 0) { + std::cout << "MXNet error at " << loc << err << std::endl; + exit(-1); } - - mx_uint shape_data_size = 0; - mx_uint input_shape_indptr[input_shapes.size() + 1]; - input_shape_indptr[0] = 0; - for(mx_uint i = 0; i < input_shapes.size(); i++){ - shape_data_size += input_shapes[i].size(); - input_shape_indptr[i+1] = shape_data_size; + } + + void init(const std::string &file_prefix, + const std::vector &network_input_keys, + const std::vector> &network_input_shapes){ + + CNNLAOptimizer_infoGAN_infoGANConnector_predictor optimizer_creator = CNNLAOptimizer_infoGAN_infoGANConnector_predictor(); + + if(optimizer_creator.getContextName() == "gpu"){ + ctx = Context::gpu(); } - - mx_uint input_shape_data[shape_data_size]; - mx_uint index = 0; - for(mx_uint i = 0; i < input_shapes.size(); i++){ - for(mx_uint j = 0; j < input_shapes[i].size(); j++){ - input_shape_data[index] = input_shapes[i][j]; - index++; - } + + network_input_sizes = getSizesOfShapes(network_input_shapes); + + ModelLoader model_loader(file_prefix, 0, ctx); + + std::vector network_symbols = model_loader.GetNetworkSymbols(); + std::vector> network_param_maps; + network_param_maps = model_loader.GetNetworkParamMaps(); + + //Init handles + std::map> 
in_shape_map; + for(mx_uint i=0; i < network_input_keys.size(); i++){ + in_shape_map[network_input_keys[i]] = network_input_shapes[i]; } - - MXPredCreate(static_cast(json_data.GetBuffer()), - static_cast(param_data.GetBuffer()), - static_cast(param_data.GetLength()), - dev_type, - dev_id, - num_input_nodes, - input_keys_ptr, - input_shape_indptr, - input_shape_data, - &handle); - assert(handle); + std::vector> in_shapes; + std::vector> aux_shapes; + std::vector> out_shapes; + network_symbols[0].InferShape(in_shape_map, &in_shapes, &aux_shapes, &out_shapes); + network_handles.push_back(initExecutor(network_symbols[0], network_param_maps[0], network_input_keys, network_input_shapes)); + } }; - #endif // CNNPREDICTOR_INFOGAN_INFOGANCONNECTOR_PREDICTOR diff --git a/src/test/resources/target_code/gluon/ganModel/infoGAN/CNNTrainer_infoGAN_infoGANConnector_predictor.py b/src/test/resources/target_code/gluon/ganModel/infoGAN/CNNTrainer_infoGAN_infoGANConnector_predictor.py index dda1486b..97e91e46 100644 --- a/src/test/resources/target_code/gluon/ganModel/infoGAN/CNNTrainer_infoGAN_infoGANConnector_predictor.py +++ b/src/test/resources/target_code/gluon/ganModel/infoGAN/CNNTrainer_infoGAN_infoGANConnector_predictor.py @@ -58,5 +58,3 @@ if __name__ == "__main__": log_period=10, print_images=True, ) - - diff --git a/src/test/resources/target_code/gluon/ganModel/infoGAN/gan/CNNCreator_infoGAN_infoGANDiscriminator.py b/src/test/resources/target_code/gluon/ganModel/infoGAN/gan/CNNCreator_infoGAN_infoGANDiscriminator.py index 1254ae9a..41c442dd 100644 --- a/src/test/resources/target_code/gluon/ganModel/infoGAN/gan/CNNCreator_infoGAN_infoGANDiscriminator.py +++ b/src/test/resources/target_code/gluon/ganModel/infoGAN/gan/CNNCreator_infoGAN_infoGANDiscriminator.py @@ -2,6 +2,8 @@ import mxnet as mx import logging import os import shutil +import warnings +import inspect from CNNNet_infoGAN_infoGANDiscriminator import Net_0 @@ -20,6 +22,10 @@ class 
CNNCreator_infoGAN_infoGANDiscriminator: for i, network in self.networks.items(): lastEpoch = 0 param_file = None + if hasattr(network, 'episodic_sub_nets'): + num_episodic_sub_nets = len(network.episodic_sub_nets) + lastMemEpoch = [0]*num_episodic_sub_nets + mem_files = [None]*num_episodic_sub_nets try: os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + "_newest-0000.params") @@ -30,22 +36,77 @@ class CNNCreator_infoGAN_infoGANDiscriminator: except OSError: pass + if hasattr(network, 'episodic_sub_nets'): + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(0) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(0) + "-symbol.json") + except OSError: + pass + + for j in range(len(network.episodic_sub_nets)): + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(j+1) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(j+1) + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_query_net_' + str(j+1) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_query_net_' + str(j+1) + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_loss' + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_loss' + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + "_newest_episodic_memory_sub_net_" + str(j + 1) + "-0000") + except OSError: + pass + if os.path.isdir(self._model_dir_): for file in 
os.listdir(self._model_dir_): - if ".params" in file and self._model_prefix_ + "_" + str(i) in file: - epochStr = file.replace(".params","").replace(self._model_prefix_ + "_" + str(i) + "-","") + if ".params" in file and self._model_prefix_ + "_" + str(i) in file and not "loss" in file: + epochStr = file.replace(".params", "").replace(self._model_prefix_ + "_" + str(i) + "-", "") epoch = int(epochStr) - if epoch > lastEpoch: + if epoch >= lastEpoch: lastEpoch = epoch param_file = file + elif hasattr(network, 'episodic_sub_nets') and self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_" in file: + relMemPathInfo = file.replace(self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_", "").split("-") + memSubNet = int(relMemPathInfo[0]) + memEpochStr = relMemPathInfo[1] + memEpoch = int(memEpochStr) + if memEpoch >= lastMemEpoch[memSubNet-1]: + lastMemEpoch[memSubNet-1] = memEpoch + mem_files[memSubNet-1] = file + if param_file is None: earliestLastEpoch = 0 else: logging.info("Loading checkpoint: " + param_file) network.load_parameters(self._model_dir_ + param_file) + if hasattr(network, 'episodic_sub_nets'): + for j, sub_net in enumerate(network.episodic_sub_nets): + if mem_files[j] != None: + logging.info("Loading Replay Memory: " + mem_files[j]) + mem_layer = [param for param in inspect.getmembers(sub_net, lambda x: not(inspect.isroutine(x))) if param[0].startswith("memory")][0][1] + mem_layer.load_memory(self._model_dir_ + mem_files[j]) - if earliestLastEpoch == None or lastEpoch < earliestLastEpoch: - earliestLastEpoch = lastEpoch + if earliestLastEpoch == None or lastEpoch + 1 < earliestLastEpoch: + earliestLastEpoch = lastEpoch + 1 return earliestLastEpoch @@ -56,27 +117,52 @@ class CNNCreator_infoGAN_infoGANDiscriminator: for i, network in self.networks.items(): # param_file = self._model_prefix_ + "_" + str(i) + "_newest-0000.params" param_file = None + if hasattr(network, 'episodic_sub_nets'): + num_episodic_sub_nets = 
len(network.episodic_sub_nets) + lastMemEpoch = [0] * num_episodic_sub_nets + mem_files = [None] * num_episodic_sub_nets + if os.path.isdir(self._weights_dir_): lastEpoch = 0 for file in os.listdir(self._weights_dir_): - if ".params" in file and self._model_prefix_ + "_" + str(i) in file: + if ".params" in file and self._model_prefix_ + "_" + str(i) in file and not "loss" in file: epochStr = file.replace(".params","").replace(self._model_prefix_ + "_" + str(i) + "-","") epoch = int(epochStr) - if epoch > lastEpoch: + if epoch >= lastEpoch: lastEpoch = epoch param_file = file + elif hasattr(network, 'episodic_sub_nets') and self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_" in file: + relMemPathInfo = file.replace(self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_").split("-") + memSubNet = int(relMemPathInfo[0]) + memEpochStr = relMemPathInfo[1] + memEpoch = int(memEpochStr) + if memEpoch >= lastMemEpoch[memSubNet-1]: + lastMemEpoch[memSubNet-1] = memEpoch + mem_files[memSubNet-1] = file + logging.info("Loading pretrained weights: " + self._weights_dir_ + param_file) network.load_parameters(self._weights_dir_ + param_file, allow_missing=True, ignore_extra=True) + if hasattr(network, 'episodic_sub_nets'): + assert lastEpoch == lastMemEpoch + for j, sub_net in enumerate(network.episodic_sub_nets): + if mem_files[j] != None: + logging.info("Loading pretrained Replay Memory: " + mem_files[j]) + mem_layer = \ + [param for param in inspect.getmembers(sub_net, lambda x: not (inspect.isroutine(x))) if + param[0].startswith("memory")][0][1] + mem_layer.load_memory(self._model_dir_ + mem_files[j]) else: logging.info("No pretrained weights available at: " + self._weights_dir_ + param_file) def construct(self, context, data_mean=None, data_std=None): - self.networks[0] = Net_0(data_mean=data_mean, data_std=data_std) - self.networks[0].collect_params().initialize(self.weight_initializer, ctx=context) + self.networks[0] = Net_0(data_mean=data_mean, 
data_std=data_std, mx_context=context, prefix="") + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + self.networks[0].collect_params().initialize(self.weight_initializer, force_reinit=False, ctx=context) self.networks[0].hybridize() - self.networks[0](mx.nd.zeros((1, 1,28,28,), ctx=context)) + self.networks[0](mx.nd.zeros((1, 1,28,28,), ctx=context[0])) if not os.path.exists(self._model_dir_): os.makedirs(self._model_dir_) diff --git a/src/test/resources/target_code/gluon/ganModel/infoGAN/gan/CNNCreator_infoGAN_infoGANQNetwork.py b/src/test/resources/target_code/gluon/ganModel/infoGAN/gan/CNNCreator_infoGAN_infoGANQNetwork.py index 7316199f..de6c36ac 100644 --- a/src/test/resources/target_code/gluon/ganModel/infoGAN/gan/CNNCreator_infoGAN_infoGANQNetwork.py +++ b/src/test/resources/target_code/gluon/ganModel/infoGAN/gan/CNNCreator_infoGAN_infoGANQNetwork.py @@ -2,6 +2,8 @@ import mxnet as mx import logging import os import shutil +import warnings +import inspect from CNNNet_infoGAN_infoGANQNetwork import Net_0 @@ -20,6 +22,10 @@ class CNNCreator_infoGAN_infoGANQNetwork: for i, network in self.networks.items(): lastEpoch = 0 param_file = None + if hasattr(network, 'episodic_sub_nets'): + num_episodic_sub_nets = len(network.episodic_sub_nets) + lastMemEpoch = [0]*num_episodic_sub_nets + mem_files = [None]*num_episodic_sub_nets try: os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + "_newest-0000.params") @@ -30,22 +36,77 @@ class CNNCreator_infoGAN_infoGANQNetwork: except OSError: pass + if hasattr(network, 'episodic_sub_nets'): + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(0) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(0) + "-symbol.json") + except OSError: + pass + + for j in range(len(network.episodic_sub_nets)): + try: + os.remove(self._model_dir_ + 
self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(j+1) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(j+1) + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_query_net_' + str(j+1) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_query_net_' + str(j+1) + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_loss' + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_loss' + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + "_newest_episodic_memory_sub_net_" + str(j + 1) + "-0000") + except OSError: + pass + if os.path.isdir(self._model_dir_): for file in os.listdir(self._model_dir_): - if ".params" in file and self._model_prefix_ + "_" + str(i) in file: - epochStr = file.replace(".params","").replace(self._model_prefix_ + "_" + str(i) + "-","") + if ".params" in file and self._model_prefix_ + "_" + str(i) in file and not "loss" in file: + epochStr = file.replace(".params", "").replace(self._model_prefix_ + "_" + str(i) + "-", "") epoch = int(epochStr) - if epoch > lastEpoch: + if epoch >= lastEpoch: lastEpoch = epoch param_file = file + elif hasattr(network, 'episodic_sub_nets') and self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_" in file: + relMemPathInfo = file.replace(self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_", "").split("-") + memSubNet = int(relMemPathInfo[0]) + memEpochStr = relMemPathInfo[1] + memEpoch = int(memEpochStr) + if memEpoch >= lastMemEpoch[memSubNet-1]: + lastMemEpoch[memSubNet-1] = memEpoch + 
mem_files[memSubNet-1] = file + if param_file is None: earliestLastEpoch = 0 else: logging.info("Loading checkpoint: " + param_file) network.load_parameters(self._model_dir_ + param_file) + if hasattr(network, 'episodic_sub_nets'): + for j, sub_net in enumerate(network.episodic_sub_nets): + if mem_files[j] != None: + logging.info("Loading Replay Memory: " + mem_files[j]) + mem_layer = [param for param in inspect.getmembers(sub_net, lambda x: not(inspect.isroutine(x))) if param[0].startswith("memory")][0][1] + mem_layer.load_memory(self._model_dir_ + mem_files[j]) - if earliestLastEpoch == None or lastEpoch < earliestLastEpoch: - earliestLastEpoch = lastEpoch + if earliestLastEpoch == None or lastEpoch + 1 < earliestLastEpoch: + earliestLastEpoch = lastEpoch + 1 return earliestLastEpoch @@ -56,27 +117,52 @@ class CNNCreator_infoGAN_infoGANQNetwork: for i, network in self.networks.items(): # param_file = self._model_prefix_ + "_" + str(i) + "_newest-0000.params" param_file = None + if hasattr(network, 'episodic_sub_nets'): + num_episodic_sub_nets = len(network.episodic_sub_nets) + lastMemEpoch = [0] * num_episodic_sub_nets + mem_files = [None] * num_episodic_sub_nets + if os.path.isdir(self._weights_dir_): lastEpoch = 0 for file in os.listdir(self._weights_dir_): - if ".params" in file and self._model_prefix_ + "_" + str(i) in file: + if ".params" in file and self._model_prefix_ + "_" + str(i) in file and not "loss" in file: epochStr = file.replace(".params","").replace(self._model_prefix_ + "_" + str(i) + "-","") epoch = int(epochStr) - if epoch > lastEpoch: + if epoch >= lastEpoch: lastEpoch = epoch param_file = file + elif hasattr(network, 'episodic_sub_nets') and self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_" in file: + relMemPathInfo = file.replace(self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_").split("-") + memSubNet = int(relMemPathInfo[0]) + memEpochStr = relMemPathInfo[1] + memEpoch = int(memEpochStr) + if memEpoch >= 
lastMemEpoch[memSubNet-1]: + lastMemEpoch[memSubNet-1] = memEpoch + mem_files[memSubNet-1] = file + logging.info("Loading pretrained weights: " + self._weights_dir_ + param_file) network.load_parameters(self._weights_dir_ + param_file, allow_missing=True, ignore_extra=True) + if hasattr(network, 'episodic_sub_nets'): + assert lastEpoch == lastMemEpoch + for j, sub_net in enumerate(network.episodic_sub_nets): + if mem_files[j] != None: + logging.info("Loading pretrained Replay Memory: " + mem_files[j]) + mem_layer = \ + [param for param in inspect.getmembers(sub_net, lambda x: not (inspect.isroutine(x))) if + param[0].startswith("memory")][0][1] + mem_layer.load_memory(self._model_dir_ + mem_files[j]) else: logging.info("No pretrained weights available at: " + self._weights_dir_ + param_file) def construct(self, context, data_mean=None, data_std=None): - self.networks[0] = Net_0(data_mean=data_mean, data_std=data_std) - self.networks[0].collect_params().initialize(self.weight_initializer, ctx=context) + self.networks[0] = Net_0(data_mean=data_mean, data_std=data_std, mx_context=context, prefix="") + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + self.networks[0].collect_params().initialize(self.weight_initializer, force_reinit=False, ctx=context) self.networks[0].hybridize() - self.networks[0](mx.nd.zeros((1, 512,4,4,), ctx=context)) + self.networks[0](mx.nd.zeros((1, 512,4,4,), ctx=context[0])) if not os.path.exists(self._model_dir_): os.makedirs(self._model_dir_) diff --git a/src/test/resources/target_code/gluon/ganModel/infoGAN/gan/CNNNet_infoGAN_infoGANDiscriminator.py b/src/test/resources/target_code/gluon/ganModel/infoGAN/gan/CNNNet_infoGAN_infoGANDiscriminator.py index d6000f49..46dec5b1 100644 --- a/src/test/resources/target_code/gluon/ganModel/infoGAN/gan/CNNNet_infoGAN_infoGANDiscriminator.py +++ b/src/test/resources/target_code/gluon/ganModel/infoGAN/gan/CNNNet_infoGAN_infoGANDiscriminator.py @@ -1,7 +1,10 @@ import mxnet as mx import 
numpy as np import math -from mxnet import gluon +import os +import abc +import warnings +from mxnet import gluon, nd class ZScoreNormalization(gluon.HybridBlock): @@ -86,9 +89,419 @@ class CustomGRU(gluon.HybridBlock): output, [state0] = self.gru(data, [F.swapaxes(state0, 0, 1)]) return output, F.swapaxes(state0, 0, 1) + +class DotProductSelfAttention(gluon.HybridBlock): + def __init__(self, + scale_factor, + num_heads, + dim_model, + dim_keys, + dim_values, + use_proj_bias, + use_mask, + **kwargs): + super(DotProductSelfAttention, self).__init__(**kwargs) + with self.name_scope(): + self.num_heads = num_heads + self.dim_model = dim_model + self.use_proj_bias = use_proj_bias + self.use_mask = use_mask + + if dim_keys == -1: + self.dim_keys = int(dim_model / self.num_heads) + else: + self.dim_keys = dim_keys + if dim_values == -1: + self.dim_values = int(dim_model / self.num_heads) + else: + self.dim_values = dim_values + + if scale_factor == -1: + self.scale_factor = math.sqrt(self.dim_keys) + else: + self.scale_factor = scale_factor + + self.proj_q = gluon.nn.Dense(self.num_heads*self.dim_keys, use_bias=self.use_proj_bias, flatten=False) + self.proj_k = gluon.nn.Dense(self.num_heads*self.dim_keys, use_bias=self.use_proj_bias, flatten=False) + self.proj_v = gluon.nn.Dense(self.num_heads*self.dim_values, use_bias=self.use_proj_bias, flatten=False) + self.proj_o = gluon.nn.Dense(self.dim_model, use_bias=self.use_proj_bias, flatten=False) + + def hybrid_forward(self, F, queries, keys, values, *args, **kwargs): + + queries = F.Reshape(queries, shape=(0, 0,-1)) + keys = F.Reshape(queries, shape=(0, 0, -1)) + values = F.Reshape(queries, shape=(0, 0, -1)) + + head_queries = self.proj_q(queries) + head_keys = self.proj_k(keys) + head_values = self.proj_v(values) + + head_queries = F.reshape(head_queries, shape=(0, 0, self.num_heads, -1)) + head_queries = F.transpose(head_queries, axes=(0,2,1,3)) + head_queries = F.reshape(head_queries, shape=(-1, 0, 0), reverse=True) + + 
head_keys = F.reshape(head_keys, shape=(0, 0, self.num_heads, -1)) + head_keys = F.transpose(head_keys, axes=(0,2,1,3)) + head_keys = F.reshape(head_keys, shape=(-1, 0, 0), reverse=True) + + score = F.batch_dot(head_queries, head_keys, transpose_b=True) + score = score * self.scale_factor + if self.use_mask: + mask = F.tile(mask, self.num_heads) + mask = F.repeat(mask, self.dim_model) + mask = F.reshape(mask, shape=(-1, self.dim_model)) + weights = F.softmax(score, mask, use_length=self.use_mask) + + head_values = F.reshape(head_values, shape=(0, 0, self.num_heads, -1)) + head_values = F.transpose(head_values, axes=(0,2,1,3)) + head_values = F.reshape(head_values, shape=(-1, 0, 0), reverse=True) + + ret = F.batch_dot(weights, head_values) + ret = F.reshape(ret, shape=(-1, self.num_heads, 0, 0), reverse=True) + ret = F.transpose(ret, axes=(0, 2, 1, 3)) + ret = F.reshape(ret, shape=(0, 0, -1)) + + ret = self.proj_o(ret) + + return ret + + +class EpisodicReplayMemoryInterface(gluon.HybridBlock): + __metaclass__ = abc.ABCMeta + + def __init__(self, use_replay, replay_interval, replay_batch_size, replay_steps, replay_gradient_steps, num_heads, **kwargs): + super(EpisodicReplayMemoryInterface, self).__init__(**kwargs) + + self.use_replay = use_replay + self.replay_interval = replay_interval + self.replay_batch_size = replay_batch_size + self.replay_steps = replay_steps + self.replay_gradient_steps = replay_gradient_steps + self.num_heads = num_heads + + @abc.abstractmethod + def store_samples(self, data, y, query_network, store_prob, mx_context): + pass + + @abc.abstractmethod + def sample_memory(self, batch_size, mx_context): + pass + + @abc.abstractmethod + def get_query_network(self, mx_context): + pass + + @abc.abstractmethod + def save_memory(self, path): + pass + + @abc.abstractmethod + def load_memory(self, path): + pass + +#Memory layer +class LargeMemory(gluon.HybridBlock): + def __init__(self, + sub_key_size, + query_size, + query_act, + dist_measure, + k, + 
num_heads, + values_dim, + **kwargs): + super(LargeMemory, self).__init__(**kwargs) + with self.name_scope(): + #Memory parameters + self.dist_measure = dist_measure + self.k = k + self.num_heads = num_heads + self.query_act = query_act + self.query_size = query_size + self.num_heads = num_heads + + #Batch norm sub-layer + self.batch_norm = gluon.nn.BatchNorm() + + #Memory sub-layer + self.sub_key_size = sub_key_size + sub_key_shape = (self.num_heads, self.sub_key_size, int(query_size[-1] / 2)) + + if values_dim == -1: + values_shape = (self.sub_key_size * self.sub_key_size, self.query_size[-1]) + else: + values_shape = (self.sub_key_size*self.sub_key_size, values_dim) + + self.sub_keys1 = self.params.get("sub_keys1", shape=sub_key_shape, differentiable=True) + self.sub_keys2 = self.params.get("sub_keys2", shape=sub_key_shape, differentiable=True) + self.values = self.params.get("values", shape=values_shape, differentiable=True) + self.label_memory = nd.array([]) + + self.get_query_network() + + def hybrid_forward(self, F, x, sub_keys1, sub_keys2, values): + x = self.batch_norm(x) + + x = F.reshape(x, shape=(0, -1)) + + q = self.query_network(x) + + q = F.reshape(q, shape=(0, self.num_heads, -1)) + + q_split = F.split(q, num_outputs=2, axis=-1) + + if self.dist_measure == "l2": + q_split_resh = F.reshape(q_split[0], shape=(0,0,1,-1)) + sub_keys1_resh = F.reshape(sub_keys1, shape=(1,0,0,-1), reverse=True) + q1_diff = F.broadcast_sub(q_split_resh, sub_keys1_resh) + q1_dist = F.norm(q1_diff, axis=-1) + q_split_resh = F.reshape(q_split[1], shape=(0,0,1,-1)) + sub_keys2_resh = F.reshape(sub_keys2, shape=(1,0,0,-1), reverse=True) + q2_diff = F.broadcast_sub(q_split_resh, sub_keys2_resh) + q2_dist = F.norm(q2_diff, axis=-1) + else: + q1 = F.split(q_split[0], num_outputs=self.num_heads, axis=1) + q2 = F.split(q_split[1], num_outputs=self.num_heads, axis=1) + sub_keys1_resh = F.split(sub_keys1, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + sub_keys2_resh = 
F.split(sub_keys2, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + if self.num_heads == 1: + q1 = [q1] + q2 = [q2] + sub_keys1_resh = [sub_keys1_resh ] + sub_keys2_resh = [sub_keys2_resh ] + + q1_dist = F.dot(q1[0], sub_keys1_resh[0], transpose_b=True) + q2_dist = F.dot(q2[0], sub_keys2_resh[0], transpose_b=True) + for h in range(1, self.num_heads): + q1_dist = F.concat(q1_dist, F.dot(q1[0], sub_keys1_resh[h], transpose_b=True), dim=1) + q2_dist = F.concat(q2_dist, F.dot(q2[0], sub_keys1_resh[h], transpose_b=True), dim=1) + + i1 = F.topk(q1_dist, k=self.k, ret_typ="indices") + i2 = F.topk(q2_dist, k=self.k, ret_typ="indices") + + # Calculate cross product for keys at indices I1 and I2 + + # def head_take(data, state): + # return [F.take(data[0], data[2]), F.take(data[1], data[3])], state, + # + # i1 = F.transpose(i1, axes=(1,0,2)) + # i2 = F.transpose(i2, axes=(1, 0, 2)) + # st = F.zeros(1) + # (k1, k2), _ = F.contrib.foreach(head_take, [sub_keys1, sub_keys2,i1,i2], st) + # k1 = F.reshape(k1, shape=(-1, 0, 0), reverse=True) + # k2 = F.reshape(k2, shape=(-1, 0, 0), reverse=True) + i1 = F.split(i1, num_outputs=self.num_heads, axis=1) + i2 = F.split(i2, num_outputs=self.num_heads, axis=1) + sub_keys1 = F.split(sub_keys1, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + sub_keys2 = F.split(sub_keys2, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + if self.num_heads == 1: + i1 = [i1] + i2 = [i2] + sub_keys1 = [sub_keys1] + sub_keys2 = [sub_keys2] + + k1 = F.take(sub_keys1[0], i1[0]) + k2 = F.take(sub_keys2[0], i2[0]) + for h in range(1, self.num_heads): + k1 = F.concat(k1, F.take(sub_keys1[h], i1[h]), dim=1) + k2 = F.concat(k2, F.take(sub_keys2[h], i2[h]), dim=1) + + k1 = F.tile(k1, (1, 1, self.k, 1)) + k2 = F.repeat(k2, self.k, 2) + c_cart = F.concat(k1, k2, dim=3) + + q = F.reshape(q, shape=(-1,0), reverse=True) + q = F.reshape(q, shape=(0, 1, -1)) + c_cart = F.reshape(c_cart, shape=(-1, 0, 0), reverse=True) + if self.dist_measure == "l2": + 
k_diff = F.broadcast_sub(q, c_cart) + k_dist = F.norm(k_diff, axis=-1) + else: + k_dist = F.batch_dot(q, c_cart, transpose_b=True) #F.contrib.foreach(loop_batch_dot, [q, c_cart], init_states=state_batch_dist) + k_dist = F.reshape(k_dist, shape=(0, -1)) + + i = F.topk(k_dist, k=self.k, ret_typ="both") + + w = F.softmax(i[0]) + w = F.reshape(w, shape=(0,1,-1)) + vi = F.take(values, i[1]) + aggr_value = F.batch_dot(w, vi) #F.contrib.foreach(loop_batch_dot, [w, vi], init_states=state_batch_dist) + + ret = F.reshape(aggr_value, shape=(-1, self.num_heads, 0), reverse=True) + one_vec = F.ones((1, 1, self.num_heads)) + one_vec = F.broadcast_like(one_vec, ret, lhs_axes=0, rhs_axes=0) + ret = F.batch_dot(one_vec, ret) + ret = F.reshape(ret, shape=(-1, 0), reverse=True) + + return ret + + def get_query_network(self): + if hasattr(self, 'query_network'): + return self.query_network + else: + self.query_network = gluon.nn.HybridSequential() + for size in self.query_size: + if self.query_act == "linear": + self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, flatten=False)) + else: + self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, activation=self.query_act, flatten=False)) + return self.query_network + + +#EpisodicMemory layer +class EpisodicMemory(EpisodicReplayMemoryInterface): + def __init__(self, + replay_interval, + replay_batch_size, + replay_steps, + replay_gradient_steps, + store_prob, + max_stored_samples, + use_replay, + query_net_dir, + query_net_prefix, + query_net_num_inputs, + **kwargs): + super(EpisodicMemory, self).__init__(use_replay, replay_interval, replay_batch_size, replay_steps, replay_gradient_steps, 1, **kwargs) + with self.name_scope(): + #Replay parameters + self.store_prob = store_prob + self.max_stored_samples = max_stored_samples + + self.query_net_dir = query_net_dir + self.query_net_prefix = query_net_prefix + self.query_net_num_inputs = query_net_num_inputs + + #Memory + self.key_memory = nd.array([]) + 
self.value_memory = nd.array([]) + self.label_memory = nd.array([]) + + def hybrid_forward(self, F, *args): + #propagate the input as the rest is only used for replay + return [args, []] + + def store_samples(self, data, y, query_network, store_prob, context): + num_pus = len(data) + sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] + num_inputs = len(data[0][0]) + num_outputs = len(y) + mx_context = context[0] + + if len(self.key_memory) == 0: + self.key_memory = nd.empty(0, ctx=mx.cpu()) + self.value_memory = [] + self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) + + ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] + + max_inds = [nd.max(ind[i]) for i in range(num_pus)] + if any(max_inds): + to_store_values = [] + for i in range(num_inputs): + tmp_values = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_values, list): + tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) + else: + tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) + to_store_values.append(tmp_values) + + to_store_labels = [] + for i in range(num_outputs): + tmp_labels = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_labels, list): + tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) + else: + tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) + to_store_labels.append(tmp_labels) + + to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) + + if self.key_memory.shape[0] == 0: + self.key_memory = to_store_keys.as_in_context(mx.cpu()) + for i in range(num_inputs): + self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + for i in range(num_outputs): + self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) + elif 
self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: + num_to_store = to_store_keys.shape[0] + self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=1) + else: + self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + + def sample_memory(self, batch_size): + num_stored_samples = self.key_memory.shape[0] + if self.replay_batch_size == -1: + sample_ind = nd.random.randint(0, num_stored_samples, (self.replay_steps, batch_size), ctx=mx.cpu()) + else: + sample_ind = nd.random.randint(0, num_stored_samples, (self.replay_steps, self.replay_batch_size), ctx=mx.cpu()) + + num_outputs = len(self.label_memory) + + sample_labels = [[self.label_memory[i][ind] for i in range(num_outputs)] for ind in sample_ind] + sample_batches = [[[self.value_memory[j][ind] for j in range(len(self.value_memory))], sample_labels[i]] for i, ind in enumerate(sample_ind)] + + return sample_batches + + def get_query_network(self, context): + lastEpoch = 0 + for file in os.listdir(self.query_net_dir): + if self.query_net_prefix in file and ".json" in file: + symbolFile = file + + if self.query_net_prefix in file and ".param" in file: + epochStr = file.replace(".params", "").replace(self.query_net_prefix, "") + epoch = int(epochStr) + if epoch >= lastEpoch: + lastEpoch = epoch + weightFile = file + + inputNames 
= [] + if self.query_net_num_inputs == 1: + inputNames.append("data") + else: + for i in range(self.query_net_num_inputs): + inputNames.append("data" + str(i)) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + net = mx.gluon.nn.SymbolBlock.imports(self.query_net_dir + symbolFile, inputNames, self.query_net_dir + weightFile, ctx=context[0]) + net.hybridize() + return net + + def save_memory(self, path): + mem_arr = [("keys", self.key_memory)] + [("values_"+str(k),v) for (k,v) in enumerate(self.value_memory)] + [("labels_"+str(k),v) for (k,v) in enumerate(self.label_memory)] + mem_dict = {entry[0]:entry[1] for entry in mem_arr} + nd.save(path, mem_dict) + + def load_memory(self, path): + mem_dict = nd.load(path) + self.value_memory = [] + self.label_memory = [] + for key in sorted(mem_dict.keys()): + if key == "keys": + self.key_memory = mem_dict[key] + elif key.startswith("values_"): + self.value_memory.append(mem_dict[key]) + elif key.startswith("labels_"): + self.label_memory.append(mem_dict[key]) + + +#Stream 0 class Net_0(gluon.HybridBlock): - def __init__(self, data_mean=None, data_std=None, **kwargs): + def __init__(self, data_mean=None, data_std=None, mx_context=None, **kwargs): super(Net_0, self).__init__(**kwargs) with self.name_scope(): if data_mean: @@ -173,5 +586,5 @@ class Net_0(gluon.HybridBlock): dis_ = F.identity(sigmoid5_1_) features_ = F.identity(leakyrelu4_) - return dis_, features_ + return [[dis_, features_]] diff --git a/src/test/resources/target_code/gluon/ganModel/infoGAN/gan/CNNNet_infoGAN_infoGANQNetwork.py b/src/test/resources/target_code/gluon/ganModel/infoGAN/gan/CNNNet_infoGAN_infoGANQNetwork.py index a39b2020..abe4ad7a 100644 --- a/src/test/resources/target_code/gluon/ganModel/infoGAN/gan/CNNNet_infoGAN_infoGANQNetwork.py +++ b/src/test/resources/target_code/gluon/ganModel/infoGAN/gan/CNNNet_infoGAN_infoGANQNetwork.py @@ -1,7 +1,10 @@ import mxnet as mx import numpy as np import math -from mxnet import gluon +import 
os +import abc +import warnings +from mxnet import gluon, nd class ZScoreNormalization(gluon.HybridBlock): @@ -86,9 +89,419 @@ class CustomGRU(gluon.HybridBlock): output, [state0] = self.gru(data, [F.swapaxes(state0, 0, 1)]) return output, F.swapaxes(state0, 0, 1) + +class DotProductSelfAttention(gluon.HybridBlock): + def __init__(self, + scale_factor, + num_heads, + dim_model, + dim_keys, + dim_values, + use_proj_bias, + use_mask, + **kwargs): + super(DotProductSelfAttention, self).__init__(**kwargs) + with self.name_scope(): + self.num_heads = num_heads + self.dim_model = dim_model + self.use_proj_bias = use_proj_bias + self.use_mask = use_mask + + if dim_keys == -1: + self.dim_keys = int(dim_model / self.num_heads) + else: + self.dim_keys = dim_keys + if dim_values == -1: + self.dim_values = int(dim_model / self.num_heads) + else: + self.dim_values = dim_values + + if scale_factor == -1: + self.scale_factor = math.sqrt(self.dim_keys) + else: + self.scale_factor = scale_factor + + self.proj_q = gluon.nn.Dense(self.num_heads*self.dim_keys, use_bias=self.use_proj_bias, flatten=False) + self.proj_k = gluon.nn.Dense(self.num_heads*self.dim_keys, use_bias=self.use_proj_bias, flatten=False) + self.proj_v = gluon.nn.Dense(self.num_heads*self.dim_values, use_bias=self.use_proj_bias, flatten=False) + self.proj_o = gluon.nn.Dense(self.dim_model, use_bias=self.use_proj_bias, flatten=False) + + def hybrid_forward(self, F, queries, keys, values, *args, **kwargs): + + queries = F.Reshape(queries, shape=(0, 0,-1)) + keys = F.Reshape(queries, shape=(0, 0, -1)) + values = F.Reshape(queries, shape=(0, 0, -1)) + + head_queries = self.proj_q(queries) + head_keys = self.proj_k(keys) + head_values = self.proj_v(values) + + head_queries = F.reshape(head_queries, shape=(0, 0, self.num_heads, -1)) + head_queries = F.transpose(head_queries, axes=(0,2,1,3)) + head_queries = F.reshape(head_queries, shape=(-1, 0, 0), reverse=True) + + head_keys = F.reshape(head_keys, shape=(0, 0, 
self.num_heads, -1)) + head_keys = F.transpose(head_keys, axes=(0,2,1,3)) + head_keys = F.reshape(head_keys, shape=(-1, 0, 0), reverse=True) + + score = F.batch_dot(head_queries, head_keys, transpose_b=True) + score = score * self.scale_factor + if self.use_mask: + mask = F.tile(mask, self.num_heads) + mask = F.repeat(mask, self.dim_model) + mask = F.reshape(mask, shape=(-1, self.dim_model)) + weights = F.softmax(score, mask, use_length=self.use_mask) + + head_values = F.reshape(head_values, shape=(0, 0, self.num_heads, -1)) + head_values = F.transpose(head_values, axes=(0,2,1,3)) + head_values = F.reshape(head_values, shape=(-1, 0, 0), reverse=True) + + ret = F.batch_dot(weights, head_values) + ret = F.reshape(ret, shape=(-1, self.num_heads, 0, 0), reverse=True) + ret = F.transpose(ret, axes=(0, 2, 1, 3)) + ret = F.reshape(ret, shape=(0, 0, -1)) + + ret = self.proj_o(ret) + + return ret + + +class EpisodicReplayMemoryInterface(gluon.HybridBlock): + __metaclass__ = abc.ABCMeta + + def __init__(self, use_replay, replay_interval, replay_batch_size, replay_steps, replay_gradient_steps, num_heads, **kwargs): + super(EpisodicReplayMemoryInterface, self).__init__(**kwargs) + + self.use_replay = use_replay + self.replay_interval = replay_interval + self.replay_batch_size = replay_batch_size + self.replay_steps = replay_steps + self.replay_gradient_steps = replay_gradient_steps + self.num_heads = num_heads + + @abc.abstractmethod + def store_samples(self, data, y, query_network, store_prob, mx_context): + pass + + @abc.abstractmethod + def sample_memory(self, batch_size, mx_context): + pass + + @abc.abstractmethod + def get_query_network(self, mx_context): + pass + + @abc.abstractmethod + def save_memory(self, path): + pass + + @abc.abstractmethod + def load_memory(self, path): + pass + +#Memory layer +class LargeMemory(gluon.HybridBlock): + def __init__(self, + sub_key_size, + query_size, + query_act, + dist_measure, + k, + num_heads, + values_dim, + **kwargs): + 
super(LargeMemory, self).__init__(**kwargs) + with self.name_scope(): + #Memory parameters + self.dist_measure = dist_measure + self.k = k + self.num_heads = num_heads + self.query_act = query_act + self.query_size = query_size + self.num_heads = num_heads + + #Batch norm sub-layer + self.batch_norm = gluon.nn.BatchNorm() + + #Memory sub-layer + self.sub_key_size = sub_key_size + sub_key_shape = (self.num_heads, self.sub_key_size, int(query_size[-1] / 2)) + + if values_dim == -1: + values_shape = (self.sub_key_size * self.sub_key_size, self.query_size[-1]) + else: + values_shape = (self.sub_key_size*self.sub_key_size, values_dim) + + self.sub_keys1 = self.params.get("sub_keys1", shape=sub_key_shape, differentiable=True) + self.sub_keys2 = self.params.get("sub_keys2", shape=sub_key_shape, differentiable=True) + self.values = self.params.get("values", shape=values_shape, differentiable=True) + self.label_memory = nd.array([]) + + self.get_query_network() + + def hybrid_forward(self, F, x, sub_keys1, sub_keys2, values): + x = self.batch_norm(x) + + x = F.reshape(x, shape=(0, -1)) + + q = self.query_network(x) + + q = F.reshape(q, shape=(0, self.num_heads, -1)) + + q_split = F.split(q, num_outputs=2, axis=-1) + + if self.dist_measure == "l2": + q_split_resh = F.reshape(q_split[0], shape=(0,0,1,-1)) + sub_keys1_resh = F.reshape(sub_keys1, shape=(1,0,0,-1), reverse=True) + q1_diff = F.broadcast_sub(q_split_resh, sub_keys1_resh) + q1_dist = F.norm(q1_diff, axis=-1) + q_split_resh = F.reshape(q_split[1], shape=(0,0,1,-1)) + sub_keys2_resh = F.reshape(sub_keys2, shape=(1,0,0,-1), reverse=True) + q2_diff = F.broadcast_sub(q_split_resh, sub_keys2_resh) + q2_dist = F.norm(q2_diff, axis=-1) + else: + q1 = F.split(q_split[0], num_outputs=self.num_heads, axis=1) + q2 = F.split(q_split[1], num_outputs=self.num_heads, axis=1) + sub_keys1_resh = F.split(sub_keys1, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + sub_keys2_resh = F.split(sub_keys2, num_outputs=self.num_heads, 
axis=0, squeeze_axis=True) + if self.num_heads == 1: + q1 = [q1] + q2 = [q2] + sub_keys1_resh = [sub_keys1_resh ] + sub_keys2_resh = [sub_keys2_resh ] + + q1_dist = F.dot(q1[0], sub_keys1_resh[0], transpose_b=True) + q2_dist = F.dot(q2[0], sub_keys2_resh[0], transpose_b=True) + for h in range(1, self.num_heads): + q1_dist = F.concat(q1_dist, F.dot(q1[0], sub_keys1_resh[h], transpose_b=True), dim=1) + q2_dist = F.concat(q2_dist, F.dot(q2[0], sub_keys1_resh[h], transpose_b=True), dim=1) + + i1 = F.topk(q1_dist, k=self.k, ret_typ="indices") + i2 = F.topk(q2_dist, k=self.k, ret_typ="indices") + + # Calculate cross product for keys at indices I1 and I2 + + # def head_take(data, state): + # return [F.take(data[0], data[2]), F.take(data[1], data[3])], state, + # + # i1 = F.transpose(i1, axes=(1,0,2)) + # i2 = F.transpose(i2, axes=(1, 0, 2)) + # st = F.zeros(1) + # (k1, k2), _ = F.contrib.foreach(head_take, [sub_keys1, sub_keys2,i1,i2], st) + # k1 = F.reshape(k1, shape=(-1, 0, 0), reverse=True) + # k2 = F.reshape(k2, shape=(-1, 0, 0), reverse=True) + i1 = F.split(i1, num_outputs=self.num_heads, axis=1) + i2 = F.split(i2, num_outputs=self.num_heads, axis=1) + sub_keys1 = F.split(sub_keys1, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + sub_keys2 = F.split(sub_keys2, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + if self.num_heads == 1: + i1 = [i1] + i2 = [i2] + sub_keys1 = [sub_keys1] + sub_keys2 = [sub_keys2] + + k1 = F.take(sub_keys1[0], i1[0]) + k2 = F.take(sub_keys2[0], i2[0]) + for h in range(1, self.num_heads): + k1 = F.concat(k1, F.take(sub_keys1[h], i1[h]), dim=1) + k2 = F.concat(k2, F.take(sub_keys2[h], i2[h]), dim=1) + + k1 = F.tile(k1, (1, 1, self.k, 1)) + k2 = F.repeat(k2, self.k, 2) + c_cart = F.concat(k1, k2, dim=3) + + q = F.reshape(q, shape=(-1,0), reverse=True) + q = F.reshape(q, shape=(0, 1, -1)) + c_cart = F.reshape(c_cart, shape=(-1, 0, 0), reverse=True) + if self.dist_measure == "l2": + k_diff = F.broadcast_sub(q, c_cart) + k_dist = 
F.norm(k_diff, axis=-1) + else: + k_dist = F.batch_dot(q, c_cart, transpose_b=True) #F.contrib.foreach(loop_batch_dot, [q, c_cart], init_states=state_batch_dist) + k_dist = F.reshape(k_dist, shape=(0, -1)) + + i = F.topk(k_dist, k=self.k, ret_typ="both") + + w = F.softmax(i[0]) + w = F.reshape(w, shape=(0,1,-1)) + vi = F.take(values, i[1]) + aggr_value = F.batch_dot(w, vi) #F.contrib.foreach(loop_batch_dot, [w, vi], init_states=state_batch_dist) + + ret = F.reshape(aggr_value, shape=(-1, self.num_heads, 0), reverse=True) + one_vec = F.ones((1, 1, self.num_heads)) + one_vec = F.broadcast_like(one_vec, ret, lhs_axes=0, rhs_axes=0) + ret = F.batch_dot(one_vec, ret) + ret = F.reshape(ret, shape=(-1, 0), reverse=True) + + return ret + + def get_query_network(self): + if hasattr(self, 'query_network'): + return self.query_network + else: + self.query_network = gluon.nn.HybridSequential() + for size in self.query_size: + if self.query_act == "linear": + self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, flatten=False)) + else: + self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, activation=self.query_act, flatten=False)) + return self.query_network + + +#EpisodicMemory layer +class EpisodicMemory(EpisodicReplayMemoryInterface): + def __init__(self, + replay_interval, + replay_batch_size, + replay_steps, + replay_gradient_steps, + store_prob, + max_stored_samples, + use_replay, + query_net_dir, + query_net_prefix, + query_net_num_inputs, + **kwargs): + super(EpisodicMemory, self).__init__(use_replay, replay_interval, replay_batch_size, replay_steps, replay_gradient_steps, 1, **kwargs) + with self.name_scope(): + #Replay parameters + self.store_prob = store_prob + self.max_stored_samples = max_stored_samples + + self.query_net_dir = query_net_dir + self.query_net_prefix = query_net_prefix + self.query_net_num_inputs = query_net_num_inputs + + #Memory + self.key_memory = nd.array([]) + self.value_memory = nd.array([]) + self.label_memory = 
nd.array([]) + + def hybrid_forward(self, F, *args): + #propagate the input as the rest is only used for replay + return [args, []] + + def store_samples(self, data, y, query_network, store_prob, context): + num_pus = len(data) + sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] + num_inputs = len(data[0][0]) + num_outputs = len(y) + mx_context = context[0] + + if len(self.key_memory) == 0: + self.key_memory = nd.empty(0, ctx=mx.cpu()) + self.value_memory = [] + self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) + + ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] + + max_inds = [nd.max(ind[i]) for i in range(num_pus)] + if any(max_inds): + to_store_values = [] + for i in range(num_inputs): + tmp_values = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_values, list): + tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) + else: + tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) + to_store_values.append(tmp_values) + + to_store_labels = [] + for i in range(num_outputs): + tmp_labels = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_labels, list): + tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) + else: + tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) + to_store_labels.append(tmp_labels) + + to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) + + if self.key_memory.shape[0] == 0: + self.key_memory = to_store_keys.as_in_context(mx.cpu()) + for i in range(num_inputs): + self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + for i in range(num_outputs): + self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) + elif self.max_stored_samples != -1 and self.key_memory.shape[0] >= 
self.max_stored_samples: + num_to_store = to_store_keys.shape[0] + self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=1) + else: + self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + + def sample_memory(self, batch_size): + num_stored_samples = self.key_memory.shape[0] + if self.replay_batch_size == -1: + sample_ind = nd.random.randint(0, num_stored_samples, (self.replay_steps, batch_size), ctx=mx.cpu()) + else: + sample_ind = nd.random.randint(0, num_stored_samples, (self.replay_steps, self.replay_batch_size), ctx=mx.cpu()) + + num_outputs = len(self.label_memory) + + sample_labels = [[self.label_memory[i][ind] for i in range(num_outputs)] for ind in sample_ind] + sample_batches = [[[self.value_memory[j][ind] for j in range(len(self.value_memory))], sample_labels[i]] for i, ind in enumerate(sample_ind)] + + return sample_batches + + def get_query_network(self, context): + lastEpoch = 0 + for file in os.listdir(self.query_net_dir): + if self.query_net_prefix in file and ".json" in file: + symbolFile = file + + if self.query_net_prefix in file and ".param" in file: + epochStr = file.replace(".params", "").replace(self.query_net_prefix, "") + epoch = int(epochStr) + if epoch >= lastEpoch: + lastEpoch = epoch + weightFile = file + + inputNames = [] + if self.query_net_num_inputs == 1: + 
inputNames.append("data") + else: + for i in range(self.query_net_num_inputs): + inputNames.append("data" + str(i)) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + net = mx.gluon.nn.SymbolBlock.imports(self.query_net_dir + symbolFile, inputNames, self.query_net_dir + weightFile, ctx=context[0]) + net.hybridize() + return net + + def save_memory(self, path): + mem_arr = [("keys", self.key_memory)] + [("values_"+str(k),v) for (k,v) in enumerate(self.value_memory)] + [("labels_"+str(k),v) for (k,v) in enumerate(self.label_memory)] + mem_dict = {entry[0]:entry[1] for entry in mem_arr} + nd.save(path, mem_dict) + + def load_memory(self, path): + mem_dict = nd.load(path) + self.value_memory = [] + self.label_memory = [] + for key in sorted(mem_dict.keys()): + if key == "keys": + self.key_memory = mem_dict[key] + elif key.startswith("values_"): + self.value_memory.append(mem_dict[key]) + elif key.startswith("labels_"): + self.label_memory.append(mem_dict[key]) + + +#Stream 0 class Net_0(gluon.HybridBlock): - def __init__(self, data_mean=None, data_std=None, **kwargs): + def __init__(self, data_mean=None, data_std=None, mx_context=None, **kwargs): super(Net_0, self).__init__(**kwargs) with self.name_scope(): if data_mean: @@ -120,5 +533,5 @@ class Net_0(gluon.HybridBlock): softmax2_ = F.softmax(fc2_, axis=-1) c1_ = F.identity(softmax2_) - return c1_ + return [[c1_]] diff --git a/src/test/resources/target_code/gluon/mnist_mnistClassifier_net.h b/src/test/resources/target_code/gluon/mnist_mnistClassifier_net.h index 8b21dbcd..fdb1bb17 100644 --- a/src/test/resources/target_code/gluon/mnist_mnistClassifier_net.h +++ b/src/test/resources/target_code/gluon/mnist_mnistClassifier_net.h @@ -20,8 +20,10 @@ predictions=colvec(classes); } void execute(){ vector image_ = CNNTranslator::translate(image); + vector predictions_(10); + _predictor_0_.predict(image_, predictions_); predictions = CNNTranslator::translateToCol(predictions_, std::vector {10}); diff --git 
a/src/test/resources/target_code/gluon/reinforcementModel/cartpole/CNNCreator_cartpole_master_dqn.py b/src/test/resources/target_code/gluon/reinforcementModel/cartpole/CNNCreator_cartpole_master_dqn.py index 0f6f1d26..000d6bb9 100644 --- a/src/test/resources/target_code/gluon/reinforcementModel/cartpole/CNNCreator_cartpole_master_dqn.py +++ b/src/test/resources/target_code/gluon/reinforcementModel/cartpole/CNNCreator_cartpole_master_dqn.py @@ -2,6 +2,8 @@ import mxnet as mx import logging import os import shutil +import warnings +import inspect from CNNNet_cartpole_master_dqn import Net_0 @@ -20,6 +22,10 @@ class CNNCreator_cartpole_master_dqn: for i, network in self.networks.items(): lastEpoch = 0 param_file = None + if hasattr(network, 'episodic_sub_nets'): + num_episodic_sub_nets = len(network.episodic_sub_nets) + lastMemEpoch = [0]*num_episodic_sub_nets + mem_files = [None]*num_episodic_sub_nets try: os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + "_newest-0000.params") @@ -30,22 +36,77 @@ class CNNCreator_cartpole_master_dqn: except OSError: pass + if hasattr(network, 'episodic_sub_nets'): + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(0) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(0) + "-symbol.json") + except OSError: + pass + + for j in range(len(network.episodic_sub_nets)): + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(j+1) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(j+1) + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_query_net_' + str(j+1) + "-0000.params") + except OSError: + pass + try: + 
os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_query_net_' + str(j+1) + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_loss' + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_loss' + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + "_newest_episodic_memory_sub_net_" + str(j + 1) + "-0000") + except OSError: + pass + if os.path.isdir(self._model_dir_): for file in os.listdir(self._model_dir_): - if ".params" in file and self._model_prefix_ + "_" + str(i) in file: - epochStr = file.replace(".params","").replace(self._model_prefix_ + "_" + str(i) + "-","") + if ".params" in file and self._model_prefix_ + "_" + str(i) in file and not "loss" in file: + epochStr = file.replace(".params", "").replace(self._model_prefix_ + "_" + str(i) + "-", "") epoch = int(epochStr) - if epoch > lastEpoch: + if epoch >= lastEpoch: lastEpoch = epoch param_file = file + elif hasattr(network, 'episodic_sub_nets') and self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_" in file: + relMemPathInfo = file.replace(self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_", "").split("-") + memSubNet = int(relMemPathInfo[0]) + memEpochStr = relMemPathInfo[1] + memEpoch = int(memEpochStr) + if memEpoch >= lastMemEpoch[memSubNet-1]: + lastMemEpoch[memSubNet-1] = memEpoch + mem_files[memSubNet-1] = file + if param_file is None: earliestLastEpoch = 0 else: logging.info("Loading checkpoint: " + param_file) network.load_parameters(self._model_dir_ + param_file) + if hasattr(network, 'episodic_sub_nets'): + for j, sub_net in enumerate(network.episodic_sub_nets): + if mem_files[j] != None: + logging.info("Loading Replay Memory: " + mem_files[j]) + mem_layer = [param for param in inspect.getmembers(sub_net, lambda x: 
not(inspect.isroutine(x))) if param[0].startswith("memory")][0][1] + mem_layer.load_memory(self._model_dir_ + mem_files[j]) - if earliestLastEpoch == None or lastEpoch < earliestLastEpoch: - earliestLastEpoch = lastEpoch + if earliestLastEpoch == None or lastEpoch + 1 < earliestLastEpoch: + earliestLastEpoch = lastEpoch + 1 return earliestLastEpoch @@ -56,27 +117,52 @@ class CNNCreator_cartpole_master_dqn: for i, network in self.networks.items(): # param_file = self._model_prefix_ + "_" + str(i) + "_newest-0000.params" param_file = None + if hasattr(network, 'episodic_sub_nets'): + num_episodic_sub_nets = len(network.episodic_sub_nets) + lastMemEpoch = [0] * num_episodic_sub_nets + mem_files = [None] * num_episodic_sub_nets + if os.path.isdir(self._weights_dir_): lastEpoch = 0 for file in os.listdir(self._weights_dir_): - if ".params" in file and self._model_prefix_ + "_" + str(i) in file: + if ".params" in file and self._model_prefix_ + "_" + str(i) in file and not "loss" in file: epochStr = file.replace(".params","").replace(self._model_prefix_ + "_" + str(i) + "-","") epoch = int(epochStr) - if epoch > lastEpoch: + if epoch >= lastEpoch: lastEpoch = epoch param_file = file + elif hasattr(network, 'episodic_sub_nets') and self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_" in file: + relMemPathInfo = file.replace(self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_", "").split("-") + memSubNet = int(relMemPathInfo[0]) + memEpochStr = relMemPathInfo[1] + memEpoch = int(memEpochStr) + if memEpoch >= lastMemEpoch[memSubNet-1]: + lastMemEpoch[memSubNet-1] = memEpoch + mem_files[memSubNet-1] = file + logging.info("Loading pretrained weights: " + self._weights_dir_ + param_file) network.load_parameters(self._weights_dir_ + param_file, allow_missing=True, ignore_extra=True) + if hasattr(network, 'episodic_sub_nets'): + assert lastEpoch == lastMemEpoch + for j, sub_net in enumerate(network.episodic_sub_nets): + if mem_files[j] != None: +
class DotProductSelfAttention(gluon.HybridBlock):
    """Multi-head scaled dot-product self-attention: linear projections of
    queries/keys/values, per-head softmax(Q.K^T * scale).V, output projection."""

    def __init__(self,
                 scale_factor,
                 num_heads,
                 dim_model,
                 dim_keys,
                 dim_values,
                 use_proj_bias,
                 use_mask,
                 **kwargs):
        super(DotProductSelfAttention, self).__init__(**kwargs)
        with self.name_scope():
            self.num_heads = num_heads
            self.dim_model = dim_model
            self.use_proj_bias = use_proj_bias
            self.use_mask = use_mask

            # -1 means "derive from the model dimension".
            if dim_keys == -1:
                self.dim_keys = int(dim_model / self.num_heads)
            else:
                self.dim_keys = dim_keys
            if dim_values == -1:
                self.dim_values = int(dim_model / self.num_heads)
            else:
                self.dim_values = dim_values

            # NOTE(review): the score is *multiplied* by this factor below,
            # while the default is sqrt(dim_keys); standard attention divides.
            # Kept as-is -- confirm the intended convention with the generator.
            if scale_factor == -1:
                self.scale_factor = math.sqrt(self.dim_keys)
            else:
                self.scale_factor = scale_factor

            self.proj_q = gluon.nn.Dense(self.num_heads * self.dim_keys, use_bias=self.use_proj_bias, flatten=False)
            self.proj_k = gluon.nn.Dense(self.num_heads * self.dim_keys, use_bias=self.use_proj_bias, flatten=False)
            self.proj_v = gluon.nn.Dense(self.num_heads * self.dim_values, use_bias=self.use_proj_bias, flatten=False)
            self.proj_o = gluon.nn.Dense(self.dim_model, use_bias=self.use_proj_bias, flatten=False)

    def hybrid_forward(self, F, queries, keys, values, *args, **kwargs):
        queries = F.Reshape(queries, shape=(0, 0, -1))
        # FIX: keys and values were reshaped from `queries`, silently discarding
        # the real key/value inputs and turning this into pure query self-matching.
        keys = F.Reshape(keys, shape=(0, 0, -1))
        values = F.Reshape(values, shape=(0, 0, -1))

        head_queries = self.proj_q(queries)
        head_keys = self.proj_k(keys)
        head_values = self.proj_v(values)

        # Fold the head dimension into the batch axis: (B, T, H*D) -> (B*H, T, D).
        head_queries = F.reshape(head_queries, shape=(0, 0, self.num_heads, -1))
        head_queries = F.transpose(head_queries, axes=(0, 2, 1, 3))
        head_queries = F.reshape(head_queries, shape=(-1, 0, 0), reverse=True)

        head_keys = F.reshape(head_keys, shape=(0, 0, self.num_heads, -1))
        head_keys = F.transpose(head_keys, axes=(0, 2, 1, 3))
        head_keys = F.reshape(head_keys, shape=(-1, 0, 0), reverse=True)

        score = F.batch_dot(head_queries, head_keys, transpose_b=True)
        score = score * self.scale_factor
        if self.use_mask:
            # FIX: `mask` was an undefined name in the generated code; it must
            # come from the extra inputs. TODO confirm the caller passes the
            # sequence-length mask as the first additional argument.
            mask = args[0]
            mask = F.tile(mask, self.num_heads)
            mask = F.repeat(mask, self.dim_model)
            mask = F.reshape(mask, shape=(-1, self.dim_model))
            weights = F.softmax(score, mask, use_length=True)
        else:
            weights = F.softmax(score)

        head_values = F.reshape(head_values, shape=(0, 0, self.num_heads, -1))
        head_values = F.transpose(head_values, axes=(0, 2, 1, 3))
        head_values = F.reshape(head_values, shape=(-1, 0, 0), reverse=True)

        ret = F.batch_dot(weights, head_values)
        ret = F.reshape(ret, shape=(-1, self.num_heads, 0, 0), reverse=True)
        ret = F.transpose(ret, axes=(0, 2, 1, 3))
        ret = F.reshape(ret, shape=(0, 0, -1))

        ret = self.proj_o(ret)

        return ret


class EpisodicReplayMemoryInterface(gluon.HybridBlock):
    """Abstract interface that every episodic-replay memory layer implements;
    carries the replay hyper-parameters shared by all implementations."""
    __metaclass__ = abc.ABCMeta

    def __init__(self, use_replay, replay_interval, replay_batch_size, replay_steps, replay_gradient_steps, num_heads, **kwargs):
        super(EpisodicReplayMemoryInterface, self).__init__(**kwargs)

        self.use_replay = use_replay
        self.replay_interval = replay_interval
        self.replay_batch_size = replay_batch_size
        self.replay_steps = replay_steps
        self.replay_gradient_steps = replay_gradient_steps
        self.num_heads = num_heads

    @abc.abstractmethod
    def store_samples(self, data, y, query_network, store_prob, mx_context):
        pass

    @abc.abstractmethod
    def sample_memory(self, batch_size, mx_context):
        pass

    @abc.abstractmethod
    def get_query_network(self, mx_context):
        pass

    @abc.abstractmethod
    def save_memory(self, path):
        pass

    @abc.abstractmethod
    def load_memory(self, path):
        pass


#Memory layer
class LargeMemory(gluon.HybridBlock):
    """Product-key memory layer (cf. Lample et al., "Large Memory Layers with
    Product Keys"): each query is split in two halves matched against two small
    sub-key tables; the cartesian product of the per-half top-k sub-keys
    addresses a large value table."""

    def __init__(self,
                 sub_key_size,
                 query_size,
                 query_act,
                 dist_measure,
                 k,
                 num_heads,
                 values_dim,
                 **kwargs):
        super(LargeMemory, self).__init__(**kwargs)
        with self.name_scope():
            #Memory parameters
            self.dist_measure = dist_measure
            self.k = k
            self.num_heads = num_heads
            self.query_act = query_act
            self.query_size = query_size

            #Batch norm sub-layer
            self.batch_norm = gluon.nn.BatchNorm()

            #Memory sub-layer
            self.sub_key_size = sub_key_size
            sub_key_shape = (self.num_heads, self.sub_key_size, int(query_size[-1] / 2))

            if values_dim == -1:
                values_shape = (self.sub_key_size * self.sub_key_size, self.query_size[-1])
            else:
                values_shape = (self.sub_key_size * self.sub_key_size, values_dim)

            self.sub_keys1 = self.params.get("sub_keys1", shape=sub_key_shape, differentiable=True)
            self.sub_keys2 = self.params.get("sub_keys2", shape=sub_key_shape, differentiable=True)
            self.values = self.params.get("values", shape=values_shape, differentiable=True)
            self.label_memory = nd.array([])

            # Build the query MLP eagerly so its parameters are registered.
            self.get_query_network()

    def hybrid_forward(self, F, x, sub_keys1, sub_keys2, values):
        x = self.batch_norm(x)

        x = F.reshape(x, shape=(0, -1))

        q = self.query_network(x)

        q = F.reshape(q, shape=(0, self.num_heads, -1))

        # Split the query into the two halves matched against sub_keys1/2.
        q_split = F.split(q, num_outputs=2, axis=-1)

        if self.dist_measure == "l2":
            q_split_resh = F.reshape(q_split[0], shape=(0, 0, 1, -1))
            sub_keys1_resh = F.reshape(sub_keys1, shape=(1, 0, 0, -1), reverse=True)
            q1_diff = F.broadcast_sub(q_split_resh, sub_keys1_resh)
            q1_dist = F.norm(q1_diff, axis=-1)
            q_split_resh = F.reshape(q_split[1], shape=(0, 0, 1, -1))
            sub_keys2_resh = F.reshape(sub_keys2, shape=(1, 0, 0, -1), reverse=True)
            q2_diff = F.broadcast_sub(q_split_resh, sub_keys2_resh)
            q2_dist = F.norm(q2_diff, axis=-1)
        else:
            q1 = F.split(q_split[0], num_outputs=self.num_heads, axis=1)
            q2 = F.split(q_split[1], num_outputs=self.num_heads, axis=1)
            sub_keys1_resh = F.split(sub_keys1, num_outputs=self.num_heads, axis=0, squeeze_axis=True)
            sub_keys2_resh = F.split(sub_keys2, num_outputs=self.num_heads, axis=0, squeeze_axis=True)
            if self.num_heads == 1:
                q1 = [q1]
                q2 = [q2]
                sub_keys1_resh = [sub_keys1_resh]
                sub_keys2_resh = [sub_keys2_resh]

            q1_dist = F.dot(q1[0], sub_keys1_resh[0], transpose_b=True)
            q2_dist = F.dot(q2[0], sub_keys2_resh[0], transpose_b=True)
            for h in range(1, self.num_heads):
                # FIX: every head must use its own query half and its own
                # sub-keys; the original used q1[0]/q2[0] for all heads and
                # paired q2 with sub_keys1_resh.
                q1_dist = F.concat(q1_dist, F.dot(q1[h], sub_keys1_resh[h], transpose_b=True), dim=1)
                q2_dist = F.concat(q2_dist, F.dot(q2[h], sub_keys2_resh[h], transpose_b=True), dim=1)

        i1 = F.topk(q1_dist, k=self.k, ret_typ="indices")
        i2 = F.topk(q2_dist, k=self.k, ret_typ="indices")

        # Gather the top-k sub-keys per head and form their cartesian product.
        i1 = F.split(i1, num_outputs=self.num_heads, axis=1)
        i2 = F.split(i2, num_outputs=self.num_heads, axis=1)
        sub_keys1 = F.split(sub_keys1, num_outputs=self.num_heads, axis=0, squeeze_axis=True)
        sub_keys2 = F.split(sub_keys2, num_outputs=self.num_heads, axis=0, squeeze_axis=True)
        if self.num_heads == 1:
            i1 = [i1]
            i2 = [i2]
            sub_keys1 = [sub_keys1]
            sub_keys2 = [sub_keys2]

        k1 = F.take(sub_keys1[0], i1[0])
        k2 = F.take(sub_keys2[0], i2[0])
        for h in range(1, self.num_heads):
            k1 = F.concat(k1, F.take(sub_keys1[h], i1[h]), dim=1)
            k2 = F.concat(k2, F.take(sub_keys2[h], i2[h]), dim=1)

        k1 = F.tile(k1, (1, 1, self.k, 1))
        k2 = F.repeat(k2, self.k, 2)
        c_cart = F.concat(k1, k2, dim=3)

        q = F.reshape(q, shape=(-1, 0), reverse=True)
        q = F.reshape(q, shape=(0, 1, -1))
        c_cart = F.reshape(c_cart, shape=(-1, 0, 0), reverse=True)
        if self.dist_measure == "l2":
            k_diff = F.broadcast_sub(q, c_cart)
            k_dist = F.norm(k_diff, axis=-1)
        else:
            k_dist = F.batch_dot(q, c_cart, transpose_b=True)
            k_dist = F.reshape(k_dist, shape=(0, -1))

        # Final top-k over the k*k candidate keys; softmax-weighted value read.
        i = F.topk(k_dist, k=self.k, ret_typ="both")

        w = F.softmax(i[0])
        w = F.reshape(w, shape=(0, 1, -1))
        vi = F.take(values, i[1])
        aggr_value = F.batch_dot(w, vi)

        # Sum the per-head results back into one vector per sample.
        ret = F.reshape(aggr_value, shape=(-1, self.num_heads, 0), reverse=True)
        one_vec = F.ones((1, 1, self.num_heads))
        one_vec = F.broadcast_like(one_vec, ret, lhs_axes=0, rhs_axes=0)
        ret = F.batch_dot(one_vec, ret)
        ret = F.reshape(ret, shape=(-1, 0), reverse=True)

        return ret

    def get_query_network(self):
        """Return (building on first call) the query MLP; each configured layer
        is widened by num_heads so the heads share one network."""
        if hasattr(self, 'query_network'):
            return self.query_network
        else:
            self.query_network = gluon.nn.HybridSequential()
            for size in self.query_size:
                if self.query_act == "linear":
                    self.query_network.add(gluon.nn.Dense(units=self.num_heads * size, flatten=False))
                else:
                    self.query_network.add(gluon.nn.Dense(units=self.num_heads * size, activation=self.query_act, flatten=False))
            return self.query_network


#EpisodicMemory layer
class EpisodicMemory(EpisodicReplayMemoryInterface):
    """Episodic replay memory: stores (key, values, labels) triples of past
    batches on the CPU and samples them again for replay training steps."""

    def __init__(self,
                 replay_interval,
                 replay_batch_size,
                 replay_steps,
                 replay_gradient_steps,
                 store_prob,
                 max_stored_samples,
                 use_replay,
                 query_net_dir,
                 query_net_prefix,
                 query_net_num_inputs,
                 **kwargs):
        super(EpisodicMemory, self).__init__(use_replay, replay_interval, replay_batch_size, replay_steps, replay_gradient_steps, 1, **kwargs)
        with self.name_scope():
            #Replay parameters
            self.store_prob = store_prob
            self.max_stored_samples = max_stored_samples

            self.query_net_dir = query_net_dir
            self.query_net_prefix = query_net_prefix
            self.query_net_num_inputs = query_net_num_inputs

            #Memory (value/label storage switches to python lists on first store)
            self.key_memory = nd.array([])
            self.value_memory = nd.array([])
            self.label_memory = nd.array([])

    def hybrid_forward(self, F, *args):
        # Pure pass-through: the memory is only consulted during replay.
        return [args, []]

    def store_samples(self, data, y, query_network, store_prob, context):
        """Stochastically append samples of the current batch to the memory.

        data[j][0][i] is input i of the sub-batch on device j and y[i][j] is the
        label array for output i on device j (layout inferred from the indexing
        below -- TODO confirm against the trainer).
        """
        num_pus = len(data)
        sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)]
        num_inputs = len(data[0][0])
        num_outputs = len(y)
        mx_context = context[0]

        if len(self.key_memory) == 0:
            self.key_memory = nd.empty(0, ctx=mx.cpu())
            self.value_memory = []
            self.label_memory = []

        # Per-device binary keep-mask sampled with probability store_prob.
        ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)]

        max_inds = [nd.max(ind[i]) for i in range(num_pus)]
        if any(max_inds):
            to_store_values = []
            for i in range(num_inputs):
                tmp_values = []
                for j in range(0, num_pus):
                    if max_inds[j]:
                        if isinstance(tmp_values, list):
                            tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j])
                        else:
                            tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0)
                to_store_values.append(tmp_values)

            to_store_labels = []
            for i in range(num_outputs):
                tmp_labels = []
                for j in range(0, num_pus):
                    if max_inds[j]:
                        if isinstance(tmp_labels, list):
                            tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j])
                        else:
                            tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0)
                to_store_labels.append(tmp_labels)

            to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs])

            if self.key_memory.shape[0] == 0:
                self.key_memory = to_store_keys.as_in_context(mx.cpu())
                for i in range(num_inputs):
                    self.value_memory.append(to_store_values[i].as_in_context(mx.cpu()))
                for i in range(num_outputs):
                    self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu()))
            elif self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples:
                # Memory full: drop the oldest entries (ring-buffer behaviour).
                num_to_store = to_store_keys.shape[0]
                self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0)
                for i in range(num_inputs):
                    self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0)
                for i in range(num_outputs):
                    # FIX: was dim=1 -- labels are stacked along the sample axis
                    # (axis 0) in the other branches and in sample_memory.
                    self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=0)
            else:
                self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0)
                for i in range(num_inputs):
                    self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0)
                for i in range(num_outputs):
                    self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0)

    def sample_memory(self, batch_size):
        """Draw replay_steps random index batches (with replacement) and return
        the corresponding [[values...], [labels...]] mini-batches."""
        num_stored_samples = self.key_memory.shape[0]
        if self.replay_batch_size == -1:
            sample_ind = nd.random.randint(0, num_stored_samples, (self.replay_steps, batch_size), ctx=mx.cpu())
        else:
            sample_ind = nd.random.randint(0, num_stored_samples, (self.replay_steps, self.replay_batch_size), ctx=mx.cpu())

        num_outputs = len(self.label_memory)

        sample_labels = [[self.label_memory[i][ind] for i in range(num_outputs)] for ind in sample_ind]
        sample_batches = [[[self.value_memory[j][ind] for j in range(len(self.value_memory))], sample_labels[i]] for i, ind in enumerate(sample_ind)]

        return sample_batches

    def get_query_network(self, context):
        """Load the exported query network: symbol file plus the newest .params
        checkpoint found in query_net_dir, placed on context[0]."""
        lastEpoch = 0
        for file in os.listdir(self.query_net_dir):
            if self.query_net_prefix in file and ".json" in file:
                symbolFile = file

            if self.query_net_prefix in file and ".param" in file:
                epochStr = file.replace(".params", "").replace(self.query_net_prefix, "")
                epoch = int(epochStr)
                if epoch >= lastEpoch:
                    lastEpoch = epoch
                    weightFile = file

        inputNames = []
        if self.query_net_num_inputs == 1:
            inputNames.append("data")
        else:
            for i in range(self.query_net_num_inputs):
                inputNames.append("data" + str(i))
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            net = mx.gluon.nn.SymbolBlock.imports(self.query_net_dir + symbolFile, inputNames, self.query_net_dir + weightFile, ctx=context[0])
            net.hybridize()
            return net

    def save_memory(self, path):
        """Serialise keys/values/labels with nd.save under stable names
        ("keys", "values_<i>", "labels_<i>")."""
        mem_arr = [("keys", self.key_memory)] + [("values_" + str(k), v) for (k, v) in enumerate(self.value_memory)] + [("labels_" + str(k), v) for (k, v) in enumerate(self.label_memory)]
        mem_dict = {entry[0]: entry[1] for entry in mem_arr}
        nd.save(path, mem_dict)

    def load_memory(self, path):
        """Inverse of save_memory; rebuilds the value/label lists in sorted
        key order, which matches the numeric suffixes written by save_memory."""
        mem_dict = nd.load(path)
        self.value_memory = []
        self.label_memory = []
        for key in sorted(mem_dict.keys()):
            if key == "keys":
                self.key_memory = mem_dict[key]
            elif key.startswith("values_"):
                self.value_memory.append(mem_dict[key])
            elif key.startswith("labels_"):
                self.label_memory.append(mem_dict[key])
"model/cartpole.agent.CartPoleDQN/model_0_newest"; + + //network + const std::vector network_input_keys = { "data" }; - const std::vector> input_shapes = {{1, 4}}; - const bool use_gpu = false; - - PredictorHandle handle; - + const std::vector> network_input_shapes = {{1, 4}}; + std::vector network_input_sizes; + std::vector> network_arg_names; + std::vector network_handles; + + + //misc + Context ctx = Context::cpu(); //Will be updated later in init according to use_gpu + int dtype = 0; //use data type (float32=0 float64=1 ...) + + explicit CNNPredictor_cartpole_master_dqn_0(){ - init(json_file, param_file, input_keys, input_shapes, use_gpu); + init(file_prefix, network_input_keys, network_input_shapes); } ~CNNPredictor_cartpole_master_dqn_0(){ - if(handle) MXPredFree(handle); + for(Executor * handle : network_handles){ + delete handle; + } + MXNotifyShutdown(); } void predict(const std::vector &in_state_, std::vector &out_qvalues_){ - MXPredSetInput(handle, input_keys[0].c_str(), in_state_.data(), static_cast(in_state_.size())); - - MXPredForward(handle); - mx_uint output_index; - mx_uint *shape = 0; - mx_uint shape_len; - size_t size; - - output_index = 0; - MXPredGetOutputShape(handle, output_index, &shape, &shape_len); - size = 1; - for (mx_uint i = 0; i < shape_len; ++i) size *= shape[i]; - assert(size == out_qvalues_.size()); - MXPredGetOutput(handle, output_index, &(out_qvalues_[0]), out_qvalues_.size()); + NDArray input_temp; + input_temp = NDArray(network_input_shapes[0], ctx, false, dtype); + input_temp.SyncCopyFromCPU(in_state_.data(), network_input_sizes[0]); + input_temp.CopyTo(&(network_handles[0]->arg_dict()[network_input_keys[0]])); + NDArray::WaitAll(); + + network_handles[0]->Forward(false); + CheckMXNetError("Forward, predict, handle ind. 
0"); + + + std::vector output = network_handles.back()->outputs; + std::vector curr_output_shape; + size_t curr_output_size; + curr_output_shape = output[0].GetShape(); + curr_output_size = 1; + for (mx_uint i : curr_output_shape) curr_output_size *= i; + //Fix due to a bug in the in how the output arrays are initialized when there are multiple outputs + assert((curr_output_size == out_qvalues_.size()) || (curr_output_size == out_qvalues_[0])); + output[0].SyncCopyToCPU(&out_qvalues_); + } + + + + Executor* initExecutor(Symbol &sym, + std::map ¶m_map, + const std::vector &exec_input_keys, + const std::vector> &exec_input_shapes){ + + const mx_uint num_exec_input_nodes = exec_input_keys.size(); + for(mx_uint i = 0; i < num_exec_input_nodes; i++){ + param_map[exec_input_keys[i]] = NDArray(exec_input_shapes[i], ctx, false, dtype); + } - void init(const std::string &json_file, - const std::string ¶m_file, - const std::vector &input_keys, - const std::vector> &input_shapes, - const bool &use_gpu){ + std::vector param_arrays; + std::vector grad_array; + std::vector grad_reqs; + std::vector aux_arrays; + std::map< std::string, NDArray> aux_map; - BufferFile json_data(json_file); - BufferFile param_data(param_file); + sym.InferExecutorArrays(ctx, ¶m_arrays, &grad_array, &grad_reqs, + &aux_arrays, param_map, std::map(), + std::map(), aux_map); - int dev_type = use_gpu ? 
2 : 1; - int dev_id = 0; + Executor *handle = new Executor(sym, ctx, param_arrays, grad_array, grad_reqs, aux_arrays); + assert(handle); + return handle; + } - if (json_data.GetLength() == 0 || - param_data.GetLength() == 0) { - std::exit(-1); + std::vector getSizesOfShapes(const std::vector> shapes){ + std::vector sizes; + for(std::vector shape : shapes){ + mx_uint val = 1; + for(mx_uint i: shape){ + val *= i; + } + sizes.push_back(val); } + return sizes; + } - const mx_uint num_input_nodes = input_keys.size(); - - const char* input_keys_ptr[num_input_nodes]; - for(mx_uint i = 0; i < num_input_nodes; i++){ - input_keys_ptr[i] = input_keys[i].c_str(); + void CheckMXNetError(std::string loc){ + const char* err = MXGetLastError(); + if (err && err[0] != 0) { + std::cout << "MXNet error at " << loc << err << std::endl; + exit(-1); } - - mx_uint shape_data_size = 0; - mx_uint input_shape_indptr[input_shapes.size() + 1]; - input_shape_indptr[0] = 0; - for(mx_uint i = 0; i < input_shapes.size(); i++){ - shape_data_size += input_shapes[i].size(); - input_shape_indptr[i+1] = shape_data_size; + } + + void init(const std::string &file_prefix, + const std::vector &network_input_keys, + const std::vector> &network_input_shapes){ + + CNNLAOptimizer_cartpole_master_dqn optimizer_creator = CNNLAOptimizer_cartpole_master_dqn(); + + if(optimizer_creator.getContextName() == "gpu"){ + ctx = Context::gpu(); } - - mx_uint input_shape_data[shape_data_size]; - mx_uint index = 0; - for(mx_uint i = 0; i < input_shapes.size(); i++){ - for(mx_uint j = 0; j < input_shapes[i].size(); j++){ - input_shape_data[index] = input_shapes[i][j]; - index++; - } + + network_input_sizes = getSizesOfShapes(network_input_shapes); + + ModelLoader model_loader(file_prefix, 0, ctx); + + std::vector network_symbols = model_loader.GetNetworkSymbols(); + std::vector> network_param_maps; + network_param_maps = model_loader.GetNetworkParamMaps(); + + //Init handles + std::map> in_shape_map; + for(mx_uint i=0; i < 
network_input_keys.size(); i++){ + in_shape_map[network_input_keys[i]] = network_input_shapes[i]; } - - MXPredCreate(static_cast(json_data.GetBuffer()), - static_cast(param_data.GetBuffer()), - static_cast(param_data.GetLength()), - dev_type, - dev_id, - num_input_nodes, - input_keys_ptr, - input_shape_indptr, - input_shape_data, - &handle); - assert(handle); + std::vector> in_shapes; + std::vector> aux_shapes; + std::vector> out_shapes; + network_symbols[0].InferShape(in_shape_map, &in_shapes, &aux_shapes, &out_shapes); + network_handles.push_back(initExecutor(network_symbols[0], network_param_maps[0], network_input_keys, network_input_shapes)); + } }; - #endif // CNNPREDICTOR_CARTPOLE_MASTER_DQN diff --git a/src/test/resources/target_code/gluon/reinforcementModel/cartpole/CNNTrainer_cartpole_master_dqn.py b/src/test/resources/target_code/gluon/reinforcementModel/cartpole/CNNTrainer_cartpole_master_dqn.py index f650e1da..a8315885 100644 --- a/src/test/resources/target_code/gluon/reinforcementModel/cartpole/CNNTrainer_cartpole_master_dqn.py +++ b/src/test/resources/target_code/gluon/reinforcementModel/cartpole/CNNTrainer_cartpole_master_dqn.py @@ -58,6 +58,7 @@ if __name__ == "__main__": 'state_dtype': 'float32', 'action_dtype': 'uint8', 'rewards_dtype': 'float32' + }, 'strategy_params': { 'method':'epsgreedy', diff --git a/src/test/resources/target_code/gluon/reinforcementModel/cartpole/cartpole_master_dqn.h b/src/test/resources/target_code/gluon/reinforcementModel/cartpole/cartpole_master_dqn.h index 8ecc9075..9269801b 100644 --- a/src/test/resources/target_code/gluon/reinforcementModel/cartpole/cartpole_master_dqn.h +++ b/src/test/resources/target_code/gluon/reinforcementModel/cartpole/cartpole_master_dqn.h @@ -19,8 +19,10 @@ qvalues=colvec(2); } void execute(){ vector state_ = CNNTranslator::translate(state); + vector qvalues_(2); + _predictor_0_.predict(state_, qvalues_); qvalues = CNNTranslator::translateToCol(qvalues_, std::vector {2}); diff --git 
a/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/CNNCreator_mountaincar_master_actor.py b/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/CNNCreator_mountaincar_master_actor.py index 3ad33708..74e18c53 100644 --- a/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/CNNCreator_mountaincar_master_actor.py +++ b/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/CNNCreator_mountaincar_master_actor.py @@ -2,6 +2,8 @@ import mxnet as mx import logging import os import shutil +import warnings +import inspect from CNNNet_mountaincar_master_actor import Net_0 @@ -20,6 +22,10 @@ class CNNCreator_mountaincar_master_actor: for i, network in self.networks.items(): lastEpoch = 0 param_file = None + if hasattr(network, 'episodic_sub_nets'): + num_episodic_sub_nets = len(network.episodic_sub_nets) + lastMemEpoch = [0]*num_episodic_sub_nets + mem_files = [None]*num_episodic_sub_nets try: os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + "_newest-0000.params") @@ -30,22 +36,77 @@ class CNNCreator_mountaincar_master_actor: except OSError: pass + if hasattr(network, 'episodic_sub_nets'): + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(0) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(0) + "-symbol.json") + except OSError: + pass + + for j in range(len(network.episodic_sub_nets)): + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(j+1) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(j+1) + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_query_net_' + str(j+1) + "-0000.params") + except 
OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_query_net_' + str(j+1) + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_loss' + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_loss' + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + "_newest_episodic_memory_sub_net_" + str(j + 1) + "-0000") + except OSError: + pass + if os.path.isdir(self._model_dir_): for file in os.listdir(self._model_dir_): - if ".params" in file and self._model_prefix_ + "_" + str(i) in file: - epochStr = file.replace(".params","").replace(self._model_prefix_ + "_" + str(i) + "-","") + if ".params" in file and self._model_prefix_ + "_" + str(i) in file and not "loss" in file: + epochStr = file.replace(".params", "").replace(self._model_prefix_ + "_" + str(i) + "-", "") epoch = int(epochStr) - if epoch > lastEpoch: + if epoch >= lastEpoch: lastEpoch = epoch param_file = file + elif hasattr(network, 'episodic_sub_nets') and self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_" in file: + relMemPathInfo = file.replace(self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_", "").split("-") + memSubNet = int(relMemPathInfo[0]) + memEpochStr = relMemPathInfo[1] + memEpoch = int(memEpochStr) + if memEpoch >= lastMemEpoch[memSubNet-1]: + lastMemEpoch[memSubNet-1] = memEpoch + mem_files[memSubNet-1] = file + if param_file is None: earliestLastEpoch = 0 else: logging.info("Loading checkpoint: " + param_file) network.load_parameters(self._model_dir_ + param_file) + if hasattr(network, 'episodic_sub_nets'): + for j, sub_net in enumerate(network.episodic_sub_nets): + if mem_files[j] != None: + logging.info("Loading Replay Memory: " + mem_files[j]) + mem_layer = [param for param in inspect.getmembers(sub_net, 
lambda x: not(inspect.isroutine(x))) if param[0].startswith("memory")][0][1] + mem_layer.load_memory(self._model_dir_ + mem_files[j]) - if earliestLastEpoch == None or lastEpoch < earliestLastEpoch: - earliestLastEpoch = lastEpoch + if earliestLastEpoch == None or lastEpoch + 1 < earliestLastEpoch: + earliestLastEpoch = lastEpoch + 1 return earliestLastEpoch @@ -56,27 +117,52 @@ class CNNCreator_mountaincar_master_actor: for i, network in self.networks.items(): # param_file = self._model_prefix_ + "_" + str(i) + "_newest-0000.params" param_file = None + if hasattr(network, 'episodic_sub_nets'): + num_episodic_sub_nets = len(network.episodic_sub_nets) + lastMemEpoch = [0] * num_episodic_sub_nets + mem_files = [None] * num_episodic_sub_nets + if os.path.isdir(self._weights_dir_): lastEpoch = 0 for file in os.listdir(self._weights_dir_): - if ".params" in file and self._model_prefix_ + "_" + str(i) in file: + if ".params" in file and self._model_prefix_ + "_" + str(i) in file and not "loss" in file: epochStr = file.replace(".params","").replace(self._model_prefix_ + "_" + str(i) + "-","") epoch = int(epochStr) - if epoch > lastEpoch: + if epoch >= lastEpoch: lastEpoch = epoch param_file = file + elif hasattr(network, 'episodic_sub_nets') and self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_" in file: + relMemPathInfo = file.replace(self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_").split("-") + memSubNet = int(relMemPathInfo[0]) + memEpochStr = relMemPathInfo[1] + memEpoch = int(memEpochStr) + if memEpoch >= lastMemEpoch[memSubNet-1]: + lastMemEpoch[memSubNet-1] = memEpoch + mem_files[memSubNet-1] = file + logging.info("Loading pretrained weights: " + self._weights_dir_ + param_file) network.load_parameters(self._weights_dir_ + param_file, allow_missing=True, ignore_extra=True) + if hasattr(network, 'episodic_sub_nets'): + assert lastEpoch == lastMemEpoch + for j, sub_net in enumerate(network.episodic_sub_nets): + if mem_files[j] != None: 
+ logging.info("Loading pretrained Replay Memory: " + mem_files[j]) + mem_layer = \ + [param for param in inspect.getmembers(sub_net, lambda x: not (inspect.isroutine(x))) if + param[0].startswith("memory")][0][1] + mem_layer.load_memory(self._model_dir_ + mem_files[j]) else: logging.info("No pretrained weights available at: " + self._weights_dir_ + param_file) def construct(self, context, data_mean=None, data_std=None): - self.networks[0] = Net_0(data_mean=data_mean, data_std=data_std) - self.networks[0].collect_params().initialize(self.weight_initializer, ctx=context) + self.networks[0] = Net_0(data_mean=data_mean, data_std=data_std, mx_context=context, prefix="") + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + self.networks[0].collect_params().initialize(self.weight_initializer, force_reinit=False, ctx=context) self.networks[0].hybridize() - self.networks[0](mx.nd.zeros((1, 2,), ctx=context)) + self.networks[0](mx.nd.zeros((1, 2,), ctx=context[0])) if not os.path.exists(self._model_dir_): os.makedirs(self._model_dir_) diff --git a/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/CNNNet_mountaincar_master_actor.py b/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/CNNNet_mountaincar_master_actor.py index 2bec1be4..ddc42e41 100644 --- a/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/CNNNet_mountaincar_master_actor.py +++ b/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/CNNNet_mountaincar_master_actor.py @@ -1,7 +1,10 @@ import mxnet as mx import numpy as np import math -from mxnet import gluon +import os +import abc +import warnings +from mxnet import gluon, nd class ZScoreNormalization(gluon.HybridBlock): @@ -86,9 +89,419 @@ class CustomGRU(gluon.HybridBlock): output, [state0] = self.gru(data, [F.swapaxes(state0, 0, 1)]) return output, F.swapaxes(state0, 0, 1) + +class DotProductSelfAttention(gluon.HybridBlock): + def __init__(self, + scale_factor, + num_heads, + 
dim_model, + dim_keys, + dim_values, + use_proj_bias, + use_mask, + **kwargs): + super(DotProductSelfAttention, self).__init__(**kwargs) + with self.name_scope(): + self.num_heads = num_heads + self.dim_model = dim_model + self.use_proj_bias = use_proj_bias + self.use_mask = use_mask + + if dim_keys == -1: + self.dim_keys = int(dim_model / self.num_heads) + else: + self.dim_keys = dim_keys + if dim_values == -1: + self.dim_values = int(dim_model / self.num_heads) + else: + self.dim_values = dim_values + + if scale_factor == -1: + self.scale_factor = math.sqrt(self.dim_keys) + else: + self.scale_factor = scale_factor + + self.proj_q = gluon.nn.Dense(self.num_heads*self.dim_keys, use_bias=self.use_proj_bias, flatten=False) + self.proj_k = gluon.nn.Dense(self.num_heads*self.dim_keys, use_bias=self.use_proj_bias, flatten=False) + self.proj_v = gluon.nn.Dense(self.num_heads*self.dim_values, use_bias=self.use_proj_bias, flatten=False) + self.proj_o = gluon.nn.Dense(self.dim_model, use_bias=self.use_proj_bias, flatten=False) + + def hybrid_forward(self, F, queries, keys, values, *args, **kwargs): + + queries = F.Reshape(queries, shape=(0, 0,-1)) + keys = F.Reshape(queries, shape=(0, 0, -1)) + values = F.Reshape(queries, shape=(0, 0, -1)) + + head_queries = self.proj_q(queries) + head_keys = self.proj_k(keys) + head_values = self.proj_v(values) + + head_queries = F.reshape(head_queries, shape=(0, 0, self.num_heads, -1)) + head_queries = F.transpose(head_queries, axes=(0,2,1,3)) + head_queries = F.reshape(head_queries, shape=(-1, 0, 0), reverse=True) + + head_keys = F.reshape(head_keys, shape=(0, 0, self.num_heads, -1)) + head_keys = F.transpose(head_keys, axes=(0,2,1,3)) + head_keys = F.reshape(head_keys, shape=(-1, 0, 0), reverse=True) + + score = F.batch_dot(head_queries, head_keys, transpose_b=True) + score = score * self.scale_factor + if self.use_mask: + mask = F.tile(mask, self.num_heads) + mask = F.repeat(mask, self.dim_model) + mask = F.reshape(mask, shape=(-1, 
self.dim_model)) + weights = F.softmax(score, mask, use_length=self.use_mask) + + head_values = F.reshape(head_values, shape=(0, 0, self.num_heads, -1)) + head_values = F.transpose(head_values, axes=(0,2,1,3)) + head_values = F.reshape(head_values, shape=(-1, 0, 0), reverse=True) + + ret = F.batch_dot(weights, head_values) + ret = F.reshape(ret, shape=(-1, self.num_heads, 0, 0), reverse=True) + ret = F.transpose(ret, axes=(0, 2, 1, 3)) + ret = F.reshape(ret, shape=(0, 0, -1)) + + ret = self.proj_o(ret) + + return ret + + +class EpisodicReplayMemoryInterface(gluon.HybridBlock): + __metaclass__ = abc.ABCMeta + + def __init__(self, use_replay, replay_interval, replay_batch_size, replay_steps, replay_gradient_steps, num_heads, **kwargs): + super(EpisodicReplayMemoryInterface, self).__init__(**kwargs) + + self.use_replay = use_replay + self.replay_interval = replay_interval + self.replay_batch_size = replay_batch_size + self.replay_steps = replay_steps + self.replay_gradient_steps = replay_gradient_steps + self.num_heads = num_heads + + @abc.abstractmethod + def store_samples(self, data, y, query_network, store_prob, mx_context): + pass + + @abc.abstractmethod + def sample_memory(self, batch_size, mx_context): + pass + + @abc.abstractmethod + def get_query_network(self, mx_context): + pass + + @abc.abstractmethod + def save_memory(self, path): + pass + + @abc.abstractmethod + def load_memory(self, path): + pass + +#Memory layer +class LargeMemory(gluon.HybridBlock): + def __init__(self, + sub_key_size, + query_size, + query_act, + dist_measure, + k, + num_heads, + values_dim, + **kwargs): + super(LargeMemory, self).__init__(**kwargs) + with self.name_scope(): + #Memory parameters + self.dist_measure = dist_measure + self.k = k + self.num_heads = num_heads + self.query_act = query_act + self.query_size = query_size + self.num_heads = num_heads + + #Batch norm sub-layer + self.batch_norm = gluon.nn.BatchNorm() + + #Memory sub-layer + self.sub_key_size = sub_key_size + 
sub_key_shape = (self.num_heads, self.sub_key_size, int(query_size[-1] / 2)) + + if values_dim == -1: + values_shape = (self.sub_key_size * self.sub_key_size, self.query_size[-1]) + else: + values_shape = (self.sub_key_size*self.sub_key_size, values_dim) + + self.sub_keys1 = self.params.get("sub_keys1", shape=sub_key_shape, differentiable=True) + self.sub_keys2 = self.params.get("sub_keys2", shape=sub_key_shape, differentiable=True) + self.values = self.params.get("values", shape=values_shape, differentiable=True) + self.label_memory = nd.array([]) + + self.get_query_network() + + def hybrid_forward(self, F, x, sub_keys1, sub_keys2, values): + x = self.batch_norm(x) + + x = F.reshape(x, shape=(0, -1)) + + q = self.query_network(x) + + q = F.reshape(q, shape=(0, self.num_heads, -1)) + + q_split = F.split(q, num_outputs=2, axis=-1) + + if self.dist_measure == "l2": + q_split_resh = F.reshape(q_split[0], shape=(0,0,1,-1)) + sub_keys1_resh = F.reshape(sub_keys1, shape=(1,0,0,-1), reverse=True) + q1_diff = F.broadcast_sub(q_split_resh, sub_keys1_resh) + q1_dist = F.norm(q1_diff, axis=-1) + q_split_resh = F.reshape(q_split[1], shape=(0,0,1,-1)) + sub_keys2_resh = F.reshape(sub_keys2, shape=(1,0,0,-1), reverse=True) + q2_diff = F.broadcast_sub(q_split_resh, sub_keys2_resh) + q2_dist = F.norm(q2_diff, axis=-1) + else: + q1 = F.split(q_split[0], num_outputs=self.num_heads, axis=1) + q2 = F.split(q_split[1], num_outputs=self.num_heads, axis=1) + sub_keys1_resh = F.split(sub_keys1, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + sub_keys2_resh = F.split(sub_keys2, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + if self.num_heads == 1: + q1 = [q1] + q2 = [q2] + sub_keys1_resh = [sub_keys1_resh ] + sub_keys2_resh = [sub_keys2_resh ] + + q1_dist = F.dot(q1[0], sub_keys1_resh[0], transpose_b=True) + q2_dist = F.dot(q2[0], sub_keys2_resh[0], transpose_b=True) + for h in range(1, self.num_heads): + q1_dist = F.concat(q1_dist, F.dot(q1[0], sub_keys1_resh[h], 
transpose_b=True), dim=1) + q2_dist = F.concat(q2_dist, F.dot(q2[0], sub_keys1_resh[h], transpose_b=True), dim=1) + + i1 = F.topk(q1_dist, k=self.k, ret_typ="indices") + i2 = F.topk(q2_dist, k=self.k, ret_typ="indices") + + # Calculate cross product for keys at indices I1 and I2 + + # def head_take(data, state): + # return [F.take(data[0], data[2]), F.take(data[1], data[3])], state, + # + # i1 = F.transpose(i1, axes=(1,0,2)) + # i2 = F.transpose(i2, axes=(1, 0, 2)) + # st = F.zeros(1) + # (k1, k2), _ = F.contrib.foreach(head_take, [sub_keys1, sub_keys2,i1,i2], st) + # k1 = F.reshape(k1, shape=(-1, 0, 0), reverse=True) + # k2 = F.reshape(k2, shape=(-1, 0, 0), reverse=True) + i1 = F.split(i1, num_outputs=self.num_heads, axis=1) + i2 = F.split(i2, num_outputs=self.num_heads, axis=1) + sub_keys1 = F.split(sub_keys1, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + sub_keys2 = F.split(sub_keys2, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + if self.num_heads == 1: + i1 = [i1] + i2 = [i2] + sub_keys1 = [sub_keys1] + sub_keys2 = [sub_keys2] + + k1 = F.take(sub_keys1[0], i1[0]) + k2 = F.take(sub_keys2[0], i2[0]) + for h in range(1, self.num_heads): + k1 = F.concat(k1, F.take(sub_keys1[h], i1[h]), dim=1) + k2 = F.concat(k2, F.take(sub_keys2[h], i2[h]), dim=1) + + k1 = F.tile(k1, (1, 1, self.k, 1)) + k2 = F.repeat(k2, self.k, 2) + c_cart = F.concat(k1, k2, dim=3) + + q = F.reshape(q, shape=(-1,0), reverse=True) + q = F.reshape(q, shape=(0, 1, -1)) + c_cart = F.reshape(c_cart, shape=(-1, 0, 0), reverse=True) + if self.dist_measure == "l2": + k_diff = F.broadcast_sub(q, c_cart) + k_dist = F.norm(k_diff, axis=-1) + else: + k_dist = F.batch_dot(q, c_cart, transpose_b=True) #F.contrib.foreach(loop_batch_dot, [q, c_cart], init_states=state_batch_dist) + k_dist = F.reshape(k_dist, shape=(0, -1)) + + i = F.topk(k_dist, k=self.k, ret_typ="both") + + w = F.softmax(i[0]) + w = F.reshape(w, shape=(0,1,-1)) + vi = F.take(values, i[1]) + aggr_value = F.batch_dot(w, vi) 
#F.contrib.foreach(loop_batch_dot, [w, vi], init_states=state_batch_dist) + + ret = F.reshape(aggr_value, shape=(-1, self.num_heads, 0), reverse=True) + one_vec = F.ones((1, 1, self.num_heads)) + one_vec = F.broadcast_like(one_vec, ret, lhs_axes=0, rhs_axes=0) + ret = F.batch_dot(one_vec, ret) + ret = F.reshape(ret, shape=(-1, 0), reverse=True) + + return ret + + def get_query_network(self): + if hasattr(self, 'query_network'): + return self.query_network + else: + self.query_network = gluon.nn.HybridSequential() + for size in self.query_size: + if self.query_act == "linear": + self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, flatten=False)) + else: + self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, activation=self.query_act, flatten=False)) + return self.query_network + + +#EpisodicMemory layer +class EpisodicMemory(EpisodicReplayMemoryInterface): + def __init__(self, + replay_interval, + replay_batch_size, + replay_steps, + replay_gradient_steps, + store_prob, + max_stored_samples, + use_replay, + query_net_dir, + query_net_prefix, + query_net_num_inputs, + **kwargs): + super(EpisodicMemory, self).__init__(use_replay, replay_interval, replay_batch_size, replay_steps, replay_gradient_steps, 1, **kwargs) + with self.name_scope(): + #Replay parameters + self.store_prob = store_prob + self.max_stored_samples = max_stored_samples + + self.query_net_dir = query_net_dir + self.query_net_prefix = query_net_prefix + self.query_net_num_inputs = query_net_num_inputs + + #Memory + self.key_memory = nd.array([]) + self.value_memory = nd.array([]) + self.label_memory = nd.array([]) + + def hybrid_forward(self, F, *args): + #propagate the input as the rest is only used for replay + return [args, []] + + def store_samples(self, data, y, query_network, store_prob, context): + num_pus = len(data) + sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] + num_inputs = len(data[0][0]) + num_outputs = len(y) + mx_context = context[0] + + if 
len(self.key_memory) == 0: + self.key_memory = nd.empty(0, ctx=mx.cpu()) + self.value_memory = [] + self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) + + ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] + + max_inds = [nd.max(ind[i]) for i in range(num_pus)] + if any(max_inds): + to_store_values = [] + for i in range(num_inputs): + tmp_values = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_values, list): + tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) + else: + tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) + to_store_values.append(tmp_values) + + to_store_labels = [] + for i in range(num_outputs): + tmp_labels = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_labels, list): + tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) + else: + tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) + to_store_labels.append(tmp_labels) + + to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) + + if self.key_memory.shape[0] == 0: + self.key_memory = to_store_keys.as_in_context(mx.cpu()) + for i in range(num_inputs): + self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + for i in range(num_outputs): + self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) + elif self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: + num_to_store = to_store_keys.shape[0] + self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = 
nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=1) + else: + self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + + def sample_memory(self, batch_size): + num_stored_samples = self.key_memory.shape[0] + if self.replay_batch_size == -1: + sample_ind = nd.random.randint(0, num_stored_samples, (self.replay_steps, batch_size), ctx=mx.cpu()) + else: + sample_ind = nd.random.randint(0, num_stored_samples, (self.replay_steps, self.replay_batch_size), ctx=mx.cpu()) + + num_outputs = len(self.label_memory) + + sample_labels = [[self.label_memory[i][ind] for i in range(num_outputs)] for ind in sample_ind] + sample_batches = [[[self.value_memory[j][ind] for j in range(len(self.value_memory))], sample_labels[i]] for i, ind in enumerate(sample_ind)] + + return sample_batches + + def get_query_network(self, context): + lastEpoch = 0 + for file in os.listdir(self.query_net_dir): + if self.query_net_prefix in file and ".json" in file: + symbolFile = file + + if self.query_net_prefix in file and ".param" in file: + epochStr = file.replace(".params", "").replace(self.query_net_prefix, "") + epoch = int(epochStr) + if epoch >= lastEpoch: + lastEpoch = epoch + weightFile = file + + inputNames = [] + if self.query_net_num_inputs == 1: + inputNames.append("data") + else: + for i in range(self.query_net_num_inputs): + inputNames.append("data" + str(i)) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + net = mx.gluon.nn.SymbolBlock.imports(self.query_net_dir + symbolFile, inputNames, self.query_net_dir + weightFile, ctx=context[0]) + net.hybridize() + return net + + def save_memory(self, path): + mem_arr = 
[("keys", self.key_memory)] + [("values_"+str(k),v) for (k,v) in enumerate(self.value_memory)] + [("labels_"+str(k),v) for (k,v) in enumerate(self.label_memory)] + mem_dict = {entry[0]:entry[1] for entry in mem_arr} + nd.save(path, mem_dict) + + def load_memory(self, path): + mem_dict = nd.load(path) + self.value_memory = [] + self.label_memory = [] + for key in sorted(mem_dict.keys()): + if key == "keys": + self.key_memory = mem_dict[key] + elif key.startswith("values_"): + self.value_memory.append(mem_dict[key]) + elif key.startswith("labels_"): + self.label_memory.append(mem_dict[key]) + + +#Stream 0 class Net_0(gluon.HybridBlock): - def __init__(self, data_mean=None, data_std=None, **kwargs): + def __init__(self, data_mean=None, data_std=None, mx_context=None, **kwargs): super(Net_0, self).__init__(**kwargs) with self.name_scope(): if data_mean: @@ -123,5 +536,5 @@ class Net_0(gluon.HybridBlock): tanh3_ = self.tanh3_(fc3_) action_ = F.identity(tanh3_) - return action_ + return [[action_]] diff --git a/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/CNNPredictor_mountaincar_master_actor.h b/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/CNNPredictor_mountaincar_master_actor.h index 4a59ca94..7f843f82 100644 --- a/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/CNNPredictor_mountaincar_master_actor.h +++ b/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/CNNPredictor_mountaincar_master_actor.h @@ -1,107 +1,149 @@ #ifndef CNNPREDICTOR_MOUNTAINCAR_MASTER_ACTOR #define CNNPREDICTOR_MOUNTAINCAR_MASTER_ACTOR -#include +#include #include #include #include + +#include +#include -#include - +using namespace mxnet::cpp; + class CNNPredictor_mountaincar_master_actor_0{ public: - const std::string json_file = "model/mountaincar.agent.MountaincarActor/model_0_newest-symbol.json"; - const std::string param_file = "model/mountaincar.agent.MountaincarActor/model_0_newest-0000.params"; - const 
std::vector input_keys = { + const std::string file_prefix = "model/mountaincar.agent.MountaincarActor/model_0_newest"; + + //network + const std::vector network_input_keys = { "data" }; - const std::vector> input_shapes = {{1, 2}}; - const bool use_gpu = false; - - PredictorHandle handle; - + const std::vector> network_input_shapes = {{1, 2}}; + std::vector network_input_sizes; + std::vector> network_arg_names; + std::vector network_handles; + + + //misc + Context ctx = Context::cpu(); //Will be updated later in init according to use_gpu + int dtype = 0; //use data type (float32=0 float64=1 ...) + + explicit CNNPredictor_mountaincar_master_actor_0(){ - init(json_file, param_file, input_keys, input_shapes, use_gpu); + init(file_prefix, network_input_keys, network_input_shapes); } ~CNNPredictor_mountaincar_master_actor_0(){ - if(handle) MXPredFree(handle); + for(Executor * handle : network_handles){ + delete handle; + } + MXNotifyShutdown(); } void predict(const std::vector &in_state_, std::vector &out_action_){ - MXPredSetInput(handle, input_keys[0].c_str(), in_state_.data(), static_cast(in_state_.size())); - - MXPredForward(handle); - mx_uint output_index; - mx_uint *shape = 0; - mx_uint shape_len; - size_t size; - - output_index = 0; - MXPredGetOutputShape(handle, output_index, &shape, &shape_len); - size = 1; - for (mx_uint i = 0; i < shape_len; ++i) size *= shape[i]; - assert(size == out_action_.size()); - MXPredGetOutput(handle, output_index, &(out_action_[0]), out_action_.size()); + NDArray input_temp; + input_temp = NDArray(network_input_shapes[0], ctx, false, dtype); + input_temp.SyncCopyFromCPU(in_state_.data(), network_input_sizes[0]); + input_temp.CopyTo(&(network_handles[0]->arg_dict()[network_input_keys[0]])); + NDArray::WaitAll(); + + network_handles[0]->Forward(false); + CheckMXNetError("Forward, predict, handle ind. 
0"); + + + std::vector output = network_handles.back()->outputs; + std::vector curr_output_shape; + size_t curr_output_size; + curr_output_shape = output[0].GetShape(); + curr_output_size = 1; + for (mx_uint i : curr_output_shape) curr_output_size *= i; + //Fix due to a bug in the in how the output arrays are initialized when there are multiple outputs + assert((curr_output_size == out_action_.size()) || (curr_output_size == out_action_[0])); + output[0].SyncCopyToCPU(&out_action_); + } + + + + Executor* initExecutor(Symbol &sym, + std::map ¶m_map, + const std::vector &exec_input_keys, + const std::vector> &exec_input_shapes){ + + const mx_uint num_exec_input_nodes = exec_input_keys.size(); + for(mx_uint i = 0; i < num_exec_input_nodes; i++){ + param_map[exec_input_keys[i]] = NDArray(exec_input_shapes[i], ctx, false, dtype); + } - void init(const std::string &json_file, - const std::string ¶m_file, - const std::vector &input_keys, - const std::vector> &input_shapes, - const bool &use_gpu){ + std::vector param_arrays; + std::vector grad_array; + std::vector grad_reqs; + std::vector aux_arrays; + std::map< std::string, NDArray> aux_map; - BufferFile json_data(json_file); - BufferFile param_data(param_file); + sym.InferExecutorArrays(ctx, ¶m_arrays, &grad_array, &grad_reqs, + &aux_arrays, param_map, std::map(), + std::map(), aux_map); - int dev_type = use_gpu ? 
2 : 1; - int dev_id = 0; + Executor *handle = new Executor(sym, ctx, param_arrays, grad_array, grad_reqs, aux_arrays); + assert(handle); + return handle; + } - if (json_data.GetLength() == 0 || - param_data.GetLength() == 0) { - std::exit(-1); + std::vector getSizesOfShapes(const std::vector> shapes){ + std::vector sizes; + for(std::vector shape : shapes){ + mx_uint val = 1; + for(mx_uint i: shape){ + val *= i; + } + sizes.push_back(val); } + return sizes; + } - const mx_uint num_input_nodes = input_keys.size(); - - const char* input_keys_ptr[num_input_nodes]; - for(mx_uint i = 0; i < num_input_nodes; i++){ - input_keys_ptr[i] = input_keys[i].c_str(); + void CheckMXNetError(std::string loc){ + const char* err = MXGetLastError(); + if (err && err[0] != 0) { + std::cout << "MXNet error at " << loc << err << std::endl; + exit(-1); } - - mx_uint shape_data_size = 0; - mx_uint input_shape_indptr[input_shapes.size() + 1]; - input_shape_indptr[0] = 0; - for(mx_uint i = 0; i < input_shapes.size(); i++){ - shape_data_size += input_shapes[i].size(); - input_shape_indptr[i+1] = shape_data_size; + } + + void init(const std::string &file_prefix, + const std::vector &network_input_keys, + const std::vector> &network_input_shapes){ + + CNNLAOptimizer_mountaincar_master_actor optimizer_creator = CNNLAOptimizer_mountaincar_master_actor(); + + if(optimizer_creator.getContextName() == "gpu"){ + ctx = Context::gpu(); } - - mx_uint input_shape_data[shape_data_size]; - mx_uint index = 0; - for(mx_uint i = 0; i < input_shapes.size(); i++){ - for(mx_uint j = 0; j < input_shapes[i].size(); j++){ - input_shape_data[index] = input_shapes[i][j]; - index++; - } + + network_input_sizes = getSizesOfShapes(network_input_shapes); + + ModelLoader model_loader(file_prefix, 0, ctx); + + std::vector network_symbols = model_loader.GetNetworkSymbols(); + std::vector> network_param_maps; + network_param_maps = model_loader.GetNetworkParamMaps(); + + //Init handles + std::map> in_shape_map; + for(mx_uint 
i=0; i < network_input_keys.size(); i++){ + in_shape_map[network_input_keys[i]] = network_input_shapes[i]; } - - MXPredCreate(static_cast(json_data.GetBuffer()), - static_cast(param_data.GetBuffer()), - static_cast(param_data.GetLength()), - dev_type, - dev_id, - num_input_nodes, - input_keys_ptr, - input_shape_indptr, - input_shape_data, - &handle); - assert(handle); + std::vector> in_shapes; + std::vector> aux_shapes; + std::vector> out_shapes; + network_symbols[0].InferShape(in_shape_map, &in_shapes, &aux_shapes, &out_shapes); + network_handles.push_back(initExecutor(network_symbols[0], network_param_maps[0], network_input_keys, network_input_shapes)); + } }; - #endif // CNNPREDICTOR_MOUNTAINCAR_MASTER_ACTOR diff --git a/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/CNNTrainer_mountaincar_master_actor.py b/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/CNNTrainer_mountaincar_master_actor.py index a2827e42..198280f4 100644 --- a/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/CNNTrainer_mountaincar_master_actor.py +++ b/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/CNNTrainer_mountaincar_master_actor.py @@ -61,6 +61,7 @@ if __name__ == "__main__": 'state_dtype': 'float32', 'action_dtype': 'float32', 'rewards_dtype': 'float32' + }, 'strategy_params': { 'method':'ornstein_uhlenbeck', diff --git a/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/mountaincar_master_actor.h b/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/mountaincar_master_actor.h index 4cc18f44..39061da1 100644 --- a/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/mountaincar_master_actor.h +++ b/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/mountaincar_master_actor.h @@ -19,8 +19,10 @@ action=colvec(1); } void execute(){ vector state_ = CNNTranslator::translate(state); + vector action_(1); + _predictor_0_.predict(state_, action_); 
action = CNNTranslator::translateToCol(action_, std::vector {1}); diff --git a/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/reinforcement_learning/CNNCreator_mountaincar_agent_mountaincarCritic.py b/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/reinforcement_learning/CNNCreator_mountaincar_agent_mountaincarCritic.py index 2f3f2bda..0a13ecf5 100644 --- a/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/reinforcement_learning/CNNCreator_mountaincar_agent_mountaincarCritic.py +++ b/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/reinforcement_learning/CNNCreator_mountaincar_agent_mountaincarCritic.py @@ -2,6 +2,8 @@ import mxnet as mx import logging import os import shutil +import warnings +import inspect from CNNNet_mountaincar_agent_mountaincarCritic import Net_0 @@ -20,6 +22,10 @@ class CNNCreator_mountaincar_agent_mountaincarCritic: for i, network in self.networks.items(): lastEpoch = 0 param_file = None + if hasattr(network, 'episodic_sub_nets'): + num_episodic_sub_nets = len(network.episodic_sub_nets) + lastMemEpoch = [0]*num_episodic_sub_nets + mem_files = [None]*num_episodic_sub_nets try: os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + "_newest-0000.params") @@ -30,22 +36,77 @@ class CNNCreator_mountaincar_agent_mountaincarCritic: except OSError: pass + if hasattr(network, 'episodic_sub_nets'): + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(0) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(0) + "-symbol.json") + except OSError: + pass + + for j in range(len(network.episodic_sub_nets)): + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(j+1) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ 
+ "_" + str(i) + '_newest_episodic_sub_net_' + str(j+1) + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_query_net_' + str(j+1) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_query_net_' + str(j+1) + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_loss' + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_loss' + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + "_newest_episodic_memory_sub_net_" + str(j + 1) + "-0000") + except OSError: + pass + if os.path.isdir(self._model_dir_): for file in os.listdir(self._model_dir_): - if ".params" in file and self._model_prefix_ + "_" + str(i) in file: - epochStr = file.replace(".params","").replace(self._model_prefix_ + "_" + str(i) + "-","") + if ".params" in file and self._model_prefix_ + "_" + str(i) in file and not "loss" in file: + epochStr = file.replace(".params", "").replace(self._model_prefix_ + "_" + str(i) + "-", "") epoch = int(epochStr) - if epoch > lastEpoch: + if epoch >= lastEpoch: lastEpoch = epoch param_file = file + elif hasattr(network, 'episodic_sub_nets') and self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_" in file: + relMemPathInfo = file.replace(self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_", "").split("-") + memSubNet = int(relMemPathInfo[0]) + memEpochStr = relMemPathInfo[1] + memEpoch = int(memEpochStr) + if memEpoch >= lastMemEpoch[memSubNet-1]: + lastMemEpoch[memSubNet-1] = memEpoch + mem_files[memSubNet-1] = file + if param_file is None: earliestLastEpoch = 0 else: logging.info("Loading checkpoint: " + param_file) network.load_parameters(self._model_dir_ + param_file) + if 
hasattr(network, 'episodic_sub_nets'): + for j, sub_net in enumerate(network.episodic_sub_nets): + if mem_files[j] != None: + logging.info("Loading Replay Memory: " + mem_files[j]) + mem_layer = [param for param in inspect.getmembers(sub_net, lambda x: not(inspect.isroutine(x))) if param[0].startswith("memory")][0][1] + mem_layer.load_memory(self._model_dir_ + mem_files[j]) - if earliestLastEpoch == None or lastEpoch < earliestLastEpoch: - earliestLastEpoch = lastEpoch + if earliestLastEpoch == None or lastEpoch + 1 < earliestLastEpoch: + earliestLastEpoch = lastEpoch + 1 return earliestLastEpoch @@ -56,27 +117,52 @@ class CNNCreator_mountaincar_agent_mountaincarCritic: for i, network in self.networks.items(): # param_file = self._model_prefix_ + "_" + str(i) + "_newest-0000.params" param_file = None + if hasattr(network, 'episodic_sub_nets'): + num_episodic_sub_nets = len(network.episodic_sub_nets) + lastMemEpoch = [0] * num_episodic_sub_nets + mem_files = [None] * num_episodic_sub_nets + if os.path.isdir(self._weights_dir_): lastEpoch = 0 for file in os.listdir(self._weights_dir_): - if ".params" in file and self._model_prefix_ + "_" + str(i) in file: + if ".params" in file and self._model_prefix_ + "_" + str(i) in file and not "loss" in file: epochStr = file.replace(".params","").replace(self._model_prefix_ + "_" + str(i) + "-","") epoch = int(epochStr) - if epoch > lastEpoch: + if epoch >= lastEpoch: lastEpoch = epoch param_file = file + elif hasattr(network, 'episodic_sub_nets') and self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_" in file: + relMemPathInfo = file.replace(self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_").split("-") + memSubNet = int(relMemPathInfo[0]) + memEpochStr = relMemPathInfo[1] + memEpoch = int(memEpochStr) + if memEpoch >= lastMemEpoch[memSubNet-1]: + lastMemEpoch[memSubNet-1] = memEpoch + mem_files[memSubNet-1] = file + logging.info("Loading pretrained weights: " + self._weights_dir_ + param_file) 
network.load_parameters(self._weights_dir_ + param_file, allow_missing=True, ignore_extra=True) + if hasattr(network, 'episodic_sub_nets'): + assert lastEpoch == lastMemEpoch + for j, sub_net in enumerate(network.episodic_sub_nets): + if mem_files[j] != None: + logging.info("Loading pretrained Replay Memory: " + mem_files[j]) + mem_layer = \ + [param for param in inspect.getmembers(sub_net, lambda x: not (inspect.isroutine(x))) if + param[0].startswith("memory")][0][1] + mem_layer.load_memory(self._model_dir_ + mem_files[j]) else: logging.info("No pretrained weights available at: " + self._weights_dir_ + param_file) def construct(self, context, data_mean=None, data_std=None): - self.networks[0] = Net_0(data_mean=data_mean, data_std=data_std) - self.networks[0].collect_params().initialize(self.weight_initializer, ctx=context) + self.networks[0] = Net_0(data_mean=data_mean, data_std=data_std, mx_context=context, prefix="") + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + self.networks[0].collect_params().initialize(self.weight_initializer, force_reinit=False, ctx=context) self.networks[0].hybridize() - self.networks[0](mx.nd.zeros((1, 2,), ctx=context), mx.nd.zeros((1, 1,), ctx=context)) + self.networks[0](mx.nd.zeros((1, 2,), ctx=context[0]), mx.nd.zeros((1, 1,), ctx=context[0])) if not os.path.exists(self._model_dir_): os.makedirs(self._model_dir_) diff --git a/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/reinforcement_learning/CNNNet_mountaincar_agent_mountaincarCritic.py b/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/reinforcement_learning/CNNNet_mountaincar_agent_mountaincarCritic.py index fe452eb8..6414df26 100644 --- a/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/reinforcement_learning/CNNNet_mountaincar_agent_mountaincarCritic.py +++ b/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/reinforcement_learning/CNNNet_mountaincar_agent_mountaincarCritic.py 
@@ -1,7 +1,10 @@ import mxnet as mx import numpy as np import math -from mxnet import gluon +import os +import abc +import warnings +from mxnet import gluon, nd class ZScoreNormalization(gluon.HybridBlock): @@ -86,9 +89,419 @@ class CustomGRU(gluon.HybridBlock): output, [state0] = self.gru(data, [F.swapaxes(state0, 0, 1)]) return output, F.swapaxes(state0, 0, 1) + +class DotProductSelfAttention(gluon.HybridBlock): + def __init__(self, + scale_factor, + num_heads, + dim_model, + dim_keys, + dim_values, + use_proj_bias, + use_mask, + **kwargs): + super(DotProductSelfAttention, self).__init__(**kwargs) + with self.name_scope(): + self.num_heads = num_heads + self.dim_model = dim_model + self.use_proj_bias = use_proj_bias + self.use_mask = use_mask + + if dim_keys == -1: + self.dim_keys = int(dim_model / self.num_heads) + else: + self.dim_keys = dim_keys + if dim_values == -1: + self.dim_values = int(dim_model / self.num_heads) + else: + self.dim_values = dim_values + + if scale_factor == -1: + self.scale_factor = math.sqrt(self.dim_keys) + else: + self.scale_factor = scale_factor + + self.proj_q = gluon.nn.Dense(self.num_heads*self.dim_keys, use_bias=self.use_proj_bias, flatten=False) + self.proj_k = gluon.nn.Dense(self.num_heads*self.dim_keys, use_bias=self.use_proj_bias, flatten=False) + self.proj_v = gluon.nn.Dense(self.num_heads*self.dim_values, use_bias=self.use_proj_bias, flatten=False) + self.proj_o = gluon.nn.Dense(self.dim_model, use_bias=self.use_proj_bias, flatten=False) + + def hybrid_forward(self, F, queries, keys, values, *args, **kwargs): + + queries = F.Reshape(queries, shape=(0, 0,-1)) + keys = F.Reshape(queries, shape=(0, 0, -1)) + values = F.Reshape(queries, shape=(0, 0, -1)) + + head_queries = self.proj_q(queries) + head_keys = self.proj_k(keys) + head_values = self.proj_v(values) + + head_queries = F.reshape(head_queries, shape=(0, 0, self.num_heads, -1)) + head_queries = F.transpose(head_queries, axes=(0,2,1,3)) + head_queries = 
F.reshape(head_queries, shape=(-1, 0, 0), reverse=True) + + head_keys = F.reshape(head_keys, shape=(0, 0, self.num_heads, -1)) + head_keys = F.transpose(head_keys, axes=(0,2,1,3)) + head_keys = F.reshape(head_keys, shape=(-1, 0, 0), reverse=True) + + score = F.batch_dot(head_queries, head_keys, transpose_b=True) + score = score * self.scale_factor + if self.use_mask: + mask = F.tile(mask, self.num_heads) + mask = F.repeat(mask, self.dim_model) + mask = F.reshape(mask, shape=(-1, self.dim_model)) + weights = F.softmax(score, mask, use_length=self.use_mask) + + head_values = F.reshape(head_values, shape=(0, 0, self.num_heads, -1)) + head_values = F.transpose(head_values, axes=(0,2,1,3)) + head_values = F.reshape(head_values, shape=(-1, 0, 0), reverse=True) + + ret = F.batch_dot(weights, head_values) + ret = F.reshape(ret, shape=(-1, self.num_heads, 0, 0), reverse=True) + ret = F.transpose(ret, axes=(0, 2, 1, 3)) + ret = F.reshape(ret, shape=(0, 0, -1)) + + ret = self.proj_o(ret) + + return ret + + +class EpisodicReplayMemoryInterface(gluon.HybridBlock): + __metaclass__ = abc.ABCMeta + + def __init__(self, use_replay, replay_interval, replay_batch_size, replay_steps, replay_gradient_steps, num_heads, **kwargs): + super(EpisodicReplayMemoryInterface, self).__init__(**kwargs) + + self.use_replay = use_replay + self.replay_interval = replay_interval + self.replay_batch_size = replay_batch_size + self.replay_steps = replay_steps + self.replay_gradient_steps = replay_gradient_steps + self.num_heads = num_heads + + @abc.abstractmethod + def store_samples(self, data, y, query_network, store_prob, mx_context): + pass + + @abc.abstractmethod + def sample_memory(self, batch_size, mx_context): + pass + + @abc.abstractmethod + def get_query_network(self, mx_context): + pass + + @abc.abstractmethod + def save_memory(self, path): + pass + + @abc.abstractmethod + def load_memory(self, path): + pass + +#Memory layer +class LargeMemory(gluon.HybridBlock): + def __init__(self, + 
sub_key_size, + query_size, + query_act, + dist_measure, + k, + num_heads, + values_dim, + **kwargs): + super(LargeMemory, self).__init__(**kwargs) + with self.name_scope(): + #Memory parameters + self.dist_measure = dist_measure + self.k = k + self.num_heads = num_heads + self.query_act = query_act + self.query_size = query_size + self.num_heads = num_heads + + #Batch norm sub-layer + self.batch_norm = gluon.nn.BatchNorm() + + #Memory sub-layer + self.sub_key_size = sub_key_size + sub_key_shape = (self.num_heads, self.sub_key_size, int(query_size[-1] / 2)) + + if values_dim == -1: + values_shape = (self.sub_key_size * self.sub_key_size, self.query_size[-1]) + else: + values_shape = (self.sub_key_size*self.sub_key_size, values_dim) + + self.sub_keys1 = self.params.get("sub_keys1", shape=sub_key_shape, differentiable=True) + self.sub_keys2 = self.params.get("sub_keys2", shape=sub_key_shape, differentiable=True) + self.values = self.params.get("values", shape=values_shape, differentiable=True) + self.label_memory = nd.array([]) + + self.get_query_network() + + def hybrid_forward(self, F, x, sub_keys1, sub_keys2, values): + x = self.batch_norm(x) + + x = F.reshape(x, shape=(0, -1)) + + q = self.query_network(x) + + q = F.reshape(q, shape=(0, self.num_heads, -1)) + + q_split = F.split(q, num_outputs=2, axis=-1) + + if self.dist_measure == "l2": + q_split_resh = F.reshape(q_split[0], shape=(0,0,1,-1)) + sub_keys1_resh = F.reshape(sub_keys1, shape=(1,0,0,-1), reverse=True) + q1_diff = F.broadcast_sub(q_split_resh, sub_keys1_resh) + q1_dist = F.norm(q1_diff, axis=-1) + q_split_resh = F.reshape(q_split[1], shape=(0,0,1,-1)) + sub_keys2_resh = F.reshape(sub_keys2, shape=(1,0,0,-1), reverse=True) + q2_diff = F.broadcast_sub(q_split_resh, sub_keys2_resh) + q2_dist = F.norm(q2_diff, axis=-1) + else: + q1 = F.split(q_split[0], num_outputs=self.num_heads, axis=1) + q2 = F.split(q_split[1], num_outputs=self.num_heads, axis=1) + sub_keys1_resh = F.split(sub_keys1, 
num_outputs=self.num_heads, axis=0, squeeze_axis=True) + sub_keys2_resh = F.split(sub_keys2, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + if self.num_heads == 1: + q1 = [q1] + q2 = [q2] + sub_keys1_resh = [sub_keys1_resh ] + sub_keys2_resh = [sub_keys2_resh ] + + q1_dist = F.dot(q1[0], sub_keys1_resh[0], transpose_b=True) + q2_dist = F.dot(q2[0], sub_keys2_resh[0], transpose_b=True) + for h in range(1, self.num_heads): + q1_dist = F.concat(q1_dist, F.dot(q1[0], sub_keys1_resh[h], transpose_b=True), dim=1) + q2_dist = F.concat(q2_dist, F.dot(q2[0], sub_keys1_resh[h], transpose_b=True), dim=1) + + i1 = F.topk(q1_dist, k=self.k, ret_typ="indices") + i2 = F.topk(q2_dist, k=self.k, ret_typ="indices") + + # Calculate cross product for keys at indices I1 and I2 + + # def head_take(data, state): + # return [F.take(data[0], data[2]), F.take(data[1], data[3])], state, + # + # i1 = F.transpose(i1, axes=(1,0,2)) + # i2 = F.transpose(i2, axes=(1, 0, 2)) + # st = F.zeros(1) + # (k1, k2), _ = F.contrib.foreach(head_take, [sub_keys1, sub_keys2,i1,i2], st) + # k1 = F.reshape(k1, shape=(-1, 0, 0), reverse=True) + # k2 = F.reshape(k2, shape=(-1, 0, 0), reverse=True) + i1 = F.split(i1, num_outputs=self.num_heads, axis=1) + i2 = F.split(i2, num_outputs=self.num_heads, axis=1) + sub_keys1 = F.split(sub_keys1, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + sub_keys2 = F.split(sub_keys2, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + if self.num_heads == 1: + i1 = [i1] + i2 = [i2] + sub_keys1 = [sub_keys1] + sub_keys2 = [sub_keys2] + + k1 = F.take(sub_keys1[0], i1[0]) + k2 = F.take(sub_keys2[0], i2[0]) + for h in range(1, self.num_heads): + k1 = F.concat(k1, F.take(sub_keys1[h], i1[h]), dim=1) + k2 = F.concat(k2, F.take(sub_keys2[h], i2[h]), dim=1) + + k1 = F.tile(k1, (1, 1, self.k, 1)) + k2 = F.repeat(k2, self.k, 2) + c_cart = F.concat(k1, k2, dim=3) + + q = F.reshape(q, shape=(-1,0), reverse=True) + q = F.reshape(q, shape=(0, 1, -1)) + c_cart = 
F.reshape(c_cart, shape=(-1, 0, 0), reverse=True) + if self.dist_measure == "l2": + k_diff = F.broadcast_sub(q, c_cart) + k_dist = F.norm(k_diff, axis=-1) + else: + k_dist = F.batch_dot(q, c_cart, transpose_b=True) #F.contrib.foreach(loop_batch_dot, [q, c_cart], init_states=state_batch_dist) + k_dist = F.reshape(k_dist, shape=(0, -1)) + + i = F.topk(k_dist, k=self.k, ret_typ="both") + + w = F.softmax(i[0]) + w = F.reshape(w, shape=(0,1,-1)) + vi = F.take(values, i[1]) + aggr_value = F.batch_dot(w, vi) #F.contrib.foreach(loop_batch_dot, [w, vi], init_states=state_batch_dist) + + ret = F.reshape(aggr_value, shape=(-1, self.num_heads, 0), reverse=True) + one_vec = F.ones((1, 1, self.num_heads)) + one_vec = F.broadcast_like(one_vec, ret, lhs_axes=0, rhs_axes=0) + ret = F.batch_dot(one_vec, ret) + ret = F.reshape(ret, shape=(-1, 0), reverse=True) + + return ret + + def get_query_network(self): + if hasattr(self, 'query_network'): + return self.query_network + else: + self.query_network = gluon.nn.HybridSequential() + for size in self.query_size: + if self.query_act == "linear": + self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, flatten=False)) + else: + self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, activation=self.query_act, flatten=False)) + return self.query_network + + +#EpisodicMemory layer +class EpisodicMemory(EpisodicReplayMemoryInterface): + def __init__(self, + replay_interval, + replay_batch_size, + replay_steps, + replay_gradient_steps, + store_prob, + max_stored_samples, + use_replay, + query_net_dir, + query_net_prefix, + query_net_num_inputs, + **kwargs): + super(EpisodicMemory, self).__init__(use_replay, replay_interval, replay_batch_size, replay_steps, replay_gradient_steps, 1, **kwargs) + with self.name_scope(): + #Replay parameters + self.store_prob = store_prob + self.max_stored_samples = max_stored_samples + + self.query_net_dir = query_net_dir + self.query_net_prefix = query_net_prefix + self.query_net_num_inputs = 
query_net_num_inputs + + #Memory + self.key_memory = nd.array([]) + self.value_memory = nd.array([]) + self.label_memory = nd.array([]) + + def hybrid_forward(self, F, *args): + #propagate the input as the rest is only used for replay + return [args, []] + + def store_samples(self, data, y, query_network, store_prob, context): + num_pus = len(data) + sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] + num_inputs = len(data[0][0]) + num_outputs = len(y) + mx_context = context[0] + + if len(self.key_memory) == 0: + self.key_memory = nd.empty(0, ctx=mx.cpu()) + self.value_memory = [] + self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) + + ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] + + max_inds = [nd.max(ind[i]) for i in range(num_pus)] + if any(max_inds): + to_store_values = [] + for i in range(num_inputs): + tmp_values = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_values, list): + tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) + else: + tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) + to_store_values.append(tmp_values) + + to_store_labels = [] + for i in range(num_outputs): + tmp_labels = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_labels, list): + tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) + else: + tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) + to_store_labels.append(tmp_labels) + + to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) + + if self.key_memory.shape[0] == 0: + self.key_memory = to_store_keys.as_in_context(mx.cpu()) + for i in range(num_inputs): + self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + for i in range(num_outputs): + 
self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) + elif self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: + num_to_store = to_store_keys.shape[0] + self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=1) + else: + self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + + def sample_memory(self, batch_size): + num_stored_samples = self.key_memory.shape[0] + if self.replay_batch_size == -1: + sample_ind = nd.random.randint(0, num_stored_samples, (self.replay_steps, batch_size), ctx=mx.cpu()) + else: + sample_ind = nd.random.randint(0, num_stored_samples, (self.replay_steps, self.replay_batch_size), ctx=mx.cpu()) + + num_outputs = len(self.label_memory) + + sample_labels = [[self.label_memory[i][ind] for i in range(num_outputs)] for ind in sample_ind] + sample_batches = [[[self.value_memory[j][ind] for j in range(len(self.value_memory))], sample_labels[i]] for i, ind in enumerate(sample_ind)] + + return sample_batches + + def get_query_network(self, context): + lastEpoch = 0 + for file in os.listdir(self.query_net_dir): + if self.query_net_prefix in file and ".json" in file: + symbolFile = file + + if self.query_net_prefix in file and ".param" in file: + epochStr = file.replace(".params", "").replace(self.query_net_prefix, "") + epoch = int(epochStr) + 
if epoch >= lastEpoch: + lastEpoch = epoch + weightFile = file + + inputNames = [] + if self.query_net_num_inputs == 1: + inputNames.append("data") + else: + for i in range(self.query_net_num_inputs): + inputNames.append("data" + str(i)) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + net = mx.gluon.nn.SymbolBlock.imports(self.query_net_dir + symbolFile, inputNames, self.query_net_dir + weightFile, ctx=context[0]) + net.hybridize() + return net + + def save_memory(self, path): + mem_arr = [("keys", self.key_memory)] + [("values_"+str(k),v) for (k,v) in enumerate(self.value_memory)] + [("labels_"+str(k),v) for (k,v) in enumerate(self.label_memory)] + mem_dict = {entry[0]:entry[1] for entry in mem_arr} + nd.save(path, mem_dict) + + def load_memory(self, path): + mem_dict = nd.load(path) + self.value_memory = [] + self.label_memory = [] + for key in sorted(mem_dict.keys()): + if key == "keys": + self.key_memory = mem_dict[key] + elif key.startswith("values_"): + self.value_memory.append(mem_dict[key]) + elif key.startswith("labels_"): + self.label_memory.append(mem_dict[key]) + + +#Stream 0 class Net_0(gluon.HybridBlock): - def __init__(self, data_mean=None, data_std=None, **kwargs): + def __init__(self, data_mean=None, data_std=None, mx_context=None, **kwargs): super(Net_0, self).__init__(**kwargs) with self.name_scope(): if data_mean: @@ -134,5 +547,5 @@ class Net_0(gluon.HybridBlock): fc4_ = self.fc4_(relu4_) qvalues_ = F.identity(fc4_) - return qvalues_ + return [[qvalues_]] diff --git a/src/test/resources/target_code/gluon/reinforcementModel/torcs/CNNCreator_torcs_agent_torcsAgent_dqn.py b/src/test/resources/target_code/gluon/reinforcementModel/torcs/CNNCreator_torcs_agent_torcsAgent_dqn.py index 508fc938..0632c404 100644 --- a/src/test/resources/target_code/gluon/reinforcementModel/torcs/CNNCreator_torcs_agent_torcsAgent_dqn.py +++ b/src/test/resources/target_code/gluon/reinforcementModel/torcs/CNNCreator_torcs_agent_torcsAgent_dqn.py @@ 
-2,6 +2,8 @@ import mxnet as mx import logging import os import shutil +import warnings +import inspect from CNNNet_torcs_agent_torcsAgent_dqn import Net_0 @@ -20,6 +22,10 @@ class CNNCreator_torcs_agent_torcsAgent_dqn: for i, network in self.networks.items(): lastEpoch = 0 param_file = None + if hasattr(network, 'episodic_sub_nets'): + num_episodic_sub_nets = len(network.episodic_sub_nets) + lastMemEpoch = [0]*num_episodic_sub_nets + mem_files = [None]*num_episodic_sub_nets try: os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + "_newest-0000.params") @@ -30,22 +36,77 @@ class CNNCreator_torcs_agent_torcsAgent_dqn: except OSError: pass + if hasattr(network, 'episodic_sub_nets'): + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(0) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(0) + "-symbol.json") + except OSError: + pass + + for j in range(len(network.episodic_sub_nets)): + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(j+1) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(j+1) + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_query_net_' + str(j+1) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_query_net_' + str(j+1) + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_loss' + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_loss' + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ 
+ self._model_prefix_ + "_" + str(i) + "_newest_episodic_memory_sub_net_" + str(j + 1) + "-0000") + except OSError: + pass + if os.path.isdir(self._model_dir_): for file in os.listdir(self._model_dir_): - if ".params" in file and self._model_prefix_ + "_" + str(i) in file: - epochStr = file.replace(".params","").replace(self._model_prefix_ + "_" + str(i) + "-","") + if ".params" in file and self._model_prefix_ + "_" + str(i) in file and not "loss" in file: + epochStr = file.replace(".params", "").replace(self._model_prefix_ + "_" + str(i) + "-", "") epoch = int(epochStr) - if epoch > lastEpoch: + if epoch >= lastEpoch: lastEpoch = epoch param_file = file + elif hasattr(network, 'episodic_sub_nets') and self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_" in file: + relMemPathInfo = file.replace(self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_", "").split("-") + memSubNet = int(relMemPathInfo[0]) + memEpochStr = relMemPathInfo[1] + memEpoch = int(memEpochStr) + if memEpoch >= lastMemEpoch[memSubNet-1]: + lastMemEpoch[memSubNet-1] = memEpoch + mem_files[memSubNet-1] = file + if param_file is None: earliestLastEpoch = 0 else: logging.info("Loading checkpoint: " + param_file) network.load_parameters(self._model_dir_ + param_file) + if hasattr(network, 'episodic_sub_nets'): + for j, sub_net in enumerate(network.episodic_sub_nets): + if mem_files[j] != None: + logging.info("Loading Replay Memory: " + mem_files[j]) + mem_layer = [param for param in inspect.getmembers(sub_net, lambda x: not(inspect.isroutine(x))) if param[0].startswith("memory")][0][1] + mem_layer.load_memory(self._model_dir_ + mem_files[j]) - if earliestLastEpoch == None or lastEpoch < earliestLastEpoch: - earliestLastEpoch = lastEpoch + if earliestLastEpoch == None or lastEpoch + 1 < earliestLastEpoch: + earliestLastEpoch = lastEpoch + 1 return earliestLastEpoch @@ -56,27 +117,52 @@ class CNNCreator_torcs_agent_torcsAgent_dqn: for i, network in self.networks.items(): # 
param_file = self._model_prefix_ + "_" + str(i) + "_newest-0000.params" param_file = None + if hasattr(network, 'episodic_sub_nets'): + num_episodic_sub_nets = len(network.episodic_sub_nets) + lastMemEpoch = [0] * num_episodic_sub_nets + mem_files = [None] * num_episodic_sub_nets + if os.path.isdir(self._weights_dir_): lastEpoch = 0 for file in os.listdir(self._weights_dir_): - if ".params" in file and self._model_prefix_ + "_" + str(i) in file: + if ".params" in file and self._model_prefix_ + "_" + str(i) in file and not "loss" in file: epochStr = file.replace(".params","").replace(self._model_prefix_ + "_" + str(i) + "-","") epoch = int(epochStr) - if epoch > lastEpoch: + if epoch >= lastEpoch: lastEpoch = epoch param_file = file + elif hasattr(network, 'episodic_sub_nets') and self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_" in file: + relMemPathInfo = file.replace(self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_").split("-") + memSubNet = int(relMemPathInfo[0]) + memEpochStr = relMemPathInfo[1] + memEpoch = int(memEpochStr) + if memEpoch >= lastMemEpoch[memSubNet-1]: + lastMemEpoch[memSubNet-1] = memEpoch + mem_files[memSubNet-1] = file + logging.info("Loading pretrained weights: " + self._weights_dir_ + param_file) network.load_parameters(self._weights_dir_ + param_file, allow_missing=True, ignore_extra=True) + if hasattr(network, 'episodic_sub_nets'): + assert lastEpoch == lastMemEpoch + for j, sub_net in enumerate(network.episodic_sub_nets): + if mem_files[j] != None: + logging.info("Loading pretrained Replay Memory: " + mem_files[j]) + mem_layer = \ + [param for param in inspect.getmembers(sub_net, lambda x: not (inspect.isroutine(x))) if + param[0].startswith("memory")][0][1] + mem_layer.load_memory(self._model_dir_ + mem_files[j]) else: logging.info("No pretrained weights available at: " + self._weights_dir_ + param_file) def construct(self, context, data_mean=None, data_std=None): - self.networks[0] = 
Net_0(data_mean=data_mean, data_std=data_std) - self.networks[0].collect_params().initialize(self.weight_initializer, ctx=context) + self.networks[0] = Net_0(data_mean=data_mean, data_std=data_std, mx_context=context, prefix="") + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + self.networks[0].collect_params().initialize(self.weight_initializer, force_reinit=False, ctx=context) self.networks[0].hybridize() - self.networks[0](mx.nd.zeros((1, 5,), ctx=context)) + self.networks[0](mx.nd.zeros((1, 5,), ctx=context[0])) if not os.path.exists(self._model_dir_): os.makedirs(self._model_dir_) diff --git a/src/test/resources/target_code/gluon/reinforcementModel/torcs/CNNNet_torcs_agent_torcsAgent_dqn.py b/src/test/resources/target_code/gluon/reinforcementModel/torcs/CNNNet_torcs_agent_torcsAgent_dqn.py index a13ab817..448c1394 100644 --- a/src/test/resources/target_code/gluon/reinforcementModel/torcs/CNNNet_torcs_agent_torcsAgent_dqn.py +++ b/src/test/resources/target_code/gluon/reinforcementModel/torcs/CNNNet_torcs_agent_torcsAgent_dqn.py @@ -1,7 +1,10 @@ import mxnet as mx import numpy as np import math -from mxnet import gluon +import os +import abc +import warnings +from mxnet import gluon, nd class ZScoreNormalization(gluon.HybridBlock): @@ -86,9 +89,419 @@ class CustomGRU(gluon.HybridBlock): output, [state0] = self.gru(data, [F.swapaxes(state0, 0, 1)]) return output, F.swapaxes(state0, 0, 1) + +class DotProductSelfAttention(gluon.HybridBlock): + def __init__(self, + scale_factor, + num_heads, + dim_model, + dim_keys, + dim_values, + use_proj_bias, + use_mask, + **kwargs): + super(DotProductSelfAttention, self).__init__(**kwargs) + with self.name_scope(): + self.num_heads = num_heads + self.dim_model = dim_model + self.use_proj_bias = use_proj_bias + self.use_mask = use_mask + + if dim_keys == -1: + self.dim_keys = int(dim_model / self.num_heads) + else: + self.dim_keys = dim_keys + if dim_values == -1: + self.dim_values = int(dim_model / 
self.num_heads) + else: + self.dim_values = dim_values + + if scale_factor == -1: + self.scale_factor = math.sqrt(self.dim_keys) + else: + self.scale_factor = scale_factor + + self.proj_q = gluon.nn.Dense(self.num_heads*self.dim_keys, use_bias=self.use_proj_bias, flatten=False) + self.proj_k = gluon.nn.Dense(self.num_heads*self.dim_keys, use_bias=self.use_proj_bias, flatten=False) + self.proj_v = gluon.nn.Dense(self.num_heads*self.dim_values, use_bias=self.use_proj_bias, flatten=False) + self.proj_o = gluon.nn.Dense(self.dim_model, use_bias=self.use_proj_bias, flatten=False) + + def hybrid_forward(self, F, queries, keys, values, *args, **kwargs): + + queries = F.Reshape(queries, shape=(0, 0,-1)) + keys = F.Reshape(queries, shape=(0, 0, -1)) + values = F.Reshape(queries, shape=(0, 0, -1)) + + head_queries = self.proj_q(queries) + head_keys = self.proj_k(keys) + head_values = self.proj_v(values) + + head_queries = F.reshape(head_queries, shape=(0, 0, self.num_heads, -1)) + head_queries = F.transpose(head_queries, axes=(0,2,1,3)) + head_queries = F.reshape(head_queries, shape=(-1, 0, 0), reverse=True) + + head_keys = F.reshape(head_keys, shape=(0, 0, self.num_heads, -1)) + head_keys = F.transpose(head_keys, axes=(0,2,1,3)) + head_keys = F.reshape(head_keys, shape=(-1, 0, 0), reverse=True) + + score = F.batch_dot(head_queries, head_keys, transpose_b=True) + score = score * self.scale_factor + if self.use_mask: + mask = F.tile(mask, self.num_heads) + mask = F.repeat(mask, self.dim_model) + mask = F.reshape(mask, shape=(-1, self.dim_model)) + weights = F.softmax(score, mask, use_length=self.use_mask) + + head_values = F.reshape(head_values, shape=(0, 0, self.num_heads, -1)) + head_values = F.transpose(head_values, axes=(0,2,1,3)) + head_values = F.reshape(head_values, shape=(-1, 0, 0), reverse=True) + + ret = F.batch_dot(weights, head_values) + ret = F.reshape(ret, shape=(-1, self.num_heads, 0, 0), reverse=True) + ret = F.transpose(ret, axes=(0, 2, 1, 3)) + ret = 
F.reshape(ret, shape=(0, 0, -1)) + + ret = self.proj_o(ret) + + return ret + + +class EpisodicReplayMemoryInterface(gluon.HybridBlock): + __metaclass__ = abc.ABCMeta + + def __init__(self, use_replay, replay_interval, replay_batch_size, replay_steps, replay_gradient_steps, num_heads, **kwargs): + super(EpisodicReplayMemoryInterface, self).__init__(**kwargs) + + self.use_replay = use_replay + self.replay_interval = replay_interval + self.replay_batch_size = replay_batch_size + self.replay_steps = replay_steps + self.replay_gradient_steps = replay_gradient_steps + self.num_heads = num_heads + + @abc.abstractmethod + def store_samples(self, data, y, query_network, store_prob, mx_context): + pass + + @abc.abstractmethod + def sample_memory(self, batch_size, mx_context): + pass + + @abc.abstractmethod + def get_query_network(self, mx_context): + pass + + @abc.abstractmethod + def save_memory(self, path): + pass + + @abc.abstractmethod + def load_memory(self, path): + pass + +#Memory layer +class LargeMemory(gluon.HybridBlock): + def __init__(self, + sub_key_size, + query_size, + query_act, + dist_measure, + k, + num_heads, + values_dim, + **kwargs): + super(LargeMemory, self).__init__(**kwargs) + with self.name_scope(): + #Memory parameters + self.dist_measure = dist_measure + self.k = k + self.num_heads = num_heads + self.query_act = query_act + self.query_size = query_size + self.num_heads = num_heads + + #Batch norm sub-layer + self.batch_norm = gluon.nn.BatchNorm() + + #Memory sub-layer + self.sub_key_size = sub_key_size + sub_key_shape = (self.num_heads, self.sub_key_size, int(query_size[-1] / 2)) + + if values_dim == -1: + values_shape = (self.sub_key_size * self.sub_key_size, self.query_size[-1]) + else: + values_shape = (self.sub_key_size*self.sub_key_size, values_dim) + + self.sub_keys1 = self.params.get("sub_keys1", shape=sub_key_shape, differentiable=True) + self.sub_keys2 = self.params.get("sub_keys2", shape=sub_key_shape, differentiable=True) + self.values 
= self.params.get("values", shape=values_shape, differentiable=True) + self.label_memory = nd.array([]) + + self.get_query_network() + + def hybrid_forward(self, F, x, sub_keys1, sub_keys2, values): + x = self.batch_norm(x) + + x = F.reshape(x, shape=(0, -1)) + + q = self.query_network(x) + + q = F.reshape(q, shape=(0, self.num_heads, -1)) + + q_split = F.split(q, num_outputs=2, axis=-1) + + if self.dist_measure == "l2": + q_split_resh = F.reshape(q_split[0], shape=(0,0,1,-1)) + sub_keys1_resh = F.reshape(sub_keys1, shape=(1,0,0,-1), reverse=True) + q1_diff = F.broadcast_sub(q_split_resh, sub_keys1_resh) + q1_dist = F.norm(q1_diff, axis=-1) + q_split_resh = F.reshape(q_split[1], shape=(0,0,1,-1)) + sub_keys2_resh = F.reshape(sub_keys2, shape=(1,0,0,-1), reverse=True) + q2_diff = F.broadcast_sub(q_split_resh, sub_keys2_resh) + q2_dist = F.norm(q2_diff, axis=-1) + else: + q1 = F.split(q_split[0], num_outputs=self.num_heads, axis=1) + q2 = F.split(q_split[1], num_outputs=self.num_heads, axis=1) + sub_keys1_resh = F.split(sub_keys1, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + sub_keys2_resh = F.split(sub_keys2, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + if self.num_heads == 1: + q1 = [q1] + q2 = [q2] + sub_keys1_resh = [sub_keys1_resh ] + sub_keys2_resh = [sub_keys2_resh ] + + q1_dist = F.dot(q1[0], sub_keys1_resh[0], transpose_b=True) + q2_dist = F.dot(q2[0], sub_keys2_resh[0], transpose_b=True) + for h in range(1, self.num_heads): + q1_dist = F.concat(q1_dist, F.dot(q1[0], sub_keys1_resh[h], transpose_b=True), dim=1) + q2_dist = F.concat(q2_dist, F.dot(q2[0], sub_keys1_resh[h], transpose_b=True), dim=1) + + i1 = F.topk(q1_dist, k=self.k, ret_typ="indices") + i2 = F.topk(q2_dist, k=self.k, ret_typ="indices") + + # Calculate cross product for keys at indices I1 and I2 + + # def head_take(data, state): + # return [F.take(data[0], data[2]), F.take(data[1], data[3])], state, + # + # i1 = F.transpose(i1, axes=(1,0,2)) + # i2 = F.transpose(i2, 
axes=(1, 0, 2)) + # st = F.zeros(1) + # (k1, k2), _ = F.contrib.foreach(head_take, [sub_keys1, sub_keys2,i1,i2], st) + # k1 = F.reshape(k1, shape=(-1, 0, 0), reverse=True) + # k2 = F.reshape(k2, shape=(-1, 0, 0), reverse=True) + i1 = F.split(i1, num_outputs=self.num_heads, axis=1) + i2 = F.split(i2, num_outputs=self.num_heads, axis=1) + sub_keys1 = F.split(sub_keys1, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + sub_keys2 = F.split(sub_keys2, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + if self.num_heads == 1: + i1 = [i1] + i2 = [i2] + sub_keys1 = [sub_keys1] + sub_keys2 = [sub_keys2] + + k1 = F.take(sub_keys1[0], i1[0]) + k2 = F.take(sub_keys2[0], i2[0]) + for h in range(1, self.num_heads): + k1 = F.concat(k1, F.take(sub_keys1[h], i1[h]), dim=1) + k2 = F.concat(k2, F.take(sub_keys2[h], i2[h]), dim=1) + + k1 = F.tile(k1, (1, 1, self.k, 1)) + k2 = F.repeat(k2, self.k, 2) + c_cart = F.concat(k1, k2, dim=3) + + q = F.reshape(q, shape=(-1,0), reverse=True) + q = F.reshape(q, shape=(0, 1, -1)) + c_cart = F.reshape(c_cart, shape=(-1, 0, 0), reverse=True) + if self.dist_measure == "l2": + k_diff = F.broadcast_sub(q, c_cart) + k_dist = F.norm(k_diff, axis=-1) + else: + k_dist = F.batch_dot(q, c_cart, transpose_b=True) #F.contrib.foreach(loop_batch_dot, [q, c_cart], init_states=state_batch_dist) + k_dist = F.reshape(k_dist, shape=(0, -1)) + + i = F.topk(k_dist, k=self.k, ret_typ="both") + + w = F.softmax(i[0]) + w = F.reshape(w, shape=(0,1,-1)) + vi = F.take(values, i[1]) + aggr_value = F.batch_dot(w, vi) #F.contrib.foreach(loop_batch_dot, [w, vi], init_states=state_batch_dist) + + ret = F.reshape(aggr_value, shape=(-1, self.num_heads, 0), reverse=True) + one_vec = F.ones((1, 1, self.num_heads)) + one_vec = F.broadcast_like(one_vec, ret, lhs_axes=0, rhs_axes=0) + ret = F.batch_dot(one_vec, ret) + ret = F.reshape(ret, shape=(-1, 0), reverse=True) + + return ret + + def get_query_network(self): + if hasattr(self, 'query_network'): + return 
self.query_network + else: + self.query_network = gluon.nn.HybridSequential() + for size in self.query_size: + if self.query_act == "linear": + self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, flatten=False)) + else: + self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, activation=self.query_act, flatten=False)) + return self.query_network + + +#EpisodicMemory layer +class EpisodicMemory(EpisodicReplayMemoryInterface): + def __init__(self, + replay_interval, + replay_batch_size, + replay_steps, + replay_gradient_steps, + store_prob, + max_stored_samples, + use_replay, + query_net_dir, + query_net_prefix, + query_net_num_inputs, + **kwargs): + super(EpisodicMemory, self).__init__(use_replay, replay_interval, replay_batch_size, replay_steps, replay_gradient_steps, 1, **kwargs) + with self.name_scope(): + #Replay parameters + self.store_prob = store_prob + self.max_stored_samples = max_stored_samples + + self.query_net_dir = query_net_dir + self.query_net_prefix = query_net_prefix + self.query_net_num_inputs = query_net_num_inputs + + #Memory + self.key_memory = nd.array([]) + self.value_memory = nd.array([]) + self.label_memory = nd.array([]) + + def hybrid_forward(self, F, *args): + #propagate the input as the rest is only used for replay + return [args, []] + + def store_samples(self, data, y, query_network, store_prob, context): + num_pus = len(data) + sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] + num_inputs = len(data[0][0]) + num_outputs = len(y) + mx_context = context[0] + + if len(self.key_memory) == 0: + self.key_memory = nd.empty(0, ctx=mx.cpu()) + self.value_memory = [] + self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) + + ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] + + max_inds = [nd.max(ind[i]) for i in range(num_pus)] + if any(max_inds): + to_store_values = [] + for i in range(num_inputs): + tmp_values = [] + for j in 
range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_values, list): + tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) + else: + tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) + to_store_values.append(tmp_values) + + to_store_labels = [] + for i in range(num_outputs): + tmp_labels = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_labels, list): + tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) + else: + tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) + to_store_labels.append(tmp_labels) + + to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) + + if self.key_memory.shape[0] == 0: + self.key_memory = to_store_keys.as_in_context(mx.cpu()) + for i in range(num_inputs): + self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + for i in range(num_outputs): + self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) + elif self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: + num_to_store = to_store_keys.shape[0] + self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=1) + else: + self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i], 
to_store_labels[i].as_in_context(mx.cpu()), dim=0) + + def sample_memory(self, batch_size): + num_stored_samples = self.key_memory.shape[0] + if self.replay_batch_size == -1: + sample_ind = nd.random.randint(0, num_stored_samples, (self.replay_steps, batch_size), ctx=mx.cpu()) + else: + sample_ind = nd.random.randint(0, num_stored_samples, (self.replay_steps, self.replay_batch_size), ctx=mx.cpu()) + + num_outputs = len(self.label_memory) + + sample_labels = [[self.label_memory[i][ind] for i in range(num_outputs)] for ind in sample_ind] + sample_batches = [[[self.value_memory[j][ind] for j in range(len(self.value_memory))], sample_labels[i]] for i, ind in enumerate(sample_ind)] + + return sample_batches + + def get_query_network(self, context): + lastEpoch = 0 + for file in os.listdir(self.query_net_dir): + if self.query_net_prefix in file and ".json" in file: + symbolFile = file + + if self.query_net_prefix in file and ".param" in file: + epochStr = file.replace(".params", "").replace(self.query_net_prefix, "") + epoch = int(epochStr) + if epoch >= lastEpoch: + lastEpoch = epoch + weightFile = file + + inputNames = [] + if self.query_net_num_inputs == 1: + inputNames.append("data") + else: + for i in range(self.query_net_num_inputs): + inputNames.append("data" + str(i)) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + net = mx.gluon.nn.SymbolBlock.imports(self.query_net_dir + symbolFile, inputNames, self.query_net_dir + weightFile, ctx=context[0]) + net.hybridize() + return net + + def save_memory(self, path): + mem_arr = [("keys", self.key_memory)] + [("values_"+str(k),v) for (k,v) in enumerate(self.value_memory)] + [("labels_"+str(k),v) for (k,v) in enumerate(self.label_memory)] + mem_dict = {entry[0]:entry[1] for entry in mem_arr} + nd.save(path, mem_dict) + + def load_memory(self, path): + mem_dict = nd.load(path) + self.value_memory = [] + self.label_memory = [] + for key in sorted(mem_dict.keys()): + if key == "keys": + self.key_memory = 
mem_dict[key] + elif key.startswith("values_"): + self.value_memory.append(mem_dict[key]) + elif key.startswith("labels_"): + self.label_memory.append(mem_dict[key]) + + +#Stream 0 class Net_0(gluon.HybridBlock): - def __init__(self, data_mean=None, data_std=None, **kwargs): + def __init__(self, data_mean=None, data_std=None, mx_context=None, **kwargs): super(Net_0, self).__init__(**kwargs) with self.name_scope(): if data_mean: @@ -121,5 +534,5 @@ class Net_0(gluon.HybridBlock): fc3_ = self.fc3_(tanh2_) qvalues_ = F.identity(fc3_) - return qvalues_ + return [[qvalues_]] diff --git a/src/test/resources/target_code/gluon/reinforcementModel/torcs/CNNPredictor_torcs_agent_torcsAgent_dqn.h b/src/test/resources/target_code/gluon/reinforcementModel/torcs/CNNPredictor_torcs_agent_torcsAgent_dqn.h index 8e45765a..adb17d99 100644 --- a/src/test/resources/target_code/gluon/reinforcementModel/torcs/CNNPredictor_torcs_agent_torcsAgent_dqn.h +++ b/src/test/resources/target_code/gluon/reinforcementModel/torcs/CNNPredictor_torcs_agent_torcsAgent_dqn.h @@ -1,107 +1,149 @@ #ifndef CNNPREDICTOR_TORCS_AGENT_TORCSAGENT_DQN #define CNNPREDICTOR_TORCS_AGENT_TORCSAGENT_DQN -#include +#include #include #include #include + +#include +#include -#include - +using namespace mxnet::cpp; + class CNNPredictor_torcs_agent_torcsAgent_dqn_0{ public: - const std::string json_file = "model/torcs.agent.dqn.TorcsDQN/model_0_newest-symbol.json"; - const std::string param_file = "model/torcs.agent.dqn.TorcsDQN/model_0_newest-0000.params"; - const std::vector input_keys = { + const std::string file_prefix = "model/torcs.agent.dqn.TorcsDQN/model_0_newest"; + + //network + const std::vector network_input_keys = { "data" }; - const std::vector> input_shapes = {{1, 5}}; - const bool use_gpu = false; - - PredictorHandle handle; - + const std::vector> network_input_shapes = {{1, 5}}; + std::vector network_input_sizes; + std::vector> network_arg_names; + std::vector network_handles; + + + //misc + Context ctx = 
Context::cpu(); //Will be updated later in init according to use_gpu + int dtype = 0; //use data type (float32=0 float64=1 ...) + + explicit CNNPredictor_torcs_agent_torcsAgent_dqn_0(){ - init(json_file, param_file, input_keys, input_shapes, use_gpu); + init(file_prefix, network_input_keys, network_input_shapes); } ~CNNPredictor_torcs_agent_torcsAgent_dqn_0(){ - if(handle) MXPredFree(handle); + for(Executor * handle : network_handles){ + delete handle; + } + MXNotifyShutdown(); } void predict(const std::vector &in_state_, std::vector &out_qvalues_){ - MXPredSetInput(handle, input_keys[0].c_str(), in_state_.data(), static_cast(in_state_.size())); - - MXPredForward(handle); - mx_uint output_index; - mx_uint *shape = 0; - mx_uint shape_len; - size_t size; - - output_index = 0; - MXPredGetOutputShape(handle, output_index, &shape, &shape_len); - size = 1; - for (mx_uint i = 0; i < shape_len; ++i) size *= shape[i]; - assert(size == out_qvalues_.size()); - MXPredGetOutput(handle, output_index, &(out_qvalues_[0]), out_qvalues_.size()); + NDArray input_temp; + input_temp = NDArray(network_input_shapes[0], ctx, false, dtype); + input_temp.SyncCopyFromCPU(in_state_.data(), network_input_sizes[0]); + input_temp.CopyTo(&(network_handles[0]->arg_dict()[network_input_keys[0]])); + NDArray::WaitAll(); + + network_handles[0]->Forward(false); + CheckMXNetError("Forward, predict, handle ind. 
0"); + + + std::vector output = network_handles.back()->outputs; + std::vector curr_output_shape; + size_t curr_output_size; + curr_output_shape = output[0].GetShape(); + curr_output_size = 1; + for (mx_uint i : curr_output_shape) curr_output_size *= i; + //Fix due to a bug in the in how the output arrays are initialized when there are multiple outputs + assert((curr_output_size == out_qvalues_.size()) || (curr_output_size == out_qvalues_[0])); + output[0].SyncCopyToCPU(&out_qvalues_); + } + + + + Executor* initExecutor(Symbol &sym, + std::map ¶m_map, + const std::vector &exec_input_keys, + const std::vector> &exec_input_shapes){ + + const mx_uint num_exec_input_nodes = exec_input_keys.size(); + for(mx_uint i = 0; i < num_exec_input_nodes; i++){ + param_map[exec_input_keys[i]] = NDArray(exec_input_shapes[i], ctx, false, dtype); + } - void init(const std::string &json_file, - const std::string ¶m_file, - const std::vector &input_keys, - const std::vector> &input_shapes, - const bool &use_gpu){ + std::vector param_arrays; + std::vector grad_array; + std::vector grad_reqs; + std::vector aux_arrays; + std::map< std::string, NDArray> aux_map; - BufferFile json_data(json_file); - BufferFile param_data(param_file); + sym.InferExecutorArrays(ctx, ¶m_arrays, &grad_array, &grad_reqs, + &aux_arrays, param_map, std::map(), + std::map(), aux_map); - int dev_type = use_gpu ? 
2 : 1; - int dev_id = 0; + Executor *handle = new Executor(sym, ctx, param_arrays, grad_array, grad_reqs, aux_arrays); + assert(handle); + return handle; + } - if (json_data.GetLength() == 0 || - param_data.GetLength() == 0) { - std::exit(-1); + std::vector getSizesOfShapes(const std::vector> shapes){ + std::vector sizes; + for(std::vector shape : shapes){ + mx_uint val = 1; + for(mx_uint i: shape){ + val *= i; + } + sizes.push_back(val); } + return sizes; + } - const mx_uint num_input_nodes = input_keys.size(); - - const char* input_keys_ptr[num_input_nodes]; - for(mx_uint i = 0; i < num_input_nodes; i++){ - input_keys_ptr[i] = input_keys[i].c_str(); + void CheckMXNetError(std::string loc){ + const char* err = MXGetLastError(); + if (err && err[0] != 0) { + std::cout << "MXNet error at " << loc << err << std::endl; + exit(-1); } - - mx_uint shape_data_size = 0; - mx_uint input_shape_indptr[input_shapes.size() + 1]; - input_shape_indptr[0] = 0; - for(mx_uint i = 0; i < input_shapes.size(); i++){ - shape_data_size += input_shapes[i].size(); - input_shape_indptr[i+1] = shape_data_size; + } + + void init(const std::string &file_prefix, + const std::vector &network_input_keys, + const std::vector> &network_input_shapes){ + + CNNLAOptimizer_torcs_agent_torcsAgent_dqn optimizer_creator = CNNLAOptimizer_torcs_agent_torcsAgent_dqn(); + + if(optimizer_creator.getContextName() == "gpu"){ + ctx = Context::gpu(); } - - mx_uint input_shape_data[shape_data_size]; - mx_uint index = 0; - for(mx_uint i = 0; i < input_shapes.size(); i++){ - for(mx_uint j = 0; j < input_shapes[i].size(); j++){ - input_shape_data[index] = input_shapes[i][j]; - index++; - } + + network_input_sizes = getSizesOfShapes(network_input_shapes); + + ModelLoader model_loader(file_prefix, 0, ctx); + + std::vector network_symbols = model_loader.GetNetworkSymbols(); + std::vector> network_param_maps; + network_param_maps = model_loader.GetNetworkParamMaps(); + + //Init handles + std::map> in_shape_map; + 
for(mx_uint i=0; i < network_input_keys.size(); i++){ + in_shape_map[network_input_keys[i]] = network_input_shapes[i]; } - - MXPredCreate(static_cast(json_data.GetBuffer()), - static_cast(param_data.GetBuffer()), - static_cast(param_data.GetLength()), - dev_type, - dev_id, - num_input_nodes, - input_keys_ptr, - input_shape_indptr, - input_shape_data, - &handle); - assert(handle); + std::vector> in_shapes; + std::vector> aux_shapes; + std::vector> out_shapes; + network_symbols[0].InferShape(in_shape_map, &in_shapes, &aux_shapes, &out_shapes); + network_handles.push_back(initExecutor(network_symbols[0], network_param_maps[0], network_input_keys, network_input_shapes)); + } }; - #endif // CNNPREDICTOR_TORCS_AGENT_TORCSAGENT_DQN diff --git a/src/test/resources/target_code/gluon/reinforcementModel/torcs/CNNTrainer_torcs_agent_torcsAgent_dqn.py b/src/test/resources/target_code/gluon/reinforcementModel/torcs/CNNTrainer_torcs_agent_torcsAgent_dqn.py index 4c697411..7c8fd235 100644 --- a/src/test/resources/target_code/gluon/reinforcementModel/torcs/CNNTrainer_torcs_agent_torcsAgent_dqn.py +++ b/src/test/resources/target_code/gluon/reinforcementModel/torcs/CNNTrainer_torcs_agent_torcsAgent_dqn.py @@ -65,6 +65,7 @@ if __name__ == "__main__": 'state_dtype': 'float32', 'action_dtype': 'uint8', 'rewards_dtype': 'float32' + }, 'strategy_params': { 'method':'epsgreedy', diff --git a/src/test/resources/target_code/gluon/reinforcementModel/torcs/torcs_agent_torcsAgent_dqn.h b/src/test/resources/target_code/gluon/reinforcementModel/torcs/torcs_agent_torcsAgent_dqn.h index f80f2b16..93514655 100644 --- a/src/test/resources/target_code/gluon/reinforcementModel/torcs/torcs_agent_torcsAgent_dqn.h +++ b/src/test/resources/target_code/gluon/reinforcementModel/torcs/torcs_agent_torcsAgent_dqn.h @@ -20,8 +20,10 @@ qvalues=colvec(discrete_actions); } void execute(){ vector state_ = CNNTranslator::translate(state); + vector qvalues_(30); + _predictor_0_.predict(state_, qvalues_); qvalues = 
CNNTranslator::translateToCol(qvalues_, std::vector {30}); diff --git a/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/CNNCreator_torcs_agent_torcsAgent_actor.py b/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/CNNCreator_torcs_agent_torcsAgent_actor.py index 6a1b7fa3..092dfdb9 100644 --- a/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/CNNCreator_torcs_agent_torcsAgent_actor.py +++ b/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/CNNCreator_torcs_agent_torcsAgent_actor.py @@ -2,6 +2,8 @@ import mxnet as mx import logging import os import shutil +import warnings +import inspect from CNNNet_torcs_agent_torcsAgent_actor import Net_0 @@ -20,6 +22,10 @@ class CNNCreator_torcs_agent_torcsAgent_actor: for i, network in self.networks.items(): lastEpoch = 0 param_file = None + if hasattr(network, 'episodic_sub_nets'): + num_episodic_sub_nets = len(network.episodic_sub_nets) + lastMemEpoch = [0]*num_episodic_sub_nets + mem_files = [None]*num_episodic_sub_nets try: os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + "_newest-0000.params") @@ -30,22 +36,77 @@ class CNNCreator_torcs_agent_torcsAgent_actor: except OSError: pass + if hasattr(network, 'episodic_sub_nets'): + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(0) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(0) + "-symbol.json") + except OSError: + pass + + for j in range(len(network.episodic_sub_nets)): + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(j+1) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(j+1) + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + 
self._model_prefix_ + "_" + str(i) + '_newest_episodic_query_net_' + str(j+1) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_query_net_' + str(j+1) + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_loss' + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_loss' + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + "_newest_episodic_memory_sub_net_" + str(j + 1) + "-0000") + except OSError: + pass + if os.path.isdir(self._model_dir_): for file in os.listdir(self._model_dir_): - if ".params" in file and self._model_prefix_ + "_" + str(i) in file: - epochStr = file.replace(".params","").replace(self._model_prefix_ + "_" + str(i) + "-","") + if ".params" in file and self._model_prefix_ + "_" + str(i) in file and not "loss" in file: + epochStr = file.replace(".params", "").replace(self._model_prefix_ + "_" + str(i) + "-", "") epoch = int(epochStr) - if epoch > lastEpoch: + if epoch >= lastEpoch: lastEpoch = epoch param_file = file + elif hasattr(network, 'episodic_sub_nets') and self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_" in file: + relMemPathInfo = file.replace(self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_", "").split("-") + memSubNet = int(relMemPathInfo[0]) + memEpochStr = relMemPathInfo[1] + memEpoch = int(memEpochStr) + if memEpoch >= lastMemEpoch[memSubNet-1]: + lastMemEpoch[memSubNet-1] = memEpoch + mem_files[memSubNet-1] = file + if param_file is None: earliestLastEpoch = 0 else: logging.info("Loading checkpoint: " + param_file) network.load_parameters(self._model_dir_ + param_file) + if hasattr(network, 'episodic_sub_nets'): + for j, sub_net in enumerate(network.episodic_sub_nets): + if mem_files[j] != None: + 
logging.info("Loading Replay Memory: " + mem_files[j]) + mem_layer = [param for param in inspect.getmembers(sub_net, lambda x: not(inspect.isroutine(x))) if param[0].startswith("memory")][0][1] + mem_layer.load_memory(self._model_dir_ + mem_files[j]) - if earliestLastEpoch == None or lastEpoch < earliestLastEpoch: - earliestLastEpoch = lastEpoch + if earliestLastEpoch == None or lastEpoch + 1 < earliestLastEpoch: + earliestLastEpoch = lastEpoch + 1 return earliestLastEpoch @@ -56,27 +117,52 @@ class CNNCreator_torcs_agent_torcsAgent_actor: for i, network in self.networks.items(): # param_file = self._model_prefix_ + "_" + str(i) + "_newest-0000.params" param_file = None + if hasattr(network, 'episodic_sub_nets'): + num_episodic_sub_nets = len(network.episodic_sub_nets) + lastMemEpoch = [0] * num_episodic_sub_nets + mem_files = [None] * num_episodic_sub_nets + if os.path.isdir(self._weights_dir_): lastEpoch = 0 for file in os.listdir(self._weights_dir_): - if ".params" in file and self._model_prefix_ + "_" + str(i) in file: + if ".params" in file and self._model_prefix_ + "_" + str(i) in file and not "loss" in file: epochStr = file.replace(".params","").replace(self._model_prefix_ + "_" + str(i) + "-","") epoch = int(epochStr) - if epoch > lastEpoch: + if epoch >= lastEpoch: lastEpoch = epoch param_file = file + elif hasattr(network, 'episodic_sub_nets') and self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_" in file: + relMemPathInfo = file.replace(self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_").split("-") + memSubNet = int(relMemPathInfo[0]) + memEpochStr = relMemPathInfo[1] + memEpoch = int(memEpochStr) + if memEpoch >= lastMemEpoch[memSubNet-1]: + lastMemEpoch[memSubNet-1] = memEpoch + mem_files[memSubNet-1] = file + logging.info("Loading pretrained weights: " + self._weights_dir_ + param_file) network.load_parameters(self._weights_dir_ + param_file, allow_missing=True, ignore_extra=True) + if hasattr(network, 'episodic_sub_nets'): 
+ assert lastEpoch == lastMemEpoch + for j, sub_net in enumerate(network.episodic_sub_nets): + if mem_files[j] != None: + logging.info("Loading pretrained Replay Memory: " + mem_files[j]) + mem_layer = \ + [param for param in inspect.getmembers(sub_net, lambda x: not (inspect.isroutine(x))) if + param[0].startswith("memory")][0][1] + mem_layer.load_memory(self._model_dir_ + mem_files[j]) else: logging.info("No pretrained weights available at: " + self._weights_dir_ + param_file) def construct(self, context, data_mean=None, data_std=None): - self.networks[0] = Net_0(data_mean=data_mean, data_std=data_std) - self.networks[0].collect_params().initialize(self.weight_initializer, ctx=context) + self.networks[0] = Net_0(data_mean=data_mean, data_std=data_std, mx_context=context, prefix="") + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + self.networks[0].collect_params().initialize(self.weight_initializer, force_reinit=False, ctx=context) self.networks[0].hybridize() - self.networks[0](mx.nd.zeros((1, 29,), ctx=context)) + self.networks[0](mx.nd.zeros((1, 29,), ctx=context[0])) if not os.path.exists(self._model_dir_): os.makedirs(self._model_dir_) diff --git a/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/CNNNet_torcs_agent_torcsAgent_actor.py b/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/CNNNet_torcs_agent_torcsAgent_actor.py index 26e6138d..9e1417c6 100644 --- a/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/CNNNet_torcs_agent_torcsAgent_actor.py +++ b/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/CNNNet_torcs_agent_torcsAgent_actor.py @@ -1,7 +1,10 @@ import mxnet as mx import numpy as np import math -from mxnet import gluon +import os +import abc +import warnings +from mxnet import gluon, nd class ZScoreNormalization(gluon.HybridBlock): @@ -86,9 +89,419 @@ class CustomGRU(gluon.HybridBlock): output, [state0] = self.gru(data, [F.swapaxes(state0, 0, 1)]) return output, 
F.swapaxes(state0, 0, 1) + +class DotProductSelfAttention(gluon.HybridBlock): + def __init__(self, + scale_factor, + num_heads, + dim_model, + dim_keys, + dim_values, + use_proj_bias, + use_mask, + **kwargs): + super(DotProductSelfAttention, self).__init__(**kwargs) + with self.name_scope(): + self.num_heads = num_heads + self.dim_model = dim_model + self.use_proj_bias = use_proj_bias + self.use_mask = use_mask + + if dim_keys == -1: + self.dim_keys = int(dim_model / self.num_heads) + else: + self.dim_keys = dim_keys + if dim_values == -1: + self.dim_values = int(dim_model / self.num_heads) + else: + self.dim_values = dim_values + + if scale_factor == -1: + self.scale_factor = math.sqrt(self.dim_keys) + else: + self.scale_factor = scale_factor + + self.proj_q = gluon.nn.Dense(self.num_heads*self.dim_keys, use_bias=self.use_proj_bias, flatten=False) + self.proj_k = gluon.nn.Dense(self.num_heads*self.dim_keys, use_bias=self.use_proj_bias, flatten=False) + self.proj_v = gluon.nn.Dense(self.num_heads*self.dim_values, use_bias=self.use_proj_bias, flatten=False) + self.proj_o = gluon.nn.Dense(self.dim_model, use_bias=self.use_proj_bias, flatten=False) + + def hybrid_forward(self, F, queries, keys, values, *args, **kwargs): + + queries = F.Reshape(queries, shape=(0, 0,-1)) + keys = F.Reshape(queries, shape=(0, 0, -1)) + values = F.Reshape(queries, shape=(0, 0, -1)) + + head_queries = self.proj_q(queries) + head_keys = self.proj_k(keys) + head_values = self.proj_v(values) + + head_queries = F.reshape(head_queries, shape=(0, 0, self.num_heads, -1)) + head_queries = F.transpose(head_queries, axes=(0,2,1,3)) + head_queries = F.reshape(head_queries, shape=(-1, 0, 0), reverse=True) + + head_keys = F.reshape(head_keys, shape=(0, 0, self.num_heads, -1)) + head_keys = F.transpose(head_keys, axes=(0,2,1,3)) + head_keys = F.reshape(head_keys, shape=(-1, 0, 0), reverse=True) + + score = F.batch_dot(head_queries, head_keys, transpose_b=True) + score = score * self.scale_factor + if 
self.use_mask: + mask = F.tile(mask, self.num_heads) + mask = F.repeat(mask, self.dim_model) + mask = F.reshape(mask, shape=(-1, self.dim_model)) + weights = F.softmax(score, mask, use_length=self.use_mask) + + head_values = F.reshape(head_values, shape=(0, 0, self.num_heads, -1)) + head_values = F.transpose(head_values, axes=(0,2,1,3)) + head_values = F.reshape(head_values, shape=(-1, 0, 0), reverse=True) + + ret = F.batch_dot(weights, head_values) + ret = F.reshape(ret, shape=(-1, self.num_heads, 0, 0), reverse=True) + ret = F.transpose(ret, axes=(0, 2, 1, 3)) + ret = F.reshape(ret, shape=(0, 0, -1)) + + ret = self.proj_o(ret) + + return ret + + +class EpisodicReplayMemoryInterface(gluon.HybridBlock): + __metaclass__ = abc.ABCMeta + + def __init__(self, use_replay, replay_interval, replay_batch_size, replay_steps, replay_gradient_steps, num_heads, **kwargs): + super(EpisodicReplayMemoryInterface, self).__init__(**kwargs) + + self.use_replay = use_replay + self.replay_interval = replay_interval + self.replay_batch_size = replay_batch_size + self.replay_steps = replay_steps + self.replay_gradient_steps = replay_gradient_steps + self.num_heads = num_heads + + @abc.abstractmethod + def store_samples(self, data, y, query_network, store_prob, mx_context): + pass + + @abc.abstractmethod + def sample_memory(self, batch_size, mx_context): + pass + + @abc.abstractmethod + def get_query_network(self, mx_context): + pass + + @abc.abstractmethod + def save_memory(self, path): + pass + + @abc.abstractmethod + def load_memory(self, path): + pass + +#Memory layer +class LargeMemory(gluon.HybridBlock): + def __init__(self, + sub_key_size, + query_size, + query_act, + dist_measure, + k, + num_heads, + values_dim, + **kwargs): + super(LargeMemory, self).__init__(**kwargs) + with self.name_scope(): + #Memory parameters + self.dist_measure = dist_measure + self.k = k + self.num_heads = num_heads + self.query_act = query_act + self.query_size = query_size + self.num_heads = num_heads 
+ + #Batch norm sub-layer + self.batch_norm = gluon.nn.BatchNorm() + + #Memory sub-layer + self.sub_key_size = sub_key_size + sub_key_shape = (self.num_heads, self.sub_key_size, int(query_size[-1] / 2)) + + if values_dim == -1: + values_shape = (self.sub_key_size * self.sub_key_size, self.query_size[-1]) + else: + values_shape = (self.sub_key_size*self.sub_key_size, values_dim) + + self.sub_keys1 = self.params.get("sub_keys1", shape=sub_key_shape, differentiable=True) + self.sub_keys2 = self.params.get("sub_keys2", shape=sub_key_shape, differentiable=True) + self.values = self.params.get("values", shape=values_shape, differentiable=True) + self.label_memory = nd.array([]) + + self.get_query_network() + + def hybrid_forward(self, F, x, sub_keys1, sub_keys2, values): + x = self.batch_norm(x) + + x = F.reshape(x, shape=(0, -1)) + + q = self.query_network(x) + + q = F.reshape(q, shape=(0, self.num_heads, -1)) + + q_split = F.split(q, num_outputs=2, axis=-1) + + if self.dist_measure == "l2": + q_split_resh = F.reshape(q_split[0], shape=(0,0,1,-1)) + sub_keys1_resh = F.reshape(sub_keys1, shape=(1,0,0,-1), reverse=True) + q1_diff = F.broadcast_sub(q_split_resh, sub_keys1_resh) + q1_dist = F.norm(q1_diff, axis=-1) + q_split_resh = F.reshape(q_split[1], shape=(0,0,1,-1)) + sub_keys2_resh = F.reshape(sub_keys2, shape=(1,0,0,-1), reverse=True) + q2_diff = F.broadcast_sub(q_split_resh, sub_keys2_resh) + q2_dist = F.norm(q2_diff, axis=-1) + else: + q1 = F.split(q_split[0], num_outputs=self.num_heads, axis=1) + q2 = F.split(q_split[1], num_outputs=self.num_heads, axis=1) + sub_keys1_resh = F.split(sub_keys1, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + sub_keys2_resh = F.split(sub_keys2, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + if self.num_heads == 1: + q1 = [q1] + q2 = [q2] + sub_keys1_resh = [sub_keys1_resh ] + sub_keys2_resh = [sub_keys2_resh ] + + q1_dist = F.dot(q1[0], sub_keys1_resh[0], transpose_b=True) + q2_dist = F.dot(q2[0], 
sub_keys2_resh[0], transpose_b=True) + for h in range(1, self.num_heads): + q1_dist = F.concat(q1_dist, F.dot(q1[0], sub_keys1_resh[h], transpose_b=True), dim=1) + q2_dist = F.concat(q2_dist, F.dot(q2[0], sub_keys1_resh[h], transpose_b=True), dim=1) + + i1 = F.topk(q1_dist, k=self.k, ret_typ="indices") + i2 = F.topk(q2_dist, k=self.k, ret_typ="indices") + + # Calculate cross product for keys at indices I1 and I2 + + # def head_take(data, state): + # return [F.take(data[0], data[2]), F.take(data[1], data[3])], state, + # + # i1 = F.transpose(i1, axes=(1,0,2)) + # i2 = F.transpose(i2, axes=(1, 0, 2)) + # st = F.zeros(1) + # (k1, k2), _ = F.contrib.foreach(head_take, [sub_keys1, sub_keys2,i1,i2], st) + # k1 = F.reshape(k1, shape=(-1, 0, 0), reverse=True) + # k2 = F.reshape(k2, shape=(-1, 0, 0), reverse=True) + i1 = F.split(i1, num_outputs=self.num_heads, axis=1) + i2 = F.split(i2, num_outputs=self.num_heads, axis=1) + sub_keys1 = F.split(sub_keys1, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + sub_keys2 = F.split(sub_keys2, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + if self.num_heads == 1: + i1 = [i1] + i2 = [i2] + sub_keys1 = [sub_keys1] + sub_keys2 = [sub_keys2] + + k1 = F.take(sub_keys1[0], i1[0]) + k2 = F.take(sub_keys2[0], i2[0]) + for h in range(1, self.num_heads): + k1 = F.concat(k1, F.take(sub_keys1[h], i1[h]), dim=1) + k2 = F.concat(k2, F.take(sub_keys2[h], i2[h]), dim=1) + + k1 = F.tile(k1, (1, 1, self.k, 1)) + k2 = F.repeat(k2, self.k, 2) + c_cart = F.concat(k1, k2, dim=3) + + q = F.reshape(q, shape=(-1,0), reverse=True) + q = F.reshape(q, shape=(0, 1, -1)) + c_cart = F.reshape(c_cart, shape=(-1, 0, 0), reverse=True) + if self.dist_measure == "l2": + k_diff = F.broadcast_sub(q, c_cart) + k_dist = F.norm(k_diff, axis=-1) + else: + k_dist = F.batch_dot(q, c_cart, transpose_b=True) #F.contrib.foreach(loop_batch_dot, [q, c_cart], init_states=state_batch_dist) + k_dist = F.reshape(k_dist, shape=(0, -1)) + + i = F.topk(k_dist, k=self.k, 
ret_typ="both") + + w = F.softmax(i[0]) + w = F.reshape(w, shape=(0,1,-1)) + vi = F.take(values, i[1]) + aggr_value = F.batch_dot(w, vi) #F.contrib.foreach(loop_batch_dot, [w, vi], init_states=state_batch_dist) + + ret = F.reshape(aggr_value, shape=(-1, self.num_heads, 0), reverse=True) + one_vec = F.ones((1, 1, self.num_heads)) + one_vec = F.broadcast_like(one_vec, ret, lhs_axes=0, rhs_axes=0) + ret = F.batch_dot(one_vec, ret) + ret = F.reshape(ret, shape=(-1, 0), reverse=True) + + return ret + + def get_query_network(self): + if hasattr(self, 'query_network'): + return self.query_network + else: + self.query_network = gluon.nn.HybridSequential() + for size in self.query_size: + if self.query_act == "linear": + self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, flatten=False)) + else: + self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, activation=self.query_act, flatten=False)) + return self.query_network + + +#EpisodicMemory layer +class EpisodicMemory(EpisodicReplayMemoryInterface): + def __init__(self, + replay_interval, + replay_batch_size, + replay_steps, + replay_gradient_steps, + store_prob, + max_stored_samples, + use_replay, + query_net_dir, + query_net_prefix, + query_net_num_inputs, + **kwargs): + super(EpisodicMemory, self).__init__(use_replay, replay_interval, replay_batch_size, replay_steps, replay_gradient_steps, 1, **kwargs) + with self.name_scope(): + #Replay parameters + self.store_prob = store_prob + self.max_stored_samples = max_stored_samples + + self.query_net_dir = query_net_dir + self.query_net_prefix = query_net_prefix + self.query_net_num_inputs = query_net_num_inputs + + #Memory + self.key_memory = nd.array([]) + self.value_memory = nd.array([]) + self.label_memory = nd.array([]) + + def hybrid_forward(self, F, *args): + #propagate the input as the rest is only used for replay + return [args, []] + + def store_samples(self, data, y, query_network, store_prob, context): + num_pus = len(data) + sub_batch_sizes 
= [data[i][0][0].shape[0] for i in range(num_pus)] + num_inputs = len(data[0][0]) + num_outputs = len(y) + mx_context = context[0] + + if len(self.key_memory) == 0: + self.key_memory = nd.empty(0, ctx=mx.cpu()) + self.value_memory = [] + self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) + + ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] + + max_inds = [nd.max(ind[i]) for i in range(num_pus)] + if any(max_inds): + to_store_values = [] + for i in range(num_inputs): + tmp_values = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_values, list): + tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) + else: + tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) + to_store_values.append(tmp_values) + + to_store_labels = [] + for i in range(num_outputs): + tmp_labels = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_labels, list): + tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) + else: + tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) + to_store_labels.append(tmp_labels) + + to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) + + if self.key_memory.shape[0] == 0: + self.key_memory = to_store_keys.as_in_context(mx.cpu()) + for i in range(num_inputs): + self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + for i in range(num_outputs): + self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) + elif self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: + num_to_store = to_store_keys.shape[0] + self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = 
nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=1) + else: + self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + + def sample_memory(self, batch_size): + num_stored_samples = self.key_memory.shape[0] + if self.replay_batch_size == -1: + sample_ind = nd.random.randint(0, num_stored_samples, (self.replay_steps, batch_size), ctx=mx.cpu()) + else: + sample_ind = nd.random.randint(0, num_stored_samples, (self.replay_steps, self.replay_batch_size), ctx=mx.cpu()) + + num_outputs = len(self.label_memory) + + sample_labels = [[self.label_memory[i][ind] for i in range(num_outputs)] for ind in sample_ind] + sample_batches = [[[self.value_memory[j][ind] for j in range(len(self.value_memory))], sample_labels[i]] for i, ind in enumerate(sample_ind)] + + return sample_batches + + def get_query_network(self, context): + lastEpoch = 0 + for file in os.listdir(self.query_net_dir): + if self.query_net_prefix in file and ".json" in file: + symbolFile = file + + if self.query_net_prefix in file and ".param" in file: + epochStr = file.replace(".params", "").replace(self.query_net_prefix, "") + epoch = int(epochStr) + if epoch >= lastEpoch: + lastEpoch = epoch + weightFile = file + + inputNames = [] + if self.query_net_num_inputs == 1: + inputNames.append("data") + else: + for i in range(self.query_net_num_inputs): + inputNames.append("data" + str(i)) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + net = mx.gluon.nn.SymbolBlock.imports(self.query_net_dir + 
symbolFile, inputNames, self.query_net_dir + weightFile, ctx=context[0]) + net.hybridize() + return net + + def save_memory(self, path): + mem_arr = [("keys", self.key_memory)] + [("values_"+str(k),v) for (k,v) in enumerate(self.value_memory)] + [("labels_"+str(k),v) for (k,v) in enumerate(self.label_memory)] + mem_dict = {entry[0]:entry[1] for entry in mem_arr} + nd.save(path, mem_dict) + + def load_memory(self, path): + mem_dict = nd.load(path) + self.value_memory = [] + self.label_memory = [] + for key in sorted(mem_dict.keys()): + if key == "keys": + self.key_memory = mem_dict[key] + elif key.startswith("values_"): + self.value_memory.append(mem_dict[key]) + elif key.startswith("labels_"): + self.label_memory.append(mem_dict[key]) + + +#Stream 0 class Net_0(gluon.HybridBlock): - def __init__(self, data_mean=None, data_std=None, **kwargs): + def __init__(self, data_mean=None, data_std=None, mx_context=None, **kwargs): super(Net_0, self).__init__(**kwargs) with self.name_scope(): if data_mean: @@ -123,5 +536,5 @@ class Net_0(gluon.HybridBlock): tanh3_ = self.tanh3_(fc3_) commands_ = F.identity(tanh3_) - return commands_ + return [[commands_]] diff --git a/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/CNNPredictor_torcs_agent_torcsAgent_actor.h b/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/CNNPredictor_torcs_agent_torcsAgent_actor.h index b196a599..dc3ee21a 100644 --- a/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/CNNPredictor_torcs_agent_torcsAgent_actor.h +++ b/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/CNNPredictor_torcs_agent_torcsAgent_actor.h @@ -1,108 +1,149 @@ #ifndef CNNPREDICTOR_TORCS_AGENT_TORCSAGENT_ACTOR #define CNNPREDICTOR_TORCS_AGENT_TORCSAGENT_ACTOR -#include +#include #include #include #include + +#include +#include -#include - +using namespace mxnet::cpp; + class CNNPredictor_torcs_agent_torcsAgent_actor_0{ public: - const std::string json_file = 
"model/torcs.agent.network.TorcsActor/model_0_newest-symbol.json"; - const std::string param_file = "model/torcs.agent.network.TorcsActor/model_0_newest-0000.params"; - const std::vector input_keys = { + const std::string file_prefix = "model/torcs.agent.network.TorcsActor/model_0_newest"; + + //network + const std::vector network_input_keys = { "data" }; - const std::vector> input_shapes = {{1, 29}}; - const bool use_gpu = false; - - PredictorHandle handle; - + const std::vector> network_input_shapes = {{1, 29}}; + std::vector network_input_sizes; + std::vector> network_arg_names; + std::vector network_handles; + + + //misc + Context ctx = Context::cpu(); //Will be updated later in init according to use_gpu + int dtype = 0; //use data type (float32=0 float64=1 ...) + + explicit CNNPredictor_torcs_agent_torcsAgent_actor_0(){ - init(json_file, param_file, input_keys, input_shapes, use_gpu); + init(file_prefix, network_input_keys, network_input_shapes); } ~CNNPredictor_torcs_agent_torcsAgent_actor_0(){ - if(handle) MXPredFree(handle); + for(Executor * handle : network_handles){ + delete handle; + } + MXNotifyShutdown(); } void predict(const std::vector &in_state_, std::vector &out_commands_){ - MXPredSetInput(handle, input_keys[0].c_str(), in_state_.data(), static_cast(in_state_.size())); - - MXPredForward(handle); - mx_uint output_index; - mx_uint *shape = 0; - mx_uint shape_len; - size_t size; - - output_index = 0; - MXPredGetOutputShape(handle, output_index, &shape, &shape_len); - size = 1; - for (mx_uint i = 0; i < shape_len; ++i) size *= shape[i]; - - assert(size == out_commands_.size()); - MXPredGetOutput(handle, output_index, &(out_commands_[0]), out_commands_.size()); + NDArray input_temp; + input_temp = NDArray(network_input_shapes[0], ctx, false, dtype); + input_temp.SyncCopyFromCPU(in_state_.data(), network_input_sizes[0]); + input_temp.CopyTo(&(network_handles[0]->arg_dict()[network_input_keys[0]])); + NDArray::WaitAll(); + + 
network_handles[0]->Forward(false); + CheckMXNetError("Forward, predict, handle ind. 0"); + + + std::vector output = network_handles.back()->outputs; + std::vector curr_output_shape; + size_t curr_output_size; + curr_output_shape = output[0].GetShape(); + curr_output_size = 1; + for (mx_uint i : curr_output_shape) curr_output_size *= i; + //Fix due to a bug in the in how the output arrays are initialized when there are multiple outputs + assert((curr_output_size == out_commands_.size()) || (curr_output_size == out_commands_[0])); + output[0].SyncCopyToCPU(&out_commands_); + } + + + + Executor* initExecutor(Symbol &sym, + std::map ¶m_map, + const std::vector &exec_input_keys, + const std::vector> &exec_input_shapes){ + + const mx_uint num_exec_input_nodes = exec_input_keys.size(); + for(mx_uint i = 0; i < num_exec_input_nodes; i++){ + param_map[exec_input_keys[i]] = NDArray(exec_input_shapes[i], ctx, false, dtype); + } - void init(const std::string &json_file, - const std::string ¶m_file, - const std::vector &input_keys, - const std::vector> &input_shapes, - const bool &use_gpu){ + std::vector param_arrays; + std::vector grad_array; + std::vector grad_reqs; + std::vector aux_arrays; + std::map< std::string, NDArray> aux_map; - BufferFile json_data(json_file); - BufferFile param_data(param_file); + sym.InferExecutorArrays(ctx, ¶m_arrays, &grad_array, &grad_reqs, + &aux_arrays, param_map, std::map(), + std::map(), aux_map); - int dev_type = use_gpu ? 
2 : 1; - int dev_id = 0; + Executor *handle = new Executor(sym, ctx, param_arrays, grad_array, grad_reqs, aux_arrays); + assert(handle); + return handle; + } - if (json_data.GetLength() == 0 || - param_data.GetLength() == 0) { - std::exit(-1); + std::vector getSizesOfShapes(const std::vector> shapes){ + std::vector sizes; + for(std::vector shape : shapes){ + mx_uint val = 1; + for(mx_uint i: shape){ + val *= i; + } + sizes.push_back(val); } + return sizes; + } - const mx_uint num_input_nodes = input_keys.size(); - - const char* input_keys_ptr[num_input_nodes]; - for(mx_uint i = 0; i < num_input_nodes; i++){ - input_keys_ptr[i] = input_keys[i].c_str(); + void CheckMXNetError(std::string loc){ + const char* err = MXGetLastError(); + if (err && err[0] != 0) { + std::cout << "MXNet error at " << loc << err << std::endl; + exit(-1); } - - mx_uint shape_data_size = 0; - mx_uint input_shape_indptr[input_shapes.size() + 1]; - input_shape_indptr[0] = 0; - for(mx_uint i = 0; i < input_shapes.size(); i++){ - shape_data_size += input_shapes[i].size(); - input_shape_indptr[i+1] = shape_data_size; + } + + void init(const std::string &file_prefix, + const std::vector &network_input_keys, + const std::vector> &network_input_shapes){ + + CNNLAOptimizer_torcs_agent_torcsAgent_actor optimizer_creator = CNNLAOptimizer_torcs_agent_torcsAgent_actor(); + + if(optimizer_creator.getContextName() == "gpu"){ + ctx = Context::gpu(); } - - mx_uint input_shape_data[shape_data_size]; - mx_uint index = 0; - for(mx_uint i = 0; i < input_shapes.size(); i++){ - for(mx_uint j = 0; j < input_shapes[i].size(); j++){ - input_shape_data[index] = input_shapes[i][j]; - index++; - } + + network_input_sizes = getSizesOfShapes(network_input_shapes); + + ModelLoader model_loader(file_prefix, 0, ctx); + + std::vector network_symbols = model_loader.GetNetworkSymbols(); + std::vector> network_param_maps; + network_param_maps = model_loader.GetNetworkParamMaps(); + + //Init handles + std::map> in_shape_map; + 
for(mx_uint i=0; i < network_input_keys.size(); i++){ + in_shape_map[network_input_keys[i]] = network_input_shapes[i]; } - - MXPredCreate(static_cast(json_data.GetBuffer()), - static_cast(param_data.GetBuffer()), - static_cast(param_data.GetLength()), - dev_type, - dev_id, - num_input_nodes, - input_keys_ptr, - input_shape_indptr, - input_shape_data, - &handle); - assert(handle); + std::vector> in_shapes; + std::vector> aux_shapes; + std::vector> out_shapes; + network_symbols[0].InferShape(in_shape_map, &in_shapes, &aux_shapes, &out_shapes); + network_handles.push_back(initExecutor(network_symbols[0], network_param_maps[0], network_input_keys, network_input_shapes)); + } }; - #endif // CNNPREDICTOR_TORCS_AGENT_TORCSAGENT_ACTOR diff --git a/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/CNNTrainer_torcs_agent_torcsAgent_actor.py b/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/CNNTrainer_torcs_agent_torcsAgent_actor.py index c11dfc7e..62ea6ad0 100644 --- a/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/CNNTrainer_torcs_agent_torcsAgent_actor.py +++ b/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/CNNTrainer_torcs_agent_torcsAgent_actor.py @@ -68,6 +68,7 @@ if __name__ == "__main__": 'state_dtype': 'float32', 'action_dtype': 'float32', 'rewards_dtype': 'float32' + }, 'strategy_params': { 'method':'ornstein_uhlenbeck', diff --git a/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/reinforcement_learning/CNNCreator_torcs_agent_network_torcsCritic.py b/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/reinforcement_learning/CNNCreator_torcs_agent_network_torcsCritic.py index 62e7853e..14449c6c 100644 --- a/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/reinforcement_learning/CNNCreator_torcs_agent_network_torcsCritic.py +++ 
b/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/reinforcement_learning/CNNCreator_torcs_agent_network_torcsCritic.py @@ -2,6 +2,8 @@ import mxnet as mx import logging import os import shutil +import warnings +import inspect from CNNNet_torcs_agent_network_torcsCritic import Net_0 @@ -20,6 +22,10 @@ class CNNCreator_torcs_agent_network_torcsCritic: for i, network in self.networks.items(): lastEpoch = 0 param_file = None + if hasattr(network, 'episodic_sub_nets'): + num_episodic_sub_nets = len(network.episodic_sub_nets) + lastMemEpoch = [0]*num_episodic_sub_nets + mem_files = [None]*num_episodic_sub_nets try: os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + "_newest-0000.params") @@ -30,22 +36,77 @@ class CNNCreator_torcs_agent_network_torcsCritic: except OSError: pass + if hasattr(network, 'episodic_sub_nets'): + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(0) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(0) + "-symbol.json") + except OSError: + pass + + for j in range(len(network.episodic_sub_nets)): + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(j+1) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_sub_net_' + str(j+1) + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_query_net_' + str(j+1) + "-0000.params") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_episodic_query_net_' + str(j+1) + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_loss' + "-0000.params") + except OSError: + pass + try: + 
os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + '_newest_loss' + "-symbol.json") + except OSError: + pass + try: + os.remove(self._model_dir_ + self._model_prefix_ + "_" + str(i) + "_newest_episodic_memory_sub_net_" + str(j + 1) + "-0000") + except OSError: + pass + if os.path.isdir(self._model_dir_): for file in os.listdir(self._model_dir_): - if ".params" in file and self._model_prefix_ + "_" + str(i) in file: - epochStr = file.replace(".params","").replace(self._model_prefix_ + "_" + str(i) + "-","") + if ".params" in file and self._model_prefix_ + "_" + str(i) in file and not "loss" in file: + epochStr = file.replace(".params", "").replace(self._model_prefix_ + "_" + str(i) + "-", "") epoch = int(epochStr) - if epoch > lastEpoch: + if epoch >= lastEpoch: lastEpoch = epoch param_file = file + elif hasattr(network, 'episodic_sub_nets') and self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_" in file: + relMemPathInfo = file.replace(self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_", "").split("-") + memSubNet = int(relMemPathInfo[0]) + memEpochStr = relMemPathInfo[1] + memEpoch = int(memEpochStr) + if memEpoch >= lastMemEpoch[memSubNet-1]: + lastMemEpoch[memSubNet-1] = memEpoch + mem_files[memSubNet-1] = file + if param_file is None: earliestLastEpoch = 0 else: logging.info("Loading checkpoint: " + param_file) network.load_parameters(self._model_dir_ + param_file) + if hasattr(network, 'episodic_sub_nets'): + for j, sub_net in enumerate(network.episodic_sub_nets): + if mem_files[j] != None: + logging.info("Loading Replay Memory: " + mem_files[j]) + mem_layer = [param for param in inspect.getmembers(sub_net, lambda x: not(inspect.isroutine(x))) if param[0].startswith("memory")][0][1] + mem_layer.load_memory(self._model_dir_ + mem_files[j]) - if earliestLastEpoch == None or lastEpoch < earliestLastEpoch: - earliestLastEpoch = lastEpoch + if earliestLastEpoch == None or lastEpoch + 1 < earliestLastEpoch: + 
earliestLastEpoch = lastEpoch + 1 return earliestLastEpoch @@ -56,27 +117,52 @@ class CNNCreator_torcs_agent_network_torcsCritic: for i, network in self.networks.items(): # param_file = self._model_prefix_ + "_" + str(i) + "_newest-0000.params" param_file = None + if hasattr(network, 'episodic_sub_nets'): + num_episodic_sub_nets = len(network.episodic_sub_nets) + lastMemEpoch = [0] * num_episodic_sub_nets + mem_files = [None] * num_episodic_sub_nets + if os.path.isdir(self._weights_dir_): lastEpoch = 0 for file in os.listdir(self._weights_dir_): - if ".params" in file and self._model_prefix_ + "_" + str(i) in file: + if ".params" in file and self._model_prefix_ + "_" + str(i) in file and not "loss" in file: epochStr = file.replace(".params","").replace(self._model_prefix_ + "_" + str(i) + "-","") epoch = int(epochStr) - if epoch > lastEpoch: + if epoch >= lastEpoch: lastEpoch = epoch param_file = file + elif hasattr(network, 'episodic_sub_nets') and self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_" in file: + relMemPathInfo = file.replace(self._model_prefix_ + "_" + str(i) + "_episodic_memory_sub_net_").split("-") + memSubNet = int(relMemPathInfo[0]) + memEpochStr = relMemPathInfo[1] + memEpoch = int(memEpochStr) + if memEpoch >= lastMemEpoch[memSubNet-1]: + lastMemEpoch[memSubNet-1] = memEpoch + mem_files[memSubNet-1] = file + logging.info("Loading pretrained weights: " + self._weights_dir_ + param_file) network.load_parameters(self._weights_dir_ + param_file, allow_missing=True, ignore_extra=True) + if hasattr(network, 'episodic_sub_nets'): + assert lastEpoch == lastMemEpoch + for j, sub_net in enumerate(network.episodic_sub_nets): + if mem_files[j] != None: + logging.info("Loading pretrained Replay Memory: " + mem_files[j]) + mem_layer = \ + [param for param in inspect.getmembers(sub_net, lambda x: not (inspect.isroutine(x))) if + param[0].startswith("memory")][0][1] + mem_layer.load_memory(self._model_dir_ + mem_files[j]) else: logging.info("No 
pretrained weights available at: " + self._weights_dir_ + param_file) def construct(self, context, data_mean=None, data_std=None): - self.networks[0] = Net_0(data_mean=data_mean, data_std=data_std) - self.networks[0].collect_params().initialize(self.weight_initializer, ctx=context) + self.networks[0] = Net_0(data_mean=data_mean, data_std=data_std, mx_context=context, prefix="") + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + self.networks[0].collect_params().initialize(self.weight_initializer, force_reinit=False, ctx=context) self.networks[0].hybridize() - self.networks[0](mx.nd.zeros((1, 29,), ctx=context), mx.nd.zeros((1, 3,), ctx=context)) + self.networks[0](mx.nd.zeros((1, 29,), ctx=context[0]), mx.nd.zeros((1, 3,), ctx=context[0])) if not os.path.exists(self._model_dir_): os.makedirs(self._model_dir_) diff --git a/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/reinforcement_learning/CNNNet_torcs_agent_network_torcsCritic.py b/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/reinforcement_learning/CNNNet_torcs_agent_network_torcsCritic.py index 506ddf9c..e79ac15d 100644 --- a/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/reinforcement_learning/CNNNet_torcs_agent_network_torcsCritic.py +++ b/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/reinforcement_learning/CNNNet_torcs_agent_network_torcsCritic.py @@ -1,7 +1,10 @@ import mxnet as mx import numpy as np import math -from mxnet import gluon +import os +import abc +import warnings +from mxnet import gluon, nd class ZScoreNormalization(gluon.HybridBlock): @@ -86,9 +89,419 @@ class CustomGRU(gluon.HybridBlock): output, [state0] = self.gru(data, [F.swapaxes(state0, 0, 1)]) return output, F.swapaxes(state0, 0, 1) + +class DotProductSelfAttention(gluon.HybridBlock): + def __init__(self, + scale_factor, + num_heads, + dim_model, + dim_keys, + dim_values, + use_proj_bias, + use_mask, + **kwargs): + 
super(DotProductSelfAttention, self).__init__(**kwargs) + with self.name_scope(): + self.num_heads = num_heads + self.dim_model = dim_model + self.use_proj_bias = use_proj_bias + self.use_mask = use_mask + + if dim_keys == -1: + self.dim_keys = int(dim_model / self.num_heads) + else: + self.dim_keys = dim_keys + if dim_values == -1: + self.dim_values = int(dim_model / self.num_heads) + else: + self.dim_values = dim_values + + if scale_factor == -1: + self.scale_factor = math.sqrt(self.dim_keys) + else: + self.scale_factor = scale_factor + + self.proj_q = gluon.nn.Dense(self.num_heads*self.dim_keys, use_bias=self.use_proj_bias, flatten=False) + self.proj_k = gluon.nn.Dense(self.num_heads*self.dim_keys, use_bias=self.use_proj_bias, flatten=False) + self.proj_v = gluon.nn.Dense(self.num_heads*self.dim_values, use_bias=self.use_proj_bias, flatten=False) + self.proj_o = gluon.nn.Dense(self.dim_model, use_bias=self.use_proj_bias, flatten=False) + + def hybrid_forward(self, F, queries, keys, values, *args, **kwargs): + + queries = F.Reshape(queries, shape=(0, 0,-1)) + keys = F.Reshape(queries, shape=(0, 0, -1)) + values = F.Reshape(queries, shape=(0, 0, -1)) + + head_queries = self.proj_q(queries) + head_keys = self.proj_k(keys) + head_values = self.proj_v(values) + + head_queries = F.reshape(head_queries, shape=(0, 0, self.num_heads, -1)) + head_queries = F.transpose(head_queries, axes=(0,2,1,3)) + head_queries = F.reshape(head_queries, shape=(-1, 0, 0), reverse=True) + + head_keys = F.reshape(head_keys, shape=(0, 0, self.num_heads, -1)) + head_keys = F.transpose(head_keys, axes=(0,2,1,3)) + head_keys = F.reshape(head_keys, shape=(-1, 0, 0), reverse=True) + + score = F.batch_dot(head_queries, head_keys, transpose_b=True) + score = score * self.scale_factor + if self.use_mask: + mask = F.tile(mask, self.num_heads) + mask = F.repeat(mask, self.dim_model) + mask = F.reshape(mask, shape=(-1, self.dim_model)) + weights = F.softmax(score, mask, use_length=self.use_mask) + + 
head_values = F.reshape(head_values, shape=(0, 0, self.num_heads, -1)) + head_values = F.transpose(head_values, axes=(0,2,1,3)) + head_values = F.reshape(head_values, shape=(-1, 0, 0), reverse=True) + + ret = F.batch_dot(weights, head_values) + ret = F.reshape(ret, shape=(-1, self.num_heads, 0, 0), reverse=True) + ret = F.transpose(ret, axes=(0, 2, 1, 3)) + ret = F.reshape(ret, shape=(0, 0, -1)) + + ret = self.proj_o(ret) + + return ret + + +class EpisodicReplayMemoryInterface(gluon.HybridBlock): + __metaclass__ = abc.ABCMeta + + def __init__(self, use_replay, replay_interval, replay_batch_size, replay_steps, replay_gradient_steps, num_heads, **kwargs): + super(EpisodicReplayMemoryInterface, self).__init__(**kwargs) + + self.use_replay = use_replay + self.replay_interval = replay_interval + self.replay_batch_size = replay_batch_size + self.replay_steps = replay_steps + self.replay_gradient_steps = replay_gradient_steps + self.num_heads = num_heads + + @abc.abstractmethod + def store_samples(self, data, y, query_network, store_prob, mx_context): + pass + + @abc.abstractmethod + def sample_memory(self, batch_size, mx_context): + pass + + @abc.abstractmethod + def get_query_network(self, mx_context): + pass + + @abc.abstractmethod + def save_memory(self, path): + pass + + @abc.abstractmethod + def load_memory(self, path): + pass + +#Memory layer +class LargeMemory(gluon.HybridBlock): + def __init__(self, + sub_key_size, + query_size, + query_act, + dist_measure, + k, + num_heads, + values_dim, + **kwargs): + super(LargeMemory, self).__init__(**kwargs) + with self.name_scope(): + #Memory parameters + self.dist_measure = dist_measure + self.k = k + self.num_heads = num_heads + self.query_act = query_act + self.query_size = query_size + self.num_heads = num_heads + + #Batch norm sub-layer + self.batch_norm = gluon.nn.BatchNorm() + + #Memory sub-layer + self.sub_key_size = sub_key_size + sub_key_shape = (self.num_heads, self.sub_key_size, int(query_size[-1] / 2)) + + if 
values_dim == -1: + values_shape = (self.sub_key_size * self.sub_key_size, self.query_size[-1]) + else: + values_shape = (self.sub_key_size*self.sub_key_size, values_dim) + + self.sub_keys1 = self.params.get("sub_keys1", shape=sub_key_shape, differentiable=True) + self.sub_keys2 = self.params.get("sub_keys2", shape=sub_key_shape, differentiable=True) + self.values = self.params.get("values", shape=values_shape, differentiable=True) + self.label_memory = nd.array([]) + + self.get_query_network() + + def hybrid_forward(self, F, x, sub_keys1, sub_keys2, values): + x = self.batch_norm(x) + + x = F.reshape(x, shape=(0, -1)) + + q = self.query_network(x) + + q = F.reshape(q, shape=(0, self.num_heads, -1)) + + q_split = F.split(q, num_outputs=2, axis=-1) + + if self.dist_measure == "l2": + q_split_resh = F.reshape(q_split[0], shape=(0,0,1,-1)) + sub_keys1_resh = F.reshape(sub_keys1, shape=(1,0,0,-1), reverse=True) + q1_diff = F.broadcast_sub(q_split_resh, sub_keys1_resh) + q1_dist = F.norm(q1_diff, axis=-1) + q_split_resh = F.reshape(q_split[1], shape=(0,0,1,-1)) + sub_keys2_resh = F.reshape(sub_keys2, shape=(1,0,0,-1), reverse=True) + q2_diff = F.broadcast_sub(q_split_resh, sub_keys2_resh) + q2_dist = F.norm(q2_diff, axis=-1) + else: + q1 = F.split(q_split[0], num_outputs=self.num_heads, axis=1) + q2 = F.split(q_split[1], num_outputs=self.num_heads, axis=1) + sub_keys1_resh = F.split(sub_keys1, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + sub_keys2_resh = F.split(sub_keys2, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + if self.num_heads == 1: + q1 = [q1] + q2 = [q2] + sub_keys1_resh = [sub_keys1_resh ] + sub_keys2_resh = [sub_keys2_resh ] + + q1_dist = F.dot(q1[0], sub_keys1_resh[0], transpose_b=True) + q2_dist = F.dot(q2[0], sub_keys2_resh[0], transpose_b=True) + for h in range(1, self.num_heads): + q1_dist = F.concat(q1_dist, F.dot(q1[0], sub_keys1_resh[h], transpose_b=True), dim=1) + q2_dist = F.concat(q2_dist, F.dot(q2[0], sub_keys1_resh[h], 
transpose_b=True), dim=1) + + i1 = F.topk(q1_dist, k=self.k, ret_typ="indices") + i2 = F.topk(q2_dist, k=self.k, ret_typ="indices") + + # Calculate cross product for keys at indices I1 and I2 + + # def head_take(data, state): + # return [F.take(data[0], data[2]), F.take(data[1], data[3])], state, + # + # i1 = F.transpose(i1, axes=(1,0,2)) + # i2 = F.transpose(i2, axes=(1, 0, 2)) + # st = F.zeros(1) + # (k1, k2), _ = F.contrib.foreach(head_take, [sub_keys1, sub_keys2,i1,i2], st) + # k1 = F.reshape(k1, shape=(-1, 0, 0), reverse=True) + # k2 = F.reshape(k2, shape=(-1, 0, 0), reverse=True) + i1 = F.split(i1, num_outputs=self.num_heads, axis=1) + i2 = F.split(i2, num_outputs=self.num_heads, axis=1) + sub_keys1 = F.split(sub_keys1, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + sub_keys2 = F.split(sub_keys2, num_outputs=self.num_heads, axis=0, squeeze_axis=True) + if self.num_heads == 1: + i1 = [i1] + i2 = [i2] + sub_keys1 = [sub_keys1] + sub_keys2 = [sub_keys2] + + k1 = F.take(sub_keys1[0], i1[0]) + k2 = F.take(sub_keys2[0], i2[0]) + for h in range(1, self.num_heads): + k1 = F.concat(k1, F.take(sub_keys1[h], i1[h]), dim=1) + k2 = F.concat(k2, F.take(sub_keys2[h], i2[h]), dim=1) + + k1 = F.tile(k1, (1, 1, self.k, 1)) + k2 = F.repeat(k2, self.k, 2) + c_cart = F.concat(k1, k2, dim=3) + + q = F.reshape(q, shape=(-1,0), reverse=True) + q = F.reshape(q, shape=(0, 1, -1)) + c_cart = F.reshape(c_cart, shape=(-1, 0, 0), reverse=True) + if self.dist_measure == "l2": + k_diff = F.broadcast_sub(q, c_cart) + k_dist = F.norm(k_diff, axis=-1) + else: + k_dist = F.batch_dot(q, c_cart, transpose_b=True) #F.contrib.foreach(loop_batch_dot, [q, c_cart], init_states=state_batch_dist) + k_dist = F.reshape(k_dist, shape=(0, -1)) + + i = F.topk(k_dist, k=self.k, ret_typ="both") + + w = F.softmax(i[0]) + w = F.reshape(w, shape=(0,1,-1)) + vi = F.take(values, i[1]) + aggr_value = F.batch_dot(w, vi) #F.contrib.foreach(loop_batch_dot, [w, vi], init_states=state_batch_dist) + + ret = 
F.reshape(aggr_value, shape=(-1, self.num_heads, 0), reverse=True) + one_vec = F.ones((1, 1, self.num_heads)) + one_vec = F.broadcast_like(one_vec, ret, lhs_axes=0, rhs_axes=0) + ret = F.batch_dot(one_vec, ret) + ret = F.reshape(ret, shape=(-1, 0), reverse=True) + + return ret + + def get_query_network(self): + if hasattr(self, 'query_network'): + return self.query_network + else: + self.query_network = gluon.nn.HybridSequential() + for size in self.query_size: + if self.query_act == "linear": + self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, flatten=False)) + else: + self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, activation=self.query_act, flatten=False)) + return self.query_network + + +#EpisodicMemory layer +class EpisodicMemory(EpisodicReplayMemoryInterface): + def __init__(self, + replay_interval, + replay_batch_size, + replay_steps, + replay_gradient_steps, + store_prob, + max_stored_samples, + use_replay, + query_net_dir, + query_net_prefix, + query_net_num_inputs, + **kwargs): + super(EpisodicMemory, self).__init__(use_replay, replay_interval, replay_batch_size, replay_steps, replay_gradient_steps, 1, **kwargs) + with self.name_scope(): + #Replay parameters + self.store_prob = store_prob + self.max_stored_samples = max_stored_samples + + self.query_net_dir = query_net_dir + self.query_net_prefix = query_net_prefix + self.query_net_num_inputs = query_net_num_inputs + + #Memory + self.key_memory = nd.array([]) + self.value_memory = nd.array([]) + self.label_memory = nd.array([]) + + def hybrid_forward(self, F, *args): + #propagate the input as the rest is only used for replay + return [args, []] + + def store_samples(self, data, y, query_network, store_prob, context): + num_pus = len(data) + sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] + num_inputs = len(data[0][0]) + num_outputs = len(y) + mx_context = context[0] + + if len(self.key_memory) == 0: + self.key_memory = nd.empty(0, ctx=mx.cpu()) + 
self.value_memory = [] + self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) + + ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] + + max_inds = [nd.max(ind[i]) for i in range(num_pus)] + if any(max_inds): + to_store_values = [] + for i in range(num_inputs): + tmp_values = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_values, list): + tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) + else: + tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) + to_store_values.append(tmp_values) + + to_store_labels = [] + for i in range(num_outputs): + tmp_labels = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_labels, list): + tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) + else: + tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) + to_store_labels.append(tmp_labels) + + to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) + + if self.key_memory.shape[0] == 0: + self.key_memory = to_store_keys.as_in_context(mx.cpu()) + for i in range(num_inputs): + self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + for i in range(num_outputs): + self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) + elif self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: + num_to_store = to_store_keys.shape[0] + self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], 
to_store_labels[i].as_in_context(mx.cpu()), dim=1) + else: + self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + + def sample_memory(self, batch_size): + num_stored_samples = self.key_memory.shape[0] + if self.replay_batch_size == -1: + sample_ind = nd.random.randint(0, num_stored_samples, (self.replay_steps, batch_size), ctx=mx.cpu()) + else: + sample_ind = nd.random.randint(0, num_stored_samples, (self.replay_steps, self.replay_batch_size), ctx=mx.cpu()) + + num_outputs = len(self.label_memory) + + sample_labels = [[self.label_memory[i][ind] for i in range(num_outputs)] for ind in sample_ind] + sample_batches = [[[self.value_memory[j][ind] for j in range(len(self.value_memory))], sample_labels[i]] for i, ind in enumerate(sample_ind)] + + return sample_batches + + def get_query_network(self, context): + lastEpoch = 0 + for file in os.listdir(self.query_net_dir): + if self.query_net_prefix in file and ".json" in file: + symbolFile = file + + if self.query_net_prefix in file and ".param" in file: + epochStr = file.replace(".params", "").replace(self.query_net_prefix, "") + epoch = int(epochStr) + if epoch >= lastEpoch: + lastEpoch = epoch + weightFile = file + + inputNames = [] + if self.query_net_num_inputs == 1: + inputNames.append("data") + else: + for i in range(self.query_net_num_inputs): + inputNames.append("data" + str(i)) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + net = mx.gluon.nn.SymbolBlock.imports(self.query_net_dir + symbolFile, inputNames, self.query_net_dir + weightFile, ctx=context[0]) + net.hybridize() + return net + + def save_memory(self, path): + mem_arr = [("keys", self.key_memory)] + [("values_"+str(k),v) 
for (k,v) in enumerate(self.value_memory)] + [("labels_"+str(k),v) for (k,v) in enumerate(self.label_memory)] + mem_dict = {entry[0]:entry[1] for entry in mem_arr} + nd.save(path, mem_dict) + + def load_memory(self, path): + mem_dict = nd.load(path) + self.value_memory = [] + self.label_memory = [] + for key in sorted(mem_dict.keys()): + if key == "keys": + self.key_memory = mem_dict[key] + elif key.startswith("values_"): + self.value_memory.append(mem_dict[key]) + elif key.startswith("labels_"): + self.label_memory.append(mem_dict[key]) + + +#Stream 0 class Net_0(gluon.HybridBlock): - def __init__(self, data_mean=None, data_std=None, **kwargs): + def __init__(self, data_mean=None, data_std=None, mx_context=None, **kwargs): super(Net_0, self).__init__(**kwargs) with self.name_scope(): if data_mean: @@ -130,5 +543,5 @@ class Net_0(gluon.HybridBlock): fc5_ = self.fc5_(relu4_) qvalues_ = F.identity(fc5_) - return qvalues_ + return [[qvalues_]] diff --git a/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/reward/pylib/torcs_agent_network_reward_executor.cpp b/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/reward/pylib/torcs_agent_network_reward_executor.cpp index f162dd4e..799093ac 100644 --- a/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/reward/pylib/torcs_agent_network_reward_executor.cpp +++ b/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/reward/pylib/torcs_agent_network_reward_executor.cpp @@ -14,4 +14,4 @@ torcs_agent_network_reward_output torcs_agent_network_reward_executor::execute(t output.reward = instance.reward; return output; -} \ No newline at end of file +} diff --git a/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/reward/pylib/torcs_agent_network_reward_executor.h b/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/reward/pylib/torcs_agent_network_reward_executor.h index 5e42cf3f..84b105fb 100644 --- 
a/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/reward/pylib/torcs_agent_network_reward_executor.h +++ b/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/reward/pylib/torcs_agent_network_reward_executor.h @@ -19,4 +19,4 @@ public: void init(); torcs_agent_network_reward_output execute(torcs_agent_network_reward_input input); }; -#endif \ No newline at end of file +#endif diff --git a/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/reward/pylib/torcs_agent_network_reward_executor.i b/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/reward/pylib/torcs_agent_network_reward_executor.i index a593d023..a04ca846 100644 --- a/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/reward/pylib/torcs_agent_network_reward_executor.i +++ b/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/reward/pylib/torcs_agent_network_reward_executor.i @@ -6,4 +6,4 @@ %} %include "armanpy/armanpy.i" -%include "torcs_agent_network_reward_executor.h" \ No newline at end of file +%include "torcs_agent_network_reward_executor.h" diff --git a/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/torcs_agent_torcsAgent_actor.h b/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/torcs_agent_torcsAgent_actor.h index 14cd12da..6cc68756 100644 --- a/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/torcs_agent_torcsAgent_actor.h +++ b/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/torcs_agent_torcsAgent_actor.h @@ -19,8 +19,10 @@ commands=colvec(3); } void execute(){ vector state_ = CNNTranslator::translate(state); + vector commands_(3); + _predictor_0_.predict(state_, commands_); commands = CNNTranslator::translateToCol(commands_, std::vector {3}); diff --git a/src/test/resources/training_data/episodicMemorySimple/test.h5 b/src/test/resources/training_data/episodicMemorySimple/test.h5 new file mode 100644 index 
0000000000000000000000000000000000000000..e7a7d697033b4c691f92dbf780f87c1261bd4dd6 GIT binary patch literal 90048 zcmeFaeV80|neSadNMuw)^MH_!7+`mcl2%NZEwtZ{to|`u*K`pj{g|$ z|F*L)_NzQYZns@-ZSy1h{QvGpQIKN{_HuRnVm}&kPyfH~|MzGh@HXyJ@zT%td*w6x zz3BNqZ{#QaUi$YwZ+r{CUrAjK0@M+B>Hd7bpwCN|_It^)K5t-z^L&N} zTgwq#(&x?Ex8nJGK@+*FKS3_=ZFpzJt6x>nJ@7JM_U-pJe5T^nJmdral!X;<`jNo! z-RP!VTjG510>}6ax6kQ{Z~u@&FYyt`@Xnm)1x;weyY!vuuj_w?8~+~i&oAI=e|#~n zI=ZRO?Ds~{xA74a3ORPY8adIGY@g^jGClyG`qVMQ zH+nDl*Ytb!f2nxOuBdo7?OE}LKHZ<`pmu9PH~e|O*ArE|O+U@`FIl@D#s|S&KCOVO z?Z>hG`#pbI0oP3NUHoVDp9?toPP_y9od%B=AwvXT;2PT-yWtDQ-;&uC??V??ywHcI z5A}PSB4p<;eJyPdKifWzSY zdHr7F4Y^O%$ACd+w_k%?d`d2ZvwW4|oeJOBaU2~S0j%<(!L9ufxIO|s>4WPd;z)QF zTnkQz?mBcg^^r;j)7&SAQ4H?-gNirq`02o7oa4Ga^7QyzVv+Cla=$lY5AqK38(rg1 zIBLjKo6+xWelNarbidd2n?f&J7C;l(I&+JUU(}p9x@Bd~Ti4?`Z?%`<2dDRY;~Y;N zIgD=5jsGvfhSSh5Jb3tztG|pMdYJ#CiuVWN&chZ%tAJHL6nxuX3|~ohhxSeK#VzE{ z8=;T?8(bNF-Fnp1%zdmM%k+3YBmB|eeDs;_n){T# zhqI7JaY5efT3Nt7zKT3Y-InfUhv)<77<*Nx@;SYLQywb6{Kd4*jz+f=-?Np9*LNJY z{_kAh1{Xo2MjTd^!-xm#6Lj1kF6^7W`o%N_Q~$4iZ}?q>{vu>kE@~lX!zYKHMVuc> zTpfgL@NRUs;%`>bAB=pH>yX{xypwXfSv)p;ev_;8J0KnmZd-cEWN_kqTW}_e;fsh{ z`9Szq{NYXT&Ue}7Mfkqc@2&m@Jns$Mef{3Df30|9FXLMI0s1j~=FfF%&D;YjUN?Di zl)9ySUAnT6w~X$(f7S2(As@S~$wl1qP{%aX-iN50xrLI!NBKDKrNHSP}T3N_yFNtdnj zpsN~GN#R9+}gE8u)T&kwcZp-YY` zPoby8pUcEmbAACQy~;OR=i~c5{oZxY;O}Q*OL)ERi-o*yKE3M%WF&4&%JuLfxr{&M zIO$^jK*gIvZk0V|E+gLPEA`#rYA@t8%J3<_$@JOArDHDzuJ!Wb^Q2^cly% zKln2LmCS8<$M0wSFitp`CyCs**@_(4? zEymUtaOyYatBx=2^X6WJ%#2Os$IAKPKNoO4547_fu@EofIC5{gHP=gNP5~!>kq_uk zJj|z-N)Gk+(WwQT&PnevpYh3DgP!A==nA;i^#aZvGs+&(d(kt#TRj>WCoXlaL7YjB z2;Q?X)^N^4;MB{k&cIvg<@h}@b? 
z@>~l)jq$_rZx(R!AN4x*W47i|x~}e0z*VuQd~%c+Nx_%!C;K<(L0a!A;9MPBjxSMj zOn#H2vws#J5kBD2uf~TL_@;jiJE4~_>i2rd&oy*8RwnNLlUO*ch$A;H1tNcJ^4>N zuX$V0!yR0|7d7pRWsQeah#y?n_*Mmi{%5 z9%9>#p9T)w7~S&D74J{XGs_lkT$Iq4WHY!HJPgeyH{*AHAAenWk05(D$=3#FbwTqv z0dgx}$mT!$9%Br~83w1mIJ!2c(K#;5{Gx5&=4*g~hGeKyFCL<=_lbd6{9annova^M zo>reSfXqgBGX1HRo4x-LoO~@_N=&x; z%?=iK1{d~I^Gn30Y>$2u%aLzE-ZvKdlTM@Q)F1UU=vedVI#!#8zE%`))|Y5bx(wdC zF3#h}rw7T-wdE*t{SbOn&Pe{m^~6qsPx|B=@nQT~{`=Et#oNG`ew11ot}5g*IK`#- z>Be56a@ACDK{Mfd`E3P%3uoi!uOi2&4@0*@9-mydnLOQkYXRrZN0k4b+jp{g<JR(9HS}*Ox-vM;^OTRyb&$e;551{> zG5r1>9-O?Pb)m;k!A~3<$uRRoPf)85Ebs}(Wb=e6wX<}b14k}tk%I=wivfP8{GuMT z_eSh7}p`c;AU-C$?}SNx`_Pd`g3?}u3L9Z zHrY5zb1jnJYC=E^DF!;nJ>G8E{QzPMooY_Udh;sB#EbDKzf1W&@MPZeTCXU%T|dw@ zA!D4F&-CiNr_(XbGc@rX!&m=B#e3)n_!4vm?08pu71C74NSbE8fH81@Dr6?{v=l@U`IJ00FUHTjd$m>&T4Sm z^5bCmfA*Kj_Ra2x9j8bicimX=UjGpBbSUF#*0PMQ z=}#~tU+nkh(X&+Xy||s53{K}YM=pOZW8bV6!k6a4<+~Gbs$=MN!+Q9^_Fcq)e8+n; zKK;`|j{1AK?w;JQI_J>cI1lihI{7+*{-%xKI*YtT@1eE0qp^$8ZIt`Hh4e@_Fo&(< zCB{GU2l4HpGlOfAmr{K2mhS?CABVuSevDk?!_*o7st2h*#2Ww0b6Zs304()y(}(bv zy$gJqUm>4*Iq}`O4q|l6*mC5WinnS-#d~xtms`9IB8&P|gIn~DJPyhqrGBkqEs>m* zOyLjM-q8Y1*Ve1p0a?-!KGDbYk@irG z{4OBiNd|h9b zzd#SWX#8k!d;BWT|871IeEC#lcjzX3_wlpjVB&@zabgTgE_eM@{OPy;l#_hubzPkI z5gxydZbG~We{)tbucKaL;L@JV-6yx#SN90|2#-4N?L z=yl{ud~AFCYF}zH{-?Mxd={6Z%v&x-k9UzXoAdg;2f(+M`ebmLXEiyb4|lFeV@-pr zUzYov{M6t@Zw)oNgl`*M`AGV;;ePK1^qp*>|D(TZk=qwd<9c{BIICOodDkX3?{9FL z-=)VA-8^xKm0j34Lq zt-5;ww+X+~wdxl&U#4{);l2Bd#JS_A(Jt(#V(g)FvPX+NuUM*m3YmI~IG=2sXX9k; zeK7unJH#)_Clb52=QNwdV7Y?+&M)9BUrEmR-T2m3g*f-^OM1In7 z;R~pJtDh!L$xAwB{0VM4W3WUzr5>d1PhF$=AA_@eHAYU;eoLMAYs3~h(U`&DCX@Gt zPolr0*;DxXLJnQu$M2X=jkB7Jp~h~@{kfN1>*Dj!{a-eM1Km8p_%25Gx?b^RaK>Nl zb&Sals(5Rd&ufrpJg$kaeleV`uQka7KVn~5o!lD&!#KzA>3qU?BK-;aYSSC~Uwq#` zm)+ouuGSpNb@r4|qt%=Cd=A*x7Wk@<&{vS7%IHkC);_ki8WvWGZ~{yGfs9a7I^q4ONG_=xY~Uny+mMel-;k38X zZlA38Zs$MEmFn6tVkQOG=yUm}^tENAu&?%UX7d1TdA)NO>oX@9b8Wg`L3e=uC3zV* zKTeM}omzEVj$b`ZO06>fGQV1e4*mri_-O-MMd!bG{M&mt`+F{gj_NeJj5m?57J-M@ 
zNZ`@%iFfIwOs#5M%XwhC*5$Tok&g_nMyyVM4Y4?fbHtovuVSD0Xl%J_0q6EjTBozP zP*3gBoow8rHCFNKf!pX>zFJFt>w6FN;4{Gd8C|P~b>L=FM~znNW5{w8K1lA6Jv1L~ z_?oO;1o)6--f{@B^hWdt9{pbV=E80^KF{hAIJDnn(JtWQ+R{LQFF^KCIq^dNu*asa zLi@0y)*4;YbGG)oGh?m$kOR8nuSuL3oPM{C;T)d}KLmf&{gL0dYpRn^vvuc};0LR3 zqgH@#`(g6(TK4B^jt<`*@6KzX(Y1UUDsRs~C*al^#MBLWKV@*+nm_dwQ|Qp(UTXeJ z|HgQU@kbZANp-SHe8{fV_qJ`^jyK_xE%Y~^Il2@--9D~(>vqE@e^}_xBNnV~iJ$YX zu6XmvMVXD37W}P$7yaJ7&^UoVbRB`Owg)}HJJ9ukg&$<+IA6Y|u&Ix|mFrbYOy0C@ z%Vb*4XLK6y)aeFw@x-SKd?7i?9n*T~t#K)`lQ&)cB{yILdY}+{HJIn_BBwsSH@-WC zz6t*}x~{wlzO3I?r=yDtILnh+Eu0E&e5vQ>In4vz@#P))u>tzE5Si4I)`)reL%gMs zSAMFxQ|kpD-RZaXCT9Eg`ELAKzOsJ2dIb6bujZ$cdkX$$GNwq;O$m87GJf3jZO+4E z^L^L{{DyC`{T8Ba<2i%7`2lJV{nu0Yj&?OxbXYAE+*S|BfJKu7|`$ZGR9L$UMH7Od&+xlZ8tZ5 z1MKhfS5ltAwz8k$OZhHY)dNZoI;ZiB@Sf!m^6x6}hEIMTkh^2x z*1^{qClOO^n%Nv_JHH4|bQ`(Wr@tl##_Byj4=i}ahvA!dJw5IV1#i*zTjvFX6F%MR zpnWe*@=Wr!-1o+Q4Q}K!xP{p8_U}}@eJ;jd(Pgt6_{G5RU2_fwr}bC$VcIv8fIFrK z5+2Q~MCj7sCbO^Xvoqs{CNHWgia)jjT^N8KSA8;HWX}p>m_yDlTrM<|e7aPA#&~L|4`6}i!n5Q?m;XTmj>CBHL zB~H zaoOVqU5&4`{^jNsTW^Z+S>sRpVmDD6S6{)n>;qii&OruiaGR+qBeyb#a0IdYU2OVU z=4@1J#S6y`&hpTdzP!(xi%qI$0@ufW5c*kz3$dZ)Mcs>|*hycGPamGkXK+L7xptVb z8a-wDU(iAR`sdIKGPwJ0YK(zu=w5ha|518B`F!ck`7y(n=@T3=Ff*A0(i)87Y3BQg zVaNY6{+EIa8>x@7`JtjSb81XUxXfu0DnJ@Sh^jJmkR# zulu(`F9~>(b_|90fop2czvXcN9t!xfLyrrw~F{i(3kZ;6ea13&g12m5-U%mDx z%%5Ic=x?(9tfHTyN9or1^TAQx4SDIA+&(Mo*U+QEY0juQpE*bRoYnuyye|27_2=QG zqaJ1Y8^T`DkY9WJX@9ndj=IsM;j=u{qzBVHV1sdY4IZ?f?)qP6KcsyrRbb4&4d0UK z#Mi#$lCgeo`F7+E?7ZpU=_TpmQ^sNj*J%%rx+`8ics}Q$LVulfC0F$fXfG#^0c-e_ zpF{efPTj@yzc*h|;7dQwHP}b%&+*InLvO0*(n}wWY2tv>e;kDk;1#? 
zGB;7bTABlI>rZPeuD>Zy)s~Qx?#{0_xUKFb!Ui63ni3mTbmQtpag^=zfcF5~mys(1 z-siVF?`{6&2rsXFfxHsNaYUpF@}GE%4V;UxClff5H_U3EsW) z8X7tIAoFwd8j2tMOFB#NKix}`a-H#~{e<%I;7u>K%lJZ#R( z1>iP(8E*1v3=Qo`Y`j00Q@=ySW?Np}hxj5tfX|0_)jbG>J!Y|QL-tfn^`uv7rTEof z_~Qc3>>EL+_HpdMr`Gm@^YI&e%;;`8l08df%$E{7>LtpH;Ss#LuDeypD_<^u0{s!M zOYVY)dAS^=-{tx;e5vH;`UJkjr{<8xmQc%L^!%;@UkonUxPBC$I&q-dCencryO9eqQ?`b52vK_y3AK=tOJ9TC2-^ zxNVPY9032?@6p(Y^U@o17ja#}cf(ghXZ}^-nUi1NdoOx~p5{g6V+NBtKWkmCUl=|cr?`1nHj|I7SXkf_&)$j5 zvCpRuCLVOpy6zkIs1u39-(>q}Z@+;WAs;19jc)QN`HXrczsvN3jtq@Nm)T|JC(+|#%8{D?$9W}o$pEkJc{=KF69(CPs&(kUA z$TwPi8k~Dy^kniFoZC0W`;m(d!55}sKk{>WTA}BK-$1Tw;C)Z{hL&t$zvJx+x|uFG zKSWN`e3XsHGXE+U__92Qo@P|RhYf~q#i!7V@Rsi>@F@m{s4FWONA;Y=^}y)b5&DD7 z_HBRGJ`e5n?HpGqPTg@ui9D2oBZZD=)pmgHe}JnU*QOWU$CBOW#`Ug$Dz~(bFZdIV zW$+boyh?A{BG=c^Q-$)PPtYZojsA){e+4*B<2X3$FGr^N z==-Au+;sUEy3<^8*9p-4Px7#G3%Is54=i1K=Q}+o{$O<1e1?3&IoV0~KQ-u&8f#e} zxSc-W{(`RS_vX)9d+~OGKL?-rbEZeeMae5!cVSIkdvFrR@3)l?I_D=>{}vr!gA{$# z(CuM&nZ9d|>M?Sj!8M63^}CCE zD&C6wh`rzC^b>dpUtQpH?V((;t^HRo6)xLH3!aJoiSN>#{yOa|oXU~P_gT%xXFA6p zChzne=~tei|H3~9={;lQP>*2phWu{+3>%}uQ@&DcSKpTVS2UYA#%FZ@WsLtAoNUy54>aBmJl`A0vFe-ng>neF z&fp~Hc~|8*VC^}~E2__?)=Jh5j$f7D1V0}G7qMn*-?HD(nYmvZKIvS0h%~pQHF5Ps z9{6hLNqwrpNiSN@tifZQxd{1cN}QB|JC9z@;Cznz$R&A7^d2#v`o8HQuHB#O+I3+) z^S{!|CV0;5Ex^zI|{QL?$gWwOs=E(%`m{1p1(dnDc9H{n3RUk3Mu z{95xD)tun%_%rq~e7bI&o}-2BlPO$}96sYw&HFC6rr=NbROeUrp&#U|_U1Xp_6{39 z*-hhL+c&EoP~SuFrrPRcfd}GLbHWCv+-LbkJZ!%vuLsTP z90#YtDSsJU`D?i^si)Okl?x|%moL*?Bgg3_Y`@u6tB}j#&!>O0I$b^`$6dx(b>IBz z`yD;wYvF78jm_QIl&;HrIB@h`)z{?utqs4}E+$7+@c>T6Yk)2lKTD~>KDN=^iorD( z@ysN2F?27-;d|CW)Ms*#Vp#dc;4IDs6a5&y2tVJ`IFCPrvpz!OZrd9)=U#L|ZFKcl zqTVZix?>6(gWbuxtl_i!I!fcvW6o|6yUt`B9l_%n^wb6?d&r+W{#xY5k?-dGwO*gw z(BOs{k7?{OANmQp&E|H{QJq>Q90oV`3w-lwbU=P71^wQuiP6m8@fFwawWmX4Jsq3u z8p9Xtg`UI{a8EJDi13*#pYq#B7K2l+mCY(^nbZCw@bnJSb3)CI96!zWdmqJh)R&Z= zr|<3H#3lj$$!{~+^_}(TuEzEW90Dl^{5c*$b+~w|b3ok(*!^p)s?Z@uSF@~>vORo3c<*a)V zZw>nM@v~T`BwkARu)!%-q)+X6DZO>;S}6M&T-L{S#Cc4fR{p#2hlL#KHzvE!)nqb! 
z(M9C53-}`k>eJ>dOn!sYbG>X&ka$(j)H=NM=n>Nfr+J~;ZtdgUZGQzuGHfJY8eB?V z(Kz4chw3LG19-H4?&G%x7Y;+~n~VvGH|+PPh>Hv`$d}5IDf8z0S1@UW`wB^+xb3kDAl_J!An^@^s=;O^VTl_F-#2 zr{|G8Cm5XT&$4M6(uc$6ytlF5@k~M24I-y_)SvF1i@|Slc;p;^3$^<#@Jv3h zf=_u)bCqr$vVP-n*4j2cgzd0}^xTa<`0ps>oou~AbUUAy)=i$(@pX?ft~nUJ>`R~V zd2B;oOwTUx%~Nc{W4aD|;!o0r&9AxZN1N8u?$kMbHrMf8`pE1!r&{(LMz}9n-vF2#j=O&p;E8 zu3iGhLfWqq3;qlEz?X7-RpRo{D+_$Wqy3Y*wzf05U>G?^iCyVVe6;0lZUGm%$@b!) z6Fn1GzOtEhX7%R=XYnT9bT3P1{xvw|y|E_xZIACo-}ZKkXMDGG}2cU_>yN>t2_kWu;nuRIN?+GE%d^-t~a`FKa@RVl$<4g!*{{Aqi4x0 zTy7txXWpgoCLI~RlpHF*n{@?q+FF0WceLMMKC}}x%HXp39PqDr1!Kp3b03f#vTcJp zYH+RFIfu{BWqeY)HJ2ytLyzzx-!-^QfBd$6s?(_ry05N8yeH2WaDz7xm&8MMPXqC& z`jE8wP>dPA_4uClLrV9(j}Uk7DPr8+*QYw&#QyTRV?Jxpb+v^H+ji;jdAe;&^tSD*fqoHtu{QSOioiU*_X-H-0^ zGu40^IY@R*Sl4Uq$UQ^n6m;FXuYI5o)91)nv|nrO^a9Q&E+yYp)XLUD=$Aa7z_aFy z-1&RSp}FRVv7i6PoKN|qU^S0t{GCDnBmGbIj1KW+aMm}e&xtQ;+v;Fqj(8Zkx4@^E zQLay+qdjUG%Y_fZ8~)M#)dEg)K4#&ZxW18EnGe2i=RcdNXjLm%ybg`{`I)-Q2QhXbn z=GC>vs(Up2kHgn@>D9;|{+k%f$-mn1VR2-89V$=3JNUKdA%zEn%btr5-)_Fz>Y6q_ zH~(p_PIl8AzU-vl&hTyPeize?!MXSj{02Gb5OCqo4f5gux|$4bHG0iO`tlCA%eCiJhlSm{kNW_)1BEFX}24PTQSB^=owb@@E}&~lQl zH8|b3pnFNgOJh3x;U}-ZjJX?PM*9&BZv3}}4t(;U{BzM&$axyQ0`mX{m*N`%e6-MS z?I`$b`)TKW4j#EWfS&ZLy;#~KuD)sd{rK%!1>MQ|ape*7Go#yCk0QnQ$vJ-M$l$u+ zHTpiZ=J7`_EqP<~q5i5w&NDd6lP)dkQuER~4HxnXcY0l}>y~OCFxr!>wF%t|VEEki zU&UhmIL`4|dk1mk*du#}1M*3~;S%PInV(9a(Yg?Qp-aQ3`(5hFk;gIyCQs-YP4yG< zV;gQM=tjUj@jY@HFd=$g^7Tq4bN0N3*A#Huk~cn=eX!pt;4B{mJEHC>Mxdnct>nizC zGHm!vfzRSR#ipb1s@y19pCzta0|lI(n-m{JKTyICU&*``wV;M96FO|?$?=!_O2hr0WQax+P9oL$??%I*$(6dHDe9+|3Si8h;n?J8F;~c!E#HhR8C_1V;nqN!# zu6m?=p}hdgZHCX}U3we)C_YOqIS{>&X98pk84DQPgr4EaVm6DBOVB&lbi(Oe`ZV)k zC*c=dqc{rb`3+8WOY#Lj;#&1U^m7Tnb)BAdWpLWJG}&IM?0!tgPu==a`U7K;JGiH4 zBXdfO*M?>n_)O26h;{L$_-wrnxzV3!8(o9T_QSQ|{<;!>cq_h-z70-#)^jklPNiN; zIu5Y&+7XU9{29JT*G}Oa*Xg`}LT*39r@mG=Hk0Qy-qtfq^=yW87Ww^=LeH+esx7+r zcr7(Rd0Nj&G<==w_TpFmSfh4H{%ygvlwT$HgV=3BfiIzkg?Hxs*T}yfy3@W=`IvAU zK7Fqb7IK!SRr!vp~w;Zo%NS5-ng1_#cpabCCeS*Sm 
zK5O`fZ^I|3d3u(N@|ulnyxYJ%iQTj(CWfBQDHiJQ>Gv+VrSL1=Q#9+Bj1!6X@@w&h zzY%MU&1=|Z{b2>1>@U7$GsSUd&NTiO&cgl=VgEz$f7PQc&^#U9xW1b>H8?#dutp!@ zb&P46zXbNx1>E{e@GJB(_RNcEmX;Q9o^lmy-g^H>4gS;Za(!kx!5@vT;(0l>A!*B^ zIiV&xNp}MNcn8k0k)C0rXW8hUVeyg>_l7Toj`l{U$d`)8tARs@CH!7<#|F2pd43yH z8Jw+?RIqcWjDusFp6u-Fv;4}Og28?N8gvHV3#o_F z*)r~jkl%<-eb+(`%cs(V_Q9zCmoA?@3S5p}bluL3-@|~qH7)l|-Lvk2U+dIHSI^C} zzEJqm*E8QmoJf{z?^j!1izCs^_QfvcJi4~pZurEXjaRL%nvadhhHZQ`YMGuDIse1> z%r3dFi06Pj>DHMtex>`zrBj^`gWT5o?lEREeEMy=DUpK`erudnLf#?#(%>wg>U}5= z&tuMnI;{1YbH4x#@nUd>%gq6j-L5mZfqUSO+_a2wQ+fb7ZcCqc^e{TcC&h!oxp`Oi zRSmZ_olKUtUS4XytFcq3e@0)&XIEFeo1Q7;ZBj=&W$0l%I=+g0b7moLKwi<>MD+)` zEk=QnKSi?lzZCd14m1w>eZP`D4o-e~srgy5N9cXzj@+(8SHK58l)jyw z_0WPpcl|e@&V@a|KcDj_TbZ6M-YhmW7m&y&q2tEU+S_tDrez*UVj^3mOeAB5Pv`CHzn@hJ1Lv#|wZck!D&dx?5&_{$VT=HhmDBXgNR6pB&vcY*L5JSY0o=c>Cso{~p;5({ase>;f z*UN62OOfo_U$2_&L09)~7(O>|M8HS#3qH$lBlnLMaHV6>^B&CmP*a-kLXYqnkYfVy zdHA8>YfRzT`TQP6K4fT83(9{ge9+(~%R^Zn7e2qBtMN?pF>sv1G4h}2%3k7CbBcy9 zrr#gEf|%ua{X~wVlQO(C&&cI5II~;rLwOGyqhIT>+<2R8pMq$1`m>H@JrjLy+KpY& z_b@qMeCV9feX00T_||&|8tB?@=anjTQ@m-6Z}?0OXFlRf95jytILKCf8uo$Ie8QQs_&Xlsh*mT9X#U3=w^H9;qigbRkE6?`?^!$ zAMWT6LhM*tPA_&(9zU(s=y`-St99_^^btPs*TCmGYt1lYe)TIx*WIHRQv24@YuraJ zwCfB`b0T`bNCJ=Y&2$R9LoWeW$T5SH&U9a8J0Ud1xN3LyLNuGaMPFp_Sn6@?D^~^imMXY9fF!#!J z5<{~yj*%=)p_mxP@oHt0RZL*U2xE?}+>ISkKl1kzv;rh5pvA$P}!PQ^a@BQ*C*!HlkX{tVj za>)m)L)FRe#6oj$g6 zURii-ty8{j`0V~h#qDHkTn0DUb0zhxu<#o4kmJvD8M}3VjNSxa9p$@dHPBy-9V5rS zHDr=b!z=kj|G_VTp8;G-Q+hEICcb!B8!{s{3w(chCbY`4w` zM$fI(yg~0BxtuA!ELaboCh(DO0*!s>*)`Ulw~G3J-^d3a_*CKd32-OiFM5BYY`1za zzb4_kcr|>J$7d~_h9;A^=Ep2j`&V`_{7fgmGbQcoXMdvo%(xSllf#j{~A8CZ)Y89 zsCQ#-(crqF8(z*HVR(}tXia5fKk{SW?)wWk%LjrPO5dIJE$fSy;z#5P zobgkJB4%x-_4Io_|W=r>4K~l{{DVJJ5fC7(q_ybH0x|f^MBL z)_T@Rdg(=D;A*EKH-0yVv3zu70jKA1U4o4zpXPMs5AMBGCP#wbW-!Xd7XS-iSzpI> z;?dwt4n1?<=B$)+wf9kdoWVu#A|DrTlA}h9Esn?q$K*N2;IyxP1vcosPeifN^>l$R z^E+^kU(PY~HpbkMj+MIexHEh<580f}Sc>(8P&q6dB(T$OoU*}e8&#z}7khog7(JO<~!x4`P8-Xl5tv$^gK&ibvc|KM79%jO!O 
zx8?z2pttaY2s>zuqIcqI{3}~%U##Z!74wEq&t(Kji=nCCBFF6m405WpI*7yfrJpLt8S{@Oxd?qUSR>-G{6mxP_hK<5@dIKSQ&rPdgFs zjvT_TIRnLvWKvJ7{kFZorhbw`4WG-O_R;FT%SH3R`!ewDSimL2m-Zub$OA&bRRACbYy$HZ~&`49@hSTHRpGRz56;t5G|o3xk`;2mgt@%)7Ok z?1P;~oKxqdFM~_ZMb`EF!FR^j-_c#E=uHeRdTY+Fo9~ebnD&Jc4~xhXef9;7ktdXLm*x^#U5VDhC*xbk!u6X>XZwzj{)= zhji0iatFsJ;K)^)r`Niw`KIcUWY9g-2A6hcR(;0DicDzqjBg@G;EX$#stuei}sZ{&8|L)~E881f1 z0XKkOT?I|Xg~R)C44ejM{fl(4<(xdP*gY4g|GMB${f57V9tm46zLo2VvBe94=Tp6> z;VV&RYQ%bsjV|Z-{9ooh$Atdda%g?4ybM{Tzn@Se7~9BKR#VqAJ9W%QNI#k%j z;;o!N4B6WC(fDW~lVa20B4pCszt(-E8;`X_=|eeA*BV^?)ECFa81n@6&#;H+&cbfb z&dl|sXR^lFL%777*2e?<+VENYcGmv{SUnHP;Ii>AJPxcu-`G{-YK?>CFT!hZ5&UYN zRQpa^*iXG=fIaja%G7}i@e|R|wbrW%)}Bda_>8|k`j(JhIQbPlAT;&5yw};^Pj)}7Jv%GH*Nm?5m+k3;o_tRJC>zQb4NmVr zQcUW7O8)nYH4C$g!AT#QBhuct`a!vVHGWggNx(k=NA9-VzK%IS^Uo|7IPy-`y9|Dj z^_-oFPsbh!u_fAi-(zhFI)s+yd^~(Q)2GEbFah>YF39bvYrHQI@5)DxUs-;s9h>9T zGsPdrcCp}>7IJt~@meP0x?1$JX<*JTJ91MD8~UHHO&Q}^CPZac2u+vnTlc4|+RxmJTy?=4-4ewI&!54)A` zgAYf4?zkto9Xe+)9>jODu`Ti&zQIeG&!g5(%wdrqJ@i=XBVRl9L;Knr#IDX&-<{h< z{-FDQno|lnLiq9VGu;Cx*aSJ07kqrC`*#Igi#o6M!#j<(p!lmXbXp)!K!!8tG*ju+xMV& z-RE=I;K&^JD0*HoQrM#kj%OJ&YR_F2IxFc5Zl-S+zuDNL?Wfw0b{D=Ye9B4DNAauo zBRBDuGXGbcZgNPzR;UwT{2I_7%?Xly>h&KJcA*tG)tWu=iwhaA{P~ zjleJ6`1n_HB6>ld^A4dFKEJgei0|QA#aqT)TyiFG*jDdMNdKAN@NRNEs`)bLx;>=s z*%o}u;ZO5B;zM>*9Ei2sl&pqrJ=0q2aT7B>Mj0>{0j{&n`G9&!t)`Fu2Lq4g6v7klWnxLJxV@zYH4_ z!<};t@^!-}{)@`COJe?P#k^aQ7`t9XweO${I>BVxet*?O7A6=P55 z_EH^>{yFy{gHs$$R?kfqgUj~S%h&GA{YlRrvhlL^JsR9g)r?Ec(QV@n`l>|E#%)MPF9GioYHUEV;tqZ2zen=cZ%VU-Ue#N3Q_~ zxTGJ=F}V4`@JWu5hjSSQzS8e$&sG3$!qt3bA+PKqT?t-$Q}7q-V|>T;IXy7eB3UBG24K{O*r`E1#i5^(Y1xUx>edAn!GWCxj2( ze;I>Yv1M=#`id5M9V8b>Uz@>g_E@s0@GIp%>0z?`tT`9Mr@cuLyhvW%Lsp%i$GM)d zJ@)wm?qk=1oAY{)tn@6NN&;Nq^F_dSw@G<_6|I$PqEu z)X!lq=NarmuM>W=fHOb!cSIj;8-(;ZwS98^7(RomQM(^!F5;I@=k|~drBj2``{@Ga zXBrC&n+o5B)O5iboUT#dultd+dt8rX&JG=9`PYdf&Btn9S?`F@bE?Mn%<1ZS_kEhd z6zc4u@J#*fMLwP1%=j%9P3C2cu7^DWYLwur(>dPuiScjHyB}KkRThuvUp}NY{e=7& 
zp&t)A8P0Zo5uJtSY@MIi{%3B-#q>HxSL66nkgsn)dS}HuDWW$Z&!^=7B|j|aYCP72 zrsmQVxALtz{#2XFANHQWN6*tDF6AS7zPog8YtSWp)!^#L`oLApP2pP!yz0G{l6mSa z#455FoSt{1^Xgd}(2|VhyP3lv*KORp;7{?OdaiN0p55GdE4?C;WQPg>;|6 zkWUkiX@9f39-tg%_57|A@K@-TkR<_M0&c_S!nrkR2EOX{_p$OC{QEo6JJI`x{C3yU z4WH($N^6K)Y*Z%Jmw3nmf2w=#bND8kXO$kcCnnnuZSjV`*>OGJOS#oUet#N0s%+kd zTYWt}5H;NP=t*zGXQDUc4yc6&r*)kbALRK&^vzi;J%~=w$DY@7?1jP)wAP}v5__(^ z_7P^iF0w4Rwy=l!i{3Yy5;vRQ3ts7o_^64#$pJr|y^Zu@b>M%33%^1gy|BQS9)NCF z<}!HDE2Fz?d;!i~zgoa)zDay*ysJDQzCz${{%vj-qpS7L+P8q=oW_>{vE&gKDfl#R zX>j5>xs`aEj;-)t`9lhR|4Y#4oWXUzA2#zK9`4qP1Mf;T|(MJ>yEc()(mpYeMv~@fduRW9xP<>|yyJxPkcvd|PX@dQR{F zHDvtB0#0@-?Lf{azLleE)DzM7e+Mso8r=|H8_b>7m+N1qg6Zk$0{(bokt?7)JrQf7Q-*^%{IQr{)in`N{9D5RX zz$NHWuyd(F>d#A9;oA?~(dex3ie}XTr zQIFW^&l(?zuH-I%C66)58UwGM!0r_r{!NOkLO$8V0B5kWScE@w+?y!g72Tnf(tGaMh#v zPVQ_?Bi|E40X>P&wZRZHId1sE!+75t{;i&G=r_zEQL|nA2B-BN>z~uv*o^s-Y#ac7 z>E7VhG6$>uokI!v?c3m;LtNa9?#V|Hwlg?eR}(GOb@`(1Ln{&E>U|7O_pxuGhin{? z^R4{|x@H5sddO^W=J)b9w+?DA>T^zfxOG3fhXQzwwX`1B`BER|`MiAJ z<@x|N+v^nr(_te=z9JnWn}dXe8QnSl@dEXTJk|BK0;=98Ll$#}!g z^SE}#Z1A?@XgTAz6dFAbKpTCuZsQYK?EMo4XZcDoJMeAdhnTFPOUWc23~pQc>&16d z2hQ?y%=OO_zv4mkeLl1NZ}pOELiC~>pqSHrjrCRd7j@8c@|t+C=V>fq-dA(0x;Jet z{*&pvjc??7>gU7Y*sVXyKk?z^$qZEA3xBk%l$#No;Gdsow&e42f6_(9axo#uKb@R2vhUQSNHccl-> zrTc8u54rP`?mlVl;nz5zi@5aRMfJh(NuNG+JpNRV0?xbR5QDSl4{6_*#ua*okM&U5 zy$5amZKl?Wudc5{gZQt15!eTFS@rA*!>4(MCDc&CSgcj(HP#)4@1j?OGe1~)ATlck zkUM=ha*!kMdZxgqbrjh_e_ECR!c18ZI0mCP_V7olm>e;^Pnf3gV2s(-xgLC(^huCEFGuW2% z;cWbG_d*UGtKyF_HcdZ=y!dkrd;q?+mltrZynXmoDxaG~UW;Gpb(DTC%Z;nB)yYmXEY_M#qx{q<3HHj=!%zh?}w-Jx6b2w|CV?#IOUdS zR#X4-eyWi*lY4d_~u(b{SpmyYHI^J&x<0 zUXQWf6MmWVCUFg;D zsrQ%8D!1mfL%LMnD;)xl@_+Ly{JGPvJ~gkwT;k90S%XW^iJl{({al$2=hBa07p;+u zy|sWVQ8zZNgD2p1uZZ@P=w8TXG5E~i?NOLto=a-XYnh0)&_H!2%8C3 z#|+Nrn4bMO3%lw$ydgYQ@$r}#a^XtE^g{GuH74lG`PUdy{2D&ZS*-mjen1Wqu0ed$ z$9CEL6aHXuOU}>pNmI5Y9yJb9-7S3regC|G^OnN%nc!4hz;~0JJMd7%1U*q<2ibdFk<G z_tH}Pte$GVsY-5>4;!57ulTohAI*gsyxwJMaNF!-rJtaHlWjDgJXELm9M5^RJp^ul 
zrMS`jkmRuUb(sv!eG7akvNzGg{4*=w((BPJwoW(};0NiH0?yq#lfc6=`kA3k)WR$9 z4X1C?GjX+Um>i7$q5bL+Y=vDEf8xdDU3^-e!?k9Upj-JuHXrkL-c`c5z~EGC_5K0b zqF#d^c#|*2?c8N>Zr{|z_oS!sh#2Ct=ZFH|bzda5zsY%Wg4tGlX1)H>fBKL#%X-9}3%zt7kjrPikdJSb(5L1u>Sq^ptshd%$Opt%`e%CC=ks_mIg~dd^pV9e zpVC>+mK@%Vx5=K{9ZV(WuHt%pe*zzL>g1ged!>9=ud3%+Ykx-Px^6ap#pa@^G24n& zdHz?gV)%@P`BJ?P+`qs!)Y}Gi%;3~NC*)7<;qPHg6Ys?Mm%1Syw6y^Y7{7HAWkh3B_wddqK#*>Cm$CR7(EH%ODx8gnXJ$%RDOpdx_ z!`EHAC}$a5MuXbZM3w|xjn~ryzOns%dO=s~U;%Y-gqjyaNAk~PydvLvidc2=+4FNk z;6}d(uWuucF3E8koQLi0orj(8ATN*K+w3x02HzVGl-y`=gI5=<0cSI@k46 z^s*1%`P05BjZK?9g}f;^)DxP0^^C05)FjEF`xuSCcro9B)m&AXnx=Z9J(9^Az^`*w zr#W8xJAEnf?hF1vV9=xV6U=nrIHnqs&3mJN&1d@9#p2rVSzc3&{G;c(1k_Kx3q@-O zW%`0(OK#(Yo~<@}Yr3ladd{8t;s&u2!;9>${bfd1_f;s5X>1`~to|Fmh+pZL>YTy3 z^;|fV_q8XZ=U31@tFT8Zzf;aT4Lry>i2bz=sMwVq4WIgpEmO!PyVHZ=N9voJRR8RK+X?3kPVZ(7!J$0Xx*0gd8ozblWA}4h+qRoxVhi(dk~QY~ z7XG5|+8f{G&)}@zNd6tX=udlz#D6v)4Q_*T?dImTiRV(a=_+TWry%d=&>J%7{T2~^ zB6(AO8@?(u8@^jI9a+x}xLzO-I7(ENVx*DIy?-F#X=fuX2o~1w8Y9akzawK#= z$shD?_`%k2wnunp=6AKPc-A!P;J3heGVn_ac^BUWU-SZcHcjcCHjmKQ1YU1XM-E+! zUbT;7fH(`DK#v!2fAa70p3LO6`jPd~r-PR`^5CzBF^+U&aHEfbkNmEC5ERFXA&s{b zpRKNf?&1~n58uz>Y`s~qAv*EDR=|nIDEybuulgG8Hgd-%}zT{q%8 zcVoNr8Lz#s@Oz(QLtiCN(q}cP!NQ-}0p3!_9w**e@t$H`I;NJ&@ANEO#f#!W&-XXF z=2unlSf3HlyW7~n;Pg!Un;3`bU4E(u(wRROUB4mEH3k8J=~?edjNwPU>zcFj7*gM0a3)9nT*e@KFlYT~&Y=_4^wiOd?k}h>CKpV@#?aEU z5Ng)}+m_w%1^7vPN$%s)iE3mMy$DyxwFZ~HbC_d-(VTdhbDF1<{mQ!)a5}H`rI=6Y zEa}Vj_S6REM(N`x@SVebYPS;W)aVUALwE1XZJb??&l=nS^-}hTujKssTlb&-Aq76I z|Eix}N3W)NnM}_|AoKn?kA_e9BmBzZ&?k0n9$h`X!L4~W{rI8anVsvg%sK5D$?`S2 zG&oz63)_0V`N5pu!T*9Tv!qp|yL}8R4kEpYPaN$K827%R|zg_AX5iE8hFQMjfMXNH}KrG)~C!Cwwj%VqK5l z(XqKrvi%V4e4z1>^4;=ZqUYQ3!M~u(qjJ3@M;H8+kLSHk-vG}5esT=CNIKu2Ic@To z>^VXA6FCjrizI*2-w1Ph>RSzN@U!rXzRo)p-tQsz;P)}Q=^3V;>`}l~8Q;txmyVI! 
zgFT@QOat0U+dhUb^E2Y_yjSIMqu*M4U-~1~3!xdI|L*np622;%O2@0AyO=+N zYrGpi)?nYAp$UA5O(aK)-@3oZ;LIK-=S}GIrd8Yr#P~GCKMXG154}LEq4?xe-!+e; zG47Gl&iPlL?tKCrnwNSFaPTVMF?_WIyP=2K^lA}&dB900np0XaT=-vO6nqPF zn?xt@*%5PwPtSi6U&$Z_RB?SJ%_a6SHN_%y!M{E_Y9*E~`- z55YCA{`6cb)17oIy(Bvkqrc&~N*eRF{nYB9?xAZ@OUJGv7lA9gr^uyyC;Ti0k24SZ z%wxnp{Jd{nflqX7tex(k`&DaYuBQ~dMz@SyRtNQ6GN)YI`Y87JkHT)*e%21%W_y-7 z=FaC>KB!I4b99W%TRwuFv2kX{w!ChgDN%RBVR*WLvC%j1zi9OJ5;E~jB@FQ#|5A0HS4H~CzCmb8<%%2=-S+-dT~9c zB0(oZ*Ag@6*?kUf{k^;!q?G&UWAo`D@F&TU(N?e3meZ|7j2E2o z99GCK$Y^(Pd7~P(ybDNIQcVznd0P?~^ z&p#12j{eMU@|!w7e+KKpEo2|U&(ud7KF#6O4`I%SS{6UU8V@Z1jr80z`Q;EUQToJHP$#v_@vEG8JzOJ`0jodzv%`Rm@0W)I0I~MaH@mLhy%Sh@y7e<6OYMz zMd>6$2L|`}vWoZY*?GMEqx*Iwr=EkYIXd-yXHYk5J0dIbB-(~=GP`L_D28XBdTel- zQ)wWx-rZFHZ~9lpR1vaC*G=RyIIZ!D=heTaPA$m!l>C|-(7h%GC%Up@_Wa_doOkm1 z_)iNsH&15vC+5||h(F~DH9522IAzbNGJ(1p*n7H5I-J-*jad?1_2QAOw3GXRi9vNsQ9@4(loGyJY0zriK= zriag|FVZ|+3T*Z~Q+gY2D1#I`%9BD(Q8=<=chU#C6f`+Da42KxTTPcHPV`vOEu zYjB&|c91OEj~F7K@z*5xG;YgzNp&6{lfDzKmA@IB^#_tw`@tor_T2hhlknZ(2En!X z`<$PHO`xMWI`N_F2RFH8VxIqlteLz5yn=Psg=Sxh?}BUe9O6m-$u&)MSt3t+j=Yj< z%l70y@`VSj?lJPq?#S>X{KbJYImGuS#wxZyNWXioFXWBDVe8WJ=>R>BVxt&3(x=I* zJ^Ff=o5uQ@7ix{>^u&*@GdS%9kI|{@r937)S#E##*8aErOZoF4G-i{3$Zs>7h}X*t z{>tb}$3;iqZO&EszCKXES>MvxEB4Xzx_YsCx#8>7w@0uow1s=Y6PPEiV>Yc}lzTZnNAtxD}&Ue0Jz^yq3Cp)Uo zz3*D;ExA@cQhNv=l3!}v+0H}4H|JQ6^V#)PZ2L3pfY0bzn!;gllikO>(|YnDeMjpD z1-^|(f{SAsQ)tW=;=`sBjnxgG>3QKA_}jvsSNbo_pS6%xIlo zCtho}<~ddKM;n=2N#tY5wTT!tIK`i>Uuy3~@008se>Fb9_3AGTPU9Zyzw|6V?G?~F z?`rft(xt%(pFOWY@9j~n+dC`WdgM{Vx>n*J?YQ2Aexo*jn$IwNG5zm88|lT-g`VN= zwf#@)i|UgMPI8UJ@UPtWCB}v5F(g)`mFqod@uzJre5849<-Q-3y`TIh63#1e%F_%!%Kyt=qT{UDJa`JMt+zzOI0ipJw$4 z9cDR+&n%x_h@bCL_>1J#9t5B3{mXOPO+{BhN1v$m717W-o_wH8p3)vl^-q3O(9NF7 zd{Pdxmg9YUBTKt}DYv?EXg{vx)4jSOvdSm?A-AjuWi%@==`Cjsb_Bb1yXoPzJi>|8h zsyrxO^-N#S(VyO@R3Z&K4fddDBhX|0bIUQGM#(n7@nK=yvv3o%PV*?EMgu(e~roUJvD0gS+-V 
zWR(2yv-~#rx&U9nPc$E1nzFV2GQUTL&9ljS`y(&cNhg{IH++ie%qI9#qCN+`H&Oy<}7>y{3ge?@^a7PZGD;DxK6R7oN9ZwRSy&!`ZIjviPR!k|FV=(-=o?&(xY0gf_4mSg`!zYe3@5S}T!cPnDQ18_qn5)cXLNT}wwh1e zqOl@)y66Kn5A5<*dw(IX>g}d)KvO(-=d|Uk^@E5vt~a_(df;AoZ+(($sgc^F>%og^ zS2o^j>qY&3^Yq+z?f&%g4$Rq{!g1s`eBxj7r^u@{T|Ea*`P}vx8=UazzI?4c*;<<9 z>bx)C;6nM^_u;XfuLhJm;N9@)Sy467aeT0kJ~Z=xXc(OEbzjM~#Dkt2K0Gz=59Bl1 zTv=P*h~IXPo#fGa^ZR}RzsRS(H-^vZzTh?QAw2r7{k*QfRJTL#@}Vi{3*BaW0I$gX zUNL6)ddN5F@45Ehz=dziFIwcT_>J()cY|AdIq%Atjc&2C>2K{bz%!1>Y~Geb`}-Fs z`LPCdFXNLwNPKFp)bN=c)n8yY=rpM{7yg=l>sVlx;g@+yQG3ia+7ednIc77Iei^m2ru9s?Oz}8*06J%ZOtx-9hACM-283 zvxbgtbWYc*RwsNnx_VEc{MYRvf~DB(V?0CbZBBfUKPT%qny2Q^+cA7*51Zdipy@uJ zvlC9w3>Iv9AI}H9H;*~Rp20;&=eAJ27T;xT8orh9H zQ>ejiKF!W!6XDXhwSFFc_7pUTDbMlK$>zJf{Ot%9M(Rb{Y z!^XRBP1lECAAKqpD8ETA>*YPpP4IV8t`F-8B5JApF~EK^_b>R1!4+cjSha^buY1kL zUjXj{=sW(L*>@H+&&CeSm)X7D<^SNA(;w7`#ZJF(aLt_Ss85a`q&{%!9TxGtr}H>> z6gr3hKz4TM4iK}Q_@qPGSnp*pdF?(R^>q<+f=5{=pGu5_??A?22B-Z8nI3+`wfM37 zema+~dge@4c+v5ReNwu{k)vl8G~m5CkmJjE!~b=S?in{)?^?*-;4I^Wyu?jtg|QF2)T&y($I6)l5P z|2$b9vi{THvTF{*-n)-&DhjNKljSGfcH z@MLf)yvhglj75)Ys+-Xz#}+?F+#FiyPjGrZN{D|*cY3#!dXN!ne2N|opZS5CpM|ee zUek;0xrV+!!Va=u<5p@;TF}+CdJcm<|6s+%*cW~~{Z#zv_#(#8!dX5Lzo#GT`4i7$ z=qvP>?1GLDA`aUz9_^FMlbpu=ri?R94yy~wJHnlLUgjt7(j$UQ)nC*ih?EKK-O@Y;zbPbLH!pOE-M+9AXgOw3g7k z0C;pFy(h$iWHC7Rxe$6^?hN`e_0bV93GfDIbtc>YN8c3u4*cMh9^82|y+1JOK`-=S zF*!lw?aAWO@Y%Xgw$6h*s(HZyg}fv9ui~Yp9tpqC{1IKH&=qat&%6ix<^hj zY94Gn%4IRQ2pq~~V;jLi{7NUnS-P0toHIDh>)Bj^?CBGif{O!u!J!*)%?9SVOY{ug zUm@1`Ext5n)ZQt>=dORKf4oZj*68sRPh~fLsiiggmDS&4zsP+0>vQuQSe={uhv5?q zJr^nFQ+sNLnV+8M{pe#1ZfHHVhx5{*eCj#ITe?QqFQ!Hs-1fwjY;N~oXZqu}^i!Hr zz>TjY?vYE!o9{$MVkAb7Ru2uI`Mu~ob`j%z`c2Wb`}zz{=lu_0FV$dhHZJS;R*_RR z*4l_q8=QNerq<$2o=x<;F}<(B)h^}vmEUA6@JudG=iUN?(|9^UCvN`J9umVl0SA9t zGnZ~c?5`TF`x%t~WAf7koO}nnHmPUYW2?PsDc4Fq%?}%##$*0ee&a71U;5xvZEHTl zZ^!?%MxvY{9*bfAl_5@Mq6`n{{`_bB1AeqNqm%9E;94 z(%u^BEM3MYIYa!bM(K0G?FGI$Dd+JG9oPJZ_?X35A?BEJveDIj?T3w)+~;IDnu z*ERHFHEe`k3@)X&2!fo>C99abBu~a~D%R%>&JW4C33!krg3l1Y(Ec0s5b2geFFvu{ 
zdvCvY&S$^_T=Q6H$#>z;^9#CW-+*}2c*^!v%MVmbjBa(`e7#EV8nyhbTv#Q?X?$aF z(!*xvOFigF*UK4)mpI?R9@QBIocT*ce;|EJ7Q07yY)PSKA3Py^3vV61N2F_h`|6qa zG`c3o2I_|5-KYNur4v2_{Mz8OH(j~M&8Mzx250YI?tIpvYOUlkIQe4d-%ZS~$uG3e zY69+!(8W&K{iq+s|B%=FJbVuG>EsRR!JfIanR=#vaS(q>kTsC)+VSzfYdiaB%Z{>6 zA0&hp0^ZROTV>z?4YYjdl?f6sOE!?cG&72K6qrWBctFfF5b=spn|$aYX`l^=R|KYo zfQJtu0|74|V#5SDLX@5m@CpGlh&AyL@K|&nJZi2uyIplVZQA%?|)yLSK}Y}>ptBPGP>w4{P)Y%V9Cch??C2104>UV`FM|8 ze++%D7h@Bc=do`ygRZnaGp@E9`neDE*fUtydkc7YpS+4Zp<}C*XXK*2AKintfI7QQ z-%h@ku$ilryLBNr%gOOubjE#WpTw>ViEF<)xahO#r;oPla^fjFXrF@HgB=*5Pv~aq zr}+%2OFpzm;mN&<1KK*rH^J?` zl6j?-mgel)_*1u~bryXi&+QM+^M#kLp+9gzYv)}P_51jSc0t_^?)aa!dYp5Jq^12) zFC3c&=lpD0+tSX`H&9Q?cpZJ4(ryP=^hq5YvR8|FfMxI#?4)N!Plywo`|Xdv6x)t| zPDpEOAN)QOdwvD9DqPmLBC`Xu51#onC5~gT(91ber!j7oUg6_T?bu_DwkJ6Kr?ma6 zf6pG;A8C2gPuuBSOO3nc4C>AO&`os8yq2;-Hs@c}=QsLn{r`IB|DE^LR-arWo{^qs z3YhNXxyXRFH0jRn0d4fwFy|{KVyHjG&Q&VN;bntcSrwjVQlqa_KbbJ$4TFL^CkG1;x>*vFP=N-InGD0 zv!>&J_ATZ)`SAZ{?bm7>&qNQK&!UUV$nqlSSM{UF-4y4XS91S zLpPicP!7)f)%gy+w7;kQXuH028oF>feao%$$pdl-Zr5+E3?^rvTIZD2J!#{D3%#_1 zcfpQ+{=4`_iL%=ukHF9;0DNl^MYQV?H7)oLfN7VjrN>&Xo))V)qhV}GS*4D z1N6=L@uL6qo1*)(t~`TN7VaM%J)uo=`Q5F27W=2#1NYaSWNcN-O?}aBhn{+T8#?8; zU#|O?K9#!hDEc0c=yA$vz+A-InT-3<7j1}Z?gxIb&2#99^Nc9Ve2KQ;IQq-rJ--S;)@YhKxYj>%dzKAQIr4#Nv= zzVh(t+Pi1AC0*zLVw?8RRxZjgxX_!vo%*;y(|+b6PM#ljJ$UESa>$~b|=t+3|>bo4gqUzkVvbkFqY$0;{# z(boHkkBtp3eH_~gc^W;C?^Lnb(lTyvwcYUSg`?Q+=8{$qZ~PtWN<+CcWls6JXVO+7 zo0;thd@a#m^c#~im@f*h)E9Iw zXU8)G^Ihzx1+FW^Z&uD{vnb7_# z*Wf1Cw{-l@-Ze|-&};(+-$r>5FSyxllqdWwmfbFNP5(LWd)a^TX4(&Is{cc~bR+4L zC+F%1bvttqn|7G;TH=3CzQ|sJ&#w&b^#@?E33a)tN6Oee`O7bbr#IuaY)RLACEc;M zQ@#i6Gh$7A48NXjFv9llhlk)id+%IiIzX4yWpC3sIph5YdR)p)UD@~oy8apFi+-!6 zf8-x~T&-6Jze0M-7DIX0-ot-3e5Tb=^{DifI{ouZ=0k6w9sLCIc@BDa z5qYK`SmXk)q33+3y!)TdW6E>)f~^crxp=mf-xYoGH?bGUQN25YtzNC#ndYvoy`Nmx z>eFIhF!XF=hUiR#j`|&(dcEs^QxBfcUW9ui*DfC!S7h9%ZkcZO#RO;nrOew}SFwW= z@>}azaN|Ehm!3q})7giAFZvwM-5Iy}du=+)5ADm>xxx``k^dnRdMRIZ$ue-CkNXeQ 
zmkD(~ICXLSWy*rS=GtlS$Gi1n#I5{;a~{ch_>KBuo+o|asVjr0Y{1hF_BZsjNn7+) zW{lnH{lKPQ&pXPmJo~Hu4ZDjpwaA&k- zW4N|%nKh84>-(+r^^Q@$H=og#Oa2>r?aE#*k)Lz%TgRaXtr2oDU-iDe-7iL7+;y9w z9=Lu{>vLl~bZ-MYr2qbFks)$xkasN?(>cOiIl0-J-3oC^)k`KV3wcns#?4ao99i;sQhYE%2RBm|CPCVXXs&51qA6%(Nq}!kmp3No?sYiFgt_L^ypQn~>_7FMm zKgYe9u>^I^KHL!54dH)An$q+Pj^Jk368;j_6)6AN%kc-NeLPNmTEC*l?X)l4vb9Y< z_Qf8z_qA^QD~cG6AxZp-6`p##mM6}@~PaNfg_!2_U;EVLU9xF7#yFPE&Z+P6`5 z-b30ipN^N3ZuYHfLz}zd@0wzlkNO-r`x#5;a$_s)YwV`3%#<&A*r2|Lo@W*t-*V1( z8@8ci^+vj#zHGh+Ip4}2GV0O*+vi#T4K{8HW_i@p+i5@Mw((EeU(&1h4=qzK)TvGM zw(JRvo<5p7PC6$agFdhpRoX(@R^>9Q%2)o~FXVqY?f)3IsDZ!xlV|+3o5_c`CG^U) zbT`z2wezvnZ*A9Cf}2t2{f6+OJ22OMC+4(&iuv)Y`m+@0IB3Yzu2;3Pm8NTYGvuOO z4Nkcfo++0zzn3}(?k5kg3~q+@ZH$qEn;xcrj_p~(Mu}H{b|aG^{V3obOvt#-`X7QU2=G@Q3Luegz(XqsKk-e=wK9e8;s5BpAjhvRzbCV8=apI!j=+j@GgZ#i#v;!W_5tX;EQ-9tEWCeTQ_ z{tuA4uMIpzekPRJ(!E&UdO14t-95btv`l;HiS73$<9D?7!9K{Xuj=_TAFd4_{}Oqj zkFq)DT#P>={4(NVkLKyV4L3Hvd7?|>h$$;UU_FWHT)K6!$CIu0{k=&atC^eEeFez7kP^o9U~`+nzoj;qN^9IYZTDZuax{6*Kw@pPlVn%FWWbG#f&=se9G?v@{aw^{^!vLdz|C_B3H_{LB`Gl7a6P1 z@9uE}Xm320dxgH|*RWg6N3Y$Rypi_CU3;9otYRaS|I*p``}|Mvi9HY-{ydMzI#Jsz z|MO<@ny$~$m0QjS%ow_^Q>BbLTiv-QnO(sh{|CzQw_ARi`?Pg&`1Dp^FQhLKoO4g^^(gCa^wZqCrwsr3 zcUJ~qdr^<`+#_YZj%`=g%MU`v)Q$brBl!uvo=nJ zd-`;byrI>D=2yX?54&kMH{R0vO(~bq%Xo90y{nEV3T^r?o~du1f^$8_^NgqDW0mp7 z`VY188I!(p49@>4^1oW&`3cqwXdhgU@>`qbRqCeL=pQm2(iiu;nbE({w%rjteQs@+ zatO{n-u5B)zlU_O7kI<@=goG%K_HL}vq_92$`_URg@oPUSzx+bsgd(NkMpWN2d^Belb^{?jb@PIsB4@|msU7bSv z=%3IZJEPw-7+gc2N&TJCwzxOR^K@3xmBUMI{R%F&XVJDb#5-_WuLoIQShPb&p=GYbY@nSo978C`zhsMo^P*R{but~$``$zkcMu@xOIIAf38U`ty2J&`^GjZTjK!24^tFmDY*1UjL#m*?T7stLISHkk{bL zJ-r!nI)r^3LTl3HL-|Z`JJw0MGv56Vjv?vV=4$ts9?%PC^oBbSwm;v_EK33Ab>f0GdUyOWTO4^-0RS)<60CJ}<5S-t;sN-X3ly6`5 zwYFuI_EWhBw{~afT-n+c{~OtS8)qGzJc=&PakK+z3ti_dZ6R3C4)^=J;8vfH%+<@! 
zL+eXzn&YQ}e;VyPHZHi-`(^q+GtygP-eC-1TiCw=W1HZzzQ1T++&fa*rRdeglo7f+ zeJJ#x6T4RON`EZ$>=%xa)9}gIofDMF>(Hq`Z`*`vB^UEIr9Z#)Ncj2hZMn(cCVVRY z(A#ED(Wxb9PhY}bnio>%=(7eFeYT7pFUspY4^_K=;22?Dxh?-Xazi#puch96<>2qzCCbTv53bb>r4v`fPWw)v3W?3cI8@7#Zft=&&vUXHE32YjKoANnKcSw>ZP z825rdgO|hPLH&bv!|k_2Q}i(Dx~Iqbs4bY>gEB^jb$toWv-#a`w|;u1f5`m^@kiMG z;MQJ`oSwp5h4uG`!9HVU@F@C%Gjx3OemySxSzXVveQ+FCmrv+<=G7Rx^XlJR8JyqH z?$36vmxn29x}@;Ulj<30QbCh>OP+^1{*%#kLh2*BR11-17p)0^s%NlS|$zB z*|5$N_K-c$#|BR;T;`F!(YopCX~^jYXkmX((BIhnd35V5_)(9|d(s_1JKu*Yzl+e7 zbDw}tVXFs~-H1JCuq~FI`g|#4ni;fedNbQ|%IM6WpkKN~dOrjIVD5f1^FQzE<>EQ9 z7u>{rGi%M`JG67CW9sCDa`FtK@Haz8hUiGCtCtfW+I}}T0=J8N1-FX7WvNabp>7W7 zXHTtj)T{cu&6J0Hn_tJm#&fQ+u18Ch=Y=0br|*YdydyG0N6n|UxVESDc>uPlu0cc( zO(Xox;J3Elo{wZ+&wdtj0Qc|nF@_h{fR6kz{b15feaip0gx+HR%DlQhHTnp&$V+g^ zue?ubA17VD9UqLo+~YEzQ}CC7CBIn*tMxShA64U&`6lz__hb%dFrd;Ix;5QeYw*&!5eaD(Ba@_l*KyZam(Sa-vhn75{ACWr+09-(LRdt zyA$hB&8KMvSNM56Y2BAO$Q@oZt2Py}NrGp7QxNozQbH zm){F&qvhTI%axBcZ58DlT-}cyQD*XPeJ}q7Ug7eejTy_T?+>A0AaAA>daExa9oqc? 
zx;K3SV;;(W>pSs-@3rUkxSi&6#Khi(o_@!uo62Q|dxUM>sXbD*i(?7r{@;5sd#&Eu zr#qwm-NYP#`LxaX=Pz=m9A$ZU9a&X+FD~D)_zlhabD;6?_WPbOVKslA{do1~ux*E0 zeU@kYNlUa(bKJL3R+lqR%Qp$u@0*Tci+&M4C_~4r!Nsn*HYSg2>^1SfQf%9&=k+-A ztIciDxe|wW^~rRfgwBQDm_EhroyR!PrgnPecfIO|f zRsA@)tM5mgW&e;o8@J4L;tsv750i&;+VwX7lW1~JWDOm8@I5%&e9Ql4+Bnx%mCfs4 zf=n#O-|camA8+F>e%CfcUnl4GxGC+jwsQ%(>gwj%JfGX9QP=ajzvezy^HBPq)+=;- z&L47@-mxpO8BgBcH=YnDxa^CxtX3aSKHy85!`~-fHBPZ#YWgLkUhoJ1 z0=q(;nBGQS?n1sL626v2l)BFN;?ycA|baXfJ_1yd2 z^xMYdCAie{%oi_F@077?0;Un%$=_@Jowxl)^&|5D%O6D_-UA)e>&iLw#?*l~I{*0_ zgj1iUd*F}y=sq-M9h~#i^|y+@!n~OCnH%`m?MK?x^#{NwdVb^cppA}w>Q9j+W5>|T z{J41=qx1fU%dN~?3`z6q*Y&tkrj{H2&izezS{DX9>;FwVn>pgEzK=cw{eI(&_dx@F zX}~qN_4)8ylPP|FADO+;WnKV2uD-rc_p|J~v>sRn=FjyId3BG6-+d(?2i^sSGPhjM zd;$NZ{%p?7c;(9A8u;AJw-u|D27i>i6d8Ve{TzRJ0jDEySn{Y`>d)s+m_;36WeDSo8qs_XN{=N0O zp{|FX^~3UC#(#ud7S9n2&U~qN4QYxs-Q(1$5%oh^1($lRoV{I_HJ;y8)%|Dv_R=Of zAAEOoQ62_gMgM=ftplO==AWY-V(d}>=lM?KDElCr^n-`+S)ciPGh+?&7<%$|)~8#$ z=eqO^ylL;cJASrJo&H6iwF~yUj^3x$iA{8EK%C%W$G=8|DP8J+e$0xxR3FdK&(m>r)@rv7_sM+Uh}F-@>1H^bD3QbqN_L zr}`Y0A$}8hTY6|)o_U`*4Xn-{~~ zkJC_2p|{h%0%h+1M5#Z))%|Ddf7UnL2RK0=gLA&hIV0zRXXIx;b9Cm}cFzA646deU zoha#nkDP+5&zD~;3;T}2S+0IhGa`?sE1fYs`2RS=hxE9VN6x9S9M=AUc<5+@&V-(A zl;bPE+i)M!?k|x~X!;H&;{a}xK*1M_Cm&5n7`g!1z_aXJLY6n+oBW6#AFZv*pF75G!V0Ph0zLKtK zxPM^t*~kpJm9f|pNplGudVG(I{M7pmbZ_}xt?hO1&FXiYircsX`+O8#g7zA9YjAN} zHc3}Hju&RyW9q;v^=Rv(#JRH7pWrf1p6d;DK>aM=W_+wqcc=NujJ4`^CiBi?XiP}^ z#NWar>8EUxuKUOiT*moLZzc_NWaCfTdSN?lT?#I8@tpc8vdkH6p20dHUU2!JsrS7W zxgnE-*kI3bSAIq2oxTi~(F1g0^8?7^L6igKbrSuW5XW*1y`k;yYHK5IcpG|oSu2<1 zFF19|vu~%7+Z}P+mOK{ytN2Sk4!oMaJ#{(XbS(b=v~p^w&+43gHErz#_cAtV_O)Iv z{)fmAn`Qo#pL=6w&)hfo)N57-EASKk+=E^E!sv?i#(A{ny^NQTzvU8K_;U=eohtw9 z#eC~<3A&YBW*gtkXJidZ5c|HB7pIs@mx6bSNvmTzfnR56tXC5%0Y@0qIFJt7m z@$)@ygYs~W_{iOum!h9tctm%#eaqCX@VB_fXb<+oe0#pNwk9~^Z#g)y~;$Wp(^ldili;rT;gEuc_&gPLX}5SJ8_j%o#=B)giy7 z+c>jNcX6D&c+OAwbDX>T&-w26a_CW)W~Axa5U%ylu)V=W-{nhvd*cV$m;Ooe4BoN| zZZGy#zg;haf9Sc#Z!i7X)ki{$c)=+@=aDDGbzjIcX=l{u4gAJce=>HX$Okz})3`(C 
zZi-##l*F(4+QfG?84`IT=c** z_fNd5Z;MR3mh;?>;_rWGn2$Y-lf|qu2a|k+zeAMQb^BKaKY46rZWE?pgZp~?{V@H@ z;G6?F!Pv<2_t(DI=T)7k`{PI7iS65i%xJ69o(?))ai2rz&ER2#POY!uNBHzM@`*j4 z;BQ`n%Q*?B$OWDF8sn+p>@N?YQTH_l*xX_lpdXz44tXy+as)m}YxbkG$;L%zg45qJ z_pI#YFW^tSCF=JKf5$|@IWO!wqT`t{H2gn9=f%ggg{u|4djE!YVR8N^^gL&=p7-{9 zB<16pSOX8iY2Q~*lFp^Aybdr1u#Rl}EIdBH=WqR$yfZdh`@hUnjM4i$qT8>*Zh#wC za*4id?j!Ce*)u}@mA3y`Gp)*FdJpCdXt%Waw;n*JsFMef#{`Tr3xCmP(}?Yoj&0?* zvM(9G$@}aFX|F#`KEO@E&ED7Nqqz!OOWoSKtk)}Tk^47|n{?&jEAQ{d5Xj5-vt9(B zJaq>*g)jf>#60?++|Jdkqqlo+fnL>a#hzyVWdnV%{EpBLmvzj}4n=;$Ut^5-12?Pkd;OIwgY)QnYI{>J*TGotYMkx4{`TwCyV0GHIrcF1D)JjcyL>lI zySI#uoIZ{=-*sZ*7#5uW`EpNaj47w}L+GD&UYUdO?7rZ9cm89IvKUi;)T>X@*P1-K z8v}K6cJ6a3jmMA|+V2_hjO)I|Rs2KGZ(uw#bU;~#-T<28i`#k~+)n!GXSMj? zCdkt=_iQ%hHbhT~ts-A*)RExw-Pjb&r5mg%pg%XTPBZ}H7;TO_AG`5}mevfJ`M<5k z`Rbida7een^ z@@AjoTIMq>TV=HR{;n=Ou9vg&RY#BhT3aWSndea2&zn{9lg4Q;XMPyIJu_zbRPsST zp#0Ay^(5&kKjm*9VHsH{!_jT{q0{3#^*H4^1S{QRvV5X8Lwx(hlyM%}4CZPIIy2D!=#rAob6-fxeY} z$k>?Vy-wFM@H;v4u(&1`T;0!6F18ukrJEl~S>L&rU$M{7sr!k}U#{Lr`OsepJ^7H% z+9z81SkA|(lj{EN@9J@4{J*-r_iDoo9}stn?CNrJd^y73ZzQDU-pupw%(-avC6>Oo zrx*E6&4cMbka8j3Cgn7JL0=xhNz4C$@tdpjuOP3qvoqo)AF(Zq<)5)~a67e^%6*+N z$fTm@{*s%19ozjT>;mQH_^$a?=D?3qK6mKl+%U#4o^~}lHT!!o)S<1)?#_6hQddn& zzRk~gPdgX0_j$cs4qVyReZ#N8PPxa^|Ftafeyqohp|R^p>? 
z>wg4!#*yQ8>zw=B4v$s_FZ(Upz3=SlMb6p)F{3}Dzx6t3!N-Ze>FF(wkEVwhPtb1~ zUDAFdmUPG14dvuH6wXykr~H2t{*(XQr@P5mcl;}0$%AX$)7|LTt)xf&S5Bd)&N|jU z`}c_ZZSYS!G`%-n=yzi8muPc_&{=;b`KaifNtkoee)E~K zv96@Q9eV1ib7sqrhfmro$4LGk(I#^avr6x6R~`3B%Y7z;@1&gX2!G_&Jtm=dd+Rf0 zUFrM#ayt!~9{M0Gxl^oHoj=ppTxd+F!eaLaD;)2*z7vtRbkF>;kh z!dH>c8s&O$oqS+pf>WmEV?^FIez}#edUzxKsP+HaF2dW#(n5u z=$*V4oqPa&yFIk)2aqHE^C>cV@3*#n&)^DArxWi{$nCM@=?UOyD_xIwED&5-?iq%wICa`b|E|Y1uV#<*Um!oqb4Gs3IWoA*S&+m{y0*!F|C{sBXXv3i zuAH=&!4-Q#J-GTX;m~srfOX^Y-=*BD^3A?e$LOBNdYnBWB~4^JRChC9q8mNV|4Nm5 zytk#@RC<3C?U?_&Zdtjnd2K+slK!O+<{P}zdj1MsY5!Z^<42>rJKXeczwTw6LDNFy zFEsS=-K$`#XYp_Pa504jeu?ih@$(cGTKeVIUw6Xt)ZeSTH#od=HF5Om7%m;3#>qX8 z*ZJ2t(5Ivu8b0e=dU+p&Ru^A?ukvjAvOGXOC!(3iJ$LYhI_?7@2xMtO?UGMPc=4p{c`IUj8CtG#Xod| z4Nm{!=2<@ zKMy0gTn)e6b-0*C-2A=68mGQ@O55=G&#^=+SJU%Y!te|>&lcG7BBn{4EFLeG@XIx)Rr$?vfOLBYSHe2%Vpz?4!6lK7PW5^UUE?N= z8mE7KC;gr1C#>K*BJq>9ce#9W*M8!HO_nv$%PRsohCW|5MWJ@N*2&Lth%fxriAA$8$~-0^)qnjo0?8>FJjM-lY|K#`n%u+`LOqcb@V08Gpmd z)%gXJI9+I=F#uDpJb5X2J@M1Mco+JezwY{-SaaTVOJj~H9_^2Q(mmzEFZ7J-mH2tK zz8$U$FMdty7rMchcfx$eJ;3cHG%x z&+pE!rkywhwX}6`g`RPB&!0FGCv?gaO#L2r=;TTCav8ppU!9KOUcr`^ru^=hR>Dg> zaN!|O{cD=RC%nA#<9qG_aT1e#Sp^42wI@#rVc4SHo|2mw3MNUhsM1=Vf@(4ZRwt zyT+7A{OF&s0q$J!J0*@7Zo|EN7B|mr*vdZ`pJK{|yM7Oi`dQq9!Fu~1Kj{?r4*sBf z?Cpe0uiQMm!uy;qe$ub`t?`C=>z6q3TYQR%A2H@>>0gHh<8#~PyLYapr8oEEz3>@- zaHr5BOuV?#*QU*h0AR#Cs)9!8h&u zJe>D}<5~Q8t?@~t_6xoaGp=|1Ak)&r&ub^zZHR%}QuI-YGm-pC!D*>DI4=6MsH{D%mc7DOmaX?ErPp`zOpL1-}hbN9W!+p-v(W~>r zd5T+n;us!U@yok3yz@{^)M4=p2J}2_{k+8&oUh%x{`&b| zE}p*EcQEl2SG(tN4QTw~YIdf(h^^`9F5$%W@;!8lN7KIR=Os4ynobGq!b3w`F!9T? 
z;H&V^Oq>B>UgB%NZSP(9qTd`}D0DwfMMp2*=e*#~rC&`?TJ<%LgJ*FY*!a_3&Gu(7 zb2^0Up1%XHX%<>F26wJ`9Q(?93Dl)&EK*YTtOKencP1poj5 literal 0 HcmV?d00001 diff --git a/src/test/resources/training_data/episodicMemorySimple/train.h5 b/src/test/resources/training_data/episodicMemorySimple/train.h5 new file mode 100644 index 0000000000000000000000000000000000000000..e7a7d697033b4c691f92dbf780f87c1261bd4dd6 GIT binary patch literal 90048 zcmeFaeV80|neSadNMuw)^MH_!7+`mcl2%NZEwtZ{to|`u*K`pj{g|$ z|F*L)_NzQYZns@-ZSy1h{QvGpQIKN{_HuRnVm}&kPyfH~|MzGh@HXyJ@zT%td*w6x zz3BNqZ{#QaUi$YwZ+r{CUrAjK0@M+B>Hd7bpwCN|_It^)K5t-z^L&N} zTgwq#(&x?Ex8nJGK@+*FKS3_=ZFpzJt6x>nJ@7JM_U-pJe5T^nJmdral!X;<`jNo! z-RP!VTjG510>}6ax6kQ{Z~u@&FYyt`@Xnm)1x;weyY!vuuj_w?8~+~i&oAI=e|#~n zI=ZRO?Ds~{xA74a3ORPY8adIGY@g^jGClyG`qVMQ zH+nDl*Ytb!f2nxOuBdo7?OE}LKHZ<`pmu9PH~e|O*ArE|O+U@`FIl@D#s|S&KCOVO z?Z>hG`#pbI0oP3NUHoVDp9?toPP_y9od%B=AwvXT;2PT-yWtDQ-;&uC??V??ywHcI z5A}PSB4p<;eJyPdKifWzSY zdHr7F4Y^O%$ACd+w_k%?d`d2ZvwW4|oeJOBaU2~S0j%<(!L9ufxIO|s>4WPd;z)QF zTnkQz?mBcg^^r;j)7&SAQ4H?-gNirq`02o7oa4Ga^7QyzVv+Cla=$lY5AqK38(rg1 zIBLjKo6+xWelNarbidd2n?f&J7C;l(I&+JUU(}p9x@Bd~Ti4?`Z?%`<2dDRY;~Y;N zIgD=5jsGvfhSSh5Jb3tztG|pMdYJ#CiuVWN&chZ%tAJHL6nxuX3|~ohhxSeK#VzE{ z8=;T?8(bNF-Fnp1%zdmM%k+3YBmB|eeDs;_n){T# zhqI7JaY5efT3Nt7zKT3Y-InfUhv)<77<*Nx@;SYLQywb6{Kd4*jz+f=-?Np9*LNJY z{_kAh1{Xo2MjTd^!-xm#6Lj1kF6^7W`o%N_Q~$4iZ}?q>{vu>kE@~lX!zYKHMVuc> zTpfgL@NRUs;%`>bAB=pH>yX{xypwXfSv)p;ev_;8J0KnmZd-cEWN_kqTW}_e;fsh{ z`9Szq{NYXT&Ue}7Mfkqc@2&m@Jns$Mef{3Df30|9FXLMI0s1j~=FfF%&D;YjUN?Di zl)9ySUAnT6w~X$(f7S2(As@S~$wl1qP{%aX-iN50xrLI!NBKDKrNHSP}T3N_yFNtdnj zpsN~GN#R9+}gE8u)T&kwcZp-YY` zPoby8pUcEmbAACQy~;OR=i~c5{oZxY;O}Q*OL)ERi-o*yKE3M%WF&4&%JuLfxr{&M zIO$^jK*gIvZk0V|E+gLPEA`#rYA@t8%J3<_$@JOArDHDzuJ!Wb^Q2^cly% zKln2LmCS8<$M0wSFitp`CyCs**@_(4? zEymUtaOyYatBx=2^X6WJ%#2Os$IAKPKNoO4547_fu@EofIC5{gHP=gNP5~!>kq_uk zJj|z-N)Gk+(WwQT&PnevpYh3DgP!A==nA;i^#aZvGs+&(d(kt#TRj>WCoXlaL7YjB z2;Q?X)^N^4;MB{k&cIvg<@h}@b? 
z@>~l)jq$_rZx(R!AN4x*W47i|x~}e0z*VuQd~%c+Nx_%!C;K<(L0a!A;9MPBjxSMj zOn#H2vws#J5kBD2uf~TL_@;jiJE4~_>i2rd&oy*8RwnNLlUO*ch$A;H1tNcJ^4>N zuX$V0!yR0|7d7pRWsQeah#y?n_*Mmi{%5 z9%9>#p9T)w7~S&D74J{XGs_lkT$Iq4WHY!HJPgeyH{*AHAAenWk05(D$=3#FbwTqv z0dgx}$mT!$9%Br~83w1mIJ!2c(K#;5{Gx5&=4*g~hGeKyFCL<=_lbd6{9annova^M zo>reSfXqgBGX1HRo4x-LoO~@_N=&x; z%?=iK1{d~I^Gn30Y>$2u%aLzE-ZvKdlTM@Q)F1UU=vedVI#!#8zE%`))|Y5bx(wdC zF3#h}rw7T-wdE*t{SbOn&Pe{m^~6qsPx|B=@nQT~{`=Et#oNG`ew11ot}5g*IK`#- z>Be56a@ACDK{Mfd`E3P%3uoi!uOi2&4@0*@9-mydnLOQkYXRrZN0k4b+jp{g<JR(9HS}*Ox-vM;^OTRyb&$e;551{> zG5r1>9-O?Pb)m;k!A~3<$uRRoPf)85Ebs}(Wb=e6wX<}b14k}tk%I=wivfP8{GuMT z_eSh7}p`c;AU-C$?}SNx`_Pd`g3?}u3L9Z zHrY5zb1jnJYC=E^DF!;nJ>G8E{QzPMooY_Udh;sB#EbDKzf1W&@MPZeTCXU%T|dw@ zA!D4F&-CiNr_(XbGc@rX!&m=B#e3)n_!4vm?08pu71C74NSbE8fH81@Dr6?{v=l@U`IJ00FUHTjd$m>&T4Sm z^5bCmfA*Kj_Ra2x9j8bicimX=UjGpBbSUF#*0PMQ z=}#~tU+nkh(X&+Xy||s53{K}YM=pOZW8bV6!k6a4<+~Gbs$=MN!+Q9^_Fcq)e8+n; zKK;`|j{1AK?w;JQI_J>cI1lihI{7+*{-%xKI*YtT@1eE0qp^$8ZIt`Hh4e@_Fo&(< zCB{GU2l4HpGlOfAmr{K2mhS?CABVuSevDk?!_*o7st2h*#2Ww0b6Zs304()y(}(bv zy$gJqUm>4*Iq}`O4q|l6*mC5WinnS-#d~xtms`9IB8&P|gIn~DJPyhqrGBkqEs>m* zOyLjM-q8Y1*Ve1p0a?-!KGDbYk@irG z{4OBiNd|h9b zzd#SWX#8k!d;BWT|871IeEC#lcjzX3_wlpjVB&@zabgTgE_eM@{OPy;l#_hubzPkI z5gxydZbG~We{)tbucKaL;L@JV-6yx#SN90|2#-4N?L z=yl{ud~AFCYF}zH{-?Mxd={6Z%v&x-k9UzXoAdg;2f(+M`ebmLXEiyb4|lFeV@-pr zUzYov{M6t@Zw)oNgl`*M`AGV;;ePK1^qp*>|D(TZk=qwd<9c{BIICOodDkX3?{9FL z-=)VA-8^xKm0j34Lq zt-5;ww+X+~wdxl&U#4{);l2Bd#JS_A(Jt(#V(g)FvPX+NuUM*m3YmI~IG=2sXX9k; zeK7unJH#)_Clb52=QNwdV7Y?+&M)9BUrEmR-T2m3g*f-^OM1In7 z;R~pJtDh!L$xAwB{0VM4W3WUzr5>d1PhF$=AA_@eHAYU;eoLMAYs3~h(U`&DCX@Gt zPolr0*;DxXLJnQu$M2X=jkB7Jp~h~@{kfN1>*Dj!{a-eM1Km8p_%25Gx?b^RaK>Nl zb&Sals(5Rd&ufrpJg$kaeleV`uQka7KVn~5o!lD&!#KzA>3qU?BK-;aYSSC~Uwq#` zm)+ouuGSpNb@r4|qt%=Cd=A*x7Wk@<&{vS7%IHkC);_ki8WvWGZ~{yGfs9a7I^q4ONG_=xY~Uny+mMel-;k38X zZlA38Zs$MEmFn6tVkQOG=yUm}^tENAu&?%UX7d1TdA)NO>oX@9b8Wg`L3e=uC3zV* zKTeM}omzEVj$b`ZO06>fGQV1e4*mri_-O-MMd!bG{M&mt`+F{gj_NeJj5m?57J-M@ 
zNZ`@%iFfIwOs#5M%XwhC*5$Tok&g_nMyyVM4Y4?fbHtovuVSD0Xl%J_0q6EjTBozP zP*3gBoow8rHCFNKf!pX>zFJFt>w6FN;4{Gd8C|P~b>L=FM~znNW5{w8K1lA6Jv1L~ z_?oO;1o)6--f{@B^hWdt9{pbV=E80^KF{hAIJDnn(JtWQ+R{LQFF^KCIq^dNu*asa zLi@0y)*4;YbGG)oGh?m$kOR8nuSuL3oPM{C;T)d}KLmf&{gL0dYpRn^vvuc};0LR3 zqgH@#`(g6(TK4B^jt<`*@6KzX(Y1UUDsRs~C*al^#MBLWKV@*+nm_dwQ|Qp(UTXeJ z|HgQU@kbZANp-SHe8{fV_qJ`^jyK_xE%Y~^Il2@--9D~(>vqE@e^}_xBNnV~iJ$YX zu6XmvMVXD37W}P$7yaJ7&^UoVbRB`Owg)}HJJ9ukg&$<+IA6Y|u&Ix|mFrbYOy0C@ z%Vb*4XLK6y)aeFw@x-SKd?7i?9n*T~t#K)`lQ&)cB{yILdY}+{HJIn_BBwsSH@-WC zz6t*}x~{wlzO3I?r=yDtILnh+Eu0E&e5vQ>In4vz@#P))u>tzE5Si4I)`)reL%gMs zSAMFxQ|kpD-RZaXCT9Eg`ELAKzOsJ2dIb6bujZ$cdkX$$GNwq;O$m87GJf3jZO+4E z^L^L{{DyC`{T8Ba<2i%7`2lJV{nu0Yj&?OxbXYAE+*S|BfJKu7|`$ZGR9L$UMH7Od&+xlZ8tZ5 z1MKhfS5ltAwz8k$OZhHY)dNZoI;ZiB@Sf!m^6x6}hEIMTkh^2x z*1^{qClOO^n%Nv_JHH4|bQ`(Wr@tl##_Byj4=i}ahvA!dJw5IV1#i*zTjvFX6F%MR zpnWe*@=Wr!-1o+Q4Q}K!xP{p8_U}}@eJ;jd(Pgt6_{G5RU2_fwr}bC$VcIv8fIFrK z5+2Q~MCj7sCbO^Xvoqs{CNHWgia)jjT^N8KSA8;HWX}p>m_yDlTrM<|e7aPA#&~L|4`6}i!n5Q?m;XTmj>CBHL zB~H zaoOVqU5&4`{^jNsTW^Z+S>sRpVmDD6S6{)n>;qii&OruiaGR+qBeyb#a0IdYU2OVU z=4@1J#S6y`&hpTdzP!(xi%qI$0@ufW5c*kz3$dZ)Mcs>|*hycGPamGkXK+L7xptVb z8a-wDU(iAR`sdIKGPwJ0YK(zu=w5ha|518B`F!ck`7y(n=@T3=Ff*A0(i)87Y3BQg zVaNY6{+EIa8>x@7`JtjSb81XUxXfu0DnJ@Sh^jJmkR# zulu(`F9~>(b_|90fop2czvXcN9t!xfLyrrw~F{i(3kZ;6ea13&g12m5-U%mDx z%%5Ic=x?(9tfHTyN9or1^TAQx4SDIA+&(Mo*U+QEY0juQpE*bRoYnuyye|27_2=QG zqaJ1Y8^T`DkY9WJX@9ndj=IsM;j=u{qzBVHV1sdY4IZ?f?)qP6KcsyrRbb4&4d0UK z#Mi#$lCgeo`F7+E?7ZpU=_TpmQ^sNj*J%%rx+`8ics}Q$LVulfC0F$fXfG#^0c-e_ zpF{efPTj@yzc*h|;7dQwHP}b%&+*InLvO0*(n}wWY2tv>e;kDk;1#? 
zGB;7bTABlI>rZPeuD>Zy)s~Qx?#{0_xUKFb!Ui63ni3mTbmQtpag^=zfcF5~mys(1 z-siVF?`{6&2rsXFfxHsNaYUpF@}GE%4V;UxClff5H_U3EsW) z8X7tIAoFwd8j2tMOFB#NKix}`a-H#~{e<%I;7u>K%lJZ#R( z1>iP(8E*1v3=Qo`Y`j00Q@=ySW?Np}hxj5tfX|0_)jbG>J!Y|QL-tfn^`uv7rTEof z_~Qc3>>EL+_HpdMr`Gm@^YI&e%;;`8l08df%$E{7>LtpH;Ss#LuDeypD_<^u0{s!M zOYVY)dAS^=-{tx;e5vH;`UJkjr{<8xmQc%L^!%;@UkonUxPBC$I&q-dCencryO9eqQ?`b52vK_y3AK=tOJ9TC2-^ zxNVPY9032?@6p(Y^U@o17ja#}cf(ghXZ}^-nUi1NdoOx~p5{g6V+NBtKWkmCUl=|cr?`1nHj|I7SXkf_&)$j5 zvCpRuCLVOpy6zkIs1u39-(>q}Z@+;WAs;19jc)QN`HXrczsvN3jtq@Nm)T|JC(+|#%8{D?$9W}o$pEkJc{=KF69(CPs&(kUA z$TwPi8k~Dy^kniFoZC0W`;m(d!55}sKk{>WTA}BK-$1Tw;C)Z{hL&t$zvJx+x|uFG zKSWN`e3XsHGXE+U__92Qo@P|RhYf~q#i!7V@Rsi>@F@m{s4FWONA;Y=^}y)b5&DD7 z_HBRGJ`e5n?HpGqPTg@ui9D2oBZZD=)pmgHe}JnU*QOWU$CBOW#`Ug$Dz~(bFZdIV zW$+boyh?A{BG=c^Q-$)PPtYZojsA){e+4*B<2X3$FGr^N z==-Au+;sUEy3<^8*9p-4Px7#G3%Is54=i1K=Q}+o{$O<1e1?3&IoV0~KQ-u&8f#e} zxSc-W{(`RS_vX)9d+~OGKL?-rbEZeeMae5!cVSIkdvFrR@3)l?I_D=>{}vr!gA{$# z(CuM&nZ9d|>M?Sj!8M63^}CCE zD&C6wh`rzC^b>dpUtQpH?V((;t^HRo6)xLH3!aJoiSN>#{yOa|oXU~P_gT%xXFA6p zChzne=~tei|H3~9={;lQP>*2phWu{+3>%}uQ@&DcSKpTVS2UYA#%FZ@WsLtAoNUy54>aBmJl`A0vFe-ng>neF z&fp~Hc~|8*VC^}~E2__?)=Jh5j$f7D1V0}G7qMn*-?HD(nYmvZKIvS0h%~pQHF5Ps z9{6hLNqwrpNiSN@tifZQxd{1cN}QB|JC9z@;Cznz$R&A7^d2#v`o8HQuHB#O+I3+) z^S{!|CV0;5Ex^zI|{QL?$gWwOs=E(%`m{1p1(dnDc9H{n3RUk3Mu z{95xD)tun%_%rq~e7bI&o}-2BlPO$}96sYw&HFC6rr=NbROeUrp&#U|_U1Xp_6{39 z*-hhL+c&EoP~SuFrrPRcfd}GLbHWCv+-LbkJZ!%vuLsTP z90#YtDSsJU`D?i^si)Okl?x|%moL*?Bgg3_Y`@u6tB}j#&!>O0I$b^`$6dx(b>IBz z`yD;wYvF78jm_QIl&;HrIB@h`)z{?utqs4}E+$7+@c>T6Yk)2lKTD~>KDN=^iorD( z@ysN2F?27-;d|CW)Ms*#Vp#dc;4IDs6a5&y2tVJ`IFCPrvpz!OZrd9)=U#L|ZFKcl zqTVZix?>6(gWbuxtl_i!I!fcvW6o|6yUt`B9l_%n^wb6?d&r+W{#xY5k?-dGwO*gw z(BOs{k7?{OANmQp&E|H{QJq>Q90oV`3w-lwbU=P71^wQuiP6m8@fFwawWmX4Jsq3u z8p9Xtg`UI{a8EJDi13*#pYq#B7K2l+mCY(^nbZCw@bnJSb3)CI96!zWdmqJh)R&Z= zr|<3H#3lj$$!{~+^_}(TuEzEW90Dl^{5c*$b+~w|b3ok(*!^p)s?Z@uSF@~>vORo3c<*a)V zZw>nM@v~T`BwkARu)!%-q)+X6DZO>;S}6M&T-L{S#Cc4fR{p#2hlL#KHzvE!)nqb! 
z(M9C53-}`k>eJ>dOn!sYbG>X&ka$(j)H=NM=n>Nfr+J~;ZtdgUZGQzuGHfJY8eB?V z(Kz4chw3LG19-H4?&G%x7Y;+~n~VvGH|+PPh>Hv`$d}5IDf8z0S1@UW`wB^+xb3kDAl_J!An^@^s=;O^VTl_F-#2 zr{|G8Cm5XT&$4M6(uc$6ytlF5@k~M24I-y_)SvF1i@|Slc;p;^3$^<#@Jv3h zf=_u)bCqr$vVP-n*4j2cgzd0}^xTa<`0ps>oou~AbUUAy)=i$(@pX?ft~nUJ>`R~V zd2B;oOwTUx%~Nc{W4aD|;!o0r&9AxZN1N8u?$kMbHrMf8`pE1!r&{(LMz}9n-vF2#j=O&p;E8 zu3iGhLfWqq3;qlEz?X7-RpRo{D+_$Wqy3Y*wzf05U>G?^iCyVVe6;0lZUGm%$@b!) z6Fn1GzOtEhX7%R=XYnT9bT3P1{xvw|y|E_xZIACo-}ZKkXMDGG}2cU_>yN>t2_kWu;nuRIN?+GE%d^-t~a`FKa@RVl$<4g!*{{Aqi4x0 zTy7txXWpgoCLI~RlpHF*n{@?q+FF0WceLMMKC}}x%HXp39PqDr1!Kp3b03f#vTcJp zYH+RFIfu{BWqeY)HJ2ytLyzzx-!-^QfBd$6s?(_ry05N8yeH2WaDz7xm&8MMPXqC& z`jE8wP>dPA_4uClLrV9(j}Uk7DPr8+*QYw&#QyTRV?Jxpb+v^H+ji;jdAe;&^tSD*fqoHtu{QSOioiU*_X-H-0^ zGu40^IY@R*Sl4Uq$UQ^n6m;FXuYI5o)91)nv|nrO^a9Q&E+yYp)XLUD=$Aa7z_aFy z-1&RSp}FRVv7i6PoKN|qU^S0t{GCDnBmGbIj1KW+aMm}e&xtQ;+v;Fqj(8Zkx4@^E zQLay+qdjUG%Y_fZ8~)M#)dEg)K4#&ZxW18EnGe2i=RcdNXjLm%ybg`{`I)-Q2QhXbn z=GC>vs(Up2kHgn@>D9;|{+k%f$-mn1VR2-89V$=3JNUKdA%zEn%btr5-)_Fz>Y6q_ zH~(p_PIl8AzU-vl&hTyPeize?!MXSj{02Gb5OCqo4f5gux|$4bHG0iO`tlCA%eCiJhlSm{kNW_)1BEFX}24PTQSB^=owb@@E}&~lQl zH8|b3pnFNgOJh3x;U}-ZjJX?PM*9&BZv3}}4t(;U{BzM&$axyQ0`mX{m*N`%e6-MS z?I`$b`)TKW4j#EWfS&ZLy;#~KuD)sd{rK%!1>MQ|ape*7Go#yCk0QnQ$vJ-M$l$u+ zHTpiZ=J7`_EqP<~q5i5w&NDd6lP)dkQuER~4HxnXcY0l}>y~OCFxr!>wF%t|VEEki zU&UhmIL`4|dk1mk*du#}1M*3~;S%PInV(9a(Yg?Qp-aQ3`(5hFk;gIyCQs-YP4yG< zV;gQM=tjUj@jY@HFd=$g^7Tq4bN0N3*A#Huk~cn=eX!pt;4B{mJEHC>Mxdnct>nizC zGHm!vfzRSR#ipb1s@y19pCzta0|lI(n-m{JKTyICU&*``wV;M96FO|?$?=!_O2hr0WQax+P9oL$??%I*$(6dHDe9+|3Si8h;n?J8F;~c!E#HhR8C_1V;nqN!# zu6m?=p}hdgZHCX}U3we)C_YOqIS{>&X98pk84DQPgr4EaVm6DBOVB&lbi(Oe`ZV)k zC*c=dqc{rb`3+8WOY#Lj;#&1U^m7Tnb)BAdWpLWJG}&IM?0!tgPu==a`U7K;JGiH4 zBXdfO*M?>n_)O26h;{L$_-wrnxzV3!8(o9T_QSQ|{<;!>cq_h-z70-#)^jklPNiN; zIu5Y&+7XU9{29JT*G}Oa*Xg`}LT*39r@mG=Hk0Qy-qtfq^=yW87Ww^=LeH+esx7+r zcr7(Rd0Nj&G<==w_TpFmSfh4H{%ygvlwT$HgV=3BfiIzkg?Hxs*T}yfy3@W=`IvAU zK7Fqb7IK!SRr!vp~w;Zo%NS5-ng1_#cpabCCeS*Sm 
zK5O`fZ^I|3d3u(N@|ulnyxYJ%iQTj(CWfBQDHiJQ>Gv+VrSL1=Q#9+Bj1!6X@@w&h zzY%MU&1=|Z{b2>1>@U7$GsSUd&NTiO&cgl=VgEz$f7PQc&^#U9xW1b>H8?#dutp!@ zb&P46zXbNx1>E{e@GJB(_RNcEmX;Q9o^lmy-g^H>4gS;Za(!kx!5@vT;(0l>A!*B^ zIiV&xNp}MNcn8k0k)C0rXW8hUVeyg>_l7Toj`l{U$d`)8tARs@CH!7<#|F2pd43yH z8Jw+?RIqcWjDusFp6u-Fv;4}Og28?N8gvHV3#o_F z*)r~jkl%<-eb+(`%cs(V_Q9zCmoA?@3S5p}bluL3-@|~qH7)l|-Lvk2U+dIHSI^C} zzEJqm*E8QmoJf{z?^j!1izCs^_QfvcJi4~pZurEXjaRL%nvadhhHZQ`YMGuDIse1> z%r3dFi06Pj>DHMtex>`zrBj^`gWT5o?lEREeEMy=DUpK`erudnLf#?#(%>wg>U}5= z&tuMnI;{1YbH4x#@nUd>%gq6j-L5mZfqUSO+_a2wQ+fb7ZcCqc^e{TcC&h!oxp`Oi zRSmZ_olKUtUS4XytFcq3e@0)&XIEFeo1Q7;ZBj=&W$0l%I=+g0b7moLKwi<>MD+)` zEk=QnKSi?lzZCd14m1w>eZP`D4o-e~srgy5N9cXzj@+(8SHK58l)jyw z_0WPpcl|e@&V@a|KcDj_TbZ6M-YhmW7m&y&q2tEU+S_tDrez*UVj^3mOeAB5Pv`CHzn@hJ1Lv#|wZck!D&dx?5&_{$VT=HhmDBXgNR6pB&vcY*L5JSY0o=c>Cso{~p;5({ase>;f z*UN62OOfo_U$2_&L09)~7(O>|M8HS#3qH$lBlnLMaHV6>^B&CmP*a-kLXYqnkYfVy zdHA8>YfRzT`TQP6K4fT83(9{ge9+(~%R^Zn7e2qBtMN?pF>sv1G4h}2%3k7CbBcy9 zrr#gEf|%ua{X~wVlQO(C&&cI5II~;rLwOGyqhIT>+<2R8pMq$1`m>H@JrjLy+KpY& z_b@qMeCV9feX00T_||&|8tB?@=anjTQ@m-6Z}?0OXFlRf95jytILKCf8uo$Ie8QQs_&Xlsh*mT9X#U3=w^H9;qigbRkE6?`?^!$ zAMWT6LhM*tPA_&(9zU(s=y`-St99_^^btPs*TCmGYt1lYe)TIx*WIHRQv24@YuraJ zwCfB`b0T`bNCJ=Y&2$R9LoWeW$T5SH&U9a8J0Ud1xN3LyLNuGaMPFp_Sn6@?D^~^imMXY9fF!#!J z5<{~yj*%=)p_mxP@oHt0RZL*U2xE?}+>ISkKl1kzv;rh5pvA$P}!PQ^a@BQ*C*!HlkX{tVj za>)m)L)FRe#6oj$g6 zURii-ty8{j`0V~h#qDHkTn0DUb0zhxu<#o4kmJvD8M}3VjNSxa9p$@dHPBy-9V5rS zHDr=b!z=kj|G_VTp8;G-Q+hEICcb!B8!{s{3w(chCbY`4w` zM$fI(yg~0BxtuA!ELaboCh(DO0*!s>*)`Ulw~G3J-^d3a_*CKd32-OiFM5BYY`1za zzb4_kcr|>J$7d~_h9;A^=Ep2j`&V`_{7fgmGbQcoXMdvo%(xSllf#j{~A8CZ)Y89 zsCQ#-(crqF8(z*HVR(}tXia5fKk{SW?)wWk%LjrPO5dIJE$fSy;z#5P zobgkJB4%x-_4Io_|W=r>4K~l{{DVJJ5fC7(q_ybH0x|f^MBL z)_T@Rdg(=D;A*EKH-0yVv3zu70jKA1U4o4zpXPMs5AMBGCP#wbW-!Xd7XS-iSzpI> z;?dwt4n1?<=B$)+wf9kdoWVu#A|DrTlA}h9Esn?q$K*N2;IyxP1vcosPeifN^>l$R z^E+^kU(PY~HpbkMj+MIexHEh<580f}Sc>(8P&q6dB(T$OoU*}e8&#z}7khog7(JO<~!x4`P8-Xl5tv$^gK&ibvc|KM79%jO!O 
zx8?z2pttaY2s>zuqIcqI{3}~%U##Z!74wEq&t(Kji=nCCBFF6m405WpI*7yfrJpLt8S{@Oxd?qUSR>-G{6mxP_hK<5@dIKSQ&rPdgFs zjvT_TIRnLvWKvJ7{kFZorhbw`4WG-O_R;FT%SH3R`!ewDSimL2m-Zub$OA&bRRACbYy$HZ~&`49@hSTHRpGRz56;t5G|o3xk`;2mgt@%)7Ok z?1P;~oKxqdFM~_ZMb`EF!FR^j-_c#E=uHeRdTY+Fo9~ebnD&Jc4~xhXef9;7ktdXLm*x^#U5VDhC*xbk!u6X>XZwzj{)= zhji0iatFsJ;K)^)r`Niw`KIcUWY9g-2A6hcR(;0DicDzqjBg@G;EX$#stuei}sZ{&8|L)~E881f1 z0XKkOT?I|Xg~R)C44ejM{fl(4<(xdP*gY4g|GMB${f57V9tm46zLo2VvBe94=Tp6> z;VV&RYQ%bsjV|Z-{9ooh$Atdda%g?4ybM{Tzn@Se7~9BKR#VqAJ9W%QNI#k%j z;;o!N4B6WC(fDW~lVa20B4pCszt(-E8;`X_=|eeA*BV^?)ECFa81n@6&#;H+&cbfb z&dl|sXR^lFL%777*2e?<+VENYcGmv{SUnHP;Ii>AJPxcu-`G{-YK?>CFT!hZ5&UYN zRQpa^*iXG=fIaja%G7}i@e|R|wbrW%)}Bda_>8|k`j(JhIQbPlAT;&5yw};^Pj)}7Jv%GH*Nm?5m+k3;o_tRJC>zQb4NmVr zQcUW7O8)nYH4C$g!AT#QBhuct`a!vVHGWggNx(k=NA9-VzK%IS^Uo|7IPy-`y9|Dj z^_-oFPsbh!u_fAi-(zhFI)s+yd^~(Q)2GEbFah>YF39bvYrHQI@5)DxUs-;s9h>9T zGsPdrcCp}>7IJt~@meP0x?1$JX<*JTJ91MD8~UHHO&Q}^CPZac2u+vnTlc4|+RxmJTy?=4-4ewI&!54)A` zgAYf4?zkto9Xe+)9>jODu`Ti&zQIeG&!g5(%wdrqJ@i=XBVRl9L;Knr#IDX&-<{h< z{-FDQno|lnLiq9VGu;Cx*aSJ07kqrC`*#Igi#o6M!#j<(p!lmXbXp)!K!!8tG*ju+xMV& z-RE=I;K&^JD0*HoQrM#kj%OJ&YR_F2IxFc5Zl-S+zuDNL?Wfw0b{D=Ye9B4DNAauo zBRBDuGXGbcZgNPzR;UwT{2I_7%?Xly>h&KJcA*tG)tWu=iwhaA{P~ zjleJ6`1n_HB6>ld^A4dFKEJgei0|QA#aqT)TyiFG*jDdMNdKAN@NRNEs`)bLx;>=s z*%o}u;ZO5B;zM>*9Ei2sl&pqrJ=0q2aT7B>Mj0>{0j{&n`G9&!t)`Fu2Lq4g6v7klWnxLJxV@zYH4_ z!<};t@^!-}{)@`COJe?P#k^aQ7`t9XweO${I>BVxet*?O7A6=P55 z_EH^>{yFy{gHs$$R?kfqgUj~S%h&GA{YlRrvhlL^JsR9g)r?Ec(QV@n`l>|E#%)MPF9GioYHUEV;tqZ2zen=cZ%VU-Ue#N3Q_~ zxTGJ=F}V4`@JWu5hjSSQzS8e$&sG3$!qt3bA+PKqT?t-$Q}7q-V|>T;IXy7eB3UBG24K{O*r`E1#i5^(Y1xUx>edAn!GWCxj2( ze;I>Yv1M=#`id5M9V8b>Uz@>g_E@s0@GIp%>0z?`tT`9Mr@cuLyhvW%Lsp%i$GM)d zJ@)wm?qk=1oAY{)tn@6NN&;Nq^F_dSw@G<_6|I$PqEu z)X!lq=NarmuM>W=fHOb!cSIj;8-(;ZwS98^7(RomQM(^!F5;I@=k|~drBj2``{@Ga zXBrC&n+o5B)O5iboUT#dultd+dt8rX&JG=9`PYdf&Btn9S?`F@bE?Mn%<1ZS_kEhd z6zc4u@J#*fMLwP1%=j%9P3C2cu7^DWYLwur(>dPuiScjHyB}KkRThuvUp}NY{e=7& 
zp&t)A8P0Zo5uJtSY@MIi{%3B-#q>HxSL66nkgsn)dS}HuDWW$Z&!^=7B|j|aYCP72 zrsmQVxALtz{#2XFANHQWN6*tDF6AS7zPog8YtSWp)!^#L`oLApP2pP!yz0G{l6mSa z#455FoSt{1^Xgd}(2|VhyP3lv*KORp;7{?OdaiN0p55GdE4?C;WQPg>;|6 zkWUkiX@9f39-tg%_57|A@K@-TkR<_M0&c_S!nrkR2EOX{_p$OC{QEo6JJI`x{C3yU z4WH($N^6K)Y*Z%Jmw3nmf2w=#bND8kXO$kcCnnnuZSjV`*>OGJOS#oUet#N0s%+kd zTYWt}5H;NP=t*zGXQDUc4yc6&r*)kbALRK&^vzi;J%~=w$DY@7?1jP)wAP}v5__(^ z_7P^iF0w4Rwy=l!i{3Yy5;vRQ3ts7o_^64#$pJr|y^Zu@b>M%33%^1gy|BQS9)NCF z<}!HDE2Fz?d;!i~zgoa)zDay*ysJDQzCz${{%vj-qpS7L+P8q=oW_>{vE&gKDfl#R zX>j5>xs`aEj;-)t`9lhR|4Y#4oWXUzA2#zK9`4qP1Mf;T|(MJ>yEc()(mpYeMv~@fduRW9xP<>|yyJxPkcvd|PX@dQR{F zHDvtB0#0@-?Lf{azLleE)DzM7e+Mso8r=|H8_b>7m+N1qg6Zk$0{(bokt?7)JrQf7Q-*^%{IQr{)in`N{9D5RX zz$NHWuyd(F>d#A9;oA?~(dex3ie}XTr zQIFW^&l(?zuH-I%C66)58UwGM!0r_r{!NOkLO$8V0B5kWScE@w+?y!g72Tnf(tGaMh#v zPVQ_?Bi|E40X>P&wZRZHId1sE!+75t{;i&G=r_zEQL|nA2B-BN>z~uv*o^s-Y#ac7 z>E7VhG6$>uokI!v?c3m;LtNa9?#V|Hwlg?eR}(GOb@`(1Ln{&E>U|7O_pxuGhin{? z^R4{|x@H5sddO^W=J)b9w+?DA>T^zfxOG3fhXQzwwX`1B`BER|`MiAJ z<@x|N+v^nr(_te=z9JnWn}dXe8QnSl@dEXTJk|BK0;=98Ll$#}!g z^SE}#Z1A?@XgTAz6dFAbKpTCuZsQYK?EMo4XZcDoJMeAdhnTFPOUWc23~pQc>&16d z2hQ?y%=OO_zv4mkeLl1NZ}pOELiC~>pqSHrjrCRd7j@8c@|t+C=V>fq-dA(0x;Jet z{*&pvjc??7>gU7Y*sVXyKk?z^$qZEA3xBk%l$#No;Gdsow&e42f6_(9axo#uKb@R2vhUQSNHccl-> zrTc8u54rP`?mlVl;nz5zi@5aRMfJh(NuNG+JpNRV0?xbR5QDSl4{6_*#ua*okM&U5 zy$5amZKl?Wudc5{gZQt15!eTFS@rA*!>4(MCDc&CSgcj(HP#)4@1j?OGe1~)ATlck zkUM=ha*!kMdZxgqbrjh_e_ECR!c18ZI0mCP_V7olm>e;^Pnf3gV2s(-xgLC(^huCEFGuW2% z;cWbG_d*UGtKyF_HcdZ=y!dkrd;q?+mltrZynXmoDxaG~UW;Gpb(DTC%Z;nB)yYmXEY_M#qx{q<3HHj=!%zh?}w-Jx6b2w|CV?#IOUdS zR#X4-eyWi*lY4d_~u(b{SpmyYHI^J&x<0 zUXQWf6MmWVCUFg;D zsrQ%8D!1mfL%LMnD;)xl@_+Ly{JGPvJ~gkwT;k90S%XW^iJl{({al$2=hBa07p;+u zy|sWVQ8zZNgD2p1uZZ@P=w8TXG5E~i?NOLto=a-XYnh0)&_H!2%8C3 z#|+Nrn4bMO3%lw$ydgYQ@$r}#a^XtE^g{GuH74lG`PUdy{2D&ZS*-mjen1Wqu0ed$ z$9CEL6aHXuOU}>pNmI5Y9yJb9-7S3regC|G^OnN%nc!4hz;~0JJMd7%1U*q<2ibdFk<G z_tH}Pte$GVsY-5>4;!57ulTohAI*gsyxwJMaNF!-rJtaHlWjDgJXELm9M5^RJp^ul 
zrMS`jkmRuUb(sv!eG7akvNzGg{4*=w((BPJwoW(};0NiH0?yq#lfc6=`kA3k)WR$9 z4X1C?GjX+Um>i7$q5bL+Y=vDEf8xdDU3^-e!?k9Upj-JuHXrkL-c`c5z~EGC_5K0b zqF#d^c#|*2?c8N>Zr{|z_oS!sh#2Ct=ZFH|bzda5zsY%Wg4tGlX1)H>fBKL#%X-9}3%zt7kjrPikdJSb(5L1u>Sq^ptshd%$Opt%`e%CC=ks_mIg~dd^pV9e zpVC>+mK@%Vx5=K{9ZV(WuHt%pe*zzL>g1ged!>9=ud3%+Ykx-Px^6ap#pa@^G24n& zdHz?gV)%@P`BJ?P+`qs!)Y}Gi%;3~NC*)7<;qPHg6Ys?Mm%1Syw6y^Y7{7HAWkh3B_wddqK#*>Cm$CR7(EH%ODx8gnXJ$%RDOpdx_ z!`EHAC}$a5MuXbZM3w|xjn~ryzOns%dO=s~U;%Y-gqjyaNAk~PydvLvidc2=+4FNk z;6}d(uWuucF3E8koQLi0orj(8ATN*K+w3x02HzVGl-y`=gI5=<0cSI@k46 z^s*1%`P05BjZK?9g}f;^)DxP0^^C05)FjEF`xuSCcro9B)m&AXnx=Z9J(9^Az^`*w zr#W8xJAEnf?hF1vV9=xV6U=nrIHnqs&3mJN&1d@9#p2rVSzc3&{G;c(1k_Kx3q@-O zW%`0(OK#(Yo~<@}Yr3ladd{8t;s&u2!;9>${bfd1_f;s5X>1`~to|Fmh+pZL>YTy3 z^;|fV_q8XZ=U31@tFT8Zzf;aT4Lry>i2bz=sMwVq4WIgpEmO!PyVHZ=N9voJRR8RK+X?3kPVZ(7!J$0Xx*0gd8ozblWA}4h+qRoxVhi(dk~QY~ z7XG5|+8f{G&)}@zNd6tX=udlz#D6v)4Q_*T?dImTiRV(a=_+TWry%d=&>J%7{T2~^ zB6(AO8@?(u8@^jI9a+x}xLzO-I7(ENVx*DIy?-F#X=fuX2o~1w8Y9akzawK#= z$shD?_`%k2wnunp=6AKPc-A!P;J3heGVn_ac^BUWU-SZcHcjcCHjmKQ1YU1XM-E+! zUbT;7fH(`DK#v!2fAa70p3LO6`jPd~r-PR`^5CzBF^+U&aHEfbkNmEC5ERFXA&s{b zpRKNf?&1~n58uz>Y`s~qAv*EDR=|nIDEybuulgG8Hgd-%}zT{q%8 zcVoNr8Lz#s@Oz(QLtiCN(q}cP!NQ-}0p3!_9w**e@t$H`I;NJ&@ANEO#f#!W&-XXF z=2unlSf3HlyW7~n;Pg!Un;3`bU4E(u(wRROUB4mEH3k8J=~?edjNwPU>zcFj7*gM0a3)9nT*e@KFlYT~&Y=_4^wiOd?k}h>CKpV@#?aEU z5Ng)}+m_w%1^7vPN$%s)iE3mMy$DyxwFZ~HbC_d-(VTdhbDF1<{mQ!)a5}H`rI=6Y zEa}Vj_S6REM(N`x@SVebYPS;W)aVUALwE1XZJb??&l=nS^-}hTujKssTlb&-Aq76I z|Eix}N3W)NnM}_|AoKn?kA_e9BmBzZ&?k0n9$h`X!L4~W{rI8anVsvg%sK5D$?`S2 zG&oz63)_0V`N5pu!T*9Tv!qp|yL}8R4kEpYPaN$K827%R|zg_AX5iE8hFQMjfMXNH}KrG)~C!Cwwj%VqK5l z(XqKrvi%V4e4z1>^4;=ZqUYQ3!M~u(qjJ3@M;H8+kLSHk-vG}5esT=CNIKu2Ic@To z>^VXA6FCjrizI*2-w1Ph>RSzN@U!rXzRo)p-tQsz;P)}Q=^3V;>`}l~8Q;txmyVI! 
zgFT@QOat0U+dhUb^E2Y_yjSIMqu*M4U-~1~3!xdI|L*np622;%O2@0AyO=+N zYrGpi)?nYAp$UA5O(aK)-@3oZ;LIK-=S}GIrd8Yr#P~GCKMXG154}LEq4?xe-!+e; zG47Gl&iPlL?tKCrnwNSFaPTVMF?_WIyP=2K^lA}&dB900np0XaT=-vO6nqPF zn?xt@*%5PwPtSi6U&$Z_RB?SJ%_a6SHN_%y!M{E_Y9*E~`- z55YCA{`6cb)17oIy(Bvkqrc&~N*eRF{nYB9?xAZ@OUJGv7lA9gr^uyyC;Ti0k24SZ z%wxnp{Jd{nflqX7tex(k`&DaYuBQ~dMz@SyRtNQ6GN)YI`Y87JkHT)*e%21%W_y-7 z=FaC>KB!I4b99W%TRwuFv2kX{w!ChgDN%RBVR*WLvC%j1zi9OJ5;E~jB@FQ#|5A0HS4H~CzCmb8<%%2=-S+-dT~9c zB0(oZ*Ag@6*?kUf{k^;!q?G&UWAo`D@F&TU(N?e3meZ|7j2E2o z99GCK$Y^(Pd7~P(ybDNIQcVznd0P?~^ z&p#12j{eMU@|!w7e+KKpEo2|U&(ud7KF#6O4`I%SS{6UU8V@Z1jr80z`Q;EUQToJHP$#v_@vEG8JzOJ`0jodzv%`Rm@0W)I0I~MaH@mLhy%Sh@y7e<6OYMz zMd>6$2L|`}vWoZY*?GMEqx*Iwr=EkYIXd-yXHYk5J0dIbB-(~=GP`L_D28XBdTel- zQ)wWx-rZFHZ~9lpR1vaC*G=RyIIZ!D=heTaPA$m!l>C|-(7h%GC%Up@_Wa_doOkm1 z_)iNsH&15vC+5||h(F~DH9522IAzbNGJ(1p*n7H5I-J-*jad?1_2QAOw3GXRi9vNsQ9@4(loGyJY0zriK= zriag|FVZ|+3T*Z~Q+gY2D1#I`%9BD(Q8=<=chU#C6f`+Da42KxTTPcHPV`vOEu zYjB&|c91OEj~F7K@z*5xG;YgzNp&6{lfDzKmA@IB^#_tw`@tor_T2hhlknZ(2En!X z`<$PHO`xMWI`N_F2RFH8VxIqlteLz5yn=Psg=Sxh?}BUe9O6m-$u&)MSt3t+j=Yj< z%l70y@`VSj?lJPq?#S>X{KbJYImGuS#wxZyNWXioFXWBDVe8WJ=>R>BVxt&3(x=I* zJ^Ff=o5uQ@7ix{>^u&*@GdS%9kI|{@r937)S#E##*8aErOZoF4G-i{3$Zs>7h}X*t z{>tb}$3;iqZO&EszCKXES>MvxEB4Xzx_YsCx#8>7w@0uow1s=Y6PPEiV>Yc}lzTZnNAtxD}&Ue0Jz^yq3Cp)Uo zz3*D;ExA@cQhNv=l3!}v+0H}4H|JQ6^V#)PZ2L3pfY0bzn!;gllikO>(|YnDeMjpD z1-^|(f{SAsQ)tW=;=`sBjnxgG>3QKA_}jvsSNbo_pS6%xIlo zCtho}<~ddKM;n=2N#tY5wTT!tIK`i>Uuy3~@008se>Fb9_3AGTPU9Zyzw|6V?G?~F z?`rft(xt%(pFOWY@9j~n+dC`WdgM{Vx>n*J?YQ2Aexo*jn$IwNG5zm88|lT-g`VN= zwf#@)i|UgMPI8UJ@UPtWCB}v5F(g)`mFqod@uzJre5849<-Q-3y`TIh63#1e%F_%!%Kyt=qT{UDJa`JMt+zzOI0ipJw$4 z9cDR+&n%x_h@bCL_>1J#9t5B3{mXOPO+{BhN1v$m717W-o_wH8p3)vl^-q3O(9NF7 zd{Pdxmg9YUBTKt}DYv?EXg{vx)4jSOvdSm?A-AjuWi%@==`Cjsb_Bb1yXoPzJi>|8h zsyrxO^-N#S(VyO@R3Z&K4fddDBhX|0bIUQGM#(n7@nK=yvv3o%PV*?EMgu(e~roUJvD0gS+-V 
zWR(2yv-~#rx&U9nPc$E1nzFV2GQUTL&9ljS`y(&cNhg{IH++ie%qI9#qCN+`H&Oy<}7>y{3ge?@^a7PZGD;DxK6R7oN9ZwRSy&!`ZIjviPR!k|FV=(-=o?&(xY0gf_4mSg`!zYe3@5S}T!cPnDQ18_qn5)cXLNT}wwh1e zqOl@)y66Kn5A5<*dw(IX>g}d)KvO(-=d|Uk^@E5vt~a_(df;AoZ+(($sgc^F>%og^ zS2o^j>qY&3^Yq+z?f&%g4$Rq{!g1s`eBxj7r^u@{T|Ea*`P}vx8=UazzI?4c*;<<9 z>bx)C;6nM^_u;XfuLhJm;N9@)Sy467aeT0kJ~Z=xXc(OEbzjM~#Dkt2K0Gz=59Bl1 zTv=P*h~IXPo#fGa^ZR}RzsRS(H-^vZzTh?QAw2r7{k*QfRJTL#@}Vi{3*BaW0I$gX zUNL6)ddN5F@45Ehz=dziFIwcT_>J()cY|AdIq%Atjc&2C>2K{bz%!1>Y~Geb`}-Fs z`LPCdFXNLwNPKFp)bN=c)n8yY=rpM{7yg=l>sVlx;g@+yQG3ia+7ednIc77Iei^m2ru9s?Oz}8*06J%ZOtx-9hACM-283 zvxbgtbWYc*RwsNnx_VEc{MYRvf~DB(V?0CbZBBfUKPT%qny2Q^+cA7*51Zdipy@uJ zvlC9w3>Iv9AI}H9H;*~Rp20;&=eAJ27T;xT8orh9H zQ>ejiKF!W!6XDXhwSFFc_7pUTDbMlK$>zJf{Ot%9M(Rb{Y z!^XRBP1lECAAKqpD8ETA>*YPpP4IV8t`F-8B5JApF~EK^_b>R1!4+cjSha^buY1kL zUjXj{=sW(L*>@H+&&CeSm)X7D<^SNA(;w7`#ZJF(aLt_Ss85a`q&{%!9TxGtr}H>> z6gr3hKz4TM4iK}Q_@qPGSnp*pdF?(R^>q<+f=5{=pGu5_??A?22B-Z8nI3+`wfM37 zema+~dge@4c+v5ReNwu{k)vl8G~m5CkmJjE!~b=S?in{)?^?*-;4I^Wyu?jtg|QF2)T&y($I6)l5P z|2$b9vi{THvTF{*-n)-&DhjNKljSGfcH z@MLf)yvhglj75)Ys+-Xz#}+?F+#FiyPjGrZN{D|*cY3#!dXN!ne2N|opZS5CpM|ee zUek;0xrV+!!Va=u<5p@;TF}+CdJcm<|6s+%*cW~~{Z#zv_#(#8!dX5Lzo#GT`4i7$ z=qvP>?1GLDA`aUz9_^FMlbpu=ri?R94yy~wJHnlLUgjt7(j$UQ)nC*ih?EKK-O@Y;zbPbLH!pOE-M+9AXgOw3g7k z0C;pFy(h$iWHC7Rxe$6^?hN`e_0bV93GfDIbtc>YN8c3u4*cMh9^82|y+1JOK`-=S zF*!lw?aAWO@Y%Xgw$6h*s(HZyg}fv9ui~Yp9tpqC{1IKH&=qat&%6ix<^hj zY94Gn%4IRQ2pq~~V;jLi{7NUnS-P0toHIDh>)Bj^?CBGif{O!u!J!*)%?9SVOY{ug zUm@1`Ext5n)ZQt>=dORKf4oZj*68sRPh~fLsiiggmDS&4zsP+0>vQuQSe={uhv5?q zJr^nFQ+sNLnV+8M{pe#1ZfHHVhx5{*eCj#ITe?QqFQ!Hs-1fwjY;N~oXZqu}^i!Hr zz>TjY?vYE!o9{$MVkAb7Ru2uI`Mu~ob`j%z`c2Wb`}zz{=lu_0FV$dhHZJS;R*_RR z*4l_q8=QNerq<$2o=x<;F}<(B)h^}vmEUA6@JudG=iUN?(|9^UCvN`J9umVl0SA9t zGnZ~c?5`TF`x%t~WAf7koO}nnHmPUYW2?PsDc4Fq%?}%##$*0ee&a71U;5xvZEHTl zZ^!?%MxvY{9*bfAl_5@Mq6`n{{`_bB1AeqNqm%9E;94 z(%u^BEM3MYIYa!bM(K0G?FGI$Dd+JG9oPJZ_?X35A?BEJveDIj?T3w)+~;IDnu z*ERHFHEe`k3@)X&2!fo>C99abBu~a~D%R%>&JW4C33!krg3l1Y(Ec0s5b2geFFvu{ 
zdvCvY&S$^_T=Q6H$#>z;^9#CW-+*}2c*^!v%MVmbjBa(`e7#EV8nyhbTv#Q?X?$aF z(!*xvOFigF*UK4)mpI?R9@QBIocT*ce;|EJ7Q07yY)PSKA3Py^3vV61N2F_h`|6qa zG`c3o2I_|5-KYNur4v2_{Mz8OH(j~M&8Mzx250YI?tIpvYOUlkIQe4d-%ZS~$uG3e zY69+!(8W&K{iq+s|B%=FJbVuG>EsRR!JfIanR=#vaS(q>kTsC)+VSzfYdiaB%Z{>6 zA0&hp0^ZROTV>z?4YYjdl?f6sOE!?cG&72K6qrWBctFfF5b=spn|$aYX`l^=R|KYo zfQJtu0|74|V#5SDLX@5m@CpGlh&AyL@K|&nJZi2uyIplVZQA%?|)yLSK}Y}>ptBPGP>w4{P)Y%V9Cch??C2104>UV`FM|8 ze++%D7h@Bc=do`ygRZnaGp@E9`neDE*fUtydkc7YpS+4Zp<}C*XXK*2AKintfI7QQ z-%h@ku$ilryLBNr%gOOubjE#WpTw>ViEF<)xahO#r;oPla^fjFXrF@HgB=*5Pv~aq zr}+%2OFpzm;mN&<1KK*rH^J?` zl6j?-mgel)_*1u~bryXi&+QM+^M#kLp+9gzYv)}P_51jSc0t_^?)aa!dYp5Jq^12) zFC3c&=lpD0+tSX`H&9Q?cpZJ4(ryP=^hq5YvR8|FfMxI#?4)N!Plywo`|Xdv6x)t| zPDpEOAN)QOdwvD9DqPmLBC`Xu51#onC5~gT(91ber!j7oUg6_T?bu_DwkJ6Kr?ma6 zf6pG;A8C2gPuuBSOO3nc4C>AO&`os8yq2;-Hs@c}=QsLn{r`IB|DE^LR-arWo{^qs z3YhNXxyXRFH0jRn0d4fwFy|{KVyHjG&Q&VN;bntcSrwjVQlqa_KbbJ$4TFL^CkG1;x>*vFP=N-InGD0 zv!>&J_ATZ)`SAZ{?bm7>&qNQK&!UUV$nqlSSM{UF-4y4XS91S zLpPicP!7)f)%gy+w7;kQXuH028oF>feao%$$pdl-Zr5+E3?^rvTIZD2J!#{D3%#_1 zcfpQ+{=4`_iL%=ukHF9;0DNl^MYQV?H7)oLfN7VjrN>&Xo))V)qhV}GS*4D z1N6=L@uL6qo1*)(t~`TN7VaM%J)uo=`Q5F27W=2#1NYaSWNcN-O?}aBhn{+T8#?8; zU#|O?K9#!hDEc0c=yA$vz+A-InT-3<7j1}Z?gxIb&2#99^Nc9Ve2KQ;IQq-rJ--S;)@YhKxYj>%dzKAQIr4#Nv= zzVh(t+Pi1AC0*zLVw?8RRxZjgxX_!vo%*;y(|+b6PM#ljJ$UESa>$~b|=t+3|>bo4gqUzkVvbkFqY$0;{# z(boHkkBtp3eH_~gc^W;C?^Lnb(lTyvwcYUSg`?Q+=8{$qZ~PtWN<+CcWls6JXVO+7 zo0;thd@a#m^c#~im@f*h)E9Iw zXU8)G^Ihzx1+FW^Z&uD{vnb7_# z*Wf1Cw{-l@-Ze|-&};(+-$r>5FSyxllqdWwmfbFNP5(LWd)a^TX4(&Is{cc~bR+4L zC+F%1bvttqn|7G;TH=3CzQ|sJ&#w&b^#@?E33a)tN6Oee`O7bbr#IuaY)RLACEc;M zQ@#i6Gh$7A48NXjFv9llhlk)id+%IiIzX4yWpC3sIph5YdR)p)UD@~oy8apFi+-!6 zf8-x~T&-6Jze0M-7DIX0-ot-3e5Tb=^{DifI{ouZ=0k6w9sLCIc@BDa z5qYK`SmXk)q33+3y!)TdW6E>)f~^crxp=mf-xYoGH?bGUQN25YtzNC#ndYvoy`Nmx z>eFIhF!XF=hUiR#j`|&(dcEs^QxBfcUW9ui*DfC!S7h9%ZkcZO#RO;nrOew}SFwW= z@>}azaN|Ehm!3q})7giAFZvwM-5Iy}du=+)5ADm>xxx``k^dnRdMRIZ$ue-CkNXeQ 
zmkD(~ICXLSWy*rS=GtlS$Gi1n#I5{;a~{ch_>KBuo+o|asVjr0Y{1hF_BZsjNn7+) zW{lnH{lKPQ&pXPmJo~Hu4ZDjpwaA&k- zW4N|%nKh84>-(+r^^Q@$H=og#Oa2>r?aE#*k)Lz%TgRaXtr2oDU-iDe-7iL7+;y9w z9=Lu{>vLl~bZ-MYr2qbFks)$xkasN?(>cOiIl0-J-3oC^)k`KV3wcns#?4ao99i;sQhYE%2RBm|CPCVXXs&51qA6%(Nq}!kmp3No?sYiFgt_L^ypQn~>_7FMm zKgYe9u>^I^KHL!54dH)An$q+Pj^Jk368;j_6)6AN%kc-NeLPNmTEC*l?X)l4vb9Y< z_Qf8z_qA^QD~cG6AxZp-6`p##mM6}@~PaNfg_!2_U;EVLU9xF7#yFPE&Z+P6`5 z-b30ipN^N3ZuYHfLz}zd@0wzlkNO-r`x#5;a$_s)YwV`3%#<&A*r2|Lo@W*t-*V1( z8@8ci^+vj#zHGh+Ip4}2GV0O*+vi#T4K{8HW_i@p+i5@Mw((EeU(&1h4=qzK)TvGM zw(JRvo<5p7PC6$agFdhpRoX(@R^>9Q%2)o~FXVqY?f)3IsDZ!xlV|+3o5_c`CG^U) zbT`z2wezvnZ*A9Cf}2t2{f6+OJ22OMC+4(&iuv)Y`m+@0IB3Yzu2;3Pm8NTYGvuOO z4Nkcfo++0zzn3}(?k5kg3~q+@ZH$qEn;xcrj_p~(Mu}H{b|aG^{V3obOvt#-`X7QU2=G@Q3Luegz(XqsKk-e=wK9e8;s5BpAjhvRzbCV8=apI!j=+j@GgZ#i#v;!W_5tX;EQ-9tEWCeTQ_ z{tuA4uMIpzekPRJ(!E&UdO14t-95btv`l;HiS73$<9D?7!9K{Xuj=_TAFd4_{}Oqj zkFq)DT#P>={4(NVkLKyV4L3Hvd7?|>h$$;UU_FWHT)K6!$CIu0{k=&atC^eEeFez7kP^o9U~`+nzoj;qN^9IYZTDZuax{6*Kw@pPlVn%FWWbG#f&=se9G?v@{aw^{^!vLdz|C_B3H_{LB`Gl7a6P1 z@9uE}Xm320dxgH|*RWg6N3Y$Rypi_CU3;9otYRaS|I*p``}|Mvi9HY-{ydMzI#Jsz z|MO<@ny$~$m0QjS%ow_^Q>BbLTiv-QnO(sh{|CzQw_ARi`?Pg&`1Dp^FQhLKoO4g^^(gCa^wZqCrwsr3 zcUJ~qdr^<`+#_YZj%`=g%MU`v)Q$brBl!uvo=nJ zd-`;byrI>D=2yX?54&kMH{R0vO(~bq%Xo90y{nEV3T^r?o~du1f^$8_^NgqDW0mp7 z`VY188I!(p49@>4^1oW&`3cqwXdhgU@>`qbRqCeL=pQm2(iiu;nbE({w%rjteQs@+ zatO{n-u5B)zlU_O7kI<@=goG%K_HL}vq_92$`_URg@oPUSzx+bsgd(NkMpWN2d^Belb^{?jb@PIsB4@|msU7bSv z=%3IZJEPw-7+gc2N&TJCwzxOR^K@3xmBUMI{R%F&XVJDb#5-_WuLoIQShPb&p=GYbY@nSo978C`zhsMo^P*R{but~$``$zkcMu@xOIIAf38U`ty2J&`^GjZTjK!24^tFmDY*1UjL#m*?T7stLISHkk{bL zJ-r!nI)r^3LTl3HL-|Z`JJw0MGv56Vjv?vV=4$ts9?%PC^oBbSwm;v_EK33Ab>f0GdUyOWTO4^-0RS)<60CJ}<5S-t;sN-X3ly6`5 zwYFuI_EWhBw{~afT-n+c{~OtS8)qGzJc=&PakK+z3ti_dZ6R3C4)^=J;8vfH%+<@! 
zL+eXzn&YQ}e;VyPHZHi-`(^q+GtygP-eC-1TiCw=W1HZzzQ1T++&fa*rRdeglo7f+ zeJJ#x6T4RON`EZ$>=%xa)9}gIofDMF>(Hq`Z`*`vB^UEIr9Z#)Ncj2hZMn(cCVVRY z(A#ED(Wxb9PhY}bnio>%=(7eFeYT7pFUspY4^_K=;22?Dxh?-Xazi#puch96<>2qzCCbTv53bb>r4v`fPWw)v3W?3cI8@7#Zft=&&vUXHE32YjKoANnKcSw>ZP z825rdgO|hPLH&bv!|k_2Q}i(Dx~Iqbs4bY>gEB^jb$toWv-#a`w|;u1f5`m^@kiMG z;MQJ`oSwp5h4uG`!9HVU@F@C%Gjx3OemySxSzXVveQ+FCmrv+<=G7Rx^XlJR8JyqH z?$36vmxn29x}@;Ulj<30QbCh>OP+^1{*%#kLh2*BR11-17p)0^s%NlS|$zB z*|5$N_K-c$#|BR;T;`F!(YopCX~^jYXkmX((BIhnd35V5_)(9|d(s_1JKu*Yzl+e7 zbDw}tVXFs~-H1JCuq~FI`g|#4ni;fedNbQ|%IM6WpkKN~dOrjIVD5f1^FQzE<>EQ9 z7u>{rGi%M`JG67CW9sCDa`FtK@Haz8hUiGCtCtfW+I}}T0=J8N1-FX7WvNabp>7W7 zXHTtj)T{cu&6J0Hn_tJm#&fQ+u18Ch=Y=0br|*YdydyG0N6n|UxVESDc>uPlu0cc( zO(Xox;J3Elo{wZ+&wdtj0Qc|nF@_h{fR6kz{b15feaip0gx+HR%DlQhHTnp&$V+g^ zue?ubA17VD9UqLo+~YEzQ}CC7CBIn*tMxShA64U&`6lz__hb%dFrd;Ix;5QeYw*&!5eaD(Ba@_l*KyZam(Sa-vhn75{ACWr+09-(LRdt zyA$hB&8KMvSNM56Y2BAO$Q@oZt2Py}NrGp7QxNozQbH zm){F&qvhTI%axBcZ58DlT-}cyQD*XPeJ}q7Ug7eejTy_T?+>A0AaAA>daExa9oqc? 
zx;K3SV;;(W>pSs-@3rUkxSi&6#Khi(o_@!uo62Q|dxUM>sXbD*i(?7r{@;5sd#&Eu zr#qwm-NYP#`LxaX=Pz=m9A$ZU9a&X+FD~D)_zlhabD;6?_WPbOVKslA{do1~ux*E0 zeU@kYNlUa(bKJL3R+lqR%Qp$u@0*Tci+&M4C_~4r!Nsn*HYSg2>^1SfQf%9&=k+-A ztIciDxe|wW^~rRfgwBQDm_EhroyR!PrgnPecfIO|f zRsA@)tM5mgW&e;o8@J4L;tsv750i&;+VwX7lW1~JWDOm8@I5%&e9Ql4+Bnx%mCfs4 zf=n#O-|camA8+F>e%CfcUnl4GxGC+jwsQ%(>gwj%JfGX9QP=ajzvezy^HBPq)+=;- z&L47@-mxpO8BgBcH=YnDxa^CxtX3aSKHy85!`~-fHBPZ#YWgLkUhoJ1 z0=q(;nBGQS?n1sL626v2l)BFN;?ycA|baXfJ_1yd2 z^xMYdCAie{%oi_F@077?0;Un%$=_@Jowxl)^&|5D%O6D_-UA)e>&iLw#?*l~I{*0_ zgj1iUd*F}y=sq-M9h~#i^|y+@!n~OCnH%`m?MK?x^#{NwdVb^cppA}w>Q9j+W5>|T z{J41=qx1fU%dN~?3`z6q*Y&tkrj{H2&izezS{DX9>;FwVn>pgEzK=cw{eI(&_dx@F zX}~qN_4)8ylPP|FADO+;WnKV2uD-rc_p|J~v>sRn=FjyId3BG6-+d(?2i^sSGPhjM zd;$NZ{%p?7c;(9A8u;AJw-u|D27i>i6d8Ve{TzRJ0jDEySn{Y`>d)s+m_;36WeDSo8qs_XN{=N0O zp{|FX^~3UC#(#ud7S9n2&U~qN4QYxs-Q(1$5%oh^1($lRoV{I_HJ;y8)%|Dv_R=Of zAAEOoQ62_gMgM=ftplO==AWY-V(d}>=lM?KDElCr^n-`+S)ciPGh+?&7<%$|)~8#$ z=eqO^ylL;cJASrJo&H6iwF~yUj^3x$iA{8EK%C%W$G=8|DP8J+e$0xxR3FdK&(m>r)@rv7_sM+Uh}F-@>1H^bD3QbqN_L zr}`Y0A$}8hTY6|)o_U`*4Xn-{~~ zkJC_2p|{h%0%h+1M5#Z))%|Ddf7UnL2RK0=gLA&hIV0zRXXIx;b9Cm}cFzA646deU zoha#nkDP+5&zD~;3;T}2S+0IhGa`?sE1fYs`2RS=hxE9VN6x9S9M=AUc<5+@&V-(A zl;bPE+i)M!?k|x~X!;H&;{a}xK*1M_Cm&5n7`g!1z_aXJLY6n+oBW6#AFZv*pF75G!V0Ph0zLKtK zxPM^t*~kpJm9f|pNplGudVG(I{M7pmbZ_}xt?hO1&FXiYircsX`+O8#g7zA9YjAN} zHc3}Hju&RyW9q;v^=Rv(#JRH7pWrf1p6d;DK>aM=W_+wqcc=NujJ4`^CiBi?XiP}^ z#NWar>8EUxuKUOiT*moLZzc_NWaCfTdSN?lT?#I8@tpc8vdkH6p20dHUU2!JsrS7W zxgnE-*kI3bSAIq2oxTi~(F1g0^8?7^L6igKbrSuW5XW*1y`k;yYHK5IcpG|oSu2<1 zFF19|vu~%7+Z}P+mOK{ytN2Sk4!oMaJ#{(XbS(b=v~p^w&+43gHErz#_cAtV_O)Iv z{)fmAn`Qo#pL=6w&)hfo)N57-EASKk+=E^E!sv?i#(A{ny^NQTzvU8K_;U=eohtw9 z#eC~<3A&YBW*gtkXJidZ5c|HB7pIs@mx6bSNvmTzfnR56tXC5%0Y@0qIFJt7m z@$)@ygYs~W_{iOum!h9tctm%#eaqCX@VB_fXb<+oe0#pNwk9~^Z#g)y~;$Wp(^ldili;rT;gEuc_&gPLX}5SJ8_j%o#=B)giy7 z+c>jNcX6D&c+OAwbDX>T&-w26a_CW)W~Axa5U%ylu)V=W-{nhvd*cV$m;Ooe4BoN| zZZGy#zg;haf9Sc#Z!i7X)ki{$c)=+@=aDDGbzjIcX=l{u4gAJce=>HX$Okz})3`(C 
zZi-##l*F(4+QfG?84`IT=c** z_fNd5Z;MR3mh;?>;_rWGn2$Y-lf|qu2a|k+zeAMQb^BKaKY46rZWE?pgZp~?{V@H@ z;G6?F!Pv<2_t(DI=T)7k`{PI7iS65i%xJ69o(?))ai2rz&ER2#POY!uNBHzM@`*j4 z;BQ`n%Q*?B$OWDF8sn+p>@N?YQTH_l*xX_lpdXz44tXy+as)m}YxbkG$;L%zg45qJ z_pI#YFW^tSCF=JKf5$|@IWO!wqT`t{H2gn9=f%ggg{u|4djE!YVR8N^^gL&=p7-{9 zB<16pSOX8iY2Q~*lFp^Aybdr1u#Rl}EIdBH=WqR$yfZdh`@hUnjM4i$qT8>*Zh#wC za*4id?j!Ce*)u}@mA3y`Gp)*FdJpCdXt%Waw;n*JsFMef#{`Tr3xCmP(}?Yoj&0?* zvM(9G$@}aFX|F#`KEO@E&ED7Nqqz!OOWoSKtk)}Tk^47|n{?&jEAQ{d5Xj5-vt9(B zJaq>*g)jf>#60?++|Jdkqqlo+fnL>a#hzyVWdnV%{EpBLmvzj}4n=;$Ut^5-12?Pkd;OIwgY)QnYI{>J*TGotYMkx4{`TwCyV0GHIrcF1D)JjcyL>lI zySI#uoIZ{=-*sZ*7#5uW`EpNaj47w}L+GD&UYUdO?7rZ9cm89IvKUi;)T>X@*P1-K z8v}K6cJ6a3jmMA|+V2_hjO)I|Rs2KGZ(uw#bU;~#-T<28i`#k~+)n!GXSMj? zCdkt=_iQ%hHbhT~ts-A*)RExw-Pjb&r5mg%pg%XTPBZ}H7;TO_AG`5}mevfJ`M<5k z`Rbida7een^ z@@AjoTIMq>TV=HR{;n=Ou9vg&RY#BhT3aWSndea2&zn{9lg4Q;XMPyIJu_zbRPsST zp#0Ay^(5&kKjm*9VHsH{!_jT{q0{3#^*H4^1S{QRvV5X8Lwx(hlyM%}4CZPIIy2D!=#rAob6-fxeY} z$k>?Vy-wFM@H;v4u(&1`T;0!6F18ukrJEl~S>L&rU$M{7sr!k}U#{Lr`OsepJ^7H% z+9z81SkA|(lj{EN@9J@4{J*-r_iDoo9}stn?CNrJd^y73ZzQDU-pupw%(-avC6>Oo zrx*E6&4cMbka8j3Cgn7JL0=xhNz4C$@tdpjuOP3qvoqo)AF(Zq<)5)~a67e^%6*+N z$fTm@{*s%19ozjT>;mQH_^$a?=D?3qK6mKl+%U#4o^~}lHT!!o)S<1)?#_6hQddn& zzRk~gPdgX0_j$cs4qVyReZ#N8PPxa^|Ftafeyqohp|R^p>? 
z>wg4!#*yQ8>zw=B4v$s_FZ(Upz3=SlMb6p)F{3}Dzx6t3!N-Ze>FF(wkEVwhPtb1~ zUDAFdmUPG14dvuH6wXykr~H2t{*(XQr@P5mcl;}0$%AX$)7|LTt)xf&S5Bd)&N|jU z`}c_ZZSYS!G`%-n=yzi8muPc_&{=;b`KaifNtkoee)E~K zv96@Q9eV1ib7sqrhfmro$4LGk(I#^avr6x6R~`3B%Y7z;@1&gX2!G_&Jtm=dd+Rf0 zUFrM#ayt!~9{M0Gxl^oHoj=ppTxd+F!eaLaD;)2*z7vtRbkF>;kh z!dH>c8s&O$oqS+pf>WmEV?^FIez}#edUzxKsP+HaF2dW#(n5u z=$*V4oqPa&yFIk)2aqHE^C>cV@3*#n&)^DArxWi{$nCM@=?UOyD_xIwED&5-?iq%wICa`b|E|Y1uV#<*Um!oqb4Gs3IWoA*S&+m{y0*!F|C{sBXXv3i zuAH=&!4-Q#J-GTX;m~srfOX^Y-=*BD^3A?e$LOBNdYnBWB~4^JRChC9q8mNV|4Nm5 zytk#@RC<3C?U?_&Zdtjnd2K+slK!O+<{P}zdj1MsY5!Z^<42>rJKXeczwTw6LDNFy zFEsS=-K$`#XYp_Pa504jeu?ih@$(cGTKeVIUw6Xt)ZeSTH#od=HF5Om7%m;3#>qX8 z*ZJ2t(5Ivu8b0e=dU+p&Ru^A?ukvjAvOGXOC!(3iJ$LYhI_?7@2xMtO?UGMPc=4p{c`IUj8CtG#Xod| z4Nm{!=2<@ zKMy0gTn)e6b-0*C-2A=68mGQ@O55=G&#^=+SJU%Y!te|>&lcG7BBn{4EFLeG@XIx)Rr$?vfOLBYSHe2%Vpz?4!6lK7PW5^UUE?N= z8mE7KC;gr1C#>K*BJq>9ce#9W*M8!HO_nv$%PRsohCW|5MWJ@N*2&Lth%fxriAA$8$~-0^)qnjo0?8>FJjM-lY|K#`n%u+`LOqcb@V08Gpmd z)%gXJI9+I=F#uDpJb5X2J@M1Mco+JezwY{-SaaTVOJj~H9_^2Q(mmzEFZ7J-mH2tK zz8$U$FMdty7rMchcfx$eJ;3cHG%x z&+pE!rkywhwX}6`g`RPB&!0FGCv?gaO#L2r=;TTCav8ppU!9KOUcr`^ru^=hR>Dg> zaN!|O{cD=RC%nA#<9qG_aT1e#Sp^42wI@#rVc4SHo|2mw3MNUhsM1=Vf@(4ZRwt zyT+7A{OF&s0q$J!J0*@7Zo|EN7B|mr*vdZ`pJK{|yM7Oi`dQq9!Fu~1Kj{?r4*sBf z?Cpe0uiQMm!uy;qe$ub`t?`C=>z6q3TYQR%A2H@>>0gHh<8#~PyLYapr8oEEz3>@- zaHr5BOuV?#*QU*h0AR#Cs)9!8h&u zJe>D}<5~Q8t?@~t_6xoaGp=|1Ak)&r&ub^zZHR%}QuI-YGm-pC!D*>DI4=6MsH{D%mc7DOmaX?ErPp`zOpL1-}hbN9W!+p-v(W~>r zd5T+n;us!U@yok3yz@{^)M4=p2J}2_{k+8&oUh%x{`&b| zE}p*EcQEl2SG(tN4QTw~YIdf(h^^`9F5$%W@;!8lN7KIR=Os4ynobGq!b3w`F!9T? 
z;H&V^Oq>B>UgB%NZSP(9qTd`}D0DwfMMp2*=e*#~rC&`?TJ<%LgJ*FY*!a_3&Gu(7 zb2^0Up1%XHX%<>F26wJ`9Q(?93Dl)&EK*YTtOKencP1poj5 literal 0 HcmV?d00001 -- GitLab From 9ef68d0b61ba139d032160f78727fcd3422275e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Steinsberger-D=C3=BChr=C3=9Fen?= Date: Wed, 26 Aug 2020 05:19:45 +0200 Subject: [PATCH 4/8] updated targets for tests --- .../gluon/CNNNet_mnist_mnistClassifier_net.py | 121 +++++++++--------- ...efaultGAN_defaultGANConnector_predictor.py | 121 +++++++++--------- ...NNet_defaultGAN_defaultGANDiscriminator.py | 121 +++++++++--------- ...NNet_infoGAN_infoGANConnector_predictor.py | 121 +++++++++--------- .../CNNNet_infoGAN_infoGANDiscriminator.py | 121 +++++++++--------- .../gan/CNNNet_infoGAN_infoGANQNetwork.py | 121 +++++++++--------- .../cartpole/CNNNet_cartpole_master_dqn.py | 121 +++++++++--------- .../CNNNet_mountaincar_master_actor.py | 121 +++++++++--------- ...Net_mountaincar_agent_mountaincarCritic.py | 121 +++++++++--------- .../CNNNet_torcs_agent_torcsAgent_dqn.py | 121 +++++++++--------- .../CNNNet_torcs_agent_torcsAgent_actor.py | 121 +++++++++--------- .../CNNNet_torcs_agent_network_torcsCritic.py | 121 +++++++++--------- 12 files changed, 744 insertions(+), 708 deletions(-) diff --git a/src/test/resources/target_code/gluon/CNNNet_mnist_mnistClassifier_net.py b/src/test/resources/target_code/gluon/CNNNet_mnist_mnistClassifier_net.py index a402a77e..415132dc 100644 --- a/src/test/resources/target_code/gluon/CNNNet_mnist_mnistClassifier_net.py +++ b/src/test/resources/target_code/gluon/CNNNet_mnist_mnistClassifier_net.py @@ -346,8 +346,8 @@ class LargeMemory(gluon.HybridBlock): else: self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, activation=self.query_act, flatten=False)) return self.query_network - - + + #EpisodicMemory layer class EpisodicMemory(EpisodicReplayMemoryInterface): def __init__(self, @@ -357,6 +357,7 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): replay_gradient_steps, 
store_prob, max_stored_samples, + memory_replacement_strategy, use_replay, query_net_dir, query_net_prefix, @@ -367,7 +368,8 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): #Replay parameters self.store_prob = store_prob self.max_stored_samples = max_stored_samples - + self.memory_replacement_strategy = memory_replacement_strategy + self.query_net_dir = query_net_dir self.query_net_prefix = query_net_prefix self.query_net_num_inputs = query_net_num_inputs @@ -382,64 +384,65 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): return [args, []] def store_samples(self, data, y, query_network, store_prob, context): - num_pus = len(data) - sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] - num_inputs = len(data[0][0]) - num_outputs = len(y) - mx_context = context[0] - - if len(self.key_memory) == 0: - self.key_memory = nd.empty(0, ctx=mx.cpu()) - self.value_memory = [] - self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) - - ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] - - max_inds = [nd.max(ind[i]) for i in range(num_pus)] - if any(max_inds): - to_store_values = [] - for i in range(num_inputs): - tmp_values = [] - for j in range(0, num_pus): - if max_inds[j]: - if isinstance(tmp_values, list): - tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) - else: - tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) - to_store_values.append(tmp_values) - - to_store_labels = [] - for i in range(num_outputs): - tmp_labels = [] - for j in range(0, num_pus): - if max_inds[j]: - if isinstance(tmp_labels, list): - tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) - else: - tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) - to_store_labels.append(tmp_labels) - - to_store_keys = 
query_network(*to_store_values[0:self.query_net_num_inputs]) - - if self.key_memory.shape[0] == 0: - self.key_memory = to_store_keys.as_in_context(mx.cpu()) + if not (self.memory_replacement_strategy == "no_replacement" and self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples): + num_pus = len(data) + sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] + num_inputs = len(data[0][0]) + num_outputs = len(y) + mx_context = context[0] + + if len(self.key_memory) == 0: + self.key_memory = nd.empty(0, ctx=mx.cpu()) + self.value_memory = [] + self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) + + ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] + + max_inds = [nd.max(ind[i]) for i in range(num_pus)] + if any(max_inds): + to_store_values = [] for i in range(num_inputs): - self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + tmp_values = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_values, list): + tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) + else: + tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) + to_store_values.append(tmp_values) + + to_store_labels = [] for i in range(num_outputs): - self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) - elif self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: - num_to_store = to_store_keys.shape[0] - self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) - for i in range(num_inputs): - self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) - for i in range(num_outputs): - self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=1) - 
else: - self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) - for i in range(num_inputs): - self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) - for i in range(num_outputs): - self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + tmp_labels = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_labels, list): + tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) + else: + tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) + to_store_labels.append(tmp_labels) + + to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) + + if self.key_memory.shape[0] == 0: + self.key_memory = to_store_keys.as_in_context(mx.cpu()) + for i in range(num_inputs): + self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + for i in range(num_outputs): + self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) + elif self.memory_replacement_strategy == "replace_oldest" and self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: + num_to_store = to_store_keys.shape[0] + self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + else: + self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = 
nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) def sample_memory(self, batch_size): num_stored_samples = self.key_memory.shape[0] diff --git a/src/test/resources/target_code/gluon/ganModel/defaultGAN/CNNNet_defaultGAN_defaultGANConnector_predictor.py b/src/test/resources/target_code/gluon/ganModel/defaultGAN/CNNNet_defaultGAN_defaultGANConnector_predictor.py index a83e0589..4333be26 100644 --- a/src/test/resources/target_code/gluon/ganModel/defaultGAN/CNNNet_defaultGAN_defaultGANConnector_predictor.py +++ b/src/test/resources/target_code/gluon/ganModel/defaultGAN/CNNNet_defaultGAN_defaultGANConnector_predictor.py @@ -346,8 +346,8 @@ class LargeMemory(gluon.HybridBlock): else: self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, activation=self.query_act, flatten=False)) return self.query_network - - + + #EpisodicMemory layer class EpisodicMemory(EpisodicReplayMemoryInterface): def __init__(self, @@ -357,6 +357,7 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): replay_gradient_steps, store_prob, max_stored_samples, + memory_replacement_strategy, use_replay, query_net_dir, query_net_prefix, @@ -367,7 +368,8 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): #Replay parameters self.store_prob = store_prob self.max_stored_samples = max_stored_samples - + self.memory_replacement_strategy = memory_replacement_strategy + self.query_net_dir = query_net_dir self.query_net_prefix = query_net_prefix self.query_net_num_inputs = query_net_num_inputs @@ -382,64 +384,65 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): return [args, []] def store_samples(self, data, y, query_network, store_prob, context): - num_pus = len(data) - sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] - num_inputs = len(data[0][0]) - num_outputs = len(y) - mx_context = context[0] - - if len(self.key_memory) == 0: - self.key_memory = nd.empty(0, ctx=mx.cpu()) - self.value_memory = [] - self.label_memory = 
[]#nd.empty((num_outputs, 0), ctx=mx.cpu()) - - ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] - - max_inds = [nd.max(ind[i]) for i in range(num_pus)] - if any(max_inds): - to_store_values = [] - for i in range(num_inputs): - tmp_values = [] - for j in range(0, num_pus): - if max_inds[j]: - if isinstance(tmp_values, list): - tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) - else: - tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) - to_store_values.append(tmp_values) - - to_store_labels = [] - for i in range(num_outputs): - tmp_labels = [] - for j in range(0, num_pus): - if max_inds[j]: - if isinstance(tmp_labels, list): - tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) - else: - tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) - to_store_labels.append(tmp_labels) - - to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) - - if self.key_memory.shape[0] == 0: - self.key_memory = to_store_keys.as_in_context(mx.cpu()) + if not (self.memory_replacement_strategy == "no_replacement" and self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples): + num_pus = len(data) + sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] + num_inputs = len(data[0][0]) + num_outputs = len(y) + mx_context = context[0] + + if len(self.key_memory) == 0: + self.key_memory = nd.empty(0, ctx=mx.cpu()) + self.value_memory = [] + self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) + + ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] + + max_inds = [nd.max(ind[i]) for i in range(num_pus)] + if any(max_inds): + to_store_values = [] for i in range(num_inputs): - 
self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + tmp_values = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_values, list): + tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) + else: + tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) + to_store_values.append(tmp_values) + + to_store_labels = [] for i in range(num_outputs): - self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) - elif self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: - num_to_store = to_store_keys.shape[0] - self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) - for i in range(num_inputs): - self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) - for i in range(num_outputs): - self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=1) - else: - self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) - for i in range(num_inputs): - self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) - for i in range(num_outputs): - self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + tmp_labels = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_labels, list): + tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) + else: + tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) + to_store_labels.append(tmp_labels) + + to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) + + if self.key_memory.shape[0] == 0: + self.key_memory = to_store_keys.as_in_context(mx.cpu()) + for i 
in range(num_inputs): + self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + for i in range(num_outputs): + self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) + elif self.memory_replacement_strategy == "replace_oldest" and self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: + num_to_store = to_store_keys.shape[0] + self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + else: + self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) def sample_memory(self, batch_size): num_stored_samples = self.key_memory.shape[0] diff --git a/src/test/resources/target_code/gluon/ganModel/defaultGAN/gan/CNNNet_defaultGAN_defaultGANDiscriminator.py b/src/test/resources/target_code/gluon/ganModel/defaultGAN/gan/CNNNet_defaultGAN_defaultGANDiscriminator.py index 8e862bb3..6c220b51 100644 --- a/src/test/resources/target_code/gluon/ganModel/defaultGAN/gan/CNNNet_defaultGAN_defaultGANDiscriminator.py +++ b/src/test/resources/target_code/gluon/ganModel/defaultGAN/gan/CNNNet_defaultGAN_defaultGANDiscriminator.py @@ -346,8 +346,8 @@ class LargeMemory(gluon.HybridBlock): else: self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, activation=self.query_act, flatten=False)) return self.query_network - - + + #EpisodicMemory layer class 
EpisodicMemory(EpisodicReplayMemoryInterface): def __init__(self, @@ -357,6 +357,7 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): replay_gradient_steps, store_prob, max_stored_samples, + memory_replacement_strategy, use_replay, query_net_dir, query_net_prefix, @@ -367,7 +368,8 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): #Replay parameters self.store_prob = store_prob self.max_stored_samples = max_stored_samples - + self.memory_replacement_strategy = memory_replacement_strategy + self.query_net_dir = query_net_dir self.query_net_prefix = query_net_prefix self.query_net_num_inputs = query_net_num_inputs @@ -382,64 +384,65 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): return [args, []] def store_samples(self, data, y, query_network, store_prob, context): - num_pus = len(data) - sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] - num_inputs = len(data[0][0]) - num_outputs = len(y) - mx_context = context[0] - - if len(self.key_memory) == 0: - self.key_memory = nd.empty(0, ctx=mx.cpu()) - self.value_memory = [] - self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) - - ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] - - max_inds = [nd.max(ind[i]) for i in range(num_pus)] - if any(max_inds): - to_store_values = [] - for i in range(num_inputs): - tmp_values = [] - for j in range(0, num_pus): - if max_inds[j]: - if isinstance(tmp_values, list): - tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) - else: - tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) - to_store_values.append(tmp_values) - - to_store_labels = [] - for i in range(num_outputs): - tmp_labels = [] - for j in range(0, num_pus): - if max_inds[j]: - if isinstance(tmp_labels, list): - tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) - else: - tmp_labels = 
nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) - to_store_labels.append(tmp_labels) - - to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) - - if self.key_memory.shape[0] == 0: - self.key_memory = to_store_keys.as_in_context(mx.cpu()) + if not (self.memory_replacement_strategy == "no_replacement" and self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples): + num_pus = len(data) + sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] + num_inputs = len(data[0][0]) + num_outputs = len(y) + mx_context = context[0] + + if len(self.key_memory) == 0: + self.key_memory = nd.empty(0, ctx=mx.cpu()) + self.value_memory = [] + self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) + + ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] + + max_inds = [nd.max(ind[i]) for i in range(num_pus)] + if any(max_inds): + to_store_values = [] for i in range(num_inputs): - self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + tmp_values = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_values, list): + tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) + else: + tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) + to_store_values.append(tmp_values) + + to_store_labels = [] for i in range(num_outputs): - self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) - elif self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: - num_to_store = to_store_keys.shape[0] - self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) - for i in range(num_inputs): - self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) - for 
i in range(num_outputs): - self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=1) - else: - self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) - for i in range(num_inputs): - self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) - for i in range(num_outputs): - self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + tmp_labels = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_labels, list): + tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) + else: + tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) + to_store_labels.append(tmp_labels) + + to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) + + if self.key_memory.shape[0] == 0: + self.key_memory = to_store_keys.as_in_context(mx.cpu()) + for i in range(num_inputs): + self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + for i in range(num_outputs): + self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) + elif self.memory_replacement_strategy == "replace_oldest" and self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: + num_to_store = to_store_keys.shape[0] + self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + else: + self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + 
self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) def sample_memory(self, batch_size): num_stored_samples = self.key_memory.shape[0] diff --git a/src/test/resources/target_code/gluon/ganModel/infoGAN/CNNNet_infoGAN_infoGANConnector_predictor.py b/src/test/resources/target_code/gluon/ganModel/infoGAN/CNNNet_infoGAN_infoGANConnector_predictor.py index dedd0090..ae8d9f10 100644 --- a/src/test/resources/target_code/gluon/ganModel/infoGAN/CNNNet_infoGAN_infoGANConnector_predictor.py +++ b/src/test/resources/target_code/gluon/ganModel/infoGAN/CNNNet_infoGAN_infoGANConnector_predictor.py @@ -346,8 +346,8 @@ class LargeMemory(gluon.HybridBlock): else: self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, activation=self.query_act, flatten=False)) return self.query_network - - + + #EpisodicMemory layer class EpisodicMemory(EpisodicReplayMemoryInterface): def __init__(self, @@ -357,6 +357,7 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): replay_gradient_steps, store_prob, max_stored_samples, + memory_replacement_strategy, use_replay, query_net_dir, query_net_prefix, @@ -367,7 +368,8 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): #Replay parameters self.store_prob = store_prob self.max_stored_samples = max_stored_samples - + self.memory_replacement_strategy = memory_replacement_strategy + self.query_net_dir = query_net_dir self.query_net_prefix = query_net_prefix self.query_net_num_inputs = query_net_num_inputs @@ -382,64 +384,65 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): return [args, []] def store_samples(self, data, y, query_network, store_prob, context): - num_pus = len(data) - sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] - num_inputs = len(data[0][0]) - num_outputs = len(y) - mx_context = context[0] - - if 
len(self.key_memory) == 0: - self.key_memory = nd.empty(0, ctx=mx.cpu()) - self.value_memory = [] - self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) - - ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] - - max_inds = [nd.max(ind[i]) for i in range(num_pus)] - if any(max_inds): - to_store_values = [] - for i in range(num_inputs): - tmp_values = [] - for j in range(0, num_pus): - if max_inds[j]: - if isinstance(tmp_values, list): - tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) - else: - tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) - to_store_values.append(tmp_values) - - to_store_labels = [] - for i in range(num_outputs): - tmp_labels = [] - for j in range(0, num_pus): - if max_inds[j]: - if isinstance(tmp_labels, list): - tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) - else: - tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) - to_store_labels.append(tmp_labels) - - to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) - - if self.key_memory.shape[0] == 0: - self.key_memory = to_store_keys.as_in_context(mx.cpu()) + if not (self.memory_replacement_strategy == "no_replacement" and self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples): + num_pus = len(data) + sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] + num_inputs = len(data[0][0]) + num_outputs = len(y) + mx_context = context[0] + + if len(self.key_memory) == 0: + self.key_memory = nd.empty(0, ctx=mx.cpu()) + self.value_memory = [] + self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) + + ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] + + max_inds = [nd.max(ind[i]) for i in 
range(num_pus)] + if any(max_inds): + to_store_values = [] for i in range(num_inputs): - self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + tmp_values = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_values, list): + tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) + else: + tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) + to_store_values.append(tmp_values) + + to_store_labels = [] for i in range(num_outputs): - self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) - elif self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: - num_to_store = to_store_keys.shape[0] - self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) - for i in range(num_inputs): - self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) - for i in range(num_outputs): - self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=1) - else: - self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) - for i in range(num_inputs): - self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) - for i in range(num_outputs): - self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + tmp_labels = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_labels, list): + tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) + else: + tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) + to_store_labels.append(tmp_labels) + + to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) + + if 
self.key_memory.shape[0] == 0: + self.key_memory = to_store_keys.as_in_context(mx.cpu()) + for i in range(num_inputs): + self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + for i in range(num_outputs): + self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) + elif self.memory_replacement_strategy == "replace_oldest" and self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: + num_to_store = to_store_keys.shape[0] + self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + else: + self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) def sample_memory(self, batch_size): num_stored_samples = self.key_memory.shape[0] diff --git a/src/test/resources/target_code/gluon/ganModel/infoGAN/gan/CNNNet_infoGAN_infoGANDiscriminator.py b/src/test/resources/target_code/gluon/ganModel/infoGAN/gan/CNNNet_infoGAN_infoGANDiscriminator.py index 46dec5b1..0147901b 100644 --- a/src/test/resources/target_code/gluon/ganModel/infoGAN/gan/CNNNet_infoGAN_infoGANDiscriminator.py +++ b/src/test/resources/target_code/gluon/ganModel/infoGAN/gan/CNNNet_infoGAN_infoGANDiscriminator.py @@ -346,8 +346,8 @@ class LargeMemory(gluon.HybridBlock): else: self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, activation=self.query_act, flatten=False)) return 
self.query_network - - + + #EpisodicMemory layer class EpisodicMemory(EpisodicReplayMemoryInterface): def __init__(self, @@ -357,6 +357,7 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): replay_gradient_steps, store_prob, max_stored_samples, + memory_replacement_strategy, use_replay, query_net_dir, query_net_prefix, @@ -367,7 +368,8 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): #Replay parameters self.store_prob = store_prob self.max_stored_samples = max_stored_samples - + self.memory_replacement_strategy = memory_replacement_strategy + self.query_net_dir = query_net_dir self.query_net_prefix = query_net_prefix self.query_net_num_inputs = query_net_num_inputs @@ -382,64 +384,65 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): return [args, []] def store_samples(self, data, y, query_network, store_prob, context): - num_pus = len(data) - sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] - num_inputs = len(data[0][0]) - num_outputs = len(y) - mx_context = context[0] - - if len(self.key_memory) == 0: - self.key_memory = nd.empty(0, ctx=mx.cpu()) - self.value_memory = [] - self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) - - ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] - - max_inds = [nd.max(ind[i]) for i in range(num_pus)] - if any(max_inds): - to_store_values = [] - for i in range(num_inputs): - tmp_values = [] - for j in range(0, num_pus): - if max_inds[j]: - if isinstance(tmp_values, list): - tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) - else: - tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) - to_store_values.append(tmp_values) - - to_store_labels = [] - for i in range(num_outputs): - tmp_labels = [] - for j in range(0, num_pus): - if max_inds[j]: - if isinstance(tmp_labels, list): - tmp_labels = 
nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) - else: - tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) - to_store_labels.append(tmp_labels) - - to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) - - if self.key_memory.shape[0] == 0: - self.key_memory = to_store_keys.as_in_context(mx.cpu()) + if not (self.memory_replacement_strategy == "no_replacement" and self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples): + num_pus = len(data) + sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] + num_inputs = len(data[0][0]) + num_outputs = len(y) + mx_context = context[0] + + if len(self.key_memory) == 0: + self.key_memory = nd.empty(0, ctx=mx.cpu()) + self.value_memory = [] + self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) + + ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] + + max_inds = [nd.max(ind[i]) for i in range(num_pus)] + if any(max_inds): + to_store_values = [] for i in range(num_inputs): - self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + tmp_values = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_values, list): + tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) + else: + tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) + to_store_values.append(tmp_values) + + to_store_labels = [] for i in range(num_outputs): - self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) - elif self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: - num_to_store = to_store_keys.shape[0] - self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) - for i in range(num_inputs): - self.value_memory[i] = 
nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) - for i in range(num_outputs): - self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=1) - else: - self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) - for i in range(num_inputs): - self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) - for i in range(num_outputs): - self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + tmp_labels = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_labels, list): + tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) + else: + tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) + to_store_labels.append(tmp_labels) + + to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) + + if self.key_memory.shape[0] == 0: + self.key_memory = to_store_keys.as_in_context(mx.cpu()) + for i in range(num_inputs): + self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + for i in range(num_outputs): + self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) + elif self.memory_replacement_strategy == "replace_oldest" and self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: + num_to_store = to_store_keys.shape[0] + self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + else: + self.key_memory = 
nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) def sample_memory(self, batch_size): num_stored_samples = self.key_memory.shape[0] diff --git a/src/test/resources/target_code/gluon/ganModel/infoGAN/gan/CNNNet_infoGAN_infoGANQNetwork.py b/src/test/resources/target_code/gluon/ganModel/infoGAN/gan/CNNNet_infoGAN_infoGANQNetwork.py index abe4ad7a..df2d89c8 100644 --- a/src/test/resources/target_code/gluon/ganModel/infoGAN/gan/CNNNet_infoGAN_infoGANQNetwork.py +++ b/src/test/resources/target_code/gluon/ganModel/infoGAN/gan/CNNNet_infoGAN_infoGANQNetwork.py @@ -346,8 +346,8 @@ class LargeMemory(gluon.HybridBlock): else: self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, activation=self.query_act, flatten=False)) return self.query_network - - + + #EpisodicMemory layer class EpisodicMemory(EpisodicReplayMemoryInterface): def __init__(self, @@ -357,6 +357,7 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): replay_gradient_steps, store_prob, max_stored_samples, + memory_replacement_strategy, use_replay, query_net_dir, query_net_prefix, @@ -367,7 +368,8 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): #Replay parameters self.store_prob = store_prob self.max_stored_samples = max_stored_samples - + self.memory_replacement_strategy = memory_replacement_strategy + self.query_net_dir = query_net_dir self.query_net_prefix = query_net_prefix self.query_net_num_inputs = query_net_num_inputs @@ -382,64 +384,65 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): return [args, []] def store_samples(self, data, y, query_network, store_prob, context): - num_pus = len(data) - sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] - num_inputs = 
len(data[0][0]) - num_outputs = len(y) - mx_context = context[0] - - if len(self.key_memory) == 0: - self.key_memory = nd.empty(0, ctx=mx.cpu()) - self.value_memory = [] - self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) - - ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] - - max_inds = [nd.max(ind[i]) for i in range(num_pus)] - if any(max_inds): - to_store_values = [] - for i in range(num_inputs): - tmp_values = [] - for j in range(0, num_pus): - if max_inds[j]: - if isinstance(tmp_values, list): - tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) - else: - tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) - to_store_values.append(tmp_values) - - to_store_labels = [] - for i in range(num_outputs): - tmp_labels = [] - for j in range(0, num_pus): - if max_inds[j]: - if isinstance(tmp_labels, list): - tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) - else: - tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) - to_store_labels.append(tmp_labels) - - to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) - - if self.key_memory.shape[0] == 0: - self.key_memory = to_store_keys.as_in_context(mx.cpu()) + if not (self.memory_replacement_strategy == "no_replacement" and self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples): + num_pus = len(data) + sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] + num_inputs = len(data[0][0]) + num_outputs = len(y) + mx_context = context[0] + + if len(self.key_memory) == 0: + self.key_memory = nd.empty(0, ctx=mx.cpu()) + self.value_memory = [] + self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) + + ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i 
in range(num_pus)] + + max_inds = [nd.max(ind[i]) for i in range(num_pus)] + if any(max_inds): + to_store_values = [] for i in range(num_inputs): - self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + tmp_values = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_values, list): + tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) + else: + tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) + to_store_values.append(tmp_values) + + to_store_labels = [] for i in range(num_outputs): - self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) - elif self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: - num_to_store = to_store_keys.shape[0] - self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) - for i in range(num_inputs): - self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) - for i in range(num_outputs): - self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=1) - else: - self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) - for i in range(num_inputs): - self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) - for i in range(num_outputs): - self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + tmp_labels = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_labels, list): + tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) + else: + tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) + to_store_labels.append(tmp_labels) + + to_store_keys = 
query_network(*to_store_values[0:self.query_net_num_inputs]) + + if self.key_memory.shape[0] == 0: + self.key_memory = to_store_keys.as_in_context(mx.cpu()) + for i in range(num_inputs): + self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + for i in range(num_outputs): + self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) + elif self.memory_replacement_strategy == "replace_oldest" and self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: + num_to_store = to_store_keys.shape[0] + self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + else: + self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) def sample_memory(self, batch_size): num_stored_samples = self.key_memory.shape[0] diff --git a/src/test/resources/target_code/gluon/reinforcementModel/cartpole/CNNNet_cartpole_master_dqn.py b/src/test/resources/target_code/gluon/reinforcementModel/cartpole/CNNNet_cartpole_master_dqn.py index 15a7103b..acc3c897 100644 --- a/src/test/resources/target_code/gluon/reinforcementModel/cartpole/CNNNet_cartpole_master_dqn.py +++ b/src/test/resources/target_code/gluon/reinforcementModel/cartpole/CNNNet_cartpole_master_dqn.py @@ -346,8 +346,8 @@ class LargeMemory(gluon.HybridBlock): else: self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, 
activation=self.query_act, flatten=False)) return self.query_network - - + + #EpisodicMemory layer class EpisodicMemory(EpisodicReplayMemoryInterface): def __init__(self, @@ -357,6 +357,7 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): replay_gradient_steps, store_prob, max_stored_samples, + memory_replacement_strategy, use_replay, query_net_dir, query_net_prefix, @@ -367,7 +368,8 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): #Replay parameters self.store_prob = store_prob self.max_stored_samples = max_stored_samples - + self.memory_replacement_strategy = memory_replacement_strategy + self.query_net_dir = query_net_dir self.query_net_prefix = query_net_prefix self.query_net_num_inputs = query_net_num_inputs @@ -382,64 +384,65 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): return [args, []] def store_samples(self, data, y, query_network, store_prob, context): - num_pus = len(data) - sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] - num_inputs = len(data[0][0]) - num_outputs = len(y) - mx_context = context[0] - - if len(self.key_memory) == 0: - self.key_memory = nd.empty(0, ctx=mx.cpu()) - self.value_memory = [] - self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) - - ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] - - max_inds = [nd.max(ind[i]) for i in range(num_pus)] - if any(max_inds): - to_store_values = [] - for i in range(num_inputs): - tmp_values = [] - for j in range(0, num_pus): - if max_inds[j]: - if isinstance(tmp_values, list): - tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) - else: - tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) - to_store_values.append(tmp_values) - - to_store_labels = [] - for i in range(num_outputs): - tmp_labels = [] - for j in range(0, num_pus): - if max_inds[j]: - if isinstance(tmp_labels, list): - 
tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) - else: - tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) - to_store_labels.append(tmp_labels) - - to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) - - if self.key_memory.shape[0] == 0: - self.key_memory = to_store_keys.as_in_context(mx.cpu()) + if not (self.memory_replacement_strategy == "no_replacement" and self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples): + num_pus = len(data) + sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] + num_inputs = len(data[0][0]) + num_outputs = len(y) + mx_context = context[0] + + if len(self.key_memory) == 0: + self.key_memory = nd.empty(0, ctx=mx.cpu()) + self.value_memory = [] + self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) + + ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] + + max_inds = [nd.max(ind[i]) for i in range(num_pus)] + if any(max_inds): + to_store_values = [] for i in range(num_inputs): - self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + tmp_values = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_values, list): + tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) + else: + tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) + to_store_values.append(tmp_values) + + to_store_labels = [] for i in range(num_outputs): - self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) - elif self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: - num_to_store = to_store_keys.shape[0] - self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) - for i in range(num_inputs): - self.value_memory[i] = 
nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) - for i in range(num_outputs): - self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=1) - else: - self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) - for i in range(num_inputs): - self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) - for i in range(num_outputs): - self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + tmp_labels = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_labels, list): + tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) + else: + tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) + to_store_labels.append(tmp_labels) + + to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) + + if self.key_memory.shape[0] == 0: + self.key_memory = to_store_keys.as_in_context(mx.cpu()) + for i in range(num_inputs): + self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + for i in range(num_outputs): + self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) + elif self.memory_replacement_strategy == "replace_oldest" and self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: + num_to_store = to_store_keys.shape[0] + self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + else: + self.key_memory = 
nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) def sample_memory(self, batch_size): num_stored_samples = self.key_memory.shape[0] diff --git a/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/CNNNet_mountaincar_master_actor.py b/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/CNNNet_mountaincar_master_actor.py index ddc42e41..8f389630 100644 --- a/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/CNNNet_mountaincar_master_actor.py +++ b/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/CNNNet_mountaincar_master_actor.py @@ -346,8 +346,8 @@ class LargeMemory(gluon.HybridBlock): else: self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, activation=self.query_act, flatten=False)) return self.query_network - - + + #EpisodicMemory layer class EpisodicMemory(EpisodicReplayMemoryInterface): def __init__(self, @@ -357,6 +357,7 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): replay_gradient_steps, store_prob, max_stored_samples, + memory_replacement_strategy, use_replay, query_net_dir, query_net_prefix, @@ -367,7 +368,8 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): #Replay parameters self.store_prob = store_prob self.max_stored_samples = max_stored_samples - + self.memory_replacement_strategy = memory_replacement_strategy + self.query_net_dir = query_net_dir self.query_net_prefix = query_net_prefix self.query_net_num_inputs = query_net_num_inputs @@ -382,64 +384,65 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): return [args, []] def store_samples(self, data, y, query_network, store_prob, context): - num_pus = len(data) - sub_batch_sizes = [data[i][0][0].shape[0] 
for i in range(num_pus)] - num_inputs = len(data[0][0]) - num_outputs = len(y) - mx_context = context[0] - - if len(self.key_memory) == 0: - self.key_memory = nd.empty(0, ctx=mx.cpu()) - self.value_memory = [] - self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) - - ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] - - max_inds = [nd.max(ind[i]) for i in range(num_pus)] - if any(max_inds): - to_store_values = [] - for i in range(num_inputs): - tmp_values = [] - for j in range(0, num_pus): - if max_inds[j]: - if isinstance(tmp_values, list): - tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) - else: - tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) - to_store_values.append(tmp_values) - - to_store_labels = [] - for i in range(num_outputs): - tmp_labels = [] - for j in range(0, num_pus): - if max_inds[j]: - if isinstance(tmp_labels, list): - tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) - else: - tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) - to_store_labels.append(tmp_labels) - - to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) - - if self.key_memory.shape[0] == 0: - self.key_memory = to_store_keys.as_in_context(mx.cpu()) + if not (self.memory_replacement_strategy == "no_replacement" and self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples): + num_pus = len(data) + sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] + num_inputs = len(data[0][0]) + num_outputs = len(y) + mx_context = context[0] + + if len(self.key_memory) == 0: + self.key_memory = nd.empty(0, ctx=mx.cpu()) + self.value_memory = [] + self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) + + ind = [nd.sample_multinomial(store_prob, 
sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] + + max_inds = [nd.max(ind[i]) for i in range(num_pus)] + if any(max_inds): + to_store_values = [] for i in range(num_inputs): - self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + tmp_values = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_values, list): + tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) + else: + tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) + to_store_values.append(tmp_values) + + to_store_labels = [] for i in range(num_outputs): - self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) - elif self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: - num_to_store = to_store_keys.shape[0] - self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) - for i in range(num_inputs): - self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) - for i in range(num_outputs): - self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=1) - else: - self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) - for i in range(num_inputs): - self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) - for i in range(num_outputs): - self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + tmp_labels = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_labels, list): + tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) + else: + tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) + 
to_store_labels.append(tmp_labels) + + to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) + + if self.key_memory.shape[0] == 0: + self.key_memory = to_store_keys.as_in_context(mx.cpu()) + for i in range(num_inputs): + self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + for i in range(num_outputs): + self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) + elif self.memory_replacement_strategy == "replace_oldest" and self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: + num_to_store = to_store_keys.shape[0] + self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + else: + self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) def sample_memory(self, batch_size): num_stored_samples = self.key_memory.shape[0] diff --git a/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/reinforcement_learning/CNNNet_mountaincar_agent_mountaincarCritic.py b/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/reinforcement_learning/CNNNet_mountaincar_agent_mountaincarCritic.py index 6414df26..9e90a181 100644 --- a/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/reinforcement_learning/CNNNet_mountaincar_agent_mountaincarCritic.py +++ 
b/src/test/resources/target_code/gluon/reinforcementModel/mountaincar/reinforcement_learning/CNNNet_mountaincar_agent_mountaincarCritic.py @@ -346,8 +346,8 @@ class LargeMemory(gluon.HybridBlock): else: self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, activation=self.query_act, flatten=False)) return self.query_network - - + + #EpisodicMemory layer class EpisodicMemory(EpisodicReplayMemoryInterface): def __init__(self, @@ -357,6 +357,7 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): replay_gradient_steps, store_prob, max_stored_samples, + memory_replacement_strategy, use_replay, query_net_dir, query_net_prefix, @@ -367,7 +368,8 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): #Replay parameters self.store_prob = store_prob self.max_stored_samples = max_stored_samples - + self.memory_replacement_strategy = memory_replacement_strategy + self.query_net_dir = query_net_dir self.query_net_prefix = query_net_prefix self.query_net_num_inputs = query_net_num_inputs @@ -382,64 +384,65 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): return [args, []] def store_samples(self, data, y, query_network, store_prob, context): - num_pus = len(data) - sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] - num_inputs = len(data[0][0]) - num_outputs = len(y) - mx_context = context[0] - - if len(self.key_memory) == 0: - self.key_memory = nd.empty(0, ctx=mx.cpu()) - self.value_memory = [] - self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) - - ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] - - max_inds = [nd.max(ind[i]) for i in range(num_pus)] - if any(max_inds): - to_store_values = [] - for i in range(num_inputs): - tmp_values = [] - for j in range(0, num_pus): - if max_inds[j]: - if isinstance(tmp_values, list): - tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) - else: - tmp_values = nd.concat(tmp_values, 
nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) - to_store_values.append(tmp_values) - - to_store_labels = [] - for i in range(num_outputs): - tmp_labels = [] - for j in range(0, num_pus): - if max_inds[j]: - if isinstance(tmp_labels, list): - tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) - else: - tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) - to_store_labels.append(tmp_labels) - - to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) - - if self.key_memory.shape[0] == 0: - self.key_memory = to_store_keys.as_in_context(mx.cpu()) + if not (self.memory_replacement_strategy == "no_replacement" and self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples): + num_pus = len(data) + sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] + num_inputs = len(data[0][0]) + num_outputs = len(y) + mx_context = context[0] + + if len(self.key_memory) == 0: + self.key_memory = nd.empty(0, ctx=mx.cpu()) + self.value_memory = [] + self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) + + ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] + + max_inds = [nd.max(ind[i]) for i in range(num_pus)] + if any(max_inds): + to_store_values = [] for i in range(num_inputs): - self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + tmp_values = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_values, list): + tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) + else: + tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) + to_store_values.append(tmp_values) + + to_store_labels = [] for i in range(num_outputs): - self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) - elif 
self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: - num_to_store = to_store_keys.shape[0] - self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) - for i in range(num_inputs): - self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) - for i in range(num_outputs): - self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=1) - else: - self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) - for i in range(num_inputs): - self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) - for i in range(num_outputs): - self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + tmp_labels = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_labels, list): + tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) + else: + tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) + to_store_labels.append(tmp_labels) + + to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) + + if self.key_memory.shape[0] == 0: + self.key_memory = to_store_keys.as_in_context(mx.cpu()) + for i in range(num_inputs): + self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + for i in range(num_outputs): + self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) + elif self.memory_replacement_strategy == "replace_oldest" and self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: + num_to_store = to_store_keys.shape[0] + self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = 
nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + else: + self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) def sample_memory(self, batch_size): num_stored_samples = self.key_memory.shape[0] diff --git a/src/test/resources/target_code/gluon/reinforcementModel/torcs/CNNNet_torcs_agent_torcsAgent_dqn.py b/src/test/resources/target_code/gluon/reinforcementModel/torcs/CNNNet_torcs_agent_torcsAgent_dqn.py index 448c1394..a743e0b1 100644 --- a/src/test/resources/target_code/gluon/reinforcementModel/torcs/CNNNet_torcs_agent_torcsAgent_dqn.py +++ b/src/test/resources/target_code/gluon/reinforcementModel/torcs/CNNNet_torcs_agent_torcsAgent_dqn.py @@ -346,8 +346,8 @@ class LargeMemory(gluon.HybridBlock): else: self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, activation=self.query_act, flatten=False)) return self.query_network - - + + #EpisodicMemory layer class EpisodicMemory(EpisodicReplayMemoryInterface): def __init__(self, @@ -357,6 +357,7 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): replay_gradient_steps, store_prob, max_stored_samples, + memory_replacement_strategy, use_replay, query_net_dir, query_net_prefix, @@ -367,7 +368,8 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): #Replay parameters self.store_prob = store_prob self.max_stored_samples = max_stored_samples - + self.memory_replacement_strategy = memory_replacement_strategy + self.query_net_dir = query_net_dir self.query_net_prefix = query_net_prefix 
self.query_net_num_inputs = query_net_num_inputs @@ -382,64 +384,65 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): return [args, []] def store_samples(self, data, y, query_network, store_prob, context): - num_pus = len(data) - sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] - num_inputs = len(data[0][0]) - num_outputs = len(y) - mx_context = context[0] - - if len(self.key_memory) == 0: - self.key_memory = nd.empty(0, ctx=mx.cpu()) - self.value_memory = [] - self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) - - ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] - - max_inds = [nd.max(ind[i]) for i in range(num_pus)] - if any(max_inds): - to_store_values = [] - for i in range(num_inputs): - tmp_values = [] - for j in range(0, num_pus): - if max_inds[j]: - if isinstance(tmp_values, list): - tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) - else: - tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) - to_store_values.append(tmp_values) - - to_store_labels = [] - for i in range(num_outputs): - tmp_labels = [] - for j in range(0, num_pus): - if max_inds[j]: - if isinstance(tmp_labels, list): - tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) - else: - tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) - to_store_labels.append(tmp_labels) - - to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) - - if self.key_memory.shape[0] == 0: - self.key_memory = to_store_keys.as_in_context(mx.cpu()) + if not (self.memory_replacement_strategy == "no_replacement" and self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples): + num_pus = len(data) + sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] + num_inputs = 
len(data[0][0]) + num_outputs = len(y) + mx_context = context[0] + + if len(self.key_memory) == 0: + self.key_memory = nd.empty(0, ctx=mx.cpu()) + self.value_memory = [] + self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) + + ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] + + max_inds = [nd.max(ind[i]) for i in range(num_pus)] + if any(max_inds): + to_store_values = [] for i in range(num_inputs): - self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + tmp_values = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_values, list): + tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) + else: + tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) + to_store_values.append(tmp_values) + + to_store_labels = [] for i in range(num_outputs): - self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) - elif self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: - num_to_store = to_store_keys.shape[0] - self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) - for i in range(num_inputs): - self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) - for i in range(num_outputs): - self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=1) - else: - self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) - for i in range(num_inputs): - self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) - for i in range(num_outputs): - self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + tmp_labels = [] + for j in range(0, 
num_pus): + if max_inds[j]: + if isinstance(tmp_labels, list): + tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) + else: + tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) + to_store_labels.append(tmp_labels) + + to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) + + if self.key_memory.shape[0] == 0: + self.key_memory = to_store_keys.as_in_context(mx.cpu()) + for i in range(num_inputs): + self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + for i in range(num_outputs): + self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) + elif self.memory_replacement_strategy == "replace_oldest" and self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: + num_to_store = to_store_keys.shape[0] + self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + else: + self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) def sample_memory(self, batch_size): num_stored_samples = self.key_memory.shape[0] diff --git a/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/CNNNet_torcs_agent_torcsAgent_actor.py b/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/CNNNet_torcs_agent_torcsAgent_actor.py index 9e1417c6..3194456b 
100644 --- a/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/CNNNet_torcs_agent_torcsAgent_actor.py +++ b/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/CNNNet_torcs_agent_torcsAgent_actor.py @@ -346,8 +346,8 @@ class LargeMemory(gluon.HybridBlock): else: self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, activation=self.query_act, flatten=False)) return self.query_network - - + + #EpisodicMemory layer class EpisodicMemory(EpisodicReplayMemoryInterface): def __init__(self, @@ -357,6 +357,7 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): replay_gradient_steps, store_prob, max_stored_samples, + memory_replacement_strategy, use_replay, query_net_dir, query_net_prefix, @@ -367,7 +368,8 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): #Replay parameters self.store_prob = store_prob self.max_stored_samples = max_stored_samples - + self.memory_replacement_strategy = memory_replacement_strategy + self.query_net_dir = query_net_dir self.query_net_prefix = query_net_prefix self.query_net_num_inputs = query_net_num_inputs @@ -382,64 +384,65 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): return [args, []] def store_samples(self, data, y, query_network, store_prob, context): - num_pus = len(data) - sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] - num_inputs = len(data[0][0]) - num_outputs = len(y) - mx_context = context[0] - - if len(self.key_memory) == 0: - self.key_memory = nd.empty(0, ctx=mx.cpu()) - self.value_memory = [] - self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) - - ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] - - max_inds = [nd.max(ind[i]) for i in range(num_pus)] - if any(max_inds): - to_store_values = [] - for i in range(num_inputs): - tmp_values = [] - for j in range(0, num_pus): - if max_inds[j]: - if isinstance(tmp_values, list): - tmp_values = 
nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) - else: - tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) - to_store_values.append(tmp_values) - - to_store_labels = [] - for i in range(num_outputs): - tmp_labels = [] - for j in range(0, num_pus): - if max_inds[j]: - if isinstance(tmp_labels, list): - tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) - else: - tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) - to_store_labels.append(tmp_labels) - - to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) - - if self.key_memory.shape[0] == 0: - self.key_memory = to_store_keys.as_in_context(mx.cpu()) + if not (self.memory_replacement_strategy == "no_replacement" and self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples): + num_pus = len(data) + sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] + num_inputs = len(data[0][0]) + num_outputs = len(y) + mx_context = context[0] + + if len(self.key_memory) == 0: + self.key_memory = nd.empty(0, ctx=mx.cpu()) + self.value_memory = [] + self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) + + ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] + + max_inds = [nd.max(ind[i]) for i in range(num_pus)] + if any(max_inds): + to_store_values = [] for i in range(num_inputs): - self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + tmp_values = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_values, list): + tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) + else: + tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) + to_store_values.append(tmp_values) + + to_store_labels = [] 
for i in range(num_outputs): - self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) - elif self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: - num_to_store = to_store_keys.shape[0] - self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) - for i in range(num_inputs): - self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) - for i in range(num_outputs): - self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=1) - else: - self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) - for i in range(num_inputs): - self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) - for i in range(num_outputs): - self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + tmp_labels = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_labels, list): + tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) + else: + tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) + to_store_labels.append(tmp_labels) + + to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) + + if self.key_memory.shape[0] == 0: + self.key_memory = to_store_keys.as_in_context(mx.cpu()) + for i in range(num_inputs): + self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + for i in range(num_outputs): + self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) + elif self.memory_replacement_strategy == "replace_oldest" and self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: + num_to_store = to_store_keys.shape[0] + self.key_memory = nd.concat(self.key_memory[num_to_store:], 
to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + else: + self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) def sample_memory(self, batch_size): num_stored_samples = self.key_memory.shape[0] diff --git a/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/reinforcement_learning/CNNNet_torcs_agent_network_torcsCritic.py b/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/reinforcement_learning/CNNNet_torcs_agent_network_torcsCritic.py index e79ac15d..0f027405 100644 --- a/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/reinforcement_learning/CNNNet_torcs_agent_network_torcsCritic.py +++ b/src/test/resources/target_code/gluon/reinforcementModel/torcs_td3/reinforcement_learning/CNNNet_torcs_agent_network_torcsCritic.py @@ -346,8 +346,8 @@ class LargeMemory(gluon.HybridBlock): else: self.query_network.add(gluon.nn.Dense(units=self.num_heads*size, activation=self.query_act, flatten=False)) return self.query_network - - + + #EpisodicMemory layer class EpisodicMemory(EpisodicReplayMemoryInterface): def __init__(self, @@ -357,6 +357,7 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): replay_gradient_steps, store_prob, max_stored_samples, + memory_replacement_strategy, use_replay, query_net_dir, query_net_prefix, @@ -367,7 +368,8 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): #Replay parameters 
self.store_prob = store_prob self.max_stored_samples = max_stored_samples - + self.memory_replacement_strategy = memory_replacement_strategy + self.query_net_dir = query_net_dir self.query_net_prefix = query_net_prefix self.query_net_num_inputs = query_net_num_inputs @@ -382,64 +384,65 @@ class EpisodicMemory(EpisodicReplayMemoryInterface): return [args, []] def store_samples(self, data, y, query_network, store_prob, context): - num_pus = len(data) - sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] - num_inputs = len(data[0][0]) - num_outputs = len(y) - mx_context = context[0] - - if len(self.key_memory) == 0: - self.key_memory = nd.empty(0, ctx=mx.cpu()) - self.value_memory = [] - self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) - - ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] - - max_inds = [nd.max(ind[i]) for i in range(num_pus)] - if any(max_inds): - to_store_values = [] - for i in range(num_inputs): - tmp_values = [] - for j in range(0, num_pus): - if max_inds[j]: - if isinstance(tmp_values, list): - tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) - else: - tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) - to_store_values.append(tmp_values) - - to_store_labels = [] - for i in range(num_outputs): - tmp_labels = [] - for j in range(0, num_pus): - if max_inds[j]: - if isinstance(tmp_labels, list): - tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) - else: - tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) - to_store_labels.append(tmp_labels) - - to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) - - if self.key_memory.shape[0] == 0: - self.key_memory = to_store_keys.as_in_context(mx.cpu()) + if not (self.memory_replacement_strategy == 
"no_replacement" and self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples): + num_pus = len(data) + sub_batch_sizes = [data[i][0][0].shape[0] for i in range(num_pus)] + num_inputs = len(data[0][0]) + num_outputs = len(y) + mx_context = context[0] + + if len(self.key_memory) == 0: + self.key_memory = nd.empty(0, ctx=mx.cpu()) + self.value_memory = [] + self.label_memory = []#nd.empty((num_outputs, 0), ctx=mx.cpu()) + + ind = [nd.sample_multinomial(store_prob, sub_batch_sizes[i]).as_in_context(mx_context) for i in range(num_pus)] + + max_inds = [nd.max(ind[i]) for i in range(num_pus)] + if any(max_inds): + to_store_values = [] for i in range(num_inputs): - self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + tmp_values = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_values, list): + tmp_values = nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]) + else: + tmp_values = nd.concat(tmp_values, nd.contrib.boolean_mask(data[j][0][i].as_in_context(mx_context), ind[j]), dim=0) + to_store_values.append(tmp_values) + + to_store_labels = [] for i in range(num_outputs): - self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) - elif self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: - num_to_store = to_store_keys.shape[0] - self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) - for i in range(num_inputs): - self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) - for i in range(num_outputs): - self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=1) - else: - self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) - for i in range(num_inputs): - self.value_memory[i] = nd.concat(self.value_memory[i], 
to_store_values[i].as_in_context(mx.cpu()), dim=0) - for i in range(num_outputs): - self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + tmp_labels = [] + for j in range(0, num_pus): + if max_inds[j]: + if isinstance(tmp_labels, list): + tmp_labels = nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]) + else: + tmp_labels = nd.concat(tmp_labels, nd.contrib.boolean_mask(y[i][j].as_in_context(mx_context), ind[j]), dim=0) + to_store_labels.append(tmp_labels) + + to_store_keys = query_network(*to_store_values[0:self.query_net_num_inputs]) + + if self.key_memory.shape[0] == 0: + self.key_memory = to_store_keys.as_in_context(mx.cpu()) + for i in range(num_inputs): + self.value_memory.append(to_store_values[i].as_in_context(mx.cpu())) + for i in range(num_outputs): + self.label_memory.append(to_store_labels[i].as_in_context(mx.cpu())) + elif self.memory_replacement_strategy == "replace_oldest" and self.max_stored_samples != -1 and self.key_memory.shape[0] >= self.max_stored_samples: + num_to_store = to_store_keys.shape[0] + self.key_memory = nd.concat(self.key_memory[num_to_store:], to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i][num_to_store:], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i][num_to_store:], to_store_labels[i].as_in_context(mx.cpu()), dim=0) + else: + self.key_memory = nd.concat(self.key_memory, to_store_keys.as_in_context(mx.cpu()), dim=0) + for i in range(num_inputs): + self.value_memory[i] = nd.concat(self.value_memory[i], to_store_values[i].as_in_context(mx.cpu()), dim=0) + for i in range(num_outputs): + self.label_memory[i] = nd.concat(self.label_memory[i], to_store_labels[i].as_in_context(mx.cpu()), dim=0) def sample_memory(self, batch_size): num_stored_samples = self.key_memory.shape[0] -- GitLab From 
a0bbc584eed23abdf2eb5cfda0f32b49ece3ab83 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julian=20Steinsberger-D=C3=BChr=C3=9Fen?= Date: Thu, 27 Aug 2020 07:47:49 +0200 Subject: [PATCH 5/8] adjusted installation instructions in readme --- README.md | 18 +++++----- .../install_after_cuda | 0 .../install_before_cuda | 0 .../mxnet_gluon_installation_script.sh | 29 +++++++++++++++ src/test/resources/docker/mxnet/Dockerfile | 33 ++++++++++-------- .../docker/mxnet/armadillo-9.600.6.zip | Bin 0 -> 6852949 bytes 6 files changed, 55 insertions(+), 25 deletions(-) rename src/main/resources/installation_scripts/{ => legacy_script_all_backends}/install_after_cuda (100%) rename src/main/resources/installation_scripts/{ => legacy_script_all_backends}/install_before_cuda (100%) create mode 100644 src/main/resources/installation_scripts/mxnet_gluon_installation_script.sh create mode 100644 src/test/resources/docker/mxnet/armadillo-9.600.6.zip diff --git a/README.md b/README.md index 5e5f7d46..d9f381ef 100644 --- a/README.md +++ b/README.md @@ -17,11 +17,11 @@ See example project [EMADL-Demo](https://git.rwth-aachen.de/thomas.timmermanns/E * Deep learning backend: * MXNet * training - generated is Python code. Required is Python 2.7 or higher, Python packages `h5py`, `mxnet` (for training on CPU) or e.g. `mxnet-cu75` for CUDA 7.5 (for training on GPU with CUDA, concrete package should be selected according to CUDA version). Follow [official instructions on MXNet site](https://mxnet.incubator.apache.org/install/index.html?platform=Linux&language=Python&processor=CPU) - * prediction - generated code is C++. Install MXNet using [official instructions on MXNet site](https://mxnet.incubator.apache.org) for C++. + * prediction - generated code is C++. * Caffe2 * training - generated is Python code. 
Follow [ official instructions on Caffe2 site ](https://caffe2.ai/docs/getting-started.html?platform=ubuntu&configuration=prebuilt) - * See the scripts under Installation for better instructions, as an old caffe vversion is used that needs special considerations. + * See the scripts under Installation for better instructions, as an old caffe version is used that needs special considerations. * Gluon @@ -30,16 +30,14 @@ See example project [EMADL-Demo](https://git.rwth-aachen.de/thomas.timmermanns/E * prediction - generated code is C++. ## Installation -The two bash scripts found under [installation scripts](https://git.rwth-aachen.de/monticore/EmbeddedMontiArc/generators/EMADL2CPP/tree/tensorflow_group/src/main/resources/installation_scripts) -should build and install all prerequisits for all backends as of 26.09.2019. -Note that the installation may take some time (hours) and you will need some disk space (> 60GB) for all backends. Also enough RAM or a big -enough swapspace is advisable (>10GB) for the installation of the cpp part of tensorflow. This scripts were tested with a completly clean Ubuntu 16.04, +A new bash script for mxnet/gluon can be found [installation scripts](https://git.rwth-aachen.de/monticore/EmbeddedMontiArc/generators/EMADL2CPP/tree/tensorflow_group/src/main/resources/installation_scripts) +changing the installation process for mxnet for cpp. This fill now install the full cpp api and not the reduced c api. This script will install all dependencies both for python and cpp as of 26.08.2020. +Additionally a simmilar docker script used for the git ci pipeline can be found in the gluon subfolder at [Docker images](https://git.rwth-aachen.de/monticore/EmbeddedMontiArc/generators/EMADL2CPP/tree/tensorflow_group/src/test/resources/docker). +The other two bash scripts found in the installation_scripts folder are outdated but may be consulted for installation guidlines for other backends. 
+Note that the installation may take some time (hours) enough RAM or a big enough swapspace is advisable (>10GB). This scripts were tested with a completly clean Ubuntu 16.04, without system updates installed. Using another Ubuntu version or installing other stuff, system updates included might/ have caused problems. If you want to install the backends with CUDA GPU support(only MXNet/Gluon and Tensorflow, the used caffe2 version does not work with GPU support anymore), -you have to install CUDA 10.0(!!), CUDNN and NCCL (Obtainable from the nvidai webpage. You can follow their instructions.) inbetween the two scripts. -Furthermore you will have to change the pip commands for mxnet and tensorflow to the respective commented out parts. -Also docker images for the cpu version of each backend are provided at [Docker images](https://git.rwth-aachen.de/monticore/EmbeddedMontiArc/generators/EMADL2CPP/tree/tensorflow_group/src/test/resources/docker), -though some of these might be outdated. +you have to install CUDA 10.0(mxnet/ gluon also works with newer version and maybe older), CUDNN and NCCL (Obtainable from the nvidai webpage). ### HowTo 1. Define a EMADL component containing architecture of a neural network and save it in a `.emadl` file. For more information on architecture language please refer to [CNNArchLang project](https://git.rwth-aachen.de/monticore/EmbeddedMontiArc/languages/CNNArchLang). 
An example of NN architecture: diff --git a/src/main/resources/installation_scripts/install_after_cuda b/src/main/resources/installation_scripts/legacy_script_all_backends/install_after_cuda similarity index 100% rename from src/main/resources/installation_scripts/install_after_cuda rename to src/main/resources/installation_scripts/legacy_script_all_backends/install_after_cuda diff --git a/src/main/resources/installation_scripts/install_before_cuda b/src/main/resources/installation_scripts/legacy_script_all_backends/install_before_cuda similarity index 100% rename from src/main/resources/installation_scripts/install_before_cuda rename to src/main/resources/installation_scripts/legacy_script_all_backends/install_before_cuda diff --git a/src/main/resources/installation_scripts/mxnet_gluon_installation_script.sh b/src/main/resources/installation_scripts/mxnet_gluon_installation_script.sh new file mode 100644 index 00000000..da2d42b6 --- /dev/null +++ b/src/main/resources/installation_scripts/mxnet_gluon_installation_script.sh @@ -0,0 +1,29 @@ +sudo apt-get update -y +sudp apt-get install -y build-essential git openjdk-8-jdk maven ninja-build ccache libopenblas-dev libblas-dev / + liblapack-dev libopencv-dev libarmadillo-dev cmake python2.7 python-dev / + python-numpy python3-pip python3-pip swig unzip libboost-all-dev + +sudo update-alternatives --config java + +pip3 install --user --upgrade "cmake>=3.13.2" + +wget https://bootstrap.pypa.io/get-pip.py +python get-pip.py +pip install --user h5py matplotlib numpy==1.16.5 mxnet==1.5.1 #Not alll test work with mxnet v1.6.0, the curent standard version installed for the cpu installation of mxnet. + #You could alternativly also use python 3.6 instead of 2.7, then you could also use the newest numpy version. + #Note that you then have to also set the PYTHON_PATH acordingly, or specifiy the python path for all applications in their build scripts + #and test currently only run on the python specified in the PYTHON_PATH. 
+ #If you want to use mxnet with cuda install f.e. mxnet-cu100 for cuda 10.0 (for this currently v1.5.1 is already the newest version), + #of course then you have to install cuda and cudnn beforehand. + +git clone --recursive https://github.com/apache/incubator-mxnet.git mxnet +cd mxnet && git checkout tags/1.5.0 && git submodule update --recursive --init +cd mxnet && mkdir build && cd build && cmake -DUSE_CPP_PACKAGE=1 -DUSE_CUDA=0 -GNinja .. && ninja -v +cd mxnet && cp -r include/mxnet /usr/include/mxnet && cp -r cpp-package/include/mxnet-cpp /usr/include/ && cp -r 3rdparty/tvm/nnvm/include/nnvm /usr/include/ && cp -r 3rdparty/dmlc-core/include/dmlc /usr/include/ + +#you have tohave armadillo-9.600.6.zip in your current folder +unzip armadillo.zip -d . +cd armadillo-9.600.6 && cmake . && make && make install + +mkdir -p /root/.config/matplotlib +echo "backend : Agg" > /root/.config/matplotlib/matplotlibrc \ No newline at end of file diff --git a/src/test/resources/docker/mxnet/Dockerfile b/src/test/resources/docker/mxnet/Dockerfile index e374b94a..151a9b71 100644 --- a/src/test/resources/docker/mxnet/Dockerfile +++ b/src/test/resources/docker/mxnet/Dockerfile @@ -1,19 +1,22 @@ FROM maven:3-jdk-8 -RUN apt-get update && \ - apt-get install -y --no-install-recommends \ - git \ - libgtk2.0-dev \ - python-subprocess32 \ - python-tk \ - wget python gcc \ - build-essential cmake \ - liblapack-dev libblas-dev libboost-dev libarmadillo-dev && \ - rm -rf /var/lib/apt/lists/* -RUN git clone https://github.com/apache/incubator-mxnet.git mxnet-source && \ - cd mxnet-source && git checkout tags/1.4.0 && cd .. 
&& \ - cp -r mxnet-source/include/mxnet /usr/include/mxnet && \ - rm -r mxnet-source +RUN apt-get update +RUN apt-get install -y build-essential git ninja-build ccache libopenblas-dev libblas-dev liblapack-dev libopencv-dev libarmadillo-dev cmake python2.7 python-dev python-numpy python3-pip python3-pip swig unzip libboost-all-dev + +RUN pip3 install --user --upgrade "cmake>=3.13.2" + RUN wget https://bootstrap.pypa.io/get-pip.py RUN python get-pip.py -RUN pip install mxnet h5py opencv-python matplotlib +RUN pip install --user h5py matplotlib numpy==1.16.5 mxnet==1.5.1 + +RUN git clone --recursive https://github.com/apache/incubator-mxnet.git mxnet +RUN cd mxnet && git checkout tags/1.5.0 && git submodule update --recursive --init +RUN cd mxnet && mkdir build && cd build && cmake -DUSE_CPP_PACKAGE=1 -DUSE_CUDA=0 -GNinja .. && ninja -v +RUN cd mxnet && cp -r include/mxnet /usr/include/mxnet && cp -r cpp-package/include/mxnet-cpp /usr/include/ && cp -r 3rdparty/tvm/nnvm/include/nnvm /usr/include/ && cp -r 3rdparty/dmlc-core/include/dmlc /usr/include/ + +ADD armadillo-9.600.6.zip /root/armadillo.zip +RUN unzip /root/armadillo.zip -d /root/armadillo +RUN cd /root/armadillo/armadillo-9.600.6 && cmake . 
&& make && make install + +RUN mkdir -p /root/.config/matplotlib +RUN echo "backend : Agg" > /root/.config/matplotlib/matplotlibrc diff --git a/src/test/resources/docker/mxnet/armadillo-9.600.6.zip b/src/test/resources/docker/mxnet/armadillo-9.600.6.zip new file mode 100644 index 0000000000000000000000000000000000000000..e053f3bc1670da20fb0a40eea7ee27a8ede111eb GIT binary patch literal 6852949 zcmZs@V~j3Lur)fiZQHhO+qSufdu-dbZQC~Y*m%aC`<^fN-sDS8r&m>VvVL_^onF*79zUe0EyL9WRsS*ul}$@qeb{BfnRs`|z&caeY1N%Nnj@&)<*# zY+raAkxRPWA%rZ!sPO>Kx^&ZL!!kfjvSxexr|IT4n_Mf*;V?X~+xYu7W%H9Oawqn# zX$)ze1Ab_K;PnsEBl$L}Oq!mn#ho6aVO9BPA29VlW5?EgBY2Powe;05D6HFI{_U<; zIw;h(cy6UPX5Dw(xwqVLPv}R`)6i{vDM$iKNWo5v$iaPxs5`VMr^Jo`58(dcWg7VL z9k2u$xbWe^8yJpl?q2PN1Lql%pXt4Lrs+|BS5Mx7Zdj%c zXGFS~jP5wDkE8@Gelh<;$SaKKVRaWLQUp3#!lp;ePqAt0;S54cAQ3qtqcncw@5oVu zjpUyaaJ>DOa>E!4)t6d+g6(1J{mh2N40p+4gTGsZ@Bp{5>jTg0>Ys*nlO4!VD3l%X zfB6|t(m}q`hgnW}#zyEeVTbsyU#WM-27}&rb=Tu_ zll9SlW_n8wBy>G5965-u4D=fv9!C&+X8k8|cWrNXpH7Cc(GYgyc_4pCT#9|p5Ux-` zPo~LwLNkv)-dha76e?7VovLt_a+$XM%f2T%>j+T3OLn@VXc&hd0ljN`SoblnV1wJr56E$& zX~cZ*K1SxI=CcqmAL8nvE&mrSA+J5~wNkg993tyJ3 z*zUQWbwWY6ln1vs1na&4n&|P)qQ$g;>~E zcw5Tz=&vBM3%G>NKL^j3yScCnYSqX_-eAUvcAtrUER0>-(W_*ihmuM}PvFHJ{`EVc zm*zkC*U-}bHCHu z(C>2kO?=FbHN!YQr~L~PA7ODL{OeQBnICt`e>Rf`UxpPpZwlQ1vSnKX1+52&>qb=aNF<3n95ftn3|7HisLAF`k(%j&{W_^CkJK@qC4_3U@TTEpPs zQ2tTibwLLC@WkN79n?h@xWPVdR|zF2W3=!T$cao&)^iO3L`jftwjF zoxdkW*b!qWJ%-GM@IuS?*w#UQV;(bL`_aw5nDN|_J~Yg1#82$nzGoC-RpNKA${m3# zc`{u=YkraSSXgo268JI_`=iYN7an4MA0&qwhgkrII0#@ny;N#E;s>l;{fmxc1x^G0 z9S|I(V$4X#R$3TZ@-2w{6+p&&^xS1$l`6yUY^i-Bs+P=&JOmy?c? 
zHhMCAm%$NEPH`A5zJ(nH-iHWy`Ii)&IyXME$%C{oC8c|(wI>>KfKlfZWBb{>GfYI` zZ@E>)iVu}0Y6uj$hI9Zzyd4j#O~iY59>ysxRCwL!|nStC~fL(AW|go z2h{>>XO_Ju5g`Z~Au5u55AA$H)l*1??vOxycH7St7kbD$rf7%x;39k39pptb2V5#zZ_RTTU3Wpkl?ocMzIL~2v+f?DH07SJrm zR7kXjdL$ME^Pl{|H4V5>zjQ44xUgm?yNDXtETSGP;i}_CquSb>%;}P8%ULxuF1x8L zvb^AcNfKFiTQ-PHF5;v3-1k5Mj$~HkcahRif$*(~lzKvF&ajy}>E4l$gORuOXl2K+ zW(4tyf;?LvqcvrnX21rv1>!o3^VRJg59WbO{6b5yNZye$(BE;^Pb(1E*CDO;GgNmP z`Ux#%e}&i5Zt0zWpKrKdd36b8$Kgy}#%*WL$FdCoEfQ)BiA;xex8`{mxQn6Zfd_Qm zq$>(i38uafDnoiQ08NbAsXt&w1J@H@H%ed2W0JyVUcMQ+w?&7hH0LI4?g?lwU zdp*qOK!5VAH)D!^$u_1Te3tl)$E%5xv-XzyMxm?HXmOc2f2n9mnTn=?dC6$d=liA= z$2D~Hn&Q|aI(#y#EIcbbTa#S|UIxC!M5@v#MBgk5mro0XlIb)GsSJd$Wtu43rtB9u%~SW<+RfqIbiAELoH!0Jq_3bf0Y z$PQ8tfoieTMg-G&#z7^~a$vJ$7wbXx+>6Dsj9@ZwICK2%pYqfxFzj=fmg7TV{=l$`7YJ0HqeN)^)%#4a*}=>YA?J_|hwFI34HiIb=Fn!$DZ zR6pBB0Onl5Ce3)6d~WiZdJRef3nKhYrD&y>7rVgT0Ze(BL-scQu8eV(nE4@WF(XMr#TwC~ zZheZ2lmR24GG{UxDNT{j@vRb5LNCH7+OCai|?*aYITtan4j`fXucBVqTHuCy!Lpn6+KXh5Dm*@W(eCU1AesM?CUiKE~`z zENev&WAbaZQ7)8a&1XL6`=kW|Nyh#GsEIXKIQ;NAI-%G{ga+F zyas#~NAcUtrA$?e#>M5Pm*6W>qNm-0N z_k7J=CZ%)>lFKupX)3`Cc@M_)sOg-3ZIp=yywFCqA`rKkxQ z(`)3DING5j_XG-C;)*t0BFp;54%CVtnV;F@THtLawLBCwL%Idk317rfTS7iV*b;8a zka!aacu3$mHtYzpB5cnBOKz!M0Q z#wjhq-ki9``F)i;)hg$CzS#nV#8ud#FO^%m(_?))RZCqOqDW=2{2(%_5NnNS(@AbH zK1~aAo_s1~AOnFnr0Eab#?!U+YZ9cW|Cx8Bh5?*~z=ALZ?YVFY%^DZ1 zN0021s*@~b)8|5Z7Y2B`l7+OUOzyF$oSJugfAo`CIOT(ZSyE#1J{LRmb$y(_RxU{? 
zdN-t;^_*Z2vE=-SQ6AoRM4H-H2G&Y*okclV9^=xdc`=Sqa#hkm?~+&iUXlmsO=04v zW^thUq4t3q6*WuQc==UxKSGkARVk1l3=SX$Ycr2NEWuW74S1DFG)C@O*yRurs~ z>eOOFFarHJ=akU{uL5O5s+g3tX_1Tqkd}{>GX^8*fX_@Api%3Mlu~e|&EBDHa>oZF z*Z}7%Ehc-ggDrA{C?f=}!}uOZCbM#C(cq+NUZy!fl7exsIh|?+8P}$@)&E$*9`auS zixp> z=%uu7$O)^kG}yQ9PPH}osq@rj)2vsmIv56%RVi2VG|Pr`9zVQrQz|0`hlw!(>8Cdvavx{IC@~UN`DP;;Kyei48F|m)3i0Us?W#5+ zqAB}H)%Gn|p`C#v{2k;>Bt^A-?v`CH?*HT{uJL<{AKO|sE?=>v{^t1)zC+iM?E5y4 zkUy=UQ|S8SnT53R(kwP!&{KgU{_--vafN-%elBY7}^XHkcIQ!-(mzi=a-N8$9W$7@b%siXd#nVAD`XD@y z5o7DyRn{K@k61>NIaL#6n*Gbq_Ggtr0F`rHv4042>I0F&7L!DVL?r=4dAHnP-L$V` z+tRHwzj9>YZb-zpE0}p>H5YW=rj>B zY|#~~yg0EUr3aV}$W~;A&n!ruKn7C`*H|=xsmsLcsLR3)aP&)FB>~_(dQCQUq2c8MndlqN7WA~MxAB*kno6EsH5+)>>orufeQQ-HE zXG`cT`n>_ncL>%p!4Uqu#cuj~U%Kk)IC?^jTl9RBfVy~gQMd96mwZ|Tq(f62=`LfYgi-%@hN1oHaUhRF(){DzhO_u!op6!2v0qD?KkLU7#f(Y*Ugji0Y-qNNxa zL(xicGnyOtB45k@0L({Bb4ABI=DXQ(K(!A|_l&p5p(ViXmQ@*P_sAsIAuUglNNd<~ zs{{_-%P~Q^^;=__LXOay$Y?+;z^#0-1d%WlAaQg@Go0xAq)y(1YQ+=UHDfd09yppq zyJ((en2ao@Jbx!1>jY!FyvMeqc*DE|o179qXu*%8IIAt*yRmn~`^R^xX6xt{^V>1! zp%aTAq=l#TTlySe$8}?IFI0i+ z%y}S@@K7YmfM@PC+h3MonFej)UbA$C2vKbJGG3+5QJsmO??qp2=IaoX!q-JJt~Rz* zkKw9Am8KaO!xGv38CPYJ1YjqJSXF#h43PrnI7Tm@bny50{JlCGznbZw5SZ5_sMd6! 
z1H#2@-oT(1zk*I~+Bhpd_}6{vT53j7ANu@tapJr_-(*veNw=n~j0#H%A1)$ip0~8L zRR1Ikx*Aux1z(B}#;nt3i)Fw&lv0aoQMa>^v|rWmDUzpL-*_-^oI5t4Xb`( zu{T~8O{z_o-7D=#@EyvuX-PlxXerLMloxq_asywc^)Gm4x6z;Qw~|{_?cAFkS`?&m z5(7u=B6H=SM)~Y=kwSKT&r7{ZSFlSS_!ISCERMy~xi#0NsV*EpgX7~d;{RQx%p5h8 z21Nt{qM`=^!v9~ZlycId;tHzb|EEN$Bkg)5hSsM#`qxTNJWP{lFOJn zN>tZo8@@@$O_up^{js-vkB)mrX(t18L*eS?&PVp@d3|L1N58FUhZ>N}pnkL>C~zO# zq>wGq;|Wk8?n{(sydm22Im<=N^S4=dzp-@e_v6B2AY<)f^7!=NXK{7x|G^RT{_FSp zEu*)m7kSpssX6_5ax*LA>*Yg9ps^m>(e3Z->GOP_ujy*NdcS zQ;fSSEaExX|Ii=0Zb!VMu-3XkyBUFi<2iMkn;=gvv#fUHYGPV*o=X#=Udyg-xHKPH zJ{nGY!&AM^|1B&#b7>sogGT6J++gw2~=%XVY* z?6D5*YL$YX026=dfYfy7F?QLXQB3J}iTp*u3*4tm3ovU&*ME3OgtMWwb)H?Zba{fe zo}{57mMuaB52SbESYYNl2wjx}e}EWMoSiBgwBGe=r!4`-Ro#Ol_rhpg|3Gs{NboT&J32 zN(bp*zr)6TKWcJsCEHVSj``wp7xh&hd{7Nq?1fnED-KA4Xzr#PWE9&t-a(EGIx}`= zQu&c08ZZin_qMJ8(Cf-2l&ZseKQYbe;j{(T-(bU!tYvf4S%SZGo=t(r?sD226N}2& zLD7FJzt>|EOj?kaY2#9##LBuzVs+%9EwuRRuQZ5J$l}OqAH|?A1>*kQ3QbTx8mQ|; zIatNJ|1NIqTM9W3#FMcPqmJv#K<|R7tBAxvJayfemxlPJbfsPK7>NYYxntV@d`sTsmlUVlR}XwDnrYf2 zoOM#yrL4v`oED>)p|@v@fHZU4LpcTtMZqJ78p=u<*l}^wniSC>kcBawjyeph64g!X z$06P8Skf5>sK;md$uPrzzV#r(cyMs|vU0rf&NC>+UKO}N;dlgr@v6GUL4kH(LD-%4 zP_$r0qSlUEH*@#||_QWbizKXnz{zymU|*$|g%tBEE1GkEqu>N$M>dOWV-VJ2o9)8Sb7&l7w^bOHk@@Cp`UnaKndhwBOGVE|TwuSQ zFzvtC^64R>wb;{#lYE0Tj`LjKN3XyCIh~daj#$_+nD;te6}a;Z%HNy+dICYP|Djai z8FsC@p*qmROiC7aJ0f#u(9z)ZB*F1j+Mbjg-8Djsv~+GBKc?MoTT^4r^2U<4&w&|AZC&v>n?L=ig)B?T<|?!fw)0TXnVsxv z1S(;IqQ<{6i378|)tlI2o6?juBFReQg1C2GhUCMS2&ka89+)MH<~(vvU{HUBZQN19 zDnXtC?nT<4WY67;6Pc(j5k$QY-Tb9nZCZ)cJ=$Sy!?9M$%Im5DbVHykPX}MQOKz>% zL|bm!D1WW6$`w2i3M2XkozKl*Vi&S>THR-X*WaVfgU5ZgN=t>c(ZnpO$vgOL+i#pvyoGA5`nPHFXi#2n3r_- z5!=OJX^zM_QlodTKBz3PR$|r_o@-6j<1QN>KP!EMUijx*g9dZkszTnFhA!_C0at0c;>dM#37(%tv+R4 zRzEYz41qEw_40Fe_38KZ?-dgH*=*cRj0}W#zu>Os+ecxV>Bw(l?4~b9m(+wdoQkTF z#0b1BaZZFWgH81zF2Fw;t$?*v?ul}Dm7l6_t)@c;&0PXSzLYo7+{u56d4tI#pV8emN1aAW@oN zlV~J1tSU_+sg4rR#YuTsn3({(=uI`CrjA+!SI`_M9x2YSi>S4!^1sX8;d3EbVtQ6n zDPtPfi0eku&R3w@^NVEH#U6TI7J3@L4O=nk2rpnrv`05f-Kj1Aqb 
zn3SiVxF{1`;P#UT=UMzI{F^f^I_|;)6tave*LE+By*}nP9zZx1B11_ILf59s+0-Ln}=4UiM+x^>i51;%PMWth?Qn~vpXHzZDrw-lJcIm?S7 zP9D>X{*HNhRad*uweC1>wq3-=N+8f8*j-qv3Q5`{4#aW{wCvu`(Dmh6jr6jQum&*S z4z?aO%4+kx7Ppw?5Sxi5dGN=eGAZ7%<&hg1c|L3;+pk4g1qVPdNc%uIdp$E{J3k(E zxsDr*u~Y7|GcahHbm~MfGH6b#Baf$_e6P17z8;QGcXv-`f1W(c?ChP~LGAcQz5@GW z>@zsl!4$HUvYU5@=4G;EHI_|jG%)A?fyCW{+}n;nU9w41-vl4~e)#FW0vYS~YU_D_ zaPWFueSTf%d<}j3`M7;wPk4NYKJlFX%;!$))7#tEs~>B}W&lmZO!su!8TL%~JRBX~ zjow~;)`}hXnaXltDiDEf%W~v=!?41mp>zg?)LGV%|K_9Z70y}2jvIj*c{6vZ{HND{ z2245B;Q{wPm%^j474veCr5Wh$KaX2zhcK6~_b=T@i!aR4rPj7@#8AZ=;}Y3I{S-%im@ky^X(p`(LA^OC?A>IMz4RG%v;;lH?8g={j2Prydk*uE`j0(RA3+74DJcfKiQYqb$g>#(O`C$y#h3O`qWMByBWYWXMduKO5R8*LM^TL~~W@S{eLUfsu79U}m=f5~4 zZ#f4*K&XV|W9y@~HI3}Rm%?U)Jz3wRjEf#zEC!`-+s^%EacP*R*o16P$Z#H$T#(eH zOnY3qh0YS8&7;dS_jI)HDm>bnld7e2p}w|^);>N2-j6lofo~9Dv9tL>q^4{BVUb2P zdH*^XgA?w}vn(%p2Z5K7VI`Zx0@&lL6Eu5StHaIJh|4 z<&zFjHX}lb^>T6=`BED1cLoyMuIUfUzy10{%(Pv*vh|++B~WNZ5l`m$5|Y2!>Ny8O z-l|Vuu0_a@*yjgV+55}kYlfWuapW^*>=dvecAiFSJpt34i_}%-k|zCIw9Wg4NB!FR zg25X2<_F^cR?iS%zU@o@sb&NaARxT|RXr;xs{N;*|3^HhDor~K z3BmP9P6@tAiNiueest8fVU|S6gc34s(l@r$kk+@W|MXEuz!H_{(Z$|mF2*E+^1uWR zDwN*~FBS(t`}CvY{bMR+gq_xBY;i=#P=9`Cskqc^zyrs(W8z_qB$d-yy|ou`j0<<$ zo1d6v65K)29>PiGG6_iC4Vsh&2PKwU1LeTX`FJ<86YDtp`#VyUg4n*lr|hgP7_?i{ zCoo+^*G5l%9-{AnM#VwQn@UjP9``p#5_|%1s<(Gp~x6BoE)F z8Ame>w5;1rtm*nRWptPsHDC}&Ee~0is~)^7S)J{sx&U1|(15qat=4{<>pBMSj_x#9z}Cj z^g{uT@Wa@rEwv`3@^MX*JuXM{!qN?|;QljDg&Tg$gNDtlT4Ma1Mgz{1a416=M#NW( z!?%a0!&)RWxg@eD;kwVt#Z8}wC7<;^)W63Ts^}1D=u5O`p^q-gRL1Is|0#l7zJFD_ ze>n6%n=;z2o617wH{}qfnkrd7hM_y8%tGB1I{zMJcLL9gHwOu@(|JZ*?8myy+!+(d zzIz%m-v=3SCgAd`xjp$Zq94XDPw6Ie)|}?^=HHTg-vjYnqb@c&#Cliutb~Fd_$Rd@ zCiC%u0aQu+E0MK(t=rG?REV9>_My@h}T z!e3pWyGRPpiGyMG2XF5Yy6IFNnn{$J{O2Pd>$^Y_cJO=iC~^H=CG#7uc87@}&c^ij6l@=K4y zooW7)$na({A>vm+{oc#SliAwK+c=pA1;|M6Yn?<4?nMZ-g>%au2VC4E0|bC}zrNJ;iueZ7{ig{&z>&a&ow!;OTNU(5jL!5t z(MC?NJA9+<`k*#v_zbi|Q|f#h(Oq8+P7cd6{*63p2VE;}U0cRYOCbV_<-36sYq>#T z*DLxToo|-Tq})sN%=YwXbk|Sb!5;c#q+C)gCQ<+>nB`Pips7FcVi?d;#Z&y6pKz^H 
z-!=zR`XB?*W$NM8B5f;^#+bKAR-ZKRB^JWT!wEd`Lc?@NaNO}z6_N-$XVHT>G($Kj zMYByQyZRQ*8y9B@1e3bR+du$e!=-nkBTyLF)_?zo-7e5Sv8-TA6_VKJOY>sq%utZDocHBj9@xirx@kKF!2BZ<$t9;~miRX-m| zf~}{*>stjgMKr&^f4z-o1`wAWPFUZ8{IM}NHk=HctE~)lw`XMwbf=@60Od)rN{0! zNKJs(D1BfFw5DyK6V* z(;H(Bu7~w#>jjOF(-0CUF(P2E;TTpuZ1 zS#@4`DbY%*6d_B%(Ot)c+Mhv!?h~Mp8}&V$S)kp||4rD+-_CNR zhLqmg;`Dl@l_BB|ANl+3Va&ckJbo<0++1z7*>1NfogSxz-C2Y`PCP~2X1i*EpN|Nu zqNi8^c1{;k$L{!RM6s8l#%KQ6C*w$bb&=TNc!|&6uKZx_%ia(=mKZYb$q|B5)u!VW zHL0M1wa`{y&fj$VB$rnjX%C9X+G7N&qAMCtd6mqKn@|Q zZvL9xrri)=`BJGY9OjDNGN(dU^KACm4$Aq=gtC6edgf%|6KQbx$0u1=9mV8u8peiN zI{ECvo8pZd4Jmm@s!_=tCi70vga$=hUXNX@^gbMGEAd^}D2E$~=b8Tl7d2y~h6Dj0 z`(mD>zAq&&t$k&OPdBO$Rz*ImZZxi+ZD-+qMl7jAlcM_q&2H(`$QSu6 z&lq(ooHT=d0tW*ZA+CqZ&xc>IvL;qr!8=M!8Uy_Z`w^WRr7ybKoG+S@8I{j!JP&*z zj(omlKSve0ycTm!k0t!-^h1OSISZ+$!d%644HwpDyU8W0S0o zC84fB%EK#VDNlS8V^Y}+$Q+M9B=&+IP?Z6&VTJQ#p7E4bQe_Wa)m11ACZvL51Z0Wy zYl>W?R*fYWFmbRfJ4ac_bDzV4{9N68&lp@QLDGw;>Yl5gRauL{*}@Y_or%7?nA$zn zvT#U^!0Zv<$OOp}MCD+!VVW_qRFDV~Y2(6f>zF;-4jE<(#UaLzLstN%w9E>~I_9;W z@8aS`U1`k5VGaHTPj35lKoTlCn*TPF0{-PfF8T%TRoXO;FRx3V;J|vus3gh?=9AdU z@m_Y+1{-de@;UU@28FzYl2&mDMd_K5AOdh1`FNQl7^OU2>T|=wX0||Ts9Lu_UeF{I z#(kKM1~ybwnPD;UBpXYkaZwVv)C-^@$PBXKW_Ag)rdyBMs|Z@yMu$%iSMJIVjxQPD zTE`8HrnsCPII`J_NpQ*(gkNq70_jr0iQ1MYa_0d4cw_~V{pH9Irw7ALmcgc?NW%pf zyS&4N1!FSr1%A_X^=W@LQ&@>@=YN;;J}t6>&Y08?@1v?Q%8IXdwC3$UeN;`b)Uw_F zO_cYN-lg^7<6XkvY1B}oH-`J+sfQ4tA&f`rP;W7QEOH{JP{4}IPC3ouo>`)S?K}N< zdVk~1W(a>0Krk8D0nn?El>b?0{FKJ-^{EFO+zmY|_3KXRT8}^INdFzd@1=cUCe17T z9Dct~{wqAhB`Q&CqE;4U<~YDP$0`}z%IVQw0iWXV?C37=35v3lde^`z&zXZfC<`Acu(7#>Ke6ON(FV8DA=1 z*cc7Eb~dOlJnv4p)IW8MU?We9U)G8|KnVqsiLm zmQSmV^doMBG@XhIJ(FufvJ(#UXcUnAu= zthVw=)qdO)Xjg17U|@(v4!=n*Omo4F@)kAsAGWvgmxS`&>hy&~Zr%!=Ts51fP#0q=W*ff{a;H zoVy#sNB;hC>jcRjRW8W{pT5ZC?yJ8INd9>?(nl6i8>6dF zKM0uqSk*dc7|pjv=PgIMX)@Auz}#ICfqejxB9%H;|4^Alnx=FMcY2iN@7ZVv6}PuC z*2c)i`@>l+3(vhFz3dE+!sart zmc*E(*)x1o!AP>BGNOEhgQ3v(U;Sbyz6HU@Fd}az&JZ6U&}e+4URzFu?tcAEr{u8b zP{&bQTBJrgXTk^CyRacg4 
z!dw3#Tg7`%daEaGLwKJl5qmi0Bd`PG{bF;I9AIOqmxi7j@;cia@5eH&M}Z)OmunIh zVaay6=o?PX?O`6V>Uvk(p!1O$UYp;XazQMJbJ*(^@_{L}Rn-WzRI+DT@?`-MzLi#gGK=991{ zc$;~D!FhL6iqlg5xho3<$_EZI$uB#SCox$co9rFUg&#OiVpfo1$Ej~hh7+Wk7@SX)!dDM zEw*NZ0&s44dkYhaePlB>n14!yA;v1`%#mZO;~ zry~@2dxw*3)~l_^U2$7Ld&LhDNhDiSQ6xcH$|^U7EfK#Wkf^H7OfiPNevdizJAEH- zr<2P^uf^@-imZvi2#$<5n&jSkfwVD-5K5E>r?d6{^;^h0I5xwW19S48H?>rf^g8fV z9q(jH7F_~6M1svrcR}{(&Tsf8HHBb-+NAsWeFC_AKHud%z7D@-GlkP5=J;ke@=)tF zQMV)}=o3e5Ox$#T1n8@LRWw|WS=PAAz(7oaIJoe3dYFxfjHKFy7cYX=m8VbiQBLCu zE8H^XVHDwrJa-jnwG!*KXfvb4?9OX-Qz-^xujTh76fu`sAWJ-&e-ewk*pW;k8AheT z>JQtqOn6E0thYhiLE7vWhzKBCN_R7)Ni_V9RqQxyHoMAw}c$#}y`YJn%^tjol$O{|#2{27;wB#MNEp zX&E5y2kY)^0-k-n7XVQSjL}DYt97KksFoC&+6 zF}-WMU#yMXpwU{Qgq|TX|8s#zoL5bkz@DDd%RVSEP-Grc8*KJ5dItpZbY*`bRJ3|G z`IK1nVzN&$w8q<>0or#|CTD}qC%B!&BcyJ>hcpqhL#(`TiWJ#yf}G&ZTDB`GBt?Ry z$!S%-w{F>(x$$lPI3Yx!@Alg?l-cHli`_Yf6LCTDrt+HVJP?$Y)@inzie`}S(}lP!3|bFl{^v8FX1DUvS|yAz{{6q{t}Tmt^H!Eo@?q&*{VXq z_EHo|!YK>0E|+#b0em6VL;Cl>HYXr-LL%l%3goASZ#0bus&i)f7+WtE|K~#hU=gs+ zeB!^|v2jfaMMcq~gs$@L0&5w!LL95KA2A@nD$`3zjt8qlH6>nXqBQ4XctXa;t=^NN z9=#;HSo7c?_^g${N8x1V51f*C+%VNOTV?(3cAA-(fT!(Am7B9fiqNT{m{f?#sOC;3 z{%xM--_D@;4hRfS9K$W5chaG37!OIsNb|cUia7+Otz3ZoOb2EjVz2EKUpAej&RKi>EjNfa5^W>A*`XaDI>rtPxmVUYwk+$ z_IcrwRmER8)uUOiv2cZNO?q~b#IiH%gOw&*RERx0bfiY{gU_LECPl+vr|@XDddlh1 z94GwBV{HEr7Nh5VAbWfQpHa<$(+b8f3t_L_w`v9LmN1$BAJGfsYR=!Qm7!CclssSmVO=ZIE=bGNCOPDI$g`BO%sre z7>}W$-!y|c&J{b7xB5)SJ5sY=?kp(X~XiEe2%8%*p8wPNNo)S7U=S+eX&E zh#E>~%%>GQR;Gi@^2W}mF=H|lRzn_sJ+xh$5OEBz7CA=)TzSVC&fsy$)wV#oB-`q2 z8B2dj6~an9gvp@{2CffM_N#3^uQ+<1$Bt*pRacVALeQ?j5MfUFR#zF7mj_F)!?%MN zEnr%$p#z>I5Dgy4Q(^2=Xxg^bj)J*rR39BX^$|^MDh=7S>HhhR6;qs~y`} z-Qo~HqI$u=$N;ddQ1Lf`M*6qG+jO}nS-VO*AVqnCI4n0?;9(e*?Z@Uo1=gfnqM(+# zH8^7)m?2&g;KXKP5ND16$U1}@phXOI;0Vr$hK~SRzXkqvH8rp9d}1V&9zd;Tt6n!C z`h9VVD2<`e3YAC6oP&;KW=qcFTP*8%YwLker=_0H-A|hOJsfAWUg}SO_)&uXhFJov zhD!lOj`Pjwl(cp!xXKN6SSq;C#t_W@gJ8?1OR~OoqzqM#g|QUjuOqQ^PdhacS0B=KQhvVH?ey*kVNxd 
zC6NODB5+jCw`pKe)5s$?ZsFq{a_aH6q*XE2t;$DjqJ-rXxF67qMY?;iN~)J3ZTtWT z2n|TR;D%BquZ1bLjpHEJFtSEghYD=s66d0|4r`t%$}k?<2|ZVy&LpeI{Ykk*zZVd1e?`|@7UeHpF|54#2= zDj6t`krz^2D0#Xy1Dts3R4@HeM$whkS)Pxlr*qIw?UshB~ml53tZ;`r@7<6_H5>m^kgd-(aT0AgeN33nj(`wrlm9T+F!Eq^A{6#aDsa^ccL zxDe?Q@aXtX5==QS`K`i+?LAog1mb#S*uW9IE;SSfe770QEYzQV(7e5<*r6UBY?&?JmbULSx_-9*4di7z^VnjC2=rnB>!mfg3+v4}O4`aCpPH&V zmebhp31rxMO-XB|bto0=j3){^nzhN>NJcM_Mf<*MAkutp-Ov2KyYvYRR%8F~rD!m` zZkOSaIF`i(Y^_O^zgEJsfz{_=hBFO4d|CDXy41uxLEZ{YuV2!r|mKMKHK6GdnM1_JsYko~U@VgLWv z0fyG5jt&e?4wjKh3Xi+;3C@Tl+KZ3fnn6Mhxbm=m)6`C><#J{__Wf2r)VYFH?^0Y>0Saft0 zBs3L>U6Rn)+=|?uxf{As5%Ai)HG}jmw*LoPK%~EqfLa8CfsjHJEFz>+1hJWeW4ZMT zr%JqH5|MYTUy-2CY~A;83qad=PJN5})vEjJ}T3*eWT{4JL^9;~j=2 z<)j>+|x2l?YU)9vN zF~qI5IO0s?-SG)ltEN7Mqj5o&O3Lp$d^sEWO4Zb#G1HITK7m!7|A!ck;ae=*QZ)U) z_zMNrTZ6zBe-!`#002ovPDHLkV1iIf0|XQU000O8gObolPs0}C#~%X#+9v}5DF6Tf zVRCI@WNB<{Z!I}4HZU+QHZNgvZDC|-Y;12|YHxFMUotQ;HZE{vW|W*$i~!M+K*zRi z+qP}nw(U2zZQHhO+qONcERua6yPb5>x4UzHZ=E_-MXDeoM$1UY3Pn1VUZjIvAu~Y6dxayvx}3d zp)HihW{Z}#`xYCL-&@^0u`kwAcX-?J#LUJkiEO6L(F`AnX8?tSkW>PUpw?^OUWZ_z zNo&RSZ4@Yspn(~~(+v3hfDJFS8NWWrw@59l}$csGUG~?TmC)SLedK1EyCgW7lV3?$R zT9f>)Y@4dIvg*v!R22sdkEr>~8CCb=I<$k`Th5TaiKiHSVO4TY5XMfd#>hiR}Ln`@cK5qzcI z1J`szJIZXC6@bM8 zbS(fnbhfQsZDs9moBcgm<)QETu$2c?91(_s7VCMZD=iX9rN^~wu7$z5X<;pIrXgXRHP(^n_me4!*ESJ4kqtv>ToYg1J73uzU}W zqtU>JppMJABxh?VX^=@;pZ1>R_1TNcaqtNk7Y(DxV^q>F7>We+kB}LHT2+3Rb_yv# zc&i;DV@RA;n%*FE6ObbSi93N+7c`qZ@D6bf)kl^J z5Xxw}uzw^OLcL%&h537II*3n2D{KN()bz8=R+=p2)KX<=npyY%L%9nH6cA3}x=q%S z;K8D%13c6-v0shE22vG$39elqM4LdfQqOVfwYoSf6iWOw5{qS+6CunJ&!Y~2Z51cO zWDv&}lf9^7YqVb9_^bru6$AS12PI&62<;xYfF_bB7AB+^f(3gM?J`sEP{>j!cR=u( z8s~sS`o1a^6N2mEM$J_&oDFHIA8rAlkvxv+?#MK7D$}g~JGV-lMa2*VgZje1_;7eN zx-e$aA56|e=lstJ)Y!vX465oM0j0iob<-TP|_J2l7B z=yO~oH_iaE)EgXTe~g_SU&HJ4c%*0o(K8t2l4j0)7(vx=rC)7J!y0WI=)seIaUjwl zM3jP@BZd+EvhGVL3Z4VxexVLO=hHa@ZmCXPW>KE^>*?!Hr=oEgHD&K4R|xlfS>eTO z7*xTnRJ{?h&L=JtRcOXZeP6Ipq_DkI~*w*vf?k+M8Dx% 
zfVgOfSZDPIv{49mVY>rlQnSw@Oery#{dXLTkI|YmIKSaaK|mf}_IM_;T>ttuJdyGC zu%BBMwz6y{jL>Ow%#7H+BP)P%sjX98J*ggLx_16p_=4i}mxPJ_fvY&qpKWS;_m0cp9zF;4mw*_?bGCP? zX@(d`=CHtbqYwNF7qjO(jM&HNCY$2k6yPF+VPHHP@Uigo9GtPWfv&5!E93=Qzh~vA z0r?X0mzN&71CR7EMbMP=3p4*eXz0Q#5wj0} z0M015%Oy^L$2K+ALTX{3w+`JbhebxG` zmLPE%aW22)%*;N+i<0f|yYWm>a@y+)J~`ek%)^;a_bh~*4}%S1MMOW`pQg&68-`t= zJ=LkEq(l3Fr3)Gfmi-W%FD*H}+O%OmV<6(gw2Q>h|HzgB8c>!EQhyRBBL(l4h!uoO zvvaj9H<2by)ke2o4P&~7>fT_I=h>c29xu9lF_{<^X3|}-u-tDk)u`uJ%%5J3#Xm6? z3)3||fxQ^Q+!mk|B@+92I9c?->J=Mgch9-FAO?~p@ADb*xVc67NYDCYND zRKzP=xNt>*S$ebD0zxj&4rpW^Q6`drPc00$(DITAe1J>3wS4e0%k|{XcyVaY=a4^f zkTUJEbSum}JfZasFq)+oIXCO9+Y13zGd`DPW*@i`n+gAulvl-+BatE)rB*vF5pp)TFDzFGTx zcivcD8(@I{p^43VYa^vSbx&SA%UgWD6)Qw|9@v9z(9v*$^oUR$ z$m5FYWBGC>#YZP$dR5;)l5hHA2FnLixf%eSeb9Z!xUH`S^0R1%BN-SkD$h{W~}Bni~uGCw%Hyh_Tl-tO1b=zHSAn&G~!V-ii5%yTcM)e{1!osU_dcEm9}cha4J>m_DYIAe_SC&1RL!HThhdY z^u+Pe+v}J(njUZM{bd*X`}7q*U-!qQcBDEOrsT=x(2x`*NrxsSU4l{-eAbG*J%2ZG zNcYTb+ea5S)xGv(n>#+g4}2eS(H`SJq7@h{zLN0a-duOv$Ev{JI;Y&#bTdEASGTHb_uzxM1{({X z?=pQKI?QFWtq2K(@u7exofA+-~Ay!2MyaSz55tM936L*JR9!&M2tWNXfPD2s6!Wevz+1D5xPZ-uLL$L!V1aTh`=_VBo|(T0(FR z0TDuInx5s2(2tlPAu)FfA(}}fCdwVnnB<^95_f?)R|GmJ67qb#hxpU#RYATEfB;QH zZw4IEVEC_;fYCfzB`q)abzx59@Fkn;s->2)rT(7kLJ_;rOH9xPV@qD2I!Z90id8+fG+Ga)1lRRg2hhTTVn}KiriI0T$)Lck-#g=utY94z zvMQMmS87{Hw#5ZuT&`XD*r*BOz09OJn8AjwP3B~kQ4F?1wz*&{NqNZpmL3>YLIZ1Z zRMGUpRd=Nv#`ztMEMj^G9aUcJxWv8ObxJAF^^~m1p4GpaWyKu%g1Sv(*YlydKaW6ZAS);3F4UxVlZ*?MWp93Bl)e3bhfhm3;&#F!}I^$)Q| z;G&`54Qj7C@f?F~e6H`Q%CADMTWqQ zV>ArYnG&N3PNSeQz<+E3WXtqB39aKpC?chS2L3rWwH43xLj*GuFZPd<9D^+*D(tZP zhVNOw@p)f7C0gwr-0Ps4#)&A4UiXt(H$LfsstH8PAl^xZx74WXhd3ZcL2)o4w68mY z^iTpQ*F-=D$o$*Ph7873Npybb^;l7gMRoX8-&0~^Bqw3^P(9`zv$^PSLI5n-S#fIs~g z#8+A&yb=BAAi?I0vXU`z}U|cZf^EpG6Lt101C$uNswqtmTtaKR9lbm+1$f+cu71DUUTO?B^NrsOE9@QLlo~l zt}AU&d;=}WOS^|gBeom2 z0Q0374Z0#}_yFjU(Hayjqo{KF>M9)^vpCF=As0RYI?Eh>r&Y-JDT29;E@0<=xgSHE1$qJ z$RHTS3MT47C8%}*6*y!U=L*rx3r?9V+ClN5|` z#s2p!3M~RTru|azhZQRVIw%?`jSzGbGiDexco?(L 
z-%FZ6?)vVA!A*R}R@(H6d@5D0YcFs!vkiYI>c>wR_qOc={T7-5Vh9f>iGW$ma8XT^ zgvmogxNr0TGM3`11dR{{Q>35!2H!J!A0?WRR$NCoXl5~_7tkP;Urq?1W4(3O`IKD% zie6`Si=SgIIqj-wp8g4N^VTovAHKN4?XPq6eIzCarP;dO29leg*UmTWQBd{?GbS^e zIjr2XGGD_w2eLw!RYE~Fg5R#an}aD}U74kp1nGZ1H8TyJM|bbSnz$KxDz(l~KI1zu zr;z<&fywl48d-X1vb_NQ6$s>(m1}^I=J))@N5Fg@mwzbVgs?0+B>nhcO50^5`-R() zXbu7r!=jnp6a+@Kn>@E5)LeWpA%B$n&hHa0a9s#K7!ntB>L1B<==rmthh(y)DARth zmpSHugIQL8WdL*i*or9t9$*8MFM&EOVJ4=4uhGNAykCn&HYi*a|u*-PTO(q z(llp8@c-ENX z0_lN?=+-gF#w4KtD}xkIckCfTKXAyL-zt^dFwcu0W_7>c*WFcD2@+8ur1_F@0TQaD zIE+8mGlAAJjMM;bOY3jOe)Mt{sGl0SmPoG1fAJEehYGftyk}9A6&OT16*XmFZN{zI zfH1(O*)R7eHNjZd8KA4Sa}{zLrmGhLfK&#_o|nx{7T!MtE0oG}xJtvc6TD5K0D+~@ z7_6?0e#zF@$uvzl4u`*dm(EsgbrDIrOm*??hjf7!Ga7!jeU*N$M4e+nv}W+d>fzd& z6BeLZpHD_mr3Rao(%(#Gs8HK>#oen_V?am(%sXX?>|$#t@xhwO6)GZ22e}|F3ltJ1 zrU2utQlk+C8L0}OhW5Q%7xgwKXs4q%6OW?HH9lEmox#Al%d~o_Kv8`ur)DPA)K_Q z+`=*cc?l-d(xRp;oPFAz4p;~JiXIRSztqWgit_)Cd#~wKeGW;X=KlcY~b(LJe1&$8k zs7oZ-ipy!u%1v!HP&ST&c+Ih&jB`c{;wGNNFM7Q;wqimRpB{%tnMg!hdmb$GJ$X`g zmdVI@U)^eIn!-CkBN127-4^KOAu58-ITs{(teUii(11C-Q(=frOO2JSs_N2pH#fUU zipvOoMM@VOtdJ!I>4i^ycp$_u;KR-j`yFCHifh-xWw*3aJ@L7Mkx2Mh7BcLhBH-a; zljsF1__n!$KX$R(s%gNN(AKA(8o{ImOepA05c5e2o8tz~O45QJa7t|+S<7ne7j$(yK#Ew{i=d}X`UBZDM#Hly}x z-7F>>s$4f+_#>r%+OFPWy?Yf+BIDw5=S9M%DVtgabtc9zu*cl_aj>bo5X;}Q%^DD| zjA{W6kaeNL3{JNz4lz`NV&FaL6Us0P4~ax1wmv)+5q~v8a=8mWZhq$d_;8XmiMW#6 z26IJR2q2S0cF4G7Tls5ZWrhK@n#VMJ8we-;L`9>25)Lo~L*6R?wz3&uD}LtR{0A!V zyeT99KvFZiwNG?V?yuxKBP#S#$Axn>6 z9g*ZXky460|6xNfEX7b)ztc^V-A2Ja!c~WUfn%v0K|d#`nhpXjLYXCbo zhBjN4;5TvshiZo|S^Y9w?{tlIW}l$pofU;Wp(o8q*szbFkw}?ggIsAaIg5ll14+nr zz%-H#L_A}{aYnirse70zqBF*#lY05RQoN#;khY-?)Pc)*yFWm?SebBYM>0}2Lk$|1 zU?64&0EaZ~5x9+Tw77}l#7t&bu^wF%c{CK>Hl@&vVU{K%nn;UQI3WIh#c5``;8vZd z_BqgPxf@IJEL#TMZ%B=o$U;Rl6CM~U!Lg7-T845w*Aa&}#5P{rYRSI)Y3KdN#q$1T zP+&hghp*_eq(xl&h4F}u0^V&4)Hvw}duRw2X$T@$Mra@L`*@WE@exj82rQ8hMB#0w z5*}Zkf5oz&(PMpcEHh?_H>1$ERHrP_6{M<=DzIrA41Xf1=peROA{bIq;Rf1N7XSQE zVvHPAOQ|$C4aJ3sFj{`q&)4ZNgEyjjwg4BxgGJfG`<`4%Ahv0o!$M-F)jv`l06bLE 
zSudpDXEKKfxwfyv625VrQqWXGBLMAA>_{o+Jlb2yKzt8mq>@6G0BK>lhW~l!s>&7& z{#HXxj>lEaN(%8*s!6z=kOESTaQnJMfT%SI28W0*sr&g2tyH(u_T^8|_tywA+VuSr_=}7)l*~shCh&kEXC1U2+>B19Yq8j%nSlu4Ieuw5(T`8k%R! z_51gYV;@3dVE%)`jR^dJX~H_3)7C-A1$?~yuL!8j`mz(d(2QZ!v;|o&D$el12qiXZ zm?avaFfGd6rd&Xxp@>RqEEYL&oltz@2MC8FYavcvvO&n*Qm%w?=G)lt6p7~CefIUL- zU<8-TTi@E8ZoHk+KXn_V>$E*SjTfpFeU{!@+jCw2_IFc25w&CJV=2=~ZQnHaG}YYJ zKEVRn;)aeemX_iBTa>Dqob~O@(r6f6*N+P^WDjG*U<=;(21Jie;C+TI_Y4mp=8R8# zm5TqHl*BJrS~}1=!HW#ppL(0V>crUUKJp-fJ{Wtd2jr&Gc*H(9ll#Nf=yvvNJo^W5 z%kjeTzmo-QZ2vo1z|6_X^nXnjtmtURZLy*G&DGuW(Xa&%)7+)GDHH7M&?z*p7fS{j zEb&;_n2MFta;EYoUDWLfOD0Rel1?#8HTXi<$WuIS&$)X@9E_|0(f|H{!29^Uy-uj_ zDR(oBOHiTP^V30?hg)|jL=#Mp9Nu_6{9bH93ym)wJ0a{s5`h9?d?t+wP{bcF!6<^$2RH)vClyA_mTO{P7p~a8` zEFxo|1d{z80CMfuT&wnVgRZyJ7zbE=I(v?zW3HVA%dp~VM1R%FoG%4Tu`qkpe*~kg z^dRnFy`8ZSR$3M-Hkr&^I!2@z4M7i&9{$T@W!<<#ify8|H0i4wLYe{=;%0^pvE(Ko z?hCpw4{gh9)ugGSuQojb&9{8xQj{@+8Np!oIp16vR5YcBUFT4J`ow=0wZrKsSkT0wO_)4 zHLlgCQv0n6rA2B=!wVQIIla1)M1Sm%?j42{u*$cu5d^D9{%d7p-B6}+nxNo?C$XhK zDng?Z>lHYX#LU=9QDP?y?v1=~82V+QUDSY9NH!d-bUaP;BnmczNWU_n*bMJO$k8yM zk!#jNRS?;^Q5CjM_XPp+7Ri#C^NoKyF~-!7Kp0?NOiVELLi)2#kx`j>B07+YQ`sj! 
z*OO(!*)uEUqP$uj6cLCZ!;mlR`l9^00wDlhMd#H^;*}okd_ma+UNnn{IR$@xRDhY} z+tn>8#j#;*z)nFY59RZuqI!l9c7ZXYod%i+F=RH;6qJBzVq%!)i>Oo*XqkdlFlA6+ zfaFwA6maKFHN%gKEbDq+f~QB4O?<2?6`>Yp6oOMe%Vcl zz5Kyf*rOktsZ$iS>X=Lx4)L}8KI*QHJ~ev3Og!F6Mnhr(uI$>)mF>qTPhZLwR(q-Z zr5soIm#qLX%v$LRlGl!U=Pwd8)SqM&IbZ{Bq$~0?J{2~zy+;my?V?xxygJf%L_zB| z(r4#3;vX;aO7NC`%|zq}YCbM9hE< zAi$h;S|bGA(|ND#V{AE!iKL~;g_gHN^^yt z0F@du!4tr(M2hWYXZ=mH+no8WyW}pNx4!D#mHq+fYWk^RS0wsqFK?49Sr`g1@o2(2 zok2*(1`*D>Cl}|r%lVIgzoIGh5JXSccu)g|iZA|kU$R=M&J@|u@%SX1C$w?inBhIBkh)=ZJ@ zZ2nMdJ^^5WpVrI;a*aeI@6}wEUh-;NKSr(lnFKOmT8%dKq*bF@jregNlUIX&G_P;k zEmtn8HtwPss=#^d#7Qd!>*l71e~eT1i@q6LA(?gD7@HXanl{(v3BNU9f`OKf75OAs zcY8t-gQ~Xt%Ob2Gy_ukGU1+6(@O$hs;yfZ-Xd$?8y&?D;y{!A+`|AUXMK{`GZ2|H& zvu9Z%?_fFsnn$@+SA|q+yB$`h0iLr7NO93mv7Y`v{{5epA6vS^)k0M?cdc8?LQkLO zyys-PwM+T~6OY&NxlpJ;!frOy!|o0JQ`c%dhKBpHO;zCr40SBy19|04XQ&AU8udaN zYs`jgS-o3wicrQ9m|Ss6-7A5H>aZ~sBo`Ma)Zqu(7-`wD8(5Fs80sL=4X7`ekpWuS z3!KjY$cURZHvt5t6hTVGm|5b8zmMjYK%u4H;IyOjKI?~TPL&@5#Zew zaK^OC*1T9}*J$(wMB9PEE5>eg2>J{|mBb!#4Nl{|U}Qn@Bc@q)ZdZ_xW}5_r&oqy4 z5aE;F1V`ND>FKW#p$fzk7xktY#zuJIBLJz+JLS&K&4oV=kOX}!d3AIUtnyj-WyiHF zndCEJA*gLLq05eK&Y&7RbHkhRFbTH+(_QBaxV4*`O$W_9okh9|&bWuZ-~6pjHVT2>|s^_Wzrh>d-}@}fO)8Fcz3 zo;wCkf!^w+W3CN2(z&Q4Ihg`Cnmj{t=ujdr^Fw7&jXickvvbJ7G%Ctsv%ov)HBh3! 
zWO{!-dM2P=BJk#O_!_v1vM|xyoFZz4p8BnfHbMj-^fvv+q5M$U0;1Ga5#^R{Edo{_ z!po2!kVl^N2fSX9wyjQzWVO2k?FP~#54yz{jsvMY)sm-qesz<{t#Ti=ABV%Qd#BDu zghTo93C#b1&zmmAnUHdiI-0rsZ2uF6#s)QUd_9TdVe%FWdER6!F?E;v4qX(>%m4uK zNKu3ZJbJWicsnK%XBg(d1&3x3qgX-?*?HtGYAgkqL(KvQRn`_&jEmikRr8c+E8t<% z%q9DBxZ?6ciiMo_bT=Uv$z)_Mz&alD8u)(YvMt8n2qRQYG8Neu)V)QRXZ)k3w+jPK zN~tq=0ziXO4NaEoXIGYzh9ZSiegesM_@L?Ivdg};xuWE4nm2UYOOMv(h(^>1Gv#5x zlTJah90<_Ih`?{t1%*0#k(spBX*cnd+EmDOV1i%mXzR_x!Ke2vp=uN>fj1^W?s<1& zFOe$(Sn5azCsimHP42?`IE5RO+Gq_ozNs}-Zw5`|fF{m%%s{3Fm^NU;5a@|yhx_P` z8RAmL-xRjCb%N6lVz02p@9Z`7)ZK2C7uZN`(H*ZI0^3T6Eojyyn{Pf);Ei)PY~d9k z8@2Os>Q5|f#JZqkhjoXcEXc><#&J3VzE$0+3XFzI!@I%Y>NRvE`uOlkg4mM+_;sc8 zQ!lyceC~3+)I!Z-y0xld0dP0`Zcks58X_ORlio86yV5J!<0k5$%psER!!%Ubyjrlr10xlWW^Eve0u}oQi#21Vve-39ctvb^Xjq#U^TaJ!{ED#+ z8d!bkvqI$6v#E8wy?5uLdKfUiEQrL~&?yz;qRyOgEs&J%iQ7LOn|;!IoQ#q0X*QoB zBeSmhV@A^<+05NJ1H5*Ear(+;VyA~V%mQ#QGr~;u<~V_;Vk-={06Rg8p4QhSCh$v9fh^*}J)407 zRXd27YEuX+-gXi9-Nd?LuGIW@Fgdv|d@q#Es4%wl&Wf1)Cg$(yfK`wW;e2-T(4=H! zd#1*tMa^5p@dh;~>f?gWFWS$`z9?E*o6f5VvFk(6VzY0XiA5C(+n51Z^KuE4f9#(C z{hf-^?A{l#@ws0yFwC(ruh)&&Ej%kdi%<){iW-THB;yU;xHxvU=QTRtD8X zc+%(l_o7>KB&CyWffuc?YuzSSS2_h@F6^L6avqVASoRKin0sq#z2-$O>~w*%!9 zRT`MaZClwZ)An3)`;w$&T-u4e$)O^xNU4*%Cm3%}hJf&yMPMMQunO9=!}ptMNn8myu+_CB$H{`-ZRl6Z>Y zr09k@O%Ni6`M57AMA&yVC+?e42)bM{Pj+%d$>95WsQ=2OTkJ7k)vyzb>HMd4C@(S z@fNq0>QlTX$P#m(SjZ>DXHc#DX5!+*wj@{lQ!Rqzt)BZN!PCW6i&&;yv3LY@@wnP| zt$^?34?vigH}HRRKKno6d=^$l*8eN#pXf?EAF(0z&ehE?>|!x`AQ>oaRFZUUR9;(B zJ4-v|ldhYE60s7YZarPK2_ldP2EuQon^e6PC&&QJ+=?B2bzlt^z9Zz%-!*K ze_e_Pu}LNmlkEDm?+r5vq1c0%B9>*rf6dUd)+0e)Xq*8V(AUp!CCrFx>b6?>vyzQ;7yfP zdoZjm$6ccBnUFu-#G8q7t9WvWv}s4Y&%~WfaDOYS@xKPM3^pAIGt&^wB2oYFaeL%C zOc|jtV%fK1h(n&8Za%w2oy-h<VKg2-?&{X!Bn3PH+X9E7hS<8#c- zmJNx6`3OXm@gG&5Bn(+un6+XCA0|#lFpFanoy>p2s$I%%&+>No*korM3DDEk*>37XJW%#(a#XP;A3k`P*VNGf7u2w&_FQYfgW@-I|z7&n0-EF_#dwS`& zcsbhJ{Okjt9Z$1*KthBHk9GwVZLB?TQ-tH1kVi#hmYM0 zTiy^LPn|~~tqhoA?l`Fxv9wx2WZ@N`K2FA4_*Uvyyc;1sWPI#j9lyWNAZF_JKlv00Nj_(!_qbMDr(IUCFk-{r@;RR_>A^Bp~g5s44jLs|jL 
zWgH8?dzY2fS`V@2)nc3_uf|&?(DN1e3@vSbtf(gtMc&Ih4q~0EON!ss>m8N`G#Z305T=mi)P}pK7YE$Y7$Nm_-{P4kzkqDNZT@ zSmKA*h6xy=;VGF1i6=s^75iD=rW7));l_XjO+mIGO@E#>cWSl{ecGD<{V-vzZzDIX zs4vH^(ebaTOSl!^+PY^($}3szLAxe3|E%tzL`Bs z;(of&&cZxvge6j$V8&0e+9TKH{5ei9JD}zI^6)>ihkAZI9cG$g4WMN|ZTjJp0Hye$ z9nJ{=BY_E1G~j{-HC5rpGuEmsoDv0>FHuWi_fM~WyQ|C%%%_V&LIMa(;DP{XP;{+o zRh6^=DR1tP+8wZYal?hSV3fcH}v2f5prB9!*kd_CI{~;$!WZ1(MC^yOx-vq3fr$8q0Qbvsk zR7G%0b_>tTrOsFv^ilx+PGe3Bw^8LavI=RF>3}y1P_(fExoXcPSEedJ6e{gu2AqOe z;Is-0DJ(!QmT#C>0?%+c56?eK4M;&EIVd0qhmt70TGGB;G4l5^cqCLjqEidsk`he! z!u)nnms2c#*Kw>91!B< zFwW82OcC--O#?*|x5yA4Z+$7Xd0gRDD}Q0vpF)QU0nLB=HobOvw@Yza-zoP%D8(yypIht(IW!3v<`)q6 zBjf}ym;z!=Rm=E)+&wjd0m_l64FRJV2s>;WT0hrODD^0)n-Guu?U1 zm0Js#~V73A0Y^WwsAHbMcEgPA$Ae*FWmv@ zO4ZvWx8d6Z@9f!T%@fSC%8Gtn^n-HkGr{Z{Pp2Cjgg#}mLRY8$V}gnU>DyH8{ByJ0 z48R^V=gKJbsC9$TS3~ZIX=j9Ju%8m_Sbd%55mc4w3NvIf@_Csk)!3_2KV<_|lEmoL z#EsrbC0vB1)Xon35VB&{U3N+ORnxWixfhzD}IChSpx!i&X#MalP7?Vc|B;O1B*yXTkE}=4oR==ebgRh=Xw}X zd)CtucTs?o9_%@a(Wu6IdMW4eg+YO!puL03>^(*nCegrM@BVpE@&RAvXWb;#90zg& z!WnHeL5I{P1`SKHw^6R4rt#C}iIL#8p?Y~!3oQw;B+4f-)lQFQA*Oun&h+%ca`_nmf&-WdUHTDhiwvYb2{T-h>l`?6zNG7&Fsl4#U=5m1R-QvEGp-6COS)=e3gaM=a($Vh=ecFo4x5i`qDvBGjp1O39#vT%ceI87Zc`v9t`!GoJXFA|34Em)%$ib57KzZ9 z>8k;o?HY4*)(9J<3u&Cmx$Lm?dz2dJr_!B%bKR~tGrqV2dbi(I={L|`GkZ)lBWZ?? 
z_O((?jR10%JY;wd1~Mk2d2mU9QI;dn^a6r%^QZ~@hn~VAR{F5PeACUpY&FQ zt879J5O%C0M*hV(wFi|>8h%bxvNJGn5#Z3d}tTxa)DXBU!-YK{KDTIU0 zrcA5WaLQ-2r}AxIq9z+9i~bIBZt0d*-0pygK8;0GD%+dzu6Jh*q&>=fS?R+K8ua#0 zRJy+>ma!tqCl8Md;q>B*4TU7KIa={4afLr=h+PxV^Hj(0;O<+W{P*@N%DMgxB{dde zVml_2Z8yst6)Q~YSc+g0z#HsHEK5$CP>lMY0cYFdtt|}4OU$cZHLPRK*r6(&yR8bI zwE4$c$GBCV1yZVJAC?*&i!(v+gTy#umY-}GG2rZ|1BO89T+zscM0#Ig5#k{FD0Sw; z^AIu)SR4s?y`0Z%RsbGV3!=68@EP;U7bHR^hH)&$Ha~@F&CmOqc zCt)_*jK1JA#37T=nH0NPJ-eEnfJMW0f;~2#;IvL{aTY>1b z(;0QvB`{NC*Kv|NOr}c`OQQOfp=skh_cX+?7v>k5{!2O@?z?j*>NHT`PIr{T4j(qz zoCHM_1JbXt=}_`02oBhr`A@C;BU@TTFX|*M)0c9zf%{^f(5`1#vhVK11z!E#*SzX; zL~ycPX}Q4l)weGz&SzwH$Rg*mqf4$ z`IhdRQtp+gJAni;Y$MR3r5CGx!3#FzzC^ zmojRy!k1ZnAjm&UGy@qpg=Q?@>FsJ762Rtq{af@{ug7eX&)hRDN>d=ZBc~X`@qsIn zgYUpbc78=W8wBZf+PIkxbk*%aAg<}SnQmwR6Q$o>ue;a@&(NolmmH@ zkic@d2LEB_jqOR!)A=%aYHEyDug1;lVI1l@(c|Rr&a(2x(=*!VE9;`gWiK2wPX_Wx zdAeuZvK=pg%#3hWXsxI`mo&?82P6O|fdl5t#gx&86{}~-u;jD4b(y^5`8f&3rhn=C zToy*Q#zYFi&21 zLR?i%57oq3A96_yiXv>7aFX29l?b|}2)@DrOQD~D*8+|{PkxW=2S6#PVro2gR$12> zy{$d27dP(B#OsOS-b*Vl^R=c@>WdZkFA1+j2`Y6%#!xUU_JFpUe)SYy|s(6Dbj^LhZ0g{2#8Y((VaH#IVzO&B#30FA&6kZqk(7jX8I@+ zq3=%S#W!F4fpv`2o|#E>sy%1H(!N2GHJX8T<<<{T0Yov0z2!OgbZ^EP?M z&cl36z}>z%(vBS+>7*?-q~0&}dyd){0uTP| zotnrbjuVxSdQ|-E$4lFwoCBzC$?mj+PBmzjt;&ParA-(Dam=%CtKi?e-O!6!c za{lgq;yQc2?)OXDkqALCr3{~Tc@)ufl1N4oQdz$0Bz-%+ZjzAgV)w1<6I#@Fv0lHA zt1$cnXOnx*e=F+!wpL}slsoC1w_O;Vr`tkeXq-VR;ZO?nv)orcQ?DTTr zZy~qXOMbh#Ixnpt3Ww+`u_y9Y3&%Lc7kCYngWU7?c=&?$Hpq?-L5OWui68=l986ld z9ii_#G=cU1W*i8|%;8~;yG1Icq?oAfl=bs^$dk*!LQCl$NHi+$05AlRoZTGR`%K#6 z4Y8(HT^!6icY<$0q*T#yqjwQc&(p9UjVLISpD_p7)4{?eBDMx2fVU`^)0Ya zbKGWEhzx`w5rN3c-B@!4c@yR;d2~>a3!xVfXz;7v+8q%X05NO=C!l~Vpg<8of8y$$ z9013%b1WQd5Q$4d5p+4?EZi=Zfc9qE%T>6!V6p1zB%-=a{;5MBN>C;c3dpjXQ0^VN z6mXwDG#0mG72$PGxnEb_DDMW)Fr@Zq5@K z{h0s|Dk$re127=!Kjvp&D|I_}_@Lq6U0!eqe8O#YPBp5}@(riZT4g>_6ksH%5ZZOmBx&JGRb7+A6G|e&9ctIuo@H$a>3|R*csEYpAfnBP?-A<1ukjoG(f7CUgEkC!s>Rk>z<#i zA^cAm5N2tvuHW^s3fz#&W!89RqS6the`f1wLy2f=oZ!2;7S1bq+V`93qg>sGkaTjk 
zKk8R{dml9KUO=7~H~arF%3LHt%{sZr^Q?n;uag#gOT@}}KaeZ(SLJKqVGn$b1cDrq zyjr&6u5rK!BC6{|9j{bcWbxpYD}nnAF-K)MZ&TLx1@T4>6!F_SdTq4x z;eahlW14D+FwkIIK5A9BA%0z-y@a=CR^*4RV9$VidxX!(Lt%;6jTP@d2TXz&SdA*x zp%8oAADMh{F2+|@F%Xyn*(jsw+3Kfw%jBW^56AJOy0B?g{e*D9+WyZgO=@a0u6`RB z&YnWrsOhs%$~SN)qT!W^)G<4JP6e77-`W}@ZqjgsCGqwrWar^8K4C7Rkyqn0X+T1F z;&MYLNhthVQmRn_ULkY)(#H%!l%SiqCr8YL!Gr^+*9d`e2)gX6kM`&Y#&Bh3ZJ8W2 zM{hx&6baal5E25Ii>C@Cv6Glc z*%SvJo1#CL?yAO4-|a@NTWX}6f*wi zy!DdQESAE_A-JAy)amVR+8ur1hsl(ZSNK$roVh>-Q35wty6#!`1YyLO~?h_c|0o&Z>QgW{e1S!)br3270F((cbPu@6yP&4dRRTEfmR#t{@;m>s3O|Y5} ziltRzTXmsMp_hutnG*()uX1~3o!o*~Bl@0xd-@GQ(4SANky5-@uxxxqygdMcZW+9qg5Ex)--tb0gctM1At(%PW>+IC)%)^{hjoNO) z@6}9R@BSi9l6Fpd2lq~0meElAbUnpynJi#`dA0`g!$?(gD~HvbJw8C;DuAy1LrV;Nns4mvDu19+!_*(syc%KT*}h*>fusY9dn;?_ z7Dr7XJjdBs!5)z~*AW^P1XTE8CV&Qu*cV5J{V21!C-5vb%u^OlU6<97#s=nQ(zSKR zEhm*#feZb)is1c@p!cjThOUS5J07o!>Wv-h?n(Z|(PQLwSIW-6S?zo1>&@}&B_X;w zCP|bC^VoJG+YLXU8Ft44^Z|gW#v2^dai1n_9?1i+NLztlpHRh!8>v zzb#hPJ%d~1g;9IuNQXdyanT+?J(XYi8bZW*V+mWn=oBoOmDNb<(o(>pXhM z4_<^8S#qu|{_e*$VMmtP?P^gy!H!>Dx9|QpF$Th0a;VJaIF)KerVI>kS??W9t>6s8 z08%lObeS!ntS+BUw%KqAXItuU9g$@#9V2Wy4|GUGLr*c@}~J$6MFChb@0KF`K>sHh#DxeHje& z%+Mu5v;sDoMF$1wpdEA_ON>St10>4N6WT)ZEadZOU>5CIh#vWxLxk2}-rV>)zD37` zYWkH7eZaRJR(r)w8JEv~z2_gE_gq9ygfv2It7cp@O!w5VQaPP49Y(&#LT*uU*`Irqy=^)h8=wltm7J#Y|UZrW;4$ReMtM$(PtL*PRpKC6t6q$N$l(AAdKNW#Vd1j>x=> zEzZ^V!br_{Q_vvHHkUi=f)K+l*k>(aT>Sox@7+7<`6h5Z8B2vJ)c3=WxrG==CgjJg z%g_rn;9hfNg9!j>S0v+g3OfzJrtpX=ir6B4-^3H-C{^Y{H07f6Cgx{zw6!> zgwR073sdx708tSg2!~QN$|F8>4ic3^0b%xAV5}-OS~ZEP^@qh0P0_KwrK*{07NmfH z_3<*}R8dy#1Ajwg#IWxl>&P`t8|e-daXQF)HF$^ah5ZTZ;YEuDWqMT_lnc&dvN?Js zmV9mHS9%niGo)*z(pEaNEAKcdNy;hW#6cm#GF9eoA_xjy4vV^3CSkk{Zcht@PtMAQ z?d@d*`^wE4&K3AjB6p!JWSv4*|U>v-RgAvCL4A$DU znO^m-#kCS#=yZ@8l4a|8MUCM4v-OQx^v|j{*;^Qs*@UL=L=?^tN13CyO5#byxGGmQ zFK6x#4;qoK_Vc2!qz39(t)psoUw1QYVQvzu=In*IyT*QdbuSM)$>kw0mX75m9*pFl{67I!snfbC+zHg#KWJ!KD}FzOOGuHmvOc2Q-c=p(&a%~JQu8Go-aU$5JpmnJk8$#F-KL>!|^w3s3>fhiwW8*B`Jy2Myc_%R;=u?Ryi 
z4mGHAG{1id!$VO#AwIl_BatsThYMU)nG)elF}{=Xewgy~{uFkb|WS`9l6r)Tq8NLAO21v=wPSRCGsy{ z&}?$7#-B(b7k)o?af~$()_EpMd`TA&O+9R98&tSQpC7iat%ofw*hba9E%6 zz+XwMxm2EwkvNP=MqQFmt8$|yMJSYFgBIe%B9c5~Qu~r{@?iCeemR!8c4EoU^D5-J zeVYZTO1OxN2(%>#@ItMu^GevG=2gWFek;eMpQSd5+uK6w4t5&kdY_fh0q@2ntst$( z9NJIdhK1Y`{Iy-#=*t3F#3ay5oV+j)I9Ax}G9b)=0PS>3;0EH|>hP+oE|DG2jjD6b zz%Ra?lYS%VwKmV`_Ut3!FWw19Cma)8{~!(o@|cbvTu$8n2cB~{9Kayr4)}viVHcVV zqixsI&qQD>wo5rW96Y2r)(>0vJFQKC`z& z82undJ|#qHc|DmRfQg_Bh4SMuMji;l0e4~-2Qfua!T^Q?Q#B~@5!=fZxjabnHxViw z&P}+8H|zW=liU(=Z;On&<7Q)@L5lwrmq9D;zJ!mu!{x&v{?X_P4`ub#iJ2e)05WU9 z3ljL$CFqnjb;Hr4)2u0p;O7}lqK4OZBe3RT(26XQ7kyHH=42~mc-r@mTsZ_b7uG4w zPN?Eyo3y6>Q4(E&NBgblxFwih0C7X1OI{sm#GH#08plH(ICv*z^r2st*a zw-Z~_D>OoSaF-H@!F|t0rBDbs<}o^gEb~a*NiF<^mRwpJfMmIKzDcg7LszSBdl9-x zGl81}Md`WIeTFEIAP5R&4n~%sw$n#i)&$Y1?kum>@ljmb!1yR|tZ^QfabcDPRG@qZ z-GaD6IqsZ9p1myjM<>WA*JZOWmo!l?swE6!Op8ZNgLC*r*(YrVWD;^8>z;h!a4(Nahr5i&n*NKLyDDT-SXA^kMCrb&?vc;ya z?+_d$IwqO(QRB#QkaS5(pnXTJ=ZBExLgttOKoNF=M8hEQoGHdlPyv3=5VxWN&4zBg zG!1g2Krftxr zQHO4xA{c8ct};A&quvU^%?aa!Gs_=e6(~g_+x|c5(P>-RYyM!=hB+PlGWbVnBnvYS zv|A6bTf}v_Mu2wOo%l^iEZn&|o=$r3&6QL2qnXIf9q1(|fk`Rg893OMs0lXh;bVc= zJ$S2vwxL;sqb~(-gKYhaB0jK5#*Q|Y?UxoOIzmZdz{lblKjmgM>yi^>0Pc0W9{S9f-e zkl#r?aK@kxO8{X2mJxNRS{oIc=Sca4O5Fsz6KiE}M+$ofo3O;1Vt7X}n0)HGJj@Y# zcQHxBtR+1P;E`LVOW1;}*qgF7yQ_%aV8Z_p4YSN;)GN$m4f;a-)Nmy|h(ZumbK$_O z$*sawEoIC!^TfYSFl<~xR6@G$i&B|DxPBsB-?mzs%AM@bKj(uG4%zsaA{a>Q>Z{

f=5v6LGGa1YD}>gl-w{v> zP=U>8ZvpSqZF@YKakMW?1*}KOeVKY#9!C=`8s)?}crXEmfx(I@pqiDW8}!;@e2ho4E3kG1B+IyqR!;hVj@C3ilbUI&L0)< zMkdsOqw$5?%z13J+W>U&wcXX*Cz_Qb^iyPP4t#9tM5^oBu0iwa+S|kVctAM(%;(!|7rVI z?&o~`zZyC1@^pO=bY9WkyLwuV`969zhg>O3oi8ntRfrX{SE+zVfepNSB-|I`&R3}4Qbw2hIh3RTyOu7byHf4xFJd1+Hzjv z>n9;;I8dW=TGsamhqhEo-QsGa2Wo&dJN+_aKyYJ&bEA{5x1GC_N8?wxMu^3Dt8ngf z`$jMn%MNfx5%SLC$lEdfE}_6@&ag3|i)zjIar=XNMI;wWkoRqY<#xs*9Q0{ADx|SW zm_8*byp=+UsLpC+*lAXzb;MtCvliP8S9?EpVJl1TLa(aof-f_Jv|*vPtE0O$2Xs#> zEA6|^Y(>+gLa>Rf=c+|@v?fE|h-9&zO-?1Zzmtcc04L+h4oV?(20A3 zm63m=g)u_8prwF02vazVda^nHA#mM|%85J1>Wlqx_Go`yY_+Xw(j49yi4FzvgCw`B z)?Is+6SfQzDYLy${IbsC>rrdLbDUP_Co0<5i3YvZ129slfcOY!Ug%{)eQiUQmQ5;UN#!^GBM&<;bkKQ{LC%{e7At5b_gD z%1d;}?J228?E2$H3zeqF_q_1*qZtJFJ&7?GlGu(bc(&y9`p|Ckjap_Lp$Syed*k** zr^12Fse48GbwGV`O8Hh9>d9s^x@oSvD3*0JHYn4RY`I)(93049Ifx)cB#-rAj!p6W zY*ZUBXD5*y^Ah7`e;6V-{-jA09Lgg0O^6!wW}CiohG}E?jYkQ3N|x5~I9au1A@Dyq z!~kzzG^ey|?Z`CnCY<;lhx%U5n?pGuzcs7N9cfJ@eiU| zT8?K54tIxg`Je*U4puVRwTyL>#oN`-tqA9>-Di?E zYqG985xX$&gDJnP?bboO>K?A#?&~eB=2d{0FS#D=%~nHTYr<;TOH3`lw0<851` zgov=|Q?MbL_i@o~(5RVA7@%ee^I=*xHv&+hHTtI%AsYsq=ks3bE-VFXa(K933^{|_ z7QTUS^QGkbY_4JB@Bv)2lB;gy_K?3wUKqX5(CmMte?w`7taG!nmhD-C1t*T?o*;64QpaGieLYA;+u1&Q{`m0MloJmM~6xw)8%`=jgx zvuLbi;xbC@=ja+SO}!R?HR77u?GsLdpX%-Yn!NgkYd0i`q3NVi!Dc|lG7OsGpj?W8 z9h^Ag>REYZ*|k2&+?tqKNT`yjv;U-rQliMRCJSH8?rLOEu;r|;?{+r#qbN@wGSzji z>hzBFN9M~xzzqORdhf}7jE#xMOqZU6t+JnV`m!8Qp_jZa9o|96r+f?;=DzB6ivZ)b z+b4;b!ROd$JCH?dxU4MyLzjt%A>Iq%e!M5D%1ZN$NV2IK!~`NZY=kNFQ=oEEg%qdd zZg!kGrTTNRUXUU93Q(1TB)3G>-USsovLb5(A_%TqT?PWTXG$}G125?D5K`nHRcI|r zKc%uqqmf^;a^-IcKRSbtzDan%xrYdzz5J7LAu!#1UdGM%h2mzrOD^`q~oX|@@5yl#Ghm1-V|0R z@jKJLCETBx9{x485e2aUOTCAgi)SrG%o(R{u94+B=><81B%M_{=Ht# zEzPaFzdn9FI%1m~o;9ckiqtCYUV0p`>Z62A z;sscW_!6~fV0M}lgEp4=$@BjRWbwuIx01y7vziynyLfJi)GJZ0z|w^-*MCW)P2FnT zQRgBocO6`=78mBj6SpywEODUABn+IYi|0}LL5~2v($nXuEX8|lD}d=v(1zZ6pM!-a z{uOsb#C3i0SM>}4ena>0yiq1i@{o50mhq8#L6wsHvl<#O@gohPbdY%y1|`j>LAjWn z4j&Y6FzUphT$wk{@9EcpD6}UOUiOqkb*8+X(A@oB12FD0m{T82gRE 
z#OIkc`B5!vFDs9O+Mv_>eLE5Oldn@AIs$nh$UbKj^>aD@1NazRHU8h{FC!Dv|2lvf z2^g6kf?Sf+7?L(2p}LJq|#g{2$k?ZKXaXTpZ=_U_cB{@Jzsa%YQK7G!%`&I z0>pRG?0^*mhvInz^!y+L3R4>?#>9pZWTCW4*3xx z|8-l>D`g-;`#>!RjQxZ#W&jEZfJg}dQPTGhK%npMLx11{2*?1Wabzp#`9n~ipdo@= zkm^VWb$pQJT|m#RvpaPF?sQ-P6m;~H-yFDj_dtSt0|c0SP$4b>9XswI!C3%c4h$&4 z&+j6Tu4G`O3#x&!vy+n%XeY;2**XdI;G7_Njqc1nUs^s~wRV zfMRe7?EFDt^xwqm5rBX=TsedW5i-1Op}?opf&jMPz%eMSft%LVJOvJ31G*`gLkNhACZS)%9D@ZIwDtuMn7hB*@{J>d zxPonL1Np_l0hNzm0SdHX|8C6TMhA2f?Rf4K%=Wn*f2p3nrl%OTMmnIQL%>|f-COgK zQUC$<^19>q@~2&a1bYuC_-nf)&qSek;~Gs0ZxaU?KRI;~u}p0i3`+h5*Dvz=!i+ z@&o@hLj(u}(4s_xtP5NhFa-Il_qhsR|2Ep+;RC*aY25EE!b9x;y?m`@tHzXUkU+S|Cx1?_A&6kzXE>qKmE8*{>7d4 zResmW{M|*U@MP!sk8S(`ee*lULoaWf!5^k};*D!#S`65a0DRe(Lmt!LSP9w`;L-iz zs)`5Moe;n`x&N*S3hk)o?;>dHQXpO5qZ|J|V*e~=GNXVk{G&_Q-&YraN5Jp*FLDP8 z=Gx^8)UmJRW+cP|do}(qZe*{;RE? z5WyZI-M|eCE)~YP8ru3xk=TDO%G_7wllZE0!Wj;4nWk{(h#b<6O5D|Koaj)6&$T|x z#ZEQjN930I2-T{r1CYJ(Bt!4&AA2WBWuHe^H6YB!%Rl%~?#EYO=MQ&J7?ZZ|x*Z>w z;M|66Meck?GpZ2mCY6f28omdvoB(*tB1=B7#GJ06J#L|ZTEOPbqQCf!uVQ#r8gIK@B5SWGs=PhTV@Z29#D52-XEUrkXxO_I`&Tyj9 zMrHfvy(jb>qERxuJUx27q_NZ)G4F(iW*%%Y-K9Dm~baVcxln1HDI z4M}Z1dR}VPFfw9#%7EDJwR^YaMFof*JWQ^gQFkFJ0-P-?$VF+(khxVFQQVlF`DK=I_KP*?OD{x(8sQP)hKn?cyLE)S~EoSngb z62`oQ9O3$pEBJTo2G~X}$!UZQDgzmOcxrFF%FzC*M^6O2e}F()IJn9pmHDT@K91?T)T1AC6$qS} zF+<@rZct(sbLWZ=m~Y?hL>%8`E?;59l%ROMl`W+^RqOM# zBWK-U^j<@*RSdewefq|28uo3%)b0)IYapR54HxsiH<6Ehs$&yvu)HntzE~c7E=PHI z?}{p4(Bk;;k0C|(jvb9$Uq>A#v{Bqq0THk4+@WqIXt>-tZ=t?tZ3-REArMLg$B+0K&RS&A& zHJR3zIGMHT?&7KL>z&7i=K((|Q$aBliNdroAI16PW$5x|dzC?_u-qq%w)0^|V(&B3 zs0!Bq@EmSky1L;2ij!NaTs5HsDE!2KC=_R<5;;n3$n}?!8N|*jxL_km@lkK;(4H_Q zOfJ69LB8>`BG1iI2q?C&>p>F0VBQu%8``gaIUpZcUVQimG4#w!&(q`5(3nkmyRgn%^sqJU9zO)dmOu8Zg zoZaRHPZp%JaF-(c57)y;i5;IGMdSeQ@#^LkOa2ttVx7WJd7}r%2;Vv=z0?fmDJ%Ga zcI(Uu)fYpXuF^*rqHf~OBJIK0((PoKDsQ_q68eywZW4e9WRx{&P|zVB&4@R=QK?IW7sS78g*Wc#|fe(#Le>ld-rPE z3blu3DRkG>L~N!@Zrc}Z(4VVUS(yyaNU-^tjNLPbt_DEwObl}^c*u5xT6wIVg#(}8 
zGGS>2I5yWdl2^K|rV@)Tr;d|o^jv0YRK~Sa^_AF=O(<8j0@38KZa({baV7USj(m%B7=4XeD)y5iJvknB$(+Ec?Hs z;nEfG#C#71EJ7h0CWOnxm3vv9ytop6kq9gH(r~BDEKtdR9tsIlKz*hA>1+0?chkGBQ5g}nA&hzG-%lj@yg!;WuDl?7RcfuBFeMJRWx8x+1N%wy;QZDVSExuOX5a7 z^C)dhzhz>ILO49fEij8Q%RJRQR2yA-tU?*TCb&+0fb>SfB7$qQ)JN~+`(ydL|Kjd& z69E0gtv`fcXP=wGG}aABeqVGGhGM;Cn|Ik-3u+wia-z;Kr+hx{v57<}z{(qESy=g)Y z!Bsm7k{|aq#YAC7)UBsltD9}cdU>&b7p!!sC)@AknG?<2VrZaL{)ahJvm2ZpB2u&h zj6_!qW*G%{Ijid}5J{Xs{IZgd7L)1z#8hHl!lm$2>z(7FszhGJ(eX-+<2ldLy0e2( zwKq?4x%^~?M4mefv8uQN6XaG|@UG9>i@h{Tjz~3S(dOlys`@0$insF)Pm5)87-t5+ zqT#g_axb|fnP&R_2|@Kp9&&4zy+tP8tmZmi-g`xOB{x$eG-Sl(xFRRj#9C0&h0n)A z+j*}oW~cOktoL+31;Eh1bOt?inRe!7Gv?i6a>5i+Ib^h!KG97~w-^7Z`hXt(tW<1t zPrFDl)+Rl=3PnD%uy*B0U)F%&?d){QT&{vO1KYXPl9+0mux+Ufc?t`uh&eW?glV}k znZ;|dr#1s`y-oVu0yGb{t!&Oes(<5;VLzs_dQ$&sb)2>S7l|iAY!AXkOx<_-iBPx7-CAZX(&0ScJa* z7Wufnoi<&|S|Ct`N%cOK8OH478@zRDy<7#v;<_a_Frgb+r-2Ghv8QbIFy0A_N6MK5e8F|WY48tGKKe(V zb}lcz;ys{u0?%8?EYStN6G*%e7B;`1$`?u2a+~OGK%P}{B)Y&w=ZhZa@o&ydixJf> zR9QdZi@B<_0ik!rMU7f!3q8~p?b#`JY7PdJ3e3sDARM7)cxgDd%Oq%rp-O8UWRfT>Au!ZA@3+6m+n^;^=uDKJ)9z3c!>Ktj& zN3J2Z837W9q&XJ-6+YrvOJ)tHh1%!&kTE{z{>MBSB(Xu*4xp)u`-gjSDhfrLw=47PWP2cQ$D{uNsj%mwYii%{y$>1P(}PY7?7;@ zJ(B;SGU(`^lP{QDUoKtzFzegYQrj^0nfn|bN~?JdhINC#G>UwA)YRKQYIHZI+4-nl zq|6=#h>FeM$l8d}z#BgS#x7qoBb=u4QiBPjd;{vf()o|nCccIkVV38JnyJ&EoW^#& zH2Qg3`uIf7Pg?e<*?}~zm1bAR?eU_dalDPV0|;4=TU%Px{Mn47UL%Vm+=E$uu6LfH zF4t#V>?aS)Xhl>z$4lV@&97v2-l_Z;YD1ok>dJ?V9C4{FhlMQw^qy`l z?od7V72+DjEXG({U*^D*HYQa*pw@*&4SON+3$2*jVm!kW!B;mkx<1yT2+?szDaT2+ zT3d!360(Qe&tr`XE|Pf*`M8ugV9MPt&=))#f@zBDw=1h=B&=s}wRa=Q@wP6=*t~SO z+34C9w`&5P^<#8hUIG-IA@`aY2h(jL0J1$9)h!Vl_`+GPY00Dr?zr+PNrK^wE0hFw z4?;?EaeijlEdOKpTU?$9m^()woT(dmC?(}blEn%N4AnRdTV--m$BgA`E{nR0j$U~YxCiv7IV9zWo>_ThE;)Sgydl1i#6$+zCqlZI!$`)` z$hUuReB!w1ky{^FZS>+2+U`ogb?1_80z&1m+-K;C^tpkr%s(>U8Be6$sj$Fh3n0*^ zW+mnzGoUu#4IdLZ4L6RSSx@(Z5%>%n;~IULv<<*DZCJ|i4`B>|B%5jI9}T)5N^JFg zgP8j7Mv z?kz`f+!`EEJ$t5-4f2o`)y)Z$4KP_$PHbAH%BJM8l%v>c>rL1;1UPFds-jjT+PCLu 
zW8j$b+d#o#R^8a7Q4|M9!Hbk?6}}Ep!P{@*Xz-56ug5R9KZ(IWC5ys}dihqs3;S1Y zx4+7w`DPrWF-Pd`N!oMmC|dsJCnA4;Yof>L6*Nk2r6)S&+_l$xvq9Gp#v;Y5ZzyaE30yN!s04K z#Kk_y;Tobxy@Jx^DQ|?n{-Qh$j-1VOdRrh5XKM~s2EJiX;p|fH)||VWiT<5mVS&Xc zQ zQFSphkU7MdRM7%Xv6=k?f6JX{Zdn*wH*`_nV*~#^=n7DyKs88iV8R04fP7EhVuQlFarv#P95-XY*pb^xfH>H3SW_^rJ#RZ|8Go7hma>(~C>K%%vG&bwa9WInUyHEV41R z&g#9*ZdztBzO}c8z?~tQdRP9j&-pDS+WlmB2?Dbdy|o*x)NB&(o(W^-Mwc=*O(_ye zrYkX2t};aDl`YcpF`PHh!s(MH5awupXSI^Jvh3$& z`0u$rH=c9?@V8Kl*Nz0@Eh;D`{0J97> zNUDBM+=%o$RK~VS&5X>|UtveiCVCb2?BRAZ-dAgemuNQRj9jAgv&pp4XXV^B+L&Ku z-0Gvl37%*?yM?-HnKdd}m}lcEob*ZlaML&*1wM5i-#<07d5)YG&PC&6J!KP%T+D}t@Mcj%jRilcrMC3;rBd^Ap<)f!3Lf>%(o!eg}Rd*MG zLIXiw@LZkRjPPG(er%hA5v2=jd5<0O#;;qAI~{xFQqLg z9%h8cFFUz|y<40N|2hfIeW}i(taol;=1^vd$fUH$qI-gH!4oh1K4FT@83%BmXeQ&)wa-x| znqGWg$)ug{ASycTiL1RcPDW<3R&vLU_9B(>@Z5Pzi5Rz9;yfu_NDl-H41CDIJL{Tq zsT7XCkVcnfEp9fg?9V}=lyi`KG+N&`SJDjR`CyQ4_6Hl{}&e=eo)%pI4qb6T4`5?0M- ziy+EiP9B1vo(_A@>qtEp?J`vG;&BBf8}FIi*16&Q0C-u4FpoNuA6qe_Pd$JjYU1?nvg2f7;TGR>7bmB5jotm!YO zb|pLfV<_VR#X3J0VMkd%X)0ztLCr7mP*z3u>?`F)s4?jhGqpImok_A;OG(QnP(27x zG4_v*jk0S37W(cC`8Pl3$pe*jff@Bu;}&M`Zn)nxafg&}kqAN}ESx2Ak&tqyfMt-q z{4D#<|5o>Em(_aa`pwh0?{0VQDGQU#3)h!UlRF(&0}@Qs;lT-b1mH0!h}Y5E3uwn6^I;o8{SbBj$p$G7A_V(JJUqF%x!VNh>~`r@!6?|| z{m4NMAm@QQ{k3=o-2FTK0JQPU?{g;G9x?#H;^ABPI{0RB_aGfXfD-+XM$n<1gv{Fo zZU*WAPR;?nm0$wcaG}4DtMADDkni4X0D5-2{%K!XzUmM_KRGasE&t#g5>%jvSOzqJ zcLV}%PGM*5kMf1hh^G#KO+c8tfm79!MVTluxqVF;24bA zf<1I$Cz{j6Jf@@;x;ZwSoi(5sguScf2MwoQNAtYaS%APITHp9YKufB+H=`49~NsD}pl-d;^OtL+X%$NAox@CGL)zkdlF z+6VAHMljG1=f(I4JUaq=1OP^;y*1F!`@4BB`^VT6bbatT5dfP3I0VWa|4IVG`fprJ zw~zX`a{wp81g{>z{q4(L3TdLA8$)^0n*X5x9(`PAZe?g8##$RMG?ERdP76Xa( z0Kn>B{5B8;M6+n`!>_;cze&2719#P5eUiVs(B$o$Y`-o||F1uI_HoR^;}>=SNiBB} zC4fwZVwn6tTq~GgVlCNv+~J*%zMe`51SN<@1#ps#4iC`wKp=mI@GPWb&UHEPpnBJD z;SB!vgPi4T0)=|GX;`N>8$mO`z`qmW%c)n)$B%>1;}d-Jaac;W;XCW@rU41gpC)+G za@y%zId`8RA%OTQ>Tzgz&ld@4g#Do?lycnn4|_9UkM;rv5GK%o>E;B_# z0QhsbPg_VpKpr{wtv$fM^|95R71WJ!doO~l_p~4FYlC~|dfMG-l4&)bcT3g#u_q}_ 
zD*4(LLdfahXJ=%-+5#YI9Ferc^Q04zpX+MP7k4SixJu}OA#VD*Bzcpc&&1bA?$lqdviH}rY@J>+OqTE4ccV|x#y|zm>cP2OqufU1OrA*G%#HP z&spweDp=h9e0!Nr58+?DZc?z8y z>g{*29+P$agJB@cp3aGsiU&urPSjsG$KppZy;HJ*ZW;ZS%4<$}6+?*liy|E}n^&kr zbo_PIVJ`B?y^5&4>vnr60A6lMQPM!gJLN1;Lfa2@T8cA$WMh>OC1McJ?w5UM;migW zU!tnjJh$PJ+U)PNPH`eB(>JLyW+5XV=hKQ*ciTzi9<;DqQn&)+GMBq2Yevhk6nFA3d zrNG|mqRN2kN2ZXlni(+i4qK3QxR!J_z}hi2gt|&sld$wqheCY9(YQ-At@7kj#qlJi z%bU4_;m%s9C|lTiT1$ITy3!f#d+>D~(Tdi&Zmhcf-Px52$g$R1uB{EJCg@(0x@Zs4 zJKq)`$90z=Ad-)G*uaFVO5Z%??sL?oqha^Ajry4Bm)~!5S3?POn-)N&Fk(`|2NM*p zFNf6~c&QB&u=JN73Id+CBf-O@>;VNIJxba8QSu^ue2{Aih$X{>IUW_u>h4e@W{ESE zEBrOeFQVu7{6#+Y)_A;E<67@YnzvDQ`71u1G^ih_8B=^nFKUN6V~m}sRbSL%C~r@u zzVUVH<7bK6;ePGy)ofwr4IROxWRE9%Qg?>O@PXTcqHwx7fEvUZHl+D<_nXb_I-Fjh z(wDp`O!7OVd8={xoe1601^1}l5%0m%Qr3%A zT+tO_YDuYQ+l1EIYyN3sg?4RL5%g6^T=D?@XF8ukcRtb2NfBES2cnM^`Kaj+J{@HK0d8bfdYbhi%VyYTNk&PGwF0CV;oNvb9tF} zg?Knrn5vjS$~r;K zvX~urz82}a-GTvNzuPw3Hoy6OxyD_sRH{jP`b=#ek`-=Q?vP+M%aDa&!mQF?>+iDo z^+*@%Ca2*>6Si*U$~h%D+Vo8rn}wG{6o#J9bXAv9BA6$<97KbouvC0nME9RM^&*2G zuBdEh5@PdNGp?d2pIlsVw>JUfyz|!goo<+(*c#t(?+?BY>R+q}!*4TpId(?yArs#= zYm*Opezu-23krqO^RBpXgFj)Cc2IBEbf1=TIr`FZRC{$!K5G>$g4CBpe%f{9Rr(oX>iHEhn)uP?8^$ z9EC0rC&QjcsjNQl(s)==vC(IAh~TTi#~#5Lv|UG>*dfWCAALGsr_WKR;UBi>7b|1T zmSaQy(K$vXsN~8It{f#~cd2t@`3XQU-4n}q?LiZVo2zt?$G9E1PCF$#%^_`%_yUpoq#289Z8nZ68$hfwtFNMNFyvkP<)=L8?P||r=mmLYIP2lN zM88fMeGavT#N~-|bQ!9OF&V+d$whwS^2v;@9oBlsACuDi35R5Uytbq3k(I?S-S)mG7(><8kOJAu;*AR;*s2!G3w9VydXp#t3#z_66xzOUmr|H_l?ex}qV z$oVd6oE>1;SMl($n1zf@IAY(TD~9!}_~u(cbA=|NWKX%~yR25Fz|K{7A9QW^EN>DO zb{NFMvGSqeSoH$EHj?Gzw>+m9YBTg8KD4P_?5+7hm6f7YoAfOYS6^G^8l%Zgw1A7O00Y5{GfghAG@a zr`|9l9~qrrIrAB^S}h@}s-W72rzzfq>|{uan1^Zlhdb|qf*~L_lPK&HO3f%9UB*x7 ze$EtN?ncKTE_{Gy&N*Rs-iq1n!#&9;i-RGly^k-kU^bewx)+mmp2gpf*sptUW;M2c zIwDN%NTWOr{nl(5*b{W_9?X|e<_>a_)zE_+LE_d(?AnGzy0jUUzHwp zAlkN7b5PB3M3K<{cp!*<;Z3OUYb1Bk!A@8twrmDj#V+FnsC(=bR7Zk?MLoW3z?o{I z9@$$Z$Y8Z`%6d-`bjiIZwO;BdkFeJqAS^V=gjgf5;alnKhiaVtJKp&G#xc}W7BRxb 
zEAz&L+*?+%IsaJ#5b_~WYNoIDkBk!TC-Ct4JudpZCfv$u&-EFXRNDnAP;?|_vZ#v9 z6f8`6q$o@SylD8iYZOraRHBg-3D^6_iU)$aCbo6)tKzPUW((_0AA1$ae)1jpp+X0U zP7-aflCX(W{!fGAe=LZ^WmV<`r_SN%c2+VQ>Dzb@R$3Vpi{Wa)tDSREYU^Y&eV|#1 z`T9B{^ExglzEEvSP}&7u`RsLaPsk(&wswz+O5?oGmze*`maH2yjdH-9ls0tJv0OTw zAzOuSh}OB6cSd@=!1m^^oko z$l1zepT=b4l4(lIn~~%BH=g9a7sHr;Jf}5>2xVdh6=dlWjNWQD-DiV0Oh}L7x-9Rkr{KWfmVGt8WeCWWk@`IF5<8&y!mWJ;KP0k2tr8i-(+(VfY{*%FuUmOz6gF zd(qEgH+DRuzQfGb1e|vTyW)vK!$%LF=C$*u;_eN%v7JFR`W57f#n+W?21g{KA)P!% zAXdKa;8e2_cICp`v=R`Izcxv^Uy}PaGF^=NM!V^T;UB|B>>ZxJ!1GD?OoK_QFYtZ) zJnpgP{l!qr#%v6R6Xy68%^=pRHE}^!Y}5`PdrOG0ZvYmQ`U(@hT#dI7FZuK{S^QX) z%+gZpW7XfJ!oRG$seqJbLF(sGo|c8oJNP8KnZ5%m@~hz% zM2aywjB#I-mu)m^Rm3C9Iod?e`16$|hp`GbX?1f{!Y&sZ?JBAs47pfvot@kH_)h4d z+?z8pcT6av%!l1{eZ0xrP0+cw@vtSlbzh{0y%wv~ zgS`!h+pMp#K(v~{$v4A-RH)#qbb1Ugm0LkXeVC6iWy74F&e#2v|6s?w`W&IMC3b9) zpzbk7Tm%=lqB#MhFCNbX z9?Jz;>{T0il=k(~RINhOG$~e?oGAc zp{cJb`(fBoCz|DdX(&?aF#q$ix%$L#D-d8_YQ#JG*j0UdYC47Sa*19$pz#eGn}d6o zkBL0QqW2a=vVaRw6P?rc<268K@g zkz0SCU!L=s*FrgmK5@0|1^pa(8l~nvf@3=JMa6{`jG8INh7{%#X}fNdL$Q^9s;{o? 
zX)x|o@IMm{26qVihe9Nd&(gM7;YM847Xc^X_Qm~=KELxnbk>zFBD!47Rm$IbMYS0` zQOvG4Yjd8#Rwk0M>DiB|J1W01)`Qm#; zgzH4J>S26bWV_k4i3De1kV%4{YX*)l3v(3H+7_l@S#863N)`QPVC2t0jwkH!vu|`K zy(~{OZHimY>!~cAVzaKmE2?lsaM|o!?&y+lLcjhog6=_{!J8 z)A$zDUK$e~c`IT)^*BeyYsS*mYnamr^buCSHn+;!&d&`Uvj=N<$WZ!Rb%GLjkZJY~ z*%8?>c>L^!Y>3inXTLgxgCq?B!<3iP;k zX|@qQ#ezOmYMP9rZ1nD{ilJWCp(Ea{?UH9EObM9*s^`;^x9NP0sK5qHTp6<#9=wQu z9*?Dgz*Odyt*LUG?e6g%pUrCjFj2775UhBQD~bEYuj{JJvFfd|2((VBCHgyMi;hx@ zjJxl?idN?xWVa7wz$#S>6$3SKsz+?zE$kQbAlVJlza5o=eLtsE~NB7*B^{EnA=jgA=p9_RR$ z!9EQu!Efskz}8lo9@|N%q$c}}FpW}z@liAy>Z%P4ji$IO zSseH;d*1PM>rgHjoq$jj)$b1ib8W;tex7g*{ zGNTF|Xmk@md&H@l(b$G8<;PPt3jUal8L4r7IPSNmW&QZRbIRIB4|IDHDYg2cN?^J0yLvo=HK?rb zUr-0)28Bf((W49&)O`&xRXYE2?HzJh8<`&Xkp$JsILlM4=VdKjfQ|7%uB1?FVweSB zry~Wdb08(eORjMLBrReSuyX=QJi@lq5;LL*VNb^st{&QtoCHbqdtNS)T^9kJTc@K0#~fH~or?5yb$}y; zno|IYI&igDVCq|RNydyF!n`f<%m&Xm4TJe^2FJ*U_8LO|GVGG<%5qjC1a^*hc-ZD8 zcPR~;Lv91%(Ak2}%V`?W^dA$h%XB(M(_S#~dhg9+N5astKmzo2`~8$OKQ{HSH9<%L z&~B{v24iwuQwTX(K6@(d*gDsBw@~0o;^bHThntiF^Nnv`oXV%U?ir-5~W_@<#!T1iT)i2^58w)1O61_1O2b%P`Z{bpFyM2rk#6etu*M~~HPy2BwcB!l9WPB@ySY&n?eA)3P zEYg*I?4ld;9{(`CNbJi@N#WMu6L1-!ob{!+e1@!)c-WVAf>f0l7PeNhw4Qm9`u2)gm^w z`^Nr<4WK0N-u)O3yaf{7@#}TOuEwtob=>hIf*kRb+`%8)joB{<(Uit@@=V*=+9v8! 
zpP25XPj6F{SQf1k0Y- z=O>3fzg&~YQy6IFaZui-MDBnDi{S6e?T{?;Dc}Oi^*8FfBG9fCmA!T9^%s4fDSq%F zX*k4QX0(|mU*QWWtIO;B4G>?>EW9A)?){K!6{czLjt3dp>f2PG-Lyt6z!}rFA84|y zquGF<8usOv$FZ{2wiER2eAN$yVS^%Z=xoGR$?fk?EIE;_O%lyf#-7z~*y1(o2m5sx zvOL$uuv4!Oj)X_P+n2#3aHnM>5E;9-WpeU& zX9@%IM;Dz~4BAX{)%HEo$KK91t%w9)lm0Z3=GN_YUcs z1fQWjdbWz-)sc3CBk1xU;3+SL>pzp1Z2weVa&Z2)ykuu#W%_6Gl7oen{v3k-7#O`v=3z0zxwVEv=$_0Irj(gwy z-hTdCe*I^(n9c8gu6zlCq;mFLhJknYE&x2m zDS*JhI02glB(%$ky~G*D0rTO(hw>%r{8RQ5TSbX-NI^k(czE6gV{5-1LSkUp>HC30 zn?cV7bqgTeDM0R{7zD77A%2FU^8#=U44|EVj@H7oiFpMeC;)C80s;%G@ zJYbfO#l46;0SU;*=?TUMP-ADoJAw`12&h^9O<)2j=%E7$A%92ZQeyx*i*-JB4dVDI zqP(ku`A$yCA&9mY5idhtNZnWQlVSitiQ#vp_2*@^xCeQ2e0-Z~3}q9n_DKkAXM@7z z7|_`%u$J@@SfCH^^I=mU5sJcyEbE6{fvp}^8uYg zGN6iB%R>;nUp>8DVruCjv?m3x@!#*gQ5zUk*VI+bK7~E(F@l1&llO!7kmuE>69`CX zs6gBU^xXfrqv9f5*U^FeNvb+Gh60Cx{JBK&mHxS0|E2-7|6;+}_4h?t4lPvv1E~EM zu_GftodDy9^!bnWWsmZY{U3jokAFxX{_G$w&JORmr=Pfg{DTDI>A`FI0L>OV@#lc^ zK}BQz7kvft`Tn75&`^OLoWJfWu#iIVLETBPTGKv4h2KGi{@)Dh1k=b+3xR|Kt^OoW zWA^XX%Qyyd3b^gm*K1orvq&i4br@c~{jcY5L#Q=V{Dkq;UhDCFbq{H&avCx?|JHP<2XbB|u5DyNnh%Zw^zvO}69CYMj1wUTn9unIRDA#uq`yW(2Zj*; zck*c*0Stoj+e(5503z_G0S^eI=C3Ve@ee40KgNHA00PbaEenF{;tJHZ{sDx50s`gs z&G^5-y9pD_gZNLu$=>q6-id=iy@Uioo|+#RR2Xz+?zcWsd2+$`#xPoL4=pG= zoxYFrolr)tfc#WXgTkUX^r}Nx*sWF6>^tTDTnxGA&9%t5>J^sx$n@J{lU22EWGPQy zsPmaW(=?IDVtC3YqGFInA^Ku)!6dlv?WX=?{rRFPiRY~PBE$P+K=2prU4XY(w(H1t zB}+zD$_uinhn3f)X(fLkbe)mad^XeAX5K4V(<9Q2D=O4w3{)^k{A4F3uwY=sy9Y)$ z0r2uR5LH$eD8zh*N{DhXcf+?X zPKjHH1lE>w#?DDWX6D$70W1`>9aV8VnsN-xk8~%BNj0Ak#SWb2h#SPJZGcN8T1ET+mRZRu7N(p zVM0|>_fQ*S!XNWsm%u}-{l{Rt{OHU6KX?^?rQdQg;5%<0CuKfogn(I`Bl&P9ru3)K zLcxP)(1a9D@@LiBvA0I33c_>8&w2UQCh1%NR$_1D*_k!bThqJ$E(Iq3@G1x)n zrHR~VdN(z25v{pyk##u_+$CM8(w&;2)1isP)!@YOb}sMS!LjO^8bLS&ZcumEr`|RT zi@0x`lC(-Y_|=5eghP%X3f065w&eH*RmN9WSF( zcmZ1f+E1a)6SqUtKT<_vVAUek@Fj}NsTvlUXGA7PhYxmQYdukMxtN8t3*`1vNWDG} zMXaMw4KC+ZFBXKHggW@1la~$^nK~CrF_0?|CWqX&ZG21yd-ar%RC>H;d)nI?jaJ~nhHYijQEsnUZAg=3)kWo14~if6Vr)-RQMs4NOks)qaW9vQYsHL 
zVry1mX7f5Ct;tvIrOJtb2cSpNk5MYrkrwnrdWv;Juh@j}R&uy9@~qy5bX&aAh&Ry6 zT#=7lvo!Fe&_p$}yWA7Yq(P$@&US`o#qeaHuxnKnS)j)65Y~4?JTE;gPCf0#d#*n> zrc$|H-=|inQnNh?_HZ;$@M5Ft6=yyeM?ltRi9MbP_z{%HhBef#8KA;Wq~Be${QCM` z%ghH;!G+k5P|7$kT7}phf;*CCV1Jyv0_RU8-8>|6d;jrJ9V)&Z>aHW;%E6xVQR~WW zAY1cG&DmuIQ~(%?%|rg}y$u#bzY{Nx?Rz(ZT*-4fpsHI9`i!i>DcSPNRO5N(o^qOzHJfyo#KoTpQa}r z?27|$EwXSVn^}Vzi~XYMOnwKqLgnK--~CfgaXcHDp_p?|h~AY(wu5ck0$#^I#ln-Z za$T?~0%w@JZd=;ht(ZY_n5#Ee>WV;oDO-lHvx7J%yrFTmt^;Tgg49os%jHXB>JVCJC>A=AAL&?@zQ_Vx zYx$OSvHgA3KbU1#>>SGPZuxl48fylUW);OdlqepKshzeo-ZQ2k@)1N3m!`;Ubm=f1 zO7i#s!xj~FP9J^%Gr`yqvf`MM8^-quz|f+?Pd(T?!Iqt{JBMh946K6 zXMx8lS1G%EGz^JxR~~Bv+16*Teb8=J`>iEEwBv0HMSJ?hoLOnRL1~usQ$GoIXH>$l zsowliAdmdTp?b2e_QnZ&W6Be&=ei5b{uoT#Mo}0EQo@(U%Qr`1$@No&4;HJ>?h-pZ zw*of^?GbzCWo}Fv^)x7{s{Y(X&QabcKdJoyK1~+Vn0f>(xK?(EWthZ7H9+y>c=@R` z7~MjrA$&4f_SLhpUeOADqetBBXzR5%!MEnGM_Wwa8rUsQ5LdwZPQkiFwRiIbGm=;8 z=EL;baV$wY<#iIRnpe!KTJgYJQ|lLLY?@J}{za&|s7%{aBo0>X#yX_!#>UEl z((EL#mC-Kvz~y@*TQ5)WafIg8M{?atE^`*SR@Zr~iK?JnOd*_GN~j3h6vzXHLo;+b zjMt0F4D%GTVef^_L<}ra+H@fwdemlzds40ssT(uO%%O}4YQy9iSd|9W_<$-;)J@-N zRgPw1xhH>z&c@V(5I+-|eXkqACgRy;x(3r|YZ+UP^LQvVhWi+T-Rt>;=O4?fDzo=6 z(nhF%ajOtH77~uGUJ!vUv$5SXoPC2UUwNhWTEHqwm%Qd&fv^ZYmuq*EB;^=miDjKB zoU&D|<*m?HoVZ$M8o~{wtc&uBEFU&$N4|MBzUl2FDKsti`g%AMuMelTj@l5eTw@Ur zV0e7pxDpkoV9wl~S_z{4kefCC6TTyKixb>HHUqT+Q?3GcW8$6Tqwn%?idRssYk@(W zOW4aBA#do`^YmT86yBs+DP12@8TfvzdJmiy{sVD-+!G4w?Mj=44~jA|1_aFuUwVpK zux!AiLc87;d?P#V#W~N+NrPv!fD32p<7?kf0bJLK&8NBb7$h`+jacIV3M zb?9@XyQ`n<>UP~(=EcrPA6nKlY91HkrqIx!^F4i&u$N0hJ?X(T=nM_noKWvRlYu@@ zC*`J6PZM>(_Wp?TsYGOm7oCDIYPf=_Eb&FczeCYX%R1nj=gpLXJCIQtYwCkILCayNVpQ>Ufd*A^w2kf9l1_1Ezp-;zEMU(8nJ!&0p6W;Yafw!BnHVLqgD_~$$B`z)rLvEA~eDob)ir9g^piNnD* z@pSL&2RAX5)>U5Y&RTd^eor+CG@;j1kP<>~b_V5341LSf$$sif8jWhrC6V-cF3nvT z;4`P(XW|{Br??2gY1}#6F5PlFUc3kR)U%Br-G8bcnlDu|Yay*2{YiAs)|oCUefo|| z%je|_b6!!>o~Uza$xt2{d=jYul0ZrlA%PWoRn|z0{M6T7JfE3X&fh$5V%iV=MdHS+ zYOQI}wym*tTeEL%{3|k?XXP$4QguHq1l7Vc4(v@_PZmWl4){&D?5_{a=*J(gO;gTd 
zq16(p6NhkmjJU6eu2Mtl-nG)3Cfglo>Zv>_&Ln-~*VBaTr}Z^g5YO-{T011=DA(Ot z?>N?)wHEu2vARrGOWA2?5Ne_8D_e`+Pt+(8Re{{6T2ww>XrjU&xmL~Gy#+F|J&eInrUfJ;z+?r-uNZ*XV4n~#;F!NJ1^RnRG zQBLnV-6HS_XDqVz*|)tx%WhWsdGR}3t8p4`t$k3^)(>(Fo}Y#|@_65?@mU9zT`R7r zC-qxoIPDUJr}0dDxU4v^5IPZG||7`Q~6D~d#nmIVBR9A z;lgHp-Ctn;qbOX|q5fCpw-y;n=JQ&kuOLcO3Hd!~$~^k<+F)1gEXw&-gYCEq#{ExK z;@jJ2NM`MPlRDa__Gco5T&O-A>{n8LS(NlNt!qKh6%-(Xl*3fBB&uhqE5sp2V&MvN zaAC54+2*6TE+YhTKJQR4EcgdyUfeG5WcN}hfkO?qAeRHyq2owYmW)Z@ZLV`vyN4P1 zVM~FEKFFse=tI>ILcinotc0G!8zU^638vzA1u6P_>vlT1$QWL&YRK#^FTu<4-NxU- z$4Q>(hr4FYZqc(=Z4)B&d1wjS?VfQ<{BUY~(A6p_A{Irm`X~zQ4pQukhTQzYj#%qQ z-JSijcys_`ZU@@*+nBPrDbYo<5BDLCKuZseivyq&!;j@>nG9p1d*tnrDxpAAu;(DU zdbvzE+JjnY@S@PILoTusHZi!N-_a6?dv*m1qa>>Xi@G+nggI$4zg%-4>aMOi#zBL!-ncWFQDBE>n@`Qv&!>#Gozjm7p!T_G$9~%5>Awps2GArBDhIp_O%^v)nBNk@ zs@Cc>vs>Oln-q*Yx58Rz+N9}MX;R0B{Pc?N9$%zkS;VIDxa9DA%btB0cHce3+;U#P z*q7ghu(VsHfd~+a>s*2rx6yj5BvpF9DOzZEAM*JR0OoRl3X%6ah=OhJY4&y1##XB~ zINilqM&4?L%O>DMlr4k0MG4T~`^HMS;6x)v7?M`Iv!Kq~QIaeJPebqyz|}U~VlokY z4D-NWo|*EJoO4uLxqRNJdvcZDMjC7cczEVb>+`PLuCW;j+fmjfVLRz%k4x;PAW{~o zbn?P9ER4pa$}X zytY{;3=QnV_PPUlu=3bBE$wpC4~)E-*i_v75xHXwQ$@H5$qP(`4*HjJF}^t}?EI0W z@G2kMY%(U{6UxMfP{2+6qP2uyhiGn*%E``2CbBEUyv;pO+0JRi`@gm{8eXJEsoE@V z{P;0`hv^JZ7Tk71Ioe%0s}Q59%^SiE1CsJhl+v^MZjbY*Cssl?a$IPUn25%KnOhpr z%di5b(nVfox$|Rhu$Ryo_0^@1s6x>yOrK!3y>o=(xnS^)omyf~h@m&*Sr$|WZhXXB z$gHS!G}~{ZdLXyPxx^TQf{Yjo}dBWW9z6HTylC870;tDrcu}FYAXjm#U3j z0`56aqBB;U+;A5JdQ(DYUt^3UU=yVRO!?8bPrjEA z$~R~l%+f`w7-*R7fO}p^p;_Q`MblPJxFEHw6*ar0bB}Na;L>MUEDIg2&9hWEX&NYI zv!#G+RV&^V#N~Has+n5ErFAKbxz3q$;AL%j-RfV{e<>JwgT>kqT}vwUJw@ofy3V3$ zV=R>+7t?m|uzr;0zB4-+?ug887xTLE3^q-72)%@Sz~5c-#dd6qNbW_t_O=j#TSU# z&71kirmFw|p=cYqw?hix1yrUb%&%d@C$f))kw+w;$9ZRIN*YgDjds5Lu$XmW;HQoe zQKg?*#vJomqD*#i?tNpYlP1%o_GumtWypI=Y9hK9T~q zfD5YwwSMkYdS8u>4w@Of3cAMi9MQ40U0~BMK%?~0My1Ry0LyOR@JrMd~S)%`_wDv4>73#AIOx*R3D^_7A&Bv%d%XfjuL;#{j0alH;?560X zbz|k!{Kf~H-oy)oC|>KH^Hwuk6H35iz1-tOVEaK7Qk5BLJDxc&@_rs 
zqj~PA(HET-#`2@|4CQAN01;xKZgk`#=X~Y{az{I5(Aqyhx-r*#Jqp3V>E}!{o%DS> z^Pem)_=_oV*U!f#0^!^6O}0ob+^@3u^gcgR^5`~QI*oq&b47PP;3}LvIr+}OvXuTY zAoB;i_D)U3YPKiU8F^t}CKG(_oBD75Nvei53 z7eUq5!U6SbD*G*^PQ#(}?d^|pVU96c;8EJ-@iVo}1V3{ypHr)~)(P*%$o|2ub>&8O z2)yN=R`d$MKy7|?*jI_eH#HSHkTlTkI!u)q+!V{T2KfSBOIb$fPVxonX0BJE=jutL zD@7R!Ni^VhqWm+yEo(qlsv9xV*EUDqyAY}$K3V!_-?~GAkX#9@rStJG<0KE5jHo7r zZLCTa(&+)7NeC5>kJTrpr}K`989UP-kH;|e?+f;h`~29@OmAvi&_tc# z;RRrrQjn<$!F)6)HK+-BB0=(uDtLP_;)wCyidcG=g6(2T5-;v9-`>Vwjb!nF$CdR{ zNBP#v<_3-zDa~1_5^e7}x{Iuj%_+HBMhDFjOPy>o({Rg-km2@a{$1)AO3jDj8*EvDJB;^l#E2PYn$9whJ?YVNqB%zH z;&D0g@-Edi^Gau;<<776FkJu8OV6%IN%+0S?`*_8HI1kJ1T`L1j55#C-}I=3Gt{?V zcRflm+{j`B+=dLtG{5>^XBqDGhs(cui4Ut6? z>s4D5Y$wg%$ik=<5cjGSt==~h5_1@>>U#?OSR^(z1V8We;lGMiFvrTf_Bj~lsn#8L z`dwq$;@V$FiQmMa0D)T9Trtgs+amZfS{{J4Bfrg0je7f_Q(jh_7pd2|)LI}~^G_V2 zVwl)kmb0frJHLkNCykVeB=HIu6W;!hI5BwBa{GoJW!N1?)Z>>VH{7C|u}{vD9?Z1A z*sDkeDF&CW+y`VcVFta8k0SBqQanbrR4Rz~lnIH_(3axuOTOQ$ZVM^iOhf&*ywya@ ztO?H^mJ(z$z@1rc(WLfX-B;D*wsqK{sPoCXv42EqQLSFDiH_ZXol=CxK8{sz!r$xx z{a}$yEsuUG-v!#4+h-9P1s>c2fU$@;= z59DIBbD<$*iBcW)?y1IqD04Dp(A>_H}5LYNgAb5s%N zQ;b*=3)cElyngM8dIvt&;dQ2I+85WUYPyiK>Pv`*ptx>QecQ^Ca0){0-j!p>3?-W` z3*1iyt!lVU$d#9N>eX~)=U(MSN#aYR@1Nf(eT%%YeZKgX?Kbp2_zJ!;Ba7XgoLH(q@-7kiu&I!6eJ80`GLzfsA59Od5;c`| z-c!y_3~gGQ&%w5GP(e61VoL8Y9=l^E2X1DF(<`^Dq8fK({f@MRyw8=SuK@*er8}nQ zt0QC^D>;&%f|yZ8GF#wdwfz;Jn}l!f(da?Fa~P>U^mt*)WUs5BRb*~n6EFP%h?W5< zO_dM_bm3<*IdEZOt_s#ppc}Z0tANr8saFn_Y4Sb#^$=U>;Bh}1R7I@{ew{tEkki!I z=7ju~ag6J*7S}Uds|$`L`Lqt)h-!ykeh2&aM1Rx*sv~f9!(?aD4)emH;%O9V- z-<)qOxm(GPtTHp75$UpW@XphWy0OK%NfDw=kqe=BY3()`M5zw79ii8$qA~K&RrVKu z)0ijS82am9Bx&Ef^B(LnU9fyC141-(Y5Q@N2!ZF})dzN?=+9%~64?iBUK5RLq^xtt z=N}8%UQBgRk$%QCL0{Ko=58M?iSg`o61h$bS(6In#?G3A!o41yTOj;iy?ux%v%x#FK4iuD`2v-)A@8wAT=sy5m!Imo zXVt`M?5wG9^bRAYo}A_qXW`OTz#Y0&9=W>GmChku{GcJ*aSdroMfGl)QTP9l+>G_> zxT`glM5FM&l(=~T86;y};D(kz=g6y!)u2%!(#DqIt-x!m?VrY?KJ+ir@Myzv_NaYC z?Z_?7$fn!gzEpGTv52>v;3554uiWraH9CL$0|aZc==x_B7##n+eVqA!wvRJ1|I_wy 
zMvni-oM&(q<(oyiAPMD4sWI4sH4uo!;=iOq7q+P-ILV|J%S%J!D zs(;-zKRm!Cfj`xG_AkeKt<&AKCWn`KZkqdP_Q?iLjaL}Z6TBH$2Q+37N&sXaF`%%u zwUYw?ED9oMV6YemM+|}n>32p}V-h~l z2?z&Kf+N4GkTm>dCPMJ&(Quam1j#H6`1=+ItbF_mm|;7>e{5*on`pZM zLl6e&!&;<3-_)V4I@(D=q^n~qnaA(CK&47nuACIF19b! z;ob?INsN%Ed;e0(pO6Fn!rw>Efd&9HY7Y$&9dI8HV0&#>^IYANnBV>Vj^H>An_bW# zfO`|yVHg$=UW8-N(Z3iFBb^?GBIVjb@-;M?E%-@C^T`zpWtG=BGBD?A7j`i8dqj{d|kPD2Gge?a#&uKqDX zDS#cb1bEq3P~w4Wn!0~Lf`2XJ1H)!h$x z^WXOve@oa1fq|EKjN!UCXgX+^-#QFAgV)-2cnC1@X?{WsurjvmJJ#RE0}kGwCV10i z-pzxw`;Y=7faxXVQ}IY`-q|1^K!j`|-2L9Z3Rnop!g&oHsDLa^1P2lC#9ytao2`KO zSNb)3X&4{~!+3`1C;K^mxW>1~=V8~!Ec|x5U(>%-Zk$`_Hu9Vtp)G0AcAgjyR~`Df zbK?3}xl9N0t92QTeccLH##U|5X*13h26}h7CMnZa=t85L#nPgE$Lv0i9{l=EhpGkn zXNCRL$cTx1$TjG3C~zaV`#5Y$t{7Bd<@h z(?IyM7pasjtb{rjt*t9+=N3CA$#fr0#{S;=D^{&wzp^7L4M%CHzjOo4tcs+WQpz1K zq0CEV@s8)#KKI!GK^lx#&f4rU9Tmo>bVvS^l zZi-fexfJ(Aj#$#WozwUSg#Qa==MW_TuwB8iZQHhO+qP}nwr$TJ+qP}nc6fr5ERs#{ zdtLQzP3t%r=i(>$l@)vF=KNGnW8x-x5&_-7pwH1{7(}~8K|3VqDKJ7<`ZD6~@QZn~ zNVhNdMUhzJTOQ1U?l}ADbfDv~5ZGhfC8=856B1ZxLF5-vUKX9cG=L}L_9%p!NPV5U zr{9{kh)H2_1B)NJKhF&}GrfJ=X!>MO)srYfFnCpV)hSPSqq~rJkz$e2z!Mfl4L?(5 zM#$zIW!m|~{OXC$AaTDW>Ln?AW?4(+brEU#)G-yvk<*b@d|iSp2*OUeMrva`a$9}C zP2&N&Q9HHqDP@PF?864}97w~bL?*}CH#~h7lY!#(S!O<<6o>>)n!1z`Kd~7xGV16H zb3)DxziDSYRGjzzdBp4gJu4iWDyp427q+r(kw>Xa5ViGX6Ks46pRy&x%U?rOlUcg% zdrdoK(Tb40et;n!O3fZ3spikYMM3ZU@S~Yo-yByQgKEpxne}*`xTPHRHdX;f7-$S+DO`gDa@z@q*yW zf(3_}+_EF$yG@FoE-^XDxO{$ou97>&d6yFIJ&EV@=F}Xvx2F|LPpG|(j18YrY8)NR z@kkqGcX*wsPsPaUq(PMfu;CuJ^1|GkbP*^Ao2h4GvC+(3U zo+ocUp(+~AM2O2C#D?2hYeSd)2E~lXS=Z1-KBi+v-NwRu@1>82DC@p*;AosbD0f^f za7r&MWk$xkM3U_A=v4Q6Cv?rG3?DJQ{sTvtAh@Q&yk;7~PwV8_k|lb6$D&6zPD7hw z-!6JbT{a@l^7q5qY*hKml14T{3Dgx;MYqz=UI_h~$u&1k#iKRJ)JR}_h)u0U~K{?Hesj+n%&$X$bh4j^+`^Hm7u$P`u*y(aaCb1 z7L21pv0JMB+2+QGOxl}Z&Fb{B$}s{SI6RN>2H%TIhHWrRi(_1X{12=Bh9BSBCc8v|M)2Y+kNO$ttbD4C&A4gxJWm{-W)R z#r=5=Y>Eqeyj~+`#B-c!>EprWfJe)YHjxli(Oi@26yGy+?Ay<4^em?sULzQX8fd7~ z7Xt>k)tgAf4?&q?`wK!0zk2sihj&9>69#XAZask7=+i*4?{&4?URa#4M62heY&Y+- 
zQ~q7vDCNL;!e_)nhl)ZL3VG^@B@}z)E9#%8y6|m=H2K^SiL^{c8}_{e;{k(N zFPG%T+1J{omna9l8fY(NAKMmk=htQTa2N4dpQfC18|wgK^NJIm%(HJEd~p&DD+WZ} z@-lrzQ;BspeQUwAsR^zM%qGGK2bAaEK~R#VWa#6eOp95H(pmUDTCk&zMU_Z~*JH?F zIf7ZmI^Oxw#*t{k2vJIMEcK;B9bE!dL*-Q)Y7SN_n_HR&uxIg;k^nYy)3?7ndyS zfx?W5oO?X=_kg5F%X9qQsR}52Dy6G*=?UmMJ-_VfrMAR*!E#HtrsrZ+L7j&uD^2@v zo_x@|Q?TQ#hxdVBW#Be4r(kGM|KyY@q|wMhq2b^UsTg8s>JC5V(0R6dy=HBB)_HfT z{)tLH1`1oib^c5O=`8KIYAZ4<1V0q|N9-t_H1o*h`)akKd$W)PPb~`b0Cm1-<&{9PRIk z3f$RZD)3&qI+WYt5=64IW-sy=s#r%JON*XN`)5_`4q8Lx9}hD3$269X31XTJ$8tcH=z1oPSHV3}oT88c@Q_VYsM)nKQ# zFBp5fCi?|T{nP5)TEjN~oL63$?~?sbq(zvId$w#8>h#pQycZ~Z^`?W^rk7xJ4e}a8 zxkPU|mwWWHDXWSy5g$>cr+Jc>pZgqb`nb>c!j|B&t3c_SgqT2Dd1P(dp;S|d8yo%SOi1*8Tym49vLAe-HI6 zYS*Eql9XsNLUU?^8p%hCA-FEFjbnE3OdUD!OngHA$t>z&tag9mqx*4kfA`T^KMRd2 zHXRrDW-jw|E+;u9?zX6$>C2wUBH2!Zi5c4Lbb%s@)vlDU^d9C#a+a=Tm#0khQ8wg? zsh>MpRg&3uG$a$=Th$SIW{b+(B62F9xSiJEKkQuwqE2VrA%6H?+wC zHO{rm`j$#E1HNKyz8Drpz?x5F-_TY9Y#*7|YXd^CFkEN?$z6}6I^lKzT~PRsx$l6_ zTDk0jlze12S0mgImmQk3vZeR6N9`LT*1hdgr;X+@%v|lmPqf_016B9{6w$Cv{T3FNG5*f!jwA#IVni&U&{e|_4c59XMok}8) zh3J~Hh&*`R&uP{*;kyK(k+%aoVUrP0)%KEQKE1bqnp1?esmNo{;9?Kc_xqMTP)jI!tbxCc-TS*;~qV#ZZ1-l6CKC?Mq z+5h9CDUXW|zb>VxU=W15rMhh~{sgdK&Ge#6NJX{j=Ll^iLsC>J92>jm}}R$NP?aKOSKWIa(4h6%o6Kbf$5gk5*QIE@r3w zDe4j$b~V(;Ht4l@_@%n^7`0g%69{S69nIPwA9n zywaI%55I8J;W|8=zKxT<+NISRNhL{qF0k&2;wOtxD*8g-hodgvv6GGHZ0D0LZztEa zC~TEM2FI5xZFYfz2KsBCiH@Nq2A||2=au0Lgh^zHrixQxu=06=`uGRr2}+TRug5Uw z1L*`Q)Vbfq@0dY+E6+YkwcQ=6-UVx0V?nr%JJ9*5xM&{}eX^M)$8b($H0c!v2Ka#S zR6p7>y7&*v;^d2XdaJ)76I@$JGibr9P0_2hAYwwgnK2a_%+;2Mx-)iu#=`cBy*fiz zYRCx;A#|N^G)h}*T9FJFFFijT7LBeUD|Cr`N7No(m+s-i@CntRI!#*(x3mGPlwuiy zHf9&=iABYa$t4xOKJ5>amBU!#(9tt2=|VoW+(vOB3SHPK;^U$@$E>?K9Eg_cDMck{wD7#|A)mxOM{tb&86c^ z%SHl5lr~`uaT3XJ-MSA}&rXq+P7gf?TR+{)|Loj)Q?2KnaP-bu8+ola6t%S5UbruQ zWH}AG__E1I_a(1l%kktpc%Jq7^Pm{sR-UVLOy^{hli@P0we|6GU|3!SWO0S>$iKv} zvB|gl8xxhuiwdsHj&|$BqyCR*VUyRqy?lxq#yEA=fFg@{xnF`i`*@{HqIu&V^poiR zqz9daq7h^4?pq65pGo$GjuNGtrOgfD28w{d`JQeLoyVLn(`sk24fzFzfA+YJ-!4^J 
z!dk<_d?Ng#QH`v%p+`#cN-kYZFvG7}_%Qx+ z+e!lBlBp$TfCrdwoE4yk=^~+8Tp+ifcO6?CCTukry}GD+U;9wrweI*cNAS=wNsxJ8 zD2XRkoy^E3DWJAnp>hSL_a`;SE806Yqgi-7WE&TslPc<3rHWekVZTV`z8&-{wVoVN zptyTj^f$f2C{>13Z$)kh@x|QEhJ<^l@V+}!6jrW&Jd7cU_XI$U{w-RX6>W^SADe=; z70_0+Vtue%FJ5BmC*d#>F;a6Hzew2|*J`s*15Fv8grHZ+aCSi@AugOwDBKiwF+pzP zwZ@Ky#VoKg-wh2T4aSqn{#Gl1@ur-P_6~VBlEY`X@oQr~ni=?wCKZcPmurHMJ>*iA zDwLDgua=v%Q92tvBN@zuNrEp~(b6%nOKX>{n6ou@-rzQK2P}u) zShw%7Y@!)kkd3k*$qu^t$**7$m*%bBF?K>re1UBpI)zw2BO$s{_ZucLmpjF{uM0NP zv=;u3qc85|&gRJ;j3sfV??SmKMk<}*;eM8s#SI5y_~4|Cx1@QR7jlTbEwav)8#%TC z3XdZq=GnUtXHFDr6Tci^Vcdm4ABEUTs@3!CyunFu?!F%sJJRSt#kFDybX(djWWGQo&^@hyZ06#&vCljQd~`811O?=h^zrc=8?{zt9b-giR^?Gvorsm2n=gXpAz<_r*eqMgI{> zaJZsI*i8P2oz4LeJAb7-p27V-HXP$!LRDCk%_@<#iH9!dXHxQ=kcq>~M&A56_)t>T zVi30Ay}OuQTYJ}GYR$_PGNYN^NSTvqZ-ZnwrwWo-iI59s>K@;5JCx*5H$I!RG^F(R zOABB3Z711~rEt2qKC9SWTgweT&beqamJhL8K3(LW;rkY<`tDC(;|b3?w>q}kxnL&oboHr0(Q8QyD3H`koCQ$WJ`FKCDyu# zr5?dM6$-O2x+9ZLU!3lE|E7EvHsEsNG~}L6(6=jJSNg}%e2s%+p}Vw`vqlsbS(jym z4I_fcVLs&vfw#Hw(vnOdQF*)gTy_8M$n8*W-b#nU>u^o=YxVJg`Pgci~?n3MBB*ze)er@+FB2}%&i^Wfj-p!MZ z(OETZ7lwIZ1J#8%ODIyrAGhK+-X#8&AS z3N@Lb0x5s{?a{{KHsrH6JM1pG_`HTSY3`1Y~{y-`ce)xuhZgMeyVg9 zto&K4Gn8lA;&q!2!!fb?PTxCIzc7;Uiuc)Q+%l*u(>9Sf;iiN?y5`5M^WGG!V_Bwu zH55H*4u=-A9PjT1d#~+i;9U1HNjLK7@X3p5`gcA(FOF8X6!~{3sHh&dkhxfz5!Yem zjstA`z1^jJKD-$!6z+u3OzEZaCHoU*{bby%B0Zx-Qia@?1suY<>DT88{2QeqA5X}g z=>!5DJ`;Qi#0lmZnz-u+Bv$lv+2s}%7>62zpAAwC_r0|)es(MlQuZ#Gji1-L-{G=% zomdFk7bO(6T7m><;?MiU0@6CCbDg!|(F-fsux#0tz31H}+bNP*srN2ltAitEusni( zphvie9u29_z^!cUEe8T+lXrn$PTP>1RFq3F{>vUi1T4xft}6@MJ?{Y+|HW}*r6B?G zKm6E_25Ba=+8|zLWg-F8Dv-dg=a- z-g`j3Qwff)n5Dp$D!;m1ajRTrby(%F=81FRFMoW69tj7YNDB@V`Mw*8dutW#?r7Um+wX1M~kYHoJVYO20!00Rh2OLcr=G;U4Z1IW!F* zJdGe6?7~j56SPQ3N~lXjSj)lzL9w{_5L%9YcZ|9ZECqzp(y~GdAW(pSk3b%EwGDu2?iXz4`7S1h<6#&iwNTcgh9~HK(Tnn zFXSM#L7YgJ^!=@;r{_ap?#>sIoeP5w-wz$u49*kSDZIl|h|qr*1jt2Tex5)19 z_R8?a?*$w}y9j*@23mkB$H0Jc8~1u2AY5Pv@O%OQ>`EI@2aWRkuM86Sy^9?HkT1|b 
z{A=sWodDyT-W&lV#=#A6z}LWmJAif?5^7#`;ZWF%*b{(&t6koARwxn4WdEggc*m`ocZ(yKktVs|F^Vf+#1q(4mBT;qE)dz)pdo z^M>!+pX>@k++*P97tO}dA>!tahyai7Tg)M&8eB#9!X83P^f`VOE(#<9U}Pj@I22Gp z9bg&TX6}!>yD%R5nZCH+p0zUwfFC^sx*ts!gayD1TJ#U`qa&E75TGO*oTdNHANyCC z2nhi|z`%f{AIu6o82JYow>E6^H*sOeJ=hbNCcxqr5+I<@|Bp|z@B&n@KiB6s@Q+pB z&)C@&*c9}ZAN6mmtRUbX?EL{s3cv$YGynjwAOk`G#uws!j=_syea{l$??E+ui)bFt ze^K=9LZ8z6cLrd#uPqq*K7YUk!D3SgFz4?d+ej2ZAjSMZe|tKAQzw674||Hg`ZRxc zAu2pMIloNKzoviu(*))j$#Z)EEY`dU=dkl)1#bYqdkV8H?GqA_PA$Yu6NPzE<1?(t5 zFTc^70DgQNK#^{M1=er?>qOfTe({<@F#YhKwV%Y1!0)!dw2=}3?+e*C-|@NtvNC?> zeWHqj0DU`msi72HknZFOT|fMaAPZ|Af26NXychfMgHOkTC71nb~@$~vLa{{&IV<8ryAXwN&L zox}Yoyv)S%36b~6RB64=ut_qrmuDzf{*lAfFq_yjM9C_lUhC~P+vE$%MWS6vInxY<9HMk8o@f!DNNY+> zyf>w?4{VUr92+FK!|FsF$_ROiNIGx;C6o^pUZZU zFg;msnbCG)cCNgmkJor;OP*B+H-yP`0IAw*Qms5-&t9F%O4@fC+?v{5PTMJZ3eWQO zX`g=UzV_C^gZ9AZnU=&90gUqX=I%-sK7pJxhj|K=#Y?kCejlmSj9I?|X=nhpU@kS6 ziRRoGF*}hw1*7utlMTPTu}Vu1-a+PrjdU!^^1LCnqz;|FoVyd(^!D45&8O<|hS+@U zmge_~zudA_`xBqTk(A{@$#1j?5~1cY%V!XoHnP~tvFW)9jo`-qGeM+YM-{XWF!LghG<> zp&eVD^te(a!sCroT?%+d<6p#1>zN+VK7l*~jRk0ggC50c_o7pkKiHk;)caF{QmQ-b z;RyvQJIx3qx*c;b%JcyK*>k-AMgz^%JS8OAml3rSv;S>sBWBZsT z(0GH9jLdW4?u{GW?Q#6@I?X9*Y<|hhsl_J8>%{q$iXx^P3F(L*{W0LGImLFPE{jhv z?8({%Uh1OTs4VRM@x9OyAP4ex)$tqnK_R|si(SG&99{#QG{W_E_>`SCnI4Im2T%Hp z$PE@mi{Yjmt>mZU;n{UINstUovivYxG^*>_1G^?+bJeCNzt%s+n*IL#$DuHrXn6JZ zWaYNLW7qZf;CWH7H#HA=WmC59F!}aHSU`(oe~@P{bfusrLLIy5`Y&&U^UIANo-&_HbQzwOn4D#ix)dXsF;*ITm<|DX9rYIs472{6$tVhQ zZ+Rxeub~1?fz{XWjj<>ZzLdf9k`ohRZA<-7m0%t3)O>50ZSPk{U~cV?f%`V+cVD@N;{(UCauTv_YYj888FjCG=M+ntY15m?c z0K?S`hXupOOyL-ncfO617-)uZ>(7M(Km1rViOhQ3+$SaIOJF8W`9Gf9l&O(0b?|#l zC|A$|(0ruZf7$+F>k#~?@|`5Vne|L$z*nH5{BtVXE6vBO-e$${i!j{%75E*lxs8-Y zoFKj*l{U!@-emBUYAX`wS0XV))WF!pJ0z}N?BUcC<5XjDq*JZ#;VsCDZO@L&W(XAR zns+u==DfV-SGnR7^Q}hM(qmc0et4pi+MR|xr&+Yd@B>}ggnT`bI|(*;omlUCinXNQ z(4AL?c9hQFS?fzcf>?v0S{;zphs;K;&bzymx9wJo@!FmP(wDKeOaDjnzEeQU_A;h4 z7=4zY_Itlwf8Eo+a>pcC$S<=f#hCflQt{d?TYKa4=qD8co?`P*D}O4U17>*T9HxA~ 
zvzNF}z~wMQZj)9@@*RYT)o7bOWz`_pLAcRtKh+CWl+gnxE0?L9!!jC6WdF20K-Mij zhWm*+mP$`ytSM57!dj0|XZaS{hEr`sMvK0(V|{c?F{kbEhY3C&bhMlc02w4+IwE%x ziwh}#ALvW~pGR6RqB0 z?{Jt3)kuQl0gmQpinolIga7NfaeKAp^;r3+{>g~ZJE-st7A=(LK2^3mqU+!OKfh=!34wMlC0i400|*P{^nbLrzT8CGUhZsAsb1aw7t6B^DO!Co@DJOA$#$d zeNo@A5`xc}ccD2WO}R*6>yKU&?P+ftJ+au{7bci+up^tKISxyGqMr_{BBPO{fAnW{ zqZr{+*5$0PgMJm(CD=1rH1g(m3X@J?I6s-tUOrILr-Ho!TzTU7tt`6h8)eQQxCFD+_qhkE8nIcqL8>v=3MW(CyN_$7 z1Wt1#SV0>_TZn7Jw4P*P4psMA)M32Hq02Q!vPhO^B(Q^f-vTMDcTz7e3S*P!J{ z^T3Fl&FxX`Ic zu9IYZVw=Qpn>XwAdwRrUZ7r1XQT?R+b++WBPfE`N7k9*3q%(#@`r+8IqU5jmt%7M7 z-$?I?PTsj#XU4ZK|1}=vu&j7WRUAB{_A)G+49j2MQx#2Bn8ljHN}?n6=sXd+8sT%b z(gV6iVHKuT*?fw;VyV*h3!h8Z;|$aO-adGC-1@j)EkOK~aP^C!-3Q%^CjGK8U*y#r zejCZLQyi*NjEdnCf@(OeG~5Mp5Scf*9Bog;!?RE!yrQA5=p@Zf-9O@vfaZY5V=7 zRB@4po+dMFiN&j8v@MEm9Xblp9HH01;r9!&pAdN6($*6_*^r-2{g6kx9oYq;^+W_; z=iMrZ=9>NclFMyhP?KK3n(!R;^X-hvQu)wRb+a*NMN3#ZmC9f(JS4taZ3FhL%!EWk zUgN%e_N_?0jq%yQd@@^J=x}1LH#^MC&jQ!e6#fl8GRgKtx`B>+!8SNSSCRhnjGB!r z6eY6?Y(>g@@j>fTJ+s+OIElPsPzvv zN2sYI>gr4NyGqolOc9R;^iQu@FYqN0fG+-0;l=(b8_E2Y+RVL&H3&mS7_=0s5_IrF zm3*Ts4~f2&JKr2t`+Wtl${{`}}; zDR>1K9yF7*6tteQySXkXB9qL2D6jp!8iR$9j#>+r`kZMe>ue}rnt$t`C}v?@j?9Id z$P!g>V^|%xY*)lV(tYdozan8C=?BqWSE^n~8MqMY5tpx2&k|5IiI~g(ggAPfjaE9> zIXRZ7hF4b(HA1G%ZH`09$*gXt)W#Bxa-_voW~v!&J|j06_h}H@E37dfsLz}aMK@v>26n_P45vy{wR&iLK)>nf z2>a=QMSOihO#5vxoSA--9 zMZ;B5;;}54%?}rx^GTP3B2M(6%9z%k(Zs6i-qA>s6T`Ohhsx3RtZ9fPwe&=qGlHuS z+gkhP8Il~vgo4u7An9>q!_n9p1=Qbh-?i90pAZPu}BekcvlKrq?LQ%AjsQVHIkpqNC2aV;}8Qt|MVH)8=n zmUu{9fBcup11{Msldi{c|>GQ_yV%J+`4p z@=2L^`BnTnvnfb?Xuf>Jag0M3w-I=@MlBZxX$TQTxt1|V*m&{rMhuL;%hB{+@*kbu zwgby=Kv*Z$j2u`-2e-G(*7S+{tEZE9y<0JAuXd*YZE&b(wLSD z;N=HM$~dwIPt1LieXZu_{t8zmJdZvn2k8btw?j^MFid$*YlBS}A%s(MD8&1NFp!DN zt^o%7h6~{k5&UpDA{ynzr%7e!BRSadRskkvwY#QxF@aTN^%!<&1f%(bR(!Mzj|wOU zUMVN7DOlZ~d2o7tOL zfx;iN!`Ka3VOxqCK9qxYDw|R|Mfc41IJI-YNA1w*`EcLQ3Yq{XU1jcNst+}vOf&g6 zewfk(Wt}KvoDLKNxG_2Jc;n1@!MrABFQT_ea~$yGUk6s5@paq)eH2 zByVIbIz+2y`)e^ecymFqGOvOVga^_^iZ-Xzh><%IUwO`pFZG`y?U~mPOlQWs$yh;# 
zVOp`$;jAqY_ahy;6BadTjp{>8${6`&uJC9pTU92a3ez=ZxcXLxBomu3hB0x4~- z^*ENU;>hNgyv;~hWT~wjsK`e!-zom)f(n@r7w0>mu|bvlPR%)2HWW% zWeQQUH3_QKXW8w2U$Essip>?>mFzYT^I@j++0HZNlzqD15oO`=d_iu~U)Os+g^FSt z&>n!QX!k~#+8)ejzdqYwdF07qYDx<&qmkWE>$qON+_?(Fij}>H)KPcuron`FoXLGy zGj8jeS-sxtkZT@}Jzh1VYENmU=-v0t{ae4Y^mS6`SFEdAK3(e9Woq^q$+{ID+$YUq zD#%BH(mEOcb&SVhY&qE06uJ1+9d`ml= z+$@BgcU^Bx7p5Kip{q3=b0fY>sLVq;6+Bj)I`lywqwlctIOEYi7wLGg5q^P1B%e4& zT+=Fk8xbzAZ6lP9H{qKu=}BGr71VexDz&8)srqON@kPB45hPJP6u4rpYK`$|dFQA< zh%`bOASUOrY`Y3Ul7Ul;0w-vK@vpJKBa@Z1-hV%MFM9HgX6eA)k^Pje4;7_K z&$vvTeXD<^!VFUsp@faqbzkdvu;)?iVUTxp1Q;;roVLc2C9<78xAyuuVIM-tk4i!! zeHEvnjU~iyqHmhAzh#R{e~>b~gMbeAoD_5UB+KFk|2Y<3S@la5khyjfgzL&$@6+99 z>>=+H{n@tqBVE)MsCqe(U2ii)zP2T$?+e?`3u3D+z^roD!>Hy&^4Z&^!f4Zglc6vg zafGM~JuzZZViCJU$&jK_d7~Yy>Y}47;T_4P!sdHpzG!SkgF`)&DdHTtv;z1rdSJJy0Hs?M9fv1hwO z_{T@6?OXS>KLw&{?Y)yx?{t$E!rK$Pc(jwdaUa@_Y>mQ-*WPVZ6G!Ywie4(g6}J^e zOPQ8N`~zk!@Bru4>|jg2TZjG?GLUiaal0SaWKE^ z**x=U!jFvW&KH1uZ*KEm`==yDD%!_DD!D@LFG55IdMOiqa{I6H88ojnz{l+w|cXM6OKDS3gspIx-YeX!oy0s78SY}0KchtD2ws=O6LgOmPjV2@8fyb ziqE*N6&nyg`XAnKoN~*zw6t(y_-ZrC;bT_X+crK^>|WtjZ}dLg7a#Sv8L6&Kn9i-7 zI_U7RU{|BnYo$UJ>h)AN~w%~wvIhf}5UtWy&SI_2Kw4rZmIU7~q=L;+v{t?%_ z3wBhwGFAegJC^v6I&7`UKiP>v!fs=v?x|iUeqLzjx7m@W7lGgNS7wvSNJYlknm##j z2`&Q88DT$M23+7awUsN4AeDIO1K`r0Vh>fhhF4x%!7@j#01Hy?0NTKYX2k<~L}{dQ zT}FKG)G_S98~UF2ro?>Fa-yYni-94*a0FSZPA8rp#c|)|G`>g`wcuJ2g-^g8TdbIU&vfBJ0A;PoKsU!CtD{aOV`|;op z6x2v|%Es$Fbhzydl)QcsBU&7s_f=pzz36JWp>98fTV6ZGoHuUL!a^1%^Zaex$_Hkn zSP#y;$8Yn8v~0n}9jh1F@#Acke9>#$uiej0dTXYArP|IZ(v`qFYRb!mVo2eWnUdULUk%trTEHHcUe(4~(V zKM!!mCCRBz`CbXGeJ%G=Uv*Q7=h`{Bl%jz;r4%AM18_RPBw%;yglIE=e8nB?>Tn(B zTTk#7TbkW}l=@?JErE>ta%LK|<#={0lau+dNTXBPs;#-~eqWBP&?b^mnr0mCqp)_% zKGIE00V?XR>KDKuxPSCQs9VpBxL^796$$shk>@jNVe$*-_MqfRB`G~?;Lhf%{NgN$ zIh?_4;4c_!&pWw04H5en{zFz*>umEX5XN+d-WEZ0ea^E|BYf1JT@zaji#*ys3FJyk}^nD{MSc@$TkRZTjjib?c`FKrF}I;7%VA@NlR3|iw_Iw z3xaICNG$&1DY^Ovd3D%(QofhZ-bt)T4GaZn=tJ5tp4$(z#L@tZw`P-ySo$`o0b-5; 
zzbn-^8>VDd`;4Z<*N+8|v;DC7iM3{j9wsbCp9xIIDa!TY^BLygTxYf1lXO^d<_w?R zCQG}bq`_#!%Gw%4XPLL5Yp||e%fAm@_K)L}R6yA#JuwKU{D@Fy(JApPE=3*sBiVNi zCbnB$gvH#&P`(vYoquvNtg(a=o^fn?I)rx^>Mut>Vl0~)z$l9dUWTox-1diZ7YMQ4 z%WH1+Izb`rmm_#P!C9=mR&pY;wWuD_7L_>=;4~NoHF9d&~Dj{@fO>wRrQws_d5M~UO9CHx`b1R7_~%;*MiS+>bo%tKDzqL$2>7Gj2|X9VW|z!1 zjZ*ebv`nx0C$=~L5L`Wg2f?HCW4&Yic_KoBQpx*fp+xPZCwJI?H)Xe01Y@1_Vm1Rg z^y)#EFo$#AHch)Qq!T}cTI0VNh9!_3-Hc#uaCvfCh^Mec)X@IvhjO>KKW|}wdHx0X zc&=J+x{2Yg=V$P|&^|~5WlrnWKHN4r91d|v+{1|->H@}OydA(`-pdlmyw4$7t8R^z zU_xYdbAy#&(Y)YUoZci`UlQD!YdlwxXq1cBhjZMRJ((~#4vAUdJ*>%qb8gTd=5drV zD1|gPyufSZ+6~+8k=)Ph=Aova3%b)`g{tMEp(oK0i=Upa3a1obyW+G!PC#U%ZUsK+ zQ~n1}>Jwq;bQ@Vy27?6UxExC2ECdz>JguL=L?fZ) z4)FD3d%gJFIdMLTbPy_edQ{134GA113)FNOJ1ww7iF6&GL_X@g5?D#I^BlY0L+|*t zqBNaj0_*S->>fs5q;CDYo7#Y5ZZXI+_7CXZvA^98;;O{a!CfHJ*&6*6G!uEtYAgJ< zFVT}?t}eOdLcg1ajN94S*gBjkZID%yf>2qrh8LZ-^BOdKT4kN;P*n`N4T=Urw%mg# zb=wi5rs8!}?3!M)+V${&hU>b!#3@qWdahEB8HrW8iIq0)iQf}VBH3kx$<+e5<(|V> zdWQ4>J<=~Y6i1lu|EBLS{crjXGb{Ul>N~8QtPKB~zQe}B!TEpIcho>tP_)|0v^x=T z1f;Ca5lBjexk|t>3?eWM!oVa+K`u}bFOpL1N|Y0B5lTcXF+g@J-u~{o{=BVw+RSP* zonPDD_`LIfe9d98n_BVeb7-frYCwXCJV4(Ak^;uEI*9-X6e8duP)H1nm%<5h2K;M4 zY{CrOIY7WtzwO_R-5DSP8z}=2DA@nCL4N>)fB*y$Aq5p72^0t*NT6`2AIIP>82~hn zYy>@j4E!$*dJe!Xw7T_a{o}>qggroLo2S1D%HOj< z2E>2sdH^B-0s#UA867-e2f)A{8~j&);>A~IpieMR-%RBP(65d`Jpi`~)B#`^F6NW) zJ~)_X03f6boTLA)5BP5z5ds3hhL8;+fOQ=Yi1eMCdlIJgt4UmT7xxgB0kkNB901_= z^V{1r?l6rO=JNWf?d$H~(b^gl6NB&ccjvy3(9<)6y+2+Z0Ae2%1+b_c3I!~p5DoBe zx(GJtTNU*FPD8Z<7lz=WTw|H`r`lkyuWarkouM7zZ~6r!%!me~|AT#CCV)r)dpW)T zFZ;Al_?O-7Z}QO};hX1L9DOEN(3h`h#BV&6A7^GSLc$E+>fg8U{=>-uK0LQm*kAwoT7%j;EV(XY#K)?RZ z7y$6&ql5{B0w~;o0$dy1g7lqI(g5y@^Gf%{fc#0=939-oC9T{I6wCdr{kDY-7});< zc3_)j`qVJP*i~anv=k>8{qD5l*}PS@n1xQSkMm6po`lhkNnO-zJnAb`=TQLt;xLTp ztn&K1$GWd)6u$jHHj-;b{g z#9rNa%Iyh(e08?4{r*zzweEsa6T^?Or?N?vuuAT|$s~w6V=C+^Q23qj2_~ypiACdXez=}ijQaZ zmb@>wNR0VNJ*97*`)Y?G)&K4WH*Z-~PpQ{-U=0;vCw=}iUNvvX*%nB@F&WSh2c`!_ zAE`R(ijsCmv$=vQi&kHWpeiJ%KW59;?eS^ZtaEAF2eMrA!@CsN7A3<5#IcjtapAij 
zsCTU6zRx3@xDCKlm58N9pzBIs?>y=5XOw-2K*;?Q*p|nmH>JXR#?Ng7#%&JmAF)54 zLrtyMscaoNix;Bo*o_c<9PrjZPY(|(73Ft`x9b!zl9VF2V_QPq54;*g&y5kgbv;IE z^z!h^sTa`?oSDJ@KwFlwq-QbLUCx)Yrt}L_R8fiO#YR-rJ^W1iQ7!#=h%vlV_hNKB zq);0yc`YY?-HhLSvqRfRr#*B|*Qp3dt|Yb7~nV^xE$eV^l~c>nBFi|pyZhAVsfMGr5j4-W$vf( z%yH|~kYbA%o(L{pN^_=7Mta0^x36bNBVV?^dYY#plg}&;&utvf#;;nkVfadmh3|JU zGA18-ps@_?!DexuD5MmxDGdJ(f<#kphn6Bw+sqVPlUrFNkJ9%)*78oFt;k#Edp!mr z*ehjrTU(XNFTO!|P2Y#U!ozJ%laUAnCR!Rb`vvPuE?W=122a?fMDV$_u zt5;B;R5||YF5#&}A&w7YX|biZDml}kgYRd?n=^eNdynI%`v-OmEF+g^}pDh4)DN(@d^rbimik zDU$w-2Rsf}_!UpvBRp82Vho(D_#J>(vkelP_kD?OlEz#jR_B`~3&V1f0Lo^snc@sT zY2x;h`}t~1a^W-N_+Wm5j(yYQ>ba(72`Kk=i;)cr>`SLs?8wib(X<8br7~j zwxsmqZv?5f%f8&bo9y)L;tZy+S(r4n8(v-SalfA3Ucl7E&?69Zr~)0=L@U`gT7|r` zNCh-vdBR;a(&UBKNvadsBaCy)Pds zF@w^oRn=yi(#?;|(dLv)ewG!Uz7eK){>u|g5?RFUEi7YvEjL9dbPwGUx|e^8a_B|p zd*ixTAg=te=}aPTtgziOtT zi+i27iQV410v@%SsuB1LIeF;aB47QI6e;YTtd1-7EgT@zcbMgnV8hhWfZKM#+f69> zf|3pSLrYt4o+sKCA8qw0HuEZa8l&7`h!}(N9fL8F;@Gi_l00-7pytc@op@Q!qt{z6AWw(}HSiNxR10rbJp zvd+p`iaxH%M>Kmg3>OO*#*8L-VDpYc>)`YhSt6A(^ap(&&?_runR7^G&u?#1UzD=f zIMr+@6ffy-EYnA>F#l0yO*CTfmsA1a$0f-35Yp&{1@{UJ6yt+33e=>KYjGb+v8wR-KBy- z9v)wy+?pWahgBVSMANm^Q@5a`EAaPT0ll-yUo9XOzv|-f>(@SEDU{`*Yf5~2+-$KN z1{{qaU?D`6_f^X(dzg36y2cKV>&hNvo}LCwLynGR(_oS_)uQ0!ntQo6`H$j!Hzgu* z1D7EtYO&g~xriLtTDjNBM|ABudmVNezHoWPl^z1N*k%ECY8TGtZV_s0d@GtB-p6xE%23=-L{Asw0a^s1 zx>tj~Z0REpw$#e*0>YC*R7`N!5C!&?Wt6l_-3bQ6zd|>jSrrJJPnY$Tuij*>;?8Ix ze~5JQ=nL^OMXIU9MuJ$IeDyNv>nlWStvwO# zThcUSzEhRVQ&Lj7U9O%O%Qy(n z!jX`?lLM1G&Z|n4ho8|#K^|3-F>+NK5gAC2&xYvB{j}8=0cOIlnMgskX4t=B_Ju(F zS@28t2D>kQbNUeqKtDGVvdGFk)Eo(i|< zI1^Z={@NamDL2h`q6p#7Na9B)9LuBavyY!vH$$VyG7-kLt69rKlVxWYHQ}+`g2%7D zuOe0OWX4Bee&l=T@z^9sT^=9VkDXezq=nF&dDpT{SBKb5BGCvK3K@8Z8VIO$raIA z&@@~G__ULD6m)VaB3Lz;WQxtCmTbsp+4i^B>5tSi`-K^777i_MABPniX5Sy%+*0JI zPuJRBBwIZ5laJo{zP6YJvJO{O~el zzQnt@WR7b7UyywZnTQ->*3-_YaqZNlbxGWRRDwi*vW2Q7rOEjjA2L@_A$uvPkc4oY zeK9hlYWI2<8i^l#6^jzjOLL06G@iS`2o1$AsTSOgx_y9Bjj8U}aHB?R`q?(9)gm~l 
z%4sq#@OcdO8E_L7$3;Ym_^FTklb)}xoK*xgB&=;lZcu;Z9up_JVGhq0nMnd}l~*ax z6?x6q%uvC61SzL~L%YSq99L9ThzDMq zCXW!)Uv@ssAQF}W0Xwo`Ydv`luZCeBibp;pfhjUmsH@$95USmj2h&;Gh zyrN7PTW8OtBR^H|Ivq~A+}cvF9*AbiT92KUrdDPx;^2UJ{VjqqlB7tZMA{&WsElRIpXM zA-PqR?jPu81}?8;-%LO%oJG=tWu_$=p`Y*g&Br)xu(yjYdi#vk5M@^&uIQ|i*4Jb!2tG(DeQ z@ZebkXuNYhU6(B2otf7DEPD6=mCN!NI58aM^m_S-Ju~XYm7AesEm;@2YzXz;I4LzU=8+C(HB8K5A3qJoG>yEK6SIIDx4pZMj`Pf`bex#n7U_E}9^M4TqwTDgr*>Vg^2Mne+h7UJMWbvq zh|eude2#3BM&ROst+nwSmv(Wp^KBl)!=S5oTm21?0+;^DaW7<=~yUbtYp_1{)*|HbV2{fglS(+6-`U;lf>WRhPLtJpwW&b+Doa=C6+W z3HiBJmHZ9A>i;^Txf(#N^?)UQRAnHOp`&rl$MVKW6LLKY-FM#J|0%dohh-ef1N+P> z&i2a?@o>wBdsQi7JlV}#DRX1>F-oWSK*LsBd*EvPD}<#R67RG(TxS*z7gnmr?nA+A zA+CjYfjGqP20Z^T94ME$O`kSue<_cF;ma}NOhUXvwbB#uufN0?q+10)cF50)?;gOe ze5rbz+_Z3l^>rZ^<}%?IJtqD>@c@EGyl^gL(t{_DyI!78Ga-&#Q7k`12(+>DBtan` zekXq2K2I+SSzf_j!AC8O#Qab*=$xY~7r4Cmkl%#V@eRaSF1(9BaJ&?HEGrLpXRGd` zw6HL%e_`qpJoVk%a;gVw?JHvRcSoIEB-{S>`#s7l;mq&&wWE;jo|mk&&-giSjPqq{Tm)}&;EWuZqqQ@GFcG$odnFz@strzY6OVjyT zvH!;%<(?`sTAg5L^GUSr_Jmd8WmPPD!_2y(o^zsLKk}&Gl+VpFa>(|nbOA?hn0b+q z=FL#C`quE(>fM3$`VqCc>25O;7)dmX{|qKw9{B|y z)lI}tBg=gEkZid`PX=;#KAF>aTK+u47(H)H-MV66AZ zmR9DWr<}w0;FfL<^_fjH9NfC>txeH_R8~oyiVA*6Z}&F1(6LH-I3HgqvL*=a)}4zh z`pJ!dRx&trlJOmdtr-n5aA?y9G5qZt*QILIqH%&4tkAi9zl*He8ZtC`ee}%xZO=NcK zB=^-bBW8TNfH7k*{|?$ZXdmR?7_9Qbzdc0SgL|(I0+I10s?s$JvG(|1hC67o!)d%f zp$-Z_G+@>|FHQZF2(iP}KPbQ3FLo7otpIc@J>yLqpZ+0AN^h(-Xh|h~-$wYq#@C9}0N2cRMc(;LP`bG}{>V5ARecAhlbzDwXuw;emHADk1uRD(d8b(5kAB zm}uQ@B~}+kG!}!_SjQI_$B~EO;f0g8+W?f#z?V~nAHnNA;50dT4rQMLm(>DQv-#QJ zW|S}(OMdXKoxt&*kqRIAJOmcq-jnE<(!?K%1yMdogrj)JJ%HDeS&XyYp3do zl&4&;kMN5RUl1KB_7eEm;OBLOoEzX!jh%^=^?%KzGqL^OGwJUC0@&8r za)A~J8C{0vcXn_WfngYcm>FPpC?O~aC5_2O!WH zymcYt3fKV3T_L$x`Xv0g9ECd!T=O>eg0O>P65I_2zWer3U2$Uj{i|l>n5uf z#7Qx@yEC91guYSpn?m4RJFL0-|L(0_LWO<^0sqzH5-5n5>yvVLdEaLX5a#U~z@GHc z+!A#AGng{~5deVzjSQ0l7{~*x&kcnBsXG|^!{6I0?W6mo`#ThXPXq6}r~`3e9D?+I z4M0BweFOkPw6k+?-1{T{?H)iv0N@b9K?LxwV*-tQk$>XCxP3PDb$N0O!y7<$Bj*7C 
zyk9-#r@rL2;utEh|M^e*>(li`R;Kl4fxg`Z{RvZ24SoXzb_WRn)E7V?5D*X$f*^Eh zgZyzv@j*PQqxJjyuolQM1Pte!+;u*?5Ayl%@QvvQ4?w=V)99|}8Vvx%`GeSjq5yJj z`$B&EPkpyd{mJh16@BxA{O)~Jcy)FBm~r|o{DDI|g|~nFjqF{lU^|xk;;!$22>fj= z!+e|Txc0{(*uVXmu_6?7VJZZ0b^Vk_BG9Ww;IAv<2RS%=5U29DPt*fC2MY{*GSL4E zW9z2|0Qp6CXIF)*bB_;O%f<6o-tQ{>cuS;G0M9t8zd%Ak1P$1CNZ{YyLEXC%0SE+g zH$I1W{i^*p%GT}-xSfg);}A6P;I~7L&|eSc=rX{qEpzC>wgD@(VpAyD#U=`+Agi26pEUrn^Ok9aq~omW^9_j!U_% zSn(whEpcD3IUtr=f%fZq z!C_w{8Kmm*Kw{nN`-*~!j24s*7ao1wA8+yaY!gJ{4l+@YIUS4MolT*FV$neK{_I3$ z(6FsBU!9y4x)~G&_mgt`grQwZ6)gtM&jeQtvF069f_NqIXGL#Z*i0S=ut<%cD1H1n#*<3)*3l9#PPEA}yrJqqj1 zw8;yVoqH17@M*Yfi>fJ9XcUry-JTo^DXwv3wJm-y+3Wo-I!!?v=i&WcPu7Ec> zd_a_?bV-d^XpcRm@s@?N6vtBNR_dfq-&KCeX@C7Z5oG#wTwugQVQg3|dEb=3l`hpY zw#)2yLpf`e{ViCbkP3S9end57*;Ml~>BBG?91vjcSI|B=wI~jW~ z6MX_d;5BSlN%W71FN6NI%yRu|BAS;Fgi}7+YGOTlRakmJ9z`Z{;d1K&c`qBOYbm)? zo~bE(Rli1C3jSFJ^kX-R_#%|qJuV~!#oAPP`)Eb2NU#ILd^8M3j%kD@PrBuzMIT1R z&OU4`dGdjvT}ww1u9I{7o3H|ZsJ?u0nq=wJ%nwPnz!Vwkn3pGx^In1M6+G}Di`XFf zP`>?4OmQ0Lkht5kW3UB!LLFu*r6t^P#ZtY?AgYAe{*FG02P5z<>83tpy;$^zA9|m| z1L75;)Lb|OBUZFTLG%6Iw1zF(&as!6@0=P|*2Rk}oUD!aw+c_jU~{W7?n>&D`N1#3 z;0~tM3~e+?H^^a7?>Xakwt1o9d-hDHh)0J!UrC4c4Bod=V78NDRQ?V|NS_SYlP{C2 z@`b{Db;a-d*g)3Uovve~7(HCwQN@}wlycqb(_ZZ)aBSBwCB}%TY0avdv+dQ{iIuGq zv}6WNN#dL*{?U9xQgC~5De~T*i@!oYu$$;&Rjk#-W~4_1csQOUPgbL6hFX(mL~6;#OLeVn7yGqM^)|;V5GC|H&weG02_^llI_ZArCY~kR3ChsTVjMDQD$JdHh#c{BL z1+(RHbRt^$qEHYs?rrdIHWU5rY^nOSsa_(o`r8tA^JbmsjO>{q4=Z=h{on%^6qIf& zkJJ$AX%zWD)IrIZ6WQOPv)x9hXh}`6F4b>t=E~dPAdj%;W%&<5SU?N6p!GvqifJ> zASx@oLV(L;z3IM=)_-*B_pgEP(RZYyKi1Aw69-@9{;-v{6#9#afnj&+V|wg~axYm# zId}00sF(|%UP&)%lf9bp_ROHS^|=3x!J-_?8F%nIBI(fbHa+s4oF`}QXx^Ndu-^s< z)WUoTDxu9i7fDOLDVA8@HFe;UlBd=1N_8s>eTI@s?Ca9m9`PBMYVn>Wj*p0u;w=m zk_4uSH}j#!MrwDoU0~c6(b~q=_S1G2eh5D04!^y8#Fg+qDhxb{FUEY^1)0|C`@OgB z5DG12t1Vr`u!zcD43+Af4U%e4BCU-tU^)JSYhrW7xP6u`E>i*uj?J&ZSFZOxS%oND z%`?Z?dLGbL$cUeR2bxD1H}~di=)R$Soyhpcft6;1x-@eVFVTx$q(PufA^)K0N?>QZ z!yh7BvmwqkBZp7(o2iTe_K}snh12!zc(cPIPk@${Rg*>a`n9)T+ETdeLd1>S0<_*B 
zjWMIC!TzWvNYh@SJ>mI7I4`F;OxpAddvCIRK9OO-f?(0b-Y~3Xw1^2=KP;~7TdkPC zlYZ|->f&>(*TXfWyGD?q$G6$4mvofv!cnrHu}d5;TM-*7U6L2tcjp@VbB#itSimjJ z7>h}nGmhU!$rjVB_WW$7QF<0tT}#f|MsiC8D~ex;mKiR6BTlB(9&4Hq6EvgJS+BkL+OI`!~lo*3F%WDkoHDMSQ5=*l-z z7*^QTVOdH0y8t>?()XG(*!WXOH z2?~nVbfy&h6FSa-czLMpoICb(hLQ6)EIVJ}`X-b5L={O3$ZOVS4I}IO6*f7OsE$Izu->Z z>`I%bQ*|2;?Y*RXqga~7+SBGOO#!=B;A|w;I|=RyHjhy6d38v=@TFGM)R?B&vkTSm zi;)}y-Ya-Fg3TO+vBsR_gAwqo}5C@?y-M0-qa#bcP)az&;= z62{N&;iw9(`a73Sxwo^%oPk>XNyvu5%39Bz6pVf1YL^g8agS&OEJ7(ge3$wg3yEk& zUYFkWC#5#rsq})9-{FX_8DRDk)_nfO-k7Jg86q=Ig7z0}C+~pJ-O*-`f+`WE&ZF)# zEnMoR62l_CaSG+?;8|#L4i|)W0Q`BKIadK6TvHH=`Ls>wr2mfF0XGc_fi6iPcyQ-z z<$NJ^*NuBg8;@sELNbm=tu!~S^alx7qWJqaVa2l56m}JIchJ%Zhsf3^RL6{YLF5d< zs?tJwm;QYajWI~CZ*lso6h{Ah~1EvK!#*tOOB8To$H4D<99w>ekwb2wIsYkB@#wh^p#|sVL;VEl$~6#@CNYFRn8U9kq)|_vAe| zfFr=a>)uMU!r=tb(}_@>96Xn->bE+c=2|rr6>2Yr9t#T}Che1v)rY4+9vu#`m5n^d zYD2=3zg#-KB84H%E_?dtS;`vw$JNEZ4kWZx%%Jl()rLg21fsC84H$b&k8)%K#G!DR zJiGmsfnNQRz(M+a6rT9Khn8BOJIt4EoyI=Cy;%Uk=D4jo28&Scuk#wvhTYhi)#2Co zOzp@_jR8(AoIYht=8qu0hG`#lGdhH(($a27L!|j4;o~4{8liWk{Vf}9gylM(KTeh- z#m8&G_hi}J0iQ5|kYchURK+g>UmZD-R$X4@S()bRCqiM8>>b&<_z@JFn1gz(UHuZ9 zIl>8YjBf?zUDO{<&WeYgPzGs5!M!f5Ql^QYE+cex)1K$N!KwPaCin0#pw-!SPF|x5 z^}5qG%d@K_<|cIIrI*?4&tsCxm+$&r26*xZ*XjBUDzR>0?T|8?U|{GPbXcEZ+7guy*}5gC z4&tl|Ps0Bg3fD&wlUN)|q5qWKq^eMvq*KlQHdN}TZ!>fOcanZOdb<5AZe)=Op7you zQC|Tuk&hK+!R)Z06I!FBkEJ+?c4ZsR7}+Ru_uNC#Km7<7uJO8XnwUGFW1X` zUs|dGJ<5{n@EQSlEXEvaC_L>30&d(4XiUPiMpCMOS31fKPKNaL{HcHu+O}Zb?IjH; z43QIG_YxTSbX?~uM^IEc&6B&rbW8LSg=^glhL=!wyG|kEi-WAcq^?XIC@|Hg;XT@} z#IbqGb%f;hVx6w=UZeRma}M1P|4IyhGG>yas!0y@Vp*2w#Yv4u+mQ$QWRx!Ru2IjP zFt*y0MK=U<;ainD8NFX3V0RsP8mUX<;aspBn03a_M#Sr;^R?KV&GG+eDWz=EJnP)= z|C|9;*B_X+j$}R5fEgxDg<#(PnNoL4#bh9iG396~;IMVf;TFJ|q$dXVWldN&5uMG=6;fqfHE61 zAHA(4Yej1Ed&^U5*sA^xWGZaGK)>{k8*0#CMM$1dleb9rv#k6KR=Ftsnaqs`Qh}b zRL03pr9mLOPN4e7LXo*k#d`mFML;ZZr?R2#=;ijsKkRbcQ(98C6b>T-Q&u8TudP}a zd{H{Z*G((AN%PajOdoDz?1ZB>Moa~zueqBFR8kj>gawD^umpkhNt4V~<&!hXFuZ85 
zep07czC1I+uZ8)CHAP0UoFW0gBhr^zdL>VW`Eg}m@Sh>2lMz<-mkkC@4P|aK5Bvlr+bsNF@5!NQe9a@&Mnz~x~s z729Nu!R@Ee-_b4O%S`hsO^k5mPL!baC?K2Q@Ycf9)RUo|xCdchwMHqS#@|ajlb(`# zN%X;}e}0jv@UcQnoFhKh8}cdcm5cFX)#%$nRZU#uV+n#3vFeP!x02p z0A(IAb%{>deI@0uLLik*eLdP;)8If```o=ehBxX|d3S07@C2TR?7|lpUy+Hlrr`0( zSvk>Lv>}H1l3}9ZHUb<<+DUmdlO11bKvYxl?_Hq~e5bN>jnY#NbMzX~%sBQp5KEt5 zo!{$t%N2m5a+(ZjI2Xu7Dr2P$C!fm34Lx2^?Fpzw$}B`IRVXGKar?;${DM30C5|2^ zHSAXd0OkmgBxe(nFZ=oxON3@uK9pOFb9_G@oJu5U0DGVXEmADkr$o8)W|Z;U6Bu`t zk0Sd`O|lU>^uE4l{90{=50x)+4=Nx}bnUi{NG_A0KagZ~Y7uxZ$|004R$L+WUIXj7 zVohF#8KkM>3Yf$_j%Khk8>4y}A-!ldJX@)y-S{OP!P8Z8ZqMhP0h%(Y_^{D1WhXeF z-sd`Hji_7>Ln`nkv~e2Ga1j(A9Rd7nrKK6dKR z#A#EZ-a}YNEoUuu)*cqg&r8j`-QIhU?vKt&*1Q->;SImmcm#k&kU&C72|c)iC?h61 z#P3qYNb&q}D|vw#0l)Hdsz zn-;sNNAkv}9qtB-;{Asb{X6LW)0F4#QYwP;lHP5vt-WR{-j;o6ytPSq9T|T*cMAP# z=gp`$f#!v=6SB*0>bHABOSyGw!Y(v!Qk?-CT*mZy+1fXZ|8S;`{8_(#JU4u=u-A2!h!QOi0IUNZ=qSn*Hht0s zSAHv_1lQGabi%+fBPe3|wCIBg<&goZK~G>nDAa7}5VN`c{Jw@w+3Gn@jpiKhaTt)( zL7}b>F5o`qru4j3&npCbKGr%-&P%zk2t&*RpBlvVn~K_k>_zM-Y@2vF=-t=@^;BX7 zi^kTs*}PZGb0~W1XC0qB6G{?ZqMBk09f?~&E1kfrR!ud4;`I8LL5a}XQ|JY@Y9kcG z9@+dgCe^8j{2o!^iQY{91X)azzla+R$QSDs-^^i{j{GyL>VG^}8CKCXcJ@bWR=q^q z|7|ERQB@f%3}1k4%^)UdSsXYDl+jgOwiMe*D;T2)ei(iRM*0GO1R7GiS~> z)W3Bex2Kmiy&ij+@3%s{TyynWv7Gi#cI_XhEtt;|z1#4V)md^0Zj_-v4*aYA|4$9DnVif1Nw4Q$-KqVm* zElGbxE-L@dqiIOCu>2-Wg;~3b^74s!^veaoyYE$RGcMsIcWcySKx``Q7K#0ki!PGb z6E%EdZ!SN=%ns${MO#J&cNz^Br?Cv_0q%Bf{y4O^<=r)lEp;ykHQ$@g8y;ghKtO_s zuucOUO*=Q*dn>X#_c_>qniVeBhl)|%c64(iA+r=Q!k;Zukd32l%cekzBf_Xp6LpZm zo+tpSrQIm@($EI$c%#B@dX*VD?3ADcA_BHzQX!oV1HjXZQ${Gl%X5Hwi>%@((vt;h zkm(DbKRWNUPr#?KN=rs}jY(%|@WOJ`4XAE4OM#Yh{LD3kdLy9Ua?50VVTDhbcc)Bi zj@AS0bi%4LgTu>>N9iJ#-i3A^a5T=Dmdas*kHXN*)TjOMmm0p%N2~OTXlbod;T*}S z8sy;{kO?I5&*he9`j+iHS{=cmyi)4h2HA)%2beGfNK+i=hhEjfu!CjJbrbivqOcuw z*;TC7vq;kq4NJZB(B_+mb3k=>VUnyAJMB}Q^d&7YQLtN*>U){E*C-IM4@yE42k$;> zOta5K07_{j*vXEx-Z8S%nfey+t@KW!TJN;^lCri0IgFHkFm8qwoUoJU6~P%#*|l;j z=D?__Ue{*$K%5mzS}vaIp?89{nxlSoh4?-#n>>_a`5Nr^>N9%LaifHMYV>KzC%W&f 
z=X=1CGPo_B3yKow;N9}xCuLSaS}d-J_pYn6Sqn+?%Gu3-5}`jWgbz)orq0spAGsyVM(z|8k71H8Moz&V)K z7haz+Hm5ria)oYm@pI!%zsC?=vtC$q4rCU_);FuPfUrQej)=xzAJb6@89hbn5`7Tk zN*Dg<9degnSgkO)bNZ0j9W?8*$NM3pGsMXhg{r4raqw!UOk=y)tR%a-*q}|PpcXwM zn(&CHYv`%jTFWdR3B6g}`Izh$V}Z_NsA$?7H$gxwhU1rUc+m&%W0lkr87k)bPvXJdDB$n8o#i%3?SOyx(pK5+v6ejjZpv%$_!4J zZ05O&%wDo3B}JFehv+#*<@RYqUD<%_5OmZL-BHqRgl9WrJ1|mN_PX`<@XXrASI_=& zR877Npk^4N6=^wgkk6D3>Dw!Yo_#aTnHe#%6U^qr)&6$WL$Ha}0Ff!vh*=s{d;yUM zE02U7yFKawc$L<}Hmn<($y0}_UL&{1v*q8L+h)7a-$RD}^|P}pEn9r)ckK}^EI7@5 zN3X+BScPHM>y(NECMR8^LTRh+l@IIvjOH}PW06vafAI%!ox3yppAN%+JrP#%bTB2L zmp8IfcCm$`mm^?gVE7j~!O7W$fP;zoKhy;lW`_T>x^SbtVTUb_=6k08Y#bEWWvvf3 z0PvVb5y^~4{lLKp6I_Oo87Trqg6e&Gb(W*Oqtr%1RU&e1Nl?+L;aYupD^1_2lv-O; zr(@b!<#RF*pBmp9rMlcIrTW-0nW`pLT-oe~CR-}IN^DE@{fxTb91q}>8V4qTama*A zmCIb$z_e+|qdGbqGBC*jS2gJoFQ}WDa5SDVNfPY{Z3AGAqi{r|uNnL!Q&BTiMy9?> zuB=Ib|BpyeNJ0b08XJ|?$AD5{wx7f;Hx;vjDnOfe0CcvM|I~4|Gen8||9v zI9CM$^XJqNg>}akL$n7B^$fC>P#N(BVG`_+GC})HPpapspHaorIv*0z05%d#Q0Js` zm`+m|ZUrnJXpdkmpj6A1G!8X(jShgMne926LCMnNGFUnNZ!p!ajn zys%L(SWz^D;aL$~*OmX;LVN3qvuz*K>-8W zL~L|p8wQiA3GG0y5BT)<^Vu8L^C*fL*4vmCOTh*7G_F+XxO8MVQMO+;2GVh#H-^%A zpEpL*b!9Y0Q~tMSs%s$tH0ZH{v-ABQX?uHfr+x8RW6>uME|kMn?lbH&hEE{;4x`*o z=8a(T$<@kI%`uxom4*YZL5bdHR@vj(!IxP$y&2Q1OP?DaJsExMQeX6xF+BvvMRmY1 z;~yHzng=7709%3B6{->0iJn}%+}U}(;n*=>Lts`~Lx6)2zO_9SjeW^8$X=C8l`XG%e{RJ^{I5Tzez=f83l}zxWLzehJEcq@k3extP#D#u0h-iHU5*&MbJwW~&dqmxb>a9*c*@Ml`&K z4WXf@V0s*4@bIwl3e-N`erv|?<;q`7e0uX}Pg^$dnVv^~kx&a%xiS7;D~^8(lWc1% zI~ks)zXd9@Fk@XECU*Z?Y#bHhK=PLekC1L?t^;Cr@R`Ueus(1W+1HVkuOb2<2{k>A zX&D^9meU~IbP5O8b2DY_BBS<*m7P*BEL*Bjm4vDj#1w=CU@vyi#zTIp=#Zp<>#tJi ztFW8*%bw6GhJOl;3JUonAbFb46Hz8coD=UfFuOO2o%4;d(!%;4I+Ccse<){JdPp|LFPZ*3SIxvmPF8bkaZ1 z)&EwJ(`yEcNKnlZz=aLXDm;o~k|^KRtbWM7q>UKRtS+Ui{LjUZP0wTMriiYD|^p ziypWaZ#}|Gu;o=1bol|cTH{riUGC9T*2C|qLaw@@ZvDEU{HXO{`?d54v4vf`VJoHp zV6_7q6(GB$_oIr}N*2-uh6#1?l{!_8I-+#(ckC=HjNFqVRh!vOyyQ7Yv7KVxQjE9d z;vU#I$x;>{2)Xvoeo@))CX!3+?n!o%$vwIAj?ht@aY1 
zG54G?d@}_(mAoL1oO@^AyLW!HYp+|d+baoDp*1@SvY{V3=F_36ogYv>>bk1^^T;`k z6+s^h;iMS(JOt(?IOvVGIeVPTSTv{8kB(H`=Zf=i*N&pU${kn7Ha(%ag*FVS%@g`M zKx|xZlJ+pWPCR_wCRuKOF)UNB0D>R%*3inVQLlyZ=mHJh-j*NRnwCmQ($0>Y|55WH z++{}e_TG%Uh97tb+z!r039$xj&agm83Adle-mcZgT$Rt~TP1&r9&}^%aP^6t(yl49 zdc>Dt8pxe$g0R@*LX+488ftw$5Y zt&bG=C$d-u*i}he?ukxE_*I^YSZh@Cfd`}%YFEZ9xCs~atmpdWz@T8Dx(URTu>y$l z{N^RGHCEGnLW7SiM`=4XHE8+XxvdD>6s>xQD}WMu?C6$73?%tvDtOCb*Dg;!UPBkS zS!){In*ZL)3 z0j>jAqw6_TuM+qkILSyqYf=()THNo5xVy#XBcBoR-2g)!a=3lNf+$PWcdnSU9@tdk zLHs9HERL_3NkB7@QBol>dL#i}S)1+}u}jnTz95It+EoRnek3FY(R5?7m{qY+9qu^_ z)JmEjw2d4J*N&714BZtT51|3Dww%!);TK|F=BbG^T3|cmQKm&W3Y%%#P7d{R{X^x3N=5yBl6%%z zBQw8jwC`ftR8wxc-7Bj%rcW||*9X%E7Y;NSSW=dE8Y1Y1^lK!M;frQC=$mPwf$>_u zQ#Ml&=EKR-Tf%)gSu&1|+XK=nf^hOq?}vdEct38D((Bog+%|!?BvJO!QCzUmrO47g zpb&`SLg8qs|Im~Pi~t#;)!e~mw%~-Fj=Tz()ZWC~%Q+Kd-|7|6KpbhM z=87fK@dyr%eP}<9#qMqdTnPmhk>84YsPb{wB@5aygA!WOndYWp5)C(6mZ%=&FPS9dNEddJ za#;0a3k3gJlJOOr&xIYWeZs^qO%+L&`DvX_X=!}RpsWY~H=L`Y7&AluBHYL5*0tR3 zG zJ+`!OzsWLW9?bDFF0m?NOlUkg6)ur}Me*%@sP)B@z9cqEa#X)|b9ZF~B5^#q{omCp zmi{N6tIk_KQjaE9jv=W(eENNN77p0e?I*fNy-ECS{?WpM<2WqvK<-(iJQgROH-|&s z(RWJ75pIpnR>v*-8WhgksE&o_m~&V4K5Ftk^rOHHqG?Ps)(4xY?x2f98E?Az1N&J= zn)PvCYW%#JOdEJ9g{{Iogr9?uPtx`q0RBhS)=eXd&$zwkYnx`8Sn1$VMpW>P$ppMg zYl7)m{MY2-1UQ5GV{{bPah(ZfN!F;RDBJ>y=0hvb-hNc%SGyCekIE1>->XvEiI_0; zcvlFe#A9YWb@&r(t#ly?@if~iDzOm%!%@-j97P0xv)M>|(K(cYn`2BJWL=20OFPQ# z%TqMPrxh`UJ3mBOix2R~8ZRsKS0Dm+e;QgDb1kI15pvY4O%QNHs8JM88#4CSgkGgP zxn5yy_OSp`CfrTNANPdp@ycW^^YCm1eXhV(dGh5yQiM6<8fHIUC@HuA)xBO)!rF%O z<*J%&L3q5uNO8e~wF567u56rXdAaf&9xeGfJz-JtnQXVv zCM;|p9D2{cpXk#eC!WkaIe%ULyxbfg%>)m=AtPNr9xOhuO_^o)dbmFSSLO&W$8-Bq zNVAWK$->$rbeqd4qGdSWB%wy$A{O#55#Gf*V(RKxli2K~3oF)=u^XYeczZvN&$Tuo z`Sw&iiN1N`WA72)`Q&dgFSx9+f(v10+u7`u3y+tE4;~V_YoYC@^gX|~dSF7jox*^U zIDMGg@Xic2rMIGamJ*5^p@l1bxcEF?w5Uh0C@1N}C_jCa4({hWVP?&o+*rbfXQS$62=<$1>&K%mf?3HT}f;PzID&2Aak2(Lp=TFFSf7h-D-XIk1D`SZ6WG`3r zmvlPPlh|hZ(?%DYAG&7sK_B3l^ygGlH*chxGkaEC?f*+@+fJ|6h_69*fK{-W7 
zK_N0RW)3JQ*ypDp%tpir4Cyya`io8%4-lkaDFYBHoMAa&2*|=l0YCu*7$FHfObQAD zAQTjg^v4`TOacH+;2uURn1ELZ9_p76V)JZ|Mh7yw3lPeGtS1dv3WEknNkBOJxq(Y? z1|ue5Bft332dFbH6)zb?yWi|8BP3|gfsx)AJhQXmPZ8l)a`-k(C_RY*kWgQXGcKpfa`_Q z0AGUF{pb8b_$2=_DMz^7@5lF-{eBjQ00FWtWTOCJ{SRIcvG@FQ3k;h#^Y~jHphHLo zWD#h12!!vKueV8{T3T%mn9CRZr@a|e1th-ZRgKjS}+ znNiq40pz}w0J#(Wuf1ScFz+B+2=lUiHYIvoYdyAH)u{#Vds`UE_$FH(@0pP^=3ooA zE`m|%gQ`iNh7gWK=?iP}Tz%zZp0r%k&exCE1~~c}Ldmv=#kWZ+L3@|t#QP}-*Efd& z_KrArc6L1lw}=f{iWG@4u+RURaa zT0=wfXT^r^0~LMHsHEzz*RXFz*2{yS!F{7-;gv{d%)BuJAnf_vkWpPE@RAt3l@ytj z=v9&W9}B!ES&fe2sgO_pqXcoC`C{N8p0@VqK_Yl`^jF6GWhc%%Xqdb77v>I{ifs|S z)ZB!Apm*(Hvl8fN^=m-2*^pMB&fP5rVlrqsQVp4wf7LyVFAKujXS!5#0SgJY^A+PS z?g`0FreV2%O&t;@k_6RnvsD0E`G;__9zMr4=mt!*iX5vQL;uTC%;X&9)pi3qc3B|s zl@YjFYD{?RVt5bzNhd-nmVxj6C+5{t|9L$oCQ1;csLWRB)5JjuC3>49X}dX;Fv0cB z1d$@^T^GlB=7`;os6 zO&KE*?&*AH9-KnI%fy><+gt(D?d{nSlB8{+V04xn)QiQorHv*gamiIl6k(a#*4k>r z`O;Xd4rtJXto82Dq90upZfA{zhj7U4=|{*=A>9Ce1` zFq7MpD>}ONo_QDrTW<1ymy#;HQ04c1g{V{j zD#wy{_$$1dw!rRnGE-VuJ=QyApEMN2t0j$%Rk1%Bh4So z58b~T%iK=^hE2Tpy2;fa`I(mcdN6V53z?rg*&I47L}m3?$Hcz>(J6(9MEMJage{Cl z9_CwWHkvy>3&4XDAS2#Hx0*#ih1*G>al|W<96clXsatT+aX`jUpdA=pyW#hTw_FJu zj#<^#b^FWAAVsn{$sE#cIdut0ja!$v@g}`Zg1U5dt^86Y9nvxOkX<$mz~1Uxo#jGG zG}Q&GvG{R?0jZrriLBXE_*m#RQ@`b<r9||}tNx1AWN-iaqI6?jxwV7vCh#nFrH5*Eg+ins2mDRAzAQKlN%Ik%{ zJ%qv9^)qQc16v^eFBQLw9Cljbka2(H5J9rB@$p9fz1$c8grrZBRVepkRY3D73^4?LF6- z0_`xjV}DlQit@YlwZnX{VpaqZ%fPSh6AbKbF8)y&%Va$x=Op=co!ZkH_YdSCIpQ1p zl!U^Wdda3z5_l@S&p0p6{KrU`$A{Oy@osDMBA3ID>mZJ@j+&2p40MwoM(3CeYW;0# zS)@4Ms4I3|$0li-{ye+gA+QqjJ^iZBkIHq-+)VRhu{N4C@3=$k!OW=qomhvZMUI9i z7@D^g`S-^hlOq3TUwM4_WBtU&mxC-F#{vIK2mE~T5+CZ>nS^~hciD~AT+|8ZvywKY zc+8Ci`d!8+#9rMSZ<8Fx@uNXHr^5`f7P+rqk*!9^_uJaQXzv)I2vaK_Q*&}WWtUH! 
zg+%hJtG-u{>jLqsyhzAqd$y6SsuFd}z)vIjb9e+5^sHyYU4XoDc5b%0S6Vr9G#{Dia(B*J#5%INX1feDVNHwj!wyML)TFy+GnbPJ z8^-KcA#!qAB|i|~HP^%EQY_0hF4*}w^z$2jUl1uKt~=ACfVs{l45$&N^gA?8My`m< z$zj}H%9G6)nFe!p@1f2)6%k1^j)l3dZRWEId%=-k%=YSIE~fsSt|8tv^PE{+Y;elZ zu%G3&C8;ur7e`i8f{Mfbq#MoF5x!tS%q!nI&1G)XxJL&2u0_Z)5d~*3GDng((nb5& z9}%4X&bIBMTv;57jm2=K!RANkMYhA@jdIu= z!X7zwsf6DKtSO2S+pHywC#MEl_9PWdJXk(bm6%+${Z!)}P#FB)JdV8$V!^hu;%K+j zk{;q*4h(%(1tYyA$~GxWjzi#FfGuY|^UFppiC4OdvfoE4q=>|-4-hYo_J3gJ`x|z0 zWeUf3S%)f3Q58GgNLmjmb5HsuUmvf2Emxls%3HcZDwwJRKhJzNiO~>v5);w>h~Gbm zz3rH<%B-O2bTXPO#knzbxBH=o6D-22h-NJ(cz*jlPY_Sw>gC^OVO?0$up*5Z5H^{o zLc#Ju=^j4C#vs{}KjT;hMsJi8W2eIhiVq2lsZ1zh%rV9};oZ5cG1x<`fQbU2coJ_n z-wR2;tO}TN%7qdhdlHRGc6yshPqrn&TDCg+&6lBj_S7EXV?A5ggygP|Wt%NKIp0Ncz$4;j`C76{ zU%KAoHX2NqAn5925u_-1?_obZ2;My$FLM+cfIb{+#r@2=Zn9Aefx)j&lIP!$>2|<# zFw_(!#ZSIUR#DX#(cYs(&Q?dlBku$1ifOjtvjJs`kZ=y&f6c7HK*?m>C&crwoUYRM zUCw_}G|5zCE_vI>TVxl1Hm!K~x&XV`n0q?Ar=yNGDJ{IITB<2GSBaRkecL%{{k&}* ziBO3vHmYme?<_vC4wS83>wrpVE`CY53Gc&QvTu_>^vA0$+YftQUri3f@KnJf$lRYCplRKu!lG zEi>*)?(HumoLN**Q6N61e)ckE3I3jW66H*?o1~Wm#tDDazZtJ}znMw@F}ULmS$6h} zmtQ`hx&1=E&)_j{y7R^cI$J$-FQXjiN7~rrd!16UHWpfMtxHZAr8wlySkZ%wLmz%5 zS3fIC7L5uo@@!?&{$8SrT;^JYq(_@m|6aXuWVMkV^pF~!)uu7mfYb;^4jh&IuZ5ev zyM31x9p+i9wx_Q>P+JU>58-%$27Y8?i-y)eop&Ef#cHW#3eyaE|3 z1{$=-rsx)Oc8i^mJP(Ss*5BF1b`pVy&kEmYOVEIVk?tR-U#Aw^7VR7TQ8`|Lxp6C0 z#W`FeSQ%~?vWz7>wcUpsO|J6$av;((UxQSoOU4{>jyJ}7w;>r-U;1(qW_WVh-vRQ{ zbb;$fxF_fqL0;9JyPY=+IMwX@^tHF{*6|~mxS$4*IVPRD9swwdj2p56;R>e$edys2 zrta!>P5ee;`k@?tkkB(Q<#dPpU{{)X;F^-D?ZS7ze;w>sNzgMizl7~ z6D|OjrIF9=+VZY-tO|FnS&yOT$AJUCjDFoNjAp?Zr2P~(FS3Bvuy)UZn8HQ6-=Fil z%eU3gn#{*3$YTAK5y@Idfi(i9y>1r_(;_oDBOU2>#(P`f?`3G=d}JT>*g4a%b~HH- zEz~bRnIif&vuQjU(cwHJD!c{iCGkIT#j4UxPMjM={;85SBdIML1L7?+;-Zu+;D_5Z z*V7n*!or#-$L5UJHT*s)b8pZ`CV9fylFnRG9YMvV>%Kd}tdW*j_p@5ujWhC%kuNU~ zs85ls``~ig-)jfC#abKBjMt5GjFMx|OU0yd10O$QuJwK@;jeLU;)>(DZ>dUfQCn!P(Zh#lx zDc@}s-f3^5P`u*#yp_%z4?el6M_pzbBmQ9_5K7LvULLxIxO8#R15zm3o$7v2*5xner8iLyS1TT_lZ4Gj<8{{dKQGR3-EFCL58ZK5u?(LVC=cX-T 
zliaO8M#2o?udu^-v5wS&u-fE&KH!U*^PGz*x7{G`lqFGJr)pj1U8vm5N60;$Ub~_6 z_AvLkb;fa;zD`CEQw9-iMib8Yj(MqG>>5IsWf7#c4&P0Q-Adlp3}c9$xQ)rEWphj@Gai4l^|MLWf#*L;2&s~?4_!NcB;kSiPYrD@3;7!P*h0)f?#3HZYCF2z zO)s6is(}wW6N}YyQJ*P$PeoZJwdc7W&XhM{yOmRSmM06&p4v@5dnw0o2&l{)o02J& zUj=^4Z!eJsi1>bkRdJ#w-U_eCL#3h5GppC#9SUcpjWm_!f-_>XF9#4sxj zQ}e1w#c*xyL=oAl9|W%nbn79Y`~(o-|AEQ;zTdQGG~ckipV0AZUC~)&^I_il)vgQL zNrUWh6&>zvBL)>tig=>f0E?+oXq=7T^O`YK3Q`%3lmC>?;486lz>{8)so&E`YCtE2 zmJ${1O==Lff$92wTuAM+!nR5z{Uu3V*UXNuzG1uJtv%W#^kuG~uR+i|Y63eD&sxeM zn%V&4>`wvyL;J8kHS9MTPtD&8Z#&$jTNjC$769E<2lSdso!{EeH{4Qf4JTrvYssu9 zRvsK*hvv`-JQj@9>xwbK(f=aSqc=%Y(sAatn!P*jw0!f_%;nvKrkn0daFN6-w&MDa z*t<=O_igJ01q;l-nOJ_>u4{u_G$|?$uD4SeEbEHTTfyyX#&7K{a^yUvm)nk*f_910 zI69RG_ws&ld*1T?0B30XT^oz=5C?AW9o?yQZ_^aUHof%R>z9+$5c|Q)ZO_M@Sa%An z-i`@qU4u%sQ>44Y&oi8b4e)YOqS84H4qY~v>?k=W&1VuYm~)m`8IPHb zpMt3GO34ol}0@<@wf1*3Rjnyukie;<|9$3RpXnp3!Kt+Mpw=%{@gE|RY~oy9O3y>ULkFmu z#plJRY#^_k8qwB>swux4?Jf=h&lF$CVerK-QqvD)HEE32-QJv@`$IU%^fysF;jJ$% zC;!+a428D72)|ODa44i;;^#=>%7<#e-30IL?GpTHRrVmK=q9`>l)F3eGXPmr61IBi zpS0A!VD8Zy8S!Mh*7TC-IK0Fi)94s=ev8b*4IFG8Z_RMC%&Wnmx$aOA;O$z4mgwya z-0tt$did0XSxxXeeQt1qOkqSzdoIO~2e=W#*gAn$ezLyIe7qkc-c{KEeoc!Dv7`4s zw+lN?!e!qkVry@jWMZKhwoPvzX&VN^-{#j}{lZ)0rxMK@TDR4`KU~3(!;=G#R zq|C0fVlf)U5LsyFU?ES}$V@A^%pDJH!*&E_eE8Zg;3SH-bJtQ+S2yM1U%GN-L)mj2P!pqDJ`Gf;$O^|63iaw$)F zSiekpVh`r)yn_U^`bQcS8bihir(pll^Rg!u2LB6^B=H~~6BaUgqh$&FoR)@9dW)Kw zoWix*8_YDB9B+^8XUPZ*PSdUMwJ{Tqf&Q3netX8*3G$^q9Q%Ge-0VTb{vr#luTV#~ z8pr4p0_SFOU$m(ji{oZ0JU6x?|`c zfg<>14k`VfG>Atz(s>Wj>L{m1R>5n@rVcYRNJtvePF*2;*uUy>En=S82rkWS?oO#- zGzLN|DwO@3o6=ZHh|T;~KcS+pwOb*JF+uaE=iFOSC&*uex`Cr^o(s!ydAM|YQdyqS z!{T8#B>ce07q_z>NR>|v2*winez57SKfP z$CaQ}xoNBOFe3Zx9oFt?^uuqz)$>yxj{3n~>s;w-@Zc&5iw)4HABk@#2qah+3l~%i z_3gRQzv!h4?L5iXn@p21>~{VDiF|Ne|2O=}`M=>$Ru0DhvVzCL!tuYZ;IVN2-|=TN zsLJve8XHY6(4qt^fdsRQyQI4`3`0M{zwF?V?v$2LP)bQdNI*-tr%Qwr5y0M6oaH&s z{q(l%x;L!e@6Fxzx}}te=m^9^5CINA_aEFC zw~(!%-&W`akWPSqb}`uLm<4wLj(@;c1J?+81RNLvApg>rs^DH60Jee-15W4w^rE^1 
zpjBshj&JzH2bSyCF9y7Kdiuq^rN7e;B$(fuCBVkch^B_P1P}ICJ~gzSNmI#&H5_ zVe2CUIywW&LEKk0A1DNlO>j;p{M=m&m!P4ZLLdL08$w0xPCc3puZ|~70RlVxKcWWp z(Ig<=@Tc)&03d(@1q>DW0M=mvURxdaKTvh{#!$btC%&M?R`K`GpqxS13AF*h1+M$Y z{oDO+3FH|70N((>-+$T;_mJr5>HE=)f&nywZwM8({fmeV|FMIQ+2P4CjBNlWf)tMq z@cr`kmd=@^-O5nWG{=A1mp)ZoV_INaG5OGS(5LF;#9$wQkKRWS5G{`Yu@CwP4H%M$ z5BL}N?|mtJebj&HOMRAW9H;y$|Km)5%lzkxVIANvt`t(32@S^JCwyN%Fh~IV82;{$ zdB(r%`>)|kJ>?I2;I9pj6yE%7Pis~G@-H6k7|8MULpuJr8XRcC7Z&3YsNm1q3gp+h z4$DA0B7Ex4N)>_N7+-E$C#Tz zJ^suX{X?*!gb8#2Cf(kOuU-|1)*zIf|TZC$ zee?cjXaZzTuCES%*aC5p;0G`I3yGD!1NZ#>ePHHCfZ}1o-(+k~Rey>8eToMEU|$Dw zx5F|c#=F%(-gVL=mA)BiVWi<4=r(-LiJh{C+r4y@3`m_-%S7%3`lHO=-IHu?uN(KG zVwU$mYFkqYNBtmFE&rJO+ho#ow<*SScT4$Rk~qk-v4y9FgH7+9F8hnk?lX)))-sQ;LqzDpL0p2KQNkfzo_NQ_vT(9rc##DYF0iRU_|5Z zxPeiP!t7ft8j)|>l|>SL{&?eLM(>DdZ~JJZChTmO539498|aAd*4Hls9>iDnC;G_J z(p?p>f;3+yscfsCx8V&bkwJ z6$;&(_t(S6$(Zq?5e$=-qy+~`a z%SG@zXMx$&yXR2c0E;_`P?mRFZ(G~B!@bJh$q#i0_Sp`$ZoBBgpD)tuM;BeX0`28Lvh?_@kV3Zy1{fn z>feda=6eUYPrrBrLV_-+)ysf5HQnwnAI)*_)hdjHy^?G?;SXAKBN!dl$h)oNIZ}pSJE3 zd)_J*M6YppSw0`m0uCdQki?_QyWDs1OICO6Tl+D6UwN&2F<^3{@6tUMd3xFB9SS2> zy+KM)0+aWu-wk5g<(9kni)8*Fn*1Wqh?3@pVOx&vZMq%o=yJri0VlHBX8-OZvpo5eY_vKXBS zS{YV$^0KAZ`M&Y#@O9t3hKv}Oq{Py_B4|lPSCkH;rd~z^3I+;4{w(9<0%ouej_u3J z!EC$rE^uN**)Qwb+Pu#9ws30QA6Ir$4)g@m*yGcm5kpLOzt)b;+J+5>G-8I#-eeo&l@TVw*%elBiY;QL5Q-jr!H(^8- zY*Y^Q3XxT56~qEF8@SK`b}V*hv_qOF1$gg&{%`5pnMKm=$8~C`ki2z}-c3=6>gj?3 z-j;4{_qc`U#=&BrRN>dM^a`@M$l8nf$^w}6ev>@_Ib(NFg$MAUuC;)$>I{0rNnuM$ zkc{=3>Ii2Xu+o08J4GW~>u4KA$H~SeiCdZ#xz!7JcoREh)i%ZK!vkP=E?>+O9qJ8C zn#>dF?&^_wT~L-;+jS@3ZKh@X>u`-HoRuj68NCUAwQf=J`$p<)EeIcaT}A=_P)EOB z@}%Jvg>kMZ=5RU5%EdsWT^mA?tx_6xrFn9B^VG*N&W_z41?Q*NnclXnt^52|5qx`3 zoeW77J@G5jB|nK-e+FT~*0Y)~BG$E>yqp;tf1#!rs+cW2C9wJTgOP<>e_MC%H+|2Q zz22SCjK{T+J+~1G&F3*9U&B8jW~Stl;LKvs6Kik1!jcf)95rv!)(0BvCM}#6Y@I8d z6IUv1(33(J-xUb(hjHX|BB*OJN7Ntss?gvbQsNM?MO9F$;1Mppy5gYbnRJ^8#q+^5;m|$$tf>MV7xz z#R}~0BLVSp6wWxk#R!^3_>KayG{{nsT(47#xBX`bLT-69Vs+COUVc%|M?R`Zy5g$r 
zG99?env&?aNnw`2aE}~qJ9jV^ATohu%H3I8oI>nAS0KZg`WQpJO(UbV#3_%R6|QrvYxT5 zBq%=FZ?#JlybQ)~c=If*+f z&JfcvAz+YQgB9>2z1nM*gL<`(W|l;nm?!;z{9~pLdX~uwv)%0Dt!o*rVwGTOuW3Ot zS^poY2rU=`jQ;X&O&K9AG7U6Ls0@DX-r;(FYU`J3>*s~Y$^3Zi;Af61_?%XeM@h9> ze&n>sd^smzhNJz+63JDs+cUR__Wz`;ggyw&|0mQ4&4MI|4>;3AZbieeV9vS0evbK0}Uqm0y+w3ow7fRDE zI1U62?_c`v#|j+FK9atfjJVExfk?a|G8SUmaOt^iJ6#g*TEaVT)IveQDk7QO#Ob{`dKUsDOE1L%19O7~t1 zAvD>nCFe&fX3A;ZZ>x7E0M6b(&TKUBng>9rcmz zuB6uOl27YoeU_aMbTI<@IUz&QpSf%3&ar1x}XckCX1I^{ME!5_63q3$ZvQu3bonotV6j?7PWA4gLxdUHgrEA@^2S;9*N6L}`|b#oQ6Nhy$Kcjn7D zoyPZ=mkXXn9) z_=kk$HXhBaj@KpExh#&y5cLyiHFQ^s=37VOxts2aVK{7C%Rh)lbwI58yW;O8Ui|(+ zg(e4E2^L2Mf_Sba?s24{DIC1p;P(&lY{+Ee>Iw2#A_OvHi3RAbTpGMy z-G-n8Y2!#e6_Ip~cU#I4?#M~re$}&DjxFyqVB?iS6*)f;A4X-<*}phT^i*Z(7L8$k zhnob(+pTfKB^CR0HQ4FAmaYx&ujucnA3-hVGJya#T#Wbx(9>-pNSP0x*Y+PjVCRI$ z<&%FHHj&xzm$?BuGvJ21jQ$3x?wcZv`Z~dKmL&fu@Hci%;J#@?O1Y ztqGikn$Qc{1+`i!A|(1zyA@5dXd8N><331-#fzl1#)~?ZS%pvvzU6~K?lDS6QHuE^ zN1Ax3`;xC*tv-`X5A_(4oip}%nBNI1IZX=W-IkFBp=ObUN{_#X0sJxF3eygD!K)27 zW;Xl-e0TL{59KaD#@y7pp^A)#HE)^Vvms?^$QxIHZB-}Xh!>@vi+3T>>xo9qvMfw3 zhPSM+ZEx-l5}uZaSTVzC|JY>GGa_lB{k7HKY)`}K0K8NtN7f3*S9F*`uPUH9dwESG z><9MXdhD^2_?^{gPUj-85pO6Jwpw+bzt4RiABwt?5(&K`{?tq5A{i{FxKxOQs?6)T(C0?5HD}GUK+a)uxfiJP62mM%iAP-YN9m#?sdvylxpA+n$4iSRc)b*N6Uf+h)Tpfn{toLhQ52oNspc$vazOZ zZ;{V5i4Y)loxD(&p9l@d?>GRJjfhY74#{R;cl7kH$XvK>eFZLjN5qog2NF`Ui+e!0 zjzP!kb-p&9Hz~(59)!W?jSV7aur{}M$wNxK@{;-!NIi!3<^?1DC4CcxU#<*pgLXl8 zjhB65xOjn>zZq$Ua5AT!)!Wh?Bfzn7v}^+y) z=}g81?CO3~OYg>p^rRA?sy0NCnyX}X1?VVqpLKoUCaOO>sCokW@5bPS==}IA5{1m? 
zcLR4K`$Ne~;T8Us0!T?$32y7gO|Jeqy+82 z$N|ks6x)zjeo@sFF#?Z%10(KP)qocn5IdLbri7Z!3Xq8bK1-uE-y#a{8V#v>8=Eik z%=v0fV%j)HiPW99Sg(@QE)a&yt!mCA`u3smkTdMdQ`=(7Ab-{;8;4!5am~1#)59po z(QF6OERxbYK+omcz^YQUQ_-SgD5b5MYWFAYQkEZzKQVj0Cd)A*PIu!>Fy{&own!=R za_Vf2J7hDqwRi_ETcm2vLQ5DKFacHwYNgi`6FGy*)r1l+z{(ho*?+punonlce0T_9 zkj1H6nAnNDU#XH{)u4JLXt9o@aLD=s>tb2?OHzpL5qYZW+ov%Mb;6cY9mB!6m??2m zZ~KyG_kqav|_bIeg(%5~@{f>qJI!NrxO ziM&zz=#oL4XIh}r>e=ALcf+PB0k$anrQJ86t_Mwd%#=J}W(2T2v&1g^SeUvb552cD z1V~n})O>1T;tLJs$Dp;Yw#V)LC=XpeYtQb<(eUxbid8cOs_#8QAn0c^lH2^{{8Ws^ z)NU*~E)-ZB*Zq42t(;=>2SS=Rjv-$oC8g>xf|#;T^YZQ)O0>l4+k#hzrI#fBgg6PV zh$Fcc(_#`LZ#}bODh_nFJ=9iK82}!w{iYG=y`Y?pVRjwri*!KBgMX=MH2;_TfG$;F zz_cbX)-mHU()umAo%o{@EF&C$7m(KRpq9-Z;DZG&BuVtIS9y#&>`tz1KW$)6K?3HU z=v{*Z==cy#gyAY@Wf(zvyiM!vhb^m3CNtSbnW8f>wyMvVpWn*3|3!6DDNYvhAO+m%LFI3r2<m1;28$Z=gM-s>}Z+*_rvn=2jy0clUNx#15}F9l;sF?-Tej z(P>}fAET=Da82F9y{)IE*%)>dSrrxR>tIVd?t^pcy;!u5!Oiij;60YNvT>@uWYyNH zubm37i%_)h^D!8B^y;$FOc4=^6YHIz(RZHlX-MH92Dxh73#rZO`7_6O)D+A2aM>H-_J21xTd<#*>uPpQ8tyRGfS_iI9;d~ zytxudUB*CxWhi6SoUi_W;YzT52-9Os{mrX@j656 z0VdVlw)XcrytC3)3OZ`o4yv)TPB%eT!qJ!;!;&(ipgMT`C(7<&K@=dmqG;Q;ZQHhO+qP}nwr%6JZQHh$WRwgt zNPeNFXWiQL+$*fh0ni8Qd|vw){%W_XfPNW&2fE*Y)XeRuZ*%x>h&PoTYANErf{LD7 z{8vbYJ=kynLKS!GKpO1stQnAA2al;Pn$=J*Znv1F>gDNa;N50-oPkTc_SqTR=1arm4{dKIGzLk&sOha@tFmc*U9RJ+k{@4892TuOf(_2j1%(RW7AvCwXteY z7NU2W9qWoKj5oot6TY}aE)#3|cck{o zewfp&Uh=VGbcf>U*F^RVTLilW!5yV~bF9@7jJlq}-vbZIYPtQ8S#bcqdu2T!nUo$T zuL(e}u189Q8MlbyZc>(Ll{99#O4?H_b{Cd2V;o_bR;#Kw-#_cF{dwAmvqX{b@v+~# z5WXCwC9zUnme-u)Zjd{MreyXIqtrY0uti6d@$3l{b|xlRq>V_Mkq>|@4A3<(gHtQE z>7UC=jZ2oDqzkKL3~^>ij~#W&aiLqU@h%^c|4dtrL^_O!V3b)%ZgV5`lNtfqz--)1 zv6Fc}<#$_^ZtmOPaT(Mj*i}LHbx7Edy$?Yxc;$$gZabfkI3pqe42DT1t~i*3l>fZIzTH}&IxUMpmE+76$mes0YL*!xVbTc~Tz-+@s+u z)zMZCaS+Iq8EC!;CQmHYbXU<72T#&R#cr-WG1*LA&mT`I@u+(*P!9(gR=RG5AMig; z2js)vaQ&z;+VkrgJS5 zTNsf0f|{mj`8dOB?g5JI26|~6RNQNJi z^5UP-#fXV>Ian$n3E-s!K_0doV&inWV({YKo%w9(<7e*vZq9 zb{v|XbvA}8nvmQ-!ytvu?FlKT9s3{S%g}2Bd5bDyvIZ@JYyOsy 
zk8~`;_8q9o?8xLM{Sf_Jy6hM^7g_Kiqj-P|3zqDSNU~Os*8<+!k1}-s#XIa_KlQiM z{B+&4fBtY>^jMOio?_&z65z5|R@L{TUWe zs_Z@kdrNE8bUGI0XRyN2*fBB1eAmmBAi&Nrx@x*m#mrdRtkoNq;-y%vR9T3Sq6CRH zv2BGV3m->Q;DWU*FEGyyHvTNFWep=<7mfyP#YsisPi{a-l|pxqxWV%7ZQ9}Kj(@Sm zJ_)N?HY=RKRNi6CsWq%Kmu1WAK-2-v%sqnHFR`+^- zZ{35QH*0SL%K7ZyMYDUSug01kFK)4{qoq?!V&&6@N~56 zRX4rCtE&HUPKy6&s4OfWny^pN&jF3TTK+7nwwik=rd>%u1sMf7jW&|qBjJ6B$2CVy zpJJeQ`e-tfM&-%Wce`KKR`B@l{cESsFDnfv_goZ$T(>&C7(r8n|4{>{93gG<7JGH~ zkS*6o<8Mh~g3`OamfK{!uC1NrwLe&Ly&5bO08v1$zl<;f#qGX~uWyIq1@67MdO>|+F+YoI6vqg?iY1<_OL7r6epRxJ&p*NntDl89T; zaQ7kVUVUp6Rb~8jLwLAkBX65*uTI&XuwW?~{e@H6Cp7Tv!lu)$5TPp|A};=_j27%# z-Ewk#>rmPBB{bMw6%PqoSo!Ww(M z4L<k{)Vks-9exAYY{b2IPJL05Cj*34!J^H`_6 z4Fu3va?uwN5HLMRC|x83V_aiq2kE%c%Qbcipi%9@S+GCmL%q~xrn$_t0IfHo}C zw(L&E`>hh*D-Bu?R#l`3abcF56VFJe728*e1DbP4$}h`LfD1>E1O#f>c1r3!i$Xbm zxDy$r4>pePQ;)<};k)Uht!I0phc>wt@h#cyen6&?)}s3mKCy|$piUprg^rFKfzm=^ z${Jj)LD{sMTqV**i)z3jaEIM4?fp<-MmtfV>3l85hhMc4!8_sn=^17s5YMujRsGdn z>UBV7isD*wOxbw-a7}wH$c=2cnYyQ~1!$Ha3|~tPb#x!eMRfw7LZfdiYss(`?)UH& zW+Z&XawM_hbb13-3#0Amc1QHxVSy^qD6&;~RUyc_mc1of%y@W)uH$!Nep%gK|JNqo zJtg|X}v^uHht7RLYeM*V*x4K@ZQ&i@T*uyL|;{-2SC2RMt)&1M^c*0RMy__=G? 
z`GWVeoCu~dLk#Bhxk&wjH#8v^WYHS|M3Fdr5xEpZ5X6J*<0EO3dhb%Af^|0(K+-&wT)}01-e!28x6Z6a)waaA1-@aN?~JfN2By zG+Y5KghzmgfT9SFl!Q9IdqyxZmh#!1HUN7FI6zVg%Gn<-oPtAO*D!$rhyZqsTS%7y zmC%4g03i}kaFOqCV$fF~G1d(gDarB42`Pk|ASBTau8HS6pdLdD@c=+nP?xvR4M4vb z*m;0%fxj3s$sqs+=a7OwCrsk21$_fIN&w@4Ap!+ZDCC2{y5J6A#wCC-ENws?HOd3J z#uYsP|Nh)5fIv`zf9V(U4>cmp12+atAc3yVAOt?f5TF5kurT0%^5P&1dlr6xn1(M< zkYPm$8J=}ynBZVr#(=%rV8AM3ZJq+GNq+_m^Cw2Wu>a8F?y0F2JTfdKq{e_hQZ z4Kr|&0N*~?zuyp{D=VoouqqyYFhBI=L`6&32jm4bkO&AU!9W582p9u8Iw%PC^F5yj z8u)#`#cRNZ@E{Yut97UOes-G9@ZlW(5Fz0A^E+JS_L{7yi15RpB|l+w;HZ2mX%0y9f69{1Grty$Tqq=EaO!0^aQ_D4*#stb}h2 z@aX<-Rs@0>trP?}3@~T`y2v4V-qLZyltMj)Z=MY-B98QHKBnxxrR^OsfU3ep2K{&q z859Nf{XZCpf;o)yh$vuK{ksb=V1JtHpn!r}zDU^&6&V56cW|$TLpZIcrBL?4hbk7~ zE`PG80SE;_kYXVL4a{Hw)`@o_{cvd{PzX$-H;7>d8QqHfkHb7Vx{X6#vq3V3{OkRz zv7lbUgd$GO6A&&K%GC{T&RC&f^nkL7J*ca57a~u66^krpH_ZLwwB9$YlI^0{+~{v% z%+F_1#bu`7neY8L@n8IwbFq7J>hj`8l`gc^c~B@YzWugu08HD27rA31%|+^!aY!zh z6E`&WwH!(_W_7}%-y&@Kx2d`=MQr&~wm~}pq~s=;&l!A14r^H+r}1s-!$7J=CHK1yt23U(g~eZDmJufUx|W};wIk!t#=(B z0dQTFZiC5C(8`O>E0sL8h_H1Vr9sY^;$q()<12@L+e!cts!kcN=?mA zwbr3y8{gxFMQhqd?{c>OnUV*F&@P$mBLU;*Q%7A4X6lCD$fDETl><^-a8kUmIJju! zs_};1Jwp3=?}Ase7)uJ~_*nUE@hu~JMwt8!iST0PSga=YewmS1N(w(e^M*p!M`3iS zIiqcu&Lk(KGoV@|5L!g#q%3|& zb1qGVQZ>5InFwW>E|LTV#G{YF+f!WKktVGDCj`9v@LQrtU35sWFH=B&LBPx1WX0^T z`}xLI9vlAk7@b}_^6tgevE16dghem!2v2Dfwo(GNF&w#{90gfWD=qcAv@nkLYf$@= zWDfM6|Nd;_|>^KuD9aYmm4|kaIJWLwL1gS#y zLFSkZ!n2DoA0&#A?pWHVsOl5QU5R;z*EGOn!K3J zN*A(T8@helYe6YaMgEBxXu(T6CBn5^3)d4w#(Q8wlSoq_5 z?Dp8|?zVXuG-vPxgRo~x(MDfS0~AZeR#UA*#3rFXJT=Tx?Ho>SwApGq%9r)!a_lwN z9t?%Qc0svpQB(J>&kW9E+<>EFiYa62l? 
z>`P4p^<`7fX)~qtH4E5^Z*C`{IZ@#*KT@VKSnwN6c+3mS<@FDX_>{Q9^T9uTvFYk9 z=-1F!o^Iq(8YnHlX-0Ny$8)stJG07gv6Ah+{+cn=zx|JUqXP3q7Jq(k-t1z>%C>FE zow{wSDC#?zj1KY(n%e0iJP6c=IG3=z=F30`bZetg%ikX z@=Bm6d0SbhgfQf=Nd)hwgU`&pTRk*nVi zo|FdO^u@-xN3GbGfvL*$$20`9$P_J*-VSVaik(&1nTjdz*)FD*FMjG1^BBMhQ~dpJ zX)lRX@~IZ>+4Z|-56Kb8Z3h}OjfdA+x-79XK3QI&wor|kCU(bBJ3M?%C5eG0B(^LC z+8jf}9id2TYc-yY8nJBqpaGJx^=ei9)3GZx%OGx;4X6@izEWK;4MR*ZWXcFU(@4rm z1sXIjBDCh1RY*M((&f0;=1}>ZRAnuwgIEXhVryM+9hNVB*L$ykfjO*o3j|hgxV2vfJJg=)VaA;o+-)gh)<>IiP zJec|qx=$Wv=!B+y7eB=|qn@1Y5EQkn`=fiC5EJT$!FSmPyN&j_CUC29`Dw+m(_qmO zHzsITvJ{V$oWirh!}>X$R~c;?{Cn%U-2ak|1av>m&$>qRzEOjHZ$mE_sSh~eoCw;6 zPFC`yHL%sLDK^YI`lTtHE^qWcc)5ON)GpBtLHXny>6as;4 z53kl$`2KFNZe!7sFY2iotxQ!N9uQ=`!IY`?Oy7L2d^7Q_Z2WXk-@=HeTL4M7b=|#| ztcc;xeM0$`S!ciGtX}wFp6nyl`UbcCRDP-kfTk$cv01Bh$5HCgXSDxdplIbO8tE_oJv!+{^bGsd!5t!! zQ%DCij>NvqmOCI_*ztfq%j~iUiGJy?`K$C%+cM)qNeE z;!=Ak1o9@#b6VQ;c{tG2N1gA$bdT4Ij=#I`OP4}vzUFgOh4jJI94Yq9CDv*vv*#M^ zLZ5#_?BY}6c?R+%Nt`b|FS=_)InSJcYAPm$LCpRW;l;h#s3l!xiuCh&eEodf4h?~V z!y|1xF4#mip>CA4p>Fr9+ygO&DcN)I_1#3D;|Yn6ml{QTL1clJ9Wczn@tk;ajc%N; zFu<=qUWO~l)IZnJ{Gc1Pq- zCi)!0Lu?6yzn4987Tm7vUhqYubdSyuO1-PnH-*?twmKmFl7+M_!aN*bv`77U`iC)+HJ4}#t< zLgTnZ9)`W(4B}{{vHk$P%Z)uxc@5WT ze&h9Ydbg%szvPEZZ^cT{MWy!g8S%r{uuXi&7INLdU307JwGSA>5pLrkKv3G^_T5sp z)CE6`D;3-csnF$RGxE^!ZO;YUIVtbpQ1^~vAq07gE$BPhmOv5e_V4fSWE*`X3FmO1 zdbsj1X|df!XHDkSb~?-Xdf~@n2P~x}>o!Sb(iizNdD30(B{}k)o;&Bf>LWoQxsg3e zx~p?!nhX8`v8!eD{>UKcejb?o+eXcj1#3 zIN|#j45JAQqn@0LTUpQ2OtaJXf1XR69sYGriyz~3qB+OC03x{H)b{NPCly?`t~%Li zqKIq{CP?&JzfH@NW_ne0yhW+pMNM)|3&vC!FCVX`7zJHmz?o5P4z+U8OvCvm9t zm!no7aVK=6eAzqFNAI#2{c`-2+RMM49ENt|)M^ii?8h#y+?(3KyE$U zaWd+!)tFFcz4<*BFg+On;%(e7sGh(m|I7Ia`bmXB{TWNx(CNhG_C$n$?<|!7_2s&l zWjSI&1Fv@r;4-H7T|GPGu_Wz2tIlKx-_hkhw-Qj1Q~#(!c|O)vOmIL!=Caj~y|R(L zQ1q&%hQEc*Q|`&{T&3AS3FT>B+NMS49D!|bY1NLzx*U?&{^aGvt`46~QDYNzX~1g5 zE4u87dFJ^}vQW6GHD*2Y@|1rE$?Kz_XYfl>tp1#N*NLx{b!H(7s&DdL>_MiIqXe%x zmg6Q3*kEKZv0jnp<~Wje+dE}jLy(9G-Rk*%72Pz;^7588%n{J7`aNk@b#^7wR*)Lj 
z%Ki+{Z&p7K2u%GDVZ){5Uto4yX?nZ3oO}`|dv>Z5f9FHSq*d{n!l`1iCFBMEiAwY+ ze<}yH%5wIKbB%Ysq|I_<4Xbf43QB>1i?#4cC%=JghrE+7Eg#hb+iU0y|4>x}oZ;EG z>etdWYKLSX_(u#qs1MObOKO=u#*UYpGjjsZWCdz`7lw9T>JU}DV6tA0XVtfu1^0#)M}C-5n$6>}&bKD1 z@53M?efMyU!PoO$_S_*?cO__%tl!TuO{a%ls4#a%DWxI;P^K<+7czH=a;7&R=E+Vn)ANzJK|;#Z@RnhyKg#_K>wM`JgT*G6py_45*18B&%G({;}GXi>eHD;h~lg1cOF znWhrLvxLTRRTu2D{sP&8u9=0~N|F!tVN*C4mM4g&TLZG6Z>m@$J*`|Q(WU*fKk2M> zW>gt{Yr%VwmvJm^fLWgZXW}r_rIhC*XC$liQS7d=qYCG7b{(6XmuaH47~O_*2aYm| zQQlbm>-44XU%mvj)&==_T6u)e-Jxm^sS^5SpvADEz1G4x`A+9-`|3#WZ;Pk=zody@ zuMsj50f8hf{d3ueGB;cn3VFLaZvzMOrm_Pt=8wE1usBnh$QwiSgO#(kT&XMEgD>PW z;)@@jj$?0&(Yz?E=*I0$iY{{f)U{erIaEZ_-d~&27j1HVB=d+z za4_16@)jeUr^cfhd*!uMJE0 zdl5O^oK1HcUezas>+iV^H0Zx4kmACCOX2Ov5{_c{+Xq%voY+Z}Tkrd&%^xjrF!c#h zX8dcYx(iznPnWHD@oIph4OVo~BJuFenznL2?hap+kTtF97iVd1rpX_iCh(c4(~fGw zH}cf5cVP8n2$z0;UDnRt5e3VvoJ;kdr82@XYLvQ9Qjp@~)u)xvsD3Bmgu`#w^e_Gl z@)VZ2D0t+oOCIK4h>hB9|3?Mjj8r^wC`ug`gDj}1%mMGqe5#nYH66 zB0SX);HxgqJml4w*rE0wNXQ?0Hdw6m`Y^y}=@mA2d8gYA;}0*G(s^cMp+Qw>x#gnF z*DxrrHXUC>CqGE0cApuOZn$E$<9Z~j_Fv=nFS_j*^?C^3ex>u(bi4o$()>Ml#TL|h zaONk^au&Qjl=^rdzXY)Y19brIjl;&qXNYQZ5U*(3X8V}wW9fwcR%#ofiTDGb9GknZ zXKG9I%6IoN=8*5K7i2BV=d(Hx$=E3`%gLVoKP}P&NTqw?4u_@Z40$c9ex@EPY3ajk z+JJM7ygVB_nCerOy14j9^{)Vz0;c69g=wRec+QHpP3R`WIgT_ShgN*FJZxy z9{Fd<3DtuGACKQxP;J~puy`VwRP!ba&VB4ptQCvl;yTYDDy_7odrbUxTVfaeMf{vK zj3Ilo?w8!4B#R4Qhu({ZiS*#Lyr&2y%g??g!m0?!ZN8m;V#Q6tbNz6vhuggYJw6^jmRXe zi#bnDC&Sy@7^XwJanp70H!4runlI&N_VDiSs3M5MSkzUFmO`CQ>32p=YLYaA6oX#4 zh1OBqEdCFjBptJ&sl#=DT~&D?&YwM-#>51I+=OSb#$~faaP&Vh9?{yJ$aMPFWe)v! 
zwyDa`CHI&F=vXhM^cuIP`WMb0vtWxAI58iN+tH)_rnPRIS=7;=4KL4yY2`NZf!yWq zxs_}6d?r4`BMT(HaA-NicZfyBqfwgoowz~7%Y<68d|?^=ZqQm!SPs_SUjPyLT92md zO$K&0n^l?#9N(HO=DSJ7;}cmpsDLJ3s#gohQRm_H?aua%Ojg&njY zW}!Rd%h$~U1HG1MXDUx9jAUbpkUn)seDvlspDw~}99L^lQ&T%4(_O5sIfZL`lhW42 ztY7HdM)LXf>xb?mlcKCA<*+5g=wxkhyzVG+s&Ja4DrF;=6U1@3G#jiLqou==@fqK; zn3psZqCSEoE7)uiiX(G^G})G$3ss>-aHFPSs>uWhL-sFt>498tT=@0Q3P13vOQ9hD+jPhL zzke_NKc+iYPA0bhb-LqV|G(c?yMZe#?XA)REmpH2CIAF2Cjb-{FM8jJ5|RKCkpLob zK}gjvM!O&=R4SFIm!e-lNhx0xNT5^d)4c88`R`?W+A}48&u&`ZHLr16|B?0RfQ=GG zImGY^!3K{;1_Tml1~k<*mj0&=Mgk8KFhc3+A(O~~KBSi!v<`C^BVx4J_!Cud6Blsw zkO7SvesW>72uKTu2B3ihke&=GH4!8TzFMJoV$5g7vP5(q#8iyct9r!v;@ z-A;^?7@C2?aIU#I(2#2l0&GG>ad^uz$A!Fd%T>KlL;7 zs|peMn+p><$Uvb^u?ZgO2Esw0ivZxuic2tyO%4-KbkHB6fKCIJ5C0NEq+9UnF?2Uu z2*4^T41hv^U(Z7`je8L_4Adx*u8$<@pE{V|M1@rsBUp%Ffs=Nq?oTD*4g%wU%~#}a zw~872AoTU4%R#W<5Ux+Ok5^YzW>Jz2E~I=y4+^jN4el(m4}g#mQ&JLw0&oB(;4z#d z;-AX?;3)bH7vw+a0Sf3>hoK$-tOr5_a1k)@r|@oAm&WdRN&a4O&YuKcM! zoacX+|958t0r0CS4Ij)-3pDVDAHNkqK!EWJ{rz|Iyhr)R9{gAJ`1knk&t7tRaQMhQ z{m1?5A5utB5%?pKlVK+>o1!HjTFl;V363aNBd5LaNck9{t(1@q>r!BL+c-o z@P6@e)>mbL0rqP>{f{6CprV_5IUFd&JsAT8=pOj!3^DHh*VZI}0wH$rKpDUz77zeD zhI^#mdSpvzc?CBzJwBZBR}t&<;5IJ5VGnNL?yLUg&QXk@2azEv(-KC8?%2;&V#+a< zC_8=pkzry;N!z5y)Exw5?2T@2PBQ#C^0Om53Ag9_(jaT_>&(!2>DMMmKVH1Yma7?W zw%qFY5JIJ!pZX@^`0a1~%0)yc+_;XjFI6h~Tjq~;Fd=w#Hf#OUEa~IfvO*>O-D+fV zshRD%Ds`F+0F;8SnbD(D1aVv|)H)aHBqDF1v>1Vy2Xa0pCmB6_s`_?GA z4)HUH9*^A_O+J1U2!DbfqYitS{Ncr73A+B4gm7(E%e6@ti<_l;y?=w{O6;&mp4j-DK4TsO4l{e*b%Fk#ISpMv4lI>W__ez9c#rj-<9T;btZ6L7afux@1 z_*A{9mwpA?!m8QQ^NUoXqke>zJ-9SOr1L6QHAH%2YrTU@r_FU&aab2yn7g~WmWf~4 zQ(8VL#>oks79#36{pQsdn3Rf2CZyPFORNsP^4Yzu`dQnQ77(sUn}fWE4QV%D0kVh3 z6BKCZAyCkd_g!4wWoe9qBbX48#<9JA(zEFOsooP1f2zv| zq%QtZ=k(#ehP3L$vn&Is_rlnffg*nCNxYz6o80~UQaAmStPZ{CN68!MW_b=4E#xD> zN;1oN-@JGLO1=#eLK3sa78m2az7eK}tqa!0<=8;0uxZsuO_P~qn-Ak;{lV_k=qgj8 zFOtC+L?cD|%sFBg{=y_Bo#yX1lkkyysNPv)f}FEie3mUFv{1QywVg2?2dsNj>1Y-! 
zoNfCT*77g^jp7Ku)3BUkg*M;^ckf1uljMb=E^PCo#<2WMeboWgMkk?8?3po-%CTyu zVpOfE&j-_BxR)w3wxLc!O^XONDXd^@gX(_HJEX4)Iz8&(NVLV6`pBpDpz_A!=4%*p z{0viezq1_%p2t-X9`A?J8*RKDnRs~HVrjhHUxB|hiA7v=ljL%fZL5^ujxK>5F@Bwf zjx;4XpKgaHmZ_NUFPCaEFzPFdTqO1W7NTN=(BXuXc}scizho;EH%rl~78}c-M+(BL z5W9%p4%&huh$)`YN5Z72TQAy0Fdstzb7%G&+rF~}&^*_@k5!5uJVes|hu@cC87Yz- zSN!Pw(+=;-d>ECZ!uSs8Mx0D65k`hf)phAJwC#QQSykZ7+PmONUIzmk&7T;uTd_M` zshcD1D`~~*vBg(9e7ffPJ9VF!q~+Reqm!js_7NasO3=eRY>C+lKC(zw>LiobXf=|rt%I9p_xGSnwh9Yres&7q`i#ov}LhbnSV2#}v z7oQqa$T{?Mo+eMyKZ8N~)0%uC3?)#)4ay$WbAF8J-JSfJ*>}Y=BF9Q0CDY-rh19tio0ha;ju58$INUt3)U>H*G$iT za~&^ZgnmrD(2J&HFhihv!&N>tWa&|i)H_*OZ%Gnq@0lBMtE`z7eDxnU9>3ZO& z^>tR~GG20QIAQp&ZrM9V&6eC6@ZiW>0TEM`q99E26E-o=AzT;A;wXXjlU;GV=yBq?>>iJ8Wf3nZeb zY@(WUI{0S(l~pU#z}`I= zTi^S0n6JE^3JbdWF$z;I>W(;7vNW|a=)QwMm=^gQgp{LTCg%(uKO|PLaOq5A$$r7j*W;qfT z6exL?!okEfwOQMr$vYA&LxyXpo~*lPh`m9>RV3N?Q1&o7#Fw>DKLbcutw+8rd&>Qa z7}ietW2FT4Evx_NdPvv~D9v~5DEW^XK6Z3zeDV_))wJ)(w{p%Js+Lo^oW7{|>V^s; zO6V1uR6!fUXG!-^KSgfc3WJ}881ai{iMnw>^$OmOMjMUOk#)!N_PNwvHUc6s8@bt? zUhTtqlH4@*c4l=*jB7TW;hk*Ea-ww4rC{S)YEGh>yb(P_h~lLz9|;H`%xC`;UG5+o zr+ru%Bg%X4`A)Jmf3R_b&vDj#Uc54Q&|Ouw1Lr2clSlm1z!L7!EfF-$!*|HDsvGD5 zy5_z$D!au7IIVYuO-f7Q4gyNAIXuQ!x*sD>+1wcA==q&fihIrQ(N0MkG65ad%dA?A zWf-ULX(=K;c`=W3wx? 
zjZy`sS!b{NC?k+A`;2v)1EQB<%*`XI1?E<;SyEUSM)VzdyA8<|?6?j8%%Y2j2~ z)*JpsgSZd_+{YwzkoLco%y(3=QI`8Cy7WKeNFY#fZdK(&lVprDlg40{Izp>>T<*c@ z=PRE%#?b@kwQg;!FUN#}n!+J^*wL`OuF7h4<>G+`Y_kcr@vZ5hvEvuUIn>j~5R3M$ zjL>52dhZ5)=&7XYFXzje#hjNiJrSD%&Bu8{eGv7Q1hlH5Hbn|C#>~!aX!<6c4*U!4 z=3|Q7pn*@b&kjCWHveW$ju$bGkjtT+w#67NG&}FYg4C=WK@^^}jo}L3t3c@L`af5qOMZIw`15|aKEAq zSoYK`d1edpYAZJSF220-j&==?RiK!ivzJNOotT;pG_L}_Pou(02342r-cuO(DvH2*ZsY0Hoq}fK~1YV&?mUE z%4h0C-1;y5IyoGf+-YhqNxLX!U81a$gmRd;I70MNO8)9KeG2F{JXcGYsFROWv6YAR zJB$3+i+GFMES7KcvF4g%PjU$rX(@Ve1=4XkHS*c@U%=}acn^j1w{lr0Y(-OSP+m%Q zHpnta|GX6TqtlVG?;>%|RaGL`8R*l6nw^Yf{FTYwv}?@=Xc~+U(uI+2H)EdHt^Z^` zpE&|uZE7HeEGxxr!H)qw$}ch>KZ2oS;7^5*YBuTN2RSRg^`evPME3L;_C)n=tqe&q zpnd-foaZhwzcVbUHG2z3JBxz&*{m%R`gsdHoNwinup&-*j|5|3!QM&^jtf!&3J&`lOtj#=lIo>_5s8% zAw*EN-_A3nf1;#zuOjBp4FBm9t3IEKkGprB?j@zHcY@m#lc~So_Blnd*+_ZHs-vm= zFJiw+;7NCUNc#tJx6|5TV)?nv2B4lXRowR@_|n2FL249@{qUIRcy^y?OMUZN$N4OL zltA`Hmm26)Yg$8HRlQe&ySHhIlZ~P2b#QKVIhk}bAIu=)+Z31FuqWf^UX<858d+tu&rq#&{5}*Bb-1Uq1|`@?0#fSKUK{H#mXRwbbn$$$sP( zBcU%ga=svxqbuTv`S*VncarhD510;v$I}}<&|_90(s}#2$SDQ>T4QUoY`CJ)wY}DkZCUboxxgBt$bJLTrQ^HU@o;XS zK{HhYVm%d-LnMe6&DzwN*!~HQUZOyR;o$eqAcQ~0$<&ze)w=8-DIduES*7o*j?MnaAEAzEphh5jIbY}cjn{;YKHLAbWQQ;npbB+Bkw%D;@90D^PM{$s%R8#eQEWU-#&IElC3|0OVB21O>hR;1<9*2~- zH!tt8=qCa!L!zS8s#}mwb-QMG19HmftRcAb zblqXTQXua&$tExa4NKlxyJ`iG78RQwf}704wHoMBy!B;D90 zN=yT6VoAF8#giunWhoQV<*TjgU9v_06iQ4qAq+@pdO;IalaR zq}Ba=Si~RuD3l86-A%B@PF{RCSj4}}cIS4i##&`U82Y8ktbcDKf7gVzy{{O70|m1N z>wb%oIrW`C&sw)+##+PO_hU)P`lPnJvwz5LpR~5b%rL|`KOd{mlYywi_0FNsp8ma% z;Oc=!+NjW5Pb`B7{zJyHZY%_=o1u&*P}{pMdY>}}zRezu=JFisSQj{j&v)mYte9() zR6IBf-%UxOOIJ25T7#$>y|7{1uMWlMI=07%bhFD$T8nM-)0)vKZh6}I^x2vzKHAta zdJMo?9qhzfV|sOK4Gg>_&uN#(u1MR~UWp0g`NxjZcv)7kMZLQd9Y#DHl z)oX~Zq^mo|%Is;n?YWXqAmSKp@SwjKYye;$UNrg9ZOk;=t#2Z3JyNOA|(OEalsF;T5s&>KfdnuH1u&a?8`+c^_Pj*DXjiJwoa4zD+*C zr=l9~b#UNKq8cQhUZdYVr{lJx@UGD|LqlKpWQ59XA$4iDofwGRkL&zMuF#%EIn6D@R(;vKSqEv;>E*{v-(;6=4r7<;OTQ0H89@$5-%gS1 
zg!OA0CE9M}Js_v3p>UEqCtwz8$mOn7zd7=Wf6w*Q{QV9d>LTGB0|)!m@Fjn~omodc zwGn+lbTBwqiGM#Sj@mvmMs6*X`}l|e`Q3Hd%~OSyABs)i%H1+@cQrRTtr|LX60%Xs z!gxWweSWrNU~J;#^FUk7yi*(EpWY+mUIYzLfaLAYD?e15cI2`416S3ue$XTu2I8F> zfBhcv23Ub$eYKW^bzIavPJUMnjr z+y8k`a|KmN+54oUjU!k@nw;O+sVxa{;hKhF_y;o$vy-c|NFYTaK}v6p3naJb^GS|+Q4+lcd8mfU{e5(;J{K(kRO1ep!wBU38Ek% zkRdN105LFW1t82V_%{TgMofVODm+m7n;y)B1SMq5fQo>1R2~}u%(|Tu5Hutp$Ve&Z zNC^m7kPr}X+>e7OL0JGbjtd58^#sU@;6MR2P!;3wAT)G+y^vx1`vQ3k;0y#xN;=}{ z7Y>Y?TPT5n0RnCnJpZa-J{@O}fPBvg1QT4;`Qk zhgSd&V++3=UQ{6AP66^>ZMaXI!2B$4?gOA!5XcMj0`4EvAnF~Epg;iT5ENMGfI>C| z3l~Tfm@yL=ztS4OMW^tNZ(hp}$1~7x4vcb){O#V=ztE2~xZfWvXuvSE0SfXKGU;1Xu|;4PYn);PjLw-m;v_J77j#gNN0h9 z4|V!nE&8i($xvPc6;45%lUwM(0(D=_hl+s&HHy=X{+B<&HH3)gklzojje%qIRSg-D z4c$+Ob9xD;s{U~m6bSG)cwJyoP!Uj)kr6?Ha0wOS6?F6AU&?;(6!^;q^&7;9`~J~E zpcBxRK~~_$piOZKdM_BbT`0h^2zz<<)DQco0|){dh=73s+Y-1rxF7ImGB%*F#cw@* zpU5Cz&Tn6ogbH;OHn>u*bi29%SG@h43z4!?leDnDVVmdz$g9+W4gpp@XO1v%r*LF{FQ*61c?|b|70-b%l_5=dH{-LWGFBh zd4hq(LO6S2YZNNGXCCKQ z^HF4U!^4{)>yv8G^qC4)<_UZO^Zl!0?wCrK^YSRp8GZ*N8B9&Ig?t&>+kc6KGfO)# z%dp{ejz$KKV^Xn=EaiGrr;eEUr>A^o?(U_Kf746X6$sSOSml)Y9@3X7dCd{#=*sD``JJRnl!WR>LT3)b&mdm|5hXT6zjw@LlQXB z)jp;Dd(DNjgZ+|=a7mN`(>V(?s!G~z*yyOEcC{Oi)_0mO69P(-czDG0_CTx<^b*A< z#n$dISu)kQb`BL*gZ6PhI6hqQtqN|yI2-k_-dV#CL&qi8MRvcHU zzm3BHty$sZ$K+9Fzd7cMbQ7rT-eQxZgz;+)#uW`MwUyqykhiAnfp{N3_yGrCegZ^)Op zkHq7u+gSVg-N(Q<{v1QTeOGHF0b0Ayrv_P051|w0!4AI#zT>Lu0lM8pnS{s_GEWMu&&c>)UO*A_353nu(lo@E2Y3uWK zyMU+3*H-TIcch3<0@r>c8!6SyP+MeaNiYkk^fYg718T~~x5Hr4SO*=Hi1A#bh!yPW z1Sar)l?2flN?(9|S;Y@mdw`95EiEaZ$t+OZ4M~Xa405-C5-CMbESHEM3LhAS`-BW~ zOBu)lTumCS@;D470P_*#P|{RUM(Fz-`6<&f zC?`#$>H;{(fM6VAmd(Nro|E8)7lN|{cP=v6uN4h9G zpU#^dcHK9;)^gTi#Vzj(Z({iuoQRUk8At=DWunQR)&?6n%oaCpuCp377XaNE5(z#T zTPCIwpjW?aigVMQjnqj|2{0kOR#qfqO1+jNJ^^b2mXJx7=gQ++`#`JV8y#Gfx?HV- z*!gtUT&t8KN7^(-ky~oj<4eYCCiK3V<}za+`5@7P-$qc~%dTnIAGWwXMi?-fzqch@ zr&tJRITE?iFQ(z#_es0GjyU%~Z=aafy_edHFr+ils*)5a>+^Yxux{Y)u=ISfe4sr( z+#JaS`<^C*Nj5Dmkp-rv_-#se2VvPx+1K#BU5{r+FUsoA`@qqILDC(tiS#Gg$x$= 
zdTde&84*99QG>SVrAEZODRCMG#~?PA$WToQSUQ=X=6_}0Tx-B$j!pttLDQpb^%{=( z@i!j=jD&wpF)qKrHJ1xgXH!ETSbmBN!l0gX-{rb*9Pahqw0$E!VAc+qMOj{DEBg-aPq5G9P^aD&6h)bpaL5~F>QzO-MlkljS_Fg>^dQMiKG|+&sG3CurWWd@bkhStVlN(~ z9lCLLDJgYse(rX^0wVv&03)wZzakPF?%vTgjE&Lu@L&$B^LVeM6%M*oCIaJ3c{l~# zcD-$c9UeNWMAiohLN~z zhglPn&fco7T6&8vKLzdFRS8yM+Edz)*{c?M?BWHFBoFtNxmZq=$00KbkU`IQRdUgD z+>Sl#CLVj?jc~Me3Y|aylOMeG4k!}21QwbS%YTzJWvUkb*Y|e4YUyW@1+yoP(ivha zD|V(178BgtT;@B@6$2SDU8)Cqh0auFaf}P$a?}w3`ZO??$`#Av7>m&i0Xw;=o-4#P zEVI|!R~#qog@=6ceA|t#o-#kv=uV6!*gWqbaRkqJa^M+P;?#W9EJ{1UXbq}_rL5kW z4#<+1>+$yPthD;H=we3NL>+a9%rId}mx~LMhQ{A^;#-Y8F6Wmno4v^*{db7`@4cw! zGa-@A+*#uw!aqBP5oxS{YbC#Pyd=7P{RO z(4zz$E@nm+!<(i3MYn|61|f^6K&{&)X!f(C0W*2|F~mp~HUQn9M3?dTn5(;nMF$mU z&LhvQ{MouFgYF{!q8L!h_ViH)aueo{@QBSsR+{=FG8a2FyNRmAxWZ>Ez*s|$^(RK& z<|O;95r)xP2MT9)9UGdsd->rN^p9>5athU*=co&hJ_T zarmVwH!qE8jTU;xhD^xF8qu8;HOQ<&?s-Iv;SH7km8X0=R<#!v{Wmgpq zwL~X>n#lRPSQKf+s8G3aSn=cw#Ivf8x26kGK1Y{t^9)=wc9_2pKn)u1eSYt%D+bbL z<#RteY8RkBmjIgQV;}>N`YWk~-wgSU9yvE;_*`qW;n{4_-Ca@e6)-6}qildef)FX`eH|3BP#5EcWpV#BIj_>%LS^5+y^hlB%<`tY zkk3D9#n|7FRI20|IWIE^S<8GjmrJ^DVBdyU1ZsS^mAq411Xnk$M!+>B=Rz|0oT0a? 
zOUL_Ge6Xm}6&XB*D*D@qgbx`v--psCdcmdt77eT+99(n7Zhcl9fV9SL7#DRLrWokN3*aS9@H{4J|ZAZ%X)B=yS9_s@SZ!WULQry>Vy3tDz+iE2KH$ z{rK`2;H;)m$}5;IHg!I!e0=G2QomFZrwxEyx`!sIWhc=NOvi5?B)vjN{9aya@7YW` z1RFjyg~K3D7&&`cS(&e|7k5-H@*I15Iv=-u6SDsay+@cbQls7%%UkVt>S-wRqv_T^(VbyGY$7^ zI+LuWUZz>&Zd1JO#+{uy`S!LIN~`13+N18J4>6qiK%RZKs|zJ5ofX*TH&!hM0LK^N zDz|P=hBjoC?0+n8gBY4EfYmC1gccJvJ8mOn@MJboY6y}if`Wep8MIw`ib~VdH&4mA z|CxDai7|pE*vO_PHhkuIUuykh`I$oCu8#;!<$2AqwYmm-Xapv7HgXD14ws+A-XU}{ z7O8c2vxB&Grr0VJVwdM*RQ_5pMOnaYB0NS5KzjtNj9a#O7fasmC7<@^%g`{HZQCZH z8BC9xD%;G}+Bmm&Lw)B+pdTl*XzsI;1}q?9yQM@V(VlWi%uZoJUB9EIBs6+1JStb4 zSgHsDQAY}6XkU8j61FD%sgCX=ZwSM*+#l);5bEB63CpZ#*&P8sc5iJz=Bf7v!g|mlP7D^39riN`NB(MQyu_%)qYo#q>Be~^sMt43^mUqP zcfWf-I7(H}y6I+1?!xHpW+^ELNO;+fx%zn5aP5G{-jM3}`C%-=NvF_ZQ7*m;K;}*r zcoN|u(^JZA?@pV#C`u-s+n2pYt?-J#x*6|rcL;aYU)Gew7GmVcbwZ=pqyjUOXWmRW z?WM!MljJ-JRh-aht>4&QP`s*y(JPZ3_qX3=e)eb`x1oFluLas}#7O$P9F+f{_QKdX=>O7&}&3;#2WWm;ndD0qv&4hkq%dN z;obIQ=rJwQ_m9>Vj*9mfedOQ^gD%qQLBRZOB$LcY;n&`{9S>*I+eP$M%Hl{zX%@bR z-Eq6Gd4P^y|Dqo=<7SkSO)6`6nD8Cc)(UP{v_Uqmbxoh4UPRP8$~U6Du+);p9NMst z!uLBSwRUx1=qz-f25A8BO!dLszHHR-FH>2~C;K6|OcJyFsA&2KZhbgA=Ru@S!+&z$ zV^FmuCUjL`%-u3Q$D&(2PtXOctxNPca=SU zD4W^E_b=u@H~9UiGfpVxH>EUY;z#}+^;m>#Gbn+^RkXQ25nO)S(IZy7&MzSiSeh`i z#x)E|9hL-SrArkVDjf=z_c0>bod%%*uRFjLi;FMzGi?lvowkZSyLBA=+VyipeK#@w zNVS?%jsb(K16pg4dsxtYUi6VUleA*8&`|Yu#wb{ExLM$o z{aCpJjxeHrLOy(oGy5d}QoBUXYyNbk4aOHSVy^*9;eW<+s1Y3YVzS9tpzO$ZW8WQb z<~{>$6kd>fuNuPBrPaD%JRBlKs6~?%cWV#%fy^P5dh7LK94ZQCrj@?N+BOlF^!Fs| z7gOt!as+T2Nh~3f0Esp`24VN7TA!1C-PUMx8H_#s+`=6E>5*Jq@-yphy~ea^Lu)^) zAX0W!G4b&pP*x-vJF+$NYZ&weIyq^|bcYe{Z3;)ST~Fm&O|&e_BQ(BUE}EbT6#>5h zU>}kAjfU;~Evj=uKoSFOTFhdf@^&fkD_5(Vuj*!TAkO7}oJBSCKwTh&@3R+-#34Vh z+A4@u{0h&yyVT0HgJ&<@OzwGq6glnyL{>l6&HrgiYV5IY$8L2qt%oRX=S_+Pay@fn zvs9OU#L-VV#?qP7HusojUb06~&dK*VO~!}7}ayt8S}9*kk#XP>qc>T=@xf2L6U(X<`Znj$16Xf znI+|dM5pIIc3iTX&PIj0z}cR2;Nb?~cT?sRt~3YvwQGnRt~fq7uvrN3s<~q>B3NDs zFSguL@jd%Ri()CcE#-y`VOl~v8e$4R!NJShe@rrpmF0e~;57mQ 
z8=kh*n9ZJB&*A>EGN*E=_Tk66pc;GMNGWpe2^HQ#_A;G`N4dv6q5UQKtgJRY@S)uw zJ)@D;rsYs)u2Fg?AIlW}S#EsA->B_0xpp79HbX5Z9l8A~eg0D`292>MQN*1#=B1Vn z8Mqqw#6dM9bwyfTKPL`?ZkQ=4U>4*%W=EbIR4dv>pztHTp_#je1-*S1_D&UNoEg)K zpX8ujMc#nToQfhUN0!CCJE37ZuMpzL-k7qu6x(>b(z&E1SSX=8hml;kfpzOu4fbERJJ<)UOz@X!&eD)5dT1B0kv)Ohrg;%(?hG1JczcBpS@kp#KZfTQLUvZ-6nL6u-SF-^^;x1O}?c6~=k8$;B874*yAQ`~btR>ERFWz9a*{MkGn5lw=3&;h$cI5_e9?Sn7G zDtB{)5Mmr~;(|m4=u-_!(_fIyZ5?(nNKL(BgC2^zy(0N(ufB7dW-Do?OXNITQwmoShlqX8oEzUL2*YR`%epO6Q$mUWVIkLmidfVVsM>n#v zlqOWDC5q$UK1caTq)cTb-DIQZ+_elN^sZ&d*g{3+>}x$$vH+ zy)C)!Lg64UG2ngk@Yz`IEVJeg*M%~Rgic*n1rpR(&y~!;r_OBnDluU;wV_Pb$v}b^)wG2^mex_`Z(p$mNx`0Za+&K(mS`^ zh~{19%nOV9O=n<8W~PuK7upb}u|7JdhU?Hjxy!x!@#|!eL9(TZE}zxcpBlt@K;3Va zZE1rKTdw#DHF6ldX3i8VQ>tjQPc z&j!S7ZR?ZgO7$QG?mARq)btB}%2groziq$l|7)z4o#lVTYMIzL|JPV8Gc(KoBUY<{ zY=g}vLlq<;84M$tim?+*0I`V6G#W6tKFY>L0&-zjBFt5q0w&s(f@2JNPUfqUQ9|SvuRTLCp>;d{7kOV*~%Zm&NfI#sN90Ur< z+2LH6kzrh)fiavABs7d@Kk4tJAS5V&K|=;JWbo6HXdl289Ya6>f`EXEl7I{n1PCNh zAhI79kwP+nX&luGc>Wl;f_NW+g)tln=pry^bHlh{?fVCP0JaYtAt3?b=;ObBK@BP> zFhIa5fE-#E&S_u|5@ZO#1A+-P;Q19Dq_vG7Bq06x*xBBG7m%~y1DTY2#e*M!jcNox z7n(;%q5cPUUu`geoPc~RZ|d_82f){x;uF3uykVpxPyqn|EkJM(fddQs0H{5%0APIz z(8~%7fR+U0cTDpr9vFVVeis1b9pqQ|P5qgkh~nFw0Rl#hgHX@`o?;u$06J6)d>3LaKWFcE_V9}DVBmFSNi z%O)Lv$TkH*P7Z+sO!k|a-;@FhXtZr7_;1bx_W**uV@)4in__$PRUMIyPQnkEV|fF# zsO&o(8ja`|ygAqafJgvRQ4x^<;1DLDBlzaSf8olTfUrLnsNcW_luu6r1j!(p7U&ST zHPpaAqIaG^9ReWODe&Fnr~P1$+z1db02U<%P(%2-z~Rt;y^-PH>l`Mx56UvG0oXq) zc>sX#mybDq=$alI1c!_<{@ebusrsLF4f%(MV1CrUv9gK?2XK!EC@BCBQjvdvKtMx9 z0E~=`0dxJ9ErRp?ivYjK)nM(S03^RA(Y8x_nBBjm0Jr^VM|k^tI`Rk_D?$LjrN7+AfAROszrn%_R0{49v1};7ZGXg>{2hb! 
zs=2(}2Ph>xf@|1&v+bwm}AKXM1maA;E}j{jV|#A}9btg#vj0 z1Zr|XL`4AffYw7>0z3ZHm;&<|AGR8G%!Gcgbfh1r~LK0$x0 zB@NI~proI)r`F+qeSd$L1PN{t7#^NMfM{N}3r{_tBvXNkvG3jnt(~E33mM7oghUu} zk~j2TvDTJU#&S?=Z6+^1lUvK9g2l;uFz@X1VLG$jd!F-Uy;4F4J>TsV9mCnn-(32K z;he_61`XJ0(r~&Y9*EV#Gp8qG24bjpEe?q!YLqQXjX3|BPES4LMoI!pn$BHyntoGLG@%8%r(`j1L zzdEXwT6i*|e%s2qtj+v&;CWrIrC-2U;L3}}6O|l|rgcAo$_-{UeW)8Zo*uT}?2Wxc zbS|&+C_a)cKH!$SmxqQHML&EH9yeUJX<*-Tk6LxAAW0?b1b4ibVZpE&sd`e?d!?Ry zPyLcApqolLic!4ccKlvdVr#~|c{~%f{FHP1d>!YrC~St}rH#<+9WuOH`+aA(T)(6ta;28T4wZyy-fq31 zg3HrGRE;8u;2w6Yd|9jJ%7_Y@W8WZ5lX=qIpK{?iBa zm@Z%`5bSNCp*cxOKKtFj*v7YVga-(Rjo~WZnac#j26m{bdYe``kD$CA&qp%0ToiqP zZ!zlvUC|Nf^@_90OHZDDZnb%}(BX)aUnpxqA`wi`YOMZGShLgK%8vZMPpbokw5~i2yulDM(-ZCC4 zw6uQFpPa4c59XT?;(OuY>&YHZQ>`A$;!Aog9jSSbvL|aTo86BrGem7BE+@{;QQTbjNb2->fC+O@uoO(o?Y*%YPzNO%HTCR0g8|4u3 z>BL>3zVeu_x{+;GW#xPwvZ=cDsP@eWTm&WS)qXinAo+BgNPmP%HURpuDw(;m?=fOi zUwxSQ9WDgbn&L2X$IJZEsEV^4|MGCW*G5#-qm~Y5)TAkG25)}k%PhP3=ff`GwY2Fa zt&;szKsaJ%f&} zogU79zT0F0B}1Mb?2F0=d@Mfy+~i~(po(Le*e!{&QsmHjbRY-3N`CP&7v^T1hNVr~`dqR^AI*W#J*JBY+EhFe~B-l4Mb zlrP6(tRRGsnM$v>pE{`Sg;{lDo>Xqrw&F4Ou#r_ccd8Cx%C!v>_lMa~6#279>hB&V z7|bFORWMirmYP(LS6-*P?997;f#mRlX$xD_Pgi+PYuCan-EgR=yw871IeA0>@S)Fm z2^YUDPeKdXW9^D?{ntv5sv+g9!C;#`h zM<+K6SI;5CpwD^x-)089!F}?>qNPP)rFv5MNpve_XmmIV#^`qZ{H74MDcv-^s>h-k z?-PgIJtYQl(xXCXOEh=Y`()S98<8_#haYOJD3|m~rdP4POEbTzT$LQkRhP5nn~G1c zo&UVi>=>*UukBg!?Pm5H3|XHw#iIo4qJ4}vs9zmfGx{%4H(v3nsJHXeZgy^j388cN zYbp09(VRZs{R8#c%LN*2F&Z1MmNuWr>(R{24lc%HJ}+?{^E^f+lcC32MJ3K;ma;)w z@#5LB+f`-zd5wmFLFo1AfL_4oR$dFN@=W~%2nT^X zX$&_q(%7y0MBu#*VOa{AN3swv{t6N4@zr>%JO=lntMu~CCM~Wbe({@C#GT#w*HGEp z;v$@$4`;LSepRa5n|!&!+RzNFjkUsGPZFiy-01>IGlD;tWMo8_iW2#Yu#?%d?2_b=`CQWPqpm!=v>9F^6ZymOYUtaX10qq?wjtm1(aCdGY17hjXm%XUBB+s~tY4fD#pS~;{29*+$l{KSNN|Ra zE%0zj(oF+CzD-NV$-TT$g!wJ(hF>2gFr~B7yy~F|*=T{WNopL6+r%0>vGl9SL3QjWSr#2%ZmL>(bBXf#q#P za?yWYZ41IF`Z1BiWe^RItlex>Hb)O#icko}@%2Qdp=1~Ct75T=8kJ5~g)!ksWw+7( z*p5O2x&PMtnPla=d!@eJh7f*)$ajI?H$~aSVPJE(Rd9;xQI&<+h+UTpN|^(X?TP 
zvc=!dVsVduMwE!ls@)_zQwCTk`{jg=&Xo)rsKL5^*vx$+Qs5O1vl9LQ=;4A@ukG#H z(Tb$a=3!j+VmvAL-{zMh$W8`s8xa!|-DdFlTIHbLCm4pz#OB!vYe3D&@Uzs#;MZc| z%wu<*Bfb^han^zgMVzAqf0go)TD)YAvzNtKopkdqygf;l$;Gb6XqgnIjz>d7gl;P5q?R z#KLQR#$~%kVm0H;)=kKEs}Re?MZFf-%d#m#-n^aqv9Nt+ zQ%YyoGMUVqrKKGwRgYELB%CTAjlN?uFjO+VNp&Sn)GNMk^85R&Q>`DJq{Y46R(1n# zD%OMbd*j`yG7tPvlh6D53~O$L?7YO;0^8TIidB&r-R1A0z?Q>$JSSoDppNbwCVmEE ztX4%|LJtf>L2=`I>xR^8s{0_j*t*fl@ER?KMA3hNk1eac?7eNLX0Q9@yBgaPIL+T5 zKHfstjg3Qj8USwTzCQ2Kt}kd+sYbQ^mVbzXWJf1zR@qh8BPq*{2$_7mYJSICHrxn( zKD)lpfb22M5P@ zVE?(j4%gm?0c}uQ=P)rm9;`NG_2C}fqgx|C;g{k3N13?9RTrzC4f5V_N|O>76EmP@ z9X@;YRH}O}B67zYR$TGfRa!@y=f{Js^0oXPY8A4vaNZXBIA`-(z~1fAEm_*zr`{EJ2x?`ufe)80pI`=w7-9EXv6g^o_R-pceUEdDA+d72u%ro%gZ;=asy%QSDVqWI(2zr;ZyHl$}ws z-scSkk^GJ!UhdOUI*D|8A+~4~{NMTZ<1~*>k*(!z>+^eu!XE2Y*`mT*gndUO`tC;x zN|H^q2}izRl{}mL@uJcawiwB3kIgJ5xjHC1-?6{oX^uOLL-b_tZ>J=Ip$;aG%vckr zKQjhyTL!%yOEn#D6eW+2o?g(QG$Zy$tXO%fMC`XiWFv5_=qT^ZLAFy2*3eS>l)S?E z>lkWL5`Sjowp3PQW{*eKEl2x$xrR6v@@MS3L)g7a-UJ@Mh`9V^;B_v%&hZ-cE<8}Z zWmg1W_OlU?>@I=9WpASXz7VTRy=Yz5BWzI|W z)iT87Hl|2F0&DP6OT4EGi-D4A%>M{bq6lC>Pc&F@M8n zNnt>g87uL@o46$U3OpRef6C{qlly;qQ>A9o%mn?i3rr*tjlBN50=}VwbU2QwYjlyb z=<;pm@Kg+l#N=1d@s}bi8n60E&lO^d@zy&pNR9U?-0b7Ajcr|BJr5DyhYOM_dp8K_ zuKCdg&*wEEMzvKp_ZU%dlB$KJ;9blVqQt&B(CA~)3kuqU_$n-J*Ip%hX0Q2_e5{;a zZP1_+IcI{zd}rzZ`9zfe?15I?74$`r;ca@hcdOhf;6zTR+TNRQtFTxTdNY%0G~F0= zkGKdV-QjM>#df-UxP>eScxHcBj7h{<^;PtQJQ+{5ax0iU@4Tgi-T=|t(azHA_GQZ> zixBKWZ?2B4$;<0aSH`bovkxC{qjN|3!7Z~#^!dL$ffte1F>Mek&u(9h(GsL&xvlRd z4O7+}YokTo9xUn;RAj^7RxYO+l^MLmDfNsn)5sB?KC#8WLEVj-|B}rn$;uAbAvpiASPk(T@v zW%f1Mj`IFWy-ut{GuZker&2+bfie<050wY_-jUEY#iMP$^pbgNYCH*sQr^Ucl2;t+ z<50U9yF}(i;WmB`jLOP33uprqd$-@Mr%wsh@#tRDFk$BDU%B`;R!qx2=1^qy8EIX$ zdFzE(L=YK{7SI>AyO5v@*8AwVU2~>v+{GCu|-E<8f6o_qDNeHM`*2oRZ_jir} zHaER3_*&)W6A)&0p@ejM$E!52*MeT-@PlP7E7h_Wbp$rWCqw)OJ;FygpG@Ipa5^Y5 zGow9qZGG;-4k*0`4^opPw=J=@N?n!&iMBZz`?JLD>F|T8o5^Akzq}{;J=c3aSwBsa@>UY#-8&=z+Ra1VR}ab0Uw{K_9B1!uPWxD+7w)KSm6q zG1%iD7j8|F$bcwZEc}q(xN_OvxYcCkSD)mpDA&(YDo`L 
z7r6+lsmMc*pVJTU2e-d^V^ z4Czol1qMo(OdC=s=0TX>fZ8mf5yzR(`iuMiBt%H%X4N_-y0+dDo(2clh2LhzLApPZoff8Y;7eJzW8CJwTwV&dE9xG1|RN)^Y9%Nk9xj*i`LS`($ZG# zS3qc)^8A*P>Q-F7l)}56_{7L1QnJ35fgBxvvryN165>>!dOB*1{M6>R*HO|aiWCB_26JLX!zH77a6-5+SSZ_+=RNgdBidW?&RJ4A8F{YMDV^0%)zR*T2E z?shY;@+_dHbnAY}m-(DADY(Wy@5reej>!5V2|9VJ6A!az13R(xNP%~cQP@vE35>o} zY8o50Ha640y4|8|FOkoE@IddGuzH?d8(SJ5-!b}=$;kN18vUkPi5Mw?E3BNxp6Mh* zi;e(;`iuc?;09lroc7cOB;m#BbfU%>a8ZTndj07UxOs-8f!mQaqq>&P;=LM~`mA~A zX@mnwpvj>Zj8n;sv`69)tPrzW)L&3l3}3?sZCqJ`6TBBHC@*kyiA3c+=ojK^>Dpmy3M3>577hn zvdFSSd*ckgq=V#ka95Rq#kj9S?2h3LV{GX(7xeAXDwAtCPFvj}w@z4uI)Nw0)43Is zWc$4(Z)zw1={YfQRJgu9?cTfE9)C}AI-a$L_oL@V?fAi%7_ow+ zkr{b<=ZLVrN+6W6i&|vA#Yr>|3h+MFk;b-{47&*cI9c2V)g3<717gN;2QOjGkyUlL z6|rs?7=DPkEEP`4l?^>V?Z{cH^)|Yeo$>E&wqX@dk6UQUo%hBa+_PX8TL+CxJ{%oZL-gfk&0R)#*}#SggEl50a3UZac@?OAJE!@)NnX zwV|ML@NFvlmbo=Lq*z9XzWVXYItBtBhFfHQ3qvxlS``DwwfFNQSvq41`kT7m3_aHo zaK~DLV~cz_)4JCR?*uxt=+GDbg2Z zLD7JfJH3pS$o-r6s~<=)uI`p&$?9y1N12&_-g~irPVMENFc5t9pVLCp59b3f7#S(j z$Lcf1_!5GJ%z}HS9;)R02a3jjkqDfWP5q7nPI6h(oQ}^#;ZusP9~>!+Gui7ApMAN( zA>J0@HAO20G}+`!yRTyuJz%^;I108EEyFP^EQq z>mAQ)u%4I%7OR&Mo7{QgUP|{(sxP0z*1~7{vHV~h$M_L`;C*!!_WR&f@2M1XIxeZ~ z5%pqcuaN)g>78b@Jj`@e8CDpK8tjcmKIgzbz+02qGcf;NyS!*K8kv{es=nO zBkv2)y!*AY|M84_eaS6V`|#4&uVbq(UQmho@zgx-q5#qBT+-6g!1=;LQ@B91dap3Z zk2~8Rf9d`7w3pT}XHa8(dyK?E6Pp`;>a}UMPG@Wle>HL z7MEe2)z(CMcjVI^y1rQsYC@21%6^2M4gLFg_B%RRNx3J-Ui;EM5PTOJ$=Mw@XF2JV z!DWaOz|<)w@)x(pOq|QVqAg2ZSk-NRfnScS9P$;X|C$1J$rd4xUG${Z$gc9fYeov- z4tM@sRn30v?_tSBhww)+DBx~XQTrWFw>Sz4GH@P=){Z_$lkVc=;LigVtx%=PzNK9L zT5*rGBL240r+EWY=T??{M(`-uAJcvH9ePY$q55`jV}?{Lo6LRAOKrEdziH~)yRlG- z;i@vXzm#})W3IXje63Xrql7J&O6tnbD*{i0#7r!8SHZJ;SKx}qpz{Zsh5ZMdZ4Epj z%x>PQ#dmMF!*Tc2JBMv~e{iTcdXQ7+d1UZ~p*P$`GJEY{YbS7;Fv2D$pU|a`eCr~6 z(Q9+5u5$((?)QM+gGsW7Dtz-oMaBZTEW{E&-b;yPgH=%eGPl>a4r5bl$6O!^qN|<# zZSg4yvfYPzS*aTx{gt(D#1~NN?0T3`oq9yLtu;Mlxq!yGKtSIvXT#BOZ8JE3_abFE-dV}8RTSE?zn@hPi*uR;0YHtz%cdBUYFr5SNu&*lO@ z>0%pJz<8UA$iqxU8RS@BT?o>n+Ek&R|4L(Y{BS#crXWn^SvWBxxfh+2136`h?XS`hLM 
z?$~b7@crA{O_o?tEbAcdL0XN3eGqs#2Md~x?r;LZAn^9Fw_NYL&ZC~M-)(olYsS~A zDpqT?87=GU8G+);VM5az8_+cOFab^vj!sd4%U3xyyj!bT1XVcOhFb9bk@5K10fHki zFrk}V!X^O<4G;r>Hn8y^!2ou90G^?tp>r4jOmlR5c5iKNiU3Xdt$@(j*q{8gejxz0 z-M&NtH+av<|eh;CWfcbu>?ydd~z*#o>#MN1X_yxUiVEq^8h@Qy&+K)cmA1NS%d4D{>% zuBSOJz`t6rg6Qbtx4rJ%zqn6&xK{=DhGJN9uDR^=9D;LcJ8f?Wt~LGOB9KN$uzQEU zVY{1y$NauGgZ6&xf8T4<4Lu0}THL}oy@0C**e-Og=I7E<08f8%rnCO}<9}e0f3!e< z+Q&bC$Zz_rpZ4MJe{Vd0)Ga;X#l>9n7p4Hej)MStb{OUXRQvVZqlo9P{-MCR*SNiZ z;jn*L768BC6Mx`wNGR`%Fq9ED(FEH$+kXt3-BVlMfwinRg#_FIu=5Y4%Klp~1gi$@ z;@%MNq6_`mko)@wr@!d=b-BFfxX1OYPk+OpVH$tbALxm{WwWLAh2*8BKp=WZP!IR-fAAZ>VFw>)TCfnV z&2K;%Jt@;OPwoeQ58uwFuk;K~2no$E`preaph7kJqgVYf{+)t&c6RhtzrKh1P``68 zd%1UH82*ij<;`$n#p(=@I!C#bfhm`qM(ARv$}y7iTokdH278g z4s0>eJgpvcxE(xp)XldrLlHQcU5f)_A1J>rG&`Uj)-kq0Hr#|dr=%H};b4t@j#&HmR#5P8n8|N- z^%4lVFuoJ;MS1D=CYX1Y`+V5I8aX;e ztObns&EFPP?!*jR4MIkLF`FjTWDQ-&^tY;AH>ktypq43tGvd(SFr`IT$=5ra)}^W- zk>9TtvCh+wA+*El#a>zy=3s@Wx+%ShT>T2J+6SP&LE(X+QJ!^D;;2^G?B88ZNSbDy zeN`m83N=lYdQHWl)&W1?@4CStvijP<10h>>d+6iqGYFrMAp2x%CQOUUd!^;P?d`0f zZvM@!1xawSBnpOz5E*6sorme5F_H&?(8D2TW44x9M6=r!wgQoV)=p;JO`|)XQaXVcRHTr^FE$|pu|<6W z_yo0xpiTTcmT>wbttHbsZa`LQWYnUwUWjd<5mv7oHfH`172sy|qtU$j)+bV>DY$+W zhf0NBT??I5q#pzBZaAwv<9sT|pQ6q>-1X?H$|Hzes?x@0p>>8aZ*86h8PT#LrWNax zRv3wPMJAAMr+opX8^LW{)K1EhsdB7i>17y4<76i(gaSq!{^qIE8>A8VqL1RTlIjnrB^(bxtW^8w3D(rD?Y7he-T$oUMHy>J<`cOQ0E&Q1d~G(CCgXotTnZ@ zaA-z>UtgL_xksanMILgfzN$c1^y?ZkW6*dX1|v?4GS}6)%&G7HM{) z?Tq|>W&6kB`@9W&SVmS#3B{`6P3^I1OR7 z7O!#FM)jA$s;SP3VEsEFWP``%Z(xs7&ATWWlgT`vsSdLAZoH0wI<-v`dtS#hF)fo^ z>gwblzGIU6D$K{Mhhp*r$09P|O+<;{+_svpt)C5%p&I^^075{$zXfe%Pl^x1phRCY2Jj(YA7S+fq2Q<|QJFM!wGPadJzV7(GjA6kDd) z%Bbq$z+w|kUr(z=m)|u7hMsBdVEcS|5TOy9Y;>GU0u5Yyi-#WF$ICjc5v@FiQLyUn zPLBGQp~&cm$Nw5+ulLWm24l~fte8_i%o241-IbK^yxMvwzA$w;-vR=#GR4rb;e$9Vc`$KK^S20FJI0Qy zu>+%+(ydj}vs%BqdH;Wj++q;Ts~Hcw*iH4G<@NW zE4YNIZ?a}4+=UMp4>f_8^}8Qi-s|I+)ps;#peBz`m*;4ZFl zoBznf{a&j}Sr;W+5Hm_`iKdqvA>_kky=nlXIbZcng@Cbr0MI?fi6)_sZ(aw2*R+5Uo? 
zv*~xi@+kE=MZs`rD*Nwq;-dZ?G`ydcB0~(7L!dU8-jB?ivx|8~~>mj}gx|`Gs`HXArakm~k|2!*dCA z5ZI7n{DUfrC$Y_^kbkZ>V1A#!F15&cg~Jcx;@$(YP!s?6pWys8nz$-!AS4p>jbbX^ztZE=1fB zBnRk6;GWsNKNK#64U618ZQZXm`)$-}4TPTByy?aCu<-Nn?e1CN{rSDNkBW`n4gSx_ zUo~*oOyUgply{qGF%l$nw*(iBsP~NvdcgIJY&+CizhD3N)lx0~C>mq?J-le5_G)FV zk7pQmnimi5tpt{1ozLav8^TkPczyW>+|RqA;Ki*vDMu3#ixfg}-M37R!nE|@q0|ke z8_}4&qdP?2^HZQU*XDTWco&k!MIcPonu}#I4>fUla$L&#Yy}P0$ z+TRiJGa#oS2TLtN?h~()#|Jq3U-2huu)kL2I|m_RrGfJ|J2&iEHdn~>lkcyOMl2Hn3Q~|j?>AN zX3#MltQ;r9(19D&2(%r{-t`FcXo;)7J0r|?RX%d9KQgH(B!|FeWU!NiY6d>llO6D_ zp3~t!LkAJT@d`me2?``e1*rf91Oh2o2~)5j*!c1)zQL=q4`HE@>`; zO0=&GnsLIKwYVJZc@v(0`yym*Opji!3`z85Ib)i(g6=+A)brlez?UN^`rb9sN`77e zTB$qBVwl*3-{5FT9}Co|dR{1}6O2n*f$X?8{W2XbZGw>%b?KZw;m0%QAtGz5YhGpj z4|#tx=FVg2Y>}F(ouR1WiBAJJI^`Z~kcA|14EJzi&EU~)(#$iI_XT1>a}D>YI` zFz%cK@Op{G(-UM4Q^0om$rLScLR~(9e2b0mIGst&5M!g@m}^y8y_?U=VspUT?Ui{v zT67a;{pHvjrH@$Pl%8-y-oAU=n|p-X<&)X5R|$hS2$esc=;^XEG#-`g*JwG1N|c-}-C%UW-Cb-UnsS8AfNVfv_%Gy)J;o(hs9XELpx|6*e~ zIV^2F(8d72$p?V1?fiPAXo{}YQb&+(DlP?zYWBWAw)gIP^d#L)5FmZ&bby6r?g=GJGYnmI}RGEX+WnfvyLVm>Am0BTelREtdvPMGYXkn2~oNe zdynU)#1Rh~v({fVMK9+60vb?Qkz%N;Ks}Kfb#Bq{GS6gOL2IbClbwPw%k|swbh#*( z^I&sjqb9tR#y-ndN1CZxEFnniUQ0|0q#lk8z-)eAC_1AD7O-idgYJ}y^nI@x?v%Gk=j3H{cXqF8> zLh;r3LWa>9yIY#eqaLPNuRX6PWuAFCK^4oV-VKMUv&CNbQbS*mq{6Gsqis998Dy6m zYRD21FdtmAaNsFzRxVWcZlmPNfJWT0SZPupk5@eo2evOH*dt)0#OB>9r{wsu>d zW%hQ$iryi<-C}<0%{*hE(4VOgd5C#7eWqPqUCg(2DK4SP8^X5p=A7Srge1yqf=`%>l4_cAI##I;b^rV)Mqlmnf z+Z!IUgldr_EkcWNEPvy>g?*V#G;xyOoxYd59LiXOcna)CZYsqt<Q>;_bNwMM?t`y)bBo6&3Ik^brqrQyrLr^OQ5Aq(q*hRlryzY5iqqS!)~KGQ@5kv8uu>m-0mW(}e)fyt>_#qQ`O z4HwK;DZb5JOgVU)j7mFx44A<T^quP6Wtjk*vzkkSCSUrIvsW9h_7ArDo*)#Y)M|b*OG32b zvG5To`_xWu?;V_)^Gl&{>-I(I8;=q%W!7cnJ7%m{p{CUkhmyv ziot5B>UxG!*tR3 zhA9*wE@`K4@no)n^gFKbPHA<_I&+&#LLe7KLFtlS<(;-fN{pO|<@;nHy^xWd)zn#x zqf9!cl|4oZIhLo%Zi9Rp>p7o5{j)ZQVMDoIFaY+t@R+v4F8UGa+0GUg?<@cxoq5pW zZOWN$@nnS?rZ1$~^toWC%%z4G^Qw35C}bNQa%pO!)mG$})CHhxTh|t z`vO3_aj$AFYo?qy_vZewLvj|KzaH!Rpb+aMnG{s==Q-=SkWF8NZ_@EDeC;DC`tW}R 
znMJpH#AIvP8!ZdPsz^Kl@avqp30sQ@?XGMQhtIHE{LsaU`DD@xslK*KnQ;#`OPRX! zf_A{+%vR|nB>SZdGO6JVI+FaqdQY*^F`T#WcOz#l3aiypH`}Lb(B0*laUbfYt5t%7 zJs#RgkxP2eAt_?b8PgkljW|1!Ee$C7uRQJ_0%WLfB0#61_H@F%!pIIdhAnIF9hrH-j z&Z8%eO&`I29JW&xOjG_FyP*5v-CPsz#X4?2mKLl3al>jg*WP1Bt00t+Z*kDQ z6ClK+nh+irffzIn_8(@JS;sb{o4406hcH1gu1EA5S$Gf#Y~Vz;)Vel*2k3T(A{kDo zL1o)~twNV*eYAb=!l$4V(meA7M=zxAS2jSzPw9B1%g**p;S@b~F(sw1u;uQt8CQ?} z87cRZD341i+!Bs1O4$DsPHEM3yVkZ~WAwdGECL{OSEtQt`ICCe^>1n%4N8tT&ULD$ zTS}5IXKwFl^G0ssyIyjXhqy*5t|i+fA!2iMr|fw-x}p*!UpSZ$pPqus38Fxslz-Qr z=;9dvoY%SN>{jFVl;ktB<4x=Yq)?|g^dLWK*WU!U#g9+;ms(i0?>Y9p!ANTV@B2 zg~{cutUKh1ZtW52C?S_tP^Z@6V_dZ|K9RukR$7!{r#pJXT{_$nr>p8O;+B&yQAB;2 zl=oR`q}FmxBlcLz5!Or9%f1C0f!B8&RYWCMu#|S$JN@Wlnjr{t?cV zewHNW|8JOyz@-CytJ*yyG}BqsBI>VANZ&J*c( z>vlX)yMn|_|MyO>mH8V+UxWn4i_!R@R9zV>p99DuX30YCyQjg1#`ruoHCWeUUty?s z=GjHt!k#NRR(kjt#KHAqiR*T${a@RtawjQjeEOk;{K0APa%9dSR7NPYdo%#(<~@{MXPFf5tB*mh&%DFNG@oypLvsa^WXWFgvBm^&U+M*{i?#t;~KjU9_Ez) zEqf;RRU(9qE$R=YC?_}XqQ(pDVXt>S@)^wbTyvc6AmRCV3l_`-ae$a^L2z};#+ffz z6{1}d^CZ&x($u1<-*A81#_lt*oBQZ}>Q&XY@*6yDCQCgeF525|a?bW%WLo&j$Rsf4 zhnwj+HDJCM-vuY~2mKBoCxu{O1PBl!t_ofBZsAu<3^;0Ai7eKHa$Csg zPGSz36Ym0~ygfxf&N@+V zp5wJHVLeXmnG0BoWM*CfPnFio3JWN&4EP%|M7pi&%lk)rbQgkMq4@h7Ns_-(#NZ%1kRx^`_+(tFdBLK#!}|*7lo1w`{SG& zHX*}sydMu4WnSb!XieY9D%;Kus_Utz_MG$YH%7I z1;l76rrBCZM2#|MvKK_da(|LNwa#0yeD6L@O+0m zm212Z?1nz%9!S-c**f93OOULTlSPxF55GMYX9}PqeL1J-W-5Tz$kv5u^X^GI15_cH zq%3(dKG6u)nNpB4q})pThS^eB)+u&t@@pH& zl1cOdrrs)Skf!AMu}%x;AjK3+=HEvS=QJ?DYF=dY=(sbqOq?peYUTJXhJQ43@uD^o! z+`V4UbwQ*`nF0>OV+0Cqd>f7IFk@>jXH;HpBue{t zWE}JaR-)S%{q z*2lw3Y%bR3yc#sYL~kJV+-OeNGYq8=8A$wvbwiS(@Q>7Yu-a%Z`RF4&!tQFJRwE(L zEmN)JQ!*oe?-hYIiYs#GpkIda${Ym?OWlWeZ5v5JCFvxzexujSR?Cv<=( z4<(q*wQh*q6~-sZBf>kt;HfIDrs6u^RT6H_YPlrkJ$czKi93)JwvA5jUi7_k--3eK z*5z5T4T8j3{`e&YER{69@6RU(;Go{4caR1OP~k`xiRRy$KM{fbK0KkTjp z=x#3Ps%`Cj?Qs%md1mC5(YP1*9zDtp`0*^$auupoSM^8LHn^@|6_SzG;$m*iva36} z$|WSYO>I^%DKW+6JwmaCF#|zwR5?t7(I?6Dfi=ALW9%5JNG-t0W4+~2EQd{yG~>{! 
z?JCH@A%#TJ^>N{=fYj5B(RRahz-gA<-aDs|s#!|V=xf8Bt!v#o>c%{7 zQnU!_9OJ)I{IDv@;*j-EQ*rkHL|2HTcWt~-!UJb0wUvo^rCmCobDbl}$oQ|HTz!c; zL0Hlj)EDL!BK%vt)pRY|Fd&}pv28RAPZPuXwJOu>CW#sDkon0*)>vY^ zQ;Bfya!d2E`LcDjjOq4U)Cvre{HFy0_SM zbpP?enTIjgxYq&KuE*yn>Pb-{;V5tp-2MZ(Sq~d^vwiisyC@S#z-;;TOM7h-dn*2` z@b^Yn#T0C5f`83N`TJvHOwm|_O%xH7Uq#!- zyS;C1=?G9NWW&=-rUk!vR|)qa{i19Ex=a^rU|Zsl*Axmf(OPOQq{i#iiXNu*xK_;1 z7pITmb=LmfsP9j*oadC1NsIy#!_iU}jvspXjO)3vCIDNbbCBNoF!rMRt(CH2EeBbLLJE8u zf&6V_Oe{QyO#jAO49Y+&e%vYxcnJ6YZi~Mle*2p4TSHh_HECnd6w6|9r z_sLfBDD;BfX6btxo7beb8xAd}A_ynjZAzBmKwzd~=J?2}_S{oeHNEy#20=Pu94v(< zjG-}&iNs>L-%hoc@%Q?ml0v+TOX;AX#e!;_V9>ae#iDnlT3H&F^HJ4nUow|c9P!^h z3%{xj*-lFJ0q@lS>wS$OI}#O7a;j33KDdads#9!*BguL-$Sfx5qKlkDx)7LxEtebP{Gq!EEJ85g84^OVN(4E>YIVNeDMw%0Xz3$?1BuTN81 zT34dm(SmIOC zBm4jAbvam>8UK%7x5mTR#dI&-)@DnNs!g-e_GxrB)^@ASc1x19lA}@sl^gYFqmIMerNkYKLjTi_g8HF=j8`f*~tO0D}zg$6G-w`O%*Bmojp%L+IXM8Rk!B1 zOA=@LPb`Zwqw9BRvERqv@YgGFRPB|HL?8hCK3|Ir0Ew9zU0mCEuD|s!h?7hECpxS; zI<u-a6;%}k1vADSQtiK8O7xkr|Qv*;JpzSoq zfu_daIt%D;wXLBk-1yVHT4F1kJE4K$FMDTt<$>SF4&clmHsUj%SNJJCq%kwNx0XB- za{wn!auD#nodojbFMBfP&za~CUgDdd=>9LF=gMpW|4coG6yXw>!enE1Lg^ftkrKdvR)X zNGkY%%mkE$m86lK&6m5>UsLr~Heilz?KPnP%J&+9B!)&t=3lh@%;Mzo=x=_b^LIA1 zf&Hs~=%7UDgRXPDo?K8($~A(aqf(tyAgg0A|g0El3f@Y zoEj1sn`s(<_-%bY9vhs#KiLs~z{c-0tzC?Bn-jsJulAe%%s-aDyZ%3&i$8P>M{i~D zX8+eHs`>TZ>$}&_y)ggnvbxz>f7PG0$iJ;;|C+zn1wa7!0Ltyps9;L{2v;#TvIanI z|LgFK+j#)!cZ~{T&t@%2MS)!5et)rlbn>kW-o6yZQ#5zxP+KT{SqQu2X_?Mo`Jf`td&=6X}4x62;j+4vB?1__)jEjHT6N1CvT|6HeQhY|& z{VL5%C{|$e~@ni1{#cdW&sJ6xCW7@ zzMuTs>II`TgKzW4@9R|9_VA(fYk))-aoiSRpuy)1CyL;e_O$~nLt)F#jm@GCr~Td= zWq`0r0sGaL2Hz)^0=RRrOE{h&FD9LypDLGS9QhXIn)_J|-`f*_fDO>TM&C_IO7UQr z73tr2#Aw*Afy6q`C(j>%S0wgC!z}u|iUi@0`l>Q@g@CHejl)&}9|&o)4-`}bhhbA% z9Lr6@A@Uv*a^JEdz z^q$Tv0P$pCJ122Y_882O*(R4q`L&7ubG}rKSfczQF0dK5< zWksW=nO`*t`-BN3oX(<=fPr=m+qe@-^TCha!z3qC>!nz&L8b8Vp0#Mcpk;22&)pa` z53AS@Lf;IU9Q9)L+KkkEK(FQr>maC_MSaqxfYT^}AQzPrY!47D8Tp&9uq3Td^4c%< zo91{>NToseK5@RtA^73CLL92Dj;_&wfyntL9mll|0QKq4**Ox^6N6tdUU&Yp6t?Z2 
z9VdP;&Y$F0NNrBhEL-DD(xG9pbU&|N9SBiH(7yl?+CISRO6Wn;diL&G#w-*ad?PY` zhF}5e21Any)9b&ZqTah031cU15le-TlCfOa#rs?SkQ@@c%OtLYMRRdy9@vPO=yoF! zG7w3(JUGp{X@b|gmfgSl`{sI?fs@%;&%j9)S%}>Y?5TT=_s)2+bYZE0z z;zJO{Z!6^&eoD}n?C*}hg4*%Qfd^u>QA6Q&7Uir!vjKh= zG|~O|(&>fAY)XvkNx~>2B~|!#jL6%nPP#}@sJWkeWr!EG&+U_E)joI~VZGM~Dv3I; zX|>7Hg0gEhOmlciv7erjA9`THhjED?ngT5@YKC2YZ?Xsqz`|-^33vUdx~H|E?V9JH zM14&&%1jkg@tspr8R|B7I3??s5N$hd^;Ev|bZPN%U>$O_DM3__ww=T2%`kpErf)|1 zjzrN&((wJo1&1%QHBhZpySfRjVp0cPe|!Zuu32Dr!H&qYPsqN4PD`Qikfx#?Cwv}S zW^ufNPxhj~=tvh4PQtG^;DgI#O~MAYsk!)9ZAn-Gf0Q*~nbRBvS3SJ)MxkSgxofmI zN${dxF3nsLcY~9!I?VCOLfXR3Cg53WYfmbBU3)f}cUK*wH${*`FL0#6r?JuTo7=M= z_aX#Ccf8V^WNuGX?88 z1yB?HK6(l{8e8?SG2=U>9R;5@cy2<>8#_~8>=MuPp_eIQn`ay!kPvLsS~R{FUEale zeXuLxkIiHna8<708nVdYTGG2IP&oetT>g00(Vqr}3tLT;4q(vR4#ekK{S05z|5NSe zvv>rCjhJ6id@Y`RLqpYhxJo}bg!D?@7|b?37G82TLe=v6Q_|tU2pFzQy#{ehaX(up zE5{OR)PnW;mBjr;Czd<#b$Tw^(6AGADomE&XZW-S!7=ozvM|l9aa(oIzF}Ojjy;6sINx1#UXa*{bmIao#$B#`pDHyG z3mbSFvbQZ1XAV+pU|0sugD8vNT5h`|pq+_tL-w79)sWRgNz`Z!#g|H#-eb*?KYCdJ zU@<^n@%I7A&N*m7wMD_wcO4g>VqUp(ugzRppwWbwF)`mrv&cTm1ehA_vFE1$9qji` z?l#}p^PaXH6MGz^9<8fL7JZZkd)wU6b1`WpuAnSP(Yai&Agtj!=yz10Ei3tvLv7Si ztbI!HSmz9deU!mR$poxYafb>Lp_9RBnL%sTX0!Ro=nZF=8Wbjo)g!-spLZhF^wVV; z`9Gw-fy2DEP(X-g#e)$EsYyjjlmei4KDjF2NP8JX~g-(dk z#BwSx(?34v(VMa>CQj9NiD0YbIbONlPwsuS-M-ZFX;_Q}yhW;{X{iyODOgKW9ddeo zI{V;af2*N6f2ngxyBB?#9(S9 z>AOtr%+jdnZ{GIjN-s0P=mFpkN1p>yCc`vpX??8iO`5S9eTF_9WD9z+un1*Hygih9`mXIqYFA=!SIr-o_CRlgt)9kJ z1lK}Io)zAL-}gaO)K3>gP6azgmm!5_jW1eB!S47HI$Bv+>-+tkMHWt&q6)J z6{27x+Rpsub3sxOzs0Y!`1sCP4qYfOC-syJ%f2eeVWy1#4^-k}m1CrQ57c61dN_Jq z~GqzTRd&{quKFzJ0@vP*bV?`^t{7sa@NQ9 zg&~xXXEcjzu%_V&YXEk!>s(6mpg24kxHLP3aJS&qVd?;WC%G7b5(*c^nGFtZt248H!MFNEJyt#dkZ>)PYcfRsxY3)69-++YJ% zYkorDRFq*nUkWd#h!I^kdER`HhOE-LXRz7l%@~m z%_enF5+_dQ6eiYrmVR+1BjjnYmgQAdhg>tVTMgDM=!fuSK0kRgw=;izIP)~C+r)B0 z(XOGT%ZbnILgPy_wZzveVGQ^a4{pY=fs!LEzP0mYFfcH9g7HM}XFctLJlY3T*eB71 zJPbxkt!OIRqL;%@lkG#lpsf^-@9qm>H$$a;(hAHG7KxAj9v;U9f*K 
z3@T4;{9<;v@f)^qgzR8`nU8m&r$hsf=Q2xfvCO`XaF0(U=jD*~;rehW{h<+W37Xwy zyR*%>Xw|9k@POg7ZN$=U=z96`9qtV3dC75DtX{}jF*mTc-H4ysI_KHl=H)$xufL`-(%8U*R9{v{^PZ?Y!qE{0~$07vQ zOb%!Fp{h+>r!(8fiF21Vd@l09m@%-K>g$vT4$(j2wiv1B4T_6Fa&8Z)N|+H& zJ|?SL6VQvbOCJIr6d4-&#g8Cckf-%IzO7P5mtaFVCF4zJ23lL(2|xa~-94O^;^&?4ZmSTaxY#(xH?`#&GMf7Iq@2&cb?@`VaxFYgwH&J{ z#HX6sN?!d*=USKV>40auPpm`74D){&TQz!h9_cD3#tLj>Ajs!C;Nrilnfjj(-iVyo zVn~sfVJvve62CY@v4Z2LRvRQB#qUC(pfuq_(87|#xK>$OL{wKa`d37DKK+8^T!i^N zIavuzG``}{W>SCZhXkZS&vgr2IgG)iSm!W-Kh`!x5O(z402U+}S$&}Y&i4Ox?!KzzZO*hNw$2j&LN4!Gv1@~d%-Ma#aTL;lvE;Kjf4{R)se^{Hks-Ko4osxhy+f>sy zb1R$zR!>aAU!aC0%(S1XD|P=$htuCGkCptVuW%Ees|Y&-I8O4}SUpHVh;@c83^`*m zt24abWZjXUJhDQt5xpl)Yv^b2$W@0EpL=D-%!(F=_{%3Tp5A3+{0!WUuOh3Yf)g2n z+Iq@W8D+w0WSI~KAo`DT1$^wt7YW^7Fg9Ji1?my%N75fgXw-Fq7i2oT^QB6x{vFAh zLmlkCXquC|3ufb?+vo6R2+^LmZCs%WhVaB-Z&!L$3#)9&z61vHuWSK|hL%Zuxg5o# zW->-w^-nHcI0`%+NQqa+K-eG&lBrs54i3&M8*CtKGf~oj5J-oj7U~NtCOq1w4}Y=? zvc=upucQ%XZBCN7?dZIQTvG_4VF`XClCnznZr|?}1|r$1msZ?-tIy2m+t+)2?oE_HEJhvu{TbD?gdxmd4$_(TCbVEtI3B&I zqS-QajhsY!zMvczW3B4K!Ctv~e0m4XI{=x88I1_!0;EuKjZ$Q-vhlmyqcFdnk8;7is4N-a}G3>ye&VMj= z&kCYIO9DmPxNY0EZQHhO+qP}nwr$(CtwA!$3nn|ksj5F%+b+v+SMJrP4|@rV^$U1k z)rrjs&c(&kI#QYE!zl-r&01j1Vz^4CxZ`qU=APmp%E4URvX^@%E3l?!ZHIW(Eiqb| zITk<($XLjs`UNDQ!tZcGUsDrJ?Onag`=59bCI>ysd(mu6J+F}&%5|fvGDmJBJVnhD zPu7;Z!|Zy{qbS!;jyMZ<8faKTtwhTinzu0;*cm4xYIw*5Bku7zkg1@8Ks%l?PnQTyQ`}(X(M>o2)2eJ1wNRx6* zXR^Za^UZ?`tVL5hT9H4-uS~p2T7RJ@^J>NS(n;K27}6UdCvuGn*biH z5ijO`_!CKx5bZIq3W?X{dvE0n7J%(B%VGAElvyrb2K&Z~qv;O%p+l1?wD|k(|yL zNBFunQ?ya6B^EEN}O&++{o4?AlChGf=s{~6m)n2x8DBy1o%f-|dpb(tts!wb4_&ReND#;@Az|afn^#jdE=dbO zL{^RlBG}6|1D)gqQ*f)F=uj;lm3H&P=#3dmqj$w3YvJ}V7s;R>D^3WCmRvC#Z8Xolb43}eYMFH~{KRb2v*=Vw*Z$wmT3 zTeA#-B##0h;YhM;tNu!?k4$&X-n<@q#Pi(SbNB69IQhaL!|DHwr=BVmQ>j_mgq?eL z06fr%&ubq%nDB_g1Hg{VF-p*9ai!tqdh)!_WK88yJaD26B=bS@z0H?>wAM`9rSslo z>lRt73zg%@ID`D8(TkxcXmd{ebRFcg@r$&eWt-Q8o3R_3GnWJ4CDPEcU;M|{DK6qL znEOd+yG^HzN#!r*4%`hFKppD}i0CR4?5lBoylV{w;LV1$ol6GU4K0>qhjEU(s2+u- 
znZ#;*Gm6tXAB2ZwP{vJOpBm~3gTi96DhFHDYkMx3GHwrZOxkS^cMwXd${Iyh(!UOV z{E_^-EU3nrsoh%=!O=*|-Ogpr#Fm)g>-s(VfghNZR-5(%(l!XC(SR$^2fOjDxmp!O z-X53`xRY~fS6zrRWZEOoof}gP-Y=W{zDPuR4|0t*X1nX92zOiSVASkBWagE?sb9gF zt_a#lp!i_4e79Koe|9mT&hkC3sq0vnCz-j@Sga}AqImjmKIFM9z0)(Qp|i&s5gOjX zO7fT|c|EKROt&dMca@)l56+mh5>86n#vFk^e;79mt_{68ISvrj`ToHmX0M~GyG7%4 zr*9gh$n7jZQ&ofNW%Y3&q(4HRR%~IHyjqI^AnVyOatcg#zD?+#S%?XAg$bQ*G!kwm zy|f-y=!Ay{08xfF3k6cTn3#S#6%;$5&X3}DRp!PF(4XzQK7+!AoNui5*o+ZI*g$Ut zb}Nq%oJ-_l-2y0=i!Q&S;kJtmwmYC8y4NpMrX)^2R$}R8z}dw#*x8xWfEKEtR+}2% zb`Q~&a?rx5d>)@gJ4OyJ5elS)&tHr6o3}c5%S#I*4RM_NaEp-4Y#^McOa*H z%M3pSAu7@HBW}V+2q2*sU`bA)80YgR3eCA&NZ~CFL%oksZWn<#mk&gp-sb(RQDspg zHJz5S7@Li`Zf|4Ni?`L`HaeXh)_q+zb-3*2D>3HH^TrfdvBGtU&hA*a__psx8M5N1 zt@%y`I`Y%+EIkx+Ej?SnBYmGbt@*>PIx-0xpXBf=;3m$(GK8=)@FYM*cFcfn@U49O zDqQ7JM81UEMu}CyuzBld2>S_0!w7!`WzrYPC_#%gMpN+Y?rL-uNoMN`H%zguNQbnX z0Q?z~KotodBpe{+pDcCZCa}ckvH(io>T5-a=_+HKn8r^{>=W8GCN}*rtC>vGhK`#Q z;i5R%ID1&T+%SL%1;in7SLp=1%_39nrASwCFj!lXxSEF>W%9Yy0#+ zWs4QhmDZ5*(S#dwz==|@?5^e_zwEu{Mg;@Kmj52`+C4^qN7Uol*`3fCz}Om7e1@ND z`z8<2bR?Ej8(uR;tD1LB6X5Qc!%gngnekUrNy;`uheuiX;v6`kgX7q&WY{X}WUu2( z4G}c2RG0IR3a+8=QDVZBSzvHAQpXLqfCYOA{3Wb`-r{4Kd%M?UxJ6ewm#}^lA&)|vAiE^t1wmHw) zsV?q8-se34O+d20NqPM~wOgHFf)CcXzU!r&{&T+-l(s30Bs7Tw=>9C5`OIn+K(E>IbbN6V_qoY@Q1))@xnp%}yS z%hk(It{iG2Jr^{mcKni*#pSdH?RHah*u@CR;K;|~x7Qc}M28znV;uJ*At2h3Z&7@nfwOV$%y>9yl@cP{dZ(11zm|BDA6OL^eue z2j^nSFwwv<+I#Q^-h-c{10NCMDujCcd;Q~Q5Ps8E>^43OgXPcF)dtY;;`YdTnx@Ge zn{Hrr_vS(aqpWNn;sZUczkhrL-7l_7mY_aaKAu$&I8N#ZUn*TLaaP$qq>9n5zqC+` zG{gUxEd7LShVeC8YVCBHyq2pzR|5aa|8Ue$LDIH&;E^)5no1fe^HPnzS9CV09uL%V zj18nkjHqHWmxb=2cGDQHSdCscbsgK5 z4>cxy@H(1fsAO+ZZcsu$$KS-o>J6uIwYHul(b7Tp){lN_i+`lgpD?*L=(m^X)RZo3 zsg8`lS@mdCfzk)icI(@Il^3 z!$k(>2Dz8I?ug39G5_UN`=>mhYZmPeTu( zrtl^?yzoG|kBxh_vneZyib){8_A+veS^v-sG`=)mthHh(L1ss*Uo2Am z>sh~jZ)11Z=z}hgm1X5B-52aUm*@z_uwg#hO3%wm9Y%El%HlLnS0MB;fQ>dOFyj6< zX&KfWd0}DB70IxCzhs?fo>0=o3a2Qh=?3Gz6dU(F^tcuhefq&ZGtKTJ4Loi{m6$!8 
zDYG?H{$|}9RayGA8IbEY!6aPQo_|$ZO|`q0h2sfrKy2_=Ydr|(S;d~Q_H|jB=WQi2 zbMK!?oxBfoI6YCrJi87t0!oX8aJp;d(`USn^Ah5ZF~5xq z!cNq7o%24GH95j#B+02-Z!YB}Rmbi84%?j#N*7Nx;;K$TPsgc#-3u(GVatwT$*mXB z+`lB5%OXLGOtUb>h68D%7GLh!r!Z1Lc6hgDhaezF?rfvEUCW(Ztqho+ozD>cMFI)T9wwLr`CkJ< z_RAsI>47eY3)e78uLJm%ijmaF$SPGw)z~%-M-KV+5k&UrCA!_2gTQc9Afbfev=DfCx&RxjM2g8UY=`#U_(+&+b?ZjUZZa(|wV zBvsP18=h1{;hB4IiVm+X>@6sp0m%N*VT*1%;?aIoMG00raCq)FuLXDz~!7W%E z_4`C|pRciF7wj7?cdSrzPNkE>>L0N;FYH^pIML3ly)lc#baGxcYus+b<6;SZlq4ba zwrx*zR{G#Yi6em8_u##_ZmZ=O&-u@k$-2w(0TiZyF1Z&m4hiH!#6Av>;5(@ZQi0nB zTIs#CeAco++H>7dwPM|?_kIvCFA2gX5_Qblo|tDun8S&Aos!i1f?SO|O9tIfvhb8U zZNdTxkETi?Y%(5thI%CtXA>}YCABk$`it$*plnh!X_h-7dTLNZ<(Mw2-Yb^T6|?-W z+Az@N&L_~Q?rS3-+3(ta^hGURhvisshz1$H(>j3$_|TI@O6A3>w^T?_V`4W*N%|5x ztY{QsejR*)lji@1gek~SQ`F{{=4_oR=FjN@^<=^02$dAmV{%lg&T5N;yzu0GZK-<5qCoOn!^K5; zWC*p+rsWP$7u&Lc^jT6GTHg#5EaSq`tC=taKsUiD05I#OK!Ym_(NtJ$s&`>oCTn(6&u~<^6 zwIyy)rpEUTS94o=i05S8NA|*6G`%7SZ6I#6QEyHteR~s>pGbfS>ztzLshR za!7B+?A7~+A3FWL&t0QcjUdpis8ERxT_=I{%$TagS~_T8cO^$84w!TES!WD3Gg31U zd7ViBN*m23CM+JUoeRDGGFfK!)034?|9uE2EHVEgL=Z$SlbEj*f~Bq9^V>L}Ygbo* zzZN&<4DtFv(s^qZnS89)`IDh3#H$lgL)@1W$L?Yr>EUstOvK0= zPs-Zc#c?Y~<}(yswkl`MVu;75(|j+tY_5pHj}nnc0}VFmW46P2I%8<%Ae6Rye^lz* z*cGzC&*Rk*xZ3r2`m)G2EOw1r!@yvTQ!u!_ZE4j`ep=%7-HGLFe`>;n`7y0Q)C_$Qu5c-2 zTWyK9uVPIkA#3uCCa6vA6G9AXS1u1Xn{@w31NWT$;8JfhDUy<+0<0O5=zf1#Bz ze#JIHnVo`pt~oYwC;vI+NxPdU&4M?Xdb3KVpQuLRfk^C9tuX-mRVm#)mqHmtDi-sX zOg&Q=+Ww}GEK+!i5J6VC!?VE|>c~616?}MAp6~=LXNc3Z0GvXb8>*iwJxU?2H~pMa{v!alvr``F>1s9z1HCE zn)`y$T1IqwiRK2QmbIB9K<~E|+#L7Kv4WuGRS>Tc*S&7_$k#_H6+!0V<9a+`546X+ zB#vK&hhjN#l0J7gVL=rv4-UnLxEW_GXTiaRYIg!_OqRkg^q>R7?q?JZVU@WBpgw0LPF*3R^*jQ#t1q| z(9InyK7|}8zUe4T3LGcA=XW43T+>gF#zXUvD$Ut|dr~%$<2Xw@H!JCnMxMPiAgr{? z8}t}Uf=B$AsKes7knv&j@tE*oXxOA6IIx;TV@RX5BIpvypexl5Shabs+46u8GV)`{ zjC$RT?Clc8zO(%YDz25Yc9E~0dm$kW^<4#F3M5k;sZu~N(pM{Fn{tTKrnTWuC57OS3V}#bxu3*Tm+;Y{eV_cGvr^v&el(h_)mw?)$!GAU~3)Yc5Pt zOZMGIF-gCJDhR?92zEfOM3s+}wsU#tACczboVVcBgHs!=pfeS$@gT5NcN&vTan1d! 
z#(2NHK~?82$w~V^=AUe(N}66(nn-kH*aS_M&GK^Bt$@nmRx1%AMzm%WZ__8sK~D(c z1;tGyC0?QBwNg3STcGvU-5V=4SK?+jp>LyaIJwRVZ@E%INi0{^+%cdw_g*DTTBQ}| zQDtNNFc5Cc?JLVJGxQ&d-rKgh=bn@0qkAQ#n=0+c2O^BSuuu^SBx;B4;taG*{7#hZU1C2|K4?(t4%qz7>~e&&(YtH7)qhaP0XUW^a}l zEGP!gVe@RFo(jnP=%}~xq}V*0bL50ZjHmO+ExyO`My9#giRhqlg}5WeD#|xcs1+p_ zO>tG?F_=5~>7~0Hym3i9f}imQA{DRllmTKTr6|cM6{!+PqS~)?;?6tQfg5OxWiNw8 z2H$-ySO*nZg~mXLb=PJ1X2QqYKo6R)Qs-iT1&+Lu3h$4nMBa$^hs$ttm5KRFDrIgc zfAn>XeXQ#geWTPT-T=XnnI0RZHyxNUJ*@TiAY=*NukVgR9Xw6-EZs2@LcYubPY*3% z1I^?(kMaG3QE%7$QnuGjL0QXIX(?bWSW1pFgM{B=bX#GaQh^m=x~&CeCgIBWp?GTG z$IHohd%I9hTzGYL9aEcTD>Uo6xR?6T4dgN zHKi~7n73-%>?z9#g>fV4yp=8w^bOsrEmNyp zNc7n`dyv?;UCPEZgr_&g-N2F9;GpnETx6LpGpJahCF3`v#JSf@AZ7CQ*t|wF)*vahIA2ka$!-j(^}~??tz~SNV}LysJYB7GBQM_yVtO z=Qd++LW<7F8}BM`4^~lwwKUpLII5A$ccIcp+=p%30;NB>7D^u$K{rhMeK?lDpc+#i z0_p9upBf6bpEC>E^IoI#kw50t?p(+mcAU8jM1qVE#y>fH@xC03e9`UXhoLP)M?u@1 z=P&e`T!Ol(Mu~Ngh(q9C;ht8dq{i=0P7mA7oY3C&t?k}Z9pseb2`>PyjuFHNJ0sx( z70iN6(z~u|xY&gzZ99erqAp+JjxT7FjaIrkFhn-Z%0ZPwh#x*-58>gBv#T=V;fFR} zI2FCGs*LBAW=Kn`3FhY(^sBrZ}92Q&RX^F~W*+T`&`U!BPGEki{i zL~dgrl33i^dfB2)BJ8zS)_GOvsg3H6tLqMag-C{O;~U`ox#^iIc2AH+pN3Gyu=F7k zX;vc-imYSrln_CWHh1bXRuZwe#)A6_ghrGI-;Om@z9G;O7Uvy{1s%b{PzotTr`l{4 zy=d%|Or<+Ta**$@{FOd+5bPdu8`lKnBs{vkgE=abj6ajcHnBy_aF|hO&Or;D9S5? 
zmc~`2dITNmkXNq0cro2rIw&+3J|UF{E6pUGYNc=Y5nP=d*U}1aC+K>R4>65R|newqJtVlS7=|hl9jTtIzCp0+?O(a z=x8^?G;o-ZM)uS@_idB+glv?6Z?wC5P;puUMguV;d7d8seLu+atep>IjW~}qK(|gX5ZbC1fkp;?(GU9?&#b#G1#4>wa{02= zwZV&hcwK>PG=vPxD?)~crogIfGYwo~{SJzw95&B`rFQ?bF!+ci!cRoBqU!}3Q1fUEh;US;i8lKeXM^dblxt=qxOUt=JKn=%p&qxlPDUgINQ zsf*+b(~VAl+e}oh<{C_5H5_?6*e#|OvVYN+J`-?=&{$=W?`X2Oto8NB|t#tlcg5FS39 zJ8k;9v0K}nPtNhGdcX^@XA;nU!N&3c*EVqTez-0LFKb;`pm@{k=}6sBbbh5T_?-ap zh2%%S;$|xr+a!tG=CXr4c4IAk>a&ykcKAj6UQ9Y=(UQxKwc0$D&V&(b(CH6D+zBio>$z8kfrUrc1|xd`hXv}r?G9-S$L79&+iT^M z%mGWsJQBxGK(IBWlET4F*B3)!)2v7CRj%TPj*Lj03~K%xDqgYD`nK{W)%RDU;2A&q zYSKWRb$UmN1GvZe&yrP!Y$`It_I}%UQaj6j+;M@1jOC+7tA0PgSgu{G3A-ObKyRI` zpv;R!#-&j!Ku9WBNQwIq;^iqLV@_lCeW=F<*X6M1g^iC_yLBhoZpDY`VYkK7m1JJG zMuEtNoPJj5jkE=Vv8owX^K zV5fCh${|$N8Ty=F)C^8sSJa+F)GcaWgG53C9PxE7La(Ue-C1lGvFL056i$U6W|`^( z+uFIr&s6nxZvG@?B6^qUw84srJwm&-*ujZYj4lOhU|~>6qRYW?kZZ^6ZSp9k71`EEJuKM|aV@iItf)?w*=Oo{Ldx1L zGbVsKAyuNaz>>WP@vtBdViZ8~oW%(=2Y&rh`RzM+IR=U! zy@`kHgnTiEds5%UNJ<`*zXj4y#NPHF zOe@c;j)G_xa`xM>KN8J8SAbOchKb2GECbP!fq2E(uAFXY;BG-rmzs(?@laEC^71BN z8(G!*UYq2-@2i$Ev5(YHTxX74wtALi{4R0#{E{P^xHs$g)jdG=zbLJmK$)CkHg?+f zJ*^kC^)DZ%ul@QP_7CC``@5iouQp8fmlFGE^KuexkJe-?SwCFq`Ggmf&|LQ@MkYX>@-MZkn@X zwr(dzdMBj^ReI6nDvF8Ja`7pk#RK!ph+x^HPmNzaONxKP9)nm@>aq_zwQLAn8dhxA z=Ee(_RDDRBTn{*Gh^8BCP_c+U|0v^8XVD4z@7HT9>&H_P-bDZhwkHElG=28o_HAlh zb>a$?XgR7rHX;e&8AMu}UT?EWdo4ot6bsTNZf5|=;wSt%cbC$a#1O^T>~oV;jp=JcvX}3~eJ=HrJHIcFZ%M ztL1JSu_vDs7k8P_Ll_Buy7sZrc(KR~aR|J3^ch#_SmKTce%>bgdDB2o(}!V>Up!Mn zAbn9O8rdmfap0kcMGN$s9!a(ot#X&M$%^6E#8E|=h{e8usSu%QRHVbi`IrS=q=i62 zb8wg@_ltX4D*C{!{8knoEK3bu5o8luR%lENsNb=K4$!DJ5e*q(*R=%p$AU$6p!M)W6+f=cdjGW6r(LKBBH~E zcreNrGrtf%2-e;VfM_Lwx%Zk2&0cp;bBS}c z>;D4)i}_m0=tWr}JC*Om+jk_J$&k=ED;dN-Q6fh@8&kCssP@4Cu`TYnb8TLfdMDRw z47g{ofi&OBL(|Zme6{&t->suh;bxZ&wHdajde+z4n|B-jJ``XU9kkJ6Rg)r`aE^yX zqmH_%v66-eXxt2RG!)NP!|LtmjFAz|Qn#6L`+!H4;yIMOA86JZ!FW5tx>y&zZkwLkB4aM)swJ z*3%H`gy;taPHn~N7nQ4JDbjOk474BzixnVq*1(Pyj}wv7?L0cBP@PeUxY9W5RHUc+ 
z*UG2|R+VQZkhFF=PY8WbY?R599oKCpcnxpz`SgSP!x6(}wk<(sr^Rs{^tP7dB(y9o zcGl&lQnmi5Kf|jxrd33NDb=Geb6@A2TRbu?4x_u|K9TD>VZGniySOkE@E{jL-yTyt+BrUEeKk;I{`i`Lfr2jl>pNd$rfZ-keo{t}uj+S-nLo?c<(y;!`fqv`W?0Cq;^=i~ zm?D>7C6>;ZWWNW0T^JS2HY)Lhb=AR*+$PtracZUeK6HEGUktKrW~&q4EPLG*YAf}q zQz61jIk1c2EjG%$KXtjEE{g4*?gpY|8YcKfk<>a1!tR)%;_HWvBSCh@mbJe>A`5Z^ z$4tH(tcHh^S1Kas`2Eh*CW=O+6Ze=84l^)tRe}Hg$HvX$m^XNju5-NDy1XT{$QEuG zsB+>C$&sPWhQKGpyxL$N$q_OAvgufd(J|Pdy1Hhe{Fjia0C#m7CIdps@s1yBTzZd= zA?l!ODNuBogf)BvC2@o+6+7D0QfX9;2Jtu(bV0t5PAIu@cyW-Py|rQkMI9*X=roRH z?m^jTYwm6E(FsvqDDBowi8!f@f)4hc6)sy3|0%2=04X(H4n~o`=sD4tmp=S7SMF^n zLw0XOMEFyN`{|wN_0b4H7S(tB9jnE&>>6H_=5p!`H78tFli0FC?U{>&S#LAQffetJ zT8O|hj&?k`q1im0XYB4{c%)a|Mcsk617grZ4dxi#ZkcqfNVrvLfITZ}F7GNQ1YKo^ z(@lti${ndBDY;a{+GN7B85ntAy+yR`I`1S}KZuGEBIEr>O?VfbqI7uvyM$Lwt0lZd<~~+3_F;L`x;id#X8H;=r_M4?_SA zxE9C60OXCjSqt?CKTuWhig)lZ=o6ZIqTebTEh3yXR&`&WIs-JFTY1A4=f) zwv6NP*T`fI#$FZ1-!q3O|1CtA}9QFUugB$gCH z9uiOVi6Bi1XV_Rpj`>kg(HnVeDnSC=42GnNBGo|csn3at?n0L~xds+x=fWU6+t)Yx zy+VuQ7E6tk%m5r9OwfM2@EW7Hj-chj64IN{4PF(quUVV9S`+xqx&K>^Q3v) zFnPAc1x$~DI=Z%zT4Z=6V+X`D1I?^ptq!OGqrLW#T~}ZBSw@$eEt~6~zdu2^Nb12( zSVpQO0FPFNaltVjd5ZI~w}pu<$%)hVKuz6>k6KmL6^lQ~Iu_I9f66Cny8B(A`!>jA z^48o#>bXR>6kqUogrqD>I^R12HKf3omHK`bzOeD*0G%_FOD{aiswy@esD@-uGKt5p zSU%%kXM&Zvp`3ui!L`6?EIiOFp>lVI>DwXvpQ`U@D+90x{(z6*&5Q2V*%js`Ks=v* zM^}R`Ry?Y`#d}6@>ms*vj(vwDJ8?5AX|j)=%miaqEB4_R{-Ucfd1VgC>oo3UvxPsT z0fk9|NRzXnTw`%s5wOo8-Zch4pG%TTD5&MZ>_iyX z-VDo!oy*C*+)2!182>$Zyhf~bV3ltw*CBH%V+gDR0Tu!IgfrtZQ)#OR9cGUFlfqqC z-9ZGFJ>Tg!E`DB2ycDC^bxPy1@8Gw~JN2oPCcKzWKR+vehO9Re|3cQ3Rk$47Ol}hI zxS(iaWm}rc`aF)63i2L;LS`;8gMBS(q+%ROFyhRzIxF4uwy8P*b8|K&bzOLcmqn{8 zWqUB!Z_oLeQpfE~`WG{GkMndMBP`>sK>p7B#Y|M6K8y4^RDNdPg3XuWyp}asU z;7O(=cbwW}#r34mtgT`EH+DjN_>9nLM|4_6*e)UxkQd!MKY)MMC-xhjH}Zu(rSshhvMI>*zS=v~%B`EhK0G4i@tc;6iGK+=`Iqa=EEBUgIj_wo4PK)7$}VGJByR=h z>G)ID?sC}u{4V4kr)cIlD4d#8U^`EuROsNjC1!m3sl3N%o=sSbb&!VJ-3KH$07WyN zu{gJ39}X_dVKf(!=}aAcKT@cPj7Is}ttIHM2T+S!mIjVWaLac?X*ICaRtlb|AZWy- 
z5wIX2>YRi)0;>j&e!^^Hm34Z>R*O>er(K&m*$I&lw{&OOj=llV2xwbVC@^pOE|ita zoV?=k_3^JkGHZO9nOPJ_l3eWcsnY-wm3qH%_o1H#=4TwQ5nhgxEoQYcxN_|GLq35` z^_((Bz{5D96xQ)9&`bWg(Gq*kOT?Ej7A^UC5~W*3n=0D=!75S}sXTjTuAa888J*@S zHcYjN56t^`veZ82ZBY=<31c5ef*ve)*-m2wXys`8~~;kk3aYu>|PS z?%{G{G|Bdqj*T$+sO_;U?%lW)!hIr}j>y=LKn17AMTJODG0N_v#>vrUms$zmGQnmX zb{9hz#Sa3t3znKa+i1xkdn_l-&@#r;d6;*&19=qPk|A@^-eyGY7tlMg^?>vj?hF=U zzU4+P&iMMoYr3bw05hJKY42uc5${I}-=ry^qE>gFK?GHWY$Omqp_30}Ra*Hf27fOn zduKU*pesPLW~UY8?6qjf_(y!ccdeGW(XyMkfG^M82uz?Iw*eg<7I~Z^7EcOI7fpxg zqDR8ss!;6ATHLLf|2?9l!KC*gutBmb4^@P}x(&-M^)^XUgc3mfLGSDsQ0utObOwk) zA0Umb7gVy_(rv{=%$bJ9pqPX(X8264N}i{Un@r^eV<|D@fI}c}WVRX zaSO}qfRLDX;DJhX`QYqU;Kn>|MHUI%;i}&?M(1?U*#Z!oGi}{>$0u>r*>G0$3r`>M zK1v3Jj`o67Eo06!IYI0}^0i&pK@(EqEyZa^PG%Y4L{>pFdvokpG62O5-k zR{Uq7Y+)*XX+$hv3}s00PVo!vl31dUpk~4b>Oa{=O#j(7V*Nkch>@9zf$=}tMvN>B z%pCt;+o;pX*~X@x1bfTX)%t%-BaQ}}Em>J>GP2VM2g9w_r<}JQpRd>Uoxaa2Jg?{% zc3!sXr8~Nr?=9kln@^O@4KL^jGCaLoQ$3+z3{)QD9ZnyMw4Rl??|b^;1?E9kdo8| z;-?^}1B<6`Vsm0_1SRjp+|tYdFs_lFzRsB`2RMb51=#vy2av|TmZ|xrn92C1OO0ph zhuFV1H#K~D%ghLi$T;Go)$}jQNXkiRdfGGdPhsjGoEYCdv4hjb22gz^ZLKZ+gMYVT zb9+yVFG?EPgXrq(?bDBhou!>6l~s+U_3zU_5gGXlDvK&fnBwcmsrl^$ggx~mlEIPI z{-e0i+tW+$(?uhq@-p*}$_V@KAJH=arK+>Bt)jVpPJf_I&W)dGp;b-)V;eE#gR*C6 zpmStwW^!=-g?vQ)$hrMi$@c##_9XnTZPbSI8}pfmU9FuxV?A?Wke>c`%IxrwwKX`2 z8+wqZkkH8H20+j78@98&^1$a~YbyK48vdAIKDivyNaZ5X$m`pf4Be0M&keY zCu{Qg=SKL)7ytej(f=oR{PRb4>$i69r*Zh#_w~J3Frs5)a}sks{o4cbKIaC_k9t2R zHTvrp#oE}^`0~v+^7keV)AWb`;TN3X$nd%KkH9Oc&yjwS={Ia)Qe%5^Xj*w;a%31$ z-`4o6P3~`V^0$pk0*pS&Fa9%PE*;`KB}>4 zkbOgwhaW!UZ?y1>zF9|6T3JF)#yg`5Ch()k4W zu}0FB#6o|~Fx|iCaGYAop}0=@&r^%rI@q}prm;C-i_4)sAM{q_9t%^o5cghUCVV4H-C$p)oHzoLKRQcJz zn+;+iCAmuaM{fdZ*M;n!Xr~!&a{^2K3zwe_)@H6U;;E?g%C(tp(?Vj)2GPR1=h=mC zpP=-kt-;OuCJ{%t^a!!;;>h`I`=MrB4sW2ALlV9~U*Ki9MVG#z#lQvgW@30*5@M^>*UB{UZ(FQ5n}l*+7E~Wny55J0S23o~Pv9Zt 
z;5;L*vd|2xylM&>99gPA4n7v7lwC>aG9|=Cl-k}+e`l3k_UZMo}=-|%aSQa_BaK{x7UT8gi|xO5b@PZKbAy00Lv@12d6V>`K+#>h|_IZzXd?gJ4K8eQ#xXQOxa!4?f)c_WUCDSrw+Z0wo>`&Nr>lKme!O$kzQ6RGKUQjI{kcceTPJmx?pNG*e{w;(YTz zvVn?qQ-mnJ=xxUl)Lmdz28!;9Adsex-Y+4O04Smrr`xL!9Ci*rM6EKyfsgw46^?*~ z9eU38dqS{v>wSm!E`urnGiixgp?8*4p3LF8!1z)cu^3Ybru#FBhCRk+D`9q(`e#g! z9)8*o$^44FnU8PNgn-)~W~)N)t4Y?0xuL?P;llv}Gr`&El9u}jN$%)?xI2zOdxTZ! z5`Li=NIv>t1+Zap#FhT%Y#xi3*?dYx~D*#OT%>W9I& z<0i4aG&&3Cm>0NMHcH@|AO4r@$;51-J)igLi4SdzO|+Je^Me#}fC zRHPwmZEdgY9aFdaX%H^!qKGsp%((gJxU^sSO(QSC%6U$+6lxZIaOdba5q+Qd^EEdd zXhIl30xvjfUDBo0JP$Wjg>6dKZDzOrdNvO^%#z{o*SLZfHREo|;l48GZGeQ;^ z0gV;u^GD)G0KfOP4L-_Pkx~+ZP$hb#KN4$b(@LBSl2sJ(1Wm7P{dcKB$7jJ~Gk`2h z+@>KlZ|(GJ6U@>@NC3L}g_fdyIP>v@)*kNMfyZm8J?qRueykUH=wC-X$X^$9ezxR_ zrJVQ3@gX8RJdwKBYk^~Ka|I~ch)?(?0~`V1RNg3T{(PY4Iiux{0#BF_S4fD z#_nD2uUL0E;X;DNhj2^l%yV~JK zx?}G=oJK}f!LVpX92{oAE}SL*6-}-!7calBnC$o17Whi(#Qz*YtL~S_(8#f!*+GzIFeBS}}VA9a(0*0rj z4$^3%0s~LsZ`gT9)!{e{_~p=>Kr!i}h8h^%uDfi571LXVNdAR#XUfA4GwZ=<#1QCc;asAJp$2hjS)1W{m#XWPpI<92uL z8}^iEuE|%z!-&<9fc5)E4!Y#nUDC4>h3!3;qQ+i{`|Q*zQ13LC_Nn75eLLgVuQ9jy zZ$YSFffw%rEuwzjt;|;qI9F(G3o@d}gvEjZ5+rt#7J+R8=W}JcV=UCGESLUHKD}Xx z12bOuC^YXgsWI(9oAt5Y*ifx@-9KDHi)cavw|R?IOdl2=FS3m|h+#55n{|3(Zmm|k zJr!ZIx|-Zt{1r1=cdgvWms%*?1Jw*dmw5JolCTarPL^|KZ1V9{W{~PSkb8Faitd`aIfmty?v43M4FNTtHUk8_Pojv^3oMD}T)9 zp5a15D?hIqX(%dND?4s zA4)jwfY0Uz150LYyRwPxa~`q@e;M+s?Lkj&u_iV5R&eNuP#vgU3i#!4~PD+I)9p4r;ULj+s`He^WPGI2PSVD8ox&7^%_W34Ai4S zYjC_oWsNjt9)txSn! 
z|3O#>Ip!tZp;6JV{X|ZsVlVbe1o2ixN8eI$n?71q>(x+}DiIJwd-%@=T!-uqM^R<` zF}`w(ePr#*PGw^Xq!!L9ioM!!kBHuU433lfnte>A5_GGjC-}}*WHYNDzSopduZ|QX zG2Q=DVRb1%dk+&5@;$y1ehAou$;@I|LU=6}Tk3w)DYrjFSanOhOJJT?kaiZ@s|B{p zp`D{7f99Wfuf726Gj#pL*~)gVYmfaI%X1M=N;|-CDg=Dx#LIp@dY%)&-hZ#Y5<|1A zO&wI+zu|R)T=v`kBC@yl z(n*92A&W4GGlbx!I$tx0<%`A+oAch^hgTz5GpI1~Pu?73R#AkD%I%}L zU|Kqi-$L9^9G=5d1~D9G*j744BMaVd5fAge!6j-yUM$pm^E+1nlViF>n`bb5_*D|g zC@zDU*M04a;QjG327%#mndY%X32Gv~O5$`A-y@&tq#Vh`0|IOYi+#L%Ve@{=n;StD zH-`hwmp61+-%t@78$@%SacZeYoarjw$3aP)&AeXl8UB)(3K@@vIg*n;HCpycE!7_3 zMulECHn?9Eb68}TCtj~w>17A+5`ctdhQd+%2d7;VR#YpvKlY8SGM8jBUOAuI9?K*g za&lZ!Bd#o!UWRe5V4>QZrQ22*?lVIgp>-{K#h{p=u%%+TZVP8>jry9~qEm;8Ge`&f zvXe>)U8(@r7Hgx!8#3)YkC3pFDUh4^Wqfg!fXlg`9BBg8S67`Jssk!ws+v1B*_yR`i z8m#w4$M)O(Dajo9a|lJFUMi5#`{iu6@h!JQ`r-#E7ekHG^CQbZHdB%RRxI+ISR$dH z=#i{uWJWst#^C+63?P2-;q)@xUa^Lkw#Q(-W-o32R6G}A#nkg4`TP6#RU5U9{@6u* zJW$DvI#~CBq%UdE&O0OB5#2T+gue!h0rp6rmD8q)GvG$pc5@(g8Gc>EM{2@ z?uN4S)o`ZPJ=|o=R;iJ|3P7#O1-*xO#=gx*O%miC?zh^ZF4}n<K=ax}m6OfAvKwtmb~a+Rit1_n$O7P}M8@P=WP>-MA@J_mscZh|B!p8Fu1Vi4?o z{@LG00w^a2q8>LG839+=+vNQ);1NV7SjDaTntMm$p&jR}THzcgyffBev@krDwu5hZ zoe|24HSr);H1#%W`35y>U29B7tWdHy4)n|#?vWK=S(Fh;aE=tueu!;!!sb@(CbB58 z{n~>Zq3!@@A$z)UUm29c8@Okk-{`= z&~^@+`%t1}Ro3Z)>m8UmF7q_J!Y;FtgR>K{GTLy}OyTj%7@K9iidlDsRJuFn^Iz5B zy$Wsno^hqaZg0O};FHqAo1r`a3v<@)HF%GNU+t1T%)03nR(ur*;;B!~O?_=+LQg%* zalX6FV9nYT(4i)xA>69VF`@PrhC;WMN%>edhNvIaE-}n$!eD0{wfi3#%7C$TXvJ+z z2A&ofeR)iF=u~GIm8y4JZEDW+@u^&oCJ`M4(j`fatsw66G=|TT6c|Jn^V1gJ$Yea0 z2eACY2`9PB#pSToC+4L}df%_N%!Uu#QywRtoVK!_+7S%FXL3RYi~u`8#J_&rt`pak zDz(M6tb7em>p>t0A?#$GnDx|e|B;327NF|hs;Y9lC-m_Kn>oq$uLk2~(ka&0)#zQk zw?g+9v&77bvfK0E2cwl?%OUE70%}PR80qmjoW@T?SL_yX90pH%bV&LqwGEz8obmp< zmQ2Q-$)K8%690{f`A}VPgqWPSJ0D8seJX7M3Yo;33&Le_| zF02@?DY?47QrjU}n~RxL4DC+ZQPq-9Wk}qY6sP zMw$h2c z2~cMefYgrd&3oVJD|&UR@h&X!()rVUscY;{Q+$|M9Y}}nU+xe6&&iq3Q1yg)6anw{9kF3v22d`z>nNj)l$F1=xVhkPU3EnZNXY-RtMk~UiA?R*V?0rUM7G| z@PuF4(d|>trm@;^WSUnXXYsCqJXJ})fB9dsTydr+sT11i_$iE`FqZk-cB+XG`QZs& 
zl>$-lo8uyN9xK=4=!0VpJ^V+*p72tX>BE%TsRMwCN0)oeP}O4s7>=ld(pC)ur|jaT zS_Xd^!((=8J*nqg26o){1u!ccCz5LBhtPXS#_6z`A6%PG(i+K(TML3TQ||$*GwJF6 ztzsieGn7@JCT9Ty1TIq_L<(dj8PYyU_pitZMbo!X^qYG6=C+yNA~|18l`2uUT6b_k zgVpTNnS>XRJo3m*fS_y4>OchNHdA!K$p9)z-hyFJ;KaskH~ZQQp9>rm3~IHkVPi>g z>jN^1P0crKUNxIb;_lSG<Y}<(jJD6T%#El!ovZS6C=OCjeARwPM zN2*@S!9d@Mmn`ev;=Xl)BH=edbkiTrQ6J-Z8?dj6fmNH|YMh1tM2$ZR;{r}rq`xw8io>LtG0EOR8F$vMiSlVhD0i9xXT9Xb#E=;d#sGZx8xH7;R{G=j`XoTSSLjM$>EPq!x*-|Qia!M9Yz4Dp5oD%_9pVnjx2@= z%7Bauh6e9K&KyU)+0OUbPF*qzN3?UVYA!?jUNr1uBM4q19Qg7j1adD$0+GDcTN`}~ zSxq%8^eOM~&^*uTI0*m>F2zgmZ;>oYvBf-3Ibq0q^c)b4M%ojGIAAQai_+Gk_2x@0 zIFy%Kx$33WzFOBaNUT!d13>O_c0>xg+dj4wBIn<~o?{##P(R!tk{}{2G(V6eTHj45 z6{}QN%>gOod*ExK?|(|jwPNX@BEutPb@wV(Fozm}95?5JsA_*tR%+YDh!XMVmb_zS+JEmbp ziM@cYWm7@Ljj1`K{NBcB>H9{F&vhZ1tC#X<2}&h`1w}M&(*WCcl#;>gX(p2=It*D3 zM5|`I@eCEXXPaL}TV{$$Z3mKck<(iO76^-i{J5t$JwO=Lv)oqv{^|%+R`xjqyn3L< z0bJOLj-jSFZtM{;VE;Qn3(g zMna0D0RDhA8hy~-U90-AxbXG;@%Sk;#XX8285*<8)`rNU_?Sk+7 z5jy>;vV31Bp2PD~5EMt@Qzo|fPyPa_=hr7I^}~tXv@<$Pvl&fVJ5c2&<~d41^hic! 
ze3Rui(hPSRF!*&1_YJZt1QrJiOzE=BoiY9(|G3NNs}mgpRyBE?TS83Sdh#-)Ho`wN zNz(qBzJxoVHMM#msx|`h?)tBUn26*5v8yJn_P*I^i3vI$CfHfr`1$e)mUqw0q9YGA zkqiuGfUq`m8xB9*A!zCDvA>zb#Z-I~H|8Sl2PK{#QTJz`5;zIAZuc+Ul?Yx42vTfsT;ZQ0wFxVtg|Uy= z5(j9;FCx04f?5eag(bHJ(Ju_M)9Q^WO_j#6WDTt5KxcqqSmGkN+-=I5Ue+;1PZ@(|P!ToqRMZb=THHCjv79Ic*#h>#ISZ1zDIr@pR zG%hy&A)+(6{xhI~Hf8e&7appV;{S)*0A$4N9Wto_~C<*z66yyr*#V(_U&9;fCk$C6LbmX)4K zU5HoYnv)5kZ?zC~-6p7s6&4U0XLF3~ByC?u>U4P%JF zba>(jIW901-a6iSy~()m7w>l`8LjDGO%v=PzNi~*beRY>u!-ZZf)ddt+F@9G4Q;6W zWYBpqC%2d$u1|L<((YDJdxgIY!?&Vc*e{_~j;t*WsFk^~{TuNvivCktomx&Gt4j_-yn}0pb;K0eIsb7&n|} z57)XzK8d7eW6{ZKhAvc@!}K`WI;--=J$@Z_OA_qfZVeTs!2MHBQNAoWKW0mg5R-t7il~!| zHuCkY%j(vsUiZ2Ox*88Nx7e4QQs?~$(t*=ejQW;Xd3#}X|N4c^fTIpc?HLXxk|w;7 z4tSi6`F1CKhIMZ$KPgmdTTXA-#774=cQ`QXp*Ok61^xr|UWLc?!@GKyV!p1--y!R^_p>W!z=HdFuN(vYH7HWYeQ<^9V#_1f{+d{odm5*b`-_AR2QZG_Np{Mi$_Gw3iWlE@*#w@+@ z0ruzM>-%w0y!SS7z_!0>Z3E%{GU2xspR03*Wek~ZnjepVF5QV845 zP|eR8ollW~>d~-@UQr)Us&YM$kLII!b+8)i-rItkB}NJC7F{)`D&6NjVW&S0w&klO zLKeA6O8;bT#ZL0xWl3(FnGNz4k9K>5jmg)n{0rb0$c0xtBLsfbHQ7EpGrj5l1Z5p} z{K~ck^&|&8UZ&NFOrExnb60Z)sp-Du|NX(O+rW|3U06}bF0WEVDsdw8e)?G~(pA~> zq;IY!q}vE{!KvR8z9VyPRQs%$)gVlWve3{EHnwXC1-Xmezu$i5lmMBAdg^UUI=JYW zCb{_Kk{q`}R+Z8_T$0XGS9pelOP4KOh4$>%-DqZQUF*9kG|Mb z=eKGuPQvCz3{OV6><&G{1WsLf{o3YlPP?RKULfL;3MDUO9lh`i-3Pv~h|=UMi9ek; zsIQ7Cs`Wa&3mdt=ZnEPM5OwHUXpAdnE~Dx}j>cZ%HGx+Pd-}}(gOBAvQxbJC{%e%N4@0Ghgc#6Jtv z>&AM6(DJd&^Shy%%Gb_2`r{izRoqC)ZO2LcprVE4QpYWk%U86Ab)AN=s_VqgwSb(EoJ_Ym6|O=4_ZWlw{UD11;8~DlUybLXf zZ_wwHCTh4`KyHVJrQb8&5W#AEO)7nT@Hlz)=7_i&%@+qMBdX}&=eK25DI}q_X)VqK zJY#`RJ(@=WU$Pm@){!XNof+Tzw5F;W>~$;OnvJxQmUh0*(u2vPTCk; z2e3SuQ*;UQcNN|f+QEm{k(A6+S~9EO)%n&-2!ybpXpcYHWZd$4+s36O`Y)FfDb@7F z*|Amm4h=kfM6(FRHZ#XGl_(4_szlNK-&*Me6r#UVD?>}zG-9WOTTOyEo6YW-vUF3Yo%qkhh+^(^I6UJ9)8$Cj_KJ z*RI|yCaLsYlK7F&+4zQhYZfeE)&B7df5&M9C!U-~Y z&QnW>8vJS?0}sK!d$(s7w7odtva%?4vBr)g9EhxMc=B*=UpzAVy7VGnf~lL4Ms$p@ z?Zpb`{e%oh@z3!Lev0o$-MX9PC~CPJ6db^Vj0b{v-(eREJ>v-27bRfulgbe-|H2`- z$)bchd?Kd{? 
zaXqiDp_;<0Op!i&*z#e5%dZXfZ&Gv4$NLgVrHO8Rr#K?7mF~%P*cy8OLXK^b*%;ue zZ^)qRlQW8aUK|?}%k*q11+-X!tQ38nLD=~VRTOAe*`yln5oUV$ zxSlOvu7D9npmu;!N~fu`wezqu6u|jiqFI5*9gcn!z?}&wEm#xXWU0;knP27tUVnx~ z6oA~#-s$^@(uvgIzXEham*9SWm?X#1u>OW+1SP+AwLdXL1aq$-cSNAg?WH~}#;VuF|H@vXdojNw(uK{{#$#waIIioC)D0>o1DUoDGI4Cd>eO36Yt4jHT;^&oK`9a1S@GZS zmn~w_P`jUH_=qy@j?|6XFsSu{rz8M2pM&7m!TjL3cxY2{>vh^)ZUfK0er1g5~xtc&G60~DMEoTB8l_IGI zQq|9t17JW3QhTAU2N++(t5M8pzCQ3Kpb~Eg4$=j8!7W+q`*`QQoO&_t^W#_@AANm` znfblnC=13FHiKZazM+lH9Cg{Z6I@RqCF9z}bV-+NSg;W?eVA@KSc09d3r164bA5i05lH?fDD!R{xx8F^d+y4v zQzxs=j&F2Ws1dv(9eLiy;>act16jN+z9J0Hblyo6C$?`*VJR|$vt@BLCU`=?N}?nYDSku zB8lt*H~NZn4(M3a9AAR@x~W2w>ZEViR^I0e2#3^)?6+5)*c0b~A^sHopw|S&7aV`ZhM=lb0sag^_O*cPudJ-w^$GYV8r4N5Q}-vvnFUQk6TO8b^tmr!!={Wntxh?2a1Xk6@0-(13ei1=3-%aOvB z*f+}vrkPT`%HTL&!S9-Pe&R_Y8Qc-W^HV8XVVdTtsvz*{tA;kx9UuLR1JcM?pkQ!k6AC!fy*%VzA75}RL&{ecT#>2(MXz5vH;wn+)T}M&BZ}I3b5i- z(haOUT~v)9b{nZjK@Wc_uag}a;F3RE+v*@sHpGP;UMn)-=Z9!%e8LicoqUA)b0`!S z(_5&-*~FJ^h^Jpb^yOXW1flJ!)GnT`^BF$ND9_GpCmpue1{_~eTHxlaUR7%tYwN~G zn7K+>9sB~MwFpyqg<7Qhn*XL5WX>$|l!vCKB6X5rYEK|DmP_8`V5h@&sc^&I`%>1b z;=$9?aC2(Xc8ijczq;&&3R#wR_Bd}EoNzbe#gHmNmY`3A1?`GnmoPMxg4Csm5*Y{$Sg3%?sU{H%6S2RyF8(xXKaa_`o*dbZ@89MVJ5p#T0 z>~~)o(f@{Lba=xuj1bIXxbC;pgAb)FH6q;KxR?wQp9*J9YI@3~x#{j6BN5rjvUpo= zk&-&PxRnm}7=oG|r3WM;e$25+IS%+U`HtCy6k`;1G`uV)$c#mR5g&TJ~QM-o|9frb|w-O2^GAp#h(_oIaCV* zSsWp;kE9xA?KSBlT%Xdvg1i&gZ+mIqzl+om%8yAyU?fYk#fST(M|5Kt5NJpT7U7{= zay`fA^h#Ay#OS+&Tfx;OtmnHr_q9O4LP?1B;l{1T=kCIrA%p}Is@mmjI0rH--zi*RC3mUuwqz7xGyFV5qXDHSCt~=$Y zuoJLiEeu)%Q!d(dQ!B>JbDxeRN<@{8db=C@Uqhfnba_e_;%O|P;UXyJZ3F)S(?+r;oLWvu((T_z5zo0$8|D6*3Yhy+?FH&n& z$4>+LB$(y*#2ju@myyY&^0c<;e|sF;4tzwi-TE>qe!8;hVrbTOBA+s#s=l|aQ;27O z$`oCz{9A}x+6&2KO$<-;a2G`uH)ya&6>*(1qsG#7dcTK-xELe<@;`)8n|1EmV*|7f z(p5xeed9-ui2+d8smm0rFsJ}WTe6z&FkfEl@;aiX-gY;lEf)jb#E=W9HIz5V{#R>^4Z|GsupFl! 
z!{D6!L~zcldQyqWa{>^CzJ^&<9Tfr-RKtF-ss{e7NkjxXm~ulh4)GPq1syX`u9>%; zNsW<|qdpktpKDa0mABw{xAl4@BE3mXekXRq^TS4lmXx}_VQ5#`P;NTy`eUIacDr9M zzeln+{$74_Ljco>zzr+ig~Y%!X(JBg7n#i_#HB63`x#%uJ}`G0I!&#xY|+U~9~gLr z-L!tqPk|-7_VhXKkPuNp-A^X;(puzS5#!w6M;%2?arIPXdVdJlM-=I<>d43yJlmm- zOVF+}t38$Us1`&FcFTh^wcxopn!T6IQWcZw0-cTOB_XUSxNt8diF;R3ekr67Vv&wm z-ai{-YIJ>WmzES^J@oMKMxc=lzCE`5A{>PATb46kY%SI86Ki_d{H0K2Hi*r6b6L6D z58BP3XSr;kLpb=kXY-zmvSUs9)^WL6N)9tCB4vzLT-Z_>MYNZ!gUp1t4azvfkYKh8 zmGn`1R%s(bV~pHHKOo_A+wfiy!Lcgzqvx>F3v)%L=WWDIiUnSmYkM2J_aV>g_C(Cd zWseF4!vA}T;fy34A;9}~4K*&`v+x|x)=NNV_ljl!t+PxnW7uGr`c2FXfmLS{wi&UJ z5?@Uj!+63+-yR}gj&B#q<{vC2B7g%Num7TFNV%@Z0VtOeM$k=>G5Kw>NOJHG6C9Gd zIxK|>ay-hnn|aWU1_g~M#c-;1z*$h##WCdK^`DsU@Z_!&Z@FX+4|Zekr{%MtQN>k$ zFGx(S4tot9K`$Wq4&tC{-Srcwi)n&O&OhYz>A?<&UT9+yC6RS{-bcj~oeQD?#_LXP zxVWH>AJ;brD6iUfm6(+At}KPyd*5y(%VIg?&u45&XE=|CX}s3>U@#EBTCZG$DV_Z; zs}4BHo)6Z{i-5p>`EE;EqOoV~4{Dj%p^JW4M?9HyS@z|N)-$fied&=v3dutagyya1 z#twaiD>!gG(tV4E1$4hAV(X?&4XtG%eE}9_(UYmLI1r3OW)JIY;rAGm8R9kdcn+wO zTZh%1xN(IRx<`mY(-h5=5j%d6!Kj=${QY4CiX#3caU~ByS-V;l6*!Z*D-c$1BWoU+faZ4CSk+jUy0@ zUn4GgpHTl8Z~orpS|etvOgwxXEP+xAm2UvDXjMtS_^sJYILU%t*C+h_zF{uTK<)XH z$W4aTHy|c7XxH!O9$ROqP+gtXbh!2y!huCxcPaAp*1DkQRm>d&lZ4-X-xvy?@RcP{ znZWN4mIa&XQv?vS*7o#0+5TbM+t5tz-F%Fzq9in?y8jypoZ>_8=3MwG@Qa}JWvix{ zq2=}6dl0fg7VY9WMr!tU4e;seMl)*b(Aw-F< zP@tQWE6378ladh~29ikR928-s`3~bpydHiu%x23c|4F=PGKqOj z4o(jaC#R5d5ioj(@|qycr`+4%{Y&GNDILTgCo*|dG+VR(_I&Za@N1={O)YXJI7ju4 zW4|ECJ!LKgshhYhD@KgMjf-){x;o1iwzP32Y_(09&Tu48#zi#K1Aj~N!bwYmrQ|Lo za5r#ySF8v{4+W$`V}(1+zmilEoJTacS-mQuHc6HcSLrwE4l+83W?X!}s(nt8U8+MR zM2yU~ioUMwUOf^S+5$Qxz+4i(K}&^N~7ky%c^Led%(;tY>mzXwQlOIO1T(gO!;~*Gaj{k6y86;RfPHv}(D5XXS zH6hXSJC8kOT~Zov zVeQ2RHk_Ye(^6apxbzqut^2yVIJXdC1s;anemI%oO`B`!*@R9I1{A?I_2>wU)TbY7 zA3WTk{?0T`Caq)@L5Il-H*vVo4=Zy~I*??LM89eDF=i(kH*5X;+j;AA=hb|8(i-;7 zm+Yv|z|5BG4Wxz;130gSh5f|~r8hQ8Chz62Bbd7UsU-&#K~{s{q-=GT(wqzw*4Z24 zFa+!8c=Cx?z(chhaNl0mZUFsYUXm&-3*>g}!bKyTwGAt}kPXi*tr1p>5xb)P-!bH8 
z#jA*M;$7t&(Ri%l!B^7$u!DR&uUK#Mp6X|({>*T54ru=>T2BejvA8_3@!-OO9v}iG zyo|^|?2rCNAe`|9W8yKyRk45_@>j`ATA|uYkY0;OAH%{3GC9j!fs^mVIKxTeScw}< z-mxT;kUCvOni;}4oejM)D&)3^Jc+UzFTVQnzdu4@e5i#s-&^*HO3cxQlYVcsWrRU0 zr+Qc`)lcFBdZhsbqc@;HrVTltEx{ArBpGQ;pv%e>r&~&8(DJ247dhQ+b|Be9Og$4Q zaU51#k9$55qCQ8=kMZ6QdE(s5mX{vc3-bPsD6gk-r?lKQGjWPV?zfqI1-je><+yYG zgk0)7o2UG~&FgrK3&NoeU&h-|CZ*omJ+$bTQ8pek-6Nct%F}1aR;P12E<*abrhm)$ zh6+3tAxOYR=_17jDT<^?w@K(}3(2NI^`W7cdb;+DybfCH%ByTyxfLFAHADz|%LUmV`iV zmYpPw!)StWsCpt8vBWU6xB5g#WMs`N3%n}QX)jzDufWl_rkGw9Pl{c$m@_&GMwvSi zk1<1Emd~K3=WVlF6vK7p%6h$;slbc{pNG*TI%f`t*ombIb1RH!MHr%{&YVct~h?PUo4eZZX@_HEbH>eR!f;kbKn zmsJ}`*jd6`WfkesPeppuI|5^?_M=flX(2(=JehIOLoaB1@(dm4W>c6)%b;6I65aw_ zEw~*WO2yQ%kP@)pOmGp4Jp3U@vJBxx5<<4dk98y}>q}5*E*cR)q;>1pCR)7i z(ON*;8`+Zwl+Ri!QICFKXu&f<3-1^FA^o1wWrb0!zUinRnDb}iE>#GLmT30@K;~Fr zDR}lO7Nlz!1ujTGn~cZYphCtDR%opRb`Cjd-@M2G4z#>I#ks<6!DnZ$Z15H3W{&@6NO8T|UuC1ErOs!~;Ha zTdF^xhR^+Kw-7qp=qeJ|EF+9&^mw0~3Q=5T)|)%oo5t1jGd%#px8+e;a9>3cK`q!y zE`RJ6k56d@Qszf9W8;eYvm37IY7f|8#4V=M4lp^9EuJz3*tO89_RLGJR2$C>iVP!c z4DLE)eWbFJa^{=0{f`YimdGCvJYg`)u{0l&>BjwzP#1_KYZpVm3;ZWJ-x-3(9^VZ&qzj z^?5^djg5Uk(|IgGA>Jlf5h>Z2or?~~%iSy_&9>zT3j~{4i$G~_ zY3f_%EFMepZpw$5QkPAKc^wnc%wPQJ-8`j~d|oHxN)|b|q~!ADbRQ=f;BESNkQhh& z#rLywaEhO+DwSY^taL;HMn4Gia26!9gp=R8gcw@UFC5C%V6aPVBjR>q_^y?(VA_?Z zkai}vm0o5vvi#rqlVISoN|D~Yp9(A1MFzKKq#ox*WXw*v`+1j22Ck77$~6Pc!0GuT z>j>^V_>tekqIi6JCO)4Eqyc-@^Ov^Iz%uSZF(Qm={nlE89&)&z_P%B_Co?kvg~yJ; zwK4@wh1YL!{as#(*o6k}W(b_-w!h#m;s+)mLB@<)zry8xrwr()hl`ymU3p#&nBi3T zkBu}Ma7B<987~qNb7^;AkcT62KT>)|tUjNgD1X8`66*WlzExxyD1r6D6DaZFIAVi4 zV{U`D8|-C7f^zm1vDeVCbQ+W@0x@@uN)jwK;AVVdWlQpN{#xprF-r16NhnxKnqQ92 z7d;HA3AcB+-R$}}fCtU8cr%1)`4k1?eVGN}O1^)~6NB~%n^J9H2hWH1FYP0x>th_r zB+NVxymNPk9^vJvAFd75F{Pw$ux(A-a#){yleapxB#Q3Gh11Y{1f#*qs}II)VF!B< zDNrlb#`x_GzUsczL0EK{hcVM!ZtL~9y65EZyS|hgqe5z5i8E23`GSlH^|G)|z$|9H zU!oUyf~Miray%$v%zz*HL3Lh$@0 zu=usF+GfDQ~;CO}o zu*5G5rPM{^{6RBr`?7##YzcLWK^L&#FS7tewR-A=5=+K~_t?${=4OO16?l0%t2&^Y 
z#JYY)Zw5xk!1C#1_3;D?^Iq`Gup?rD;2KeplgQNENy$x)Cp9^GR%Z^Nliuka?k>@U z2=!BD=uKbrI%2D{syR^}YV7o(>N1?0TgAtiLy$71+~wPbM>*MKHkE31%y2Jz+Vubr zn(JS5;o`cv?w>mL^_M%O_TZ621ZPjghJCvg<+k&o90HcgDrC)Sa1Td@4CtAE?jk$I z-^O++_gpIfNmH&1X}@ONedBtEXl+MwG=~yR7Dp)S<4DfJ_B`Doo&BJErw2+Xk79vt z{YhGk7xQXU=y1qjo~1e!cT=p9Shw1nkQDvQ%tQz6F4AW;GQIg-bhYAvU1eB=xGSpN z^Sn7vB2MjA)jw&7^X-JTcqjKoJ3=WmKcSmPZ(x5 zQmKIg8KT|A0OM_Fpr9QPW!9p^6gLM;jJ6;o)?X!X1UloaDKRi}FzEYq2Wcl~XmTic zs!@PSSYPoZ9MX?0T}{l0@7!L>#jS#~5Nm2ru($KNerjB$|mW*X!tR=tn>HiQCqhuGK_Rf>^ce) z%{u~V4=;cd24}G5w^o?{b6CjLdGqE6=$@Dabt~HcBZ7)wVP_-KVGo41kS}j)fW3Kj zybx;=^a$N54!-i^!ziMD%PvDU1qhEPPLb@H?DA}Cu@g@$ovHqO?>1Y%LI*?wXE&#c z{Q2Mlq@p)RgALpR5Nv&0oBTVh&h@qLxbI4Eu(S$=X65x$T|7veb@ic}bK~fR0KmdG z|Iqll2e4LByN*zG7oQoi!}B4)$x;)wJIPA|PnZ2r-}j+Od6~bG+B~DZ0oYVRz6Nvu zbJ&?pkvyYf2>tWien^BSsD$q3+248TFg8nWa(#J8bxMd-v1Kh_vy;g{v{G87%vX6U z4ZR(un@OE&L%B+wty$=)%y;~lX$ZiECQzn-vQ2E=i;i(4N4m!QLTa0)`lUE)bTV<2 z@HE%xiaG_5d0jef<;R4Tj#b;b@6d(jC0H!{c_~qmRRS(!a6e-2lzyf;VJ;AzL z>H!TQdCqqVWP-Vz$J*CNZ2hP?W@Oc7E7!)_!x#&2@Z}TIX7leFvU_18uAQ8#n1?J1 z@MvgI@poIm=3yCDLcPh;PKgAowwai}LRE)tP6vGea=fH;|70fF_#*LfonzJ!XqWNsBg+ooFvH#)_B?6nJkLu*-~mn-b+up{kr? 
z9m;*AVUPVzz|Ms&A~%fT=ij8Vt1jki(w?bt4l@q1gIZjrjDH|DC^`s-XTmp zQ57kempSgo$L2j_0l&rCaSYWpDGNWop^cwirWuLyzTrabfXrik|y z@*yQRxm4T-(PNj~Y0sZR1!HT$T!LSfVKQ1dt+b#fFeLVNu5&|wCJObXA#laITY$Eu z8XTswXF*38n;snMZjgGFf5FuU(3w4jE>uk!_I7Y~L%=XPqX0km!17|Yig#QJj$!fI zeKyn*FLep~%y)HaSE}HR1g-pBD76(2%-)RDYe_B#&P_Y9eYR?_13DiTUV12G3RGdE zs)mp`n8Nbe^ygoR;R|J^(?v%+>__pfP`4r@k( zqHu4~6b}IEmJq3A;NCs_xL;iy@S6^UET_smSDZsgT3wh$<)ZF^6LxqvQ`y$#c*OLi6H8?tS|?6$=ycVA3P&1BBKJ>% zU5iPAwnF{yhK(8Eh=yHkHjkxfS_8J1+RaK3dSC@08x&ZSRzw-BsbH|C=uO2Sn*H)x z&imd(N_wj;E@~2YR8a~?%KZ9)g(<)I7ply9MZa*Sk*c>FO zJpS!A3>#<(K#5X-^~GH8M?~Nvwy>vID-_giaz&tMYpI`Ae=}eTb)( z=JoVybI~=MGX~D4;$hW=M1Zr%$Alvt{~vXPof+UAZruvxBmP(=g^a1~iNI-fab)QO z_Sn+y^OBrC0mZ~OL&*mnCvG+$RVkNP!1gu)X_83~K0Ykl;>^ymB?k*$-BoqsL>Z$h zWEE0$XAl_uWH-*DmDre&fX)tqnt!LmF?*zJqgwVne=-%s=&$l;GlI@?>-&AH_lgrq zOOf=DV#6)+V6-CVj4AM)WX3Nea2-dWDn}U8zvE z=jiu?DPU5r?_4*Kh&Y6rbE=gLj)M$3xXA$2RrwMPTboXrZbQQ$R~9Gf?Fat*r$P0{ z9H0@>aaL!gqV5Ge3`xro?kqoDoSGxWQ>va8-u_thAEXDcs|xU!Bk3jcoTc&LQ~bhY zKP)B*uUJfdu`Abk`!e=yk!cP{ZXpU`Y%*Dm1(?x(I8N#%_F6lw)4uAoOA77{Vj4%C zD@UL8CmBEE*HKx6ROyZ37e3p54sHpt)XB?%M`AgGN10hq;rR2*uGiAPLYTxaSHLX% zo|jB{6n!izo|wkg-)eOsh!bt7b$An=vL@)m_`mXWcRTy_jzXjeAGU|v*u zt*Y3#LMGPv8J#%GebaEr8kK9G9pI}d7cLjWRabR38IZ;fmdpGyDOVDGwD_&HiVvl?h>iwaf4IH)nK zd(zeyWgTFN$9@RE{MTN}`$L^7!g($F4*B8)ruu*bj!JRA6+``rSG)+s;Y$ zV0`^^*Fols{D93k=#$y^8zP?(q>|N0a4L>y!~En<(Q9isFMpb#rXlMp3Zy7(=qVu3 zzJ>_9d`E3KXKuy>`@rniOu?qgn-9p|5YSBZw@q%h&jHCTrB&!hD_`N-Si>^`$IE%a z0@tQ>+*|n^C~#{lmNgP7_iQfqnHAtU$2qajI5$_a`@d1F3-_!UJg-#cr}tBPrgMEX zc~i$8HVB!3${21kqLFS*72DB|HH1V7>T*lzWfqZPHMP@l$Yxpd^z5@xf}ru{Gr>h} zn>@X;$F6m^kG7L#CnHAH)D@wHwv^EDtm@({mYZ3UF5dOp-I549bD@ASi`gG8L9|tl z(WzAFiEw9-3>!i&^l4;vgkc;_&{I8}mHFVc`sGkOXqYt>hTi^M4&r>nZ3|>s&a+tI z;n=Xh!xRmr*|6yLQ^`zeQ3Kkjq+aBG(~+-+y!gTfD_9@~q8=0R+8m&iU*a_G1lqh7 zPi=>7qLI?X+|P3aPaw@eigOa~6)$mExF&MkUvBvP=;fyAi4cBl&Z`~^#`;h(`Het@ zqPy_Q+X^Ft$y+~iMC$4wYiq|ZY|}b2j{rXo!2qV~$1Gp-+#(Qr@?0P$x85SNlnjl! 
zU(ypIsFT^=P?j4;!0cA7%XLd`qE zOJwXbd2oU?@u7%xkf|4I&@7^|qH01D+k&_OHRvzrfguysh5S<_Pry zbEYC3IB)?RE6!{xG}N4EyP18%?lUq<;l*&(CpeiZnLnP-0{sm6vM;n+*Z}HrR@7M) zATF{Sm0;l9nNdXaq{jN?Z!S(on+`x)8~T%`b4B9f@kGQc)MX0LvQ39cP;m6ZoMp?^ zwO+tktvu&hCOD0fJah${!8_A@cm@E|Ir)+vUw{|`ISubIb^8r<8$_evCV5yUUi~(B z8VBzjbd$68N?S%-rf`ZK>3SxDiAwm|LvDD>DpGhg7ti?);uRRU@+;weI6iw|8$PB^|>uNY+w%=zt7luJX}T$m<-G zcWc-JVd8%~tQy&uYBWa-iYntfOzBg%^$1rZEphWhUCrJ8_z>|gg=GbVI}$5BtkbxV zBn4>^TWAi_tI~O9EAO6~$PobR6Gx`j6ES7B26gbFK`D)#&-ay0@~rK>Kyl%n@V*UP zMw$ciLMruCq4O*TF8{u{*<>5g7CL4gqCZRRm=8@XVzOMrL+#fknCTzc+H(WVh_34| zQ4hlNR-p*dWTQ=UUu-7qA*F?jbIkn9YroZ*)n!Hsmy(fqgR*VuX@~X(#&?8VU56vf zj=i$54h_Pc&h^*LKS024_s;QjDP0AU0rkr)Jb;z)GtJH36QJG8eA@vC9hlQL13UGE zUzpv2GO!$8PVleFVNUYC)+T(LzX>B&$g`G!laK%@UR)I0mft3gE$Gw@lt;`21s zl!a3nWR2Furn}S&EHio6Wryb&m>g2{)z>|QoiG0qJN}HjJ_m5Pr;DtX zb2B-_+x|fU?rAO4{>5rM{)S z$Cq4(x>6&tS1jzWIumXE47`%MqL!;)eP&y}sWYpm*&KLQr1o7we%CF$-;4(W6HgHm zSM5ief8v?C=Ylh%x}I%Pr_;Ue%4^csX&kkXC!z*(d%6gM_7@tyz;x`^PZJbMeX)1m zH2EET0`tU)=WkD8Um-yxE0JDXubnD3#D=YmJ388D^x_3+zHDZ}8_Car1+@ehbfztq z%0&e^)e|475}D0N%&H<6Cv_ZZ&y$)Jasl#Ab=r5MmE+A3Iho~=(Q3)|Yp%%{r_L7L zi#KX6mc2MYZz$;xU86+)8q*F5y))Ie8f}*U_-Vr!B-R6il567fULWmg1Wbym};L9OH zG$i0%mGO4>4{p0XL(eA*~DT(DC5xeQ|nO*g>ZJDQ*Cgpzl($6?iXcmf>C#5 z*69`|SM{889X<3lTDyV~kNrK*OS_l#Pc?6nY1nXvQM3^>U0~C8C_C*d$^<7FNx*qY z*Eye{Q#&D6KkR&yCO;`^Kb|m;G0jVqds1S9`yQcT7nMUEG4Emz1TLQ&RdU3wF~DgV%tj zplc#~E+-Wf+xJ}WJ)udJC%|?4XL>w?0iJIUE1;y9fZ9?#o@!@!duhO~)qS7F5!Be? 
zhs|B`1q^?#IZ4=D8zYK8J!7x+ZM?H2{h;6le%=qB!c_$)G5vA|Dj1P5ag#6(pXSFi zc(_SuUOH8KeE;;+DBU!o@|#kGM6ub4GyLmL<+uh@un>(2lQxa(fgth%LCnnrjbz^* zh;H>YC0H0DC~JRZ7Y&=)Q-=A#>fMjGw;PMJSR~vZO%xD8@Yjhr&bitg)+yhiHmnnP zJ3Py(s65Vxx4MOFDw39t=ex31i3=6UZNJYX&9kfbw)T1rRY?6fI6|o~ZBP_%lzim; z-wH;^Yj=ty5!II=>=8Ns3W&YSII(cBa>3I7Ug#x<5Py(A+r>881F=A}Rawxl7|nKa z%D7f7r)|=x3{x8hG1T^8tkIG0;Ji(0zo`ffM3O?fV(6;ofv3Jh{a_Cp>3fS5VyrAl zUXn7P8G@%R2!>P**4%pMEE~(b=h{}qA#)n^W0Z8ce*x?FMy=I6HA^91Bl!3xGYJt$ z=4TGsZ|Vrpz2vYD%`3~}oBR@=Ou6_6Dtk`L^tR;jBa@{Y2N_*ES+}*2&q*xZM9a`9 zo#WanN$`#OUfUiDRd2fSd^(C<2$?Bb^PM-J?XTSKpS@ezP2;eCeoNd;fYUSg3QIC= zJ!G^w)Z*gPp0SEy7Oy@f!?b@MuOV@!mYLhg5jm6Nsc^$EH)5iT2FoFhK}?H;w{XN` zWLaXAFY^pE{{vCw(*rS%JDBjs_7IJtpkv5`x5W_4Cl%q~bYp5l^^VUt~ulG^{*agmPSVaQW1q&i)no00qFt&!oURWABlL%KaY|L&ufo#(&n0Ae`q-h zm@h(4jfGQa7ynuD5I8we5fvN{GAtwhSHrl7fI2tu+o4aZ8VOHoD5jN4ei&} zPYz$_RoXhCgl@*W89-KIUcZaq1+Q7@YgfRVBRbY$!=hDK?XKTQp)Zj1psSd~QXG8Q zZelV{Sy=>q=djG$ z%nuZFhEuA(f${B1+$Ca9P9~}2a>poM#W@6iu)m-CypE2`!vRLxViK%L05RRT+D4z# z4wU_EvANz(cY?X2#xwYzr&$TLYBD}4g9Gv;;)0j1qdsc+vhU7wgiju>wrJA3 z0dlbL*s`;c`w5CAf8Hpo$Pc$^nDR0GCU_+zw~1f{(W;veZk+ z=h6tadV)G6FelVq*|$)`Wozw+a>#vOsgGTrhD3DJLc5Y?%-Y4@u;L%tWLMTU;OK)* zjGS$p30})&Q^0koVmdCj?d<41y=S^vL*DKT67^KZjz}jOQ7aDIRc)gtI_o_DP8?9| zV=7j*gsCT9R{iK3J6}wgBhm0VKG?4k2Ru9AY5i&0>q#*ON$)GoZ{D;Br)ZVmg2j#Pgpx_v#&xe{uKXYWRBJoI@Zy z2%tVvt4!sI;v_euD){C~TWN{*3s(&2L>knv`MpG*_$d^7kEypgH`j4#DA7Yi9gNl6 zKg&D7?nenjVdtR5kS((}9{jdUpIco2d6vjfX$akb`!v8C_oJB|$jxdu9Uod^7&N{j zVi%1Lv*luh?GfePYhF?Rz;pE(IyH;#zr99meC_+0Qj818Iz%wV`I}~vo`A-n2!X5I zv+Te*NyOct1T6$7DtlxfTh))s@N}65wK?os4qYD(?OZb(lhhxuChUaxvtjaUa!Dz@ zE%bBPflxap4Eglqhj$NrV4Ib@UI6>VGrbWQ?@8|fLRA76SV6Q#=kSt5I%5$>#U^gDqeJmBFRCUaEVF57hHE2_X}}hkZDHeh<-1-6)UBn zw1EH>{diy_+8%_0m0o5|E^IW{QQS59jnyD!6(`oq(SozX0Z-&>7BUv{qLNMPlGFOx z^<$oPqZpt==rzG;WwCAo-vauKvzVSiC)2GD+p+Mel~bkhkr$XSJ%*GWiw};yYJ6g$ zo%AK^zJ4xcz-y_adgXR$^5mmJf>akv#@1Qb&Acs4mdndC*l;*YvTq^n}DOq3wN3f 
zN&q6Tl||`r2C6(+N9>JhuNi#1{wqgsVjs9u=)7x+h@rzzG|nwn5BqHGBYoGw+y$SQ{-t&mWE$|^bTWEu9_S@w$WE-gr_SF1k^`Xqrw-I1c%jtT%3RL^+VfVwK~bH#$8 z_p;i6!kL4If10KU{T{+Sx$j$Q7Ec=LRu=e{a=*@QLPuGmyZNjqWYoNYyNMaXEk=?5 zsanDEAFCCN^#50_U|^%CXZcUn3I;ZICbs`?wW7_)*>tCs279YQUG9If6;Ba1MTW}% z{ZerlkpM;6a(!IsdUKoYxY_=?+I=#rX+1qRdJf<{v$jb+ZmzFbq-Qro}gXS!>;8NKBdbn`nPdzLrnAF{Lvj(+I#LvyR^ z7q&M&MlrMfzDav0z)h`RHBe^&Gbbq{GbSx3w6s2EzmC6*S-%&^cfC$FSLf!JPqpTu zzV6>9u>7j1jv7#4Y^JHb?9y64CoQ0x1N6TQ%EebySOC*fzpa^V>_1>9Hn-n3|MK4r zre(hS&w4*-JfF7cV9!Gp+od4?USb7Fhru zCZr|5q2+4-Qr6c$=)9l3_>P_pKf1p<=|9R`1yywoS*_Gpzdp6Uld@+uKQ$IQF{l2& zJYl8XfondrzxwcSc6T7}^9+s*fahxIYrnXiy`Zs~(d#$A@pn4H(?88AtS(Mq9>7j% zXldzb|Gv9F_g|}$zdiHe8yTGG-wlS!+R;~2Kei1&%-;Z+o$Q=H%da}8zqX4%^*^hw ztSqn0tic~$SNao}7TLK;`^1Z$zLRA3jg_hKkKT!Aq{o${kbI#ShIw<`%NbA8G|9q2HhGA!V#IQyJ6<1a+NyHpt@{dzmU!yp`+H zOp>kcJ~p^$=lCJisqs=$cKy>4g-=P?HoL@rJPJv1V)!BG}flBJ)tBG2{weKtzZCmPdNp#ZHvOSWprPXj_q zKl;dN4nN^2!>6T>=_3PVD`iZFpI}V#dLQ^35GP#$^K_*EIZ3d9$fr?-p5@utMCx!y z3nblo%gFklc~?Vre%sDGZI|S}5U5o7pJz%Cmf<6Tw{0MlEeZk5E)4lz3q}}vm91Nb z=E1t9PW)ogcK;B?^jAt|>$3JoL8xSiq(*HHm%CcZefS{c%QYrqgvIO&D?VFne|L4B z#>I*ll(22`fVt@}1#~9E?csY(DpkdvZ-fGhRTuFXh#9u9mFWwcu%XGr&3yxu&=9LL z;L!iEd+d;2xjUYEzN8g7h%Jn}7sslWMFU$n+n+g$z$Rv_23_nucnk#25k3jA8)>NT zkR|T`26z-((!t5tQtbM-B9C)dE}0CA#jyjv{#6?&*ltM&<`4=2_w*xvaFpzJ5~h2W zk}Q!*@T2(0O<}{bgvO%%O}QD~N_IU!_t)vjYOoW9FTcRo;`V+uXoWd67BS`7^se|E z`}#HUY_8fuYTk3Icfz?r*%PLoXe67fjN0|(7QpAzs(i$3ZqPOhes8}z>pO-v2lZ4C zxw6;SO4xR%k5R**gq1E#iG9HM#He0-MC4Y*pUlcsH+Fj0Y^RQ4gj;QFNFBGxC8lC~ zQgueRQ*`)9(f(Mt1Xwz;ZB-ZExphPdUMu&$x@2bxln&-`Mc=GUFkPw#O@Y3mNh|2 zI>Xy74N7Xupl5zwJp-*=+JoMBYZ}SWzcIz6hW=ob{*iq$3J0ga5r6x+h~`uxS7>-g@VMDS`Cd)h~8r7=B(H>WTZK#$W8X?OxRRSp)R-owN9c?)h zQUh~c{@?-3;>3RLD510<1#qLpMyK!5;B~Ma{1WU(Z)@SYw#fKaGuyA4w11I)5(?F( z;JX$Km=#l+>)&2v(>(A5IWb9glu&QD)Y-dSU*YC8;AJeoH<8lob<)dV&r@qEGtLeL z^Cf43yFeRjrAh-5D4cB5<j=9ZXFWyG}zmwM#B3{q~J7nNQJ?2Zi@3GP|vS2z7sV;{uP5Cn4F+G`GQ~ z`FDvk#Z7rx*M>sVjsoMhU#rE)`Jl_lJvSDM#_^*xkC3nzpYw&9eAke_m5HpFT{p 
zmGhK)<3bKdK`Dnl662alm}+YbSGqJkVzl2-H(TKATLZ*aI5b&W?Aqkz0eZ<7M0a_Z zl8&=A@4(U2SUr~eS#}$wgSzG8@kgDUmx-~pKqv3f9!G9ir^?qTM)lQr5&h$p)v2gh z!_Z1tZMdw4{ov)FrOTM=v-O^0*72fT%ryyoE5hkRD`tobF688Gp_{q7CI6R-ta{Qz#<#yOZsWnq0TFOzeV#IUE2^q8NaA`0<;9z?A# z*`Q1c6LiUW|J>nK`g>L5!R@vbcZ8V_A+)fme0CB>&{X=pjeQEIm(idXhRTYDXdgP5 zd3Q1u(y({JPv%Zk6nOzlUpqwxUD45Rkf>Z9EjrAkELC)2G%$bmRCExtPSEy<4A;`9 z#q=Q=Y30H)5J?Pb*Ntz2JTOt8usjThUEu17A2!<_P-MbC)2nVvMky9UUV6yN-17%a z%D|vuB)w%Cv=wI)JMw*p8@Z#)K0t{$V#Q*%BQ0NJ#9L*&YxX7Fp{`Q@Jgw(OizX!w zxO^`w-`gqAun|MO!}L~%+%S!N{xL>p??+ED^~-W=*VlI-N}9Vn0)Wf$qU<2ggN|Y8 z6?d60Uq2({KNzJdOAMH?FU1}P0El3{eykRcN#%?zRQA$J)K%NmBz(`t*wDy9eR$-> zO8fLGMOoxE#GL*%r<2vkp70sHC|>o%lI~t`A%@tC(}$8S0e$k<=ZwdtW9pg&jz&Y^ z2w%rGwN?}*Ql!FAW)eNy_S$xL{?;o59&y9dJt5`^+%gtFn_~s8ur47ICw})d6BPq8 z(UaNx<~$^M^s(g9x^02SaZu=JI9H;Gss+V?4u_A;`a{*G2}JxV5b%czV)}&~_%@7C z&#+I$o@=R0UZz>)E?8rQebDZ7nF-s^t^58!%r=30sygEtYGfT(U~%|zON1)eA4t$s zWeiUD#2+}{CyjlVlXx$BtMyhZpF$xdbwJ1xz<4Jax+b0tozUII@`ns8MFCB+58HM`Z(j%-BJPqX)3WFp zk`OsCx!*nD%o2}&a9!hayNMAaqCw8F>-zl-o6}hO5(|_8j!9Z2W7J@EaUN+jA2Ds568SINK_63u4)uNYK>*jUtu!O~!(%y5+kP}<8EZP%ETg0X5EE5_qvxDEuc{I+SaX`0!;^u>S3#I1o%P{%&N(~$JKtVWIuvL*!vSZq zBE#dLOw3}!i(Xif4PJ52CYfi+=v4$sMaK52ytg~A!M01Jf=1=edRllk-&Yq=MUV{7 z`zYYh5JHJhq2L?U6Vi8=+P_SYHdi;O>W13A7K_Uw&cX~JcF$k%x__{5Uml@C3qNwJ z57kPTv4$0P896nUHkg%M=3^XB+vo^qZ}T||XjZtkd6ewCm<7`AFXLP06@Jqym*;#{*`~u z&4Wjx_sxRIaBhkxIDjmIaY)l+%sGHuZB1?cVZEsUOHj*1Zvr~8!#1Au-Iv{S>rg)q zJE&NXfK?5;e%ny_p1pNg-Cj^fZ&q75nCJ49PivtG*lpu0yV2`3c&Kjg=43>9`&wgG zWj4#XMB(a|YG^>?{dh5j>hd(PA*`mr*qOtJEU&Fu4~+Ij2gNx0R5 zG_S#QUJ45)^%{C5+VOH~#fqDj@O*s$Y)Ugiyys_(PiqJinzpoelz%vq!iE3lkx-9F zNaxoja!;p-1&*%j&$65L3_vvDKnoEHy)Gz!;wBQQVoojnJS|Ho(cgg-iJL!rs?9eD z@;CbD$VQPHXggaN?j{6t5wQX(6JRbrI4j-yR&9iMR!R4C?KTo_hE=aP#n!SY0Xy4EmgF*1Ep%|&oVH27)KQjHL@zNsW9(vVI?Ed)W>fA{}ni;n)0Qb%DqM$?4 z0W9U<2Qtvhdn_a^rKq8`_u~uBhD5Sjqe<#^0Lw2VRj9Z4FWZw$(bN_;_v5N$_)tQ4 z8EvT*(IK0Qo4y@n%5NjnrE13j%wwRAvkVSSc-ehGWVF=TPp}`_;%-W@nZLxa)tNXW&d2$(wB`+z=_Os zDkC4<=s@o}oI;*i}TWjHaT$Rz8sN8uV6gEDgl 
zCFgt>=Wl_#pgnU|WYwvwpT^+G8ks$tq0Y1Xx}C+Jm+=@Se3z7@`3$qImT8G!cy6>R)@us(uO zLM;ubCs;%fG+%Sy3GUlu0UuNq$Yn6W@|n~UypvlF(zwtiV_SN)#V8q6W|tO^%6b7C zvOpy`cs>=4Db3vVC9+opL_eyFYuA(ge_jvlf4K#jJ;9J|XIL)~9br~(BL2@P`$e8a zUYt+XN5=ub)BUB#ySjXRd{Yt2AN>?s6e+F%D4>sCtfu?tojx3`(#kieqHV#wcMyh8 zJk_xz;RZ;;l}10GID<3Bv?s^!`O02;Aas)glVoE-=&X+GE#223K;Z2{y*T27!8{@tS|E!I%&;-+# z?WjAT+GM5!gNzdyj3|XL;}o?OsC98=IE}pQH$G1=H#j%|W`jKEG&8B>9B#dnK5lc* zr%xc0MSaCl&}%MrfJbCQ3gW{CAK+gF>?g{_dT%$`5f35seRc9ZspmmcHdO`I_(RC# z>dRX|AFP2db7@T!>1yr4dOf*YcKP@4{o>T1ct~QSao&+e^_jBLgCa6sYg(COq}`g= zQX>$6T1qY_2EwZ-dl#jQ1Z*2aeR?`xQ5B=#hvRAa1TIiV0e7Rt2p;1vNI;tuwy;bQ zG0yTGUFXlZpqpfSJQZ4zsSB8L2j{D>Mg47~MVk?hg*<^KLP6@V#Ne|i)W$l6#>+a9 zw*2eWBR0u17*^!WRiKi?(x(P~#BEgC?`ojYyLOgNp6?ay*Mu_6YaXg0QYS=^HsF|e zU*?RK@Y!MR=kVc6WQ2punD%Ueh96P=+EPQ3^%kN(8Y0HX)rBs+&r{9IA(dzTDTNM} z@3_dk>UqR*Xn#-VVE#e3Rt$a&1e0BZ%hies?H@xZF*y$6VbnnakKt8bhbMC#@dNfw z|A^L6HHi^&k4}J3=-@B)F-{tSAKq>_x98rOCjj_lv*xX7wHRllvEJN@)O3nV5R~^v z{CSMfsa}uBZ;fcp=zB=PVIEcjG1B>e%D_%t3^b=JYR>`DBR|mGlb47BEmq8B;3Okp zP+1&6LlL8**f!9LqMh+epLuh{sRR{D=%CjuC=m%`!Ev)OxbpGNh>&s zq!OLVpTFxI+R%cv>}mM7MoI#P5e`X0ZN$t}&GXYmvS;ebANP~Hjj&Gv?O$JhJ-yJ` zV(D#Y6d_vuY|G-p4g{tljaR<*7~7@F7Gl|7F3}scJmhm2qF`j-1EpinSS3KART04*) zQ(JfodJ6s~#?U4-%1dglA;xVBCA8rs{ZB?+5t04|0^auaH2sl=T9$NP<+2LWD9%^R zFzkZRSZ0yz49x9AyYu=)*13TKhI3+T#z9FGuvytr6x}0>wZk#Jzr@~Wu*ZQ^**q?q zH6hd}p~Tw~?Oy~O1v+pQ)4$Acr^kzp%Df3Kn4Tk8dro@-)F?9})u7X$+t#ut638nH zg>AStXeFzcI7mwRUf_+>-F*7Q{BJM2H|7nf2_$ovGvp%fpr=~J_Pu{re)!C86lG`u z<|O;N3djZl|f06vH z6T?jnomri>5Dlx=Ylg1W>v+*wh`|;Oa7<6Mqe_T(%H*Auk@$Iarcqh_$zt@_6M=2i zX668&Gw#!^=ymZcVP(`AdhEQW1#T$G@{}T*AyXWkW+7UzpD`YfYVxPsxEH5xMgzz>_fUnz|Jcb8uOy*f)9~|u?EBH-fz-maFMMlH=8l@k;;pN%lglk(FN}5kk zA+cJ))kGds?n{NKVhq5JI8i&mfP6H`{8XR|CEss$OeQv{8#9xh+Vd*OshO1E)&?5%#&SZKoE%{m26~V;KOB3=NSKJ9dY*|8%wS!;1pB~40w-(}gEUB@ za8;=9l9*gB8GFA1A~gxJsV*e79@RR-GI$?P5_}x7L=H<`U5n(rcTUx>g~ll)sGUwC z(E^Me=F}S{yBo6`2+=1_Vna$a==~E=)UP6)qXZdsXeh^K_4Z};MKzFCghqg@Z`4ZD 
z&?M$^Rv6iT=F5vJC-6C$$(|F{yVrSw@b;2G>7fHH5jB)kC@23hHE>IRIRe>1>EsoN zgYHBJeI1yZ)RKQkVa4DYXH1yKoy=gq>DqIdpS05m0>91k^J5jiy`lDLs}c<)n@ z(|DbtRg>zzOiABMo`Ky5G>B$S1}^~~u19hw*2CbYFtfH0rUxUnO%_LT7{wV`={#4EArLXe%5bCNdnfGCR49Z(%zQVlo$cqVJZaR`<3|FE1iUV4Fxxt*ZL20A75F{KRE~~>O3;oH8 zo!_B0PQnj*F_Aax8m21PliIV?Bz2D7lOF!wYS02v1xp>(j1DDqPENGCU&JQ=!qL5z z!q0`DTfk)tD%gtK0wPr7>$PnK?c)umOfm>fq3L?Rh+n@C^(~K=e<6VapGr%d1{;$$ z6Z?+r-lD9NhK{BdWKWKZi{)gm0HJWKAT3GX`eSOkhlPFYAWX3}DRk>@5sMPDdd4*s z2IGMGEP%5>oh?W!M}S>TtXLq%b*hD^s`h4dt^s7xeAGe=#+MD#spCQ5h5wh{l18&c z*H$QU^A14RtA=`Kzm#_9@l=^tOv20g56;r92+$hYM35IwU4rg@MXlWOIT0LNUzf&n z4TBO^(Qs^t@F&+h%1_|9i{2Z&XSqj5oe#tFABX_BzPZjGX1)|=Z|5jd1kwZcQKzW> z;|7d7byLe-St{IwSopB@C)K{guoQMM=3N!; zXjUKd60rE!X`a1)t_O7}NIA1tJ)_5UZW zJyBG#J-UR1H<-fs(W}%^+9C-s;>_99YcZz3tCp9Iw&6d!9NMbFay6+;=i*?xk z6#)7Xu%m$@wiu1-k6Abg+EcHW4a>?(WuRNG72sXdh~K9?>-^Agi$m+j0CFJ{i#g$Y ztdBjSWYj`7-;oZopxWN^yo_JcIGHv%x(m|qxb2DMe2|#LFTPHpao^~jHP_OD(kKo4 zvEp?|HUrK8e$970*<0o=rk$Nn2qU(ov2`})Zo1gzk@?z61n&gM%U-vd6nC1(Nc zH)SiySTQ*lpoK=G1q z*lVx?v_Mqq{yN|8HXk|M<8i#%`)Z}^*7&U~JuG%qt$M7C;iwq9M{Pf}$fA6oE(mMTc5kChV!8YJ8Y2{K^#fN6PL9Q1^ZKL!c}6 z6%TK1Ov;GCCnJ(1FcD9=&Y7#iF){fUrM0UcfR2J@ZBY_Vg(ZV%cU!Yx2O5wLqpGx- z-wedOozfQ~m#mBndkyQQ#i8qnJRlu=+m<{Z+^Yv}HY$LC4RVIo;K%o-D*GPus7vd` zC@D+7tj)Hw1QHQ`pQz%M5>6WdM$zS#s1xmm6MD3xUtE*Y${BB|2kh(CqG9Kyg#>mP z#2iKbMFmx3G^xsmvDT1g*R0Jcx)Hp;LlgQkI6?_l5HK*O-AyKSyBC8=Lf6ymUSKwa zWg}@~0Xt=NnyQ!!Wf6PpmuXM~jud(Ng~?)^w15TkkirYD^c7P;z&f2l?r2)65MXl0 zT305)7*ArV1qDFF?}KnpiLqv}dFW}#s!>4M}k$4so32v@HEvEa-H@LClgoiItr%YB z6D|=@u4qF_AJ-e0sB0EeYxeeR5T4#o2Nd?LIr{v)uiz)D)VMwDfz%SR=+$UjE$H>N z|K#yPSJ_%RamH}jyA+PLOxw9>sk0k~V|BP*N|N2jEPOHoTY{??ISK;Tsu}~#Zkm!y ztiw-8ej|_I_=CysX|}DQT#!+x*8(ivjX;a{u)jxv`tMip<<7Cf; z2Y2jJ{jmIiGKt^5RgZdY+oIn9+J*GniBImaeyiH#`(iE;v#P=(f=nm8%VGKp>QVJ5gve?W2jCg#AL~SJ~yLwo-&!!5?EnW%gDZ zMJ^sC$urb#%5?=`WggI>E*>OJIA=P71C+NTx?RJp5E26AaSF94!RiV3e@=_V5&&KG zQ7pANet9WxQt+S&`D!95jA6Pqq@IA*U;Y`kPaa8LOtrRB)DqMfpxm>tdzQ4*iL&i4 
zX%InN@>YFQ>^w8_Y(`1ae_CFG5^;)5!qk{I{cN^+_b%M*WpIq8T>W9zK)c%8zQK1@ z@rIFIiX^A`#~uEKcCcto}RfRD&Q0__FC#@Wxfj zn>4-QRkCb};pj6K`&gzLFh&`h6yH*b^T=lJi$XE$bz zArA5H_c(&pJ3F9>u<9HP2_f2e1u6D>sLov`E*p(%yI+B4|7cp57wo($q_ zW>2PDm@P!ubeh9d##eB287~F3x`)TK6ueX+Xs1O<&nay0nDo|jtZ`M_XPsu9SYlsR zEGU>d)<>T9G6Elx(s%u~Vfsh%@$9aRRVF^}YTSsNw=1e-3U3-f`;?D@k*dOI!3q9m z>5iL;FVvlbj7ow3xI6a{G17UK3~a=H<-T;PC}~?(%??)4j8#@>e!cGDkj5lyj}OHt zlJ>Y0@@#DdUcM=3pO7pD2c!_=w-#|_&UlsE>^?g~a1=!Z`c@$_+Y@kzsA+`QRF_YR z^fGmtJ@G$witVygqixvaHwm?_9Xs!oe?nA@#x$}-eUd)aSF%$?ME7$U=={oYe|7$N z)!NU~T*CDZo8TL>YGk8}uY z4-&QPOi@wQC5{^1x9%0_}R)IU-GUcj0u;)4kU1P>Qxg^Zz zWdP|1 zD;C;I!D?NE@(KrSysjmNsH5fX>|cpD-(cp%F4|#24L!vx3OQtHasrf!klV_ffqrea zg66%Piv$hSm3)5mP58jL3Iz@aK zp_5K}tMRhdymeWAEc`$<9gJ^yPKdj0tjpHB!vzD`^<$Iw_WhumwF!9kidQutJiqfiEV@getrjfa{cH0O zz15I9BikE^XGG7HU5K9Q_E?_bP>44%#>m7Usw3NFQAit2dQ2i}?Mc$|`AuYl8fOaA zw|MZ-1p?j+wvyr)O`wa|F6<74MS4qB!0Q5WN3OtqF4$&l zvGB5l_ap!UrdDo=&rUkg3K&@IEV*9cRQJZ?{d%EZ)6C#~S=bJbtIqSyk6W$bTMdJg zR7>rvHSqhU-5uE0)i{t<8Hv|@f1#*9PSOpY*jkv|O;d&4xC<$YjbT*)uj*jM)~At% zw{#G|+0gGs=XaIcN5J%X8@}@MC;l3O!19?6t6IDu963hRsJ4T+xhX3aGSD9;YSN*q z`UeAxC)PqI5^YCpH;S;gnspQp{1W$zRC|vQ4kQXQGWpsYx%CDviUaOe)qgMd}Drh0hcGxsCVD4J|ztJy=s)(#L=05(y)( zaDj4Gd27~pT#)y(OP>v~hJ{Fd9LG$5C46Q(8WoryLuXB4AM~~}k)wgJ%r~?gz+-D3 zKePH`;-S%2naJIBt9rplBac1eKMwXL1C_OBl(0qc)C{CPea3pwtRYJe_F1ByVgVgH zKw`EF2krc=_GjYlfy9NgUonk@&dRu_d-4G+4o(g2!oJ>yv(>KJVn*MGhj1Bqq-PmK z+T8P{YaLck*z592Jmmc9U57KJ7C+JN-J4|Y8h8m0_y%)rG8@c3?iu-fE}x(V6XrN> zu9x<8v!|&s?0vTqV5xEvgVXi^Q6ro#iI995W8$Ww$EKL*-aUt1zL-;~{np)qzH-vI z#i|mySSY6-FjQZ)4%X`e^j6t<@E#a=Gr=)=AxO*?5jq92%Ft- zxYj4nmgRs($zDTW*bJjUrH&ftQL7YpVJlo&8TXU?&YnE^Hk&*_s=IWaXT^WZXMhv} z32k8BceJ?R&`;uKhUs^Zp`|I^O9sskug{F#T{BE=wkZpyr_|>eoxU@CFKWXb!%<-# zNDrDi{L{xcHSIE;Sd^UmRuY{x0-^}-!apl+aXs4u$K|d8VPy0kC7N*R4y+5 zrYiyOppw6=GO@~91CV}-!`^0Zmt!uffVf{$g#b0mrX9Be0{k`bZyrniOI)LGsfIDZ z^!30=%#hkeUOl&l8ZDt=*fNFd6UgX;%yaEf;TF(DW0w=wJE8LQU_H+oMpTLbGd+!) 
z^lGY3Dsa8a0HWx9T&=0#z9Z+X-rR+sn%`h&D5~f#9v#L&xt4>dnsfkwmEbdpvrlq& zQ!T;;@!cAM-e9+x;G=sDmIB_q;lvieSaRbkN@;Cix*>6}bUKpN)*#xh&0R4Mi$v91 z^vWSl5E|s%Zk=LZ)^{lUA7c_?TQc=}LZ?2GItG0<0pHpq0_K$5D`gLdMv~Y!v4Szx zBn((JVm|*6LU(ejHAYNpqm3nPrj~!k7x(kA@^-;Vlp(`L4T?Gke<$5btz(_QG_7e; z)gc1f6e{yfE~gOVzj5B-aFr0T+_s-2+F|zaUeolX%lAt)%$P#{s*b;Jp2&$+lBMET zL1#F2CaEHIJTJv638;5Emw(=7ph@l6@nUawMqtP%sc-=fVN;Un$X7D?WjsBeQX^j4 zu7f8{wLl%!r)6`PG`>}>DQ6oNh6Kc*;B2MrZfhNPO=S>P`1Ob?_67G!$5)506XtPZ ziJVR7cw08$!qg5j4-(5x;!$W4y5*124d%%=XA=gQK4rUva4%}vjY^qMs_>CKA6HUk zDD)n~4<-8r21V?CwSa@R9EPkUp)WMg-|S{Tra|w`(67UBCc2jMTy(ZeiU9F)^H_w7 zl?foM?>r*-ISI4sML5t@q|Jl8!EDg1!cP=~;Erno?sd{i(Rivqt$EZaVCwlSHPGuBxQkkb%;4^>d(-FpP7lhm_{C6E^}{ zur6TreFiuCp(x+r8$UgBe4NKIC;V|{UIoxMp%cT4U&IU6jmtWM?Wl$pwj5oYvh7_t zp(vthK;1Nd2x*ffFXO$$T{b;+8!Uvbxl58D;F!E0aY9&&RD);SB=?E-}?h ze*&+rTCm%~oi)DBT5hGBDrT^yKWV+GaAwQDAC47C7i<4m0$V80<0{Kx#|RnKPgaP@Guru>Rnr?M7Dr<$waKiFVYffGG+5^4rQo|H;uF-W&py!P6OOt=86m zTe^BWitwtkm3c`%cY=w|?ulYS=r(-v=3?`+`nZgFspR`ANDY0@2Tf~jaPg^@RJyWY zfd29Mf~hzdYmVn+z&IC59^Ac*9~GE($JD7nz@fCN+UWmPakH#$MfQ+aVE|P?s=tCY zJ}Fd#%ao8e*Aj6sHM!DgOnbqWAS19u>bYZY`6$aV62n0UU^`aBZ9}?5jBf5VOV1=+ z8YudZrgDs$%80>hxzG|u9dQi3K$x`^!&q#)&6jGo4UEoLL=e1WfU#HY$2M|&AEP$2 zA9}fg$Yqo^spsd~@QZm!AQ0XVKf;BmApcDoo9yz#IpY}%;%sJ&aZ1gh1XI*_kCkXy zv$g*hjY1Lly~S*#>+ylG8l8_RRCD+yopyLWv=TB=))hsKN%_fU@BSH&1#ka-^f)GX zn|JQHTDfgK14~z?V3p}g$Gki5rp*>4^o*FA=?FWHJ2)FnE7;Lw}sv{`P z+})oJW~+x6!d*8G%};*-5wAOSaOB~oQ4{;cA|wViKz$xYESwvJ)8Y+>UxLAT+V&ph z+WiUlBS(Ia?dd~jIXpVmC-C7UnIpIaQ$)tT)~o@!dJaUgE3U{mN}x|`OA!I#R4q`9 zqDm<>gVkP8|i`ubX$yg=Co~BrOBqQ226-gl< z0UC|x*Yw?)Ry9*`W>iR=&JjYQ0@v^CHD4MPy(P@;n+@!7?a4#)wLnvo1KWMY9T=*O zkqa&!CD1d{M*vUWCE(ZKBC?FUcHf;DH{5~>GYNrS!N0=y*xUjk;Gb|avp6!2#fvqP zYgvp*yy-BXWGzCe=X16WY|OMQ>U6!AOt=$U8(Tr(JT1iB{%n*8#iV|!%Il4UZ}q>f zqzC8RiEhH@Y>2xsx_YrQD2{T*ylkvvFBG8p()m9-q1gW_?lm+J4P2iA@-W$&+3CeE~?P#vxRR2-mah-0J}K zm|c1%pRG#`k{j9C`6KBJ1|6He1d%46i6*;6ePyw=MP(1%VmkIg-z9|>| z>2DwxX`9BFx|#iJ`;YDhu6-J?HQiGmLf+jbGNk@xLm&k=4*HW(W|c3LhqdmT4DHw+ 
zhSh5&6+u_PaAg#^e^%erZ2--@83`Jl#OaZF3z_xzkG4Xv{G&|nKN>3$lNZ;UOXPq{E-f8XU%Mlk z3>yEPlyUg5kqbCQtg}nk+a|b$3|`^cfayob#=@5i0)Ag(ic%3;=};Rwv&l+b6t*m` zb9Q6E!0(_zwXte=$`qe++C&8F9p(JR=syWj9nP_23af=(%yOxCA)`Dh9jNQD17cq~SUEA-=5#e|YEBa??glI1}iF?{f^MoxdBewTv zL=4|6QjGjqI_u_NQUQZMK-Xim15ACx#t(O!8Y$&>e$p$7)A9T!IX+4DRy%2V+v{9O zpr=jJ(FGbS9YHT{6GaL(YfNG6bMyC=^>Iw-pDL~Mj|mfnrwvmmIfm=KyxjXNv;Bq( z=ZgxUMR?+Pinn+>cz*;dRgI{W4*pY2=pjnl-HVDJSeSuA%nc}Z1VwUTnhWSV%1pT56AFN8br|1=fbKga zELS+b|NYwuH2lZsKp$~q{N9d5%*rryt-lNrd?}9*d{wlGgu9)7Hd*KQ_6@o=$Q~UF z$H^7;UkOtUvyhAAe^Sd!DjyXv!hOt|Z!a?pz+gJ5?uI;|)d{ry*DrI|RiRK=;>yIe z*s6aOYV{NTO551L4BTlb1J$(wDsbr)Xk9@1X1(;}Vx6lpgPgH8!=l z0I&s5`WMxdnjN|!$qXYlm#|cT5gakB@D%7Zb1KJS*APW>P?I@T$6{fcC&yMNfazma zUvw*?Ph~}yf2f;e$;NmpEXIm!l;$WL+q;{%rF~idHlnpC4*A|SM|ZyBxbJMUKT@^^ zrEIzvXdB8Ig%x?d*xZ)-QjfGwR5hqHaDozh+Mcan(l`n254@HiJ!`0NPDMBu`qZ2o znbZwKK|?6el2Ee7Kd^ZuoH=-#nnDy*skAuvx_^D{EAFD@6h1;B4RUmVu*b6udJBW| zQe$@~{uCL%=>k3}!F3(kfqAbAjZ*VR!>K;}^z^Uz^Il2oJ=SDETghOr^x@a;H?}5o2wPjlAbRi`i3;4!L zkI6P?CCN%JYjcwmy(i|DrX908@)`?q6xciU*>+Upj(rK6g_#>(29wsbDh4n?6qD8G zyBm*RKB3UBizT6tzzy)SRB&5kHctW^-;iE;^Zem*t&A@GPXB4pi}>L$WqcPwcmKtn**XOF2!)a&!^qlzB&qTKyA*T z+vkA&Zn?he0co=Z>BmPW(HxBa#5ohEVbB-P!Sc1jnN#)TwcO~(MxB1`bd zjC8o0oq#l1d@I5+Otz%$M+Oxa^gAULU8wo4^M|cOq=OLk;>;9Cr9!O6hLjlNEYQjD z{p!yTo<)4iSxbzrCAIqDJHQhszf9UTl^i5(u=yk=GLr0E*#7;L;cXTX z)%#biY3AHrvs$QN8U!3fjl-~#yoV_q0@6}%h17+eDor#!M%0;8nVG|j@^AKg1{9N#) z?@rwe_9XaBJB@1%f8W*UL$eJMb*$jM>|58#y+|ZwLZRfM++1 z{R0|k89(xyS`kC*vy=PreVD*x1&5!c8D6iH>akUfTTs!Bz%uLMy0=*Xqa+OUbyE4W^} zAISuV_mN6#J2~yyq)~Sz1B{;eVLlL#V~0_q0HM$v)FqU)z0qTj#EomS zUcE5?|6i`wEiI0V(I-E}*H2kQm>CAxNX}2MwqFGL{ARu=3zS#2-~txMtVDz|)@R0Lels&bT*8K=WynQAIWktdr@tNwlRHt*8;lz!H+7lT`c0&b!)ND=g%L6 z+-*Bsl7rFQmeJhk$13*m&ZBA!@!PXhf@Kr;Fgpr1< zi#iH`Gm>&(&h-HPXVD}pGhE_t4{Ut5jhtueNI3Vh^Ld*I&4omT%dW_G=t%-N#*P_T@+op@L^qg z#VnI>SHx@fjFdaPjab6--B>NLyO^|FeGyC&{~ZE^_?2tcmiL@_sAV)58rQwYMmhalrfnB6}z~_L+YvgJQZtT@{0~ z4X=JlD*&uD5zLB(=pM}Gbk62sgARIQi7%*xyJj*UQ3jTUq{~y_Ez2H{Z3>Gs3 
zbu3l8`n7dDUcT**Fs3*CKV!%Yj?*W+&GSMJHwj+^hGi6XoGQaBVw~8lqd@NcTeaSe>OZd| zb8AS1cak7RG?Ck2CR2MjDVgLao+HesJr;(-c}E+#7ZW#`<$Y8mLOk(NrU(vBI8-o- z+IAOY0(uabXAAV9aKm(|{wrz?vmvjdDC4P-aq<~tXQGWk=x1Dv zKbN7|WT-fQ6nx+a91SnTy+S8~$A9YO3Y(jP{Bfm()IX|x`F-0LbF)t;xs5n1QME48+i#U*V0MDECE(RJIDrQYs-es#qOj$dw~GT`r*f`&y#yTFHZDJ^Q_9?_Liv zxmKvsB~RH)ykhOV?^b}2=o&mGnpfCU4eQwqysIL{5l&s?yd{i){!CmH>SL(B=$JC0 z)+{D5uKo{FMht;%s4e=QCTd^49^`iLzQVSp2wB;< z9ylI)p5T+$#MH2mCO6AxTP){R7QZ?f&)#M9@!Pv+QerAu6p^=FKk!SX-jm*x!y+i2l zyZD3sZ*_zAxp*;&ao?ys?)T_O>7=qikxgfB?3$_?K56&af4^jd%AEGzDHT|-Qm$ss zOyl&nB|ba5ITrCb_7v?fdh&xWM)rwN?bH*?Mb^my0JZ>0|DyUO6}q$I!VN>zqXKRm z80z_OBN1~Q)}b(v>lr2qb;PeRwGea)>XH3!scK zl3(br11`}4)BFZpMU-*r%tcO z!@CRZl;7f%zjX>GeiNsh-#9f%gXwpbN9r-(-lw`ZRzKE3gy%TI2~J{#S3#;`)6Lu! zomSC0!};F{{zs)rnHYaBVotjEx2HhN=rbkF)PO<{d@VUjff`r5Py;;eLCpbmw&XaZ zm7K{sic~q&4&BG2i~eXlJ~kf7W``W`M;8hXp1GcU1Ahc-^=FlaOI3D<@8eGIxZfM! z7rgi6~4wpe2YKfI~|B6Fp>rF3{M(pH6izh|xyoR(R?O_o6m>r!t`JlG!+ zMv?Dui*+i>6@u8Td#r_Z&tAo)UsbxM{TPTp#b-eN1@7a)a|muA2qOr?7y@yRAMj`V zr4*2t9BJPQw1rH?GtG6eQ&7LAQLr>&+(a^J2EF!ne2CO2A}(7q3DRV2nBYoke;w+5(??*SS8VAtRxhl zm(r6~mJ(R!*Y<>|oLqPuTkUt*Tq@u)?kVJ$z7r^zgg z=Ri>S_y{XpJXIO`iy0Fm#Q8>=aImq8H=M03Pc0i7Eq=}tV{^gA#_NqUg{-u}#&fN! 
zLScp_lMqIIMu$cEm<-s;b1PJ{&TRJH#R3HD<*TyXN@pnjrHn?tw2~$;fi4!nxmoV) zIIbW>wYa>jsIVdkYgN_zhaIV^PsE$D@wJ{)Ama5TH;?)yh`bkMO1B_9IJ-R>c41$3 z{9sN9r4-c5k`V?z3DYdsnm3#KK>*kSC;ebO+=sJ#k?Hedb4Kq%@86yejz0tX<+*48 z7gca+gZu6aIOO@>;GZ6@MmHA*TsDBaHE?QyQ)anN>Hg>^!;Ah+*Pwe1=>AT0*LI!l z4}SOMe01D1;C>yP(<->Gnk}xsBI?8I`|;hOKCU+eo!1rT+luoo#rgYVkMH`!(=Qsk zA?Ca$IMU`f@%G0qe|xE7*YsuwoZA8C`$Lz%kW`=@vI`p2ZDGq-U{^juRMA-tUD)u# zLVrcTD<2H1;7(0#o~<^|iufyBuN?2I;IfuyCA|@Dxh`IjXXP_@72Vq@cFHVp=M^QU z?F3sfL)65_OW|i${K+Oiw_6jWKl1LMhV5s)s%5@dL{-~r$}QDs zVbv-|-omsUd+@4ha;IrFYt;KMrVWF+%3#l(to2uo4m_uN!>&J|o6-Gn20K$z7Eu8X zn>%xHHy2Yl;Jl|VUw2GS+a~wD-wk!2?I(cyS+uQ%)T)~ocE~sj<{(%T&H;$Lks25Lamk~wOp#`8})q`=5wu<;~-DW5CIL+ zZkWwfG+C=0_NlsEgR#O?JP4OzUa_)mRLwKf?>yEWF8c#O^(@q@yYr*-?zgME-k>`I zMBDWdoE6~2P?$@g(XkpI!XSPMzg@3XTnsHffunbi;e4H{kJDh4he`aA()UPg2Cg75 zAK|&!M%jK1w7(l&E=AEMOyp*VdWBqDgPVQ8m7SV=6r(PUy|s6cF^N%hi6zmPzIV;_IzIWsdUb!lAaIH5Ax!r_U9F)&seMo!90cu~Lc=hNNa?=1A2Yh-NgcSdOP zuYHo96CAicDac<6U}z0sNe2M707?I#7)qcN&O#ZSgK{_z7oY+zLM2>+%aD;-SyWI3 z)j6Exq@;$#yPn~40>$<9z(Be@q2vb9^8jfK2-~GmNW1Kp29PD4ggyK zr2jAACCtJcyn@#-4-2peOYjDkW!cjvuE@Uri?k}++K#j)`;m#XF8i|+DJwf%kMvfm z+>i85n*2p_Q!4W%H>~du;!@J)he(OE+Y%`y?R}1vC;j~tDPKC;h?^32>6X|^hm*%k zmN=>IuXjsYVcR!*Vd1j_uND2Kek?q8Tv9$iY}K#&bqkisSu-v=?q-dd8jDL3i%Ker zfXNS-Bn*v>xl~nM{oS|#P)h>@6axSN2mphU&`6n<^B5}X0sxZr0strg003cfZDC|- zY;12WIW9IZFfKMPVRCI@WNB<{Z(nU=VP7&ZF*z=9WM+(=W6;1rbH^WB?_=AxZQHhO z+qP}nwr$(C(YKhkGwn>9NixZ0v-h9-lKtJ@WJ%q{O5JLG*wHHxyCgn*^mWkdbiF3E$tivqJElU*R|PK1=S9zwQqwPnqLYtDyrV?mJdROs^i7;;(;kmQu5=}bfOPx zgaEWu<3<`P(KA&D{T|K88#G5c^HopvUZ1MHN$DckmV5(7>hiz|GXJKJ6WY%D5>OcA zG2)XJ+SOcG6;Ss;9V{Tg%dAH1qaI9$lldW(I^b*`(qeFaIBK$gH zj#=Hj`^0$yM|SL+2G(qo0Oo^87y?KZVE>lYND0{sbbcK4`nU~ie$Dx@{DAzv%+OF8|an}^!EXh7O2Y*Y<;C`rOX7f7ylyA~@34@ao>-?gh ztMJ(Wj+nhn;AfQRumL}qxzcX5cH%$EPhY`3;`MfS9;?87Kge7*#2P909~d3Q0XYK6|FQKg3om9gU^MJ;y3qqOeE=a=*;?o!n;CL1 zpp6m>zhAj7wGf;7xAl`)7v_nV= zvTnHt+i8-Y#55=|n+Hn!>oco2`TY+qgV?E`mr@ABJ8=Be;gGV+&3uS}mf(@0eM`G% 
z&ggErD`LR{BKtKLmRL#nV@JXXxs>Ayw&~Vq^a1;|>+1jl@D?!4IO)ojPE-0k9nUS7 zak9i^Pz%ja=Nj!(@yO<@6#+OtuTW5oek;~e#`|0e^nMjA9fj>ciZG!-A?buKcqGRv z0l<5rsPS!8VkR917FO#j4oX7-5r=Z<|AF<`I(=0Wy=;+?Y-&uxey^|@Sr=3?bwFe28-aE6`UkvjLBv=XtP+N zyN_3c9R}F@Z_ENsAuPKRsRLwElt&f|{zHd8Qqj)N^-G3nX+d;eY#C+WTos74gmMY}-X{~FLwnuz^+6nMRHUWAtdg1sgqjcu2;?M^eLoE_nz2RR^dG?I zlW>T#1owKmdJjUkJE59Pn8YhHbt-YQ^8giFa-l3(J#Y6QMdFYkC&~R0dDb;3MEU|y zn6Sv+R6!gd`w+URaKLV?HOV4txN!cT$P6%5Y}fO7E-ZxLb8A^Jh#Bw#nCS12Aqamr z1UsYBSZ?fc(&1k6M4aJu>wdyfk^($$V8(3-(aYvs^q|#%;zm#o|4{fx;Fe+-n4*0c zI{!ol+i)%irSKL{Pm1ZY#`wUDl7M_Lnl0wSxY-KM@TP{@0~GBy@;Ob`gMR}NrJ=aV zZBf9OJd(hx)VeQ2Opv>X_KYoLDPmgrNWR~VU|lx9D~y*3XSrnBFJML2;DeVG*% zslLubGjSvhnz96f2ROuMmXuu*$U)2Ey{Doszg{^+A-*4g)>%XQHUh0IXr}3X@pxdj z8GZM|zyQAGxYJbQI2R>X#%^OzFDEv^rQ!sXL@AkS$fC_C1z(h*r|`EJ9i)`}ll@OJ z8<1rx8&D_@`(TFloB)yn3o=7|ySdnB?f^Qh-w}4wKQfnwEzs;EH|kQJ*egJzD?cE{ z$=&#!1EeM=@|_LR;DF5@*l|x=UIvdk62GzsLz@4wJR@Zpi0#~>R)YbTrXr3Zuz^CR zTnplhE*gI)=8KHxy$3Ilu6ON!xu1NM=Icut^;fVK^7dXYs_^=9?_Izuo)5&AqH!

~#X6vN-t)4n4mikL3E_+0uGg`N_FGU?JaBU{> z{dZ3fgL53Y+G0b0R{0!}bS4OCIieDGY=5sRxsNnM53#S9P}JG>`u=??%29Wfr^w-) zZ9MN=b%PX>?}S@W3LB*D{v9|-D);dI)E?sB;6`0}@Nw8dRo)TASQILlK&EHOk+XHa z<&teQMc8AmLCx8}cd_B)(ser_ivxSRYVxeCb~_j~jpg*K82fNK>0M@|D>#l?Ijb0R(iuw@h@6nOCMjc#p>wFbo7hMEOI8e5)8+ZU}s?zJsBsdScmq zaEu3a*V+`_gNNk?oLsH7G5g8Sl&aHjnh_?SC^+a3M+d6s4|V6n@}23Qi=#yg@Q)W7 zGQ?#cs0IiNybRHja_IpAK2R8S2=+~?fgDzGAR>EDuwwBakYMYO@xX2bYvT-Te@)_@ zUMbguan#vBL}a?Sn($Hs8bd&Fg*Y_Gc1OW^iYkHfWAi3O+_mXP_Ui9Dj7mNy4Ey=U zq3_bU3ja&-Q{nw5XE+LR&NHW}1FwY-<4Mj6W0TlZ7&$x`;}CTfRLPx4BGwsTxlsen zfQnM;jD{2kR^JBK!~tuDNmjaZj&ttW)(C%0UdCqwxm+L$e=*(m?Z)0&8l)VW%9M8< zB7!jLfF`n6*kvy)?afW9rY)`7l1Qez0S90p=L~J;V*Pf+T6YCy4w62d`|E`8%_?I4u0lsPhJ z>Bta6mh(b~Ys=w^q|9W%Lrh3kD_>vZ>#s7yeJ?smy1~RZsyl-{ zfU;9Zq%pVb-1pcfjy=fL@lBV>2UY42_N*rd1)ecQp0QfbURE46$)e3gUFFFtS4cxM z6;j!WSA&|vi|*#qj@IZimxb!pQx4U2&ojTNDs7nJRlgxBQVC=ADMODlOxgA$q|eBW zMiMr5)>UZ?IMgTfjmGt_iCd1-lBmn+9@4d`(1{XGqG~Ihwn(kDg$oRizGrTot|^x= zaT7==YQUD?10WBdFaS!W;AvVP+@zjM0uJ&r*I(UxYz&# zrm=%UeW=ViSPAX^0!UT`bHRRG!3bNo!wJadYQ zpq_ug&q7SEk7IH8Ge=KW!6UV!BtawtwSr!4-kh6qIiS1_)3ArNA}Ver40YS?m?lC} z^Q>SzJ*73mKPj-8%5e(zP+i2^jP2~xp<2}4=KjFDX?d$dnb;ct2f_UF>R;)Hfu8k$ zau72;1N;A*gD@vjw^$!<>&}T@tS7dIO&lA!rkbfh_6f`#5&r+4#C2PyT*(s@nF<9%Oo_lIS}+y80x7bP!9}EKctl zxw|{4?0jE(ZD!JBusu6>t&fNw;Ihc?bL)HFI+INEIhnKQU(gfAB1>tVyEeco)VA2N zZg%H->W178bCyXZO_?NnAO-KD8ak6zKJFGP3z_dwWac8OVef9|15MFXNAK!K0{c>pVQE+2GG762IVO?rt|pMlh(X`8q~Ec=CTO9I@MC2wKWhOo?=24= z(S0c4zO2)ZKm1|ee0zv>Z)V|%I)p@9G;^qIZZ0byeA9q?t1bI7YuOh9nwGiFV@*Ix zXeJT$MY}eH(!!)xC_>}b0Q8N?CNZ)>Noe4_<4d5!gDc{q7segE8Kxv)=u;iwHlc=J zONBIq$+LfJk}x?2q%DOri{_O|6~my!p`ZNS210wz2ft6h3Ldd>(tYPQ44h8j6_~cHelpJ~qMWwO`IM5Bfg5{$8JGzCrEW zoAFK&5+D}e%H@Pf%$3KAitX4n-=LH=Pt%`D7R~qzh3EyH=8Qyygc;;pO1W}a#4&Vq z5w5@S^CYI+QOJ8Rw7DK2G$rwa3Xl8idM~F-fC^G#{t+iWEUJMCQiL4*WxzY7tnvT_ z=jo#ffOsNF4F2g%`p$+gqbI#lB}uC0DFyy`=IF0x*q6a%bp?k5)`btFdwM~pW4otT z>CWFV)!ij?>s17IJUlLUcZW>`D1r`2V@k{nC*x-L?3&~?Q(%_h%9c`tgBvfSGp{_4bw6=JN56gD#GI^g+hwXtMktWrXJU8 
zD^yM@N#!e=uOb>|lZ(!a;(`Et=*O_!0JJ2FeO5#If6(Y2{tZ=aiKa5PT+W{acf1u1 zbd91`Y>&kaQUueN)r({gii*TOUAMK z!ej*h4bMS=tuHl-=q?Qw4yfRMgbr^iV8M2?hVYG*8eAvSNVk2g__#X!h$2{iTi4Lr{C~6>t{+ql%5NH;g2?cF5%N}_cWL1gV7ucCQig=SlO+sSXV`_F;d9ysIll^A@<1#TDw~K9 z5N^w5o3I9oxKt3CKI+9)lq zkhGpVR}wb>kqo2yGNjjM>OoyO6s>^&d0@wv1u|D`MRG3?ff;BEq2xGvuZPX8X{oli zN}zd9M0ni@wSZ__A9SQ~qxIH+-K4+P;{XTZy(~mK!z?Y-W2avSGKX&k*eqlsI49l> zI0iztT71`UrG7*COVO`o#mAD%g1A#;EnD`d)>~=H@M87vi;Psz(@TB4YR@^tP6mh;-o%mjGmjD3;qk^pCXB z;zp{jbSRjaBCaK4^ny6!U1loT$LcfHK&`Ha0VRtPwpV39k1a*`L;1rQ17V)_c2t(p zo1(?HC!>*wuKqkXJTwqs-gxhmG}ChvElq?R3MG<+e^R3cym{Q)t)DHzCB;4!PYy~b zWf|LyDJUQ&ck_r_)o=7~bq~9x0<9BYKFyBDCk&&Z(Bd%Zy0C-4 zZS^bXIF1(}>B?uK>O4a|#5X-b>@EC9fj*v1pelN&QB=Ef)Ps0>iNzpiuCzyZ6DqdU z#>C=$HX;qv^hdQ6@XsVD#&PD}1obrjxFiQWa6L*>BDb09Hy<;(a(Fqhd()!Z1+%LW zXHft(U`||>cKPK5ra&1)3>3*NZ9Q8TbZwR~t<8vo($nR+SVwE4Y&4a_G>VI$A+ni0 zS&nOf4>bYVU=}ojO*Lj1IcEd|7JD9!^#Ch&Z@De~Gb)WJve#Y`L7>1-`TU_#da5wO z>C;YjEQh;>j=j=^o4m~eqKkYWV$6rl0@EOeV8h)BfhfZ%a?KAQG6v}Jz1v<5 ztbq%_Ry8`Ah|QchS9ZG;0?Y0ysl(Mw00Li_K(0Ac(ijZ?fG`QG2}}xK+@@#+<^MIx zKPh*|YH_{u^M3mi3ux`y5W)x!8{JEMPS z$}Z|lQ{z+jWP)~P9}K<`>@py~p>p0K%UADcMl^FpEMTLS#KZ_^1pp!_ed(@#(oq@YgYly9|fL z&(X2aAgEVJRFrD%g;$WsJMG}n5D*{VOr+{}ygt8UVO*^WXe!+H8JMAZlKhD+2z>aA z3^h&Ng7Nv$p$$zw^(3Bnb@Y6EH3xD&)-w=MQeVK0uT>j-AOd)nxiD0vqe02~7%p_~4?q-D$DW-<%U-)&( zqeBk=ILPaxKQ>yJ(Je=KRa8XNb+T7+59f0?N-|W6m*(C{=^)Gv+by_5M}fbv`v>8y=1MmIBB?D6W!ktveBIjUft3}xnXGs5 zjJ$1Tmm)U6h3qRwUy?jA06(pZ;?W>Uu9W$(_-k?d6nDO}wv-@ghQ=kz z4~C%5R<=ojhx`aTtr}N(EbgK?I0yBKaqnAC)h~e^mRdqgVtiDa+*~SGu{Sd5!7~Mz z=g|lmZK^csNk4HiIPP{~&=CwgIs}GD$eEKn6mUqlz9CjSWI^UuPN8#dR2ygjpd1U( zD5DM&P2wVyBlLENS7)n|r)pxdzbhuvLjo~nrxVNk-E_jG(tPn8n5Z-uEoCc!fXy1a z)!Iwk+oiQOg%GkHG7b@wsxlfN*sSPd7~-6={@Nt^6WX1*H*DO1gl1^W1TwrhBP@)O z6%_enD0?0C;zB70B`N$jEk?(h=Qk>lbw|`)1+uxZ-ejEe49;wKxG=3{Y%nsdgH8pb7dIvtUBzF@3g9vCx3- zYU&(@DqO2&F#WZ}(%CZlZTdO}2OqM*O7aj5#R&k&;u<0f>ekYboV=(BKJiphE7Nl_ z9DX(Tc5Dr&{G^QAakzm)DY1h>Z_&o1nv4 z)Un_6z8$a_Ch`a#i`m60}bz8?KsoY*v-C_rL|8x-^8|M^WWj 
z>DBcKBRD6+q0G?GcaI*wJe&~d$lEJV zHNV>SL^quUozI;W9dcJ*0VymlYh^BT^4m8OXFnU*8}QTsG^?LE5^FQ_4EbN*+LxN5 zX3_}3TR~CY=C@r(#D*GwR1sQOnUHxBuLS2?rU|t~EPL zc;tjKT9=@X8g6cA!;@du8CbJ7soU8##NWo2m&{q6L*WdW@SW2LmngUK zZ|0m00Q?A*ZS$6D&?b!uO!eVbdNztWo}C%jd3Z`Ng+;|Hn;geod=6=!EKjnbXF=^D zS^TmJPDVdI<}b6i##n12YpUX9*SmkdX{dz!Y(tkcP|rW6$b~4B;6eQ^08BQmc+qi) z{Y5uJ!oOiqat1HOO`%3-a@a6SW0x0o+KT@Sz17`w?>Bn7V$b;V_p z!s}4^^J98y!4FxSl>qUw8m(I?8*@3^C+Q=r^DJ>c>USvCsfk8HVFw;Lu-ujkKZZu> z_;{^(7f~fcH|P=RvyTBbn+J%68eY0mx$+HGebvv|mz#Ac6C=nh+9IacItYI2U$hg2 z(wDNBqI?OhT-Cb&@tQE}D_|j6T@6w+BGW1`Ki?DJtOJ}$Vwe%F{Er}ilIat2uq53; ze=>f^b(|m>a3dyFxC(U*Q} z*6*w-urvl;*KqBNMpeAxu{B`wV~$BPRde+8jji|5Z;{jnw+H8qr3;z`Fu`XwVwld1@jCJp9LqF?}fU5P(m~S|C&V zp%wPrcBmU|w|(GXFkW)z#!{x|hCQLiejF3CQ>KNjCr*)`bZ)*PgkR6VA*;YTpNp17 z!T_j2kZnB=RRtaADBaM3s!t&Eos1boJmCyJN$XTDq9nL=$!3wO2DzT zS57ME+i3n@|00rJ|2C1i2>eo%jd5W_SDKi^sEFHoGS|kUAVL7T+}`tRNIsi@rp1g~ zG%vin+(pHQ$MC#sDv04v|C>z-1ag?ygX>%)FM)BhK8JwXBlguVDnBsvsAxb`h1{F= zgK3|tbqpaHYpk{q8wag*<6r)uZBzkMTj;N%t;5?E*-7MI{~8+x?p++M_=)UV&go~kJ^6-pkjZ3Csh?(zm$L+F$w7aH$ce0%PuN{3+NW~NLu+| zNM?%XF6SNkYcMasrb#V15)bgo5~@SMOaj3(f?-4lPE0|9V1_^7h%*B8P`h%ohLU

34oF?>jE=p@QlY-6SKX@L|qI|VD{-X`hAh>s6 z9r+eXZHwU(N;(ou2=V4tl3{xo2=|c1Kg-z00wZ_k7hlk?o~smNQ1*=0A zh$j)aUT;foB;aQ>Who`ZS5;x59dH4?pX7Plpb0T|nGLx&y`lCHDC@|Y=<#|2b3vK5 z)XW2e)6RFJR-6Fj0;4D5PRXYMe;T;)kW^r;FkZ+dD!~w|HeZ^gR2QL;eQmog9B$V zaT;9*-$cYn$N?CQKP8Mz0KHwbX_z+nHUqEioWS8QR8jZO=*pk}$5vtOq#y)Y22_Ww zzfXQhU2ITxJd!WAL`T$M8P|Ym8*-}e@ST%hwBXbJZ5Na2ZMPA&6FK7y-s z7ZbQN0SeiwbCe)F9|A$2zs<#xMBexO=N8dz>)*!%4H4!*b3-zk=AFV9g08p3f*-zy zfPPM~xZ=}3I{o{AN`?agZ7x;-d$1@r?w`2sDhQhMBs9$67N2f_c_&d%FDy2F`JHYT z>eJ06B1Bj;w)j4*AX;D`Kf1dM+30wO`SGf=qyRIfUPjCv!IXdd(;QJm0*)|k!Z~Sc z)nSYonPj|%pYJ-~_v?P7X$5&}5&6cueqy(%2useE*m{)3jd90n9WJF8`1IJEvSStLdgYjL4fY z<$j1_f6-=>UC&yXLGhvQ!DJ7OHiCRCYn@kzdJS^wkKDKApg@{pcNSw8 z1G5L_o-E4%u}5I=6x9_iW&vg`LX}bE~EaDP-w>YG3AV ze`BB*w%-S_`TP&=pczd#-aSVcZh8CP6ZJi`c!ijb|8KhFfociyYB!$WS5qn(qxT;J zAc{SP;ElI8_0YUYCV^F6R;^ytKh`Y;M!83d*S#n{+9?G6z5kEira%f zKHv2Rei-yl29S!ke>xHce5(#BnQZ$N;`wwcD-gt&Po7xwv8(9cjR6aA{A1qt+?_1yl8N@wfg(iZe zV+J)#C?dVbxXy%HRmIfl0h+4nD3d&@Od5GGB7tiEk>28!6)V!;r{@`N#5Hkr=6a5! ziUFoX^bJTz30KyMR%yNPoiLr(e)h3@to5DX1p#L20wew5^mPoZUPYp>HK`?-*#b>2 zI6MhU2VxR0~jLUZxh$iBw)|+H_%BY#$a>;azr!L2Hvw zwB>Y5H;MRre51#lo!e&#PLx*c){<-ggp9G{5h<({cqjC9=Iucm$`CV(9A|~vL0c0- z3{TQ$K4cDRacHZ7F3_Atsi${pT}-1y@cz-L%g0%7@W24SQnsnA zh%SginTI-H%esm7M`}cbz+oI6YmX>r3P{yB@Qu;me+3f%in)^Y!$=$OJ=SoQxmlyT zB*#Ldento$5l*Bu1{^0cI|M($t&LA=-62L5_FS&QWUSE^B%l*Df5=`*DY&fa~|`DGk7vX(Mctb^}Yy&5bbZ3F){dvGv(R}Tl)Cgop&P;ZA|@%fUqd%>pAbB~(yUb}Dk%>=KWs=ewJnVY)!= z5hpVj7~;ewY$$ghzD6OwPE!S^;@~x4WqWsol*K)2+&(f8>?^^yOA_#dK3I#LCqLYP zXobnwek@VDn_9ERm>3W|MjIhCoT z_b8XYd#h+_>XT1BE`ytqWaPhuh%Sduh;G$x7GQ?Ii^-@o-fI2wV@ifd&z*oxl<&q6 zYnv|L8r*ZRh8r>^nTHXmaK+X;FeZ_Ept0|oaX+O@#HZ-g2b_?uY_*Z>s}a)J%xv2J ze}WB3gM?w&fcUjy!bBfd;On)krOFPzAYiC%w+e3)hvmW~#Cyzd4TnVOuLE zD_{<>-!(mRp;|7rFf2c@p3v8X z7g%7H8%L7jHb^DoAoy^%&N)t+ysChGHb1o-d32As6(Jhs_a?s3Lv-*JIeQ)^!HaK2 zWEmagH=A;jl0v_S_X__9?5qYg`oB;*E5rY!bVg=I`u{Jb&uCiMZI5F7*6KCzE9j$G zh&kOyUjeBuqMI+2x-JX)TY+dhx{8F4g@#eLo%`K!oh2@ZOL0hS!Lx>KbL>8O#Qj6} 
zM$P@@6-F+XFXtcbhbS5!fKZ05P)5-}eCj198)0$oDFb$X+6Kdi<}VVn)M~oI^2kH= z>Oc0cv$hDq;iLEWRo-fBnU&Ob2iG=xCoU|!0yh<-9lteQOEBL3X~#t$HAk@0xHz@5w(xvFR+gJs zFYY$M1j0+u3eXJNxFdyO=mvzz^r~n$Ps*<%X?T%yMM$lmEO*QgIM^$=mhnA8+(L+T zV#Jp>y^D6`Os)sb_(F<_CYeLb6p5lr@_02<;XE5_E5hcURaV5lEvn>adflvs8GO7) z6xow;p+#Zv+&7joun@k+u5gI@otcAAJmsxECTQ4vPt7?hv$Sky^j+aTXqOKXa>A{<@<~d*)KPD8H|?&b;)joLJrvp__2NO!OmVt<1}`#X zV5LMgqF-xO5h_p+DT@YY4;K<3i}40gfQtKmq2O8;LLM{Cl;^~>u=@nD!DT@OArH3U z#w5aH%^;@+5v(5kh7i#;W-)F42QUFMLU1s02(FF?v2bv(CWcQ6WiSc*{r5{-x6A+* zg~l29hmQPZak%_EVipSlnn+Q&CHWu>2nNqcj~4V0@-I_?+~nIh>I2a6hiLDfWH!g! z8rgDHVQjMi#?7#J<`M_&T3+)Z34R1%i_>D~`BWuNdXRx>5oAW8`{#QTx~Gx(hU6md zEKlcY+9j5-n9=@ryc#Z4?LSA5&SNEqR5hnKysKEu-p@w=kvYMpw{E!t{w%syBwV%5 z`Z_Cn`ZdAk$c)F60}9<%Dqb+`;%l|8nG z4CAi}_PGfp6ec7chhV0G<`{*6g{k!{y85M*QnU=&5s-?) z4rdw!<^Xhh9b+&m_g2ub{~unrEm?i&41%clZ!3zw*q@F}kOy?J=Z-Nm^cd4jkmM3a zWwGW+@s|7WX-9R}i(H&ngv=jRBkf__M_{i2G)dQ&N=aG%8O8W1uGbwleK?J{a zXK8dGXIxuqaXDX3p;LwazMWrAO-HOmWG(uff5Jgwn* zO-)x?2@$KeWu%O9Gh|ieifC$e>*>z={8c2KoRakl*TI9M^5q{Tc9w21`qgcK^KIG* zeQ1&{s(xGOZMU|ms}IL4%XP=&jCeNL#ST4u_A0m${yjrvxd;;e35zQNP)eTML_P0( zGV@Zk^DA@t5Pi|m(0JpJ_B*l=?}?0Ki6TorX<`fL`%zl>&zFr^O$MLq;*(TSqy*uI z31>L@qkX+NcwaQsIw4B-qQ4kmXsHEYCNv!KB7|W~jVnp?uo!Xk^TchE@}^T|U?U8; z>#x^BO1#1Vexi*Y&D0`f$@rTYFcB;%!AWmL$x08yOp=J{3t2^dWZ7Fc17MA146Di( z{?*c~Ksy%}F(5L$?z^5ic*YsgwH$xV0XPa@O^}ZSvF9ATNUo;7Z?F18!kBJ6Gy2%J zAck8X#iN%`Kdr@`GxULlDg1>C`+^Io#$f!|5Mga^KxErp`xH)3ffzP6mq+}O7X7wz zi>&9UX#u6}N(px0;FGH?knnhSfsh7pDZQiJfUn{)4c^3li2oUXCe2{}I~j@{07@JlqRtn<6%6R1%yfqX$tiiU zziyC5DLGy%VSuWkjkIx~%N_?a2xm0V)xfvHzS=HFQJl@nMOB)%Te7Uw-wu#MS7++* z5W;MF!;Wzsf#;%QMhjri2(>u}>S)hw2d76im%b#P-bm%G46v2}!d53*0SM^lyiP+I<9F@6z_m8K{;?D?n=!UJ z!2#WTa6IDBmeXq+bS?e86Z}p52-hIY>OPEC1pKhDpZQK}M=UTj4e#-T8HxUd$Vy|ngYTLmCmx=Rm~f#+RB5)AYXtacuuf5XZ#A!14cHZK665`&PH!rZ^LMFnxcC324m* zr`XJbKsO&oY)=u(F%1V=IHxC0*Gl*eQ?`!Q>Ryv@B0!p7GPar||9 zJJNQycu)G&q&~*x_Qs}gJ(kfSvDh!ihfp~lM6 zYC%`$w%x%~Ls*CI8NIgs6mF|$FQkSYJ43-&vr74xYa*fTtJTciY~9gv!#2}E1@AHP 
z^0h?ylo_;>fED$XBH5!C$Zt1QxH}m*R$9%4I}U<=r=(Gx{EDlV@oKBqxBCMd+ow9J zmT<^)T3<7p?6#NP&QGj#kZf17jvbYh4ObOcCGWK;LiSfgcR^Kjb?MQ%T04!Gi5VB% zQb-9WvNJI&UWo$(;`=pwo`rl%!s<>ZD&mVc%+H$ewRz_YSqR{3*%BMr|Hb6Dd%rx9 z5<0qOU}%OaCsx>CbAZj;U}!Tq4<9JaK6rE+uVSyIvWs#hvx{m!mLO63)Ijlao;#_^ z9u(Bt;e0}cLM;=T2f{|UXng!`lv zRn+JXvIPUebNAQ>5H+++b=6Ash||WDJ-DA>S&gu(Ke2%(s`QXcQfZnU!k}|>O;!U5 z_1HG7qFpD%pp7sv{ccgtM;Ej?r|d4m(?48(9%=6nS8^ep4%#wE|0JY;z+riiLzYX` zLTOAD5)x!0Ti$QX0^l%$^cHo-+qz0<>DK@rKtCtaHS`~juMOarPp|H0*8qz9&QNo? zOt1UXi;`h(N3{R70@()+o}yj4HlV^7w+>Za2!UV)F&UYqagrZ1i409oqZN z{!;Nd-~3VSy``bDCfJQ?Rk@wlYsKHaejqvpv?ajR4R*$A8{iiOk8OTBtDizjXylJ+X%|ZHoXIZvGSlU8Uq_mTMo4`c04wtXaI4B z8b&W(!Bvj@z?1a;oPR9z-H@~aP1ORNX;=r;U5snr7J~k<2$^5OO`B;^WyJzX{cPQrcUyDL0XG0&N|zdH`W85NBmWu9(w8B|{9>RvG!2MAs4 zlQU?4b`~9(@`@&aCIEb8f#q{(>UVNAnmSEoh9|`96%sWHF4fuk_7)}PS z8n~2DqzDYw_F(@=m_R4Yj}pSpoaP8#g5rMFp(^$tA7A8%3tN9`M(*dyl?0Q+fu80+ zz^pg+ry`Lw zBqan|V1NpuXeD(ZzTv6mUW|T1><2&&WnGDIApko#aQ^LiC>}BOutIVsz?PG_j@}#M zbv_ch=>tX?Z8q>AR0<5e@7|ggvS%bv^xE=>^VFD6#}0G`4I7QeomYMHVX2*}eqt^e zNYD)&5uYm4mCRxZlUir<1Kl?v=};VVpY@tcls?%z8&tlPM^pa-PPkBB)?%(a$N)q71^`y zz7YcF@9$N}4XM5pLk0qAkY{&C{kT58^(h1acuKH67CCipq|Kg2r;`S0E;_D#^k_cf z_fhTf^m?`!pm=Al_4{yE_CeoJq3O~|Nwb1t^bL|Ja-wk{%i$Pt(y%w9#5CEV1qm7n z`;*Fj%WVN<#*2$=liOk46GNAcSZe(UOp_kdjH(=;L z5TwP+OBb!^*3Mw=x_*aC5mf?iSaiD>vY}ScUbdvS}6F4!?gD#yg)+AKDqozxJ)hdx0va z@0p*5=rbLdOD z3K0e96P>1k3*{?RVG(eF)Fu)5nTeX6o`zYS|5c@FvyS1VNnc1|lUyY$0d z#VmJ7@{@Matu5H=&ndOe_uNo(NPq{7qtJO)(nxHy5s)IWzzFC-%R>C3b0A(?R5Rdn zBM<3i(+Zp$Qp*C*&_avz9wT34*X7#tV@G?qJ&gSJ$%=?Z;kG}9jRYDZ1pse&KEA8U~{_daq*HkOM zo(?UN0y16k)?&t*n6tS+G$HuP z?I;SUL2wv%6EL!5N(@Ua8`eYlRpXC#HAf`C@LO?m^-MEcl7tWgp?YHjI^?qibNw$P z`3HvOYLX*RovhrVC&PzgSx@i-%vf#PN}zw>i#jyu@`!LtRtP+JLdVavqES%Q#ICG0 z>4bGBEA*s@86HhlxCdDw3X(@7Mr`;Ns((14@ZWf=1ypP*14(??_Sm(Ct3CHGx|aD` z#fXq<3mqGV*5X~z+S9uuIqo~K8;OR_&EQw(lFOfR0~*~ZWjrDJ&|JhDx>E<)@?RY? 
zhhybfnc`40YtU)3^Yr8!(%ZLuGmYgi^?>g#YPM;6I(;0$(g(-{K30ataT#52Dn|I0 zqEfyPzWG&u@JSQrmb*AG9&xqK_eCDMd$+|Qz2V?kP79NNFPzzQN^93< zFgv!3<_xn?Yt-s=1}7v$aEBhc850k8cRZhdH%uR88j~csr+ZIpaRuz-|D$-A!I~CA zYqB&AQWiNR318NLw*Hnlfg9fNG5-?&} zSfdQlEW@n!HqzCLlCAi@{-PA&C?FSNbY(vXMi&xB4A?Q33bF|KdxSvy^Xw3>9Px-u zpgG>ysXk}pw_E$&9K7yKL#Z#s0|z7v-{;RsLYG7eu*{~<;D29ur|e;Gf=?%BXsP6E14So`&p=QAuRQ4Jp9T|<2%mPE^WsN%dt&* zePzTO5p+Ho4W66X_n=J-ve71RInw&G z2VLXc{^;@MQ`M%s6yi*H3+|}I#ef-{5~$Ri7^sEm>{Er|)1asf-BHwi#8+Fl}ew3S{I}g3B`9YKUa^3 zi8g4?5=URr^iP}=IMAV+v#x&z+(ZrZ6i`DnP0^$;tCxm^cMNp~mg1D^zu)KP_=k4? zfILz}gn?#a0LAAmeQTgAG4>D^d)#$3;W;pNKNcf0GR`=j!ECt2cE!rRktmoS4!Il(vO;%o6vS`hG{7Vpyj!jI*sa7;?L)yz32PY z6ipG#B)jAj9vte&bt29hMZw*|z{6_qC#rzTTomG@!ChpjiC6?cN#jimFg6}9H^(Y{ zJWh4DB~^Jrx&h#cOQl5EHGYepBIO3KiVcWqU&ZMHyZ@kLI%MSrW2h$I=yN?5Fy$uW zXnua&arX$d-uz^yVeX|Y6BhRH3yvOLU1aCg`zNPf_2Z21%^kuVYL#gcxoawVGH(}k z6Ca+zZ)F0|jEDcW_oiCyGtaPZqJ>Bhra)o^26{Vp+ZYig+h z1T{jou~SM{7$_bb81XI2%#f=VwgMQ0DF2@$?%56U7AVA0k6Vs zLB>B6J4M}QCXR7KW!)2T`gN3cVCiHV{fzCJgt+l>a}aVhrg#a2AHvfuk+ZcyCVRDs zywG8@D4{zpE~`h0FYjLrp*;Ua{%22c>A8B@WHv2RG!IDdKVVdrP60)bUXLDx7VtvRMpO!y8)IOB(4 zsXe%)xjZ@Y8-0GJ$SyxD|+>~EGOMfgj731&RHi^B0X0|nMWB_U) zhGxMT1UB#Zf;K|7!U0s#j_)y~37(tuKd+5+W=bJt13C113^p7(E%0o2zz>9=-spW1 z@VsOJAbHN$k79V1*Xej)^@}dBw%BUzy!W23;(o}7iN93gYA`ot0xsO{C?&cgy)k1g zBbpXm0q~x3HCqg{0?2CkD^=CXB*lMzz`Sq?i+I8atnIP0ad5HSgL}uJ%3)T4H^qXp z(V|3LKIh<PTQ%v^jE=&%NjM2Crh@6+Mp0v?)0*99i(QRe3K1uinz^$4Rv9`H5F#XY^i zCnm=gXzfIv%RX8HW6Oh=NgYLANSuXRurI}c+Hn<<&YUoc%)_(vP;bJuX4q?Q0Wbgt zUiaY66-0eXOE@EiLt21Y0+EVT$PpFvolMyF%S~;3b3~QFwMo^q0DYnaEFEO+J-((2QXN|Bk2#wOZZF!>(@hv&q;QY}*{~|1oXpwa()4qQt42j;n_e^Q0pF3MD zM{-!!q694~Zgl~yS~+13+ICh9{&n~%f1qc1{U$Y!#0^G#*d5E~_H?&4Mx%)N|vK7RZdiA;qRhUs#YuO04zg zT+c(tTX#8Z_OY_gpccSAkCWLNIAjX(jeN#DUr?whS7hO?P=5lenn0jF}wbc1^j&jG#?>g6p{Jf8uQYoDCMOC|7IiAUU8{)d1pn* zgmKAAe-$^yOSbx+yd^{52Fl;GNhRwycGC>VP6@Q&e`sX6qAv>(D?pY{eV>G(jAE*f zx!F^Tr#aYLbAs*xoy~Do<LUsqNFl#?I~@i;n?DVQh)@v{?_{=;sDoXu~e 
zJCo(RP={7`*A2bxYh=}DV=IdWzP3B5e4&;r5O+cEcIV6cjPB~zzgT&vkCSK2>)Ytg zVeT21ZTV|9)>Gk#x{Qm8C;^F_%{`gz-lr(OhjqsRI(yeQI>K89X;Ed^i$y%_p|Ie%iD+>eL|2GcZ(a}n}Vs-T8>l2L2eEdgR zJK{61iH=p8Qib*GXYu2Hdn-GlnW&Y{XrVLh$IFE%Fg`@!@-%r0dzVR;aT6pnFY~9B zwx|{@F51aqn45{?&(fHi-+OL@r$MBqRmb~#{d}dqHf?1jO5(-H*v#P>N#EDW5#JCv zRcYiTJ)$L@atJ*gqRNZZGNk}vL?NHIow2pH!eEnpNcB$G$xh_JU+SpNl>H0oQ=xKX z@|CcJS;|Shv7d?)iC4)biD@zsNoOZ2{Fjib2+gn{ApIl(o#T!|j6e_J;|@dJX_rvJ z`A!~R@7E1Z5Zg&0O?rHY{7NCEhe8m})lXNR9|angJ0u<&(C2xR%Fa+s*bW~t~)+yHMznZUqwtsCs928j19kr5t4z(t%RP9<@Vqr!S zbt+b>?(cX2{&od5cC-83@&Q~Z6+)PNCkK*48BsdB-`BAjstZ|}$l&T>tQe6S!Q`Z> zIW{nf&@NySwhIiRi_3iJO)yDWC=m->zCGX6fa@2 z7vm13o0HIUS|NpDxB2*qobw0uYpJv_6rCZD1Q^1Jk|b!_2`T_BseEHb%3ApsVix?< zoIJ4sGCZU8u1*riHib4>o2PWG3stwCvc_RB3w~E#=@;rJj|*41DX@DO!C$wYomc zp_*-Ix~(5Q+yhlqu7e@kFfsj!{*#$7fI)_jG|y<5yX)APC!DATL$lX_|E-#vEgyJq z7Vcb`YHso(TvjnIVhp{ypGw03{y^O>Gs!N1UjC6fIM><;;)9svjKKLrC{RDsRr+70 z#m@Y@v|w-O$bdLAg=cB3BKDav4`iR$_(63De@M&&qdi|;R4_J1M;&?s%M7&8IvlmQ z_eOV2n3l88z zms{nck+#RCG<5?rWHSw`Q2vT)V~|b*IhX((fmF**Tu28`7bs408edkntt`w!`2VmN zMB~wtOQ@KcKo%3NSgTgx_nvQYrx>L(tZ}bcz;d$)oe0%1*AZKZ<#Gz@^U*~uD5VE6 zFcC=8<4~DZ;7lWZv&xI;*rUs<@w;dO%a1vY2veQ7HoAgbq6hucIgA0QL`J1PW<1Kj z$&wy{f>U-!s)b@`2cx2ZmB*!_0(s&ek+QeZ00LB-AI!NaQ+ANF zqkelj#kA-HvdmLpt|T|1_z>rM8k{Ao61-4!>KcG_y9!)FX$Mi6qayW|FwWsH%V5_XjDCte7Mq$Gy>KPkfP}ouGN=Sf+V$R`9bnd zUzk8nsd=)184udCF*wJEVZh?{M!y}RS3w69Dyj^4)=iXtC3#`cRi0O)*JG-rW{xur zz1Kt)6!aTJf3eh^f?NEfR;k$C8%U0$f#r^7=lZc^x6Jvd8vm`|G5#@0ahXC9DpS&y zi6nSCTV6wm7@#5ewQ$ym<&rI!#vl9zk7L;%XIx!Iph!35q^w5^sr;u<7$g3nc>vN2 zDdSH1$Tf!@0b?IJ^$n}nmlU0FEh&K6;2*m;ZEr`xZ>VFCf!7rvgo~-9;UsVjM%w% zvDE}d+%$*jZfHIk{3lMbKIUC)dW3Ps zFbDT+59zRv*^n23!&1dtg#2m07v*>0nhWs?dvZ#3;bEZpd_oWNvpYF$yU1ShxL2Tu z0D21za;g>Gha7I+8pl7Lqemre9|$J!1D3us;XPUs;2CbS#ZR)yf2ec%rhAee?&GmFyCS12#YO&royZWGVwhnmJy^*64ydJ&gly1Ty6m zM}LeN10)%B;RcbHP_U;Z!#rv3Uc7ebSLU!l&*+9?J;O4Es<~lokUy>IDS^^G{LEV1 z@9?hd5}%^_c=AvCAy_gH+F5vzsj=u--}v_PS=CT zc_f2P?Ah@PPXcSCylFV0BIm3Dj4V>}uUP$SO9ErFmX!n!5S9vTI7BL|{kL}jrCUg4 
zF^RL1`l6D+E=kp@T@-Y2=8Bo^hZib63X{1^^F@8%yVyCS|B|` ztp(RsYQAA4Qkk!z__#{GY27PR&dzo>gY%or9-9Wy`|a~kWOLT@L{=2?e~QXJRR_VS zl}Tz5T9qd8HpE*FE_z*69=B! zJ;CH0W=N@?2GIrO6gW19Eq*oRapZQ9fw@cOLIK8BpZ=j=&Ma_Y0Wxkv>(q$e*{`B; zusraqA&g_S8)!cF;1mz`8q08!gq;Vz;^Ylw$(oaaHi`g-IR{k+_c?cXY5q*A4Sey> ziwo{im(1sXcDN|#fK&g;`^U}Bb6zkBwL}y`bdquO$A;~5p`mvnc>f7?#Z?07DcMXq z`&R}I<@7PrO%Fy-ZX5zxX>@|I+IPZlF!$*XiuP}I5%QVlGWEWmrSw?`)6n2%j6XiA z>A1_-6mv3Lb^|Q+ztpw4b7u}R&84R|7aB9#p-gnx*o4l@DM=+~P)aNRbO)(C_t})r zDHum^qEwwHw%p_!X-?afku)+7Iyni2#6LF4RP6^{$a6I(fcfr?x}0-)xXxpCIkXy<7;i$|=I}#kXbfyZrk^fCu^QCP{FO)U)Lr@Y8-a!{WB)OhHDk55 zHr}GGVdYF8&`Wvi4!NtJ0fO}>R)Lk>ZVI<##$UT-5CYb{ub+k23$-h%YT#D}u<~5OL-hV% zI}SJC1m3~BJgng>?M*d^5Q%#Xl^j`(5Z>vdp~2${j0_Cy`qWfC-j;4y-BqbG8N_E~ zomGJi`%Qz(3eAmnW4uv%es5`g^D?s^QC%sDrXN2JZAE|^ey=lZy~{&oToZr-fslH? z*Mmf0=SxLV%YHuxIEVxE&Vk2h=QauvCuUkuD4rMERyybEJ8b8783`cjt$O{HhVs^) zE&u2=3qq6&5>UN+?J-Hf1Pzu?ow|$tI!nT{VA1+T79W$c6XJKuac>HazX z)J!$(M^N3b?pNeW)cKV}zm>%BS2SQuxXUEI4 z#+WTVb+)_}?)l#QFbf2l>T_03QqPU+5G6ed*DfxQLi0cn$*>FifvlyuDR3r_?*LU& zE#9r@=ipPE7nkLOP=&xTH1d-s`LV(jue*%54qnydr+`d<4J2l6E+!(0k74{ngZ^f5cj6|$sfmT3kN#b)oli*@);7QR2U_Gh zZXK+S;c!T`(eT3y$e{~Cl_#nr*s|_hwX-cAO})A)0P0l{fB`Iqk~oG*=&a{)+bT-% zfWsE*UBm%9yQ|H+{cv{E{NaZ)2K!TdgC5Fwt*n8_1*H(-uJ<`XpKKMMpU6*_JRe!i@Oj`7 z2iGOKoSZn}f11wBy*usP#)gdk4j5`8!yE03WmCW%p{-pP1Dac669{QF<8Ub(U4=S!KT5m)SjOjwq8wbcT(cfm2Z$c0X>k= zvrtdtT7MJ2B2s$jpryT$^IZr5O)<7^VV**HL(?kfbmYMo9i$D-DP;+t!K^U1*_z1_DE>y7M56Lz_LfZ(OC-mb(i=Y8eKct}8K(Qu2=}JotkVwYQO{ zCtE2eI_*IOdQ$6;qd%#5Zm%JUeMONxZb^k7krO_HU(AMda^8`S_uMEd0%E2 zOxuXj9Mrv*8B=6NQo|x0A$Nk2@xtq~dj_#}{*|@&7mPJ4*V~!jLnD?=%m=;VrDdE=s9eTZi#3OIe zyb0*42+ezV+fSC9DjdQq?J)fZnY1Z#%kx40kSl7PDZ%6d6uvX4fQ=3bd+|vC6r|7F zwCIa``ABn)akGM>&3m>(b#d$8+>o8+zoz=RvyXQ)#kzs*^*t{*ku!!dr?;l`sY*$H zbfej6mT8l~juU?ibyY<~`XKq|Ms1yaN#||Iduy(ORHYmMz|cN@$0)5Z>NU`I-kd9- z2*02@CaLs)cvtQl_JX6cO?>Ms3%@|sjfF&1K3|D24!az%0@LGQIEv7KQ?c`8$09QY zg&hd`XFzLW`aO`6w+FJ_Y$ScW@<$1_(+3jp9uXJP8Z43}wdA*fr+sSLS1@N~$)cyH 
zEB`nX?y0;Ztqs*yBEf!NRr_`nPCF=Tn7??zlqqJ`gPG%1#kWX=8iT_ghSA)}A8(cf zs9dozTQ8PBj_lBr7d676c#zTZv3AY5&Tv=n3nWRtQEK+bNh*lU4Bz}H_nLP?8}MhT zu~q)u1V(VShOFWwU80zxdD?c2Ay?=12|8EcSO2~9m&l1inD~1RYx@IOY1F?~IGo15 z{tzRy#CxKHc(*rF>AR)|{zFRDHWFMnGS>ZamiWd@?S6EWupRf#!D9@JK>nlIrqMxG zz>ug;%$;>@sDH5KR}XInAU)SUNgx#D{iG+$>Ps!6jRbxD_0n@|WTv5u`4rT~?A?!u z0IZof<}LGD6|Eb$S}8EqWZ|9ZOwYFk>zRV~9K|sF0 z&3!v+V3yU1Yb!by{C&gH)n%me!8w5B+Ae8E8np#`*0}bTtz*S>8>J97X{WNwdd|1% zEWkA(f5deGYV62UDu{k_zTaz8!Np6gYwY!RS^i_}>=hYR;GT%7bY0R@?A2 z^8I%2w}(ij8joa$so7_3_{)gARg?|O(>(Va)ot`E8c~;9-T(D@zZKMeh}*0I#Tx!Y zr!j04%w;Ies=ugY>tM~)mrOYp6^p?Lb<%^|TLu@NO(+gL;KhCD2NP=>H0|xuoylH% z=tq`o+h*G=tOzCi~0F|zL^nQVNQ$(=DpT7&op@H&?%a?ftl7K`=70|O|rr){P=Bpv{plTcXs5S>VrqdWw^4EqMjnd;kNx zq`3g3nD#Qrz3C9j}xLncG{^NL%u# zPkjUr?x>0@JtPeBM!yCYR<+%5dA9Z;uQkuStF-bAY$F_VZ>~5+Uinm6K>)mgJWDX= zcKddxCdSd-iF;kdyB*pCkau2w(C*s-?YkUO*qqNt+?j2rqd`j$p^3KEn-J#f^>BH1 z?6^zF>Mf^G?R7YSt5RbFDGA`?wfifxU{`PJbwj37Zst?1vn433!vhdXuu_cf=d@9W z!{Ma^N}-6yxwoWM|7za(O!z?>Qy&JMQa>94!MizTAF3lPjR%b6)f4WYKOMsO&u|0@t5EQ8$a)7uS=rn9% z_)ORiTQd3FDCm%O-3SU%(z6|Vl~=R>D3smHr}ZPIFv!B)WrJB^;>9^Kw(6?_;FLxT zQ3EpZwZql*dD6k%L-9=rz1SH#pjGn4G;!0wefRjs zF3^o{6yA{@oV5oku!uy8$zE<*jpX z^htSFtF^nd&wBYbm-ElK(5KAdN}mq1&l_VOlIM+lmII03re~4horF2dONFgT|6G z-Bp-_RtoSfwQNs8HHYj&3akxp#^>i(MR7LVv82jTU`no*1* z5U+ET*T^Ut53WmBJ_95_>Ry`Bgie^RlKYMYLxI(6cRm&8b$Mq~XvwzCWJ6)92z{aj zzkwk(r4t;Y346FB@75Mm8I91%b~4Hh@?`VWULSrAQEiwQ=v$e`E_n&}MT@X$Wf*MQ z0CoG8@MJS4tR6(bF)FHMt)u+j+Uxg2ZSkZR4Uc12%9^e$7q=XB6OFv-6Qgol6h)ci z^@|xT-=cu+;I!8u&D z@cL7;up4iXOlNcchBSLT7fc8s>XD)w?h9lzL1o`Pw{~Swe3D0603C&*$Sf~|*{Ltl zK*QBYG??KHP;2m)l!-746NPy0tWlR?nQ-1iWs$3K!kh6M1_{Qd?6>q~YC9Sd<(XqV zBK08XD&4h_ApKL)EvQ;9MbIt_OIwg+_4T9nTXCW}p8$zs z`~HdeWE$!qb0xs46Jymj(PmVM6D*W#YKI|qy>)g#^qq&8c<3{r)`w+jh}Td<2a+XA zT%K`Q&2^-^>r{!-j4ZsG`bJSbPZOx>uyNWr%pA??q>o#_baEMtLLv7xtdWEY}VY(4x)TkGLoj8Kbq|jW^X>Q`83|cAcK`=eV~Ye{r-)8Lueuc&U#;t?eqII z(R9Iyz3S)?^gdO$=$$9OWQ)F#5v>zL+DI@epEm!H-+P2sSZ%}LA5=GV-N8=Jy1yy{ 
ziOtPOPt7?Yel4JKoh{Y28DhE*=3SHweNOFH$xc%w>G=yB5Bl=2VvMRi{j>GY4p~nb z7EsJ9YNxOsRt&(^hV%A>A+jGnJHGUOE*=Xcvp}DaVCaC#nzroCtMV`Sv!RKZ*ZNze z7^$g9(m`Yz%;Pzu>mv={WOtRj{vg0%Tw9FMI@T?x_U0ry#nqZlLcS9lVZbANo!>Tc z!Q1DJUdL}DYPG(18>nGke}oS|p}{G5h&b_U+EW`PfYf*IuH7C$=-2oaU;O!(rfan2 z%kK>E1HK0v?!UNLmTEBA-;zh{vo%h%Xdu zz=MiCRcG0Y)7&>TGw@%%pe2@BLLTbHVrNmz@xe&6KJuqQCVHeh#_ButH+_Sm-5(7$ zoK+#+H+B6av^@PR@SPqEQd^heAj`<1A$3GcKp*<-0(L!bba;{foE@eoCtK$tk$_Ek z+K_$0Jp>h#9zV^!3d6hqE~0-CX?py?EKv(Y(gsf$6Lr?;F1RxbE=fe`I5Ty>e)3U5 zk1IsdRH_s!ITS|?dXUdKz=}>m{KI*Y$_pVKS4hys-voeNC*43zbT8038O%VaiC*( zV1F}0j5;<-YvNS>Pv~Is(qLz?6XOw8J4(4q&#uEDgz6s0ymqGMg%A&>t&nc4YBmBb z$eAXN%g}E7iSFUv8FN$;z4%Hpy}tz25p)Q#Gj9J*Ho11L}V2HJ7g zj)q{ubvg8u*uEon*<;&L@vEIkV>luKWe++rOr&W3+DrOOuAuwo)2 z3Jal4Fi212&(qgSd)*eoKr^~hn_L>pYd{r%9IUHN7CY%!2p$OLId9xbto{CEBAegY zcP7#jU558*xn?M}O7wHErfTQZobNkr5W#ROrUDK89r6HiF;Ln8@aT)j8oeji#>4Vw zaZk>ie2r1&PX1UA9>N9c<|}*6W!*Z=pcnh*U$gvu#iSHMqV+_F(YQ+PclKrtVkhEP zR6wf89^#xU1lu_$3%IvEgBC|g*uL;glogd^_LRx9u8V2ACzFu6f#-PBM>y!AC)3H1 zPn?k@AGgVhllXE^-26QT_e1s|9i%2jiwF}o(FTus~ff6U1MsV3L!TN?w1kfcWKBy+10 z&lXT&1UWg#cAbR>jyHds6k|pKyKq+TAtA)sggLS|!bV_-$k0X6Mh#Vfm@z^K75Ck{ zxLlEL6ZkHX4mu&T;GO8QB`YOuQ%2 zuEAO`Y#eD+8k&-;+=e9${J2KKt}WpiApXS`%c8EQ$nE%sAokyI$Nh#0hcuWR8mN&E z-A_NBpDLLu13DpVbdJAF_vjvgSD;MWvdpB4=*c+*OqWW}vr!o<>XjVOC`@Yv&YURq zR%^a^!O0Pl8!g^p#M4(t4~Y5q@XyUHyi7WJz;Pz`Q3rY_E~K$wHsqPoIYxi7T`c!k zM-&M0v8~3n&|Aw;V~r0Erg)gbCa4r7e$ae0QdSes>KL3r&S36@te z)%@e@bu16eVBc;BS=mnE{NT~WgxZCOqR{pOde~ST{7PU4?;p4hMy%6dCyk0*m-U}yG9&YziR!lLVCH!3OVw(V$`4-I`L8iMr({CR)8x z=J+Sfe{$VZMn}h#&jS{(b zn2@SF=B~G%9m2od@C)q6wOroZ18BxcdX(`9HTKMRJ*);n-Dz1Grp80`=B zQ$n9=t(?Jdv|4g8T?Qqq|dIMvipAZm#DcV%uYDvjudEWQe(aPMlL=DI9q+; zv7Ag8c@#wo%s#(0n7Y0-n1eaa09<$Imy+WH2)W&}Vjl(i@$xlTWfX^w3H=)nOg$*{ z_!@@Up&@_BD@cg&$T#`zr8vgiSuKGf&l>YwOKYopb#@xZ77P4V41!y755QmI=pW(y zG9h2io0BPtrFmO@UZ^p9WkFY8OD47+53p|7kD@{^5}jQvxX7<$&oLT#vBLmVPOtp3u#Kl z%XzG-nfJW1mcY)2?uUQec-WRK%-{=@C?D|LPXiNe4M=hgh5~iU;c((oXWUWG7VKfB 
z--uw2CxW(iOi<)`#e=>!0EN(A3RiK~Jbr9AoU|0pHOx-|FDse=wOlA@O$r^8+DHSC zLmx!XiRAEctWiWBEPg_uiXp2ZdE>Q!EL7+ep8UF9Y>FCox}XjXxmvj?CBw`=FlRdO zVFMB#vT^qwbb;fxhND`CBC#BHrfGrrPGGI6{e%kOA`dDCXmXn<) zgeQAvtMSMk2DGlQ=bymKYJ#7@@*Z9elUksSZ8bFslWMZDOaJ)2I9Txmq)CZj`Cn+C zk^X&3{x`v0Z<6itn|qtSo(zSYfZG51HB;&sCu8>}@r_16dO0DkLZL69gYdXXz0 zG%zLU<#lF;3D0cO88)f=sH{Dxv<$8|pw#~Us<^*=-@mV{?Ufx+LN}_9RgDc%Mh3An zRT0AmF)vk({amKCz)la@#r~?e-Zq^wf`+hCS-OICMPFs;Zg;vE(29n3PPmUo)TL_6bag%qwKHeWHsdXI^^5n^hJR#IXVR%IVKW!dPY&us4(y!KwYHqrQ1mc)Z*Qbwv_xd*3WID9}qZSa%1vK&x_VI<*8BGXZd*MJs)Qb%>>L#&$c3n0fSudJN<V>$=!u`!?TBM?%~zs>qG( zwrOb$(jgiQrf9ODo7O-4!)S)H1B&-*Z{1!IshBfsYlaqFrj=9YNc4WAjXiB;qO&T) z(IghytXLTn{VTOAE|47;o&QjxaP_YoPe1hxyo#Wb99A``=9Hiz9|8-@s!>y0U^Y(2 zr}r6~fI=8BV%Qn?h;%VlL^cYiO<*Zs;&G#S8r)GtHn)Hqb&J%DgTf{#9(zw%{sk0> zNOsZD*;nY7vzNo4WJcB*A@=oWsIV{E@CYMYS{tuZ5Mgj-Vm~$^8`^9(uJzno&exUf zk`SNRPJfS7tFLu$I;|&SK`$r|bSqMch=HkHl=UC@A!AAn^#VnK;#(Ovt*u4QWLkAa<|`_zpC72oa0dx8*w z6U3L9m~ePC7-BukwU_N0FK0FjZKAY}5`8jtj(61l~^DCkpJ)p@%FnrL)ut@7_8Y}VpwotR;mgdZ`>O$hT^jA&gS zOonU#2{+vQv=KZ0F2xHv4O%jWY#BKbJH`vj*YBOwt`PkTTdDcLEP>Gqm+Z&*F4?(4 zSn?$6N?OFUPkK)}64^2Y$^na-(gNCJ*U0{3K+Q7443Np*aaNK^EbSJy_uTXEuS;#) z>B6qb*bww|Qoqcc0^Vih<9Ng7Y+Kofgo);I^|}~A`&-_kY8|sDjL#*uIRK)c!*hWG zXU>+n8xy>O;fW11-LLY!Yz}Zl-6kWaWO1GPP94X#$nck@5GcJwc^jwMf3b{&!2iG| zssop!gF7eeC%Vj`qKu$6(cybRPP*Gr=Z_r=?3h@&FuoQ!93CCU?+vi{hn!1eSoh&*f@;bUCt7p7ekIrb zc77)}Ams-NQ7`lc>Ufz+SQa8e6&8D!Q&eF1&ihe6t{! 
zNu}3)Lg076qV8j8He~(cu0dD2qp==P|ETB{Pf0vI6a{;9|70{$#4r6vEF>VF*N;I^ zlQYaQmvQ~^U~@FQF)N4p^isjP?31CpQOQk}kM50b#&8D=91S-a1B1C>EZr~`U!d@` zlz=`L;#iXvG_CRh8NI9q!k6l}PRo*8)i70<2nK)_@!E*|4d3T&vFj4UP^g`To}n`}%aVnH%* z_YqizgqY!%-JN%BF!LN2I(?NxF<8^cYs$tUanZ&s_+e8Z80Co08suas#A2Z@LM^M+ z=u@#x%@H|>^1za6YJG^vBRD4&mnPOCm>jS(g8Is*ICj0mk2L9Y5@d6mFBp;RqAmsi z-QdLGJ7n^xPpP{c(K0kD8wB!?b#mhoF&&82>Gt!W-43CqGAHyR`$rJH&fcI1W>!$4 zY!O$}umXY(nhh$01vW7eVqa$PwBBiXx-ey{g~ZD@ZUut?!+ZcvcM5wnO#n&YCn1)( z&1of3_(-|6Fm-jx=1@p9A>Su;WbNvx-eWhfy5Sxx9~!r2TAQ3S*E>aqR9NBJWv3^z zMv&UEchs3|RV64qz7})=s4|51?r#=JndmG$&~we!@`g-zi8!sCE(ZO8Eug~7N$y37 z7o|DLW|0nckn(hv-g-a!Pk&hJ8x1urev~zj2zF}?R<{+t2!m%zVBv5mGD%Z_Wa#zd z!_gNW-;Y-Z4Erg15Z%E;8@y-@h7W(M5wKC6^1hHRPlJflalq4$)()POCWZW7`SN;* zAndx?DKs2=YLdaL;$0F&eK-)}WgJ;X)&%Q@zD|TiTI>1SOYJV&w?ik>J2iR=s_sfo z9qm8PVnyw?K=l?MBpjzZ0H&?#4pkl&cjAcoUe2YKtfdv5QEBm8r4h7PKbM)Yd?=qY zK9ql_M}({+BFqlO0(21g7D+Ej$nEwG_10xWyRcZm@G3#c4T1<7M-X=0A+RAqMuiv~ zjFaG&Tc2~bJN9j}Pxt+i`s7#Bn94XcoufH(irrM^dz(`xAQ3YQc_y}do~yf*2#SJ) zCoU|wsBPjKW4n{lw)+d}T($q4an=BU;K&iD6_410a@(oNd~N?^0|C5v4oSe7vw861 zGO*u?!%JUZ`(ps?hj4=-$SnMo`a3iNK}aqm<(?+Rrc=IZqx=EP8?n#?3b-FO62(|- z^d3z?e)pXY{h&+zr$CQAEh~Z?UpV_~7v< zjvej`Pw3Q=A8?@t_vXSMAjrgc@j0BIj~x!z%gP$&fc%vim(c@~P&x{bAzW|Q#K%82Z}PFK zOh<+hqh39IsBcl4p#8#l4L`fcGifJo)cNcU_v^$%kAPs^#+1ecxN~$c?ZqakPcXBm zri4k~LV`8k$LJEj_S>9(h1s(CT^>_x8mQT9?!=8uVE7{#+(o2evbR@&-?rcHK1C+s zw_DAS!owh-ZmgFTw=GXN`YU{7a_MHtDPAGzeTzWQZHO7V@iqSONIU+Iu6(EzWs8`m ziFJw+Kl-l^=`d{gh7;Ctk3J)T^rIE%)NT4Le!OTscOG3~N{s8%dr2tk6lIe8E#-@c zp_!e3$hz#?EqQ}oI3uy(SOeH`H(sM@AlTD*+$u25TLkn@#t@-qD5OYMW63`-E?@Z& z5!R%e6uP}nu0`;E1dV#}z3&z*kztOl?`@0=oxH$jSueABE zr3Lnh8-@j)%@lq-4Qwhvo{UbXLAi!lTh9#x(;zmrufJ;ZyU zWQ0LyBoxQ4cOKJ&UecUkX$r&Rc{hM$CN3bYhlRIZT*~MPKv;EU(`FQdlMzK->zZ~$ z0T#ByQ5MkfO9`ZC`LT_=L#`khp6S+l;A`!ou&L3YyIILAf;P08qfO4@)(T|NE|MWl z17N^WU9ktKgJGJ5w0G(<_FO{O;-aKLYZ$ou)XlA6d*6D#LZhG(@m0$Q1H%DakhXB{$;Bdx9vdi|yNKihOC8vzza+wGhTq7&d;WqzL~xw=QJ^-j5? 
zv<-CP(y+a#6jZ5Lrhu_Vjhg7z%igat3XMZGJ?Ie=M?Sz?5ywtQ>b2h-Kdu=6!#Gdv z@Pu#h0aVCT)IT`_Z|JgW8{){VgRDPw8wtURtA=(yM1B;Y=j3$%C!WJ>)8X)giRo8` zBK99AT%T$X_DUCMy<8ZU;DA`vJ;@?K(snzk88ePDlzV4sw){6W#IU%=%}V}pl@tw0 zRjO^_d$9!ns2~CnFENZ-{(!|^^?tmyM9%9Kmx0fnLkK_d)z9t8?)E~2O8sM(8`}Lb zbZ`g!D_+_(p8}b++xG~7o7Iyu*V`UUZ@-BXE2z5IUC8dS>SxPCsv@|O0_`c?^&9Lr znk^yaK#Nv6wc+YhE#=FnL@`#cIs$>ZB5NElf5~ib2E1W`{itsw6{V3g#U?*EER6FLkNr4tciz*}XR@%&5`s z-CoE)5HwRof&VBjUgjN!CpeyV^xR>UmApB+zIQaQpA@ga4vjdC_@MBo{_bH7FlNN8 zwCvzh^Czy8`s)F*7D*KmfIAY%T9HNSAri0_qCxZIN1Tkj% z#h*xnGh;9ulkUtc8i^kjz1ldg*^wa(qk#>C!&1%L&Y8|Fw1YPmg)U=PZ^QI^F5n-Q zKU>suMPXBETv4r?(1T2^2s9Z z=DpeMT!&|SGOqNm2pg)J(f8UYJ(tP_(LGc2fi-vTQt_DfRRpsn(1F7lRSN|6?@}+2 z>CT_XEmA<|jGLU|M2(M#TIo?z$S5|WtVZQwNf(+f$i}DRsIcsY44?&6!N>QHA5G{} z-Zq!6ltty$@5j!O+8;O??{mZdGLgZ={=Y0+U}a|f|4wAoXlf;GwW9d$=@n%3%FjfS z@&}@MeIj^eFXuE%9OaBM#gYw`kV6_pDvM8E-PGN603f?>ymU?=1pw~@u&>?mL#ckL z)PMd`rhEJKA>PBP8kQLH%?_$0g8Eh;syK9e{t>9#7+m93^}{zgyNEHxRgt6kW(qWE z_Vj$dF+x`)*6BYyo?dRa6=;|EqK+M0nzGdgdI(Uev2Cq*-R*c51@?bo`E?5B-TvGr zx9ME8dG!3S$Ok|K?q`nWnC^oEiH>{@!D1V?BsAcGgTDkI6+thb5tT<}z+5&`4pGe@6X#auyBRg~d463A@Wglwf)s`A$f98~8@zI=_3)~6 zj!T%xDatOO#8I53EdUW^_H8~51y-%G#ga`>KNQhxRZzc{gyTAqq~}8FfnH4GBuHSA z{=p}82!#@YnnrRnri^-YF4>VAs6nrC$(0{HrTILLBr5TGnR1_Rv(*RCYS*|0VQIwt zeLtQ`VYXp|r`$c({!#8Z+dSgzTxHt0p4!u)(txG;FFhny?DeQ`j^ut+@zmMe>d&Wd z3WIX#sLbVQoqQ8KXi3n#?{Twg=9Z~yp{#>W-Jar(_skdNJ{#Ujk|vc?KOYhQ5P86A zjMp9LCsBJSVb-&W$a*SS9FHY*9J_h?*$T87V0s=nbwf}`W{V`;6Qux&W&R3vTzNcF z)J@j+O1(#bPN7D&;9gTRK|6J zXGH|ATdhq}`SD$?Qp5B-GF?i&O@qN2;Yh^A6dh5tnfQqfFJ=cRv9wLViX3X#?WA|v z1V00tNSF5UrSwaaCQ6Ht7Nntp>(qukyYt~vyICc8_JIX3Ghmyg=1m<&zdi$fRg}4f z3e7~#y11?S_-ds{x0MYsyg3Wc7nG(A$? 
z`e{hr|FnwQ%)L$c`(SUa@TTh|y7OKA0LBocqFKQtztJQL*2kqvmvgbaq}o7~*Y$pc zVi5s@!maWfgu^nsMuTtWr>gp~R#$aj)h?8SK(z4DhZ$^4&k7@aLh37}Dh{WmZ`4f> zle{xr@-MblNZIbx3LRij+JwS#M#=rd9I8no6k}X|v^ZdVt`I^Q^;01m9S123L6J=O z#oLZ^t8va{#Q{8!m2DKNP zcazTA+08uNcY4j>6ob`eDN9r%Stl*Y!8SU724$zaSR)>A80@_?(=&3OXuP@*DS$?!Kt%` zflk+c+GQs#t3+$nLnj%(S=@9NYg<8{_gt%AZEd;%u*%Y7_&6oj>%6}BLOz&wy}v;L zhE-aORj%3FJez`(ziHW(D;)Eck=sxS$Zet7(J9gU!BVEjLbiwZv>eR-eZah)i5DLs zmK03zX?1$QJD@c@FdRhGh!FlGc_st(?B!>k^C{F3GhKcf;jH(@om2<6_v8$FXUkfU zJ`mjrUl3sF7B=zY4s0E+{poig$x|^V?ql~$KQ{O@GJ9s@lme_G+3?gXy39;l{`cUwbw!KykKki` zKhgUb;hrr4s@o6lg(CJ`^NsUPYDnLjs@*X({g}iMp4L)k_#`j}V}D?!)@2FOdRU`- z7b1Q*;4C{70I3ux(%+$f)?~8{_Ef!saa>w9-BMOeRF@tUyYv|KtZE&M-&*D_B%3^W ztcX8EK^^jLX12b#8AOSrl+a>poyY+-GVHsas|XYI7Dn*yIn_|exPgupK;3|*AccqcpCt#7Arb`3jM*EJ@dUWU!JR%%>>OAdA4rh`9eM=LX;i6u;;5 zVa)8gG340ey?;rbS3bHnzTZlVFFQP0PHg8|X-goMpPrkzde9fnqpWv4%2^11S#wJmzr=?9bujQO;O| z$VQ+#gYB7@8|Y*Gi?_elv8;FuBh3V!nnb}BP5{#5#IU`zM2dKbN9(s8~zPwy&bpzVc{jS)$AryTwD*)6jJ+H=?ql#+)U z;D9g%$z7%RTdnx{)s=*EXI@aj+_uXy#w|~8**kV=gF16x;jNR$Om}zigw{PT zm;#KhpHB28EC@x%e4vD;BdJcLYlZiP7%1zflNbXCo!lEf9Lh^`B;`LPn3b38*?obD zYUB0`jlrVick!8Pwa%wcyR0bD7VR=Dq1CPJT)7;!0#_SONgFHIZjJZquf&NifI?@PrQ)Frf8D3@MKs z3S{q`(n#blM0}&`D9Q(?wKYEs3Eh0Zd_qpa5nYo`q^4Qr^HN?T5&Fq67P*sUWusm* zKPXuU!h{w`VXmu05g4h8?T29me`kOxjv#gbOz8p2)dP>?Rol7Vt; zHq!Jw<+y-63)=jL?}JDEmj{y*Fz2xmP)WUYicg>Ikx57AJ7Ur*sP~qm*e>q<6EfYA z6hn+UmT{nM*dhAD*u_4DAN;8!P`7}ovz(AYeG3cAJZ48{aVJdb#e507mRBfFXeyIr z4lGNxCFNL&GGajhcVGm0_^1r)tlq8mopS3#TEHvPC(=#7&4PSB{J73%qgToucOGZVRn zD9Ds4xKtj<@u`sDQ()6C73B@RIAo;kbsLPejMirMy%%}@%(Ffel%hQ9;Z!pw`Y16r zfP2_5bmPGyMbvu}{=pw?Oqd-;L+l$E5*()m!F+OJHe;9iLXjwt=jf1iefX*OxbbY| zjoJX%uj|L`(FMUI5@6K<$V*^`|s70N|vA=c5!kva7c9g04P~}`F)_kAC)rJ0vqUU-2V2kZYCo;*xsp!o7-f2kH)U)dR+oG}h;-I@tLp40 zvckno=tu0QeLZUH?{s!#C`iJ~ogbhJ^1BF@fgur$ifG6tXUee4{eBES?xp+YUs`W# zx__wt`s`{V+w-P-A5PR3n%nq_U>Fu(k+yG5@19tHzS)S|^R~Mn24}3r(U1*Rq^~4v z-@ZFt!kE)|{a&eZd%4|tTAO>w+MQ?mHXj8sh&(9cW?*FsR@i~*-PF}x*#l$5Nq`h3 
zre=umfk9?0tPkFBO_)OzA}6%oMX=tRal4{qfN2w!Scw0TM$xgkdMw-~np^2sNBe@> z)JU|DY`$Tl!R==z;h?6C|Iw_jn>X`2GhyA*DFZX)7@x=GLV&opY?6r~ zVs3~WV+2tV_(1vcVLx}w5gaBs7QjHgda>1AHnC*MKd#hq>GB70!xy*m z7~bpJ`Fc~)D05FvlwY{qda2YU47zgk0-S?fkWjCaxWF^=9`v`EO4kFeZZo>Ju*@Sj z2xLK)pl~dW0KCkltPC>r?%N6?Rw`@=&QcA>izmFR(fWtcR)BjV3wj+(4O>MGVRCDv zv=S5qCgm`LVDQShYRVpNCY6a#ro0v6(QH4%6_Zs)p)CPE(E_-X4dm`PFbh&C-se9;sP_@I3b;7PX0c@Pv20oe!kue%RZI z+2ZPM#iBt>tgG^yTQU~IeT1E=UDCJ2{W7&*zQvg>-D9p_GsP?M^`+5y9q!8Baz1b@ zN|??W{Be4~oigQ}oyD&R2NUb*^n1fBHQ!1Gtg$#tLj8U&jD}?#2R@re;;#6BTub8D zMGWpGtQ;JNk=>G|aRdV}iy=k-6ovwh>bA~`5r7QR$cO{Ljl1<{I>e1R$dLjj{>ML( zUyt+5jw~kp@EI&PIndJ%aC;%eY1^;-o>FB|=f93XdZ%xWH^XC3m^5Jbt##c>K-zlB&vI|Kf%( zoU0w|U}npa@;ISF+;Y!EB)Ox#@_a%1#ft*Us3WwR@iWDS6g@G=#f4z_cRaFO-v&5v zK+oJxYLD< z3NS0cT{)L6gq~@>*?|g*_MIb}FsUVuP3b53Z!^t(07%5Jo(M-$L+J;W^MWJ$){Umn z5!mXPM2Eo{Eb>5Mv4EfoNc1VkPDw?LWC*X|PcDl)Q zt2O1)LDA--Z_whREoT~_V80SvxnJmzXW$nwN5P+Ht+c!@e_cjNHRPnCV>1cb+g{O8 zzu|FSeVIcByF;J#Ct{Oj6A5o?yav=KZ5rj+x2fAMA_cjq%htqViMrbcoo#}&?iho6 z7oZO`qk^J(?{#6q%tcpeixTz7#wTZlv=gdC=#w8ge9u^bw$JYjvWZ3o&YC0MGI3e5 zL~he`vvRsk0$uCyhU?eXbwf=J8IgHo-(~0G+M``FHQdRr5+7ZkkWEs!Bcdbha-)*mz(HKoOVGl zbn)8^`M>9#4YYLmy?>;(4+($dWoI#korR^JSh@brUuWZF<DFr&yQ(G`cThuW-U>5eC*2g6bT?VPqsOVf@B}W7BVNK$CEAHwM zziU&i|HH%8E`|iGNvoojpu4GcN$s84=e~BkWo{vZA^ z9SsDvf#0Kr35&sxMvkw)j%r(#(w2cXq6Wb( zhZvB0so@5YAV+bo#15&$J|5m{b?4^7vbqtl1g?LQ(-llkkR8Nej89y3!|e1j1F_(0 zhV==W+h>w6`2bFD4sL+%ZlVR{y>x-#=f9w6K))>n-$(0%G=CtrW9Lc(k4GpEI1ZoY zKP0aaY8uzKUDR%6@lQwwZ)E$P2;oRXRHEi?OmFOL9;!hGIoj>7kn4Peu*qerwwp#& zY%68u+st`FkFWcmeSO9kr80XaVp?pegU)5ZZ+eMcGpet+DNZH~dZl;b&S$J+{)GNd z+IaorrBeOwidv2acTkEwJie*=6mT7Z`aGXkapwmLIIY!YZ= z&*Wu6(z`hSa2a68abT#wG8iS1t%Sp=YO?Pa*zT)s$7!{5MO*~f3em(HSi0Y!$J_N> zC4+ZxO_akuJ2o=B2!)u9)8Ozo9|2oIkj9w~>2LRx1qEJ=c zxIdU%mX+}<%wGK>aOaU^+;KZ4uG*)8w-pnuv~c|tn#}t^k(S%qKS-5i7KD{v4$$v* zw!}h^1vRLP5iV5;Law^1;sYhnq8_)z6nhAvOq5EX=$Tb9`9BD2f&}H;!*d_j8KG1v2z=FY@CM3$m>p&Gutoit>%RJ4+{jFBp zYFs~Z@=7UTxqkGFd3=)6!XNoiuD>oVa7|M*P_yU0`#>dO4MyT*J83L0!T|blU3dx-F#EQ 
zjHupom3W9+XKf_11-CJ@w>HoiBo800wt#D9fO(kjluh81B6o5L)Gh?bIZ+aGkiN>W-3fD{WpE|3RG~(J!4xRJ}@VPM|JMmjE>1A$q z1k`H3GHPJF7dw2jtx^}jv1T?`&@z-B^K46PqEJNm~Bt-^+QO z9JjNCuSyr?#asN|@ZOfZ0RTEc#lP-YUO3lJgxB$@zmP((C|UCnTnNv00eZd#lMuLQ zFq_S~-8=l-oN*Ktol49oB&W zNQ1305(&Fv@S!2DW9 zmq5p>v59vMoyJjWfZY((Y+2J@;06g=cP1~LcZWG_#`u+H#cdz?AXpT9*kPG8xn(g; z;jLpxQ>)G&E65J8TP@n)25<3ng8SCHn2fR{@bz2Sb5J@5i+lpcV$Pvo~-cFv1&(pX`!vE zYHG=bvQQ3W>7{FPTYyWZ5Prk}>wA*@2vt^MYtHQLk2og(L`Y|IY;mX$Cj1l3ugD+^<*-~z4gZ5?1@}{m30p4o z6C4#V*Y^FZl_Of1i`Uwb$d=aK(AwKBF2>%{BTYG3VdX|wx1>%DC8=NF>#PBz-GU!1 zbI4)+tsm*!uU&>@5DMi+{<*jntrZ050aYR!RS-X9QE^y=R3BcdAPj{N%3qvEcnv;v zyZNEEE<=>w*2JnP5!qQ{R^%GO8YtHS_Dxh`k<#+Po00RLJ#tgt1z6@bVD4vR*k3DU zL8}`TXorv6+u`pvSN}Ad3B=$-vdJ{RV6K_=>`IpXb1fHe8+-}N;>^S{?O$S)Wn9BOEW!8$G$)eB=8 zM*6E3$IO9m_0nLYqao876S8A)Vpmv-&6Z_P;*pWy;!E87jIW_aEB_$Z7<3*%1X?$S zJUHb{Sjd_{9Bj6bHG76>@7}zr{;Ow~m@YrE4Tmnp+Qiwx$h~CgGp>Vv;$Pq0;5lE$ zn`2q`xl0M!GTxWHG4n-Z@1krvSK-3`da!r4LdV0+ronvX&MUVHEw!oOP` zs~8_QJa|{)!>%|%r4S)?k4t5uG$eH1^34y8an{G~9*@no$9;!}4p4w}`bOE2&E!XvXCu&q%35s9hxR>w4gUq#l4J3a zAY+go#6@TIaKPu|#ex!M)EWj{ki(zVAShI8Ms=zNnbrM=Jo-vjsj5r|1i|tXlr#3+p3i_sNrB-{`T6mLp%QvKPnTGgW zWkqM6E8r%?do+!djWF_Q`)Db>PZiyk-hnX&p`pxYW8pE&{{d=9OVD(F6O0BcpG4WM zMsIYR1%hJ80og=8VGjKs#F--1;7JfpA*i?WRR=69K)WSunkPlC+#LOB zu(NvlMZ&p*#}(5vID?jBps`-b%)L1F-dUt@SIdv0Qp@k5nxAdBKp87tg9YnkzaNCB z7N~ce#n+Tr_Lq2Vt9J)Bzzl8-t}WS^-#coo;Zps1Sm$OL+T0;knMJaq-TWldsHl!^ z9fNAh!;A$sUHv>_jO&F`L;}E!jY%nDbbwOklmSUDCMMAUVjQ=v6CsZYDsCf590enc zd3g>u#U=+72D*kAH|zE|tU28_jUFY5_HC=JsIeIp*x%aM zp>0XczA4$Ka1q5at)e*2TIv{Z;!nJK5{Iy)C$idbu02g)46$bqlg)QGzvGWjPK0M~!O)?<~rcJ956ja1Z&$ zGhRMrCgTggPd5Wxex)v^7MH&4 z)@GEZ>>%}EN4a^+G@t|%idy)S{Wsm2A&@})V#P@tLZoV>JB1`B@kLl|8BEtJdek&Z zc=?hu{Hd(Jii~Qua zv9kniy(hC4BZth`drjiTgDba7meR}KZ=*i``NrLWUgu=rSLIHu zjlD)20k)x59Vw*=MileUOfcCT&omW${1_)%_9Q^V5_=Z)9Sb7PnPSjIUcPQ!E_qNj z$rsTCFH=AC+RLg1Y8B+~ie_!{u;RN;$9Y>nVUqjgg>D@_pdyr>|C8pO%8JB4Kv#96 zNo_JHp{4#2F99`Cm_6mMnGz6I1_&~nX%(_!@;z?8rVL)mu%8cY+tAZ?@Dah;``Nx9 
zL4I&6^1wALOk3WK+{RAF6Y<-s3MY9;IEAMCi-ZUS+5Sk+#0Px{bordvj*JoBpDJ_Jle*b!uITZ2htf*Yj>1}fY!DXQSf ziNvVvcJGLXS*^B@`{mm`UyCrKpK7=LPSTAyeMb*Hz^~(&9J7ZnK@undTc*9NmM!*& zm;{Uyz#2Yh0~61ml|`6|IdRqsk|7c&EIS!M4;F=qhIt&~mro#-U{uh1l*)3z0cy#y zO5Hj(I@8f*lr6DW*9FLgnUS{8XW8}~6RK4cN>;c{>bQROnqO^I_G~Saq0W= zrpF*LG0=elKXj29+uPY!mZ0ZcZ4Tj7Q0k&EE;=-ib#?6E7)fn`F%lO_N&`8|q9Kxv z0x6?kY_3$I-I*-g#bm5j=SX#M%qZPLL58yBEHROEdj7m!b-f0&b89m1%(o*yu@4c@85#}t@maS&e5JpgklzY z_Io%cHg2-AiK}MO@hVA7G#?c8smM4XQ79de_KY4nxc23B+1xqR${OO$mgmG+XwfUf z?F+SIx4pK5CYH%gWu+0p(lr#q^fpZ(C;rlOQE{h|66~3TgZ_jtaLf!OoH9|ZkDv;9 z`1yi|MT)DiSQf4D^iEtXqxB{FlgCjts#LU6NxsXtNcm5I=s>{{iQLbeY+-STh`fUm z>KE!KMOp!cg6d6QmjOPCo(Muul$hN*h(6%J|FT5F^<+|p@JTg;yl9Y@zu#n`6E126n%w<4G+su2|X<{E}Bh_v^V@X0S&_3f|z&R4Eaa^(ig2S zy`8majz>m=%?b{jx~qRDPHSwDbub3Q;ssDju&`k}FBCi4f`Hdl431{+C063Uv(Wse z1t$h)XZSjNsd(eQA~U?;#x=L!i&t}pf$OP;2&oJ&0;$a{5E>c#(@n9obXlV7Ec6)* zazc)RVd$lA7@>3TK-s}fTG~*aKJ4xkW7~tMCz@;IbZ%-;yTNsPVP2Bkp&bWAt*7VZ zn*EUqk)7dnoNyENPdI7W+%zaogfL*gq_1W}d$?x8!`gE~gDQsHS%8?_=V?EO=h0r4 zm_H?Kgv9Q~qInVuFfKX490`=x-LFUbFCnBr)Ja%NIm~*PbMZ+qQNh85ekA9lmxnE_ zUW0WJAM;+~)a9yU5R%yN@8m_Z#DdFOulh>-Q zThoPz!-^Pe5 zYf%@L#%UvU>YbjBDYPSUp_)gWep7QHy!E5#VoGt@A$&@IGszqZyjx)CzsvTPDpR03 z!$%6kJ5gcoc7R8ewbgw{(wtnK@!TkR&Kv|+Lm9)hQ z)BC0NPTtlqSw{YvDxD?kW!1$rBfS-*x>~z2xwx9xB;)G?7`Qz3D4RJwTTPA#0EqC- zJ8u2eZnA~PYW;A0pJ(%-YGz@2!g@r#GHpAxbebnqS-mm}EgkAhQ5h0p=ga+Y<92gu z_8ukmrSHhs@NsJUG_$2`V`IsbU~`At$~~n>Goz}NBx%#;7It$tfhXMr^!u=r=f-E# z+lOT>X{u=ONN2C4-GO1#dtG*@k$LsKIrlX%m##Q(yoJro6q4zc(U5HB*@Z?u6HL6) zcKhI1>*|%zMEr&N@)KH=IlZJ^n56sNmV+Xpu=dqxJJlyq<5s4hPxo7!NpJlyfwbnl z(oxn?Su3luPCx?1|4Vr(#j?p2;S`%(*4cLU6m)w)r{6t8Sk zKHV;!v3fis}LrkP()x@wY@k4VMLa5*g@s47hFj35> z`qXn?!+z&{sAkO`e~F;#>6sgKI-e^JB-?tfvjt+ZrXAIMhNBjr^fX(3T;iUezqEb2 z^6;({FIqCX`9bjxkv__qL@o8(ft<4yN8n!p49ShW^ADi0nGBiSIF)Vi zRmyUtMLX{n?xknq9CBgX#O$lV)*tIHf;f56z|coh zQ#tG1QELhgfYMLl>0fWQ6`N__g}LwzHD6x>Q>q!nrqB(^Qo$*$^cc(F4PW8(n=l%l zsX|&hmj$s1BXs3I#Rr!hv9Oe_*nH;15un~;0EJYIqd8f?(k=3hyr@ROhc$~hc;(N_ 
z8Q6UPWOIT-%I#7Vxpo;)H8y>-+i);3ux*a^!+(emb~SEc86?(SJOna%OTSP_F8gDz zZAs)Xa#_E5ju1|ssx8b!U!y4Z9z1FNn(nNkYYEGghyW0PztuSdPTo;%?;<6x7^gNXM6r za9HvO0`Sd{96vAi7f9A2iFrUG&uSZozw9Dc^Ags?{KEM9A8OeD(4gF~XW6R(w3|F` z4ioU26H}`r2&m`Rbp&h9%mp zUCTt|W{A=Zl6YrjZ{-4bGaH8chv3LS6L(Jf_Jfw*VqH zwPg9a^|cpY0K}Lx0Hk`&Y6amY8ul!QUQt_}UB(Kc=DByZp&t7rK;qY^CeVR{CbU0z zw3t5{dLRIuD;&F&f7wMBB-tPox$Y+_x1&kA-XY%Q^n=`!eu&J2-9~Qwez^NT;1<3f zs`%Hhl@8`EUEY~A9#cEba<<#j;h8!4-1a=OK#Z*15=rm`!E+u`1A^?}h}B1EI^LcDS7N>&5f%e2czznP4YdV*dJ18sH4*6XBtxeU%lq zL-7bwLHGN*3&Qc^Tn_Fkb9}DGfa}%3{Vk`UAv@9OXlv3U15ZHBpVh6K<#l%Znl|Eb&7 z`Jc`Bbc*c%Y-a!0W?tTZKKRebhJ&5?{|sreYt*J}vDlDz=G5$w_(cg`dEc)Y1r`Rg z@-c2QZ7sP%gUCg$0YrX#)8;T@Xk=0#z@!t-?jP^(9~tzJ&6VSSEj^m>e0|SWewO?% zeb+OWO&1bdBY}$#A8g<4vF(pf${H^C#?qnf<@u`s$1D9vUdIGPnF$h=`f!{#;YSs! zyfH-&{vpO+AJK z*2l=fxQjuewWw1LXQ9lTsVZ^QU2Lba)Jae27M|KmHf4K5FpYI3f!QIuEi%5A;egmn zeOB|XaUL87)KW;GwVM0WbtaJw8my4Eyh?zRJO3RtC?FU{pQ1{6*qP#}I@wM~>S+d8 zY}z;}o?e5*9~+66Y_9P^0|c^wnT>BDqtucS#llsCB8M`3tP;@U7HT-(;CBMP5#Xw; zW)gGBG^&bu7!`A*`ZU{lqzYr8qe4mS3xUwANwX5HUDm;xm5n-l)WQfI66``g`#uAk$BEJVzrj_&$BSqNhdn2xNL^&2a?L0o7a+NDEqo0zA5j-=@nD@ zR+a7&3X;2kWmZaN zcwfRYE3te2zBBFj*{#&ty83gOyX7f}COJlN*`91e8Pig@=AeMIpw+qw?{7ykaR28| z8m~C#KxvlMXwcFT+k0CM>c#@t{+~bkU6)8&ZYesxoGpT~fy-$nDH&};CoeaTOGB&n z>Rs_e^A(6AEo*h0NAag&a%! 
zCo+K9jJSC*6CoGU!CmyAHh;NzG?kJFI+hM>r+S7>?@gpMyMYLHN^?G^UK2>>HL*EM zf+w(NIy94K5>E6S&t??aGKV_Xhw##-z)EsN2atHfP4A;YdazFb$y0|~ioo<%Ds(-VDI>Y;R%sF!<0 z-_M}jJe@|!;+lF(a0iPp%hA!;;6tn~i4Qs+ynQDf2PQxF`f@LMUmz1q0eF^ymw{AoVizTB2Mt`v7i5)zE-vd~xa1o;9?ooyTW{odS-hi~hnNUZi#3a@l{ zc+L*x1n1(B4?SF?2>b;G$spkSJBrBp?;B|ST~+vZUHm7#WEB4Q7X7R&tp7BKEd(Q=jdY9*Wg>zQg`%ZU z6f8i{`$1!Z5n&CGn5_VsWmz%}gG@+TARstVA866?7(JF?5mUqtP zp7ZTBD$EToezZYXkJ1vIDG{R7riy_oC!!Fy!hA+MmVQ_KNug$qhbr2td{==m6`ZJ$ zL=l%QvzUxBq7a^pU@DZMWS-2)5)>#IRsnDgO`A`A8UtH`A%cO!go)S^=@OM6{yE^9 zu#wC-Dm#K~j#{7gp@U61k@hZ$*M#hfw>2JvxrXh5UB&PN{Zj@+5W5F7dX5zhr_c|k zpfO434xwcyBV7hx(gf~Kr+e0`n%X23*GEqMn-(OgKyK!{vw?#|p6znGZ)Et@$sn43*>)?&mndZs+ z7oApDoeQJUy)al#11C7%bWdkY#9avtKb|+knhp$LjLqBSM=W(PGTX-;M*yK`wIC2M zL7}yznrH!nVD|j5Aw6kXBmN8iFbWMw{ca!-zQ#U76#8SR#vpI|1;1{`%I)*&iYq*< z?&S}+e3N=`r~*jxne-mXA}wN5M-MJHmJ;^RO&6-iHwa8V6^&;^Iu;=2 zS0Hj}P5B|hcNs(RrA&Z1E|zPU>IY&K!*c#5iMH`$+EyIe#O* zmN@Gr?S!eio`*WlX6dL7-v(fK`t}|9F1hkHx_)+?Py&gWRS;-WB?ck&PTT5VSGX&m zpYLLBaIh^d+r+q*Bbalp$gC{{3D#NexFKt?!`V@W&J867je6 zoC;wV3c?GO#HZ3zRB~IPGPyRB(Y-OrH#xoJ$9K$a&}HrXgDWNE&^l#@scXOIG7jKV z`Q8EClZTcvdHSPe48tFzE-^a%i#lpU>1wL!Yz5h8OR}ZCSHCMhCw;8;$T;M|K?NoK zq%uz0%as65z^4)9M)3JjrLNYVn)}!r^h&)A7ecb+)|(odRBT9O4*G21(?_P}xU1lA z@3psh7&hm<lCy&;Wl**Aie=n3Y$08+!x6o+nf3cKC?75qf_^b`ej2l z8S}fWS~JF+$>_NG9dILsv8J4=+$`Smz3#ERZzYEPO+k(#vqB}K}bvm%gA@C6a zSpAX;NQjV2E*&|z4+@~*VDF?Ggie5Et$(?Ha|3xPhs`5g@NO>rS@84&;T7*E;e)@> zTe#L`#%&DsKms>|zrNSGYMIB&Hdqp!>j%}r;{{gs%h5y)(X{T*`AKsK+aeF*Y40Dn zoZeo_R=>Ag1lCrhn-u>EZ{n9UGyEx!`LGyZRB481yWu`qoY!7ji}MrmpR$y#ljcNM z#fn*Dq~`s`Fs#eK>C*8+OueSweVO=cC{rRd37W~N?bvp=lO3zEOhFc;M_)KY*8M^i zv79_nsNO;>os7(viJVw@8;3OmAw~9ViUE0}UyTF?GQ1slujXoq8gh0jMb85)Z3_lG z9OmaW$6UJ=lfuPHLZhg9J1ZLgbuzLNXSA*ungFB`0jdbUAF-s`-mqE%5Qb%ksW0V2o#f?eZ|b zwB}_+zoxgj`R1$kggLwD*yCzs65SQH{v;^W#bd-)=3U$@bYz@eKbiwMB0?9mxIL$W z;V?nNmFnA$f0 z3)*L%@Yr!c5*aG-9|ZBea@)?{%D`-Pib)Rub*}lV?2E{#ue;r@*ZhfV7eV!T=G#jX 
zZy4GC9)MHQZ)F}~p?c94KwFILRwnjXJ|Gw*3qwfV=l~+60X2n%k+vvBNx^9?@+$h5N&Bq5m(xJG_Ppi~v&Lp^x-ZnPm9J&ObSeOH(_&bJh+G_2zr;gLv9I zf?GwhF)>mSF)=dpZ0vumvpomgQm~c363IOKA-d^giKHiY<6U!d)6;vJRxe)%dUt*b z`Gr^eaPtvRGZl81RrGhKlX+b2ts*=#A`O5Yy=X@GE+dPL&_hmJ46F&9UoeXU)=&@# z9Rv*g;Yf%PW_n{nKtB|eaV7#$e6;$p0-=LAF9)!aIj!en4%V78zG__r*7QP2mlz>Y zz1%D%3K(HG`!z^Z21wz+1rgXr1&@qn!~_L)5JU>T@DBQ;!mRJ8!{rUBIZDIR4WxG$ zhO!=@R@7xmp5^fB(}hauGQD9zZDZhKVRtXW8gLAw7d=mj=s;Y^Ukbr1ia(URhhS&s zAj*g`zGmaAuo-oKwB(IYksNWkl9kim95=+J6Nn%D+#UTsxEU=kI8q{*CLb^UQortv z_Y;ke?xp2(zA}2A06S2+Kn{9g07+e7*QKRl-`Z$9S zD$qf)v5$qctB8P#M1{V*&AwV^|C&IGpmH_tJH6jLy*Z0)33pc;*Y45-n z*n+Vh@;bM$HJ`G%BV5Fqia_W#y)9Z=RBYi3 zK$lQ1SRLU$p~z7TdR)Y&*%1w2oW|HoR4QU{aUtl*pF3of(q6GpoNT8t)k$HZlQx{I z39?|-JT5k_PFWrmfxp5$Vr&M;;w?941Bpp2@k&1+l7wc>&H!RiuR1f8i8SiBc<)DK zDA`S=h}If4r9Es)bJT?HLhEUc(~TCyL4 ztLxj|{rU3!>z%241LoFjy`lN*JhuGFBLlX%k*k= zxmnnFEw|f624=Z8pO2M$d2{<+TD%Qa;MK#5uSpTBcaBSXYBMek%-o(zqeoJqxvVIc zH8UaC(!rH1_)i9y{3waged*v#I*gh&7SDQ8GC`Np{)tpRAudm*({h{u7d|CUeCf4- z