Commit e30c8932 authored by Evgeny Kusmenko

Merge branch 'develop' into 'master'

Develop

See merge request !34
Parents: 716c9542, c152d842
Pipeline #269180 passed with stages in 6 minutes and 46 seconds
# (c) https://github.com/MontiCore/monticore
stages:
- windows
#- windows
- linux
- deploy
@@ -9,7 +9,7 @@ stages:
git masterJobLinux:
stage: deploy
image: maven:3-jdk-8
script:
- mvn -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn -B -U clean deploy --settings settings.xml -DskipTests
# - cat target/site/jacoco/index.html
# - mvn package sonar:sonar -s settings.xml
@@ -19,7 +19,7 @@ git masterJobLinux:
integrationMXNetJobLinux:
stage: linux
image: registry.git.rwth-aachen.de/monticore/embeddedmontiarc/generators/emadl2cpp/integrationtests/mxnet:v0.0.4
image: registry.git.rwth-aachen.de/monticore/embeddedmontiarc/applications/gans/mnist-infogan/gans_mxnet:latest
script:
- mvn -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn -B -U clean install --settings settings.xml -Dtest=IntegrationMXNetTest
@@ -33,7 +33,7 @@ integrationCaffe2JobLinux:
integrationGluonJobLinux:
stage: linux
image: registry.git.rwth-aachen.de/monticore/embeddedmontiarc/generators/emadl2cpp/integrationtests/mxnet:v0.0.4
image: registry.git.rwth-aachen.de/monticore/embeddedmontiarc/applications/gans/mnist-infogan/gans_mxnet:latest
script:
- mvn -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn -B -U clean install --settings settings.xml -Dtest=IntegrationGluonTest
@@ -51,19 +51,19 @@ integrationPythonWrapperTest:
- mvn -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn -B -U clean install --settings settings.xml -Dtest=IntegrationPythonWrapperTest
masterJobWindows:
stage: windows
script:
- mvn -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn -B -U clean install --settings settings.xml -Dtest="GenerationTest,SymtabTest"
tags:
- Windows10
#masterJobWindows:
# stage: windows
# script:
# - mvn -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn -B -U clean install --settings settings.xml -Dtest="GenerationTest,SymtabTest"
# tags:
# - Windows10
UnitTestJobLinux:
stage: linux
image: registry.git.rwth-aachen.de/monticore/embeddedmontiarc/applications/gans/mnist-infogan/gans_mxnet:latest
# image: maven:3-jdk-8
image: registry.git.rwth-aachen.de/monticore/embeddedmontiarc/generators/emadl2cpp/integrationtests/mxnet:v0.0.4
script:
- mvn -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn -B -U clean install sonar:sonar --settings settings.xml -Dtest="GenerationTest,SymtabTest*"
# - cat target/site/jacoco/index.html
@@ -9,7 +9,7 @@
<groupId>de.monticore.lang.monticar</groupId>
<artifactId>embedded-montiarc-emadl-generator</artifactId>
<version>0.3.8-SNAPSHOT</version>
<version>0.3.9-SNAPSHOT</version>
<!-- == PROJECT DEPENDENCIES ============================================= -->
@@ -17,11 +17,11 @@
<!-- .. SE-Libraries .................................................. -->
<emadl.version>0.2.11-SNAPSHOT</emadl.version>
<CNNTrain.version>0.3.9-SNAPSHOT</CNNTrain.version>
<cnnarch-generator.version>0.0.5-SNAPSHOT</cnnarch-generator.version>
<CNNTrain.version>0.3.10-SNAPSHOT</CNNTrain.version>
<cnnarch-generator.version>0.0.6-SNAPSHOT</cnnarch-generator.version>
<cnnarch-mxnet-generator.version>0.2.17-SNAPSHOT</cnnarch-mxnet-generator.version>
<cnnarch-caffe2-generator.version>0.2.14-SNAPSHOT</cnnarch-caffe2-generator.version>
<cnnarch-gluon-generator.version>0.2.10-SNAPSHOT</cnnarch-gluon-generator.version>
<cnnarch-gluon-generator.version>0.2.11-SNAPSHOT</cnnarch-gluon-generator.version>
<cnnarch-tensorflow-generator.version>0.1.0-SNAPSHOT</cnnarch-tensorflow-generator.version>
<Common-MontiCar.version>0.0.19-SNAPSHOT</Common-MontiCar.version>
<embedded-montiarc-math-opt-generator>0.1.6</embedded-montiarc-math-opt-generator>
@@ -94,7 +94,7 @@
<artifactId>common-monticar</artifactId>
<version>${Common-MontiCar.version}</version>
</dependency>
<dependency>
<groupId>de.monticore.lang.monticar</groupId>
<artifactId>cnnarch-tensorflow-generator</artifactId>
@@ -246,12 +246,13 @@
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>2.19.1</version>
<configuration>
<useSystemClassLoader>false</useSystemClassLoader>
<argLine>-Xmx1024m -XX:MaxPermSize=256m</argLine>
</configuration>
</plugin>
<plugin>
......
@@ -14,10 +14,15 @@ import de.monticore.lang.monticar.cnnarch._symboltable.NetworkInstructionSymbol;
import de.monticore.lang.monticar.cnnarch.generator.CNNArchGenerator;
import de.monticore.lang.monticar.cnnarch.generator.CNNTrainGenerator;
import de.monticore.lang.monticar.cnnarch.generator.DataPathConfigParser;
import de.monticore.lang.monticar.cnnarch.generator.WeightsPathConfigParser;
import de.monticore.lang.monticar.cnnarch.gluongenerator.CNNTrain2Gluon;
import de.monticore.lang.monticar.cnnarch.gluongenerator.annotations.ArchitectureAdapter;
import de.monticore.lang.monticar.cnnarch.gluongenerator.preprocessing.PreprocessingComponentParameterAdapter;
import de.monticore.lang.monticar.cnnarch.gluongenerator.preprocessing.PreprocessingPortChecker;
import de.monticore.lang.monticar.cnntrain._cocos.CNNTrainCocos;
import de.monticore.lang.monticar.cnntrain._symboltable.ConfigurationSymbol;
import de.monticore.lang.monticar.cnntrain._symboltable.LearningMethod;
import de.monticore.lang.monticar.cnntrain._symboltable.PreprocessingComponentSymbol;
import de.monticore.lang.monticar.emadl._cocos.DataPathCocos;
import de.monticore.lang.monticar.emadl._cocos.EMADLCocos;
import de.monticore.lang.monticar.emadl.tagging.dltag.DataPathSymbol;
@@ -30,6 +35,7 @@ import de.monticore.lang.monticar.generator.pythonwrapper.GeneratorPythonWrapper
import de.monticore.lang.monticar.generator.cpp.converter.TypeConverter;
import de.monticore.lang.monticar.generator.pythonwrapper.GeneratorPythonWrapperFactory;
import de.monticore.lang.monticar.generator.pythonwrapper.GeneratorPythonWrapperStandaloneApi;
import de.monticore.lang.monticar.generator.pythonwrapper.symbolservices.data.ComponentPortInformation;
import de.monticore.lang.tagging._symboltable.TagSymbol;
import de.monticore.lang.tagging._symboltable.TaggingResolver;
import de.monticore.symboltable.Scope;
@@ -241,7 +247,7 @@ public class EMADLGenerator {
String b = backend.getBackendString(backend);
String trainingDataHash = "";
String testDataHash = "";
if (architecture.get().getDataPath() != null) {
if (b.equals("CAFFE2")) {
trainingDataHash = getChecksumForLargerFile(architecture.get().getDataPath() + "/train_lmdb/data.mdb");
@@ -405,6 +411,21 @@ public class EMADLGenerator {
return dataPath;
}
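// Added for pretrained-weights support: looks up an optional weights path for a network component.
// Assumption: weights_paths.txt lives in the models directory and uses the same
// "<component full name> <path>" per-line format as data_paths.txt; if the file is missing,
// null is returned and no pretrained weights are loaded.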
protected String getWeightsPath(EMAComponentSymbol component, EMAComponentInstanceSymbol instance){
String weightsPath;
Path weightsPathDefinition = Paths.get(getModelsPath(), "weights_paths.txt");
if (weightsPathDefinition.toFile().exists()) {
WeightsPathConfigParser newParserConfig = new WeightsPathConfigParser(getModelsPath() + "weights_paths.txt");
weightsPath = newParserConfig.getWeightsPath(component.getFullName());
} else {
Log.info("No weights path definition found in " + weightsPathDefinition + ": "
+ "No pretrained weights will be loaded.", "EMADLGenerator");
weightsPath = null;
}
return weightsPath;
}
protected void generateComponent(List<FileContent> fileContents,
Set<EMAComponentInstanceSymbol> allInstances,
TaggingResolver taggingResolver,
@@ -426,7 +447,9 @@ public class EMADLGenerator {
if (architecture.isPresent()){
cnnArchGenerator.check(architecture.get());
String dPath = getDataPath(taggingResolver, EMAComponentSymbol, componentInstanceSymbol);
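// New: the optional pretrained-weights path is resolved and handed to the architecture
// alongside the data path (null when no weights path is configured).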
String wPath = getWeightsPath(EMAComponentSymbol, componentInstanceSymbol);
architecture.get().setDataPath(dPath);
architecture.get().setWeightsPath(wPath);
architecture.get().setComponentName(EMAComponentSymbol.getFullName());
generateCNN(fileContents, taggingResolver, componentInstanceSymbol, architecture.get());
if (processedArchitecture != null) {
@@ -621,7 +644,6 @@ public class EMADLGenerator {
}
discriminator.get().setComponentName(fullDiscriminatorName);
configuration.setDiscriminatorNetwork(new ArchitectureAdapter(fullDiscriminatorName, discriminator.get()));
//CNNTrainCocos.checkCriticCocos(configuration);
}
// Resolve QNetwork if present
@@ -643,11 +665,16 @@ public class EMADLGenerator {
}
qnetwork.get().setComponentName(fullQNetworkName);
configuration.setQNetwork(new ArchitectureAdapter(fullQNetworkName, qnetwork.get()));
//CNNTrainCocos.checkCriticCocos(configuration);
}
if (configuration.getLearningMethod() == LearningMethod.GAN)
CNNTrainCocos.checkGANCocos(configuration);
if (configuration.hasPreprocessor()) {
String fullPreprocessorName = configuration.getPreprocessingName().get();
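// New: the preprocessor name is now derived from its PreprocessingComponentSymbol; the qualified
// name parts are joined with '.' and the first letter of the simple name is upper-cased,
// presumably to match the name of the generated preprocessing component.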
PreprocessingComponentSymbol preprocessingSymbol = configuration.getPreprocessingComponent().get();
List<String> fullNameOfComponent = preprocessingSymbol.getPreprocessingComponentName();
String fullPreprocessorName = String.join(".", fullNameOfComponent);
int indexOfFirstNameCharacter = fullPreprocessorName.lastIndexOf('.') + 1;
fullPreprocessorName = fullPreprocessorName.substring(0, indexOfFirstNameCharacter)
+ fullPreprocessorName.substring(indexOfFirstNameCharacter, indexOfFirstNameCharacter + 1).toUpperCase()
@@ -665,13 +692,16 @@ public class EMADLGenerator {
try {
emamGen.generateFile(fileContent);
} catch (IOException e) {
//todo: fancy error message
e.printStackTrace();
}
}
String targetPath = getGenerationTargetPath();
pythonWrapper.generateAndTryBuilding(processor_instance, targetPath + "/pythonWrapper", targetPath);
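// New: the wrapper build now returns the generated component's port information; it is wrapped in a
// PreprocessingComponentParameterAdapter, validated by PreprocessingPortChecker, and attached to the
// preprocessing symbol, presumably so mismatched preprocessing ports fail at generation time.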
ComponentPortInformation componentPortInformation = pythonWrapper.generateAndTryBuilding(processor_instance, targetPath + "/pythonWrapper", targetPath);
PreprocessingComponentParameterAdapter componentParameter = new PreprocessingComponentParameterAdapter(componentPortInformation);
PreprocessingPortChecker.check(componentParameter);
preprocessingSymbol.setPreprocessingComponentParameter(componentParameter);
}
cnnTrainGenerator.setInstanceName(componentInstance.getFullName().replaceAll("\\.", "_"));
......
@@ -22,7 +22,7 @@ public class RewardFunctionCppGenerator implements RewardFunctionSourceGenerator
.<EMAComponentInstanceSymbol>resolve(rootModel, EMAComponentInstanceSymbol.KIND);
if (!instanceSymbol.isPresent()) {
Log.error("Generation of reward function is not possible: Cannot resolve component instance "
Log.error("Generation of reward is not possible: Cannot resolve component instance "
+ rootModel);
}
......
@@ -82,7 +82,7 @@ public class GenerationTest extends AbstractSymtabTest {
assertTrue(Log.getFindings().isEmpty());
}
/*@Test
@Test
public void testThreeInputGeneration() throws IOException, TemplateException {
Log.getFindings().clear();
String[] args = {"-m", "src/test/resources/models/", "-r", "ThreeInputCNN_M14", "-b", "MXNET", "-f", "n", "-c", "n"};
@@ -96,7 +96,7 @@ public class GenerationTest extends AbstractSymtabTest {
String[] args = {"-m", "src/test/resources/models/", "-r", "MultipleOutputs", "-b", "MXNET", "-f", "n", "-c", "n"};
EMADLGeneratorCli.main(args);
assertTrue(Log.getFindings().size() == 1);
}*/
}
@Test
public void testVGGGeneration() throws IOException, TemplateException {
@@ -163,7 +163,7 @@ public class GenerationTest extends AbstractSymtabTest {
"mnist_mnistClassifier_calculateClass.h",
"CNNTrainer_mnist_mnistClassifier_net.py"));
}
@Test
public void testMnistClassifierForGluon() throws IOException, TemplateException {
Log.getFindings().clear();
@@ -237,7 +237,7 @@ public class GenerationTest extends AbstractSymtabTest {
@Test
public void testHashFunction() {
EMADLGenerator tester = new EMADLGenerator(Backend.MXNET);
try {
tester.getChecksumForFile("invalid Path!");
assertTrue("Hash method should throw IOException on invalid path", false);
@@ -281,6 +281,78 @@ public class GenerationTest extends AbstractSymtabTest {
);
}
@Test
public void testGluonDefaultGANGeneration() {
Log.getFindings().clear();
String[] args = {"-m", "src/test/resources/models/ganModel", "-r", "defaultGAN.DefaultGANConnector", "-b", "GLUON", "-f", "n", "-c", "n"};
EMADLGeneratorCli.main(args);
assertTrue(Log.getFindings().stream().filter(Finding::isError).collect(Collectors.toList()).isEmpty());
checkFilesAreEqual(
Paths.get("./target/generated-sources-emadl"),
Paths.get("./src/test/resources/target_code/gluon/ganModel/defaultGAN"),
Arrays.asList(
"gan/CNNCreator_defaultGAN_defaultGANDiscriminator.py",
"gan/CNNNet_defaultGAN_defaultGANDiscriminator.py",
"CNNCreator_defaultGAN_defaultGANConnector_predictor.py",
"CNNGanTrainer_defaultGAN_defaultGANConnector_predictor.py",
"CNNNet_defaultGAN_defaultGANConnector_predictor.py",
"CNNPredictor_defaultGAN_defaultGANConnector_predictor.h",
"CNNTrainer_defaultGAN_defaultGANConnector_predictor.py",
"defaultGAN_defaultGANConnector.cpp",
"defaultGAN_defaultGANConnector.h",
"defaultGAN_defaultGANConnector_predictor.h",
"defaultGAN_defaultGANConnector.cpp",
"defaultGAN_defaultGANConnector.h",
"defaultGAN_defaultGANConnector_predictor.h"
)
);
}
@Test
public void testGluonInfoGANGeneration() {
Log.getFindings().clear();
String[] args = {"-m", "src/test/resources/models/ganModel", "-r", "infoGAN.InfoGANConnector", "-b", "GLUON", "-f", "n", "-c", "n"};
EMADLGeneratorCli.main(args);
assertTrue(Log.getFindings().stream().filter(Finding::isError).collect(Collectors.toList()).isEmpty());
checkFilesAreEqual(
Paths.get("./target/generated-sources-emadl"),
Paths.get("./src/test/resources/target_code/gluon/ganModel/infoGAN"),
Arrays.asList(
"gan/CNNCreator_infoGAN_infoGANDiscriminator.py",
"gan/CNNNet_infoGAN_infoGANDiscriminator.py",
"gan/CNNCreator_infoGAN_infoGANQNetwork.py",
"gan/CNNNet_infoGAN_infoGANQNetwork.py",
"CNNCreator_infoGAN_infoGANConnector_predictor.py",
"CNNDataLoader_infoGAN_infoGANConnector_predictor.py",
"CNNGanTrainer_infoGAN_infoGANConnector_predictor.py",
"CNNNet_infoGAN_infoGANConnector_predictor.py",
"CNNPredictor_infoGAN_infoGANConnector_predictor.h",
"CNNTrainer_infoGAN_infoGANConnector_predictor.py",
"infoGAN_infoGANConnector.cpp",
"infoGAN_infoGANConnector.h",
"infoGAN_infoGANConnector_predictor.h"
)
);
}
@Test
public void testGluonPreprocessingWithSupervised() throws IOException, TemplateException {
Log.getFindings().clear();
String[] args = {"-m", "src/test/resources/models/", "-r", "PreprocessingNetwork", "-b", "GLUON", "-f", "n", "-c", "n"};
EMADLGeneratorCli.main(args);
Log.info(Log.getFindings().toString(), "testGluonPreprocessinWithSupervised");
assertTrue(Log.getFindings().size() == 0);
}
@Test
public void testGluonPreprocessingWithGAN() throws IOException, TemplateException {
Log.getFindings().clear();
String[] args = {"-m", "src/test/resources/models/ganModel", "-r", "defaultGANPreprocessing.GeneratorWithPreprocessing", "-b", "GLUON", "-f", "n", "-c", "n"};
EMADLGeneratorCli.main(args);
Log.info(Log.getFindings().toString(), "testGluonPreprocessingWithGAN");
assertTrue(Log.getFindings().size() == 0);
}
@Test
public void testAlexNetTagging() {
Log.getFindings().clear();
......
@@ -70,6 +70,25 @@ public class IntegrationGluonTest extends IntegrationTest {
assertTrue(Log.getFindings().isEmpty());
}
@Test
public void testGluonPreprocessingWithSupervised() {
Log.getFindings().clear();
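// Delete the stored training hash first so the generator re-runs training instead of skipping it for an unchanged hash.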
deleteHashFile(Paths.get("./target/generated-sources-emadl/PreprocessingNetwork.training_hash"));
String[] args = {"-m", "src/test/resources/models/", "-r", "PreprocessingNetwork", "-b", "GLUON"};
EMADLGeneratorCli.main(args);
assertTrue(Log.getFindings().toString(), Log.getFindings().size() == 0);
}
@Test
public void testGluonPreprocessingWithGAN() {
Log.getFindings().clear();
deleteHashFile(Paths.get("./target/generated-sources-emadl/defaultGANPreprocessing/GeneratorWithPreprocessing.training_hash"));
String[] args = {"-m", "src/test/resources/models/ganModel", "-r", "defaultGANPreprocessing.GeneratorWithPreprocessing", "-b", "GLUON"};
EMADLGeneratorCli.main(args);
assertTrue(Log.getFindings().toString(), Log.getFindings().size() == 0);
}
private void deleteHashFile(Path hashFile) {
try {
Files.delete(hashFile);
......
/* (c) https://github.com/MontiCore/monticore */
configuration PreprocessingNetwork{
num_epoch:1
batch_size:1
log_period: 1
normalize:false
preprocessing_name: PreprocessingProcessing
context:cpu
load_checkpoint:false
optimizer:sgd{
learning_rate:0.1
learning_rate_decay:0.85
step_size:1000
weight_decay:0.0
}
}
/* (c) https://github.com/MontiCore/monticore */
component PreprocessingNetwork {
ports in Z(0:255)^{3, 32, 32} data,
out Q(0:1)^{10} softmax;
implementation CNN {
def conv(channels, kernel=1, stride=1){
Convolution(kernel=(kernel,kernel),channels=channels) ->
Relu() ->
Pooling(pool_type="max", kernel=(2,2), stride=(stride,stride))
}
data ->
conv(kernel=5, channels=20, stride=2) ->
conv(kernel=5, channels=50, stride=2) ->
FullyConnected(units=500) ->
Relu() ->
Dropout() ->
FullyConnected(units=10) ->
Softmax() ->
softmax;
}
}
/* (c) https://github.com/MontiCore/monticore */
component PreprocessingProcessing
{
ports in Q(-oo:oo)^{3,32,32} data,
in Q(0:1) softmax_label,
out Q(-1:1)^{3,32,32} data_out,
out Q(0:1) softmax_label_out;
implementation Math
{
data = data * 2;
data_out = data - 1;
softmax_label_out = softmax_label;
}
}
/* (c) https://github.com/MontiCore/monticore */
package cNNCalculator;
component Network{
ports in Z(0:255)^{1, 28, 28} image,
out Q(0:1)^{10} predictions;
......
cifar10.CifarNetwork src/test/resources/training_data/Cifar
simpleCifar10.CifarNetwork src/test/resources/training_data/Cifar
cNNCalculator.Network src/test/resources/training_data/Cifar
PreprocessingNetwork src/test/resources/training_data/Cifar
InstanceTest.NetworkB data/InstanceTest.NetworkB
Alexnet data/Alexnet
ThreeInputCNN_M14 data/ThreeInputCNN_M14
......
defaultGANPreprocessing.GeneratorWithPreprocessing src/test/resources/training_data/Cifar
defaultGAN.DefaultGANGenerator src/test/resources/training_data/Cifar
infoGAN.InfoGANGenerator src/test/resources/training_data/Cifar
/* (c) https://github.com/MontiCore/monticore */
package defaultGAN;
component DefaultGANConnector {
ports in Q(0:1)^{100} noise,
out Q(0:1)^{1, 64, 64} res;
instance DefaultGANGenerator predictor;
connect noise -> predictor.noise;
connect predictor.data -> res;
}
/* (c) https://github.com/MontiCore/monticore */
package defaultGAN;
component DefaultGANDiscriminator{
ports in Q(-1:1)^{1, 64, 64} data,
out Q(-oo:oo)^{1} dis;
implementation CNN {
data ->
Convolution(kernel=(4,4),channels=64, stride=(2,2)) ->
LeakyRelu(alpha=0.2) ->
Convolution(kernel=(4,4),channels=128, stride=(2,2)) ->
BatchNorm() ->
LeakyRelu(alpha=0.2) ->
Convolution(kernel=(4,4),channels=256, stride=(2,2)) ->
BatchNorm() ->
LeakyRelu(alpha=0.2) ->
Convolution(kernel=(4,4),channels=512, stride=(2,2)) ->
BatchNorm() ->
LeakyRelu(alpha=0.2) ->
Convolution(kernel=(4,4),channels=1, stride=(1,1)) ->
Sigmoid() ->
dis;
}
}
/* (c) https://github.com/MontiCore/monticore */
configuration DefaultGANGenerator{
learning_method:gan
discriminator_name: defaultGAN.DefaultGANDiscriminator
num_epoch:10
batch_size:64
normalize:false
context:cpu
noise_input: "noise"
print_images: true
log_period: 10
load_checkpoint:false
optimizer:adam{
learning_rate:0.0002
beta1:0.5
}
discriminator_optimizer:adam{
learning_rate:0.0002
beta1:0.5
}
noise_distribution:gaussian{
mean_value:0
spread_value:1
}
}
/* (c) https://github.com/MontiCore/monticore */
package defaultGAN;
component DefaultGANGenerator{
ports in Q(0:1)^{100} noise,
out Q(-1:1)^{1, 64, 64} data;
implementation CNN {
noise ->
Reshape(shape=(100,1,1)) ->
UpConvolution(kernel=(4,4), channels=512, stride=(1,1), padding="valid", no_bias=true) ->
BatchNorm() ->
Relu() ->
UpConvolution(kernel=(4,4), channels=256, stride=(2,2), no_bias=true) ->
BatchNorm() ->
Relu() ->
UpConvolution(kernel=(4,4), channels=128, stride=(2,2), no_bias=true) ->
BatchNorm() ->
Relu() ->
UpConvolution(kernel=(4,4), channels=64, stride=(2,2), no_bias=true) ->
BatchNorm() ->
Relu() ->
UpConvolution(kernel=(4,4), channels=1, stride=(2,2), no_bias=true) ->
Tanh() ->
data;
}
}
/* (c) https://github.com/MontiCore/monticore */
package defaultGANPreprocessing;
component DiscriminatorWithPreprocessing{
ports in Q(-1:1)^{3, 64, 64} data,
out Q(-oo:oo)^{1} dis;
implementation CNN {
data ->
Convolution(kernel=(4,4),channels=64, stride=(2,2)) ->
LeakyRelu(alpha=0.2) ->
Convolution(kernel=(4,4),channels=128, stride=(2,2)) ->
BatchNorm() ->
LeakyRelu(alpha=0.2) ->
Convolution(kernel=(4,4),channels=256, stride=(2,2)) ->
BatchNorm() ->
LeakyRelu(alpha=0.2) ->
Convolution(kernel=(4,4),channels=512, stride=(2,2)) ->
BatchNorm() ->
LeakyRelu(alpha=0.2) ->
Convolution(kernel=(4,4),channels=1, stride=(1,1)) ->
Sigmoid() ->
dis;
}
}
/* (c) https://github.com/MontiCore/monticore */
configuration GeneratorWithPreprocessing{
learning_method:gan
discriminator_name: defaultGANPreprocessing.DiscriminatorWithPreprocessing
num_epoch:1
batch_size:1
normalize:false
preprocessing_name: defaultGANPreprocessing.ProcessingWithPreprocessing
context:cpu
noise_input: "noise"
print_images: false
log_period: 1
load_checkpoint:false
optimizer:adam{
learning_rate:0.0002
beta1:0.5
}
discriminator_optimizer:adam{
learning_rate:0.0002
beta1:0.5
}
noise_distribution:gaussian{
mean_value:0
spread_value:1
}
}
/* (c) https://github.com/MontiCore/monticore */
package defaultGANPreprocessing;
component GeneratorWithPreprocessing{
ports in Q(0:1)^{100} noise,
out Q(-1:1)^{3, 64, 64} data;
implementation CNN {