...
 
Commits (8)
......@@ -8,15 +8,16 @@
<groupId>de.monticore.lang.monticar</groupId>
<artifactId>cnnarch-caffe2-generator</artifactId>
<version>0.2.11-SNAPSHOT</version>
<version>0.2.12-SNAPSHOT</version>
<!-- == PROJECT DEPENDENCIES ============================================= -->
<properties>
<!-- .. SE-Libraries .................................................. -->
<CNNArch.version>0.3.0-SNAPSHOT</CNNArch.version>
<CNNTrain.version>0.2.6</CNNTrain.version>
<CNNArch.version>0.3.1-SNAPSHOT</CNNArch.version>
<CNNTrain.version>0.3.2-SNAPSHOT</CNNTrain.version>
<CNNArch2X.version>0.0.1-SNAPSHOT</CNNArch2X.version>
<embedded-montiarc-math-opt-generator>0.1.4</embedded-montiarc-math-opt-generator>
<!-- .. Libraries .................................................. -->
......@@ -59,6 +60,12 @@
<!-- MontiCore Dependencies -->
<dependency>
<groupId>de.monticore.lang.monticar</groupId>
<artifactId>cnnarch-generator</artifactId>
<version>${CNNArch2X.version}</version>
</dependency>
<dependency>
<groupId>de.monticore.lang.monticar</groupId>
<artifactId>cnn-arch</artifactId>
......
......@@ -20,8 +20,10 @@
*/
package de.monticore.lang.monticar.cnnarch.caffe2generator;
import de.monticore.lang.monticar.cnnarch.CNNArchGenerator;
import de.monticore.lang.monticar.cnnarch.DataPathConfigParser;
import de.monticore.lang.monticar.cnnarch.generator.CNNArchGenerator;
import de.monticore.lang.monticar.cnnarch.generator.DataPathConfigParser;
import de.monticore.lang.monticar.cnnarch.generator.Target;
import de.monticore.lang.monticar.cnnarch._cocos.CNNArchCocos;
import de.monticore.lang.monticar.cnnarch._symboltable.ArchitectureSymbol;
import de.monticore.lang.monticar.cnnarch._symboltable.ArchitectureElementSymbol;
......@@ -31,72 +33,16 @@ import de.monticore.lang.monticar.generator.FileContent;
import de.monticore.lang.monticar.generator.cmake.CMakeConfig;
import de.monticore.lang.monticar.generator.cmake.CMakeFindModule;
import de.monticore.lang.monticar.generator.cpp.GeneratorCPP;
import de.monticore.symboltable.Scope;
import de.se_rwth.commons.logging.Log;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.List;
public class CNNArch2Caffe2 extends CNNArchGenerator{
private boolean isSupportedLayer(ArchitectureElementSymbol element, LayerSupportChecker layerChecker){
List<ArchitectureElementSymbol> constructLayerElemList;
if (element.getResolvedThis().get() instanceof CompositeElementSymbol) {
constructLayerElemList = ((CompositeElementSymbol)element.getResolvedThis().get()).getElements();
for (ArchitectureElementSymbol constructedLayerElement : constructLayerElemList) {
if (!isSupportedLayer(constructedLayerElement, layerChecker)) {
return false;
}
}
}
if (!layerChecker.isSupported(element.toString())) {
Log.error("Unsupported layer " + "'" + element.getName() + "'" + " for the backend CAFFE2.");
return false;
} else {
return true;
}
}
private boolean supportCheck(ArchitectureSymbol architecture){
LayerSupportChecker layerChecker = new LayerSupportChecker();
for (ArchitectureElementSymbol element : ((CompositeElementSymbol)architecture.getBody()).getElements()){
if(!isSupportedLayer(element, layerChecker)) {
return false;
}
}
return true;
}
public class CNNArch2Caffe2 extends CNNArchGenerator {
public CNNArch2Caffe2() {
setGenerationTargetPath("./target/generated-sources-cnnarch/");
}
public void generate(Scope scope, String rootModelName){
Optional<CNNArchCompilationUnitSymbol> compilationUnit = scope.resolve(rootModelName, CNNArchCompilationUnitSymbol.KIND);
if (!compilationUnit.isPresent()){
Log.error("could not resolve architecture " + rootModelName);
quitGeneration();
}
CNNArchCocos.checkAll(compilationUnit.get());
if (!supportCheck(compilationUnit.get().getArchitecture())){
quitGeneration();
}
try{
String confPath = getModelsDirPath() + "/data_paths.txt";
DataPathConfigParser newParserConfig = new DataPathConfigParser(confPath);
String dataPath = newParserConfig.getDataPath(rootModelName);
compilationUnit.get().getArchitecture().setDataPath(dataPath);
compilationUnit.get().getArchitecture().setComponentName(rootModelName);
generateFiles(compilationUnit.get().getArchitecture());
} catch (IOException e){
Log.error(e.toString());
}
architectureSupportChecker = new CNNArch2Caffe2ArchitectureSupportChecker();
layerSupportChecker = new CNNArch2Caffe2LayerSupportChecker();
}
//check cocos with CNNArchCocos.checkAll(architecture) before calling this method.
......@@ -114,19 +60,9 @@ public class CNNArch2Caffe2 extends CNNArchGenerator{
temp = archTc.process("execute", Target.CPP);
fileContentMap.put(temp.getKey().replace(".h", ""), temp.getValue());
checkValidGeneration(architecture);
return fileContentMap;
}
public void generateFromFilecontentsMap(Map<String, String> fileContentMap) throws IOException {
GeneratorCPP genCPP = new GeneratorCPP();
genCPP.setGenerationTargetPath(getGenerationTargetPath());
for (String fileName : fileContentMap.keySet()){
genCPP.generateFile(new FileContent(fileContentMap.get(fileName), fileName));
}
}
public Map<String, String> generateCMakeContent(String rootModelName) {
// model name should start with a lower case letter. If it is a component, replace dot . by _
rootModelName = rootModelName.replace('.', '_').replace('[', '_').replace(']', '_');
......
package de.monticore.lang.monticar.cnnarch.caffe2generator;
import de.monticore.lang.monticar.cnnarch.generator.ArchitectureSupportChecker;
/**
 * Architecture-level support checker for the Caffe2 backend.
 * Adds no restrictions beyond the generic ArchitectureSupportChecker defaults,
 * i.e. every architecture feature accepted by the base class is accepted here.
 */
public class CNNArch2Caffe2ArchitectureSupportChecker extends ArchitectureSupportChecker {
// Intentionally empty: no Caffe2-specific architecture checks are needed yet.
public CNNArch2Caffe2ArchitectureSupportChecker() {}
}
......@@ -19,81 +19,14 @@
* *******************************************************************************
*/
package de.monticore.lang.monticar.cnnarch.caffe2generator;
import de.se_rwth.commons.logging.Log;
import org.apache.commons.cli.*;
import java.nio.file.Path;
import java.nio.file.Paths;
import de.monticore.lang.monticar.cnnarch.generator.CNNArchGenerator;
import de.monticore.lang.monticar.cnnarch.generator.GenericCNNArchCli;
public class CNNArch2Caffe2Cli {
public static final Option OPTION_MODELS_PATH = Option.builder("m")
.longOpt("models-dir")
.desc("full path to the directory with the CNNArch model")
.hasArg(true)
.required(true)
.build();
public static final Option OPTION_ROOT_MODEL = Option.builder("r")
.longOpt("root-model")
.desc("name of the architecture")
.hasArg(true)
.required(true)
.build();
public static final Option OPTION_OUTPUT_PATH = Option.builder("o")
.longOpt("output-dir")
.desc("full path to output directory for tests")
.hasArg(true)
.required(false)
.build();
private CNNArch2Caffe2Cli() {
}
public static void main(String[] args) {
Options options = getOptions();
CommandLineParser parser = new DefaultParser();
CommandLine cliArgs = parseArgs(options, parser, args);
if (cliArgs != null) {
runGenerator(cliArgs);
}
}
private static Options getOptions() {
Options options = new Options();
options.addOption(OPTION_MODELS_PATH);
options.addOption(OPTION_ROOT_MODEL);
options.addOption(OPTION_OUTPUT_PATH);
return options;
}
private static CommandLine parseArgs(Options options, CommandLineParser parser, String[] args) {
CommandLine cliArgs;
try {
cliArgs = parser.parse(options, args);
} catch (ParseException e) {
Log.error("argument parsing exception: " + e.getMessage());
quitGeneration();
return null;
}
return cliArgs;
}
private static void quitGeneration(){
Log.error("Code generation is aborted");
System.exit(1);
}
private static void runGenerator(CommandLine cliArgs) {
Path modelsDirPath = Paths.get(cliArgs.getOptionValue(OPTION_MODELS_PATH.getOpt()));
String rootModelName = cliArgs.getOptionValue(OPTION_ROOT_MODEL.getOpt());
String outputPath = cliArgs.getOptionValue(OPTION_OUTPUT_PATH.getOpt());
CNNArch2Caffe2 generator = new CNNArch2Caffe2();
if (outputPath != null){
generator.setGenerationTargetPath(outputPath);
}
generator.generate(modelsDirPath, rootModelName);
CNNArchGenerator generator = new CNNArch2Caffe2();
GenericCNNArchCli cli = new GenericCNNArchCli(generator);
cli.run(args);
}
}
package de.monticore.lang.monticar.cnnarch.caffe2generator;
import de.monticore.lang.monticar.cnnarch.predefined.AllPredefinedLayers;
import de.monticore.lang.monticar.cnnarch.generator.LayerSupportChecker;
/**
 * Declares the set of CNNArch layers the Caffe2 generator can translate.
 * Layers not registered here are rejected by the base LayerSupportChecker.
 */
public class CNNArch2Caffe2LayerSupportChecker extends LayerSupportChecker {

    public CNNArch2Caffe2LayerSupportChecker() {
        // Registration order is preserved from the original enumeration.
        String[] caffe2SupportedLayers = {
                AllPredefinedLayers.FULLY_CONNECTED_NAME,
                AllPredefinedLayers.CONVOLUTION_NAME,
                AllPredefinedLayers.SOFTMAX_NAME,
                AllPredefinedLayers.SIGMOID_NAME,
                AllPredefinedLayers.TANH_NAME,
                AllPredefinedLayers.RELU_NAME,
                AllPredefinedLayers.DROPOUT_NAME,
                AllPredefinedLayers.POOLING_NAME,
                AllPredefinedLayers.GLOBAL_POOLING_NAME,
                AllPredefinedLayers.LRN_NAME,
                AllPredefinedLayers.FLATTEN_NAME
        };
        for (String layerName : caffe2SupportedLayers) {
            supportedLayerList.add(layerName);
        }
    }

}
/**
*
* ******************************************************************************
* MontiCAR Modeling Family, www.se-rwth.de
* Copyright (c) 2017, Software Engineering Group at RWTH Aachen,
* All rights reserved.
*
* This project is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3.0 of the License, or (at your option) any later version.
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this project. If not, see <http://www.gnu.org/licenses/>.
* *******************************************************************************
*/
package de.monticore.lang.monticar.cnnarch.caffe2generator;
import de.monticore.lang.monticar.cnnarch.generator.TrainParamSupportChecker;
import de.monticore.lang.monticar.cnntrain._ast.*;
import de.monticore.lang.monticar.cnntrain._visitor.CNNTrainVisitor;
import de.se_rwth.commons.logging.Log;
import java.util.ArrayList;
import java.util.List;
public class TrainParamSupportChecker implements CNNTrainVisitor {
private List<String> unsupportedElemList = new ArrayList();
private void printUnsupportedEntryParam(String nodeName){
Log.warn("Unsupported training parameter " + "'" + nodeName + "'" + " for the backend CAFFE2. It will be ignored.");
}
private void printUnsupportedOptimizer(String nodeName){
Log.warn("Unsupported optimizer parameter " + "'" + nodeName + "'" + " for the backend CAFFE2. It will be ignored.");
}
private void printUnsupportedOptimizerParam(String nodeName){
Log.warn("Unsupported training optimizer parameter " + "'" + nodeName + "'" + " for the backend CAFFE2. It will be ignored.");
}
public TrainParamSupportChecker() {
}
public static final String unsupportedOptFlag = "unsupported_optimizer";
public List getUnsupportedElemList(){
return this.unsupportedElemList;
}
//Empty visit method denotes that the corresponding training parameter is supported.
//To set a training parameter as unsupported, add the corresponding node to the unsupportedElemList
public void visit(ASTNumEpochEntry node){}
public void visit(ASTBatchSizeEntry node){}
public class CNNArch2Caffe2TrainParamSupportChecker extends TrainParamSupportChecker {
public void visit(ASTLoadCheckpointEntry node){
printUnsupportedEntryParam(node.getName());
......@@ -47,18 +35,6 @@ public class TrainParamSupportChecker implements CNNTrainVisitor {
this.unsupportedElemList.add(node.getName());
}
public void visit(ASTTrainContextEntry node){}
public void visit(ASTEvalMetricEntry node){}
public void visit(ASTSGDOptimizer node){}
public void visit(ASTAdamOptimizer node){}
public void visit(ASTRmsPropOptimizer node){}
public void visit(ASTAdaGradOptimizer node){}
public void visit(ASTNesterovOptimizer node){
printUnsupportedOptimizer(node.getName());
this.unsupportedElemList.add(this.unsupportedOptFlag);
......@@ -69,19 +45,11 @@ public class TrainParamSupportChecker implements CNNTrainVisitor {
this.unsupportedElemList.add(this.unsupportedOptFlag);
}
public void visit(ASTLearningRateEntry node){}
public void visit(ASTMinimumLearningRateEntry node){
printUnsupportedOptimizerParam(node.getName());
this.unsupportedElemList.add(node.getName());
}
public void visit(ASTWeightDecayEntry node){}
public void visit(ASTLRDecayEntry node){}
public void visit(ASTLRPolicyEntry node){}
public void visit(ASTRescaleGradEntry node){
printUnsupportedOptimizerParam(node.getName());
this.unsupportedElemList.add(node.getName());
......@@ -92,18 +60,6 @@ public class TrainParamSupportChecker implements CNNTrainVisitor {
this.unsupportedElemList.add(node.getName());
}
public void visit(ASTStepSizeEntry node){}
public void visit(ASTMomentumEntry node){}
public void visit(ASTBeta1Entry node){}
public void visit(ASTBeta2Entry node){}
public void visit(ASTEpsilonEntry node){}
public void visit(ASTGamma1Entry node){}
public void visit(ASTGamma2Entry node){
printUnsupportedOptimizerParam(node.getName());
this.unsupportedElemList.add(node.getName());
......@@ -119,6 +75,4 @@ public class TrainParamSupportChecker implements CNNTrainVisitor {
this.unsupportedElemList.add(node.getName());
}
public void visit(ASTRhoEntry node){}
}
}
\ No newline at end of file
......@@ -20,6 +20,9 @@
*/
package de.monticore.lang.monticar.cnnarch.caffe2generator;
import de.monticore.lang.monticar.cnnarch.generator.LayerNameCreator;
import de.monticore.lang.monticar.cnnarch.generator.Target;
import de.monticore.lang.monticar.cnnarch._symboltable.*;
import de.monticore.lang.monticar.cnnarch.predefined.Sigmoid;
import de.monticore.lang.monticar.cnnarch.predefined.Softmax;
......
package de.monticore.lang.monticar.cnnarch.caffe2generator;
import de.monticore.io.paths.ModelPath;
import de.monticore.lang.monticar.cnntrain.CNNTrainGenerator;
import de.monticore.lang.monticar.cnnarch.generator.CNNTrainGenerator;
import de.monticore.lang.monticar.cnntrain._ast.ASTCNNTrainNode;
import de.monticore.lang.monticar.cnntrain._ast.ASTOptimizerEntry;
import de.monticore.lang.monticar.cnntrain._cocos.CNNTrainCocos;
......@@ -18,96 +18,13 @@ import java.io.IOException;
import java.nio.file.Path;
import java.util.*;
public class CNNTrain2Caffe2 implements CNNTrainGenerator {
private String generationTargetPath;
private String instanceName;
private void supportCheck(ConfigurationSymbol configuration){
checkEntryParams(configuration);
checkOptimizerParams(configuration);
}
private void checkEntryParams(ConfigurationSymbol configuration){
TrainParamSupportChecker funcChecker = new TrainParamSupportChecker();
Iterator it = configuration.getEntryMap().keySet().iterator();
while (it.hasNext()) {
String key = it.next().toString();
ASTCNNTrainNode astTrainEntryNode = (ASTCNNTrainNode) configuration.getEntryMap().get(key).getAstNode().get();
astTrainEntryNode.accept(funcChecker);
}
it = configuration.getEntryMap().keySet().iterator();
while (it.hasNext()) {
String key = it.next().toString();
if (funcChecker.getUnsupportedElemList().contains(key)) {
it.remove();
}
}
}
private void checkOptimizerParams(ConfigurationSymbol configuration){
TrainParamSupportChecker funcChecker = new TrainParamSupportChecker();
if (configuration.getOptimizer() != null) {
ASTOptimizerEntry astOptimizer = (ASTOptimizerEntry) configuration.getOptimizer().getAstNode().get();
astOptimizer.accept(funcChecker);
if (funcChecker.getUnsupportedElemList().contains(funcChecker.unsupportedOptFlag)) {
OptimizerSymbol adamOptimizer = new OptimizerSymbol("adam");
configuration.setOptimizer(adamOptimizer); //Set default as adam optimizer
}else {
Iterator it = configuration.getOptimizer().getOptimizerParamMap().keySet().iterator();
while (it.hasNext()) {
String key = it.next().toString();
if (funcChecker.getUnsupportedElemList().contains(key)) {
it.remove();
}
}
}
}
}
private static void quitGeneration(){
Log.error("Code generation is aborted");
System.exit(1);
}
public class CNNTrain2Caffe2 extends CNNTrainGenerator {
public CNNTrain2Caffe2() {
setGenerationTargetPath("./target/generated-sources-cnnarch/");
}
public String getInstanceName() {
String parsedInstanceName = this.instanceName.replace('.', '_').replace('[', '_').replace(']', '_');
parsedInstanceName = parsedInstanceName.substring(0, 1).toLowerCase() + parsedInstanceName.substring(1);
return parsedInstanceName;
}
public void setInstanceName(String instanceName) {
this.instanceName = instanceName;
}
public String getGenerationTargetPath() {
if (generationTargetPath.charAt(generationTargetPath.length() - 1) != '/') {
this.generationTargetPath = generationTargetPath + "/";
}
return generationTargetPath;
}
public void setGenerationTargetPath(String generationTargetPath) {
this.generationTargetPath = generationTargetPath;
}
public ConfigurationSymbol getConfigurationSymbol(Path modelsDirPath, String rootModelName) {
final ModelPath mp = new ModelPath(modelsDirPath);
GlobalScope scope = new GlobalScope(mp, new CNNTrainLanguage());
Optional<CNNTrainCompilationUnitSymbol> compilationUnit = scope.resolve(rootModelName, CNNTrainCompilationUnitSymbol.KIND);
if (!compilationUnit.isPresent()) {
Log.error("could not resolve training configuration " + rootModelName);
quitGeneration();
}
setInstanceName(compilationUnit.get().getFullName());
CNNTrainCocos.checkAll(compilationUnit.get());
supportCheck(compilationUnit.get().getConfiguration());
return compilationUnit.get().getConfiguration();
trainParamSupportChecker = new CNNArch2Caffe2TrainParamSupportChecker();
}
@Override
public void generate(Path modelsDirPath, String rootModelName) {
ConfigurationSymbol configuration = getConfigurationSymbol(modelsDirPath, rootModelName);
Map<String, String> fileContents = generateStrings(configuration);
......@@ -122,6 +39,7 @@ public class CNNTrain2Caffe2 implements CNNTrainGenerator {
}
}
@Override
public Map<String, String> generateStrings(ConfigurationSymbol configuration) {
ConfigurationData configData = new ConfigurationData(configuration, getInstanceName());
List<ConfigurationData> configDataList = new ArrayList<>();
......@@ -131,5 +49,4 @@ public class CNNTrain2Caffe2 implements CNNTrainGenerator {
String templateContent = TemplateConfiguration.processTemplate(ftlContext, "CNNTrainer.ftl");
return Collections.singletonMap("CNNTrainer_" + getInstanceName() + ".py", templateContent);
}
}
/**
*
* ******************************************************************************
* MontiCAR Modeling Family, www.se-rwth.de
* Copyright (c) 2017, Software Engineering Group at RWTH Aachen,
* All rights reserved.
*
* This project is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3.0 of the License, or (at your option) any later version.
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this project. If not, see <http://www.gnu.org/licenses/>.
* *******************************************************************************
*/
package de.monticore.lang.monticar.cnnarch.caffe2generator;
import de.monticore.lang.monticar.cnnarch._symboltable.*;
import de.monticore.lang.monticar.cnnarch.predefined.Convolution;
import de.monticore.lang.monticar.cnnarch.predefined.FullyConnected;
import de.monticore.lang.monticar.cnnarch.predefined.Pooling;
import java.util.*;
/**
 * Assigns a unique, human-readable name to every atomic element of a CNNArch
 * architecture (e.g. "conv1_", "fc2_", "pool3_1_"). Names encode the layer
 * kind, a sequential stage number, and — inside parallel groups — the stream
 * indices, so that generated code can reference layers unambiguously.
 */
public class LayerNameCreator {

    // Bidirectional lookup between symbols and their generated names.
    private final Map<ArchitectureElementSymbol, String> elementToName = new HashMap<>();
    private final Map<String, ArchitectureElementSymbol> nameToElement = new HashMap<>();

    /**
     * Walks the whole architecture body and computes names for all elements.
     * Stage numbering starts at 1; the stream-index list starts empty
     * (a single, non-parallel stream).
     */
    public LayerNameCreator(ArchitectureSymbol architecture) {
        name(architecture.getBody(), 1, new ArrayList<>());
    }

    /** @return the element registered under the given generated name, or null. */
    public ArchitectureElementSymbol getArchitectureElement(String name){
        return nameToElement.get(name);
    }

    /** @return the generated name for the given element, or null if not named. */
    public String getName(ArchitectureElementSymbol architectureElement){
        return elementToName.get(architectureElement);
    }

    /**
     * Recursively names an element and returns the stage counter to use for
     * the next element. Composites delegate to nameComposite; non-atomic
     * elements are resolved first; atomic elements with a positive serial
     * length get a name, others leave the stage unchanged.
     */
    protected int name(ArchitectureElementSymbol architectureElement, int stage, List<Integer> streamIndices){
        if (architectureElement instanceof CompositeElementSymbol){
            return nameComposite((CompositeElementSymbol) architectureElement, stage, streamIndices);
        } else{
            if (architectureElement.isAtomic()){
                if (architectureElement.getMaxSerialLength().get() > 0){
                    return add(architectureElement, stage, streamIndices);
                } else {
                    return stage;
                }
            } else {
                // Not atomic: name the element this one resolves to instead.
                ArchitectureElementSymbol resolvedElement = architectureElement.getResolvedThis().get();
                return name(resolvedElement, stage, streamIndices);
            }
        }
    }

    /**
     * Names the children of a composite. Parallel composites push a new
     * stream index (incremented per branch) and return the maximum end stage
     * of all branches plus one; serial composites thread the stage counter
     * through their children in order.
     */
    protected int nameComposite(CompositeElementSymbol compositeElement, int stage, List<Integer> streamIndices){
        if (compositeElement.isParallel()){
            int startStage = stage + 1;
            streamIndices.add(1);
            int lastIndex = streamIndices.size() - 1;

            List<Integer> endStages = new ArrayList<>();
            for (ArchitectureElementSymbol subElement : compositeElement.getElements()){
                endStages.add(name(subElement, startStage, streamIndices));
                streamIndices.set(lastIndex, streamIndices.get(lastIndex) + 1);
            }

            streamIndices.remove(lastIndex);
            return Collections.max(endStages) + 1;
        } else {
            int endStage = stage;
            for (ArchitectureElementSymbol subElement : compositeElement.getElements()){
                endStage = name(subElement, endStage, streamIndices);
            }
            return endStage;
        }
    }

    /**
     * Registers a name for the element, bumping the stage until the name is
     * unique. Elements already named keep their existing name and stage.
     */
    protected int add(ArchitectureElementSymbol architectureElement, int stage, List<Integer> streamIndices){
        int endStage = stage;
        if (!elementToName.containsKey(architectureElement)) {
            String name = createName(architectureElement, endStage, streamIndices);
            while (nameToElement.containsKey(name)) {
                endStage++;
                name = createName(architectureElement, endStage, streamIndices);
            }
            elementToName.put(architectureElement, name);
            nameToElement.put(name, architectureElement);
        }
        return endStage;
    }

    /**
     * Builds the full name. IO elements use their base name plus an optional
     * array index; all other elements append the stage and stream postfix.
     */
    protected String createName(ArchitectureElementSymbol architectureElement, int stage, List<Integer> streamIndices){
        if (architectureElement instanceof IOSymbol){
            String name = createBaseName(architectureElement);
            IOSymbol ioElement = (IOSymbol) architectureElement;
            if (ioElement.getArrayAccess().isPresent()){
                int arrayAccess = ioElement.getArrayAccess().get().getIntValue().get();
                name = name + "_" + arrayAccess + "_";
            }
            return name;
        } else {
            return createBaseName(architectureElement) + stage + createStreamPostfix(streamIndices) + "_";
        }
    }

    /**
     * Maps well-known predefined layers to short names ("conv", "fc", "pool");
     * other layers use their lower-cased declaration name, composites become
     * "group", and everything else keeps its symbol name.
     */
    protected String createBaseName(ArchitectureElementSymbol architectureElement){
        if (architectureElement instanceof LayerSymbol) {
            LayerDeclarationSymbol layerDeclaration = ((LayerSymbol) architectureElement).getDeclaration();
            if (layerDeclaration instanceof Convolution) {
                return "conv";
            } else if (layerDeclaration instanceof FullyConnected) {
                return "fc";
            } else if (layerDeclaration instanceof Pooling) {
                return "pool";
            } else {
                return layerDeclaration.getName().toLowerCase();
            }
        } else if (architectureElement instanceof CompositeElementSymbol){
            return "group";
        } else {
            return architectureElement.getName();
        }
    }

    /** Joins the stream indices as "_i_j..." for names inside parallel groups. */
    protected String createStreamPostfix(List<Integer> streamIndices){
        StringBuilder stringBuilder = new StringBuilder();
        for (int streamIndex : streamIndices){
            stringBuilder.append("_");
            stringBuilder.append(streamIndex);
        }
        return stringBuilder.toString();
    }
}
package de.monticore.lang.monticar.cnnarch.caffe2generator;
import static de.monticore.lang.monticar.cnnarch.predefined.AllPredefinedLayers.*;
import java.util.ArrayList;
import java.util.List;
/**
 * Legacy layer-support checker for the Caffe2 backend: keeps a blacklist of
 * layer names the generator cannot translate and accepts everything else.
 */
public class LayerSupportChecker {

    // Layers the Caffe2 backend cannot generate code for.
    // Fixed: was a raw-typed `new ArrayList()` (unchecked warning); now generic and final.
    private final List<String> unsupportedLayerList = new ArrayList<>();

    public LayerSupportChecker() {
        //Set the unsupported layers for the backend
        this.unsupportedLayerList.add(ADD_NAME);
        this.unsupportedLayerList.add(SPLIT_NAME);
        this.unsupportedLayerList.add(GET_NAME);
        this.unsupportedLayerList.add(CONCATENATE_NAME);
        this.unsupportedLayerList.add(BATCHNORM_NAME);
    }

    /**
     * @param element the string form of an architecture element
     * @return true if the element is not on the blacklist
     */
    public boolean isSupported(String element) {
        return !this.unsupportedLayerList.contains(element);
    }
}
/**
*
* ******************************************************************************
* MontiCAR Modeling Family, www.se-rwth.de
* Copyright (c) 2017, Software Engineering Group at RWTH Aachen,
* All rights reserved.
*
* This project is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3.0 of the License, or (at your option) any later version.
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this project. If not, see <http://www.gnu.org/licenses/>.
* *******************************************************************************
*/
package de.monticore.lang.monticar.cnnarch.caffe2generator;
//can be removed
/**
 * Generation targets of the Caffe2 generator; toString() yields the file
 * extension used for the emitted artifact.
 */
public enum Target {
    /** Python target, emitted as ".py" files. */
    PYTHON(".py"),
    /** C++ target, emitted as ".h" files. */
    CPP(".h");

    // File extension associated with the target, returned by toString().
    private final String fileExtension;

    Target(String fileExtension) {
        this.fileExtension = fileExtension;
    }

    @Override
    public String toString() {
        return fileExtension;
    }
}
......@@ -72,7 +72,7 @@ class ${tc.fileNameWithoutEnding}:
def create_model(self, model, data, device_opts, is_test):
with core.DeviceScope(device_opts):
${tc.include(tc.architecture.body)}
${tc.include(tc.architecture.streams[0])}
# this adds the loss and optimizer
def add_training_operators(self, model, output, label, device_opts, loss, opt_type, base_learning_rate, policy, stepsize, epsilon, beta1, beta2, gamma, momentum) :
......
......@@ -23,7 +23,7 @@ CAFFE2_DEFINE_string(predict_net_${tc.fileNameWithoutEnding}, "./model/${tc.comp
using namespace caffe2;
class ${tc.fileNameWithoutEnding}{
class ${tc.fileNameWithoutEnding}_0{
private:
TensorCPU input;
Workspace workSpace;
......@@ -32,11 +32,11 @@ class ${tc.fileNameWithoutEnding}{
public:
const std::vector<TIndex> input_shapes = {<#list tc.architecture.inputs as input>{1,${tc.join(input.definition.type.dimensions, ",")}}<#if input?has_next>,</#if></#list>};
explicit ${tc.fileNameWithoutEnding}(){
explicit ${tc.fileNameWithoutEnding}_0(){
init(input_shapes);
}
~${tc.fileNameWithoutEnding}(){};
~${tc.fileNameWithoutEnding}_0(){};
void init(const std::vector<TIndex> &input_shapes){
int n = 0;
......
......@@ -3,7 +3,7 @@
vector<float> CNN_${tc.getName(output)}(<#list shape as dim>${dim?c}<#if dim?has_next>*</#if></#list>);
</#list>
_cnn_.predict(<#list tc.architecture.inputs as input>CNNTranslator::translate(${input.name}<#if input.arrayAccess.isPresent()>[${input.arrayAccess.get().intValue.get()?c}]</#if>),
_predictor_0_.predict(<#list tc.architecture.inputs as input>CNNTranslator::translate(${input.name}<#if input.arrayAccess.isPresent()>[${input.arrayAccess.get().intValue.get()?c}]</#if>),
</#list><#list tc.architecture.outputs as output>CNN_${tc.getName(output)}<#if output?has_next>,
</#if></#list>);
......
......@@ -55,7 +55,7 @@ public class SymtabTest extends AbstractSymtabTest {
CNNArchCompilationUnitSymbol.KIND).orElse(null);
assertNotNull(a);
a.resolve();
a.getArchitecture().getBody().getOutputTypes();
a.getArchitecture().getStreams().get(0).getOutputTypes();
}
@Ignore
......@@ -67,7 +67,7 @@ public class SymtabTest extends AbstractSymtabTest {
CNNArchCompilationUnitSymbol.KIND).orElse(null);
assertNotNull(a);
a.resolve();
a.getArchitecture().getBody().getOutputTypes();
a.getArchitecture().getStreams().get(0).getOutputTypes();
}
@Ignore
......@@ -79,7 +79,7 @@ public class SymtabTest extends AbstractSymtabTest {
CNNArchCompilationUnitSymbol.KIND).orElse(null);
assertNotNull(a);
a.resolve();
a.getArchitecture().getBody().getOutputTypes();
a.getArchitecture().getStreams().get(0).getOutputTypes();
}
}
......@@ -39,5 +39,5 @@ architecture Alexnet(img_height=224, img_width=224, img_channels=3, classes=10){
fc(->=2) ->
FullyConnected(units=10) ->
Softmax() ->
predictions
predictions;
}
\ No newline at end of file
......@@ -11,5 +11,5 @@ architecture LeNet(img_height=28, img_width=28, img_channels=1, classes=10){
Relu() ->
FullyConnected(units=classes) ->
Softmax() ->
predictions
predictions;
}
\ No newline at end of file
......@@ -40,5 +40,5 @@ architecture ResNeXt50(img_height=224, img_width=224, img_channels=3, classes=10
GlobalPooling(pool_type="avg") ->
FullyConnected(units=classes) ->
Softmax() ->
predictions
predictions;
}
\ No newline at end of file
......@@ -33,5 +33,5 @@ architecture ResNet152(img_height=224, img_width=224, img_channels=3, classes=10
GlobalPooling(pool_type="avg") ->
FullyConnected(units=classes) ->
Softmax() ->
predictions
predictions;
}
\ No newline at end of file
......@@ -31,5 +31,5 @@ architecture ResNet34(img_height=224, img_width=224, img_channels=3, classes=100
GlobalPooling(pool_type="avg") ->
FullyConnected(units=classes) ->
Softmax() ->
predictions
predictions;
}
......@@ -25,5 +25,5 @@ architecture SequentialAlexnet(img_height=224, img_width=224, img_channels=3, cl
fc() ->
FullyConnected(units=classes) ->
Softmax() ->
predictions
predictions;
}
......@@ -28,5 +28,5 @@ architecture ThreeInputCNN_M14(img_height=200, img_width=300, img_channels=3, cl
Relu() ->
FullyConnected(units=classes) ->
Softmax() ->
predictions
predictions;
}
\ No newline at end of file
......@@ -27,5 +27,5 @@ architecture VGG16(img_height=224, img_width=224, img_channels=3, classes=1000){
fc() ->
FullyConnected(units=classes) ->
Softmax() ->
predictions
predictions;
}
\ No newline at end of file
......@@ -2,4 +2,6 @@ Alexnet data/Alexnet
CifarClassifierNetwork data/CifarClassifierNetwork
LeNet data/LeNet
VGG16 data/VGG16
MultipleOutputs data/MultipleOutputs
\ No newline at end of file
MultipleOutputs data/MultipleOutputs
ResNeXt50 data/ResNeXt50
ThreeInputCNN_M14 data/ThreeInputCNN_M14
\ No newline at end of file
architecture ArgumentConstraintTest1(img_height=224, img_width=224, img_channels=3, classes=1000){
def input Z(0:255)^{img_channels, img_height, img_width} image
def output Q(0:1)^{classes} predictions
def conv(kernel, channels, stride=1, act=true){
Convolution(kernel=(kernel,kernel), channels=channels, stride=(stride,stride)) ->
BatchNorm() ->
Relu(?=act)
}
def skip(channels, stride){
Convolution(kernel=(1,1), channels=75, stride=(stride,stride)) ->
BatchNorm()
}
def resLayer(channels, stride=1){
(
conv(kernel=3, channels=channels, stride=stride) ->
conv(kernel=3, channels=channels, stride=stride, act=false)
|
skip(channels=channels, stride=stride, ?=(stride!=1))
) ->
Add() ->
Relu()
}
image ->
conv(kernel=7, channels=64, stride=2) ->
Pooling(pool_type="max", kernel=(3,3), stride=(2,2)) ->
resLayer(channels=64, ->=3) ->
resLayer(channels=128, stride=2) ->
GlobalPooling(pool_type="avg") ->
FullyConnected(units=classes, ->=true) ->
Softmax() ->
predictions
}
architecture ArgumentConstraintTest2(img_height=224, img_width=224, img_channels=3, classes=1000){
def input Z(0:255)^{img_channels, img_height, img_width} image
def output Q(0:1)^{classes} predictions
def conv(kernel, channels, stride=1, act=true){
Convolution(kernel=(kernel,kernel), channels=channels, stride=(stride,stride)) ->
BatchNorm() ->
Relu(?=act)
}
def skip(channels, stride){
Convolution(kernel=(1,1), channels=96, stride=(stride,-stride)) ->
BatchNorm()
}
def resLayer(channels, stride=1){
(
conv(kernel=3, channels=channels, stride=stride) ->
conv(kernel=3, channels=channels, stride=stride, act=false)
|
skip(channels=channels, stride=stride, ?=(stride!=1))
) ->
Add() ->
Relu()
}
image ->
conv(kernel=7, channels=64, stride=2) ->
Pooling(pool_type="max", kernel=(3,3), stride=(2,2)) ->
resLayer(channels=64, ->=3) ->
resLayer(channels=128, stride=2) ->
GlobalPooling(pool_type="avg") ->
FullyConnected(units=classes) ->
Softmax() ->
predictions
}
/* Negative test fixture: must be rejected by the CNNArch argument-constraint CoCos.
   NOTE(review): the intended violation is 'GlobalPooling(pool_type="avg", ?=1)' —
   the conditional argument '?' presumably requires a boolean, not the integer 1. */
architecture ArgumentConstraintTest3(img_height=224, img_width=224, img_channels=3, classes=1000){
    def input Z(0:255)^{img_channels, img_height, img_width} image
    def output Q(0:1)^{classes} predictions
    def conv(kernel, channels, stride=1, act=true){
        Convolution(kernel=(kernel,kernel), channels=channels, stride=(stride,stride)) ->
        BatchNorm() ->
        Relu(?=act)
    }
    def skip(channels, stride){
        Convolution(kernel=(1,1), channels=64, stride=(stride,stride)) ->
        BatchNorm()
    }
    def resLayer(channels, stride=1){
        (
            conv(kernel=3, channels=channels, stride=stride) ->
            conv(kernel=3, channels=channels, stride=stride, act=false)
        |
            skip(channels=channels, stride=stride, ?=(stride!=1))
        ) ->
        Add() ->
        Relu()
    }
    image ->
    conv(kernel=7, channels=64, stride=2) ->
    Pooling(pool_type="max", kernel=(3,3), stride=(2,2), padding="valid") ->
    resLayer(channels=64, ->=3) ->
    resLayer(channels=128, stride=2) ->
    GlobalPooling(pool_type="avg", ?=1) ->    // non-boolean '?' argument (the intentional error)
    FullyConnected(units=classes) ->
    Softmax() ->
    predictions
}
/* Negative test fixture: must be rejected by the CNNArch argument-constraint CoCos.
   NOTE(review): the intended violation is 'padding=1' on Pooling — elsewhere in
   these fixtures padding takes string values ("valid"/"same"), so an integer is
   presumably the wrong type. */
architecture ArgumentConstraintTest4(img_height=224, img_width=224, img_channels=3, classes=1000){
    def input Z(0:255)^{img_channels, img_height, img_width} image
    def output Q(0:1)^{classes} predictions
    def conv(kernel, channels, stride=1, act=true){
        Convolution(kernel=(kernel,kernel), channels=channels, stride=(stride,stride)) ->
        BatchNorm() ->
        Relu(?=act)
    }
    def skip(channels, stride){
        Convolution(kernel=(1,1), channels=96, stride=(stride,stride)) ->
        BatchNorm()
    }
    def resLayer(channels, stride=1){
        (
            conv(kernel=3, channels=channels, stride=stride) ->
            conv(kernel=3, channels=channels, stride=stride, act=false)
        |
            skip(channels=channels, stride=stride, ?=(stride!=1))
        ) ->
        Add() ->
        Relu()
    }
    image ->
    conv(kernel=7, channels=64, stride=2) ->
    Pooling(pool_type="max", kernel=(3,3), stride=(2,2), padding=1) ->    // integer padding (the intentional error)
    resLayer(channels=64, ->=3) ->
    resLayer(channels=128, stride=2) ->
    GlobalPooling(pool_type="avg") ->
    FullyConnected(units=classes) ->
    Softmax() ->
    predictions
}
/* Negative test fixture: must be rejected by the CNNArch argument-constraint CoCos.
   NOTE(review): the intended violation is the negative kernel component
   'kernel=(1,-1)' in skip() — kernel sizes presumably must be positive. */
architecture ArgumentConstraintTest5(img_height=224, img_width=224, img_channels=3, classes=1000){
    def input Z(0:255)^{img_channels, img_height, img_width} image
    def output Q(0:1)^{classes} predictions
    def conv(kernel, channels, stride=1, act=true){
        Convolution(kernel=(kernel,kernel), channels=channels, stride=(stride,stride)) ->
        BatchNorm() ->
        Relu(?=act)
    }
    def skip(channels, stride){
        Convolution(kernel=(1,-1), channels=96, stride=(stride,stride)) ->    // negative kernel component (the intentional error)
        BatchNorm()
    }
    def resLayer(channels, stride=1){
        (
            conv(kernel=3, channels=channels, stride=stride) ->
            conv(kernel=3, channels=channels, stride=stride, act=false)
        |
            skip(channels=channels, stride=stride, ?=(stride!=1))
        ) ->
        Add() ->
        Relu()
    }
    image ->
    conv(kernel=7, channels=64, stride=2) ->
    Pooling(pool_type="max", kernel=(3,3), stride=(2,2), padding="same") ->
    resLayer(channels=64, ->=3) ->
    resLayer(channels=128, stride=2) ->
    GlobalPooling(pool_type="avg") ->
    FullyConnected(units=classes) ->
    Softmax() ->
    predictions
}
/* Negative test fixture: must be rejected by the CNNArch argument-constraint CoCos.
   NOTE(review): the intended violation is 'channels=false' in skip() — a boolean
   where an integer channel count is required. */
architecture ArgumentConstraintTest6(img_height=224, img_width=224, img_channels=3, classes=1000){
    def input Z(0:255)^{img_channels, img_height, img_width} image
    def output Q(0:1)^{classes} predictions
    def conv(kernel, channels, stride=1, act=true){
        Convolution(kernel=(kernel,kernel), channels=channels, stride=(stride,stride)) ->
        BatchNorm() ->
        Relu(?=act)
    }
    def skip(channels, stride){
        Convolution(kernel=(1,1), channels=false, stride=(stride,stride)) ->    // boolean channels (the intentional error)
        BatchNorm()
    }
    def resLayer(channels, stride=1){
        (
            conv(kernel=3, channels=channels, stride=stride) ->
            conv(kernel=3, channels=channels, stride=stride, act=false)
        |
            skip(channels=channels, stride=stride, ?=(stride!=1))
        ) ->
        Add() ->
        Relu()
    }
    image ->
    conv(kernel=7, channels=64, stride=2) ->
    Pooling(pool_type="max", kernel=(3,3), stride=(2,2), padding="valid") ->
    resLayer(channels=64, ->=3) ->
    resLayer(channels=128, stride=2) ->
    GlobalPooling(pool_type="avg") ->
    FullyConnected(units=classes) ->
    Softmax() ->
    predictions
}
/* Negative test fixture: the 'units' argument is passed twice to the same
   FullyConnected layer — the duplicate-argument CoCo is expected to reject this. */
architecture DuplicatedArgument(){
    def input Q(-oo:+oo)^{10} in1
    def output Q(0:1)^{2} out1
    in1 ->
    FullyConnected(units=64, units=32) ->    // duplicate 'units' argument (the intentional error)
    Tanh() ->
    FullyConnected(units=2) ->
    Softmax() ->
    out1
}
\ No newline at end of file
/* Negative test fixture: the input 'in1' is declared twice — the
   duplicated-IO-name CoCo is expected to reject this. */
architecture DuplicatedIONames(inputs=10, classes=2){
    def input Q(-oo:+oo)^{inputs} in1
    def input Q(-oo:+oo)^{inputs} in1    // second declaration of 'in1' (the intentional error)
    def output Q(0:1)^{classes} out1
    def fc(){
        FullyConnected(units=64) ->
        Tanh()
    }
    in1 ->
    fc() ->
    FullyConnected(units=classes) ->
    Softmax() ->
    out1
}
\ No newline at end of file
/* Negative test fixture: two duplicate-name violations — the architecture
   parameter 'inputs' appears twice in the signature, and the layer 'fc' is
   defined twice. The duplicated-name CoCo is expected to reject this. */
architecture DuplicatedNames(inputs=10, inputs=10, classes=2){
    def input Q(-oo:+oo)^{inputs} in1
    def output Q(0:1)^{classes} out1
    def fc(){
        FullyConnected(units=64) ->
        Tanh()
    }
    def fc(){    // second definition of 'fc' (intentional duplicate)
        FullyConnected(units=64) ->
        Tanh()
    }
    in1 ->
    fc() ->
    FullyConnected(units=classes) ->
    Softmax() ->
    out1
}
\ No newline at end of file
/* Negative test fixture: the IO names 'data_' and 'predictions_' end with an
   underscore — presumably illegal per the CNNArch naming CoCo (confirm against
   the name-check rules). */
architecture IllegalIOName{
    def input Q(-oo:+oo)^{10} data_
    def output Q(0:1)^{2} predictions_
    data_ ->
    FullyConnected(units=64, no_bias=true) ->
    Tanh() ->
    FullyConnected(units=2, no_bias=true) ->
    Softmax() ->
    predictions_
}
\ No newline at end of file
/* Negative test fixture: the parameter 'Tg' and the layer 'Fc' start with an
   upper-case letter — presumably illegal per the CNNArch naming CoCo, which is
   expected to reject this. */
architecture IllegalName(inputs=10, classes=2, Tg = 1){
    def input Q(-oo:+oo)^{inputs} in1
    def output Q(0:1)^{classes} out1
    def Fc(){    // capitalized layer name (intentional violation)
        FullyConnected(units=10)
    }
    in1 ->
    FullyConnected(units=64) ->
    Tanh() ->
    FullyConnected(units=classes) ->
    Softmax() ->
    out1
}
\ No newline at end of file
/* Negative test fixture: 'image' is declared as an array of size 3 (valid
   indices 0..2) but the stream accesses 'image[3]' — the array-access CoCo is
   expected to reject the out-of-bounds index. */
architecture InvalidArrayAccessValue(img_height=200, img_width=300, img_channels=3, classes=3){
    def input Z(0:255)^{img_channels, img_height, img_width} image[3]
    def output Q(0:1)^{classes} predictions
    def conv(kernel, channels){
        Convolution(kernel=kernel, channels=channels) ->
        Relu()
    }
    def inputGroup(index){
        [index] ->
        conv(kernel=(3,3), channels=32, ->=3) ->
        Pooling(pool_type="max", kernel=(2,2), stride=(2,2))
    }
    (image[0] | image[1] | image[2] | image[3]) ->    // image[3] is out of bounds (the intentional error)
    inputGroup(index=[0|..|2]) ->
    Concatenate() ->
    conv(kernel=(3,3), channels=64) ->
    Pooling(pool_type="max", kernel=(2,2), stride=(2,2)) ->
    FullyConnected(units=32) ->
    Relu() ->
    FullyConnected(units=classes) ->
    Softmax() ->
    predictions
}
/* Negative test fixture: the IO shapes are invalid — input is 2-dimensional and
   the output is 4-dimensional, which the IO-shape CoCo is expected to reject. */
architecture InvalidIOShape1(){
    def input Q(-oo:+oo)^{10, 2} in1
    def output Q(0:1)^{10, 2, 2, 2} out1    // 4-dimensional output shape (the intentional error)
    in1 ->
    FullyConnected(units=64) ->
    Tanh() ->
    FullyConnected(units=10) ->
    Softmax() ->
    out1
}
\ No newline at end of file
/* Negative test fixture: invalid IO dimensions — the input dimension 10.5 is
   not an integer and the output dimension -10 is negative; the IO-shape CoCo is
   expected to reject both. */
architecture InvalidIOShape2(){
    def input Q(-oo:+oo)^{10.5} in1     // non-integer dimension (intentional error)
    def output Q(0:1)^{-10} out1        // negative dimension (intentional error)
    in1 ->
    FullyConnected(units=64) ->
    Tanh() ->
    FullyConnected(units=10) ->
    Softmax() ->
    out1
}
\ No newline at end of file
/* Negative test fixture: 'in1' and 'out1' are declared as arrays of size 2 but
   are used without an index in the stream — presumably this unindexed use of
   array IO is what the CoCo check rejects (confirm against the shape/IO rules). */
architecture InvalidInputShape(inputs=10, classes=2){
    def input Q(-oo:+oo)^{inputs} in1[2]
    def output Q(0:1)^{classes} out1[2]
    in1 ->
    FullyConnected(units=64) ->
    Tanh() ->
    FullyConnected(units=classes) ->
    Softmax() ->
    out1
}
\ No newline at end of file
/* Negative test fixture: unbounded mutual recursion — conv() calls resLayer(),
   and resLayer() calls conv(); the recursion CoCo is expected to reject this. */
architecture InvalidRecursion(img_height=224, img_width=224, img_channels=3, classes=1000){
    def input Z(0:255)^{img_channels, img_height, img_width} image
    def output Q(0:1)^{classes} predictions
    def conv(kernel, channels, stride=1, act=true){
        Convolution(kernel=(kernel,kernel), channels=channels, stride=(stride,stride)) ->
        BatchNorm() ->
        resLayer(channels = 8) ->    // recursive call back into resLayer (the intentional error)
        Relu(?=act)
    }
    def skip(channels, stride){
        Convolution(kernel=(1,1), channels=channels, stride=(stride,stride)) ->
        BatchNorm()
    }
    def resLayer(channels, stride=1){
        (
            conv(kernel=3, channels=channels, stride=stride) ->
            conv(kernel=3, channels=channels, stride=stride, act=false)
        |
            skip(channels=channels, stride=stride, ?=(stride!=1))
        ) ->
        Add() ->
        Relu()
    }
    image ->
    conv(kernel=7, channels=64, stride=2) ->
    Pooling(pool_type="max", kernel=(3,3), stride=(2,2)) ->
    resLayer(channels=64, ->=3) ->
    resLayer(channels=128, stride=2) ->
    resLayer(channels=128, ->=3) ->
    resLayer(channels=256, stride=2) ->
    resLayer(channels=256, ->=5) ->
    resLayer(channels=512, stride=2) ->
    resLayer(channels=512, ->=2) ->
    GlobalPooling(pool_type="avg") ->
    FullyConnected(units=classes) ->
    Softmax() ->
    predictions
}
/* Negative test fixture: required arguments are omitted — the Convolution in
   skip() has no 'kernel', the conv() call omits 'channels', and the Pooling call
   omits 'kernel'/'stride'. The missing-argument CoCo is expected to reject this. */
architecture MissingArgument(img_height=224, img_width=224, img_channels=3, classes=1000){
    def input Z(0:255)^{img_channels, img_height, img_width} image
    def output Q(0:1)^{classes} predictions
    def conv(kernel, channels, stride=1, act=true){
        Convolution(kernel=(kernel,kernel), channels=channels, stride=(stride,stride)) ->
        BatchNorm() ->
        Relu(?=act)
    }
    def skip(channels, stride){
        Convolution(channels=96, stride=(stride,stride)) ->    // missing required 'kernel' (intentional)
        BatchNorm()
    }
    def resLayer(channels, stride=1){
        (
            conv(kernel=3, channels=channels, stride=stride) ->
            conv(kernel=3, channels=channels, stride=stride, act=false)
        |
            skip(channels=channels, stride=stride, ?=(stride!=1))
        ) ->
        Add() ->
        Relu()
    }
    image ->
    conv(kernel=7, stride=2) ->            // missing required 'channels' (intentional)
    Pooling(pool_type="max") ->            // missing 'kernel'/'stride' (intentional)
    resLayer(channels=64, ->=3) ->
    resLayer(channels=128, stride=2) ->
    GlobalPooling(pool_type="avg") ->
    FullyConnected(units=classes) ->
    Softmax() ->
    predictions
}
\ No newline at end of file
/* Negative test fixture: 'in1' and 'out1' are arrays of size 2, but only index 0
   of each is connected — in1[1] and out1[1] are left unused, which the missing-IO
   CoCo is expected to reject. */
architecture MissingIO2(inputs=10, classes=2){
    def input Q(-oo:+oo)^{inputs} in1[2]
    def output Q(0:1)^{classes} out1[2]
    in1[0] ->
    FullyConnected(units=64, no_bias=true) ->
    Tanh() ->
    FullyConnected(units=classes, no_bias=true) ->
    Softmax() ->
    out1[0]
}
\ No newline at end of file
/* Negative test fixture: the '->' stream operator is missing after Tanh() and
   after the second FullyConnected — the parser/CoCo is expected to reject the
   disconnected layers. */
architecture MissingLayerOperator(){
    def input Q(-oo:+oo)^{10} in1
    def output Q(0:1)^{2} out1
    in1 ->
    FullyConnected(units=64, no_bias=true) ->
    Tanh()                                   // missing '->' here (intentional)
    FullyConnected(units=2, no_bias=true)    // and here (intentional)
    Softmax() ->
    out1
}
\ No newline at end of file
/* Negative test fixture: the outer parallel group contains two nested parallel
   groups that are never merged before the single Add() — presumably Add() cannot
   merge the resulting four streams, which the merge CoCo is expected to reject. */
architecture MissingMerge(inputs=10, classes=2){
    def input Q(-oo:+oo)^{inputs} in1
    def output Q(0:1)^{classes} out1
    in1 ->
    (
        (
            FullyConnected(units=16)
        |
            FullyConnected(units=16)
        )
    |
        (
            FullyConnected(units=16)
        |
            FullyConnected(units=16)
        )
    ) ->
    Add() ->
    Tanh() ->
    FullyConnected(units=classes) ->
    Softmax() ->
    out1
}
\ No newline at end of file
/* Negative test fixture: the '|' parallel operator is used without the required
   enclosing parentheses around the parallel groups — the parser/CoCo is expected
   to reject the unbracketed parallelization. */
architecture MissingParallelBrackets(img_height=224, img_width=224, img_channels=3, classes=10){
    def input Z(0:255)^{img_channels, img_height, img_width} image
    def output Q(0:1)^{classes} predictions
    def conv(kernel, channels, hasPool=true, convStride=(1,1)){
        Convolution(kernel=kernel, channels=channels, stride=convStride) ->
        Relu() ->
        Pooling(pool_type="max", kernel=(3,3), stride=(2,2), ?=hasPool)
    }
    def fc(){
        FullyConnected(units=4096) ->
        Relu() ->
        Dropout()
    }
    image ->
    conv(kernel=(11,11), channels=96, convStride=(4,4)) ->
    Lrn(nsize=5, alpha=0.0001, beta=0.75) ->
    SplitData(index=0, n=2) ->
    conv(kernel=(5,5), channels=128) ->
    Lrn(nsize=5, alpha=0.0001, beta=0.75)
    |
    SplitData(index=1, n=2) ->
    conv(kernel=(5,5), channels=128) ->
    Lrn(nsize=5, alpha=0.0001, beta=0.75)
    ->
    conv(kernel=(3,3), channels=384 ,hasPool=false) ->
    SplitData(index=0, n=2) ->
    conv(kernel=(3,3), channels=192, hasPool=false) ->
    conv(kernel=(3,3), channels=128)
    |
    SplitData(index=1, n=2) ->
    conv(kernel=(3,3), channels=192, hasPool=false) ->
    conv(kernel=(3,3), channels=128)
    ->
    fc() ->
    fc() ->
    FullyConnected(units=classes) ->
    Softmax() ->
    predictions
}
\ No newline at end of file
/* Negative test fixture: 'in1' and 'out1' are plain (non-array) IO, but the
   stream indexes them as 'in1[1]' and 'out1[0]' — the array-access CoCo is
   expected to reject this. */
architecture NotIOArray(inputs=10, classes=2){
    def input Q(-oo:+oo)^{inputs} in1
    def output Q(0:1)^{classes} out1
    in1[1] ->    // indexing a non-array input (intentional error)
    FullyConnected(units=64, no_bias=true) ->
    Tanh() ->
    FullyConnected(units=classes, no_bias=true) ->
    Softmax() ->
    out1[0]      // indexing a non-array output (intentional error)
}
\ No newline at end of file
/* Negative test fixture: the second branch of the parallel group is empty
   (nothing between '|' and ')'), leaving the architecture unfinished — the
   parser/CoCo is expected to reject this. */
architecture UnfinishedArchitecture(inputs=10, classes=2){
    def input Q(-oo:+oo)^{inputs} in1
    def output Q(0:1)^{classes} out1
    in1 ->
    FullyConnected(units=64, no_bias=true) ->
    Tanh() ->
    (
        FullyConnected(units=classes, no_bias=true) ->
        Softmax() ->
        out1
    |
    )
}
\ No newline at end of file
/* Negative test fixture: 'in1' and 'out1' are used in the stream but never
   declared via 'def input'/'def output' — the unknown-IO CoCo is expected to
   reject this. */
architecture UnknownIO(){
    in1 ->
    FullyConnected(units=64) ->
    Tanh() ->
    FullyConnected(units=10) ->
    Softmax() ->
    out1
}
\ No newline at end of file
/* Negative test fixture: 'FllyConnected' is a typo for 'FullyConnected' — the
   unknown-layer/method CoCo is expected to reject the unresolved name. */
architecture UnknownMethod(inputs=10, classes=2){
    def input Q(-oo:+oo)^{inputs} in1
    def output Q(0:1)^{classes} out1
    in1 ->
    FllyConnected(units=64, no_bias=true) ->    // misspelled layer name (the intentional error)
    Tanh() ->
    FullyConnected(units=classes, no_bias=true) ->
    Softmax() ->
    out1
}
\ No newline at end of file
/* Negative test fixture: 'classes' is referenced in 'FullyConnected(units=classes)'
   but is not an architecture parameter (only 'inputs' is declared) — the
   unknown-variable CoCo is expected to reject this. */
architecture UnknownVariableName(inputs=10){
    def input Q(-oo:+oo)^{inputs} in1
    def output Q(0:1)^{2} out1
    in1 ->
    FullyConnected(units=64) ->
    Tanh() ->
    FullyConnected(units=classes) ->    // 'classes' is undeclared (the intentional error)
    Softmax() ->
    out1
}
\ No newline at end of file
// NOTE(review): stray '<' found here — looks like diff/concatenation residue, not fixture content
/* Negative test fixture: unknown argument names — 'bias' (vs. 'no_bias'), 'asd'
   on Tanh, and 'unit' (vs. 'units') — the wrong-argument CoCo is expected to
   reject each of them. */
architecture WrongArgument(inputs=10, classes=2){
    def input Q(-oo:+oo)^{inputs} in1
    def output Q(0:1)^{classes} out1
    in1 ->
    FullyConnected(units=64, bias=true) ->    // unknown argument 'bias' (intentional)
    Tanh(asd=1) ->                            // unknown argument 'asd' (intentional)
    FullyConnected(unit=classes) ->           // 'unit' instead of 'units' (intentional)
    Softmax() ->
    out1
}