Commit 1b5f065c authored by Svetlana Pavlitskaya's avatar Svetlana Pavlitskaya

Dpnet code generation using EMADL2CPP jar.

parent 185a79a9
target/
output/
generated/
out/
.idea/
.git
*.iml
#!/usr/bin/env bash
# Generates C++ code for the Dpnet model using the EMADL2CPP generator jar.
# Fails fast on any error instead of silently exiting 0 (original behavior:
# a missing jar or a java failure still reported success to the caller).
set -euo pipefail

# NOTE(review): version must stay in sync with the dependency in pom.xml.
JAR="embedded-montiarc-emadl-generator-0.2.1-SNAPSHOT-jar-with-dependencies.jar"

if [[ ! -f "$JAR" ]]; then
    echo "Error: generator jar '$JAR' not found in $(pwd)" >&2
    exit 1
fi

echo "Generating files.."
java -jar "$JAR" -m src/models -r Dpnet -o generated
<?xml version="1.0" encoding="UTF-8"?>
<!-- Maven build for the CNN model training code generation project.
     Compiles the small driver class (CNNCodeGenerator) that invokes the
     EMADL generator programmatically. -->
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>de.monticore.lang.monticar</groupId>
<artifactId>cnn-model-training</artifactId>
<version>1.0-SNAPSHOT</version>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.3</version>
<configuration>
<useIncrementalCompilation>true</useIncrementalCompilation>
<!-- Project targets Java 8. -->
<source>1.8</source>
<target>1.8</target>
</configuration>
</plugin>
</plugins>
</build>
<dependencies>
<dependency>
<!-- Provides EMADLGenerator used by CNNCodeGenerator; keep this version
     in sync with the standalone jar invoked by the generation script. -->
<groupId>de.monticore.lang.monticar</groupId>
<artifactId>embedded-montiarc-emadl-generator</artifactId>
<version>0.2.1-SNAPSHOT</version>
</dependency>
</dependencies>
</project>
package de.monticore.lang.monticar.torcs_dl;
import de.monticore.lang.monticar.emadl.generator.EMADLGenerator;
import freemarker.template.TemplateException;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
/**
 * Command-line driver that invokes the EMADL generator to produce CNN
 * training code for the {@code Dpnet} root model.
 */
public class CNNCodeGenerator {
    /** Directory the generator writes its generated sources into. */
    public static final String TARGET_PATH_GENERATED = "./target/generated-sources-cnn-model-training/";
    /** Root directory containing the EMADL model files. */
    public static final String MODELS_PATH = "src/main/models/";

    /**
     * Runs code generation for the {@code Dpnet} model.
     *
     * <p>Exits with a non-zero status on failure so that build scripts and CI
     * observe the error (previously the process still exited 0 after merely
     * printing the stack trace).
     *
     * @param args command-line arguments (unused)
     */
    public static void main(String[] args) {
        System.out.println("Starting code generation...");
        EMADLGenerator gen = new EMADLGenerator();
        gen.setGenerationTargetPath(TARGET_PATH_GENERATED);
        try {
            gen.generate(MODELS_PATH, "Dpnet");
        } catch (IOException | TemplateException e) {
            // Report the failure and exit non-zero instead of swallowing it.
            e.printStackTrace();
            System.exit(1);
        }
    }
}
......@@ -2,10 +2,13 @@ configuration Dpnet{
num_epoch : 100
batch_size : 64
context:cpu
normalize: false
normalize: true
optimizer : sgd{
learning_rate : 0.01
weight_decay : 0.0005
// reduce the learning rate starting from 0.01 every 8000 iterations by a factor of 0.9 (decrease by 10%)
learning_rate_decay: 0.9
step_size: 8000
learning_rate_minimum: 0.01
}
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment