Commit 9e4a0b62 authored by Evgeny Kusmenko's avatar Evgeny Kusmenko

Merge branch 'tensorflow_group' into 'master'

Tensorflow group

See merge request !28
parents e87ae34f 2c9e2402
Pipeline #187938 failed with stages
in 8 minutes and 5 seconds
......@@ -37,6 +37,12 @@ integrationGluonJobLinux:
script:
- mvn -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn -B clean install --settings settings.xml -Dtest=IntegrationGluonTest
integrationTensorflowJobLinux:
stage: linux
image: registry.git.rwth-aachen.de/monticore/embeddedmontiarc/generators/emadl2cpp/integrationtests/tensorflow
script:
- mvn -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn -B clean install --settings settings.xml -Dtest=IntegrationTensorflowTest
integrationPythonWrapperTest:
stage: linux
......
<!-- (c) https://github.com/MontiCore/monticore -->
![pipeline](https://git.rwth-aachen.de/monticore/EmbeddedMontiArc/generators/EMADL2CPP/badges/master/build.svg)
![coverage](https://git.rwth-aachen.de/monticore/EmbeddedMontiArc/generators/EMADL2CPP/badges/master/coverage.svg)
# EMADL2CPP
Generates CPP/Python code for EmbeddedMontiArcDL.
See example project [EMADL-Demo](https://git.rwth-aachen.de/thomas.timmermanns/EMADL-Demo) for more information on how the generated code can be used.
......@@ -20,7 +21,25 @@ See example project [EMADL-Demo](https://git.rwth-aachen.de/thomas.timmermanns/E
* Caffe2
* training - generated is Python code. Follow [ official instructions on Caffe2 site ](https://caffe2.ai/docs/getting-started.html?platform=ubuntu&configuration=prebuilt)
* Gluon
* See the scripts under Installation for better instructions, as an old Caffe version is used that needs special considerations.
* Gluon
* Tensorflow
* training - generated is Python code.
* prediction - generated code is C++.
## Installation
The two bash scripts found under [installation scripts](https://git.rwth-aachen.de/monticore/EmbeddedMontiArc/generators/EMADL2CPP/tree/tensorflow_group/src/main/resources/installation_scripts)
should build and install all prerequisites for all backends as of 26.09.2019.
Note that the installation may take some time (hours) and you will need some disk space (> 60GB) for all backends. Also enough RAM or a big
enough swap space is advisable (>10GB) for the installation of the C++ part of TensorFlow. These scripts were tested with a completely clean Ubuntu 16.04,
without system updates installed. Using another Ubuntu version, or installing other software (system updates included), might cause problems.
If you want to install the backends with CUDA GPU support(only MXNet/Gluon and Tensorflow, the used caffe2 version does not work with GPU support anymore),
you have to install CUDA 10.0(!!), CUDNN and NCCL (obtainable from the NVIDIA webpage; you can follow their instructions) in between the two scripts.
Furthermore you will have to change the pip commands for mxnet and tensorflow to the respective commented out parts.
Also docker images for the cpu version of each backend are provided at [Docker images](https://git.rwth-aachen.de/monticore/EmbeddedMontiArc/generators/EMADL2CPP/tree/tensorflow_group/src/test/resources/docker),
though some of these might be outdated.
### HowTo
1. Define a EMADL component containing architecture of a neural network and save it in a `.emadl` file. For more information on architecture language please refer to [CNNArchLang project](https://git.rwth-aachen.de/monticore/EmbeddedMontiArc/languages/CNNArchLang). An example of NN architecture:
......
This diff is collapsed.
This diff is collapsed.
......@@ -22,6 +22,7 @@
<cnnarch-mxnet-generator.version>0.2.17-SNAPSHOT</cnnarch-mxnet-generator.version>
<cnnarch-caffe2-generator.version>0.2.13-SNAPSHOT</cnnarch-caffe2-generator.version>
<cnnarch-gluon-generator.version>0.2.8-SNAPSHOT</cnnarch-gluon-generator.version>
<cnnarch-tensorflow-generator.version>0.1.0-SNAPSHOT</cnnarch-tensorflow-generator.version>
<embedded-montiarc-math-opt-generator>0.1.4</embedded-montiarc-math-opt-generator>
<!-- .. Libraries .................................................. -->
......@@ -81,11 +82,17 @@
<version>${cnnarch-mxnet-generator.version}</version>
</dependency>
<dependency>
<dependency>
<groupId>de.monticore.lang.monticar</groupId>
<artifactId>cnnarch-gluon-generator</artifactId>
<version>${cnnarch-gluon-generator.version}</version>
</dependency>
<dependency>
<groupId>de.monticore.lang.monticar</groupId>
<artifactId>cnnarch-tensorflow-generator</artifactId>
<version>${cnnarch-tensorflow-generator.version}</version>
</dependency>
<dependency>
<groupId>de.monticore.lang.monticar</groupId>
......
/* (c) https://github.com/MontiCore/monticore */
package de.monticore.lang.monticar.emadl.generator;
import de.monticore.lang.monticar.cnnarch.generator.CNNArchGenerator;
import de.monticore.lang.monticar.cnnarch.generator.CNNTrainGenerator;
import de.monticore.lang.monticar.cnnarch.gluongenerator.CNNArch2Gluon;
import de.monticore.lang.monticar.cnnarch.gluongenerator.CNNTrain2Gluon;
import de.monticore.lang.monticar.cnnarch.mxnetgenerator.CNNArch2MxNet;
import de.monticore.lang.monticar.cnnarch.caffe2generator.CNNArch2Caffe2;
import de.monticore.lang.monticar.cnnarch.mxnetgenerator.CNNTrain2MxNet;
import de.monticore.lang.monticar.cnnarch.caffe2generator.CNNArch2Caffe2;
import de.monticore.lang.monticar.cnnarch.caffe2generator.CNNTrain2Caffe2;
import de.monticore.lang.monticar.emadl.generator.reinforcementlearning.RewardFunctionCppGenerator;
import de.monticore.lang.monticar.cnnarch.tensorflowgenerator.CNNArch2Tensorflow;
import de.monticore.lang.monticar.cnnarch.tensorflowgenerator.CNNTrain2Tensorflow;
import java.util.Optional;
......@@ -43,9 +44,18 @@ public enum Backend {
@Override
public CNNTrainGenerator getCNNTrainGenerator() {
return new CNNTrain2Gluon(new RewardFunctionCppGenerator());
}
},
TENSORFLOW{
@Override
public CNNArchGenerator getCNNArchGenerator() {
return new CNNArch2Tensorflow();
}
@Override
public CNNTrainGenerator getCNNTrainGenerator() {
return new CNNTrain2Tensorflow();
}
};
public abstract CNNArchGenerator getCNNArchGenerator();
public abstract CNNTrainGenerator getCNNTrainGenerator();
......@@ -56,9 +66,12 @@ public enum Backend {
case "CAFFE2":
return Optional.of(CAFFE2);
case "GLUON":
return Optional.of(GLUON);
case "TENSORFLOW":
return Optional.of(TENSORFLOW);
default:
return Optional.empty();
......@@ -69,8 +82,13 @@ public enum Backend {
switch (backend){
case CAFFE2:
return "CAFFE2";
case GLUON:
return "GLUON";
case TENSORFLOW:
return "TENSORFLOW";
default:
return "MXNET";
}
......
......@@ -44,6 +44,8 @@ import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.*;
import java.io.File;
public class EMADLGenerator {
private GeneratorEMAMOpt2CPP emamGen;
......@@ -181,6 +183,15 @@ public class EMADLGenerator {
}
}
/**
 * Returns a cheap change-detection token for a (potentially large) file.
 * Instead of hashing the full contents (too slow for multi-GB training data),
 * the file's last-modified timestamp is used as a proxy "checksum".
 *
 * @param filePath path of the file to fingerprint
 * @return the last-modified time in milliseconds as a string; note that
 *         File.lastModified() yields 0L for a missing/unreadable file, so a
 *         non-existent file produces "0" rather than an error
 * @throws IOException declared for interface compatibility with other
 *         checksum helpers; not actually thrown by this implementation
 */
public String getChecksumForLargerFile(String filePath) throws IOException {
    try {
        // lastModified() does not throw for missing files; only a
        // SecurityException (caught below) can realistically occur here.
        return String.valueOf(new File(filePath).lastModified());
    } catch (Exception e) {
        // Keep the sentinel value so the caller still gets a usable hash
        // component instead of aborting the whole generation run.
        e.printStackTrace();
        return "Exception_calculating_hash_large_file";
    }
}
public void generateFiles(TaggingResolver taggingResolver, EMAComponentInstanceSymbol EMAComponentSymbol, Scope symtab, String pythonPath, String forced) throws IOException {
Set<EMAComponentInstanceSymbol> allInstances = new HashSet<>();
List<FileContent> fileContents = generateStrings(taggingResolver, EMAComponentSymbol, symtab, allInstances, forced);
......@@ -220,15 +231,16 @@ public class EMADLGenerator {
String b = backend.getBackendString(backend);
String trainingDataHash = "";
String testDataHash = "";
if (architecture.get().getDataPath() != null) {
if (b.equals("CAFFE2")) {
trainingDataHash = getChecksumForFile(architecture.get().getDataPath() + "/train_lmdb/data.mdb");
testDataHash = getChecksumForFile(architecture.get().getDataPath() + "/test_lmdb/data.mdb");
} else {
trainingDataHash = getChecksumForFile(architecture.get().getDataPath() + "/train.h5");
testDataHash = getChecksumForFile(architecture.get().getDataPath() + "/test.h5");
}
}
trainingDataHash = getChecksumForLargerFile(architecture.get().getDataPath() + "/train_lmdb/data.mdb");
testDataHash = getChecksumForLargerFile(architecture.get().getDataPath() + "/test_lmdb/data.mdb");
}else{
trainingDataHash = getChecksumForLargerFile(architecture.get().getDataPath() + "/train.h5");
testDataHash = getChecksumForLargerFile(architecture.get().getDataPath() + "/test.h5");
}
}
String trainingHash = emadlHash + "#" + cnntHash + "#" + trainingDataHash + "#" + testDataHash;
boolean alreadyTrained = newHashes.contains(trainingHash) || isAlreadyTrained(trainingHash, componentInstance);
......
......@@ -31,9 +31,10 @@ public class EMADLGeneratorCli {
.hasArg(true)
.required(false)
.build();
public static final Option OPTION_BACKEND = Option.builder("b")
.longOpt("backend")
.desc("deep-learning-framework backend. Options: MXNET, CAFFE2, GLUON")
.desc("deep-learning-framework backend. Options: MXNET, CAFFE2, GLUON, TENSORFLOW")
.hasArg(true)
.required(false)
.build();
......
#!/bin/bash
############################ Part 2/2 ######################################
# This script should help you to get ready to do machine learning #
# at SE at RWTH, installing all backends (mxnet/gluon, tensorflow, caffe2. #
# - Jonas Eckhardt & Julian Steinsberger-Dührßen - #
############################################################################
# It is based both on instructions and docker files from the git
# repos as well as our own work
############################################################################
# Requirements: #
############################################################################
# -Git set up to work with RWTH Gitlab
# -Virgin Ubuntu 16.04 (no touching, no other version) and WITHOUT installing the system updates
# -Time: Installation may need more than one hour. It may prompt you
# for your password multiple times
# -Disable Screensaver: It just saves you some time
# - > 10GB RAM or a swapfile so you have ram+swap > 10GB
# - > 60GB disk space
###########################################################################
# Usage: #
###########################################################################
# chmod u+x installer_bomb.sh
# ./installer_bomb.sh
# Don't use twice. It will fill your ~/.bashrc with garbage.
# CUDA command line tools are needed for the (optional) GPU builds below.
sudo apt install -y --no-install-recommends cuda-command-line-tools-10-0
# Install MXNet (Python package plus C++ headers for the generated predictors)
pip install mxnet --user
# for cuda support: use mxnet-cu100 --user instead
git clone https://github.com/apache/incubator-mxnet.git mxnet-source
cd mxnet-source
git checkout tags/1.4.0
cd ..
sudo cp -r mxnet-source/include/mxnet /usr/include/mxnet
# Smoke-test the MXNet install by building the mnist example application
git clone git@git.rwth-aachen.de:monticore/EmbeddedMontiArc/applications/mnistcalculator.git
cd mnistcalculator
cd mxnet
./build.sh
cd ..
cd ..
# Install OpenCV 3.3.1 (with contrib modules) from source
git clone https://github.com/opencv/opencv.git
cd opencv
git checkout 3.3.1
cd ..
git clone https://github.com/opencv/opencv_contrib.git
cd opencv_contrib
git checkout 3.3.1
cd ..
cd opencv
mkdir build
cd build
cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local \
-D INSTALL_C_EXAMPLES=ON \
-D INSTALL_PYTHON_EXAMPLES=ON \
-D WITH_TBB=ON \
-D WITH_V4L=ON \
-D WITH_QT=ON \
-D WITH_OPENGL=ON \
-D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules \
-D BUILD_EXAMPLES=ON ..
make -j$(nproc)
sudo make install
sudo sh -c 'echo "/usr/local/lib" >> /etc/ld.so.conf.d/opencv.conf'
sudo ldconfig
# Install TensorFlow (Python package plus the shared C++ library via tensorflow_cc)
pip install --upgrade tensorflow --user
# for cuda support: pip install --upgrade tensorflow-gpu --user
git clone https://github.com/FloopCZ/tensorflow_cc.git
# Install bazel (required to build the TensorFlow C++ library)
sudo wget -P /tmp https://github.com/bazelbuild/bazel/releases/download/0.21.0/bazel-0.21.0-installer-linux-x86_64.sh
sudo chmod +x /tmp/bazel-0.21.0-installer-linux-x86_64.sh
sudo /tmp/bazel-0.21.0-installer-linux-x86_64.sh
sudo rm /tmp/bazel-0.21.0-installer-linux-x86_64.sh
sudo apt-get -y clean
sudo updatedb
cd tensorflow_cc/tensorflow_cc/
mkdir build
cd build
cmake -DTENSORFLOW_STATIC=OFF -DTENSORFLOW_SHARED=ON ..
make
sudo rm -rf /home/tensorflow_cc/.cache
make install
cd ..
sudo rm -rf build
\ No newline at end of file
#!/bin/bash
############################ Part 1/2 ######################################
# This script should help you to get ready to do machine learning #
# at SE at RWTH, installing all backends (mxnet/gluon, tensorflow, caffe2. #
# - Jonas Eckhardt & Julian Steinsberger-Dührßen - #
############################################################################
# It is based both on instructions and docker files from the git
# repos as well as our own work
############################################################################
# Requirements: #
############################################################################
# -Git set up to work with RWTH Gitlab
# -Virgin Ubuntu 16.04 (no touching, no other version) and WITHOUT installing the system updates
# -Time: Installation may need more than one hour. It may prompt you
# for your password multiple times
# -Disable Screensaver: It just saves you some time
# - > 10GB RAM or a swapfile so you have ram+swap > 10GB
# - > 60GB disk space
###########################################################################
# Usage: #
###########################################################################
# chmod u+x installer_bomb.sh
# ./installer_bomb.sh
# Don't use twice. It will fill your ~/.bashrc with garbage.
sudo apt update
sudo apt upgrade -y
# System packages shared by all backends (toolchain, BLAS/LAPACK, OpenCV deps, ...)
sudo apt install -y --no-install-recommends maven libopencv-dev\
python-pip openjdk-8-jdk git wget python python3-numpy gcc build-essential cmake \
liblapack-dev libblas-dev libopenblas-dev libboost-dev libarmadillo-dev build-essential \
libgoogle-glog-dev libgtest-dev libiomp-dev libleveldb-dev liblmdb-dev \
libopenmpi-dev libsnappy-dev libprotobuf-dev openmpi-bin openmpi-doc python-numpy swig
sudo apt install -y --no-install-recommends protobuf-compiler python-dev python-setuptools \
libgflags-dev checkinstall pkg-config yasm gfortran libjpeg8-dev libjasper-dev libpng12-dev \
libtiff5-dev libavcodec-dev libavformat-dev libswscale-dev libdc1394-22-dev libxine2-dev \
libgstreamer-plugins-base0.10-dev qt5-default libgtk2.0-dev libtbb-dev libatlas-base-dev \
libfaac-dev libmp3lame-dev libtheora-dev libvorbis-dev libxvidcore-dev \
libopencore-amrnb-dev libopencore-amrwb-dev x264 v4l-utils libgphoto2-dev libeigen3-dev \
libhdf5-dev doxygen curl unzip autoconf autogen libtool mlocate zlib1g-dev g++ sudo zip automake
# Fix JDK version (select openjdk-8 interactively)
sudo update-alternatives --config java
pip install --upgrade pip --user
pip install numpy opencv-python six lmdb graphviz pyyaml future typing --user
pip install --no-deps h5py --user
# Prepare path variables (appended to ~/.bashrc; this is why the script must not run twice)
echo "export PATH=\$PATH:\"/usr/local/lib\"" >> ~/.bashrc
echo "export PYTHON_PATH=\"/usr/local:$(pwd)/pytorch/build:/usr/bin/python:/usr/local/lib\"">> ~/.bashrc
echo "export LD_LIBRARY_PATH=\"/usr/local/lib\"">> ~/.bashrc
source ~/.bashrc
# Install caffe2 (built from the pytorch v0.4.0 sources)
git clone --recursive https://github.com/pytorch/pytorch.git
cd pytorch
git checkout v0.4.0
git -c submodule.'third_party/nervanagpu'.update=none submodule update --init
sudo rm -r build
mkdir build
cd build
cmake -DUSE_MPI=OFF ..
sudo make clean install
cd ..
sudo FULL_CAFFE2=1 python setup.py clean install
sudo cp -r /usr/local/lib/libcaffe2.so /usr/lib/
cd ..
# Fix protobuf (caffe2 v0.4.0 needs exactly this version)
pip2 install protobuf==3.5.1 --user
sudo snap install --classic notepadqq
sudo snap install pycharm-community --classic
\ No newline at end of file
......@@ -139,6 +139,29 @@ public class GenerationTest extends AbstractSymtabTest {
"CNNTrainer_mnist_mnistClassifier_net.py"));
}
@Test
public void testMnistClassifierForTensorflow() throws IOException, TemplateException {
    // Run the generator CLI for the MNIST classifier with the TENSORFLOW
    // backend and verify the emitted sources match the reference files.
    Log.getFindings().clear();
    String[] cliArgs = {"-m", "src/test/resources/models/", "-r", "mnist.MnistClassifier", "-b", "TENSORFLOW", "-f", "n", "-c", "n"};
    EMADLGeneratorCli.main(cliArgs);
    assertTrue(Log.getFindings().isEmpty());

    // Every file the TensorFlow backend is expected to generate.
    String[] expectedFiles = {
            "mnist_mnistClassifier.cpp",
            "mnist_mnistClassifier.h",
            "CNNCreator_mnist_mnistClassifier_net.py",
            "CNNPredictor_mnist_mnistClassifier_net.h",
            "CNNDataLoader_mnist_mnistClassifier_net.py",
            "mnist_mnistClassifier_net.h",
            "HelperA.h",
            "CNNTranslator.h",
            "mnist_mnistClassifier_calculateClass.h",
            "CNNTrainer_mnist_mnistClassifier_net.py"
    };
    checkFilesAreEqual(
            Paths.get("./target/generated-sources-emadl"),
            Paths.get("./src/test/resources/target_code/tensorflow"),
            Arrays.asList(expectedFiles));
}
@Test
public void testMnistClassifierForGluon() throws IOException, TemplateException {
Log.getFindings().clear();
......@@ -181,7 +204,7 @@ public class GenerationTest extends AbstractSymtabTest {
EMADLGeneratorCli.main(args);
assertTrue(Log.getFindings().size() == 0);
}
@Test
public void testGluonReinforcementModelGymEnvironment() {
Log.getFindings().clear();
......@@ -215,7 +238,7 @@ public class GenerationTest extends AbstractSymtabTest {
)
);
}
@Test
public void testHashFunction() {
EMADLGenerator tester = new EMADLGenerator(Backend.MXNET);
......
/**
*
* ******************************************************************************
* MontiCAR Modeling Family, www.se-rwth.de
* Copyright (c) 2017, Software Engineering Group at RWTH Aachen,
* All rights reserved.
*
* This project is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3.0 of the License, or (at your option) any later version.
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this project. If not, see <http://www.gnu.org/licenses/>.
* *******************************************************************************
*/
package de.monticore.lang.monticar.emadl;
import de.monticore.lang.monticar.emadl.generator.EMADLGeneratorCli;
import de.se_rwth.commons.logging.Log;
import org.junit.Ignore;
import org.junit.Test;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.nio.file.Paths;
import static junit.framework.TestCase.assertTrue;
import static org.junit.Assert.assertFalse;
/**
 * Integration test for the TENSORFLOW backend. Reuses the scenarios of the
 * shared IntegrationTest base class, parameterized with the expected training
 * hash, and adds a multi-stream network generation test.
 */
public class IntegrationTensorflowTest extends IntegrationTest {
    // Hash file written by a previous training run; must be removed so the
    // generator does not skip training because the model looks up to date.
    private Path multipleStreamsHashFile = Paths.get("./target/generated-sources-emadl/MultipleStreams.training_hash");

    public IntegrationTensorflowTest() {
        super("TENSORFLOW", "39253EC049D4A4E5FA0536AD34874B9D#1DBAEE1B1BD83FB7CB5F70AE91B29638#C4C23549E737A759721D6694C75D9771#5AF0CE68E408E8C1F000E49D72AC214A");
    }

    @Test
    public void testMultipleStreams() {
        Log.getFindings().clear();
        deleteHashFile(multipleStreamsHashFile);
        String[] args = {"-m", "src/test/resources/models/", "-r", "MultipleStreams", "-b", "TENSORFLOW"};
        EMADLGeneratorCli.main(args);
        assertTrue(Log.getFindings().isEmpty());
    }

    /** Removes a stale training-hash file; a missing file is not an error. */
    private void deleteHashFile(Path hashFile) {
        try {
            // deleteIfExists covers the "no previous training run" case
            // without a separate NoSuchFileException catch block.
            Files.deleteIfExists(hashFile);
        } catch (Exception e) {
            // Fail the test explicitly (clearer than assertFalse(msg, true))
            // and preserve the original cause for diagnosis.
            throw new AssertionError("Could not delete hash file", e);
        }
    }
}
# Base image ships a prebuilt shared TensorFlow C++ library (tensorflow_cc).
FROM floopcz/tensorflow_cc:ubuntu-shared
# Install everything needed with apt (build toolchain, maven, OpenCV, Armadillo)
RUN apt-get update
# Avoid interactive tzdata prompts during apt-get install
ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get install -y --no-install-recommends \
tzdata \
git \
wget python gcc \
build-essential cmake \
maven unzip\
python3 python3-pip python-dev python-setuptools python python-pip\
libopencv-dev libarmadillo-dev
# Fix Java version: drop whatever JDK the base image carries, pin JDK 8
RUN apt remove -y *openjdk*
RUN apt-get install -y --no-install-recommends openjdk-8-jdk
# Install Python packages for python and python3 (TF 1.13.1 matches the C++ lib)
RUN pip install tensorflow==1.13.1
RUN pip3 install opencv-python h5py numpy matplotlib scipy Pillow
......@@ -5,8 +5,8 @@ component MultipleStreams{
implementation CNN {
data[0] ->
Convolution(kernel=(5,5), channels=8, padding="valid") ->
Convolution(kernel=(5,5), channels=8, padding="valid") ->
Convolution(kernel=(5,5), channels=8) ->
Convolution(kernel=(5,5), channels=8) ->
FullyConnected(units=128) ->
Dropout()->
FullyConnected(units=10) ->
......@@ -14,8 +14,8 @@ component MultipleStreams{
softmax[0];
data[1] ->
Convolution(kernel=(5,5), channels=8, padding="valid") ->
Convolution(kernel=(5,5), channels=8, padding="valid") ->
Convolution(kernel=(5,5), channels=8) ->
Convolution(kernel=(5,5), channels=8) ->
FullyConnected(units=128) ->
Dropout()->
FullyConnected(units=10) ->
......
......@@ -7,10 +7,10 @@ component Network{
implementation CNN {
image ->
Convolution(kernel=(5,5), channels=20, padding="valid") ->
Pooling(pool_type="max", kernel=(2,2), stride=(2,2), padding="valid") ->
Convolution(kernel=(5,5), channels=50, padding="valid") ->
Pooling(pool_type="max", kernel=(2,2), stride=(2,2), padding="valid") ->
Convolution(kernel=(5,5), channels=20) ->
Pooling(pool_type="max", kernel=(2,2), stride=(2,2)) ->
Convolution(kernel=(5,5), channels=50) ->
Pooling(pool_type="max", kernel=(2,2), stride=(2,2)) ->
FullyConnected(units=500) ->
Relu() ->
FullyConnected(units=10) ->
......
......@@ -7,8 +7,8 @@ component CifarNetwork<Z(2:oo) classes = 10>{
implementation CNN {
data ->
Convolution(kernel=(5,5), channels=8, padding="valid") ->
Convolution(kernel=(5,5), channels=8, padding="valid") ->
Convolution(kernel=(5,5), channels=8) ->
Convolution(kernel=(5,5), channels=8) ->
FullyConnected(units=128) ->
Dropout()->
FullyConnected(units=classes) ->
......
......@@ -7,10 +7,10 @@ component LeNetNetwork<Z(2:oo) classes = 10>{
implementation CNN {
image ->
Convolution(kernel=(5,5), channels=20, padding="valid") ->
Pooling(pool_type="max", kernel=(2,2), stride=(2,2), padding="valid") ->
Convolution(kernel=(5,5), channels=50, padding="valid") ->
Pooling(pool_type="max", kernel=(2,2), stride=(2,2), padding="valid") ->
Convolution(kernel=(5,5), channels=20) ->
Pooling(pool_type="max", kernel=(2,2), stride=(2,2)) ->
Convolution(kernel=(5,5), channels=50) ->
Pooling(pool_type="max", kernel=(2,2), stride=(2,2)) ->
FullyConnected(units=500) ->
Relu() ->
FullyConnected(units=classes) ->
......
......@@ -7,8 +7,8 @@ component CifarNetwork<Z(2:oo) classes = 10>{
implementation CNN {
data ->
Convolution(kernel=(5,5), channels=8, padding="valid") ->
Convolution(kernel=(5,5), channels=8, padding="valid") ->
Convolution(kernel=(5,5), channels=8) ->
Convolution(kernel=(5,5), channels=8) ->
FullyConnected(units=128) ->
Dropout()->
FullyConnected(units=classes) ->
......
/* (c) https://github.com/MontiCore/monticore */
#ifndef CNNBUFFERFILE_H
#define CNNBUFFERFILE_H
......
# (c) https://github.com/MontiCore/monticore
import mxnet as mx
import logging
import os
......
# (c) https://github.com/MontiCore/monticore
from caffe2.python import workspace, core, model_helper, brew, optimizer
from caffe2.python.predictor import mobile_exporter
from caffe2.proto import caffe2_pb2
......@@ -75,15 +74,15 @@ class CNNCreator_mnist_mnistClassifier_net:
image_ = data
# image_, output shape: {[1,28,28]}
conv1_ = brew.conv(model, image_, 'conv1_', dim_in=1, dim_out=20, kernel=5, stride=1)
# conv1_, output shape: {[20,24,24]}
pool1_ = brew.max_pool(model, conv1_, 'pool1_', kernel=2, stride=2)
# pool1_, output shape: {[20,12,12]}
conv2_ = brew.conv(model, pool1_, 'conv2_', dim_in=20, dim_out=50, kernel=5, stride=1)
# conv2_, output shape: {[50,8,8]}
pool2_ = brew.max_pool(model, conv2_, 'pool2_', kernel=2, stride=2)
# pool2_, output shape: {[50,4,4]}
fc2_ = brew.fc(model, pool2_, 'fc2_', dim_in=50 * 4 * 4, dim_out=500)
conv1_ = brew.conv(model, image_, 'conv1_', dim_in=1, dim_out=20, kernel=5, stride=1, pad=1)
# conv1_, output shape: {[20,28,28]}
pool1_ = brew.max_pool(model, conv1_, 'pool1_', kernel=2, stride=2, pad=1)
# pool1_, output shape: {[20,14,14]}
conv2_ = brew.conv(model, pool1_, 'conv2_', dim_in=20, dim_out=50, kernel=5, stride=1, pad=1)
# conv2_, output shape: {[50,14,14]}
pool2_ = brew.max_pool(model, conv2_, 'pool2_', kernel=2, stride=2, pad=1)
# pool2_, output shape: {[50,7,7]}