Commit acd16e48 authored by Jonas Ritz's avatar Jonas Ritz
Browse files

Merge branch 'ba_weber' into 'master'

BA Weber

See merge request !25
parents a5aa6050 4e7b81e8
Pipeline #901976 failed with stages
in 20 minutes and 38 seconds
.idea
emadl-maven-plugin/input
emadl-maven-plugin/output
# (c) https://github.com/MontiCore/monticore
stages:
- linux
- prebuild
- test
- build
#TestMXNET:
# stage: linux
# stage: build
# image: registry.git.rwth-aachen.de/monticore/embeddedmontiarc/applications/mnistcalculator/mxnet:v0.0.5
# script:
# script:
# - cd mxnet
# - chmod +x build.sh
# - ./build.sh
# - RES=$(./build/src/cpp/DigitCalculator resources/images/1.png resources/images/2.png resources/images/3.png resources/images/4.png resources/images/5.png resources/images/6.png)
# - "if [[ $RES != *\"SUM: 579\"* ]]; then echo \"Wrong result:\" $RES; exit 1; fi;"
#TestCAFFE2:
# stage: linux
# stage: build
# image: registry.git.rwth-aachen.de/monticore/embeddedmontiarc/applications/mnistcalculator/caffe2:v0.0.2
# script:
# script:
# - cd caffe2
# - chmod +x build.sh
# - ./build.sh
......@@ -25,76 +26,89 @@ stages:
# - "if [[ $RES != *\"SUM: 857\"* ]]; then echo \"Wrong result:\" $RES; exit 1; fi;"
# Build and run the Gluon/C++ MNIST calculator, then verify the computed sum.
# (Diff residue removed: the job had duplicate `stage:` keys and a duplicated
# `script:` list, which is invalid YAML — duplicate keys are last-wins at best.)
TestGLUON:
  stage: test
  image: registry.git.rwth-aachen.de/monticore/embeddedmontiarc/applications/mnistcalculator/mxnet:v0.0.5
  when: always
  script:
    - cd gluon-cpp
    - chmod +x build.sh
    - ./build.sh
    - RES=$(./build/src/cpp/DigitCalculator resources/images/1.png resources/images/2.png resources/images/3.png resources/images/4.png resources/images/5.png resources/images/6.png)
    # Single quotes keep the inner double quotes literal for the bash test.
    - 'if [[ $RES != *"SUM: 579"* ]]; then echo "Wrong result:" $RES; exit 1; fi;'
# Build and run the TensorFlow MNIST calculator, then verify the computed sum.
# (Diff residue removed: duplicate `stage:` key and duplicated `script:` list.)
TestTENSORFLOW:
  stage: test
  image: registry.git.rwth-aachen.de/monticore/embeddedmontiarc/applications/mnistcalculator/tensorflow
  when: always
  script:
    - cd tensorflow
    - chmod +x build.sh
    - ./build.sh
    - RES=$(./build/src/cpp/DigitCalculator resources/images/1.png resources/images/2.png resources/images/3.png resources/images/4.png resources/images/5.png resources/images/6.png)
    - 'if [[ $RES != *"SUM: 579"* ]]; then echo "Wrong result:" $RES; exit 1; fi;'
# Streamtest build of the MNIST calculator against MXNet 1.7.0.
# (Diff residue removed: duplicate `stage:` key and duplicated `script:` list.)
TestMnistCalculator-II:
  stage: test
  image: registry.git.rwth-aachen.de/monticore/embeddedmontiarc/generators/emadl2cpp/dockerimages/mxnet170:v0.0.1
  when: always
  # Known-flaky job: do not fail the pipeline on it.
  allow_failure: true
  script:
    # The base image ships libmxnet in /mxnet/build; the linker expects /usr/lib.
    - mv /mxnet/build/libmxnet.so /mxnet/build/libmxnet.a /usr/lib/
    - cd mnist-calculator
    - mvn streamtest:streamtest-build -s settings.xml
# Train via the EMADL Maven plugin, then run the calculator script and keep
# its output as a pipeline artifact.
# (Diff residue removed: duplicate `stage:` key and duplicated `script:` list.)
TestEMADLMavenPlugin:
  stage: test
  image: registry.git.rwth-aachen.de/monticore/embeddedmontiarc/generators/emadl2cpp/dockerimages/mxnet170:v0.0.1
  when: always
  script:
    # The base image ships libmxnet in /mxnet/build; the linker expects /usr/lib.
    - mv /mxnet/build/libmxnet.so /mxnet/build/libmxnet.a /usr/lib/
    - cd emadl-maven-plugin
    - python3 -m pip install -U pip
    - python3 -m pip install scikit-image
    - python3 -m pip install opencv-python
    - mvn dependency:resolve emadl:train -s settings.xml -U
    - mkdir output
    - python3 calculator.py
  artifacts:
    paths:
      - emadl-maven-plugin/output/*
    expire_in: 1 week
# Train the AdaNet MNIST model via the EMADL Maven plugin.
# (Diff residue removed: duplicate `stage:` key.)
TestMnistWithAdaNet:
  stage: test
  image: registry.git.rwth-aachen.de/monticore/embeddedmontiarc/generators/emadl2cpp/dockerimages/mxnet170:v0.0.1
  when: always
  script:
    # The base image ships libmxnet in /mxnet/build; the linker expects /usr/lib.
    - mv /mxnet/build/libmxnet.so /mxnet/build/libmxnet.a /usr/lib/
    - cd AdaNet
    - mvn dependency:resolve emadl:train -s settings.xml
# Train the MNIST model with a custom layer. The script first forces `python`
# to resolve to python3 (alias + diagnostics), then runs the Maven train goal.
# (Diff residue removed: duplicate `stage:` key and a fully duplicated
# `script:` list.)
TestMNISTwithCustomLayer:
  stage: test
  image: registry.git.rwth-aachen.de/monticore/embeddedmontiarc/generators/emadl2cpp/dockerimages/mxnet170:v0.0.1
  when: always
  script:
    # Aliases are not expanded in non-interactive shells unless enabled.
    - shopt -s expand_aliases
    - echo 'alias python='\''/usr/bin/python3'\''' >> ~/.bashrc
    - . ~/.bashrc
    - cat ~/.bashrc
    # Diagnostics: confirm which interpreter `python` resolves to.
    - echo $PYTHONPATH
    - python --version
    - python3 --version
    - which python
    - which python3
    - alias python=/usr/bin/python3
    - python --version
    - python3 --version
    - which python
    - type -a python
    # The base image ships libmxnet in /mxnet/build; the linker expects /usr/lib.
    - mv /mxnet/build/libmxnet.so /mxnet/build/libmxnet.a /usr/lib/
    - cd mnist-custom-layer
    - mvn dependency:resolve emadl:train -s settings.xml
TrainTensorflowONNX:
stage: linux
stage: test
image: registry.git.rwth-aachen.de/monticore/embeddedmontiarc/generators/emadl2cpp/dockerimages/tensorflow-onnx:latest
artifacts:
paths:
......@@ -104,13 +118,49 @@ TrainTensorflowONNX:
- cd onnx/tensorflow-pretrained
- mvn dependency:resolve emadl:train -s settings.xml
# Reusable shell fragment (YAML anchor, consumed via `- *docker`): log in to
# the GitLab registry, push the branch-tagged image and, on master, re-tag it
# as `latest` and push that too.
# NOTE(review): the body of the folded scalar is indented one level deeper than
# its first line so each docker command keeps its own literal newline — folding
# them onto one line would produce invalid shell (no separators before `fi`).
.docker: &docker
  - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
  - docker push $CI_REGISTRY_IMAGE/$IMAGE_NAME:$CI_COMMIT_REF_NAME
  - >
    if [ "$CI_COMMIT_REF_NAME" == "master" ];
    then
      docker tag $CI_REGISTRY_IMAGE/$IMAGE_NAME:$CI_COMMIT_REF_NAME $CI_REGISTRY_IMAGE/$IMAGE_NAME:latest
      docker push $CI_REGISTRY_IMAGE/$IMAGE_NAME:latest
    fi
# Build and publish the base MXNet image, but only when the docker/base
# sources change.
base/mxnet:
  stage: prebuild
  image: docker:dind
  only:
    changes:
      - docker/base/*
  variables:
    IMAGE_NAME: base/mxnet
  script:
    - cd docker/base
    - docker build -t $CI_REGISTRY_IMAGE/$IMAGE_NAME:$CI_COMMIT_REF_NAME .
    # Shared login/tag/push steps from the .docker anchor.
    - *docker
#mnistcalculator:
# stage: build
# image: docker:dind
# variables:
# IMAGE_NAME: mnistcalculator
# script:
# - cd emadl-maven-plugin
# - docker build -t $CI_REGISTRY_IMAGE/$IMAGE_NAME:$CI_COMMIT_REF_NAME --build-arg BASE_IMAGE=$CI_REGISTRY_IMAGE/base/mxnet --build-arg CI_JOB_TOKEN=${CI_JOB_TOKEN} .
# - *docker
TestGluonONNX:
stage: linux
stage: test
image: registry.git.rwth-aachen.de/monticore/embeddedmontiarc/generators/emadl2cpp/dockerimages/mxnet170-onnx:v0.0.1
needs:
- TrainTensorflowONNX
script:
- mv /mxnet/build/libmxnet.so /mxnet/build/libmxnet.a /usr/lib/
- python3 -m pip install -U pip
- python3 -m pip install scikit-image
- python3 -m pip install opencv-python
- cd onnx/tensorflow-pretrained
- python3 -m pip install -U pip
- python3 -m pip install scikit-image
......@@ -122,4 +172,4 @@ TestGluonONNX:
- chmod +x build.sh
- ./build.sh
- RES=$(./build/src/cpp/DigitCalculator resources/images/1.png resources/images/2.png resources/images/3.png resources/images/4.png resources/images/5.png resources/images/6.png)
- "if [[ $RES != *\"SUM: 579\"* ]]; then echo \"Wrong result:\" $RES; exit 1; fi;"
\ No newline at end of file
- "if [[ $RES != *\"SUM: 579\"* ]]; then echo \"Wrong result:\" $RES; exit 1; fi;"
.git
\ No newline at end of file
# Base image for EMADL/MXNet CI jobs: Ubuntu 18.04 with Python 3, JDK 8,
# Maven, CMake, OpenCV, OpenBLAS, Armadillo and MXNet 1.9.0 built from source.
FROM ubuntu:18.04
# Use bash for RUN so bash-only syntax (arrays, [!.] globs below) works.
SHELL ["/bin/bash", "-c"]
# Install Python, OpenJDK and build dependencies
RUN apt-get update && \
apt-get -y upgrade && \
apt-get -y install python3-pip openjdk-8-jdk g++ wget unzip gfortran maven git ninja-build liblapack-dev && \
rm -rf /var/lib/apt/lists/*
# Create aliases for python and pip
RUN ln -s $(which python3) /usr/bin/python && \
ln -s $(which pip3) /usr/bin/pip
# Change Java version to 8
RUN update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java
# Upgrade pip
RUN python3 -m pip install --upgrade pip
# Install CMake (The version in the package manager is too old...)
WORKDIR /opt
RUN wget https://github.com/Kitware/CMake/releases/download/v3.22.3/cmake-3.22.3-linux-x86_64.sh && \
chmod +x cmake-3.22.3-linux-x86_64.sh && \
mkdir cmake && \
./cmake-3.22.3-linux-x86_64.sh --prefix=cmake --skip-license && \
rm cmake-3.22.3-linux-x86_64.sh && \
ln -s /opt/cmake/bin/* /usr/bin
# Install OpenCV
# NOTE(review): "4.x" is a moving branch archive, so this layer is not
# reproducible across rebuilds — consider pinning a release tag.
WORKDIR /opt
RUN wget -O opencv.zip https://github.com/opencv/opencv/archive/4.x.zip && \
unzip opencv.zip && \
rm opencv.zip && \
mkdir -p opencv-4.x/build && \
cd opencv-4.x/build && \
cmake -GNinja .. && \
ninja && \
ninja install
ENV OpenCV_DIR=/opt/opencv-4.x/build/include
# Install OpenBLAS (also available in the package registry, but only an outdated version)
WORKDIR /opt
RUN wget -O openblas.zip https://sourceforge.net/projects/openblas/files/latest/download && \
unzip openblas.zip -d openblas && \
# Manually handle strip-components (unzip does not seem to have such an option)
dir=(/opt/openblas/*) && mv $dir/* $dir/.[!.]* /opt/openblas && rmdir $dir && \
rm openblas.zip
WORKDIR /opt/openblas
RUN make && make install && \
cp libopenblas.so.0 libopenblas.a libopenblas.so /usr/lib
# Install Armadillo
WORKDIR /opt
RUN wget -O armadillo.tar.xz https://sourceforge.net/projects/arma/files/latest/download && \
mkdir armadillo && \
tar xf armadillo.tar.xz -C armadillo --strip-components=1 && \
rm armadillo.tar.xz
WORKDIR /opt/armadillo
RUN cmake . && \
make install
# Install MXNet and H5Py
WORKDIR /opt
COPY mxnet.cmake /tmp/mxnet.cmake
# The pip wheel provides the Python bindings; the source build below provides
# the C++ libraries (libmxnet.so/.a) that the generated code links against.
RUN pip3 install mxnet h5py
RUN git clone --recursive https://github.com/apache/incubator-mxnet mxnet && \
cd mxnet && \
git checkout tags/1.9.0 && \
git submodule update --recursive --init && \
mv /tmp/mxnet.cmake config.cmake && \
mkdir build && \
cd build && \
cmake -GNinja .. && \
ninja && \
ninja install && \
cp libmxnet.so libmxnet.a /usr/lib
RUN pip install dgl matplotlib numpy torch
# DGL must use the MXNet backend to match the generated training code.
ENV DGLBACKEND=mxnet
ENV JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/jre
\ No newline at end of file
1. Build the image. This can take a long time.
```
docker build -t base/mxnet .
```
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#-------------------------------------------------------------------------------
# Template configuration for compiling MXNet
#
# If you want to change the configuration, please use the following steps.
# Assume you are on the root directory of mxnet. First copy this file so that
# any local changes will be ignored by git
#
# $ cp config/linux.cmake config.cmake
#
# Next modify the according entries, and then compile by
#
# $ mkdir build; cd build
# $ cmake ..
# $ cmake --build .
#
# Specify `cmake --build . --parallel N` to set the number of parallel compilation jobs.
# Default is derived from CPUs available.
#
#-------------------------------------------------------------------------------
#---------------------------------------------
# GPU support
#---------------------------------------------
#---------------------------------------------
# GPU support
#---------------------------------------------
set(USE_CUDA OFF CACHE BOOL "Build with CUDA support")
set(USE_CUDNN OFF CACHE BOOL "Build with cudnn support, if found")
# Target NVIDIA GPU architecture.
# Valid options are "Auto" for autodetection, "All" for all available
# architectures or a list of architectures by compute capability number, such as
# "7.0" or "7.0;7.5" as well as name, such as "Volta" or "Volta;Turing".
# The value specified here is passed to cmake's CUDA_SELECT_NVCC_ARCH_FLAGS to
# obtain the compilation flags for nvcc.
#
# When compiling on a machine without GPU, autodetection will fail and you
# should instead specify the target architecture manually to avoid excessive
# compilation times.
set(MXNET_CUDA_ARCH "Auto" CACHE STRING "Target NVIDIA GPU architecture")
#---------------------------------------------
# Common libraries
#---------------------------------------------
set(USE_BLAS "open" CACHE STRING "BLAS Vendor")
set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support")
# Cache type fixed: this variable holds a directory, not a boolean.
set(OPENCV_ROOT "" CACHE PATH "OpenCV install path. Supports autodetection.")
set(USE_OPENMP ON CACHE BOOL "Build with Openmp support")
set(USE_MKL_IF_AVAILABLE ON CACHE BOOL "Use Intel MKL if found")
set(USE_MKLDNN ON CACHE BOOL "Build with MKL-DNN support")
set(USE_LAPACK ON CACHE BOOL "Build with lapack support")
set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.")
#---------------------
# Compilers
#--------------------
set(CMAKE_GENERATOR "Ninja" CACHE STRING "Build Tool Generator used by CMake")
# Compilers are usually autodetected. Uncomment and modify the next 3 lines to
# choose manually:
# set(CMAKE_C_COMPILER "" CACHE BOOL "C compiler")
# set(CMAKE_CXX_COMPILER "" CACHE BOOL "C++ compiler")
# set(CMAKE_CUDA_COMPILER "" CACHE BOOL "Cuda compiler (nvcc)")
# Uncomment the following line to compile with debug information
# set(CMAKE_BUILD_TYPE Debug CACHE STRING "CMake build type")
#---------------------------------------------
# CPU instruction sets: The support is autodetected if turned ON
#---------------------------------------------
set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support")
set(USE_F16C ON CACHE BOOL "Build with x86 F16C instruction support")
#----------------------------
# distributed computing
#----------------------------
set(USE_DIST_KVSTORE OFF CACHE BOOL "Build with DIST_KVSTORE support")
#----------------------------
# performance settings
#----------------------------
set(USE_OPERATOR_TUNING ON CACHE BOOL "Enable auto-tuning of operators")
set(USE_GPERFTOOLS OFF CACHE BOOL "Build with GPerfTools support")
set(USE_JEMALLOC OFF CACHE BOOL "Build with Jemalloc support")
#----------------------------
# additional operators
#----------------------------
# path to folders containing projects specific operators that you don't want to
# put in src/operators
set(EXTRA_OPERATORS "" CACHE PATH "EXTRA OPERATORS PATH")
#----------------------------
# other features
#----------------------------
# Create C++ interface package
set(USE_CPP_PACKAGE ON CACHE BOOL "Build C++ Package")
# Use int64_t type to represent the total number of elements in a tensor
# This will cause performance degradation reported in issue #14496
# Set to 1 for large tensor with tensor size greater than INT32_MAX i.e. 2147483647
# Note: the size of each dimension is still bounded by INT32_MAX
set(USE_INT64_TENSOR_SIZE OFF CACHE BOOL "Use int64_t to represent the total number of elements in a tensor")
# Other GPU features
# Argument order fixed: the original `set(USE_NCCL "Use NVidia NCCL with CUDA" OFF)`
# assigned USE_NCCL the truthy list "Use NVidia NCCL with CUDA;OFF" instead of
# caching it as an OFF boolean.
set(USE_NCCL OFF CACHE BOOL "Use NVidia NCCL with CUDA")
# Cache type fixed: this variable holds a directory, not a boolean.
set(NCCL_ROOT "" CACHE PATH "NCCL install path. Supports autodetection.")
set(ENABLE_CUDA_RTC ON CACHE BOOL "Build with CUDA runtime compilation support")
set(USE_NVTX ON CACHE BOOL "Build with NVTX support")
\ No newline at end of file
# Runner image for the MNIST calculator: trains the model at build time via
# the EMADL Maven plugin, then runs calculator.sh when a container starts.
ARG BASE_IMAGE=base/mxnet
FROM $BASE_IMAGE
COPY . /build/mnistcalc/emadl-maven-plugin
WORKDIR /build/mnistcalc/emadl-maven-plugin
# Tokens used by settings.xml to authenticate against the GitLab Maven registry
# (PRIVATE_TOKEN for local builds, CI_JOB_TOKEN in CI — see the README).
ARG PRIVATE_TOKEN
ARG CI_JOB_TOKEN
RUN mvn -e dependency:resolve emadl:train -s settings.xml -U
RUN chmod +x calculator.sh
# input/output are the mount points documented in the README.
RUN mkdir -p input output
ENTRYPOINT [ "/bin/bash", "calculator.sh" ]
\ No newline at end of file
## Build Dockerimage
1. If you don't have the `base/mxnet` image locally, please navigate to the `docker/base` directory of this repository and build the image first.
1. Create a personal gitlab access token. Only scope `read_api` is needed! https://git.rwth-aachen.de/-/profile/personal_access_tokens
1. Replace the following lines in the file `settings.xml`
```
<server>
<id>gitlab-maven</id>
<configuration>
<httpHeaders>
<property>
<name>Job-Token</name>
<value>${env.CI_JOB_TOKEN}</value>
</property>
</httpHeaders>
</configuration>
</server>
```
with:
```
<server>
<id>gitlab-maven</id>
<configuration>
<httpHeaders>
<property>
<name>Private-Token</name>
<value>${env.PRIVATE_TOKEN}</value>
</property>
</httpHeaders>
</configuration>
</server>
```
1. Build the image
```
docker build -t registry.git.rwth-aachen.de/monticore/embeddedmontiarc/applications/mnistcalculator/mxnet/run --build-arg PRIVATE_TOKEN=${PRIVATE_TOKEN} .
```
Replace `${PRIVATE_TOKEN}` with your generated token.
## Run Dockercontainer
### Use custom input images
1. Create two directories. One for input files and one for output files.
1. If you would like to run the calculator with your own custom input, please place exactly 6 files in the input directory.
The files should have the format: "[0-9].png", where [0-9] is the label of the image.
1. Start the container:
```
docker run -v $(pwd)/input:/build/mnistcalc/emadl-maven-plugin/input -v $(pwd)/output:/build/mnistcalc/emadl-maven-plugin/output registry.git.rwth-aachen.de/monticore/embeddedmontiarc/applications/mnistcalculator/mxnet/run
```
1. You should see the output in the command line and a file called img.png in your output directory.
### Use random images from test dataset
1. Start the container:
```
docker run registry.git.rwth-aachen.de/monticore/embeddedmontiarc/applications/mnistcalculator/mxnet/run
```
import argparse
import pathlib
import random
import typing as t
import warnings
import h5py
import mxnet as mx
import mxnet.ndarray as nd
import numpy as np
from matplotlib import image
from matplotlib import pyplot as plt
from mxnet import gluon, nd
def load_h5(test_file: pathlib.Path) -> t.Tuple[np.ndarray, np.ndarray]:
    """Load MNIST test data and labels from an HDF5 file.

    Args:
        test_file: Path to an HDF5 file containing "data" and
            "softmax_label" datasets.

    Returns:
        Tuple of (data, labels) as numpy arrays.
        (Annotation fixed: the function returns a 2-tuple, not a single array.)
    """
    # Context manager closes the file handle even on error; the original
    # leaked the open h5py.File.
    with h5py.File(test_file, "r") as test:
        return np.array(test["data"]), np.array(test["softmax_label"])
def save_image(input_img: t.List[np.ndarray], label: t.List[int], prediction) -> None:
    """Plot the input digit images side by side, save them, and display them.

    Args:
        input_img: List of image arrays; each entry is indexed as [0]
            (assumes shape (1, H, W) — TODO confirm with caller).
        label: Ground-truth labels, shown in the figure title.
        prediction: Predicted values, shown in the figure title.

    Side effects:
        Writes the figure to output/img.png and opens a blocking plot window.
    """
    n: int = len(input_img)
    fig = plt.figure(figsize=(6, 2))
    for idx in range(n):
        fig.add_subplot(1, n, idx + 1)
        plt.imshow(input_img[idx][0])
    plt.suptitle(f"\nLabel: {label}\nPrediction: {prediction}")
    # Bug fix: save BEFORE show(). plt.show(block=True) hands the figure to
    # the GUI loop and may tear it down on close, so the subsequent savefig
    # wrote a blank image in the original order.
    plt.savefig("output/img.png")
    plt.show(block=True)
def get_prediction(net: gluon.block.Block, input, label) -> t.Tuple[int, int]:
    """Run a forward pass and return the predicted digit with its label.

    Args:
        net: Deserialized Gluon network used for inference.
        input: Batch of image data convertible via mx.nd.array.
        label: Sequence whose first element is the ground-truth label.

    Returns:
        Tuple of (predicted digit, ground-truth label) as plain ints.
        (Annotation fixed: the original claimed ``-> int`` but returns a
        2-tuple.)
    """
    data = mx.nd.array(input)
    out = net(data)
    # argmax over the class axis picks the most probable digit per sample.
    predictions = nd.argmax(out, axis=1)
    return int(predictions.asnumpy()[0]), int(label[0])
if __name__ == "__main__":
# Use GPU if one exists, else use CPU
ctx = mx.gpu() if mx.context.num_gpus() else mx.cpu()
# load model from saved files in /emadl-maven-plugin/model
with warnings.catch_warnings():
warnings.simplefilter("ignore")
deserialized_net = gluon.nn.SymbolBlock.imports(
"model/calculator.Network/newest-symbol.json",
["data"],
"model/calculator.Network/newest-0000.params",
ctx=ctx,
)
print("\n\t\t------------ Start Prediction ------------\n")
# Init parser
parser = argparse.ArgumentParser()
# Add arguments
parser.add_argument(
"--image_input",
type=str,
required=False,
help="Use own input image for prediction. Pass 6 paths to input images, saved in input_images/ folder named as [0-9].png.",
)
args = parser.parse_args()
if args.image_input:
# get image array and labels from input paths
data = args.image_input.split()
input_list = [np.array([image.imread(path)]) for path in data]
label_list = [np.array([int(pathlib.Path(path).stem)]) for path in data]
digits = []
for i in range(0, 6):
# input format should've shape (1, 28, 28)
digits.append(
get_prediction(deserialized_net, [input_list[i]], [label_list[i]])[0]
)
label_list[i] = label_list[i][0]
num_1 = int(f"{digits[0]}{digits[1]}{digits[2]}")
num_2 = int(f"{digits[3]}{digits[4]}{digits[5]}")
else:
# default method
_data_dir = (
pathlib.Path().home()