Complete example

parent 0211902e
train.log
target/
data/vGG16/mnist.pkl.gz
data/vGG16/st.h5
resources/training_data
model/
build/
rm -r target/;
java -jar embedded-montiarc-emadl-generator-0.2.5-SNAPSHOT-jar-with-dependencies.jar -m src/emadl/models/ -r cNNCalculator.Connector -o target -b MXNET -p /home/christopher/anaconda3/bin/python
rm -rf build
mkdir build && cd build
echo "Building DigitClassifier.."
cmake ..
cp -r /home/christopher/Documents/Master/Semester2/Praktikum/mxnet/include/mxnet/ ../target/
#cp -r /home/christopher/Documents/Master/Semester2/Praktikum/mxnet/include/mxnet/ ../target/
make
echo "Creating HDF5 dataset from image files.."
cd resources/
wget https://git.rwth-aachen.de/thomas.timmermanns/EMADL-Demo/raw/master/src/resources/data.tar.gz
tar xf data.tar.gz
rm data.tar.gz
python imgDir-to-h5.py --in_port data --out_port softmax --data_path data --target_path training_data
rm -r data
from __future__ import print_function
from __future__ import division

import h5py
import numpy as np
import cv2
import os
import argparse
import errno
import random
import sys


def create_img_list(name, data_path):
    dir_name = data_path + "/" + name
    image_paths = []
    image_class_indices = []
    print(dir_name)
    for class_index_name in os.listdir(dir_name):
        class_dir_path = dir_name + "/" + class_index_name
        if os.path.isdir(class_dir_path):
            for image_name in os.listdir(class_dir_path):
                image_path = class_dir_path + "/" + image_name
                image_paths.append(image_path)
                class_index = float(class_index_name)
                image_class_indices.append(class_index)
    return image_paths, image_class_indices


def create_h5_from_list(image_paths, image_class_indices, target_dir, target_file_name, input_port_name, output_port_name, shuffle=True):
    img = cv2.imread(image_paths[0])
    t_img = np.transpose(img, (2,0,1)).astype(np.float32)
    #t_img = t_img[-1:,:,:]
    #print(t_img)
    channels = t_img.shape[0]
    height = t_img.shape[1]
    width = t_img.shape[2]
    data_size = len(image_paths)

    target_file = target_dir + "/" + target_file_name + ".h5"
    if os.path.isfile(target_file):
        print("File", target_file, "already exists. Skipping data file creation.")
        return

    try:
        os.makedirs(target_dir)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    if shuffle:
        combined = list(zip(image_paths, image_class_indices))
        random.shuffle(combined)
        image_paths[:], image_class_indices[:] = zip(*combined)

    print("Creating " + target_file + " (images:" + str(data_size) + ", channels:" + str(channels) + ", height:" + str(height) + ", width:" + str(width) + "):")
    with h5py.File(target_file, "w") as ofile:
        in_dset = ofile.create_dataset(input_port_name, (data_size, channels, height, width), dtype=np.float32)
        out_dset = ofile.create_dataset(output_port_name + "_label", (data_size,), dtype=np.float32)
        for i in range(data_size):
            img = cv2.imread(image_paths[i])
            t_img = np.transpose(img, (2,0,1)).astype(np.float32)
            #t_img = t_img[-1:,:,:]
            in_dset[i] = t_img
            out_dset[i] = image_class_indices[i]
            #print progress
            if i % 100 == 0:
                percentage = 100*i / data_size
                sys.stdout.write("\r{:0.1f}%".format(percentage))
                sys.stdout.flush()
        sys.stdout.write("\r100.0%\n")
        sys.stdout.flush()


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Translate image directories into hdf5 training sets for EMADL.')
    parser.add_argument("--in_port", action="store", dest="in_port", default="data")
    parser.add_argument("--out_port", action="store", dest="out_port", default="softmax")
    parser.add_argument("--data_path", action="store", dest="data_path", default=".")
    parser.add_argument("--target_path", action="store", dest="target_path", default=".")
    args = parser.parse_args()

    for file_name in os.listdir(args.data_path):
        if file_name == "train":
            image_paths, image_class_indices = create_img_list(file_name, args.data_path)
            create_h5_from_list(image_paths, image_class_indices, args.target_path, file_name, args.in_port, args.out_port)
        if file_name == "test":
            image_paths, image_class_indices = create_img_list(file_name, args.data_path)
            create_h5_from_list(image_paths, image_class_indices, args.target_path, file_name, args.in_port, args.out_port)
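
As a quick sanity check, the generated training set can be opened with h5py; the path below assumes the script above was invoked from the repository root via the generate-dataset steps (cd resources/, --target_path training_data), and the expected shapes assume the 32x32 RGB digit images used in this example:

import h5py

# Inspect the HDF5 training set produced by imgDir-to-h5.py.
with h5py.File("resources/training_data/train.h5", "r") as f:
    for name, dset in f.items():
        print(name, dset.shape, dset.dtype)
# Expected with the default port names:
#   data           (N, 3, 32, 32)  float32
#   softmax_label  (N,)            float32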
mkdir 0
mv *-num0.png 0/.
mkdir 1
mv *-num1.png 1/.
mkdir 2
mv *-num2.png 2/.
mkdir 3
mv *-num3.png 3/.
mkdir 4
mv *-num4.png 4/.
mkdir 5
mv *-num5.png 5/.
mkdir 6
mv *-num6.png 6/.
mkdir 7
mv *-num7.png 7/.
mkdir 8
mv *-num8.png 8/.
mkdir 9
mv *-num9.png 9/.
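
The same sorting can also be scripted in Python if the shell commands above are inconvenient; this is only a sketch and assumes the PNGs follow the *-numN.png naming used above and sit in the current directory:

import glob
import os
import shutil

# Move images named "<something>-num<digit>.png" into per-class folders 0-9,
# the directory layout that create_img_list() in imgDir-to-h5.py expects.
for digit in range(10):
    folder = str(digit)
    if not os.path.isdir(folder):
        os.mkdir(folder)
    for path in glob.glob("*-num{}.png".format(digit)):
        shutil.move(path, os.path.join(folder, os.path.basename(path)))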
@@ -10,52 +10,63 @@
 #include <map>

 int main(int argc, char* argv[]) {
-    if(argc < 2){ //Note: argc=1 if no arguments are provided
-        std::cout << "Missing argument: Path to test image must be provided " << std::endl;
+    if(argc < 7){ //Note: argc=1 if no arguments are provided
+        std::cout << "Missing argument: Path to 6 test images must be provided " << std::endl;
         exit(1);
     }
-    std::string filePath = argv[1];
-    if (!std::ifstream(filePath).good()) {
-        std::cerr << "Image loading failure, test image '" << filePath << "' does not exist." << std::endl;
-        exit(1);
-    }
-    cv::Mat img = cv::imread(filePath);
-    std::cout << "== original image size: " << img.size() << " ==" << std::endl;
-    // scale image to fit
-    cv::Size scale(28,28);
-    cv::resize(img, img, scale);
-    std::cout << "== simply resize: " << img.size() << " ==" << std::endl;
-    size_t channels = 1;
-    size_t height = img.rows;
-    size_t width = img.cols;
-    vector<float> data(channels*height*width);
-    for(size_t j=0; j<height; j++){
-        for(size_t k=0; k<width; k++){
-            cv::Vec3b intensity = img.at<cv::Vec3b>(j, k);
-            for(size_t i=0; i<channels; i++){
-                data[i*height*width + j*height + k] = (float) intensity[i];
+    cNNCalculator_connector connector;
+    connector.init();
+    for(int n = 0; n < 6; n++) {
+        std::string filePath = argv[n+1];
+        if (!std::ifstream(filePath).good()) {
+            std::cerr << "Image loading failure, test image '" << filePath << "' does not exist." << std::endl;
+            exit(1);
+        }
+        cv::Mat img = cv::imread(filePath);
+        std::cout << "== original image size: " << img.size() << " ==" << std::endl;
+        // scale image to fit
+        cv::Size scale(32,32);
+        cv::resize(img, img, scale);
+        std::cout << "== simply resize: " << img.size() << " ==" << std::endl;
+        size_t channels = 3;
+        size_t height = img.rows;
+        size_t width = img.cols;
+        vector<float> data(channels*height*width);
+        for(size_t j=0; j<height; j++){
+            for(size_t k=0; k<width; k++){
+                cv::Vec3b intensity = img.at<cv::Vec3b>(j, k);
+                for(size_t i=0; i<channels; i++){
+                    data[i*height*width + j*height + k] = (float) intensity[i];
+                }
             }
         }
-    }
-    cNNCalculator_connector connector;
-    connector.init();
-    connector.image1 = conv_to< icube >::from( CNNTranslator::translateToCube(data, vector<size_t> {channels,height,width}) );
-    connector.image2 = conv_to< icube >::from( CNNTranslator::translateToCube(data, vector<size_t> {channels,height,width}) );
-    connector.image3 = conv_to< icube >::from( CNNTranslator::translateToCube(data, vector<size_t> {channels,height,width}) );
-    connector.image4 = conv_to< icube >::from( CNNTranslator::translateToCube(data, vector<size_t> {channels,height,width}) );
-    connector.image5 = conv_to< icube >::from( CNNTranslator::translateToCube(data, vector<size_t> {channels,height,width}) );
-    connector.image6 = conv_to< icube >::from( CNNTranslator::translateToCube(data, vector<size_t> {channels,height,width}) );
+        if(n == 0)
+            connector.image1 = conv_to< icube >::from( CNNTranslator::translateToCube(data, vector<size_t> {channels,height,width}) );
+        if(n == 1)
+            connector.image2 = conv_to< icube >::from( CNNTranslator::translateToCube(data, vector<size_t> {channels,height,width}) );
+        if(n == 2)
+            connector.image3 = conv_to< icube >::from( CNNTranslator::translateToCube(data, vector<size_t> {channels,height,width}) );
+        if(n == 3)
+            connector.image4 = conv_to< icube >::from( CNNTranslator::translateToCube(data, vector<size_t> {channels,height,width}) );
+        if(n == 4)
+            connector.image5 = conv_to< icube >::from( CNNTranslator::translateToCube(data, vector<size_t> {channels,height,width}) );
+        if(n == 5)
+            connector.image6 = conv_to< icube >::from( CNNTranslator::translateToCube(data, vector<size_t> {channels,height,width}) );
+    }
     connector.execute();
     int classIndex = (int)connector.res;
     std::cout << "== SUM: " << classIndex << std::endl;
     return 0;
 }
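
The runtime loop above feeds each test image to the network as raw BGR intensities in channel-first order after resizing to 32x32, which matches what imgDir-to-h5.py stores at training time. A minimal Python sketch of that preprocessing (the file name is purely illustrative):

import cv2
import numpy as np

# Prepare one digit image the same way the generated C++ runtime does:
# resize to 32x32, then flatten to channel-first float intensities (0-255).
img = cv2.imread("digit.png")                       # illustrative file name
img = cv2.resize(img, (32, 32))
data = np.transpose(img, (2, 0, 1)).astype(np.float32)
print(data.shape)                                   # (3, 32, 32), i.e. Z(0:255)^{3, 32, 32}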
@@ -22,12 +22,12 @@ component Calculator {
     instance Add add;
-    connect in1_1 -> number1_ones.inputVector;
+    connect in1_1 -> number1_hundreds.inputVector;
     connect in1_2 -> number1_tens.inputVector;
-    connect in1_3 -> number1_hundreds.inputVector;
-    connect in2_1 -> number2_ones.inputVector;
+    connect in1_3 -> number1_ones.inputVector;
+    connect in2_1 -> number2_hundreds.inputVector;
     connect in2_2 -> number2_tens.inputVector;
-    connect in2_3 -> number2_hundreds.inputVector;
+    connect in2_3 -> number2_ones.inputVector;
     connect number1_ones.maxIndex -> number1.ones;
     connect number1_tens.maxIndex -> number1.tens;
@@ -9,27 +9,29 @@ component Connector {
         in Z(0:255)^{1, 28, 28} image6,
         out Z(0:1998) res;
-    instance VGG16 predictor1;
-    instance VGG16 predictor2;
-    instance VGG16 predictor3;
-    instance VGG16 predictor4;
-    instance VGG16 predictor5;
-    instance VGG16 predictor6;
+    instance Network<10> predictor1;
+    instance Network<10> predictor2;
+    instance Network<10> predictor3;
+    instance Network<10> predictor4;
+    instance Network<10> predictor5;
+    instance Network<10> predictor6;
     instance Calculator cal;
-    connect image1 -> predictor1.image;
-    connect image2 -> predictor2.image;
-    connect image3 -> predictor3.image;
-    connect image4 -> predictor4.image;
-    connect image5 -> predictor5.image;
-    connect image6 -> predictor6.image;
-    instance ArgMax<10> maxi;
-    connect predictor1.predictions -> cal.in1_1;
-    connect predictor2.predictions -> cal.in1_2;
-    connect predictor3.predictions -> cal.in1_3;
-    connect predictor4.predictions -> cal.in2_1;
-    connect predictor5.predictions -> cal.in2_2;
-    connect predictor6.predictions -> cal.in2_3;
+    connect image1 -> predictor1.data;
+    connect image2 -> predictor2.data;
+    connect image3 -> predictor3.data;
+    connect image4 -> predictor4.data;
+    connect image5 -> predictor5.data;
+    connect image6 -> predictor6.data;
+    connect predictor1.softmax -> cal.in1_1;
+    connect predictor2.softmax -> cal.in1_2;
+    connect predictor3.softmax -> cal.in1_3;
+    connect predictor4.softmax -> cal.in2_1;
+    connect predictor5.softmax -> cal.in2_2;
+    connect predictor6.softmax -> cal.in2_3;
     connect cal.out1 -> res;
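
With this wiring, predictors 1-3 supply the hundreds, tens and ones digits of the first operand and predictors 4-6 those of the second, so the result stays within Z(0:1998) (at most 999 + 999). A small sketch of the arithmetic the Calculator subcomponents perform, taking each digit as the argmax of its softmax vector (the function name is illustrative, not the generated API):

import numpy as np

def calculator_sum(softmax_outputs):
    # softmax_outputs: six length-10 probability vectors in port order
    # in1_1 (hundreds), in1_2 (tens), in1_3 (ones), then in2_1..in2_3.
    digits = [int(np.argmax(p)) for p in softmax_outputs]
    number1 = 100 * digits[0] + 10 * digits[1] + digits[2]
    number2 = 100 * digits[3] + 10 * digits[4] + digits[5]
    return number1 + number2  # always within Z(0:1998)

one_hot = lambda d: np.eye(10)[d]
print(calculator_sum([one_hot(d) for d in [5, 2, 7, 0, 3, 1]]))  # 527 + 031 = 558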
configuration Network{
    num_epoch:5
    batch_size:100
    normalize:true
    context:gpu
    load_checkpoint:false
    optimizer:sgd{
        learning_rate:0.2
        learning_rate_decay:0.85
        step_size:1000
        weight_decay:0.0
    }
}
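
Under this configuration the SGD learning rate starts at 0.2 and, assuming the usual multiplicative schedule (the rate is multiplied by learning_rate_decay once every step_size iterations, as in MXNet's factor scheduler), it decays as sketched below:

# Effective learning rate after a given number of iterations, assuming a
# multiplicative decay of 0.85 applied every 1000 steps.
def effective_lr(step, base_lr=0.2, decay=0.85, step_size=1000):
    return base_lr * decay ** (step // step_size)

for step in (0, 1000, 2000, 5000):
    print(step, round(effective_lr(step), 4))   # 0.2, 0.17, 0.1445, 0.0887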
package cNNCalculator;

component Network<Z(2:oo) classes = 10>{
    ports in Z(0:255)^{3, 32, 32} data,
          out Q(0:1)^{classes} softmax;

    implementation CNN {
        def conv(channels, kernel=1, stride=1){
            Convolution(kernel=(kernel,kernel), channels=channels) ->
            Relu() ->
            Pooling(pool_type="max", kernel=(2,2), stride=(stride,stride))
        }
        data ->
        conv(kernel=5, channels=20, stride=2) ->
        conv(kernel=5, channels=50, stride=2) ->
        FullyConnected(units=500) ->
        Relu() ->
        Dropout() ->
        FullyConnected(units=classes) ->
        Softmax() ->
        softmax
    }
}
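
For readers more used to plain MXNet than to EMADL's CNN syntax, the layer graph above corresponds roughly to the following hand-written Gluon sketch (not the code the EMADL generator emits; the Dropout rate is assumed to be the default 0.5):

from mxnet.gluon import nn

# Rough Gluon equivalent of the Network<10> component: two conv/relu/maxpool
# blocks, then FC(500) -> relu -> dropout -> FC(classes), with softmax applied
# by the loss during training or at prediction time.
net = nn.HybridSequential()
net.add(
    nn.Conv2D(channels=20, kernel_size=5, activation='relu'),
    nn.MaxPool2D(pool_size=2, strides=2),
    nn.Conv2D(channels=50, kernel_size=5, activation='relu'),
    nn.MaxPool2D(pool_size=2, strides=2),
    nn.Dense(500, activation='relu'),
    nn.Dropout(0.5),
    nn.Dense(10),
)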
cNNCalculator.VGG16 src/resources/data/vGG16
cNNCalculator.Network resources/training_data