Commit a9103600 authored by Svetlana's avatar Svetlana

Merge branch 'develop' of git.rwth-aachen.de:autonomousdriving/torcs_dl into develop

parents dc46cbcf eeda3474
target/ generated/
output/ out/
.idea/ .idea/
.git .git
*.iml *.iml
\ No newline at end of file
#!/usr/bin/env bash
# Generate sources from the EMADL models.
# Runs the EMADL generator fat-jar: -m points at the model directory,
# -r names the root model (Dpnet), -o selects the output directory.
echo "Generating files.."
java -jar embedded-montiarc-emadl-generator-0.2.1-SNAPSHOT-jar-with-dependencies.jar -m src/models -r Dpnet -o generated
<?xml version="1.0" encoding="UTF-8"?>
<!-- Maven build for the cnn-model-training project: compiles the Java
     code-generation driver (CNNCodeGenerator) against the EMADL generator. -->
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>de.monticore.lang.monticar</groupId>
<artifactId>cnn-model-training</artifactId>
<version>1.0-SNAPSHOT</version>
<build>
<plugins>
<!-- Compile for Java 8; incremental compilation keeps rebuilds fast. -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.3</version>
<configuration>
<useIncrementalCompilation>true</useIncrementalCompilation>
<source>1.8</source>
<target>1.8</target>
</configuration>
</plugin>
</plugins>
</build>
<dependencies>
<!-- EMADL generator library used at runtime by CNNCodeGenerator. -->
<dependency>
<groupId>de.monticore.lang.monticar</groupId>
<artifactId>embedded-montiarc-emadl-generator</artifactId>
<version>0.2.1-SNAPSHOT</version>
</dependency>
</dependencies>
</project>
package de.monticore.lang.monticar.torcs_dl;

import de.monticore.lang.monticar.emadl.generator.EMADLGenerator;
import freemarker.template.TemplateException;

import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;

/**
 * Command-line entry point that drives EMADL code generation for the
 * Dpnet model found under {@link #MODELS_PATH}, writing the generated
 * sources to {@link #TARGET_PATH_GENERATED}.
 */
public class CNNCodeGenerator {

    /** Directory the generator writes its output into. */
    public static final String TARGET_PATH_GENERATED = "./target/generated-sources-cnn-model-training/";

    /** Root directory containing the EMADL model files. */
    public static final String MODELS_PATH = "src/main/models/";

    public static void main(String[] args) {
        System.out.println("Starting code generation...");

        EMADLGenerator generator = new EMADLGenerator();
        generator.setGenerationTargetPath(TARGET_PATH_GENERATED);

        try {
            // "Dpnet" is the root model name resolved within MODELS_PATH.
            generator.generate(MODELS_PATH, "Dpnet");
        } catch (IOException | TemplateException e) {
            // Generation failures are reported but do not set an exit code.
            e.printStackTrace();
        }
    }
}
...@@ -2,10 +2,13 @@ configuration Dpnet{ ...@@ -2,10 +2,13 @@ configuration Dpnet{
num_epoch : 100 num_epoch : 100
batch_size : 64 batch_size : 64
context:cpu context:cpu
normalize: false normalize: true
optimizer : sgd{ optimizer : sgd{
learning_rate : 0.01
weight_decay : 0.0005 weight_decay : 0.0005
// reduce the learning rate starting from 0.01 every 8000 iterations by a factor of 0.9 (decrease by 10%)
learning_rate_decay: 0.9
step_size: 8000
learning_rate_minimum: 0.01
} }
} }
...@@ -4,6 +4,7 @@ import csv ...@@ -4,6 +4,7 @@ import csv
import cv2 import cv2
import datetime import datetime
import h5py import h5py
import matplotlib.pyplot as plt
import mxnet as mx import mxnet as mx
import numpy as np import numpy as np
from PIL import Image from PIL import Image
...@@ -12,6 +13,9 @@ from sklearn.cross_validation import train_test_split ...@@ -12,6 +13,9 @@ from sklearn.cross_validation import train_test_split
import tarfile import tarfile
import os import os
TEST_REC = "torcs_test.rec"
TRAIN_REC = "torcs_train.rec"
ARCHIVE = False ARCHIVE = False
CHUNK_SIZE = 10000 CHUNK_SIZE = 10000
LEVELDB_PATH = "/media/sveta/4991e634-dd81-4cb9-bf46-2fa9c7159263/TORCS_Training_1F" LEVELDB_PATH = "/media/sveta/4991e634-dd81-4cb9-bf46-2fa9c7159263/TORCS_Training_1F"
...@@ -19,34 +23,63 @@ HDF5_PATH = "/media/sveta/4991e634-dd81-4cb9-bf46-2fa9c7159263/TORCS_HDF5_3/" ...@@ -19,34 +23,63 @@ HDF5_PATH = "/media/sveta/4991e634-dd81-4cb9-bf46-2fa9c7159263/TORCS_HDF5_3/"
RAW_PATH = "/media/sveta/4991e634-dd81-4cb9-bf46-2fa9c7159263/TORCS_raw/" RAW_PATH = "/media/sveta/4991e634-dd81-4cb9-bf46-2fa9c7159263/TORCS_raw/"
EXAMPLES_PATH = "/media/sveta/4991e634-dd81-4cb9-bf46-2fa9c7159263/TORCS_examples/" EXAMPLES_PATH = "/media/sveta/4991e634-dd81-4cb9-bf46-2fa9c7159263/TORCS_examples/"
TRAIN_MEAN = [99.39394537, 110.60877108, 117.86127587]
TRAIN_STD = [42.04910545, 49.47874084, 62.61726178]
def main(): def main():
start_date = datetime.datetime.now() start_date = datetime.datetime.now()
# leveldb_to_rec(start_date) # leveldb_to_rec(start_date)
read_from_recordio() # read_from_recordio()
# compute_train_mean() # compute_train_mean()
test_normalization()
def compute_train_mean(): def compute_train_mean():
record = mx.recordio.MXRecordIO(RAW_PATH + "torcs_train.rec", "r") record = mx.recordio.MXRecordIO(RAW_PATH + TRAIN_REC, "r")
img_stats = [] all_means = list()
for i in range(50): all_std = list()
for i in range(387851):
if i % 1000 == 0:
print(i)
item = record.read() item = record.read()
header, img_as_array = mx.recordio.unpack_img(item) header, img_as_array = mx.recordio.unpack_img(item)
# img is RGB of shape (210, 280, 3) # img is RGB of shape (210, 280, 3)
# img = Image.fromarray(img_as_array)
# img.show()
mean, std = cv2.meanStdDev(img_as_array) mean, std = cv2.meanStdDev(img_as_array)
mean_as_img = Image.fromarray(mean) all_means.append(mean)
mean_as_img.show() all_std.append(std)
img_stats.append(np.array([mean[::-1] / 255, std[::-1] / 255])) # img_stats.append(np.array([mean[::-1] / 255, std[::-1] / 255]))
img_stats = np.mean(img_stats, axis=0) mean = np.mean(all_means, axis=0)
print(img_stats) print("MEAN")
print(mean)
std = np.mean(all_std, axis=0)
print("STD")
print(std)
def test_normalization():
    """Visual sanity check of per-channel image normalization.

    Reads one frame from the training RecordIO file, displays the raw
    image, then shows each channel after ``(x - mean) / std``
    normalization using the precomputed TRAIN_MEAN / TRAIN_STD stats.

    Side effects only (opens image/plot windows); returns nothing.
    """
    width = 280
    height = 210
    # Per-pixel mean/std planes of shape (height, width, 3) so they
    # broadcast directly against the HWC image. This replaces the
    # original nested-list construction plus transpose, which produced
    # the same values but carried a wrong shape comment ((3, 280, 210);
    # the actual shape was (3, 210, 280)).
    data_mean = np.full((height, width, 3), TRAIN_MEAN, dtype=np.float64)
    data_std = np.full((height, width, 3), TRAIN_STD, dtype=np.float64)

    record = mx.recordio.MXRecordIO(RAW_PATH + TRAIN_REC, "r")
    item = record.read()
    header, img_as_array = mx.recordio.unpack_img(item)  # (210, 280, 3)

    # Show the raw frame for visual comparison.
    img = Image.fromarray(img_as_array)
    img.show()

    normalized_img = (img_as_array - data_mean) / data_std

    # One matshow window per channel, in channel order 0, 1, 2.
    for channel in range(3):
        plt.matshow(normalized_img[:, :, channel])
        plt.show()
def read_from_recordio(): def read_from_recordio():
record = mx.recordio.MXRecordIO(RAW_PATH + "torcs_train.rec", "r") record = mx.recordio.MXRecordIO(RAW_PATH + TRAIN_REC, "r")
for i in range(50): for i in range(50):
item = record.read() item = record.read()
header, img = mx.recordio.unpack_img(item) header, img = mx.recordio.unpack_img(item)
...@@ -60,8 +93,8 @@ def read_from_recordio(): ...@@ -60,8 +93,8 @@ def read_from_recordio():
def leveldb_to_rec(start_date): def leveldb_to_rec(start_date):
train_record = mx.recordio.MXRecordIO(RAW_PATH + "torcs_train.rec", "w") train_record = mx.recordio.MXRecordIO(RAW_PATH + TRAIN_REC, "w")
test_record = mx.recordio.MXRecordIO(RAW_PATH + "torcs_test.rec", "w") test_record = mx.recordio.MXRecordIO(RAW_PATH + TEST_REC, "w")
keys = range(1, 484815) keys = range(1, 484815)
train_keys, test_keys = train_test_split(keys,test_size=0.2) train_keys, test_keys = train_test_split(keys,test_size=0.2)
......
...@@ -84,7 +84,7 @@ class CNNCreator_dpnet: ...@@ -84,7 +84,7 @@ class CNNCreator_dpnet:
def load_h5_files(self): def load_h5_files(self):
train_h5 = None train_h5 = None
test_h5 = None test_h5 = None
train_path = self._data_dir_ + "train_2.h5" train_path = self._data_dir_ + "train.h5"
test_path = self._data_dir_ + "test.h5" test_path = self._data_dir_ + "test.h5"
if os.path.isfile(train_path): if os.path.isfile(train_path):
train_h5 = h5py.File(train_path, 'r') train_h5 = h5py.File(train_path, 'r')
...@@ -113,7 +113,7 @@ class CNNCreator_dpnet: ...@@ -113,7 +113,7 @@ class CNNCreator_dpnet:
optimizer_params=(('learning_rate', 0.001),), optimizer_params=(('learning_rate', 0.001),),
load_checkpoint=True, load_checkpoint=True,
context='gpu', context='gpu',
checkpoint_period=1, checkpoint_period=5,
normalize=True): normalize=True):
if context == 'gpu': if context == 'gpu':
mx_context = mx.gpu() mx_context = mx.gpu()
...@@ -161,8 +161,8 @@ class CNNCreator_dpnet: ...@@ -161,8 +161,8 @@ class CNNCreator_dpnet:
self.module.fit( self.module.fit(
train_data=train_iter, train_data=train_iter,
eval_metric='mse',
eval_data=test_iter, eval_data=test_iter,
eval_metric='mse',
optimizer=optimizer, optimizer=optimizer,
optimizer_params=optimizer_params, optimizer_params=optimizer_params,
batch_end_callback=mx.callback.Speedometer(batch_size), batch_end_callback=mx.callback.Speedometer(batch_size),
...@@ -334,8 +334,3 @@ class CNNCreator_dpnet: ...@@ -334,8 +334,3 @@ class CNNCreator_dpnet:
data_names=self._input_names_, data_names=self._input_names_,
label_names=self._output_names_, label_names=self._output_names_,
context=context) context=context)
# print("start viz")
# graph = mx.viz.plot_network(predictions, shape={'image':(0,3,210,280), 'predictions_label':(0,14)}, node_attrs={"shape":'rect',"fixedsize":'false'})
# # graph.format = 'png'
# graph.render('graph')
...@@ -5,18 +5,23 @@ import CNNCreator_dpnet ...@@ -5,18 +5,23 @@ import CNNCreator_dpnet
if __name__ == "__main__": if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG) logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger() logger = logging.getLogger()
handler = logging.FileHandler("train.log","w+", encoding=None, delay="true") handler = logging.FileHandler("train.log","w", encoding=None, delay="true")
logger.addHandler(handler) logger.addHandler(handler)
dpnet = CNNCreator_dpnet.CNNCreator_dpnet() dpnet = CNNCreator_dpnet.CNNCreator_dpnet()
dpnet.train( dpnet.train(
batch_size = 64, batch_size = 64,
num_epoch = 10, num_epoch = 100,
context = 'cpu', context = 'cpu',
normalize = False, normalize = True,
optimizer = 'sgd', optimizer = 'sgd',
optimizer_params = { optimizer_params = {
'weight_decay': 5.0E-4, 'weight_decay': 5.0E-4,
'learning_rate': 0.01} 'learning_rate_minimum': 0.01,
'learning_rate_decay': 0.9,
'step_size': 8000}
) )
import mxnet as mx import mxnet as mx
import numpy as np
TRAIN_MEAN = [99.39394537, 110.60877108, 117.86127587]
TRAIN_STD = [42.04910545, 49.47874084, 62.61726178]
def load_data_rec(self, batch_size): def load_data_rec(self, batch_size):
width = 280
height = 210
train_iter = mx.image.ImageIter( train_iter = mx.image.ImageIter(
path_imgrec=self._data_dir_ + "torcs_train.rec", path_imgrec=self._data_dir_ + "torcs_train.rec",
data_shape=(3, 210, 280), # (channels, height, width) data_shape=(3, height, width), # (channels, height, width)
batch_size=batch_size, batch_size=batch_size,
label_width=14, label_width=14,
data_name='image', data_name='image',
...@@ -13,14 +19,14 @@ def load_data_rec(self, batch_size): ...@@ -13,14 +19,14 @@ def load_data_rec(self, batch_size):
) )
test_iter = mx.image.ImageIter( test_iter = mx.image.ImageIter(
path_imgrec=self._data_dir_ + "torcs_test.rec", path_imgrec=self._data_dir_ + "torcs_test.rec",
data_shape=(3, 210, 280), # (channels, height, width) data_shape=(3, height, width), # (channels, height, width)
batch_size=batch_size, batch_size=batch_size,
label_width=14, label_width=14,
data_name='image', data_name='image',
label_name='predictions_label' label_name='predictions_label'
) )
data_mean = None
data_std = None
data_mean = np.asarray([[[a] * width] * height for a in TRAIN_MEAN])
data_std = np.asarray([[[a] * width] * height for a in TRAIN_STD])
return train_iter, test_iter, data_mean, data_std return train_iter, test_iter, data_mean, data_std
mxnet
h5py
\ No newline at end of file
mxnet-cu75
h5py
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment