Commit f0cebd01 authored by Svetlana Pavlitskaya

Correct (un)normalization of affordance indicators. Added generated CNN code with scripts for data collection.
parent 8a9afe13
model/
__pycache__/
venv/
.idea/
.git
*.iml
import mxnet as mx
import logging
import os
import errno
import shutil
import h5py
import sys
import numpy as np

@mx.init.register
class MyConstant(mx.init.Initializer):
    def __init__(self, value):
        super(MyConstant, self).__init__(value=value)
        self.value = value

    def _init_weight(self, _, arr):
        arr[:] = mx.nd.array(self.value)
class CNNCreator_dpnet:
    module = None
    _data_dir_ = "/media/sveta/4991e634-dd81-4cb9-bf46-2fa9c7159263/TORCS_HDF5/"
    _model_dir_ = "model/dpnet/"
    _model_prefix_ = "dpnet"
    _input_names_ = ['image']
    _input_shapes_ = [(3,210,280)]
    _output_names_ = ['predictions_label']

    def load(self, context):
        lastEpoch = 0
        param_file = None

        # Remove the '_newest' checkpoint files; they are rewritten after training.
        try:
            os.remove(self._model_dir_ + self._model_prefix_ + "_newest-0000.params")
        except OSError:
            pass
        try:
            os.remove(self._model_dir_ + self._model_prefix_ + "_newest-symbol.json")
        except OSError:
            pass

        # Find the checkpoint with the highest epoch number.
        if os.path.isdir(self._model_dir_):
            for file in os.listdir(self._model_dir_):
                if ".params" in file and self._model_prefix_ in file:
                    epochStr = file.replace(".params", "").replace(self._model_prefix_ + "-", "")
                    epoch = int(epochStr)
                    if epoch > lastEpoch:
                        lastEpoch = epoch
                        param_file = file
        if param_file is None:
            return 0
        else:
            logging.info("Loading checkpoint: " + param_file)
            self.module.load(prefix=self._model_dir_ + self._model_prefix_,
                             epoch=lastEpoch,
                             data_names=self._input_names_,
                             label_names=self._output_names_,
                             context=context)
            return lastEpoch
    def load_data(self, batch_size):
        train_h5, test_h5 = self.load_h5_files()

        data_mean = train_h5[self._input_names_[0]][:].mean(axis=0)
        data_std = train_h5[self._input_names_[0]][:].std(axis=0) + 1e-5

        train_iter = mx.io.NDArrayIter(train_h5[self._input_names_[0]],
                                       train_h5[self._output_names_[0]],
                                       batch_size=batch_size,
                                       data_name=self._input_names_[0],
                                       label_name=self._output_names_[0])
        test_iter = None
        if test_h5 is not None:
            test_iter = mx.io.NDArrayIter(test_h5[self._input_names_[0]],
                                          test_h5[self._output_names_[0]],
                                          batch_size=batch_size,
                                          data_name=self._input_names_[0],
                                          label_name=self._output_names_[0])
        return train_iter, test_iter, data_mean, data_std
    def load_h5_files(self):
        train_h5 = None
        test_h5 = None
        train_path = self._data_dir_ + "train_1.h5"
        test_path = self._data_dir_ + "test.h5"
        if os.path.isfile(train_path):
            train_h5 = h5py.File(train_path, 'r')
            if not (self._input_names_[0] in train_h5 and self._output_names_[0] in train_h5):
                logging.error("The HDF5 file '" + os.path.abspath(train_path) + "' has to contain the datasets: "
                              + "'" + self._input_names_[0] + "', '" + self._output_names_[0] + "'")
                sys.exit(1)
            if os.path.isfile(test_path):
                test_h5 = h5py.File(test_path, 'r')
                if not (self._input_names_[0] in test_h5 and self._output_names_[0] in test_h5):
                    logging.error("The HDF5 file '" + os.path.abspath(test_path) + "' has to contain the datasets: "
                                  + "'" + self._input_names_[0] + "', '" + self._output_names_[0] + "'")
                    sys.exit(1)
            else:
                logging.warning("Couldn't load test set. File '" + os.path.abspath(test_path) + "' does not exist.")
            return train_h5, test_h5
        else:
            logging.error("Data loading failure. File '" + os.path.abspath(train_path) + "' does not exist.")
            sys.exit(1)
    def train(self, batch_size,
              num_epoch=10,
              optimizer='sgd',
              optimizer_params=(('learning_rate', 0.01),),
              load_checkpoint=True,
              context=mx.cpu(),
              checkpoint_period=5,
              normalize=True):
        if 'weight_decay' in optimizer_params:
            optimizer_params['wd'] = optimizer_params['weight_decay']
            del optimizer_params['weight_decay']
        if 'learning_rate_decay' in optimizer_params:
            min_learning_rate = 1e-08
            if 'learning_rate_minimum' in optimizer_params:
                min_learning_rate = optimizer_params['learning_rate_minimum']
                del optimizer_params['learning_rate_minimum']
            optimizer_params['lr_scheduler'] = mx.lr_scheduler.FactorScheduler(
                optimizer_params['step_size'],
                factor=optimizer_params['learning_rate_decay'],
                stop_factor_lr=min_learning_rate)
            del optimizer_params['step_size']
            del optimizer_params['learning_rate_decay']

        train_iter, test_iter, data_mean, data_std = self.load_data(batch_size)
        if self.module is None:
            if normalize:
                self.construct(context, data_mean, data_std)
            else:
                self.construct(context)

        begin_epoch = 0
        if load_checkpoint:
            begin_epoch = self.load(context)
        else:
            if os.path.isdir(self._model_dir_):
                shutil.rmtree(self._model_dir_)

        try:
            os.makedirs(self._model_dir_)
        except OSError:
            if not os.path.isdir(self._model_dir_):
                raise

        self.module.fit(
            train_data=train_iter,
            eval_data=test_iter,
            optimizer=optimizer,
            optimizer_params=optimizer_params,
            batch_end_callback=mx.callback.Speedometer(batch_size),
            epoch_end_callback=mx.callback.do_checkpoint(prefix=self._model_dir_ + self._model_prefix_, period=checkpoint_period),
            begin_epoch=begin_epoch,
            num_epoch=num_epoch + begin_epoch)
        self.module.save_checkpoint(self._model_dir_ + self._model_prefix_, num_epoch + begin_epoch)
        self.module.save_checkpoint(self._model_dir_ + self._model_prefix_ + '_newest', 0)
    def construct(self, context, data_mean=None, data_std=None):
        image = mx.sym.var("image",
                           shape=(0,3,210,280))
        # image, output shape: {[3,210,280]}
        if data_mean is not None:
            assert(data_std is not None)
            _data_mean_ = mx.sym.Variable("_data_mean_", shape=(3,210,280), init=MyConstant(value=data_mean.tolist()))
            _data_mean_ = mx.sym.BlockGrad(_data_mean_)
            _data_std_ = mx.sym.Variable("_data_std_", shape=(3,210,280), init=MyConstant(value=data_std.tolist()))
            _data_std_ = mx.sym.BlockGrad(_data_std_)
            # subtract the training-set mean and divide by its standard deviation
            image = mx.symbol.broadcast_sub(image, _data_mean_)
            image = mx.symbol.broadcast_div(image, _data_std_)
        conv1_ = mx.symbol.pad(data=image,
                               mode='constant',
                               pad_width=(0,0,0,0,5,4,4,3),
                               constant_value=0)
        conv1_ = mx.symbol.Convolution(data=conv1_,
                                       kernel=(11,11),
                                       stride=(4,4),
                                       num_filter=96,
                                       no_bias=False,
                                       name="conv1_")
        # conv1_, output shape: {[96,53,70]}
        relu1_ = mx.symbol.Activation(data=conv1_,
                                      act_type='relu',
                                      name="relu1_")
        pool1_ = mx.symbol.pad(data=relu1_,
                               mode='constant',
                               pad_width=(0,0,0,0,1,1,1,0),
                               constant_value=0)
        pool1_ = mx.symbol.Pooling(data=pool1_,
                                   kernel=(3,3),
                                   pool_type="max",
                                   stride=(2,2),
                                   name="pool1_")
        # pool1_, output shape: {[96,27,35]}
        conv2_ = mx.symbol.pad(data=pool1_,
                               mode='constant',
                               pad_width=(0,0,0,0,1,1,1,1),
                               constant_value=0)
        conv2_ = mx.symbol.Convolution(data=conv2_,
                                       kernel=(5,5),
                                       stride=(4,4),
                                       num_filter=256,
                                       no_bias=False,
                                       name="conv2_")
        # conv2_, output shape: {[256,7,9]}
        relu2_ = mx.symbol.Activation(data=conv2_,
                                      act_type='relu',
                                      name="relu2_")
        pool2_ = mx.symbol.pad(data=relu2_,
                               mode='constant',
                               pad_width=(0,0,0,0,1,1,1,1),
                               constant_value=0)
        pool2_ = mx.symbol.Pooling(data=pool2_,
                                   kernel=(3,3),
                                   pool_type="max",
                                   stride=(2,2),
                                   name="pool2_")
        # pool2_, output shape: {[256,4,5]}
        conv3_ = mx.symbol.pad(data=pool2_,
                               mode='constant',
                               pad_width=(0,0,0,0,1,1,1,1),
                               constant_value=0)
        conv3_ = mx.symbol.Convolution(data=conv3_,
                                       kernel=(3,3),
                                       stride=(1,1),
                                       num_filter=384,
                                       no_bias=False,
                                       name="conv3_")
        # conv3_, output shape: {[384,4,5]}
        relu3_ = mx.symbol.Activation(data=conv3_,
                                      act_type='relu',
                                      name="relu3_")
        conv4_ = mx.symbol.pad(data=relu3_,
                               mode='constant',
                               pad_width=(0,0,0,0,1,1,1,1),
                               constant_value=0)
        conv4_ = mx.symbol.Convolution(data=conv4_,
                                       kernel=(3,3),
                                       stride=(1,1),
                                       num_filter=384,
                                       no_bias=False,
                                       name="conv4_")
        # conv4_, output shape: {[384,4,5]}
        relu4_ = mx.symbol.Activation(data=conv4_,
                                      act_type='relu',
                                      name="relu4_")
        conv5_ = mx.symbol.pad(data=relu4_,
                               mode='constant',
                               pad_width=(0,0,0,0,1,1,1,1),
                               constant_value=0)
        conv5_ = mx.symbol.Convolution(data=conv5_,
                                       kernel=(3,3),
                                       stride=(1,1),
                                       num_filter=256,
                                       no_bias=False,
                                       name="conv5_")
        # conv5_, output shape: {[256,4,5]}
        relu5_ = mx.symbol.Activation(data=conv5_,
                                      act_type='relu',
                                      name="relu5_")
        pool5_ = mx.symbol.pad(data=relu5_,
                               mode='constant',
                               pad_width=(0,0,0,0,1,0,1,1),
                               constant_value=0)
        pool5_ = mx.symbol.Pooling(data=pool5_,
                                   kernel=(3,3),
                                   pool_type="max",
                                   stride=(2,2),
                                   name="pool5_")
        # pool5_, output shape: {[256,2,3]}
        fc5_ = mx.symbol.flatten(data=pool5_)
        fc5_ = mx.symbol.FullyConnected(data=fc5_,
                                        num_hidden=4096,
                                        no_bias=False,
                                        name="fc5_")
        relu6_ = mx.symbol.Activation(data=fc5_,
                                      act_type='relu',
                                      name="relu6_")
        dropout6_ = mx.symbol.Dropout(data=relu6_,
                                      p=0.5,
                                      name="dropout6_")
        fc6_ = mx.symbol.FullyConnected(data=dropout6_,
                                        num_hidden=4096,
                                        no_bias=False,
                                        name="fc6_")
        relu7_ = mx.symbol.Activation(data=fc6_,
                                      act_type='relu',
                                      name="relu7_")
        dropout7_ = mx.symbol.Dropout(data=relu7_,
                                      p=0.5,
                                      name="dropout7_")
        fc7_ = mx.symbol.FullyConnected(data=dropout7_,
                                        num_hidden=13,
                                        no_bias=False,
                                        name="fc7_")
        predictions = mx.symbol.LinearRegressionOutput(data=fc7_,
                                                       name="predictions")

        self.module = mx.mod.Module(symbol=mx.symbol.Group([predictions]),
                                    data_names=self._input_names_,
                                    label_names=self._output_names_,
                                    context=context)
import logging
import mxnet as mx
import CNNCreator_dpnet

if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger()
    handler = logging.FileHandler("train.log", "w", encoding=None, delay=True)
    logger.addHandler(handler)

    dpnet = CNNCreator_dpnet.CNNCreator_dpnet()
    dpnet.train(
        batch_size=64,
        num_epoch=5,
        optimizer='sgd',
        optimizer_params={
            'learning_rate': 0.01}
    )
import datetime
import h5py
import os

HDF5_PATH = "/media/sveta/4991e634-dd81-4cb9-bf46-2fa9c7159263/TORCS_HDF5/"

start_date = datetime.datetime.now()

# Merge the per-chunk training files into one resizable HDF5 file.
big_file = h5py.File(HDF5_PATH + "all_train.h5", "w")
dset_image = big_file.create_dataset("image", (0, 3, 210, 280), maxshape=(None, 3, 210, 280), chunks=True)
dset_labels = big_file.create_dataset("predictions_label", (0, 13), maxshape=(None, 13), chunks=True)

data_start_idx = 0
for file_num in range(1, 485):
    file_path = HDF5_PATH + "train_" + str(file_num) + ".h5"
    print(file_path)
    with h5py.File(file_path, "r") as f:
        images = f["image"]
        labels = f["predictions_label"]
        dset_image.resize((dset_image.shape[0] + 1000, 3, 210, 280))
        dset_image[data_start_idx:data_start_idx + 1000, :, :, :] = images
        dset_labels.resize((dset_labels.shape[0] + 1000, 13))
        dset_labels[data_start_idx:data_start_idx + 1000, :] = labels
        data_start_idx += 1000
    os.remove(file_path)

big_file.close()

end_time = datetime.datetime.now()
elapsed_time = end_time - start_date
print("Elapsed time " + str(elapsed_time))
import caffe
from caffe.proto import caffe_pb2
import datetime
import h5py
import numpy as np
import plyvel
import tarfile
import os

CHUNK_SIZE = 10000
LEVELDB_PATH = "/media/sveta/4991e634-dd81-4cb9-bf46-2fa9c7159263/TORCS_Training_1F"
HDF5_PATH = "/media/sveta/4991e634-dd81-4cb9-bf46-2fa9c7159263/TORCS_HDF5"

def main():
    start_date = datetime.datetime.now()
    db = plyvel.DB(LEVELDB_PATH, paranoid_checks=True, create_if_missing=False)
    datum = caffe_pb2.Datum()

    all_images = []
    all_indicators = []
    file_idx = 1
    for key, value in db:
        datum = datum.FromString(value)
        indicators = np.array(datum.float_data, dtype='f')
        indicators = normalize(indicators)
        image = caffe.io.datum_to_array(datum).astype(np.float32)  # shape is (3, 210, 280)
        all_images.append(image)
        all_indicators.append(indicators)
        if len(all_images) >= CHUNK_SIZE:
            print("File " + str(file_idx))
            write_to_hdf5(all_images, all_indicators, file_idx, start_date)
            all_images = []
            all_indicators = []
            file_idx += 1
    # final file
    print("File " + str(file_idx))
    write_to_hdf5(all_images, all_indicators, file_idx, start_date)
def write_to_hdf5(images, indicators, file_idx, start_date):
    filename = HDF5_PATH + "/train_" + str(file_idx) + ".h5"
    with h5py.File(filename, 'w') as f:
        f['image'] = images
        f['predictions_label'] = indicators
    print("Finished dumping to file " + filename)

    init_end_date = datetime.datetime.now()
    elapsed_time = init_end_date - start_date
    print("Dumping took " + str(elapsed_time))

    # archive and remove the original file
    tar = tarfile.open(filename + ".tar.bz2", 'w:bz2')
    os.chdir(HDF5_PATH)
    tar.add("train_" + str(file_idx) + ".h5")
    tar.close()
    os.remove(filename)
def normalize(indicators):
    indicators_normalized = np.zeros(len(indicators))
    indicators_normalized[0] = normalize_value(indicators[0], -0.5, 0.5)    # angle. Range: ~ [-0.5, 0.5]
    indicators_normalized[1] = normalize_value(indicators[1], -7, -2.5)     # toMarking_L. Range: ~ [-7, -2.5]
    indicators_normalized[2] = normalize_value(indicators[2], -2, 3.5)      # toMarking_M. Range: ~ [-2, 3.5]
    indicators_normalized[3] = normalize_value(indicators[3], 2.5, 7)       # toMarking_R. Range: ~ [2.5, 7]
    indicators_normalized[4] = normalize_value(indicators[4], 0, 75)        # dist_L. Range: ~ [0, 75]
    indicators_normalized[5] = normalize_value(indicators[5], 0, 75)        # dist_R. Range: ~ [0, 75]
    indicators_normalized[6] = normalize_value(indicators[6], -9.5, -4)     # toMarking_LL. Range: ~ [-9.5, -4]
    indicators_normalized[7] = normalize_value(indicators[7], -5.5, -0.5)   # toMarking_ML. Range: ~ [-5.5, -0.5]
    indicators_normalized[8] = normalize_value(indicators[8], 0.5, 5.5)     # toMarking_MR. Range: ~ [0.5, 5.5]
    indicators_normalized[9] = normalize_value(indicators[9], 4, 9.5)       # toMarking_RR. Range: ~ [4, 9.5]
    indicators_normalized[10] = normalize_value(indicators[10], 0, 75)      # dist_LL. Range: ~ [0, 75]
    indicators_normalized[11] = normalize_value(indicators[11], 0, 75)      # dist_MM. Range: ~ [0, 75]
    indicators_normalized[12] = normalize_value(indicators[12], 0, 75)      # dist_RR. Range: ~ [0, 75]
    indicators_normalized[13] = normalize_value(indicators[13], 0, 1)       # fast. Range: ~ [0, 1]
    return indicators_normalized

def normalize_value(old_value, old_min, old_max):
    new_min = 0.1
    new_max = 0.9
    new_range = new_max - new_min
    old_range = old_max - old_min
    new_value = (((old_value - old_min) * new_range) / old_range) + new_min
    return new_value
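
# Illustrative example (not part of the original script): normalize_value maps
# [old_min, old_max] linearly onto [0.1, 0.9]. For the angle range [-0.5, 0.5]:
#   normalize_value(-0.5, -0.5, 0.5) -> 0.1   (lower bound)
#   normalize_value( 0.0, -0.5, 0.5) -> 0.5   (midpoint)
#   normalize_value( 0.5, -0.5, 0.5) -> 0.9   (upper bound)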
main()
h5py
scipy
scikit-image
protobuf
leveldb
tables
glog
\ No newline at end of file
@@ -2,6 +2,13 @@ package dp.subcomponents;
struct Affordance {
Q(-0.5rad:0.001rad:0.5rad) angle;
Q(-7m:0.01m:-2.5m) toMarkingL;
Q(-2m:0.01m:3.5m) toMarkingM;
Q(2.5m:0.01m:7m) toMarkingR;
Q(0m:0.1m:75m) distL;
Q(0m:0.1m:75m) distR;
Q(-9.5m:0.01m:-4m) toMarkingLL;
Q(-5.5m:0.01m:-0.5m) toMarkingML;
Q(0.5m:0.01m:5.5m) toMarkingMR;
@@ -10,9 +17,7 @@ struct Affordance {
Q(0m:0.1m:75m) distMM;
Q(0m:0.1m:75m) distRR;
Q(-7m:0.01m:-2.5m) toMarkingL;
Q(-2m:0.01m:3.5m) toMarkingM;
Q(2.5m:0.01m:7m) toMarkingR;
Q(0m:0.1m:75m) distL;
Q(0m:0.1m:75m) distR;
}
\ No newline at end of file
Q(0:0.1:1) fast;
}
@@ -2,7 +2,7 @@ package dp.subcomponents;
component Dpnet{
ports in Z(0:255)^{3, 210, 280} image,
out Q(0:1)^{13,1,1} predictions;
out Q(0:1)^{14,1,1} predictions;
implementation CNN {
@@ -3,7 +3,7 @@ package dp.subcomponents;
component Safetycontroller {
ports
in Z(0:255)^{3, 210, 280} imageIn,
in Q(0:1)^{13,1,1} affordanceIn,
in Q(0:1)^{14,1,1} affordanceIn,
out Q(0:1)^{1,1,1} safetyLevelOut;
implementation Math {
package dp.subcomponents;
component Unnormalizer {
ports in Q^{13,1,1} normalizedPredictions,
ports in Q^{14,1,1} normalizedPredictions,
out Affordance affordance;
implementation Math {
        affordance.angle = (normalizedPredictions(0,0,0) - 0.5)*1.1;
        affordance.toMarkingLL = (normalizedPredictions(0,0,1) - 1.48181) * 6.8752;
        affordance.toMarkingML = (normalizedPredictions(0,0,2) - 0.98)*6.25;
        affordance.toMarkingMR = (normalizedPredictions(0,0,3) - 0.02)*6.25;
        affordance.toMarkingRR = (normalizedPredictions(0,0,4) + 0.48181)*6.8752;
        affordance.distLL = (normalizedPredictions(0,0,5) - 0.12)*95;
        affordance.distMM = (normalizedPredictions(0,0,6) - 0.12)*95;
        affordance.distRR = (normalizedPredictions(0,0,7) - 0.12)*95;
        affordance.toMarkingL = (normalizedPredictions(0,0,8) - 1.34445)*5.6249;
        affordance.toMarkingM = (normalizedPredictions(0,0,9) - 0.39091)*6.8752;
        affordance.toMarkingR = (normalizedPredictions(0,0,10) + 0.34445)*5.6249;
        affordance.distL = (normalizedPredictions(0,0,11) - 0.12)*95;
        affordance.distR = (normalizedPredictions(0,0,12) - 0.12)*95;
        oldMin = 0.1;
        oldMax = 0.9;
        oldRange = oldMax - oldMin;

        newMin = -0.5;
        newMax = 0.5;
        newRange = newMax - newMin;
        affordance.angle = (((normalizedPredictions(0,0,0) - oldMin) * newRange) / oldRange) + newMin;

        newMin = -7;
        newMax = -2.5;
        newRange = newMax - newMin;
        affordance.toMarkingL = (((normalizedPredictions(0,0,1) - oldMin) * newRange) / oldRange) + newMin;

        newMin = -2;
        newMax = 3.5;
        newRange = newMax - newMin;
        affordance.toMarkingM = (((normalizedPredictions(0,0,2) - oldMin) * newRange) / oldRange) + newMin;

        newMin = 2.5;
        newMax = 7;
        newRange = newMax - newMin;
        affordance.toMarkingR = (((normalizedPredictions(0,0,3) - oldMin) * newRange) / oldRange) + newMin;

        newMin = 0;
        newMax = 75;
        newRange = newMax - newMin;
        affordance.distL = (((normalizedPredictions(0,0,4) - oldMin) * newRange) / oldRange) + newMin;
        affordance.distR = (((normalizedPredictions(0,0,5) - oldMin) * newRange) / oldRange) + newMin;

        newMin = -9.5;
        newMax = -4;
        newRange = newMax - newMin;
        affordance.toMarkingLL = (((normalizedPredictions(0,0,6) - oldMin) * newRange) / oldRange) + newMin;

        newMin = -5.5;
        newMax = -0.5;
        newRange = newMax - newMin;
        affordance.toMarkingML = (((normalizedPredictions(0,0,7) - oldMin) * newRange) / oldRange) + newMin;

        newMin = 0.5;
        newMax = 5.5;
        newRange = newMax - newMin;
        affordance.toMarkingMR = (((normalizedPredictions(0,0,8) - oldMin) * newRange) / oldRange) + newMin;

        newMin = 4;
        newMax = 9.5;
        newRange = newMax - newMin;
        affordance.toMarkingRR = (((normalizedPredictions(0,0,9) - oldMin) * newRange) / oldRange) + newMin;

        newMin = 0;
        newMax = 75;
        newRange = newMax - newMin;
        affordance.distLL = (((normalizedPredictions(0,0,10) - oldMin) * newRange) / oldRange) + newMin;
        affordance.distMM = (((normalizedPredictions(0,0,11) - oldMin) * newRange) / oldRange) + newMin;
        affordance.distRR = (((normalizedPredictions(0,0,12) - oldMin) * newRange) / oldRange) + newMin;

        newMin = 0;
        newMax = 1;
        newRange = newMax - newMin;
        affordance.fast = (((normalizedPredictions(0,0,13) - oldMin) * newRange) / oldRange) + newMin;
}
}
\ No newline at end of file
}
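
For reference, a minimal Python sketch (not part of this commit; the helper name unnormalize_value is illustrative) of the inverse mapping that the corrected Unnormalizer math implements, assuming the same [0.1, 0.9] normalization range used in the data-collection script:

def unnormalize_value(normalized_value, new_min, new_max):
    # Invert normalize_value: map a prediction from [0.1, 0.9] back to [new_min, new_max].
    old_min = 0.1
    old_max = 0.9
    return (((normalized_value - old_min) * (new_max - new_min)) / (old_max - old_min)) + new_min

# Round trip for the angle indicator (range [-0.5, 0.5]):
# unnormalize_value(normalize_value(0.3, -0.5, 0.5), -0.5, 0.5) returns 0.3 (up to float error).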