Commit f0cebd01 authored by Svetlana Pavlitskaya

Correct (un)normalization of affordance indicators. Added generated CNN code with scripts for data collection.

Correct (un)normalization of affordance indicators. Added generated CNN code with scripts for data collection.
parent 8a9afe13
model/
__pycache__/
venv/
.idea/
.git
*.iml
This diff is collapsed.
import logging
import mxnet as mx
import CNNCreator_dpnet

if __name__ == "__main__":
    # Log everything from DEBUG upward; mirror the root logger to train.log.
    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger()
    # delay=True defers opening train.log until the first record is emitted.
    # (The original passed the string "true", which only behaved correctly
    # because any non-empty string is truthy.)
    handler = logging.FileHandler("train.log", "w", encoding=None, delay=True)
    logger.addHandler(handler)

    # Build the generated DPNet CNN and train it with plain SGD.
    dpnet = CNNCreator_dpnet.CNNCreator_dpnet()
    dpnet.train(
        batch_size=64,
        num_epoch=5,
        optimizer='sgd',
        optimizer_params={
            'learning_rate': 0.01}
    )
import datetime
import h5py
import os

# Concatenates 484 per-chunk HDF5 files (train_1.h5 .. train_484.h5, 1000
# samples each — TODO confirm the chunk size against the producer script)
# into one growable all_train.h5, deleting each chunk after it is copied.
HDF5_PATH = "/media/sveta/4991e634-dd81-4cb9-bf46-2fa9c7159263/TORCS_HDF5/"

start_date = datetime.datetime.now()

# "a" = create if missing, append otherwise; the original relied on h5py's
# deprecated implicit default mode and never closed the output file. The
# `with` block guarantees the file is flushed and closed on any exit path.
with h5py.File(HDF5_PATH + "all_train.h5", "a") as big_file:
    # Start empty but resizable along the sample axis (maxshape=None).
    dset_image = big_file.create_dataset(
        "image", (0, 3, 210, 280), maxshape=(None, 3, 210, 280), chunks=True)
    dset_labels = big_file.create_dataset(
        "predictions_label", (0, 13), maxshape=(None, 13), chunks=True)

    data_start_idx = 0
    for file_num in range(1, 485):
        file_path = HDF5_PATH + "train_" + str(file_num) + ".h5"
        print(file_path)
        with h5py.File(file_path, "r") as f:
            images = f["image"]
            labels = f["predictions_label"]
            # Grow the target datasets by one chunk (1000 samples) and copy
            # the chunk's contents into the newly added rows.
            dset_image.resize((dset_image.shape[0] + 1000, 3, 210, 280))
            dset_image[data_start_idx:data_start_idx + 1000, :, :, :] = images
            dset_labels.resize((dset_labels.shape[0] + 1000, 13))
            dset_labels[data_start_idx:data_start_idx + 1000, :] = labels
        data_start_idx += 1000
        # Chunk has been merged; reclaim disk space.
        os.remove(file_path)

end_time = datetime.datetime.now()
elapsed_time = end_time - start_date
print("Elapsed time " + str(elapsed_time))
import caffe
from caffe.proto import caffe_pb2
import datetime
import h5py
import numpy as np
import plyvel
import tarfile
import os
CHUNK_SIZE = 10000
LEVELDB_PATH = "/media/sveta/4991e634-dd81-4cb9-bf46-2fa9c7159263/TORCS_Training_1F"
HDF5_PATH = "/media/sveta/4991e634-dd81-4cb9-bf46-2fa9c7159263/TORCS_HDF5"
def main():
    """Convert the TORCS LevelDB training set to chunked HDF5 files.

    Iterates every (key, value) record in the LevelDB, decodes the Caffe
    Datum, normalizes the affordance indicators into [0.1, 0.9], and dumps
    every CHUNK_SIZE samples to a numbered train_<idx>.h5 file.
    """
    start_date = datetime.datetime.now()
    db = plyvel.DB(LEVELDB_PATH, paranoid_checks=True, create_if_missing=False)
    datum = caffe_pb2.Datum()

    all_images = []
    all_indicators = []
    file_idx = 1
    for key, value in db:
        # Datum is reused as a parse buffer; FromString overwrites it.
        datum = datum.FromString(value)
        indicators = np.array(datum.float_data, dtype='f')
        indicators = normalize(indicators)
        image = caffe.io.datum_to_array(datum).astype(np.float32)  # shape is (3, 210, 280)
        all_images.append(image)
        all_indicators.append(indicators)
        if len(all_images) >= CHUNK_SIZE:
            print("File " + str(file_idx))
            write_to_hdf5(all_images, all_indicators, file_idx, start_date)
            all_images = []
            all_indicators = []
            file_idx += 1
    # Final (partial) chunk. Guard against the record count being an exact
    # multiple of CHUNK_SIZE, which previously produced an empty .h5 file.
    if all_images:
        print("File " + str(file_idx))
        write_to_hdf5(all_images, all_indicators, file_idx, start_date)
def write_to_hdf5(images, indicators, file_idx, start_date):
    """Dump one chunk of images/labels to train_<file_idx>.h5, then archive it.

    The .h5 file is bzip2-tarred next to itself and the uncompressed
    original is deleted. `start_date` is only used to report elapsed time.
    """
    basename = "train_" + str(file_idx) + ".h5"
    filename = HDF5_PATH + "/" + basename
    with h5py.File(filename, 'w') as f:
        # h5py builds the datasets directly from the lists of numpy arrays.
        # (The original also called f.close() inside the `with`, which is
        # redundant — the context manager already closes the file.)
        f['image'] = images
        f['predictions_label'] = indicators
    print("Finished dumping to file " + filename)

    init_end_date = datetime.datetime.now()
    elapsed_time = init_end_date - start_date
    print("Dumping took " + str(elapsed_time))

    # Archive and remove the original file. `arcname` stores the bare file
    # name in the tar; the original achieved this with os.chdir, mutating
    # the process-wide working directory as a side effect. The `with` also
    # guarantees the tar is closed if adding the member fails.
    with tarfile.open(filename + ".tar.bz2", 'w:bz2') as tar:
        tar.add(filename, arcname=basename)
    os.remove(filename)
def normalize(indicators):
    """Map the 14 raw affordance indicators into [0.1, 0.9].

    Each indicator is linearly rescaled from its empirical (min, max) range
    onto [0.1, 0.9] via `normalize_value`. Raises IndexError if fewer than
    14 indicators are given (same as the original element-wise code); any
    entries beyond index 13 are left at 0.
    """
    # Empirical value range of each indicator, in dataset order.
    ranges = [
        (-0.5, 0.5),   # angle
        (-7, -2.5),    # toMarking_L
        (-2, 3.5),     # toMarking_M
        (2.5, 7),      # toMarking_R
        (0, 75),       # dist_L
        (0, 75),       # dist_R
        (-9.5, -4),    # toMarking_LL
        (-5.5, -0.5),  # toMarking_ML
        (0.5, 5.5),    # toMarking_MR
        (4, 9.5),      # toMarking_RR
        (0, 75),       # dist_LL
        (0, 75),       # dist_MM
        (0, 75),       # dist_RR
        (0, 1),        # fast
    ]
    indicators_normalized = np.zeros(len(indicators))
    for i, (old_min, old_max) in enumerate(ranges):
        indicators_normalized[i] = normalize_value(indicators[i], old_min, old_max)
    return indicators_normalized
def normalize_value(old_value, old_min, old_max):
    """Linearly rescale `old_value` from [old_min, old_max] onto [0.1, 0.9].

    A value at old_min maps to 0.1, a value at old_max maps to 0.9; values
    outside the range extrapolate linearly.
    """
    new_min, new_max = 0.1, 0.9
    return (((old_value - old_min) * (new_max - new_min)) / (old_max - old_min)) + new_min
# Run the conversion only when executed as a script, not on import.
if __name__ == "__main__":
    main()
h5py
scipy
scikit-image
protobuf
leveldb
tables
glog
\ No newline at end of file
......@@ -2,6 +2,13 @@ package dp.subcomponents;
struct Affordance {
Q(-0.5rad:0.001rad:0.5rad) angle;
Q(-7m:0.01m:-2.5m) toMarkingL;
Q(-2m:0.01m:3.5m) toMarkingM;
Q(2.5m:0.01m:7m) toMarkingR;
Q(0m:0.1m:75m) distL;
Q(0m:0.1m:75m) distR;
Q(-9.5m:0.01m:-4m) toMarkingLL;
Q(-5.5m:0.01m:-0.5m) toMarkingML;
Q(0.5m:0.01m:5.5m) toMarkingMR;
......@@ -10,9 +17,7 @@ struct Affordance {
Q(0m:0.1m:75m) distMM;
Q(0m:0.1m:75m) distRR;
Q(-7m:0.01m:-2.5m) toMarkingL;
Q(-2m:0.01m:3.5m) toMarkingM;
Q(2.5m:0.01m:7m) toMarkingR;
Q(0m:0.1m:75m) distL;
Q(0m:0.1m:75m) distR;
Q(0:0.1:1) fast;
}
......@@ -2,7 +2,7 @@ package dp.subcomponents;
component Dpnet{
ports in Z(0:255)^{3, 210, 280} image,
out Q(0:1)^{13,1,1} predictions;
out Q(0:1)^{14,1,1} predictions;
implementation CNN {
......
......@@ -3,7 +3,7 @@ package dp.subcomponents;
component Safetycontroller {
ports
in Z(0:255)^{3, 210, 280} imageIn,
in Q(0:1)^{13,1,1} affordanceIn,
in Q(0:1)^{14,1,1} affordanceIn,
out Q(0:1)^{1,1,1} safetyLevelOut;
implementation Math {
......
// EmbeddedMontiArc component: maps the CNN's normalized predictions back to
// physical affordance values (inverse of the Python-side normalization to
// [0.1, 0.9]).
// NOTE(review): this span is a scraped diff — both the removed (old) and the
// added (new) lines appear below; only one variant exists in the real file.
package dp.subcomponents;
component Unnormalizer {
// NOTE(review): diff pair — old port had 13 predictions, new port has 14
// (presumably adding the `fast` indicator; verify against Dpnet's output).
ports in Q^{13,1,1} normalizedPredictions,
ports in Q^{14,1,1} normalizedPredictions,
out Affordance affordance;
implementation Math {
// --- old (removed) un-normalization with pre-folded constants ---
affordance.angle = (normalizedPredictions(0,0,0) - 0.5)*1.1;
affordance.toMarkingLL = (normalizedPredictions(0,0,1) - 1.48181) * 6.8752;
affordance.toMarkingML = (normalizedPredictions(0,0,2) - 0.98)*6.25;
affordance.toMarkingMR = (normalizedPredictions(0,0,3) - 0.02)*6.25;
affordance.toMarkingRR = (normalizedPredictions(0,0,4) + 0.48181)*6.8752;
affordance.distLL = (normalizedPredictions(0,0,5) - 0.12)*95;
affordance.distMM = (normalizedPredictions(0,0,6) - 0.12)*95;
affordance.distRR = (normalizedPredictions(0,0,7) - 0.12)*95;
affordance.toMarkingL = (normalizedPredictions(0,0,8) - 1.34445)*5.6249;
affordance.toMarkingM = (normalizedPredictions(0,0,9) - 0.39091)*6.8752;
affordance.toMarkingR = (normalizedPredictions(0,0,10) + 0.34445)*5.6249;
affordance.distL = (normalizedPredictions(0,0,11) - 0.12)*95;
affordance.distR = (normalizedPredictions(0,0,12) - 0.12)*95;
// --- new (added) un-normalization: explicit inverse of
// x' = (x - min) * 0.8 / (max - min) + 0.1, so
// x  = (x' - 0.1) * (max - min) / 0.8 + min.
// The (min, max) pairs match the Python `normalize` ranges and the
// indicator order matches its indices 0..13.
oldMin = 0.1
oldMax = 0.9
oldRange = oldMax - oldMin
newMin = -0.5
newMax = 0.5
newRange = newMax - newMin
affordance.angle = (((normalizedPredictions(0,0,0) - oldMin) * newRange) / oldRange) + newMin
newMin = -7
newMax = -2.5
newRange = newMax - newMin
affordance.toMarkingL = (((normalizedPredictions(0,0,1) - oldMin) * newRange) / oldRange) + newMin
newMin = -2
newMax = 3.5
newRange = newMax - newMin
affordance.toMarkingM = (((normalizedPredictions(0,0,2) - oldMin) * newRange) / oldRange) + newMin
newMin = 2.5
newMax = 7
newRange = newMax - newMin
affordance.toMarkingR = (((normalizedPredictions(0,0,3) - oldMin) * newRange) / oldRange) + newMin
newMin = 0
newMax = 75
newRange = newMax - newMin
affordance.distL = (((normalizedPredictions(0,0,4) - oldMin) * newRange) / oldRange) + newMin
affordance.distR = (((normalizedPredictions(0,0,5) - oldMin) * newRange) / oldRange) + newMin
newMin = -9.5
newMax = -4
newRange = newMax - newMin
affordance.toMarkingLL = (((normalizedPredictions(0,0,6) - oldMin) * newRange) / oldRange) + newMin
newMin = -5.5
newMax = -0.5
newRange = newMax - newMin
affordance.toMarkingML = (((normalizedPredictions(0,0,7) - oldMin) * newRange) / oldRange) + newMin
newMin = 0.5
newMax = 5.5
newRange = newMax - newMin
affordance.toMarkingMR = (((normalizedPredictions(0,0,8) - oldMin) * newRange) / oldRange) + newMin
newMin = 4
newMax = 9.5
newRange = newMax - newMin
affordance.toMarkingRR = (((normalizedPredictions(0,0,9) - oldMin) * newRange) / oldRange) + newMin
newMin = 0
newMax = 75
newRange = newMax - newMin
affordance.distLL = (((normalizedPredictions(0,0,10) - oldMin) * newRange) / oldRange) + newMin
affordance.distMM = (((normalizedPredictions(0,0,11) - oldMin) * newRange) / oldRange) + newMin
affordance.distRR = (((normalizedPredictions(0,0,12) - oldMin) * newRange) / oldRange) + newMin
newMin = 0
newMax = 1
newRange = newMax - newMin
affordance.fast = (((normalizedPredictions(0,0,13) - oldMin) * newRange) / oldRange) + newMin
}
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment