Commit 60245204 authored by Evgeny Kusmenko's avatar Evgeny Kusmenko

Merge branch 'develop' into 'master'

Develop

See merge request !2
parents 72100ddd fb1fd8a1
*.idea*
*cmake-build-*
*.a
*.o
*.so
*.depend
torcs/*-bin
torcs/*.spec
torcs/export/
torcs/.torcs.rc
torcs/Make-config
torcs/config.log
torcs/config.status
torcs/autom4te.cache/
torcs/src/doc/torcsdoc.conf
torcs/src/linux/torcs
torcs/src/tools/accc/accc
torcs/src/tools/nfs2ac/nfs2ac
torcs/src/tools/nfsperf/nfsperf
torcs/src/tools/texmapper/texmapper
torcs/src/tools/trackgen/trackgen
torcs/src/libs/txml/gennmtab/gennmtab
torcs/src/linux/torcs-bin
torcs/src/tools/accc/accc-bin
torcs/src/tools/nfs2ac/nfs2ac-bin
torcs/src/tools/nfsperf/nfsperf-bin
torcs/src/tools/package/specfiles/torcs-data-cars-Patwo-Design.spec
torcs/src/tools/package/specfiles/torcs-data-cars-extra.spec
torcs/src/tools/package/specfiles/torcs-data-cars-kcendra-gt.spec
torcs/src/tools/package/specfiles/torcs-data-cars-kcendra-roadsters.spec
torcs/src/tools/package/specfiles/torcs-data-cars-kcendra-sport.spec
torcs/src/tools/package/specfiles/torcs-data-tracks-base.spec
torcs/src/tools/package/specfiles/torcs-data.spec
torcs/src/tools/package/specfiles/torcs-robot-K1999.spec
torcs/src/tools/package/specfiles/torcs-robot-base.spec
torcs/src/tools/package/specfiles/torcs.spec
torcs/src/tools/texmapper/texmapper-bin
torcs/src/tools/trackgen/trackgen-bin
/TorcsClient/build/
model/
__pycache__/
venv/
.idea/
.git
*.iml
*.pyc
*.log
# Export the Caffe mean image (driving_mean_1F.binaryproto) to per-channel text files.
import caffe
import numpy as np
blob = caffe.proto.caffe_pb2.BlobProto()
data = open("./driving_mean_1F.binaryproto", 'rb').read()
blob.ParseFromString(data)
arr = np.array(caffe.io.blobproto_to_array(blob))
arr = arr[0]  # drop the batch axis; shape is (3, 210, 280)
# np.save("./mean_image.npy", arr)  # (3, 210, 280), as loaded by custom_functions.load_data_rec
np.savetxt("./mean_image_R.txt", arr[0])
np.savetxt("./mean_image_G.txt", arr[1])
np.savetxt("./mean_image_B.txt", arr[2])
import mxnet as mx
import custom_functions
CONTEXT = mx.cpu()
MODEL_PATH = "model/alexnet_pretrained/caffenet"
MODEL_PATH_FINETUNED = "model/alexnet_finetuned/caffenet"
DATA_DIR = "/media/sveta/4991e634-dd81-4cb9-bf46-2fa9c7159263/TORCS_raw/"
MODEL_PREFIX = "dpnet"
batch_size = 64
num_epoch = 1
begin_epoch = 0
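# Load the pretrained CaffeNet checkpoint and cut the graph after 'flatten_0',
# keeping the convolutional feature extractor and discarding the original fc head.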
symbol, arg_params, aux_params = mx.model.load_checkpoint(MODEL_PATH, 0)
last_layer_to_stay = "flatten_0"
all_layers = symbol.get_internals()
net = all_layers[last_layer_to_stay+'_output']
new_args = {k: arg_params[k] for k in arg_params if 'fc' not in k}  # keep only the non-fc (convolutional) weights
# fc5_ = mx.symbol.flatten(data=net)
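# New regression head: three fc/relu/dropout stages followed by a 14-unit
# linear output, one unit per normalized affordance indicator.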
fc5_ = mx.symbol.FullyConnected(data=net,
num_hidden=4096,
no_bias=False,
name="fc5_")
relu6_ = mx.symbol.Activation(data=fc5_,
act_type='relu',
name="relu6_")
dropout6_ = mx.symbol.Dropout(data=relu6_,
p=0.5,
name="dropout6_")
fc6_ = mx.symbol.FullyConnected(data=dropout6_,
num_hidden=4096,
no_bias=False,
name="fc6_")
relu7_ = mx.symbol.Activation(data=fc6_,
act_type='relu',
name="relu7_")
dropout7_ = mx.symbol.Dropout(data=relu7_,
p=0.5,
name="dropout7_")
fc7_ = mx.symbol.FullyConnected(data=dropout7_,
num_hidden=256,
no_bias=False,
name="fc7_")
relu8_ = mx.symbol.Activation(data=fc7_,
act_type='relu',
name="relu8_")
dropout8_ = mx.symbol.Dropout(data=relu8_,
p=0.5,
name="dropout8_")
fc8_ = mx.symbol.FullyConnected(data=dropout8_,
num_hidden=14,
no_bias=True,
name="fc8_")
predictions = mx.symbol.LinearRegressionOutput(data=fc8_,
name="predictions")
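# Plain SGD; 'learning_rate_decay' and 'step_size' are not SGD parameters and
# are converted into a FactorScheduler below.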
optimizer = 'sgd'
optimizer_params = {
'learning_rate': 0.01,
'learning_rate_decay': 0.9,
'step_size': 8000}
if 'weight_decay' in optimizer_params:
optimizer_params['wd'] = optimizer_params['weight_decay']
del optimizer_params['weight_decay']
if 'learning_rate_decay' in optimizer_params:
min_learning_rate = 1e-08
if 'learning_rate_minimum' in optimizer_params:
min_learning_rate = optimizer_params['learning_rate_minimum']
del optimizer_params['learning_rate_minimum']
optimizer_params['lr_scheduler'] = mx.lr_scheduler.FactorScheduler(
optimizer_params['step_size'],
factor=optimizer_params['learning_rate_decay'],
stop_factor_lr=min_learning_rate)
del optimizer_params['step_size']
del optimizer_params['learning_rate_decay']
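# Build the RecordIO train/test iterators and bind the module to the new symbol.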
train_iter, test_iter, data_mean, data_std = custom_functions.load_data_rec(DATA_DIR, batch_size)
module = mx.mod.Module(symbol=predictions,
data_names=['data'],
label_names=['predictions_label'],
context=CONTEXT)
module.bind(data_shapes=[('data', (batch_size, 3, 210, 280))], force_rebind=True)
module.set_params(arg_params=new_args,
aux_params=aux_params,
allow_missing=True)
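# Fine-tune end to end; allow_missing/force_init let the new fc layers start
# from random initialization while the conv weights come from the checkpoint.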
module.fit(
allow_missing=True,
force_rebind=True,
    force_init=True,
train_data=train_iter,
eval_data=test_iter,
eval_metric='mse',
optimizer=optimizer,
optimizer_params=optimizer_params,
batch_end_callback=mx.callback.Speedometer(batch_size),
epoch_end_callback=mx.callback.do_checkpoint(prefix=MODEL_PATH_FINETUNED + MODEL_PREFIX, period=1),
begin_epoch=begin_epoch,
num_epoch=num_epoch + begin_epoch)
module.save_checkpoint(MODEL_PATH_FINETUNED + MODEL_PREFIX, num_epoch + begin_epoch)
module.save_checkpoint(MODEL_PATH_FINETUNED + MODEL_PREFIX + '_newest', 0)
import matplotlib.pyplot as plt
import numpy as np
import re
EPOCH_WISE = False
NETWORK= "DPNet"
LOGFILE = "../../dpnet_weights/normalized/train.log"
STEP = 50
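# Parse the MXNet training log: per-batch train MSE and throughput, plus one
# validation MSE value per epoch.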
rows = open(LOGFILE).read().strip()
train_mse = list()
validation_mse = list()
train_iterations = list()
validation_iterations = list()
speeds = list()
# grab the set of training epochs
epochs = set(re.findall(r'Epoch\[(\d+)\]', rows))
epochs = sorted([int(e) for e in epochs])
for e in epochs:
    train_mse_regexp = r'Epoch\[' + str(e) + r'\].*(\s)mse=(.*)'
mse = re.findall(train_mse_regexp, rows)
mse = [float(a[1]) for a in mse]
if EPOCH_WISE:
train_mse.append(mse[-1])
else:
train_mse += mse
    speed_regexp = r'Epoch\[' + str(e) + r'\].*(\s)Speed: (.*) samples'
speed = re.findall(speed_regexp, rows)
speed = [float(a[1]) for a in speed]
speeds += speed
    validation_mse_regexp = r'Epoch\[' + str(e) + r'\].*Validation-mse=(.*)'
current_validation_mse = re.findall(validation_mse_regexp, rows)
validation_mse.append(float(current_validation_mse[0]))
last_iteration = train_iterations[-1] if len(train_iterations) > 0 else 0
if EPOCH_WISE:
train_iterations.append(e)
validation_iterations.append(e)
else:
current_iterations = range(last_iteration+STEP, last_iteration+STEP * len(mse) + STEP, STEP)
train_iterations += current_iterations
validation_iterations.append(last_iteration+STEP * len(mse) + STEP)
print("Mean speed is " + str(np.mean(speeds)))
# plot the accuracies
# plt.style.use("ggplot")
plt.figure()
plt.plot(train_iterations, train_mse,
label="train")
plt.plot(validation_iterations, validation_mse,
label="validation")
if EPOCH_WISE:
plt.xlabel("Epochs #")
else:
plt.xlabel("Iterations")
plt.ylabel("MSE")
plt.legend(loc="upper right")
plt.grid()
plt.show()
h5py
scipy
scikit-image
protobuf
plyvel
tables
glog
mxnet
opencv-python
scikit-learn
from collections import namedtuple
import csv
import cv2
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
import mxnet as mx
import os
EXAMPLES_PATH = "/media/sveta/4991e634-dd81-4cb9-bf46-2fa9c7159263/TORCS_examples/"
MODEL_PATH = "../../dpnet_weights/normalized/dpnet_newest"
RAW_PATH = "/media/sveta/4991e634-dd81-4cb9-bf46-2fa9c7159263/TORCS_raw/"
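# Plot the fine-tuned network's 14 predicted affordance values against the
# ground-truth labels for each example frame in EXAMPLES_PATH.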
def main():
# Load saved checkpoint
sym, arg_params, aux_params = mx.model.load_checkpoint(MODEL_PATH, 0)
mod = mx.mod.Module(symbol=sym,
context=mx.cpu(),
data_names=['data'],
label_names=['predictions_label'])
mod.bind(for_training=False,
data_shapes=[('data', (1, 3, 210, 280))],
label_shapes=mod._label_shapes)
mod.set_params(arg_params, aux_params, allow_missing=True)
# Get Test Data
    files = [f for f in os.listdir(EXAMPLES_PATH) if f.endswith(".png")]
    Batch = namedtuple('Batch', ['data'])  # minimal batch wrapper expected by Module.forward
    for f in files:
        key = f[:-4]
        img = get_image(key)
        labels = get_labels(key)
        # Predict
        mod.forward(Batch([mx.nd.array(img)]))
prob = mod.get_outputs()[0].asnumpy()
prob = prob[0].tolist()
# Plot ground truth against predicted
plt.scatter(range(len(labels)), labels, marker='x', label='Ground truth')
        plt.scatter(range(len(prob)), prob, marker='x', label='Predicted')
plt.legend()
plt.grid()
ax = plt.gca()
ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
plt.show()
def get_image(key):
filename = EXAMPLES_PATH + key + ".png"
img = cv2.imread(filename) # read image in b,g,r order
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # change to r,g,b order
# img = cv2.resize(img, (224, 224)) # resize to 224*224 to fit model
img = np.swapaxes(img, 0, 2)
img = np.swapaxes(img, 1, 2) # change to (channel, height, width)
    img = img[np.newaxis, :]  # extend to (example, channel, height, width)
return img
def get_labels(key):
labels_file = EXAMPLES_PATH + key + "_labels.csv"
with open(labels_file, 'rb') as csvfile:
csv_reader = csv.reader(csvfile, delimiter=' ', quotechar='|')
labels = [row for row in csv_reader][0]
labels = [float(a) for a in labels]
return labels
if __name__ == '__main__':
    main()
import caffe
from caffe.proto import caffe_pb2
import csv
import cv2
import datetime
import h5py
import math
import matplotlib.pyplot as plt
import mxnet as mx
import numpy as np
from PIL import Image
import plyvel
from sklearn.model_selection import train_test_split
import tarfile
import os
TRAIN_LENGTH = 387851
TEST_REC = "torcs_test.rec"
TRAIN_REC = "torcs_train.rec"
ARCHIVE = False
CHUNK_SIZE = 10000
LEVELDB_PATH = "/media/sveta/4991e634-dd81-4cb9-bf46-2fa9c7159263/TORCS_Training_1F"
HDF5_PATH = "/media/sveta/4991e634-dd81-4cb9-bf46-2fa9c7159263/TORCS_HDF5_3/"
RAW_PATH = "/media/sveta/4991e634-dd81-4cb9-bf46-2fa9c7159263/TORCS_raw/"
EXAMPLES_PATH = "/media/sveta/4991e634-dd81-4cb9-bf46-2fa9c7159263/TORCS_examples/"
TRAIN_MEAN = [99.39394537, 110.60877108, 117.86127587]
TRAIN_STD = [42.04910545, 49.47874084, 62.61726178]
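# Helpers to convert the raw TORCS LevelDB dataset into RecordIO/HDF5 files and
# to sanity-check the written records, labels and images.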
def main():
start_date = datetime.datetime.now()
# leveldb_to_rec(start_date)
# read_from_recordio()
# compute_train_mean()
# test_normalization()
# check_saved_labels()
check_saved_images()
def compute_train_mean():
record = mx.recordio.MXRecordIO(RAW_PATH + TRAIN_REC, "r")
all_means = list()
all_std = list()
for i in range(TRAIN_LENGTH):
if i % 1000 == 0:
print(i)
item = record.read()
header, img_as_array = mx.recordio.unpack_img(item)
# img is RGB of shape (210, 280, 3)
mean, std = cv2.meanStdDev(img_as_array)
all_means.append(mean)
all_std.append(std)
# img_stats.append(np.array([mean[::-1] / 255, std[::-1] / 255]))
mean = np.mean(all_means, axis=0)
print("MEAN")
print(mean)
std = np.mean(all_std, axis=0)
print("STD")
print(std)
def test_normalization():
width = 280
height = 210
    data_mean = np.asarray([[[a] * width] * height for a in TRAIN_MEAN])  # shape (3, 210, 280)
    data_std = np.asarray([[[a] * width] * height for a in TRAIN_STD])
record = mx.recordio.MXRecordIO(RAW_PATH + TRAIN_REC, "r")
item = record.read()
header, img_as_array = mx.recordio.unpack_img(item) # (210, 280,3)
img = Image.fromarray(img_as_array)
img.show()
normalized_img = (img_as_array - np.transpose(data_mean, (1, 2, 0)))/np.transpose(data_std, (1, 2, 0))
plt.matshow(normalized_img[:,:,0])
plt.show()
plt.matshow(normalized_img[:,:,1])
plt.show()
plt.matshow(normalized_img[:,:,2])
plt.show()
def read_from_recordio():
record = mx.recordio.MXRecordIO(RAW_PATH + TRAIN_REC, "r")
for i in range(50):
item = record.read()
header, img = mx.recordio.unpack_img(item)
key = str(header[2])
convert_to_image_and_save(img, key)
labels_file = EXAMPLES_PATH + key + "_labels.csv"
with open(labels_file, 'wb') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=' ',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow(header[1].tolist())
def check_saved_labels():
record = mx.recordio.MXRecordIO(RAW_PATH + TRAIN_REC, "r")
for i in range(TRAIN_LENGTH):
item = record.read()
header, img = mx.recordio.unpack_img(item)
affordance = header[1].tolist()
# if not any([True if a>=0.1 and a<=0.9 else False for a in affordance ]):
if any(math.isnan(item) for item in affordance):
print(affordance)
def check_saved_images():
    record = mx.recordio.MXRecordIO(RAW_PATH + TRAIN_REC, "r")
for i in range(TRAIN_LENGTH):
item = record.read()
header, img = mx.recordio.unpack_img(item)
try:
img = Image.fromarray(img)
img.verify()
        except (IOError, SyntaxError):
            print("Bad file: " + str(i))
def leveldb_to_rec(start_date):
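    # Read (image, indicators) pairs from LevelDB, normalize the labels and
    # split them 80/20 into train and test RecordIO files.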
train_record = mx.recordio.MXRecordIO(RAW_PATH + TRAIN_REC, "w")
test_record = mx.recordio.MXRecordIO(RAW_PATH + TEST_REC, "w")
keys = range(1, 484815)
    train_keys, test_keys = train_test_split(keys, test_size=0.2)
    print(str(len(train_keys)) + " samples for training")
    print(str(len(test_keys)) + " samples for testing")
db, datum = read_db()
for key, value in db:
key_as_int = int(float(key))
datum = datum.FromString(value)
indicators = np.array(datum.float_data, dtype='f')
indicators = normalize(indicators)
image_data = caffe.io.datum_to_array(datum) # shape is (3, 210, 280)
image_data = np.transpose(image_data, (1, 2, 0)) # shape is (210, 280, 3)
image_data = image_data[:, :, ::-1] # BGR to RGB
# convert_to_image_and_save(image_data, key_as_str)
header = mx.recordio.IRHeader(0, indicators, key_as_int, 0)
image_record = mx.recordio.pack_img(header, image_data, img_fmt='.png')
if key_as_int in train_keys:
train_record.write(image_record)
elif key_as_int in test_keys:
test_record.write(image_record)
else:
raise Exception("Unknown key " + key)
if key_as_int % 1000 == 0:
            print(str(key_as_int) + "/" + str(len(keys)))
current_time = datetime.datetime.now()
elapsed_time = current_time - start_date
print("\t Total time spent: " + str(elapsed_time))
def read_db():
db = plyvel.DB(LEVELDB_PATH, paranoid_checks=True, create_if_missing=False)
datum = caffe_pb2.Datum()
return db, datum
def convert_to_image_and_save(image_data, key):
img = Image.fromarray(image_data)
img.save(EXAMPLES_PATH + key + ".png")
def write_to_hdf5(images, indicators, file_idx, start_date):
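    # Dump one chunk of images and labels to an HDF5 file; optionally pack it
    # into a .tar.bz2 archive and delete the original to save space.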
filename = HDF5_PATH + "/train_" + str(file_idx) + ".h5"
    with h5py.File(filename, 'w') as f:
        f['image'] = images
        f['predictions_label'] = indicators
    print("Finished dumping to file " + filename)
if ARCHIVE:
# archive and remove original file
tar = tarfile.open(filename + ".tar.bz2", 'w:bz2')
os.chdir(HDF5_PATH)
tar.add("train_" + str(file_idx) + ".h5")
tar.close()
os.remove(filename)
current_time = datetime.datetime.now()
elapsed_time = current_time - start_date
print("Finished archiving. Total time spent: " + str(elapsed_time))
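# Map each affordance indicator from its natural value range (noted inline
# below) into [0.1, 0.9].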
def normalize(indicators):
indicators_normalized = np.zeros(len(indicators))
indicators_normalized[0] = normalize_value(indicators[0], -0.5, 0.5) # angle. Range: ~ [-0.5, 0.5]
indicators_normalized[1] = normalize_value(indicators[1], -7, -2.5) # toMarking_L. Range: ~ [-7, -2.5]
indicators_normalized[2] = normalize_value(indicators[2], -2, 3.5) # toMarking_M. Range: ~ [-2, 3.5]
indicators_normalized[3] = normalize_value(indicators[3], 2.5, 7) # toMarking_R. Range: ~ [2.5, 7]
indicators_normalized[4] = normalize_value(indicators[4], 0, 75) # dist_L. Range: ~ [0, 75]
indicators_normalized[5] = normalize_value(indicators[5], 0, 75) # dist_R. Range: ~ [0, 75]
indicators_normalized[6] = normalize_value(indicators[6], -9.5, -4) # toMarking_LL. Range: ~ [-9.5, -4]
indicators_normalized[7] = normalize_value(indicators[7], -5.5, -0.5) # toMarking_ML. Range: ~ [-5.5, -0.5]
indicators_normalized[8] = normalize_value(indicators[8], 0.5, 5.5) # toMarking_MR. Range: ~ [0.5, 5.5]
indicators_normalized[9] = normalize_value(indicators[9], 4, 9.5) # toMarking_RR. Range: ~ [4, 9.5]
indicators_normalized[10] = normalize_value(indicators[10], 0, 75) # dist_LL. Range: ~ [0, 75]
indicators_normalized[11] = normalize_value(indicators[11], 0, 75) # dist_MM. Range: ~ [0, 75]
indicators_normalized[12] = normalize_value(indicators[12], 0, 75) # dist_RR. Range: ~ [0, 75]
indicators_normalized[13] = normalize_value(indicators[13], 0, 1) # fast range ~ [0, 1]
return indicators_normalized
def normalize_value(old_value, old_min, old_max):
new_min = 0.1
new_max = 0.9
new_range = new_max - new_min
old_range = old_max - old_min
new_value = (((old_value - old_min) * new_range) / old_range) + new_min
return new_value
def leveldb_to_hdf5(start_date):
db, datum = read_db()
all_images = []
all_indicators = []
file_idx = 1
for key, value in db:
datum = datum.FromString(value)
indicators = np.array(datum.float_data, dtype='f')
indicators = normalize(indicators)
image_data = caffe.io.datum_to_array(datum) # .astype(np.float32) # shape is (3, 210, 280)
image_data = np.transpose(image_data, (1, 2, 0))
image_data = image_data[:, :, ::-1]
all_images.append(image_data)
all_indicators.append(indicators)
if len(all_images) >= CHUNK_SIZE:
print("File " + str(file_idx))
write_to_hdf5(all_images, all_indicators, file_idx, start_date)
all_images = []
all_indicators = []
file_idx += 1
# final file
print("File " + str(file_idx))
write_to_hdf5(all_images, all_indicators, file_idx, start_date)
if __name__ == '__main__':
    main()
#!/usr/bin/env zsh
### Job name
#BSUB -J TrainDPNET
### File / path where STDOUT & STDERR will be written
### %J is the job ID, %I is the array ID
#BSUB -o train_dpnet.%J.%I
#BSUB -B
#BSUB -N
#BSUB -u svetlana.pavlitskaya@rwth-aachen.de
### Request the time you need for execution in minutes
### Format [hour:]minute,
### that means for 80 minutes you could also use this: 1:20
#BSUB -W 10:00
### Request virtual memory (in MB)
#BSUB -M 4096
### Request GPU Queue
#BSUB -gpu -
#BSUB -R gpu
module load cuda
hostname
nvcc --version
export MXNET_CPU_WORKER_NTHREADS=32
export MXNET_ENGINE_TYPE=ThreadedEnginePerDevice
cd /home/sp705423/GeneratedDpnetTrainingCode
pip install --user -r requirements.txt
python CNNTrainer_Dpnet.py
import mxnet as mx
import numpy as np
DATA_NAME = 'data'
PREDICTIONS_LABEL = 'predictions_label'
TRAIN_MEAN = [99.39394537, 110.60877108, 117.86127587]
TRAIN_STD = [42.04910545, 49.47874084, 62.61726178]
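# Create ImageIter readers over the RecordIO files produced by leveldb_to_rec;
# each record carries a 14-value affordance label vector.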
def load_data_rec(data_dir, batch_size):
width = 280
height = 210
data_mean = np.load("./mean_image.npy")
train_iter = mx.image.ImageIter(
path_imgrec=data_dir + "torcs_train.rec",
data_shape=(3, height, width), # (channels, height, width)
batch_size=batch_size,
label_width=14,
data_name=DATA_NAME,
label_name=PREDICTIONS_LABEL
)
test_iter = mx.image.ImageIter(
path_imgrec=data_dir + "torcs_test.rec",
data_shape=(3, height, width), # (channels, height, width)
batch_size=batch_size,
label_width=14,
data_name=DATA_NAME,
label_name=PREDICTIONS_LABEL
)
data_std = None
# data_mean = np.asarray([[[a] * width] * height for a in TRAIN_MEAN])
# data_std = np.asarray([[[a] * width] * height for a in TRAIN_STD])