Commit 5b26318b authored by Svetlana Pavlitskaya

clearing up folders

parent d5763822
#!/usr/bin/env bash
echo "Generating files.."
java -jar embedded-montiarc-emadl-generator-0.2.4-SNAPSHOT-jar-with-dependencies.jar -m src/models -r Dpnet -o target --backend=MXNET
@@ -4,7 +4,7 @@ component Dpnet{
     implementation CNN {
-        def conv(kernel, channels, hasPool=true, convStride=(1,1)){
+        def conv(kernel, channels, hasPool=true, convStride=(1,1)){
             Convolution(kernel=kernel, channels=channels, stride=convStride) ->
             Relu() ->
             Pooling(pool_type="max", kernel=(3,3), stride=(2,2), ?=hasPool)
@@ -21,9 +21,13 @@ component Dpnet{
         conv(kernel=(3,3), channels=384, hasPool=false) ->
         conv(kernel=(3,3), channels=384, hasPool=false) ->
         conv(kernel=(3,3), channels=256) ->
         fc() ->
         fc() ->
-        FullyConnected(units=2, no_bias=true) ->
+        FullyConnected(units=256) ->
+        Relu() ->
+        Dropout() ->
+        FullyConnected(units=14, no_bias=true) ->
         predictions
     }
generated/
out/
.idea/
.git
*.iml
#!/usr/bin/env bash
echo "Generating files.."
java -jar embedded-montiarc-emadl-generator-0.2.1-SNAPSHOT-jar-with-dependencies.jar -m src/models -r Dpnet -o generated
#!/usr/bin/env bash
echo "Generating files.."
java -jar embedded-montiarc-emadl-generator-0.2.1-SNAPSHOT-jar-with-dependencies.jar -m src/models -r Safetynet -o generated
configuration Safetynet{
    num_epoch: 100
    batch_size: 64
    context: cpu
    normalize: true
    optimizer: sgd{
        learning_rate: 0.01
        // reduce the learning rate starting from 0.01 every 8000 iterations by a factor of 0.9 (decrease by 10%)
        learning_rate_decay: 0.9
        step_size: 8000
        weight_decay: 0.0005
    }
}
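
For reference, learning_rate_decay and step_size translate into an MXNet FactorScheduler in the generated trainer below; a minimal sketch of the equivalent schedule under that assumption:

import mxnet as mx

# lr(i) = 0.01 * 0.9^floor(i / 8000), never below stop_factor_lr;
# after the first 8000 iterations the rate drops from 0.01 to 0.009.
scheduler = mx.lr_scheduler.FactorScheduler(step=8000, factor=0.9, stop_factor_lr=1e-8)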
component Safetynet{
    ports in Z(0:255)^{3, 210, 280} data,
          out Q(0:1)^{2,1,1} predictions;

    implementation CNN {
        def conv(kernel, channels, hasPool=true, convStride=(1,1)){
            Convolution(kernel=kernel, channels=channels, stride=convStride) ->
            Relu() ->
            Pooling(pool_type="max", kernel=(3,3), stride=(2,2), ?=hasPool)
        }
        def fc(){
            FullyConnected(units=4096) ->
            Relu() ->
            Dropout()
        }
        image ->
        conv(kernel=(11,11), channels=96, convStride=(4,4)) ->
        conv(kernel=(5,5), channels=256, convStride=(4,4)) ->
        conv(kernel=(3,3), channels=384, hasPool=false) ->
        conv(kernel=(3,3), channels=384, hasPool=false) ->
        conv(kernel=(3,3), channels=256) ->
        fc() ->
        fc() ->
        FullyConnected(units=2) ->
        Softmax() ->
        predictions
    }
}
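
The input port carries 210x280 RGB frames as a CHW integer tensor in [0,255]. A minimal preprocessing sketch, assuming an HWC uint8 frame from the simulator (frame is a placeholder name):

import numpy as np

frame = np.zeros((210, 280, 3), dtype=np.uint8)   # placeholder HWC camera image
chw = frame.transpose((2, 0, 1))                  # HWC -> CHW, matches Z(0:255)^{3, 210, 280}
batch = chw[np.newaxis].astype(np.float32)        # add a batch dimension for an MXNet iterator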
model/
__pycache__/
venv/
.idea/
.git
*.iml
*.pyc
*.log
import mxnet as mx
import logging
import os
import errno
import shutil
import h5py
import sys
import numpy as np
import custom_functions


# Initializer that fills a variable with a fixed tensor; used below to
# inject the training-set mean into the symbol graph.
@mx.init.register
class MyConstant(mx.init.Initializer):
    def __init__(self, value):
        super(MyConstant, self).__init__(value=value)
        self.value = value

    def _init_weight(self, _, arr):
        arr[:] = mx.nd.array(self.value)
class CNNCreator_dpnet:
    module = None
    _data_dir_ = "/media/sveta/4991e634-dd81-4cb9-bf46-2fa9c7159263/TORCS_raw/"
    _model_dir_ = "model/dpnet/"
    _model_prefix_ = "dpnet"
    _input_names_ = ['data']
    _input_shapes_ = [(3,210,280)]
    _output_names_ = ['predictions_label']

    def load(self, context):
        lastEpoch = 0
        param_file = None

        # Remove the '_newest' copy first so it is not mistaken for a numbered epoch.
        try:
            os.remove(self._model_dir_ + self._model_prefix_ + "_newest-0000.params")
        except OSError:
            pass
        try:
            os.remove(self._model_dir_ + self._model_prefix_ + "_newest-symbol.json")
        except OSError:
            pass

        # Find the checkpoint with the highest epoch number.
        if os.path.isdir(self._model_dir_):
            for file in os.listdir(self._model_dir_):
                if ".params" in file and self._model_prefix_ in file:
                    epochStr = file.replace(".params", "").replace(self._model_prefix_ + "-", "")
                    epoch = int(epochStr)
                    if epoch > lastEpoch:
                        lastEpoch = epoch
                        param_file = file
        if param_file is None:
            return 0
        else:
            logging.info("Loading checkpoint: " + param_file)
            self.module.load(prefix=self._model_dir_ + self._model_prefix_,
                             epoch=lastEpoch,
                             data_names=self._input_names_,
                             label_names=self._output_names_,
                             context=context)
            return lastEpoch
    def load_data(self, batch_size):
        train_h5, test_h5 = self.load_h5_files()

        # Per-pixel normalization statistics computed over the training set.
        data_mean = train_h5[self._input_names_[0]][:].mean(axis=0)
        data_std = train_h5[self._input_names_[0]][:].std(axis=0) + 1e-5

        train_iter = mx.io.NDArrayIter(train_h5[self._input_names_[0]],
                                       train_h5[self._output_names_[0]],
                                       batch_size=batch_size,
                                       data_name=self._input_names_[0],
                                       label_name=self._output_names_[0])
        test_iter = None
        if test_h5 is not None:
            test_iter = mx.io.NDArrayIter(test_h5[self._input_names_[0]],
                                          test_h5[self._output_names_[0]],
                                          batch_size=batch_size,
                                          data_name=self._input_names_[0],
                                          label_name=self._output_names_[0])
        return train_iter, test_iter, data_mean, data_std
    def load_h5_files(self):
        train_h5 = None
        test_h5 = None
        train_path = self._data_dir_ + "train.h5"
        test_path = self._data_dir_ + "test.h5"
        if os.path.isfile(train_path):
            train_h5 = h5py.File(train_path, 'r')
            if not (self._input_names_[0] in train_h5 and self._output_names_[0] in train_h5):
                logging.error("The HDF5 file '" + os.path.abspath(train_path) + "' has to contain the datasets: "
                              + "'" + self._input_names_[0] + "', '" + self._output_names_[0] + "'")
                sys.exit(1)
            if os.path.isfile(test_path):
                test_h5 = h5py.File(test_path, 'r')
                if not (self._input_names_[0] in test_h5 and self._output_names_[0] in test_h5):
                    logging.error("The HDF5 file '" + os.path.abspath(test_path) + "' has to contain the datasets: "
                                  + "'" + self._input_names_[0] + "', '" + self._output_names_[0] + "'")
                    sys.exit(1)
            else:
                logging.warning("Couldn't load test set. File '" + os.path.abspath(test_path) + "' does not exist.")
            return train_h5, test_h5
        else:
            logging.error("Data loading failure. File '" + os.path.abspath(train_path) + "' does not exist.")
            sys.exit(1)
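
    # A minimal sketch of a compatible HDF5 layout (toy shapes assumed; the
    # label width matches the 14-unit regression output constructed below):
    #
    #   with h5py.File("train.h5", "w") as f:
    #       f.create_dataset("data", data=np.zeros((100, 3, 210, 280), dtype=np.float32))
    #       f.create_dataset("predictions_label", data=np.zeros((100, 14), dtype=np.float32))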
    def train(self, batch_size,
              num_epoch=10,
              optimizer='adam',
              optimizer_params=(('learning_rate', 0.001),),
              load_checkpoint=True,
              context='gpu',
              checkpoint_period=5,
              normalize=True):
        if context == 'gpu':
            mx_context = mx.gpu()
        elif context == 'cpu':
            mx_context = mx.cpu()
        else:
            logging.error("Context argument is '" + context + "'. Only 'cpu' and 'gpu' are valid arguments.")

        # Translate the generator's parameter names into MXNet's; expects a dict
        # when schedule parameters are used.
        if 'weight_decay' in optimizer_params:
            optimizer_params['wd'] = optimizer_params['weight_decay']
            del optimizer_params['weight_decay']
        if 'learning_rate_decay' in optimizer_params:
            min_learning_rate = 1e-08
            if 'learning_rate_minimum' in optimizer_params:
                min_learning_rate = optimizer_params['learning_rate_minimum']
                del optimizer_params['learning_rate_minimum']
            optimizer_params['lr_scheduler'] = mx.lr_scheduler.FactorScheduler(
                optimizer_params['step_size'],
                factor=optimizer_params['learning_rate_decay'],
                stop_factor_lr=min_learning_rate)
            del optimizer_params['step_size']
            del optimizer_params['learning_rate_decay']

        # train_iter, test_iter, data_mean, data_std = self.load_data(batch_size)
        train_iter, test_iter, data_mean, data_std = custom_functions.load_data_rec(self._data_dir_, batch_size)
        if self.module is None:
            if normalize:
                self.construct(mx_context, data_mean, data_std)
            else:
                self.construct(mx_context)

        begin_epoch = 0
        if load_checkpoint:
            begin_epoch = self.load(mx_context)
        else:
            if os.path.isdir(self._model_dir_):
                shutil.rmtree(self._model_dir_)

        try:
            os.makedirs(self._model_dir_)
        except OSError:
            if not os.path.isdir(self._model_dir_):
                raise

        self.module.fit(
            train_data=train_iter,
            eval_data=test_iter,
            eval_metric='mse',
            optimizer=optimizer,
            optimizer_params=optimizer_params,
            batch_end_callback=mx.callback.Speedometer(batch_size),
            epoch_end_callback=mx.callback.do_checkpoint(prefix=self._model_dir_ + self._model_prefix_, period=checkpoint_period),
            begin_epoch=begin_epoch,
            num_epoch=num_epoch + begin_epoch)
        self.module.save_checkpoint(self._model_dir_ + self._model_prefix_, num_epoch + begin_epoch)
        self.module.save_checkpoint(self._model_dir_ + self._model_prefix_ + '_newest', 0)
    def construct(self, context, data_mean=None, data_std=None):
        image = mx.sym.var("data",
                           shape=(0,3,210,280))
        # image, output shape: {[3,210,280]}

        # Subtract the training-set mean from every input; the std division is
        # generated but currently disabled.
        if data_mean is not None:
            # assert(data_std is not None)
            _data_mean_ = mx.sym.Variable("_data_mean_", shape=(3,210,280), init=MyConstant(value=data_mean.tolist()))
            _data_mean_ = mx.sym.BlockGrad(_data_mean_)
            #_data_std_ = mx.sym.Variable("_data_std_", shape=(3,210,280), init=MyConstant(value=data_std.tolist()))
            #_data_std_ = mx.sym.BlockGrad(_data_std_)
            image = mx.symbol.broadcast_sub(image, _data_mean_)
            # image = mx.symbol.broadcast_div(image, _data_std_)
        conv1_ = mx.symbol.pad(data=image,
                               mode='constant',
                               pad_width=(0,0,0,0,5,4,4,3),
                               constant_value=0)
        conv1_ = mx.symbol.Convolution(data=conv1_,
                                       kernel=(11,11),
                                       stride=(4,4),
                                       num_filter=96,
                                       no_bias=False,
                                       name="conv1_")
        # conv1_, output shape: {[96,53,70]}
        relu1_ = mx.symbol.Activation(data=conv1_,
                                      act_type='relu',
                                      name="relu1_")
        pool1_ = mx.symbol.pad(data=relu1_,
                               mode='constant',
                               pad_width=(0,0,0,0,1,1,1,0),
                               constant_value=0)
        pool1_ = mx.symbol.Pooling(data=pool1_,
                                   kernel=(3,3),
                                   pool_type="max",
                                   stride=(2,2),
                                   name="pool1_")
        # pool1_, output shape: {[96,27,35]}
        conv2_ = mx.symbol.pad(data=pool1_,
                               mode='constant',
                               pad_width=(0,0,0,0,1,1,1,1),
                               constant_value=0)
        conv2_ = mx.symbol.Convolution(data=conv2_,
                                       kernel=(5,5),
                                       stride=(4,4),
                                       num_filter=256,
                                       no_bias=False,
                                       name="conv2_")
        # conv2_, output shape: {[256,7,9]}
        relu2_ = mx.symbol.Activation(data=conv2_,
                                      act_type='relu',
                                      name="relu2_")
        pool2_ = mx.symbol.pad(data=relu2_,
                               mode='constant',
                               pad_width=(0,0,0,0,1,1,1,1),
                               constant_value=0)
        pool2_ = mx.symbol.Pooling(data=pool2_,
                                   kernel=(3,3),
                                   pool_type="max",
                                   stride=(2,2),
                                   name="pool2_")
        # pool2_, output shape: {[256,4,5]}
        conv3_ = mx.symbol.pad(data=pool2_,
                               mode='constant',
                               pad_width=(0,0,0,0,1,1,1,1),
                               constant_value=0)
        conv3_ = mx.symbol.Convolution(data=conv3_,
                                       kernel=(3,3),
                                       stride=(1,1),
                                       num_filter=384,
                                       no_bias=False,
                                       name="conv3_")
        # conv3_, output shape: {[384,4,5]}
        relu3_ = mx.symbol.Activation(data=conv3_,
                                      act_type='relu',
                                      name="relu3_")
        conv4_ = mx.symbol.pad(data=relu3_,
                               mode='constant',
                               pad_width=(0,0,0,0,1,1,1,1),
                               constant_value=0)
        conv4_ = mx.symbol.Convolution(data=conv4_,
                                       kernel=(3,3),
                                       stride=(1,1),
                                       num_filter=384,
                                       no_bias=False,
                                       name="conv4_")
        # conv4_, output shape: {[384,4,5]}
        relu4_ = mx.symbol.Activation(data=conv4_,
                                      act_type='relu',
                                      name="relu4_")
        conv5_ = mx.symbol.pad(data=relu4_,
                               mode='constant',
                               pad_width=(0,0,0,0,1,1,1,1),
                               constant_value=0)
        conv5_ = mx.symbol.Convolution(data=conv5_,
                                       kernel=(3,3),
                                       stride=(1,1),
                                       num_filter=256,
                                       no_bias=False,
                                       name="conv5_")
        # conv5_, output shape: {[256,4,5]}
        relu5_ = mx.symbol.Activation(data=conv5_,
                                      act_type='relu',
                                      name="relu5_")
        pool5_ = mx.symbol.pad(data=relu5_,
                               mode='constant',
                               pad_width=(0,0,0,0,1,0,1,1),
                               constant_value=0)
        pool5_ = mx.symbol.Pooling(data=pool5_,
                                   kernel=(3,3),
                                   pool_type="max",
                                   stride=(2,2),
                                   name="pool5_")
        # pool5_, output shape: {[256,2,3]}
        fc5_ = mx.symbol.flatten(data=pool5_)
        fc5_ = mx.symbol.FullyConnected(data=fc5_,
                                        num_hidden=4096,
                                        no_bias=False,
                                        name="fc5_")
        relu6_ = mx.symbol.Activation(data=fc5_,
                                      act_type='relu',
                                      name="relu6_")
        dropout6_ = mx.symbol.Dropout(data=relu6_,
                                      p=0.5,
                                      name="dropout6_")
        fc6_ = mx.symbol.FullyConnected(data=dropout6_,
                                        num_hidden=4096,
                                        no_bias=False,
                                        name="fc6_")
        relu7_ = mx.symbol.Activation(data=fc6_,
                                      act_type='relu',
                                      name="relu7_")
        dropout7_ = mx.symbol.Dropout(data=relu7_,
                                      p=0.5,
                                      name="dropout7_")
        fc7_ = mx.symbol.FullyConnected(data=dropout7_,
                                        num_hidden=256,
                                        no_bias=False,
                                        name="fc7_")
        relu8_ = mx.symbol.Activation(data=fc7_,
                                      act_type='relu',
                                      name="relu8_")
        dropout8_ = mx.symbol.Dropout(data=relu8_,
                                      p=0.5,
                                      name="dropout8_")
        fc8_ = mx.symbol.FullyConnected(data=dropout8_,
                                        num_hidden=14,
                                        no_bias=True,
                                        name="fc8_")
        predictions = mx.symbol.LinearRegressionOutput(data=fc8_,
                                                       name="predictions")

        self.module = mx.mod.Module(symbol=mx.symbol.Group([predictions]),
                                    data_names=self._input_names_,
                                    label_names=self._output_names_,
                                    context=context)
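
For reference, a minimal inference sketch against the saved "_newest" checkpoint, assuming standard MXNet checkpoint loading (paths follow _model_dir_ and _model_prefix_ above; the dummy label is only needed to bind the regression output):

import mxnet as mx

sym, arg_params, aux_params = mx.model.load_checkpoint("model/dpnet/dpnet_newest", 0)
module = mx.mod.Module(symbol=sym,
                       data_names=['data'],
                       label_names=['predictions_label'],
                       context=mx.cpu())
module.bind(data_shapes=[('data', (1, 3, 210, 280))],
            label_shapes=[('predictions_label', (1, 14))],
            for_training=False)
module.set_params(arg_params, aux_params)
module.forward(mx.io.DataBatch(data=[mx.nd.zeros((1, 3, 210, 280))],
                               label=[mx.nd.zeros((1, 14))]))
print(module.get_outputs()[0].asnumpy())  # 14 regression values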
import logging
import mxnet as mx
import CNNCreator_dpnet

if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger()
    handler = logging.FileHandler("train.log", "a", encoding=None, delay=True)
    logger.addHandler(handler)

    dpnet = CNNCreator_dpnet.CNNCreator_dpnet()
    dpnet.train(
        batch_size=64,
        num_epoch=100,
        context='cpu',
        normalize=True,
        optimizer='sgd',
        optimizer_params={
            'learning_rate': 0.01,
            'learning_rate_decay': 0.9,
            'step_size': 8000}
    )
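
Note that load_checkpoint defaults to True, so re-running this trainer resumes from the highest-numbered checkpoint in model/dpnet/ and trains for another num_epoch epochs; pass load_checkpoint=False to retrain from scratch (the model directory is then deleted and recreated).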