Commit ba09fcce authored by Marius Laska

multiple pipelines per script

parent 4c700ad7
@@ -6,6 +6,40 @@ from il_pipeline.models.layer_generator import hidden_layers
from il_pipeline.models.lr_normalizer import lr_normalizer
def reg_model_for_talos(x_train, y_train, x_val, y_val, params):
    model = Sequential()
    model.add(Dense(params['first_neuron'], input_dim=x_train.shape[1],
                    activation=params['activation'],
                    kernel_regularizer=regularizers.l2(
                        params['regularization_penalty'])
                    ))
    model.add(Dropout(params['dropout']))

    hidden_layers(model, params, x_train.shape[1])

    model.add(Dense(y_train.shape[1], activation=params['last_activation'],
                    # kernel_initializer=params['kernel_initializer'],
                    kernel_regularizer=regularizers.l2(
                        params['regularization_penalty'])))  # ,
                    # kernel_initializer=initializers.RandomUniform(minval=-10, maxval=15)))#,
                    # bias_initializer=initializers.RandomUniform(minval=0, maxval=0)))

    # compile the model
    model.compile(loss=params['losses'],
                  optimizer=params['optimizer'](
                      lr=lr_normalizer(params['lr'], params['optimizer'])))

    out = model.fit(x_train, y_train,
                    batch_size=params['batch_size'],
                    epochs=params['epochs'],
                    validation_data=[x_val, y_val],
                    verbose=0)
    return out, model


def bbox_model_for_talos(x_train, y_train, x_val, y_val, params):
    model = Sequential()
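For context: Talos calls a model function such as reg_model_for_talos once per hyperparameter combination and hands the chosen values over as a plain dict. A minimal sketch of such a dict, using only the keys read directly in the function above and assuming the standalone Keras API used by the file (the concrete values and the dict name are made up; any extra keys required by hidden_layers are project-specific and omitted):

    from keras.optimizers import Adam

    # Hypothetical single-combination params dict for reg_model_for_talos.
    # Values are illustrative; keys consumed by hidden_layers() are not shown.
    example_params = {
        'first_neuron': 128,
        'activation': 'relu',
        'regularization_penalty': 0.01,
        'dropout': 0.5,
        'last_activation': 'linear',
        'losses': 'mse',
        'optimizer': Adam,   # passed as a class, instantiated in compile()
        'lr': 1.0,           # scaled by lr_normalizer for the chosen optimizer
        'batch_size': 32,
        'epochs': 50,
    }

The second changed file, the script that builds the pipelines and runs the Talos scan, follows.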
import copy
import logging
import numpy as np
import argparse
from debug_tools.logger import getLogger
from il_pipeline.models.dnn_model import DnnModel
from il_pipeline.utility.config_reader import ConfigReader
from ldce.base import ClusterBase
from ldce.plotting.floorplan_plot import FloorplanPlot
from base.BboxModel import BboxModel
from base.bbox_model_definition import bbox_model_for_talos
from base.bbox_model_definition import bbox_model_for_talos, reg_model_for_talos
log = getLogger(level=logging.INFO)
from base.bbox_pipeline import BboxPipeline
import talos as ta
def execute(conf_file):
def execute(conf_file, exp_number, downsample):
area_classification = True
logging.basicConfig(level="INFO")
@@ -84,7 +86,8 @@ def execute(conf_file):
data_provider.area_labels = data_provider.labels
# compute grid encoding
data_provider.transform_to_grid_encoding()
if "GRID" in model_params['type']:
data_provider.transform_to_grid_encoding()
#data_provider.transform_to_2dim_grid_encoding(grid_size=20)
pipeline = BboxPipeline(data_provider,
@@ -94,44 +97,53 @@ def execute(conf_file):
pipe_params['name'])
if model_params is not None:
train_model(model_params, pipeline)
train_model(copy.deepcopy(model_params), pipeline, exp_number, downsample)
pipeline.store()
if model_params is not None:
pipe_files = [conf.output_dir + p_name for p_name in p_names]
BboxPipeline.merge_summaries(pipe_files, pipeline_params['name'])
def train_model(params, pipe: BboxPipeline):
def train_model(params, pipe: BboxPipeline, exp_number, downsample):
dp = pipe.data_provider
x_train, y_train = dp.get_train_data(labels=dp.grid_labels, split_idx=0, area_labels=False)
x_test, y_test = dp.get_test_data(labels=dp.grid_labels, split_idx=0, area_labels=False)
x_val, y_val = pipe.data_provider.get_val_data(labels=dp.grid_labels, split_idx=0, area_labels=False)
sup_labels = None
if "GRID" in params['type']:
sup_labels = dp.grid_labels
x_train, y_train = dp.get_train_data(labels=sup_labels, split_idx=0, area_labels=False)
x_test, y_test = dp.get_test_data(labels=sup_labels, split_idx=0, area_labels=False)
x_val, y_val = pipe.data_provider.get_val_data(labels=sup_labels, split_idx=0, area_labels=False)
x_train_val = np.concatenate((x_train, x_val), axis=0)
y_train_val = np.concatenate((y_train, y_val), axis=0)
m_type = params['type']
model = BboxModel(params['type'], pipe.summary, pipe.data_provider, params,
pipe.config.output_dir, pipe.filename)
model.setup_params()
num_epochs = params['epochs']
model_func = None
if "BBOX" in m_type:
model = BboxModel(params['type'], pipe.summary, pipe.data_provider, params,
pipe.config.output_dir, pipe.filename)
model.setup_params()
model.type = m_type
m_params = model.params
del m_params['loss']
del m_params['type']
del m_params['augmentation']
if 'pretrain' in params:
model.pre_train_model(params['pretrain'])
model_func = bbox_model_for_talos
model.type = m_type
model.params.update({'epochs': num_epochs})
elif "DNN" in m_type:
m_type = params['pred'] if 'pred' in params else "classification"
model = DnnModel(m_type, pipe.summary, pipe.data_provider, params,
pipe.config.output_dir, pipe.filename)
model.setup_params()
m_params = model.params
del m_params['loss']
del m_params['type']
del m_params['pred']
del m_params['augmentation']
m_params = model.params
del m_params['loss']
del m_params['type']
#del m_params['pred']
del m_params['augmentation']
model_func = reg_model_for_talos
# put every element in list
m_params = {k: [v] if type(v) not in [list, tuple] else v for (k, v) in m_params.items()}
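The comprehension above normalizes the parameter dict into the lists-of-candidate-values format that talos expects: every scalar is wrapped in a one-element list, while existing lists and tuples pass through unchanged. A quick illustration with made-up values:

    # Same comprehension as above, applied to a small example dict.
    m_params = {'dropout': 0.5, 'first_neuron': [64, 128], 'epochs': 50}
    m_params = {k: [v] if type(v) not in [list, tuple] else v
                for (k, v) in m_params.items()}
    # -> {'dropout': [0.5], 'first_neuron': [64, 128], 'epochs': [50]}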
@@ -140,13 +152,11 @@ def train_model(params, pipe: BboxPipeline):
y=y_train_val,
x_val=x_test,
y_val=y_test,
model=bbox_model_for_talos,
grid_downsample=0.1,
model=model_func,
grid_downsample=downsample,
params=m_params,
dataset_name='indoor_loc_box',
experiment_no='1')
print("test")
dataset_name=m_type,
experiment_no=exp_number)
def report():
@@ -162,9 +172,18 @@ def report():
def main():
    file = "config/lohan_talos.yml"
    execute(file)
    #point_uncertainty_estimation(file, "evaluation/lohan/old_loss/progress/output/BBOX_256_base.hdf5")

    parser = argparse.ArgumentParser()
    parser.add_argument("-c", help="Path to yml config file")
    parser.add_argument("-n", help="Exp number")
    parser.add_argument("-d", help="Downsample ratio")
    args = parser.parse_args()

    downsample = None
    if args.d:
        downsample = float(args.d)

    execute(args.c, exp_number=args.n, downsample=downsample)
if __name__ == "__main__":
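With the argparse-based main(), the config path, experiment number, and Talos grid downsample ratio now come from the command line instead of being hard-coded. A small sketch of how the new flags are interpreted (the argument values are assumptions; the parsing mirrors main() above):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("-c", help="Path to yml config file")
    parser.add_argument("-n", help="Exp number")
    parser.add_argument("-d", help="Downsample ratio")

    # e.g. started as: <script>.py -c config/lohan_talos.yml -n 2 -d 0.1
    args = parser.parse_args(["-c", "config/lohan_talos.yml", "-n", "2", "-d", "0.1"])
    downsample = float(args.d) if args.d else None
    # execute(args.c, exp_number=args.n, downsample=downsample) then runs one
    # experiment, with grid_downsample=0.1 telling Talos to sample roughly 10%
    # of the full parameter grid.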