Aufgrund einer Wartung wird GitLab am 18.01. zwischen 8:00 und 9:00 Uhr kurzzeitig nicht zur Verfügung stehen. / Due to maintenance, GitLab will be temporarily unavailable on 18.01. between 8:00 and 9:00 am.

Commit cdc619ad authored by Evgeny Kusmenko's avatar Evgeny Kusmenko
Browse files

Merge branch 'develop' into 'master'

Updated to work with new CNNArchLang version

See merge request !41
parents 4f9c06d2 edc4763a
Pipeline #171108 passed with stages
in 8 minutes and 26 seconds
......@@ -8,16 +8,16 @@
<groupId>de.monticore.lang.monticar</groupId>
<artifactId>cnnarch-caffe2-generator</artifactId>
<version>0.2.12-SNAPSHOT</version>
<version>0.2.13-SNAPSHOT</version>
<!-- == PROJECT DEPENDENCIES ============================================= -->
<properties>
<!-- .. SE-Libraries .................................................. -->
<CNNArch.version>0.3.1-SNAPSHOT</CNNArch.version>
<CNNTrain.version>0.3.2-SNAPSHOT</CNNTrain.version>
<CNNArch2X.version>0.0.1-SNAPSHOT</CNNArch2X.version>
<CNNArch.version>0.3.2-SNAPSHOT</CNNArch.version>
<CNNTrain.version>0.3.6-SNAPSHOT</CNNTrain.version>
<CNNArch2X.version>0.0.3-SNAPSHOT</CNNArch2X.version>
<embedded-montiarc-math-opt-generator>0.1.4</embedded-montiarc-math-opt-generator>
<!-- .. Libraries .................................................. -->
......
......@@ -119,16 +119,16 @@ public class CNNArchTemplateController {
public List<String> getArchitectureInputs(){
List<String> list = new ArrayList<>();
for (IOSymbol ioElement : getArchitecture().getInputs()){
list.add(nameManager.getName(ioElement));
for (VariableSymbol element : getArchitecture().getInputs()){
list.add(nameManager.getName(element));
}
return list;
}
public List<String> getArchitectureOutputs(){
List<String> list = new ArrayList<>();
for (IOSymbol ioElement : getArchitecture().getOutputs()){
list.add(nameManager.getName(ioElement));
for (VariableSymbol element : getArchitecture().getOutputs()){
list.add(nameManager.getName(element));
}
return list;
}
......@@ -149,18 +149,18 @@ public class CNNArchTemplateController {
TemplateConfiguration.processTemplate(ftlContext, templatePath, writer);
}
public void include(IOSymbol ioElement, Writer writer){
public void include(VariableSymbol element, Writer writer){
ArchitectureElementData previousElement = getCurrentElement();
setCurrentElement(ioElement);
setCurrentElement(element);
if (ioElement.isAtomic()){
if (ioElement.isInput()){
if (element.isAtomic()){
if (element.isInput()){
include(TEMPLATE_ELEMENTS_DIR_PATH, "Input", writer);
} else {
include(TEMPLATE_ELEMENTS_DIR_PATH, "Output", writer);
}
} else {
include(ioElement.getResolvedThis().get(), writer);
include(element.getResolvedThis().get(), writer);
}
setCurrentElement(previousElement);
......@@ -200,7 +200,7 @@ public class CNNArchTemplateController {
} else if (architectureElement instanceof LayerSymbol){
include((LayerSymbol) architectureElement, writer);
} else {
include((IOSymbol) architectureElement, writer);
include((VariableSymbol) architectureElement, writer);
}
}
......
......@@ -30,7 +30,7 @@ class ${tc.fileNameWithoutEnding}_0{
NetDef initNet, predictNet;
public:
const std::vector<TIndex> input_shapes = {<#list tc.architecture.inputs as input>{1,${tc.join(input.definition.type.dimensions, ",")}}<#if input?has_next>,</#if></#list>};
const std::vector<TIndex> input_shapes = {<#list tc.architecture.inputs as input>{1,${tc.join(input.ioDeclaration.type.dimensions, ",")}}<#if input?has_next>,</#if></#list>};
explicit ${tc.fileNameWithoutEnding}_0(){
init(input_shapes);
......@@ -105,9 +105,9 @@ class ${tc.fileNameWithoutEnding}_0{
// Get output blob
<#list tc.architectureOutputs as outputName>
#ifdef USE_GPU
auto ${outputName + "Blob"} = TensorCPU(workSpace.GetBlob("${outputName}")->Get<TensorCUDA>());
auto ${outputName + "Blob"} = TensorCPU(workSpace.GetBlob("${outputName?keep_before_last("_")}")->Get<TensorCUDA>());
#else
auto ${outputName + "Blob"} = workSpace.GetBlob("${outputName}")->Get<TensorCPU>();
auto ${outputName + "Blob"} = workSpace.GetBlob("${outputName?keep_before_last("_")}")->Get<TensorCPU>();
#endif
${outputName}.assign(${outputName + "Blob"}.data<float>(),${outputName + "Blob"}.data<float>() + ${outputName + "Blob"}.size());
......
<#list tc.architecture.outputs as output>
<#assign shape = output.definition.type.dimensions>
<#assign shape = output.ioDeclaration.type.dimensions>
vector<float> CNN_${tc.getName(output)}(<#list shape as dim>${dim?c}<#if dim?has_next>*</#if></#list>);
</#list>
......@@ -8,7 +8,7 @@
</#if></#list>);
<#list tc.architecture.outputs as output>
<#assign shape = output.definition.type.dimensions>
<#assign shape = output.ioDeclaration.type.dimensions>
<#if shape?size == 1>
${output.name}<#if output.arrayAccess.isPresent()>[${output.arrayAccess.get().intValue.get()?c}]</#if> = CNNTranslator::translateToCol(CNN_${tc.getName(output)}, std::vector<size_t> {${shape[0]?c}});
</#if>
......
......@@ -72,9 +72,9 @@ class CNNCreator_LeNet:
def create_model(self, model, data, device_opts, is_test):
with core.DeviceScope(device_opts):
image = data
# image, output shape: {[1,28,28]}
conv1_ = brew.conv(model, image, 'conv1_', dim_in=1, dim_out=20, kernel=5, stride=1)
image_ = data
# image_, output shape: {[1,28,28]}
conv1_ = brew.conv(model, image_, 'conv1_', dim_in=1, dim_out=20, kernel=5, stride=1)
# conv1_, output shape: {[20,24,24]}
pool1_ = brew.max_pool(model, conv1_, 'pool1_', kernel=2, stride=2)
# pool1_, output shape: {[20,12,12]}
......@@ -87,9 +87,9 @@ class CNNCreator_LeNet:
relu2_ = brew.relu(model, fc2_, fc2_)
fc3_ = brew.fc(model, relu2_, 'fc3_', dim_in=500, dim_out=10)
# fc3_, output shape: {[10,1,1]}
predictions = brew.softmax(model, fc3_, 'predictions')
predictions_ = brew.softmax(model, fc3_, 'predictions_')
return predictions
return predictions_
# this adds the loss and optimizer
def add_training_operators(self, model, output, label, device_opts, loss, opt_type, base_learning_rate, policy, stepsize, epsilon, beta1, beta2, gamma, momentum) :
......@@ -150,10 +150,10 @@ class CNNCreator_LeNet:
# == Training model ==
train_model= model_helper.ModelHelper(name="train_net", arg_scope=arg_scope)
data, label, train_dataset_size = self.add_input(train_model, batch_size=batch_size, db=os.path.join(self._data_dir_, 'train_lmdb'), db_type='lmdb', device_opts=device_opts)
predictions = self.create_model(train_model, data, device_opts=device_opts, is_test=False)
self.add_training_operators(train_model, predictions, label, device_opts, loss, opt_type, base_learning_rate, policy, stepsize, epsilon, beta1, beta2, gamma, momentum)
predictions_ = self.create_model(train_model, data, device_opts=device_opts, is_test=False)
self.add_training_operators(train_model, predictions_, label, device_opts, loss, opt_type, base_learning_rate, policy, stepsize, epsilon, beta1, beta2, gamma, momentum)
if not loss == 'euclidean':
self.add_accuracy(train_model, predictions, label, device_opts, eval_metric)
self.add_accuracy(train_model, predictions_, label, device_opts, eval_metric)
with core.DeviceScope(device_opts):
brew.add_weight_decay(train_model, weight_decay)
......@@ -185,9 +185,9 @@ class CNNCreator_LeNet:
# == Testing model. ==
test_model= model_helper.ModelHelper(name="test_net", arg_scope=arg_scope, init_params=False)
data, label, test_dataset_size = self.add_input(test_model, batch_size=batch_size, db=os.path.join(self._data_dir_, 'test_lmdb'), db_type='lmdb', device_opts=device_opts)
predictions = self.create_model(test_model, data, device_opts=device_opts, is_test=True)
predictions_ = self.create_model(test_model, data, device_opts=device_opts, is_test=True)
if not loss == 'euclidean':
self.add_accuracy(test_model, predictions, label, device_opts, eval_metric)
self.add_accuracy(test_model, predictions_, label, device_opts, eval_metric)
workspace.RunNetOnce(test_model.param_init_net)
workspace.CreateNet(test_model.net, overwrite=True)
......
......@@ -72,9 +72,9 @@ class CNNCreator_VGG16:
def create_model(self, model, data, device_opts, is_test):
with core.DeviceScope(device_opts):
data = data
# data, output shape: {[3,224,224]}
conv1_ = brew.conv(model, data, 'conv1_', dim_in=3, dim_out=64, kernel=3, stride=1, pad=1)
data_ = data
# data_, output shape: {[3,224,224]}
conv1_ = brew.conv(model, data_, 'conv1_', dim_in=3, dim_out=64, kernel=3, stride=1, pad=1)
# conv1_, output shape: {[64,224,224]}
relu1_ = brew.relu(model, conv1_, conv1_)
conv2_ = brew.conv(model, relu1_, 'conv2_', dim_in=64, dim_out=64, kernel=3, stride=1, pad=1)
......@@ -133,9 +133,9 @@ class CNNCreator_VGG16:
dropout15_ = brew.dropout(model, relu15_, 'dropout15_', ratio=0.5, is_test=False)
fc15_ = brew.fc(model, dropout15_, 'fc15_', dim_in=4096, dim_out=1000)
# fc15_, output shape: {[1000,1,1]}
predictions = brew.softmax(model, fc15_, 'predictions')
predictions_ = brew.softmax(model, fc15_, 'predictions_')
return predictions
return predictions_
# this adds the loss and optimizer
def add_training_operators(self, model, output, label, device_opts, loss, opt_type, base_learning_rate, policy, stepsize, epsilon, beta1, beta2, gamma, momentum) :
......@@ -196,10 +196,10 @@ class CNNCreator_VGG16:
# == Training model ==
train_model= model_helper.ModelHelper(name="train_net", arg_scope=arg_scope)
data, label, train_dataset_size = self.add_input(train_model, batch_size=batch_size, db=os.path.join(self._data_dir_, 'train_lmdb'), db_type='lmdb', device_opts=device_opts)
predictions = self.create_model(train_model, data, device_opts=device_opts, is_test=False)
self.add_training_operators(train_model, predictions, label, device_opts, loss, opt_type, base_learning_rate, policy, stepsize, epsilon, beta1, beta2, gamma, momentum)
predictions_ = self.create_model(train_model, data, device_opts=device_opts, is_test=False)
self.add_training_operators(train_model, predictions_, label, device_opts, loss, opt_type, base_learning_rate, policy, stepsize, epsilon, beta1, beta2, gamma, momentum)
if not loss == 'euclidean':
self.add_accuracy(train_model, predictions, label, device_opts, eval_metric)
self.add_accuracy(train_model, predictions_, label, device_opts, eval_metric)
with core.DeviceScope(device_opts):
brew.add_weight_decay(train_model, weight_decay)
......@@ -231,9 +231,9 @@ class CNNCreator_VGG16:
# == Testing model. ==
test_model= model_helper.ModelHelper(name="test_net", arg_scope=arg_scope, init_params=False)
data, label, test_dataset_size = self.add_input(test_model, batch_size=batch_size, db=os.path.join(self._data_dir_, 'test_lmdb'), db_type='lmdb', device_opts=device_opts)
predictions = self.create_model(test_model, data, device_opts=device_opts, is_test=True)
predictions_ = self.create_model(test_model, data, device_opts=device_opts, is_test=True)
if not loss == 'euclidean':
self.add_accuracy(test_model, predictions, label, device_opts, eval_metric)
self.add_accuracy(test_model, predictions_, label, device_opts, eval_metric)
workspace.RunNetOnce(test_model.param_init_net)
workspace.CreateNet(test_model.net, overwrite=True)
......@@ -314,4 +314,4 @@ class CNNCreator_VGG16:
net_def.ParseFromString(f.read())
net_def.device_option.CopyFrom(device_opts)
workspace.CreateNet(net_def.SerializeToString(), overwrite=True)
print("*** Loaded init_net and predict_net ***")
\ No newline at end of file
print("*** Loaded init_net and predict_net ***")
......@@ -85,9 +85,9 @@ class CNNPredictor_LeNet_0{
input.Resize(input_shapes);
}
void predict(const std::vector<float> &image, std::vector<float> &predictions){
void predict(const std::vector<float> &image_, std::vector<float> &predictions_){
//Note: ShareExternalPointer requires a float pointer.
input.ShareExternalPointer((float *) image.data());
input.ShareExternalPointer((float *) image_.data());
// Get input blob
#ifdef USE_GPU
......@@ -104,11 +104,11 @@ class CNNPredictor_LeNet_0{
// Get output blob
#ifdef USE_GPU
auto predictionsBlob = TensorCPU(workSpace.GetBlob("predictions")->Get<TensorCUDA>());
auto predictions_Blob = TensorCPU(workSpace.GetBlob("predictions")->Get<TensorCUDA>());
#else
auto predictionsBlob = workSpace.GetBlob("predictions")->Get<TensorCPU>();
auto predictions_Blob = workSpace.GetBlob("predictions")->Get<TensorCPU>();
#endif
predictions.assign(predictionsBlob.data<float>(),predictionsBlob.data<float>() + predictionsBlob.size());
predictions_.assign(predictions_Blob.data<float>(),predictions_Blob.data<float>() + predictions_Blob.size());
google::protobuf::ShutdownProtobufLibrary();
}
......
......@@ -85,9 +85,9 @@ class CNNPredictor_VGG16_0{
input.Resize(input_shapes);
}
void predict(const std::vector<float> &data, std::vector<float> &predictions){
void predict(const std::vector<float> &data_, std::vector<float> &predictions_){
//Note: ShareExternalPointer requires a float pointer.
input.ShareExternalPointer((float *) data.data());
input.ShareExternalPointer((float *) data_.data());
// Get input blob
#ifdef USE_GPU
......@@ -104,11 +104,11 @@ class CNNPredictor_VGG16_0{
// Get output blob
#ifdef USE_GPU
auto predictionsBlob = TensorCPU(workSpace.GetBlob("predictions")->Get<TensorCUDA>());
auto predictions_Blob = TensorCPU(workSpace.GetBlob("predictions")->Get<TensorCUDA>());
#else
auto predictionsBlob = workSpace.GetBlob("predictions")->Get<TensorCPU>();
auto predictions_Blob = workSpace.GetBlob("predictions")->Get<TensorCPU>();
#endif
predictions.assign(predictionsBlob.data<float>(),predictionsBlob.data<float>() + predictionsBlob.size());
predictions_.assign(predictions_Blob.data<float>(),predictions_Blob.data<float>() + predictions_Blob.size());
google::protobuf::ShutdownProtobufLibrary();
}
......
......@@ -18,7 +18,6 @@ if __name__ == "__main__":
batch_size=100,
context='gpu',
eval_metric='mse',
loss='cross_entropy',
opt_type='rmsprop',
epsilon=1.0E-6,
weight_decay=0.01,
......
vector<float> CNN_predictions(10);
vector<float> CNN_predictions_(10);
_predictor_0_.predict(CNNTranslator::translate(image),
CNN_predictions);
CNN_predictions_);
predictions = CNNTranslator::translateToCol(CNN_predictions, std::vector<size_t> {10});
predictions = CNNTranslator::translateToCol(CNN_predictions_, std::vector<size_t> {10});
vector<float> CNN_predictions(1000);
vector<float> CNN_predictions_(1000);
_predictor_0_.predict(CNNTranslator::translate(data),
CNN_predictions);
CNN_predictions_);
predictions = CNNTranslator::translateToCol(CNN_predictions, std::vector<size_t> {1000});
\ No newline at end of file
predictions = CNNTranslator::translateToCol(CNN_predictions_, std::vector<size_t> {1000});
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment