Commit e69f6d76 authored by Bichen Li's avatar Bichen Li Committed by Bichen Li

- Correct the file name, use comparison as a function

parent 00cc7059


# ignore results
*.csv
*.mat
# ignore symbolic links
*.egg-info
*.eggs
# ignore compiled python files
*.pyc
# ignore logging files
*.log
# ignore generated dymola files
buildlog.txt
dsfinal.txt
dsin.txt
dslog.txt
dsmodel*
dymosim*
# ignore matlab dumping file
*.mdmp
# ignore spyder project
.spyderproject
.spyproject
# ignore pycharm files
.idea
__pycache__
# ignore jupyter notebook files
.ipynb_checkpoints
\ No newline at end of file
Test:
  script: Try.sh
# Dataprocessing toolkit for RWTH ACS simulators
## Copyright
2017, Institute for Automation of Complex Power Systems, EONERC, RWTH Aachen University
## License
This project is released under the terms of the [GPL version 3](COPYING.md).
```
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
```
For other licensing options please consult [Prof. Antonello Monti](mailto:amonti@eonerc.rwth-aachen.de).
## Contact
[![EONERC ACS Logo](doc/eonerc_logo.png)](http://www.acs.eonerc.rwth-aachen.de)
- Markus Mirz <mmirz@eonerc.rwth-aachen.de>
- Jan Dinkelbach <JDinkelbach@eonerc.rwth-aachen.de>
- Steffen Vogel <stvogel@eonerc.rwth-aachen.de>
[Institute for Automation of Complex Power Systems (ACS)](http://www.acs.eonerc.rwth-aachen.de)
[EON Energy Research Center (EONERC)](http://www.eonerc.rwth-aachen.de)
[RWTH University Aachen, Germany](http://www.rwth-aachen.de)
#!/bin/bash
net_name="Slack_ZLoad"
python /home/cafi/Desktop/data-processing/examples/Assert_Results/Assert_Results.py "$net_name"
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import re
import os
import sys
import cmath
sys.path.append(r'/home/cafi/Desktop/data-processing/dataprocessing')
from readtools import *


def compare_modelica_neplan(Net_Name):  # compare the result files from NEPLAN and Modelica
    # Read in original NEPLAN result file
    file_Neplan = os.path.abspath("/home/cafi/Desktop/" + Net_Name + "/" + Net_Name + ".rlf")
    # Read in original Modelica result file
    file_Modelica = os.path.abspath("/home/cafi/Desktop/" + Net_Name + "/" + Net_Name + ".mat")
    result_neplan = read_timeseries_NEPLAN_loadflow(file_Neplan)
    result_modelica = read_timeseries_Modelica(file_Modelica)
    list_del = []
    for i in range(len(result_neplan)):
        result_neplan[i].name = result_neplan[i].name.replace(' ', '')
        result_neplan[i].name = result_neplan[i].name.upper()
        if 'ANGLE' in result_neplan[i].name:
            pass
        else:
            result_neplan[i].values = result_neplan[i].values * 1000  # unification of the unit, which is kV/kA in NEPLAN
    for i in range(len(result_modelica)):
        result_modelica[i].name = result_modelica[i].name.upper()
        if 'ANGLE' in result_modelica[i].name:
            result_modelica[i].values = result_modelica[i].values / cmath.pi * 180  # unification of the unit
            # f_modelica.write('%s is %s \n' % (result_modelica[i].name, result_modelica[i].values[1]))
    timeseries_names = []  # list of variable names
    timeseries_error = []  # list of errors
    len_limit = len(result_modelica)
    for i in range(len(result_neplan)):
        flag_found = False
        for j in range(len_limit):
            if result_neplan[i].name == result_modelica[j].name:  # find the same variable
                timeseries_names.append(result_neplan[i].name)
                timeseries_error.append(TimeSeries.rmse(result_modelica[j], result_neplan[i]))
                flag_found = True
                break
        if not flag_found:
            # No such variable in the Modelica model, set the error to -1
            timeseries_names.append(result_neplan[i].name)
            timeseries_error.append(-1)
    return dict(zip(timeseries_names, timeseries_error))


def assert_modelia_neplan_results(net_name):  # Assert the model using the function above
    fail_list = []
    error = compare_modelica_neplan(net_name)
    for name in error.keys():
        if abs(error[name]) > 0.5:
            fail_list.append(name)
        else:
            print("Test on %s Passed" % name)
    if len(fail_list) == 0:
        print("\033[1;36;40mModel Passed\033[0m")
    else:
        for name in fail_list:
            print("\033[1;31;40mTest on %s Failed\033[0m" % name)
from dataprocessing.readtools import *
from dataprocessing.timeseries import *


def get_node_voltage_phasors(dpsim_timeseries_list):
    """Calculate voltage phasors of all nodes
    :param dpsim_timeseries_list: timeseries list retrieved from dpsim results
    :return: dict mapping node names to {'abs': magnitude timeseries, 'phase': phase timeseries}
    """
    voltage_phasor_list = {}
    for ts in dpsim_timeseries_list:
        ts_abs = ts.abs(ts.name + '_abs')
        ts_phase = ts.phase(ts.name + '_phase')
        ts_phasor = {}
        ts_phasor['abs'] = ts_abs
        ts_phasor['phase'] = ts_phase
        voltage_phasor_list[ts.name] = ts_phasor
    return voltage_phasor_list


def get_node_emt_voltages(timeseries_list, freq):
    """Calculate EMT voltages of all nodes by shifting the dynamic phasors to EMT signals
    :param timeseries_list: timeseries list retrieved from dpsim results
    :param freq: system frequency used for the phasor-to-EMT shift
    :return: dict mapping node names to EMT voltage timeseries
    """
    voltages_list = {}
    for ts in timeseries_list:
        ts_emt = ts.dynphasor_shift_to_emt(ts.name, freq)
        voltages_list[ts.name] = ts_emt
    return voltages_list
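A short usage sketch of how these helpers chain with the DPsim readers from this commit; the file name, the 50 Hz system frequency, and the import path are assumptions:
```
from dataprocessing.readtools import read_timeseries_dpsim_cmpl  # reader defined in this repo

# 'results.csv' is a placeholder file name; the helpers above are assumed importable
dpsim_ts = read_timeseries_dpsim_cmpl('results.csv')
phasors = get_node_voltage_phasors(dpsim_ts)        # {'n1': {'abs': ..., 'phase': ...}, ...}
emt_voltages = get_node_emt_voltages(dpsim_ts, 50)  # 50 Hz is an assumed system frequency
print(phasors['n1']['abs'].values[0], emt_voltages['n1'].values[0])
```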
import matplotlib.pyplot as plt
import numpy as np
from .timeseries import *


def plot_timeseries(figure_id, timeseries, plt_linestyle='-', plt_linewidth=2, plt_color=None, plt_legend_loc='lower right'):
    """
    This function plots either a single timeseries or several timeseries in the figure defined by figure_id.
    Several timeseries (handed over in a list) are plotted in several subplots.
    In order to plot several timeseries in one plot, the function is to be called several times (hold is activated).
    """
    plt.figure(figure_id)
    if not isinstance(timeseries, list):
        if plt_color:
            plt.plot(timeseries.time, timeseries.values, linestyle=plt_linestyle, label=timeseries.label, linewidth=plt_linewidth, color=plt_color)
        else:
            plt.plot(timeseries.time, timeseries.values, linestyle=plt_linestyle, label=timeseries.label, linewidth=plt_linewidth)
        plt.gca().autoscale(axis='x', tight=True)
        plt.legend(loc=plt_legend_loc)
    else:
        for ts in timeseries:
            plt.subplot(len(timeseries), 1, timeseries.index(ts) + 1)
            if plt_color:
                plt.plot(ts.time, ts.values, linestyle=plt_linestyle, label=ts.label, linewidth=plt_linewidth, color=plt_color)
            else:
                plt.plot(ts.time, ts.values, linestyle=plt_linestyle, label=ts.label, linewidth=plt_linewidth)
            plt.gca().autoscale(axis='x', tight=True)
            plt.legend()


def set_timeseries_labels(timeseries, timeseries_labels):
    """
    Sets label attribute of timeseries, later used in plotting functions.
    Suitable for single timeseries as well as for several timeseries (handed over in a list).
    """
    if not isinstance(timeseries, list):
        timeseries.label = timeseries_labels
    else:
        for ts in timeseries:
            ts.label = timeseries_labels[timeseries.index(ts)]
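As the docstring notes, plotting two series into the same axes means calling plot_timeseries twice with the same figure_id. A minimal sketch; ts_a and ts_b are placeholder TimeSeries objects and the import path is an assumption:
```
import matplotlib.pyplot as plt
from dataprocessing.plot import plot_timeseries, set_timeseries_labels  # module path is an assumption

# ts_a and ts_b are assumed TimeSeries objects, e.g. returned by one of the read_timeseries_* functions
set_timeseries_labels(ts_a, 'reference')
set_timeseries_labels(ts_b, 'simulation')
plot_timeseries(1, ts_a)                       # first call creates figure 1 and plots ts_a
plot_timeseries(1, ts_b, plt_linestyle='--')   # second call draws ts_b into the same axes
plt.show()
```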
import numpy as np
import pandas as pd
from .timeseries import *
import re
import cmath


def read_timeseries_Modelica(filename, timeseries_names=None, is_regex=False):
    from modelicares import SimRes
    sim = SimRes(filename)
    if timeseries_names is None and is_regex is False:
        # No trajectory names or regex specified, thus read in all
        timeseries = []
        for name in sim.names():
            timeseries.append(TimeSeries(name, sim(name).times(), sim(name).values()))
        timeseries_names = sim.names()
    elif is_regex is True:
        # Read in variables which match with regex
        timeseries = []
        p = re.compile(timeseries_names)
        timeseries_names = [name for name in sim.names() if p.search(name)]
        timeseries_names.sort()
        for name in timeseries_names:
            timeseries.append(TimeSeries(name, sim(name).times(), sim(name).values()))
    else:
        # Read in specified time series
        if not isinstance(timeseries_names, list):
            timeseries = TimeSeries(timeseries_names, sim(timeseries_names).times(), sim(timeseries_names).values())
        else:
            timeseries = []
            for name in timeseries_names:
                timeseries.append(TimeSeries(name, sim(name).times(), sim(name).values()))
    print('Modelica results column names: ' + str(timeseries_names))
    print('Modelica results number: ' + str(len(timeseries_names)))
    return timeseries
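The reader above supports three call modes: read all trajectories, filter names by a regular expression, or pass explicit names. A short sketch; the .mat file name and variable names are placeholders:
```
ts_all = read_timeseries_Modelica('Slack_ZLoad.mat')                            # all trajectories
ts_regex = read_timeseries_Modelica('Slack_ZLoad.mat', r'\.V$', is_regex=True)  # names matching a regex
ts_single = read_timeseries_Modelica('Slack_ZLoad.mat', 'N1.Vpp')               # one named variable, returns a single TimeSeries
```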
def read_timeseries_PLECS(filename, timeseries_names=None):
    pd_df = pd.read_csv(filename)
    timeseries_list = []
    if timeseries_names is None:
        # No trajectory names specified, thus read in all
        timeseries_names = list(pd_df.columns.values)
        timeseries_names.remove('Time')
        for name in timeseries_names:
            timeseries_list.append(TimeSeries(name, pd_df['Time'].values, pd_df[name].values))
    else:
        # Read in specified time series
        for name in timeseries_names:
            timeseries_list.append(TimeSeries(name, pd_df['Time'].values, pd_df[name].values))
    print('PLECS results column names: ' + str(timeseries_names))
    print('PLECS results number: ' + str(len(timeseries_list)))
    return timeseries_list
def read_timeseries_dpsim_real(filename, timeseries_names=None):
    """Reads real time series data from DPsim log file which may have a header.
    Timeseries names are assigned according to the header names if available.
    :param filename: name of the csv file that has the data
    :param timeseries_names: column names which should be read
    :return: list of Timeseries objects
    """
    timeseries_list = []
    column_names = []
    pd_df = pd.read_csv(filename)
    if timeseries_names is None:
        # No column names specified, thus read in all and strip spaces
        pd_df.rename(columns=lambda x: x.strip(), inplace=True)
        column_names = list(pd_df.columns.values)
        # Remove timestamps column name and store separately
        column_names.remove('time')
        timestamps = pd_df.iloc[:, 0]
        for name in column_names:
            timeseries_list.append(TimeSeries(name, timestamps, pd_df[name].values))
    else:
        # Read in specified time series
        print('no column names specified yet')
    print('DPsim results column names: ' + str(column_names))
    print('DPsim results number: ' + str(len(timeseries_list)))
    return timeseries_list
def read_timeseries_dpsim_cmpl(filename, timeseries_names=None):
    """Reads complex time series data from DPsim log file. Real and
    imaginary part are stored in one complex variable.
    :param filename: name of the csv file that has the data
    :param timeseries_names: column name which should be read
    :return: list of Timeseries objects
    """
    pd_df = pd.read_csv(filename)
    timeseries_list = []
    column_names = []
    if timeseries_names is None:
        # No column names specified, thus read in all and strip off spaces
        pd_df.rename(columns=lambda x: x.strip(), inplace=True)
        column_names = list(pd_df.columns.values)
        # Remove timestamps column name and store separately
        column_names.remove('time')
        timestamps = pd_df.iloc[:, 0]
        # Calculate number of network nodes since array is [real, imag]
        node_number = int(len(column_names) / 2)
        node_index = 1
        for column in column_names:
            if node_index <= node_number:
                ts_name = 'n' + str(node_index)
                timeseries_list.append(
                    TimeSeries(ts_name, timestamps, np.vectorize(complex)(pd_df.iloc[:, node_index], pd_df.iloc[:, node_index + node_number])))
            else:
                break
            node_index = node_index + 1
    else:
        # Read in specified time series
        print('cannot read specified columns yet')
    print('DPsim results column names: ' + str(column_names))
    print('DPsim results number: ' + str(len(timeseries_list)))
    return timeseries_list
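The reader above expects one time column followed by all real parts and then all imaginary parts, and pairs column k with column k + node_number to form one complex value per node. A sketch of reading such a file; the file name and values are made up:
```
# Hypothetical CSV layout for two nodes:
#   time, n1_re, n2_re, n1_im, n2_im
#   0.00, 0.70,  0.69,  0.10,  0.12
# Node n1 becomes 0.70 + 0.10j, node n2 becomes 0.69 + 0.12j.
ts = read_timeseries_dpsim_cmpl('lvector-dpsim.csv')  # file name is an assumption
print(ts[0].name, ts[0].values[0])                    # e.g. "n1 (0.7+0.1j)"
```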
def read_timeseries_dpsim_cmpl_separate(filename, timeseries_names=None):
    """Deprecated - Reads complex time series data from DPsim log file. Real and
    imaginary part are stored separately.
    :param filename: name of the csv file that has the data
    :param timeseries_names: column name which should be read
    :return: list of Timeseries objects
    """
    pd_df = pd.read_csv(filename, header=None)
    timeseries_list = []
    if timeseries_names is None:
        # No trajectory names specified, thus read in all
        column_names = list(pd_df.columns.values)
        # Remove timestamps column name and store separately
        column_names.remove(0)
        timestamps = pd_df.iloc[:, 0]
        # Calculate number of network nodes since array is [real, imag]
        node_number = int(len(column_names) / 2)
        node_index = 1
        for column in column_names:
            if node_index <= node_number:
                node_name = 'node ' + str(node_index) + ' Re'
                timeseries_list.append(TimeSeries(node_name, timestamps, pd_df.iloc[:, column]))
            else:
                node_name = 'node ' + str(node_index - node_number) + ' Im'
                timeseries_list.append(TimeSeries(node_name, timestamps, pd_df.iloc[:, column]))
            node_index = node_index + 1
    else:
        # Read in specified time series
        print('no column names specified yet')
    print('DPsim results file length:')
    print(len(timeseries_list))
    for result in timeseries_list:
        print(result.name)
    return timeseries_list
def read_timeseries_NEPLAN_loadflow(file_name, timeseries_names=None, is_regex=False):
    """
    Read in NEPLAN loadflow result from result file; the result is in angle notation, amplitude and angle are stored
    separately
    :param file_name: name of the mat file for the loadflow result from neplan
    :param timeseries_names: column name to be read
    :param is_regex: flag for using regular expression
    :return: list of Timeseries objects
    """
    str_tmp = open(file_name, "r")  # Read in files
    low = 0  # flag for the start of a new data field in str_tmp
    high = 0  # flag for the end of this new data field in str_tmp
    flag = True  # To judge if this is the first line of the file, which holds the names of the data types
    # Read in data from result file of neplan
    seq = []  # list for data type names
    value = []  # list for data
    i = 0
    namelist = ['Vpp', 'Vangle', 'I', 'Iangle']
    timeseries = []
    line_del = []  # a list for the values to be deleted
    isfloat = re.compile(r'^[-+]?[0-9]+\.[0-9]+$')  # regular expression to find float values
    for line in str_tmp.readlines():
        line = line.replace(",", ".")
        high = 0
        low = 0
        del value[:]
        for letter in line:
            if letter == " " or letter == "\n":  # different data fields (separated by ' ') or end of line (\n)
                if low != high:  # if low equals high, no data was read in
                    if flag:  # first line of the file, list of data-type names
                        seq.append(line[low:high])
                    else:  # not the first line of the file, list of data
                        if isfloat.match(line[low:high]):
                            value.append(float(line[low:high]))
                        else:
                            value.append(line[low:high])
                else:  # no data for this datatype
                    value.append(r'#')  # No value, set as #
                low = high + 1  # refresh low flag
            high += 1