#!/usr/bin/python
# -*- coding: UTF-8 -*-

import numpy as np
import pandas as pd
import re
from .timeseries import *


def read_timeseries_Modelica(filename, timeseries_names=None, is_regex=False):
    from modelicares import SimRes
    sim = SimRes(filename)
    if timeseries_names is None and is_regex is False:
        # No trajectory names or regex specified, thus read in all
        timeseries = []
        for name in sim.names():
            timeseries.append(TimeSeries(name, sim(name).times(), sim(name).values()))
        timeseries_names = sim.names()
    elif is_regex is True:
        # Read in variables which match with regex
        timeseries = []
        p = re.compile(timeseries_names)
        timeseries_names = [name for name in sim.names() if p.search(name)]
        timeseries_names.sort()
        for name in timeseries_names:
            timeseries.append(TimeSeries(name, sim(name).times(), sim(name).values()))
    else:
        # Read in specified time series
        if not isinstance(timeseries_names, list):
            timeseries = TimeSeries(timeseries_names, sim(timeseries_names).times(), sim(timeseries_names).values())
        else:
            timeseries = []
            for name in timeseries_names:
                timeseries.append(TimeSeries(name, sim(name).times(), sim(name).values()))

    print('Modelica results column names: ' + str(timeseries_names))
    print('Modelica results number: ' + str(len(timeseries_names)))

    return timeseries
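
# Usage sketch (illustrative only; the file name and regex below are placeholders,
# not paths or variable names defined elsewhere in this module):
#
#   ts_all = read_timeseries_Modelica('simulation_res.mat')
#   ts_sel = read_timeseries_Modelica('simulation_res.mat',
#                                     timeseries_names='grid\..*\.v', is_regex=True)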

def read_timeseries_csv(filename, timeseries_names=None, print_status=True):
    """Reads complex time series data from DPsim log file. Real and
    imaginary part are stored in one complex variable.
    :param filename: name of the csv file that has the data
    :param timeseries_names: column name which should be read
    :return: list of Timeseries objects
    """
    pd_df = pd.read_csv(filename)
    timeseries_list = {}
    cmpl_result_columns = []
    real_result_columns = []

    if timeseries_names is None:
        # No column names specified, thus read in all and strip off spaces
        pd_df.rename(columns=lambda x: x.strip(), inplace=True)
        column_names = list(pd_df.columns.values)

        # Remove timestamps column name and store separately
        column_names.remove('time')
        timestamps = pd_df.iloc[:, 0]

        # Find real and complex variable names
        suffixes = [ ('_re', '_im'), ('.re', '.im'), ('.real', '.imag') ]
        for column in column_names:
            is_complex = False
            for suffix in suffixes:
                real_suffix = suffix[0]
                imag_suffix = suffix[1]

                if column.endswith(imag_suffix):
                    is_complex = True
                    break # Ignore imag columns

                if column.endswith(real_suffix):
                    is_complex = True
                    column_base = column[:-len(real_suffix)]  # strip only the trailing real suffix

                    if column_base + imag_suffix not in column_names:
                        continue

                    cmpl_result_columns.append(column_base)
                    timeseries_list[column_base] = TimeSeries(column_base, timestamps,
                        np.vectorize(complex)(
                            pd_df[column_base + real_suffix],
                            pd_df[column_base + imag_suffix]
                        )
                    )
                    break

            if is_complex:
                continue

            real_result_columns.append(column)
            timeseries_list[column] = TimeSeries(column, timestamps, pd_df[column])

    else:
        # Read in specified time series
        print('cannot read specified columns yet')

    if print_status:
        print('column number: ' + str(len(timeseries_list)))
        print('results length: ' + str(len(timestamps)))
        print('real column names: ' + str(real_result_columns))
        print('complex column names: ' + str(cmpl_result_columns))

    return timeseries_list

def read_timeseries_dpsim(filename, timeseries_names=None, print_status=True):
    return read_timeseries_csv(filename, timeseries_names, print_status)

def read_timeseries_simulink(filename, timeseries_names=None, print_status=True):
    return read_timeseries_csv(filename, timeseries_names, print_status)
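
# Usage sketch (illustrative only; 'Logs/example.csv' and the column name 'v1' are
# placeholders). Complex columns are detected via the suffix pairs '_re'/'_im',
# '.re'/'.im' and '.real'/'.imag' and merged into one complex TimeSeries:
#
#   ts = read_timeseries_dpsim('Logs/example.csv', print_status=False)
#   v1 = ts['v1']  # complex TimeSeries built from 'v1_re' and 'v1_im', if both exist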

def read_dpsim_log(log_path):
    log_file = open(log_path, "r")
    log_lines = [line.rstrip() for line in log_file]
    log_file.close()

    # Sectionize
    log_sections = {'init':[], 'none':[], 'sysmat_stamp':[], 'sysmat_final':[], 'sourcevec_stamp':[], 'sourcevec_final':[], 'ludecomp':[]}
    section = 'init'
    for line_pos in range(len(log_lines)):
        if re.search(r'\[D\] Stamping .+ into system matrix:', log_lines[line_pos]):
            section = 'sysmat_stamp'
        elif re.search(r'\[I\] System matrix:', log_lines[line_pos]):
            section = 'sysmat_final'
        elif re.search(r'\[D\] Stamping .+ into source vector:', log_lines[line_pos]):
            section = 'sourcevec_stamp'
        elif re.search(r'\[I\] Right side vector:', log_lines[line_pos]):
            section = 'sourcevec_final'
        elif re.search(r'\[I\] LU decomposition:', log_lines[line_pos]):
            section = 'ludecomp'
        elif re.search(r'\[I\] Number of network simulation nodes:', log_lines[line_pos]):
            section = 'none'
        elif re.search(r'\[I\] Added .+ to simulation.', log_lines[line_pos]):
            section = 'none'
        elif re.search(r'\[I\] Initial switch status:', log_lines[line_pos]):
            section = 'none'
        elif re.search(r'\[(.*?)\]', log_lines[line_pos]):
            section = 'none'
        log_sections[section].append(line_pos)

    return log_lines, log_sections
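
# Usage sketch (illustrative only; 'Logs/example.log' is a placeholder). The returned
# log_sections maps section names to line indices into log_lines:
#
#   lines, sections = read_dpsim_log('Logs/example.log')
#   for pos in sections['sysmat_final']:
#       print(lines[pos])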

def read_timeseries_PLECS(filename, timeseries_names=None):
    pd_df = pd.read_csv(filename)
    timeseries_list = []
    if timeseries_names is None:
        # No trajectory names specified, thus read in all
        timeseries_names = list(pd_df.columns.values)
        timeseries_names.remove('Time')
        for name in timeseries_names:
            timeseries_list.append(TimeSeries(name, pd_df['Time'].values, pd_df[name].values))
    else:
        # Read in specified time series
        for name in timeseries_names:
            timeseries_list.append(TimeSeries(name, pd_df['Time'].values, pd_df[name].values))

    print('PLECS results column names: ' + str(timeseries_names))
    print('PLECS results number: ' + str(len(timeseries_list)))

    return timeseries_list
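
# Usage sketch (illustrative only; the file and column names are placeholders for a
# PLECS CSV export containing a 'Time' column):
#
#   ts_plecs = read_timeseries_PLECS('plecs_results.csv', timeseries_names=['Vm1', 'Vm2'])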

def read_timeseries_NEPLAN_loadflow(filename, timeseries_names=None, is_regex=False):
    """
    Read in NEPLAN loadflow result from result file, the result is in angle notation, amplitude and angle are stored
    separately
    To keep consistent with the names of voltage in most cases, the name of voltage variables are changed into '.V*'
    instead of '.U*' as in the result file

171
    :param filename: name of the mat file for the loadflow result from neplan
Jan Dinkelbach's avatar
Jan Dinkelbach committed
172 173 174 175
    :param timeseries_names: column name to be read
    :param is_regex: flag for using regular expression
    :return: list of Timeseries objects
    """
    str_tmp = open(filename, "r")  # Read in files
    low = 0  # start index of the current field within the line
    high = 0  # end index of the current field within the line
    flag = True  # True while processing the first line, which holds the data type names

    # Read in data from the NEPLAN result file
    seq = []  # list of data type names
    value = []  # list of data values

    namelist = ['U', 'ANGLEU', 'P', 'Q', 'I', 'ANGLEI']  # suffixes of the data names
    timeseries = []
    line_del = []  # indices of time series to be deleted later
    isfloat = re.compile(r'^[-+]?[0-9]+\.[0-9]+$')  # regular expression to detect float values

    for line in str_tmp.readlines():
        # Convert ',' decimal separators in the result file to '.'
        line = line.replace(",", ".")
        high = 0
        low = 0
        del value[:]
        # Split the line into tab-separated fields and process them
        for letter in line:
            if letter == "\t" or letter == "\n":  # field separator (tab) or end of line
                if low != high:  # otherwise no data was read for this field
                    if flag:  # first line of the file: collect the data type names
                        seq.append(line[low:high])
                    else:  # data line: collect the values
                        if isfloat.match(line[low:high]):
                            value.append(float(line[low:high]))
                        else:
                            value.append(line[low:high])
                else:  # no data for this field
                    value.append(r'#')  # mark missing values with '#'
                low = high + 1  # move the start index past the separator
            high += 1
        """
214 215 216
        A typical line current in neplan has two parts from both end, but we doesn't have to calculate them
        with the assumption that the topology of the gird should be correct with which we can validate the
        current by comparing the voltage of the nodes connected to the ends of the line
Jan Dinkelbach's avatar
Jan Dinkelbach committed
217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257
        """
        if not flag:  # flag is True only for the first (header) line
            if value[3] != '#':
                for m in range(6):
                    timeseries.append(TimeSeries(value[3] + '.' + namelist[m],
                                                 np.array([0., 1.]), np.array([value[m + 6], value[m + 6]])))
            else:
                for m in range(2):
                    timeseries.append(TimeSeries(value[1] + '.' + namelist[m],
                                                 np.array([0., 1.]), np.array([value[m + 6], value[m + 6]])))
        flag = False
    str_tmp.close()

    # Keep only the variables whose names match the regex
    if is_regex is True:
        p = re.compile(timeseries_names)
        for rule_check in range(len(timeseries)):
            if not p.search(timeseries[rule_check].name):
                line_del.append(rule_check)

    # Keep only the specified time series
    elif timeseries_names is not None:
        for rule_check in range(len(timeseries)):
            if timeseries_names != timeseries[rule_check].name:
                line_del.append(rule_check)

    # Delete the time series that are not needed, from back to front to keep indices valid
    for idx in sorted(set(line_del), reverse=True):
        del timeseries[idx]

    return timeseries
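
# Usage sketch (illustrative only; the file name is a placeholder). Result names carry
# the suffixes '.U', '.ANGLEU', '.P', '.Q', '.I', '.ANGLEI':
#
#   ts_neplan = read_timeseries_NEPLAN_loadflow('neplan_loadflow_export.txt',
#                                               timeseries_names='.*\.U$', is_regex=True)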


def read_timeseries_simulink_loadflow(filename, timeseries_names=None, is_regex=False):
    """
    Read in simulink load-flow result from result file(.rep), the result is in angle notation, amplitude and angle are stored
    separately.
    A suffix is used to tag different data for a component:
        .Arms/.IDegree for current/current angle,
        .Vrms/.VDegree for voltage/voltage angle.

266
    :param filename:path of the .rep file for the loadflow result from simulink
Jan Dinkelbach's avatar
Jan Dinkelbach committed
267 268 269 270
    :param timeseries_names: specific values to be read
    :param is_regex: flag for using regular expression
    :return: list of Timeseries objects
    """
    str_tmp = open(filename, 'r', encoding='latin-1')  # Read in file, using latin-1 to decode the degree sign (\xb0)

    # Read in data from the Simulink result file
    name = []  # list for data type names
    value = []  # list for data
    timeseries = []
    line_del = []  # a list for the value to be deleted

    for line in str_tmp.readlines():
        line = line.replace("°", "")
        del value[:]
        del name[:]
        # read in different data and start processing
        if len(line) > 37:
            if line[31:35] == '--->':
                if line[13:17] == 'Arms':
                    name = [line[37:len(line)].rstrip() + '.Arms', line[37:len(line)].rstrip() + '.IDegree']
                elif line[13:17] == 'Vrms':
                    name = [line[37:len(line)].rstrip() + '.Vrms', line[37:len(line)].rstrip() + '.VDegree']
                value = [float(line[0:13]), float(line[18:31])]
                timeseries.append(TimeSeries(name[0],
                                             np.array([0., 1.]), np.array([value[0], value[0]])))
                timeseries.append(TimeSeries(name[1],
                                             np.array([0., 1.]), np.array([value[1], value[1]])))

    # Keep only the variables whose names match the regex
    if is_regex is True:
        p = re.compile(timeseries_names)
        for rule_check in range(len(timeseries)):
            if not p.search(timeseries[rule_check].name):
                line_del.append(rule_check)

    # Keep only the specified time series
    elif timeseries_names is not None:
        for rule_check in range(len(timeseries)):
            if timeseries_names != timeseries[rule_check].name:
                line_del.append(rule_check)

    # Delete the time series that are not needed, from back to front to keep indices valid
    for idx in sorted(set(line_del), reverse=True):
        del timeseries[idx]
    return timeseries
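
# Usage sketch (illustrative only; the file name is a placeholder). Result names carry
# the suffixes '.Arms'/'.IDegree' and '.Vrms'/'.VDegree' as described in the docstring:
#
#   ts_slk = read_timeseries_simulink_loadflow('simulink_loadflow.rep',
#                                              timeseries_names='.*\.Vrms', is_regex=True)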

def read_timeseries_villas(filename):
    """
    Read data in "villas.human" format.

    See: https://villas.fein-aachen.org/doc/node-formats.html
    Format:   seconds.nanoseconds+offset(sequenceno)      value0 value1 ... valueN
    Example:  1438959964.162102394(6)     3.489760        -1.882725       0.860070

    :param filename: name of the file that contains the data
    """

    from villas.node.sample import Sample

    with open(filename, 'r') as fp:

        timeseries = [ ]
        times = [ ]
        fields = [ ]

        for line in fp.readlines():
            if line[0] == '#':
                continue

            sample = Sample.parse(line)

            times.append(sample.ts)

            for index, field in enumerate(sample.values, start=0):
                if len(fields) <= index:
                    fields.append([])

                fields[index].append(field)

        for index, field in enumerate(fields):
            name = 'signal_{}'.format(index)

            series = TimeSeries(name, times, field)

            timeseries.append(series)

        return timeseries
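
# Usage sketch (illustrative only; requires the villas.node Python package and a
# placeholder file name). Each signal column becomes one TimeSeries named 'signal_<index>':
#
#   ts_villas = read_timeseries_villas('villas_log.dat')
#   print(ts_villas[0].name)  # 'signal_0'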