Commit 86732f74 authored by Hans Vrapi's avatar Hans Vrapi
Browse files

add new files

parent 93e3eb9f
## perform_feasibility_analysis.sh
- runs all experiments and plots the results
- takes one boolean argument (optional)
- 'true': rerun the experiments, parse the results and plot them (if not set, the existing results will be plotted)
- command to run the script: ./perform_feasibility_analysis.sh [true/false] ([] indicates that the argument is optional)
## csv_files
- contains the csv files generated from Storm's results
## generated_plots
- contains all the generated plots (also included in the paper)
## latex_source
- contains the latex source needed to generate the plots
## pso-qcqp-gd-benchmarks
- contains all the benchmarks fed into storm/prophesy and the results
## storm
- version of storm used to perform feasibility analysis with GD
# Storm-bn
[![Build Status](https://travis-ci.org/joemccann/dillinger.svg?branch=master)](https://travis-ci.org/joemccann/dillinger)
This repository facilitates performing tasks related to inference in parametric Bayesian networks (pBNs) such as:
- Feasibility Analysis
- Parameter Space Partitioning
- Sensitivity Analysis
by means of probabilistic model checking (PMC) techniques. For this purpose, we have implemented the tool 'storm-bn-refactoring' that [transforms] a (parametric) Bayesian network (pBN) into its corresponding (parametric) Markov chain (pMC). The generated pMC is fed into the probabilistic model checker [Storm] to perform the computations for each task.
## Requirements
- [Storm] 1.6.4
- [Prophesy]
This has been developed at RWTH Aachen University.
[Storm]: <https://www.stormchecker.org/>
[Prophesy]: <https://moves-rwth.github.io/prophesy/index.html>
[transforms]: <https://www.semanticscholar.org/paper/Fine-Tuning-the-Odds-in-Bayesian-Networks-Salmani-Katoen/dbecf8eaf6f0adb03bca9f69e8401ca2036a20ad>
import argparse
def get_tables(bif_content: str):
    """Parse every CPT of a .bif file's text into a dict keyed by node name.

    Each entry holds 'parents' (list of parent names, empty for root nodes)
    and 'probabilities': a flat list of strings for parentless nodes, or a
    dict mapping each parent-value tuple to its probability row. Nodes with
    parents additionally get 'parent_valuations_in_order', mapping the row
    index to the parent-value tuple as written in the file.
    """
    tables = {}
    for chunk in bif_content.split('probability ')[1:]:
        header = chunk.split('(')[1].split(')')[0].strip()
        pieces = header.split('|')
        node = pieces[0].strip()
        has_parents = len(pieces) > 1
        parents = [p.strip() for p in pieces[1].split(',')] if has_parents else []
        if not parents:
            # Root node: single 'table v1, v2, ...;' row.
            probabilities = [p.strip() for p in chunk.split('table')[1].split(';')[0].split(',')]
        else:
            # Conditional node: one '(v1, v2) p1, p2;' row per valuation.
            probabilities = {}
            valuation_order = {}
            rows = chunk.split('{\n')[1].split('}')[0].split(';')[:-1]
            for idx, row in enumerate(rows):
                valuation = tuple(v.strip() for v in row.split('(')[1].split(')')[0].split(','))
                valuation_order[idx] = valuation
                probabilities[valuation] = [p.strip() for p in row.split(')', 1)[1].split(',')]
        entry = {'parents': parents, 'probabilities': probabilities}
        if has_parents:
            entry['parent_valuations_in_order'] = valuation_order
        tables[node] = entry
    return tables
def add_parameters_to_cpt_by_parents(cpt, evaluation_of_parents, number_of_existing_parameters):
    """Replace the CPT rows selected by parent evaluations with parameters.

    For every evaluation string (comma-separated parent values), the row's
    first entry becomes a fresh parameter p<i> and every remaining entry is
    rescaled by (1-p<i>)/(1-old_first) so the row still sums to one.
    Returns the updated probability table and a declaration string covering
    every parameter index from 0 up to the highest one used.
    """
    param_index = number_of_existing_parameters
    if evaluation_of_parents is not None:
        for raw_evaluation in evaluation_of_parents:
            key = tuple(part.strip() for part in raw_evaluation.split(','))
            old_row = cpt['probabilities'][key]
            pname = f'p{param_index}'
            new_row = [pname]
            new_row.extend(f'{entry}*((1-{pname})/(1-{old_row[0]}))' for entry in old_row[1:])
            cpt['probabilities'][key] = new_row
            param_index += 1
    declarations = ''.join(f'parameter p{i}' + '{\n}\n\n' for i in range(param_index))
    return (cpt['probabilities']), declarations
def add_parameters_to_cpt_by_number(cpt, number):
    """Make the first `number` CPT rows (in file order) parametric.

    Each selected row's first entry becomes p<i>; the rest of the row is
    rescaled by (1-p<i>)/(1-old_first). A parentless node has a single row,
    so exactly one parameter is introduced regardless of `number`.
    Returns the updated probability table and the parameter declarations.
    """
    param_index = 0
    if number is not None:
        if len(cpt['parents']) > 0:
            for row in range(number):
                key = cpt['parent_valuations_in_order'][row]
                old_row = cpt['probabilities'][key]
                pname = f'p{param_index}'
                new_row = [pname]
                new_row.extend(f'{entry}*((1-{pname})/(1-{old_row[0]}))' for entry in old_row[1:])
                cpt['probabilities'][key] = new_row
                param_index += 1
        else:
            old_row = cpt['probabilities']
            pname = f'p{param_index}'
            new_row = [pname]
            new_row.extend(f'{entry}*((1-{pname})/(1-{old_row[0]}))' for entry in old_row[1:])
            cpt['probabilities'] = new_row
            param_index += 1
    declarations = ''.join(f'parameter p{i}' + '{\n}\n\n' for i in range(param_index))
    return (cpt['probabilities']), declarations
def add_params_to_bif(bif_file_path, node, evaluation_of_parents, number, output_path, number_of_existing_parameters=0):
    """Read a non-parametric .bif file, make rows of `node`'s CPT parametric,
    and write the rebuilt network plus parameter declarations to `output_path`.

    Exactly one of `evaluation_of_parents` (list of comma-separated parent
    value strings) or `number` (count of leading CPT rows) selects which rows
    are replaced. Supplying both or neither prints an error and aborts
    without touching any file.
    """
    if (evaluation_of_parents is not None) and (number is not None):
        print('ERROR: You can either use evaluation_of_parents or number, but not both options!')
        return  # bug fix: previously fell through and parameterized anyway
    if (evaluation_of_parents is None) and (number is None):
        print('ERROR: You have to use number or evaluation_of_parents!')
        return  # bug fix: previously fell through into a NameError on parameter_string
    with open(bif_file_path, 'r') as f:
        bif_content = f.read()
    table_dict = get_tables(bif_content)
    cpt = table_dict[node]
    if evaluation_of_parents is not None:
        cpt['probabilities'], parameter_string = add_parameters_to_cpt_by_parents(cpt, evaluation_of_parents, number_of_existing_parameters)
    else:
        cpt['probabilities'], parameter_string = add_parameters_to_cpt_by_number(cpt, number)
    # Rebuild the file: keep everything before the first CPT, re-emit every
    # CPT (the modified one included), then append the parameter declarations.
    new_bif_content = bif_content.split('probability', 1)[0]
    for node_name, node_dict in table_dict.items():
        parent_string = ''
        if len(node_dict['parents']) > 0:
            parent_string = '| ' + ', '.join(node_dict['parents']) + ' '
        table_string = f'probability ( {node_name} {parent_string})' + '{\n'
        if len(node_dict['parents']) > 0:
            for parents_valuation, probs in node_dict['probabilities'].items():
                table_string += f'({", ".join(parents_valuation)}) '
                table_string += f'{", ".join(probs)};\n'
            new_bif_content += table_string + '}\n\n'
        else:
            new_bif_content += table_string + f'table {", ".join(node_dict["probabilities"])};\n' + '}\n\n'
    new_bif_content += parameter_string
    if output_path is not None:
        with open(output_path, 'w') as g:
            g.write(new_bif_content)
if __name__ == '__main__':
    # Command-line front end for add_params_to_bif.
    arg_parser = argparse.ArgumentParser(description='Read Network from bif and add parameters')
    arg_parser.add_argument('path', help='path to NON-PARAMETRIC bif-file', type=str)
    arg_parser.add_argument('node', help='node, in which parameters should be added', type=str)
    # evaluation_of_parents arrives as a list of strings (nargs='+').
    arg_parser.add_argument('--evaluation_of_parents', help='evaluation of parents marks the line in the CTL should be made parametric; not usable, when number is set', type=str, default=None, nargs='+')
    arg_parser.add_argument('--number', help='number of lines that should be made parametric; not usable, when evaluation_of_parents is set', type=int, default=None)
    arg_parser.add_argument('--existing_params',
                            help='number of already existing parameters',
                            type=int, default=0)
    arg_parser.add_argument('--output_path', help='path to output-file', type=str)
    cli_args = arg_parser.parse_args()
    add_params_to_bif(cli_args.path,
                      cli_args.node,
                      cli_args.evaluation_of_parents,
                      cli_args.number,
                      cli_args.output_path,
                      cli_args.existing_params)
import argparse
import json
import itertools
def is_float(string):
    """Return True when *string* parses as a float literal, False otherwise."""
    try:
        float(string)
    except ValueError:
        return False
    return True
def separate_name_and_value(statename):
    """Split a state name into (attribute, value) on a trailing digit.

    'rain1' -> ('rain', '1'); a name without a numeric last character is
    returned whole with value None.
    """
    last = statename[-1]
    if last.isnumeric():
        return statename[:-1], last
    return statename, None
def read_drn_file(filepath: str):
    """Parse a non-parametric .drn model file into a Drn object.

    Reads the header fields (@type, @parameters, @reward_models, @nr_states,
    @nr_choices), builds one State per 'state ' section with its outgoing
    transitions, then back-links every child state to its parents.

    Returns -1 (instead of a Drn) when the file already declares parameters,
    since this tool is meant to introduce the parameters itself.
    """
    with open(filepath, "r") as read_file:
        drn_file = read_file.read()
    # Everything before the first 'state ' token is the header; each later
    # piece is the text of one state section.
    drn_states_text = drn_file.split('state ')[1:]
    header = drn_file.split('state ')[0]
    type = (header.split('@parameters')[0].split('@type:')[1]).strip()
    parameters = (header.split('@parameters')[1]).split('@')[0].strip()
    if parameters != '':
        print('ERROR: drn file should not be parametric! ')
        return -1
    placeholders = ''
    reward_models = header.split('@reward_models')[1].split('@')[0].strip()
    nr_states = header.split('@nr_states')[1].split('@')[0].strip()
    nr_choices = header.split('@nr_choices')[1].split('@')[0].strip()
    # Per state section: non-empty stripped lines. Line 0 is 'number name',
    # line 1 is the action header, lines 2+ are 'child : probability'.
    drn_states_array = [[y.strip() for y in x.split('\n') if y != ''] for x in drn_states_text]
    drn = Drn(type, parameters, placeholders, reward_models, nr_states, nr_choices)
    for state_array in drn_states_array:
        state_number, name = state_array[0].split(' ',1)
        new_state = State(state_number, name)
        for transition in state_array[2:]:
            child_number, prob = transition.split(':')
            child_number = child_number.strip()
            prob = prob.strip()
            new_state.add_child(child_number, prob)
        drn.states[state_number] = new_state
    # Second pass: register each state as a parent of all of its children,
    # which also propagates valuations (see State.add_parent).
    for state in drn.states.values():
        for child_number in state.children:
            drn.states[child_number.strip()].add_parent(state)
    #for state in drn.states.values():
    #    print(state)
    return drn
class State():
    """One DRN model state: id, name, topology links and attribute valuation."""

    def __init__(self, number, name):
        self.number = number
        self.name = name
        self.parents = []
        self.children = []
        self.probs = {}
        # A trailing digit in the name (e.g. 'rain1') seeds the valuation
        # with {attribute: value}; otherwise the valuation starts empty.
        attribute, value = separate_name_and_value(name)
        self.valuation = {attribute: value} if value is not None else {}

    def add_parent(self, parent):
        """Link *parent* and fold its valuation into ours.

        Attributes on which the parents disagree are mapped to None so they
        are never treated as a definite value later.
        """
        self.parents.append(parent.number)
        for attribute, value in parent.valuation.items():
            conflicting = attribute in self.valuation and value != self.valuation[attribute]
            self.valuation[attribute] = None if conflicting else value

    def add_child(self, child_number, prob):
        """Record an outgoing transition to *child_number* with probability *prob*."""
        self.probs[child_number] = prob
        self.children.append(child_number)

    def __str__(self):
        return f'''number: {self.number}
name = {self.name}
parents = {self.parents}
children = {self.children}
probs = {self.probs}
valuation = {self.valuation}
'''
class Drn():
    """In-memory representation of a .drn Markov chain model.

    Holds the header metadata plus a dict of State objects keyed by state
    number, and offers operations to make selected transitions parametric
    (introducing parameters p0, p1, ...) and to write the result back out.
    """
    def __init__(self, type, parameters, placeholders, reward_models, nr_states, nr_choices):
        self.states = {}
        self.type = type
        self.parameters = parameters
        # NOTE(review): the `placeholders` argument is ignored and the
        # attribute always starts as an empty dict — confirm this is intended.
        self.placeholders = {}
        self.reward_models = reward_models
        self.nr_states = nr_states
        self.nr_choices = nr_choices
        # Count of parameters introduced so far; also the index used for the
        # next parameter name (p<number_of_params>).
        self.number_of_params = 0
        self.number_of_placefolder = 0
    def make_entry_parametric_by_parent_value(self, state_name, parent_names, parent_values, number):
        """Make one CPT entry parametric.

        For every parent (matching parent_names/parent_values) of a state
        named `state_name`, the transition probability into that state
        becomes p<i> and the parent's sibling transitions are rescaled by
        (1 - p<i>)/(1 - old) so each row still sums to one. Returns early
        once `number` parameters exist (the -1 default from the CLI never
        equals the counter, i.e. no limit).
        """
        if self.number_of_params == number:
            return
        transitions = self.find_transitions_for_making_parametric_by_parent_value(state_name, parent_names, parent_values)
        #print(state_name)
        #print(transitions)
        #print(transitions)
        #print(transitions)
        added = False
        for param_child, parent_list in transitions.items():
            for parent in parent_list:
                old_param_child_prob = self.states[parent].probs[param_child]
                # A probability of exactly 1 leaves nothing to rescale; an
                # entry that is not a float literal is already parametric.
                if old_param_child_prob == '1.0' or old_param_child_prob == '1':
                    break
                elif not is_float(old_param_child_prob):
                    continue
                else:
                    added = True
                    for child in self.states[parent].children:
                        if child == param_child:
                            self.states[parent].probs[child] = f'p{self.number_of_params}'
                        else:
                            old_prob = self.states[parent].probs[child]
                            self.states[parent].probs[child] = f'{old_prob} * ((1 - p{self.number_of_params})/(1 - {old_param_child_prob}))'
                            #print(self.states[parent].probs[child])
                #print(parent)
                #print(self.states[parent].probs)
        # Only consume a parameter index if at least one transition changed.
        if added:
            self.number_of_params += 1
    def make_CPT_parametric(self, attribute, number):
        """Make a whole CPT parametric: visit every combination of the
        possible parent values and parameterize the matching entry."""
        #print(attribute)
        parent_names = attribute["parent_names"]
        for parent_values in list(itertools.product(*attribute["possible_parent_values"])): # zip(*(attribute["possible_parent_values"])):
            #print(attribute["node"])
            #print(parent_names)
            #print(list(parent_values))
            self.make_entry_parametric_by_parent_value(attribute['node'],parent_names,list(parent_values), number)
    def find_transitions_for_making_parametric_by_parent_value(self, state_name, parent_names, parent_values):
        """Return {state_number: [matching parent numbers]} for every state
        named `state_name` (deadlock-decorated variants included).

        A parent matches when its accumulated valuation defines every name
        in `parent_names` with exactly the corresponding value; a None
        valuation (ambiguous, see State.add_parent) never matches.
        """
        #print('###### BEGIN OF FINDING TRANSISTION')
        #print(state_name)
        state_numbers_with_name = []
        for number, state in self.states.items():
            if state_name == state.name or f"{state_name} deadlock" == state.name or f"deadlock {state_name}" == state.name:
                state_numbers_with_name.append(number)
        #print('state_numbers_with_name')
        #print(state_numbers_with_name)
        #print(state_numbers_with_name)
        #print('STATE_NAME')
        #print(state_name)
        parent_state_numbers_with_correct_values = {}
        #print(state_numbers_with_name)
        for child_number in state_numbers_with_name:
            parent_state_numbers_with_correct_values[child_number] = []
            for number in self.states[child_number].parents:
                #print(f'number: {number}')
                #print('valutation')
                #print(self.states[number].valuation)
                correct_parent_valuation = True
                for parent, parent_value in zip(parent_names, parent_values):
                    #print(number)
                    #print(self.states[number].valuation)
                    #print(f'parent: {parent}')
                    #print(f'parent_value: {parent_value}')
                    if parent not in self.states[number].valuation:
                        correct_parent_valuation = False
                    elif self.states[number].valuation[parent] == None:
                        correct_parent_valuation = False
                    elif self.states[number].valuation[parent] != parent_value:
                        correct_parent_valuation = False
                if correct_parent_valuation:
                    parent_state_numbers_with_correct_values[child_number].append(number)
        #print(parent_state_numbers_with_correct_values)
        return parent_state_numbers_with_correct_values
    def write_to(self, output_file: str, number):
        """Serialize the (now parametric) model back to .drn format,
        declaring all introduced parameters in the header.

        The output name is `output_file` with its 4-char extension replaced
        by '_<number>.drn', or '_all.drn' when number == -1.
        """
        header = f'''@type: {self.type}
@parameters
{' '.join(['p'+str(i) for i in range(self.number_of_params)])}
@reward_models
{self.reward_models}
@nr_states
{self.nr_states}
@nr_choices
{self.nr_choices}
'''
        body = "@model"
        for state in self.states.values():
            state_str = f'''
state {state.number} {state.name}
\taction 0
'''
            for child in state.children:
                state_str += f'\t\t{child} : {state.probs[child]}\n'
            # Drop the trailing newline so the sections join cleanly.
            body += state_str[:-1]
        if number != -1:
            output_location = output_file[:-4]+ '_'+str(number) + '.drn'
        else:
            output_location = output_file[:-4]+ '_all' + '.drn'
        # NOTE(review): the handle is never closed explicitly; relies on GC.
        g = open(output_location, 'w')
        g.write(header + body)
def make_drn_parametric(config_path: str, number: int, output_path: str):
    """Load the JSON config, apply all configured parameterizations to the
    referenced .drn model, and write the result.

    `number` caps how many parameters are introduced (-1 means no cap);
    a None `output_path` falls back to the config's 'output_file'.
    """
    with open(config_path, "r") as config_file:
        config = json.load(config_file)
    network = config['network']
    drn_location = config['drn_location']
    if output_path is None:
        output_path = config['output_file']
    cpt_specs = config['parameters']['CPT']
    by_parent_evaluation = config['parameters']['by_parent_evaluation'].copy()
    drn = read_drn_file(drn_location)
    for spec in cpt_specs:
        drn.make_CPT_parametric(spec, number)
    for spec in by_parent_evaluation:
        drn.make_entry_parametric_by_parent_value(spec['node'], spec['parent_names'], spec['parent_values'], number)
    drn.write_to(output_path, number)
if __name__ == '__main__':
    # Command-line front end: either one run with --number, or a doubling
    # sweep (1, 2, 4, ...) when --begin/--end/--step are all supplied.
    cli = argparse.ArgumentParser(description='make drn file parametric')
    cli.add_argument('config_path', help='path to config_file', type=str)
    cli.add_argument('--number', help='number of params, that should be added', type=int, default=-1)
    cli.add_argument('--begin', help='begin number of params, that should be added', type=int, default=-1)
    cli.add_argument('--end', help='end number of params, that should be added', type=int, default=-1)
    cli.add_argument('--step', help='step number of params, that should be added', type=int, default=-1)
    cli.add_argument('--output_path', help='output_path', type=str, default=None)
    cli_args = cli.parse_args()
    if cli_args.begin != -1 and cli_args.end != -1 and cli_args.step != -1:
        # NOTE(review): --begin and --step only gate the sweep; the sweep
        # itself always starts at 1 and doubles — confirm that is intended.
        exponent = 0
        i = 1
        while i <= cli_args.end + 1:
            make_drn_parametric(cli_args.config_path, i, cli_args.output_path)
            exponent += 1
            i = 2 ** exponent
    else:
        make_drn_parametric(cli_args.config_path, cli_args.number, cli_args.output_path)
import re
import pandas as pd
import math
# Parse Storm output files: extract model statistics and the solution
# function, estimate its degree and summand count, and dump a CSV.
filepaths = {
    'Rain' : 7
}

# Collect one dict per parsed file and build the DataFrame once at the end:
# DataFrame.append was removed in pandas 2.0 and was quadratic anyway.
rows = []
for network, number in filepaths.items():
    print(network)
    for filepath, b in {f"dbn/{network}/{network}-{number}-out.txt": 'no'}.items(): #, f'../storm_pars_compute_function_test_bisimulation/{network}/{network}-{number}-out.txt' : 'yes'}.items():
        print(filepath)
        print(b)
        with open(filepath, 'r') as f:
            content = f.read()
        # Each statistic is cut out of the log by locating its label and
        # taking the text up to the end of that line.
        begin_prop = content.find("--prop") + 6
        end_prop = content.find("\n", begin_prop)
        prop = content[begin_prop:end_prop]  # renamed: 'property' shadows the builtin
        begin_time = content.find("Time for model construction: ") + 29
        end_time = content.find("\n", begin_time)
        time = content[begin_time:end_time].strip()
        begin_states = content.find("States:") + 7
        end_states = content.find("\n", begin_states)
        nr_states = content[begin_states:end_states].strip()
        print(f"States: {nr_states}")
        begin_transitions = content.find("Transitions:") + 12
        end_transitions = content.find("\n", begin_transitions)
        nr_transitions = content[begin_transitions:end_transitions].strip()
        print(f"Transitions: {nr_transitions}")
        begin_function = content.find("Result (initial states): ")
        end_function = content.find("\nTime", begin_function)
        function = content[begin_function:end_function]
        print(function)
        # A parameter counts as used if its name occurs anywhere after the
        # result line.
        number_of_params = 0
        for i in range(number):
            if content.find(f"p{i}", begin_function) != -1:
                number_of_params += 1
        # Walk the solution function '+'-separated summand by summand,
        # counting parameter occurrences (weighted by '^' exponents); the
        # maximum per summand is the polynomial degree.
        # NOTE(review): the scan runs to the end of `content`, not just to
        # end_function — confirm the result is the last relevant section.
        pos = begin_function
        number_of_p = 0
        max_number_of_p = 0
        nr_terms = 0
        while pos < len(content):
            next_plus = content.find('+', pos)
            nr_terms += 1
            if next_plus == -1:
                next_plus = len(content)
            while pos < next_plus:
                next_p = content.find('p', pos, next_plus)
                if next_p == -1:
                    pos = next_plus + 1
                else:
                    next_p2 = content.find('p', next_p + 1, next_plus)
                    exponent = 1
                    if next_p2 == -1:
                        sub_str = content[next_p:next_plus]
                    else:
                        sub_str = content[next_p:next_p2]
                    if sub_str.find('^') != -1:
                        # 'p3^2'-style factor: strip the name, keep the exponent.
                        sub_str = re.sub('[a-zA-Z]+[0-9]+', '', sub_str)
                        sub_str = sub_str.replace('^', '')
                        exponent = re.findall(r'\d+', sub_str)[0]
                        # Jump past 'p<idx>^<exp>' (log10 gives the exponent's digit count - 1).
                        pos = next_p + 2 + int(math.log10(int(exponent))) + 1
                    else:
                        pos = next_p + 1
                    number_of_p += int(exponent)
            if max_number_of_p < number_of_p:
                max_number_of_p = number_of_p
            number_of_p = 0
        print(f"Maximum Degree: {max_number_of_p}")
        print(f"Number of Summands : {nr_terms}")
        print(f"Number of parameters: {number_of_params}")
        rows.append({'network': network, 'time': time, 'bisimulation': b,
                     'property': prop, 'nr_params': number_of_params,
                     'nr_states': nr_states, 'nr_transitions': nr_transitions,
                     'degree': max_number_of_p, 'nr_summands': nr_terms})

data = pd.DataFrame(rows, columns=['network', 'time', 'bisimulation', 'property',
                                   'nr_params', 'nr_states', 'nr_transitions',
                                   'degree', 'nr_summands'])
print(data)
data.to_csv('solution_function_compute.csv')
storm-bn-refactoring @ e48e2982
Subproject commit e48e29828640b208f54169df39768ac97e38d010
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment