In [38]:
from ltlcross_runner import LtlcrossRunner
from IPython.display import display
import pandas as pd
import spot
import sys
import re
spot.setup(show_default='.a')
pd.options.display.float_format = '{: .0f}'.format
pd.options.display.latex.multicolumn_format = 'c'
from tables_utils import split_cols, high_min, high_max, highlight_by_level
from tables_utils import fix_latex, fix_type, fix_tool
from tables_utils import cummulative_to_latex
In [39]:
# Master switch: True re-runs the (slow) ltlcross benchmarks from scratch;
# False reuses the cached .csv result files referenced below.
rerun = False
In [56]:
%%bash
# Record the exact version of every tool used, for reproducibility.
ltl3tela -v
ltl2tgba --version
delag --version
ltl2dgra --version # Rabinizer 4
autfilt --version
In [41]:
def generate(n=1000, func=(lambda x: True), filename=None, priorities='', ap=['a','b','c','d','e']):
    """Print `n` unique random LTL formulas, one per line.

    Formulas come from `spot.randltl`, are simplified, relabeled to use
    the propositions a, b, c, ... (`relabel_bse`), and deduplicated.
    `tt`/`ff` and formulas rejected by `func` are skipped and do not
    count towards `n`.

    Parameters
    ----------
    n : int
        Number of formulas to output.
    func : callable
        Filter; only formulas for which it returns a truthy value are printed.
    filename : str or None
        Output file; `None` prints to stdout.
    priorities : str
        `ltl_priorities` string forwarded to `spot.randltl`.
    ap : list
        Atomic propositions to draw from (never mutated here, so the
        mutable default argument is harmless).
    """
    if filename is None:
        file_h = sys.stdout
    else:
        file_h = open(filename, 'w')
    try:
        f = spot.randltl(ap,
                         ltl_priorities=priorities,
                         simplify=3, tree_size=15).relabel_bse(spot.Abc)
        i = 0
        printed = set()
        while i < n:
            # randltl yields formulas indefinitely; duplicates are skipped
            # via the `printed` set (rejected formulas may be re-drawn).
            form = next(f)
            if form in printed:
                continue
            if func(form) and not form.is_tt() and not form.is_ff():
                print(form, file=file_h)
                printed.add(form)
                i += 1
    finally:
        # Fix: close the file we opened (it was previously leaked);
        # never close sys.stdout.
        if file_h is not sys.stdout:
            file_h.close()
In [42]:
# Benchmark inputs: 1000 random formulas and patterns from the literature.
f_rand = 'formulae/atva19/rand.ltl'
f_patterns = 'formulae/atva19/patterns.ltl'
# Uncomment to regenerate the random formula set (overwrites f_rand):
# generate(1000, filename = f_rand)
In [43]:
# ltlcross command templates for the deterministic-automata comparison:
# %f is replaced by the input formula, %O by the output automaton file.
# delag and ltl2dgra (Rabinizer 4) outputs are post-processed by autfilt
# (-G: keep any acceptance, --deterministic, --high simplification effort).
d_tools = {
"ltl3tela-D1": "ltl3tela -D1 -f %f > %O",
"ltl2tgba-DG": "ltl2tgba -DG %f > %O",
"delag": "delag %f | autfilt -G --deterministic --high > %O",
"rabinizer4": "ltl2dgra %f | autfilt -G --deterministic --high > %O"
}
# Fixed display order of the tools (table rows) and of the measured metrics.
d_order = ["ltl3tela-D1", "ltl2tgba-DG", "delag", "rabinizer4"]
d_cols = ["states", "edges", "acc"]
In [44]:
# Run (or load from the cached CSV) ltlcross on the random formula set.
# automata=False: only collect statistics, no automata cross-checking;
# each translation is limited to 60 seconds.
d_csv_rand = 'formulae/atva19/det.rand.autfilt.csv'
d_data_rand = LtlcrossRunner(d_tools, formula_files = [f_rand], res_filename = d_csv_rand, cols = d_cols)
if rerun:
d_data_rand.run_ltlcross(automata = False, timeout = '60')
d_data_rand.parse_results()
In [45]:
# Cumulative sums of states/edges/acceptance sets over all random formulas,
# one row per tool, arranged in the fixed display order.
det_rand = d_data_rand.cummulative(col = d_cols).unstack(level = 0).loc[d_order, d_cols]
det_rand
Out[45]:
In [46]:
# Same benchmark on the literature patterns (or load the cached CSV).
d_csv_patterns = 'formulae/atva19/det.patterns.autfilt.csv'
d_data_patterns = LtlcrossRunner(d_tools, formula_files = [f_patterns], res_filename = d_csv_patterns, cols = d_cols)
if rerun:
d_data_patterns.run_ltlcross(automata = False, timeout = '60')
d_data_patterns.parse_results()
In [47]:
# Count timeouts (TO) and parse errors (PE) per tool on the literature set,
# then attach both counts to the cumulative states/edges/acc table.
det_to = pd.DataFrame(d_data_patterns.get_error_count(),columns=['TO.literature'])
det_err = pd.DataFrame(d_data_patterns.get_error_count('parse error',False),columns=['PE.literature'])
det_lit = d_data_patterns.cummulative(col = d_cols).unstack(level = 0).loc[d_order, d_cols]
det_lit = pd.concat([det_lit,det_to,det_err],axis=1,join='inner',sort=False)
det_lit
Out[47]:
In [48]:
# Inspect every non-"ok" ltlcross exit status (timeouts, crashes, ...)
# on the random set; rows where all tools succeeded are dropped.
to = d_data_rand.exit_status
to[to != "ok"].dropna(how='all')
Out[48]:
In [49]:
# Put the random and literature results side by side, tagging overlapping
# column names with the data-set they came from.
det_tmp = pd.merge(det_rand, det_lit, suffixes=('.random','.literature'),on='tool')
det_tmp
Out[49]:
In [50]:
# Split the '.'-suffixed columns into a (metric, data-set) MultiIndex and
# move the data-set level on top for the final table layout.
det = split_cols(det_tmp,'.').swaplevel(axis=1)
det
Out[50]:
In [51]:
# NOTE(review): redundant — `rerun` is already False (set earlier and never
# changed in between); kept so the cells below can be re-run independently.
rerun = False
In [52]:
# For pattern formulas where at least one tool has a missing result, join the
# state counts with the ltlcross exit statuses; per tool, show the state count
# when available and otherwise the failure kind (e.g. timeout).
d_fails = d_data_patterns.values[d_data_patterns.values.isnull().any(axis = 1)]['states']\
.join(d_data_patterns.exit_status, lsuffix = '.states', rsuffix = '.response')
for tool in d_order:
d_fails[tool] = d_fails[tool + '.states'].combine_first(d_fails[tool + '.response'])
d_fails_out = d_fails[d_order]
d_fails_out
Out[52]:
In [53]:
def large_diff(res, tools, threshold):
    """Return per-formula state counts on which the tools disagree a lot.

    Parameters
    ----------
    res : LtlcrossRunner
        Parsed results; `res.values` carries a column MultiIndex with
        'states' on the first level and one tool on the second.
    tools : iterable
        Tool names (columns) to compare; iteration order is preserved
        in the returned columns.
    threshold : number
        Formulas whose max-min spread of state counts exceeds this
        value are reported.
    """
    cols = list(tools)
    states = res.values.dropna()['states']
    per_tool = states[cols]
    spread = per_tool.max(axis=1) - per_tool.min(axis=1)
    return states[spread > threshold][cols]
In [54]:
# Literature patterns on which the tools' state counts differ by more than 20.
large_diff(d_data_patterns, d_tools, 20)
Out[54]:
In [55]:
# Random formulas on which the tools' state counts differ by more than 10.
large_diff(d_data_rand, d_tools, 10)
Out[55]: