In [2]:
from ltlcross_runner import LtlcrossRunner
from IPython.display import display
import pandas as pd
import spot
import sys
# Use '.a' as the default option string when displaying automata.
spot.setup(show_default='.a')
# Show numbers without decimal places in the result tables.
pd.options.display.float_format = '{: .0f}'.format
# Center multicolumn headers when exporting tables to LaTeX.
pd.options.display.latex.multicolumn_format = 'c'
Hack that allows parsing ltl3ba automata without universal branching.
In [3]:
import os
# Make Spot's HOA parser tolerant, so ltl3ba output is accepted.
os.environ['SPOT_HOA_TOLERANT']='TRUE'
In [4]:
%%bash
# Record the versions of all tools used, for reproducibility.
ltl3ba -v
ltl3tela -v
ltl2tgba --version
ltlcross --version
In [5]:
# If there are already files with results, and rerun is False, ltlcross is not run again.
# Set rerun = True to force recomputation of all results.
rerun = True
$\newcommand{\F}{\mathsf{F}}$ $\newcommand{\G}{\mathsf{G}}$ $\newcommand{\FG}{\mathsf{F,G}}$
We evaluate our translation on randomly generated formulae. We use Spot to generate them. The following function uses Spot's randltl to generate and process formulae. It allows filtering the formulae using the function func
(this feature is not used here) and also removes the formulae $\top$ and $\bot$. Further, we remove all $\mathsf{M}$ and $\mathsf{W}$ operators as LTL3BA does not understand them. The relabel_bse
function renames $\G(a \lor b)$ into $\G a$ and thus it keeps only one formula of each such type.
In [6]:
def generate(n=100,func=(lambda x: True),filename=None,priorities='M=0,W=0,xor=0',ap=['a','b','c','d','e']):
    """Generate `n` unique random LTL formulae with Spot's randltl.

    Tautologies and unsatisfiable formulae are always dropped; `func`
    can filter further.  W and M operators are rewritten away
    (`unabbreviate('WM')`) so LTL3BA can read the output, and
    `relabel_bse` normalizes atomic propositions.

    Parameters
    ----------
    n : int
        Number of formulae to output.
    func : callable
        Predicate; only formulae for which it returns True are kept.
    filename : str or None
        Output file; if None, formulae are printed to stdout.
    priorities : str
        `ltl_priorities` string for spot.randltl.
    ap : list of str
        Atomic propositions to draw from (never mutated).
    """
    if filename is None:
        file_h = sys.stdout
    else:
        file_h = open(filename,'w')
    try:
        f = spot.randltl(ap,
                         ltl_priorities=priorities,
                         simplify=3,tree_size=15).relabel_bse(spot.Abc)\
                         .unabbreviate('WM')
        i = 0
        printed = set()
        while i < n:
            form = next(f)
            # randltl may repeat formulae; keep each one only once.
            if form in printed:
                continue
            if func(form) and not form.is_tt() and not form.is_ff():
                print(form,file=file_h)
                printed.add(form)
                i += 1
    finally:
        # Close the output file (the original leaked it); never close stdout.
        if file_h is not sys.stdout:
            file_h.close()
In [7]:
# File with the random formulae restricted to F and G operators.
fg_f = 'formulae/fg.ltl'
!mkdir -p formulae
total_r = 1000
# Only F and G (priority 3) are allowed; all other operators are disabled.
generate(total_r,filename=fg_f,priorities='xor=0,implies=0,equiv=0,X=0,W=0,M=0,R=0,U=0,F=3,G=3')
In [8]:
# Scratch file for the raw patterns produced by genltl.
tmp_file = 'formulae/tmp.ltl'
# Base name of the cleaned-up literature benchmark file.
lit_pref = 'formulae/literature'
lit_file = lit_pref + '.ltl'
In [9]:
!genltl --dac-patterns --eh-patterns --sb-patterns --beem-patterns --hkrss-patterns > $tmp_file
In [10]:
# Add negations of all patterns, deduplicate (--unique), simplify (-r3),
# remove W/M operators, relabel atomic propositions to a,b,c,... and
# finally drop formulae equivalent to false (0) or true (1).
!ltlfilt --negate $tmp_file | \
ltlfilt $tmp_file -F - --unique -r3 --remove-wm --relabel-bool=abc | \
ltlfilt -v --equivalent-to=0 | ltlfilt -v --equivalent-to=1> $lit_file
In [11]:
# Count the literature formulae; the context manager closes the file
# deterministically (the original relied on GC to close it).
with open(lit_file) as f:
    count = sum(1 for line in f)
print('Number of formulae from literature:\t{}'.format(count))
total_l = count
We compare the $\F$- and $\FG$-merging translations to the basic one. We compare the sizes of SLAA (alternating) and TELA (nondeterministic). To really measure the impact of merging, Spot's reduction techniques are disabled here (-u0 -z0).
Here we use a wrapper script ltlcross_runner for ltlcross that uses the pandas library to manipulate data. It requires some settings.
In [12]:
### Tools' setting ###
# Shared ltl3tela options; -u0 -z0 disable Spot's reductions so that
# only the effect of merging is measured (see the text above).
# %f / %O are ltlcross placeholders for the formula and output automaton.
ltl3tela_shared = "ltl3tela -p1 -t0 -u0 -z0 -f %f -a3 -n0 "
#end = " | awk '!p;/^--END--/{p=1}' > %O"
end = " > %O"
# Each entry maps a translation name to the full ltlcross command line.
tools = {"FG-merging" : ltl3tela_shared + end,
         "FG-merging+compl" : ltl3tela_shared + "-n1" + end,
         "F-merging" : ltl3tela_shared + "-G0" + end,
         "G-merging" : ltl3tela_shared + "-F0" + end,
         "basic" : ltl3tela_shared + "-F0 -G0" + end,
         "LTL3BA" : "ltl3ba -H1 -f %s" + end,
        }
### Order in which we want to sort the translations
MI_order = ["LTL3BA",
            "basic","F-merging","FG-merging"]
### File with measured statistics ###
merging_imp_alt_r = 'MI_alt-rand.csv'
merging_imp_alt_l = 'MI_alt-lit.csv'
### Measures to be measured
cols = ["states","transitions","nondet_states","nondet_aut"]
Here we use the LtlcrossRunner
object to run ltlcross, or to parse already computed data.
In [13]:
# Run (or reload) ltlcross on the random formulae benchmark.
MI_alt_r = LtlcrossRunner(tools,res_filename=merging_imp_alt_r,
                          formula_files=[fg_f],
                          cols=cols)
if rerun:
    MI_alt_r.run_ltlcross()
MI_alt_r.parse_results()
The cumulative number of states and transitions of all SLAA for each tool.
In [14]:
# Cumulative automata sizes per tool, in MI_order
# ('cummulative' is the spelling used by the LtlcrossRunner API).
tmp = MI_alt_r.cummulative(col=cols).unstack(level=0).loc[MI_order,cols]
t1_alt_r = tmp.loc[:,['states','transitions']]
# Number of deterministic automata = total minus nondeterministic ones.
t1_alt_r["det. aut."] = total_r-tmp.nondet_aut
t1_alt_r
Out[14]:
In [15]:
# Run (or reload) ltlcross on the literature formulae benchmark.
MI_alt_l = LtlcrossRunner(tools,res_filename=merging_imp_alt_l,
                          formula_files=[lit_file],
                          cols=cols)
if rerun:
    MI_alt_l.run_ltlcross()
MI_alt_l.parse_results()
In [16]:
# Same cumulative summary as above, for the literature formulae.
tmp = MI_alt_l.cummulative(col=cols).unstack(level=0).loc[MI_order,cols]
t1_alt_l = tmp.loc[:,['states','transitions']]
# Number of deterministic automata = total minus nondeterministic ones.
t1_alt_l["det. aut."] = total_l-tmp.nondet_aut
t1_alt_l
Out[16]:
In [17]:
# Combine both benchmarks into one table.  The column labels are derived
# from the computed formula counts instead of hard-coding 1000 and 221,
# so they cannot go stale if the benchmarks change.
t1 = pd.concat([t1_alt_r,t1_alt_l],axis=1,
               keys=['random ({})'.format(total_r),
                     'literature ({})'.format(total_l)]).loc[MI_order,:]
t1
Out[17]:
In [18]:
# Pretty LaTeX names for the rows.  Raw strings are used because '\F'
# and '\G' are invalid escape sequences in ordinary string literals
# (DeprecationWarning since Python 3.6, a SyntaxError in the future);
# the runtime values are unchanged.
row_map={"basic" : 'basic translation',
         "F-merging" : r'$\F$-merging',
         "G-merging" : r'$\G$-merging',
         "FG-merging" : r'$\FG$-merging',
         "FG-merging+compl" : r"$\FG$-merging + complement"}
t1.rename(row_map,inplace=True);
t1 = t1.rename_axis(['',"translation"],axis=1)
t1.index.name = None
t1
Out[18]:
In [19]:
print(t1.to_latex(escape=False,bold_rows=False),file=open('lpar_t1.tex','w'))
In [20]:
def fix_tools(tool):
    """Wrap the FG-/F- prefixes of a tool name in LaTeX macros."""
    # Replace 'FG-' first so the remaining 'F-' substitution only
    # affects genuine F-merging names.
    for prefix, macro in (('FG-', '$\\FG$-'), ('F-', '$\\F$-')):
        tool = tool.replace(prefix, macro)
    return tool
In [33]:
def sc_plot(r,t1,t2,filename=None,include_equal = True,col='states',log=None,size=(5.5,5),kw=None,clip=None, add_count=True):
    """Aggregate ltlcross results and render a scatter plot for tools t1 vs t2.

    Parameters
    ----------
    r : LtlcrossRunner or list of runners (results are merged in the
        latter case, keeping the first value per formula).
    t1, t2 : tool names (columns) to compare.
    filename : if given, the LaTeX code is written there; otherwise it
        is returned as a string.
    include_equal : keep formulae where both tools give the same value.
    col : which measured statistic to plot.
    log, size, kw, clip, add_count : forwarded to scatter_plot.
    """
    merged = isinstance(r,list)
    if merged:
        # Concatenate values of all runs and deduplicate formulae,
        # dropping the run-identifying index level first.
        vals = pd.concat([run.values[col] for run in r])
        vals.index = vals.index.droplevel(0)
        vals = vals.groupby(vals.index).first()
    else:
        vals = r.values[col]
    to_plot = vals.loc(axis=1)[[t1,t2]] if include_equal else\
        vals[vals[t1] != vals[t2]].loc(axis=1)[[t1,t2]]
    to_plot['count'] = 1
    to_plot.dropna(inplace=True)
    # Count how many formulae share each (t1, t2) value pair.
    to_plot = to_plot.groupby([t1,t2]).count().reset_index()
    tex = scatter_plot(to_plot, log=log, size=size,kw=kw,clip=clip, add_count=add_count)
    if filename is None:
        return tex
    # Close the output file deterministically (the original leaked it).
    with open(filename,'w') as out:
        print(tex,file=out)
In [34]:
def scatter_plot(df, short_toolnames=True, log=None, size=(5.5,5),kw=None,clip=None,add_count = True):
    """Build pgfplots (LaTeX) code for a tool-vs-tool scatter plot.

    Parameters
    ----------
    df : DataFrame with three columns: values of tool 1, values of
        tool 2, and 'count' (number of formulae sharing the pair).
    short_toolnames : strip any '/...' suffix and LaTeX-ify tool names.
    log : None, 'x', 'y', or 'both' -- which axes use a log scale.
    size : (width, height) of the plot in cm.
    kw : extra 'key=value' axis options; NOTE: mutated in place when a
        'title' is present and add_count is True (callers pass kw.copy()).
    clip : optional (xmin, ymin, xmax, ymax) rectangle drawn in red.
    add_count : append the total formula count to the title.

    Returns the LaTeX code as a string.
    """
    t1, t2, _ = df.columns.values
    if short_toolnames:
        t1 = fix_tools(t1.split('/')[0])
        t2 = fix_tools(t2.split('/')[0])
    # One '(x,y) [count]' coordinate per distinct value pair.
    vals = ['({},{}) [{}]\n'.format(v1,v2,c) for v1,v2,c in df.values]
    plots = '''\\addplot[
scatter, scatter src=explicit,
only marks, fill opacity=0.5,
draw opacity=0] coordinates
{{{}}};'''.format(' '.join(vals))
    # Diagonal y=x reference line; it must start at 1 on log axes.
    start_line = 0 if log is None else 1
    line = '\\addplot[darkgreen,domain={}:{}]{{x}};'.format(start_line, min(df.max(axis=0)[:2])+1)
    axis = 'axis'
    mins = 'xmin=0,ymin=0,'
    clip_str = ''
    if clip is not None:
        clip_str = '\\draw[red,thick] ({},{}) rectangle ({},{});'.format(*clip)
    if log:
        if log == 'both':
            axis = 'loglogaxis'
            mins = 'xmin=1,ymin=1,'
        else:
            axis = 'semilog{}axis'.format(log)
            mins = mins + '{}min=1,'.format(log)
    args = ''
    if kw is not None:
        if 'title' in kw and add_count:
            # Mutates the caller's kw dict -- callers pass kw.copy().
            kw['title'] = '{{{} ({})}}'.format(kw['title'],df['count'].sum())
        args = ['{}={},\n'.format(k,v) for k,v in kw.items()]
        args = ''.join(args)
    res = '''%\\begin{{tikzpicture}}
\\pgfplotsset{{every axis legend/.append style={{
cells={{anchor=west}},
draw=none,
}}}}
\\pgfplotsset{{colorbar/width=.3cm}}
\\pgfplotsset{{title style={{align=center,
font=\\small}}}}
\\pgfplotsset{{compat=1.14}}
\\begin{{{0}}}[
{1}
colorbar,
%thick,
axis x line* = bottom,
axis y line* = left,
width={2}cm, height={3}cm,
xlabel={{{4}}},
ylabel={{{5}}},
cycle list={{%
{{darkgreen, solid}},
{{blue, densely dashed}},
{{red, dashdotdotted}},
{{brown, densely dotted}},
{{black, loosely dashdotted}}
}},
{6}%
]
{7}%
{8}%
{9}%
\\end{{{0}}}
%\\end{{tikzpicture}}
'''.format(axis,mins,
           size[0],size[1],t1,t2,
           args,plots,line,
           clip_str)
    return res
In [35]:
# Shorthand tool names for the plots below.
ltl3ba = 'LTL3BA'
fgm = 'FG-merging'
fm = 'F-merging'
# Plot size (cm) and the key names of a clip rectangle, for later use.
margin_size = (4.3,6)
clip_names = ('xmin','ymin','xmax','ymax')
kw = {}
kw['title'] = 'random'
In [36]:
# Scatter plots on random formulae: FG-merging vs. LTL3BA / vs. F-merging.
# kw is copied because scatter_plot mutates the 'title' entry in place.
sc_plot(MI_alt_r,ltl3ba,fgm,'sc_fgm_3ba.tex',kw=kw.copy())
sc_plot(MI_alt_r,fm,fgm,'sc_fgm_fm.tex',kw=kw.copy())
In [ ]: