Official repository on GitHub - ARM Software:
https://github.com/ARM-software/lisa
Installation dependencies are listed on the main page of the repository:
https://github.com/ARM-software/lisa#required-dependencies
Once cloned, source init_env to initialize the LISA Shell, which provides a convenient set of shell commands for easy access to many LISA-related functions.
$ source init_env
To start the IPython Notebook server required to use this notebook, run the following from the LISA Shell:
[LISAShell lisa] > lisa-ipython start
Starting IPython Notebooks...
Starting IPython Notebook server...
IP Address : http://127.0.0.1:8888/
Folder : /home/derkling/Code/lisa/ipynb
Logfile : /home/derkling/Code/lisa/ipynb/server.log
PYTHONPATH :
/home/derkling/Code/lisa/libs/bart
/home/derkling/Code/lisa/libs/trappy
/home/derkling/Code/lisa/libs/devlib
/home/derkling/Code/lisa/libs/wlgen
/home/derkling/Code/lisa/libs/utils
Notebook server task: [1] 24745
The main folder served by the server is:
http://127.0.0.1:8888/
The tutorial notebooks are accessible starting from this link:
http://127.0.0.1:8888/notebooks/tutorial/00_LisaInANutshell.ipynb
Note that the lisa-ipython command also allows you to specify the interface and port to use, in case you have several network interfaces on your host:
lisa-ipython start [interface [port]]
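For example, to serve the notebooks on a specific interface and port (the values below are purely illustrative):
[LISAShell lisa] > lisa-ipython start eth0 8888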
Let's run through some examples!
In [4]:
import logging
from conf import LisaLogging
LisaLogging.setup()
In [5]:
# Execute this cell to enable verbose SSH commands
logging.getLogger('ssh').setLevel(logging.DEBUG)
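To go back to a quieter output once done, simply raise the level of the same logger (plain Python logging, nothing LISA specific):
# Reduce SSH command verbosity again
logging.getLogger('ssh').setLevel(logging.INFO)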
In [6]:
# Other python modules required by this notebook
import json
import os
Advanced usage: become more confident with IPython notebooks and discover some of their hidden features
notebooks/tutorial/01_IPythonNotebooksUsage.ipynb
In [4]:
# Setup a target configuration
conf = {

    # Target platform and board
    "platform"    : 'linux',
    # Board descriptions are provided through JSON files in lisa/libs/utils/platforms/
    "board"       : "juno",

    # Login credentials
    "host"        : "192.168.0.1",
    "username"    : "root",
    "password"    : "",

    # Binary tools required to run this experiment
    # These tools must be present in the tools/ folder for the target architecture
    "tools"       : ['rt-app', 'taskset', 'trace-cmd'],

    # Uncomment the following lines to provide pre-computed calibration values
    # and skip rt-app calibration on your target
    # "rtapp-calib" : {
    #     "0": 355, "1": 138, "2": 138, "3": 355, "4": 354, "5": 354
    # },

    # FTrace events and buffer configuration
    "ftrace" : {
        "events" : [
            "sched_switch",
            "sched_wakeup",
            "sched_wakeup_new",
            "sched_contrib_scale_f",
            "sched_load_avg_cpu",
            "sched_load_avg_task",
            "sched_tune_config",
            "sched_tune_tasks_update",
            "sched_tune_boostgroup_update",
            "sched_tune_filter",
            "sched_boost_cpu",
            "sched_boost_task",
            "sched_energy_diff",
            "cpu_frequency",
            "cpu_capacity",
        ],
        "buffsize" : 10240
    },

    # Where results are collected
    "results_dir" : "LisaInANutshell",

    # Devlib modules required by (or to be excluded from) this experiment
    'modules'     : [ "cpufreq", "cgroups" ],
    #"exclude_modules" : [ "hwmon" ],
}
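Since the configuration is a plain Python dictionary, it can equally be kept in a JSON file next to the notebook and loaded at run time; a minimal sketch (the target.json file name is only an example):
import json

# Load the same target configuration from a JSON file
with open('target.json', 'r') as fh:
    conf = json.load(fh)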
In [5]:
# Support for accessing the remote target
from env import TestEnv

# Initialize a test environment using the target configuration
# defined above (conf)
te = TestEnv(conf)
target = te.target
print "DONE"
In [8]:
# Enable Energy-Aware scheduler
target.execute("echo ENERGY_AWARE > /sys/kernel/debug/sched_features");
# Check which sched_features are enabled
sched_features = target.read_value("/sys/kernel/debug/sched_features");
print "sched_features:"
print sched_features
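Scheduler features follow the kernel's usual NO_ prefix convention, so the same knob can be used to turn the feature back off:
# Disable the Energy-Aware scheduler feature
target.execute("echo NO_ENERGY_AWARE > /sys/kernel/debug/sched_features");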
In [9]:
# It is also possible to run custom scripts, e.g.:
# my_script = target.get_installed()
# target.execute(my_script)
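For instance, a local helper script can be installed on the target and executed through Devlib; a minimal sketch, assuming a my_script.sh file exists next to the notebook (both the file and its name are hypothetical):
# Push and install a local script on the target, then run it
my_script = target.install('./my_script.sh')
output = target.execute(my_script)
print output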
In [10]:
# Enable the 'sched' cpufreq governor on all CPUs
target.cpufreq.set_all_governors('sched');
# Check which governor is enabled on each CPU
enabled_governors = target.cpufreq.get_all_governors()
print enabled_governors
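The cpufreq module also exposes per-CPU accessors that are handy for a quick sanity check; a short sketch based on the standard Devlib cpufreq API:
# List the governors and OPPs available on each online CPU
for cpu in target.list_online_cpus():
    print "CPU{} governors  :".format(cpu), target.cpufreq.list_governors(cpu)
    print "CPU{} frequencies:".format(cpu), target.cpufreq.list_frequencies(cpu)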
In [11]:
cpuset = target.cgroups.controller('cpuset')
# Configure a big partition
cpuset_bigs = cpuset.cgroup('/big')
cpuset_bigs.set(cpus=te.target.bl.bigs, mems=0)
# Configure a LITTLE partition
cpuset_littles = cpuset.cgroup('/LITTLE')
cpuset_littles.set(cpus=te.target.bl.littles, mems=0)
# Dump the configuration of each controller
cgroups = cpuset.list_all()
for cgname in cgroups:
    cgroup = cpuset.cgroup(cgname)
    attrs = cgroup.get()
    cpus = attrs['cpus']
    print '{}:{:<15} cpus: {}'.format(cpuset.kind, cgroup.name, cpus)
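Tasks can then be confined to one of the partitions by writing their PIDs into the corresponding tasks file. A minimal sketch, assuming the cpuset controller is mounted under /sys/fs/cgroup/cpuset on the target (adjust the path to wherever Devlib actually mounted it) and using an illustrative PID:
# Move one task (illustrative PID) into the LITTLE partition
pid = 1234
target.write_value("/sys/fs/cgroup/cpuset/LITTLE/tasks", str(pid), verify=False)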
Advanced usage: exploring more APIs exposed by TestEnv and Devlib
notebooks/tutorial/02_TestEnvUsage.ipynb
In [12]:
# RTApp configurator for generation of PERIODIC tasks
from wlgen import RTA, Periodic, Ramp
# Light workload
light = Periodic(
    duty_cycle_pct = 10,
    duration_s = 3,
    period_ms = 32,
)

# Ramp workload
ramp = Ramp(
    start_pct=10,
    end_pct=60,
    delta_pct=20,
    time_s=0.5,
    period_ms=16
)

# Heavy workload
heavy = Periodic(
    duty_cycle_pct=60,
    duration_s=3,
    period_ms=16
)
# Composed workload
lrh_task = light + ramp + heavy
# Create a new RTApp workload generator using the calibration values
# reported by the TestEnv module
rtapp = RTA(target, 'test', calibration=te.calibration())
# Configure this RTApp instance to:
rtapp.conf(
    # 1. generate a "profile based" set of tasks
    kind = 'profile',

    # 2. define the "profile" of each task
    params = {
        # 3. composed task
        'task_lrh': lrh_task.get(),
    },

    #loadref='big',
    loadref='LITTLE',
    run_dir=target.working_directory

    # Alternatively, it is possible to specify a json file for rt-app through:
    # kind = 'custom',
    # params = '/path/file.json',
);
In [13]:
# Inspect the JSON file used to run the application
with open('./test_00.json', 'r') as fh:
    rtapp_json = json.load(fh)
logging.info('Generated RTApp JSON file:')
print json.dumps(rtapp_json, indent=4, sort_keys=True)
Advanced usage: using WlGen to create more complex RTApp configurations or run other benchmarks (e.g. hackbench)
notebooks/tutorial/03_WlGenUsage.ipynb
In [14]:
def execute(te, wload, res_dir):

    logging.info('# Setup FTrace')
    te.ftrace.start()

    logging.info('## Start energy sampling')
    te.emeter.reset()

    logging.info('### Start RTApp execution')
    wload.run(out_dir=res_dir)

    logging.info('## Read energy consumption: %s/energy.json', res_dir)
    nrg_report = te.emeter.report(out_dir=res_dir)

    logging.info('# Stop FTrace')
    te.ftrace.stop()

    trace_file = os.path.join(res_dir, 'trace.dat')
    logging.info('# Save FTrace: %s', trace_file)
    te.ftrace.get_trace(trace_file)

    logging.info('# Save platform description: %s/platform.json', res_dir)
    plt, plt_file = te.platform_dump(res_dir)

    logging.info('# Report collected data:')
    logging.info(' %s', res_dir)
    !tree {res_dir}

    return nrg_report, plt, plt_file, trace_file
In [15]:
nrg_report, plt, plt_file, trace_file = execute(te, rtapp, te.res_dir)
In [16]:
import pandas as pd
df = pd.DataFrame(list(nrg_report.channels.iteritems()),
                  columns=['Cluster', 'Energy'])
df = df.set_index('Cluster')
df
Out[16]:
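The per-cluster energy can also be plotted straight from this DataFrame with pandas' built-in plotting (assuming inline plotting is enabled, as done later with %matplotlib inline):
# Bar plot of the energy consumed by each cluster
df.plot(kind='bar', legend=False);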
In [17]:
# Show the collected platform description
with open(os.path.join(te.res_dir, 'platform.json'), 'r') as fh:
    platform = json.load(fh)
print json.dumps(platform, indent=4)

logging.info('LITTLE cluster max capacity: %d',
             platform['nrg_model']['little']['cpu']['cap_max'])
Advanced Workload Execution: using the Executor module to automate data collection for multiple tests
notebooks/tutorial/04_ExecutorUsage.ipynb
In [18]:
# Let's look at the trace using kernelshark...
trace_file = te.res_dir + '/trace.dat'
!kernelshark {trace_file} 2>/dev/null
In [19]:
# Support for FTrace events parsing and visualization
import trappy
# NOTE: The interactive trace visualization is available only if you run
# the workload to generate a new trace-file
trappy.plotter.plot_trace(trace_file)
In [20]:
# Load the LISA::Trace parsing module
from trace import Trace
# Define which events we are interested in
trace = Trace(te.res_dir, [
    "sched_switch",
    "sched_load_avg_cpu",
    "sched_load_avg_task",
    "sched_boost_cpu",
    "sched_boost_task",
    "cpu_frequency",
    "cpu_capacity",
], te.platform)
In [21]:
# Let's have a look at the set of events collected from the trace
ftrace = trace.ftrace
logging.info("List of events identified in the trace:")
for event in ftrace.class_definitions.keys():
    logging.info(" %s", event)
In [22]:
# Trace events are converted into tables; let's have a look at one
# of these tables
df = trace.data_frame.trace_event('sched_load_avg_task')
df.head()
Out[22]:
In [23]:
# Simple selection of events based on conditional values
#df[df.comm == 'task_lrh'].head()
# Simple selection of specific signals
#df[df.comm == 'task_lrh'][['util_avg']].head()
# Simple statistics reporting
#df[df.comm == 'task_lrh'][['util_avg']].describe()
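As a small worked example of the same idea, per-task statistics can be computed with a groupby over the columns used above:
# Average PELT utilization reported for each task
df.groupby('comm')['util_avg'].mean()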
Advanced DataFrame usage: filtering by columns/rows, merging tables, plotting data
notebooks/tutorial/05_TrappyUsage.ipynb
In [24]:
# Signals can be easily plotted using the ILinePlot module
trappy.ILinePlot(

    # FTrace object
    ftrace,

    # Signals to be plotted
    signals=[
        'sched_load_avg_cpu:util_avg',
        'sched_load_avg_task:util_avg'
    ],

    # # Generate one plot for each value of the specified column
    # pivot='cpu',

    # # Generate only plots which satisfy these filters
    # filters={
    #     'comm': ['task_lrh'],
    #     'cpu' : [0,5]
    # },

    # Formatting style
    per_line=2,
    drawstyle='steps-post',
    marker = '+'

).view()
In [25]:
from bart.sched.SchedAssert import SchedAssert

# Create an object to get/assert scheduling behaviors
sa = SchedAssert(ftrace, te.topology, execname='task_lrh')
In [26]:
# Check the residency of a task on the LITTLE cluster
print "Task residency [%] on LITTLE cluster:",\
sa.getResidency(
"cluster",
te.target.bl.littles,
percent=True
)
In [27]:
# Check on which CPU the task starts its execution
print "Task initial CPU:",\
sa.getFirstCpu()
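SchedAssert exposes other get-style accessors as well; for instance, a sketch reporting how long the task actually ran (assuming bart's getRuntime helper):
# Total time the task spent running on a CPU
print "Task runtime [s]:", sa.getRuntime()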
In [28]:
import operator
# Define the time window where we want to focus our assertions
start_s = sa.getStartTime()
little_residency_window = (start_s, start_s + 10)

# Define the expected task residency
EXPECTED_RESIDENCY_PCT = 99

result = sa.assertResidency(
    "cluster",
    te.target.bl.littles,
    EXPECTED_RESIDENCY_PCT,
    operator.ge,
    window=little_residency_window,
    percent=True
)
print "Task running {} [%] of its time on LITTLE? {}"\
.format(EXPECTED_RESIDENCY_PCT, result)
In [29]:
result = sa.assertFirstCpu(te.target.bl.bigs)
print "Task starting on a big CPU? {}".format(result)
In [30]:
# Focus on sched_switch events
df = ftrace.sched_switch.data_frame
# # Select only interesting columns
# df = df.ix[:,'next_comm':'prev_state']
# # Group sched_switch event by task switching into the CPU
# df = df.groupby('next_pid').describe(include=['object'])
# # Sort sched_switch events by number of time a task switch into the CPU
# df = df['next_comm'].sort_values(by=['count'], ascending=False)
df.head()
# # Get topmost task name and PID
# most_switching_pid = df.index[1]
# most_switching_task = df.values[1][2]
# task_name = "{}:{}".format(most_switching_pid, most_switching_task)
# # Print result
# logging.info("The most swithing task is: [%s]", task_name)
Out[30]:
In [42]:
# Focus on cpu_frequency events for CPU0
df = ftrace.cpu_frequency.data_frame
df = df[df.cpu == 0]
# # Compute the residency on each OPP before switching to the next one
# df.loc[:,'start'] = df.index
# df.loc[:,'delta'] = (df['start'] - df['start'].shift()).fillna(0).shift(-1)
# # Group by frequency and sum-up the deltas
# freq_residencies = df.groupby('frequency')['delta'].sum()
# logging.info("Residency time per OPP:")
# df = pd.DataFrame(freq_residencies)
df.head()
# # Compute the relative residency time
# tot = sum(freq_residencies)
# #df = df.apply(lambda delta : 100*delta/tot)
# for f in freq_residencies.index:
# logging.info("Freq %10dHz : %5.1f%%", f, 100*freq_residencies[f]/tot)
In [47]:
# Plot residency time
import matplotlib.pyplot as plt
# Enable generation of notebook-embedded plots
%matplotlib inline
fig, axes = plt.subplots(1, 1, figsize=(16, 5));
df.plot(kind='bar', ax=axes);
In [31]:
from perf_analysis import PerfAnalysis
# Full analysis function
def analysis(t_min=None, t_max=None):

    test_dir = te.res_dir

    platform_json = '{}/platform.json'.format(test_dir)
    trace_file = '{}/trace.dat'.format(test_dir)

    # Load platform description data
    with open(platform_json, 'r') as fh:
        platform = json.load(fh)

    # Load RTApp performance data
    pa = PerfAnalysis(test_dir)
    logging.info("Loaded performance data for tasks: %s", pa.tasks())

    # Load trace data
    #events = my_tests_conf['ftrace']['events']
    events = [
        "sched_switch",
        "sched_contrib_scale_f",
        "sched_load_avg_cpu",
        "sched_load_avg_task",
        "cpu_frequency",
        "cpu_capacity",
    ]
    trace = Trace(test_dir, events, platform)

    # Define the time range for all the temporal plots
    trace.setXTimeRange(t_min, t_max)

    # Task performance plots
    for task in pa.tasks():
        pa.plotPerf(task)

    # Task plots
    trace.analysis.tasks.plotTasks(pa.tasks())

    # Cluster and CPUs plots
    trace.analysis.frequency.plotClusterFrequencies()
In [32]:
analysis()
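The same analysis can be re-run focused on a narrower time window by passing explicit bounds (the values below are purely illustrative):
# Zoom into the first five seconds of the trace
analysis(t_min=0, t_max=5)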
Advanced TraceAnalysis and PerfAnalysis usage: use pre-defined functions to plot trace data and RTApp performance metrics
notebooks/tutorial/06_TraceAnalysis.ipynb
notebooks/tutorial/07_PerfAnalysis.ipynb