In [1]:
import logging
from conf import LisaLogging
LisaLogging.setup()
In [2]:
# Generate plots inline
%matplotlib inline
import json
import os
# Support to access the remote target
import devlib
from env import TestEnv
# Support for workload generation
from wlgen import RTA, Ramp
# Support for trace analysis
from trace import Trace
# Support for plotting
import numpy
import pandas as pd
import matplotlib.pyplot as plt
import trappy
In [3]:
# Setup target configuration
my_conf = {
    # Target platform and board
    "platform"    : 'linux',
    "board"       : 'juno',
    "host"        : '192.168.0.1',
    "password"    : 'juno',

    # Folder where all the results will be collected
    "results_dir" : "TraceAnalysis_TaskLatencies",

    # Define devlib modules to load
    "modules"         : ['cpufreq'],
    "exclude_modules" : ['hwmon'],

    # FTrace events to collect for all the test configurations which have
    # the "ftrace" flag enabled
    "ftrace" : {
        "events" : [
            "sched_switch",
            "sched_wakeup",
            "sched_load_avg_cpu",
            "sched_load_avg_task",
        ],
        "buffsize" : 100 * 1024,
    },

    # Tools required by the experiments
    "tools" : [ 'trace-cmd', 'rt-app' ],

    # Uncomment the following line to use pre-computed calibration values and
    # skip RTApp calibration on your platform
    # "rtapp-calib" : {"0": 360, "1": 142, "2": 138, "3": 352, "4": 352, "5": 353},
}
In [4]:
# Initialize a test environment using the configuration defined above
te = TestEnv(my_conf, wipe=False, force_new=True)
target = te.target
In [5]:
def experiment(te):

    # Create an RTApp RAMP task
    rtapp = RTA(te.target, 'ramp', calibration=te.calibration())
    rtapp.conf(kind='profile',
               params={
                   'ramp' : Ramp(
                       start_pct =  60,
                       end_pct   =  20,
                       delta_pct =   5,
                       time_s    = 0.5).get()
               })

    # FTrace the execution of this workload
    te.ftrace.start()
    rtapp.run(out_dir=te.res_dir)
    te.ftrace.stop()

    # Collect and keep track of the trace
    trace_file = os.path.join(te.res_dir, 'trace.dat')
    te.ftrace.get_trace(trace_file)

    # Collect and keep track of the Kernel Functions performance data
    stats_file = os.path.join(te.res_dir, 'trace.stats')
    te.ftrace.get_stats(stats_file)

    # Dump platform descriptor
    te.platform_dump(te.res_dir)
In [6]:
experiment(te)
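If the run completes successfully, the files produced by experiment() should now be in te.res_dir. A minimal sanity check (just a sketch, reusing the file names written above):

# Check that the expected result files have been collected
for fname in ['trace.dat', 'trace.stats', 'platform.json']:
    path = os.path.join(te.res_dir, fname)
    logging.info('%s exists: %s', path, os.path.isfile(path))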
In [7]:
# Base folder where the test results are located
res_dir = te.res_dir
logging.info('Content of the output folder %s', res_dir)
!tree {res_dir}
In [8]:
with open(os.path.join(res_dir, 'platform.json'), 'r') as fh:
    platform = json.load(fh)
logging.info('LITTLE cluster max capacity: %d',
             platform['nrg_model']['little']['cpu']['cap_max'])
In [9]:
trace_file = os.path.join(res_dir, 'trace.dat')
trace = Trace(trace_file, my_conf['ftrace']['events'], platform)
In [10]:
trappy.plotter.plot_trace(trace.ftrace)
In [11]:
print trace.data_frame.latency_df.__doc__
In [12]:
# Report the full set of task status information available from the trace
trace.data_frame.latency_df('ramp').head()
Out[12]:
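Since latency_df() returns an ordinary pandas DataFrame, standard pandas operations apply to it. For example, a quick statistical summary (a minimal sketch, with no assumptions beyond the dataframe containing numeric columns):

# Summary statistics (count, mean, std, min/max, quartiles) of the latency dataframe
trace.data_frame.latency_df('ramp').describe()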
In [13]:
# Report information on sched_switch events
df = trace.data_frame.trace_event('sched_switch')
df.head()
Out[13]:
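Per-event dataframes can be aggregated directly as well. As a sketch, this counts sched_switch events per CPU, assuming the '__cpu' column that trappy normally attaches to each parsed event:

# Number of context switches recorded on each CPU ('__cpu' column added by trappy)
df.groupby('__cpu').size()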
In [14]:
print trace.data_frame.latency_wakeup_df.__doc__
In [15]:
# Report WAKEUP events and their duration
trace.data_frame.latency_wakeup_df('ramp').head()
Out[15]:
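From the same dataframe, the worst-case and 95th-percentile wakeup latency can be extracted directly. This assumes the duration column is named 'wakeup_latency'; check the dataframe shown above for the actual column name:

wkp_df = trace.data_frame.latency_wakeup_df('ramp')
# Worst case and 95th percentile of the observed wakeup latencies
# (the 'wakeup_latency' column name is an assumption, verify it above)
wkp_df['wakeup_latency'].max(), wkp_df['wakeup_latency'].quantile(0.95)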
In [16]:
print trace.data_frame.latency_preemption_df.__doc__
In [17]:
# Report PREEMPTION events and their duration
trace.data_frame.latency_preemption_df('ramp').head()
Out[17]:
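Similarly, the total time the task spent preempted can be estimated by summing the per-event durations. The 'preempt_latency' column name below is an assumption; verify it against the dataframe shown above:

prt_df = trace.data_frame.latency_preemption_df('ramp')
# Cumulative time spent preempted over the whole trace
# ('preempt_latency' column name assumed, check the dataframe above)
prt_df['preempt_latency'].sum()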
In [18]:
print trace.analysis.latency.plotLatency.__doc__
In [19]:
# Plot latency events for a specified task
latency_stats_df = trace.analysis.latency.plotLatency('ramp')
In [20]:
# Plot statistics on task latencies
latency_stats_df.T
Out[20]:
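The statistics are a regular DataFrame too, so they can be exported for later post-processing; the file name below is just an example:

# Save the latency statistics next to the other results (example file name)
latency_stats_df.to_csv(os.path.join(res_dir, 'ramp_latency_stats.csv'))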
In [21]:
print trace.analysis.latency.plotLatencyBands.__doc__
In [22]:
# Plot latency bands for a specified task
trace.analysis.latency.plotLatencyBands('ramp')
In [23]:
# Zoom into a specific time frame
trace.setXTimeRange(4.28,4.29)
trace.analysis.latency.plotLatencyBands('ramp')
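Note that the time range set with setXTimeRange() stays in effect for the plots that follow; to look at a different portion of the trace, call it again with another interval (the values below are only an example):

# Move the observation window to a different time frame (example values)
trace.setXTimeRange(4.0, 4.5)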
In [24]:
print trace.data_frame.activations_df.__doc__
In [25]:
# Report the sequence of activation intervals:
#   Time                : wakeup time
#   activation_interval : time interval since the previous wakeup
trace.data_frame.activations_df('ramp').head()
Out[25]:
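A quick way to inspect how the activation intervals are distributed is a histogram of the dataframe's numeric columns, using standard pandas plotting (a sketch, no extra assumptions):

act_df = trace.data_frame.activations_df('ramp')
# Histogram of the activation intervals reported above
act_df.hist(bins=32);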
In [26]:
print trace.analysis.latency.plotActivations.__doc__
In [27]:
# Plot activation intervals for a specified task
activations_df = trace.analysis.latency.plotActivations('ramp', threshold_ms=120)
In [28]:
# Plot statistics on task activation intervals
activations_df.T
Out[28]:
In [29]:
print trace.data_frame.runtimes_df.__doc__
In [30]:
# Report the sequence of running times:
#   Time         : task block time (i.e. sleep or exit)
#   running_time : cumulative running time since the last wakeup event
trace.data_frame.runtimes_df('ramp').head()
Out[30]:
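As with the other dataframes, the per-activation running times can be summarized directly; the 'running_time' column name follows the description in the comment above:

run_df = trace.data_frame.runtimes_df('ramp')
# Longest cumulative running time observed between a wakeup and the next block
# ('running_time' column name as described above)
run_df['running_time'].max()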
In [31]:
print trace.analysis.latency.plotRuntimes.__doc__
In [32]:
# Plot running times for a specified task
runtimes_df = trace.analysis.latency.plotRuntimes('ramp', threshold_ms=120)
In [33]:
# Plot statistics on task running times
runtimes_df.T
Out[33]: