In [1]:
import logging
from conf import LisaLogging
LisaLogging.setup()
In [2]:
# Generate plots inline
%matplotlib inline
import json
import os
# Support to access the remote target
import devlib
from env import TestEnv
from executor import Executor
# RTApp configurator for generation of PERIODIC tasks
from wlgen import RTA, Ramp
# Support for trace events analysis
from trace import Trace
In [3]:
# Setup target configuration
my_conf = {

    # Target platform and board
    "platform"    : 'linux',
    "board"       : 'juno',
    "host"        : '192.168.0.1',
    "password"    : 'juno',

    # Folder where all the results will be collected
    "results_dir" : "TraceAnalysis_FunctionsProfiling",

    # Define devlib modules to load
    "modules"         : ['cpufreq'],
    "exclude_modules" : ['hwmon'],

    # FTrace kernel functions to profile for all the test configurations
    # which have the "ftrace" flag enabled
    "ftrace" : {
        "functions" : [
            "pick_next_task_fair",
            "select_task_rq_fair",
            "enqueue_task_fair",
            "update_curr_fair",
            "dequeue_task_fair",
        ],
        "buffsize" : 100 * 1024,
    },

    # Tools required by the experiments
    "tools" : ['trace-cmd', 'rt-app'],

    # Uncomment this line to use pre-computed calibration values instead
    # of calibrating RTApp on your platform
    # "rtapp-calib" : {"0": 360, "1": 142, "2": 138, "3": 352, "4": 352, "5": 353},
}
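Note that the "functions" key configures ftrace's kernel function profiler rather than the usual tracepoint events. Before running the experiment it can be useful to check that each listed function is actually traceable on the target. A minimal sketch, assuming debugfs is mounted at /sys/kernel/debug on the target and relying on devlib's execute() method (the helper name is hypothetical):

# Hypothetical helper: warn about functions missing from ftrace's
# available_filter_functions (assumes debugfs mounted at /sys/kernel/debug)
def check_traceable(target, functions,
                    ftrace_dir='/sys/kernel/debug/tracing'):
    # Note: available_filter_functions can be a fairly large file
    available = target.execute(
        'cat {}/available_filter_functions'.format(ftrace_dir))
    for fname in functions:
        if fname not in available:
            logging.warning('Function %s not traceable on this target', fname)

# Example usage, once the TestEnv below has been initialized:
# check_traceable(te.target, my_conf['ftrace']['functions'])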
In [4]:
# Initialize a test environment using the target configuration defined above
te = TestEnv(my_conf, wipe=False, force_new=True)
target = te.target
In [5]:
def experiment(te):
    # Create an RTApp RAMP task
    rtapp = RTA(te.target, 'ramp', calibration=te.calibration())
    rtapp.conf(kind='profile',
               params={
                   'ramp' : Ramp(
                       start_pct =  60,
                       end_pct   =  20,
                       delta_pct =   5,
                       time_s    =  0.5).get()
               })

    # FTrace the execution of this workload
    te.ftrace.start()
    rtapp.run(out_dir=te.res_dir)
    te.ftrace.stop()

    # Collect and keep track of the trace
    trace_file = os.path.join(te.res_dir, 'trace.dat')
    te.ftrace.get_trace(trace_file)

    # Collect and keep track of the Kernel Functions performance data
    stats_file = os.path.join(te.res_dir, 'trace.stats')
    te.ftrace.get_stats(stats_file)

    # Dump platform descriptor
    te.platform_dump(te.res_dir)
In [6]:
experiment(te)
In [7]:
# Base folder where the test results are located
res_dir = te.res_dir
logging.info('Content of the output folder %s', res_dir)
!tree {res_dir}
In [8]:
with open(os.path.join(res_dir, 'platform.json'), 'r') as fh:
    platform = json.load(fh)
print json.dumps(platform, indent=4)
logging.info('LITTLE cluster max capacity: %d',
             platform['nrg_model']['little']['cpu']['cap_max'])
In [9]:
trace = Trace(res_dir, platform=platform)
In [10]:
# Get the DataFrame for the specified list of kernel functions
df = trace.data_frame.functions_stats(['enqueue_task_fair', 'dequeue_task_fair'])
df
Out[10]:
In [11]:
# Get the DataFrame for the single specified kernel function
df = trace.data_frame.functions_stats('select_task_rq_fair')
df
Out[11]:
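The object returned by functions_stats is a pandas DataFrame, so it can be post-processed with the usual pandas API before plotting. A minimal sketch, assuming the frame exposes per-CPU rows with (at least) the 'avg' and 'time' metrics used by the plots below:

# Hypothetical post-processing of the functions_stats DataFrame
# ('avg' and 'time' column names are assumed from the metrics plotted below)
df = trace.data_frame.functions_stats('select_task_rq_fair')

# CPUs where the function was most expensive on average
print df.sort_values(by='avg', ascending=False).head()

# Overall time spent in the function across all CPUs
print 'Total time: %f' % df['time'].sum()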
plotProfilingStats is the only method of the FunctionsAnalysis class used for functions profiling. It plots profiling metrics for the specified kernel functions: for each specified metric, a barplot reports the value of that metric for each CPU on which the kernel function has been executed. If no metric is specified, the default is avg. A list of kernel functions to plot can also be passed to plotProfilingStats; otherwise, by default, all the profiled kernel functions are plotted.
In [12]:
# Plot Average and Total execution time for the specified
# list of kernel functions
trace.analysis.functions.plotProfilingStats(
    functions = [
        'select_task_rq_fair',
        'enqueue_task_fair',
        'dequeue_task_fair'
    ],
    metrics = [
        # Average completion time per CPU
        'avg',
        # Total execution time per CPU
        'time',
    ]
)
In [13]:
# Plot Average execution time for the single specified kernel function
trace.analysis.functions.plotProfilingStats(
    functions = 'update_curr_fair',
)
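Since both arguments have defaults, as noted above, plotProfilingStats can also be called with no arguments; a minimal sketch relying on those defaults:

# Plot the default 'avg' metric for all the profiled kernel functions
trace.analysis.functions.plotProfilingStats()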