Kernel Functions Profiling
In [1]:
import logging
# reload() resets any handlers IPython (or a previous cell) already
# installed, so the basicConfig() below actually takes effect.
# NOTE: reload is a Python 2 builtin; Python 3 needs importlib.reload.
reload(logging)
logging.basicConfig(
format='%(asctime)-9s %(levelname)-8s: %(message)s',
datefmt='%I:%M:%S')
# Enable logging at INFO level
logging.getLogger().setLevel(logging.INFO)
In [2]:
# Generate plots inline
%pylab inline
import json
import os
import re
import collections
import pandas
# Support to tests execution
from executor import Executor
In [3]:
# Setup a target configuration
# Describes how to reach and provision the device under test; consumed by
# the Executor below together with tests_conf.
target_conf = {
# Platform and board to target
"platform" : "linux",
"board" : "juno",
# Login credentials
"host" : "192.168.0.1",
"username" : "root",
"password" : "",
# Local installation path
# NOTE(review): kernel/dtb images placed under the TFTP folder —
# presumably the board network-boots them; confirm against the board setup.
"tftp" : {
"folder" : "/var/lib/tftpboot",
"kernel" : "kern.bin",
"dtb" : "dtb.bin",
},
# RTApp calibration values (comment to let LISA do a calibration run)
# One value per CPU id; used to scale rt-app workload intensity per core.
"rtapp-calib" : {
"0": 358, "1": 138, "2": 138, "3": 357, "4": 359, "5": 355
},
}
# Experiments configuration: what to trace, which kernel configurations to
# compare, which workloads to run and how many times.
tests_conf = {
# Kernel functions to profile for all the test
# configurations which have the "ftrace" flag enabled
"ftrace" : {
"functions" : [
"select_task_rq_fair",
"enqueue_task_fair",
"dequeue_task_fair",
],
# Trace buffer size — units not stated here; TODO confirm (KB?)
"buffsize" : 80 * 1024,
},
# Platform configurations to test
"confs" : [
{
"tag" : "base",
# "ftrace" flag: collect the function profiling data configured above
"flags" : "ftrace",
# Energy-aware scheduling disabled for the baseline
"sched_features" : "NO_ENERGY_AWARE",
"cpufreq" : {
"governor" : "performance",
},
},
{
"tag" : "eas",
"flags" : "ftrace",
# Energy-aware scheduling enabled for comparison
"sched_features" : "ENERGY_AWARE",
"cpufreq" : {
"governor" : "performance",
},
},
],
# Workloads to run (on each platform configuration)
"wloads" : {
"rta" : {
"type" : "rt-app",
"conf" : {
"class" : "profile",
"params" : {
# One 20% duty-cycle periodic task per CPU
"p20" : {
"kind" : "periodic",
"params" : {
"duty_cycle_pct" : 20,
},
"tasks" : "cpus",
},
},
},
},
},
# Number of iterations for each configuration/workload pair
"iterations" : 3,
# Tools to deploy
"tools" : [ "rt-app", 'trace-cmd' ],
# Where results are collected
# NOTE: this folder will be wiped before running the experiments
"results_dir" : "KernelFunctionsProfilingExample",
# Modules required by these experiments
"exclude_modules" : [ "hwmon" ],
}
In [4]:
# Setup tests executions based on our configuration
# Binds the target description (target_conf) to the experiment plan
# (tests_conf); experiments start only when run() is invoked.
executor = Executor(target_conf, tests_conf)
In [5]:
# Execute all the configured tests:
# every (configuration x workload) pair, for the configured number of iterations
executor.run()
In [6]:
# Example: map an absolute trace path back to a <res_dir>-relative label
res_dir = "/home/derkling/Code/lisa/results/KernelFunctionsProfilingExample"
out_dir = "/home/derkling/Code/lisa/results/KernelFunctionsProfilingExample/rtapp:eas:rta/2/trace.dat"
# str.replace returns a new string; the result is only shown by the
# notebook's display hook, nothing is stored
out_dir.replace(res_dir, "<res_dir>")
# Python 2 print statement: show where the executor actually stored results
print executor.te.res_dir
In [7]:
# Check content of the output folder
res_dir = executor.te.res_dir
logging.info('Content of the output folder %s', res_dir)
# IPython shell escape: list the results tree (requires the `tree` binary)
!tree {res_dir}
In [8]:
def autodict():
    """Factory for an arbitrarily-nested dict tree.

    Accessing a missing key creates a new nested autodict, so deep
    assignments like d[a][b][c] = v need no explicit initialization.
    """
    tree = collections.defaultdict(autodict)
    return tree

def parse_perf_stat(res_dir):
    """Collect per-CPU kernel function profiling stats from a results tree.

    Scans the sub-directories of ``res_dir`` whose name matches the
    ``<wtype>:<conf>:<wload>`` experiment naming scheme, loads the
    ``trace_stat.json`` produced for iteration '1' of each, and merges
    everything into a nested mapping::

        cpu_id -> conf tag -> function name -> stats dict

    :param res_dir: root folder containing one sub-folder per experiment
    :returns: nested (auto)dict of profiling statistics
    """
    dir_pattern = re.compile(r'.*/([^:]*):([^:]*):([^:]*)')
    stats = autodict()
    for entry in sorted(os.listdir(res_dir)):
        exp_dir = os.path.join(res_dir, entry)
        if not os.path.isdir(exp_dir):
            continue
        match = dir_pattern.search(exp_dir)
        if match is None:
            continue
        # Only the platform configuration tag is used as a key below
        tconf = match.group(2)
        # Stats are collected from the first iteration only
        stat_file = os.path.join(exp_dir, '1', 'trace_stat.json')
        if not os.path.isfile(stat_file):
            continue
        with open(stat_file, 'r') as fh:
            data = json.load(fh)
        for cpu_id, cpu_stats in sorted(data.items()):
            for fname, fstats in cpu_stats.items():
                stats[cpu_id][tconf][fname] = fstats
    return stats
# Parse the trace_stat.json files produced by the experiments above
profiling_data = parse_perf_stat(res_dir)
#logging.info("Profiling data:\n%s", json.dumps(profiling_data, indent=4))
#profiling_data
In [9]:
def get_df(profiling_data):
    """Flatten the nested profiling stats into a single DataFrame.

    :param profiling_data: mapping produced by parse_perf_stat(), shaped
                           cpu_id -> conf tag -> function name -> stats dict
    :returns: pandas.DataFrame with a (cpu_id, conf tag, function name)
              MultiIndex and one column per statistic (e.g. 'avg', 's_2')
    """
    cpu_ids = []
    cpu_frames = []
    for cpu_id, cpu_data in sorted(profiling_data.items()):
        cpu_ids.append(cpu_id)
        conf_ids = []
        conf_frames = []
        # FIX: the original used the Python-2-only .iteritems() here,
        # inconsistently with .items() above and broken on Python 3.
        # Sorting also makes the resulting index order deterministic.
        for conf_id, conf_data in sorted(cpu_data.items()):
            conf_ids.append(conf_id)
            # One row per function, one column per statistic
            function_data = pandas.DataFrame.from_dict(conf_data, orient='index')
            conf_frames.append(function_data)
        df = pandas.concat(conf_frames, keys=conf_ids)
        cpu_frames.append(df)
    df = pandas.concat(cpu_frames, keys=cpu_ids)
    return df
# Build a (cpu, conf, function) multi-indexed DataFrame from the parsed stats
stats_df = get_df(profiling_data)
#stats_df
In [10]:
def plot_stats(df, fname, axes=None):
    """Bar-plot the profiled stats of one kernel function across confs.

    :param df: DataFrame from get_df(), indexed by (cpu, conf, function)
    :param fname: kernel function name to plot (index level 2)
    :param axes: optional matplotlib Axes to draw into (new figure if None)
    """
    func_data = df.xs(fname, level=2)
    # FIX: the original used func_data.xs(['avg', 's_2'], axis=1); xs()
    # with a list of labels is not supported by current pandas — plain
    # column selection is the equivalent operation.
    func_stats = func_data[['avg', 's_2']]
    func_avg = func_stats.unstack(level=1)['avg']
    # s_2 is a variance: take the square root for stddev error bars
    func_std = func_stats.unstack(level=1)['s_2'].apply(numpy.sqrt)
    func_avg.plot(kind='bar', title=fname, yerr=func_std, ax=axes);
# Example usage:
#plot_stats(stats_df, 'select_task_rq_fair')
In [11]:
def plot_all_functions(df):
    """Plot the profiling stats of every kernel function found in ``df``,
    one subplot per function, stacked vertically."""
    functions = df.index.get_level_values(2).unique()
    fcount = len(functions)
    fig, pltaxes = plt.subplots(fcount, 1, figsize=(16, 8 * fcount))
    for fig_id, fname in enumerate(functions):
        logging.info("Plotting stats for [%s] function", fname)
        # subplots() returns a bare Axes (not an array) when a single
        # subplot is requested
        axes = pltaxes[fig_id] if fcount > 1 else pltaxes
        plot_stats(df, fname, axes)
plot_all_functions(stats_df)