Trace Analysis Examples
Kernel Functions Profiling
In [1]:
    
import logging
# Reset any previous logging configuration (reload is a Python 2 builtin)
reload(logging)
logging.basicConfig(
    format='%(asctime)-9s %(levelname)-8s: %(message)s',
    datefmt='%I:%M:%S')
# Enable logging at INFO level
logging.getLogger().setLevel(logging.INFO)
    
In [2]:
    
# Generate plots inline
%matplotlib inline
import json
import os
# Support to access the remote target
import devlib
from env import TestEnv
# RTApp configurator for generation of RTApp workloads (e.g. Ramp tasks)
from wlgen import RTA, Ramp
# Support for trace events analysis
from trace import Trace
    
    
In [3]:
    
# Setup target configuration
my_conf = {
    # Target platform and board
    "platform"    : 'linux',
    "board"       : 'juno',
    "host"        : '192.168.0.1',
    # Folder where all the results will be collected
    "results_dir" : "TraceAnalysis_FunctionsProfiling",
    # Define devlib modules to load
    "exclude_modules" : [ 'hwmon' ],
    # FTrace configuration: kernel functions to profile for all the tests
    # which have the "ftrace" flag enabled
    "ftrace"  : {
        "functions" : [
            "pick_next_task_fair",
            "select_task_rq_fair",
            "enqueue_task_fair",
            "update_curr_fair",
            "dequeue_task_fair",
        ],
        "buffsize" : 100 * 1024,
    },
    # Tools required by the experiments
    "tools"   : [ 'trace-cmd', 'rt-app' ],
    
    # Comment out this line to run the RTApp calibration on your own platform
    "rtapp-calib" :  {"0": 360, "1": 142, "2": 138, "3": 352, "4": 352, "5": 353},
}
    
In [4]:
    
# Initialize a test environment using the configuration defined above
te = TestEnv(my_conf, wipe=False, force_new=True)
target = te.target
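For reference, the "functions" list in the configuration above drives the kernel's FTrace function profiler. The following is an illustrative sketch of the equivalent raw debugfs interface, not part of this notebook's flow; it assumes debugfs is mounted at /sys/kernel/debug and root access on the target:

# Illustrative sketch of the raw FTrace function-profiler interface
# driven by the "functions" configuration above
# (assumes debugfs mounted at /sys/kernel/debug and root access)
TRACING = '/sys/kernel/debug/tracing'
# Restrict profiling to the functions of interest
target.execute('echo enqueue_task_fair dequeue_task_fair > '
               '{}/set_ftrace_filter'.format(TRACING), as_root=True)
# Enable the function profiler, run the workload, then disable it
target.execute('echo 1 > {}/function_profile_enabled'.format(TRACING),
               as_root=True)
# ... workload runs here ...
target.execute('echo 0 > {}/function_profile_enabled'.format(TRACING),
               as_root=True)
# Per-CPU hit counts and execution times are then exposed under:
#   /sys/kernel/debug/tracing/trace_stat/function<cpu>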
    
    
In [5]:
    
def experiment(te):
    # Create an RTApp RAMP task
    rtapp = RTA(te.target, 'ramp', calibration=te.calibration())
    rtapp.conf(kind='profile',
               params={
                    'ramp' : Ramp(
                        start_pct =  60,
                        end_pct   =  20,
                        delta_pct =   5,
                        time_s    =   0.5).get()
              })
    # FTrace the execution of this workload
    te.ftrace.start()
    rtapp.run(out_dir=te.res_dir)
    te.ftrace.stop()
    # Collect and keep track of the trace
    trace_file = os.path.join(te.res_dir, 'trace.dat')
    te.ftrace.get_trace(trace_file)
    
    # Collect and keep track of the Kernel Functions performance data
    stats_file = os.path.join(te.res_dir, 'trace.stats')
    te.ftrace.get_stats(stats_file)
    # Dump platform descriptor
    te.platform_dump(te.res_dir)
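With these parameters the ramp task steps its duty cycle from 60% down to 20% in 5% decrements, running each step for 0.5s (9 steps, roughly 4.5s overall), which exercises the profiled scheduler functions under a varying load.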
    
In [6]:
    
experiment(te)
    
    
In [7]:
    
# Base folder where the results of this experiment are collected
res_dir = te.res_dir
logging.info('Content of the output folder %s', res_dir)
!tree {res_dir}
    
    
    
In [8]:
    
with open(os.path.join(res_dir, 'platform.json'), 'r') as fh:
    platform = json.load(fh)
# Uncomment to inspect the complete platform description:
#print json.dumps(platform, indent=4)
logging.info('LITTLE cluster max capacity: %d',
             platform['nrg_model']['little']['cpu']['cap_max'])
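A symmetric lookup should work for the big cluster; the 'big' key below is assumed to mirror the 'little' entry shown above (as is the case on big.LITTLE platforms such as Juno):

# Assumes the descriptor has a 'big' entry mirroring 'little'
logging.info('big cluster max capacity: %d',
             platform['nrg_model']['big']['cpu']['cap_max'])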
    
    
In [9]:
    
trace = Trace(platform, res_dir, events=[])
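No FTrace events are needed here: the functions profiling statistics are parsed from the trace.stats file collected by get_stats() above.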
    
    
In [10]:
    
# Get the DataFrame for the specified list of kernel functions
df = trace.data_frame.functions_stats(['enqueue_task_fair', 'dequeue_task_fair'])
df
    
    Out[10]:
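The returned object is a standard Pandas DataFrame, so the usual Pandas API can be used for further post-processing. A minimal sketch, assuming the frame exposes the same 'time' (total) and 'avg' (per-call) metrics used by plotProfilingStats below:

# Sketch: relative weight of each profiled function, computed from the
# 'time' column (assumed to report total execution time per function)
df['time'] / df['time'].sum()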
In [11]:
    
# Get the DataFrame for a single specified kernel function
df = trace.data_frame.functions_stats('select_task_rq_fair')
df
    
    Out[11]:
In [12]:
    
# Plot Average and Total execution time for the specified
# list of kernel functions
trace.analysis.functions.plotProfilingStats(
    functions = [
        'select_task_rq_fair',
        'enqueue_task_fair',
        'dequeue_task_fair'
    ],
    metrics = [
        'avg',
        'time',
    ]
)
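In the resulting plot, 'avg' reports the mean execution time per call of each function while 'time' reports the cumulative time spent in it over the profiled run; comparing the two helps spot functions which are individually cheap but called very frequently.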
    
    
    
In [14]:
    
# Plot Average execution time for the single specified kernel function
trace.analysis.functions.plotProfilingStats(
    functions = 'update_curr_fair',
)
    
    