This tutorial shows how to configure and run a synthetic workload on a remote target using the wlgen module provided by LISA.
In [ ]:
    
import logging
from conf import LisaLogging
LisaLogging.setup()
    
In [1]:
    
# Execute this cell to enable devlib debugging statements
logging.getLogger('ssh').setLevel(logging.DEBUG)
    
In [2]:
    
# Other python modules required by this notebook
import json
import os
    
In [3]:
    
# Setup a target configuration
conf = {
    
    # Target platform and board
    "platform"    : 'linux',
    "board"       : "juno",
    
    # Login credentials
    "host"        : "192.168.0.1",
    "username"    : "root",
    "password"    : "",
    # Binary tools required to run this experiment
    # These tools must be present in the tools/ folder for the architecture
    "tools"   : ['rt-app', 'taskset', 'trace-cmd'],
    
    # Uncomment the following lines to provide pre-computed calibration
    # values and thus skip rt-app calibration on your target
#     "rtapp-calib" : {
#        "0": 355, "1": 138, "2": 138, "3": 355, "4": 354, "5": 354
#     },
    
    # FTrace events and buffer configuration
    "ftrace"  : {
         "events" : [
             "sched_switch",
             "cpu_frequency"
         ],
         "buffsize" : 10240
    },
    # Where results are collected
    "results_dir" : "WlgenExample",
    
    # Devlib modules we'll need
    "modules": ["cpufreq"]
}
    
In [4]:
    
# Support to access the remote target
from env import TestEnv
# Initialize a test environment using the target
# configuration defined above
te = TestEnv(conf)
target = te.target
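
Once connected, the target can be probed directly through the devlib handle. A quick sanity check (list_online_cpus is also used below; list_frequencies is provided by the devlib cpufreq module requested in the target configuration above):
In [ ]:

# Probe the connected target
print(target.list_online_cpus())
print(target.cpufreq.list_frequencies(0))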
    
    
In [27]:
    
def execute(te, wload, res_dir):
    
    logging.info('# Create results folder for this execution')
    !mkdir -p {res_dir}
    
    logging.info('# Setup FTrace')
    te.ftrace.start()
    logging.info('## Start energy sampling')
    te.emeter.reset()
    logging.info('### Start RTApp execution')
    wload.run(out_dir=res_dir)
    logging.info('## Read energy consumption: %s/energy.json', res_dir)
    nrg_report = te.emeter.report(out_dir=res_dir)
    logging.info('# Stop FTrace')
    te.ftrace.stop()
    trace_file = os.path.join(res_dir, 'trace.dat')
    logging.info('# Save FTrace: %s', trace_file)
    te.ftrace.get_trace(trace_file)
    logging.info('# Save platform description: %s/platform.json', res_dir)
    plt, plt_file = te.platform_dump(res_dir)
    
    logging.info('# Report collected data:')
    logging.info('   %s', res_dir)
    !ls -la {res_dir}
    
    return nrg_report, plt, plt_file, trace_file
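
Note that execute() leaves FTrace running if the workload raises an exception. A slightly more defensive variant, sketched below reusing only the calls already shown, wraps the run in try/finally:
In [ ]:

# Minimal sketch: same flow as execute() above, but FTrace is stopped
# even if the workload run fails
def execute_safe(te, wload, res_dir):
    !mkdir -p {res_dir}
    te.ftrace.start()
    try:
        wload.run(out_dir=res_dir)
    finally:
        te.ftrace.stop()
    te.ftrace.get_trace(os.path.join(res_dir, 'trace.dat'))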
    
In [28]:
    
# Support to configure and run RTApp based workloads
from wlgen import RTA
# Create a new RTApp workload generator using the calibration values
# reported by the TestEnv module
rtapp_name = 'example1'
rtapp = RTA(target, rtapp_name, calibration=te.calibration())
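
The calibration is one value per CPU, where smaller values correspond to faster CPUs (compare the big/LITTLE values in the commented rtapp-calib entry of the target configuration). It can be inspected directly:
In [ ]:

# Inspect the rt-app calibration values used by this workload generator
print(te.calibration())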
    
    
In [29]:
    
# RTApp configurator for generation of PERIODIC tasks
from wlgen import Periodic 
# Configure this RTApp instance to:
rtapp.conf(
    # 1. generate a "profile based" set of tasks
    kind = 'profile',
    
    # 2. define the "profile" of each task
    params = {
        
        # 3. PERIODIC task with
        'task_p20': Periodic(
            period_ms      = 100, # period [ms]
            duty_cycle_pct =  20, # duty cycle [%]
            duration_s     =  5,  # duration [s]
            delay_s        =  0,  # start after that delay [s]
            sched          = {    # run as a low-priority SCHED_OTHER task
                'policy'   : 'OTHER',
                'priority' : 130,
            },
            # pinned on the first online CPU
            cpus           = str(target.list_online_cpus()[0])
            # ADD OTHER PARAMETERS
        ).get(),
    },
    
);
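
For reference, the parameters above translate into per-activation run and sleep slots; a quick sanity check with plain arithmetic (no LISA API involved):
In [ ]:

# What rt-app will actually do with the parameters above
period_ms, duty_cycle_pct, duration_s = 100, 20, 5
run_ms   = period_ms * duty_cycle_pct / 100.0  # 20 ms busy per period
sleep_ms = period_ms - run_ms                  # 80 ms idle per period
loops    = int(duration_s * 1000 / period_ms)  # 50 activations
print('run: {} ms, sleep: {} ms, loops: {}'.format(run_ms, sleep_ms, loops))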
    
    
In [30]:
    
# Inspect the JSON file used to run the application
with open('./{}_00.json'.format(rtapp_name), 'r') as fh:
    rtapp_json = json.load(fh)
logging.info('Generated RTApp JSON file:')
print json.dumps(rtapp_json, indent=4, sort_keys=True)
    
    
    
In [31]:
    
res_dir = os.path.join(te.res_dir, rtapp_name)
nrg_report, plt, plt_file, trace_file = execute(te, rtapp, res_dir)
    
    
    
In [32]:
    
# Dump the energy measured for the LITTLE and big clusters
logging.info('Energy: %s', nrg_report.report_file)
print json.dumps(nrg_report.channels, indent=4, sort_keys=True)
    
    
    
In [33]:
    
# Dump the platform descriptor, which could be useful for further analysis
# of the generated results
logging.info('Platform description: %s', plt_file)
print json.dumps(plt, indent=4, sort_keys=True)
    
    
    
In [34]:
    
!kernelshark {trace_file} 2>/dev/null
    
    
In [37]:
    
# Support to configure and run RTApp based workloads
from wlgen import RTA
# Create a new RTApp workload generator using the calibration values
# reported by the TestEnv module
rtapp_name = 'example2'
rtapp = RTA(target, rtapp_name, calibration=te.calibration())
    
    
In [40]:
    
# RTApp configurators for generation of PERIODIC and RAMP tasks
from wlgen import Periodic, Ramp
cpus = str(target.bl.bigs_online[0])
# Light workload
# Light workload: 10% duty cycle over a 10ms period, for 1s
light  = Periodic(duty_cycle_pct=10, duration_s=1.0, period_ms= 10,
                  cpus=cpus)
# Ramp workload: duty cycle from 10% to 90% in 20% steps, 1s per step
ramp   = Ramp(start_pct=10, end_pct=90, delta_pct=20, time_s=1, period_ms=50,
              cpus=cpus)
# Heavy workload: 90% duty cycle over a 100ms period, for 0.1s
heavy  = Periodic(duty_cycle_pct=90, duration_s=0.1, period_ms=100,
                  cpus=cpus)
# Composed workload
lrh_task = light + ramp + heavy
# Configure this RTApp instance to:
rtapp.conf(
    # 1. generate a "profile based" set of tasks
    kind = 'profile',
    
    # 2. define the "profile" of each task
    params = {
        
        # 3. composed (light + ramp + heavy) task
        'task_ramp': lrh_task.get()
    },
    
);
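
The composed task can also be inspected directly; get() is the same call used in the params dictionary above and returns the full phase-by-phase description:
In [ ]:

from pprint import pprint
# Phase-by-phase description of the composed task:
# light, then the 5 ramp steps (10% to 90% in 20% increments), then heavy
pprint(lrh_task.get())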
    
    
In [41]:
    
# Inspect the JSON file used to run the application
with open('./{}_00.json'.format(rtapp_name), 'r') as fh:
    rtapp_json = json.load(fh)
logging.info('Generated RTApp JSON file:')
print json.dumps(rtapp_json, indent=4, sort_keys=True)
    
    
    
In [42]:
    
res_dir = os.path.join(te.res_dir, rtapp_name)
nrg_report, plt, plt_file, trace_file = execute(te, rtapp, res_dir)
    
    
    
In [48]:
    
!kernelshark {trace_file} 2>/dev/null
    
    
In [51]:
    
# Support to configure and run RTApp based workloads
from wlgen import RTA
# Create a new RTApp workload generator using the calibration values
# reported by the TestEnv module
rtapp_name = 'example3'
rtapp = RTA(target, rtapp_name, calibration=te.calibration())
# Configure this RTApp to use a custom JSON
rtapp.conf(
    # 1. generate a "custom" set of tasks
    kind = 'custom',
    
    # 2. define the "profile" of each task
    params = "../../assets/mp3-short.json",
    
    # In this case only a few values of the original JSON can be tuned:
    #    DURATION : maximum duration of the workload [s]
    #    PVALUE   : calibration value
    #    LOGDIR   : folder used for generated logs
    #    WORKDIR  : working directory on target
    
    # 3. define a maximum duration for that workload
    duration = 5,
    
);
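
To see which placeholders the custom template actually exposes, the asset can be printed as-is, before LISA performs the substitutions listed above:
In [ ]:

# Peek at the raw template, before LISA substitutes the tunable values
with open("../../assets/mp3-short.json", 'r') as fh:
    print(fh.read())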
    
    
In [52]:
    
res_dir = os.path.join(te.res_dir, rtapp_name)
nrg_report, plt, plt_file, trace_file = execute(te, rtapp, res_dir)
    
    
    
In [53]:
    
# Inspect the JSON file used to run the application
with open('./{}_00.json'.format(rtapp_name), 'r') as fh:
    rtapp_json = json.load(fh)
logging.info('Generated RTApp JSON file:')
print json.dumps(rtapp_json, indent=4, sort_keys=True)
    
    
    
In [72]:
    
# Support to configure and run RTApp based workloads
from wlgen import PerfMessaging
# Create a "perf bench sched messages" (i.e. hackbench) workload
perf_name = 'hackbench'
perf = PerfMessaging(target, perf_name)
perf.conf(group=1, loop=100, pipe=True, thread=True)
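
For reference, these parameters map onto the options of perf bench sched messaging. The mapping below is an assumption based on perf-bench's documented flags, since wlgen builds the actual command line internally:
In [ ]:

# Assumed mapping of wlgen parameters to perf-bench options:
#   group=1     -> --group 1   (number of sender/receiver groups)
#   loop=100    -> --loop 100  (number of message-passing loops)
#   pipe=True   -> --pipe      (use pipes instead of socketpairs)
#   thread=True -> --thread    (use threads instead of processes)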
    
    
In [73]:
    
res_dir = os.path.join(te.res_dir, perf_name)
nrg_report, plt, plt_file, trace_file = execute(te, perf, res_dir)
    
    
    
    
    
In [76]:
    
# Inspect the generated performance report
perf_file = os.path.join(te.res_dir, perf_name, 'performance.json')
with open(perf_file, 'r') as fh:
    perf_json = json.load(fh)
logging.info('Generated performance JSON file:')
print json.dumps(perf_json, indent=4, sort_keys=True)
    
    
    
In [77]:
    
!kernelshark {trace_file} 2>/dev/null
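
As an alternative to kernelshark, the trace can be parsed programmatically. A minimal sketch using trappy (the trace parser bundled with LISA), assuming the sched_switch event was collected as configured above:
In [ ]:

import trappy
# Parse the trace.dat produced by the last run (trace-cmd must be
# available on the host for the binary-to-text conversion)
ftrace = trappy.FTrace(trace_file)
# sched_switch events as a pandas DataFrame indexed by timestamp
print(ftrace.sched_switch.data_frame.head())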