This example shows how to run multiple workloads in multiple configurations.
Please check the notebooks in examples/android/benchmarks/ and examples/android/workloads/ for more details on each of the available workloads and on how to visualise their results.
In [1]:
import logging
from conf import LisaLogging
LisaLogging.setup()
In [2]:
%pylab inline
import collections
import copy
import json
import os
from time import sleep
# Support to access the remote target
import devlib
from env import TestEnv
# Import support for Android devices
from android import Screen, Workload, System
# Support for trace events analysis
from trace import Trace
# Support for FTrace events parsing and visualization
import trappy
import datetime
In [3]:
def set_performance():
    target.cpufreq.set_all_governors('performance')

def set_powersave():
    target.cpufreq.set_all_governors('powersave')

def set_interactive():
    target.cpufreq.set_all_governors('interactive')

def set_sched():
    target.cpufreq.set_all_governors('sched')

def set_ondemand():
    target.cpufreq.set_all_governors('ondemand')

    for cpu in target.list_online_cpus():
        tunables = target.cpufreq.get_governor_tunables(cpu)
        target.cpufreq.set_governor_tunables(
            cpu,
            'ondemand',
            **{'sampling_rate' : tunables['sampling_rate_min']}
        )
In [4]:
def experiment(wl, res_dir, conf_name, wload_name, collect=''):

    ##################################
    # Initial setup

    # Load workload params
    wload_kind = wload_name.split()[0]
    iterations = int(wload_name.split()[1])
    wload_tag = wload_name.split()[2]\
                .replace('https://youtu.be/', '')\
                .replace('?t=', '_')

    # Check for workload being available
    wload = Workload.getInstance(te, wload_kind)
    if not wload:
        return {}

    # Setup test results folder
    exp_dir = os.path.join(res_dir, conf_name, "{}_{}".format(wload_kind, wload_tag))
    os.system('mkdir -p {}'.format(exp_dir));

    # Configure governor
    confs[conf_name]['set']()

    ###########################
    # Run the required workload

    # Jankbench
    if 'Jankbench' in wload_name:
        wload.run(exp_dir, wload_tag, iterations, collect)

    # UiBench
    elif 'UiBench' in wload_name:
        test_name = wload_name.split()[2]
        duration_s = int(wload_name.split()[3])
        wload.run(exp_dir, test_name, duration_s, collect)

    # YouTube
    elif 'YouTube' in wload_name:
        video_url = wload_name.split()[2]
        video_duration_s = int(wload_name.split()[3])
        wload.run(exp_dir, video_url, video_duration_s, collect)

    # RTApp based workloads
    elif 'RTApp' in wload_name:
        rtapp_kind = wload_name.replace('RTApp ', '')
        rtapp_run(rtapp_kind)

    # Dump platform descriptor
    te.platform_dump(exp_dir)
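Note that the RTApp branch above calls rtapp_run(), which is not defined in this notebook: it is assumed to be provided by a separate cell or support module. A minimal hypothetical stub, shown here only so the other workloads can run without a NameError, could look like:

# Hypothetical placeholder: the real rtapp_run() is expected to be
# defined elsewhere; this stub just logs and skips RTApp workloads.
def rtapp_run(rtapp_kind):
    logging.warning('RTApp support not available, skipping [%s]', rtapp_kind)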
In [5]:
def run_experiments(test_confs, wloads, confs, verbose=False):

    # Make sure we have a list of configurations to test
    if not isinstance(test_confs, list):
        test_confs = [test_confs]

    # Initialize workloads for this test environment
    wl = Workload(te)

    # Use DEBUG logging while tuning the notebook, INFO once it runs ok
    if verbose:
        LisaLogging.setup(level=logging.DEBUG)
    else:
        LisaLogging.setup(level=logging.INFO)

    # Run the benchmark in all the configured configurations
    for conf_name in test_confs:

        # Setup data to be collected
        try:
            collect = confs[conf_name]['collect']
            logging.info("Enabling collection of: %s", collect)
        except KeyError:
            collect = ''

        # Enable energy collection only if an emeter has been configured
        if 'energy' in collect:
            if 'emeter' not in my_conf or not te.emeter:
                logging.warning('Disabling ENERGY collection')
                logging.info('EMeter not configured or not available')
                collect = collect.replace('energy', '')
            else:
                logging.debug('Enabling ENERGY collection')

        # Run each workload
        idx = 0
        for wload_name in wloads:

            # Skip workload if not enabled by the configuration
            try:
                enabled = False
                enabled_workloads = confs[conf_name]['wloads']
                for wload in enabled_workloads:
                    if wload in wload_name:
                        enabled = True
                        break
                if not enabled:
                    logging.debug('Workload [%s] disabled', wload_name)
                    continue
            except KeyError:
                # No workload filters defined, execute all workloads
                logging.debug('All workloads enabled')

            # Log test being executed
            idx = idx + 1
            wload_kind = wload_name.split()[0]
            logging.info('------------------------')
            logging.info('Test %d: %s in %s configuration',
                         idx, wload_kind.upper(), conf_name.upper())
            logging.info('   %s', wload_name)

            experiment(wl, te.res_dir, conf_name, wload_name, collect)
devlib requires the ANDROID_HOME environment variable to point to your local installation of the Android SDK. If this variable is not set in the shell used to start the notebook server, you need either to run a cell defining where your Android SDK is installed or to specify ANDROID_HOME in your target configuration.
If more than one Android device is connected to the host, you must specify the ID of the device you want to target in my_conf. Run adb devices on your host to get the ID.
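For example, a cell that exports the variable before the TestEnv is created could look like this (the SDK path is illustrative, adjust it to your setup):

import os
# Illustrative path: point this at your own Android SDK installation
os.environ['ANDROID_HOME'] = '/path/to/android-sdk-linux'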
In [6]:
# Setup target configuration
my_conf = {

    # Target platform and board
    "platform"     : 'android',
    "device"       : "FA6A10306347",
    "ANDROID_HOME" : '/home/vagrant/lisa/tools/android-sdk-linux/',

    # Folder where all the results will be collected
    "results_dir" : "Android_Multiple_Workloads",

    # Define devlib modules to load
    "modules" : [
        'cpufreq'    # enable CPUFreq support
    ],

    # FTrace events to collect for all the test configurations which
    # have the "ftrace" flag enabled
    "ftrace" : {
        "events" : [
            "sched_switch",
            "sched_overutilized",
            "sched_contrib_scale_f",
            "sched_load_avg_cpu",
            "sched_load_avg_task",
            "sched_tune_tasks_update",
            "sched_boost_cpu",
            "sched_boost_task",
            "sched_energy_diff",
            "cpu_frequency",
            "cpu_idle",
            "cpu_capacity",
        ],
        "buffsize" : 10 * 1024,
    },

    # Tools required by the experiments
    "tools" : [ 'trace-cmd' ],
}
In [7]:
# List of possible workloads to run; each workload consists of a workload name
# followed by a list of workload-specific parameters
test_wloads = [

    # YouTube workload:
    # Params:
    # - iterations : number of playback iterations to execute
    # - URL        : link to the video to use (with optional start time)
    # - duration   : playback time in [s]
    'YouTube 1 https://youtu.be/XSGBVzeBUbk?t=45s 60',

    # Jankbench workload:
    # Params:
    # - iterations : number of benchmark iterations to execute
    # - id         : benchmark to run
    'Jankbench 1 list_view',
    'Jankbench 1 image_list_view',
    'Jankbench 1 shadow_grid',
    'Jankbench 1 low_hitrate_text',
    'Jankbench 1 high_hitrate_text',
    'Jankbench 1 edit_text',

    # Multiple iterations
    'Jankbench 3 list_view',
    'Jankbench 3 image_list_view',
    'Jankbench 3 shadow_grid',
    'Jankbench 3 low_hitrate_text',
    'Jankbench 3 high_hitrate_text',
    'Jankbench 3 edit_text',

    # UiBench workload:
    # Params:
    # - test_name : the name of the test to start
    # - duration  : test duration in [s]
    'UiBench 1 TrivialAnimationActivity 10',

    # RT-App workload:
    # Params:
    # - configuration : tasks configuration to run
    # - [configuration specific parameters]
    'RTApp STAccount 6',
    'RTApp RAMP',
]
In [8]:
# Available test configurations
# 'set'     : a setup function to be called before starting the test
# 'collect' : defines what we want to collect as a list of strings.
#             Supported values are:
#              energy   - use the emeter defined in my_conf to measure
#                         energy consumption across experiments
#              ftrace   - collect an execution trace using trace-cmd
#              systrace - collect an execution trace using Systrace/Atrace
# NOTE: energy is automatically enabled in case an "emeter" configuration
#       is defined in my_conf
confs = {
    'j_std' : {
        'set'     : set_interactive,
        'wloads'  : ['Jankbench 1 list_view'],
        'collect' : 'ftrace',
    },
    'j_eas' : {
        'set'     : set_sched,
        'wloads'  : ['Jankbench 1 list_view'],
        'collect' : 'ftrace',
    },
    'y_std' : {
        'set'     : set_interactive,
        'wloads'  : ['YouTube 1 https://youtu.be/XSGBVzeBUbk?t=45s'],
        'collect' : 'ftrace',
    },
    'u_eas' : {
        'set'     : set_sched,
        'wloads'  : ['UiBench 1 TrivialAnimationActivity'],
        'collect' : 'systrace',
    },
}
In [9]:
# List of experiments to run
experiments = ['j_std', 'j_eas', 'y_std', 'u_eas']
In [10]:
# Initialize a test environment using the target configuration defined above
te = TestEnv(my_conf, wipe=False)
target = te.target
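With the target connected, it can be worth sanity-checking which cpufreq governors the target actually supports before launching the experiments, since each configuration's 'set' function assumes its governor is available. devlib's cpufreq module provides list_governors() for this:

# Optional sanity check: list the governors supported by CPU0
logging.info('Supported governors: %s', target.cpufreq.list_governors(0))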
In [11]:
run_experiments(experiments, test_wloads, confs, True)
In [13]:
!tree {te.res_dir}