Convergence



In [ ]:
import sys
sys.path.append('../')
sys.path.append('../tests/python-support/')

%matplotlib notebook

In [ ]:
import matplotlib
import experiments
import itertools
import util
import numpy as np
import json
from collections import defaultdict

import matplotlib.pyplot as plt

from pprint import pprint

Configuration


In [ ]:
# Whether to only run the offline part of the experiment,
# or both offline and online
offline_only = False

# Whether to import/export the homogenized basis to/from HDF5 files
# (see make_offline_param for the filename convention)
import_basis = False
export_basis = False

# Names of experiments to run
experiment_names = [ "homogenized_l_shaped_amg" ]

# Offline parameters. Each list below is swept over: the experiment input
# is built from the Cartesian product of all listed values.
# Use integer exponents so that we can have reliable filenames
# (mesh resolution is computed as h_base ** h_exponent).
h_base = 0.5
h_exponent = [ 6 ]
oversampling = [ 2 ]
dense_fallback_threshold = [ 300 ]
corrector_iterative_tolerance = [ 1e-12 ]

# Online (time integration) parameters, also swept as a Cartesian product
end_time = [0.01]
sample_count = [ 101 ]
integrator = [ "iterative_leapfrog" ]
use_coarse_rhs = [ True ]
use_coarse_mass_matrix = [ True ]
integrator_iterative_tolerance = [ 1e-8 ]
measure_error = False

In [ ]:
def make_offline_param(h_exponent, oversampling, threshold, iterative_tol):
    """Build the offline parameter dict for one configuration.

    Reads module-level `h_base`, `import_basis` and `export_basis`.
    The basis filename encodes the integer exponent and oversampling so
    that the same configuration always maps to the same file.
    """
    basis_file = 'basis_{}_{}.h5'.format(h_exponent, oversampling)
    params = dict(
        mesh_resolution=h_base ** h_exponent,
        oversampling=oversampling,
        dense_fallback_threshold=threshold,
        iterative_tolerance=iterative_tol,
    )
    if import_basis:
        params['basis_import_file'] = basis_file
    if export_basis:
        params['basis_export_file'] = basis_file
    return params

def make_online_param(end_time, sample_count, integrator,
                      use_coarse_rhs, use_coarse_mass_matrix,
                      iterative_tol):
    """Build the online (time integration) parameter dict.

    `measure_error` is read from module scope; `load_quadrature_strength`
    is fixed at 2 for all runs.
    """
    return dict(
        end_time=end_time,
        sample_count=sample_count,
        integrator=integrator,
        load_quadrature_strength=2,
        use_coarse_rhs=use_coarse_rhs,
        use_coarse_mass_matrix=use_coarse_mass_matrix,
        iterative_tolerance=iterative_tol,
        measure_error=measure_error,
    )

def aggregate_offline_params():
    """Offline parameter dicts for the Cartesian product of all
    configured offline sweep values (module-level lists)."""
    combos = itertools.product(h_exponent, oversampling,
                               dense_fallback_threshold,
                               corrector_iterative_tolerance)
    return [make_offline_param(h_exp, ovs, threshold, tol)
            for h_exp, ovs, threshold, tol in combos]

def aggregate_online_params():
    """Online parameter dicts for the Cartesian product of all
    configured online sweep values (module-level lists)."""
    combos = itertools.product(end_time, sample_count,
                               integrator, use_coarse_rhs, use_coarse_mass_matrix,
                               integrator_iterative_tolerance)
    return list(itertools.starmap(make_online_param, combos))

def aggregate_input():
    """Assemble the experiment input list: one entry per combination of
    experiment name, offline parameter set and — unless `offline_only`
    is set — online parameter set."""
    offline_params = aggregate_offline_params()
    online_params = aggregate_online_params()
    if offline_only:
        return [{'experiment': experiment, 'offline': offline}
                for experiment, offline
                in itertools.product(experiment_names, offline_params)]
    return [{'experiment': experiment, 'offline': offline, 'online': online}
            for experiment, offline, online
            in itertools.product(experiment_names, offline_params, online_params)]

experiment_input = aggregate_input()

In [ ]:
# Run all configured experiments and collect their results
# (one result dict per entry in experiment_input)
results = experiments.run_experiments(experiment_input)

In [ ]:
# Inspect raw results (comment out this cell to reduce output noise)
pprint(results)

Analysis of results


In [ ]:
def is_successful(result):
    """A result is successful when it carries no 'error' entry."""
    return not ('error' in result)

def key(experiment):
    """Grouping key for a result: a 7-tuple identifying the experiment
    name and all solver settings. When `offline_only` is set, the online
    slots are filled with placeholders so the tuple shape is uniform.
    """
    name = experiment['experiment']
    offline_params = experiment['offline']['parameters']
    corrector_iterative_tol = offline_params['iterative_tolerance']
    oversampling = offline_params['oversampling']
    if offline_only:
        return (name, "no integration", oversampling, corrector_iterative_tol, None, None, None)
    online_params = experiment['online']['parameters']
    return (name,
            online_params['integrator'],
            oversampling,
            corrector_iterative_tol,
            online_params['use_coarse_rhs'],
            online_params['use_coarse_mass_matrix'],
            online_params['iterative_tolerance'])
    


def flatten_timing(timing):
    """Transpose a list of per-experiment timing dicts into a mapping
    from timer name to the list of values across experiments."""
    by_name = defaultdict(list)
    for experiment_timing in timing:
        for timer_name, seconds in experiment_timing.items():
            by_name[timer_name].append(seconds)
    return by_name

def print_timing(flattened_timing):
    """Pretty-print a flattened timing dict, one aligned row per timer.

    Rows are sorted by timer name; values are right-aligned in 10-char
    columns with 4 significant digits. Safe to call with an empty dict
    (prints nothing) — the original raised ValueError from max() there.
    """
    # default=0 avoids ValueError when there are no timings at all
    timing_pad = max((len(name) for name in flattened_timing), default=0)

    for timing_name, values in sorted(flattened_timing.items(), key=lambda pair: pair[0]):
        padded_name = "{}:".format(timing_name).ljust(timing_pad + 1)
        formatted_values = ", ".join([ "{:10.4g}".format(val) for val in values])
        print("{} [ {} ]".format(padded_name, formatted_values))

# Partition results into successes/failures, then group the successful runs
# by their full parameter key. The sort is required: itertools.groupby only
# groups *consecutive* items with equal keys.
success_results = [ result for result in results if is_successful(result) ]
failure_results = [ result for result in results if not is_successful(result) ]
success_results = sorted(success_results, key = key)
grouped_results = itertools.groupby(success_results, key = key)
for (name, integrator, oversampling, corrector_iterative_tol, 
     use_coarse_rhs, use_coarse_mass_matrix, integrator_tol), result in grouped_results:
    # NOTE(review): `result` is reused as the materialized group list AND as
    # the comprehension variable below — works, but consider renaming.
    result = list(result)
    offline_results = [ result['offline']['result'] for result in result ]
    coarse_dof = [ result['mesh_details']['num_coarse_vertices'] for result in offline_results ]
    fine_dof = [ result['mesh_details']['num_fine_vertices'] for result in offline_results ]
    
    # Collect per-experiment offline timings into name -> [values]
    offline_timings = [ result['offline']['result']['timing'] for result in result ]
    offline_timings = flatten_timing(offline_timings)
    
    print("=============================================")
    print("Experiment name:          {name}".format(name=name))
    print("Integrator:               {}".format(integrator))
    print("Oversampling:             {}".format(oversampling))
    print("Corrector iterative tol:  {}".format(corrector_iterative_tol))
    print("Coarse dof:               {}".format(coarse_dof))
    print("Fine dof:                 {}".format(fine_dof))
    
    if not offline_only:
        online_results = [ result['online']['result'] for result in result ]
        l2_error = [ result['error_summary']['l2'] for result in online_results ]
        h1_error = [ result['error_summary']['h1'] for result in online_results ]
        h1_semi_error = [ result['error_summary']['h1_semi'] for result in online_results ]
        # Convergence rates of the errors w.r.t. coarse/fine vertex counts
        # (util.estimate_slope — presumably a log-log fit; TODO confirm).
        l2_coarse_slope = util.estimate_slope(coarse_dof, l2_error)
        h1_coarse_slope = util.estimate_slope(coarse_dof, h1_error)
        l2_fine_slope = util.estimate_slope(fine_dof, l2_error)
        h1_fine_slope = util.estimate_slope(fine_dof, h1_error)
        
        print("Use coarse rhs:           {}".format(use_coarse_rhs))
        print("Use coarse mass:          {}".format(use_coarse_mass_matrix))
        print("Integrator iterative tol: {}".format(integrator_tol))
        
        online_timings = [ result['online']['result']['timing'] for result in result ]
        online_timings = flatten_timing(online_timings)
        
        # Whether the iterative integrator converged in each run
        convergence = [ result['converged'] for result in online_results ]
        print("")
        print("Convergence: {}".format(convergence))
        print("H1 semi:     {}".format(h1_semi_error))
        print("H1:          {}".format(h1_error))
        print("L2:          {}".format(l2_error))
        print("")
        print("H1 coarse slope: {}".format(h1_coarse_slope))
        print("L2 coarse slope: {}".format(l2_coarse_slope))
        print("H1 fine slope:   {}".format(h1_fine_slope))
        print("L2 fine slope:   {}".format(l2_fine_slope))
        print("")
        print("Timing (online):")
        print("---------------------------------------------")
        print_timing(online_timings)

    
    print("")
    print("Timing (offline):")
    print("---------------------------------------------")
    print_timing(offline_timings)
    print("=============================================")
    print("")

Offline statistics summary


In [ ]:
def visualize_statistics(summary):
    """Draw one bar chart per named statistic in `summary`, using each
    entry's 'distribution' histogram ('accumulated' proportions per bin).
    """
    for stat_name, stat_summary in summary.items():
        histogram = stat_summary['distribution']
        bin_indices = np.arange(0, len(histogram))
        proportions = [entry['accumulated'] for entry in histogram]
        plt.figure()
        plt.title("{}".format(stat_name))
        plt.bar(bin_indices, proportions)
        plt.show()
        
        

# Re-group the successful results by the same key used above.
# BUG FIX: key() always returns a 7-tuple, so unpacking the group key into
# only (name, integrator, oversampling) raised "too many values to unpack".
grouped_results = itertools.groupby(success_results, key = key)
for (name, integrator, oversampling, corrector_iterative_tol,
     use_coarse_rhs, use_coarse_mass_matrix, integrator_tol), group in grouped_results:
    group = list(group)
    offline_results = [ r['offline']['result'] for r in group ]
    coarse_dof = [ r['mesh_details']['num_coarse_vertices'] for r in offline_results ]
    fine_dof = [ r['mesh_details']['num_fine_vertices'] for r in offline_results ]

    print("=============================================")
    print("Experiment name: {name}".format(name=name))
    print("Oversampling:    {}".format(oversampling))
    print("Coarse dof:      {}".format(coarse_dof))
    print("Fine dof:        {}".format(fine_dof))

    for statistic in [ r['stats'] for r in offline_results ]:
        # Use distinct loop names: the original shadowed the outer `name`
        for stat_name, stat_summary in statistic.items():
            pprint(stat_summary['distribution'])
        visualize_statistics(statistic)

    print("=============================================")