The Monte Carlo experiment


In [1]:
import numpy as np

%matplotlib inline
import matplotlib.pyplot as plt

Handy routines to store and recover Python objects, in particular the experiment results dictionaries.


In [2]:
import time, gzip
import os, pickle

def save(obj, path, prefix=None):
    prefix_ = "" if prefix is None else "%s_"%(prefix,)
    filename_ = os.path.join(path, "%s%s.gz"%(prefix_, time.strftime("%Y%m%d-%H%M%S"),))
    with gzip.open(filename_, "wb+", 9) as fout_:
        pickle.dump(obj, fout_)
    return filename_

def load(filename):
    with gzip.open(filename, "rb") as f:
        return pickle.load(f)
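
For instance, a results dictionary can be round-tripped as follows (a minimal sketch; the path, prefix and payload are purely illustrative):

# Hypothetical usage of the save/load helpers above.
demo_ = {"med": np.arange(5), "std": np.arange(5) * 2.0}
filename_ = save(demo_, "/tmp", prefix="demo")
restored_ = load(filename_)
print(sorted(restored_.keys()))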

The path analyzer


In [3]:
from crossing_tree import structural_statistics

Collect a list of results returned by structural_statistics into aligned data tensors.


In [4]:
from crossing_tree import collect_structural_statistics

A function implementing the various choices of the base grid resolution delta.


In [5]:
import warnings

def get_delta_method(delta=1.0):
    if isinstance(delta, str):
        if delta == "std":
            # the standard deviation of increments
            delta_ = lambda X: np.diff(X).std()
        elif delta == "med":
            # Use the median absolute difference [Jones, Rolls; 2009] p. 11 (arxiv:0911.5204v2)
            delta_ = lambda X: np.median(np.abs(np.diff(X)))
        elif delta == "mean":
            # Use the mean absolute difference
            delta_ = lambda X: np.mean(np.abs(np.diff(X)))
        elif delta == "iqr":
            # Interquartile range
            delta_ = lambda X: np.subtract(*np.percentile(np.diff(X), [75, 25]))
        elif delta == "rng":
            # Use the range estimate as suggested by Geoffrey on 2015-05-28
            warnings.warn("""Use of `range`-based grid resolution """
                          """is discouraged since it may cause misaligned """
                          """crossing trees.""", RuntimeWarning)
            delta_ = lambda X: (X.max() - X.min()) / (2**12)
        else:
            raise ValueError("""Invalid `delta` setting. Accepted values """
                             """are: [`iqr`, `std`, `med`, `rng`, `mean`].""")
    elif isinstance(delta, float) and delta > 0:
        delta_ = lambda X: delta
    else:
        raise TypeError("""`delta` must be either a float, or a method """
                        """identifier.""")
    return delta_
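
As a quick illustration (a toy sanity check, not part of the experiment; the random-walk path is synthetic), each setting maps a sample path to a single grid resolution:

# Hypothetical comparison of the delta estimators on a toy random walk.
X_demo = np.random.RandomState(0).normal(size=1000).cumsum()
for method_ in ["std", "med", "mean", "iqr", 0.5]:
    print(method_, get_delta_method(method_)(X_demo))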

The kernel of a single Monte Carlo batch: it draws n_replications sample paths from the generator and computes the structural statistics of each path under every delta method.


In [6]:
from sklearn.base import clone

def experiment(experiment_id, n_replications, methods, generator):
    generator = clone(generator)
    generator.start()

    deltas = [get_delta_method(method_) for method_ in methods]

    results = {method_: list() for method_ in methods}
    for j in range(n_replications):
        T, X = generator.draw()

        # Apply all methods to the same sample path.
        for delta, method in zip(deltas, methods):
            result_ = structural_statistics(X, T, scale=delta(X), origin=X[0])
            results[method].append(result_)

    generator.finish()

    return experiment_id, results
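
A single batch can be run serially as a quick sanity check (a minimal sketch; the path length, Hurst exponent and seed are arbitrary):

from crossing_tree.processes import FractionalBrownianMotion

# Hypothetical serial invocation of the batch kernel defined above.
id_, res_ = experiment("demo", n_replications=2, methods=["std", "med"],
                       generator=FractionalBrownianMotion(N=1 << 10, hurst=0.7,
                                                          random_state=123))
print(id_, [len(res_[m]) for m in ["std", "med"]])  # one result per replication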

Experiments


In [7]:
from joblib import Parallel, delayed

A pool of pre-generated master random seeds.


In [8]:
# Extra random seeds should be prepended to the array.
master_seeds = [0xD5F60A17, 0x26F9935C, 0x0E4C1E75, 0xDA7C4291, 0x7ABE722E,
                0x126F3E10, 0x045300B1, 0xB0A9AD11, 0xEED05353, 0x824736C7,
                0x7AA17C9C, 0xB695D6B1, 0x7E214411, 0x538CDEEF, 0xFD55FF46,
                0xE14E1801, 0x872F687C, 0xA58440D9, 0xB8A273FD, 0x0BD1DD28,
                0xAB6A6AE6, 0x7180E905, 0x870E7BAB, 0x846D0C7A, 0xAEF0422D,
                0x16C53C83, 0xE32EA61D, 0xE0AD0A26, 0xCC90CA9A, 0x7D4020D2,]

The Monte Carlo experiment is run in parallel batches, each initialized with a randomly drawn seed.


In [9]:
MAX_RAND_SEED = np.iinfo(np.int32).max
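
Each scheduled batch draws its own seed from the master random state and uses it both as the experiment_id and as the random_state of its path generator, so a batch is reproducible from its id alone (a condensed sketch of the scheduling code below; the master seed is one of those listed above):

# Hypothetical illustration of the per-batch seeding scheme.
rs_demo = np.random.RandomState(0xD5F60A17)
batch_seeds = rs_demo.randint(MAX_RAND_SEED, size=(8,))
print(batch_seeds[:3])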

The folder to store the results in


In [10]:
OUTPUT_PATH = "../results/"

fBM experiment


In [11]:
from crossing_tree.processes import FractionalBrownianMotion

seed = master_seeds.pop()
print("Using seed %X"%(seed,))
random_state = np.random.RandomState(seed)

skip = False


Using seed 7D4020D2

Setup


In [12]:
n_samples, methods = 1 << 23, ["med", "std", "iqr", "mean",]
hurst_exponents = [0.500, 0.550, 0.600, 0.650, 0.700, 0.750, 0.800, 0.850, 0.900,
                   0.910, 0.915, 0.920, 0.925, 0.930, 0.935, 0.940, 0.945, 0.950,
                   0.990,]
n_per_batch, n_batches = 125, 8

Run the experiment for the Fractional Brownian Motion.


In [13]:
if not skip:
    par_ = Parallel(n_jobs=-1, verbose=0)
    for hurst_ in hurst_exponents:
        name_ = "FBM-%d-%0.3f-%dx%d"%(n_samples, hurst_, n_per_batch, n_batches)
        print(name_, end=" ")

        # Schedule the experiments
        seeds = random_state.randint(MAX_RAND_SEED, size=(n_batches,))
        schedule_ = (delayed(experiment)(seed_, n_per_batch, methods,
                                         FractionalBrownianMotion(N=n_samples,
                                                                  hurst=hurst_,
                                                                  random_state=seed_))
                                    for seed_ in seeds)

        # Run the experiment and collect the results
        tick_ = time.time()
        experiment_ids = list()
        results_ = {method: list() for method in methods}
        for id_, dict_ in par_(schedule_):
            experiment_ids.append(id_)
            for method in methods:
                results_[method].extend(dict_[method])
        results = {key_: collect_structural_statistics(list_)
                   for key_, list_ in results_.items()}
        tock_ = time.time()

        # Save the results and log
        filename_ = save((tick_, tock_, experiment_ids, results), OUTPUT_PATH, name_)
        print("%0.3fsec."%(tock_ - tick_,), filename_)


FBM-8388608-0.500-125x8 1056.620sec. ../results/FBM-8388608-0.500-125x8_20161108-162012.gz
FBM-8388608-0.550-125x8 1133.918sec. ../results/FBM-8388608-0.550-125x8_20161108-164004.gz
FBM-8388608-0.600-125x8 1186.027sec. ../results/FBM-8388608-0.600-125x8_20161108-170050.gz
FBM-8388608-0.650-125x8 1231.505sec. ../results/FBM-8388608-0.650-125x8_20161108-172209.gz
FBM-8388608-0.700-125x8 1288.416sec. ../results/FBM-8388608-0.700-125x8_20161108-174427.gz
FBM-8388608-0.750-125x8 1351.370sec. ../results/FBM-8388608-0.750-125x8_20161108-180744.gz
FBM-8388608-0.800-125x8 1420.173sec. ../results/FBM-8388608-0.800-125x8_20161108-183207.gz
FBM-8388608-0.850-125x8 1500.556sec. ../results/FBM-8388608-0.850-125x8_20161108-185753.gz
FBM-8388608-0.900-125x8 1604.555sec. ../results/FBM-8388608-0.900-125x8_20161108-192519.gz
FBM-8388608-0.910-125x8 1635.019sec. ../results/FBM-8388608-0.910-125x8_20161108-195316.gz
FBM-8388608-0.915-125x8 1634.690sec. ../results/FBM-8388608-0.915-125x8_20161108-202113.gz
FBM-8388608-0.920-125x8 1645.911sec. ../results/FBM-8388608-0.920-125x8_20161108-204921.gz
FBM-8388608-0.925-125x8 1658.832sec. ../results/FBM-8388608-0.925-125x8_20161108-211742.gz
FBM-8388608-0.930-125x8 1667.693sec. ../results/FBM-8388608-0.930-125x8_20161108-214610.gz
FBM-8388608-0.935-125x8 1683.263sec. ../results/FBM-8388608-0.935-125x8_20161108-221451.gz
FBM-8388608-0.940-125x8 1697.326sec. ../results/FBM-8388608-0.940-125x8_20161108-224346.gz
FBM-8388608-0.945-125x8 1714.388sec. ../results/FBM-8388608-0.945-125x8_20161108-231258.gz
FBM-8388608-0.950-125x8 1726.202sec. ../results/FBM-8388608-0.950-125x8_20161108-234226.gz
FBM-8388608-0.990-125x8 2272.542sec. ../results/FBM-8388608-0.990-125x8_20161109-002058.gz

Hermite process experiment


In [14]:
from crossing_tree.processes import HermiteProcess

seed = master_seeds.pop()
print("Using seed %X"%(seed,))
random_state = np.random.RandomState(seed)

skip = False


Using seed CC90CA9A

Setup: use no downsampling.


In [15]:
n_samples, n_downsample = 1 << 23, 1
degrees, methods = [2, 3, 4], ["med", "std", "iqr", "mean",]
hurst_exponents = [       0.550, 0.600, 0.650, 0.700, 0.750, 0.800, 0.850, 0.900,
                   0.910, 0.915, 0.920, 0.925, 0.930, 0.935, 0.940, 0.945, 0.950,
                   0.990,]
n_per_batch, n_batches = 125, 8

Run the experiment for the Hermite process.


In [16]:
if not skip:
    par_ = Parallel(n_jobs=-1, verbose=0)
    for degree_ in degrees:
        for hurst_ in hurst_exponents:
            name_ = "HRP%d_%d-%d-%0.3f-%dx%d"%(degree_, n_downsample, n_samples, hurst_, n_per_batch, n_batches)
            print(name_, end=" ")

            # Schedule the experiments
            seeds = random_state.randint(MAX_RAND_SEED, size=(n_batches,))
            schedule_ = (delayed(experiment)(seed_, n_per_batch, methods,
                                             HermiteProcess(N=n_samples,
                                                            degree=degree_,
                                                            n_downsample=n_downsample,
                                                            hurst=hurst_,
                                                            random_state=seed_))
                                        for seed_ in seeds)

            # Run the experiment and collect the results
            tick_ = time.time()
            experiment_ids = list()
            results_ = {method: list() for method in methods}
            for id_, dict_ in par_(schedule_):
                experiment_ids.append(id_)
                for method in methods:
                    results_[method].extend(dict_[method])
            results = {key_: collect_structural_statistics(list_)
                       for key_, list_ in results_.items()}
            tock_ = time.time()

            # Save the results and log
            filename_ = save((tick_, tock_, experiment_ids, results), OUTPUT_PATH, name_)
            print("%0.3fsec."%(tock_ - tick_,), filename_)


HRP2_1-8388608-0.550-125x8 1313.578sec. ../results/HRP2_1-8388608-0.550-125x8_20161109-004331.gz
HRP2_1-8388608-0.600-125x8 1335.730sec. ../results/HRP2_1-8388608-0.600-125x8_20161109-010637.gz
HRP2_1-8388608-0.650-125x8 1373.954sec. ../results/HRP2_1-8388608-0.650-125x8_20161109-013022.gz
HRP2_1-8388608-0.700-125x8 1412.096sec. ../results/HRP2_1-8388608-0.700-125x8_20161109-015439.gz
HRP2_1-8388608-0.750-125x8 1457.555sec. ../results/HRP2_1-8388608-0.750-125x8_20161109-021942.gz
HRP2_1-8388608-0.800-125x8 1519.344sec. ../results/HRP2_1-8388608-0.800-125x8_20161109-024547.gz
HRP2_1-8388608-0.850-125x8 1585.849sec. ../results/HRP2_1-8388608-0.850-125x8_20161109-031254.gz
HRP2_1-8388608-0.900-125x8 1681.800sec. ../results/HRP2_1-8388608-0.900-125x8_20161109-034137.gz
HRP2_1-8388608-0.910-125x8 1697.750sec. ../results/HRP2_1-8388608-0.910-125x8_20161109-041035.gz
HRP2_1-8388608-0.915-125x8 1705.061sec. ../results/HRP2_1-8388608-0.915-125x8_20161109-043942.gz
HRP2_1-8388608-0.920-125x8 1718.075sec. ../results/HRP2_1-8388608-0.920-125x8_20161109-050900.gz
HRP2_1-8388608-0.925-125x8 1729.296sec. ../results/HRP2_1-8388608-0.925-125x8_20161109-053826.gz
HRP2_1-8388608-0.930-125x8 1743.978sec. ../results/HRP2_1-8388608-0.930-125x8_20161109-060809.gz
HRP2_1-8388608-0.935-125x8 1758.062sec. ../results/HRP2_1-8388608-0.935-125x8_20161109-063805.gz
HRP2_1-8388608-0.940-125x8 1777.296sec. ../results/HRP2_1-8388608-0.940-125x8_20161109-070819.gz
HRP2_1-8388608-0.945-125x8 1794.985sec. ../results/HRP2_1-8388608-0.945-125x8_20161109-073852.gz
HRP2_1-8388608-0.950-125x8 1814.537sec. ../results/HRP2_1-8388608-0.950-125x8_20161109-080945.gz
HRP2_1-8388608-0.990-125x8 2981.882sec. ../results/HRP2_1-8388608-0.990-125x8_20161109-090005.gz
HRP3_1-8388608-0.550-125x8 1268.157sec. ../results/HRP3_1-8388608-0.550-125x8_20161109-092151.gz
HRP3_1-8388608-0.600-125x8 1291.862sec. ../results/HRP3_1-8388608-0.600-125x8_20161109-094411.gz
HRP3_1-8388608-0.650-125x8 1323.764sec. ../results/HRP3_1-8388608-0.650-125x8_20161109-100705.gz
HRP3_1-8388608-0.700-125x8 1363.573sec. ../results/HRP3_1-8388608-0.700-125x8_20161109-103038.gz
HRP3_1-8388608-0.750-125x8 1409.588sec. ../results/HRP3_1-8388608-0.750-125x8_20161109-105452.gz
HRP3_1-8388608-0.800-125x8 1466.354sec. ../results/HRP3_1-8388608-0.800-125x8_20161109-112004.gz
HRP3_1-8388608-0.850-125x8 1536.353sec. ../results/HRP3_1-8388608-0.850-125x8_20161109-114620.gz
HRP3_1-8388608-0.900-125x8 1634.421sec. ../results/HRP3_1-8388608-0.900-125x8_20161109-121413.gz
HRP3_1-8388608-0.910-125x8 1663.819sec. ../results/HRP3_1-8388608-0.910-125x8_20161109-124237.gz
HRP3_1-8388608-0.915-125x8 1701.041sec. ../results/HRP3_1-8388608-0.915-125x8_20161109-131137.gz
HRP3_1-8388608-0.920-125x8 1731.294sec. ../results/HRP3_1-8388608-0.920-125x8_20161109-134107.gz
HRP3_1-8388608-0.925-125x8 1745.824sec. ../results/HRP3_1-8388608-0.925-125x8_20161109-141052.gz
HRP3_1-8388608-0.930-125x8 1753.505sec. ../results/HRP3_1-8388608-0.930-125x8_20161109-144042.gz
HRP3_1-8388608-0.935-125x8 1778.977sec. ../results/HRP3_1-8388608-0.935-125x8_20161109-151059.gz
HRP3_1-8388608-0.940-125x8 1786.511sec. ../results/HRP3_1-8388608-0.940-125x8_20161109-154127.gz
HRP3_1-8388608-0.945-125x8 1786.920sec. ../results/HRP3_1-8388608-0.945-125x8_20161109-161152.gz
HRP3_1-8388608-0.950-125x8 1798.446sec. ../results/HRP3_1-8388608-0.950-125x8_20161109-164228.gz
HRP3_1-8388608-0.990-125x8 2753.285sec. ../results/HRP3_1-8388608-0.990-125x8_20161109-172901.gz
HRP4_1-8388608-0.550-125x8 1313.006sec. ../results/HRP4_1-8388608-0.550-125x8_20161109-175133.gz
HRP4_1-8388608-0.600-125x8 1342.353sec. ../results/HRP4_1-8388608-0.600-125x8_20161109-181446.gz
HRP4_1-8388608-0.650-125x8 1375.779sec. ../results/HRP4_1-8388608-0.650-125x8_20161109-183835.gz
HRP4_1-8388608-0.700-125x8 1417.405sec. ../results/HRP4_1-8388608-0.700-125x8_20161109-190300.gz
HRP4_1-8388608-0.750-125x8 1471.192sec. ../results/HRP4_1-8388608-0.750-125x8_20161109-192813.gz
HRP4_1-8388608-0.800-125x8 1529.610sec. ../results/HRP4_1-8388608-0.800-125x8_20161109-195425.gz
HRP4_1-8388608-0.850-125x8 1609.494sec. ../results/HRP4_1-8388608-0.850-125x8_20161109-202153.gz
HRP4_1-8388608-0.900-125x8 1697.865sec. ../results/HRP4_1-8388608-0.900-125x8_20161109-205053.gz
HRP4_1-8388608-0.910-125x8 1707.985sec. ../results/HRP4_1-8388608-0.910-125x8_20161109-212000.gz
HRP4_1-8388608-0.915-125x8 1725.050sec. ../results/HRP4_1-8388608-0.915-125x8_20161109-214924.gz
HRP4_1-8388608-0.920-125x8 1737.762sec. ../results/HRP4_1-8388608-0.920-125x8_20161109-221901.gz
HRP4_1-8388608-0.925-125x8 1751.786sec. ../results/HRP4_1-8388608-0.925-125x8_20161109-224851.gz
HRP4_1-8388608-0.930-125x8 1761.887sec. ../results/HRP4_1-8388608-0.930-125x8_20161109-231853.gz
HRP4_1-8388608-0.935-125x8 1780.897sec. ../results/HRP4_1-8388608-0.935-125x8_20161109-234912.gz
HRP4_1-8388608-0.940-125x8 1798.063sec. ../results/HRP4_1-8388608-0.940-125x8_20161110-001948.gz
HRP4_1-8388608-0.945-125x8 1811.657sec. ../results/HRP4_1-8388608-0.945-125x8_20161110-005038.gz
HRP4_1-8388608-0.950-125x8 1831.093sec. ../results/HRP4_1-8388608-0.950-125x8_20161110-012148.gz
HRP4_1-8388608-0.990-125x8 2651.580sec. ../results/HRP4_1-8388608-0.990-125x8_20161110-020638.gz

Weierstrass experiment -- $\lambda_0 = 1.2$


In [17]:
from crossing_tree.processes import WeierstrassFunction

seed = master_seeds.pop()
print("Using seed %X"%(seed,))
random_state = np.random.RandomState(seed)

skip = False


Using seed E0AD0A26

Setup


In [18]:
n_samples, lambda_0 = 1 << 23, 1.2
methods = ["med", "std", "iqr", "mean",]
holder_exponents = [0.500, 0.550, 0.600, 0.650, 0.700, 0.750, 0.800, 0.850, 0.900,
                    0.910, 0.915, 0.920, 0.925, 0.930, 0.935, 0.940, 0.945, 0.950,
                    0.990,]
n_per_batch, n_batches = 125, 8

Run the experiment for the random Weierstrass function $W_H \colon [0, 1] \to \mathbb{R}$: $$ W_H(t) = \sum_{k\geq 0} \lambda_0^{-k H} \bigl(\cos(2 \pi \lambda_0^k t + \phi_k) - \cos \phi_k\bigr)\,, $$ with $(\phi_k)_{k\geq0} \sim \mathbb{U}[0, 2\pi]$ and $\lambda_0 > 1$ the fundamental harmonic.
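
For intuition, the truncated series can be evaluated directly with NumPy (a rough sketch of the formula above, not the library's WeierstrassFunction implementation; the cut-off rule and parameters are illustrative):

# Hypothetical direct evaluation of the truncated random Weierstrass series.
def weierstrass_demo(n_samples, holder, lambda_0, random_state):
    t = np.linspace(0, 1, num=n_samples)
    # Truncate once lambda_0**k exceeds the sampling resolution.
    k_max = int(np.ceil(np.log(n_samples) / np.log(lambda_0)))
    k = np.arange(k_max + 1)
    phi = random_state.uniform(0, 2 * np.pi, size=k.shape)
    terms = lambda_0 ** (-k[:, None] * holder) * (
        np.cos(2 * np.pi * lambda_0 ** k[:, None] * t[None] + phi[:, None])
        - np.cos(phi[:, None]))
    return t, terms.sum(axis=0)

t_demo, W_demo = weierstrass_demo(1 << 10, holder=0.7, lambda_0=1.2,
                                  random_state=np.random.RandomState(42))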


In [19]:
if not skip:
    par_ = Parallel(n_jobs=-1, verbose=0)
    for holder_ in holder_exponents:
        name_ = "WEI_%g-%d-%0.3f-%dx%d"%(lambda_0, n_samples, holder_, n_per_batch, n_batches)
        print(name_, end=" ")

        # Schedule the experiments
        seeds = random_state.randint(MAX_RAND_SEED, size=(n_batches,))
        schedule_ = (delayed(experiment)(seed_, n_per_batch, methods,
                                         WeierstrassFunction(N=n_samples,
                                                             lambda_0=lambda_0,
                                                             holder=holder_,
                                                             random_state=seed_,
                                                             one_sided=False))
                                    for seed_ in seeds)

        # Run the experiment and collect the results
        tick_ = time.time()
        experiment_ids = list()
        results_ = {method: list() for method in methods}
        for id_, dict_ in par_(schedule_):
            experiment_ids.append(id_)
            for method in methods:
                results_[method].extend(dict_[method])
        results = {key_: collect_structural_statistics(list_)
                   for key_, list_ in results_.items()}
        tock_ = time.time()

        # Save the results and log
        filename_ = save((tick_, tock_, experiment_ids, results), OUTPUT_PATH, name_)
        print("%0.3fsec."%(tock_ - tick_,), filename_)


WEI_1.2-8388608-0.500-125x8 13288.305sec. ../results/WEI_1.2-8388608-0.500-125x8_20161110-054850.gz
WEI_1.2-8388608-0.550-125x8 13345.497sec. ../results/WEI_1.2-8388608-0.550-125x8_20161110-093202.gz
WEI_1.2-8388608-0.600-125x8 13390.412sec. ../results/WEI_1.2-8388608-0.600-125x8_20161110-131601.gz
WEI_1.2-8388608-0.650-125x8 13429.911sec. ../results/WEI_1.2-8388608-0.650-125x8_20161110-170034.gz
WEI_1.2-8388608-0.700-125x8 13551.845sec. ../results/WEI_1.2-8388608-0.700-125x8_20161110-204707.gz
WEI_1.2-8388608-0.750-125x8 13604.540sec. ../results/WEI_1.2-8388608-0.750-125x8_20161111-003431.gz
WEI_1.2-8388608-0.800-125x8 13661.736sec. ../results/WEI_1.2-8388608-0.800-125x8_20161111-042252.gz
WEI_1.2-8388608-0.850-125x8 13766.987sec. ../results/WEI_1.2-8388608-0.850-125x8_20161111-081257.gz
WEI_1.2-8388608-0.900-125x8 13917.185sec. ../results/WEI_1.2-8388608-0.900-125x8_20161111-120532.gz
WEI_1.2-8388608-0.910-125x8 13925.786sec. ../results/WEI_1.2-8388608-0.910-125x8_20161111-155816.gz
WEI_1.2-8388608-0.915-125x8 13833.949sec. ../results/WEI_1.2-8388608-0.915-125x8_20161111-194928.gz
WEI_1.2-8388608-0.920-125x8 13777.711sec. ../results/WEI_1.2-8388608-0.920-125x8_20161111-233944.gz
WEI_1.2-8388608-0.925-125x8 13778.000sec. ../results/WEI_1.2-8388608-0.925-125x8_20161112-032958.gz
WEI_1.2-8388608-0.930-125x8 13784.810sec. ../results/WEI_1.2-8388608-0.930-125x8_20161112-072020.gz
WEI_1.2-8388608-0.935-125x8 13807.603sec. ../results/WEI_1.2-8388608-0.935-125x8_20161112-111107.gz
WEI_1.2-8388608-0.940-125x8 13819.266sec. ../results/WEI_1.2-8388608-0.940-125x8_20161112-150202.gz
WEI_1.2-8388608-0.945-125x8 13829.962sec. ../results/WEI_1.2-8388608-0.945-125x8_20161112-185307.gz
WEI_1.2-8388608-0.950-125x8 13843.953sec. ../results/WEI_1.2-8388608-0.950-125x8_20161112-224428.gz
WEI_1.2-8388608-0.990-125x8 14000.682sec. ../results/WEI_1.2-8388608-0.990-125x8_20161113-023826.gz

Additional experiments

Hermite process experiment: with downsampling


In [20]:
from crossing_tree.processes import HermiteProcess

seed = master_seeds.pop()
print("Using seed %X"%(seed,))
random_state = np.random.RandomState(seed)

skip = False


Using seed E32EA61D

Setup


In [21]:
n_samples, n_downsample = 1 << 19, 1 << 4
degrees, methods = [2, 3, 4], ["med", "std", "iqr", "mean",]
hurst_exponents = [       0.550, 0.600, 0.650, 0.700, 0.750, 0.800, 0.850, 0.900,
                   0.910, 0.915, 0.920, 0.925, 0.930, 0.935, 0.940, 0.945, 0.950,
                   0.990,]
n_per_batch, n_batches = 125, 8

Run the experiment for the Hermite process.


In [22]:
if not skip:
    par_ = Parallel(n_jobs=-1, verbose=0)
    for degree_ in degrees:
        for hurst_ in hurst_exponents:
            name_ = "HRP%d_%d-%d-%0.3f-%dx%d"%(degree_, n_downsample, n_samples, hurst_, n_per_batch, n_batches)
            print(name_, end=" ")

            # Schedule the experiments
            seeds = random_state.randint(MAX_RAND_SEED, size=(n_batches,))
            schedule_ = (delayed(experiment)(seed_, n_per_batch, methods,
                                             HermiteProcess(N=n_samples,
                                                            degree=degree_,
                                                            n_downsample=n_downsample,
                                                            hurst=hurst_,
                                                            random_state=seed_))
                                        for seed_ in seeds)

            # Run the experiment and collect the results
            tick_ = time.time()
            experiment_ids = list()
            results_ = {method: list() for method in methods}
            for id_, dict_ in par_(schedule_):
                experiment_ids.append(id_)
                for method in methods:
                    results_[method].extend(dict_[method])
            results = {key_: collect_structural_statistics(list_)
                       for key_, list_ in results_.items()}
            tock_ = time.time()

            # Save the results and log
            filename_ = save((tick_, tock_, experiment_ids, results), OUTPUT_PATH, name_)
            print("%0.3fsec."%(tock_ - tick_,), filename_)


HRP2_16-524288-0.550-125x8 408.249sec. ../results/HRP2_16-524288-0.550-125x8_20161113-024553.gz
HRP2_16-524288-0.600-125x8 417.433sec. ../results/HRP2_16-524288-0.600-125x8_20161113-025327.gz
HRP2_16-524288-0.650-125x8 419.657sec. ../results/HRP2_16-524288-0.650-125x8_20161113-030100.gz
HRP2_16-524288-0.700-125x8 415.866sec. ../results/HRP2_16-524288-0.700-125x8_20161113-030827.gz
HRP2_16-524288-0.750-125x8 418.563sec. ../results/HRP2_16-524288-0.750-125x8_20161113-031559.gz
HRP2_16-524288-0.800-125x8 426.054sec. ../results/HRP2_16-524288-0.800-125x8_20161113-032334.gz
HRP2_16-524288-0.850-125x8 430.168sec. ../results/HRP2_16-524288-0.850-125x8_20161113-033113.gz
HRP2_16-524288-0.900-125x8 431.081sec. ../results/HRP2_16-524288-0.900-125x8_20161113-033851.gz
HRP2_16-524288-0.910-125x8 434.201sec. ../results/HRP2_16-524288-0.910-125x8_20161113-034633.gz
HRP2_16-524288-0.915-125x8 437.488sec. ../results/HRP2_16-524288-0.915-125x8_20161113-035418.gz
HRP2_16-524288-0.920-125x8 443.848sec. ../results/HRP2_16-524288-0.920-125x8_20161113-040207.gz
HRP2_16-524288-0.925-125x8 434.932sec. ../results/HRP2_16-524288-0.925-125x8_20161113-040949.gz
HRP2_16-524288-0.930-125x8 436.632sec. ../results/HRP2_16-524288-0.930-125x8_20161113-041731.gz
HRP2_16-524288-0.935-125x8 437.331sec. ../results/HRP2_16-524288-0.935-125x8_20161113-042514.gz
HRP2_16-524288-0.940-125x8 441.858sec. ../results/HRP2_16-524288-0.940-125x8_20161113-043302.gz
HRP2_16-524288-0.945-125x8 441.805sec. ../results/HRP2_16-524288-0.945-125x8_20161113-044051.gz
HRP2_16-524288-0.950-125x8 444.093sec. ../results/HRP2_16-524288-0.950-125x8_20161113-044842.gz
HRP2_16-524288-0.990-125x8 488.234sec. ../results/HRP2_16-524288-0.990-125x8_20161113-045717.gz
HRP3_16-524288-0.550-125x8 455.658sec. ../results/HRP3_16-524288-0.550-125x8_20161113-050519.gz
HRP3_16-524288-0.600-125x8 460.889sec. ../results/HRP3_16-524288-0.600-125x8_20161113-051335.gz
HRP3_16-524288-0.650-125x8 458.261sec. ../results/HRP3_16-524288-0.650-125x8_20161113-052146.gz
HRP3_16-524288-0.700-125x8 459.884sec. ../results/HRP3_16-524288-0.700-125x8_20161113-052959.gz
HRP3_16-524288-0.750-125x8 464.974sec. ../results/HRP3_16-524288-0.750-125x8_20161113-053818.gz
HRP3_16-524288-0.800-125x8 468.294sec. ../results/HRP3_16-524288-0.800-125x8_20161113-054637.gz
HRP3_16-524288-0.850-125x8 471.936sec. ../results/HRP3_16-524288-0.850-125x8_20161113-055457.gz
HRP3_16-524288-0.900-125x8 479.142sec. ../results/HRP3_16-524288-0.900-125x8_20161113-060323.gz
HRP3_16-524288-0.910-125x8 481.161sec. ../results/HRP3_16-524288-0.910-125x8_20161113-061152.gz
HRP3_16-524288-0.915-125x8 481.866sec. ../results/HRP3_16-524288-0.915-125x8_20161113-062019.gz
HRP3_16-524288-0.920-125x8 483.183sec. ../results/HRP3_16-524288-0.920-125x8_20161113-062851.gz
HRP3_16-524288-0.925-125x8 479.245sec. ../results/HRP3_16-524288-0.925-125x8_20161113-063717.gz
HRP3_16-524288-0.930-125x8 483.988sec. ../results/HRP3_16-524288-0.930-125x8_20161113-064548.gz
HRP3_16-524288-0.935-125x8 485.709sec. ../results/HRP3_16-524288-0.935-125x8_20161113-065421.gz
HRP3_16-524288-0.940-125x8 482.878sec. ../results/HRP3_16-524288-0.940-125x8_20161113-070249.gz
HRP3_16-524288-0.945-125x8 486.342sec. ../results/HRP3_16-524288-0.945-125x8_20161113-071123.gz
HRP3_16-524288-0.950-125x8 488.548sec. ../results/HRP3_16-524288-0.950-125x8_20161113-071957.gz
HRP3_16-524288-0.990-125x8 533.592sec. ../results/HRP3_16-524288-0.990-125x8_20161113-072917.gz
HRP4_16-524288-0.550-125x8 502.737sec. ../results/HRP4_16-524288-0.550-125x8_20161113-073806.gz
HRP4_16-524288-0.600-125x8 508.714sec. ../results/HRP4_16-524288-0.600-125x8_20161113-074707.gz
HRP4_16-524288-0.650-125x8 509.853sec. ../results/HRP4_16-524288-0.650-125x8_20161113-075614.gz
HRP4_16-524288-0.700-125x8 509.548sec. ../results/HRP4_16-524288-0.700-125x8_20161113-080515.gz
HRP4_16-524288-0.750-125x8 513.141sec. ../results/HRP4_16-524288-0.750-125x8_20161113-081420.gz
HRP4_16-524288-0.800-125x8 515.822sec. ../results/HRP4_16-524288-0.800-125x8_20161113-082324.gz
HRP4_16-524288-0.850-125x8 522.850sec. ../results/HRP4_16-524288-0.850-125x8_20161113-083235.gz
HRP4_16-524288-0.900-125x8 528.553sec. ../results/HRP4_16-524288-0.900-125x8_20161113-084151.gz
HRP4_16-524288-0.910-125x8 524.890sec. ../results/HRP4_16-524288-0.910-125x8_20161113-085101.gz
HRP4_16-524288-0.915-125x8 530.642sec. ../results/HRP4_16-524288-0.915-125x8_20161113-090019.gz
HRP4_16-524288-0.920-125x8 531.237sec. ../results/HRP4_16-524288-0.920-125x8_20161113-090938.gz
HRP4_16-524288-0.925-125x8 529.015sec. ../results/HRP4_16-524288-0.925-125x8_20161113-091854.gz
HRP4_16-524288-0.930-125x8 528.211sec. ../results/HRP4_16-524288-0.930-125x8_20161113-092810.gz
HRP4_16-524288-0.935-125x8 532.982sec. ../results/HRP4_16-524288-0.935-125x8_20161113-093729.gz
HRP4_16-524288-0.940-125x8 533.696sec. ../results/HRP4_16-524288-0.940-125x8_20161113-094650.gz
HRP4_16-524288-0.945-125x8 531.881sec. ../results/HRP4_16-524288-0.945-125x8_20161113-095609.gz
HRP4_16-524288-0.950-125x8 539.908sec. ../results/HRP4_16-524288-0.950-125x8_20161113-100535.gz
HRP4_16-524288-0.990-125x8 593.903sec. ../results/HRP4_16-524288-0.990-125x8_20161113-101556.gz

Weierstrass experiment -- $\lambda_0 = 3$


In [23]:
from crossing_tree.processes import WeierstrassFunction

seed = master_seeds.pop()
print("Using seed %X"%(seed,))
random_state = np.random.RandomState(seed)

skip = True


Using seed 16C53C83

Setup


In [24]:
n_samples, lambda_0 = 1 << 23, 3.0
methods = ["med", "std", "iqr", "mean",]
holder_exponents = [0.500, 0.550, 0.600, 0.650, 0.700, 0.750, 0.800, 0.850, 0.900,
                    0.910, 0.915, 0.920, 0.925, 0.930, 0.935, 0.940, 0.945, 0.950,
                    0.990,]
n_per_batch, n_batches = 125, 8

Run the experiment for the random Weierstrass function $W_H \colon [0, 1] \to \mathbb{R}$: $$ W_H(t) = \sum_{k\geq 0} \lambda_0^{-k H} \bigl(\cos(2 \pi \lambda_0^k t + \phi_k) - \cos \phi_k\bigr)\,, $$ with $(\phi_k)_{k\geq0} \sim \mathbb{U}[0, 2\pi]$ and $\lambda_0 > 1$ the fundamental harmonic.


In [25]:
if not skip:
    par_ = Parallel(n_jobs=-1, verbose=0)
    for holder_ in holder_exponents:
        name_ = "WEI_%g-%d-%0.3f-%dx%d"%(lambda_0, n_samples, holder_, n_per_batch, n_batches)
        print(name_, end=" ")

        # Schedule the experiments
        seeds = random_state.randint(MAX_RAND_SEED, size=(n_batches,))
        schedule_ = (delayed(experiment)(seed_, n_per_batch, methods,
                                         WeierstrassFunction(N=n_samples,
                                                             lambda_0=lambda_0,
                                                             holder=holder_,
                                                             random_state=seed_,
                                                             one_sided=False))
                                    for seed_ in seeds)

        # Run the experiment and collect the results
        tick_ = time.time()
        experiment_ids = list()
        results_ = {method: list() for method in methods}
        for id_, dict_ in par_(schedule_):
            experiment_ids.append(id_)
            for method in methods:
                results_[method].extend(dict_[method])
        results = {key_: collect_structural_statistics(list_)
                   for key_, list_ in results_.items()}
        tock_ = time.time()

        # Save the results and log
        filename_ = save((tick_, tock_, experiment_ids, results), OUTPUT_PATH, name_)
        print("%0.3fsec."%(tock_ - tick_,), filename_)

Weierstrass experiment -- $\lambda_0 = 1.7$


In [26]:
from crossing_tree.processes import WeierstrassFunction

seed = master_seeds.pop()
print("Using seed %X"%(seed,))
random_state = np.random.RandomState(seed)

skip = True


Using seed AEF0422D

Setup


In [27]:
n_samples, lambda_0 = 1 << 23, 1.7
methods = ["med", "std", "iqr", "mean",]
holder_exponents = [0.500, 0.550, 0.600, 0.650, 0.700, 0.750, 0.800, 0.850, 0.900,
                    0.910, 0.915, 0.920, 0.925, 0.930, 0.935, 0.940, 0.945, 0.950,
                    0.990,]
n_per_batch, n_batches = 125, 8

Run the experiment for the random Weierstrass function $W_H \colon [0, 1] \to \mathbb{R}$: $$ W_H(t) = \sum_{k\geq 0} \lambda_0^{-k H} \bigl(\cos(2 \pi \lambda_0^k t + \phi_k) - \cos \phi_k\bigr)\,, $$ with $(\phi_k)_{k\geq0} \sim \mathbb{U}[0, 2\pi]$ and $\lambda_0 > 1$ the fundamental harmonic.


In [28]:
if not skip:
    par_ = Parallel(n_jobs=-1, verbose=0)
    for holder_ in holder_exponents:
        name_ = "WEI_%g-%d-%0.3f-%dx%d"%(lambda_0, n_samples, holder_, n_per_batch, n_batches)
        print(name_, end=" ")

        # Schedule the experiments
        seeds = random_state.randint(MAX_RAND_SEED, size=(n_batches,))
        schedule_ = (delayed(experiment)(seed_, n_per_batch, methods,
                                         WeierstrassFunction(N=n_samples,
                                                             lambda_0=lambda_0,
                                                             holder=holder_,
                                                             random_state=seed_,
                                                             one_sided=False))
                                    for seed_ in seeds)

        # Run the experiment and collect the results
        tick_ = time.time()
        experiment_ids = list()
        results_ = {method: list() for method in methods}
        for id_, dict_ in par_(schedule_):
            experiment_ids.append(id_)
            for method in methods:
                results_[method].extend(dict_[method])
        results = {key_: collect_structural_statistics(list_)
                   for key_, list_ in results_.items()}
        tock_ = time.time()

        # Save the results and log
        filename_ = save((tick_, tock_, experiment_ids, results), OUTPUT_PATH, name_)
        print("%0.3fsec."%(tock_ - tick_,), filename_)

fBM experiment: extra-long sample paths ($2^{25}$ samples)


In [29]:
from crossing_tree.processes import FractionalBrownianMotion

seed = master_seeds.pop()
print("Using seed %X"%(seed,))
random_state = np.random.RandomState(seed)

skip = True


Using seed 846D0C7A

Setup


In [30]:
n_samples, methods = 1 << 25, ["med", "std", "iqr", "mean",]
hurst_exponents = [0.500, 0.550, 0.600, 0.650, 0.700, 0.750, 0.800, 0.850, 0.900,
                   0.910, 0.915, 0.920, 0.925, 0.930, 0.935, 0.940, 0.945, 0.950,
                   0.990,]
n_per_batch, n_batches, n_threads = 334, 3, 4

Run the experiment for the Fractional Brownian Motion.


In [31]:
if not skip:
    par_ = Parallel(n_jobs=-1, verbose=0)
    for hurst_ in hurst_exponents:
        name_ = "FBM-%d-%0.3f-%dx%d"%(n_samples, hurst_, n_per_batch, n_batches)
        print(name_, end=" ")

        # Schedule the experiments
        seeds = random_state.randint(MAX_RAND_SEED, size=(n_batches,))
        schedule_ = (delayed(experiment)(seed_, n_per_batch, methods,
                                         FractionalBrownianMotion(N=n_samples,
                                                                  hurst=hurst_,
                                                                  random_state=seed_,
                                                                  n_threads=n_threads))
                                    for seed_ in seeds)

        # Run the experiment and collect the results
        tick_ = time.time()
        experiment_ids = list()
        results_ = {method: list() for method in methods}
        for id_, dict_ in par_(schedule_):
            experiment_ids.append(id_)
            for method in methods:
                results_[method].extend(dict_[method])
        results = {key_: collect_structural_statistics(list_)
                   for key_, list_ in results_.items()}
        tock_ = time.time()

        # Save the results and log
        filename_ = save((tick_, tock_, experiment_ids, results), OUTPUT_PATH, name_)
        print("%0.3fsec."%(tock_ - tick_,), filename_)


FBM-33554432-0.500-334x3 7922.633sec. ../results/FBM-33554432-0.500-334x3_20161108-184220.gz
FBM-33554432-0.550-334x3 8994.173sec. ../results/FBM-33554432-0.550-334x3_20161108-211311.gz
FBM-33554432-0.600-334x3 9671.584sec. ../results/FBM-33554432-0.600-334x3_20161108-235516.gz
FBM-33554432-0.650-334x3 9985.724sec. ../results/FBM-33554432-0.650-334x3_20161109-024230.gz
FBM-33554432-0.700-334x3 10591.847sec. ../results/FBM-33554432-0.700-334x3_20161109-053945.gz
FBM-33554432-0.750-334x3 11290.630sec. ../results/FBM-33554432-0.750-334x3_20161109-084839.gz
FBM-33554432-0.800-334x3 11924.460sec. ../results/FBM-33554432-0.800-334x3_20161109-120805.gz
FBM-33554432-0.850-334x3 12146.259sec. ../results/FBM-33554432-0.850-334x3_20161109-153112.gz
FBM-33554432-0.900-334x3 13212.555sec. ../results/FBM-33554432-0.900-334x3_20161109-191203.gz
FBM-33554432-0.910-334x3 13205.301sec. ../results/FBM-33554432-0.910-334x3_20161109-225250.gz
FBM-33554432-0.915-334x3 13467.640sec. ../results/FBM-33554432-0.915-334x3_20161110-023756.gz
FBM-33554432-0.920-334x3 13369.743sec. ../results/FBM-33554432-0.920-334x3_20161110-062125.gz
FBM-33554432-0.925-334x3 13347.232sec. ../results/FBM-33554432-0.925-334x3_20161110-100430.gz
FBM-33554432-0.930-334x3 13490.713sec. ../results/FBM-33554432-0.930-334x3_20161110-135002.gz
FBM-33554432-0.935-334x3 13567.554sec. ../results/FBM-33554432-0.935-334x3_20161110-173648.gz
FBM-33554432-0.940-334x3 13844.194sec. ../results/FBM-33554432-0.940-334x3_20161110-212810.gz
FBM-33554432-0.945-334x3 13908.407sec. ../results/FBM-33554432-0.945-334x3_20161111-012036.gz
FBM-33554432-0.950-334x3 13572.029sec. ../results/FBM-33554432-0.950-334x3_20161111-050726.gz
FBM-33554432-0.990-334x3 8535.372sec. ../results/FBM-33554432-0.990-334x3_20161111-173449.gz
