In [1]:
%load_ext autoreload
%autoreload 2
%matplotlib inline

from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import scipy as sp
import seaborn as sns
np.set_printoptions(precision=2, linewidth=120)
from copy import copy
from tqdm import *
from drift_qec.Q import *


/Users/yan/.miniconda/lib/python2.7/site-packages/matplotlib/__init__.py:872: UserWarning: axes.color_cycle is deprecated and replaced with axes.prop_cycle; please use the latter.
  warnings.warn(self.msg_depr % (key, alt_key))
  • Original basis Q0
  • Recovered basis Qc (controlled basis)
  • Effected basis Qeff = Qt.T * Q0
  • Use effected basis for error sampling
  • Learn Qt progressively better
  • When data comes in from the Qeff alignment, you must transform it back to the standard basis before averaging it with the existing channel estimate

Error x Time x Cycle_ratio


In [ ]:
# Sweep the number of estimation cycles at a fixed total error budget and
# record how well the channel/basis estimates converge for each
# cycles-to-realignments ratio.
D = 0.01           # drift magnitude per step
N_ERRORS = 1e6     # total error budget per trial (kept float so the CSV name is unchanged)
N_TRIALS = 100
# np.int is a deprecated alias (removed in numpy >= 1.24); the builtin int
# is the correct dtype argument.
N_CYCLES = np.logspace(1, 3, 10).astype(int)
RECORDS = []

def _sweep_record(channel, trial, n, n_cycles, t):
    """One snapshot of estimation error at time t for the given trial settings."""
    return {
        "trial": trial,
        "cycle_length": n,
        "n_cycles": n_cycles,
        "time": t,
        # Frobenius distance between estimated and true channel matrices.
        "Mdist": np.linalg.norm(channel.Mhat - channel.C),
        # Deviation of Qc.T * Q from identity measures basis misalignment.
        "Qdist": np.linalg.norm(np.dot(channel.Qc.T, channel.Q) - np.eye(3)),
    }

for trial in tqdm(range(N_TRIALS)):
    for n_cycles in N_CYCLES:
        n = int(N_ERRORS / n_cycles)   # errors per cycle
        channel = Channel(kx=0.7, ky=0.2, kz=0.1,
                          Q=np.linalg.qr(np.random.randn(3, 3))[0],
                          n=n, d=D)
        # Baseline snapshot before any updates.
        RECORDS.append(_sweep_record(channel, trial, n, n_cycles, 0))
        for cycle in range(n_cycles):
            channel.update()
            RECORDS.append(_sweep_record(channel, trial, n, n_cycles, (cycle + 1) * n))

df = pd.DataFrame(RECORDS)
df.to_csv("{}errorsd{}.csv".format(N_ERRORS, D))

In [ ]:
# Recompute cycle_length from the cycle count; np.int is a deprecated alias
# (removed in numpy >= 1.24), so use the builtin int as the dtype.
df["cycle_length"] = (N_ERRORS / df["n_cycles"]).astype(int)

In [ ]:
# Spot-check the most recent records produced by the sweep above.
df.tail(10)

In [ ]:
# One curve per cycles-to-realignments ratio: mean Mdist over time with a
# +/- 1 std band, on log-log axes.
PAL = sns.color_palette("hls", len(N_CYCLES))
fig, ax = plt.subplots(1, 1, figsize=(8, 6))

for idx, n_cycles in enumerate(N_CYCLES):
    sel = (df["n_cycles"] == n_cycles)
    subdf = df.loc[sel, :]
    v = subdf.groupby("time").mean()
    s = subdf.groupby("time").std()
    t = v.index.values
    y = v["Mdist"].values
    e = s["Mdist"].values
    # Label by column name instead of the fragile positional subdf.iloc[0, 2]
    # (which is cycle_length only because of alphabetical column order and
    # silently changes meaning if the columns ever shift).
    ax.loglog(t, y, label=str(subdf["cycle_length"].iloc[0]), c=PAL[idx])
    ax.fill_between(t, y - e, y + e, alpha=0.1, color=PAL[idx])
plt.title("Recover error over time for varied ratios of cycles to realignments")
plt.xlabel("Time [cycles]")
plt.ylabel("Basis recovery error")
plt.legend()

Regime 1 basis alignment


In [73]:
# Regime 1: moderately anisotropic channel (kx=0.7, ky=0.2, kz=0.1).
# Snapshot the estimation error every N_STEP errors up to MAX_N errors.
D = 0.01            # drift magnitude per step
N_TRIALS = 100
MAX_N = int(1e6)    # total errors simulated per trial
N_STEP = int(1e3)   # errors between snapshots
RECORDS = []
for trial in tqdm(range(N_TRIALS)):
    channel = Channel(kx=0.7, ky=0.2, kz=0.1,
                      Q=np.linalg.qr(np.random.randn(3, 3))[0],
                      n=N_STEP, d=D)
    # Estimated per-axis error probabilities are the singular values of Mhat.
    pxhat, pyhat, pzhat = list(np.linalg.svd(channel.Mhat)[1])
    RECORDS.append({
            "trial": trial,
            "time": 0,
            "Mdist": np.linalg.norm(channel.Mhat-channel.C),
            "Qdist": np.linalg.norm(np.dot(channel.Qc.T, channel.Q) - np.eye(3)),
            "pxval": channel.kx, "pyval": channel.ky, "pzval": channel.kz,
            "pxhat": pxhat, "pyhat": pyhat, "pzhat": pzhat
        })
    # BUG FIX: the original iterated range(0, MAX_N, N_STEP), so the snapshot
    # taken after the first update was labeled time 0, duplicating the
    # pre-update record's timestamp (two time-0 rows per trial are visible in
    # the saved CSVs). Each snapshot is taken after `time` errors have elapsed.
    for time in range(N_STEP, MAX_N + 1, N_STEP):
        channel.update()
        pxhat, pyhat, pzhat = list(np.linalg.svd(channel.Mhat)[1])
        RECORDS.append({
                "trial": trial,
                "time": time,
                "Mdist": np.linalg.norm(channel.Mhat-channel.C),
                "Qdist": np.linalg.norm(np.dot(channel.Qc.T, channel.Q) - np.eye(3)),
                "pxval": channel.kx, "pyval": channel.ky, "pzval": channel.kz,
                "pxhat": pxhat, "pyhat": pyhat, "pzhat": pzhat
            })

df = pd.DataFrame(RECORDS)
df.to_csv("regime1.csv")




In [24]:
# Basis-misalignment (Qdist) for regime 1: mean over trials per time step,
# with a +/- 1 standard deviation band.
df = pd.read_csv("regime1.csv")
qdist_by_time = df.groupby("time")["Qdist"]
center = qdist_by_time.mean()
spread = qdist_by_time.std()

fig, ax = plt.subplots(1, 1, figsize=(8, 6))
times = center.index.values
mid = center.values
err = spread.values
ax.plot(times, mid)
ax.fill_between(times, mid - err, mid + err, alpha=0.25)
plt.ylabel("Measure of orthonormality between $Q_{hat}$ and $Q_{val}$")
plt.xlabel("Time [n_errors]")


Out[24]:
<matplotlib.text.Text at 0x11820c790>

In [44]:
# Channel-estimate error (Mdist) for regime 1 on log-log axes, with a
# +/- 1 standard deviation band.
df = pd.read_csv("regime1.csv")
mdist_by_time = df.groupby("time")["Mdist"]
center = mdist_by_time.mean()
spread = mdist_by_time.std()

fig, ax = plt.subplots(1, 1, figsize=(8, 6))
times = center.index.values
mid = center.values
err = spread.values
ax.loglog(times, mid)
ax.fill_between(times, mid - err, mid + err, alpha=0.25)
plt.ylabel("Norm distance between $M_{hat}$ and $M_{val}$")
plt.xlabel("Time [n_errors]")


Out[44]:
<matplotlib.text.Text at 0x118a31410>

Regime 2 basis alignment


In [72]:
# Regime 2: strongly anisotropic channel (kx=0.985, ky=0.01, kz=0.005).
# Snapshot the estimation error every N_STEP errors up to MAX_N errors.
D = 0.01            # drift magnitude per step
N_TRIALS = 100
MAX_N = int(1e6)    # total errors simulated per trial
N_STEP = int(1e3)   # errors between snapshots
RECORDS = []
for trial in tqdm(range(N_TRIALS)):
    channel = Channel(kx=0.985, ky=0.01, kz=0.005,
                      Q=np.linalg.qr(np.random.randn(3, 3))[0],
                      n=N_STEP, d=D)
    # Estimated per-axis error probabilities are the singular values of Mhat.
    pxhat, pyhat, pzhat = list(np.linalg.svd(channel.Mhat)[1])
    RECORDS.append({
            "trial": trial,
            "time": 0,
            "Mdist": np.linalg.norm(channel.Mhat-channel.C),
            "Qdist": np.linalg.norm(np.dot(channel.Qc.T, channel.Q) - np.eye(3)),
            "pxval": channel.kx, "pyval": channel.ky, "pzval": channel.kz,
            "pxhat": pxhat, "pyhat": pyhat, "pzhat": pzhat
        })
    # BUG FIX: the original iterated range(0, MAX_N, N_STEP), so the snapshot
    # taken after the first update was labeled time 0, duplicating the
    # pre-update record's timestamp. Each snapshot is taken after `time`
    # errors have elapsed.
    for time in range(N_STEP, MAX_N + 1, N_STEP):
        channel.update()
        pxhat, pyhat, pzhat = list(np.linalg.svd(channel.Mhat)[1])
        RECORDS.append({
                "trial": trial,
                "time": time,
                "Mdist": np.linalg.norm(channel.Mhat-channel.C),
                "Qdist": np.linalg.norm(np.dot(channel.Qc.T, channel.Q) - np.eye(3)),
                "pxval": channel.kx, "pyval": channel.ky, "pzval": channel.kz,
                "pxhat": pxhat, "pyhat": pyhat, "pzhat": pzhat
            })

df = pd.DataFrame(RECORDS)
df.to_csv("regime2.csv")




In [41]:
# Basis-misalignment (Qdist) for regime 2: mean over trials per time step.
# Uses the same shaded +/- 1 std band as the sibling convergence plots
# (the original drew dashed y-e / y+e lines here, inconsistent with the
# fill_between style used everywhere else in this notebook).
df = pd.read_csv("regime2.csv")
v = df.groupby("time").mean()["Qdist"]
s = df.groupby("time").std()["Qdist"]

fig, ax = plt.subplots(1, 1, figsize=(8, 6))
t = v.index.values
y = v.values
e = s.values
ax.plot(t, y)
ax.fill_between(t, y - e, y + e, alpha=0.25)
plt.ylabel("Measure of orthonormality between $Q_{hat}$ and $Q_{val}$")
plt.xlabel("Time [n_errors]")


Out[41]:
<matplotlib.text.Text at 0x122118950>

In [39]:
# Channel-estimate error (Mdist) for regime 2 on log-log axes, with a
# +/- 1 standard deviation band.
df = pd.read_csv("regime2.csv")
mdist_by_time = df.groupby("time")["Mdist"]
center = mdist_by_time.mean()
spread = mdist_by_time.std()

fig, ax = plt.subplots(1, 1, figsize=(8, 6))
times = center.index.values
mid = center.values
err = spread.values
ax.loglog(times, mid)
ax.fill_between(times, mid - err, mid + err, alpha=0.25)
plt.ylabel("Norm distance between $M_{hat}$ and $M_{val}$")
plt.xlabel("Time [n_errors]")


Out[39]:
<matplotlib.text.Text at 0x1179a4890>

The only thing that matters: effective error probabilities


In [128]:
# Compare convergence of the effective error-probability estimates for both
# regimes (rows) and each Pauli axis (columns), with a log-log linear fit.
# The original duplicated the per-regime plotting code twice; it is now a
# single loop over the two regimes.
df1 = pd.read_csv("regime1_1e3_1e6.csv")
df2 = pd.read_csv("regime2_1e3_1e6.csv")
for regime_df in (df1, df2):
    for p in ("x", "y", "z"):
        # |true - estimated| effective error probability per axis.
        regime_df["dp" + p] = np.abs(regime_df["p{}val".format(p)] -
                                     regime_df["p{}hat".format(p)])

fig, axs = plt.subplots(2, 3, figsize=(12, 8), sharey=True, sharex=True,
                        tight_layout={"h_pad": 1.0, "rect": [0.0, 0.0, 1.0, 0.95]})
for row, regime_df in enumerate((df1, df2)):
    v = regime_df.groupby("time").mean()
    s = regime_df.groupby("time").std()
    for idx, stat in enumerate(["dpx", "dpy", "dpz"]):
        t = v[stat].index.values
        y = v[stat].values
        e = s[stat].values
        # Fit log(dp) ~ intercept + slope * log(n), skipping the time-0 row.
        x = np.log(v.loc[1:, stat].index.values)
        logy = np.log(v.loc[1:, stat].values)
        reg = sp.stats.linregress(x, logy)
        fitted = np.exp(reg.intercept + reg.slope * x)
        color = sns.color_palette()[idx]
        axs[row, idx].semilogy(t, y, ls="", marker=".", color=color, alpha=0.05)
        axs[row, idx].semilogy(t, y + e, ls="--", color=color)
        axs[row, idx].semilogy(t[1:], fitted, ls="-", color=color,
                               label="{} = {:0.2f} e^({:0.2f}*n)".format(stat, np.exp(reg.intercept), reg.slope))
        if row == 0:
            axs[row, idx].set_title(stat)
        else:
            axs[row, idx].set_xlabel("Number of errors")
        axs[row, idx].legend(frameon=True)

fig.suptitle("Average difference in effective error probability (steps are 1e3, max is 1e6)")
axs[0, 0].set_ylabel("kx=0.7, ky=0.2, kz=0.1")
axs[1, 0].set_ylabel("kx=0.985, ky=0.01, kz=0.005")
fig.savefig("dp_1e3_1e6.pdf")


Out[128]:
<matplotlib.text.Text at 0x150257c10>

In [130]:
df1 = pd.read_csv("regime1_1e5_1e8.csv")
df1["dpx"] = np.abs(df1["pxval"] - df1["pxhat"])
df1["dpy"] = np.abs(df1["pyval"] - df1["pyhat"])
df1["dpz"] = np.abs(df1["pzval"] - df1["pzhat"])

v1 = df1.groupby("time").mean()
s1 = df1.groupby("time").std()

df2 = pd.read_csv("regime2_1e5_1e8.csv")
df2["dpx"] = np.abs(df2["pxval"] - df2["pxhat"])
df2["dpy"] = np.abs(df2["pyval"] - df2["pyhat"])
df2["dpz"] = np.abs(df2["pzval"] - df2["pzhat"])

v2 = df2.groupby("time").mean()
s2 = df2.groupby("time").std()


fig, axs = plt.subplots(2, 3, figsize=(12, 8), sharey=True, sharex=True,
                        tight_layout={"h_pad": 1.0, "rect": [0.0, 0.0, 1.0, 0.95]})
for idx, stat in enumerate(["dpx", "dpy", "dpz"]):
    t1 = v1[stat].index.values
    y1 = v1[stat].values
    e1 = s1[stat].values
    x = np.log(v1.loc[1:, stat].index.values)
    y = np.log(v1.loc[1:, stat].values)
    reg = sp.stats.linregress(x, y)
    fitted = np.exp(reg.intercept + reg.slope * x)
    axs[0, idx].semilogy(t1, y1, ls="", marker=".", color=sns.color_palette()[idx], alpha=0.05)
    axs[0, idx].semilogy(t1, y1+e1, ls="--", color=sns.color_palette()[idx])
    axs[0, idx].semilogy(t1[1:], fitted, ls="-", color=sns.color_palette()[idx],
                         label="{} = {:0.2f} e^({:0.2f}*n)".format(stat, np.exp(reg.intercept), reg.slope))
    axs[0, idx].set_title(stat)
    axs[0, idx].legend(frameon=True)
    
    t2 = v2[stat].index.values
    y2 = v2[stat].values
    e2 = s2[stat].values
    x = np.log(v2.loc[1:, stat].index.values)
    y = np.log(v2.loc[1:, stat].values)
    reg = sp.stats.linregress(x, y)
    fitted = np.exp(reg.intercept + reg.slope * x)
    axs[1, idx].semilogy(t2, y2, ls="", marker=".", color=sns.color_palette()[idx], alpha=0.05)
    axs[1, idx].semilogy(t2, y2+e2, ls="--", color=sns.color_palette()[idx])
    axs[1, idx].semilogy(t2[1:], fitted, ls="-", color=sns.color_palette()[idx],
                         label="{} = {:0.2f} e^({:0.2f}*n)".format(stat, np.exp(reg.intercept), reg.slope))
    axs[1, idx].set_xlabel("Number of errors")
    axs[1, idx].legend(frameon=True)

fig.suptitle("Average difference in effective error probability (steps are 1e5, max is 1e8)")
axs[0, 0].set_ylabel("kx=0.7, ky=0.2, kz=0.1")
axs[1, 0].set_ylabel("kx=0.985, ky=0.01, kz=0.005")
fig.savefig("dp_1e5_1e8.pdf")



In [139]:
sel = (df1["pxhat"] + df1["pyhat"] + df1["pzhat"]) != 1.0
df1.loc[sel, :]


Out[139]:
Unnamed: 0 Mdist Qdist pxhat pxval pyhat pyval pzhat pzval time trial dpx dpy dpz
0 0 0.734847 2.803587 0.000000 0.7 0.000000 0.2 0.000000 0.1 0 0 0.700000 0.200000 0.100000
1 1 0.405185 2.370878 0.470949 0.7 0.118071 0.2 0.089020 0.1 0 0 0.229051 0.081929 0.010980
2 2 0.242568 1.763047 0.571959 0.7 0.076309 0.2 0.018399 0.1 100000 0 0.128041 0.123691 0.081601
3 3 0.201645 1.169681 0.575882 0.7 0.119263 0.2 0.054855 0.1 200000 0 0.124118 0.080737 0.045145
4 4 0.136823 0.133849 0.608238 0.7 0.175259 0.2 0.016503 0.1 300000 0 0.091762 0.024741 0.083497
5 5 0.117847 0.280169 0.641452 0.7 0.158583 0.2 0.033298 0.1 400000 0 0.058548 0.041417 0.066702
6 6 0.142610 0.597606 0.650735 0.7 0.190954 0.2 0.015455 0.1 500000 0 0.049265 0.009046 0.084545
7 7 0.142894 0.715272 0.656896 0.7 0.194477 0.2 0.023627 0.1 600000 0 0.043104 0.005523 0.076373
8 8 0.122484 0.597707 0.664567 0.7 0.193098 0.2 0.031224 0.1 700000 0 0.035433 0.006902 0.068776
9 9 0.157521 0.854233 0.689928 0.7 0.192033 0.2 0.018039 0.1 800000 0 0.010072 0.007967 0.081961
10 10 0.147476 0.623534 0.688467 0.7 0.203968 0.2 0.016655 0.1 900000 0 0.011533 0.003968 0.083345
11 11 0.107013 0.421520 0.691421 0.7 0.193794 0.2 0.031452 0.1 1000000 0 0.008579 0.006206 0.068548
12 12 0.093844 0.455179 0.685097 0.7 0.195639 0.2 0.042341 0.1 1100000 0 0.014903 0.004361 0.057659
13 13 0.083185 0.414504 0.682582 0.7 0.196932 0.2 0.049058 0.1 1200000 0 0.017418 0.003068 0.050942
14 14 0.075731 0.406774 0.694610 0.7 0.184113 0.2 0.054611 0.1 1300000 0 0.005390 0.015887 0.045389
15 15 0.087235 0.589019 0.699738 0.7 0.180037 0.2 0.057725 0.1 1400000 0 0.000262 0.019963 0.042275
16 16 0.083831 0.564597 0.699602 0.7 0.182345 0.2 0.059230 0.1 1500000 0 0.000398 0.017655 0.040770
17 17 0.090723 0.638937 0.691377 0.7 0.193235 0.2 0.059833 0.1 1600000 0 0.008623 0.006765 0.040167
18 18 0.072302 0.426226 0.695658 0.7 0.192464 0.2 0.059246 0.1 1700000 0 0.004342 0.007536 0.040754
19 19 0.073315 0.493670 0.697475 0.7 0.189278 0.2 0.063247 0.1 1800000 0 0.002525 0.010722 0.036753
20 20 0.080918 0.521957 0.679344 0.7 0.209560 0.2 0.063477 0.1 1900000 0 0.020656 0.009560 0.036523
21 21 0.076801 0.472768 0.679009 0.7 0.211069 0.2 0.064468 0.1 2000000 0 0.020991 0.011069 0.035532
22 22 0.057387 0.389389 0.687137 0.7 0.198044 0.2 0.071340 0.1 2100000 0 0.012863 0.001956 0.028660
23 23 0.043781 0.256340 0.695485 0.7 0.188706 0.2 0.074143 0.1 2200000 0 0.004515 0.011294 0.025857
24 24 0.052466 0.332701 0.683702 0.7 0.203906 0.2 0.072393 0.1 2300000 0 0.016298 0.003906 0.027607
25 25 0.042800 0.252064 0.694627 0.7 0.193023 0.2 0.073889 0.1 2400000 0 0.005373 0.006977 0.026111
26 26 0.040362 0.178399 0.694569 0.7 0.196299 0.2 0.072095 0.1 2500000 0 0.005431 0.003701 0.027905
27 27 0.038396 0.198470 0.690274 0.7 0.199408 0.2 0.074603 0.1 2600000 0 0.009726 0.000592 0.025397
28 28 0.041859 0.261097 0.688235 0.7 0.201361 0.2 0.075921 0.1 2700000 0 0.011765 0.001361 0.024079
29 29 0.045411 0.268792 0.682186 0.7 0.208296 0.2 0.076184 0.1 2800000 0 0.017814 0.008296 0.023816
... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
100070 100070 0.017026 0.085084 0.695836 0.7 0.204535 0.2 0.098600 0.1 97000000 99 0.004164 0.004535 0.001400
100071 100071 0.016999 0.085057 0.695816 0.7 0.204545 0.2 0.098611 0.1 97100000 99 0.004184 0.004545 0.001389
100072 100072 0.016657 0.082861 0.695990 0.7 0.204513 0.2 0.098470 0.1 97200000 99 0.004010 0.004513 0.001530
100073 100073 0.016768 0.082856 0.695959 0.7 0.204556 0.2 0.098460 0.1 97300000 99 0.004041 0.004556 0.001540
100074 100074 0.016417 0.082388 0.696094 0.7 0.204407 0.2 0.098474 0.1 97400000 99 0.003906 0.004407 0.001526
100075 100075 0.016919 0.082781 0.695941 0.7 0.204594 0.2 0.098441 0.1 97500000 99 0.004059 0.004594 0.001559
100076 100076 0.016599 0.081418 0.696083 0.7 0.204522 0.2 0.098373 0.1 97600000 99 0.003917 0.004522 0.001627
100077 100077 0.016223 0.081247 0.696238 0.7 0.204323 0.2 0.098417 0.1 97700000 99 0.003762 0.004323 0.001583
100078 100078 0.016115 0.081995 0.696305 0.7 0.204171 0.2 0.098504 0.1 97800000 99 0.003695 0.004171 0.001496
100079 100079 0.015962 0.081791 0.696378 0.7 0.204088 0.2 0.098514 0.1 97900000 99 0.003622 0.004088 0.001486
100080 100080 0.016186 0.081694 0.696341 0.7 0.204163 0.2 0.098477 0.1 98000000 99 0.003659 0.004163 0.001523
100081 100081 0.015776 0.081738 0.696518 0.7 0.203918 0.2 0.098547 0.1 98100000 99 0.003482 0.003918 0.001453
100082 100082 0.015908 0.081639 0.696431 0.7 0.204028 0.2 0.098525 0.1 98200000 99 0.003569 0.004028 0.001475
100083 100083 0.015951 0.081962 0.696411 0.7 0.204020 0.2 0.098554 0.1 98300000 99 0.003589 0.004020 0.001446
100084 100084 0.015981 0.081228 0.696403 0.7 0.204091 0.2 0.098492 0.1 98400000 99 0.003597 0.004091 0.001508
100085 100085 0.015984 0.082150 0.696422 0.7 0.203990 0.2 0.098575 0.1 98500000 99 0.003578 0.003990 0.001425
100086 100086 0.015768 0.083507 0.696490 0.7 0.203768 0.2 0.098730 0.1 98600000 99 0.003510 0.003768 0.001270
100087 100087 0.015789 0.084627 0.696470 0.7 0.203689 0.2 0.098830 0.1 98700000 99 0.003530 0.003689 0.001170
100088 100088 0.015692 0.085226 0.696510 0.7 0.203579 0.2 0.098901 0.1 98800000 99 0.003490 0.003579 0.001099
100089 100089 0.015699 0.084009 0.696510 0.7 0.203680 0.2 0.098801 0.1 98900000 99 0.003490 0.003680 0.001199
100090 100090 0.015747 0.081899 0.696516 0.7 0.203859 0.2 0.098617 0.1 99000000 99 0.003484 0.003859 0.001383
100091 100091 0.015660 0.082230 0.696513 0.7 0.203813 0.2 0.098667 0.1 99100000 99 0.003487 0.003813 0.001333
100092 100092 0.015618 0.083743 0.696561 0.7 0.203622 0.2 0.098810 0.1 99200000 99 0.003439 0.003622 0.001190
100093 100093 0.015531 0.083417 0.696589 0.7 0.203604 0.2 0.098802 0.1 99300000 99 0.003411 0.003604 0.001198
100094 100094 0.015548 0.083617 0.696606 0.7 0.203568 0.2 0.098823 0.1 99400000 99 0.003394 0.003568 0.001177
100095 100095 0.015416 0.083810 0.696613 0.7 0.203519 0.2 0.098865 0.1 99500000 99 0.003387 0.003519 0.001135
100096 100096 0.015405 0.081898 0.696665 0.7 0.203625 0.2 0.098707 0.1 99600000 99 0.003335 0.003625 0.001293
100097 100097 0.015493 0.081550 0.696605 0.7 0.203724 0.2 0.098670 0.1 99700000 99 0.003395 0.003724 0.001330
100098 100098 0.015592 0.082688 0.696520 0.7 0.203719 0.2 0.098761 0.1 99800000 99 0.003480 0.003719 0.001239
100099 100099 0.015484 0.082322 0.696565 0.7 0.203686 0.2 0.098750 0.1 99900000 99 0.003435 0.003686 0.001250

100100 rows × 14 columns

Constant p_uncorr advantages in regimes 1 and 2


In [132]:
# Constant p_uncorr advantage ~ (1.5 / (1 - k_dominant))^2 for each regime.
# Parenthesized print works identically under Python 2 (prints the
# parenthesized expression) and Python 3 (calls the function), so this cell
# survives a kernel upgrade.
print("Regime 1 advantage: {}".format((1.5 / (1.0 - 0.7)) ** 2))
print("Regime 2 advantage: {}".format((1.5 / (1.0 - 0.985)) ** 2))


Regime 1 advantage: 25.0
Regime 2 advantage: 10000.0

Regime 2 lands kz within 0.002 of its value of 0.005. That's good!

TRY UPDATING every 10 steps

This will be indicative of whether drifting case is worth it.


In [ ]: