In [1]:
# Python Standard Library
import getopt
import os
import sys
import math
import time
import collections
import random
# IPython
from IPython.display import display
# pandas
import pandas as pd
pd.set_option("display.max_rows", 10000)
pd.set_option("display.max_columns", 10000)
# Matplotlib
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from matplotlib.ticker import MultipleLocator
# seaborn
import seaborn as sns
sns.set_style("whitegrid")
sns.despine()
# NumPy
import numpy as np
# SciPy
import scipy as sp
from scipy.stats import gaussian_kde
# StatsModels
import statsmodels.api as sm
# scikit-learn
import sklearn
from sklearn import metrics
from sklearn import preprocessing
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.neighbors import KNeighborsRegressor
from sklearn.model_selection import cross_val_predict
from sklearn.pipeline import make_pipeline
In [2]:
def experiment_plots(results, save_to=None, figsize=(8, 8)):
    # Top panel: kernel density estimate of the error; bottom panel: its empirical CDF.
    fig, axarr = plt.subplots(2, 1, figsize=figsize)
    for key, result in results.items():
        max_error = math.ceil(result["error"].max())
        kde = gaussian_kde(result["error"].values)
        X_plot = np.linspace(0, max_error, 1000)
        axarr[0].plot(X_plot, kde.evaluate(X_plot), "-", label=key)
    axarr[0].set_xlabel("Error (e) in meters (m)")
    axarr[0].set_ylabel(r"$f_X(e)$")
    axarr[0].xaxis.set_major_locator(MultipleLocator(0.5))
    axarr[0].set_xlim(0, result["error"].quantile(q=0.9975))
    axarr[0].legend()
    for key, result in results.items():
        ecdf = sm.distributions.ECDF(result["error"])
        x = np.linspace(min(result["error"]), max(result["error"]))
        y = ecdf(x)
        axarr[1].plot(x, y, label=key)
    axarr[1].set_xlabel("Error (e) in meters (m)")
    axarr[1].set_ylabel(r"$F_X(e)$")
    axarr[1].xaxis.set_major_locator(MultipleLocator(0.5))
    axarr[1].yaxis.set_major_locator(MultipleLocator(0.1))
    axarr[1].set_xlim(0, result["error"].quantile(q=0.9975))
    axarr[1].set_ylim(0)
    axarr[1].legend()
    fig.tight_layout()
    if save_to is not None:
        fig.savefig(output_data_directory + "/" + save_to, dpi=300)
    plt.show()

def experiment_statistics(result):
    # Summary statistics of the positioning error (in meters).
    statistics = collections.OrderedDict([
        ("mae", result["error"].abs().mean()),
        ("rmse", np.sqrt((result["error"]**2).mean())),
        ("sd", result["error"].std()),
        ("p50", result["error"].quantile(q=0.50)),
        ("p75", result["error"].quantile(q=0.75)),
        ("p90", result["error"].quantile(q=0.90)),
        ("p95", result["error"].quantile(q=0.95)),
        ("min", result["error"].min()),
        ("max", result["error"].max()),
    ])
    return statistics

def knn_experiment(data, test_data, train_cols, coord_cols,
                   scaler=None, n_neighbors=5, weights="uniform",
                   algorithm="auto", leaf_size=30, p=2, metric="minkowski",
                   metric_params=None, n_jobs=1):
    # Leave-one-location-out evaluation: for each reference point, fit on every other
    # location in "data" and predict the samples of that location taken from "test_data".
    result = None
    knn = KNeighborsRegressor(n_neighbors=n_neighbors, weights=weights, algorithm=algorithm,
                              leaf_size=leaf_size, p=p, metric=metric,
                              metric_params=metric_params, n_jobs=n_jobs)
    if scaler is not None:
        estimator = make_pipeline(scaler, knn)
    else:
        estimator = knn
    locations = data.groupby(coord_cols).indices.keys()
    for coords in locations:
        train_data = data[(data[coord_cols[0]] != coords[0]) |
                          (data[coord_cols[1]] != coords[1])].reset_index(drop=True)
        target_values = test_data[(test_data[coord_cols[0]] == coords[0]) &
                                  (test_data[coord_cols[1]] == coords[1])].reset_index(drop=True)
        estimator.fit(train_data[train_cols], train_data[coord_cols])
        predictions = pd.DataFrame(estimator.predict(target_values[train_cols]), columns=coord_cols)
        curr_result = target_values[coord_cols].join(predictions, rsuffix="_predicted")
        error = pd.DataFrame((predictions[coord_cols] - curr_result[coord_cols]).apply(np.linalg.norm, axis=1),
                             columns=["error"])
        curr_result = pd.concat([curr_result, error], axis=1)
        result = pd.concat([result, curr_result])
    return result

def knn_experiment_cv(data, cross_validation, train_cols, coord_cols,
                      scaler=None, n_neighbors=5, weights="uniform",
                      algorithm="auto", leaf_size=30, p=2, metric="minkowski",
                      metric_params=None, n_jobs=1):
    # Same experiment, driven by a scikit-learn cross-validation splitter.
    result = None
    knn = KNeighborsRegressor(n_neighbors=n_neighbors, weights=weights, algorithm=algorithm,
                              leaf_size=leaf_size, p=p, metric=metric,
                              metric_params=metric_params, n_jobs=n_jobs)
    if scaler is not None:
        estimator = make_pipeline(scaler, knn)
    else:
        estimator = knn
    X = data[train_cols]
    y = data[coord_cols]
    predictions = pd.DataFrame(cross_val_predict(estimator, X, y, cv=cross_validation), columns=coord_cols)
    result = y.join(predictions, rsuffix="_predicted")
    error = pd.DataFrame((predictions[coord_cols] - result[coord_cols]).apply(np.linalg.norm, axis=1), columns=["error"])
    result = pd.concat([result, error], axis=1)
    return result
A class responsible for loading a JSON file (or all the JSON files in a given directory) into a Python dictionary
In [3]:
from yanux.cruncher.model.loader import JsonLoader
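For reference, a minimal sketch of what such a loader might do (an assumption for illustration, not the actual yanux.cruncher implementation): walk the given directory and parse every .json file it finds.
import glob
import json
import os

def load_json_files(path):
    # Parse every *.json file under path (or the single file it points to).
    data = []
    pattern = path if path.endswith(".json") else os.path.join(path, "*.json")
    for filename in sorted(glob.glob(pattern)):
        with open(filename) as json_file:
            data.append(json.load(json_file))
    return data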
A class that wraps the Python dictionaries of Wi-Fi logging data loaded from the JSON files collected by the YanuX Scavenger Android application
In [4]:
from yanux.cruncher.model.wifi import WifiLogs
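For orientation, the parts of WifiLogs used below and what they presumably hold (an assumption inferred from the cells that follow, not the actual class definition):
# Assumed interface (inferred from its use below), not the actual implementation:
# wifi_logs = WifiLogs(json_loader.json_data)
# wifi_logs.locations       -> the surveyed reference points
# wifi_logs.wifi_results()  -> one record per access-point reading (filename, place,
#                              floor, x, y, orientation, sample_id, mac_address,
#                              timestamp, signal_strength)
# wifi_logs.wifi_samples()  -> one record per sampling cycle, with one signal-strength
#                              column per detected MAC address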
In [5]:
input_data_directory = "data"
output_data_directory = "out"
statistics_excel_writer = pd.ExcelWriter(output_data_directory+"/statistics.xlsx")
In [6]:
if not os.path.exists(output_data_directory):
    os.makedirs(output_data_directory)
Load all files from the data folder. The logs currently placed there were collected with the YanuX Scavenger Android application on April 28th, 2016, using an LG Nexus 5 running Android Marshmallow 6.0.1.
In [7]:
json_loader = JsonLoader(input_data_directory+"/wifi-fingerprints")
wifi_logs = WifiLogs(json_loader.json_data)
Number of Recorded Samples per Location
In [8]:
num_samples_per_location = int(len(wifi_logs.wifi_samples()) / len(wifi_logs.locations))
num_samples_per_location
Out[8]:
Store the data in a pandas DataFrame in which each Wi-Fi scan result (a single access-point reading) is represented by a single row.
In [9]:
wifi_results_columns = ["filename", "place", "floor", "x", "y", "orientation", "sample_id", "mac_address",
                        "timestamp", "signal_strength"]
wifi_results = pd.DataFrame(wifi_logs.wifi_results(), columns=wifi_results_columns)
wifi_results.to_csv(output_data_directory + "/wifi_results.csv")
Identify the unique MAC Addresses present in the recorded data. Each one represents a single Wi-Fi Access Point.
In [10]:
mac_addresses = wifi_results.mac_address.unique()
Similarly, store the data in a pandas DataFrame in which each row represents a single sampling cycle containing one reading for each of the Access Points within range. Those readings are stored as columns alongside each sample.
In [11]:
wifi_samples_columns = ["filename", "place", "floor", "x", "y", "orientation", "sample_id", "timestamp"]
wifi_samples_columns.extend(mac_addresses)
wifi_samples = pd.DataFrame(wifi_logs.wifi_samples(), columns=wifi_samples_columns)
wifi_samples = wifi_samples.sort_values(["filename", "x", "y", "floor", "sample_id"]).reset_index(drop=True)
wifi_samples.to_csv(output_data_directory + "/wifi_samples.csv")
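As an aside, an equivalent wide table could be rebuilt from wifi_results with a pandas pivot. This is only a sketch to show how the two frames relate (the notebook uses WifiLogs.wifi_samples() directly, and the timestamp column is ignored here):
# Sketch only: derive the wide per-sample table from the long wifi_results frame.
wide_samples = wifi_results.pivot_table(index=["filename", "place", "floor", "x", "y",
                                               "orientation", "sample_id"],
                                        columns="mac_address",
                                        values="signal_strength").reset_index()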
Number of Results
In [12]:
len(wifi_results)
Out[12]:
Number of Unique MAC Addresses
In [13]:
len(wifi_results.mac_address.unique())
Out[13]:
In [14]:
wifi_results_mac_address_group = wifi_results.groupby("mac_address")
wifi_results_mac_address_group.size().plot(kind="bar")
wifi_results_mac_address_group.size()
Out[14]:
In [15]:
wifi_results_mac_address_group.size().mean()
Out[15]:
In [16]:
wifi_results_coord_group = wifi_results.groupby(["x", "y"])
wifi_results_coord_group.size().plot(kind="bar")
wifi_results_coord_group.size()
Out[16]:
In [17]:
wifi_results_coord_group.size().describe()
Out[17]:
In [18]:
wifi_ap_per_location = wifi_samples.groupby(["x","y"]).min()[wifi_results_mac_address_group.size().keys()].count(axis=1)
wifi_ap_per_location.plot(kind="bar")
wifi_ap_per_location
Out[18]:
In [19]:
wifi_ap_per_location.describe()
Out[19]:
In [20]:
coords = wifi_results[["x","y"]].drop_duplicates().sort_values(by=["x","y"]).reset_index(drop=True)
coords_plot_size = (min(coords["x"].min(),coords["y"].min()), max(coords["x"].max(),coords["y"].max()))
#TODO: If I end up using it in the document, then I should refactor the plot to use matplotlib directly to tweak a few things.
coords.plot(figsize=(16, 5), x="x", y="y", style="o", grid=True, legend=False,
            xlim=coords_plot_size, ylim=coords_plot_size,
            xticks=np.arange(coords_plot_size[0]-1, coords_plot_size[1]+1, 1),
            yticks=np.arange(coords_plot_size[0]-1, coords_plot_size[1]+1, 1)).axis("equal")
Out[20]:
In [21]:
wifi_results.hist(column="signal_strength")
Out[21]:
Set up the default train and test scenario used in the experiments below.
In [22]:
train_cols = mac_addresses
coord_cols = ["x","y"]
default_data_scenario = wifi_samples.copy()
default_data_scenario_groups = default_data_scenario["x"].map(str)+","+default_data_scenario["y"].map(str)
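As a quick illustration of what this grouping buys us: with LeaveOneGroupOut, every fold holds out all the samples that share one "x,y" label, so a sample is never predicted from fingerprints recorded at its own reference point. A toy example with made-up data:
# Toy example (made-up data): each fold leaves out one whole "x,y" group.
toy_X = np.arange(12).reshape(6, 2)
toy_groups = ["0,0", "0,0", "1,0", "1,0", "2,0", "2,0"]
for train_idx, test_idx in LeaveOneGroupOut().split(toy_X, groups=toy_groups):
    print("train:", train_idx, "test:", test_idx)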
In [23]:
n_neighbors=15
weights="distance"
metric="braycurtis"
nan_filler = default_data_scenario[mac_addresses].min().min()*1.001
scaler = preprocessing.StandardScaler()
cross_validation = LeaveOneGroupOut()
curr_data = default_data_scenario.fillna(nan_filler)
curr_result = knn_experiment_cv(curr_data,
                                cross_validation.split(curr_data[mac_addresses],
                                                       curr_data[coord_cols],
                                                       groups=default_data_scenario_groups),
                                mac_addresses,
                                coord_cols,
                                scaler=scaler,
                                algorithm="brute",
                                n_neighbors=n_neighbors,
                                weights=weights,
                                metric=metric)
curr_statistics = experiment_statistics(curr_result)
curr_result.to_csv(output_data_directory+"/results-base.csv")
statistics_table = pd.DataFrame([curr_statistics], columns=list(curr_statistics.keys()))
statistics_table.to_csv(output_data_directory+"/statistics-base.csv")
statistics_table.to_excel(statistics_excel_writer, "base")
#show table
display(statistics_table)
#plots
experiment_plots({'':curr_result})
In [24]:
n_neighbors=np.arange(1,31,1)
weights=["uniform", "distance"]
metric="braycurtis"
nan_filler = default_data_scenario[mac_addresses].min().min()*1.001
scaler = preprocessing.StandardScaler()
cross_validation = LeaveOneGroupOut()
curr_data = default_data_scenario.fillna(nan_filler)
# Just a statistics accumulator
statistics = []
for k in n_neighbors:
    for w in weights:
        curr_result = knn_experiment_cv(curr_data,
                                        cross_validation.split(curr_data[mac_addresses],
                                                               curr_data[coord_cols],
                                                               groups=default_data_scenario_groups),
                                        mac_addresses,
                                        coord_cols,
                                        scaler=scaler,
                                        algorithm="brute",
                                        n_neighbors=k,
                                        weights=w,
                                        metric=metric)
        curr_statistics = experiment_statistics(curr_result)
        curr_statistics["k"] = k
        curr_statistics["weights"] = w
        statistics.append(curr_statistics)
cols = ["k","weights"] + list(curr_statistics.keys())[:-2]
statistics_table = pd.DataFrame(statistics, columns=cols)
statistics_table.to_csv(output_data_directory + "/statistics-neighbors-weights.csv")
statistics_table.to_excel(statistics_excel_writer, "neighbors-weights")
#show table
display(statistics_table.sort_values(cols[3:]))
# Plotting Error statistics
fig, ax = plt.subplots(figsize=(8, 5))
index = n_neighbors
ax.plot(index, statistics_table[statistics_table["weights"] == "uniform"]["mae"].tolist(),
        color="b", ls="-", label="Uniform (MAE)")
ax.plot(index, statistics_table[statistics_table["weights"] == "distance"]["mae"].tolist(),
        color="r", ls="-", label="Distance (MAE)")
ax.plot(index, statistics_table[statistics_table["weights"] == "uniform"]["rmse"].tolist(),
        color="b", ls="--", label="Uniform (RMSE)")
ax.plot(index, statistics_table[statistics_table["weights"] == "distance"]["rmse"].tolist(),
        color="r", ls="--", label="Distance (RMSE)")
ax.xaxis.set_major_locator(MultipleLocator(1))
ax.yaxis.set_major_locator(MultipleLocator(0.05))
ax.set_xlabel("Number of Neighbours (k)")
ax.set_ylabel("Error (e) in meters (m)")
plt.legend()
plt.tight_layout()
plt.savefig(output_data_directory+"/plot-neighbors_weights.pdf", dpi=300)
plt.show()
Test a few different distance metrics to assess whether there is a better alternative to the plain Euclidean distance. The tested metrics are: Euclidean, Manhattan, Canberra, and Bray-Curtis.
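As a quick illustration with made-up RSSI vectors, the same distance functions are available in scipy.spatial.distance:
# Made-up RSSI vectors, just to show how the candidate metrics differ in scale.
from scipy.spatial import distance

a = [-60.0, -70.0, -80.0]
b = [-65.0, -72.0, -90.0]
print("euclidean :", distance.euclidean(a, b))
print("manhattan :", distance.cityblock(a, b))   # "manhattan" maps to cityblock
print("canberra  :", distance.canberra(a, b))
print("braycurtis:", distance.braycurtis(a, b))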
In [25]:
n_neighbors=15
weights="distance"
distance_statistics=["euclidean", "manhattan", "canberra", "braycurtis"]
nan_filler = default_data_scenario[mac_addresses].min().min()*1.001
scaler = preprocessing.StandardScaler()
cross_validation = LeaveOneGroupOut()
curr_data = default_data_scenario.fillna(nan_filler)
# Results and statistics accumulators
results = {}
statistics = []
for metric in distance_statistics:
    curr_result = knn_experiment_cv(curr_data,
                                    cross_validation.split(curr_data[mac_addresses],
                                                           curr_data[coord_cols],
                                                           groups=default_data_scenario_groups),
                                    mac_addresses,
                                    coord_cols,
                                    scaler=scaler,
                                    algorithm="brute",
                                    n_neighbors=n_neighbors,
                                    weights=weights,
                                    metric=metric)
    results[metric] = curr_result
    curr_statistics = experiment_statistics(curr_result)
    curr_statistics["metric"] = metric
    statistics.append(curr_statistics)
cols = ["metric"] + list(curr_statistics.keys())[:-1]
statistics_table = pd.DataFrame(statistics, columns=cols)
statistics_table.to_csv(output_data_directory + "/statistics-metric.csv")
statistics_table.to_excel(statistics_excel_writer, "metric")
#show table
display(statistics_table.sort_values(cols[2:]))
#plots
experiment_plots(results, "plot-metric.pdf")
In [26]:
n_neighbors=15
weights="distance"
metric="braycurtis"
nan_filler= default_data_scenario[mac_addresses].min().min()*1.001
cross_validation = LeaveOneGroupOut()
scalers = {"No Scaling": None,
"Rescaling": preprocessing.MinMaxScaler(),
"Standardization": preprocessing.StandardScaler()}
# Results and statistics accumulators
results = {}
statistics = []
for scaler_name, scaler in scalers.items():
    curr_data = default_data_scenario.fillna(nan_filler)
    curr_result = knn_experiment_cv(curr_data,
                                    cross_validation.split(curr_data[mac_addresses],
                                                           curr_data[coord_cols],
                                                           groups=default_data_scenario_groups),
                                    mac_addresses,
                                    coord_cols,
                                    scaler=scaler,
                                    algorithm="brute",
                                    n_neighbors=n_neighbors,
                                    weights=weights,
                                    metric=metric)
    results[scaler_name] = curr_result
    curr_statistics = experiment_statistics(results[scaler_name])
    curr_statistics["scaler"] = scaler_name
    statistics.append(curr_statistics)
cols = ["scaler"] + list(curr_statistics.keys())[:-1]
statistics_table = pd.DataFrame(statistics, columns=cols)
statistics_table.to_csv(output_data_directory + "/statistics-feature_scaling.csv")
statistics_table.to_excel(statistics_excel_writer, "feature_scaling")
#show table
display(statistics_table.sort_values(cols[2:]))
#plots
experiment_plots(results, "plot-feature_scaling.pdf")
In [27]:
n_neighbors=15
weights="distance"
metric="braycurtis"
min_rssi_value = default_data_scenario[mac_addresses].min().min()
nan_fillers = [min_rssi_value,min_rssi_value*1.001,min_rssi_value*1.010,min_rssi_value*1.100,min_rssi_value*1.500]
scaler = preprocessing.StandardScaler()
cross_validation = LeaveOneGroupOut()
# Results and statistics accumulators
results = {}
statistics = []
for nf in nan_fillers:
    curr_data = default_data_scenario.fillna(nf)
    curr_result = knn_experiment_cv(curr_data,
                                    cross_validation.split(curr_data[mac_addresses],
                                                           curr_data[coord_cols],
                                                           groups=default_data_scenario_groups),
                                    mac_addresses,
                                    coord_cols,
                                    scaler=scaler,
                                    algorithm="brute",
                                    n_neighbors=n_neighbors,
                                    weights=weights,
                                    metric=metric)
    results[nf] = curr_result
    curr_statistics = experiment_statistics(curr_result)
    curr_statistics["nan_filler"] = nf
    statistics.append(curr_statistics)
cols = ["nan_filler"] + list(curr_statistics.keys())[:-1]
statistics_table = pd.DataFrame(statistics, columns=cols)
statistics_table.to_csv(output_data_directory + "/statistics-nan_filler.csv")
statistics_table.to_excel(statistics_excel_writer, "nan_filler")
#show table
display(statistics_table.sort_values(cols[2:]))
#plots
experiment_plots(results, "plot-nan_filler.pdf")
In [28]:
filename_prefixes = ["left-to-right-point", "right-to-left-point"]
filename_prefix_data_scenarios = {}
#filename_prefix_data_scenarios["all"] = default_data_scenario
for filename_prefix in filename_prefixes:
    filename_prefix_data_scenarios[filename_prefix] = default_data_scenario[wifi_samples["filename"].str.startswith(filename_prefix)].reset_index(drop=True)
filename_prefix_test_data_scenarios = {}
filename_prefix_test_data_scenarios["all"] = default_data_scenario
for filename_prefix in filename_prefixes:
    filename_prefix_test_data_scenarios[filename_prefix] = default_data_scenario[wifi_samples["filename"].str.startswith(filename_prefix)].reset_index(drop=True)
n_neighbors=15
weights="distance"
metric="braycurtis"
nan_filler = default_data_scenario[mac_addresses].min().min()*1.001
scaler = preprocessing.StandardScaler()
# Results and statistics accumulators
results = {}
statistics = []
for train_data_keys, train_data in filename_prefix_data_scenarios.items():
    for test_data_keys, test_data in filename_prefix_test_data_scenarios.items():
        curr_data = train_data.fillna(nan_filler)
        curr_test_data = test_data.fillna(nan_filler)
        curr_result = knn_experiment(curr_data,
                                     curr_test_data,
                                     mac_addresses,
                                     coord_cols,
                                     scaler=scaler,
                                     algorithm="brute",
                                     n_neighbors=n_neighbors,
                                     weights=weights,
                                     metric=metric)
        label = "Train: " + train_data_keys + " Test: " + test_data_keys
        results[label] = curr_result
        curr_statistics = experiment_statistics(curr_result)
        curr_statistics["orientation"] = label
        statistics.append(curr_statistics)
cols = ["orientation"] + list(curr_statistics.keys())[:-1]
statistics_table = pd.DataFrame(statistics, columns=cols)
statistics_table.to_csv(output_data_directory + "/statistics-orientation.csv")
statistics_table.to_excel(statistics_excel_writer, "orientation")
#show table
display(statistics_table.sort_values(cols[2:]))
#plots
experiment_plots(results, "plot-orientation.pdf")
In [29]:
subset_reference_points_scenarios = {}
coords_indices = default_data_scenario.groupby(coord_cols).indices
odd_coords_keys = list(coords_indices.keys())[0::2]
odd_ids = []
for key in odd_coords_keys:
    odd_ids.extend(coords_indices[key])
even_coords_keys = list(coords_indices.keys())[1::2]
even_ids = []
for key in even_coords_keys:
    even_ids.extend(coords_indices[key])
subset_reference_points_scenarios["odd"] = default_data_scenario.loc[odd_ids].reset_index(drop=True)
subset_reference_points_scenarios["even"] = default_data_scenario.loc[even_ids].reset_index(drop=True)
subset_reference_points_scenarios["all"] = default_data_scenario
n_neighbors=15
weights="distance"
metric="braycurtis"
nan_filler = default_data_scenario[mac_addresses].min().min()*1.001
scaler = preprocessing.StandardScaler()
# Results and statistics accumulators
results = {}
statistics = []
for train_data_keys, train_data in subset_reference_points_scenarios.items():
    curr_data = train_data.fillna(nan_filler)
    curr_test_data = default_data_scenario.fillna(nan_filler)
    curr_result = knn_experiment(curr_data,
                                 curr_test_data,
                                 mac_addresses,
                                 coord_cols,
                                 scaler=scaler,
                                 algorithm="brute",
                                 n_neighbors=n_neighbors,
                                 weights=weights,
                                 metric=metric)
    results[train_data_keys] = curr_result
    curr_statistics = experiment_statistics(curr_result)
    curr_statistics["reference_points_spacing"] = train_data_keys
    statistics.append(curr_statistics)
cols = ["reference_points_spacing"] + list(curr_statistics.keys())[:-1]
statistics_table = pd.DataFrame(statistics, columns=cols)
statistics_table.to_csv(output_data_directory + "/statistics-reference_points_spacing.csv")
statistics_table.to_excel(statistics_excel_writer, "reference_points_spacing")
#show table
display(statistics_table.sort_values(cols[2:]))
#plots
experiment_plots(results, "plot-reference_points_spacing.pdf")
In [30]:
n_neighbors=15
weights="distance"
metric="braycurtis"
nan_filler = default_data_scenario[mac_addresses].min().min()*1.001
scaler = preprocessing.StandardScaler()
partial_data = [0.9, 0.7, 0.5, 0.3, 0.1]
repetitions = 50
train_data = default_data_scenario[mac_addresses].copy()
target_values = default_data_scenario[coord_cols].copy()
target_values["label"] = default_data_scenario["x"].map(str) + "," + default_data_scenario["y"].map(str)+ "," + default_data_scenario["filename"].map(str)
# Results and statistics accumulators
results = {}
statistics = []
for partial in partial_data:
    curr_result = pd.DataFrame()
    for repetition in range(repetitions):
        X_train, X_test, y_train, y_test = train_test_split(train_data,
                                                            target_values,
                                                            test_size=1-partial,
                                                            stratify=target_values["label"].values)
        #train data
        train_split_data = pd.concat([y_train, X_train], axis=1).reset_index(drop=True)
        #test data
        #test_split_data = pd.concat([y_test, X_test], axis=1).reset_index(drop=True)
        test_split_data = default_data_scenario
        curr_data = train_split_data.fillna(nan_filler)
        curr_test_data = test_split_data.fillna(nan_filler)
        curr_result = curr_result.append(knn_experiment(curr_data,
                                                        curr_test_data,
                                                        mac_addresses,
                                                        coord_cols,
                                                        scaler=scaler,
                                                        algorithm="brute",
                                                        n_neighbors=n_neighbors,
                                                        weights=weights,
                                                        metric=metric), ignore_index=True)
    results[partial] = curr_result
    curr_statistics = experiment_statistics(curr_result)
    curr_statistics["partial_data"] = partial
    statistics.append(curr_statistics)
cols = ["partial_data"] + list(curr_statistics.keys())[:-1]
statistics_table = pd.DataFrame(statistics, columns=cols)
statistics_table.to_csv(output_data_directory + "/statistics-partial_data.csv")
statistics_table.to_excel(statistics_excel_writer, "partial_data")
#show table
display(statistics_table.sort_values(cols[2:]))
#plots
experiment_plots(results, "plot-partial_data.pdf")
In [33]:
statistics_excel_writer.save()
In [32]:
k_neighbors_values = range(1,31,1)
weights_values = [
    "uniform",
    "distance"
]
metric_values = [
    "euclidean",
    "manhattan",
    "canberra",
    "braycurtis"
]
algorithm_values = ["brute"]
nan_filler = default_data_scenario[mac_addresses].min().min()*1.001
curr_data = default_data_scenario.fillna(nan_filler)
param_grid = {
    "kneighborsregressor__n_neighbors": list(k_neighbors_values),
    "kneighborsregressor__weights": weights_values,
    "kneighborsregressor__metric": metric_values,
    "kneighborsregressor__algorithm": algorithm_values,
}
scaler = preprocessing.StandardScaler()
cross_validation = LeaveOneGroupOut()
estimator = make_pipeline(preprocessing.StandardScaler(), KNeighborsRegressor())
grid = GridSearchCV(estimator,
                    param_grid=param_grid,
                    cv=cross_validation,
                    n_jobs=1,
                    scoring=sklearn.metrics.make_scorer(sklearn.metrics.mean_squared_error,
                                                        greater_is_better=False,
                                                        multioutput="uniform_average"))
grid.fit(curr_data[mac_addresses], curr_data[coord_cols], default_data_scenario_groups)
print("Best parameters set found on development set:")
print(grid.best_params_)
print("Grid scores on development set:")
# grid_scores_ was removed from scikit-learn; cv_results_ carries the same information.
for mean_score, std_score, params in zip(grid.cv_results_["mean_test_score"],
                                         grid.cv_results_["std_test_score"],
                                         grid.cv_results_["params"]):
    print("%0.3f (+/-%0.03f) for %r" % (mean_score, std_score * 2, params))