Import modules and set up the environment


In [1]:
# Python Standard Library
import getopt
import os
import sys
import math
import time
import collections
import random

# IPython
from IPython.display import display

# pandas
import pandas as pd
pd.set_option("display.max_rows", 10000)
pd.set_option("display.max_columns", 10000)

# Matplotlib
%matplotlib inline
import matplotlib.pyplot as plt                       
import matplotlib.mlab as mlab
from matplotlib.ticker import MultipleLocator

# seaborn
import seaborn as sns
sns.set_style("whitegrid")
# Note: sns.despine() operates on an existing figure, so calling it at import
# time would only create an empty stray figure; it should be applied per-plot.

# NumPy
import numpy as np                                    

# SciPy
import scipy as sp
from scipy.stats import gaussian_kde

# StatsModels
import statsmodels.api as sm

# scikit-learn
import sklearn
from sklearn import metrics
from sklearn import preprocessing
from sklearn.model_selection import (GridSearchCV, LeaveOneGroupOut,
                                     cross_val_predict, train_test_split)
from sklearn.neighbors import KNeighborsRegressor
from sklearn.pipeline import make_pipeline



Helper Functions


In [2]:
def experiment_plots(results, save_to=None, figsize=(8, 8)):
    fig, axarr = plt.subplots(2, 1, figsize=figsize)

    # Top panel: kernel density estimate of the error distribution (the PDF).
    for key, result in results.items():
        max_error = math.ceil(result["error"].max())
        kde = gaussian_kde(result["error"].values)
        X_plot = np.linspace(0, max_error, 1000)
        axarr[0].plot(X_plot, kde.evaluate(X_plot), "-", label=key)

    axarr[0].set_xlabel("Error (e) in meters (m)")
    axarr[0].set_ylabel(r"$f_X(e)$")
    axarr[0].xaxis.set_major_locator(MultipleLocator(0.5))
    # Trim the x-axis at the 99.75th percentile of the last result plotted.
    axarr[0].set_xlim(0, result["error"].quantile(q=0.9975))
    axarr[0].legend()

    # Bottom panel: empirical cumulative distribution function (the CDF).
    for key, result in results.items():
        ecdf = sm.distributions.ECDF(result["error"])
        x = np.linspace(min(result["error"]), max(result["error"]))
        y = ecdf(x)
        axarr[1].plot(x, y, label=key)

    axarr[1].set_xlabel("Error (e) in meters (m)")
    axarr[1].set_ylabel(r"$F_X(e)$")
    axarr[1].xaxis.set_major_locator(MultipleLocator(0.5))
    axarr[1].yaxis.set_major_locator(MultipleLocator(0.1))
    axarr[1].set_xlim(0, result["error"].quantile(q=0.9975))
    axarr[1].set_ylim(0)
    axarr[1].legend()

    fig.tight_layout()
    if save_to is not None:
        fig.savefig(output_data_directory + "/" + save_to, dpi=300)
    plt.show()

def experiment_statistics(result):
    # Summary statistics of the localization error, in meters.
    statistics = collections.OrderedDict([
        ("mae",  result["error"].abs().mean()),
        ("rmse", np.sqrt((result["error"]**2).mean())),
        ("sd",   result["error"].std()),
        ("p50",  result["error"].quantile(q=0.50)),
        ("p75",  result["error"].quantile(q=0.75)),
        ("p90",  result["error"].quantile(q=0.90)),
        ("p95",  result["error"].quantile(q=0.95)),
        ("min",  result["error"].min()),
        ("max",  result["error"].max()),
    ])
    return statistics

def knn_experiment(data, test_data, train_cols, coord_cols,
                   scaler=None, n_neighbors=5, weights="uniform",
                   algorithm="auto", leaf_size=30, p=2, metric="minkowski",
                   metric_params=None, n_jobs=1):
    # Leave-one-location-out evaluation: for every reference point, train on
    # the samples from all other locations and predict the held-out location.
    result = None
    knn = KNeighborsRegressor(n_neighbors=n_neighbors, weights=weights, algorithm=algorithm,
                              leaf_size=leaf_size, p=p, metric=metric,
                              metric_params=metric_params, n_jobs=n_jobs)
    if scaler is not None:
        estimator = make_pipeline(scaler, knn)
    else:
        estimator = knn
    locations = data.groupby(coord_cols).indices.keys()
    for coords in locations:
        # Train on every sample that does not belong to the current location...
        train_data = data[(data[coord_cols[0]] != coords[0]) |
                          (data[coord_cols[1]] != coords[1])].reset_index(drop=True)
        # ...and test on the samples recorded at the current location.
        target_values = test_data[(test_data[coord_cols[0]] == coords[0]) &
                                  (test_data[coord_cols[1]] == coords[1])].reset_index(drop=True)
        estimator.fit(train_data[train_cols], train_data[coord_cols])
        predictions = pd.DataFrame(estimator.predict(target_values[train_cols]), columns=coord_cols)
        curr_result = target_values[coord_cols].join(predictions, rsuffix="_predicted")
        # Localization error = Euclidean distance between predicted and true coordinates.
        error = pd.DataFrame((predictions[coord_cols] - curr_result[coord_cols]).apply(np.linalg.norm, axis=1),
                             columns=["error"])
        curr_result = pd.concat([curr_result, error], axis=1)
        result = pd.concat([result, curr_result])
    return result

def knn_experiment_cv(data, cross_validation, train_cols, coord_cols,
                      scaler=None, n_neighbors=5, weights="uniform",
                      algorithm="auto", leaf_size=30, p=2, metric="minkowski",
                      metric_params=None, n_jobs=1):
    # Same experiment, but delegating the train/test splitting to a
    # scikit-learn cross-validation strategy via cross_val_predict.
    knn = KNeighborsRegressor(n_neighbors=n_neighbors, weights=weights, algorithm=algorithm,
                              leaf_size=leaf_size, p=p, metric=metric,
                              metric_params=metric_params, n_jobs=n_jobs)
    if scaler is not None:
        estimator = make_pipeline(scaler, knn)
    else:
        estimator = knn
    X = data[train_cols]
    y = data[coord_cols]
    predictions = pd.DataFrame(cross_val_predict(estimator, X, y, cv=cross_validation), columns=coord_cols)
    result = y.join(predictions, rsuffix="_predicted")
    # Localization error = Euclidean distance between predicted and true coordinates.
    error = pd.DataFrame((predictions[coord_cols] - result[coord_cols]).apply(np.linalg.norm, axis=1), columns=["error"])
    result = pd.concat([result, error], axis=1)
    return result

Load the model classes

A class responsible for loading a JSON file (or all of the JSON files in a given directory) into Python dictionaries; a minimal sketch of the idea follows the import below


In [3]:
from yanux.cruncher.model.loader import JsonLoader
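
The actual implementation lives in the yanux.cruncher package, but conceptually it reduces to something like the following (a minimal sketch, assuming one JSON document per *.json file; load_json_dir is a hypothetical stand-in, not the real class):

import glob
import json
import os

def load_json_dir(directory):
    # Read every *.json file in the directory into a list of dictionaries,
    # roughly what JsonLoader exposes through its json_data attribute.
    json_data = []
    for path in sorted(glob.glob(os.path.join(directory, "*.json"))):
        with open(path) as f:
            json_data.append(json.load(f))
    return json_data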

A class that takes the set of Python dictionaries loaded from the JSON files collected by the YanuX Scavenger Android application and exposes the Wi-Fi logging data they contain; a rough sketch of the reshaping it performs follows the import below


In [4]:
from yanux.cruncher.model.wifi import WifiLogs
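
WifiLogs essentially reshapes the raw logs into the two views used throughout this notebook: one row per individual AP reading (wifi_results) and one row per sampling cycle (wifi_samples). A rough sketch of the pivot step, assuming per-reading rows like the ones built below (readings_to_samples is hypothetical, not the real API):

def readings_to_samples(wifi_results):
    # Pivot per-reading rows (one row per detected AP) into per-sample rows
    # with one signal-strength column per MAC address (NaN when out of range).
    index_cols = ["filename", "place", "floor", "x", "y", "orientation", "sample_id"]
    return (wifi_results
            .pivot_table(index=index_cols, columns="mac_address",
                         values="signal_strength")
            .reset_index())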

Initialize Input & Output Data Directories and other parameters


In [5]:
input_data_directory = "data"
output_data_directory = "out"
statistics_excel_writer = pd.ExcelWriter(output_data_directory+"/statistics.xlsx")

Create the output directory if it doesn't exist


In [6]:
if not os.path.exists(output_data_directory):
    os.makedirs(output_data_directory)

Load Data from the Input Data Directory

Load all files from the data folder. The logs currently placed there were collected using the YanuX Scavenger Android application on April 28th, 2016, using an LG Nexus 5 running Android Marshmallow 6.0.1


In [7]:
json_loader = JsonLoader(input_data_directory+"/wifi-fingerprints")
wifi_logs = WifiLogs(json_loader.json_data)

Wi-Fi Readings

Number of Recorded Samples per Location


In [8]:
num_samples_per_location = int(len(wifi_logs.wifi_samples()) / len(wifi_logs.locations))
num_samples_per_location


Out[8]:
40

Store the data into a pandas DataFrame in which each Wi-Fi reading is represented by a single row


In [9]:
wifi_results_columns = ["filename", "place", "floor", "x", "y", "orientation", "sample_id", "mac_address",
                        "timestamp", "signal_strength"]

wifi_results = pd.DataFrame(wifi_logs.wifi_results(), columns=wifi_results_columns)
wifi_results.to_csv(output_data_directory + "/wifi_results.csv")

Identify the unique MAC Addresses present in the recorded data. Each one represents a single Wi-Fi Access Point.


In [10]:
mac_addresses = wifi_results.mac_address.unique()

Similarly, store the data into a pandas DataFrame in which each row represents a single sampling cycle with up to n readings, one for each Access Point within range. Those readings are stored as columns alongside each sample.


In [11]:
wifi_samples_columns = ["filename", "place", "floor", "x", "y", "orientation", "sample_id", "timestamp"]
wifi_samples_columns.extend(mac_addresses)

wifi_samples = pd.DataFrame(wifi_logs.wifi_samples(), columns=wifi_samples_columns)
wifi_samples = wifi_samples.sort_values(["filename", "x", "y", "floor", "sample_id"]).reset_index(drop=True)
wifi_samples.to_csv(output_data_directory + "/wifi_samples.csv")

Data Set Statistics

Number of Results


In [12]:
len(wifi_results)


Out[12]:
17346

Number of Unique MAC Addresses


In [13]:
len(wifi_results.mac_address.unique())


Out[13]:
39

How often each Access Point was detected


In [14]:
wifi_results_mac_address_group = wifi_results.groupby("mac_address")
wifi_results_mac_address_group.size().plot(kind="bar")
wifi_results_mac_address_group.size()


Out[14]:
mac_address
00:0f:60:06:71:2e     33
00:11:21:66:75:80    544
00:11:21:66:75:81    595
00:11:21:66:75:82    639
00:11:21:6c:4e:70    709
00:11:21:6c:4e:71    771
00:11:21:6c:4e:72    792
00:11:21:6c:50:e0    635
00:11:21:6c:50:e1    643
00:11:21:6c:50:e2    647
00:12:43:f1:c4:40     30
00:12:43:f1:c4:41     27
00:12:43:f1:c4:42     30
00:12:80:17:ac:ae     17
00:12:da:9e:31:91      2
00:12:da:9e:32:30    940
00:12:da:9e:32:31    953
00:12:da:9e:32:32    955
00:15:f9:6c:7a:d0    392
00:15:f9:6c:7a:d1    413
00:15:f9:6c:7a:d2    479
00:15:f9:6c:7a:e0    622
00:15:f9:6c:7a:e1    619
00:15:f9:6c:7a:e2    622
00:15:f9:6c:7d:20    669
00:15:f9:6c:7d:21    668
00:15:f9:6c:7d:22    675
00:15:f9:6c:7d:70    956
00:15:f9:6c:7d:71    959
00:15:f9:6c:7d:72    962
00:17:df:7d:70:50     48
00:17:df:7d:70:51     53
00:17:df:7d:70:52     49
00:1b:fc:22:43:4e     50
30:8d:99:64:aa:8d      3
88:e3:ab:11:f4:ee    119
b0:5b:67:c2:a0:16     10
bc:14:01:9c:37:08      8
bc:14:01:9c:37:09      8
dtype: int64

In [15]:
wifi_results_mac_address_group.size().mean()


Out[15]:
444.7692307692308

How many Wi-Fi results were gathered at each location


In [16]:
wifi_results_coord_group = wifi_results.groupby(["x", "y"])
wifi_results_coord_group.size().plot(kind="bar")
wifi_results_coord_group.size()


Out[16]:
x      y    
0.25   0.75     475
2.25   0.75     520
4.25   0.75     527
6.25   0.75     575
8.25   0.75     606
10.25  0.75     679
12.25  0.75     700
13.75  1.25     691
       3.25     831
       5.25     869
       7.25     903
       9.25     887
       11.25    866
15.75  11.25    805
17.75  11.25    828
19.75  11.25    754
21.75  11.25    683
23.75  11.25    729
25.75  11.25    661
27.75  11.25    612
29.75  11.25    601
31.75  11.25    612
33.75  11.25    632
35.75  11.25    632
37.75  11.25    668
dtype: int64

In [17]:
wifi_results_coord_group.size().describe()


Out[17]:
count     25.000000
mean     693.840000
std      121.617803
min      475.000000
25%      612.000000
50%      679.000000
75%      805.000000
max      903.000000
dtype: float64

How many APs were detected at each location


In [18]:
# An AP counts as detected at a location if it shows up in at least one sample there.
wifi_ap_per_location = wifi_samples.groupby(["x","y"]).min()[wifi_results_mac_address_group.size().keys()].count(axis=1)
wifi_ap_per_location.plot(kind="bar")
wifi_ap_per_location


Out[18]:
x      y    
0.25   0.75     18
2.25   0.75     19
4.25   0.75     19
6.25   0.75     16
8.25   0.75     18
10.25  0.75     20
12.25  0.75     21
13.75  1.25     21
       3.25     24
       5.25     25
       7.25     26
       9.25     25
       11.25    24
15.75  11.25    24
17.75  11.25    21
19.75  11.25    22
21.75  11.25    18
23.75  11.25    22
25.75  11.25    19
27.75  11.25    19
29.75  11.25    16
31.75  11.25    19
33.75  11.25    21
35.75  11.25    21
37.75  11.25    22
dtype: int64

In [19]:
wifi_ap_per_location.describe()


Out[19]:
count    25.000000
mean     20.800000
std       2.768875
min      16.000000
25%      19.000000
50%      21.000000
75%      22.000000
max      26.000000
dtype: float64

The coordinates of the points where data was captured


In [20]:
coords = wifi_results[["x","y"]].drop_duplicates().sort_values(by=["x","y"]).reset_index(drop=True)
coords_plot_size = (min(coords["x"].min(),coords["y"].min()), max(coords["x"].max(),coords["y"].max()))
#TODO: If I end up using it in the document, then I should refactor the plot to use matplotlib directly to tweak a few things.
coords.plot(figsize=(16,5), x="x",y="y", style="o", grid=True, legend=False,
            xlim=coords_plot_size, ylim=coords_plot_size,
            xticks=np.arange(coords_plot_size[0]-1, coords_plot_size[1]+1, 1),
            yticks=np.arange(coords_plot_size[0]-1, coords_plot_size[1]+1, 1)).axis('equal')



Signal Strength Distribution


In [21]:
wifi_results.hist(column="signal_strength")



Set up a train and test scenario to be used by default in the experiments below.


In [22]:
train_cols = mac_addresses
coord_cols = ["x","y"]

default_data_scenario = wifi_samples.copy()
default_data_scenario_groups = default_data_scenario["x"].map(str)+","+default_data_scenario["y"].map(str)
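
Grouping the samples by their "x,y" coordinate string means that LeaveOneGroupOut (used below) holds out all of the samples from one reference point per fold, so the estimator is always evaluated on a location it has never seen. A toy illustration of the splitter on made-up data (X_toy, y_toy, and groups_toy are hypothetical):

from sklearn.model_selection import LeaveOneGroupOut

X_toy = [[0], [1], [2], [3]]
y_toy = [0, 0, 1, 1]
groups_toy = ["0.25,0.75", "0.25,0.75", "2.25,0.75", "2.25,0.75"]
for train_idx, test_idx in LeaveOneGroupOut().split(X_toy, y_toy, groups_toy):
    print(train_idx, test_idx)  # each fold holds out one entire location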

Playground

Base Example


In [23]:
n_neighbors=15
weights="distance"
metric="braycurtis"
nan_filler = default_data_scenario[mac_addresses].min().min()*1.001
scaler = preprocessing.StandardScaler()
cross_validation = LeaveOneGroupOut()

curr_data  = default_data_scenario.fillna(nan_filler)

curr_result = knn_experiment_cv(curr_data,
                                cross_validation.split(curr_data[mac_addresses],
                                                       curr_data[coord_cols],
                                                       groups=default_data_scenario_groups),
                                mac_addresses,
                                coord_cols,
                                scaler=scaler,
                                algorithm="brute",
                                n_neighbors=n_neighbors,
                                weights=weights,
                                metric=metric)

curr_statistics = experiment_statistics(curr_result)    
curr_result.to_csv(output_data_directory+"/results-base.csv")

statistics_table = pd.DataFrame([curr_statistics], columns=list(curr_statistics.keys()))
statistics_table.to_csv(output_data_directory+"/statistics-base.csv")
statistics_table.to_excel(statistics_excel_writer, "base")

#show table
display(statistics_table)
#plots
experiment_plots({'':curr_result})


mae rmse sd p50 p75 p90 p95 min max
0 2.078653 2.377476 1.154523 2.0 2.745414 3.631983 4.193252 0.001298 7.430791

Number of Neighbors & Distance Weights


In [24]:
n_neighbors=np.arange(1,31,1)
weights=["uniform", "distance"]
metric="braycurtis"
nan_filler = default_data_scenario[mac_addresses].min().min()*1.001
scaler = preprocessing.StandardScaler()
cross_validation = LeaveOneGroupOut()

curr_data  = default_data_scenario.fillna(nan_filler)

# Just a statistics accumulator
statistics = []
for k in n_neighbors:
    for w in weights:
        curr_result = knn_experiment_cv(curr_data,
                                        cross_validation.split(curr_data[mac_addresses],
                                                               curr_data[coord_cols],
                                                               groups=default_data_scenario_groups),
                                        mac_addresses,
                                        coord_cols,
                                        scaler=scaler,
                                        algorithm="brute",
                                        n_neighbors=k,
                                        weights=w,
                                        metric=metric)
        
        curr_statistics = experiment_statistics(curr_result)
        curr_statistics["k"] = k
        curr_statistics["weights"] = w
        statistics.append(curr_statistics)
    
cols = ["k","weights"] + list(curr_statistics.keys())[:-2]
statistics_table = pd.DataFrame(statistics, columns=cols)
statistics_table.to_csv(output_data_directory + "/statistics-neighbors-weights.csv")
statistics_table.to_excel(statistics_excel_writer, "neighbors-weights")

#show table
display(statistics_table.sort_values(cols[3:]))

# Plotting Error statistics
fig, ax = plt.subplots(figsize=(8, 5))
index = n_neighbors

ax.plot(index, statistics_table[statistics_table["weights"] == "uniform"]["mae"].tolist(),
         color="b", ls="-", label="Uniform (MAE)")

ax.plot(index, statistics_table[statistics_table["weights"] == "distance"]["mae"].tolist(),
         color="r", ls="-", label="Distance (MAE)")

ax.plot(index, statistics_table[statistics_table["weights"] == "uniform"]["rmse"].tolist(),
         color="b", ls="--", label="Uniform (RMSE)")

ax.plot(index, statistics_table[statistics_table["weights"] == "distance"]["rmse"].tolist(),
         color="r", ls="--", label="Distance (RMSE)")

ax.xaxis.set_major_locator(MultipleLocator(1))
ax.yaxis.set_major_locator(MultipleLocator(0.05))

ax.set_xlabel("Number of Neighbours (k)")
ax.set_ylabel("Error (e) in meters (m)")

plt.legend()
plt.tight_layout()
plt.savefig(output_data_directory+"/plot-neighbors_weights.pdf", dpi=300)
plt.show()


k weights mae rmse sd p50 p75 p90 p95 min max
33 17 distance 2.075707 2.373845 1.152349 2.000000 2.751959 3.631104 4.131764 0.002350 7.275868
37 19 distance 2.076249 2.377294 1.158471 2.000000 2.769058 3.646688 4.179433 0.002198 6.860739
29 15 distance 2.078653 2.377476 1.154523 2.000000 2.745414 3.631983 4.193252 0.001298 7.430791
35 18 distance 2.077343 2.377612 1.157162 2.000000 2.774437 3.620001 4.134502 0.003265 7.108615
31 16 distance 2.079158 2.378553 1.155835 2.000000 2.741545 3.614315 4.176096 0.003153 7.577996
41 21 distance 2.075277 2.382196 1.170237 2.000000 2.820564 3.654421 4.163440 0.004447 6.784986
39 20 distance 2.079516 2.382451 1.163203 2.000000 2.807039 3.650747 4.091513 0.005505 6.913347
49 25 distance 2.064087 2.382589 1.190670 2.000000 2.869489 3.649748 4.112247 0.000892 6.523337
43 22 distance 2.074744 2.383015 1.172849 2.000000 2.826463 3.647760 4.114994 0.001277 6.920270
45 23 distance 2.074425 2.383645 1.174694 2.000000 2.855515 3.649125 4.127990 0.001068 6.803534
47 24 distance 2.069856 2.383874 1.183195 2.000000 2.842211 3.631477 4.144237 0.003363 6.619186
27 14 distance 2.083410 2.384773 1.160988 2.000000 2.780884 3.643823 4.214262 0.001422 7.263487
51 26 distance 2.062587 2.386598 1.201261 2.000000 2.853275 3.637551 4.134204 0.003209 6.575454
28 15 uniform 2.086776 2.387635 1.160824 2.000000 2.800000 3.600000 4.140000 0.000000 7.466667
32 17 uniform 2.088983 2.388793 1.159235 2.000000 2.705882 3.647059 4.117647 0.000000 7.294118
53 27 distance 2.063729 2.390712 1.207471 2.000000 2.874691 3.691500 4.130302 0.000876 6.487887
25 13 distance 2.089839 2.390984 1.162209 2.000000 2.778294 3.641761 4.189120 0.002460 7.071860
30 16 uniform 2.090883 2.391990 1.162400 2.000000 2.750000 3.625000 4.250000 0.000000 7.625000
26 14 uniform 2.088966 2.393934 1.169834 2.000000 2.722692 3.714286 4.285714 0.000000 7.285714
55 28 distance 2.067045 2.394997 1.210290 2.001048 2.884396 3.686661 4.123262 0.002966 6.406294
34 18 uniform 2.093129 2.396083 1.166785 2.000000 2.888889 3.666667 4.111111 0.000000 7.111111
36 19 uniform 2.093014 2.397866 1.170651 2.000000 2.842105 3.684211 4.210526 0.000000 6.842105
57 29 distance 2.066187 2.398237 1.218154 2.000779 2.907138 3.693308 4.149629 0.000759 6.520196
24 13 uniform 2.094007 2.398906 1.171007 2.000000 2.769231 3.692308 4.153846 0.000000 7.076923
23 12 distance 2.107137 2.404353 1.158547 2.000000 2.815043 3.675400 4.183637 0.003497 6.999652
59 30 distance 2.067950 2.404720 1.227913 2.000000 2.926682 3.673039 4.145617 0.002167 6.626829
21 11 distance 2.112484 2.405548 1.151260 2.000000 2.774417 3.674797 4.221661 0.005176 6.915200
38 20 uniform 2.099021 2.407330 1.179297 2.000000 2.900000 3.700000 4.200000 0.000000 6.900000
40 21 uniform 2.095914 2.409717 1.189663 2.000000 2.952381 3.714286 4.190476 0.000000 6.857143
42 22 uniform 2.095444 2.412351 1.195819 2.000000 2.909091 3.727273 4.181818 0.000000 6.909091
22 12 uniform 2.108898 2.412748 1.172720 2.000000 2.833333 3.666667 4.166667 0.000000 7.000000
19 10 distance 2.126484 2.413331 1.141725 2.000000 2.792820 3.785927 4.204274 0.002502 6.625629
20 11 uniform 2.114045 2.413951 1.165906 2.000000 2.727273 3.636364 4.181818 0.000000 6.909091
44 23 uniform 2.095143 2.414550 1.200778 2.000000 2.956522 3.739130 4.173913 0.000000 6.869565
48 25 uniform 2.082396 2.415915 1.225470 2.000000 2.960000 3.680000 4.160000 0.000000 6.960000
46 24 uniform 2.089208 2.416190 1.214355 2.000000 2.921413 3.675852 4.166667 0.000000 7.000000
17 9 distance 2.133739 2.417701 1.137422 2.000000 2.762200 3.783061 4.208193 0.001177 7.107748
50 26 uniform 2.081430 2.421496 1.238072 2.000000 2.927114 3.769231 4.230769 0.000000 6.846154
18 10 uniform 2.124319 2.421641 1.163171 2.000000 2.800000 3.800000 4.200000 0.000000 6.600000
16 9 uniform 2.132357 2.425203 1.155855 2.000000 2.888889 3.777778 4.222222 0.000000 7.111111
52 27 uniform 2.083672 2.427959 1.246938 2.000000 2.962963 3.711111 4.222222 0.000000 6.888889
15 8 distance 2.160877 2.432441 1.117420 2.000000 2.735717 3.757825 4.180548 0.000187 7.002540
54 28 uniform 2.088187 2.435015 1.253133 2.000638 3.000000 3.714286 4.214286 0.000000 6.785714
56 29 uniform 2.087420 2.440420 1.264883 2.000000 3.034483 3.731034 4.206897 0.000000 6.827586
14 8 uniform 2.163323 2.440673 1.130572 2.000000 2.750000 3.750000 4.250000 0.000000 7.000000
13 7 distance 2.170971 2.446688 1.128912 2.000000 2.742942 3.731990 4.211159 0.003633 6.867153
58 30 uniform 2.090472 2.449583 1.277502 2.000000 3.000000 3.733333 4.203333 0.000000 6.866667
11 6 distance 2.179390 2.453675 1.127853 2.000000 2.685709 3.700065 4.253655 0.000023 6.441326
12 7 uniform 2.179771 2.456723 1.133743 2.000000 2.857143 3.714286 4.285714 0.000000 7.142857
10 6 uniform 2.184521 2.462384 1.136878 2.000000 2.666667 3.666667 4.333333 0.000000 7.000000
9 5 distance 2.211859 2.484362 1.131818 2.000000 2.757629 3.900240 4.062268 0.042187 7.200646
8 5 uniform 2.220394 2.493391 1.134961 2.000000 2.800000 4.000000 4.000000 0.000000 7.200000
7 4 distance 2.234304 2.507165 1.138006 2.000000 2.823597 4.000000 4.160594 0.000837 8.000000
6 4 uniform 2.239679 2.514750 1.144166 2.000000 3.000000 4.000000 4.000000 0.000000 8.000000
5 3 distance 2.251662 2.535540 1.166337 2.000000 2.656971 4.000000 4.641937 0.006815 8.100792
4 3 uniform 2.258284 2.539062 1.161177 2.000000 2.666667 4.000000 4.666667 0.000000 8.000000
2 2 uniform 2.272463 2.616080 1.296715 2.000000 3.000000 4.000000 4.000000 0.000000 8.000000
3 2 distance 2.281970 2.617649 1.283101 2.000000 2.849367 4.000000 4.294591 0.000799 8.000000
0 1 uniform 2.575784 2.865397 1.255952 2.000000 2.000000 4.000000 6.000000 1.581139 10.000000
1 1 distance 2.575784 2.865397 1.255952 2.000000 2.000000 4.000000 6.000000 1.581139 10.000000

Metric

Test a few different distance metrics to assess whether any of them is a better alternative to the plain old Euclidean distance. The candidate metrics include (a quick sanity check of these definitions with SciPy follows the list):

  • Euclidean Distance
    • sqrt(sum((x - y)^2))
  • Manhattan Distance
    • sum(|x - y|)
  • Chebyshev Distance
    • max(|x - y|)
  • Hamming Distance
    • N_unequal(x, y) / N_tot
  • Canberra Distance
    • sum(|x - y| / (|x| + |y|))
  • Bray-Curtis Distance
    • sum(|x - y|) / (sum(|x|) + sum(|y|))
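
A minimal sketch checking these formulas against scipy.spatial.distance on a pair of made-up RSSI vectors (note that SciPy normalises Bray-Curtis by sum(|x + y|), which matches the formula above only when all values share the same sign):

import numpy as np
from scipy.spatial import distance

x = np.array([-50.0, -70.0, -90.0])
y = np.array([-55.0, -65.0, -90.0])

print(distance.euclidean(x, y))   # sqrt(sum((x - y)^2))
print(distance.cityblock(x, y))   # Manhattan: sum(|x - y|)
print(distance.chebyshev(x, y))   # max(|x - y|)
print(distance.hamming(x, y))     # N_unequal(x, y) / N_tot
print(distance.canberra(x, y))    # sum(|x - y| / (|x| + |y|))
print(distance.braycurtis(x, y))  # sum(|x - y|) / sum(|x + y|)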

In [25]:
n_neighbors=15
weights="distance"
distance_statistics=["euclidean", "manhattan", "canberra", "braycurtis"]
nan_filler = default_data_scenario[mac_addresses].min().min()*1.001
scaler = preprocessing.StandardScaler()
cross_validation = LeaveOneGroupOut()
curr_data = default_data_scenario.fillna(nan_filler)
# Results and statistics accumulators
results = {}
statistics = []
for metric in distance_statistics:
    curr_result = knn_experiment_cv(curr_data,
                                    cross_validation.split(curr_data[mac_addresses],
                                                           curr_data[coord_cols],
                                                           groups=default_data_scenario_groups),
                                    mac_addresses,
                                    coord_cols,
                                    scaler=scaler,
                                    algorithm="brute",
                                    n_neighbors=n_neighbors,
                                    weights=weights,
                                    metric=metric)
    results[metric] = curr_result
    curr_statistics = experiment_statistics(curr_result)
    curr_statistics["metric"] = metric
    statistics.append(curr_statistics)
    
cols = ["metric"] + list(curr_statistics.keys())[:-1]
statistics_table = pd.DataFrame(statistics, columns=cols)
statistics_table.to_csv(output_data_directory + "/statistics-metric.csv")
statistics_table.to_excel(statistics_excel_writer, "metric")
#show table
display(statistics_table.sort_values(cols[2:]))
#plots
experiment_plots(results, "plot-metric.pdf")


metric mae rmse sd p50 p75 p90 p95 min max
3 braycurtis 2.078653 2.377476 1.154523 2.000000 2.745414 3.631983 4.193252 0.001298 7.430791
2 canberra 2.070815 2.396229 1.206274 2.000000 2.780921 3.791733 4.319201 0.028864 6.933314
1 manhattan 2.125824 2.436192 1.190514 2.000000 2.752557 3.673767 4.211570 0.001575 8.266667
0 euclidean 2.304611 2.851889 1.680734 2.006514 3.035926 3.948149 4.395501 0.009308 22.212709

Feature Scaling

Test different data scaling and normalization approaches to find out whether any of them provides a clear advantage over the others. Rescaling (min-max) maps each feature to [0, 1] via (x - min) / (max - min), while standardization centers and scales each feature via (x - mean) / std.


In [26]:
n_neighbors=15
weights="distance"
metric="braycurtis"
nan_filler= default_data_scenario[mac_addresses].min().min()*1.001
cross_validation = LeaveOneGroupOut()
scalers = {"No Scaling": None,
           "Rescaling": preprocessing.MinMaxScaler(),
           "Standardization": preprocessing.StandardScaler()}
# Results and statistics accumulators
results = {}
statistics = []
for scaler_name, scaler in scalers.items():
    curr_data = default_data_scenario.fillna(nan_filler)
    curr_result = knn_experiment_cv(curr_data,
                                    cross_validation.split(curr_data[mac_addresses],
                                                           curr_data[coord_cols],
                                                           groups=default_data_scenario_groups),
                                    mac_addresses,
                                    coord_cols,
                                    scaler=scaler,
                                    algorithm="brute",
                                    n_neighbors=n_neighbors,
                                    weights=weights,
                                    metric=metric)
    results[scaler_name] = curr_result
    curr_statistics = experiment_statistics(results[scaler_name])
    curr_statistics["scaler"] = scaler_name
    statistics.append(curr_statistics)

cols = ["scaler"] + list(curr_statistics.keys())[:-1]
statistics_table = pd.DataFrame(statistics, columns=cols)
statistics_table.to_csv(output_data_directory + "/statistics-feature_scaling.csv")
statistics_table.to_excel(statistics_excel_writer, "feature_scaling")
#show table
display(statistics_table.sort_values(cols[2:]))
#plots
experiment_plots(results, "plot-feature_scaling.pdf")


scaler mae rmse sd p50 p75 p90 p95 min max
2 Standardization 2.078653 2.377476 1.154523 2.0 2.745414 3.631983 4.193252 0.001298 7.430791
1 Rescaling 2.154491 2.481188 1.231249 2.0 2.839237 3.734635 4.147790 0.012591 8.006142
0 No Scaling 2.203234 2.578599 1.340423 2.0 2.918448 3.944852 4.724775 0.000274 8.980178

NaN filler values

Test which signal strength value should be assigned to Access Points that are out of range in a given sample. Such a filler is needed because the distance/similarity between two fingerprints can only be computed once the missing readings are replaced by a concrete value. Since RSSI values are negative dBm figures (the minimum observed here is -92 dBm), multiplying that minimum by factors greater than 1 pushes the filler progressively further below the weakest observed signal (e.g. -92 × 1.001 = -92.092).


In [27]:
n_neighbors=15
weights="distance"
metric="braycurtis"
min_rssi_value = default_data_scenario[mac_addresses].min().min()
nan_fillers = [min_rssi_value, min_rssi_value*1.001, min_rssi_value*1.010, min_rssi_value*1.100, min_rssi_value*1.500]
scaler = preprocessing.StandardScaler()
cross_validation = LeaveOneGroupOut()
# Results and statistics accumulators
results = {}
statistics = []
for nf in nan_fillers:
    curr_data = default_data_scenario.fillna(nf)
    curr_result = knn_experiment_cv(curr_data,
                                    cross_validation.split(curr_data[mac_addresses],
                                                           curr_data[coord_cols],
                                                           groups=default_data_scenario_groups),
                                    mac_addresses,
                                    coord_cols,
                                    scaler=scaler,
                                    algorithm="brute",
                                    n_neighbors=n_neighbors,
                                    weights=weights,
                                    metric=metric)
    results[nf] = curr_result
    curr_statistics = experiment_statistics(curr_result)
    curr_statistics["nan_filler"] = nf
    statistics.append(curr_statistics)

cols = ["nan_filler"] + list(curr_statistics.keys())[:-1]
statistics_table = pd.DataFrame(statistics, columns=cols)
statistics_table.to_csv(output_data_directory + "/statistics-nan_filler.csv")
statistics_table.to_excel(statistics_excel_writer, "nan_filler")
#show table
display(statistics_table.sort_values(cols[2:]))
#plots
experiment_plots(results, "plot-nan_filler.pdf")


nan_filler mae rmse sd p50 p75 p90 p95 min max
1 -92.092 2.078653 2.377476 1.154523 2.00000 2.745414 3.631983 4.193252 0.001298 7.430791
2 -92.920 2.078960 2.380165 1.159507 2.00000 2.773798 3.629042 4.153457 0.000967 7.491982
0 -92.000 2.088900 2.392967 1.167969 2.00000 2.744989 3.633771 4.196731 0.002054 7.501539
3 -101.200 2.086407 2.404318 1.195442 2.00000 2.889136 3.669568 4.110202 0.002409 7.040686
4 -138.000 2.307689 2.688679 1.380389 2.09817 3.147668 4.000581 4.818592 0.001288 7.593842

Impact of orientation on the results


In [28]:
filename_prefixes = ["left-to-right-point", "right-to-left-point"]
filename_prefix_data_scenarios = {}
#filename_prefix_data_scenarios["all"] = default_data_scenario
for filename_prefix in filename_prefixes:
    filename_prefix_data_scenarios[filename_prefix] = default_data_scenario[wifi_samples["filename"].str.startswith(filename_prefix)].reset_index(drop=True)

filename_prefix_test_data_scenarios = {}
filename_prefix_test_data_scenarios["all"] = default_data_scenario
for filename_prefix in filename_prefixes:
    filename_prefix_test_data_scenarios[filename_prefix] = default_data_scenario[wifi_samples["filename"].str.startswith(filename_prefix)].reset_index(drop=True)

n_neighbors=15
weights="distance"
metric="braycurtis"
nan_filler = default_data_scenario[mac_addresses].min().min()*1.001
scaler = preprocessing.StandardScaler()
# Results and statistics accumulators
results = {}
statistics = []
for train_data_keys, train_data in filename_prefix_data_scenarios.items():
    for test_data_keys, test_data in filename_prefix_test_data_scenarios.items():
        curr_data = train_data.fillna(nan_filler)
        curr_test_data = test_data.fillna(nan_filler)
        curr_result = knn_experiment(curr_data,
                                     curr_test_data,
                                     mac_addresses,
                                     coord_cols,
                                     scaler=scaler,
                                     algorithm="brute",
                                     n_neighbors=n_neighbors,
                                     weights=weights,
                                     metric=metric)
        label = "Train: "+train_data_keys+" Test: "+test_data_keys
        results[label] = curr_result
        curr_statistics = experiment_statistics(curr_result)
        curr_statistics["orientation"] = label
        statistics.append(curr_statistics)

cols = ["orientation"] + list(curr_statistics.keys())[:-1]
statistics_table = pd.DataFrame(statistics, columns=cols)
statistics_table.to_csv(output_data_directory + "/statistics-orientation.csv")
statistics_table.to_excel(statistics_excel_writer, "orientation")
#show table
display(statistics_table.sort_values(cols[2:]))
#plots
experiment_plots(results, "plot-orientation.pdf")


orientation mae rmse sd p50 p75 p90 p95 min max
1 Train: left-to-right-point Test: left-to-right... 2.078508 2.398607 1.198329 2.035648 2.742955 3.454114 4.118626 2.818865e-03 7.359108
5 Train: right-to-left-point Test: right-to-left... 2.108240 2.443425 1.236409 2.000000 2.780447 3.721208 4.549109 3.037623e-02 7.345844
3 Train: right-to-left-point Test: all 2.226596 2.605164 1.353137 2.000000 2.901959 4.035957 4.878423 9.237604e-03 8.000000
0 Train: left-to-right-point Test: all 2.271967 2.620056 1.305589 2.110889 3.062813 4.000044 4.493745 8.950904e-16 7.359108
4 Train: right-to-left-point Test: left-to-right... 2.344952 2.757432 1.452188 2.120310 3.005699 4.560532 5.347580 9.237604e-03 8.000000
2 Train: left-to-right-point Test: right-to-left... 2.465425 2.824193 1.378966 2.124233 3.480434 4.247193 4.735090 8.950904e-16 6.551945

Impact of the spacing between reference points on the results


In [29]:
subset_reference_points_scenarios = {}
coords_indices = default_data_scenario.groupby(coord_cols).indices

# Keep every other reference point ("odd"/"even" halves) to simulate
# doubling the spacing between reference points.
odd_coords_keys = list(coords_indices.keys())[0::2]
odd_ids = []
for key in odd_coords_keys:
    odd_ids.extend(coords_indices[key])

even_coords_keys = list(coords_indices.keys())[1::2]
even_ids = []
for key in even_coords_keys:
    even_ids.extend(coords_indices[key])

subset_reference_points_scenarios["odd"] = default_data_scenario.loc[odd_ids].reset_index(drop=True)
subset_reference_points_scenarios["even"] = default_data_scenario.loc[even_ids].reset_index(drop=True)
subset_reference_points_scenarios["all"] = default_data_scenario

n_neighbors=15
weights="distance"
metric="braycurtis"
nan_filler = default_data_scenario[mac_addresses].min().min()*1.001
scaler = preprocessing.StandardScaler()
# Results and statistics accumulators
results = {}
statistics = []
for train_data_keys, train_data in subset_reference_points_scenarios.items():
    curr_data = train_data.fillna(nan_filler)
    curr_test_data = default_data_scenario.fillna(nan_filler)
    curr_result = knn_experiment(curr_data,
                                 curr_test_data,
                                 mac_addresses,
                                 coord_cols,
                                 scaler=scaler,
                                 algorithm="brute",
                                 n_neighbors=n_neighbors,
                                 weights=weights,
                                 metric=metric)
    results[train_data_keys] = curr_result
    curr_statistics = experiment_statistics(curr_result)
    curr_statistics["reference_points_spacing"] = train_data_keys
    statistics.append(curr_statistics)

cols = ["reference_points_spacing"] + list(curr_statistics.keys())[:-1]
statistics_table = pd.DataFrame(statistics, columns=cols)
statistics_table.to_csv(output_data_directory + "/statistics-reference_points_spacing.csv")
statistics_table.to_excel(statistics_excel_writer, "reference_points_spacing")
#show table
display(statistics_table.sort_values(cols[2:]))
#plots
experiment_plots(results, "plot-reference_points_spacing.pdf")


reference_points_spacing mae rmse sd p50 p75 p90 p95 min max
2 all 2.078653 2.377476 1.154523 2.0 2.745414 3.631983 4.193252 0.001298 7.430791
0 odd 3.640083 3.889761 1.372463 4.0 4.225830 4.899383 5.699411 0.004238 8.660702
1 even 4.058169 4.322931 1.491183 4.0 4.804449 5.897443 6.732381 0.040384 7.754048

Impact of the amount of available data on the results


In [30]:
n_neighbors=15
weights="distance"
metric="braycurtis"
nan_filler = default_data_scenario[mac_addresses].min().min()*1.001
scaler = preprocessing.StandardScaler()

# Fractions of the training data to keep, and how many random repetitions to average over.
partial_data = [0.9, 0.7, 0.5, 0.3, 0.1]
repetitions = 50
train_data = default_data_scenario[mac_addresses].copy()
target_values = default_data_scenario[coord_cols].copy()
# Stratification label: one stratum per (location, log file) combination.
target_values["label"] = default_data_scenario["x"].map(str) + "," + default_data_scenario["y"].map(str) + "," + default_data_scenario["filename"].map(str)

# Results and statistics accumulators
results = {}
statistics = []
for partial in partial_data:
    curr_result = pd.DataFrame()
    for repetition in range(repetitions):
        X_train, X_test, y_train, y_test = train_test_split(train_data,
                                                            target_values,
                                                            test_size=1-partial,
                                                            stratify=target_values["label"].values)
        #train data
        train_split_data = pd.concat([y_train, X_train], axis=1).reset_index(drop=True)
        #test data
        #test_split_data = pd.concat([y_test, X_test], axis=1).reset_index(drop=True)
        test_split_data = default_data_scenario
        
        curr_data = train_split_data.fillna(nan_filler)
        curr_test_data = test_split_data.fillna(nan_filler)
        curr_result = curr_result.append(knn_experiment(curr_data,
                                                        curr_test_data,
                                                        mac_addresses,
                                                        coord_cols,
                                                        scaler=scaler,
                                                        algorithm="brute",
                                                        n_neighbors=n_neighbors,
                                                        weights=weights,
                                                        metric=metric), ignore_index=True)
    results[partial] = curr_result
    curr_statistics = experiment_statistics(curr_result)
    curr_statistics["partial_data"] = partial
    statistics.append(curr_statistics)

cols = ["partial_data"] + list(curr_statistics.keys())[:-1]
statistics_table = pd.DataFrame(statistics, columns=cols)
statistics_table.to_csv(output_data_directory + "/statistics-partial_data.csv")
statistics_table.to_excel(statistics_excel_writer, "partial_data")
#show table
display(statistics_table.sort_values(cols[2:]))
#plots
experiment_plots(results, "plot-partial_data.pdf")


partial_data mae rmse sd p50 p75 p90 p95 min max
0 0.9 2.081682 2.380978 1.155717 2.000000 2.780895 3.621494 4.152299 0.000339 7.971286
1 0.7 2.088076 2.404489 1.192282 2.000000 2.840275 3.628147 4.154546 0.000104 8.621671
2 0.5 2.081793 2.425118 1.243932 2.000000 2.950175 3.706681 4.203153 0.000232 7.734365
3 0.3 2.170749 2.552576 1.342956 2.086415 3.154753 3.909247 4.393135 0.000013 8.201670
4 0.1 2.579019 3.050139 1.628514 2.394603 3.748661 4.882023 5.496493 0.000274 8.142210

Save all the statistics tables that were collected into a single Excel file


In [33]:
statistics_excel_writer.save()

Grid Search - Automatically searching for the best estimator parameters


In [32]:
k_neighbors_values = range(1, 31, 1)
weights_values = ["uniform", "distance"]
metric_values = ["euclidean", "manhattan", "canberra", "braycurtis"]
algorithm_values = ["brute"]

nan_filler = default_data_scenario[mac_addresses].min().min()*1.001
curr_data = default_data_scenario.fillna(nan_filler)

param_grid = {
    "kneighborsregressor__n_neighbors": list(k_neighbors_values),
    "kneighborsregressor__weights": weights_values,
    "kneighborsregressor__metric": metric_values,
    "kneighborsregressor__algorithm": algorithm_values,
}

cross_validation = LeaveOneGroupOut()
estimator = make_pipeline(preprocessing.StandardScaler(), KNeighborsRegressor())

grid = GridSearchCV(estimator,
                    param_grid=param_grid,
                    cv=cross_validation,
                    n_jobs=1,
                    scoring=sklearn.metrics.make_scorer(sklearn.metrics.mean_squared_error,
                                                        greater_is_better=False,
                                                        multioutput="uniform_average"))

grid.fit(curr_data[mac_addresses], curr_data[coord_cols], default_data_scenario_groups)
print("Best parameters set found on development set:")
print(grid.best_params_)
print("Grid scores on development set:")
# model_selection.GridSearchCV exposes results via cv_results_
# (grid_scores_ only existed in the old sklearn.grid_search module).
means = grid.cv_results_["mean_test_score"]
stds = grid.cv_results_["std_test_score"]
for mean_score, std_score, params in zip(means, stds, grid.cv_results_["params"]):
    print("%0.3f (+/-%0.03f) for %r" % (mean_score, std_score * 2, params))


Best parameters set found on development set:
{'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 30, 'kneighborsregressor__weights': 'distance'}
Grid scores on development set:
-6.694 (+/-21.451) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 1, 'kneighborsregressor__weights': 'uniform'}
-6.694 (+/-21.451) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 1, 'kneighborsregressor__weights': 'distance'}
-6.256 (+/-21.583) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 2, 'kneighborsregressor__weights': 'uniform'}
-6.245 (+/-21.591) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 2, 'kneighborsregressor__weights': 'distance'}
-5.922 (+/-20.757) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 3, 'kneighborsregressor__weights': 'uniform'}
-5.900 (+/-20.838) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 3, 'kneighborsregressor__weights': 'distance'}
-4.493 (+/-8.572) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 4, 'kneighborsregressor__weights': 'uniform'}
-4.450 (+/-8.597) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 4, 'kneighborsregressor__weights': 'distance'}
-4.431 (+/-8.436) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 5, 'kneighborsregressor__weights': 'uniform'}
-4.394 (+/-8.455) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 5, 'kneighborsregressor__weights': 'distance'}
-4.333 (+/-8.474) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 6, 'kneighborsregressor__weights': 'uniform'}
-4.307 (+/-8.461) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 6, 'kneighborsregressor__weights': 'distance'}
-4.300 (+/-8.387) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 7, 'kneighborsregressor__weights': 'uniform'}
-4.271 (+/-8.368) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 7, 'kneighborsregressor__weights': 'distance'}
-4.261 (+/-8.260) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 8, 'kneighborsregressor__weights': 'uniform'}
-4.233 (+/-8.256) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 8, 'kneighborsregressor__weights': 'distance'}
-4.192 (+/-8.299) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 9, 'kneighborsregressor__weights': 'uniform'}
-4.170 (+/-8.253) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 9, 'kneighborsregressor__weights': 'distance'}
-4.159 (+/-8.372) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 10, 'kneighborsregressor__weights': 'uniform'}
-4.135 (+/-8.276) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 10, 'kneighborsregressor__weights': 'distance'}
-4.146 (+/-8.435) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 11, 'kneighborsregressor__weights': 'uniform'}
-4.120 (+/-8.291) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 11, 'kneighborsregressor__weights': 'distance'}
-4.111 (+/-8.608) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 12, 'kneighborsregressor__weights': 'uniform'}
-4.083 (+/-8.399) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 12, 'kneighborsregressor__weights': 'distance'}
-4.092 (+/-8.804) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 13, 'kneighborsregressor__weights': 'uniform'}
-4.061 (+/-8.524) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 13, 'kneighborsregressor__weights': 'distance'}
-4.118 (+/-8.982) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 14, 'kneighborsregressor__weights': 'uniform'}
-4.075 (+/-8.624) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 14, 'kneighborsregressor__weights': 'distance'}
-4.116 (+/-9.201) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 15, 'kneighborsregressor__weights': 'uniform'}
-4.067 (+/-8.754) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 15, 'kneighborsregressor__weights': 'distance'}
-4.120 (+/-9.470) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 16, 'kneighborsregressor__weights': 'uniform'}
-4.062 (+/-8.938) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 16, 'kneighborsregressor__weights': 'distance'}
-4.125 (+/-9.636) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 17, 'kneighborsregressor__weights': 'uniform'}
-4.062 (+/-9.050) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 17, 'kneighborsregressor__weights': 'distance'}
-4.156 (+/-9.893) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 18, 'kneighborsregressor__weights': 'uniform'}
-4.077 (+/-9.224) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 18, 'kneighborsregressor__weights': 'distance'}
-4.184 (+/-10.103) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 19, 'kneighborsregressor__weights': 'uniform'}
-4.093 (+/-9.366) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 19, 'kneighborsregressor__weights': 'distance'}
-4.167 (+/-10.275) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 20, 'kneighborsregressor__weights': 'uniform'}
-4.071 (+/-9.488) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 20, 'kneighborsregressor__weights': 'distance'}
-4.175 (+/-10.506) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 21, 'kneighborsregressor__weights': 'uniform'}
-4.073 (+/-9.658) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 21, 'kneighborsregressor__weights': 'distance'}
-4.160 (+/-10.691) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 22, 'kneighborsregressor__weights': 'uniform'}
-4.056 (+/-9.795) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 22, 'kneighborsregressor__weights': 'distance'}
-4.181 (+/-10.910) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 23, 'kneighborsregressor__weights': 'uniform'}
-4.069 (+/-9.964) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 23, 'kneighborsregressor__weights': 'distance'}
-4.199 (+/-11.124) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 24, 'kneighborsregressor__weights': 'uniform'}
-4.080 (+/-10.126) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 24, 'kneighborsregressor__weights': 'distance'}
-4.208 (+/-11.246) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 25, 'kneighborsregressor__weights': 'uniform'}
-4.085 (+/-10.233) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 25, 'kneighborsregressor__weights': 'distance'}
-4.228 (+/-11.353) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 26, 'kneighborsregressor__weights': 'uniform'}
-4.097 (+/-10.320) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 26, 'kneighborsregressor__weights': 'distance'}
-4.254 (+/-11.481) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 27, 'kneighborsregressor__weights': 'uniform'}
-4.114 (+/-10.422) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 27, 'kneighborsregressor__weights': 'distance'}
-4.287 (+/-11.648) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 28, 'kneighborsregressor__weights': 'uniform'}
-4.136 (+/-10.552) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 28, 'kneighborsregressor__weights': 'distance'}
-4.304 (+/-11.720) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 29, 'kneighborsregressor__weights': 'uniform'}
-4.146 (+/-10.613) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 29, 'kneighborsregressor__weights': 'distance'}
-4.329 (+/-11.769) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 30, 'kneighborsregressor__weights': 'uniform'}
-4.161 (+/-10.648) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'euclidean', 'kneighborsregressor__n_neighbors': 30, 'kneighborsregressor__weights': 'distance'}
-4.316 (+/-5.676) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 1, 'kneighborsregressor__weights': 'uniform'}
-4.316 (+/-5.676) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 1, 'kneighborsregressor__weights': 'distance'}
-3.597 (+/-4.055) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 2, 'kneighborsregressor__weights': 'uniform'}
-3.598 (+/-4.094) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 2, 'kneighborsregressor__weights': 'distance'}
-3.394 (+/-3.577) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 3, 'kneighborsregressor__weights': 'uniform'}
-3.383 (+/-3.618) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 3, 'kneighborsregressor__weights': 'distance'}
-3.264 (+/-3.264) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 4, 'kneighborsregressor__weights': 'uniform'}
-3.251 (+/-3.302) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 4, 'kneighborsregressor__weights': 'distance'}
-3.198 (+/-3.261) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 5, 'kneighborsregressor__weights': 'uniform'}
-3.185 (+/-3.276) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 5, 'kneighborsregressor__weights': 'distance'}
-3.191 (+/-3.403) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 6, 'kneighborsregressor__weights': 'uniform'}
-3.173 (+/-3.395) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 6, 'kneighborsregressor__weights': 'distance'}
-3.164 (+/-3.446) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 7, 'kneighborsregressor__weights': 'uniform'}
-3.145 (+/-3.415) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 7, 'kneighborsregressor__weights': 'distance'}
-3.113 (+/-3.436) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 8, 'kneighborsregressor__weights': 'uniform'}
-3.099 (+/-3.410) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 8, 'kneighborsregressor__weights': 'distance'}
-3.084 (+/-3.408) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 9, 'kneighborsregressor__weights': 'uniform'}
-3.068 (+/-3.380) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 9, 'kneighborsregressor__weights': 'distance'}
-3.068 (+/-3.439) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 10, 'kneighborsregressor__weights': 'uniform'}
-3.051 (+/-3.399) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 10, 'kneighborsregressor__weights': 'distance'}
-3.041 (+/-3.427) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 11, 'kneighborsregressor__weights': 'uniform'}
-3.021 (+/-3.363) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 11, 'kneighborsregressor__weights': 'distance'}
-3.036 (+/-3.529) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 12, 'kneighborsregressor__weights': 'uniform'}
-3.011 (+/-3.437) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 12, 'kneighborsregressor__weights': 'distance'}
-3.008 (+/-3.560) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 13, 'kneighborsregressor__weights': 'uniform'}
-2.980 (+/-3.434) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 13, 'kneighborsregressor__weights': 'distance'}
-3.012 (+/-3.708) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 14, 'kneighborsregressor__weights': 'uniform'}
-2.979 (+/-3.545) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 14, 'kneighborsregressor__weights': 'distance'}
-3.008 (+/-3.798) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 15, 'kneighborsregressor__weights': 'uniform'}
-2.968 (+/-3.600) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 15, 'kneighborsregressor__weights': 'distance'}
-2.999 (+/-3.837) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 16, 'kneighborsregressor__weights': 'uniform'}
-2.956 (+/-3.612) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 16, 'kneighborsregressor__weights': 'distance'}
-2.975 (+/-3.882) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 17, 'kneighborsregressor__weights': 'uniform'}
-2.934 (+/-3.642) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 17, 'kneighborsregressor__weights': 'distance'}
-2.994 (+/-4.006) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 18, 'kneighborsregressor__weights': 'uniform'}
-2.945 (+/-3.720) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 18, 'kneighborsregressor__weights': 'distance'}
-2.999 (+/-4.077) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 19, 'kneighborsregressor__weights': 'uniform'}
-2.943 (+/-3.766) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 19, 'kneighborsregressor__weights': 'distance'}
-3.013 (+/-4.215) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 20, 'kneighborsregressor__weights': 'uniform'}
-2.949 (+/-3.864) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 20, 'kneighborsregressor__weights': 'distance'}
-3.029 (+/-4.357) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 21, 'kneighborsregressor__weights': 'uniform'}
-2.957 (+/-3.964) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 21, 'kneighborsregressor__weights': 'distance'}
-3.062 (+/-4.528) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 22, 'kneighborsregressor__weights': 'uniform'}
-2.980 (+/-4.093) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 22, 'kneighborsregressor__weights': 'distance'}
-3.064 (+/-4.631) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 23, 'kneighborsregressor__weights': 'uniform'}
-2.974 (+/-4.156) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 23, 'kneighborsregressor__weights': 'distance'}
-3.087 (+/-4.792) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 24, 'kneighborsregressor__weights': 'uniform'}
-2.986 (+/-4.262) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 24, 'kneighborsregressor__weights': 'distance'}
-3.125 (+/-4.961) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 25, 'kneighborsregressor__weights': 'uniform'}
-3.012 (+/-4.380) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 25, 'kneighborsregressor__weights': 'distance'}
-3.153 (+/-5.175) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 26, 'kneighborsregressor__weights': 'uniform'}
-3.031 (+/-4.533) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 26, 'kneighborsregressor__weights': 'distance'}
-3.174 (+/-5.302) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 27, 'kneighborsregressor__weights': 'uniform'}
-3.044 (+/-4.619) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 27, 'kneighborsregressor__weights': 'distance'}
-3.213 (+/-5.472) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 28, 'kneighborsregressor__weights': 'uniform'}
-3.074 (+/-4.742) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 28, 'kneighborsregressor__weights': 'distance'}
-3.244 (+/-5.619) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 29, 'kneighborsregressor__weights': 'uniform'}
-3.097 (+/-4.850) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 29, 'kneighborsregressor__weights': 'distance'}
-3.276 (+/-5.814) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 30, 'kneighborsregressor__weights': 'uniform'}
-3.121 (+/-4.996) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'manhattan', 'kneighborsregressor__n_neighbors': 30, 'kneighborsregressor__weights': 'distance'}
-4.352 (+/-3.827) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 1, 'kneighborsregressor__weights': 'uniform'}
-4.352 (+/-3.827) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 1, 'kneighborsregressor__weights': 'distance'}
-3.686 (+/-3.361) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 2, 'kneighborsregressor__weights': 'uniform'}
-3.674 (+/-3.378) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 2, 'kneighborsregressor__weights': 'distance'}
-3.469 (+/-3.369) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 3, 'kneighborsregressor__weights': 'uniform'}
-3.449 (+/-3.358) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 3, 'kneighborsregressor__weights': 'distance'}
-3.282 (+/-3.201) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 4, 'kneighborsregressor__weights': 'uniform'}
-3.268 (+/-3.197) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 4, 'kneighborsregressor__weights': 'distance'}
-3.200 (+/-3.174) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 5, 'kneighborsregressor__weights': 'uniform'}
-3.188 (+/-3.170) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 5, 'kneighborsregressor__weights': 'distance'}
-3.109 (+/-3.023) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 6, 'kneighborsregressor__weights': 'uniform'}
-3.100 (+/-3.032) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 6, 'kneighborsregressor__weights': 'distance'}
-3.073 (+/-3.095) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 7, 'kneighborsregressor__weights': 'uniform'}
-3.066 (+/-3.096) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 7, 'kneighborsregressor__weights': 'distance'}
-3.035 (+/-3.079) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 8, 'kneighborsregressor__weights': 'uniform'}
-3.028 (+/-3.081) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 8, 'kneighborsregressor__weights': 'distance'}
-3.029 (+/-3.114) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 9, 'kneighborsregressor__weights': 'uniform'}
-3.021 (+/-3.112) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 9, 'kneighborsregressor__weights': 'distance'}
-2.989 (+/-3.167) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 10, 'kneighborsregressor__weights': 'uniform'}
-2.982 (+/-3.159) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 10, 'kneighborsregressor__weights': 'distance'}
-2.941 (+/-3.117) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 11, 'kneighborsregressor__weights': 'uniform'}
-2.937 (+/-3.110) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 11, 'kneighborsregressor__weights': 'distance'}
-2.924 (+/-3.080) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 12, 'kneighborsregressor__weights': 'uniform'}
-2.920 (+/-3.076) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 12, 'kneighborsregressor__weights': 'distance'}
-2.897 (+/-3.096) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 13, 'kneighborsregressor__weights': 'uniform'}
-2.895 (+/-3.090) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 13, 'kneighborsregressor__weights': 'distance'}
-2.889 (+/-3.101) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 14, 'kneighborsregressor__weights': 'uniform'}
-2.887 (+/-3.096) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 14, 'kneighborsregressor__weights': 'distance'}
-2.873 (+/-3.130) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 15, 'kneighborsregressor__weights': 'uniform'}
-2.871 (+/-3.123) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 15, 'kneighborsregressor__weights': 'distance'}
-2.848 (+/-3.117) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 16, 'kneighborsregressor__weights': 'uniform'}
-2.847 (+/-3.113) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 16, 'kneighborsregressor__weights': 'distance'}
-2.842 (+/-3.152) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 17, 'kneighborsregressor__weights': 'uniform'}
-2.840 (+/-3.142) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 17, 'kneighborsregressor__weights': 'distance'}
-2.843 (+/-3.197) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 18, 'kneighborsregressor__weights': 'uniform'}
-2.839 (+/-3.181) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 18, 'kneighborsregressor__weights': 'distance'}
-2.827 (+/-3.190) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 19, 'kneighborsregressor__weights': 'uniform'}
-2.823 (+/-3.175) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 19, 'kneighborsregressor__weights': 'distance'}
-2.826 (+/-3.217) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 20, 'kneighborsregressor__weights': 'uniform'}
-2.820 (+/-3.198) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 20, 'kneighborsregressor__weights': 'distance'}
-2.814 (+/-3.229) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 21, 'kneighborsregressor__weights': 'uniform'}
-2.807 (+/-3.209) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 21, 'kneighborsregressor__weights': 'distance'}
-2.806 (+/-3.253) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 22, 'kneighborsregressor__weights': 'uniform'}
-2.800 (+/-3.229) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 22, 'kneighborsregressor__weights': 'distance'}
-2.793 (+/-3.260) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 23, 'kneighborsregressor__weights': 'uniform'}
-2.787 (+/-3.236) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 23, 'kneighborsregressor__weights': 'distance'}
-2.789 (+/-3.278) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 24, 'kneighborsregressor__weights': 'uniform'}
-2.783 (+/-3.251) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 24, 'kneighborsregressor__weights': 'distance'}
-2.794 (+/-3.278) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 25, 'kneighborsregressor__weights': 'uniform'}
-2.787 (+/-3.251) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 25, 'kneighborsregressor__weights': 'distance'}
-2.790 (+/-3.272) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 26, 'kneighborsregressor__weights': 'uniform'}
-2.781 (+/-3.247) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 26, 'kneighborsregressor__weights': 'distance'}
-2.799 (+/-3.301) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 27, 'kneighborsregressor__weights': 'uniform'}
-2.788 (+/-3.272) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 27, 'kneighborsregressor__weights': 'distance'}
-2.804 (+/-3.354) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 28, 'kneighborsregressor__weights': 'uniform'}
-2.790 (+/-3.316) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 28, 'kneighborsregressor__weights': 'distance'}
-2.790 (+/-3.362) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 29, 'kneighborsregressor__weights': 'uniform'}
-2.776 (+/-3.322) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 29, 'kneighborsregressor__weights': 'distance'}
-2.789 (+/-3.346) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 30, 'kneighborsregressor__weights': 'uniform'}
-2.775 (+/-3.311) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'canberra', 'kneighborsregressor__n_neighbors': 30, 'kneighborsregressor__weights': 'distance'}
-4.105 (+/-4.803) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 1, 'kneighborsregressor__weights': 'uniform'}
-4.105 (+/-4.803) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 1, 'kneighborsregressor__weights': 'distance'}
-3.422 (+/-3.621) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 2, 'kneighborsregressor__weights': 'uniform'}
-3.426 (+/-3.639) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 2, 'kneighborsregressor__weights': 'distance'}
-3.223 (+/-3.253) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 3, 'kneighborsregressor__weights': 'uniform'}
-3.214 (+/-3.280) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 3, 'kneighborsregressor__weights': 'distance'}
-3.162 (+/-3.112) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 4, 'kneighborsregressor__weights': 'uniform'}
-3.143 (+/-3.117) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 4, 'kneighborsregressor__weights': 'distance'}
-3.108 (+/-2.949) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 5, 'kneighborsregressor__weights': 'uniform'}
-3.086 (+/-2.948) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 5, 'kneighborsregressor__weights': 'distance'}
-3.032 (+/-2.833) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 6, 'kneighborsregressor__weights': 'uniform'}
-3.010 (+/-2.838) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 6, 'kneighborsregressor__weights': 'distance'}
-3.018 (+/-2.869) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 7, 'kneighborsregressor__weights': 'uniform'}
-2.993 (+/-2.863) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 7, 'kneighborsregressor__weights': 'distance'}
-2.978 (+/-2.846) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 8, 'kneighborsregressor__weights': 'uniform'}
-2.958 (+/-2.838) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 8, 'kneighborsregressor__weights': 'distance'}
-2.941 (+/-2.939) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 9, 'kneighborsregressor__weights': 'uniform'}
-2.923 (+/-2.915) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 9, 'kneighborsregressor__weights': 'distance'}
-2.932 (+/-2.970) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 10, 'kneighborsregressor__weights': 'uniform'}
-2.912 (+/-2.936) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 10, 'kneighborsregressor__weights': 'distance'}
-2.914 (+/-2.991) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 11, 'kneighborsregressor__weights': 'uniform'}
-2.893 (+/-2.948) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 11, 'kneighborsregressor__weights': 'distance'}
-2.911 (+/-3.028) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 12, 'kneighborsregressor__weights': 'uniform'}
-2.890 (+/-2.975) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 12, 'kneighborsregressor__weights': 'distance'}
-2.877 (+/-3.075) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 13, 'kneighborsregressor__weights': 'uniform'}
-2.858 (+/-3.011) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 13, 'kneighborsregressor__weights': 'distance'}
-2.865 (+/-3.141) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 14, 'kneighborsregressor__weights': 'uniform'}
-2.844 (+/-3.060) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 14, 'kneighborsregressor__weights': 'distance'}
-2.850 (+/-3.156) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 15, 'kneighborsregressor__weights': 'uniform'}
-2.826 (+/-3.063) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 15, 'kneighborsregressor__weights': 'distance'}
-2.861 (+/-3.211) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 16, 'kneighborsregressor__weights': 'uniform'}
-2.829 (+/-3.095) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 16, 'kneighborsregressor__weights': 'distance'}
-2.853 (+/-3.234) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 17, 'kneighborsregressor__weights': 'uniform'}
-2.818 (+/-3.109) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 17, 'kneighborsregressor__weights': 'distance'}
-2.871 (+/-3.295) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 18, 'kneighborsregressor__weights': 'uniform'}
-2.827 (+/-3.146) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 18, 'kneighborsregressor__weights': 'distance'}
-2.875 (+/-3.403) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 19, 'kneighborsregressor__weights': 'uniform'}
-2.826 (+/-3.230) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 19, 'kneighborsregressor__weights': 'distance'}
-2.898 (+/-3.518) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 20, 'kneighborsregressor__weights': 'uniform'}
-2.838 (+/-3.310) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 20, 'kneighborsregressor__weights': 'distance'}
-2.903 (+/-3.616) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 21, 'kneighborsregressor__weights': 'uniform'}
-2.837 (+/-3.378) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 21, 'kneighborsregressor__weights': 'distance'}
-2.910 (+/-3.656) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 22, 'kneighborsregressor__weights': 'uniform'}
-2.839 (+/-3.395) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 22, 'kneighborsregressor__weights': 'distance'}
-2.915 (+/-3.717) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 23, 'kneighborsregressor__weights': 'uniform'}
-2.841 (+/-3.438) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 23, 'kneighborsregressor__weights': 'distance'}
-2.919 (+/-3.775) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 24, 'kneighborsregressor__weights': 'uniform'}
-2.841 (+/-3.476) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 24, 'kneighborsregressor__weights': 'distance'}
-2.918 (+/-3.840) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 25, 'kneighborsregressor__weights': 'uniform'}
-2.838 (+/-3.521) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 25, 'kneighborsregressor__weights': 'distance'}
-2.932 (+/-3.932) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 26, 'kneighborsregressor__weights': 'uniform'}
-2.848 (+/-3.594) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 26, 'kneighborsregressor__weights': 'distance'}
-2.947 (+/-4.023) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 27, 'kneighborsregressor__weights': 'uniform'}
-2.858 (+/-3.656) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 27, 'kneighborsregressor__weights': 'distance'}
-2.965 (+/-4.099) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 28, 'kneighborsregressor__weights': 'uniform'}
-2.868 (+/-3.708) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 28, 'kneighborsregressor__weights': 'distance'}
-2.978 (+/-4.183) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 29, 'kneighborsregressor__weights': 'uniform'}
-2.876 (+/-3.766) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 29, 'kneighborsregressor__weights': 'distance'}
-3.000 (+/-4.282) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 30, 'kneighborsregressor__weights': 'uniform'}
-2.891 (+/-3.842) for {'kneighborsregressor__algorithm': 'brute', 'kneighborsregressor__metric': 'braycurtis', 'kneighborsregressor__n_neighbors': 30, 'kneighborsregressor__weights': 'distance'}
c:\developmenttools\anaconda3\lib\site-packages\sklearn\model_selection\_search.py:667: DeprecationWarning: The grid_scores_ attribute was deprecated in version 0.18 in favor of the more elaborate cv_results_ attribute. The grid_scores_ attribute will not be available from 0.20
  DeprecationWarning)
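
The DeprecationWarning above is triggered by reading the grid_scores_ attribute, which scikit-learn replaced with cv_results_ in 0.18 and removes in 0.20. The same per-combination report can be rebuilt from cv_results_, which also makes a sortable tabular summary easy. A minimal sketch, assuming the fitted GridSearchCV object is named grid_search (the actual variable name in this notebook may differ) and using the pandas and display imports set up at the top of the notebook:

In [ ]:
# A sketch of rebuilding the report above without the deprecated
# grid_scores_ attribute. `grid_search` is an assumed name for the
# fitted GridSearchCV object whose output is printed above.
results = grid_search.cv_results_

# Reproduce the "score (+/- spread) for {params}" lines. Whether the
# "(+/- ...)" column above shows one or two cross-validation standard
# deviations is not recoverable from the printout alone; one is used here.
for mean, std, params in zip(results["mean_test_score"],
                             results["std_test_score"],
                             results["params"]):
    print("%0.3f (+/-%0.3f) for %r" % (mean, std, params))

# A ranked tabular view is often easier to scan than the raw printout.
summary = pd.DataFrame(results)[[
    "param_kneighborsregressor__metric",
    "param_kneighborsregressor__n_neighbors",
    "param_kneighborsregressor__weights",
    "mean_test_score",
    "std_test_score",
    "rank_test_score",
]].sort_values("rank_test_score")
display(summary.head(10))

# The winning combination over the whole grid.
print(grid_search.best_score_, grid_search.best_params_)

Note that the scores are negative because GridSearchCV always maximizes, so an error-based scorer (presumably a negated error metric here, given the positioning errors are in meters) is reported with a flipped sign: less negative means lower error.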