In [1]:
from sklearn import preprocessing

In [2]:
l = [0, 1, 2, 3, 4]  # a one-dimensional list
print(l)


[0, 1, 2, 3, 4]

In [3]:
l_2d = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]  # a 3x3 two-dimensional list (list of lists)
print(l_2d)


[[0, 1, 2], [3, 4, 5], [6, 7, 8]]

In [4]:
mm = preprocessing.MinMaxScaler()  # scales each feature to [0, 1], the default feature_range

In [5]:
# fit_transform() expects a 2D array, so passing the flat list raises an error:
# mm.fit_transform(l)
# ValueError: Expected 2D array, got 1D array instead:
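
As the error message indicates, fit_transform() only accepts 2D input. A 1D list can still be scaled by reshaping it into a single column first; a minimal sketch using NumPy (the expected values are shown as comments):

import numpy as np

# Turn the flat list into a (5, 1) column vector, then scale it to [0, 1]
print(mm.fit_transform(np.array(l).reshape(-1, 1)))
# [[0.  ]
#  [0.25]
#  [0.5 ]
#  [0.75]
#  [1.  ]]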

In [6]:
l_2d_min_max = mm.fit_transform(l_2d)

In [7]:
print(l_2d_min_max)


[[0.  0.  0. ]
 [0.5 0.5 0.5]
 [1.  1.  1. ]]

In [8]:
print(type(l_2d_min_max))


<class 'numpy.ndarray'>
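
Since mm was fitted on l_2d above, it can also map the scaled values back to the original range. A short sketch using the fitted scaler:

# Undo the scaling with the statistics learned during fit
print(mm.inverse_transform(l_2d_min_max))
# [[0. 1. 2.]
#  [3. 4. 5.]
#  [6. 7. 8.]]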

In [9]:
print(preprocessing.minmax_scale(l))


[0.   0.25 0.5  0.75 1.  ]

In [10]:
print(preprocessing.minmax_scale(l_2d))


[[0.  0.  0. ]
 [0.5 0.5 0.5]
 [1.  1.  1. ]]

In [11]:
print(preprocessing.minmax_scale(l_2d, axis=1))


[[0.  0.5 1. ]
 [0.  0.5 1. ]
 [0.  0.5 1. ]]
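
minmax_scale() scales along axis=0 (per column) by default, which is why axis=1 is needed for row-wise scaling. The column-wise result can also be reproduced directly from the formula (x - min) / (max - min); a sketch using NumPy:

import numpy as np

a = np.array(l_2d)
# Column-wise min-max scaling, equivalent to minmax_scale(l_2d) with the default axis=0
print((a - a.min(axis=0)) / (a.max(axis=0) - a.min(axis=0)))
# [[0.  0.  0. ]
#  [0.5 0.5 0.5]
#  [1.  1.  1. ]]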

In [12]:
ss = preprocessing.StandardScaler()  # standardizes each feature to zero mean and unit variance

In [13]:
# StandardScaler likewise only accepts 2D input:
# print(ss.fit_transform(l))
# ValueError: Expected 2D array, got 1D array instead:
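
As with MinMaxScaler, a 1D list can be standardized by reshaping it into a single column first; a minimal sketch using NumPy:

import numpy as np

# Reshape to a (5, 1) column vector so StandardScaler accepts it
print(ss.fit_transform(np.array(l).reshape(-1, 1)))
# [[-1.41421356]
#  [-0.70710678]
#  [ 0.        ]
#  [ 0.70710678]
#  [ 1.41421356]]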

In [14]:
l_2d_standardization = ss.fit_transform(l_2d)

In [15]:
print(l_2d_standardization)


[[-1.22474487 -1.22474487 -1.22474487]
 [ 0.          0.          0.        ]
 [ 1.22474487  1.22474487  1.22474487]]

In [16]:
print(type(l_2d_standardization))


<class 'numpy.ndarray'>
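
The per-column statistics learned from l_2d are stored on the fitted scaler as the mean_ and scale_ attributes; a short sketch reading them back (expected values shown as comments):

# Per-column mean and standard deviation used for the standardization
print(ss.mean_)   # [3. 4. 5.]
print(ss.scale_)  # [2.44948974 2.44948974 2.44948974], i.e. sqrt(6)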

In [17]:
print(preprocessing.scale(l))


[-1.41421356 -0.70710678  0.          0.70710678  1.41421356]
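
scale() and StandardScaler center by the mean and divide by the population standard deviation (ddof=0), not the sample standard deviation. A quick check with NumPy, which also uses ddof=0 by default:

import numpy as np

a = np.array(l)
# Same result as preprocessing.scale(l): center by the mean, divide by the population std
print((a - a.mean()) / a.std())
# [-1.41421356 -0.70710678  0.          0.70710678  1.41421356]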

In [18]:
print(preprocessing.scale(l_2d))


[[-1.22474487 -1.22474487 -1.22474487]
 [ 0.          0.          0.        ]
 [ 1.22474487  1.22474487  1.22474487]]

In [19]:
print(preprocessing.scale(l_2d, axis=1))


[[-1.22474487  0.          1.22474487]
 [-1.22474487  0.          1.22474487]
 [-1.22474487  0.          1.22474487]]
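
With axis=1 each row is standardized independently, so every row of the result has mean 0 and (population) standard deviation 1. A quick verification sketch using NumPy:

import numpy as np

row_scaled = preprocessing.scale(l_2d, axis=1)
# Each row should come out with mean 0 and standard deviation 1 (up to floating-point rounding)
print(row_scaled.mean(axis=1))  # [0. 0. 0.]
print(row_scaled.std(axis=1))   # [1. 1. 1.]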