In [1]:
import pandas
import numpy as np
import scipy.stats as sp
from sklearn import neighbors
from sklearn.neighbors import DistanceMetric
from pprint import pprint

# Load the Kaggle Titanic training set; the first row holds the column names.
titanic_data = pandas.read_csv("train.csv", header=0)
titanic_data.head(5)
Out[1]:
   PassengerId  Survived  Pclass                                                Name     Sex   Age  SibSp  Parch            Ticket     Fare Cabin Embarked
0            1         0       3                             Braund, Mr. Owen Harris    male  22.0      1      0         A/5 21171   7.2500   NaN        S
1            2         1       1  Cumings, Mrs. John Bradley (Florence Briggs Th...  female  38.0      1      0          PC 17599  71.2833   C85        C
2            3         1       3                              Heikkinen, Miss. Laina  female  26.0      0      0  STON/O2. 3101282   7.9250   NaN        S
3            4         1       1        Futrelle, Mrs. Jacques Heath (Lily May Peel)  female  35.0      1      0            113803  53.1000  C123        S
4            5         0       3                            Allen, Mr. William Henry    male  35.0      0      0            373450   8.0500   NaN        S
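The Name, Ticket, and Cabin columns are dropped in the next cell. If the kept columns are known up front, a hedged alternative (a sketch, not what this notebook does) is to load only those columns with read_csv's usecols argument:

# Hypothetical variant: load only the columns the model will use,
# instead of dropping Name/Ticket/Cabin afterwards.
keep_cols = ['PassengerId', 'Survived', 'Pclass', 'Sex', 'Age',
             'SibSp', 'Parch', 'Fare', 'Embarked']
titanic_slim = pandas.read_csv('train.csv', usecols=keep_cols)
print(titanic_slim.columns.tolist())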
In [2]:
# Drop the free-text columns that won't be used as features.
titanic_data.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
titanic_data.head(1)
Out[2]:
   PassengerId  Survived  Pclass   Sex   Age  SibSp  Parch  Fare Embarked
0            1         0       3  male  22.0      1      0  7.25        S
In [3]:
titanic_data.describe()
Out[3]:
       PassengerId    Survived      Pclass         Age       SibSp       Parch        Fare
count   891.000000  891.000000  891.000000  714.000000  891.000000  891.000000  891.000000
mean    446.000000    0.383838    2.308642   29.699118    0.523008    0.381594   32.204208
std     257.353842    0.486592    0.836071   14.526497    1.102743    0.806057   49.693429
min       1.000000    0.000000    1.000000    0.420000    0.000000    0.000000    0.000000
25%     223.500000    0.000000    2.000000   20.125000    0.000000    0.000000    7.910400
50%     446.000000    0.000000    3.000000   28.000000    0.000000    0.000000   14.454200
75%     668.500000    1.000000    3.000000   38.000000    1.000000    0.000000   31.000000
max     891.000000    1.000000    3.000000   80.000000    8.000000    6.000000  512.329200
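describe() only covers the numeric columns; a short sketch of the kind of grouped summary that motivates keeping Sex and Pclass as features (computed from the same frame, output not shown here):

# Survival rate by sex and by passenger class.
print(titanic_data.groupby('Sex')['Survived'].mean())
print(titanic_data.groupby('Pclass')['Survived'].mean())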
In [4]:
titanic_data.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 891 entries, 0 to 890
Data columns (total 9 columns):
PassengerId 891 non-null int64
Survived 891 non-null int64
Pclass 891 non-null int64
Sex 891 non-null object
Age 714 non-null float64
SibSp 891 non-null int64
Parch 891 non-null int64
Fare 891 non-null float64
Embarked 889 non-null object
dtypes: float64(2), int64(5), object(2)
memory usage: 62.7+ KB
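info() already shows that Age (714 non-null) and Embarked (889 non-null) are the only incomplete columns; a one-line sketch that reports the missing counts directly, using the same DataFrame:

# 177 missing Age values and 2 missing Embarked values are expected here.
print(titanic_data.isnull().sum())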
Convert the categorical columns (Sex and Embarked) to numeric codes.
In [5]:
# Encode the categorical columns as numbers. Port is kept as float because
# two Embarked values are missing and map() leaves them as NaN.
titanic_data['Embarked'].unique()
titanic_data['Port'] = titanic_data['Embarked'].map({'C': 1, 'S': 2, 'Q': 3}).astype(float)
titanic_data['Sex'].unique()
titanic_data['Gender'] = titanic_data['Sex'].map({'female': 0, 'male': 1}).astype(int)
titanic_data = titanic_data.drop(['Sex', 'Embarked'], axis=1)
titanic_data.head(5)
Out[5]:
   PassengerId  Survived  Pclass   Age  SibSp  Parch     Fare  Port  Gender
0            1         0       3  22.0      1      0   7.2500   2.0       1
1            2         1       1  38.0      1      0  71.2833   1.0       0
2            3         1       3  26.0      0      0   7.9250   2.0       0
3            4         1       1  35.0      1      0  53.1000   2.0       0
4            5         0       3  35.0      0      0   8.0500   2.0       1
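Because map() silently turns any unmapped category into NaN, a quick sanity check on the new codes is worthwhile; a minimal sketch on the frame built above:

# Confirm the encodings: Gender should contain only 0/1, and Port only
# 1.0/2.0/3.0 plus the two NaNs from the missing Embarked values.
print(titanic_data['Gender'].value_counts(dropna=False))
print(titanic_data['Port'].value_counts(dropna=False))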
In [6]:
cols = titanic_data.columns.tolist()
print(cols)
# cols = [cols[1]] + cols[0:1] + cols[2:]
# print(cols)
titanic_data = titanic_data[cols]
# print(titanic_data.head(5))
# Features: every column after PassengerId and Survived; target: Survived.
train_data = titanic_data[cols[2:]]
train_target = titanic_data[cols[1]]
# print(train_target.head(5))
print(train_data, train_target)
pprint('column_list: {0}'.format(cols))
['PassengerId', 'Survived', 'Pclass', 'Age', 'SibSp', 'Parch', 'Fare', 'Port', 'Gender']
Pclass Age SibSp Parch Fare Port Gender
0 3 22.0 1 0 7.2500 2.0 1
1 1 38.0 1 0 71.2833 1.0 0
2 3 26.0 0 0 7.9250 2.0 0
3 1 35.0 1 0 53.1000 2.0 0
4 3 35.0 0 0 8.0500 2.0 1
5 3 NaN 0 0 8.4583 3.0 1
6 1 54.0 0 0 51.8625 2.0 1
7 3 2.0 3 1 21.0750 2.0 1
8 3 27.0 0 2 11.1333 2.0 0
9 2 14.0 1 0 30.0708 1.0 0
10 3 4.0 1 1 16.7000 2.0 0
11 1 58.0 0 0 26.5500 2.0 0
12 3 20.0 0 0 8.0500 2.0 1
13 3 39.0 1 5 31.2750 2.0 1
14 3 14.0 0 0 7.8542 2.0 0
15 2 55.0 0 0 16.0000 2.0 0
16 3 2.0 4 1 29.1250 3.0 1
17 2 NaN 0 0 13.0000 2.0 1
18 3 31.0 1 0 18.0000 2.0 0
19 3 NaN 0 0 7.2250 1.0 0
20 2 35.0 0 0 26.0000 2.0 1
21 2 34.0 0 0 13.0000 2.0 1
22 3 15.0 0 0 8.0292 3.0 0
23 1 28.0 0 0 35.5000 2.0 1
24 3 8.0 3 1 21.0750 2.0 0
25 3 38.0 1 5 31.3875 2.0 0
26 3 NaN 0 0 7.2250 1.0 1
27 1 19.0 3 2 263.0000 2.0 1
28 3 NaN 0 0 7.8792 3.0 0
29 3 NaN 0 0 7.8958 2.0 1
.. ... ... ... ... ... ... ...
861 2 21.0 1 0 11.5000 2.0 1
862 1 48.0 0 0 25.9292 2.0 0
863 3 NaN 8 2 69.5500 2.0 0
864 2 24.0 0 0 13.0000 2.0 1
865 2 42.0 0 0 13.0000 2.0 0
866 2 27.0 1 0 13.8583 1.0 0
867 1 31.0 0 0 50.4958 2.0 1
868 3 NaN 0 0 9.5000 2.0 1
869 3 4.0 1 1 11.1333 2.0 1
870 3 26.0 0 0 7.8958 2.0 1
871 1 47.0 1 1 52.5542 2.0 0
872 1 33.0 0 0 5.0000 2.0 1
873 3 47.0 0 0 9.0000 2.0 1
874 2 28.0 1 0 24.0000 1.0 0
875 3 15.0 0 0 7.2250 1.0 0
876 3 20.0 0 0 9.8458 2.0 1
877 3 19.0 0 0 7.8958 2.0 1
878 3 NaN 0 0 7.8958 2.0 1
879 1 56.0 0 1 83.1583 1.0 0
880 2 25.0 0 1 26.0000 2.0 0
881 3 33.0 0 0 7.8958 2.0 1
882 3 22.0 0 0 10.5167 2.0 0
883 2 28.0 0 0 10.5000 2.0 1
884 3 25.0 0 0 7.0500 2.0 1
885 3 39.0 0 5 29.1250 3.0 0
886 2 27.0 0 0 13.0000 2.0 1
887 1 19.0 0 0 30.0000 2.0 0
888 3 NaN 1 2 23.4500 2.0 0
889 1 26.0 0 0 30.0000 1.0 1
890 3 32.0 0 0 7.7500 3.0 1
[891 rows x 7 columns] 0 0
1 1
2 1
3 1
4 0
5 0
6 0
7 0
8 1
9 1
10 1
11 1
12 0
13 0
14 0
15 1
16 0
17 1
18 0
19 1
20 0
21 1
22 1
23 1
24 0
25 1
26 0
27 0
28 1
29 0
..
861 0
862 1
863 0
864 0
865 1
866 1
867 0
868 0
869 1
870 0
871 1
872 0
873 0
874 1
875 1
876 0
877 0
878 0
879 1
880 1
881 0
882 0
883 0
884 0
885 0
886 0
887 1
888 0
889 1
890 0
Name: Survived, dtype: int64
("column_list: ['PassengerId', 'Survived', 'Pclass', 'Age', 'SibSp', 'Parch', "
"'Fare', 'Port', 'Gender']")
In [7]:
# Apply the same cleaning to the test set: drop the text columns, encode
# Sex/Embarked, and fill the missing Fare and Age values with the mean.
df_test = pandas.read_csv('test.csv')
df_test = df_test.drop(['Name', 'Ticket', 'Cabin'], axis=1)
df_test['Gender'] = df_test['Sex'].map({'female': 0, 'male': 1}).astype(int)
df_test['Port'] = df_test['Embarked'].map({'C': 1, 'S': 2, 'Q': 3}).astype(int)
ids = df_test.PassengerId.values
df_test = df_test.drop(['Sex', 'Embarked', 'PassengerId'], axis=1)
df_test.Fare.fillna(np.mean(df_test.Fare), inplace=True)
df_test.Age.fillna(np.mean(df_test.Age), inplace=True)
df_test.info()
train_data.info()
test_data = df_test.values
test_data
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 418 entries, 0 to 417
Data columns (total 7 columns):
Pclass 418 non-null int64
Age 418 non-null float64
SibSp 418 non-null int64
Parch 418 non-null int64
Fare 418 non-null float64
Gender 418 non-null int64
Port 418 non-null int64
dtypes: float64(2), int64(5)
memory usage: 22.9 KB
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 891 entries, 0 to 890
Data columns (total 7 columns):
Pclass 891 non-null int64
Age 714 non-null float64
SibSp 891 non-null int64
Parch 891 non-null int64
Fare 891 non-null float64
Port 889 non-null float64
Gender 891 non-null int64
dtypes: float64(3), int64(4)
memory usage: 48.8 KB
Out[7]:
array([[ 3. , 34.5 , 0. , ..., 7.8292 ,
1. , 3. ],
[ 3. , 47. , 1. , ..., 7. ,
0. , 2. ],
[ 2. , 62. , 0. , ..., 9.6875 ,
1. , 3. ],
...,
[ 3. , 38.5 , 0. , ..., 7.25 ,
1. , 2. ],
[ 3. , 30.27259036, 0. , ..., 8.05 ,
1. , 2. ],
[ 3. , 30.27259036, 1. , ..., 22.3583 ,
1. , 1. ]])
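Note the column order: the two info() blocks above show the test frame ending in Gender, Port while train_data ends in Port, Gender. KNeighborsClassifier only sees positional arrays, so the features would be misaligned at prediction time unless the frames are put in the same order; a minimal fix that re-derives test_data with the aligned order:

# Reorder the test columns to match the training columns so the
# positional feature arrays line up.
df_test = df_test[train_data.columns]
test_data = df_test.values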
In [8]:
# Fill the remaining gaps in the training set: the mean age for the 177
# missing Age values, and port code 3 for the two missing Embarked rows.
titanic_data.Age.fillna(np.mean(titanic_data.Age), inplace=True)
titanic_data.Port.fillna(3.0, inplace=True)
train_data = titanic_data[cols[2:]]
train_target = titanic_data[cols[1]]
print(titanic_data.Age)
titanic_data.info()
0 22.000000
1 38.000000
2 26.000000
3 35.000000
4 35.000000
5 29.699118
6 54.000000
7 2.000000
8 27.000000
9 14.000000
10 4.000000
11 58.000000
12 20.000000
13 39.000000
14 14.000000
15 55.000000
16 2.000000
17 29.699118
18 31.000000
19 29.699118
20 35.000000
21 34.000000
22 15.000000
23 28.000000
24 8.000000
25 38.000000
26 29.699118
27 19.000000
28 29.699118
29 29.699118
...
861 21.000000
862 48.000000
863 29.699118
864 24.000000
865 42.000000
866 27.000000
867 31.000000
868 29.699118
869 4.000000
870 26.000000
871 47.000000
872 33.000000
873 47.000000
874 28.000000
875 15.000000
876 20.000000
877 19.000000
878 29.699118
879 56.000000
880 25.000000
881 33.000000
882 22.000000
883 28.000000
884 25.000000
885 39.000000
886 27.000000
887 19.000000
888 29.699118
889 26.000000
890 32.000000
Name: Age, dtype: float64
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 891 entries, 0 to 890
Data columns (total 9 columns):
PassengerId 891 non-null int64
Survived 891 non-null int64
Pclass 891 non-null int64
Age 891 non-null float64
SibSp 891 non-null int64
Parch 891 non-null int64
Fare 891 non-null float64
Port 891 non-null float64
Gender 891 non-null int64
dtypes: float64(3), int64(6)
memory usage: 62.7 KB
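Filling the two missing ports with a hard-coded 3.0 assigns them to Queenstown, even though 2.0 (Southampton) is by far the most common value. A hedged alternative sketch, had it been applied instead of the fillna calls above, is the mode for Port and the median for Age:

# Alternative imputation (not what the notebook does): most common port
# and median age instead of a fixed code and the mean.
port_mode = titanic_data['Port'].mode()[0]
age_median = titanic_data['Age'].median()
titanic_data['Port'] = titanic_data['Port'].fillna(port_mode)
titanic_data['Age'] = titanic_data['Age'].fillna(age_median)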
In [9]:
# Fit a default 5-nearest-neighbours classifier on the training features,
# predict on the test set, and write a Kaggle-style submission file with
# PassengerId and the predicted Survived flag.
model = neighbors.KNeighborsClassifier()
print(train_data.values)
train_data.info()
print(train_target.values)
print(train_data.values)
model.fit(train_data.values, train_target.values)
print(model.fit)
output = model.predict(test_data).astype(int)
print(output)
result = np.c_[ids.astype(int), output]
print(result)
df_result = pandas.DataFrame(result[:, 0:2], columns=['PassengerId', 'Survived'])
df_result.to_csv('titanic.csv', index=False)
[[ 3. 22. 1. ..., 7.25 2. 1. ]
[ 1. 38. 1. ..., 71.2833 1. 0. ]
[ 3. 26. 0. ..., 7.925 2. 0. ]
...,
[ 3. 29.69911765 1. ..., 23.45 2. 0. ]
[ 1. 26. 0. ..., 30. 1. 1. ]
[ 3. 32. 0. ..., 7.75 3. 1. ]]
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 891 entries, 0 to 890
Data columns (total 7 columns):
Pclass 891 non-null int64
Age 891 non-null float64
SibSp 891 non-null int64
Parch 891 non-null int64
Fare 891 non-null float64
Port 891 non-null float64
Gender 891 non-null int64
dtypes: float64(3), int64(4)
memory usage: 48.8 KB
[0 1 1 1 0 0 0 0 1 1 1 1 0 0 0 1 0 1 0 1 0 1 1 1 0 1 0 0 1 0 0 1 1 0 0 0 1
0 0 1 0 0 0 1 1 0 0 1 0 0 0 0 1 1 0 1 1 0 1 0 0 1 0 0 0 1 1 0 1 0 0 0 0 0
1 0 0 0 1 1 0 1 1 0 1 1 0 0 1 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 1 1 0 1 0
0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 0 1 1 0 0 0 0 1 0 0 1 0 0 0 0 1 1 0 0 0 1 0
0 0 0 1 0 0 0 0 1 0 0 0 0 1 0 0 0 1 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 1
0 1 1 0 0 1 0 1 1 1 1 0 0 1 0 0 0 0 0 1 0 0 1 1 1 0 1 0 0 0 1 1 0 1 0 1 0
0 0 1 0 1 0 0 0 1 0 0 1 0 0 0 1 0 0 0 1 0 0 0 0 0 1 1 0 0 0 0 0 0 1 1 1 1
1 0 1 0 0 0 0 0 1 1 1 0 1 1 0 1 1 0 0 0 1 0 0 0 1 0 0 1 0 1 1 1 1 0 0 0 0
0 0 1 1 1 1 0 1 0 1 1 1 0 1 1 1 0 0 0 1 1 0 1 1 0 0 1 1 0 1 0 1 1 1 1 0 0
0 1 0 0 1 1 0 1 1 0 0 0 1 1 1 1 0 0 0 0 0 0 0 1 0 1 1 0 0 0 0 0 0 1 1 1 1
1 0 0 0 0 1 1 0 0 0 1 1 0 1 0 0 0 1 0 1 1 1 0 1 1 0 0 0 0 1 1 0 0 0 0 0 0
1 0 0 0 0 1 0 1 0 1 1 0 0 0 0 0 0 0 0 1 1 0 1 1 1 1 0 0 1 0 1 0 0 1 0 0 1
1 1 1 1 1 1 0 0 0 1 0 1 0 1 1 0 1 0 0 0 0 0 0 0 0 1 0 0 1 1 0 0 0 0 0 1 0
0 0 1 1 0 1 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 1 1 0 1 1 0 1 1 0 0 1 0
1 0 1 0 0 1 0 0 1 0 0 0 1 0 0 1 0 1 0 1 0 1 1 0 0 1 0 0 1 1 0 1 1 0 0 1 1
0 1 0 1 1 0 0 0 0 0 0 0 0 0 1 1 1 1 1 0 0 1 1 0 1 1 1 0 0 0 1 0 1 0 0 0 1
0 0 0 0 1 0 0 1 1 0 0 0 1 0 0 1 1 1 0 0 1 0 0 1 0 0 1 0 0 1 1 0 0 0 0 1 0
0 1 0 1 0 0 1 0 0 0 0 0 1 0 1 1 1 0 1 0 1 0 1 0 1 0 0 0 0 0 0 1 0 0 0 1 0
0 0 0 1 1 0 0 1 0 0 0 1 0 1 0 1 0 0 0 0 0 0 0 1 1 1 1 0 0 0 0 1 0 0 1 1 0
0 0 0 1 1 1 1 1 0 1 0 0 0 1 1 0 0 1 0 0 0 1 0 1 1 0 0 1 0 0 0 0 0 0 1 0 0
1 0 1 0 1 0 0 1 0 0 1 1 0 0 1 1 0 0 0 1 0 0 1 1 0 1 0 0 0 0 0 0 0 0 1 0 0
1 0 1 1 1 0 0 0 0 1 0 1 0 0 0 0 0 0 0 1 1 0 0 0 1 1 1 1 0 0 0 0 1 0 0 0 0
0 0 0 0 0 0 1 1 0 1 0 0 0 1 1 1 1 1 0 0 0 1 0 0 1 1 0 0 1 0 0 0 0 0 0 1 0
0 0 1 0 1 1 1 1 0 0 0 1 0 0 1 1 0 0 1 0 1 0 0 1 1 0 0 0 1 1 0 0 0 0 0 0 1
0 1 0]
[[ 3. 22. 1. ..., 7.25 2. 1. ]
[ 1. 38. 1. ..., 71.2833 1. 0. ]
[ 3. 26. 0. ..., 7.925 2. 0. ]
...,
[ 3. 29.69911765 1. ..., 23.45 2. 0. ]
[ 1. 26. 0. ..., 30. 1. 1. ]
[ 3. 32. 0. ..., 7.75 3. 1. ]]
<bound method SupervisedIntegerMixin.fit of KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski',
metric_params=None, n_jobs=1, n_neighbors=5, p=2,
weights='uniform')>
[0 0 0 0 0 0 0 1 0 1 0 0 1 0 1 1 0 0 0 0 0 1 1 1 1 0 1 0 0 0 1 1 0 0 1 0 0
0 0 1 0 0 0 0 1 0 0 0 1 0 1 0 1 1 0 0 0 0 0 1 0 1 0 0 1 0 0 0 0 1 0 0 0 0
1 0 0 0 0 0 1 1 1 0 0 0 1 0 0 1 0 0 1 0 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0
0 1 0 1 0 0 1 1 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0
0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 1 1 0 1
0 1 0 0 0 1 0 1 0 0 0 1 0 0 0 0 1 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 1 0 0 0
0 0 0 1 0 0 0 0 1 1 0 0 1 0 1 0 0 0 0 0 1 0 0 1 0 0 0 0 1 0 1 0 0 0 0 0 0
0 0 0 0 1 0 0 0 0 0 0 1 0 1 0 0 1 0 0 0 0 0 1 0 1 1 0 0 1 0 0 0 0 0 1 0 0
1 0 0 0 0 0 0 0 0 0 1 1 1 0 0 0 0 0 1 0 1 0 1 1 0 0 0 1 1 0 0 1 0 0 0 0 0
0 1 0 1 0 1 0 0 0 0 1 0 0 0 0 0 0 1 0 0 1 1 1 1 0 0 0 0 0 0 0 1 0 0 0 0 0
0 1 0 1 1 1 0 0 0 0 0 0 1 0 0 1 0 0 0 0 1 0 0 0 1 1 0 1 0 0 1 0 1 0 0 0 0
1 0 1 0 1 0 0 1 0 0 0]
[[ 892 0]
[ 893 0]
[ 894 0]
[ 895 0]
[ 896 0]
[ 897 0]
[ 898 0]
[ 899 1]
[ 900 0]
[ 901 1]
[ 902 0]
[ 903 0]
[ 904 1]
[ 905 0]
[ 906 1]
[ 907 1]
[ 908 0]
[ 909 0]
[ 910 0]
[ 911 0]
[ 912 0]
[ 913 1]
[ 914 1]
[ 915 1]
[ 916 1]
[ 917 0]
[ 918 1]
[ 919 0]
[ 920 0]
[ 921 0]
[ 922 1]
[ 923 1]
[ 924 0]
[ 925 0]
[ 926 1]
[ 927 0]
[ 928 0]
[ 929 0]
[ 930 0]
[ 931 1]
[ 932 0]
[ 933 0]
[ 934 0]
[ 935 0]
[ 936 1]
[ 937 0]
[ 938 0]
[ 939 0]
[ 940 1]
[ 941 0]
[ 942 1]
[ 943 0]
[ 944 1]
[ 945 1]
[ 946 0]
[ 947 0]
[ 948 0]
[ 949 0]
[ 950 0]
[ 951 1]
[ 952 0]
[ 953 1]
[ 954 0]
[ 955 0]
[ 956 1]
[ 957 0]
[ 958 0]
[ 959 0]
[ 960 0]
[ 961 1]
[ 962 0]
[ 963 0]
[ 964 0]
[ 965 0]
[ 966 1]
[ 967 0]
[ 968 0]
[ 969 0]
[ 970 0]
[ 971 0]
[ 972 1]
[ 973 1]
[ 974 1]
[ 975 0]
[ 976 0]
[ 977 0]
[ 978 1]
[ 979 0]
[ 980 0]
[ 981 1]
[ 982 0]
[ 983 0]
[ 984 1]
[ 985 0]
[ 986 0]
[ 987 0]
[ 988 1]
[ 989 0]
[ 990 0]
[ 991 0]
[ 992 1]
[ 993 0]
[ 994 0]
[ 995 0]
[ 996 0]
[ 997 0]
[ 998 0]
[ 999 0]
[1000 0]
[1001 0]
[1002 0]
[1003 0]
[1004 1]
[1005 0]
[1006 1]
[1007 0]
[1008 0]
[1009 1]
[1010 1]
[1011 0]
[1012 0]
[1013 0]
[1014 1]
[1015 0]
[1016 0]
[1017 0]
[1018 0]
[1019 0]
[1020 1]
[1021 0]
[1022 0]
[1023 0]
[1024 0]
[1025 0]
[1026 0]
[1027 0]
[1028 0]
[1029 0]
[1030 0]
[1031 0]
[1032 0]
[1033 1]
[1034 1]
[1035 0]
[1036 0]
[1037 0]
[1038 0]
[1039 0]
[1040 0]
[1041 0]
[1042 1]
[1043 0]
[1044 0]
[1045 0]
[1046 0]
[1047 0]
[1048 0]
[1049 0]
[1050 0]
[1051 0]
[1052 0]
[1053 1]
[1054 0]
[1055 0]
[1056 1]
[1057 0]
[1058 1]
[1059 0]
[1060 0]
[1061 0]
[1062 0]
[1063 0]
[1064 0]
[1065 0]
[1066 0]
[1067 0]
[1068 0]
[1069 1]
[1070 0]
[1071 1]
[1072 0]
[1073 1]
[1074 1]
[1075 0]
[1076 1]
[1077 0]
[1078 1]
[1079 0]
[1080 0]
[1081 0]
[1082 1]
[1083 0]
[1084 1]
[1085 0]
[1086 0]
[1087 0]
[1088 1]
[1089 0]
[1090 0]
[1091 0]
[1092 0]
[1093 1]
[1094 1]
[1095 0]
[1096 0]
[1097 0]
[1098 0]
[1099 0]
[1100 1]
[1101 0]
[1102 0]
[1103 0]
[1104 0]
[1105 0]
[1106 0]
[1107 0]
[1108 0]
[1109 1]
[1110 1]
[1111 0]
[1112 0]
[1113 0]
[1114 0]
[1115 0]
[1116 0]
[1117 1]
[1118 0]
[1119 0]
[1120 0]
[1121 0]
[1122 1]
[1123 1]
[1124 0]
[1125 0]
[1126 1]
[1127 0]
[1128 1]
[1129 0]
[1130 0]
[1131 0]
[1132 0]
[1133 0]
[1134 1]
[1135 0]
[1136 0]
[1137 1]
[1138 0]
[1139 0]
[1140 0]
[1141 0]
[1142 1]
[1143 0]
[1144 1]
[1145 0]
[1146 0]
[1147 0]
[1148 0]
[1149 0]
[1150 0]
[1151 0]
[1152 0]
[1153 0]
[1154 0]
[1155 1]
[1156 0]
[1157 0]
[1158 0]
[1159 0]
[1160 0]
[1161 0]
[1162 1]
[1163 0]
[1164 1]
[1165 0]
[1166 0]
[1167 1]
[1168 0]
[1169 0]
[1170 0]
[1171 0]
[1172 0]
[1173 1]
[1174 0]
[1175 1]
[1176 1]
[1177 0]
[1178 0]
[1179 1]
[1180 0]
[1181 0]
[1182 0]
[1183 0]
[1184 0]
[1185 1]
[1186 0]
[1187 0]
[1188 1]
[1189 0]
[1190 0]
[1191 0]
[1192 0]
[1193 0]
[1194 0]
[1195 0]
[1196 0]
[1197 0]
[1198 1]
[1199 1]
[1200 1]
[1201 0]
[1202 0]
[1203 0]
[1204 0]
[1205 0]
[1206 1]
[1207 0]
[1208 1]
[1209 0]
[1210 1]
[1211 1]
[1212 0]
[1213 0]
[1214 0]
[1215 1]
[1216 1]
[1217 0]
[1218 0]
[1219 1]
[1220 0]
[1221 0]
[1222 0]
[1223 0]
[1224 0]
[1225 0]
[1226 1]
[1227 0]
[1228 1]
[1229 0]
[1230 1]
[1231 0]
[1232 0]
[1233 0]
[1234 0]
[1235 1]
[1236 0]
[1237 0]
[1238 0]
[1239 0]
[1240 0]
[1241 0]
[1242 1]
[1243 0]
[1244 0]
[1245 1]
[1246 1]
[1247 1]
[1248 1]
[1249 0]
[1250 0]
[1251 0]
[1252 0]
[1253 0]
[1254 0]
[1255 0]
[1256 1]
[1257 0]
[1258 0]
[1259 0]
[1260 0]
[1261 0]
[1262 0]
[1263 1]
[1264 0]
[1265 1]
[1266 1]
[1267 1]
[1268 0]
[1269 0]
[1270 0]
[1271 0]
[1272 0]
[1273 0]
[1274 1]
[1275 0]
[1276 0]
[1277 1]
[1278 0]
[1279 0]
[1280 0]
[1281 0]
[1282 1]
[1283 0]
[1284 0]
[1285 0]
[1286 1]
[1287 1]
[1288 0]
[1289 1]
[1290 0]
[1291 0]
[1292 1]
[1293 0]
[1294 1]
[1295 0]
[1296 0]
[1297 0]
[1298 0]
[1299 1]
[1300 0]
[1301 1]
[1302 0]
[1303 1]
[1304 0]
[1305 0]
[1306 1]
[1307 0]
[1308 0]
[1309 0]]
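k-nearest neighbours is distance based, so the unscaled Fare column (std of roughly 50 in describe() above) dominates Age, Pclass, and the 0/1 indicators. A hedged sketch of adding standardization and a cross-validated accuracy estimate on the training data (a variant, not part of the submission above):

from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score

# Standardize every feature before the distance computation, then
# estimate accuracy with 5-fold cross-validation.
scaled_knn = make_pipeline(StandardScaler(), KNeighborsClassifier(n_neighbors=5))
scores = cross_val_score(scaled_knn, train_data.values, train_target.values, cv=5)
print('mean CV accuracy: {:.3f}'.format(scores.mean()))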
In [ ]: