In [1]:
# Import the required libraries
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC

import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score

In [2]:
data = pd.read_csv('data.csv')
np.array(data['y'])


Out[2]:
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1])
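
A quick look at the raw table confirms the column layout; the optional cell below assumes the CSV carries the three columns x1, x2 and y that the next cell indexes.

In [ ]:
# Optional: inspect the first rows (columns x1, x2, y are assumed from later cells)
data.head()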

In [3]:
# Split the data into features X and labels y
X = np.array(data[['x1', 'x2']])
y = np.array(data['y'])

In [4]:
plt.scatter(X[:,0],X[:,1],c=y)


Out[4]:
<matplotlib.collections.PathCollection at 0x1a18503d68>
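
The rest of the notebook compares classifiers by re-colouring this scatter with each model's predictions. A small optional helper (not part of the original run; plot_decision_boundary is a name introduced here) makes that comparison more direct by drawing the predicted regions themselves:

In [ ]:
# Optional helper: shade the regions a fitted classifier predicts over the two
# features, then overlay the labelled points. Assumes X has exactly two columns.
def plot_decision_boundary(clf, X, y, step=0.02):
    x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5
    y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, step),
                         np.arange(y_min, y_max, step))
    zz = clf.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
    plt.contourf(xx, yy, zz, alpha=0.3)
    plt.scatter(X[:, 0], X[:, 1], c=y)
    plt.show()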

In [5]:
# Logistic Regression Classifier
lg_classifier = LogisticRegression()
lg_classifier.fit(X,y)


/Users/weixu/AI/tool/anaconda3/envs/py3/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:433: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.
  FutureWarning)
Out[5]:
LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,
          intercept_scaling=1, max_iter=100, multi_class='warn',
          n_jobs=None, penalty='l2', random_state=None, solver='warn',
          tol=0.0001, verbose=0, warm_start=False)
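
The FutureWarning above only concerns the upcoming change of default; naming the solver explicitly silences it. 'liblinear' is the pre-0.22 default, so the optional cell below reproduces the fit above without the warning:

In [ ]:
# Same model, solver named explicitly to silence the FutureWarning
lg_classifier = LogisticRegression(solver='liblinear')
lg_classifier.fit(X, y)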

In [6]:
y_p = lg_classifier.predict(X)

In [8]:
plt.scatter(X[:,0],X[:,1],c=y_p)
score = accuracy_score(y, y_p)
print("score" + str(score))


score0.6666666666666666
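
An accuracy of 0.667 is exactly the share of class-1 samples (64 of 96), i.e. the score of always predicting the majority class, which suggests the linear boundary of logistic regression barely separates class 0 on this data. The optional check below makes that baseline explicit:

In [ ]:
# Majority-class baseline: accuracy of predicting the most frequent label everywhere
print("baseline: " + str(max(np.mean(y), 1 - np.mean(y))))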

In [9]:
Dt_classifier = DecisionTreeClassifier()
Dt_classifier.fit(X, y)


Out[9]:
DecisionTreeClassifier(class_weight=None, criterion='gini', max_depth=None,
            max_features=None, max_leaf_nodes=None,
            min_impurity_decrease=0.0, min_impurity_split=None,
            min_samples_leaf=1, min_samples_split=2,
            min_weight_fraction_leaf=0.0, presort=False, random_state=None,
            splitter='best')

In [10]:
y_Dp = Dt_classifier.predict(X)
plt.scatter(X[:,0],X[:,1],c=y_Dp)
score = accuracy_score(y, y_Dp)
print("score" + str(score))


score1.0
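
An unconstrained decision tree can memorise all 96 points, so the 1.0 above is a training-set score rather than evidence of generalisation. A cross-validated estimate (an optional sketch; the exact numbers depend on the folds) is the fairer check:

In [ ]:
# Held-out estimate instead of training accuracy (numbers will vary with the folds)
from sklearn.model_selection import cross_val_score
cv_scores = cross_val_score(DecisionTreeClassifier(), X, y, cv=5)
print("cv score: " + str(cv_scores.mean()))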

In [11]:
svc_classifier = SVC()
svc_classifier.fit(X, y)


/Users/weixu/AI/tool/anaconda3/envs/py3/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.
  "avoid this warning.", FutureWarning)
Out[11]:
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
  decision_function_shape='ovr', degree=3, gamma='auto_deprecated',
  kernel='rbf', max_iter=-1, probability=False, random_state=None,
  shrinking=True, tol=0.001, verbose=False)
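
As with the logistic regression warning, passing gamma explicitly silences this FutureWarning; 'auto' (1 / n_features) reproduces the old default that was used above, while 'scale' is the value the warning recommends going forward:

In [ ]:
# Same model with gamma pinned explicitly, so the FutureWarning goes away
svc_classifier = SVC(gamma='auto')
svc_classifier.fit(X, y)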

In [12]:
y_sp = svc_classifier.predict(X)
plt.scatter(X[:,0],X[:,1],c=y_sp)
score = accuracy_score(y, y_sp)
print("score" + str(score))


score0.6666666666666666
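
The RBF result is very sensitive to gamma: larger values mean narrower kernels and a more flexible boundary around the training points, which the gamma=200 cell below probes directly. A small optional sweep (grid chosen arbitrarily) shows how the training accuracy moves with gamma:

In [ ]:
# Training accuracy for a few gamma values (arbitrary grid); larger gamma gives
# a more flexible fit to the training points
for g in [0.1, 1, 10, 100, 200]:
    clf = SVC(kernel='rbf', gamma=g).fit(X, y)
    print("gamma=" + str(g) + " score: " + str(accuracy_score(y, clf.predict(X))))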

In [13]:
svc_classifier1 = SVC(kernel='poly', degree=8)
svc_classifier1.fit(X, y)
y_sp1 = svc_classifier1.predict(X)
plt.scatter(X[:,0],X[:,1],c=y_sp1)
score = accuracy_score(y, y_sp1)
print("score" + str(score))


score0.6666666666666666
/Users/weixu/AI/tool/anaconda3/envs/py3/lib/python3.6/site-packages/sklearn/svm/base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.
  "avoid this warning.", FutureWarning)

In [14]:
svc_classifier1 = SVC(kernel='rbf', gamma=200)
svc_classifier1.fit(X, y)
y_sp1 = svc_classifier1.predict(X)
plt.scatter(X[:,0],X[:,1],c=y_sp1)

score = accuracy_score(y, y_sp1)
print("score" + str(score))


score1.0
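
With gamma=200 the kernels are likely narrow enough for the model to wrap itself around individual training points, so this perfect score is again a training-set score. Re-using the optional plot_decision_boundary helper sketched earlier makes the shape of that fit visible:

In [ ]:
# Visualise how locally the gamma=200 boundary follows the training points
# (uses the optional plot_decision_boundary helper defined above)
plot_decision_boundary(svc_classifier1, X, y)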

In [15]:
svc_classifier1 = SVC(kernel='linear')
svc_classifier1.fit(X, y)
y_sp1 = svc_classifier1.predict(X)
plt.scatter(X[:,0],X[:,1],c=y_sp1)
score = accuracy_score(y, y_sp1)
print("score" + str(score))


score0.6666666666666666
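
The linear kernel lands back at the 0.667 majority baseline, matching logistic regression. As an optional wrap-up (a sketch; the cross-validated numbers depend on the folds and may differ from the training scores printed above), the cell below puts the models side by side:

In [ ]:
# Training accuracy vs 5-fold cross-validation for the classifiers tried above
from sklearn.model_selection import cross_val_score
models = {
    'logistic regression': LogisticRegression(solver='liblinear'),
    'decision tree': DecisionTreeClassifier(),
    'svm rbf (gamma=auto)': SVC(gamma='auto'),
    'svm rbf (gamma=200)': SVC(kernel='rbf', gamma=200),
    'svm linear': SVC(kernel='linear'),
}
for name, clf in models.items():
    train_acc = accuracy_score(y, clf.fit(X, y).predict(X))
    cv_acc = cross_val_score(clf, X, y, cv=5).mean()
    print(name + " train: " + str(round(train_acc, 3)) + " cv: " + str(round(cv_acc, 3)))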

In [ ]: