In [1]:
%matplotlib inline
from sklearn import datasets
from sklearn import linear_model
import matplotlib.pyplot as plt
import sklearn
print(sklearn.__version__)
In [2]:
# boston data
boston = datasets.load_boston()
y = boston.target
In [8]:
' '.join(dir(boston))
Out[8]:
In [13]:
boston['feature_names']
Out[13]:
In [ ]:
regr = linear_model.LinearRegression()
lm = regr.fit(boston.data, y)
predicted = regr.predict(boston.data)
In [14]:
fig, ax = plt.subplots()
ax.scatter(y, predicted)
ax.plot([y.min(), y.max()], [y.min(), y.max()], 'k--', lw=4)
ax.set_xlabel('$Measured$', fontsize = 20)
ax.set_ylabel('$Predicted$', fontsize = 20)
plt.show()
In [7]:
lm.intercept_, lm.coef_, lm.score(boston.data, y)
Out[7]:
In [19]:
import pandas as pd
df = pd.read_csv('/Users/chengjun/github/cjc2016/data/tianya_bbs_threads_list.txt', sep = "\t", header=None)
df = df.rename(columns={0: 'title', 1: 'link', 2: 'author', 3: 'author_page', 4: 'click', 5: 'reply', 6: 'time'})
df[:2]
Out[19]:
In [20]:
def randomSplit(dataX, dataY, num):
    """Randomly hold out `num` observations as a test set."""
    import random
    dataX_train = []
    dataX_test = []
    dataY_train = []
    dataY_test = []
    test_index = random.sample(range(len(dataX)), num)
    for k in range(len(dataX)):
        # each x value is wrapped in a list so sklearn sees a 2-D feature array
        if k in test_index:
            dataX_test.append([dataX[k]])
            dataY_test.append(dataY[k])
        else:
            dataX_train.append([dataX[k]])
            dataY_train.append(dataY[k])
    return dataX_train, dataX_test, dataY_train, dataY_test
In [22]:
import numpy as np
# Use only one feature: the log-transformed click count (the target is the log-transformed reply count)
# Split the data into training/testing sets
data_X_train, data_X_test, data_y_train, data_y_test = randomSplit(np.log(df.click+1), np.log(df.reply+1), 20)
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(data_X_train, data_y_train)
# R^2 (coefficient of determination): 1 is perfect prediction
print('Variance score: %.2f' % regr.score(data_X_test, data_y_test))
In [23]:
# Plot outputs
plt.scatter(data_X_test, data_y_test, color='black')
plt.plot(data_X_test, regr.predict(data_X_test), color='blue', linewidth=3)
plt.show()
In [24]:
# The coefficients
print('Coefficients: \n', regr.coef_)
In [25]:
# The mean squared error
print("Mean squared error: %.2f" % np.mean((regr.predict(data_X_test) - data_y_test) ** 2))
In [26]:
from sklearn.cross_validation import cross_val_score
regr = linear_model.LinearRegression()
scores = cross_val_score(regr, [[c] for c in df.click], df.reply, cv = 3)
scores.mean()
Out[26]:
In [27]:
from sklearn.cross_validation import cross_val_score
x = [[c] for c in np.log(df.click +0.1)]
y = np.log(df.reply+0.1)
regr = linear_model.LinearRegression()
scores = cross_val_score(regr, x, y , cv = 3)
scores.mean()
Out[27]:
In [96]:
# Mark titles that contain '转载' (repost) with 1, all others with 0
repost = []
for i in df.title:
    if u'转载' in i.decode('utf8'):
        repost.append(1)
    else:
        repost.append(0)
In [78]:
data_X = [[df.click[i], df.reply[i]] for i in range(len(df))]
data_X[:3]
Out[78]:
In [97]:
from sklearn.linear_model import LogisticRegression
df['repost'] = repost
model = LogisticRegression()
model.fit(data_X, df.repost)
model.score(data_X, df.repost)
Out[97]:
In [86]:
def randomSplitLogistic(dataX, dataY, num):
    """Randomly hold out `num` observations as a test set (feature rows kept as-is)."""
    import random
    dataX_train = []
    dataX_test = []
    dataY_train = []
    dataY_test = []
    test_index = random.sample(range(len(dataX)), num)
    for k in range(len(dataX)):
        if k in test_index:
            dataX_test.append(dataX[k])
            dataY_test.append(dataY[k])
        else:
            dataX_train.append(dataX[k])
            dataY_train.append(dataY[k])
    return dataX_train, dataX_test, dataY_train, dataY_test
In [98]:
# Split the data into training/testing sets
data_X_train, data_X_test, data_y_train, data_y_test = randomSplitLogistic(data_X, df.repost, 20)
# Create logistic regression object
log_regr = LogisticRegression()
# Train the model using the training sets
log_regr.fit(data_X_train, data_y_train)
# Mean accuracy on the test set: 1 is perfect prediction
print('Accuracy: %.2f' % log_regr.score(data_X_test, data_y_test))
In [99]:
logre = LogisticRegression()
scores = cross_val_score(logre, data_X,df.repost, cv = 3)
scores.mean()
Out[99]:
Naive Bayes is a classification technique based on Bayes' theorem with an assumption of independence among predictors.
In simple terms, a naive Bayes classifier assumes that the presence of a particular feature in a class is unrelated to the presence of any other feature.
Why is it known as 'naive'? For example, a fruit may be considered an apple if it is red, round, and about 3 inches in diameter. Even if these features depend on each other or on the existence of the other features, the classifier treats all of these properties as contributing independently to the probability that the fruit is an apple.
Bayes' theorem provides a way to compute the posterior probability $P(c|x)$ from $P(c)$, $P(x)$, and $P(x|c)$: $P(c|x) = \frac{P(x|c)\,P(c)}{P(x)}$
Step 1: Convert the data set into a frequency table
Step 2: Create a likelihood table by computing the conditional probability of each feature value given each class.
Step 3: Now, use Naive Bayesian equation to calculate the posterior probability for each class. The class with the highest posterior probability is the outcome of prediction.
For example, suppose we want to know whether players will play when the weather is sunny. We can answer this using the method of posterior probability discussed above.
$P(Yes | Sunny) = \frac{P( Sunny | Yes) * P(Yes) } {P (Sunny)}$
Here we have $P(Sunny|Yes) = 3/9 = 0.33$, $P(Sunny) = 5/14 = 0.36$, $P(Yes) = 9/14 = 0.64$.
Now, $P(Yes | Sunny) = \frac{0.33 \times 0.64}{0.36} = 0.60$, which is the higher posterior probability, so the prediction is Yes.
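To make the three steps concrete, the cell below is a minimal sketch that rebuilds the frequency counts from a small hypothetical weather/play dataset (the data are an assumption, constructed only so that the counts match the numbers quoted above) and applies Bayes' theorem directly.
In [ ]:
# Hypothetical weather/play data: 14 days, 9 'Yes', 5 'Sunny', 3 days both Sunny and Yes
weather = ['Sunny'] * 5 + ['Overcast'] * 4 + ['Rainy'] * 5
play = ['Yes', 'Yes', 'Yes', 'No', 'No'] + ['Yes'] * 4 + ['Yes', 'Yes', 'No', 'No', 'No']

n = float(len(weather))
p_yes = play.count('Yes') / n                         # P(Yes) = 9/14
p_sunny = weather.count('Sunny') / n                  # P(Sunny) = 5/14
p_sunny_given_yes = sum(1 for w, p in zip(weather, play)
                        if w == 'Sunny' and p == 'Yes') / float(play.count('Yes'))  # P(Sunny|Yes) = 3/9

# Bayes' theorem: P(Yes|Sunny) = P(Sunny|Yes) * P(Yes) / P(Sunny)
p_yes_given_sunny = p_sunny_given_yes * p_yes / p_sunny
print('P(Yes|Sunny) = %.2f' % p_yes_given_sunny)      # ~0.60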
In [17]:
from sklearn import naive_bayes
' '.join(dir(naive_bayes))
Out[17]:
In [29]:
#Import Library of Gaussian Naive Bayes model
from sklearn.naive_bayes import GaussianNB
import numpy as np
#assigning predictor and target variables
x= np.array([[-3,7],[1,5], [1,2], [-2,0], [2,3], [-4,0], [-1,1], [1,1], [-2,2], [2,7], [-4,1], [-2,7]])
Y = np.array([3, 3, 3, 3, 4, 3, 3, 4, 3, 4, 4, 4])
In [30]:
#Create a Gaussian Classifier
model = GaussianNB()
# Train the model using the training sets
model.fit(x[:8], Y[:8])
#Predict Output
predicted = model.predict([[1, 2], [3, 4]])
print(predicted)
In [31]:
model.score(x[8:], Y[8:])
Out[31]:
In k-fold CV, the training set is split into k smaller sets (other approaches are described below, but generally follow the same principles). The following procedure is followed for each of the k "folds": a model is trained using k-1 of the folds as training data, and the resulting model is validated on the remaining fold.
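Below is a minimal sketch of that procedure, using plain NumPy index splitting rather than a specific scikit-learn helper so it does not depend on the library version; the toy x and y arrays are assumptions for illustration only.
In [ ]:
import numpy as np
from sklearn import linear_model

# Toy data: y is a noisy linear function of x (illustration only)
rng = np.random.RandomState(0)
x = rng.rand(100, 1)
y = 3 * x[:, 0] + 0.1 * rng.rand(100)

k = 5
indices = np.arange(len(x))
rng.shuffle(indices)
folds = np.array_split(indices, k)                        # k roughly equal blocks of indices

fold_scores = []
for i in range(k):
    test_idx = folds[i]                                   # hold out fold i
    train_idx = np.concatenate(folds[:i] + folds[i + 1:]) # train on the other k-1 folds
    m = linear_model.LinearRegression()
    m.fit(x[train_idx], y[train_idx])
    fold_scores.append(m.score(x[test_idx], y[test_idx])) # R^2 on the held-out fold

print(np.mean(fold_scores))                               # average score over the k folds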
In [48]:
data_X_train, data_X_test, data_y_train, data_y_test = randomSplit(df.click, df.reply, 20)
# Train the model using the training sets
model.fit(data_X_train, data_y_train)
#Predict Output
predicted = model.predict(data_X_test)
print(predicted)
In [49]:
model.score(data_X_test, data_y_test)
Out[49]:
In [51]:
from sklearn.cross_validation import cross_val_score
model = GaussianNB()
scores = cross_val_score(model, [[c] for c in df.click], df.reply, cv = 5)
scores.mean()
Out[51]:
In [92]:
from sklearn import tree
model = tree.DecisionTreeClassifier(criterion='gini')
In [100]:
data_X_train, data_X_test, data_y_train, data_y_test = randomSplitLogistic(data_X, df.repost, 20)
model.fit(data_X_train,data_y_train)
model.score(data_X_train,data_y_train)
Out[100]:
In [95]:
# Predict
model.predict(data_X_test)
Out[95]:
In [102]:
# cross-validation
scores = cross_val_score(model, data_X, df.repost, cv = 3)
scores.mean()
Out[102]:
In [108]:
from sklearn import svm
# Create SVM classification object
model = svm.SVC()
In [107]:
' '.join(dir(svm))
Out[107]:
In [109]:
data_X_train, data_X_test, data_y_train, data_y_test = randomSplitLogistic(data_X, df.repost, 20)
model.fit(data_X_train,data_y_train)
model.score(data_X_train,data_y_train)
Out[109]:
In [110]:
# Predict
model.predict(data_X_test)
Out[110]:
In [117]:
# cross-validation
scores = []
cvs = [3, 5, 10, 25, 50, 75, 100]
for i in cvs:
    score = cross_val_score(model, data_X, df.repost, cv = i)
    scores.append(score.mean())  # try to tune cv
In [119]:
plt.plot(cvs, scores, 'b-o')
plt.xlabel('$cv$', fontsize = 20)
plt.ylabel('$Score$', fontsize = 20)
plt.show()
Essentials of machine learning algorithms (with Python and R code): http://blog.csdn.net/a6225301/article/details/50479672
The "Python Machine Learning" book code repository and info resource https://github.com/rasbt/python-machine-learning-book
An Introduction to Statistical Learning (James, Witten, Hastie, Tibshirani, 2013) : Python code https://github.com/JWarmenhoven/ISLR-python
BuildingMachineLearningSystemsWithPython https://github.com/luispedro/BuildingMachineLearningSystemsWithPython