In [ ]:
# Load data
import pandas as pd
import numpy as np
df = pd.read_csv('/Users/annette/Desktop/IntroToDataScienceClass/Lesson1/Numpy and Pandas/TitanicData.csv')

In [ ]:
# Replace missing age values with the median age
# (Series.median() skips NaN; np.median() would return NaN here because of the missing values)
medianAge = df['Age'].median()
df['Age'] = df['Age'].fillna(medianAge)
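
As an optional sanity check (my addition, not in the original notebook), the next cell confirms no missing ages remain; it assumes df from the cell above.
In [ ]:
# Sanity check (added): count remaining missing Age values; should print 0
print(df['Age'].isnull().sum())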

In [ ]:
# Create Dummy Variables
dummy_Sex = pd.get_dummies(df['Sex'], prefix='Sex')
dummy_PClass = pd.get_dummies(df['Pclass'], prefix='PClass')

cols_to_keep = ['Age','Fare','SibSp','Parch']
df2 = df[cols_to_keep].join([dummy_Sex,dummy_PClass])
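
An optional variation (my addition, not part of the original notebook): dropping the first level of each dummy avoids perfectly collinear columns. It makes no difference to the tree-based models below, but would matter for linear models.
In [ ]:
# Alternative encoding (hypothetical variant): drop one level per categorical variable
dummy_Sex_d = pd.get_dummies(df['Sex'], prefix='Sex', drop_first=True)
dummy_PClass_d = pd.get_dummies(df['Pclass'], prefix='PClass', drop_first=True)
df2_alt = df[cols_to_keep].join([dummy_Sex_d, dummy_PClass_d])
print(df2_alt.columns.tolist())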

In [ ]:
# Create Training and Test Sets
X = df2
Y = df['Survived']
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.4, random_state=49)

In [ ]:
# Use Decision Tree Classifier 
from sklearn import tree
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X_train, Y_train)

accuracy = clf.score(X_test, Y_test)
print("Accuracy: %0.2f" % (accuracy))

In [ ]:
# Use Random Forest
from sklearn.ensemble import RandomForestClassifier

# Create and train the random forest
rf = RandomForestClassifier(n_estimators=200, n_jobs=-1, random_state=121873)
rf.fit(X_train, Y_train)

Y_pred = rf.predict(X_test)
print("Accuracy: %0.2f" % rf.score(X_test, Y_test))
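
A quick way to see which inputs the forest relies on (my addition); it assumes rf and df2 from the cells above.
In [ ]:
# Rank features by the random forest's importance scores (added sketch)
importances = pd.Series(rf.feature_importances_, index=df2.columns)
print(importances.sort_values(ascending=False))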

In [ ]:
# Compare both models with 5-fold cross-validation
from sklearn.model_selection import cross_val_score
scores = cross_val_score(clf, X, Y, cv=5, scoring='accuracy')
print("Decision Tree Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2)) 

scores = cross_val_score(rf, X, Y, cv=5, scoring='accuracy')
print("Random Forest Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))