In [ ]:
import numpy as np
import pandas as pd
import xgboost as xgb

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score
from collections import Counter

%matplotlib notebook
import matplotlib.pyplot as plt

In [ ]:
# Load the raw train/test CSVs.
# NOTE(review): assumes both files sit in the working directory.
train = pd.read_csv('Train.csv')
test = pd.read_csv('Test.csv')

# Features / target split. `axis=1` is spelled explicitly: the positional
# second argument to DataFrame.drop was removed in pandas 2.0.
X = train.drop(["Loan_ID", "Loan_Status"], axis=1)
y = train['Loan_Status']

# Keep the test IDs for the submission file; drop them from the features.
Loan_ID = test["Loan_ID"].copy()
Z = test.drop(["Loan_ID"], axis=1)

# Encode the target as 1/0. A single dict replace reassigned to `y`
# replaces the two chained `inplace=True` calls, which act on a view of
# a `train` column and are fragile in modern pandas.
y = y.replace({"Y": 1, "N": 0})

In [ ]:
def prepare(X):
    """Impute missing values, add an income/loan ratio feature, and
    integer-encode categorical columns.

    Parameters
    ----------
    X : pd.DataFrame
        Feature frame; must contain ``ApplicantIncome`` and
        ``LoanAmount`` columns.

    Returns
    -------
    pd.DataFrame
        A new frame (the input is NOT mutated) where every column's NaNs
        are filled with that column's most frequent non-null value, a
        derived column ``k = ApplicantIncome / LoanAmount`` is appended,
        and each object column is replaced by integer codes in sorted
        category order (the same mapping sklearn's LabelEncoder yields).
    """
    # Work on a copy: the original filled NaNs in place on the caller's
    # frame while returning a different (joined) object — inconsistent
    # hidden mutation.
    X = X.copy()

    for col in X.columns:
        # Most frequent *non-null* value. The original counted NaN too,
        # so NaN itself could win and the column stayed unfilled.
        top_v = Counter(X[col].dropna()).most_common(1)[0][0]
        X[col] = X[col].fillna(top_v)

    # Derived feature: income-to-loan ratio.
    # NOTE(review): a zero LoanAmount would produce inf here — the
    # original had the same behavior; confirm the data excludes zeros.
    X["k"] = X.ApplicantIncome / X.LoanAmount

    # Integer-encode categoricals. pd.Categorical sorts its categories,
    # so these codes match LabelEncoder's fit_transform output.
    cat_columns = [c for c in X.columns if X[c].dtype.name == 'object']
    for col in cat_columns:
        X[col] = X[col].astype('category').cat.codes

    return X

In [ ]:
# Preprocess train and test features with the same pipeline.
# NOTE(review): the categorical encoding inside `prepare` is fit
# independently on each frame, so a category present in only one of
# X/Z shifts the integer codes between train and test — verify the
# category sets match before trusting the test-set predictions.
X = prepare(X)
Z = prepare(Z)

In [ ]:
# Hold out 30% of the labelled data for validation.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=5
)

# Gradient-boosted tree classifier; `clf` is reused below to score Test.csv.
clf = xgb.XGBClassifier(n_estimators=90, max_depth=4)
clf.fit(X_train, y_train)

# Validation accuracy — last expression, displayed by the notebook.
accuracy_score(y_test, clf.predict(X_test))

In [ ]:
# Visualise the engineered ratio feature against the target, using the
# explicit Axes interface rather than the pyplot state machine.
fig, ax = plt.subplots(figsize=(10, 8))
ax.scatter(y, X.k, alpha=0.5, s=50)
ax.set_ylabel('k = ApplicantIncome / LoanAmount ')
ax.set_xlabel('Loan Status')
plt.show()

In [ ]:
# Score the test features and map the 1/0 class codes back to Y/N labels.
pred = clf.predict(Z).astype(str)
pred[pred == "1"] = "Y"
pred[pred == "0"] = "N"

# Manual override: force approval for very high income-to-loan ratios.
# NOTE(review): 90 is a magic threshold — confirm where it comes from.
# A plain boolean mask replaces the original `pred[np.where(Z.k > 90),]`,
# whose trailing comma wrapped np.where's tuple in another tuple —
# sequence-as-index behavior NumPy has deprecated/removed.
pred[(Z.k > 90).to_numpy()] = "Y"

In [ ]:
# Submission file: one row per test Loan_ID with its predicted label.
submission = pd.DataFrame(
    {"Loan_ID": Loan_ID, "Loan_Status": pred}
)
submission.to_csv("output.csv", index=False)