In [7]:
import pandas as pd
import numpy as np
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import PolynomialFeatures

In [2]:
df = pd.read_excel('06.dataset.xlsx')

In [3]:
def softmax(x):
    """Compute softmax values for each set of scores in x."""
    sf = np.exp(x)                 # exponentiate each score
    sf = sf / np.sum(sf, axis=0)   # normalize so the values sum to 1
    return sf
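
Since exp is monotone, renormalizing with softmax never changes which class has the highest score; the function above is used later only to turn products of per-model class probabilities back into a distribution. A minimal check with made-up probability vectors:

p1 = np.array([0.3, 0.7])   # hypothetical class probabilities from one model
p2 = np.array([0.6, 0.4])   # hypothetical class probabilities from a second model

combined = softmax(p1 * p2)                          # renormalized product
assert np.isclose(combined.sum(), 1.0)               # it is a proper distribution
assert np.argmax(combined) == np.argmax(p1 * p2)     # softmax does not change the argmax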

Classifying listings as failed (유찰) vs. winning (낙찰) bids

For each round i from 0 to 5, the cells below take every listing that has already failed at least i times and predict whether it stops failing there (label 0) or fails again (label 1).
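
For concreteness, a small sketch of the per-round relabeling that every cell below repeats (the failure_bidding values here are made up):

failure_bidding = pd.Series([0, 1, 3, 2, 5])    # hypothetical counts of failed biddings

i = 1                                           # model for listings that failed at least once
y = failure_bidding[failure_bidding >= i].copy()
y[y == i] = 0                                   # stopped failing at round i
y[y > i] = 1                                    # failed again
print(y.tolist())                               # [0, 1, 1, 1]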

1) RandomForest


In [26]:
for i in range(6):
    # Listings that have already failed at least i times:
    # binary indicator columns plus log appraised land price and parcel area
    mask = df['failure_bidding'] >= i
    X = pd.concat([
            df.loc[mask, '공유자매수신고':'rdtuch_중로한면'],
            df.loc[mask, '가축사육제한구역':'현상변경허가 대상구역'],
            df.loc[mask, 'log_est_jiga'],
            df.loc[mask, 'area(m2)']
        ], axis=1)

    # Binary target: 0 = no further failure after round i, 1 = failed again
    y = df.loc[mask, 'failure_bidding'].copy()
    y[y == i] = 0
    y[y > i] = 1

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

    pipe_rf = Pipeline([
            ('rf', RandomForestClassifier())
        ])

    param_grid = [
        {'rf__n_estimators': np.arange(100, 150, 10),
         'rf__criterion': ['gini', 'entropy']}
    ]

    # 10-fold grid search over tree count and split criterion, optimizing recall
    gs = GridSearchCV(estimator=pipe_rf, param_grid=param_grid,
                      scoring='recall', cv=10, n_jobs=-1)
    gs = gs.fit(X_train, y_train)

    # Refit a forest with the best hyperparameters on the full training split
    model = RandomForestClassifier(n_estimators=gs.best_params_['rf__n_estimators'],
                                   n_jobs=-1,
                                   criterion=gs.best_params_['rf__criterion'])
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)

    print('---------------- ', i, '회 유찰 매물 ----------------')
    print(classification_report(y_test, y_pred))
    print(confusion_matrix(y_test, y_pred))


----------------  0 회 유찰 매물 ----------------
             precision    recall  f1-score   support

          0       0.60      0.28      0.39       618
          1       0.74      0.92      0.82      1375

avg / total       0.70      0.72      0.68      1993

[[ 176  442]
 [ 116 1259]]
----------------  1 회 유찰 매물 ----------------
             precision    recall  f1-score   support

          0       0.58      0.32      0.41       479
          1       0.71      0.88      0.79       923

avg / total       0.67      0.69      0.66      1402

[[152 327]
 [109 814]]
----------------  2 회 유찰 매물 ----------------
             precision    recall  f1-score   support

          0       0.58      0.53      0.56       426
          1       0.62      0.67      0.65       493

avg / total       0.61      0.61      0.61       919

[[227 199]
 [162 331]]
----------------  3 회 유찰 매물 ----------------
             precision    recall  f1-score   support

          0       0.57      0.62      0.60       248
          1       0.59      0.54      0.56       247

avg / total       0.58      0.58      0.58       495

[[154  94]
 [114 133]]
----------------  4 회 유찰 매물 ----------------
             precision    recall  f1-score   support

          0       0.59      0.75      0.66       125
          1       0.59      0.41      0.48       111

avg / total       0.59      0.59      0.58       236

[[94 31]
 [66 45]]
----------------  5 회 유찰 매물 ----------------
             precision    recall  f1-score   support

          0       0.63      0.59      0.61        68
          1       0.36      0.41      0.39        39

avg / total       0.54      0.52      0.53       107

[[40 28]
 [23 16]]
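
Aside: GridSearchCV already refits the winning configuration on the whole training split when refit=True (the default), so rebuilding the forest from gs.best_params_ by hand is optional; the tuned pipeline can be reused directly. A minimal sketch, reusing gs, X_test and y_test from the cell above:

# The best pipeline found by the grid search, already refit on X_train
best_model = gs.best_estimator_
y_pred = best_model.predict(X_test)
print(classification_report(y_test, y_pred))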

2) Naive Bayes


In [27]:
for i in range(6):
    mask = df['failure_bidding'] >= i
    X = pd.concat([
            df.loc[mask, '공유자매수신고':'rdtuch_중로한면'],
            df.loc[mask, '가축사육제한구역':'현상변경허가 대상구역'],
            df.loc[mask, 'log_est_jiga'],
            df.loc[mask, 'area(m2)']
        ], axis=1)

    # Binary target: 0 = no further failure after round i, 1 = failed again
    y = df.loc[mask, 'failure_bidding'].copy()
    y[y == i] = 0
    y[y > i] = 1

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

    # Bernoulli NB on the binary indicator columns, Gaussian NB on the two continuous ones
    model1 = BernoulliNB().fit(X_train.loc[:, '공유자매수신고':'현상변경허가 대상구역'], y_train)
    model2 = GaussianNB().fit(X_train.loc[:, 'log_est_jiga':'area(m2)'], y_train)

    prob1 = model1.predict_proba(X_test.loc[:, '공유자매수신고':'현상변경허가 대상구역'])
    prob2 = model2.predict_proba(X_test.loc[:, 'log_est_jiga':'area(m2)'])

    # Combine the two models by multiplying their class probabilities,
    # renormalize with softmax, and take the most likely class
    y_pred = np.zeros(len(prob1))
    for j in range(len(prob1)):
        y_pred[j] = np.argmax(softmax((prob1 * prob2)[j]))

    print('---------------- ', i, '회 유찰 매물 ----------------')
    print(classification_report(y_test, y_pred))
    print(confusion_matrix(y_test, y_pred))


----------------  0 회 유찰 매물 ----------------
             precision    recall  f1-score   support

          0       0.43      0.54      0.48       576
          1       0.79      0.71      0.75      1417

avg / total       0.69      0.66      0.67      1993

[[ 311  265]
 [ 416 1001]]
----------------  1 회 유찰 매물 ----------------
             precision    recall  f1-score   support

          0       0.40      0.49      0.44       475
          1       0.71      0.63      0.67       927

avg / total       0.60      0.58      0.59      1402

[[232 243]
 [344 583]]
----------------  2 회 유찰 매물 ----------------
             precision    recall  f1-score   support

          0       0.50      0.79      0.61       432
          1       0.61      0.29      0.39       487

avg / total       0.56      0.53      0.50       919

[[343  89]
 [346 141]]
----------------  3 회 유찰 매물 ----------------
             precision    recall  f1-score   support

          0       0.49      0.90      0.63       237
          1       0.59      0.13      0.22       258

avg / total       0.54      0.50      0.41       495

[[213  24]
 [224  34]]
----------------  4 회 유찰 매물 ----------------
             precision    recall  f1-score   support

          0       0.61      0.96      0.74       137
          1       0.72      0.13      0.22        99

avg / total       0.65      0.61      0.52       236

[[132   5]
 [ 86  13]]
----------------  5 회 유찰 매물 ----------------
             precision    recall  f1-score   support

          0       0.57      0.77      0.65        61
          1       0.42      0.22      0.29        46

avg / total       0.50      0.53      0.49       107

[[47 14]
 [36 10]]
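
Aside: each predict_proba above is already a posterior, i.e. proportional to P(x_block | y) * P(y), so the plain product prob1 * prob2 counts the class prior twice. A sketch of the corrected combination, reusing model1, prob1 and prob2 from the cell above:

# Divide one copy of the prior back out so the combination is
# proportional to P(x_binary | y) * P(x_numeric | y) * P(y)
prior = np.exp(model1.class_log_prior_)           # empirical class prior from BernoulliNB
combined = prob1 * prob2 / prior
combined /= combined.sum(axis=1, keepdims=True)   # renormalize each row
y_pred = combined.argmax(axis=1)
print(classification_report(y_test, y_pred))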

3) Ensemble Method (RandomForest, Naive Bayes)


In [8]:
for i in range(6):
    mask = df['failure_bidding'] >= i
    X = pd.concat([
            df.loc[mask, '공유자매수신고':'rdtuch_중로한면'],
            df.loc[mask, '가축사육제한구역':'현상변경허가 대상구역'],
            df.loc[mask, 'log_est_jiga'],
            df.loc[mask, 'area(m2)']
        ], axis=1)

    # Binary target: 0 = no further failure after round i, 1 = failed again
    y = df.loc[mask, 'failure_bidding'].copy()
    y[y == i] = 0
    y[y > i] = 1

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

    # Tune the random forest as in 1)
    pipe_rf = Pipeline([
            ('rf', RandomForestClassifier())
        ])

    param_grid = [
        {'rf__n_estimators': np.arange(100, 150, 10),
         'rf__criterion': ['gini', 'entropy']}
    ]

    gs = GridSearchCV(estimator=pipe_rf, param_grid=param_grid,
                      scoring='recall', cv=10, n_jobs=-1)
    gs = gs.fit(X_train, y_train)

    model0 = RandomForestClassifier(n_estimators=gs.best_params_['rf__n_estimators'],
                                    n_jobs=-1,
                                    criterion=gs.best_params_['rf__criterion']).fit(X_train, y_train)
    prob0 = model0.predict_proba(X_test)

    # Naive Bayes models as in 2)
    model1 = BernoulliNB().fit(X_train.loc[:, '공유자매수신고':'현상변경허가 대상구역'], y_train)
    prob1 = model1.predict_proba(X_test.loc[:, '공유자매수신고':'현상변경허가 대상구역'])

    model2 = GaussianNB().fit(X_train.loc[:, 'log_est_jiga':'area(m2)'], y_train)
    prob2 = model2.predict_proba(X_test.loc[:, 'log_est_jiga':'area(m2)'])

    # Combine all three models by multiplying class probabilities, renormalize, take argmax
    y_pred = np.zeros(len(prob1))
    for j in range(len(prob1)):
        y_pred[j] = np.argmax(softmax((prob0 * prob1 * prob2)[j]))

    print('---------------- ', i, '회 유찰 매물 ----------------')
    print(classification_report(y_test, y_pred))
    print(confusion_matrix(y_test, y_pred))


----------------  0 회 유찰 매물 ----------------
             precision    recall  f1-score   support

          0       0.46      0.44      0.45       588
          1       0.77      0.78      0.78      1405

avg / total       0.68      0.68      0.68      1993

[[ 260  328]
 [ 306 1099]]
----------------  1 회 유찰 매물 ----------------
             precision    recall  f1-score   support

          1       0.49      0.18      0.26       478
          2       0.68      0.90      0.78       924

avg / total       0.61      0.66      0.60      1402

[[ 86 392]
 [ 91 833]]
----------------  2 회 유찰 매물 ----------------
             precision    recall  f1-score   support

          2       0.53      0.48      0.50       423
          3       0.59      0.64      0.61       496

avg / total       0.56      0.56      0.56       919

[[203 220]
 [180 316]]
----------------  3 회 유찰 매물 ----------------
             precision    recall  f1-score   support

          3       0.62      0.62      0.62       272
          4       0.54      0.54      0.54       223

avg / total       0.58      0.58      0.58       495

[[168 104]
 [103 120]]
----------------  4 회 유찰 매물 ----------------
             precision    recall  f1-score   support

          4       0.64      0.63      0.63       147
          5       0.41      0.43      0.42        89

avg / total       0.55      0.55      0.55       236

[[92 55]
 [51 38]]
----------------  5 회 유찰 매물 ----------------
             precision    recall  f1-score   support

          5       0.61      0.61      0.61        66
          6       0.37      0.37      0.37        41

avg / total       0.51      0.51      0.51       107

[[40 26]
 [26 15]]
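
For comparison, scikit-learn's VotingClassifier offers a built-in soft-voting ensemble; it averages the three probability vectors instead of multiplying them, so it is a different (not equivalent) combination rule. A sketch under that assumption, reusing the last X_train/X_test split and selecting columns by position so it still works if the DataFrame gets converted to an array (ColumnTransformer requires scikit-learn >= 0.20):

from sklearn.compose import ColumnTransformer
from sklearn.ensemble import VotingClassifier

# Positional indices of the binary block and of the two continuous columns
binary_idx = [X_train.columns.get_loc(c) for c in
              X_train.loc[:, '공유자매수신고':'현상변경허가 대상구역'].columns]
numeric_idx = [X_train.columns.get_loc('log_est_jiga'), X_train.columns.get_loc('area(m2)')]

# Each naive Bayes model only sees its own feature block; the forest sees everything
bnb = Pipeline([('cols', ColumnTransformer([('sel', 'passthrough', binary_idx)])),
                ('nb', BernoulliNB())])
gnb = Pipeline([('cols', ColumnTransformer([('sel', 'passthrough', numeric_idx)])),
                ('nb', GaussianNB())])
rf = RandomForestClassifier(n_estimators=100, n_jobs=-1)

# voting='soft' averages predicted probabilities across the three models
ensemble = VotingClassifier([('rf', rf), ('bnb', bnb), ('gnb', gnb)], voting='soft')
ensemble.fit(X_train, y_train)
print(classification_report(y_test, ensemble.predict(X_test)))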

In [ ]: