In [1]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in

# --- Standard library ---
import os
from collections import OrderedDict

# --- Scientific stack ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# --- scikit-learn ---
from sklearn import metrics, preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.model_selection import train_test_split

# --- PyTorch stack (imported for experimentation; the analysis below uses
#     only sklearn/pandas/matplotlib) ---
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from tensorboardX import SummaryWriter
from torch.nn import init
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms

# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
print(os.listdir("../input"))
# load data
# load data
In [2]:
# Load the raw competition data. train.csv carries the binary `target`
# column plus an ID; test.csv carries only the ID and features.
print('loading train')
train_df = pd.read_csv('../input/train.csv')
print('loading test')
test_df = pd.read_csv('../input/test.csv')
# Preview instead of dumping the full frames (they are large).
print(train_df.shape, test_df.shape)
print(train_df.head())
print(test_df.head())
In [3]:
# Feature matrices: drop the label and ID columns.
train_drop_label = train_df.drop(['target', 'ID_code'], axis=1)  # Features
test_drop_label = test_df.drop(['ID_code'], axis=1)
# NOTE(review): the scaler is fit on train+test combined, which leaks test
# statistics into preprocessing. Common for Kaggle submissions, but for an
# honest validation estimate fit the scaler on the training rows only.
all_df = pd.concat([train_drop_label, test_drop_label], axis=0)
# len() is the row count; the previous .count()[0] only counted non-null
# values in the first column, which silently under-counts if nulls appear.
train_len = len(train_df)
test_len = len(test_df)
print(train_len, test_len)
scaler = preprocessing.StandardScaler()
all_scaled = pd.DataFrame(scaler.fit_transform(all_df), columns=all_df.columns)
# Fixed random_state so the split (and all downstream metrics) are
# reproducible across kernel restarts.
train_x, valid_x, train_y, valid_y = train_test_split(
    all_scaled[:train_len], train_df.target,
    test_size=.01, shuffle=True, random_state=42)
test_x = all_scaled[train_len:].values
In [4]:
# Baseline classifier. The 'sag' solver shuffles the data each epoch, so a
# fixed random_state is required for the fit to be reproducible.
logist_reg = LogisticRegression(solver="sag", max_iter=200, random_state=42)
logist_reg.fit(train_x, train_y)
Out[4]:
In [5]:
# Evaluate hard-label predictions on the held-out validation split.
y_pred = logist_reg.predict(valid_x)
cnf_matrix = metrics.confusion_matrix(valid_y, y_pred)
print("Confusion matrix:\n", cnf_matrix)  # was computed but never displayed
print("Accuracy:", metrics.accuracy_score(valid_y, y_pred))
print("Precision:", metrics.precision_score(valid_y, y_pred))
print("Recall:", metrics.recall_score(valid_y, y_pred))
# Positive-class probability; [:, 1] is the conventional column index
# (the original [::, 1] step-slice is equivalent but unidiomatic).
y_pred_proba = logist_reg.predict_proba(valid_x)[:, 1]
fpr, tpr, _ = metrics.roc_curve(valid_y, y_pred_proba)
auc = metrics.roc_auc_score(valid_y, y_pred_proba)
plt.plot(fpr, tpr, label="data 1, auc=" + str(auc))
plt.xlabel("False positive rate")
plt.ylabel("True positive rate")
plt.title("ROC curve (validation)")
plt.legend(loc=4)
plt.show()
In [6]:
# Same evaluation on the training split — comparing these numbers against
# the validation cell above gives a quick over/under-fitting check.
# NOTE(review): this cell duplicates the validation cell; a shared
# evaluate(model, x, y, name) helper would remove the copy-paste.
y_pred = logist_reg.predict(train_x)
cnf_matrix = metrics.confusion_matrix(train_y, y_pred)
print("Confusion matrix:\n", cnf_matrix)  # was computed but never displayed
print("Accuracy:", metrics.accuracy_score(train_y, y_pred))
print("Precision:", metrics.precision_score(train_y, y_pred))
print("Recall:", metrics.recall_score(train_y, y_pred))
# Positive-class probability; [:, 1] replaces the unidiomatic [::, 1].
y_pred_proba = logist_reg.predict_proba(train_x)[:, 1]
fpr, tpr, _ = metrics.roc_curve(train_y, y_pred_proba)
auc = metrics.roc_auc_score(train_y, y_pred_proba)
plt.plot(fpr, tpr, label="data 1, auc=" + str(auc))
plt.xlabel("False positive rate")
plt.ylabel("True positive rate")
plt.title("ROC curve (train)")
plt.legend(loc=4)
plt.show()
In [7]:
# Build the submission file: predicted probability of the positive class
# for every test row. predict_proba returns columns ordered by classes_
# ([P(class 0), P(class 1)] for a 0/1 target), so take column 1 directly
# instead of the indirect 1 - P(class 0).
y_pred_test = logist_reg.predict_proba(test_x)[:, 1]
print(y_pred_test[:5])  # preview instead of dumping the full array
dataframe = pd.DataFrame({'ID_code': test_df.ID_code, 'target': y_pred_test})
dataframe.to_csv("result.csv", index=False, sep=',')
print(dataframe.shape)
print(dataframe.head())
In [8]: