Reference: http://yamalab.tistory.com/80
In [4]:
# Colab-only setup: install PyDrive and import the Google auth helpers.
!pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
# 1. Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
# PyDrive reference:
# https://googledrive.github.io/PyDrive/docs/build/html/index.html
# 2. Create & upload a file text file.
# Insert the file into a specific Drive folder via the 'parents' field.
# NOTE(review): 'your_drive_id' is a placeholder — replace with a real Drive folder ID.
uploaded = drive.CreateFile({'title': 'Sample upload.txt', "parents": [{"kind": "drive#fileLink","id": 'your_drive_id'}]})
uploaded.SetContentString('Sample upload file content')
uploaded.Upload()
print('Uploaded file with ID {}'.format(uploaded.get('id')))
# 3. Load a file by ID and print its contents.
downloaded = drive.CreateFile({'id': uploaded.get('id')})
print('Downloaded content "{}"'.format(downloaded.GetContentString()))
In [5]:
from keras.utils import np_utils
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Activation

# Load the MNIST digit dataset (28x28 grayscale images, integer labels 0-9).
(X_train, Y_train), (X_test, Y_test) = mnist.load_data()

# Flatten each 28x28 image to a 784-vector and scale pixel values into [0, 1].
# reshape(-1, ...) infers the sample count, so this no longer depends on the
# hard-coded split sizes (60000/10000) of the default MNIST download.
X_train = X_train.reshape(-1, 28 * 28).astype('float32') / 255.0
X_test = X_test.reshape(-1, 28 * 28).astype('float32') / 255.0

# One-hot encode the labels for categorical cross-entropy.
Y_train = np_utils.to_categorical(Y_train)
Y_test = np_utils.to_categorical(Y_test)

# Simple MLP: one 64-unit ReLU hidden layer, softmax over the 10 digit classes.
model = Sequential()
model.add(Dense(units=64, input_dim=28*28, activation='relu'))
model.add(Dense(units=10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
hist = model.fit(X_train, Y_train, epochs=2, batch_size=32)

# Evaluate on the held-out test set; returns [loss, accuracy] per the metrics above.
loss_and_metrics = model.evaluate(X_test, Y_test, batch_size=32)
print('loss_and_metrics : ' + str(loss_and_metrics))

# Save the trained model at the VM's local root so a later cell can upload it to Drive.
model.save('mnist_mlp_model.h5')
In [6]:
# 1. Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
# PyDrive reference:
# https://googledrive.github.io/PyDrive/docs/build/html/index.html

# 2. Upload the saved model file into a specific Drive folder.
# NOTE(review): 'your_drive_id' is a placeholder folder ID — replace it.
uploaded = drive.CreateFile({'title': 'mnist_mlp_model.h5', "parents": [{"kind": "drive#fileLink","id": 'your_drive_id'}]})
# Bug fix: the original called SetContentString('Sample upload file content')
# here before SetContentFile — a copy-paste leftover from the text-upload cell.
# The binary .h5 file is the intended content, so only SetContentFile is used.
uploaded.SetContentFile('mnist_mlp_model.h5')
uploaded.Upload()
print('Uploaded file with ID {}'.format(uploaded.get('id')))

# 3. Get a handle to the uploaded file by ID (content is fetched lazily).
downloaded = drive.CreateFile({'id': uploaded.get('id')})
위 코드를 실행하면, 앞선 과정에서 저장했던 h5 파일이 드라이브의 내 폴더 안에 저장된다.
In [9]:
# Download the model file from Drive via the Drive v3 REST API into an
# in-memory BytesIO buffer, then persist it under /tmp on the VM.
from google.colab import auth
auth.authenticate_user()

from googleapiclient.discovery import build
drive_service = build('drive', 'v3')

import io
from io import BytesIO
from googleapiclient.http import MediaIoBaseDownload

# NOTE(review): hard-coded file ID from the author's Drive — replace with the
# ID printed by the upload cell above.
FILE_ID = '11RMJNeeZLgUtuuvmyq3LkLBTnTc4vhHh'
request = drive_service.files().get_media(fileId=FILE_ID)
downloaded = io.BytesIO()
downloader = MediaIoBaseDownload(downloaded, request)
done = False
while not done:
    status, done = downloader.next_chunk()
    if status:
        # Bug fix: the original format string "Download %%%d%%." printed e.g.
        # "Download %50%."; "%d%%" gives the intended "Download 50%.".
        print("Download %d%%." % int(status.progress() * 100))
print("Download Complete!")

# The download is only an in-memory buffer; rewind it and write it to a real
# file so keras' load_model can open it by path.
downloaded.seek(0)
with open('/tmp/mnist_mlp_model.h5', 'wb') as f:
    f.write(downloaded.read())
In [10]:
# Restore the trained MLP (architecture + weights) from the file that the
# REST-API cell wrote to /tmp.
from keras.models import load_model
loaded_model = load_model('/tmp/mnist_mlp_model.h5')
# Loading the file saved in /tmp completes the full save -> upload -> download -> load cycle.
In [12]:
## predict
from numpy import argmax
import numpy as np  # bug fix: np.random is used below but numpy was never imported as np

# Sample 5 random test images and compare the model's predictions with the truth.
xhat_idx = np.random.choice(X_test.shape[0], 5)
xhat = X_test[xhat_idx]
# predict + argmax replaces Sequential.predict_classes (removed in modern
# Keras); for a softmax classifier the two produce identical class indices.
yhat = argmax(loaded_model.predict(xhat), axis=1)
for i in range(5):
    print('True : ' + str(argmax(Y_test[xhat_idx[i]])) + ', Predict : ' + str(yhat[i]))
In [0]:
In [0]: