In [0]:
# meminfo
!cat /proc/meminfo
# cpu information
!cat /proc/cpuinfo
# GPU information
from tensorflow.python.client import device_lib
device_lib.list_local_devices()
# memory footprint support libraries/code
!ln -sf /opt/bin/nvidia-smi /usr/bin/nvidia-smi
!pip install gputil
!pip install psutil
!pip install humanize
import psutil
import humanize
import os
import GPUtil as GPU
GPUs = GPU.getGPUs()
# XXX: assumes a single GPU on Colab, which isn't guaranteed
gpu = GPUs[0]
def printm():
    process = psutil.Process(os.getpid())
    print("Gen RAM Free: " + humanize.naturalsize(psutil.virtual_memory().available),
          " | Proc size: " + humanize.naturalsize(process.memory_info().rss))
    print("GPU RAM Free: {0:.0f}MB | Used: {1:.0f}MB | Util {2:3.0f}% | Total {3:.0f}MB".format(
        gpu.memoryFree, gpu.memoryUsed, gpu.memoryUtil * 100, gpu.memoryTotal))
printm()
In [1]:
# Run this cell and select the kaggle.json file downloaded
# from the Kaggle account settings page.
from google.colab import files
files.upload()
Out[1]:
In [2]:
# Let's make sure the kaggle.json file is present
!ls -lha kaggle.json
In [0]:
# Next, install the Kaggle API client
!pip install -q kaggle
In [4]:
# The Kaggle API client expects this file to be in ~/.kaggle,
# so move it there.
!mkdir -p ~/.kaggle
!cp kaggle.json ~/.kaggle/
# This permissions change avoids a warning on Kaggle tool startup.
!chmod 600 ~/.kaggle/kaggle.json
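As an optional sanity check, confirm the token actually landed in ~/.kaggle with owner-only permissions:
In [0]:
# Verify the API token location and permissions
!ls -lha ~/.kaggle/kaggle.json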
In [5]:
# List available datasets
!kaggle datasets list
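The full list is long; as a sketch (assuming the CLI's -s search flag), you can narrow it by keyword:
In [0]:
# Filter the dataset list by a search term
!kaggle datasets list -s whale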
In [0]:
# Download the Kaggle competition data
# (copy this command from the competition page on Kaggle)
!kaggle competitions download -c humpback-whale-identification
## ../input/train , test
#!mkdir train
#!unzip ./train.zip -d train
#!mkdir test
#!unzip ./test.zip -d test
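If you prefer to do the extraction in Python instead of the (commented-out) shell commands above, here is a minimal sketch, assuming the downloaded archives are train.zip and test.zip in the working directory:
In [0]:
import os
import zipfile

# Extract each archive into its own directory
for name in ('train', 'test'):
    os.makedirs(name, exist_ok=True)
    with zipfile.ZipFile('./{}.zip'.format(name)) as zf:
        zf.extractall(name)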
In [0]:
# Load the Drive helper and mount
from google.colab import drive
# This will prompt for authorization
drive.mount('/content/drive')
# After executing the cell above,
# Drive files will be present in "/content/drive/My Drive"
!ls "/content/drive/My Drive"
In [0]:
## Prep:
import numpy as np
import h5py
data_to_write = np.random.random(size=(100, 20))  # or some such
## Write:
with h5py.File('name-of-file.h5', 'w') as hf:
    hf.create_dataset("name-of-dataset", data=data_to_write)
## Read:
with h5py.File('name-of-file.h5', 'r') as hf:
    data = hf['name-of-dataset'][:]
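It can also help to inspect what an HDF5 file contains before pulling datasets into memory; a small sketch using the same file and dataset name as above:
In [0]:
import h5py

# List dataset names, shapes, and dtypes without loading the data
with h5py.File('name-of-file.h5', 'r') as hf:
    for key in hf.keys():
        print(key, hf[key].shape, hf[key].dtype)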
In [0]:
## EXAMPLE
# Read an HDF5 file with h5py
import h5py
with h5py.File('whale_train_image.h5', 'r') as hf:
    train_X = hf['whale_X'][:]
Save the neural network (uses the PyDrive drive object set up below)
model.save('model_whale.h5')
model.save('model.h5')
model_file = drive.CreateFile({'title' : 'model.h5'})
model_file.SetContentFile('model.h5')
model_file.Upload()
drive.CreateFile({'id': model_file.get('id')})
Load the neural network
# load_model comes from keras.models (or tensorflow.keras.models)
from keras.models import load_model
network = load_model("model.h5")
!pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
Create the file in the Colab directory and upload it to Drive
model.save('model.h5')
model_file = drive.CreateFile({'title' : 'model.h5'})
model_file.SetContentFile('model.h5')
model_file.Upload()
Download to Google Drive (get a handle to the uploaded file by its id)
drive.CreateFile({'id': model_file.get('id')})
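The id returned by Drive is what gets pasted into drive.CreateFile on a later run, so it is worth printing it once (a sketch reusing the model_file object created above):
In [0]:
# Print the Drive file id for reuse in later sessions
print(model_file.get('id'))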
Same for weights
model.save_weights('model_weights.h5')
weights_file = drive.CreateFile({'title' : 'model_weights.h5'})
weights_file.SetContentFile('model_weights.h5')
weights_file.Upload()
drive.CreateFile({'id': weights_file.get('id')})
On the next run, try reloading the weights
Use "Get shareable link" in Drive to obtain the file id
last_weight_file = drive.CreateFile({'id': '1sj...'})
last_weight_file.GetContentFile('last_weights.mat')
model.load_weights('last_weights.mat')
EXAMPLE: reload the training images from Drive
train_image_file = drive.CreateFile({'id': '1w8Q38eCYuxT5x55eGfUZxcD4401PbKV1'})
train_image_file.GetContentFile('whale_train_image.h5')
Saving memory: after loading the file and retrieving the variable, remove the file object
%reset_selective -f file  # regex: deletes every variable whose name contains "file"
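A plain-Python alternative (a sketch: del plus a garbage-collection pass frees the same memory without pattern matching on variable names):
In [0]:
import gc

# Drop the reference explicitly and reclaim the memory
del train_image_file
gc.collect()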