Here is a walkthrough of how to read metadata and data from a level1b file. I've used the converter I downloaded earlier to produce the Aqua granule netCDF file at http://clouds.eos.ubc.ca/~phil/Downloads/a301/MYD021KM.A2005188.0405.005.2009232180906.nc (right click to save it to your local drive)
I'm going to use the netCDF4 module (http://unidata.github.io/netcdf4-python/) and two new modules (modismeta.py and netcdflib.py) to read the granule
In [0]:
from __future__ import print_function
from __future__ import division
import os,site
# put the course lib directory (a sibling of this folder) on the import path
currdir=os.getcwd()
head,tail=os.path.split(currdir)
libdir=os.path.join(head,'lib')
site.addsitedir(libdir)
from modismeta import parseMeta
from netcdflib import ncdump
import glob
import numpy as np                      # needed for the binning cells below
from matplotlib import pyplot as plt    # needed for the plotting cell below
from netCDF4 import Dataset
the glob function finds files matching a wildcard pattern, which saves typing the full name (google: python glob wildcard)
In [0]:
nc_filename=glob.glob('*.nc')
print("found {}".format(nc_filename))
In [0]:
nc_file=Dataset(nc_filename[0])
netcdf files have global attributes
In [0]:
print(nc_file.ncattrs())
In [0]:
print(nc_file.Earth_Sun_Distance)
In [0]:
print(nc_file.HDFEOSVersion)
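The two helper modules imported above can also summarize the granule. As a hedged sketch only: I'm assuming here that parseMeta takes the granule filename and returns a dictionary of metadata, and that ncdump takes an open Dataset and prints its contents -- check modismeta.py and netcdflib.py if the signatures differ.
In [0]:
# hypothetical usage -- signatures assumed, see modismeta.py and netcdflib.py
meta_dict=parseMeta(nc_filename[0])   # assumed: filename in, metadata dictionary out
print(meta_dict)
ncdump(nc_file)                       # assumed: prints dimensions, variables and attributes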
netcdf files have variables -- stored in a dictionary
In [0]:
#print(nc_file.variables.keys())
In [0]:
the_long=nc_file.variables['longitude']
the_lat=nc_file.variables['latitude']
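Each variable object carries its own shape, dimension names and attributes; the next cell is a small sketch using the standard netCDF4 variable interface to inspect the latitude variable.
In [0]:
# inspect the latitude variable: its shape, dimension names and attributes
print(the_lat.shape)
print(the_lat.dimensions)
print(the_lat.ncattrs())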
the following cell shows how to write code out to a file with the %%file cell magic
%%file snippet.py
size=100
lat_data=the_lat[:10,:10]
long_data=the_long[:10,:10]
the following cell shows how to read that code back into a cell with the %load magic
%load snippet.py
In [0]:
size=10
lat_data=the_lat[:size,:size]
lon_data=the_long[:size,:size]
In [0]:
%matplotlib inline
In [0]:
fig=plt.figure(1,figsize=(9,9))
plt.clf()
ax1=fig.add_subplot(1,1,1)
the_points=ax1.plot(lon_data,lat_data,'r+')
ylim=(22.65,22.9)
xlim=(149.,149.8)
ax1.set_xlim(xlim)
ax1.set_ylim(ylim)
In [0]:
lon_bins=np.linspace(xlim[0],xlim[1],3)
lat_bins=np.linspace(ylim[0],ylim[1],3)
lon_indices=np.digitize(lon_data.flat,lon_bins)
lat_indices=np.digitize(lat_data.flat,lat_bins)
In [0]:
lat_indices
In [0]:
lon_indices
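Note what digitize returns: for each point, the number of bin edges that are less than or equal to it, so 0 means the value falls below the first edge and len(bins) means it is at or above the last edge. The toy example below (made-up numbers, not granule data) shows this.
In [0]:
# toy example of np.digitize (made-up values, not from the granule)
edges=np.linspace(0.,1.,3)         # three edges: 0.0, 0.5, 1.0
values=np.array([-0.2,0.1,0.6,1.5])
print(np.digitize(values,edges))   # prints [0 1 2 3]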
In [0]:
lat_array=np.zeros([len(lat_bins),len(lon_bins)],dtype=np.float64)
bin_count=np.zeros([len(lat_bins),len(lon_bins)],dtype=np.int64)
lon_array=np.zeros([len(lat_bins),len(lon_bins)],dtype=np.float64)
# accumulate the lat/lon values that fall in each bin, and count the points per bin
for point_num in range(len(lat_indices)):
    bin_row=lat_indices[point_num]
    bin_col=lon_indices[point_num]
    lat_array[bin_row,bin_col]=lat_array[bin_row,bin_col] + lat_data.flat[point_num]
    lon_array[bin_row,bin_col]+=lon_data.flat[point_num]
    bin_count[bin_row,bin_col]+=1
# turn the sums into bin averages; mark empty bins with nan
for i in range(lon_array.shape[0]):
    for j in range(lon_array.shape[1]):
        if bin_count[i,j] > 0:
            lat_array[i,j]=lat_array[i,j]/bin_count[i,j]
            lon_array[i,j]=lon_array[i,j]/bin_count[i,j]
        else:
            lat_array[i,j]=np.nan
            lon_array[i,j]=np.nan
In [0]:
longwave=nc_file.variables['EV_1KM_Emissive']
longwave.ncattrs()
In [0]:
band_names=longwave.band_names
In [0]:
band_names=band_names.split(',')
index31=band_names.index('31')
In [0]:
index31
In [0]:
chan31=longwave[index31,:10,:10]
chan31
#chan31 = scale31 * (chan31 - offset31)
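To finish the calibration you need the scale and offset for band 31. In the original level1b HDF file these live in the EV_1KM_Emissive attributes radiance_scales and radiance_offsets, one value per band; assuming the converter carried those attributes through to the netCDF variable, a sketch of the last step would look like the cell below.
In [0]:
# sketch only: assumes the converter preserved the MODIS radiance_scales/radiance_offsets attributes
scale31=longwave.radiance_scales[index31]
offset31=longwave.radiance_offsets[index31]
chan31_radiance=scale31*(chan31 - offset31)   # MODIS level1b radiances are W/m^2/micron/sr
chan31_radiance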
In [0]: