Using the Earth Engine to build a MODIS fractional cover calibration

This is based on the Scarth et al. endmember extraction method. The basic process is to:

  1. Assemble the field site data
  2. Extract the reflectance data from the closest pixel in space and time
  3. Calculate the satellite viewed cover amount from the field data
  4. Compute the best fitting image endmembers based on the field and image data sets
  5. Use these endmembers to unmix the Earth Engine imagery

In [ ]:
# Imports
from IPython.core.display import Image
import os, numpy, plotly, time, datetime
from scipy.linalg import pinv2, svd
from scipy.optimize import nnls
import ee


# Initialise Plotly first time by running:
# python -c "import plotly; plotly.tools.set_credentials_file(username='xxxxxx', api_key='yyyyy')"

# Initialize the Earth Engine (requires prior EE authentication on this machine)
ee.Initialize()

# Connect to the field site fusion table.
# NOTE: the table id below is a placeholder -- substitute a real fusion table key.
fieldSites = ee.FeatureCollection('ft:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
# Have a look at the data structure using:
# print fieldSites.getInfo()['features'][1]

# Import MODIS Imagery (MCD43A4 nadir reflectance collection)
mcd43a4 = ee.ImageCollection('MODIS/MCD43A4')

# Get the latest image: sort newest-first, keep one; median() collapses the
# single-image collection back to an ee.Image
latestImage = mcd43a4.sort('system:time_start', False ).limit(1).median()
# Have a look at the data structure using: print(latestImage.getInfo())
# Buffer each site by 10 km so the sites are visible at thumbnail scale
bufferedSites = fieldSites.map(lambda site:site.buffer(10000))
# Three-band composite (bands 6/2/1) with the buffered site polygons painted
# on at a value of 10000 so they render at the display maximum
siteImage = latestImage.select('Nadir_Reflectance_Band6', 'Nadir_Reflectance_Band2', 'Nadir_Reflectance_Band1').paint(bufferedSites,10000)

#Get a bounding region for the thumbnail extract (convex hull of the sites plus a 200 km buffer)
coords = fieldSites.geometry().convexHull().buffer(200000).getInfo()['coordinates']
# Produce an image thumbnail
imageURL = siteImage.getThumbUrl({'region': coords,'format': 'png','min':100,'max': 5000,'dimensions': '400'})
# Display the image    
Image(url=imageURL)

Extract the MODIS reflectance data from the Earth Engine by selecting the points nearest in space and time

Here we have to consider:

  • The presence of nans
  • The possibility of a few dates close to the field data
  • The adjustment of the field data to model what the satellite views

In [ ]:
# Function to extract point data from imagery close in time.
#
# imageCollection:     an ee.ImageCollection (or anything castable to one)
# javascriptTimestamp: epoch time in milliseconds (Javascript convention)
# coordinates:         [lon, lat] pair for the sample point
# bufferDays:          half-width of the temporal search window in days
#                      (default 10, matching the original behaviour)
#
# Returns the raw getRegion() list: row 0 is the column names, subsequent
# rows hold one observation per image found inside the window.
def pixelDataAtPoint(imageCollection, javascriptTimestamp, coordinates, bufferDays=10):
    # Convert the millisecond timestamp to a datetime object
    nominalDate = datetime.datetime.fromtimestamp(javascriptTimestamp / 1000)
    # Temporal search window either side of the nominal date
    sampleDaysRange = datetime.timedelta(days=bufferDays)
    # Start date
    startDate = nominalDate - sampleDaysRange
    # End date
    endDate = nominalDate + sampleDaysRange
    # Select the images that fall inside the window
    extractCollection = ee.ImageCollection(imageCollection).filterDate(startDate, endDate)
    # Sample the pixel values at the point, at a scale of 100m
    pointExtracted = extractCollection.getRegion(ee.Geometry.Point(coordinates[0], coordinates[1]), 100)
    # Pull the result down from the server as a plain list
    return pointExtracted.getInfo()

# Function that takes a getRegion list and computes the per-band median of
# the reflectance data.
#
# pointData: list returned by getRegion().getInfo() -- row 0 holds the
#            column names; the remaining rows are one observation each with
#            id/lon/lat/time in columns 0-3 and the band values after that.
#
# Returns a 1-D numpy array holding the median of each band column with
# NaNs ignored (masked pixels arrive as None and become NaN on the float
# conversion).  Note: despite the historical name this computes the MEDIAN,
# which is more robust to outliers than the mean.
def pixelListToMean(pointData):
    # Drop the header row (row 0 is the column names)
    observations = pointData[1:]
    if not observations:
        # Nothing sampled in the window: return an all-NaN row of band width
        # so downstream arrays stay aligned and the NaN filter drops the
        # site.  (The previous bare-except fallback indexed pixelArray[0]
        # on an empty array and crashed here.)
        return numpy.full(len(pointData[0]) - 4, numpy.nan)
    # Keep only the band columns and coerce to float (None -> NaN)
    pixelArray = numpy.ndarray.astype(numpy.array(observations)[:, 4:], dtype=float)
    # nanmedian skips NaN observations; an all-NaN column yields NaN (with a
    # RuntimeWarning) rather than raising, so no broad except is needed
    return numpy.nanmedian(pixelArray, axis=0)


# Compute the cover fractions for one field site as they would appear to an
# overhead (satellite) observer.
#
# siteProperties: dict of field observations holding 'num_points' (total
#                 transect points) and layer percentages: canopy over_b/d/g,
#                 midstorey mid_b/g/d, and ground dead/litter/crust/dist/
#                 rock/green/crypto.
#
# Returns numpy.array([PV, NPV, bare, cryptogam]) where each lower layer is
# attenuated by the gap fraction of every layer above it.
# Assumes num_points > 0 and over_b < 100 (zero division otherwise).
def fractionalCoverSatView(siteProperties):
    pointTotal = siteProperties['num_points']
    # Re-express each recorded percentage as a point count
    overBranchPts = siteProperties['over_b'] * pointTotal / 100.0
    overDeadPts = siteProperties['over_d'] * pointTotal / 100.0
    overGreenPts = siteProperties['over_g'] * pointTotal / 100.0
    midBranchPts = siteProperties['mid_b'] * pointTotal / 100.0
    midGreenPts = siteProperties['mid_g'] * pointTotal / 100.0
    midDeadPts = siteProperties['mid_d'] * pointTotal / 100.0
    groundNPVPts = (siteProperties['dead'] + siteProperties['litter']) * pointTotal / 100.0
    groundBarePts = (siteProperties['crust'] + siteProperties['dist'] + siteProperties['rock']) * pointTotal / 100.0
    groundGreenPts = siteProperties['green'] * pointTotal / 100.0
    groundCryptoPts = siteProperties['crypto'] * pointTotal / 100.0

    # Canopy components viewed from above; branch hits shrink the effective
    # denominator for the foliage and dead fractions
    canopyGreenFrac = overGreenPts / (pointTotal - overBranchPts)
    canopyDeadFrac = overDeadPts / (pointTotal - overBranchPts)
    canopyBranchFrac = overBranchPts / pointTotal * (1.0 - canopyGreenFrac - canopyDeadFrac)
    canopyTotalFrac = (overGreenPts + overDeadPts + overBranchPts) / pointTotal

    # Midstorey fractions as recorded on the ground ...
    midGreenFrac = midGreenPts / pointTotal
    midDeadFrac = midDeadPts / pointTotal
    midBranchFrac = midBranchPts / pointTotal
    midTotalFrac = (midGreenPts + midDeadPts + midBranchPts) / pointTotal
    # ... then attenuated by the canopy gap fraction for the satellite view
    satMidGreenFrac = midGreenFrac * (1 - canopyTotalFrac)
    satMidDeadFrac = midDeadFrac * (1 - canopyTotalFrac)
    satMidBranchFrac = midBranchFrac * (1 - canopyTotalFrac)

    # Ground fractions as recorded by the observer ...
    groundGreenFrac = groundGreenPts / pointTotal
    groundNPVFrac = groundNPVPts / pointTotal
    groundBareFrac = groundBarePts / pointTotal
    groundCryptoFrac = groundCryptoPts / pointTotal
    # ... attenuated by both the midstorey and canopy gap fractions
    satGroundGreenFrac = groundGreenFrac * (1 - midTotalFrac) * (1 - canopyTotalFrac)
    satGroundNPVFrac = groundNPVFrac * (1 - midTotalFrac) * (1 - canopyTotalFrac)
    satGroundBareFrac = groundBareFrac * (1 - midTotalFrac) * (1 - canopyTotalFrac)
    satGroundCryptoFrac = groundCryptoFrac * (1 - midTotalFrac) * (1 - canopyTotalFrac)

    # Total for each class = canopy contribution plus whatever shows through
    # the gaps from the layers beneath
    pvTotal = canopyGreenFrac + satMidGreenFrac + satGroundGreenFrac
    npvTotal = canopyDeadFrac + canopyBranchFrac + satMidDeadFrac + satMidBranchFrac + satGroundNPVFrac

    return numpy.array([pvTotal, npvTotal, satGroundBareFrac, satGroundCryptoFrac])
    

   
# Flat 1-D accumulators -- reflectance and cover values are appended per
# site and reshaped into 2-D arrays in the next cell
fractionalCoverArray = numpy.array([])
modisReflectanceArray = numpy.array([])

# Extract the data for all the sites
for site in fieldSites.getInfo()['features']:
    #print site['properties']['site']
    
    
    # Extract the reflectance data from the nearest images
    coordinates = site['geometry']['coordinates']
    obsTime = site['properties']['obs_time']
    # Only keep observations within the MODIS era (after Jan 2000);
    # obs_time is a Javascript-style millisecond timestamp
    if datetime.datetime.fromtimestamp(obsTime/1000)>datetime.datetime(2000, 1, 1, 0, 0):
        
        try:
            # Wait for a bit to avoid hitting the EE quota limits
            time.sleep(1.0)
            sitePixelList =  pixelDataAtPoint(mcd43a4,obsTime,coordinates)
        except:
            # Deliberate best-effort retry: on any server/quota error, back
            # off for a minute and try once more (a second failure aborts)
            time.sleep(60.0)
            sitePixelList =  pixelDataAtPoint(mcd43a4,obsTime,coordinates)

        meanReflectance = pixelListToMean(sitePixelList)
        # Data Array contains the 7 reflectance bands
        modisReflectanceArray = numpy.append(modisReflectanceArray,meanReflectance)


        # Calculate the fractional cover as seen by the satellite
        siteCover = fractionalCoverSatView(site['properties'])
        # Data Array contains the 4 fractional cover values
        fractionalCoverArray = numpy.append(fractionalCoverArray,siteCover)

Clean and save the extracted data for further analysis


In [ ]:
# Reshape the flat accumulators: 7 reflectance bands / 4 cover fractions per site
modisReflectanceArray = numpy.reshape(modisReflectanceArray,(-1,7))
fractionalCoverArray = numpy.reshape(fractionalCoverArray,(-1,4))
# Drop any site whose reflectance contains a NaN in any band.
# (The original indexed with numpy.where(~numpy.isnan(xdata)), but xdata is
# not defined until a later cell; a per-row boolean mask over the
# reflectance array -- the same approach used after reload below -- keeps
# the two arrays aligned site-for-site.)
isGoodIDX = ~numpy.isnan(modisReflectanceArray).any(axis=1)
modisReflectanceArray = modisReflectanceArray[isGoodIDX]
fractionalCoverArray = fractionalCoverArray[isGoodIDX]
# And save the extracted data for reuse
numpy.savetxt('modisReflectanceArray.txt', modisReflectanceArray)
numpy.savetxt('fractionalCoverArray.txt', fractionalCoverArray)

Explore the data

Now we've extracted from the earth engine, we can undertake some basic data analysis


In [ ]:
# Reload the cleaned arrays -- the notebook can resume from here without
# re-running the (slow) Earth Engine extraction above.
modisReflectanceArray = numpy.loadtxt('modisReflectanceArray.txt')
fractionalCoverArray = numpy.loadtxt('fractionalCoverArray.txt')

# Keep only the sites with a full set of reflectance bands (no NaNs)
goodData = ~numpy.isnan(modisReflectanceArray).any(axis=1)
modisReflectanceArray = modisReflectanceArray[goodData]
fractionalCoverArray = fractionalCoverArray[goodData]

# Discard the sites whose PV+NPV+bare fractions fall short of ~100%
# for one reason or another
goodSites = fractionalCoverArray[:, 0:3].sum(axis=1) > 0.95
validCover = fractionalCoverArray[goodSites]
totalPVCover = validCover[:, 0]
totalNPVCover = validCover[:, 1]
totalBareCover = validCover[:, 2]
totalCryptoCover = validCover[:, 3]

# Fold the cryptogam fraction into NPV for now. Version 2 will put the
# crypto into the correct class based on reflectance
totalNPVCover = totalNPVCover + totalCryptoCover
satelliteReflectance = modisReflectanceArray[goodSites]
satelliteReflectance.shape

The traditional red band vs. cover relationship

As used in arid environments for some time. Red reflectance correlates somewhat with cover - the relationship works best locally


In [ ]:
from scipy import stats
# Regress bare ground fraction on the red reflectance band
xdata = modisReflectanceArray[:,0] # column 0 -- presumably the MODIS red band; TODO confirm band order from getRegion
ydata = fractionalCoverArray[:,2] # Bare ground

# Linear (degree-1) least-squares fit
slope, intercept, r_value, p_value, std_err = stats.linregress(xdata,ydata)
print "r squared = %f" % r_value**2

# Wrap the fitted line as a polynomial for easy evaluation
fitPolyFun = numpy.poly1d([slope,intercept])
fitData = fitPolyFun(xdata)


# Create a site trace (the observed points)
traceSites = plotly.graph_objs.Scatter(
    x = xdata,
    y = ydata,
    mode = 'markers',
    name = 'Sites',
    marker=dict(color='red',symbol='circle',opacity=0.5)
)

# Create a fit trace (regression line evaluated at the observed x values)
traceFit = plotly.graph_objs.Scatter(
    x = xdata,
    y = fitData,
    mode = 'lines',
    name = 'Fitted Line',
    line = dict(color = ('rgb(22, 96, 167)'),width = 4)
)

data = [traceSites,traceFit]

layout = plotly.graph_objs.Layout(
    title='Modis Red Band vs Bare Ground',
    xaxis=dict(
        title='MODIS Reflectance',
        ticklen=5,
        zeroline=False,
        gridwidth=2,
    ),
    yaxis=dict(
        title='Bare Ground',
        ticklen=5,
        gridwidth=2,
    ),
)

plotFig = plotly.graph_objs.Figure(data=data, layout=layout)

# Plot and embed in ipython notebook!
plotly.plotly.iplot(plotFig, filename='basic-scatter')

Check out the range of cover values

Using a histogram of bare ground over all the sites


In [ ]:
# Histogram of the per-site sum of the four cover fractions -- a sanity
# check that sites add to ~1.0 (the commented line shows bare ground only)
data = [plotly.graph_objs.Histogram(x=fractionalCoverArray.sum(axis=1))]
#data = [plotly.graph_objs.Histogram(x=fractionalCoverArray[:,2])]
plotly.plotly.iplot(data, filename='fractionalSums-histogram')

Add interactive terms to model the nonlinearities in the cover to reflectance relationship

This dramatically improves unmixing performance without having to resort to multiple endmember models. However, it requires good field data to guide the process. We make sure we don't overfit in the next step by trimming the singular values we use in the inversion


In [ ]:
# Build the extended feature vector used for endmember fitting: pairwise
# products of the selected bands and of their logs, the band/log cross
# terms, and the bands and logs themselves.  These interaction terms model
# the nonlinearities in the cover-to-reflectance relationship.
#
# band: length-7 vector of (scaled) reflectances.  The index mapping below
#       reorders the input into nominal band numbers 2-7; note band[2] is
#       never used -- presumably deliberate, TODO confirm against the band
#       order returned by getRegion().
#
# Returns a 1-D numpy array of 77 derived features.
def computeTransform(band):
    b2 = band[3]
    b3 = band[0]
    b4 = band[1]
    b5 = band[4]
    b6 = band[5]
    b7 = band[6]
    # Hoist the logs -- each is reused many times below
    l2, l3, l4, l5, l6, l7 = (numpy.log(b) for b in (b2, b3, b4, b5, b6, b7))
    # NOTE(review): the log-log interactions skip log(band4)*log(band7);
    # this looks like an accidental omission but is preserved here so the
    # feature order (and any saved endmember files) stays unchanged.
    return numpy.array([
        b2*b3, b2*b4, b2*b5, b2*b6, b2*b7,
        b2*l2, b2*l3, b2*l4, b2*l5, b2*l6, b2*l7,
        b3*b4, b3*b5, b3*b6, b3*b7,
        b3*l2, b3*l3, b3*l4, b3*l5, b3*l6, b3*l7,
        b4*b5, b4*b6, b4*b7,
        b4*l2, b4*l3, b4*l4, b4*l5, b4*l6, b4*l7,
        b5*b6, b5*b7,
        b5*l2, b5*l3, b5*l4, b5*l5, b5*l6, b5*l7,
        b6*b7,
        b6*l2, b6*l3, b6*l4, b6*l5, b6*l6, b6*l7,
        b7*l2, b7*l3, b7*l4, b7*l5, b7*l6, b7*l7,
        l2*l3, l2*l4, l2*l5, l2*l6, l2*l7,
        l3*l4, l3*l5, l3*l6, l3*l7,
        l4*l5, l4*l6,
        l5*l6, l5*l7,
        l6*l7,
        b2, b3, b4, b5, b6, b7,
        l2, l3, l4, l5, l6, l7])
                        


# Transform every site's reflectance into the extended feature vector.
# (+1 keeps the values strictly positive before computeTransform takes
# logs; /10000 rescales the MODIS integer reflectance.)
# A list comprehension replaces map() so the result is a concrete sequence
# under both Python 2 and 3 -- on Python 3, map() returns a lazy iterator
# that numpy.array() would not expand into a 2-D array.
satelliteReflectanceTransformed = numpy.array(
    [computeTransform(refl) for refl in (satelliteReflectance + 1) / 10000])


# Create a plot of the singular values of the feature matrix to see how
# many dimensions carry real information
U,s,Vh = svd(satelliteReflectanceTransformed.transpose())

trace0 = plotly.graph_objs.Scatter(
    # arange gives one x per singular value; linspace(0, n-1) would default
    # to 50 samples and mismatch the length of y
    x = numpy.arange(s.size),
    y = s,
    mode = 'lines',
    name = 'Singular-Values',
    line = dict(color = ('rgb(22, 96, 167)'),width = 4))

# Plot and embed in ipython notebook!
data = [trace0]
plotly.plotly.iplot(data, filename='svd-plot')

Compute the optimal dimensionality for inversion by cross validation

This takes some time - we select the optimal subspace using an exhaustive search scored by repeated random-split cross validation


In [ ]:
#Optimal number of SVs
# Exhaustive search over the singular-value truncation threshold, scored by
# cross-validated bare-ground RMSE.
sum2oneWeight=numpy.array([0.3])
# NOTE(review): the prose above describes this as 2-fold, but with this
# setting 1/10 of the sites calibrate and 9/10 validate on each of the
# 1000 random splits -- confirm which was intended.
crossvalidationAmount=10

bareRMSerror=[]
U,s,Vh = svd(satelliteReflectanceTransformed.transpose())
# Normalised singular values, used as candidate rcond cutoffs for pinv2
toleranceSVs=s/s.max()

siteDataArray=numpy.array([totalPVCover,totalNPVCover,totalBareCover])

numberSites=satelliteReflectanceTransformed[:,0].size

# floor() returns a float, which is not a valid slice index on modern
# numpy/Python 3 -- compute the calibration subset size as an int once
calibrationCount = int(numberSites / crossvalidationAmount)

for i in range(toleranceSVs.size -1):
    bareRMSerrorArray=[]
    for loop in range(1000):

        # Random calibration/validation split of the sites
        shuffleIndex=numpy.random.permutation(numberSites)
        calibrationSites=shuffleIndex[0:calibrationCount]
        # Start at calibrationCount (the original started at +1, which
        # silently dropped one site from every split)
        validationSites=shuffleIndex[calibrationCount:]

        # Solve for the endmembers on the calibration sites only,
        # truncating the pseudo-inverse at the current tolerance
        endmembersWeighted=pinv2(numpy.dot(siteDataArray[:,calibrationSites],\
        pinv2(satelliteReflectanceTransformed[calibrationSites].transpose(),\
        rcond=toleranceSVs[i])))

        # Append the weighted sum-to-one constraint row
        endmembersWeightedSum=numpy.append(endmembersWeighted,numpy.array([sum2oneWeight.repeat(endmembersWeighted[0].size)]),axis=0)

        # Non-negative least squares unmixing of one spectrum
        def spectralUnmixing(spectra):
            return nnls(endmembersWeightedSum,numpy.append(spectra,sum2oneWeight))[0]

        retrievedCoverFractions = numpy.apply_along_axis(spectralUnmixing,1,satelliteReflectanceTransformed[validationSites])

        # Per-class RMSE over the validation sites
        rmsError=numpy.sqrt(((siteDataArray[:,validationSites].transpose()-retrievedCoverFractions)**2).mean(axis=0))

        # Track the bare-ground error (class index 2)
        bareRMSerrorArray=numpy.append(bareRMSerrorArray,rmsError[2])

    bareRMSerror=numpy.append(bareRMSerror,bareRMSerrorArray.mean())

# Best truncation threshold = the one with the lowest mean bare RMSE
rcond =  toleranceSVs[bareRMSerror.argmin()]
print("rcond = %f " % rcond)

# Create a Crossvalidation plot
trace0 = plotly.graph_objs.Scatter(
    # one x per tolerance tested (linspace would default to 50 samples
    # and mismatch the length of y)
    x = numpy.arange(bareRMSerror.size),
    y = numpy.log(bareRMSerror),
    mode = 'lines',
    name = 'Fitted Line',
    line = dict(color = ('rgb(22, 167, 96)'),width = 4))

# Plot and embed in ipython notebook!
data = [trace0]
plotly.plotly.iplot(data, filename='crossval-plot')

Compute the optimal sum to one coefficient for inversion

We select the optimal sum to one value that minimised the RMSE of the unmixing


In [ ]:
# Best Sum To One
# Sweep the sum-to-one constraint weight and record the unmixing RMSE over
# all sites.
# NOTE(review): rcond is assigned here but never used in this cell -- the
# endmembersWeighted matrix is reused from the final iteration of the
# previous cell rather than being re-solved with this rcond; confirm that
# is intended before relying on the result.
rcond = 0.0001
bareRMSerror=[]
for i in range(100):
    # Candidate weights: i/200.0 steps from 0.0 to 0.495 in 0.005 increments
    sum2oneWeight=numpy.array(i/200.0)
    # Append the weighted sum-to-one constraint row to the endmember matrix
    endmembersWeightedSum=numpy.append(endmembersWeighted,numpy.array([sum2oneWeight.repeat(endmembersWeighted[0].size)]),axis=0)
    
    
    # Non-negative least squares unmixing with the constraint row appended
    def spectralUnmixing(spectra):
        return nnls(endmembersWeightedSum,numpy.append(spectra,sum2oneWeight))[0]
    
    retrievedCoverFractions = numpy.apply_along_axis(spectralUnmixing,1,satelliteReflectanceTransformed)
    # RMSE per cover class, then averaged across the three classes
    rmsError=numpy.sqrt(((numpy.array([totalPVCover,totalNPVCover,totalBareCover]).transpose()-retrievedCoverFractions)**2).mean(axis=0))
    bareRMSerror=numpy.append(bareRMSerror,rmsError.mean())


# Plot the RMSE against the sum-to-one weight actually tested.
trace0 = plotly.graph_objs.Scatter(
    # The sweep above used weights of i/200.0 (0.0 .. 0.495); the original
    # divided by 50.0 here, mislabelling the x axis by a factor of 4
    x = numpy.arange(100)/200.0,
    y = bareRMSerror,
    mode = 'lines',
    name = 'Fitted Line',
    line = dict(color = ('rgb(22, 167, 96)'),width = 4))


# Plot and embed in ipython notebook!
data = [trace0]
layout = plotly.graph_objs.Layout(
    title='RMSE vs Sum to 1 Unmixing Constraint',
    autosize=False,
    width=800,
    height=800,
    xaxis=dict(
        title='Sum to 1 weight',
        ticklen=5,
        zeroline=False,
        gridwidth=2,
    ),
    yaxis=dict(
        title='RMSE (%)',
        ticklen=5,
        gridwidth=2,
    ),
)


# Plot and embed in ipython notebook!
plotFig = plotly.graph_objs.Figure(data=data, layout=layout)
plotly.plotly.iplot(plotFig, filename='sum2one-plot')

Compute the final Unmixing RMSE and make a plot of the values

Note there is still a fair bit of "noise" but a reasonable relationship given the number of field sites and the scale mismatch between the 500 m x 500 m MODIS pixels and the 100 m x 100 m field sites


In [ ]:
#Optimal Endmember Test
# Re-solve the endmembers over ALL sites using the chosen truncation
# (rcond) and sum-to-one weight, unmix every site, and report the RMSE.

sum2oneWeight=numpy.array([0.25])
rcond = 0.0001

# endmembers = pinv( coverMatrix . pinv(featureMatrix) ), with the inner
# pseudo-inverse truncated at rcond to suppress noisy singular values
endmembersWeighted=pinv2(numpy.dot(numpy.array([totalPVCover,totalNPVCover,totalBareCover]),pinv2(satelliteReflectanceTransformed.transpose(),rcond=rcond)))

# Append the weighted sum-to-one constraint row
endmembersWeightedSum=numpy.append(endmembersWeighted,numpy.array([sum2oneWeight.repeat(endmembersWeighted[0].size)]),axis=0)

# Non-negative least squares unmixing of one spectrum
def spectralUnmixing(spectra):
    return nnls(endmembersWeightedSum,numpy.append(spectra,sum2oneWeight))[0]

retrievedCoverFractions = numpy.apply_along_axis(spectralUnmixing,1,satelliteReflectanceTransformed)

# RMSE per cover class: [PV, NPV, bare]
rmsError=numpy.sqrt(((numpy.array([totalPVCover,totalNPVCover,totalBareCover]).transpose()-retrievedCoverFractions)**2).mean(axis=0))
print rmsError



# Scatter the retrieved fractions (x) against the field-observed fractions
# (y) for each cover class
pvTrace = plotly.graph_objs.Scatter(
    x = retrievedCoverFractions[:,0],
    y = totalPVCover,
    mode = 'markers',
    name = 'PV Cover',
    marker=dict(color='green',symbol='circle',opacity=0.5)
)
npvTrace = plotly.graph_objs.Scatter(
    x = retrievedCoverFractions[:,1],
    y = totalNPVCover,
    mode = 'markers',
    name = 'NPV Cover',
    marker=dict(color='blue',symbol='circle',opacity=0.5)
)
bareTrace = plotly.graph_objs.Scatter(
    x = retrievedCoverFractions[:,2],
    y = totalBareCover,
    mode = 'markers',
    name = 'Bare Cover',
    marker=dict(color='red',symbol='circle',opacity=0.5)
)


# Plot and embed in ipython notebook!
data = [pvTrace,npvTrace,bareTrace]
layout = plotly.graph_objs.Layout(
    # Fixed typo: the original title read 'MPredicted vs Field Fractional
    # Cover'; this matches the equivalent plot later in the notebook
    title='Modis Predicted vs Field Fractional Cover',
    autosize=False,
    width=800,
    height=800,
    xaxis=dict(
        title='MODIS Fractional Cover',
        ticklen=5,
        zeroline=False,
        gridwidth=2,
    ),
    yaxis=dict(
        title='Field Cover',
        ticklen=5,
        gridwidth=2,
    ),
)


# Plot and embed in ipython notebook!
plotFig = plotly.graph_objs.Figure(data=data, layout=layout)
plotly.plotly.iplot(plotFig, filename='predict-plot')

Find and adjust outlying field sites

This is a little helper script that finds the largest outlier in green or non-green with a matching outlier of opposite sign, and adjusts that point slightly. This is to account for observer bias, which has been shown by Trevithick et al. 2013 to affect green/non-green much more than bare. We run this twice, and select the number of iterations that minimised the unmixed bare RMSE


In [ ]:
# Iteratively adjust green/non-green field observations that the unmixing
# flags as mutually inconsistent (intended to correct observer bias in the
# green/non-green split -- see Trevithick et al. 2013 reference above).
sum2oneWeight=numpy.array([0.25])
rcond = 0.001

# Test Inconsistent Field Data
# Work on copies (the * 1.0 forces a new array) so the raw field values
# are preserved
totalPVCoverEstimate = totalPVCover * 1.0
totalNPVCoverEstimate =totalNPVCover * 1.0

bareRMSerror=[]
meanRMSerror=[]
for i in range(189):

    # Re-fit the endmembers against the current (adjusted) cover estimates
    endmembersWeighted=pinv2(numpy.dot(numpy.array([totalPVCoverEstimate,totalNPVCoverEstimate,totalBareCover]),pinv2(satelliteReflectanceTransformed.transpose(),rcond=rcond)))
    endmembersWeightedSum=numpy.append(endmembersWeighted,numpy.array([sum2oneWeight.repeat(endmembersWeighted[0].size)]),axis=0)

    # Non-negative least squares unmixing with the sum-to-one row appended
    def spectralUnmixing(spectra):
        return nnls(endmembersWeightedSum,numpy.append(spectra,sum2oneWeight))[0]

    retrievedCoverFractions = numpy.apply_along_axis(spectralUnmixing,1,satelliteReflectanceTransformed)

    # Signed residuals (field - retrieved) per site and class; the variable
    # name is historical -- no absolute value is taken here
    absError=(numpy.array([totalPVCoverEstimate,totalNPVCoverEstimate,totalBareCover]).transpose()-retrievedCoverFractions)

    rmsError=numpy.sqrt(((numpy.array([totalPVCoverEstimate,totalNPVCoverEstimate,totalBareCover]).transpose()-retrievedCoverFractions)**2).mean(axis=0))

    bareRMSerror=numpy.append(bareRMSerror,rmsError[2])
    meanRMSerror=numpy.append(meanRMSerror,rmsError.mean())

    # The site whose PV and NPV residuals disagree the most is the best
    # candidate for a green/non-green misclassification
    pvNpvError=numpy.abs(absError[:,0]-absError[:,1])
    adjustSite=numpy.argmax(pvNpvError)

    # Shrink the over-estimated class by 10% and rebalance the other so
    # the three fractions still sum to one
    if absError[adjustSite,0]>absError[adjustSite,1]:
        totalPVCoverEstimate[adjustSite]=totalPVCoverEstimate[adjustSite]*0.9
        totalNPVCoverEstimate[adjustSite]=1.0-(totalPVCoverEstimate[adjustSite]+totalBareCover[adjustSite])
    else:
        totalNPVCoverEstimate[adjustSite]=totalNPVCoverEstimate[adjustSite]*0.9
        totalPVCoverEstimate[adjustSite]=1.0-(totalNPVCoverEstimate[adjustSite]+totalBareCover[adjustSite])


# NOTE(review): the estimates keep all 189 adjustments even though only the
# minimum-RMSE iteration count is reported -- confirm that is intended.
print("Optimal Adjustment is after %s iterations" % numpy.argmin(bareRMSerror))


# Plot the bare and mean RMSE trajectories over the adjustment iterations
trace0 = plotly.graph_objs.Scatter(
    # one x per iteration (linspace would default to only 50 samples and
    # mismatch the length of y)
    x = numpy.arange(bareRMSerror.size),
    y = bareRMSerror,
    mode = 'lines',
    name = 'Bare RMSE',
    line = dict(color = ('rgb(22, 167, 96)'),width = 4))

trace1 = plotly.graph_objs.Scatter(
    x = numpy.arange(meanRMSerror.size),
    y = meanRMSerror,
    mode = 'lines',
    name = 'Mean RMSE',
    line = dict(color = ('rgb(167, 22, 96)'),width = 4))

# Plot and embed in ipython notebook!
data = [trace0,trace1]
plotly.plotly.iplot(data, filename='dodgy-Sites')

Compute the final Unmixing RMSE and make a plot of the values

This looks a little better after we adjust some of the (possibly) misclassified green/non-green field points


In [ ]:
#Optimal Endmember Test
# Final assessment after the green/non-green adjustment.
# NOTE: retrievedCoverFractions and endmembersWeighted carry over from the
# final iteration of the adjustment loop in the previous cell.


# RMSE per cover class against the adjusted field estimates
rmsError=numpy.sqrt(((numpy.array([totalPVCoverEstimate,totalNPVCoverEstimate,totalBareCover]).transpose()-retrievedCoverFractions)**2).mean(axis=0))
print rmsError



# Scatter the retrieved fractions (x) against the adjusted field values (y)
pvTrace = plotly.graph_objs.Scatter(
    x = retrievedCoverFractions[:,0],
    y = totalPVCoverEstimate,
    mode = 'markers',
    name = 'PV Cover',
    marker=dict(color='green',symbol='circle',opacity=0.75, size=3)
)
npvTrace = plotly.graph_objs.Scatter(
    x = retrievedCoverFractions[:,1],
    y = totalNPVCoverEstimate,
    mode = 'markers',
    name = 'NPV Cover',
    marker=dict(color='blue',symbol='circle',opacity=0.5, size=3)
)
bareTrace = plotly.graph_objs.Scatter(
    x = retrievedCoverFractions[:,2],
    y = totalBareCover,
    mode = 'markers',
    name = 'Bare Cover',
    marker=dict(color='red',symbol='circle',opacity=0.5, size=3)
)


# Plot and embed in ipython notebook!
data = [pvTrace,npvTrace,bareTrace]
layout = plotly.graph_objs.Layout(
    title='Modis Predicted vs Field Fractional Cover',
    autosize=False,
    width=800,
    height=800,
    xaxis=dict(
        title='MODIS Fractional Cover',
        ticklen=5,
        zeroline=False,
        gridwidth=2,
    ),
    yaxis=dict(
        title='Field Cover',
        ticklen=5,
        gridwidth=2,
    ),
)


# Plot and embed in ipython notebook!
plotFig = plotly.graph_objs.Figure(data=data, layout=layout)
plotly.plotly.iplot(plotFig, filename='predict-plot')


# Save the final set of endmembers to a text file for reuse in unmixing
numpy.savetxt('endmembers.txt', endmembersWeighted.transpose())