In [1]:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt
For this problem we are going to work with the following model:
$$ y_{model}(x) = a x^2 + b x + c $$
The true values of the model parameters are as follows:
In [62]:
a_true = 0.5
b_true = 2.0
c_true = -4.0
dy = 2.0
x = np.linspace(-5,5,30)
First, generate a dataset using this model with these parameters and the following characteristics: for each x, compute the model value and add Gaussian noise with mean zero and standard deviation dy (generate all of the noise at once using the size argument of np.random.normal). After you generate the data, make a plot of the raw data (use points).
In [63]:
# Model values plus Gaussian noise of standard deviation dy at each point.
ydata = a_true*x**2 + b_true*x + c_true + np.random.normal(0.0, dy, size=x.size)
In [ ]:
assert True # leave this cell for grading the raw data generation and plot
Now fit the model to the dataset to recover estimates for the model's parameters:
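Below is a minimal sketch of one way to do this with scipy.optimize.curve_fit; the names quad_model, theta_best, and theta_err are our own choices, not prescribed by the assignment.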
In [ ]:
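def quad_model(x, a, b, c):
    # Quadratic model from the problem statement.
    return a*x**2 + b*x + c

# sigma carries the constant measurement error dy for every point;
# absolute_sigma=True makes the covariance reflect those errors directly.
theta_best, theta_cov = opt.curve_fit(quad_model, x, ydata,
                                      sigma=dy*np.ones_like(x),
                                      absolute_sigma=True)

# 1-sigma parameter errors are the square roots of the covariance diagonal.
theta_err = np.sqrt(np.diag(theta_cov))
for name, val, err in zip('abc', theta_best, theta_err):
    print('{0} = {1:.3f} +/- {2:.3f}'.format(name, val, err))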
In [17]:
assert True # leave this cell for grading the fit; should include a plot and printout of the parameters+errors
In [64]:
plt.plot(x, ydata, 'k.')
plt.xlabel('x')
plt.ylabel('y')
plt.xlim(-5,5);
In [65]:
plt.errorbar(x, ydata, dy,
             fmt='.k', ecolor='lightgray')
In [66]:
# Evaluate the best-fit quadratic model on the x grid for plotting.
yfit = quad_model(x, *theta_best)
In [73]:
plt.plot(x, yfit)
plt.plot(x, ydata, 'k.')
plt.xlabel('x')
plt.ylabel('y')
plt.ylim(-20,100);