In [2]:
# Exercise 1: plot the Beta probability density function for several (a, b) parameter pairs
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import beta

def plot_beta(a, b):
    # Evaluate the Beta(a, b) density on a grid over (0, 1) and add it to the current figure
    x = np.arange(0.01, 1, 0.01)
    y = beta.pdf(x, a, b)
    plt.plot(x, y)
    plt.xlim(0, 1)
    plt.ylim(0, 2.5)

plot_beta(0.5, 0.5)
plot_beta(5, 1)
plot_beta(1, 3)
plot_beta(2, 2)
plot_beta(2, 5)
plt.show()
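With five curves on one axis it is hard to tell which (a, b) pair produced which line. As a minimal optional sketch (not part of the exercise; the labeled helper below is an illustrative addition), a legend can be attached to each curve:
In [ ]:
# Optional sketch: same curves as Exercise 1, with a legend identifying each (a, b) pair
def plot_beta_labeled(a, b):
    x = np.arange(0.01, 1, 0.01)
    plt.plot(x, beta.pdf(x, a, b), label=f"Beta({a}, {b})")

for a, b in [(0.5, 0.5), (5, 1), (1, 3), (2, 2), (2, 5)]:
    plot_beta_labeled(a, b)
plt.xlim(0, 1)
plt.ylim(0, 2.5)
plt.legend()
plt.show()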
In [4]:
# Exercise 2: draw a Beta(0.5, 0.5) sample and compute its descriptive statistics
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt

random_values = np.random.beta(0.5, 0.5, size=100)
mean_value = np.mean(random_values)
median_value = np.median(random_values)
mode_value = stats.mode(random_values, axis=None)
kurtosis_value = stats.kurtosis(random_values)
skewness_value = stats.skew(random_values)
print('mean: ' + str(mean_value))
print('median: ' + str(median_value))
print('mode: ' + str(mode_value))
print('kurtosis: ' + str(kurtosis_value))
print('skewness: ' + str(skewness_value))
# Mark each statistic as a vertical line on a shared axis
plt.axvline(mean_value)
plt.axvline(median_value)
plt.axvline(mode_value[0])
plt.axvline(kurtosis_value)
plt.axvline(skewness_value)
plt.show()
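The vertical lines are easier to interpret against the sample itself. As an optional sketch (assuming the random_values array from Exercise 2 is still in memory; the colors and labels are illustrative), a histogram can be drawn behind the mean and median:
In [ ]:
# Optional sketch: show the sample distribution behind the location statistics
plt.hist(random_values, bins=20, density=True, alpha=0.3)
plt.axvline(mean_value, color='C1', label='mean')
plt.axvline(median_value, color='C2', label='median')
plt.legend()
plt.show()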
In [5]:
# Exercise 3: simple linear regression on data generated from Beta samples
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt

x = np.random.beta(0.5, 0.5, size=300)
y = x + np.random.beta(5, 1, size=300)
# Fit y = intercept + slope * x by ordinary least squares
slope, intercept, r_value, p_value, slope_std_error = stats.linregress(x, y)
predict_y = intercept + slope * x
pred_error = y - predict_y
degrees_of_freedom = len(x) - 2
residual_std_error = np.sqrt(np.sum(pred_error**2) / degrees_of_freedom)
plt.plot(x, y, '.')
plt.plot(x, predict_y, 'k-')
print("r_value:", r_value)
print("r_squared:", r_value**2)
print("slope_std_error:", slope_std_error)
print("residual_std_error:", residual_std_error)
plt.show()
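The slope_std_error returned by linregress and the residual_std_error computed above are related by the usual formula SE(slope) = s / sqrt(sum((x - mean(x))**2)), where s is the residual standard error. As an optional sketch (reusing the variables from Exercise 3; not part of the exercise), this can be verified directly:
In [ ]:
# Optional sketch: check linregress's slope standard error against the textbook formula
manual_slope_se = residual_std_error / np.sqrt(np.sum((x - np.mean(x))**2))
print("manual slope_std_error:", manual_slope_se)  # should agree with slope_std_error above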
In [8]:
import seaborn as sns
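The seaborn import above is not used in any of the preceding cells. As a minimal sketch of one way it could follow on from Exercise 3 (assuming the x and y arrays are still in memory), regplot draws the same scatter plot with a fitted regression line and a confidence band:
In [ ]:
# Optional sketch: reproduce the Exercise 3 scatter plot and fit with seaborn (illustrative only)
sns.regplot(x=x, y=y)
plt.show()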