100%|██████████| 100/100 [00:06<00:00, 20.34epoch/s, acc=-0.75, diff=2.57e-06, grad=107, loss=8.24e+04, w=82.3]
100%|██████████| 100/100 [00:05<00:00, 19.98epoch/s, acc=-0.75, diff=1.04e-05, grad=69.4, loss=8.28e+04, w=72.1]
 15%|█▌        | 15/100 [00:01<00:07, 12.13epoch/s, acc=-0.75, diff=0.0245, grad=98.2, loss=8.24e+04, w=120]
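The first two runs finish all 100 epochs in five to six seconds; the third was interrupted by hand at epoch 15, producing the traceback below. The per-epoch postfix fields (acc, diff, grad, loss, w) are the kind of live diagnostics tqdm attaches through set_postfix. A minimal sketch of producing such a bar, with placeholder metric values standing in for the real training loop:

from tqdm import trange

# Sketch only: the metric values below are placeholders, not the
# project's actual computations.
pbar = trange(100, unit='epoch')
for epoch in pbar:
    # ... one training epoch would run here ...
    pbar.set_postfix(acc=-0.75, diff=2.57e-06, grad=107.0,
                     loss=8.24e+04, w=82.3)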
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-7-ffd68fd65e6a> in <module>()
     11 
     12 idx_min, rmse_all, lambdas = k_fold.cross_validation_select(X_p, y_01, model, loss, kw_model = model_args, seed = 1,
---> 13                                                              k_fold = 3, lambdas = lambdas, do_plot = True, do_tqdm = False)

~/Project1/src/scripts/k_fold.py in cross_validation_select(x, y, model, loss, kw_model, kw_loss, seed, k_fold, do_plot, do_tqdm, lambdas)
     55         for k in range(k_fold):
     56             [rmse_[i].append(x) for i, x in
---> 57              enumerate(cross_validation(y, x, k_indices, k, model, kw_model, loss, kw_loss, lambda_))]
     58         [rmse[i].append(np.mean(x)) for (i, x) in enumerate(rmse_)]
     59         [rmse_all[i].append(x) for (i, x) in enumerate(rmse_)]

~/Project1/src/scripts/k_fold.py in cross_validation(y, tx, k_indices, k, model, kw_model, loss, kw_loss, lambda_)
     31 
     32     # training ridge regression
---> 33     weights, _ = model(y_tr, x_tr, lambda_, **kw_model)
     34 
     35     # computing losses

~/Project1/src/scripts/implementation.py in reg_logistic_regression_newton_batch(y, tx, lambda_, initial_w, batch_size, max_iters, gamma, debug)
     53 def reg_logistic_regression_newton_batch(y, tx, lambda_, initial_w, batch_size, max_iters, gamma, debug = False):
     54     """ implement regularized logistic regression via gradient descent """
---> 55     losses, ws = stochastic_gradient_descent(y, tx, initial_w, batch_size, max_iters, gamma, loss_f = model_logistic.reg_loss, grad_f = model_logistic.newton_reg_grad, kwargs = {'lambda_': lambda_}, debug = debug)
     56     return get_last_ans(ws, losses)
     57 

~/Project1/src/scripts/helpers.py in stochastic_gradient_descent(y, tx, initial_w, batch_size, max_iters, gamma, loss_f, grad_f, kwargs, debug)
     76         # calculating loss and gradient
     77         loss = loss_f(y, tx, w, **kwargs)
---> 78         stoch_gradient = grad_f(y_, tx_, w, **kwargs)
     79 
     80         # updating w

~/Project1/src/scripts/model_logistic.py in newton_reg_grad(y, x, w, lambda_)
     32 def newton_reg_grad(y, x, w, lambda_):
     33     """ returns regularized newton gradient """
---> 34     return newton_grad(y, x, w, lambda_)
     35 
     36 ### SECONDARY IMPLEMENTATION

~/Project1/src/scripts/model_logistic.py in newton_grad(y, x, w, lambda_)
     24     """ returns newton gradient """
     25     N, D = x.shape
---> 26     sigma = expit(x @ w).flatten()
     27     S = diags(np.multiply(sigma, 1 - sigma))
     28     H = x.T @ S @ x
KeyboardInterrupt:
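The interrupt lands in newton_grad, which rebuilds the weighted Hessian H = x.T @ S @ x on every stochastic batch. For context, a minimal self-contained sketch of the Newton direction this frame appears to be computing; everything after H = x.T @ S @ x (the gradient term, the form of the regularizer, and the solve) is an assumption based on the standard Newton step for L2-regularized logistic regression, not the project's code:

import numpy as np
from scipy.special import expit
from scipy.sparse import diags

def newton_direction(y, x, w, lambda_):
    """ assumed completion of newton_grad: Newton direction for
        L2-regularized logistic regression (sketch, not project code) """
    N, D = x.shape
    sigma = expit(x @ w).flatten()               # predicted probabilities
    S = diags(sigma * (1 - sigma))               # diagonal Hessian weights
    H = x.T @ S @ x + 2 * lambda_ * np.eye(D)    # regularized Hessian (assumed form)
    grad = x.T @ (sigma - y) + 2 * lambda_ * w   # regularized gradient (assumed form)
    return np.linalg.solve(H, grad)              # direction consumed as the "gradient" by SGD

Note that forming H turns every batch update into a D-by-D linear solve, so each Newton step is substantially more expensive than a plain gradient evaluation, which typically dominates the per-epoch cost seen in the progress bars above.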