Assignment 3 works

Matthias Neeracher 2017-02-18 23:35:45 +01:00
parent 446cc667ac
commit 69306e0367
1 changed file with 5 additions and 4 deletions

@@ -151,12 +151,13 @@ function ret = d_loss_by_d_model(model, data, wd_coefficient)
 % The returned object is supposed to be exactly like parameter <model>, i.e. it has fields ret.input_to_hid and ret.hid_to_class. However, the contents of those matrices are gradients (d loss by d model parameter), instead of model parameters.
-class_input_gradient = (log_class_prob - data.targets) / size(log_class_prob, 2);
+error_deriv = class_prob - data.targets; % <number of classes> x <number of data cases>
+data_size = size(data.targets, 2); % <number of data cases>
+hidden_grad = model.hid_to_class' * error_deriv .* hid_output .* (1 - hid_output); % hidden x data_size
 % This is the only function that you're expected to change. Right now, it just returns a lot of zeros, which is obviously not the correct output. Your job is to replace that by a correct computation.
-backprop = (model.hid_to_class' * class_input_gradient) .* hid_output .* (1 - hid_output);
-ret.input_to_hid = backprop*data.inputs' + wd_coefficient*model.input_to_hid;
-ret.hid_to_class = class_input_gradient * hid_output' + wd_coefficient*model.hid_to_class;
+ret.input_to_hid = hidden_grad*data.inputs' ./ data_size + wd_coefficient*model.input_to_hid;
+ret.hid_to_class = error_deriv*hid_output' ./ data_size + wd_coefficient*model.hid_to_class;
 end
 function ret = model_to_theta(model)
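
A quick way to confirm that the new gradients in d_loss_by_d_model are correct is a finite-difference check. The sketch below is not part of the commit; it assumes the assignment scaffold also provides theta_to_model(theta) and loss(model, data, wd_coefficient) alongside the model_to_theta shown above, so treat those names and signatures as assumptions.

function check_gradient(model, data, wd_coefficient)
  % Analytic gradient from the function changed in this commit, flattened to a vector.
  % NOTE: theta_to_model and loss are assumed helpers from the assignment scaffold.
  analytic = model_to_theta(d_loss_by_d_model(model, data, wd_coefficient));
  theta = model_to_theta(model);
  h = 1e-4;                                              % finite-difference step
  idx = randperm(numel(theta), min(100, numel(theta)));  % spot-check a subset of parameters
  for i = idx
    theta_plus = theta;  theta_plus(i)  = theta_plus(i)  + h;
    theta_minus = theta; theta_minus(i) = theta_minus(i) - h;
    numeric = (loss(theta_to_model(theta_plus), data, wd_coefficient) - ...
               loss(theta_to_model(theta_minus), data, wd_coefficient)) / (2 * h);
    % A relative difference well below 1e-6 indicates the analytic gradient matches.
    rel = abs(numeric - analytic(i)) / max(abs(numeric) + abs(analytic(i)), eps);
    fprintf('theta(%d): analytic %g, numeric %g, rel diff %g\n', i, analytic(i), numeric, rel);
  end
end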