/netlab3.3/olgd.m

function [net, options, errlog, pointlog] = olgd(net, options, x, t)
%OLGD On-line gradient descent optimization.
%
% Description
% [NET, OPTIONS, ERRLOG, POINTLOG] = OLGD(NET, OPTIONS, X, T) uses on-
% line gradient descent to find a local minimum of the error function
% for the network NET computed on the input data X and target values T.
% A log of the error values after each cycle is (optionally) returned
% in ERRLOG, and a log of the points visited is (optionally) returned
% in POINTLOG. Because the gradient is computed on-line (i.e. after
% each pattern) this can be quite inefficient in Matlab.
%
% The error function value at the final weight vector is returned in
% OPTIONS(8).
%
% The optional parameters have the following interpretations.
%
% OPTIONS(1) is set to 1 to display error values; this also logs error
% values in the return argument ERRLOG, and the points visited in the
% return argument POINTLOG. If OPTIONS(1) is set to 0, then only
% warning messages are displayed. If OPTIONS(1) is -1, then nothing is
% displayed.
%
% OPTIONS(2) is the precision required for the weights at the solution.
% If the maximum absolute change in the weights over one cycle is less
% than OPTIONS(2), then this condition is satisfied.
%
% OPTIONS(3) is the precision required of the objective function at the
% solution. If the absolute difference between the error function values
% on two successive cycles is less than OPTIONS(3), then this condition
% is satisfied. Both this and the previous condition must be satisfied
% for termination. Note that testing the function value at each cycle
% roughly halves the speed of the algorithm.
%
% OPTIONS(5) determines whether the patterns are sampled randomly with
% replacement. If it is 0 (the default), then patterns are sampled in
% order.
%
% OPTIONS(6) determines if the learning rate decays. If it is 1 then
% the learning rate decays at a rate of 1/T. If it is 0 (the default)
% then the learning rate is constant.
%
% OPTIONS(9) should be set to 1 to check the user defined gradient
% function.
%
% OPTIONS(10) returns the total number of function evaluations
% (including those in any line searches).
%
% OPTIONS(11) returns the total number of gradient evaluations.
%
% OPTIONS(14) is the maximum number of iterations (passes through the
% complete pattern set); default 100.
%
% OPTIONS(17) is the momentum; default 0.5.
%
% OPTIONS(18) is the learning rate; default 0.01.
%
% See also
% GRADDESC
%
% Copyright (c) Ian T Nabney (1996-2001)
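
% Example usage (a sketch): assumes Netlab's MLP function for the network
% and user-supplied data matrices x and t; the option values below are
% illustrative, not prescribed defaults.
%
%   net = mlp(2, 3, 1, 'linear');   % 2 inputs, 3 hidden units, 1 linear output
%   options = zeros(1, 18);
%   options(1) = 1;                 % display (and log) the error each cycle
%   options(6) = 1;                 % anneal the learning rate as 1/t
%   options(14) = 50;               % 50 passes through the pattern set
%   options(17) = 0.5;              % momentum
%   options(18) = 0.01;             % initial learning rate
%   [net, options, errlog] = olgd(net, options, x, t);
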
% Set up the options.
if length(options) < 18
  error('Options vector too short')
end

if (options(14))
  niters = options(14);
else
  niters = 100;
end

% Learning rate: must be positive
if (options(18) > 0)
  eta = options(18);
else
  eta = 0.01;
end
% Save initial learning rate for annealing
lr = eta;
% Momentum term: allow zero momentum
if (options(17) >= 0)
  mu = options(17);
else
  mu = 0.5;
end

pakstr = [net.type, 'pak'];
unpakstr = [net.type, 'unpak'];
% Extract initial weights from the network
w = feval(pakstr, net);

display = options(1);

% Work out if we need to compute f at each iteration: needed if results
% are displayed, if the termination criterion uses the function value, or
% if an error log has been requested (ERRLOG needs a value for fnew).
fcneval = (display | options(3) | nargout >= 3);
% Check gradients
if (options(9))
  feval('gradchek', w, 'neterr', 'netgrad', net, x, t);
end

dwold = zeros(1, length(w));
fold = 0;  % Must be initialised so that termination test can be performed
ndata = size(x, 1);

if fcneval
  fnew = neterr(w, net, x, t);
  options(10) = options(10) + 1;
  fold = fnew;
end
j = 1;
if nargout >= 3
  errlog(j, :) = fnew;
  if nargout == 4
    pointlog(j, :) = w;
  end
end
% Main optimization loop.
while j <= niters
  wold = w;
  if options(5)
    % Randomise order of pattern presentation: with replacement
    pnum = ceil(rand(ndata, 1).*ndata);
  else
    pnum = 1:ndata;
  end
  for k = 1:ndata
    grad = netgrad(w, net, x(pnum(k),:), t(pnum(k),:));
    if options(6)
      % Let learning rate decrease as 1/t
      lr = eta/((j-1)*ndata + k);
    end
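    % Momentum update: the new step dw is mu times the previous step dwold,
    % minus the (possibly annealed) learning rate lr times the gradient for
    % the current pattern; the weights then move by dw.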
    dw = mu*dwold - lr*grad;
    w = w + dw;
    dwold = dw;
  end
  options(11) = options(11) + 1;  % Increment gradient evaluation count
  if fcneval
    fold = fnew;
    fnew = neterr(w, net, x, t);
    options(10) = options(10) + 1;
  end
  if display > 0
    fprintf(1, 'Iteration %5d Error %11.8f\n', j, fnew);
  end
  j = j + 1;
  if nargout >= 3
    errlog(j) = fnew;
    if nargout == 4
      pointlog(j, :) = w;
    end
  end
  if (max(abs(w - wold)) < options(2) & abs(fnew - fold) < options(3))
    % Termination criteria are met
    options(8) = fnew;
    net = feval(unpakstr, net, w);
    return;
  end
end

if fcneval
  options(8) = fnew;
else
  % Return error on entire dataset
  options(8) = neterr(w, net, x, t);
  options(10) = options(10) + 1;
end
if (options(1) >= 0)
  disp(maxitmess);
end

net = feval(unpakstr, net, w);
end