
/octave-3.6.2/scripts/optimization/fsolve.m

## Copyright (C) 2008-2012 VZLU Prague, a.s.
##
## This file is part of Octave.
##
## Octave is free software; you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or (at
## your option) any later version.
##
## Octave is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Octave; see the file COPYING.  If not, see
## <http://www.gnu.org/licenses/>.
##
## Author: Jaroslav Hajek <highegg@gmail.com>

## -*- texinfo -*-
## @deftypefn {Function File} {} fsolve (@var{fcn}, @var{x0}, @var{options})
## @deftypefnx {Function File} {[@var{x}, @var{fvec}, @var{info}, @var{output}, @var{fjac}] =} fsolve (@var{fcn}, @dots{})
## Solve a system of nonlinear equations defined by the function @var{fcn}.
## @var{fcn} should accept a vector (array) defining the unknown variables,
## and return a vector of left-hand sides of the equations.  Right-hand sides
## are defined to be zeros.
## In other words, this function attempts to determine a vector @var{x} such
## that @code{@var{fcn} (@var{x})} gives (approximately) all zeros.
## @var{x0} determines a starting guess.  The shape of @var{x0} is preserved
## in all calls to @var{fcn}, but otherwise it is treated as a column vector.
## @var{options} is a structure specifying additional options.
## Currently, @code{fsolve} recognizes these options:
## @code{"FunValCheck"}, @code{"OutputFcn"}, @code{"TolX"},
## @code{"TolFun"}, @code{"MaxIter"}, @code{"MaxFunEvals"},
## @code{"Jacobian"}, @code{"Updating"}, @code{"ComplexEqn"},
## @code{"TypicalX"}, @code{"AutoScaling"} and @code{"FinDiffType"}.
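##
## For example, a simple call might look like this (the two-equation
## system below is purely illustrative):
##
## @example
## fcn = @@(x) [2*x(1) - x(2) - exp (-x(1));
##              -x(1) + 2*x(2) - exp (-x(2))];
## [x, fvec, info] = fsolve (fcn, [-5; -5]);
## @end example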
##
## If @code{"Jacobian"} is @code{"on"}, it specifies that @var{fcn},
## called with 2 output arguments, also returns the Jacobian matrix
## of right-hand sides at the requested point.  @code{"TolX"} specifies
## the termination tolerance in the unknown variables, while
## @code{"TolFun"} is a tolerance for equations.  Default is @code{1e-7}
## for both @code{"TolX"} and @code{"TolFun"}.
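##
## A minimal sketch of a user function suitable for this option (the name
## @code{jac_fcn} and the system are illustrative only):
##
## @example
## function [fvec, fjac] = jac_fcn (x)
##   fvec = [x(1)^2 + x(2)^2 - 1; x(1) - x(2)];
##   if (nargout == 2)
##     ## Analytic Jacobian of the left-hand sides.
##     fjac = [2*x(1), 2*x(2); 1, -1];
##   endif
## endfunction
##
## x = fsolve (@@jac_fcn, [1; 0], optimset ("Jacobian", "on"));
## @end example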
##
## If @code{"AutoScaling"} is on, the variables will be automatically scaled
## according to the column norms of the (estimated) Jacobian.  As a result,
## TolFun becomes scaling-independent.  By default, this option is off,
## because it may sometimes deliver unexpected (though mathematically
## correct) results.
##
## If @code{"Updating"} is @code{"on"}, the function will attempt to use
## Broyden updates to update the Jacobian, in order to reduce the number of
## Jacobian calculations.
## If your user function always calculates the Jacobian (regardless of the
## number of output arguments), this option provides no advantage and should
## be set to @code{"off"}.
##
## If @code{"ComplexEqn"} is @code{"on"}, @code{fsolve} will attempt to solve
## complex equations in complex variables, assuming that the equations
## possess a complex derivative (i.e., are holomorphic).  If this is not what
## you want, you should unpack the real and imaginary parts of the system to
## get a real system.
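##
## A sketch of such an unpacking, assuming a complex map @code{f} of
## @code{n} complex variables with starting guess @code{z0} (all names here
## are illustrative):
##
## @example
## g = @@(u) [real(f (u(1:n) + i*u(n+1:end)));
##            imag(f (u(1:n) + i*u(n+1:end)))];
## u = fsolve (g, [real(z0); imag(z0)]);
## z = u(1:n) + i*u(n+1:end);
## @end example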
##
## For a description of the other options, see @code{optimset}.
##
## On return, @var{fvec} contains the value of the function @var{fcn}
## evaluated at @var{x}, and @var{info} may be one of the following values:
##
## @table @asis
## @item 1
## Converged to a solution point.  Relative residual error is less than
## specified by TolFun.
##
## @item 2
## Last relative step size was less than TolX.
##
## @item 3
## Last relative decrease in residual was less than TolFun.
##
## @item 0
## Iteration limit exceeded.
##
## @item -3
## The trust region radius became excessively small.
## @end table
##
## Note: If you only have a single nonlinear equation of one variable, using
## @code{fzero} is usually a much better idea.
##
## Note about user-supplied Jacobians:
## As an inherent property of the algorithm, the Jacobian is always requested
## for a solution vector whose residual vector is already known, and it is
## the last accepted successful step.  Often this will be one of the last two
## calls, but not always.  If the savings from reusing intermediate results
## of the residual calculation in the Jacobian calculation are significant,
## the best strategy is to employ OutputFcn: after a vector is evaluated for
## residuals, if OutputFcn is called with that vector, then the intermediate
## results should be saved for future Jacobian evaluation, and should be kept
## until a Jacobian evaluation is requested or until OutputFcn is called with
## a different vector, in which case they should be dropped in favor of this
## most recent vector.  A short example of how this can be achieved follows:
##
## @example
## function [fvec, fjac] = user_func (x, optimvalues, state)
## persistent sav = [], sav0 = [];
## if (nargin == 1)
##   ## evaluation call
##   if (nargout == 1)
##     sav0.x = x; # mark saved vector
##     ## calculate fvec, save results to sav0.
##   elseif (nargout == 2)
##     ## calculate fjac using sav.
##   endif
## else
##   ## outputfcn call.
##   if (all (x == sav0.x))
##     sav = sav0;
##   endif
##   ## maybe output iteration status, etc.
## endif
## endfunction
##
## ## @dots{}
##
## fsolve (@@user_func, x0, optimset ("OutputFcn", @@user_func, @dots{}))
## @end example
## @seealso{fzero, optimset}
## @end deftypefn

## PKG_ADD: ## Discard result to avoid polluting workspace with ans at startup.
## PKG_ADD: [~] = __all_opts__ ("fsolve");

function [x, fvec, info, output, fjac] = fsolve (fcn, x0, options = struct ())

  ## Get default options if requested.
  if (nargin == 1 && ischar (fcn) && strcmp (fcn, 'defaults'))
    x = optimset ("MaxIter", 400, "MaxFunEvals", Inf,
                  "Jacobian", "off", "TolX", 1e-7, "TolFun", 1e-7,
                  "OutputFcn", [], "Updating", "on", "FunValCheck", "off",
                  "ComplexEqn", "off", "FinDiffType", "central",
                  "TypicalX", [], "AutoScaling", "off");
    return;
  endif

  if (nargin < 2 || nargin > 3 || ! ismatrix (x0))
    print_usage ();
  endif

  if (ischar (fcn))
    fcn = str2func (fcn, "global");
  elseif (iscell (fcn))
    fcn = @(x) make_fcn_jac (x, fcn{1}, fcn{2});
  endif

  xsiz = size (x0);
  n = numel (x0);

  has_jac = strcmpi (optimget (options, "Jacobian", "off"), "on");
  cdif = strcmpi (optimget (options, "FinDiffType", "central"), "central");
  maxiter = optimget (options, "MaxIter", 400);
  maxfev = optimget (options, "MaxFunEvals", Inf);
  outfcn = optimget (options, "OutputFcn");
  updating = strcmpi (optimget (options, "Updating", "on"), "on");
  complexeqn = strcmpi (optimget (options, "ComplexEqn", "off"), "on");

  ## Get scaling matrix using the TypicalX option.  If set to "auto", the
  ## scaling matrix is estimated using the Jacobian.
  typicalx = optimget (options, "TypicalX");
  if (isempty (typicalx))
    typicalx = ones (n, 1);
  endif
  autoscale = strcmpi (optimget (options, "AutoScaling", "off"), "on");
  if (! autoscale)
    dg = 1 ./ typicalx;
  endif

  funvalchk = strcmpi (optimget (options, "FunValCheck", "off"), "on");
  if (funvalchk)
    ## Replace fcn with a guarded version.
    fcn = @(x) guarded_eval (fcn, x, complexeqn);
  endif

  ## These defaults are rather stringent.  I think that normally, user
  ## prefers accuracy to performance.
  macheps = eps (class (x0));
  tolx = optimget (options, "TolX", 1e-7);
  tolf = optimget (options, "TolFun", 1e-7);

  factor = 1;

  niter = 1;
  nfev = 1;

  x = x0(:);
  info = 0;

  ## Initial evaluation.
  ## Handle arbitrary shapes of x and f and remember them.
  fvec = fcn (reshape (x, xsiz));
  fsiz = size (fvec);
  fvec = fvec(:);
  fn = norm (fvec);
  m = length (fvec);
  n = length (x);

  if (! isempty (outfcn))
    optimvalues.iter = niter;
    optimvalues.funccount = nfev;
    optimvalues.fval = fn;
    optimvalues.searchdirection = zeros (n, 1);
    state = 'init';
    stop = outfcn (x, optimvalues, state);
    if (stop)
      ## Setting info makes the "! info" loop condition below fail, so the
      ## iteration loops are skipped and the outputs are still assembled.
      info = -1;
    endif
  endif

  nsuciter = 0;

  ## Outer loop.
  while (niter < maxiter && nfev < maxfev && ! info)

    ## Calculate function value and Jacobian (possibly via FD).
    if (has_jac)
      [fvec, fjac] = fcn (reshape (x, xsiz));
      ## If the Jacobian is sparse, disable Broyden updating.
      if (issparse (fjac))
        updating = false;
      endif
      fvec = fvec(:);
      nfev ++;
    else
      fjac = __fdjac__ (fcn, reshape (x, xsiz), fvec, typicalx, cdif);
      nfev += (1 + cdif) * length (x);
    endif

    ## For square and overdetermined systems, we update a QR factorization
    ## of the Jacobian to avoid solving a full system in each step.  In this
    ## case, we pass a triangular matrix to __dogleg__.
    useqr = updating && m >= n && n > 10;

    if (useqr)
      ## FIXME: Currently, pivoting is mostly useless because the \ operator
      ## cannot exploit the resulting props of the triangular factor.
      ## Unpivoted QR is significantly faster so it doesn't seem right to
      ## pivot just to get invariance.  Original MINPACK didn't pivot either,
      ## at least when qr updating was used.
      [q, r] = qr (fjac, 0);
    endif

    if (autoscale)
      ## Get column norms, use them as scaling factors.
      jcn = norm (fjac, 'columns').';
      if (niter == 1)
        dg = jcn;
        dg(dg == 0) = 1;
      else
        ## Rescale adaptively.
        ## FIXME: the original minpack used the following rescaling strategy:
        ##   dg = max (dg, jcn);
        ## but it seems not good if we start with a bad guess yielding
        ## Jacobian columns with large norms that later decrease, because
        ## the corresponding variable will still be overscaled.  So instead,
        ## we only give the old scaling a small momentum, but do not honor it.
        dg = max (0.1*dg, jcn);
      endif
    endif

    if (niter == 1)
      xn = norm (dg .* x);
      ## FIXME: something better?
      delta = factor * max (xn, 1);
    endif

    ## It also seems that in the case of fast (and inhomogeneously) changing
    ## Jacobian, the Broyden updates are of little use, so maybe we could
    ## skip them if a big disproportional change is expected.  The question
    ## is, of course, how to define the above terms :)

    lastratio = 0;
    nfail = 0;
    nsuc = 0;
    decfac = 0.5;

    ## Inner loop.
    while (niter <= maxiter && nfev < maxfev && ! info)

      ## Get trust-region model (dogleg) minimizer.
      if (useqr)
        qtf = q'*fvec;
        s = - __dogleg__ (r, qtf, dg, delta);
        w = qtf + r * s;
      else
        s = - __dogleg__ (fjac, fvec, dg, delta);
        w = fvec + fjac * s;
      endif

      sn = norm (dg .* s);
      if (niter == 1)
        delta = min (delta, sn);
      endif

      fvec1 = fcn (reshape (x + s, xsiz))(:);
      fn1 = norm (fvec1);
      nfev ++;

      if (fn1 < fn)
        ## Scaled actual reduction.
        actred = 1 - (fn1/fn)^2;
      else
        actred = -1;
      endif

      ## Scaled predicted reduction, and ratio.
      t = norm (w);
      if (t < fn)
        prered = 1 - (t/fn)^2;
        ratio = actred / prered;
      else
        prered = 0;
        ratio = 0;
      endif

      ## Update delta.
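      ## The acceptance threshold adapts to history: it tightens towards 0.9
      ## as the previous ratio improves, and decfac is raised to the power
      ## 1.4142 after each failure, so the radius shrinks progressively
      ## faster on repeated rejections.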
      if (ratio < min (max (0.1, 0.8*lastratio), 0.9))
        nsuc = 0;
        nfail ++;
        delta *= decfac;
        decfac ^= 1.4142;
        if (delta <= 1e1*macheps*xn)
          ## Trust region became uselessly small.
          info = -3;
          break;
        endif
      else
        lastratio = ratio;
        decfac = 0.5;
        nfail = 0;
        nsuc ++;
        if (abs (1-ratio) <= 0.1)
          delta = 1.4142*sn;
        elseif (ratio >= 0.5 || nsuc > 1)
          delta = max (delta, 1.4142*sn);
        endif
      endif

      if (ratio >= 1e-4)
        ## Successful iteration.
        x += s;
        xn = norm (dg .* x);
        fvec = fvec1;
        fn = fn1;
        nsuciter ++;
      endif

      niter ++;

      ## FIXME: should outputfcn be only called after a successful iteration?
      if (! isempty (outfcn))
        optimvalues.iter = niter;
        optimvalues.funccount = nfev;
        optimvalues.fval = fn;
        optimvalues.searchdirection = s;
        state = 'iter';
        stop = outfcn (x, optimvalues, state);
        if (stop)
          info = -1;
          break;
        endif
      endif

      ## Tests for termination conditions.  A mysterious place, anything
      ## can happen if you change something here...

      ## The rule of thumb (which I'm not sure M*b is quite following)
      ## is that for a tolerance that depends on scaling, only 0 makes
      ## sense as a default value.  But 0 usually means uselessly long
      ## iterations, so we need scaling-independent tolerances wherever
      ## possible.

      ## FIXME -- why tolf*n*xn?  If abs (e) ~ abs (x) * eps is a vector
      ## of perturbations of x, then norm (fjac*e) <= eps*n*xn, i.e., by
      ## tolf ~ eps we demand as much accuracy as we can expect.
      if (fn <= tolf*n*xn)
        info = 1;
        ## The following tests done only after successful step.
      elseif (ratio >= 1e-4)
        ## This one is classic.  Note that we use scaled variables again,
        ## but compare to scaled step, so nothing bad.
        if (sn <= tolx*xn)
          info = 2;
          ## Again a classic one.  It seems weird to use the same tolf
          ## for two different tests, but that's what M*b manual appears
          ## to say.
        elseif (actred < tolf)
          info = 3;
        endif
      endif

      ## Criterion for recalculating Jacobian.
      if (! updating || nfail == 2 || nsuciter < 2)
        break;
      endif

      ## Compute the scaled Broyden update.
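      ## With D = diag (dg) and the predicted residual w, this is Broyden's
      ## rank-1 correction (fvec1 - w) * (D*D*s)' / sn^2, applied either
      ## directly to fjac or to its QR factors via qrupdate.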
      if (useqr)
        u = (fvec1 - q*w) / sn;
        v = dg .* ((dg .* s) / sn);

        ## Update the QR factorization.
        [q, r] = qrupdate (q, r, u, v);
      else
        u = (fvec1 - w) / sn;
        v = dg .* ((dg .* s) / sn);

        ## Update the Jacobian.
        fjac += u * v';
      endif
    endwhile
  endwhile

  ## Restore original shapes.
  x = reshape (x, xsiz);
  fvec = reshape (fvec, fsiz);

  output.iterations = niter;
  output.successful = nsuciter;
  output.funcCount = nfev;

endfunction

## An assistant function that evaluates a function handle and checks for
## bad results.
function [fx, jx] = guarded_eval (fun, x, complexeqn)
  if (nargout > 1)
    [fx, jx] = fun (x);
  else
    fx = fun (x);
    jx = [];
  endif

  if (! complexeqn && ! (isreal (fx) && isreal (jx)))
    error ("fsolve:notreal", "fsolve: non-real value encountered");
  elseif (complexeqn && ! (isnumeric (fx) && isnumeric (jx)))
    error ("fsolve:notnum", "fsolve: non-numeric value encountered");
  elseif (any (isnan (fx(:))))
    error ("fsolve:isnan", "fsolve: NaN value encountered");
  endif
endfunction
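
## Compose a residual function and a Jacobian function, supplied to fsolve
## as a cell array of two handles, into a single two-output function.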
function [fx, jx] = make_fcn_jac (x, fcn, fjac)
  fx = fcn (x);
  if (nargout == 2)
    jx = fjac (x);
  endif
endfunction

%!function retval = __f (p)
%!  x = p(1);
%!  y = p(2);
%!  z = p(3);
%!  retval = zeros (3, 1);
%!  retval(1) = sin(x) + y**2 + log(z) - 7;
%!  retval(2) = 3*x + 2**y - z**3 + 1;
%!  retval(3) = x + y + z - 5;
%!endfunction
%!test
%! x_opt = [ 0.599054;
%!           2.395931;
%!           2.005014 ];
%! tol = 1.0e-5;
%! [x, fval, info] = fsolve (@__f, [ 0.5; 2.0; 2.5 ]);
%! assert (info > 0);
%! assert (norm (x - x_opt, Inf) < tol);
%! assert (norm (fval) < tol);
%!function retval = __f (p)
%!  x = p(1);
%!  y = p(2);
%!  z = p(3);
%!  w = p(4);
%!  retval = zeros (4, 1);
%!  retval(1) = 3*x + 4*y + exp (z + w) - 1.007;
%!  retval(2) = 6*x - 4*y + exp (3*z + w) - 11;
%!  retval(3) = x^4 - 4*y^2 + 6*z - 8*w - 20;
%!  retval(4) = x^2 + 2*y^3 + z - w - 4;
%!endfunction
%!test
%! x_opt = [ -0.767297326653401, 0.590671081117440, 1.47190018629642, -1.52719341133957 ];
%! tol = 1.0e-5;
%! [x, fval, info] = fsolve (@__f, [-1, 1, 2, -1]);
%! assert (info > 0);
%! assert (norm (x - x_opt, Inf) < tol);
%! assert (norm (fval) < tol);
%!function retval = __f (p)
%!  x = p(1);
%!  y = p(2);
%!  z = p(3);
%!  retval = zeros (4, 1);
%!  retval(1) = sin(x) + y**2 + log(z) - 7;
%!  retval(2) = 3*x + 2**y - z**3 + 1;
%!  retval(3) = x + y + z - 5;
%!  retval(4) = x*x + y - z*log(z) - 1.36;
%!endfunction
%!test
%! ## Overdetermined system: 4 equations in 3 unknowns.
%! x_opt = [ 0.599054;
%!           2.395931;
%!           2.005014 ];
%! tol = 1.0e-5;
%! [x, fval, info] = fsolve (@__f, [ 0.5; 2.0; 2.5 ]);
%! assert (info > 0);
%! assert (norm (x - x_opt, Inf) < tol);
%! assert (norm (fval) < tol);
%!function retval = __f (p)
%!  x = p(1);
%!  y = p(2);
%!  z = p(3);
%!  retval = zeros (3, 1);
%!  retval(1) = sin(x) + y**2 + log(z) - 7;
%!  retval(2) = 3*x + 2**y - z**3 + 1;
%!  retval(3) = x + y + z - 5;
%!endfunction
%!test
%! x_opt = [ 0.599054;
%!           2.395931;
%!           2.005014 ];
%! tol = 1.0e-5;
%! opt = optimset ("Updating", "qrp");
%! [x, fval, info] = fsolve (@__f, [ 0.5; 2.0; 2.5 ], opt);
%! assert (info > 0);
%! assert (norm (x - x_opt, Inf) < tol);
%! assert (norm (fval) < tol);
%!test
%! b0 = 3;
%! a0 = 0.2;
%! x = 0:.5:5;
%! noise = 1e-5 * sin (100*x);
%! y = exp (-a0*x) + b0 + noise;
%! c_opt = [a0, b0];
%! tol = 1e-5;
%!
%! [c, fval, info, output] = fsolve (@(c) (exp (-c(1)*x) + c(2) - y), [0, 0]);
%! assert (info > 0);
%! assert (norm (c - c_opt, Inf) < tol);
%! assert (norm (fval) < norm (noise));
%!function y = cfun (x)
%!  y(1) = (1+i)*x(1)^2 - (1-i)*x(2) - 2;
%!  y(2) = sqrt (x(1)*x(2)) - (1-2i)*x(3) + (3-4i);
%!  y(3) = x(1) * x(2) - x(3)^2 + (3+2i);
%!endfunction
%!test
%! x_opt = [-1+i, 1-i, 2+i];
%! x = [i, 1, 1+i];
%!
%! [x, f, info] = fsolve (@cfun, x, optimset ("ComplexEqn", "on"));
%! tol = 1e-5;
%! assert (norm (f) < tol);
%! assert (norm (x - x_opt, Inf) < tol);

## Solve the double dogleg trust-region least-squares problem:
## Minimize norm (r*x - b) subject to the constraint norm (d.*x) <= delta,
## x being a convex combination of the gauss-newton and scaled gradient.

## TODO: error checks
## TODO: handle singularity, or leave it up to mldivide?
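
## The "dogleg" is the piecewise-linear path from the origin along the
## scaled gradient to its line minimizer, then towards the Gauss-Newton
## point; the returned x lies where this path meets the trust-region
## boundary norm (d.*x) == delta (or at the Gauss-Newton point itself
## when that point is feasible).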
function x = __dogleg__ (r, b, d, delta)
  ## Get Gauss-Newton direction.
  x = r \ b;
  xn = norm (d .* x);
  if (xn > delta)
    ## GN is too big, get scaled gradient.
    s = (r' * b) ./ d;
    sn = norm (s);
    if (sn > 0)
      ## Normalize and rescale.
      s = (s / sn) ./ d;
      ## Get the line minimizer in s direction.
      tn = norm (r*s);
      snm = (sn / tn) / tn;
      if (snm < delta)
        ## Get the dogleg path minimizer.
        bn = norm (b);
        dxn = delta/xn; snmd = snm/delta;
        t = (bn/sn) * (bn/xn) * snmd;
        t -= dxn * snmd^2 - sqrt ((t-dxn)^2 + (1-dxn^2)*(1-snmd^2));
        alpha = dxn*(1-snmd^2) / t;
      else
        alpha = 0;
      endif
    else
      alpha = delta / xn;
      snm = 0;
    endif
    ## Form the appropriate convex combination.
    x = alpha * x + ((1-alpha) * min (snm, delta)) * s;
  endif
endfunction