
/PHPExcel/Shared/JAMA/examples/LevenbergMarquardt.php

https://bitbucket.org/nfredricks/wp-employee-time
<?php

// Levenberg-Marquardt in PHP

// http://www.idiom.com/~zilla/Computer/Javanumeric/LM.java

class LevenbergMarquardt {

	/**
	 * Calculate the current sum-squared-error.
	 *
	 * Chi-squared is the distribution of squared Gaussian errors,
	 * thus the name.
	 *
	 * @param double[][] $x domain points
	 * @param double[] $a model parameters
	 * @param double[] $y observed values
	 * @param double[] $s sigma for each point
	 * @param object $f the model, exposing val($x, $a)
	 */
	function chiSquared($x, $a, $y, $s, $f) {
		$npts = count($y);
		$sum = 0.0;

		for ($i = 0; $i < $npts; ++$i) {
			$d = $y[$i] - $f->val($x[$i], $a);
			$d = $d / $s[$i];
			$sum = $sum + ($d * $d);
		}

		return $sum;
	}	//	function chiSquared()


	/**
	 * Minimize E = sum {(y[k] - f(x[k],a)) / s[k]}^2
	 * The individual errors are optionally scaled by s[k].
	 * Note that LMfunc implements the value and gradient of f(x,a),
	 * NOT the value and gradient of E with respect to a!
	 *
	 * @param x array of domain points, each may be multidimensional
	 * @param a the parameters/state of the model, passed by reference
	 *	and updated in place with the fitted values
	 * @param y corresponding array of values
	 * @param s sigma for point i
	 * @param vary false to indicate the corresponding a[k] is to be held fixed
	 * @param f the model: an object implementing val($x, $a) and grad($x, $a, $k)
	 * @param lambda blend between steepest descent (lambda high) and
	 *	jump to bottom of quadratic (lambda zero).
	 *	Start with 0.001.
	 * @param termepsilon termination accuracy (0.01)
	 * @param maxiter	stop and return after this many iterations if not done
	 * @param verbose	set to zero (no prints), 1, 2
	 *
	 * @return the new lambda for future iterations.
	 *	Can use this and maxiter to interleave the LM descent with some other
	 *	task, setting maxiter to something small.
	 */
	function solve($x, &$a, $y, $s, $vary, $f, $lambda, $termepsilon, $maxiter, $verbose) {
		$npts = count($y);
		$nparm = count($a);

		if ($verbose > 0) {
			print("solve x[".count($x)."][".count($x[0])."]");
			print(" a[".count($a)."]");
			print(" y[".count($y)."]\n");
		}

		$e0 = $this->chiSquared($x, $a, $y, $s, $f);

		$done = false;

		// g = gradient, H = hessian, d = step to minimum
		// H d = -g, solve for d
		$H = array();
		$g = array();

		// precompute 1/sigma^2 for each point
		$oos2 = array();
		for ($i = 0; $i < $npts; ++$i) {
			$oos2[$i] = 1. / ($s[$i] * $s[$i]);
		}

		$iter = 0;
		$term = 0;	// termination count test

		do {
			++$iter;

			// hessian approximation: H = J^T W J, with W = diag(1/sigma^2)
			for ($r = 0; $r < $nparm; ++$r) {
				for ($c = 0; $c < $nparm; ++$c) {
					for ($i = 0; $i < $npts; ++$i) {
						if ($i == 0) $H[$r][$c] = 0.;
						$xi = $x[$i];
						$H[$r][$c] += ($oos2[$i] * $f->grad($xi, $a, $r) * $f->grad($xi, $a, $c));
					}	// npts
				}	// c
			}	// r

			// boost diagonal towards gradient descent
			for ($r = 0; $r < $nparm; ++$r)
				$H[$r][$r] *= (1. + $lambda);

			// right-hand side: g = J^T W r, where r[i] = y[i] - f(x[i], a);
			// this already carries the sign needed for a descent step
			for ($r = 0; $r < $nparm; ++$r) {
				for ($i = 0; $i < $npts; ++$i) {
					if ($i == 0) $g[$r] = 0.;
					$xi = $x[$i];
					$g[$r] += ($oos2[$i] * ($y[$i] - $f->val($xi, $a)) * $f->grad($xi, $a, $r));
				}	// npts
			}	// r

			// scale (for consistency with NR, not necessary); disabled
			if (false) {
				for ($r = 0; $r < $nparm; ++$r) {
					$g[$r] = -0.5 * $g[$r];
					for ($c = 0; $c < $nparm; ++$c) {
						$H[$r][$c] *= 0.5;
					}
				}
			}

			// solve H d = g for the step d; g above already carries the
			// gradient's sign, so this is the "H d = -g" step.  The Java
			// original used JAMA's LU solve here; plain Gaussian elimination
			// with partial pivoting is enough, since H is only
			// nparm x nparm (and assumed non-singular).
			$A = $H;
			$b = $g;
			for ($k = 0; $k < $nparm; ++$k) {
				// find the pivot row
				$p = $k;
				for ($r = $k + 1; $r < $nparm; ++$r) {
					if (abs($A[$r][$k]) > abs($A[$p][$k])) $p = $r;
				}
				if ($p != $k) {
					$tmp = $A[$k];	$A[$k] = $A[$p];	$A[$p] = $tmp;
					$tmp = $b[$k];	$b[$k] = $b[$p];	$b[$p] = $tmp;
				}
				// eliminate below the pivot
				for ($r = $k + 1; $r < $nparm; ++$r) {
					$m = $A[$r][$k] / $A[$k][$k];
					for ($c = $k; $c < $nparm; ++$c) {
						$A[$r][$c] -= $m * $A[$k][$c];
					}
					$b[$r] -= $m * $b[$k];
				}
			}
			// back-substitution
			$d = array();
			for ($r = $nparm - 1; $r >= 0; --$r) {
				$sum = $b[$r];
				for ($c = $r + 1; $c < $nparm; ++$c) {
					$sum -= $A[$r][$c] * $d[$c];
				}
				$d[$r] = $sum / $A[$r][$r];
			}

			// evaluate error at the new location
			$na = array();
			for ($i = 0; $i < $nparm; ++$i) {
				$na[$i] = $a[$i] + $d[$i];
			}
			$e1 = $this->chiSquared($x, $na, $y, $s, $f);

			if ($verbose > 0) {
				print("\n\niteration ".$iter." lambda = ".$lambda."\n");
				print("e0 = ".$e0."  e1 = ".$e1."\n");
				if ($verbose > 1) {
					print("H = ");	print_r($H);
					print("g = ");	print_r($g);
					print("d = ");	print_r($d);
				}
			}

			// termination test (slightly different than NR)
			if (abs($e1 - $e0) > $termepsilon) {
				$term = 0;
			} else {
				++$term;
				if ($term == 4) {
					if ($verbose > 0) print("terminating after ".$iter." iterations\n");
					$done = true;
				}
			}
			if ($iter >= $maxiter) $done = true;

			// in the C++ version, found that changing this to e1 >= e0
			// was not a good idea.  See comment there.
			if ($e1 > $e0 || is_nan($e1)) {
				// new location worse than before
				$lambda *= 10.;
			} else {
				// new location better, accept new parameters
				$lambda *= 0.1;
				$e0 = $e1;
				// $a is by reference, so this copies results back to the caller
				for ($i = 0; $i < $nparm; ++$i) {
					if ($vary[$i]) $a[$i] = $na[$i];
				}
			}
		} while (!$done);

		return $lambda;
	}	//	function solve()

}	//	class LevenbergMarquardt
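
Usage sketch (not part of the original file): solve() expects $f to be an
object exposing val($x, $a) and grad($x, $a, $k), mirroring the LMfunc
interface in the Java source linked above. The LineFunc class, the include
path, and the sample data below are illustrative assumptions, not code
shipped with PHPExcel; they fit a straight line y = a0 + a1*x.

<?php

require_once 'LevenbergMarquardt.php';	// hypothetical path to the class above

// Model y = a[0] + a[1]*x, written against the val()/grad() contract
// that LevenbergMarquardt::solve() relies on.
class LineFunc {
	// value of the model at domain point $x with parameters $a
	function val($x, $a) {
		return $a[0] + $a[1] * $x[0];
	}

	// partial derivative of the model value w.r.t. parameter $ak
	function grad($x, $a, $ak) {
		return ($ak == 0) ? 1.0 : $x[0];
	}
}

$x = array(array(0.0), array(1.0), array(2.0), array(3.0));	// domain points
$y = array(1.1, 2.9, 5.2, 7.1);	// observations, roughly 1 + 2x
$s = array(1.0, 1.0, 1.0, 1.0);	// sigma for each point
$a = array(0.0, 0.0);	// initial guess; updated in place by solve()
$vary = array(true, true);	// fit both parameters

$lm = new LevenbergMarquardt();
$lambda = $lm->solve($x, $a, $y, $s, $vary, new LineFunc(),
	0.001,	// starting lambda, as suggested in the docblock
	0.01,	// termination accuracy
	100,	// maximum iterations
	0);	// verbosity

print("a0 = ".$a[0].", a1 = ".$a[1]."\n");	// expect a0 near 1, a1 near 2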