/matlab_tools/Converted/klrftrain.m
MATLAB | 388 lines | 385 code | 3 blank | 0 comment | 66 complexity | 8e585895643791cb3d5b7f86f76342e0 MD5 | raw file
Possible License(s): BSD-3-Clause
- %klrftrain 'Calculate Weights for Localized Receptive Field Classifier (K1)'
- % This MatLab function was automatically generated by a converter (KhorosToMatLab) from the Khoros lrftrain.pane file
- %
- % Parameters:
- % InputFile: i1 'Input Image', required: 'input image'
- % InputFile: i2 'Cluster Center Image ', required: 'cluster center image'
- % InputFile: i3 'Cluster Variance Image', required: 'cluster variance image'
- % InputFile: i4 'Cluster Number Image ', required: 'cluster number image'
- % Integer: b 'Input Image Border Width', default: 0: 'Border Width'
- % Double: cv 'Convergence Value', default: 0.1: 'convergence value'
- % Double: meu 'Wt. Update Value ', default: 0.5: 'weight update value'
- % Integer: n 'Max Iterations ', default: 10000000: 'Maximum number of iterations'
- % Double: delta 'MIN Delta MSE ', default: 1e-09: 'Min delta MSE value'
- % Integer: d 'MSE Display Interval', default: 0: 'MSE display interval'
- % OutputFile: o 'Weight Image', required: 'weight image'
- % OutputFile: f 'Ascii Stats ', required: 'output file for training statistics'
- %
- % Example: [o, f] = klrftrain({i1, i2, i3, i4}, {'i1','';'i2','';'i3','';'i4','';'b',0;'cv',0.1;'meu',0.5;'n',10000000;'delta',1e-09;'d',0;'o','';'f',''})
- %
- % Khoros helpfile follows below:
- %
- % PROGRAM
- % lrftrain - Calculate Weights for Localized Receptive Field Classifier (K1)
- %
- % DESCRIPTION
- % .I lrftrain
- % trains on an image for the weights used with the Localized Receptive Field
- % classifier (see lrfclass). The Localized Receptive Field (LRF) is based on
- % a single layer of self-organizing, "localized receptive field" units,
- % followed by a single layer perceptron. The single layer of perceptron
- % units use the LMS or Adaline learning rule to adjust the weights.
- %
- % In contrast, multi-layer network models, such as the multi-layer
- % perceptron (MLP), use the back propagation learning model. The
- % back propagation learning
- % model is based on a gradient descent procedure, which tends to converge
- % at a very slow rate. Back propagation is also hampered by the fact that all
- % layers of weights in the network are computed by minimizing the error,
- % which is a function of the output. This tends to slow the learning,
- % since all weights in the network must be determined with each iteration.
- %
- % The LRF tries to overcome some of these problems by using a localized
- % representation of the input space to limit the number of units that
- % respond to a given input. This allows the LRF to converge at a faster
- % rate than similar types of neural network models, since only those
- % receptive fields which respond to an input need to be updated. Another
- % factor that allows the LRF to reduce the learning time, is that it
- % makes use of self-organized learning techniques, such as K-means, to train
- % the receptive field centers.
- % .SH "LRF network theory"
- %
- % The basic network model of the LRF consists of a two layer topology.
- % The first layer of "receptive field" nodes are trained using a clustering
- % algorithm, such as K-means, or some other algorithm which can determine
- % the receptive field centers. Each node in the first layer computes a
- % receptive field response function, which should approach zero as the
- % distance from the center of the receptive field is increased. The second
- % layer of the LRF model sums the weighted outputs of the first layer,
- % which produces the output or response of the network. A supervised
- % LMS rule is used to train the weights of the second layer nodes.
- %
- % The response function of the LRF network is formulated as follows:
- % .DS
- %
- % f(x) = SUM(Ti * Ri(x))
- %
- % where,
- %
- % Ri(x) = Q( ||x - xi|| / Wi )
- %
- % x - is a real valued vector in the input space,
- % Ri - is the ith receptive field response function,
- % Q - is a radially symmetric function with a single
- % maximum at the origin, decreasing to zero at
- % large radii,
- % xi - is the center of the ith receptive field,
- % Wi - is the width of the ith receptive field,
- % Ti - is the weight associated with each receptive field.
- %
- % .DE
- %
- % The receptive field response functions ( Ri(x) ), should be formulated
- % such that they decrease rapidly with increasing radii. This ensures that
- % the response functions provide highly localized representations of the
- % input space. The response function used here is modeled after the
- % Gaussian, and uses the trace of the covariance matrix to set the widths
- % of the receptive field centers.
- %
- % The weights for the output layer are found using the LMS learning rule.
- % The weights are adjusted at each iteration to minimize the total error,
- % which is based on the difference between the network output and the
- % desired result.
- %
- % The key element to the success of the LRF is the self-organizing
- % receptive fields. As noted above, the receptive field centers can be
- % determined from a statistical clustering algorithm such as K-means.
- % The inputs to the training phase of the LRF (ie. "lrftrain"), are the
- % outputs from "vkmeans" and the original image. Specifically, these are
- % the original input image, which may be a multi-band image containing all
- % of the feature bands used in the classification, and the "cluster number"
- % image, the "cluster center" image, and the "cluster variance" image.
- % The "cluster number" image specifies which vector belongs to what cluster,
- % the "cluster center" image specifies the cluster center locations in the
- % feature space, and the "cluster variance" image specifies the variances
- % of the data associated with each cluster center.
- %
- % Prior to using the LRF algorithm, it is necessary to run "vkmeans" on the
- % input training image to fix the cluster centers, followed by a supervised
- % classification of the clustered image, which assigns a desired class to
- % each cluster center. NOTE that the image resulting from the supervised
- % classification MUST be appended to the "cluster center" image before
- % running the LRF. This is necessary since it makes the appropriate
- % desired class assignments to the cluster centers for the training phase
- % of the LRF.
- %
- % .SH "Input Options"
- %
- %
- % "-d" 8
- % is an integer specifying the iteration interval used to print the
- % mean squared error (MSE) to the output statistics file. If this
- % value is left at zero (the default), only the MSE of the first iteration
- % is written to the file. Any other integer will cause the value of the
- % MSE to be written to the statistics file at the iteration interval
- % specified.
- %
- % "-cv" 8
- % is a float value that specifies the convergence value for the algorithm.
- % When the current MSE value reaches the specified convergence value, the
- % algorithm will terminate.
- %
- % "-meu" 8
- % is a float value that specifies the weight update parameter for the
- % learning algorithm. This value can be adjusted from 0 to 1. NOTE: this
- % parameter may have a significant effect on the rate of learning, and
- % it may have to be adjusted several times to get a feel for the optimum
- % learning rate.
- %
- % "-n" 8
- % is an integer that specifies the maximum number of iterations that the
- % algorithm will run before terminating. It is initially set to an
- % arbitrarily large number to allow the algorithm to complete the learning
- % phase.
- %
- % "-delta" 8
- % is a float value that specifies the minimum change in the MSE value from
- % one iteration to the next. This parameter may be used to terminate the
- % algorithm when the change in the MSE is zero or very small, but the MSE
- % has not yet reached the specified convergence value (-cv). This may
- % occur when the learning has reached a "plateau" or "bench" and is no
- % longer learning.
- %
- % "-b" 8
- % is an integer that specifies the border width, in pixels, encompassing
- % the desired region of the image to be classified. This region is ignored
- % during the classification process.
- %
- % Of the four input images to this routine, all but the "cluster number"
- % image must be of data storage type FLOAT. The "cluster number" image
- % should be of data storage type INTEGER. The output "weight" image is
- % written out as data storage type FLOAT. The output statistics file is
- % stored as an ASCII file.
- %
- % The statistics output file (-f) contains the following information:
- % .DS
- %
- % MSE at the first iteration
- % MSE at each specified interval (optional)
- % Total Number of Iterations
- % Final MSE at termination of the algorithm
- % Convergence Parameter used (-cv)
- % Weight Update Parameter used (-meu)
- % Minimum Delta MSE value (-delta)
- % Border Width (-b)
- % Number of Response Nodes in the network
- % Number of Output Classes in the network
- %
- % .DE
- %
- % The number of receptive field response nodes in the first layer of the
- % LRF is determined by the number of cluster centers in the "cluster center"
- % image. The number of output classes, and hence the number of output
- % nodes in the second (ie. last) layer, is determined by the number of
- % desired classes that was specified in the "supervised" classification
- % phase of the clustering. This information is contained in the last
- % band of the cluster center image. The number of weights in the network
- % is determined by the number of receptive field response nodes and the
- % number of output nodes. That is,
- % .DS
- %
- % #Wts = (#rf_response_nodes * #output_nodes) + #output_nodes
- %
- % .DE
- % .SH Advice
- %
- % As an initial step, try running the algorithm with a small number of
- % iterations (ex. -n = 500) to get a feel for how the MSE is behaving
- % (ie. decreasing rapidly, slowly, or increasing). Make sure you have the
- % MSE display parameter set to a reasonable interval (ex. -d = 10) so that
- % you can see how the MSE is behaving. These values will be written to
- % the statistics file (-f).
- %
- % After you get an idea of how the MSE is behaving, set the convergence
- % value (-cv) to a reasonable value. You may also try decreasing the
- % weight update parameter (-meu) to learn at a slower rate. Often times
- % a large weight update parameter will cause the learning to "oscillate"
- % and never reach a small MSE. You may also want to set the minimum
- % delta MSE parameter (-delta) to a small value, to ensure that the
- % algorithm terminates if the MSE levels off.
- %
- % This routine was written with the help of and ideas from
- % Dr. Don Hush, University of New Mexico, Dept. of EECE.
- %
- %
- %
- % EXAMPLES
- % lrftrain -i1 feature_image.xv -i2 cluster_centers -i3 variances -i4 cluster_numbers -o weight_image -f stats -d 10 -n 500
- %
- % This example illustrates a good initial step at training on an image.
- % The display interval is set to write out the MSE every 10 iterations.
- % The number of iterations is set to a small value, 500, to ensure that
- % the algorithm will stop in a reasonable amount of time to get a feel
- % for how the MSE is behaving.
- %
- % "SEE ALSO"
- % lrfclass(1)
- %
- % RESTRICTIONS
- % All input images except the "cluster number" image (-i4) MUST be of
- % data storage type FLOAT. The "cluster number" image (-i4) MUST be
- % of data storage type INTEGER. The output "weight" image (-o) is of
- % data storage type FLOAT.
- %
- % REFERENCES
- %
- % COPYRIGHT
- % Copyright (C) 1993 - 1997, Khoral Research, Inc. ("KRI") All rights reserved.
- %
function varargout = klrftrain(varargin)
% klrftrain -- MatLab wrapper that invokes the Khoros 'lrftrain' binary.
%
% Usage:
%   [o, f] = klrftrain(Inputs, arglist)
%
% Inputs  : cell array of input images (a single non-cell input is accepted
%           and wrapped automatically).
% arglist : N-by-2 cell array of {ParameterTag, value} pairs matching the
%           tags documented in the help text above (i1..i4, b, cv, meu, n,
%           delta, d, o, f).
%
% The function validates arglist against the parameter table generated from
% the Khoros pane file, binds inputs/outputs positionally, then shells out
% to the 'lrftrain' executable through callKhoros.

% --- normalise the optional arguments -----------------------------------
if nargin == 0
    Inputs = {}; arglist = {'',''};
elseif nargin == 1
    Inputs = varargin{1}; arglist = {'',''};
elseif nargin == 2
    Inputs = varargin{1}; arglist = varargin{2};
else
    error('Usage: [out1,..] = klrftrain(Inputs,arglist).');
end
if size(arglist,2) ~= 2
    error('arglist must be of form {''ParameterTag1'',value1;''ParameterTag2'',value2}')
end

% Parameter table from the pane file: tag + default value, where the
% sentinels '__input' / '__output' mark file slots to be bound positionally.
narglist = {'i1', '__input';'i2', '__input';'i3', '__input';'i4', '__input';'b', 0;'cv', 0.1;'meu', 0.5;'n', 10000000;'delta', 1e-09;'d', 0;'o', '__output';'f', '__output'};
% Range codes (per entry): 0/0 = unchecked; 1/1 = value >= 0; -1/-1 = <= 0;
% 2/2 = > 0; -2/-2 = < 0; any other min~=max pair is an explicit range.
% For file slots, minval == 0 additionally means "required".
maxval = {0,0,0,0,100,2,1,2,2,1,0,0};
minval = {0,0,0,0,0,2,0,2,2,1,0,0};
istoggle = [0,0,0,0,1,1,1,1,1,1,0,0];
was_set = istoggle * 0;                 % which optional tags the user supplied
paramtype = {'InputFile','InputFile','InputFile','InputFile','Integer','Double','Double','Integer','Double','Integer','OutputFile','OutputFile'};

if ~iscell(Inputs)
    Inputs = {Inputs};                  % tolerate a single bare input image
end
NumReqOutputs = 2; nextinput = 1; nextoutput = 1;

% --- match user-supplied tags against the parameter table ---------------
for ii = 1:size(arglist,1)
    wasmatched = 0;
    for jj = 1:size(narglist,1)
        if strcmp(arglist{ii,1}, narglist{jj,1})   % tag recognised
            wasmatched = 1;
            was_set(jj) = 1;
            if strcmp(narglist{jj,2}, '__input')
                if (nextinput > length(Inputs))
                    error(['Input ' narglist{jj,1} ' has no corresponding input!']);
                end
                narglist{jj,2} = 'OK_in';
                nextinput = nextinput + 1;
            elseif strcmp(narglist{jj,2}, '__output')
                if (nextoutput > nargout)
                    error(['Output nr. ' narglist{jj,1} ' is not present in the assignment list of outputs !']);
                end
                if (isempty(arglist{ii,2}))
                    narglist{jj,2} = 'OK_out';
                else
                    narglist{jj,2} = arglist{ii,2};   % user-chosen output filename
                end
                nextoutput = nextoutput + 1;
                if (minval{jj} == 0)
                    NumReqOutputs = NumReqOutputs - 1;
                end
            elseif ischar(arglist{ii,2})              % ischar replaces deprecated isstr
                narglist{jj,2} = arglist{ii,2};
            else
                % Numeric parameter: enforce integer-ness and range codes.
                if strcmp(paramtype{jj}, 'Integer') && (round(arglist{ii,2}) ~= arglist{ii,2})
                    % num2str: concatenating a raw double into [] yields garbage chars
                    error(['Argument ' arglist{ii,1} ' is of integer type but non-integer number ' num2str(arglist{ii,2}) ' was supplied']);
                end
                if (minval{jj} ~= 0 || maxval{jj} ~= 0)
                    if (minval{jj} == 1 && maxval{jj} == 1 && arglist{ii,2} < 0)
                        error(['Argument ' arglist{ii,1} ' must be bigger or equal to zero!']);
                    elseif (minval{jj} == -1 && maxval{jj} == -1 && arglist{ii,2} > 0)
                        error(['Argument ' arglist{ii,1} ' must be smaller or equal to zero!']);
                    elseif (minval{jj} == 2 && maxval{jj} == 2 && arglist{ii,2} <= 0)
                        error(['Argument ' arglist{ii,1} ' must be bigger than zero!']);
                    elseif (minval{jj} == -2 && maxval{jj} == -2 && arglist{ii,2} >= 0)
                        error(['Argument ' arglist{ii,1} ' must be smaller than zero!']);
                    elseif (minval{jj} ~= maxval{jj} && arglist{ii,2} < minval{jj})
                        error(['Argument ' arglist{ii,1} ' must be bigger than ' num2str(minval{jj})]);
                    elseif (minval{jj} ~= maxval{jj} && arglist{ii,2} > maxval{jj})
                        error(['Argument ' arglist{ii,1} ' must be smaller than ' num2str(maxval{jj})]);
                    end
                end
            end
            if ~strcmp(narglist{jj,2},'OK_out') && ~strcmp(narglist{jj,2},'OK_in')
                narglist{jj,2} = arglist{ii,2};
            end
        end
    end
    if (wasmatched == 0 && ~strcmp(arglist{ii,1},''))
        error(['Argument ' arglist{ii,1} ' is not a valid argument for this function']);
    end
end

% --- bind remaining inputs/outputs; verify required ones are present ----
for jj = 1:size(narglist,1)
    if strcmp(paramtype{jj}, 'Toggle')
        if (narglist{jj,2} == 0)
            narglist{jj,1} = '';
        end;
        narglist{jj,2} = '';
    end;
    % Drop optional flags the user never set so they are not passed along.
    if ~strcmp(narglist{jj,2},'__input') && ~strcmp(narglist{jj,2},'__output') && istoggle(jj) && ~was_set(jj)
        narglist{jj,1} = '';
        narglist{jj,2} = '';
    end;
    if strcmp(narglist{jj,2}, '__input')
        if (minval{jj} == 0)            % required input
            % length (not size) -- size returns a vector, breaking the comparison
            if (nextinput > length(Inputs))
                error(['Required input ' narglist{jj,1} ' has no corresponding input in the list!']);
            else
                narglist{jj,2} = 'OK_in';
                nextinput = nextinput + 1;
            end
        else                            % optional input
            if (nextinput <= length(Inputs))
                narglist{jj,2} = 'OK_in';
                nextinput = nextinput + 1;
            else
                narglist{jj,1} = '';
                narglist{jj,2} = '';
            end;
        end;
    else
        if strcmp(narglist{jj,2}, '__output')
            if (minval{jj} == 0)        % required output
                if (nextoutput > nargout && nargout > 1)
                    error(['Required output ' narglist{jj,1} ' is not stated in the assignment list!']);
                else
                    narglist{jj,2} = 'OK_out';
                    nextoutput = nextoutput + 1;
                    NumReqOutputs = NumReqOutputs - 1;
                end
            else                        % optional output: only if enough slots remain
                if (nargout - nextoutput >= NumReqOutputs)
                    narglist{jj,2} = 'OK_out';
                    nextoutput = nextoutput + 1;
                else
                    narglist{jj,1} = '';
                    narglist{jj,2} = '';
                end;
            end
        end
    end
end

if nargout
    varargout = cell(1,nargout);
else
    varargout = cell(1,1);
end

% --- locate the Khoros binary directory and invoke lrftrain -------------
global KhorosRoot
if exist('KhorosRoot','var') && ~isempty(KhorosRoot)
    w = ['"' KhorosRoot];
else
    if ispc
        w = '"C:\Program Files\dip\khorosBin\';
    else
        % Derive the bin directory from the 'cantata' binary on the PATH;
        % end-8 strips the trailing 'cantata' name plus newline.
        [s,w] = system('which cantata');
        w = ['"' w(1:end-8)];
    end
end
[varargout{:}] = callKhoros([w 'lrftrain" '], Inputs, narglist);