- { **********************************************************************
- * Unit OPTIM.PAS *
- * Version 1.8 *
- **********************************************************************
- This unit implements the following methods for function minimization:
- * Brent's method for a function of 1 variable
- * Marquardt's, BFGS, Simplex and Simulated Annealing methods
- for a function of several variables
- **********************************************************************
- References:
- 1) 'Numerical Recipes' by Press et al.
- 2) D. W. MARQUARDT, J. Soc. Indust. Appl. Math., 1963, 11, 431-441
- 3) J. A. NELDER & R. MEAD, Comput. J., 1965, 7, 308-313
- 4) R. O'NEILL, Appl. Statist., 1971, 20, 338-345
- ********************************************************************** }
- unit Optim;
- interface
- uses
- FMath, Matrices, Random, Stat;
- { **********************************************************************
- Error codes
- ********************************************************************** }
- const
- BIG_LAMBDA = - 2; { Marquardt's lambda parameter too large }
- NON_CONV = - 3; { Non-convergence }
- { **********************************************************************
- Functional types
- ********************************************************************** }
- type
- { Function of one variable }
- TFunc = function(X : Float) : Float;
- { Function of several variables }
- TFuncNVar = function(X : PVector) : Float;
- { Procedure to compute gradient vector }
- TGradient = procedure(Func : TFuncNVar;
- X : PVector;
- Lbound, Ubound : Integer;
- G : PVector);
- { Procedure to compute gradient vector and hessian matrix }
- THessGrad = procedure(Func : TFuncNVar;
- X : PVector;
- Lbound, Ubound : Integer;
- G : PVector;
- H : PMatrix);
- { **********************************************************************
- Log file
- ********************************************************************** }
- const
- WriteLogFile : Boolean = False; { Write iteration info to log file }
- LogFileName : String = 'OPTIM.LOG'; { Name of log file }
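- { WriteLogFile and LogFileName are typed constants and may be reassigned
- at run time before calling a minimization routine. A minimal sketch
- (the file name is purely illustrative) :
-     WriteLogFile := True;
-     LogFileName := 'MARQ.LOG';
- }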
- { **********************************************************************
- Parameters for simulated annealing
- ********************************************************************** }
- const
- SA_Nt : Integer = 50; { Nb of iterations at each temperature }
- SA_Cv : Float = 0.5; { Coef. of variation for random numbers }
- SA_MinSigma : Float = 1.0; { Minimal S.D. for random numbers }
- SA_SigmaFact : Float = 0.1; { Overall S.D. reduction factor }
- SA_NCycles : Integer = 1; { Number of SA cycles }
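- { These typed constants may likewise be adjusted before calling SimAnn.
- A sketch with purely illustrative values :
-     SA_NCycles := 3;     more annealing cycles
-     SA_Nt := 100;        more trials at each temperature
- }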
- { **********************************************************************
- Minimization routines
- ********************************************************************** }
- function Brent(Func : TFunc;
- X1, X2 : Float;
- MaxIter : Integer;
- Tol : Float;
- var X_min, F_min : Float) : Integer;
- { ----------------------------------------------------------------------
- Minimization of a function of 1 variable by Brent's method
- ----------------------------------------------------------------------
- Input parameters : Func = function to be minimized
- X1, X2 = two values near the minimum
- MaxIter = maximum number of iterations
- Tol = required precision
- ----------------------------------------------------------------------
- Output parameters : X_min = refined minimum coordinate
- F_min = function value at minimum
- ----------------------------------------------------------------------
- Possible results : MAT_OK
- NON_CONV
- ---------------------------------------------------------------------- }
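- { Usage sketch (illustrative only) : MyFunc1 is a hypothetical function
- of type TFunc, and Xm, Fm are variables declared by the caller. Under
- Turbo Pascal the function must be compiled in far-call mode (directive
- $F+) ; under Free Pascal it is passed as @MyFunc1.
-     var
-       Xm, Fm : Float;
-     begin
-       if Brent(MyFunc1, 0.0, 1.0, 100, 1.0E-8, Xm, Fm) = MAT_OK then
-         WriteLn('X_min = ', Xm:12:6, '  F_min = ', Fm:12:6);
-     end;
- }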
- procedure NumGradient(Func : TFuncNVar;
- X : PVector;
- Lbound, Ubound : Integer;
- G : PVector);
- { ----------------------------------------------------------------------
- Computes the gradient vector of a function of several variables by
- numerical differentiation
- ----------------------------------------------------------------------
- Input parameters : Func = function of several variables
- X = vector of variables
- Lbound,
- Ubound = indices of first and last variables
- ----------------------------------------------------------------------
- Output parameter : G = gradient vector
- ---------------------------------------------------------------------- }
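- { Usage sketch (illustrative) : MyFunc is a hypothetical TFuncNVar whose
- N variables are indexed 1..N ; DimVector comes from the Matrices unit.
-     DimVector(X, N);
-     DimVector(G, N);
-     ... set X^[1]..X^[N] ...
-     NumGradient(MyFunc, X, 1, N, G);
-     ... G^[1]..G^[N] now hold the numerical partial derivatives ...
- }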
- procedure NumHessGrad(Func : TFuncNVar;
- X : PVector;
- Lbound, Ubound : Integer;
- G : PVector;
- H : PMatrix);
- { ----------------------------------------------------------------------
- Computes gradient vector & hessian matrix by numerical differentiation
- ----------------------------------------------------------------------
- Input parameters : as in NumGradient
- ----------------------------------------------------------------------
- Output parameters : G = gradient vector
- H = hessian matrix
- ---------------------------------------------------------------------- }
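- { Usage sketch (same conventions as for NumGradient) :
-     DimVector(G, N);
-     DimMatrix(H, N, N);
-     NumHessGrad(MyFunc, X, 1, N, G, H);
- }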
- function LinMin(Func : TFuncNVar;
- X, DeltaX : PVector;
- Lbound, Ubound : Integer;
- MaxIter : Integer;
- Tol : Float;
- var F_min : Float) : Integer;
- { ----------------------------------------------------------------------
- Minimizes function Func from point X in the direction specified by
- DeltaX
- ----------------------------------------------------------------------
- Input parameters : Func = objective function
- X = initial minimum coordinates
- DeltaX = direction in which minimum is searched
- Lbound,
- Ubound = indices of first and last variables
- MaxIter = maximum number of iterations
- Tol = required precision
- ----------------------------------------------------------------------
- Output parameters : X = refined minimum coordinates
- F_min = function value at minimum
- ----------------------------------------------------------------------
- Possible results : MAT_OK
- NON_CONV
- ---------------------------------------------------------------------- }
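- { Usage sketch (illustrative) : starting from X, search along DeltaX.
- On success X is moved to the line minimum and DeltaX is rescaled to the
- step actually taken.
-     ErrCode := LinMin(MyFunc, X, DeltaX, 1, N, 100, 1.0E-4, Fmin);
- }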
- function Marquardt(Func : TFuncNVar;
- HessGrad : THessGrad;
- X : PVector;
- Lbound, Ubound : Integer;
- MaxIter : Integer;
- Tol : Float;
- var F_min : Float;
- H_inv : PMatrix) : Integer;
- { ----------------------------------------------------------------------
- Minimization of a function of several variables by Marquardt's method
- ----------------------------------------------------------------------
- Input parameters : HessGrad = procedure to compute
- gradient and hessian
- Other parameters as in LinMin
- ----------------------------------------------------------------------
- Output parameters : X = refined minimum coordinates
- F_min = function value at minimum
- H_inv = inverse hessian matrix
- ----------------------------------------------------------------------
- Possible results : MAT_OK
- MAT_SINGUL
- BIG_LAMBDA
- NON_CONV
- ---------------------------------------------------------------------- }
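- { Usage sketch (illustrative) : minimize a hypothetical MyFunc of N
- variables with numerical derivatives. Under Free Pascal the procedural
- parameters are passed as @MyFunc and @NumHessGrad.
-     DimVector(X, N);
-     DimMatrix(Hinv, N, N);
-     ... set the starting point X^[1]..X^[N] ...
-     ErrCode := Marquardt(MyFunc, NumHessGrad, X, 1, N, 200, 1.0E-6,
-                          Fmin, Hinv);
-     if ErrCode = MAT_OK then
-       ... X holds the minimum, Hinv the inverse hessian ...
- }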
- function BFGS(Func : TFuncNVar;
- Gradient : TGradient;
- X : PVector;
- Lbound, Ubound : Integer;
- MaxIter : Integer;
- Tol : Float;
- var F_min : Float;
- H_inv : PMatrix) : Integer;
- { ----------------------------------------------------------------------
- Minimization of a function of several variables by the
- Broyden-Fletcher-Goldfarb-Shanno method
- ----------------------------------------------------------------------
- Parameters : Gradient = procedure to compute gradient vector
- Other parameters as in Marquardt
- ----------------------------------------------------------------------
- Possible results : MAT_OK
- NON_CONV
- ---------------------------------------------------------------------- }
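- { Usage sketch (illustrative ; same conventions as for Marquardt, with
- NumGradient as the gradient routine) :
-     ErrCode := BFGS(MyFunc, NumGradient, X, 1, N, 200, 1.0E-6, Fmin, Hinv);
- }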
- function Simplex(Func : TFuncNVar;
- X : PVector;
- Lbound, Ubound : Integer;
- MaxIter : Integer;
- Tol : Float;
- var F_min : Float) : Integer;
- { ----------------------------------------------------------------------
- Minimization of a function of several variables by the simplex method
- of Nelder and Mead
- ----------------------------------------------------------------------
- Parameters : as in Marquardt
- ----------------------------------------------------------------------
- Possible results : MAT_OK
- NON_CONV
- ---------------------------------------------------------------------- }
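- { Usage sketch (illustrative) :
-     ErrCode := Simplex(MyFunc, X, 1, N, 1000, 1.0E-6, Fmin);
- }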
- function SimAnn(Func : TFuncNVar;
- X : PVector;
- Lbound, Ubound : Integer;
- MaxIter : Integer;
- T_Ratio : Float;
- var F_min : Float) : Integer;
- { ----------------------------------------------------------------------
- Minimization of a function of several variables by simulated annealing
- ----------------------------------------------------------------------
- Input parameters : MaxIter = number of annealing steps
- T_Ratio = ratio of final to initial temperature
- Other parameters as in Marquardt
- ----------------------------------------------------------------------
- Output parameter : F_min = function value at the end of annealing
- ----------------------------------------------------------------------
- The function always returns 0
- ---------------------------------------------------------------------- }
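- { Usage sketch (illustrative) : 500 annealing steps, cooling down to
- 1/1000 of the initial temperature. The result may then be refined with
- a local method such as Marquardt or BFGS.
-     ErrCode := SimAnn(MyFunc, X, 1, N, 500, 1.0E-3, Fmin);
- }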
- implementation
- var
- Eps : Float; { Fractional increment for numerical differentiation }
- X1 : PVector; { Initial point for line minimization }
- DeltaX1 : PVector; { Direction for line minimization }
- Lbound1, Ubound1 : Integer; { Bounds of X1 and DeltaX1 }
- LinObjFunc : TFuncNVar; { Objective function for line minimization }
- LogFile : Text; { Log file }
- procedure MinBrak(Func : TFunc; var A, B, C : Float);
- { --------------------------------------------------------------------
- Procedure used by Brent's method to initially bracket the minimum.
- Given two points A and B, this routine finds a triplet (A, B, C),
- with B lying between A and C, such that :
- Func(B) < Func(A) and Func(B) < Func(C)
- -------------------------------------------------------------------- }
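- { The trial point U computed below is the abscissa of the vertex of the
- parabola through (A,Fa), (B,Fb), (C,Fc). With R = (B-A)*(Fb-Fc) and
- Q = (B-C)*(Fb-Fa) :
-     U = B - ((B-C)*Q - (B-A)*R) / (2*(Q-R))
- The constant GOLD (imported via the uses clause) is the golden ratio
- used for the default magnification step, and GLIMIT limits how far the
- parabolic extrapolation is allowed to go. }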
- label 1;
- const
- GLIMIT = 100.0;
- TINY = 1.0E-20;
- var
- Fa, Fb, Fc, Ulim, U, R, Q, D, Fu : Float;
- begin
- Fa := Func(A);
- Fb := Func(B);
- if Fb > Fa then
- begin
- FSwap(A, B);
- FSwap(Fa, Fb);
- end;
- C := B + GOLD * (B - A);
- Fc := Func(C);
- 1: if Fb >= Fc then
- begin
- R := (B - A) * (Fb - Fc);
- Q := (B - C) * (Fb - Fa);
- D := Q - R;
- if Abs(D) < TINY then D := Sgn(D) * TINY;
- U := B - ((B - C) * Q - (B - A) * R) / (2.0 * D);
- Ulim := B + GLIMIT * (C - B);
- if (B - U) * (U - C) > 0.0 then
- begin
- Fu := Func(U);
- if Fu < Fc then
- begin
- A := B;
- Fa := Fb;
- B := U;
- Fb := Fu;
- goto 1
- end
- else if Fu > Fb then
- begin
- C := U;
- Fc := Fu;
- goto 1
- end;
- U := C + GOLD * (C - B);
- Fu := Func(U)
- end
- else if (C - U) * (U - Ulim) > 0.0 then
- begin
- Fu := Func(U);
- if Fu < Fc then
- begin
- B := C;
- C := U;
- U := C + GOLD * (C - B);
- Fb := Fc;
- Fc := Fu;
- Fu := Func(U)
- end
- end
- else if (U - Ulim) * (Ulim - C) >= 0.0 then
- begin
- U := Ulim;
- Fu := Func(U)
- end
- else
- begin
- U := C + GOLD * (C - B);
- Fu := Func(U)
- end;
- A := B;
- B := C;
- C := U;
- Fa := Fb;
- Fb := Fc;
- Fc := Fu;
- goto 1
- end
- end;
- function Brent(Func : TFunc;
- X1, X2 : Float;
- MaxIter : Integer;
- Tol : Float;
- var X_min, F_min : Float) : Integer;
- label 1, 2, 3;
- const
- TINY = 1.0E-10;
- var
- A, B, D, E, Etemp, Fu, Fv, Fw, Fx : Float;
- P, Q, R, Tol1, Tol2, U, V, W, X, X3, Xm : Float;
- Iter : Integer;
- begin
- MinBrak(Func, X1, X2, X3);
- if X1 < X3 then A := X1 else A := X3;
- if X1 > X3 then B := X1 else B := X3;
- V := X2;
- W := V;
- X := V;
- D := 0.0;
- E := 0.0;
- Fx := Func(X);
- Fv := Fx;
- Fw := Fx;
- for Iter := 1 to MaxIter do
- begin
- Xm := 0.5 * (A + B);
- Tol1 := Tol * Abs(X) + TINY;
- Tol2 := 2.0 * Tol1;
- if Abs(X - Xm) <= (Tol2 - 0.5 * (B - A)) then goto 3;
- if Abs(E) > Tol1 then
- begin
- R := (X - W) * (Fx - Fv);
- Q := (X - V) * (Fx - Fw);
- P := (X - V) * Q - (X - W) * R;
- Q := 2.0 * (Q - R);
- if Q > 0.0 then P := - P;
- Q := Abs(Q);
- Etemp := E;
- E := D;
- if (Abs(P) >= Abs(0.5 * Q * Etemp)) or (P <= Q * (A - X))
- or (P >= Q * (B - X)) then goto 1;
- D := P / Q;
- U := X + D;
- if ((U - A) < Tol2) or ((B - U) < Tol2) then
- D := Sgn(Xm - X) * Abs(Tol1);
- goto 2
- end;
- 1: if X >= Xm then E := A - X else E := B - X;
- D := CGOLD * E;
- 2: if Abs(D) >= Tol1 then U := X + D else U := X + Sgn(D) * Abs(Tol1);
- Fu := Func(U);
- if Fu <= Fx then
- begin
- if U >= X then A := X else B := X;
- V := W;
- Fv := Fw;
- W := X;
- Fw := Fx;
- X := U;
- Fx := Fu
- end
- else
- begin
- if U < X then A := U else B := U;
- if (Fu <= Fw) or (W = X) then
- begin
- V := W;
- Fv := Fw;
- W := U;
- Fw := Fu
- end
- else if (Fu <= Fv) or (V = X) or (V = W) then
- begin
- V := U;
- Fv := Fu
- end
- end
- end;
- { Convergence not reached : return the best point found so far }
- X_min := X;
- F_min := Fx;
- Brent := NON_CONV;
- Exit;
- 3: X_min := X;
- F_min := Fx;
- Brent := MAT_OK;
- end;
- {$F+}
- procedure NumGradient(Func : TFuncNVar;
- X : PVector;
- Lbound, Ubound : Integer;
- G : PVector);
- var
- Temp, Delta, Fplus, Fminus : Float;
- I : Integer;
- begin
- for I := Lbound to Ubound do
- begin
- Temp := X^[I];
- if Temp <> 0.0 then Delta := Eps * Abs(Temp) else Delta := Eps;
- X^[I] := Temp - Delta;
- Fminus := Func(X);
- X^[I] := Temp + Delta;
- Fplus := Func(X);
- G^[I] := (Fplus - Fminus) / (2.0 * Delta);
- X^[I] := Temp;
- end;
- end;
- {$F-}
- {$F+}
- procedure NumHessGrad(Func : TFuncNVar;
- X : PVector;
- Lbound, Ubound : Integer;
- G : PVector;
- H : PMatrix);
- var
- Delta, Xminus, Xplus, Fminus, Fplus : PVector;
- Temp1, Temp2, F, F2plus : Float;
- I, J : Integer;
- begin
- DimVector(Delta, Ubound); { Increments }
- DimVector(Xminus, Ubound); { X - Delta }
- DimVector(Xplus, Ubound); { X + Delta }
- DimVector(Fminus, Ubound); { F(X - Delta) }
- DimVector(Fplus, Ubound); { F(X + Delta) }
- F := Func(X);
- for I := Lbound to Ubound do
- begin
- if X^[I] <> 0.0 then
- Delta^[I] := Eps * Abs(X^[I])
- else
- Delta^[I] := Eps;
- Xplus^[I] := X^[I] + Delta^[I];
- Xminus^[I] := X^[I] - Delta^[I];
- end;
- for I := Lbound to Ubound do
- begin
- Temp1 := X^[I];
- X^[I] := Xminus^[I];
- Fminus^[I] := Func(X);
- X^[I] := Xplus^[I];
- Fplus^[I] := Func(X);
- X^[I] := Temp1;
- end;
- for I := Lbound to Ubound do
- begin
- G^[I] := (Fplus^[I] - Fminus^[I]) / (2.0 * Delta^[I]);
- H^[I]^[I] := (Fplus^[I] + Fminus^[I] - 2.0 * F) / Sqr(Delta^[I]);
- end;
- for I := Lbound to Pred(Ubound) do
- begin
- Temp1 := X^[I];
- X^[I] := Xplus^[I];
- for J := Succ(I) to Ubound do
- begin
- Temp2 := X^[J];
- X^[J] := Xplus^[J];
- F2plus := Func(X);
- H^[I]^[J] := (F2plus - Fplus^[I] - Fplus^[J] + F) /
- (Delta^[I] * Delta^[J]);
- H^[J]^[I] := H^[I]^[J];
- X^[J] := Temp2;
- end;
- X^[I] := Temp1;
- end;
- DelVector(Delta, Ubound);
- DelVector(Xminus, Ubound);
- DelVector(Xplus, Ubound);
- DelVector(Fminus, Ubound);
- DelVector(Fplus, Ubound);
- end;
- {$F-}
- {$F+}
- function F1dim(R : Float) : Float;
- { ----------------------------------------------------------------------
- Function used by LinMin to find the minimum of the objective function
- LinObjFunc in the direction specified by the global variables X1 and
- DeltaX1. R is the step in this direction.
- ---------------------------------------------------------------------- }
- const
- Xt : PVector = nil;
- var
- I : Integer;
- begin
- if Xt = nil then
- DimVector(Xt, Ubound1);
- for I := Lbound1 to Ubound1 do
- Xt^[I] := X1^[I] + R * DeltaX1^[I];
- F1dim := LinObjFunc(Xt);
- end;
- {$F-}
- function LinMin(Func : TFuncNVar;
- X, DeltaX : PVector;
- Lbound, Ubound : Integer;
- MaxIter : Integer;
- Tol : Float;
- var F_min : Float) : Integer;
- var
- I, ErrCode : Integer;
- R : Float;
- begin
- { Redimension global vectors }
- DelVector(X1, Ubound1);
- DelVector(DeltaX1, Ubound1);
- DimVector(X1, Ubound);
- DimVector(DeltaX1, Ubound);
- Lbound1 := Lbound;
- Ubound1 := Ubound;
- { Initialize global variables }
- LinObjFunc := Func;
- for I := Lbound to Ubound do
- begin
- X1^[I] := X^[I];
- DeltaX1^[I] := DeltaX^[I]
- end;
- { Call Brent's method }
- {$IFDEF FPK}
- ErrCode := Brent(@F1dim, 0.0, 1.0, MaxIter, Tol, R, F_min);
- {$ELSE}
- ErrCode := Brent(F1dim, 0.0, 1.0, MaxIter, Tol, R, F_min);
- {$ENDIF}
- { Update variables }
- if ErrCode = MAT_OK then
- for I := Lbound to Ubound do
- begin
- DeltaX^[I] := R * DeltaX^[I];
- X^[I] := X^[I] + DeltaX^[I];
- end;
- LinMin := ErrCode;
- end;
- function ParamConv(OldX, X : PVector;
- Lbound, Ubound : Integer;
- Tol : Float) : Boolean;
- { ----------------------------------------------------------------------
- Check for convergence on parameters
- ---------------------------------------------------------------------- }
- var
- I : Integer;
- Ref : Float;
- Conv : Boolean;
- begin
- I := Lbound;
- Conv := True;
- repeat
- if OldX^[I] <> 0.0 then Ref := Tol * Abs(OldX^[I]) else Ref := Tol;
- Conv := Conv and (Abs(X^[I] - OldX^[I]) < Ref);
- Inc(I);
- until (Conv = False) or (I > Ubound);
- ParamConv := Conv;
- end;
- procedure CreateLogFile;
- begin
- Assign(LogFile, LogFileName);
- Rewrite(LogFile);
- end;
- function Marquardt(Func : TFuncNVar;
- HessGrad : THessGrad;
- X : PVector;
- Lbound, Ubound : Integer;
- MaxIter : Integer;
- Tol : Float;
- var F_min : Float;
- H_inv : PMatrix) : Integer;
- const
- LAMBDA0 = 1.0E-2; { Initial lambda value }
- LAMBDAMAX = 1.0E+3; { Highest lambda value }
- FTOL = 1.0E-10; { Tolerance on function decrease }
- var
- Lambda,
- Lambda1 : Float; { Marquardt's lambda }
- I : Integer; { Loop variable }
- OldX : PVector; { Old parameters }
- G : PVector; { Gradient vector }
- H : PMatrix; { Hessian matrix }
- A : PMatrix; { Modified Hessian matrix }
- DeltaX : PVector; { New search direction }
- F1 : Float; { New minimum }
- Lambda_Ok : Boolean; { Function decreased at current Lambda }
- Conv : Boolean; { Convergence reached }
- Done : Boolean; { Iterations done }
- Iter : Integer; { Iteration count }
- ErrCode : Integer; { Error code }
- begin
- if WriteLogFile then
- begin
- CreateLogFile;
- WriteLn(LogFile, 'Marquardt');
- WriteLn(LogFile, 'Iter F Lambda');
- end;
- Lambda := LAMBDA0;
- ErrCode := MAT_OK;
- DimVector(OldX, Ubound);
- DimVector(G, Ubound);
- DimMatrix(H, Ubound, Ubound);
- DimMatrix(A, Ubound, Ubound);
- DimVector(DeltaX, Ubound);
- F_min := Func(X); { Initial function value }
- LinObjFunc := Func; { Function for line minimization }
- Iter := 1;
- Conv := False;
- Done := False;
- repeat
- if WriteLogFile then
- WriteLn(LogFile, Iter:4, ' ', F_min:12, ' ', Lambda:12);
- { Save current parameters }
- CopyVector(OldX, X, Lbound, Ubound);
- { Compute Gradient and Hessian }
- HessGrad(Func, X, Lbound, Ubound, G, H);
- CopyMatrix(A, H, Lbound, Lbound, Ubound, Ubound);
- { Change sign of gradient }
- for I := Lbound to Ubound do
- G^[I] := - G^[I];
- if Conv then { Newton-Raphson iteration }
- begin
- ErrCode := GaussJordan(A, G, Lbound, Ubound, H_inv, DeltaX);
- if ErrCode = MAT_OK then
- for I := Lbound to Ubound do
- X^[I] := OldX^[I] + DeltaX^[I];
- Done := True;
- end
- else { Marquardt iteration }
- begin
- repeat
- { Multiply each diagonal term of H by (1 + Lambda) }
- Lambda1 := 1.0 + Lambda;
- for I := Lbound to Ubound do
- A^[I]^[I] := Lambda1 * H^[I]^[I];
- ErrCode := GaussJordan(A, G, Lbound, Ubound, H_inv, DeltaX);
- if ErrCode = MAT_OK then
- begin
- { Initialize parameters }
- CopyVector(X, OldX, Lbound, Ubound);
- { Minimize in the direction specified by DeltaX }
- ErrCode := LinMin(Func, X, DeltaX,
- Lbound, Ubound, 100, 0.01, F1);
- { Check that the function has decreased. Otherwise
- increase Lambda, without exceeding LAMBDAMAX }
- Lambda_Ok := (F1 - F_min) < F_min * FTOL;
- if not Lambda_Ok then Lambda := 10.0 * Lambda;
- if Lambda > LAMBDAMAX then ErrCode := BIG_LAMBDA;
- end;
- until Lambda_Ok or (ErrCode <> MAT_OK);
- { Check for convergence }
- Conv := ParamConv(OldX, X, Lbound, Ubound, Tol);
- { Prepare next iteration }
- Lambda := 0.1 * Lambda;
- F_min := F1;
- end;
- Inc(Iter);
- if Iter > MaxIter then ErrCode := NON_CONV;
- until Done or (ErrCode <> MAT_OK);
- DelVector(OldX, Ubound);
- DelVector(G, Ubound);
- DelMatrix(H, Ubound, Ubound);
- DelMatrix(A, Ubound, Ubound);
- DelVector(DeltaX, Ubound);
- if WriteLogFile then
- Close(LogFile);
- Marquardt := ErrCode;
- end;
- function BFGS(Func : TFuncNVar;
- Gradient : TGradient;
- X : PVector;
- Lbound, Ubound : Integer;
- MaxIter : Integer;
- Tol : Float;
- var F_min : Float;
- H_inv : PMatrix) : Integer;
- var
- I, J, Iter, ErrCode : Integer;
- F1, Fae, Fad, Fac : Float;
- OldX, DeltaX, G, OldG, dG, HdG : PVector;
- Conv : Boolean;
- begin
- if WriteLogFile then
- begin
- CreateLogFile;
- WriteLn(LogFile, 'BFGS');
- WriteLn(LogFile, 'Iter F');
- end;
- DimVector(OldX, Ubound);
- DimVector(DeltaX, Ubound);
- DimVector(G, Ubound);
- DimVector(OldG, Ubound);
- DimVector(dG, Ubound);
- DimVector(HdG, Ubound);
- Iter := 1;
- F1 := Func(X); { Initial function value }
- Gradient(Func, X, Lbound, Ubound, G); { Initial gradient }
- LinObjFunc := Func; { Function for line minimization }
- for I := Lbound to Ubound do
- begin
- { Initialize inverse hessian to unit matrix }
- for J := Lbound to Ubound do
- if I = J then H_inv^[I]^[J] := 1.0 else H_inv^[I]^[J] := 0.0;
- { Initial direction = opposite gradient }
- DeltaX^[I] := - G^[I];
- end;
- repeat
- if WriteLogFile then
- WriteLn(LogFile, Iter:4, ' ', F1:12);
- { Save current parameters }
- CopyVector(OldX, X, Lbound, Ubound);
- { Minimize along the direction specified by DeltaX }
- ErrCode := LinMin(Func, X, DeltaX, Lbound, Ubound, 100, 0.01, F_min);
- { Check for convergence on parameters }
- Conv := ParamConv(OldX, X, Lbound, Ubound, Tol);
- if not Conv then
- begin
- { Save old function and gradient and compute new ones }
- F1 := F_min;
- F_min := Func(X);
- CopyVector(OldG, G, Lbound, Ubound);
- Gradient(Func, X, Lbound, Ubound, G);
- { Compute difference of gradients }
- for I := Lbound to Ubound do
- dG^[I] := G^[I] - OldG^[I];
- { Multiply by inverse hessian }
- for I := Lbound to Ubound do
- begin
- HdG^[I] := 0.0;
- for J := Lbound to Ubound do
- HdG^[I] := HdG^[I] + H_inv^[I]^[J] * dG^[J];
- end;
- { Scalar products in denominator of BFGS formula }
- Fac := 0.0; Fae := 0.0;
- for I := Lbound to Ubound do
- begin
- Fac := Fac + dG^[I] * DeltaX^[I];
- Fae := Fae + dG^[I] * HdG^[I];
- end;
- { Apply BFGS correction }
- Fac := 1.0 / Fac;
- Fad := 1.0 / Fae;
- for I := Lbound to Ubound do
- dG^[I] := Fac * DeltaX^[I] - Fad * HdG^[I];
- for I := Lbound to Ubound do
- for J := Lbound to Ubound do
- H_inv^[I]^[J] := H_inv^[I]^[J] + Fac * DeltaX^[I] * DeltaX^[J]
- - Fad * HdG^[I] * HdG^[J]
- + Fae * dG^[I] * dG^[J];
- { Compute new search direction }
- for I := Lbound to Ubound do
- begin
- DeltaX^[I] := 0.0;
- for J := Lbound to Ubound do
- DeltaX^[I] := DeltaX^[I] - H_inv^[I]^[J] * G^[J];
- end
- end;
- Inc(Iter);
- until Conv or (Iter > MaxIter);
- DelVector(OldX, Ubound);
- DelVector(DeltaX, Ubound);
- DelVector(G, Ubound);
- DelVector(OldG, Ubound);
- DelVector(dG, Ubound);
- DelVector(HdG, Ubound);
- if WriteLogFile then
- Close(LogFile);
- if Iter > MaxIter then
- BFGS := NON_CONV
- else
- BFGS := MAT_OK;
- end;
- function Simplex(Func : TFuncNVar;
- X : PVector;
- Lbound, Ubound : Integer;
- MaxIter : Integer;
- Tol : Float;
- var F_min : Float) : Integer;
- const
- STEP = 1.50; { Step used to construct the initial simplex }
- var
- P : PMatrix; { Simplex coordinates }
- F : PVector; { Function values }
- Pbar : PVector; { Centroid coordinates }
- Pstar, P2star : PVector; { New vertices }
- Ystar, Y2star : Float; { New function values }
- F0 : Float; { Function value at minimum }
- N : Integer; { Number of parameters }
- M : Integer; { Index of last vertex }
- L, H : Integer; { Vertices with lowest & highest F values }
- I, J : Integer; { Loop variables }
- Iter : Integer; { Iteration count }
- Corr, MaxCorr : Float; { Corrections }
- Sum : Float;
- Flag : Boolean;
- procedure UpdateSimplex(Y : Float; Q : PVector);
- { Update "worst" vertex and function value }
- begin
- F^[H] := Y;
- CopyVector(P^[H], Q, Lbound, Ubound);
- end;
- begin
- if WriteLogFile then
- begin
- CreateLogFile;
- WriteLn(LogFile, 'Simplex');
- WriteLn(LogFile, 'Iter F');
- end;
- N := Ubound - Lbound + 1;
- M := Succ(Ubound);
- DimMatrix(P, M, Ubound);
- DimVector(F, M);
- DimVector(Pbar, Ubound);
- DimVector(Pstar, Ubound);
- DimVector(P2star, Ubound);
- Iter := 1;
- F0 := MAXNUM;
- { Construct initial simplex }
- for I := Lbound to M do
- CopyVector(P^[I], X, Lbound, Ubound);
- for I := Lbound to Ubound do
- P^[I]^[I] := P^[I]^[I] * STEP;
- { Evaluate function at each vertex }
- for I := Lbound to M do
- F^[I] := Func(P^[I]);
- repeat
- { Find vertices (L,H) having the lowest and highest
- function values, i.e. "best" and "worst" vertices }
- L := Lbound;
- H := Lbound;
- for I := Succ(Lbound) to M do
- if F^[I] < F^[L] then
- L := I
- else if F^[I] > F^[H] then
- H := I;
- if F^[L] < F0 then
- F0 := F^[L];
- if WriteLogFile then
- WriteLn(LogFile, Iter:4, ' ', F0:12);
- { Find centroid of points other than P(H) }
- for J := Lbound to Ubound do
- begin
- Sum := 0.0;
- for I := Lbound to M do
- if I <> H then Sum := Sum + P^[I]^[J];
- Pbar^[J] := Sum / N;
- end;
- { Reflect worst vertex through centroid }
- for J := Lbound to Ubound do
- Pstar^[J] := 2.0 * Pbar^[J] - P^[H]^[J];
- Ystar := Func(Pstar);
- { If reflection successful, try extension }
- if Ystar < F^[L] then
- begin
- for J := Lbound to Ubound do
- P2star^[J] := 3.0 * Pstar^[J] - 2.0 * Pbar^[J];
- Y2star := Func(P2star);
- { Retain extension or contraction }
- if Y2star < F^[L] then
- UpdateSimplex(Y2star, P2star)
- else
- UpdateSimplex(Ystar, Pstar);
- end
- else
- begin
- I := Lbound;
- Flag := False;
- repeat
- if (I <> H) and (F^[I] > Ystar) then Flag := True;
- Inc(I);
- until Flag or (I > M);
- if Flag then
- UpdateSimplex(Ystar, Pstar)
- else
- begin
- { Contraction on the reflection side of the centroid }
- if Ystar <= F^[H] then
- UpdateSimplex(Ystar, Pstar);
- { Contraction on the opposite side of the centroid }
- for J := Lbound to Ubound do
- P2star^[J] := 0.5 * (P^[H]^[J] + Pbar^[J]);
- Y2star := Func(P2star);
- if Y2star <= F^[H] then
- UpdateSimplex(Y2star, P2star)
- else
- { Contract whole simplex }
- for I := Lbound to M do
- for J := Lbound to Ubound do
- P^[I]^[J] := 0.5 * (P^[I]^[J] + P^[L]^[J]);
- end;
- end;
- { Test convergence }
- MaxCorr := 0.0;
- for J := Lbound to Ubound do
- begin
- Corr := Abs(P^[H]^[J] - P^[L]^[J]);
- if Corr > MaxCorr then MaxCorr := Corr;
- end;
- Inc(Iter);
- until (MaxCorr < Tol) or (Iter > MaxIter);
- CopyVector(X, P^[L], Lbound, Ubound);
- F_min := F^[L];
- DelMatrix(P, M, Ubound);
- DelVector(F, M);
- DelVector(Pbar, Ubound);
- DelVector(Pstar, Ubound);
- DelVector(P2star, Ubound);
- if WriteLogFile then
- Close(LogFile);
- if Iter > MaxIter then
- Simplex := NON_CONV
- else
- Simplex := MAT_OK;
- end;
- procedure GenRandomVector(Cv, MinSigma : Float;
- X, X1 : PVector;
- Lbound, Ubound : Integer);
- { ----------------------------------------------------------------------
- Generates a random vector X1 by adding gaussian noise to each component of X
- ---------------------------------------------------------------------- }
- var
- I : Integer;
- Sigma : Float; { S.D. of gaussian random numbers }
- begin
- for I := Lbound to Ubound do
- begin
- Sigma := (Cv * Abs(X^[I]) + MinSigma);
- X1^[I] := RanGauss(X^[I], Sigma);
- end;
- end;
- function InitTemp(Func : TFuncNVar;
- X : PVector;
- Lbound, Ubound : Integer;
- var F : Float) : Float;
- { ----------------------------------------------------------------------
- Computes the initial temperature so that the probability
- of accepting an increase of the function is about 0.5
- ---------------------------------------------------------------------- }
- const
- N_MAX = 50; { Max number of function evaluations }
- N_INC = 10; { Max number of function increases }
- var
- T : Float; { Temperature }
- X1 : PVector; { New coordinates }
- F1 : Float; { New function value }
- F_inc : PVector; { Function increases }
- N : Integer; { Counts function evaluations }
- K : Integer; { Counts function increases }
- begin
- DimVector(X1, Ubound);
- DimVector(F_inc, N_INC);
- K := 0;
- N := 0;
- T := 0.0;
- F := Func(X);
- { Compute K (<= N_INC) increases of the function }
- repeat
- Inc(N);
- GenRandomVector(SA_Cv, SA_MinSigma, X, X1, Lbound, Ubound);
- F1 := Func(X1);
- if F1 > F then
- begin
- Inc(K);
- F_inc^[K] := F1 - F;
- end;
- until (K = N_INC) or (N = N_MAX);
- { An increase equal to the median M of these K increases should be
- accepted with probability 1/2.
- From Boltzmann's formula : Exp(-M/T) = 1/2 ==> T = M / Ln(2) }
- T := Median(F_inc, 1, K) / LN2;
- if T = 0.0 then T := 1.0;
- InitTemp := T;
- DelVector(X1, Ubound);
- DelVector(F_inc, N_INC);
- end;
- function Accept(DeltaF, T : Float;
- var N_inc, N_acc : Integer) : Boolean;
- { ----------------------------------------------------------------------
- Checks if a variation DeltaF of the function at temperature T is
- acceptable. Updates the counters N_inc (number of increases of the
- function) and N_acc (number of accepted increases).
- ---------------------------------------------------------------------- }
- begin
- if DeltaF < 0.0 then
- Accept := True
- else
- begin
- Inc(N_inc);
- if Exp(- DeltaF / T) > RanMar then
- begin
- Accept := True;
- Inc(N_acc);
- end
- else
- Accept := False;
- end;
- end;
- procedure SimAnnCycle(Func : TFuncNVar;
- X : PVector;
- Lbound, Ubound : Integer;
- MaxIter : Integer;
- T_Ratio : Float;
- var LogFile : Text;
- var F_min : Float);
- { ----------------------------------------------------------------------
- Performs one cycle of simulated annealing
- ---------------------------------------------------------------------- }
- var
- T0, T : Float; { Temperature }
- Rt : Float; { Temperature reduction factor }
- Rs : Float; { S.D. reduction factor }
- MinSigma : Float; { Minimal S.D. }
- Cv : Float; { Coef. of variation }
- F, F1 : Float; { Function values }
- DeltaF : Float; { Variation of function }
- X1 : PVector; { New coordinates }
- K : Integer; { Loop variable }
- Iter : Integer; { Iteration count }
- N_inc : Integer; { Nb of function increases }
- N_acc : Integer; { Nb of accepted increases }
- begin
- { Dimension array }
- DimVector(X1, Ubound);
- { Compute initial temperature and reduction factors }
- T0 := InitTemp(Func, X, Lbound, Ubound, F);
- Rt := Exp(Ln(T_Ratio) / MaxIter);
- Rs := Exp(Ln(SA_SigmaFact) / MaxIter);
- T := T0;
- Iter := 0;
- Cv := SA_Cv;
- MinSigma := SA_MinSigma;
- repeat
- N_inc := 0;
- N_acc := 0;
- { Perform SA_Nt evaluations at constant temperature }
- for K := 1 to SA_Nt do
- begin
- { Generate new point }
- GenRandomVector(Cv, MinSigma, X, X1, Lbound, Ubound);
- { Compute new function value }
- F1 := Func(X1);
- DeltaF := F1 - F;
- { Check for acceptance }
- if Accept(DeltaF, T, N_inc, N_acc) then
- begin
- CopyVector(X, X1, Lbound, Ubound);
- F := F1;
- end;
- end;
- if WriteLogFile then
- WriteLn(LogFile, Iter:4, ' ', T:12, ' ', F:12, N_inc:6, N_acc:6);
- { Update temperature, S.D. factors, and iteration count }
- T := T * Rt;
- Cv := Cv * Rs;
- MinSigma := MinSigma * Rs;
- Inc(Iter);
- until Iter > MaxIter;
- F_min := F;
- DelVector(X1, Ubound);
- end;
- function SimAnn(Func : TFuncNVar;
- X : PVector;
- Lbound, Ubound : Integer;
- MaxIter : Integer;
- T_Ratio : Float;
- var F_min : Float) : Integer;
- var
- K : Integer;
- begin
- if WriteLogFile then
- CreateLogFile;
- { Initialize the Marsaglia random number generator
- using the standard Pascal generator }
- Randomize;
- RMarIn(System.Random(10000), System.Random(10000));
- for K := 1 to SA_NCycles do
- begin
- if WriteLogFile then
- WriteLn(LogFile, 'Simulated annealing: Cycle ', K, #10,
- 'Iter T F Inc Acc');
- SimAnnCycle(Func, X, Lbound, Ubound, MaxIter, T_Ratio, LogFile, F_min);
- end;
- if WriteLogFile then
- Close(LogFile);
- SimAnn := 0;
- end;
- begin
- X1 := nil;
- DeltaX1 := nil;
- Ubound1 := 1;
- Eps := Pow(MACHEP, 0.333);
- end.