The following program uses the Broyden-Fletcher-Goldfarb-Shanno (BFGS) method to find the minimum of a function. BFGS is a quasi-Newton method: like Newton's method it uses second-order information to steer the search, but instead of computing the Hessian matrix directly it maintains an approximation to the inverse Hessian, built up from successive gradient differences.
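At each iteration, the listing below refines the inverse-Hessian approximation B with the standard BFGS update, where d is the step just taken and g is the resulting change in the gradient:

B = B + (1 + g^T B g / (d^T g)) * (d d^T) / (d^T g) - (d g^T B + B g d^T) / (d^T g)

The matrices MM1 through MM11 in the code hold the intermediate products of this formula.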
The program prompts you to enter the following values:
1. The maximum number of iterations.
2. The tolerance for the minimized function.
3. The tolerance for the gradient.
4. The initial guess for the optimum point, for each variable (i.e. dimension).
5. The tolerance for the guess refinement, for each variable.
The program displays intermediate values for the function and the variables. The program displays the following final results:
1. The coordinates of the minimum point.
2. The gradient at the minimum point.
3. The minimum function value.
4. The number of iterations.
The current code finds the minimum for the following function:
f(x1,x2) = x1 - x2 + 2 * x1 ^ 2 + 2 * x1 * x2 + x2 ^ 2
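Setting the partial derivatives to zero gives 1 + 4 * x1 + 2 * x2 = 0 and -1 + 2 * x1 + 2 * x2 = 0, so the exact minimum lies at x1 = -1, x2 = 1.5, where f = -1.25. These values make a handy check on the program's output.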
Here is the BASIC listing:
! Broyden-Fletcher-Goldfarb-Shanno Method (BFGS)
OPTION TYPO
OPTION NOLET
DECLARE NUMERIC MAX_VARS, N, I, EPSF, EPSG, T1, T2, F
DECLARE NUMERIC fNorm, Lambda, Iter, MaxIter
DECLARE NUMERIC bStop, bTrue, bFalse, boolRes
Dim X(1), Toler(1)
Dim grad1(1,1), grad2(1,1), g(1,1), d(1,1), S(1,1)
Dim Bmat(1,1), Mmat(1,1), Nmat(1,1)
Dim MM1(1,1), MM2(1,1), MM3(1,1), MM4(1,1)
Dim MM5(1,1), MM6(1,1), MM7(1,1), MM8(1,1)
Dim MM9(1,1), MM10(1,1), MM11(1,1)
bTrue = 1
bFalse = 0
SUB CalcNorm(X(,), N, FNorm)
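! calculates the Euclidean norm of the N-row column vector X() and returns it in FNorm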
LOCAL I
FNorm = 0
For I = 1 To N
FNorm = FNorm + X(I,1)^2
Next I
FNorm = Sqr(FNorm)
END SUB
SUB MyFx(X(), N, Res)
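! (the commented-out line below is the classic Rosenbrock test function, kept for reference)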
! Res = 100 * (X(1) ^ 2 - X(2)) ^ 2 + (1 - X(1)) ^ 2
Res = X(1) - X(2) + 2 * X(1) ^ 2 + 2 * X(1) * X(2) + X(2) ^ 2
End SUB
SUB FirstDeriv(N, X(), iVar, funRes)
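! approximates the partial derivative of f with respect to X(iVar) using a
! central difference whose step h scales with the magnitude of X(iVar)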
LOCAL Xt, h, Fp, Fm
Xt = X(iVar)
h = 0.01 * (1 + Abs(Xt))
X(iVar) = Xt + h
CALL MyFx(X, N, Fp)
X(iVar) = Xt - h
CALL MyFx(X, N, Fm)
X(iVar) = Xt
funRes = (Fp - Fm) / 2 / h
End SUB
Sub GetFirstDerives(N, X(), FirstDerivX(,))
LOCAL I
For I = 1 To N
CALL FirstDeriv(N, X, I, FirstDerivX(I,1))
Next I
End Sub
SUB MyFxEx(N, X(), DeltaX(,), Lambda, funRes)
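! evaluates f at the trial point X + Lambda * DeltaX, leaving X() unchanged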
LOCAL I, XX(1)
MAT REDIM XX(N)
For I = 1 To N
XX(I) = X(I) + Lambda * DeltaX(I,1)
Next I
CALL MyFx(XX, N, funRes)
End SUB
SUB LinSearch_DirectSearch(X(), N, Lambda, DeltaX(,), InitStep, MinStep, boolRes)
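! direct-search line minimization: move Lambda up or down by InitStep as long
! as f improves; otherwise shrink InitStep tenfold, stopping once it falls
! below MinStep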
LOCAL F1, F2
CALL MyFxEx(N, X, DeltaX, Lambda, F1)
Do
CALL MyFxEx(N, X, DeltaX, Lambda + InitStep, F2)
If F2 < F1 Then
F1 = F2
Lambda = Lambda + InitStep
Else
CALL MyFxEx(N, X, DeltaX, Lambda - InitStep, F2)
If F2 < F1 Then
F1 = F2
Lambda = Lambda - InitStep
Else
! reduce search step size
InitStep = InitStep / 10
End If
End If
Loop Until InitStep < MinStep
boolRes = bTrue
End SUB
! Broyden-Fletcher-Goldfarb-Shanno Method (BFGS)
MAX_VARS = 2
MAT REDIM Toler(MAX_VARS)
MAT REDIM X(MAX_VARS)
MAT REDIM grad1(MAX_VARS,1)
MAT REDIM grad2(MAX_VARS,1)
MAT REDIM g(MAX_VARS,1)
MAT REDIM d(MAX_VARS,1)
MAT REDIM S(MAX_VARS,1)
MAT REDIM Bmat(MAX_VARS, MAX_VARS)
MAT REDIM Mmat(MAX_VARS, MAX_VARS)
MAT REDIM Nmat(MAX_VARS, MAX_VARS)
MAT REDIM MM1(MAX_VARS, MAX_VARS)
MAT REDIM MM2(MAX_VARS, MAX_VARS)
MAT REDIM MM3(MAX_VARS, MAX_VARS)
MAT REDIM MM4(MAX_VARS, MAX_VARS)
MAT REDIM MM5(MAX_VARS, MAX_VARS)
MAT REDIM MM6(MAX_VARS, MAX_VARS)
MAT REDIM MM7(MAX_VARS, MAX_VARS)
MAT REDIM MM8(MAX_VARS, MAX_VARS)
MAT REDIM MM9(MAX_VARS, MAX_VARS)
MAT REDIM MM10(MAX_VARS, MAX_VARS)
MAT REDIM MM11(MAX_VARS, MAX_VARS)
N = MAX_VARS
PRINT "Broyden-Fletcher-Goldfarb-Shanno Method (BFGS)"
INPUT PROMPT "Enter maximum number of iterations? ": MaxIter
INPUT PROMPT "Enter function tolerance? ": EPSF
INPUT PROMPT "Enter gradient tolerance? ": EPSG
For I = 1 To N
PRINT "Enter value for X(";I;")";
INPUT X(I)
PRINT "Enter tolerance value for X(";I;")";
INPUT Toler(I)
Next I
! set matrix B as an identity matrix
MAT Bmat = IDN(MAX_VARS, MAX_VARS)
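! (with B equal to the identity, the first iteration takes a steepest-descent step)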
Iter = 0
! calculate initial gradient
CALL GetFirstDerives(N, X, grad1)
! start main loop
Do
Iter = Iter + 1
If Iter > MaxIter Then
PRINT "Reached iteration limits"
Exit Do
End If
! calculate vector S() and reverse its sign
MAT S = Bmat * grad1
MAT S = (-1) * S
! normalize S() into a unit search direction
CALL CalcNorm(S, N, fNorm)
MAT S = (1/fNorm) * S
Lambda = 0.1
CALL LinSearch_DirectSearch(X, N, Lambda, S, 0.1, 0.00001, boolRes)
! update X() by stepping Lambda along S()
MAT d = (Lambda) * S
For I = 1 To N
X(I) = X(I) + d(I,1)
Next I
! get new gradient
CALL GetFirstDerives(N, X, grad2)
MAT g = grad2 - grad1
MAT grad1 = grad2
! test for convergence
bStop = bTrue
For I = 1 To N
If Abs(d(I,1)) > Toler(I) Then
bStop = bFalse
Exit For
End If
Next I
If bStop = bTrue Then
PRINT "Exit due to values of Lambda * S()"
PRINT "Lamda="; Lambda
PRINT "Array S is:"
For I = 1 To N
PRINT "S(";I;")=";S(I,1);" ";
Next I
PRINT
Exit Do
End If
! exit if the change in the gradient is small
CALL CalcNorm(g, N, fNorm)
If fNorm < EPSG Then
PRINT "Exit due to G Norm"
PRINT "Norm=";fNorm
Exit Do
End If
!-------------------------------------------------
! Start the process to update matrix B using the BFGS formula
!
! MM1 = g as column matrix
MAT MM1 = g
! MM2 = d as column matrix
MAT MM2 = d
! MM3 = g^T
MAT MM3 = TRN(g)
! MM4 = d^T
MAT MM4 = TRN(d)
! MM5 = d d^T
MAT MM5 = MM2 * MM4
! MM6 = d^T g
MAT MM6 = MM4 * MM1
! MM7 = d g^T
MAT MM7 = MM2 * MM3
! MM8 = g d^T
MAT MM8 = MM1 * MM4
! MM9 = g^T [B]
MAT MM9 = MM3 * Bmat
! Mmat = g^T [B] g (a 1x1 matrix)
MAT Mmat = MM9 * MM1
! MM10 = d g^T [B]
MAT MM10 = MM7 * Bmat
! MM11 = [B] g d^T
MAT MM11 = Bmat * MM8
T1 = MM6(1, 1) ! d^T g
T2 = (1 + Mmat(1, 1) / T1) / T1
MAT MM5 = (T2) * MM5
MAT MM10 = (1/T1) * MM10
MAT MM11 = (1/T1) * MM11 ! both rank-one terms are divided by d^T g
MAT Bmat = Bmat + MM5
MAT Bmat = Bmat - MM10
MAT Bmat = Bmat - MM11
CALL MyFx(X, N, F)
PRINT "F = ";F;" ";
For I = 1 To N
PRINT "X(";I;")=";X(I);" ";
PRINT "g(";I;")=";g(I,1);" ";
Next I
PRINT
Loop
PRINT "********FINAL RESULTS**********"
For I = 1 To N
PRINT "X(";I;")=";X(I)
Next I
For I = 1 To N
PRINT "Gradient(";I;")=";g(I,1)
Next I
CALL MyFx(X, N, F)
PRINT "Function value = ";F
PRINT "Iterations = ";Iter
END
Copyright (c) Namir Shammas. All rights reserved.