Package minfx :: Module ncg
[hide private]
[frames] | [no frames]

Source Code for Module minfx.ncg

  1  ############################################################################### 
  2  #                                                                             # 
  3  # Copyright (C) 2003-2013 Edward d'Auvergne                                   # 
  4  #                                                                             # 
  5  # This file is part of the minfx optimisation library,                        # 
  6  # https://gna.org/projects/minfx                                              # 
  7  #                                                                             # 
  8  # This program is free software: you can redistribute it and/or modify        # 
  9  # it under the terms of the GNU General Public License as published by        # 
 10  # the Free Software Foundation, either version 3 of the License, or           # 
 11  # (at your option) any later version.                                         # 
 12  #                                                                             # 
 13  # This program is distributed in the hope that it will be useful,             # 
 14  # but WITHOUT ANY WARRANTY; without even the implied warranty of              # 
 15  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the               # 
 16  # GNU General Public License for more details.                                # 
 17  #                                                                             # 
 18  # You should have received a copy of the GNU General Public License           # 
 19  # along with this program.  If not, see <http://www.gnu.org/licenses/>.       # 
 20  #                                                                             # 
 21  ############################################################################### 
 22   
 23  # Module docstring. 
 24  """Line search Newton conjugate gradient optimization. 
 25   
 26  This file is part of the minfx optimisation library at U{https://gna.org/projects/minfx}. 
 27  This file is part of the U{minfx optimisation library<https://gna.org/projects/minfx>}. 
 28  """ 
 29   
 30  # Python module imports. 
 31  from numpy import dot, float64, sqrt, zeros 
 32   
 33  # Minfx module imports. 
 34  from minfx.base_classes import Line_search, Min 
 35   
 36   
def ncg(func=None, dfunc=None, d2func=None, args=(), x0=None, min_options=None, func_tol=1e-25, grad_tol=None, maxiter=1e6, a0=1.0, mu=0.0001, eta=0.9, full_output=0, print_flag=0, print_prefix=""):
    """Line search Newton conjugate gradient algorithm.

    Page 140 from 'Numerical Optimization' by Jorge Nocedal and Stephen J. Wright, 1999, 2nd ed.  The algorithm is:

        - Given initial point x0.
        - while 1:
            - Compute a search direction pk by applying the CG method to Hk.pk = -gk, starting from x0 = 0.  Terminate when ||rk|| <= min(0.5,sqrt(||gk||)), or if negative curvature is encountered.
            - Set xk+1 = xk + ak.pk, where ak satisfies the Wolfe, Goldstein, or Armijo backtracking conditions.

    @keyword func:          The target function to minimise.
    @keyword dfunc:         The function which returns the gradient vector.
    @keyword d2func:        The function which returns the Hessian matrix.
    @keyword args:          The tuple of arguments supplied to func, dfunc, and d2func.
    @keyword x0:            The initial parameter vector.
    @keyword min_options:   The line search options passed to the Line_search base class.
    @keyword func_tol:      The function tolerance convergence criterion.
    @keyword grad_tol:      The gradient tolerance convergence criterion.
    @keyword maxiter:       The maximum number of iterations.
    @keyword a0:            The initial step length for the line search.
    @keyword mu:            The Wolfe condition constant for sufficient decrease.
    @keyword eta:           The Wolfe condition constant for curvature.
    @keyword full_output:   A flag which, if set, returns extra minimisation details.
    @keyword print_flag:    The verbosity level (>= 2 prints extra debugging output).
    @keyword print_prefix:  The text to prepend to all printed lines.
    @return:                The result of Ncg.minimise(), or None if initialisation fails.
    """

    # Print out the optimiser header when verbose.
    if print_flag:
        if print_flag >= 2:
            print(print_prefix)
        print(print_prefix)
        print(print_prefix + "Newton Conjugate Gradient minimisation")
        print(print_prefix + "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")

    # Set up the minimiser object (renamed from 'min' to avoid shadowing the builtin min()).
    minimiser = Ncg(func, dfunc, d2func, args, x0, min_options, func_tol, grad_tol, maxiter, a0, mu, eta, full_output, print_flag, print_prefix)
    if minimiser.init_failure:
        print(print_prefix + "Initialisation of minimisation has failed.")
        return None

    # Run the minimisation and return the results.
    results = minimiser.minimise()
    return results
class Ncg(Line_search, Min):
    # Line search Newton conjugate gradient minimiser.  The line search machinery
    # (line_search_options, setup_line_search, line_search, setup_conv_tests,
    # minimise) is inherited from the Line_search and Min base classes, which are
    # defined elsewhere in minfx.base_classes.

    def __init__(self, func, dfunc, d2func, args, x0, min_options, func_tol, grad_tol, maxiter, a0, mu, eta, full_output, print_flag, print_prefix):
        """Class for newton conjugate gradient minimisation specific functions.

        Unless you know what you are doing, you should call the function 'ncg' rather than using this class.

        This stores the target function, gradient, and Hessian callables together
        with all tolerances and line search constants, initialises the evaluation
        counters, and evaluates f, grad f, and the Hessian once at x0.
        """

        # Function arguments.
        self.func = func
        self.dfunc = dfunc
        self.d2func = d2func
        self.args = args
        self.xk = x0
        self.func_tol = func_tol
        self.grad_tol = grad_tol
        self.maxiter = maxiter
        self.full_output = full_output
        self.print_flag = print_flag
        self.print_prefix = print_prefix

        # Set a0.
        self.a0 = a0

        # Line search constants for the Wolfe conditions.
        self.mu = mu
        self.eta = eta

        # Initialisation failure flag.
        self.init_failure = 0

        # Setup the line search options and algorithm (base class methods;
        # these may set self.init_failure on bad options).
        self.line_search_options(min_options)
        self.setup_line_search()

        # Initialise the function, gradient, and Hessian evaluation counters.
        self.f_count = 0
        self.g_count = 0
        self.h_count = 0

        # Initialise the warning string.
        self.warning = None

        # Set the convergence test function (base class method).
        self.setup_conv_tests()

        # Calculate the initial Newton function value, gradient vector, and Hessian matrix.
        # Each call also increments the matching evaluation counter.
        self.fk, self.f_count = self.func(*(self.xk,)+self.args), self.f_count + 1
        self.dfk, self.g_count = self.dfunc(*(self.xk,)+self.args), self.g_count + 1
        self.d2fk, self.h_count = self.d2func(*(self.xk,)+self.args), self.h_count + 1


    def get_pk(self):
        """The CG algorithm.

        Inner conjugate gradient loop solving Hk.pk = -gk approximately,
        starting from x0 = 0.  Returns the search direction vector.
        """

        # Initial values at i = 0.
        xi = zeros(len(self.xk), float64)
        ri = self.dfk * 1.0        # r0 = gk (the '* 1.0' forces a copy of the gradient array).
        pi = -ri                   # p0 = -r0, the steepest descent direction.
        dot_ri = dot(ri, ri)       # ||r0||^2.
        len_g = sqrt(dot_ri)       # ||gk||.
        # NOTE(review): the active test differs from the commented-out Nocedal &
        # Wright form min(0.5, sqrt(||gk||)) * ||gk|| — confirm which is intended.
        residual_test = min(0.5 * len_g, dot_ri)
        #residual_test = min(0.5, sqrt(len_g)) * len_g

        # Debugging.
        if self.print_flag >= 2:
            print(self.print_prefix + "Initial data:")
            print(self.print_prefix + "\tx0: " + repr(xi))
            print(self.print_prefix + "\tr0: " + repr(ri))
            print(self.print_prefix + "\tp0: " + repr(pi))

        i = 0
        while True:
            # Matrix product and curvature.
            Api = dot(self.d2fk, pi)
            curv = dot(pi, Api)    # pi^T . Hk . pi.

            # Negative curvature test.
            if curv <= 0.0:
                if i == 0:
                    if dot_ri == 0.0:
                        # Zero gradient: no descent direction available.
                        return xi
                    else:
                        # First iteration: take the single CG step along p0.
                        ai = dot_ri / curv
                        return xi + ai*pi
                else:
                    # Later iteration: return the last iterate with positive curvature.
                    return xi
            # Residual termination test on ||ri||.
            if sqrt(dot_ri) <= residual_test:
                return xi

            # Conjugate gradient part
            ai = dot_ri / curv                 # Step length.
            xi_new = xi + ai*pi                # Iterate update.
            ri_new = ri + ai*Api               # Residual update.
            dot_ri_new = dot(ri_new, ri_new)
            bi_new = dot_ri_new / dot_ri       # Fletcher-Reeves style beta.
            pi_new = -ri_new + bi_new*pi       # New conjugate direction.

            # Debugging.
            if self.print_flag >= 2:
                print("")
                print(self.print_prefix + "Iteration i = " + repr(i))
                print(self.print_prefix + "Api: " + repr(Api))
                print(self.print_prefix + "Curv: " + repr(curv))
                print(self.print_prefix + "ai: " + repr(ai))
                print(self.print_prefix + "xi+1: " + repr(xi_new))
                print(self.print_prefix + "ri+1: " + repr(ri_new))
                print(self.print_prefix + "bi+1: " + repr(bi_new))
                print(self.print_prefix + "pi+1: " + repr(pi_new))

            # Update i+1 to i (the '* 1.0' forces array copies).
            xi = xi_new * 1.0
            ri = ri_new * 1.0
            pi = pi_new * 1.0
            dot_ri = dot_ri_new
            i = i + 1


    def new_param_func(self):
        """The new parameter function.

        Find the search direction, do a line search, and get xk+1 and fk+1.
        Stores xk_new, fk_new, and dfk_new, and updates the evaluation counters.
        """

        # Get the pk vector.
        self.pk = self.get_pk()

        # Line search (base class method; sets self.alpha).
        self.line_search()

        # Find the new parameter vector and function value at that point.
        self.xk_new = self.xk + self.alpha * self.pk
        self.fk_new, self.f_count = self.func(*(self.xk_new,)+self.args), self.f_count + 1
        self.dfk_new, self.g_count = self.dfunc(*(self.xk_new,)+self.args), self.g_count + 1


    def update(self):
        """Function to update the function value, gradient vector, and Hessian matrix.

        Promotes the xk_new/fk_new/dfk_new values computed by new_param_func() to
        the current iterate, and re-evaluates the Hessian at the new point.
        """

        self.xk = self.xk_new * 1.0    # The '* 1.0' forces a copy of the array.
        self.fk = self.fk_new
        self.dfk = self.dfk_new * 1.0
        self.d2fk, self.h_count = self.d2func(*(self.xk,)+self.args), self.h_count + 1