Source Code for Module minfx.base_classes

###############################################################################
#                                                                             #
# Copyright (C) 2003-2013 Edward d'Auvergne                                   #
#                                                                             #
# This file is part of the minfx optimisation library,                        #
# https://sourceforge.net/projects/minfx                                      #
#                                                                             #
# This program is free software: you can redistribute it and/or modify        #
# it under the terms of the GNU General Public License as published by        #
# the Free Software Foundation, either version 3 of the License, or           #
# (at your option) any later version.                                         #
#                                                                             #
# This program is distributed in the hope that it will be useful,             #
# but WITHOUT ANY WARRANTY; without even the implied warranty of              #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the               #
# GNU General Public License for more details.                                #
#                                                                             #
# You should have received a copy of the GNU General Public License           #
# along with this program.  If not, see <http://www.gnu.org/licenses/>.       #
#                                                                             #
###############################################################################

# Module docstring.
 24  """Bases classes for the U{minfx optimisation library<https://sourceforge.net/projects/minfx>}. 
 25   
 26  This module contains the following base classes: 
 27      - Min:                The base class containing the main iterative minimisation loop and a few other base class functions. 
 28      - Line_search:        The base class containing the generic line search functions. 
 29      - Trust_region:       The base class containing the generic trust-region functions. 
 30      - Conjugate_gradient: The base class containing the generic conjugate gradient functions. 
 31  """ 

# Python module imports.
########################

from numpy import dot, sqrt
from numpy.linalg import inv, LinAlgError
from re import match
import sys


# Line search functions.
########################

from minfx.line_search.backtrack import backtrack
from minfx.line_search.nocedal_wright_interpol import nocedal_wright_interpol
from minfx.line_search.nocedal_wright_wolfe import nocedal_wright_wolfe
from minfx.line_search.more_thuente import more_thuente


# Hessian modifications.
########################

from minfx.hessian_mods.cholesky_mod import cholesky_mod
from minfx.hessian_mods.eigenvalue import eigenvalue
from minfx.hessian_mods.gmw81 import gmw
from minfx.hessian_mods.gmw81_old import gmw_old
from minfx.hessian_mods.se99 import se99

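# Source lines 60-89 of the original file are missing from this listing; they
# presumably define the print_iter() helper called by Min.minimise() below.
# What follows is only a minimal sketch under that assumption -- the output
# format of the real minfx helper may differ.
def print_iter(k, xk, fk=None, print_prefix=''):
    """Print the iteration number k, the parameter vector xk, and the function value fk."""
    text = print_prefix + "k: %-8i xk: %s" % (k, repr(xk))
    if fk is not None:
        text = text + " fk: %s" % repr(fk)
    print(text)
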

# The generic minimisation base class (containing the main iterative loop).
###########################################################################

class Min:
    def __init__(self):
        """Base class containing the main minimisation iterative loop algorithm.

        The algorithm is defined in the minimise function.  Also supplied are generic setup, convergence test, and update functions.
        """

    def double_test(self, fk_new, fk, gk):
        """Default base class function for both the function and gradient convergence tests.

        Test if either the minimum function tolerance between fk and fk+1 or the minimum gradient tolerance has been reached.
        """

        # Test the function tolerance.
        if abs(fk_new - fk) <= self.func_tol:
            if self.print_flag >= 2:
                print("\n" + self.print_prefix + "Function tolerance reached.")
                print(self.print_prefix + "fk: " + repr(fk))
                print(self.print_prefix + "fk+1: " + repr(fk_new))
                print(self.print_prefix + "|fk+1 - fk|: " + repr(abs(fk_new - fk)))
                print(self.print_prefix + "tol: " + repr(self.func_tol))
            return 1

        # Test the gradient tolerance.
        elif sqrt(dot(gk, gk)) <= self.grad_tol:
            if self.print_flag >= 2:
                print("\n" + self.print_prefix + "Gradient tolerance reached.")
                print(self.print_prefix + "gk+1: " + repr(gk))
                print(self.print_prefix + "||gk+1||: " + repr(sqrt(dot(gk, gk))))
                print(self.print_prefix + "tol: " + repr(self.grad_tol))
            return 1

    def func_test(self, fk_new, fk, gk=None):
        """Default base class function for the function convergence test.

        Test if the minimum function tolerance between fk and fk+1 has been reached.
        """

        # Test the function tolerance.
        if abs(fk_new - fk) <= self.func_tol:
            if self.print_flag >= 2:
                print("\n" + self.print_prefix + "Function tolerance reached.")
                print(self.print_prefix + "fk: " + repr(fk))
                print(self.print_prefix + "fk+1: " + repr(fk_new))
                print(self.print_prefix + "|fk+1 - fk|: " + repr(abs(fk_new - fk)))
                print(self.print_prefix + "tol: " + repr(self.func_tol))
            return 1

    def grad_test(self, fk_new, fk, gk):
        """Default base class function for the gradient convergence test.

        Test if the minimum gradient tolerance has been reached.  Minimisation will also terminate if the function value difference between fk and fk+1 is zero.  This modification is essential for the quasi-Newton techniques.
        """

        # Test the gradient tolerance.
        if sqrt(dot(gk, gk)) <= self.grad_tol:
            if self.print_flag >= 2:
                print("\n" + self.print_prefix + "Gradient tolerance reached.")
                print(self.print_prefix + "gk+1: " + repr(gk))
                print(self.print_prefix + "||gk+1||: " + repr(sqrt(dot(gk, gk))))
                print(self.print_prefix + "tol: " + repr(self.grad_tol))
            return 1

        # No change in function value (prevents the minimiser from iterating without moving).
        elif fk_new - fk == 0.0:
            if self.print_flag >= 2:
                print("\n" + self.print_prefix + "Function difference of zero.")
                print(self.print_prefix + "fk_new - fk = 0.0")
            return 1

    def hessian_type_and_mod(self, min_options, default_type='Newton', default_mod='GMW'):
        """Hessian type and modification options.

        Function for sorting out the minimisation options when either the Hessian type or Hessian modification can be selected.
        """

        # Initialise.
        self.hessian_type = None
        self.hessian_mod = None

        # Test if the options are a tuple.
        if not isinstance(min_options, tuple):
            print(self.print_prefix + "The minimisation options " + repr(min_options) + " are not a tuple.")
            self.init_failure = 1
            return

        # Test that no more than 2 options are given.
        if len(min_options) > 2:
            print(self.print_prefix + "A maximum of two minimisation options is allowed (the Hessian type and Hessian modification).")
            self.init_failure = 1
            return

        # Sort out the minimisation options.
        for opt in min_options:
            if self.hessian_type is None and opt is not None and (match('[Bb][Ff][Gg][Ss]', opt) or match('[Nn]ewton', opt)):
                self.hessian_type = opt
            elif self.hessian_mod is None and self.valid_hessian_mod(opt):
                self.hessian_mod = opt
            else:
                print(self.print_prefix + "The minimisation option " + repr(opt) + " from " + repr(min_options) + " is neither a valid Hessian type nor modification.")
                self.init_failure = 1
                return

        # Default Hessian type.
        if self.hessian_type is None:
            self.hessian_type = default_type

        # Make sure that no Hessian modification is used with the BFGS matrix.
        if match('[Bb][Ff][Gg][Ss]', self.hessian_type) and self.hessian_mod is not None:
            print(self.print_prefix + "When using the BFGS matrix, Hessian modifications should not be used.")
            self.init_failure = 1
            return

        # Default Hessian modification when the Hessian type is Newton.
        if match('[Nn]ewton', self.hessian_type) and self.hessian_mod is None:
            self.hessian_mod = None
            #self.hessian_mod = default_mod

        # Print the Hessian type info.
        if self.print_flag:
            if match('[Bb][Ff][Gg][Ss]', self.hessian_type):
                print(self.print_prefix + "Hessian type: BFGS")
            else:
                print(self.print_prefix + "Hessian type: Newton")
    def minimise(self):
        """Main minimisation iterative loop algorithm.

        This algorithm is designed to be compatible with all iterative minimisers.  The outline is:

            - k = 0
            - while 1:
                - New parameter function
                - Convergence tests
                - Update function
                - k = k + 1
        """

        # Start the iteration counter.
        self.k = 0
        if self.print_flag:
            self.k2 = 0
            print("")    # Print a new line.

        # Iterate until the local minimum is found.
        while True:
            # Print out.
            if self.print_flag:
                out = 0
                if self.print_flag >= 2:
                    print("\n" + self.print_prefix + "Main iteration k=" + repr(self.k))
                    out = 1
                else:
                    if self.k2 == 100:
                        self.k2 = 0
                    if self.k2 == 0:
                        out = 1
                if out == 1:
                    print_iter(self.k, self.xk, self.fk, print_prefix=self.print_prefix)

            # Get xk+1 (new parameter function).
            try:
                self.new_param_func()
            except LinAlgError as message:
                if isinstance(message.args[0], int):
                    text = message.args[1]
                else:
                    text = message.args[0]
                self.warning = "LinAlgError: " + text + " (fatal minimisation error)."
                break
            except OverflowError as message:
                if isinstance(message.args[0], int):
                    text = message.args[1]
                else:
                    text = message.args[0]
                self.warning = "OverflowError: " + text + " (fatal minimisation error)."
                break
            except NameError as message:
                self.warning = message.args[0] + " (fatal minimisation error)."
                break

            # Test for warnings.
            if self.warning is not None:
                break

            # Maximum number of iterations test.
            if self.k >= self.maxiter - 1:
                self.warning = "Maximum number of iterations reached"
                break

            # Convergence test.
            if self.conv_test(self.fk_new, self.fk, self.dfk_new):
                break

            # Update function.
            try:
                self.update()
            except OverflowError as message:
                if isinstance(message.args[0], int):
                    text = message.args[1]
                else:
                    text = message.args[0]
                self.warning = "OverflowError: " + text + " (fatal minimisation error)."
                break
            except NameError as message:
                if isinstance(message.args[0], int):
                    self.warning = message.args[1]
                else:
                    self.warning = message.args[0]
                break

            # Iteration counter update.
            self.k = self.k + 1
            if self.print_flag:
                self.k2 = self.k2 + 1

        if self.full_output:
            try:
                return self.xk_new, self.fk_new, self.k+1, self.f_count, self.g_count, self.h_count, self.warning
            except AttributeError:
                return self.xk, self.fk, self.k, self.f_count, self.g_count, self.h_count, self.warning
        else:
            try:
                return self.xk_new
            except AttributeError:
                return self.xk

    def setup_conv_tests(self):
        """Default base class for selecting the convergence tests."""

        if self.func_tol is not None and self.grad_tol is not None:
            self.conv_test = self.double_test
        elif self.func_tol is not None:
            self.conv_test = self.func_test
        elif self.grad_tol is not None:
            self.conv_test = self.grad_test
        else:
            print(self.print_prefix + "Convergence tests cannot be set up because both func_tol and grad_tol are set to None.")
            self.init_failure = 1
            return

    def update(self):
        """Default base class update function.

        xk+1 is shifted to xk
        fk+1 is shifted to fk
        """

        self.xk = self.xk_new * 1.0
        self.fk = self.fk_new


# The base class containing the generic line search functions.
##############################################################

class Line_search:
    def __init__(self):
        """Base class containing the generic line search functions."""

    def backline(self):
        """Function for running the backtracking line search."""

        self.alpha, fc = backtrack(self.func, self.args, self.xk, self.fk, self.dfk, self.pk, a_init=self.a0)
        self.f_count = self.f_count + fc

    def line_search_options(self, min_options):
        """Line search options.

        Function for sorting out the minimisation options when the only option can be a line search.
        """

        # Initialise.
        self.line_search_algor = None

        # Test if the options are a tuple.
        if not isinstance(min_options, tuple):
            print(self.print_prefix + "The minimisation options " + repr(min_options) + " are not a tuple.")
            self.init_failure = 1
            return

        # No more than one option is allowed.
        if len(min_options) > 1:
            print(self.print_prefix + "A maximum of one minimisation option is allowed (the line search algorithm).")
            self.init_failure = 1
            return

        # Sort out the minimisation options.
        for opt in min_options:
            if self.valid_line_search(opt):
                self.line_search_algor = opt
            else:
                print(self.print_prefix + "The minimisation option " + repr(opt) + " from " + repr(min_options) + " is not a valid line search algorithm.")
                self.init_failure = 1
                return

        # Default line search algorithm.
        if self.line_search_algor is None:
            self.line_search_algor = 'Back'

    def mt(self):
        """Function for running the More and Thuente line search."""

        self.alpha, fc, gc = more_thuente(self.func, self.dfunc, self.args, self.xk, self.fk, self.dfk, self.pk, a_init=self.a0, mu=self.mu, eta=self.eta, print_flag=0)
        self.f_count = self.f_count + fc
        self.g_count = self.g_count + gc

    def no_search(self):
        """Set alpha to alpha0."""

        self.alpha = self.a0

    def nwi(self):
        """Function for running the Nocedal and Wright interpolation based line search."""

        self.alpha, fc = nocedal_wright_interpol(self.func, self.args, self.xk, self.fk, self.dfk, self.pk, a_init=self.a0, mu=self.mu, print_flag=0)
        self.f_count = self.f_count + fc

    def nww(self):
        """Function for running the Nocedal and Wright line search for the Wolfe conditions."""

        self.alpha, fc, gc = nocedal_wright_wolfe(self.func, self.dfunc, self.args, self.xk, self.fk, self.dfk, self.pk, a_init=self.a0, mu=self.mu, eta=self.eta, print_flag=0)
        self.f_count = self.f_count + fc
        self.g_count = self.g_count + gc

    def setup_line_search(self):
        """Initialise the line search function."""

        if self.line_search_algor is None:
            self.init_failure = 1
            return
        elif match('^[Bb]ack', self.line_search_algor):
            if self.print_flag:
                print(self.print_prefix + "Line search: Backtracking line search.")
            self.line_search = self.backline
        elif match('^[Nn]ocedal[ _][Ww]right[ _][Ii]nt', self.line_search_algor) or match('^[Nn][Ww][Ii]', self.line_search_algor):
            if self.print_flag:
                print(self.print_prefix + "Line search: Nocedal and Wright interpolation based line search.")
            self.line_search = self.nwi
        elif match('^[Nn]ocedal[ _][Ww]right[ _][Ww]olfe', self.line_search_algor) or match('^[Nn][Ww][Ww]', self.line_search_algor):
            if self.print_flag:
                print(self.print_prefix + "Line search: Nocedal and Wright line search for the Wolfe conditions.")
            self.line_search = self.nww
        elif match('^[Mm]ore[ _][Tt]huente$', self.line_search_algor) or match('^[Mm][Tt]', self.line_search_algor):
            if self.print_flag:
                print(self.print_prefix + "Line search: More and Thuente line search.")
            self.line_search = self.mt
        elif match('^[Nn]o [Ll]ine [Ss]earch$', self.line_search_algor):
            if self.print_flag:
                print(self.print_prefix + "Line search: No line search.")
            self.line_search = self.no_search

    def valid_line_search(self, type):
        """Test if the string 'type' is a valid line search algorithm."""

        if type is None:
            return 0
        elif match('^[Bb]ack', type) or match('^[Nn]ocedal[ _][Ww]right[ _][Ii]nt', type) or match('^[Nn][Ww][Ii]', type) or match('^[Nn]ocedal[ _][Ww]right[ _][Ww]olfe', type) or match('^[Nn][Ww][Ww]', type) or match('^[Mm]ore[ _][Tt]huente$', type) or match('^[Mm][Tt]', type) or match('^[Nn]o [Ll]ine [Ss]earch$', type):
            return 1
        else:
            return 0


# The base class containing the generic trust-region functions.
###############################################################

class Trust_region:
    def __init__(self):
        """Base class containing the generic trust-region functions."""

    def trust_region_update(self):
        """An algorithm for trust region radius selection.

        Page 68 of 'Numerical Optimization' by Jorge Nocedal and Stephen J. Wright, 1999.

        First calculate rho using the formula::

                    f(xk) - f(xk + pk)
            rho  =  ------------------,
                      mk(0) - mk(pk)

        where the numerator is called the actual reduction and the denominator is the predicted reduction.  Secondly choose the trust region radius for the next iteration.  Finally decide if xk+1 should be shifted to xk.
        """

        # Actual reduction.
        act_red = self.fk - self.fk_new

        # Predicted reduction.
        pred_red = - dot(self.dfk, self.pk) - 0.5 * dot(self.pk, dot(self.d2fk, self.pk))

        # Rho.
        if pred_red == 0.0:
            self.rho = 1e99
        else:
            self.rho = act_red / pred_red

        # Calculate the Euclidean norm of pk.
        self.norm_pk = sqrt(dot(self.pk, self.pk))

        if self.print_flag >= 2:
            print(self.print_prefix + "Actual reduction: " + repr(act_red))
            print(self.print_prefix + "Predicted reduction: " + repr(pred_red))
            print(self.print_prefix + "rho: " + repr(self.rho))
            print(self.print_prefix + "||pk||: " + repr(self.norm_pk))

        # Rho is close to zero or negative, therefore the trust region is shrunk.
        if self.rho < 0.25 or pred_red < 0.0:
            self.delta = 0.25 * self.delta
            if self.print_flag >= 2:
                print(self.print_prefix + "Shrinking the trust region.")

        # Rho is close to one and pk has reached the boundary of the trust region, therefore the trust region is expanded.
        elif self.rho > 0.75 and abs(self.norm_pk - self.delta) < 1e-5:
            self.delta = min(2.0*self.delta, self.delta_max)
            if self.print_flag >= 2:
                print(self.print_prefix + "Expanding the trust region.")

        # Rho is positive but not close to one, therefore the trust region is unaltered.
        else:
            if self.print_flag >= 2:
                print(self.print_prefix + "Trust region is unaltered.")

        if self.print_flag >= 2:
            print(self.print_prefix + "New trust region: " + repr(self.delta))

        # Choose the position for the next iteration.
        if self.rho > self.eta and pred_red > 0.0:
            self.shift_flag = 1
            if self.print_flag >= 2:
                print(self.print_prefix + "rho > eta, " + repr(self.rho) + " > " + repr(self.eta))
                print(self.print_prefix + "Moving to, self.xk_new: " + repr(self.xk_new))
        else:
            self.shift_flag = 0
            if self.print_flag >= 2:
                print(self.print_prefix + "rho < eta, " + repr(self.rho) + " < " + repr(self.eta))
                print(self.print_prefix + "Not moving, self.xk: " + repr(self.xk))


# The base class containing the generic conjugate gradient functions.
#####################################################################

class Conjugate_gradient:
    def __init__(self):
        """Class containing the non-specific conjugate gradient code."""

    def new_param_func(self):
        """The new parameter function.

        Do a line search then calculate xk+1, fk+1, and gk+1.
        """

        # Line search.
        self.line_search()

        # Find the new parameter vector and function value at that point.
        self.xk_new = self.xk + self.alpha * self.pk
        self.fk_new, self.f_count = self.func(*(self.xk_new,)+self.args), self.f_count + 1
        self.dfk_new, self.g_count = self.dfunc(*(self.xk_new,)+self.args), self.g_count + 1

        if self.print_flag >= 2:
            print(self.print_prefix + "New param func:")
            print(self.print_prefix + "\ta: " + repr(self.alpha))
            print(self.print_prefix + "\tpk: " + repr(self.pk))
            print(self.print_prefix + "\txk: " + repr(self.xk))
            print(self.print_prefix + "\txk+1: " + repr(self.xk_new))
            print(self.print_prefix + "\tfk: " + repr(self.fk))
            print(self.print_prefix + "\tfk+1: " + repr(self.fk_new))
            print(self.print_prefix + "\tgk: " + repr(self.dfk))
            print(self.print_prefix + "\tgk+1: " + repr(self.dfk_new))

    def old_cg_conv_test(self):
        """Convergence tests.

        This is old code implementing the conjugate gradient convergence test given on page 124 of 'Numerical Optimization' by Jorge Nocedal and Stephen J. Wright, 1999.  This function is currently unused.
        """

        inf_norm = 0.0
        for i in range(len(self.dfk)):
            inf_norm = max(inf_norm, abs(self.dfk[i]))
        if inf_norm < self.grad_tol * (1.0 + abs(self.fk)):
            return 1
        elif self.fk_new - self.fk == 0.0:
            self.warning = "Function tol of zero reached."
            return 1

    def update(self):
        """Function to update the function value, gradient vector, and search direction."""

        # Gradient dot product at k+1.
        self.dot_dfk_new = dot(self.dfk_new, self.dfk_new)

        # Calculate beta at k+1.
        bk_new = self.calc_bk()

        # Restarts.
        if abs(dot(self.dfk_new, self.dfk)) / self.dot_dfk_new >= 0.1:
            if self.print_flag >= 2:
                print(self.print_prefix + "Restarting.")
            bk_new = 0

        # Calculate pk+1.
        self.pk_new = -self.dfk_new + bk_new * self.pk

        if self.print_flag >= 2:
            print(self.print_prefix + "Update func:")
            print(self.print_prefix + "\tpk: " + repr(self.pk))
            print(self.print_prefix + "\tpk+1: " + repr(self.pk_new))

        # Update.
        self.xk = self.xk_new * 1.0
        self.fk = self.fk_new
        self.dfk = self.dfk_new * 1.0
        self.pk = self.pk_new * 1.0
        self.dot_dfk = self.dot_dfk_new


# The base class containing the Hessian modifications.
######################################################

class Hessian_mods:
    def __init__(self):
        """Base class containing the generic Hessian modification functions."""

    def cholesky_mod(self, return_matrix=0):
        """Function for running the Cholesky Hessian modification."""

        return cholesky_mod(self.dfk, self.d2fk, self.I, self.n, self.print_prefix, self.print_flag, return_matrix)

    def eigenvalue(self, return_matrix=0):
        """Function for running the eigenvalue Hessian modification."""

        return eigenvalue(self.dfk, self.d2fk, self.I, self.print_prefix, self.print_flag, return_matrix)

    def gmw(self, return_matrix=0):
        """Function for running the Gill, Murray, and Wright modified Cholesky algorithm."""

        return gmw(self.dfk, self.d2fk, self.I, self.n, self.mach_acc, self.print_prefix, self.print_flag, return_matrix)

    def gmw_old(self, return_matrix=0):
        """Function for running the old implementation of the Gill, Murray, and Wright modified Cholesky algorithm."""

        return gmw_old(self.dfk, self.d2fk, self.I, self.n, self.mach_acc, self.print_prefix, self.print_flag, return_matrix)

    def se99(self, return_matrix=0):
        """Function for running the Schnabel and Eskow 1999 modified Cholesky algorithm."""

        return se99(self.dfk, self.d2fk, self.I, self.n, self.tau, self.tau_bar, self.mu, self.print_prefix, self.print_flag, return_matrix)

    def setup_hessian_mod(self):
        """Initialise the Hessian modification functions."""

        # Unmodified Hessian.
        if self.hessian_mod is None or match('^[Nn]o [Hh]essian [Mm]od', self.hessian_mod):
            if self.print_flag:
                print(self.print_prefix + "Hessian modification: Unmodified Hessian.")
            self.get_pk = self.unmodified_hessian

        # Eigenvalue modification.
        elif match('^[Ee]igen', self.hessian_mod):
            if self.print_flag:
                print(self.print_prefix + "Hessian modification: Eigenvalue modification.")
            self.get_pk = self.eigenvalue

        # Cholesky with added multiple of the identity.
        elif match('^[Cc]hol', self.hessian_mod):
            if self.print_flag:
                print(self.print_prefix + "Hessian modification: Cholesky with added multiple of the identity.")
            self.get_pk = self.cholesky_mod

        # The Gill, Murray, and Wright modified Cholesky algorithm.
        elif match('^[Gg][Mm][Ww]$', self.hessian_mod):
            if self.print_flag:
                print(self.print_prefix + "Hessian modification: The Gill, Murray, and Wright modified Cholesky algorithm.")
            self.get_pk = self.gmw

        # The old implementation of the Gill, Murray, and Wright modified Cholesky algorithm.
        elif match('^[Gg][Mm][Ww][ -_]old', self.hessian_mod):
            if self.print_flag:
                print(self.print_prefix + "Hessian modification: The old Gill, Murray, and Wright modified Cholesky algorithm.")
            self.get_pk = self.gmw_old

        # The revised modified Cholesky factorisation algorithm of Schnabel and Eskow, 1999.
        elif match('^[Ss][Ee]99', self.hessian_mod):
            if self.print_flag:
                print(self.print_prefix + "Hessian modification: The Schnabel and Eskow 1999 algorithm.")
            self.tau = self.mach_acc ** (1.0/3.0)
            self.tau_bar = self.mach_acc ** (2.0/3.0)
            self.mu = 0.1
            self.get_pk = self.se99

    def unmodified_hessian(self, return_matrix=0):
        """Calculate the pure Newton direction."""

        if return_matrix:
            return -dot(inv(self.d2fk), self.dfk), self.d2fk
        else:
            return -dot(inv(self.d2fk), self.dfk)

    def valid_hessian_mod(self, mod):
        """Test if the string 'mod' is a valid Hessian modification."""

        if mod is None or match('^[Ee]igen', mod) or match('^[Cc]hol', mod) or match('^[Gg][Mm][Ww]$', mod) or match('^[Gg][Mm][Ww][ -_]old', mod) or match('^[Ss][Ee]99', mod) or match('^[Nn]o [Hh]essian [Mm]od', mod):
            return 1
        else:
            return 0
