Package minimise :: Module base_classes
[hide private]
[frames] | [no frames]

Source Code for Module minimise.base_classes

  1  ############################################################################### 
  2  #                                                                             # 
  3  # Copyright (C) 2003-2005 Edward d'Auvergne                                   # 
  4  #                                                                             # 
  5  # This file is part of the program relax.                                     # 
  6  #                                                                             # 
  7  # relax is free software; you can redistribute it and/or modify               # 
  8  # it under the terms of the GNU General Public License as published by        # 
  9  # the Free Software Foundation; either version 2 of the License, or           # 
 10  # (at your option) any later version.                                         # 
 11  #                                                                             # 
 12  # relax is distributed in the hope that it will be useful,                    # 
 13  # but WITHOUT ANY WARRANTY; without even the implied warranty of              # 
 14  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the               # 
 15  # GNU General Public License for more details.                                # 
 16  #                                                                             # 
 17  # You should have received a copy of the GNU General Public License           # 
 18  # along with relax; if not, write to the Free Software                        # 
 19  # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA   # 
 20  #                                                                             # 
 21  ############################################################################### 
 22   
 23   
 24  # This module contains the following base classes: 
 25  #    Min:                The base class containing the main iterative minimisation loop and 
 26  #        a few other base class functions. 
 27  #    Line_search:        The base class containing the generic line search functions. 
 28  #    Trust_region:       The base class containing the generic trust-region functions. 
 29  #    Conjugate_gradient: The base class containing the generic conjugate gradient functions. 
 30   
 31   
 32  # Inbuilt python modules. 
 33  ######################### 
 34   
 35  from LinearAlgebra import LinAlgError, inverse 
 36  from Numeric import dot, matrixmultiply, sqrt 
 37  from re import match 
 38   
 39   
 40  # Line search functions. 
 41  ######################## 
 42   
 43  from line_search.backtrack import backtrack 
 44  from line_search.nocedal_wright_interpol import nocedal_wright_interpol 
 45  from line_search.nocedal_wright_wolfe import nocedal_wright_wolfe 
 46  from line_search.more_thuente import more_thuente 
 47   
 48   
 49  # Hessian modifications. 
 50  ######################## 
 51   
 52  from hessian_mods.eigenvalue import eigenvalue 
 53  from hessian_mods.cholesky import cholesky 
 54  from hessian_mods.gmw81 import gmw 
 55  from hessian_mods.gmw81_old import gmw_old 
 56  from hessian_mods.se99 import se99 
 57   
 58   
 59  # The generic minimisation base class (containing the main iterative loop). 
 60  ########################################################################### 
 61   
class Min:
    def __init__(self):
        """Base class containing the main minimisation iterative loop algorithm.

        The algorithm is defined in the minimise function.  Also supplied are generic setup,
        convergence tests, and update functions.

        Subclasses are expected to supply (at minimum) new_param_func() and the attributes read
        below: func_tol, grad_tol, print_flag, print_prefix, maxiter, warning, full_output,
        xk, fk, f_count, g_count and h_count.
        """


    def double_test(self, fk_new, fk, gk):
        """Default base class function for both function and gradient convergence tests.

        Test if the minimum function tolerance between fk and fk+1 has been reached as well as if
        the minimum gradient tolerance has been reached.

        Returns 1 when either test passes; otherwise falls through and implicitly returns None
        (falsy), which the caller treats as 'not converged'.
        """

        # Test the function tolerance.
        if abs(fk_new - fk) <= self.func_tol:
            if self.print_flag >= 2:
                print "\n" + self.print_prefix + "Function tolerance reached."
                print self.print_prefix + "fk: " + `fk`
                print self.print_prefix + "fk+1: " + `fk_new`
                print self.print_prefix + "|fk+1 - fk|: " + `abs(fk_new - fk)`
                print self.print_prefix + "tol: " + `self.func_tol`
            return 1

        # Test the gradient tolerance (Euclidean norm of the gradient).
        elif sqrt(dot(gk, gk)) <= self.grad_tol:
            if self.print_flag >= 2:
                print "\n" + self.print_prefix + "Gradient tolerance reached."
                print self.print_prefix + "gk+1: " + `gk`
                print self.print_prefix + "||gk+1||: " + `sqrt(dot(gk, gk))`
                print self.print_prefix + "tol: " + `self.grad_tol`
            return 1


    def func_test(self, fk_new, fk, gk):
        """Default base class function for the function convergence test.

        Test if the minimum function tolerance between fk and fk+1 has been reached.

        Returns 1 on convergence, None (falsy) otherwise.  The gk argument is unused but kept so
        that all convergence tests share the same call signature (see setup_conv_tests).
        """

        # Test the function tolerance.
        if abs(fk_new - fk) <= self.func_tol:
            if self.print_flag >= 2:
                print "\n" + self.print_prefix + "Function tolerance reached."
                print self.print_prefix + "fk: " + `fk`
                print self.print_prefix + "fk+1: " + `fk_new`
                print self.print_prefix + "|fk+1 - fk|: " + `abs(fk_new - fk)`
                print self.print_prefix + "tol: " + `self.func_tol`
            return 1


    def grad_test(self, fk_new, fk, gk):
        """Default base class function for the gradient convergence test.

        Test if the minimum gradient tolerance has been reached.  Minimisation will also terminate
        if the function value difference between fk and fk+1 is zero.  This modification is
        essential for the quasi-Newton techniques.

        Returns 1 on convergence, None (falsy) otherwise.
        """

        # Test the gradient tolerance (Euclidean norm of the gradient).
        if sqrt(dot(gk, gk)) <= self.grad_tol:
            if self.print_flag >= 2:
                print "\n" + self.print_prefix + "Gradient tolerance reached."
                print self.print_prefix + "gk+1: " + `gk`
                print self.print_prefix + "||gk+1||: " + `sqrt(dot(gk, gk))`
                print self.print_prefix + "tol: " + `self.grad_tol`
            return 1

        # No change in function value (prevents the minimiser from iterating without moving).
        elif fk_new - fk == 0.0:
            if self.print_flag >= 2:
                print "\n" + self.print_prefix + "Function difference of zero."
                print self.print_prefix + "fk_new - fk = 0.0"
            return 1


    def hessian_type_and_mod(self, min_options, default_type='Newton', default_mod='GMW'):
        """Hessian type and modification options.

        Function for sorting out the minimisation options when either the Hessian type or Hessian
        modification can be selected.

        On any invalid input, self.init_failure is set to 1 and the function returns early;
        otherwise self.hessian_type and self.hessian_mod are set from min_options or defaults.
        """

        # Initialise.
        self.hessian_type = None
        self.hessian_mod = None

        # Test if the options are a tuple.
        if type(min_options) != tuple:
            print self.print_prefix + "The minimisation options " + `min_options` + " is not a tuple."
            self.init_failure = 1
            return

        # Test that no more than 2 options are given.
        if len(min_options) > 2:
            print self.print_prefix + "A maximum of two minimisation options is allowed (the Hessian type and Hessian modification)."
            self.init_failure = 1
            return

        # Sort out the minimisation options.  The first unclaimed option matching a Hessian type
        # ('BFGS' or 'Newton', case-insensitive) is the type; otherwise it must be a valid mod.
        for opt in min_options:
            if self.hessian_type == None and opt != None and (match('[Bb][Ff][Gg][Ss]', opt) or match('[Nn]ewton', opt)):
                self.hessian_type = opt
            elif self.hessian_mod == None and self.valid_hessian_mod(opt):
                self.hessian_mod = opt
            else:
                print self.print_prefix + "The minimisation option " + `opt` + " from " + `min_options` + " is neither a valid Hessian type or modification."
                self.init_failure = 1
                return

        # Default Hessian type.
        if self.hessian_type == None:
            self.hessian_type = default_type

        # Make sure that no Hessian modification is used with the BFGS matrix.
        if match('[Bb][Ff][Gg][Ss]', self.hessian_type) and self.hessian_mod != None:
            print self.print_prefix + "When using the BFGS matrix, Hessian modifications should not be used."
            self.init_failure = 1
            return

        # Default Hessian modification when the Hessian type is Newton.
        # NOTE(review): this deliberately leaves the modification as None rather than applying
        # default_mod — the default_mod assignment is commented out, so the default_mod argument
        # is currently unused.
        if match('[Nn]ewton', self.hessian_type) and self.hessian_mod == None:
            self.hessian_mod = None
            #self.hessian_mod = default_mod

        # Print the Hessian type info.
        if self.print_flag:
            if match('[Bb][Ff][Gg][Ss]', self.hessian_type):
                print self.print_prefix + "Hessian type: BFGS"
            else:
                print self.print_prefix + "Hessian type: Newton"


    def minimise(self):
        """Main minimisation iterative loop algorithm.

        This algorithm is designed to be compatible with all iterative minimisers.  The outline is:

        k = 0
        while 1:
            New parameter function
            Convergence tests
            Update function
            k = k + 1

        Returns either (xk, fk, k, f_count, g_count, h_count, warning) when self.full_output is
        true, or just the parameter vector otherwise.  Any fatal numerical error terminates the
        loop and is reported through self.warning rather than being raised.
        """

        # Start the iteration counter.
        self.k = 0
        if self.print_flag:
            # k2 is a secondary counter so that, at print_flag 1, only every 100th iteration is
            # printed.
            self.k2 = 0
            print "" # Print a new line.

        # Iterate until the local minima is found.
        while 1:
            # Print out.
            if self.print_flag:
                out = 0
                if self.print_flag >= 2:
                    print "\n" + self.print_prefix + "Main iteration k=" + `self.k`
                    out = 1
                else:
                    if self.k2 == 100:
                        self.k2 = 0
                    if self.k2 == 0:
                        out = 1
                if out == 1:
                    print self.print_prefix + "%-3s%-8i%-4s%-65s %-4s%-20s" % ("k:", self.k, "xk:", `self.xk`, "fk:", `self.fk`)

            # Get xk+1 (new parameter function).
            # NOTE(review): the first except clause catches a Python 2 string exception
            # (presumably raised by older numerical code); the LinAlgError clause handles the
            # class-based exception from the LinearAlgebra module.
            try:
                self.new_param_func()
            except "LinearAlgebraError", message:
                self.warning = "LinearAlgebraError: " + message + " (fatal minimisation error)."
                break
            except LinAlgError, message:
                # The exception args may be (code, text) or just (text,).
                if type(message.args[0]) == int:
                    text = message.args[1]
                else:
                    text = message.args[0]
                self.warning = "LinearAlgebraError: " + text + " (fatal minimisation error)."
                break
            except OverflowError, message:
                if type(message.args[0]) == int:
                    text = message.args[1]
                else:
                    text = message.args[0]
                self.warning = "OverflowError: " + text + " (fatal minimisation error)."
                break
            except NameError, message:
                self.warning = message.args[0] + " (fatal minimisation error)."
                break

            # Test for warnings.
            if self.warning != None:
                break

            # Maximum number of iteration test.
            if self.k >= self.maxiter - 1:
                self.warning = "Maximum number of iterations reached"
                break

            # Convergence test (conv_test is bound by setup_conv_tests).
            if self.conv_test(self.fk_new, self.fk, self.dfk_new):
                break

            # Update function.
            try:
                self.update()
            except OverflowError, message:
                if type(message.args[0]) == int:
                    text = message.args[1]
                else:
                    text = message.args[0]
                self.warning = "OverflowError: " + text + " (fatal minimisation error)."
                break
            except NameError, message:
                if type(message.args[0]) == int:
                    self.warning = message.args[1]
                else:
                    self.warning = message.args[0]
                break

            # Iteration counter update.
            self.k = self.k + 1
            if self.print_flag:
                self.k2 = self.k2 + 1

        # Return the best parameters found.  xk_new may not exist if the very first
        # new_param_func() call failed, hence the AttributeError fallback to xk.
        if self.full_output:
            try:
                return self.xk_new, self.fk_new, self.k+1, self.f_count, self.g_count, self.h_count, self.warning
            except AttributeError:
                return self.xk, self.fk, self.k, self.f_count, self.g_count, self.h_count, self.warning
        else:
            try:
                return self.xk_new
            except AttributeError:
                return self.xk


    def setup_conv_tests(self):
        """Default base class for selecting the convergence tests.

        Binds self.conv_test to double_test, func_test, or grad_test depending on which of
        func_tol and grad_tol are set.  Sets self.init_failure to 1 if both are None.
        """

        if self.func_tol != None and self.grad_tol != None:
            self.conv_test = self.double_test
        elif self.func_tol != None:
            self.conv_test = self.func_test
        elif self.grad_tol != None:
            self.conv_test = self.grad_test
        else:
            print self.print_prefix + "Convergence tests cannot be setup because both func_tol and grad_tol are set to None."
            self.init_failure = 1
            return


    def update(self):
        """Default base class update function.

        xk+1 is shifted to xk
        fk+1 is shifted to fk
        """

        # Multiplying by 1.0 forces a copy of the Numeric array rather than binding a reference —
        # presumably so later in-place changes to xk_new do not alias xk (TODO confirm).
        self.xk = self.xk_new * 1.0
        self.fk = self.fk_new
327 328 329 330 331 332 # The base class containing the generic line search functions. 333 ############################################################## 334
class Line_search:
    def __init__(self):
        """Base class containing the generic line search functions.

        Subclasses are expected to supply the attributes read by the search wrappers: func,
        dfunc, args, xk, fk, dfk, pk, a0, mu, eta, f_count, g_count, print_prefix, print_flag.
        """


    def backline(self):
        """Function for running the backtracking line search.

        Sets self.alpha to the step length and accumulates the function call count.
        """

        self.alpha, fc = backtrack(self.func, self.args, self.xk, self.fk, self.dfk, self.pk, a_init=self.a0)
        self.f_count = self.f_count + fc


    def line_search_options(self, min_options):
        """Line search options.

        Function for sorting out the minimisation options when the only option can be a line search.

        On invalid input self.init_failure is set to 1 and the function returns early;
        otherwise self.line_search_algor is set (defaulting to 'Back').
        """

        # Initialise.
        self.line_search_algor = None

        # Test if the options are a tuple.
        if type(min_options) != tuple:
            print self.print_prefix + "The minimisation options " + `min_options` + " is not a tuple."
            self.init_failure = 1
            return

        # No more than one option is allowed.
        if len(min_options) > 1:
            print self.print_prefix + "A maximum of one minimisation options is allowed (the line search algorithm)."
            self.init_failure = 1
            return

        # Sort out the minimisation options.
        for opt in min_options:
            if self.valid_line_search(opt):
                self.line_search_algor = opt
            else:
                print self.print_prefix + "The minimisation option " + `opt` + " from " + `min_options` + " is not a valid line search algorithm."
                self.init_failure = 1
                return

        # Default line search algorithm.
        if self.line_search_algor == None:
            self.line_search_algor = 'Back'


    def mt(self):
        """Function for running the More and Thuente line search.

        Sets self.alpha and accumulates both the function and gradient call counts.
        """

        self.alpha, fc, gc = more_thuente(self.func, self.dfunc, self.args, self.xk, self.fk, self.dfk, self.pk, a_init=self.a0, mu=self.mu, eta=self.eta, print_flag=0)
        self.f_count = self.f_count + fc
        self.g_count = self.g_count + gc


    def no_search(self):
        """Set alpha to alpha0 (i.e. perform no line search at all)."""

        self.alpha = self.a0


    def nwi(self):
        """Function for running the Nocedal and Wright interpolation based line search."""

        self.alpha, fc = nocedal_wright_interpol(self.func, self.args, self.xk, self.fk, self.dfk, self.pk, a_init=self.a0, mu=self.mu, print_flag=0)
        self.f_count = self.f_count + fc


    def nww(self):
        """Function for running the Nocedal and Wright line search for the Wolfe conditions."""

        self.alpha, fc, gc = nocedal_wright_wolfe(self.func, self.dfunc, self.args, self.xk, self.fk, self.dfk, self.pk, a_init=self.a0, mu=self.mu, eta=self.eta, print_flag=0)
        self.f_count = self.f_count + fc
        self.g_count = self.g_count + gc


    def setup_line_search(self):
        """The line search function.

        Binds self.line_search to the wrapper matching self.line_search_algor.  The algorithm
        names are matched case-insensitively via regular expressions (e.g. 'Back', 'NWI', 'NWW',
        'MT', 'More Thuente', 'None').  Sets self.init_failure to 1 if no algorithm is set.
        """

        if self.line_search_algor == None:
            self.init_failure = 1
            return
        elif match('^[Bb]ack', self.line_search_algor):
            if self.print_flag:
                print self.print_prefix + "Line search: Backtracking line search."
            self.line_search = self.backline
        elif match('^[Nn]ocedal[ _][Ww]right[ _][Ii]nt', self.line_search_algor) or match('^[Nn][Ww][Ii]', self.line_search_algor):
            if self.print_flag:
                print self.print_prefix + "Line search: Nocedal and Wright interpolation based line search."
            self.line_search = self.nwi
        elif match('^[Nn]ocedal[ _][Ww]right[ _][Ww]olfe', self.line_search_algor) or match('^[Nn][Ww][Ww]', self.line_search_algor):
            if self.print_flag:
                print self.print_prefix + "Line search: Nocedal and Wright line search for the Wolfe conditions."
            self.line_search = self.nww
        elif match('^[Mm]ore[ _][Tt]huente$', self.line_search_algor) or match('^[Mm][Tt]', self.line_search_algor):
            if self.print_flag:
                print self.print_prefix + "Line search: More and Thuente line search."
            self.line_search = self.mt
        elif match('^[Nn]one$', self.line_search_algor):
            if self.print_flag:
                print self.print_prefix + "Line search: No line search."
            self.line_search = self.no_search


    def valid_line_search(self, type):
        """Test if the string 'type' is a valid line search algorithm.

        Returns 1 for a recognised algorithm name, 0 otherwise (including None).
        """

        if type == None:
            return 0
        elif match('^[Bb]ack', type) or match('^[Nn]ocedal[ _][Ww]right[ _][Ii]nt', type) or match('^[Nn][Ww][Ii]', type) or match('^[Nn]ocedal[ _][Ww]right[ _][Ww]olfe', type) or match('^[Nn][Ww][Ww]', type) or match('^[Mm]ore[ _][Tt]huente$', type) or match('^[Mm][Tt]', type) or match('^[Nn]one$', type):
            return 1
        else:
            return 0
448 449 450 451 452 453 # The base class containing the generic trust-region functions. 454 ############################################################### 455
class Trust_region:
    def __init__(self):
        """Base class containing the generic trust-region functions."""


    def trust_region_update(self):
        """An algorithm for trust region radius selection.

        Page 68 from 'Numerical Optimization' by Jorge Nocedal and Stephen J. Wright, 1999, 2nd ed.

        First calculate rho using the formula:

                    f(xk) - f(xk + pk)
            rho  =  ------------------
                      mk(0) - mk(pk)

        Where the numerator is called the actual reduction and the denominator is the predicted reduction.

        Secondly choose the trust region radius for the next iteration.
        Finally decide if xk+1 should be shifted to xk (recorded in self.shift_flag).
        """

        # Actual reduction.
        act_red = self.fk - self.fk_new

        # Predicted reduction, from the quadratic model mk(0) - mk(pk).
        pred_red = - dot(self.dfk, self.pk) - 0.5 * dot(self.pk, dot(self.d2fk, self.pk))

        # Rho.  A zero predicted reduction would divide by zero, so rho is pushed to a huge
        # value instead.
        if pred_red == 0.0:
            self.rho = 1e99
        else:
            self.rho = act_red / pred_red

        # Calculate the Euclidean norm of pk.
        self.norm_pk = sqrt(dot(self.pk, self.pk))

        if self.print_flag >= 2:
            print self.print_prefix + "Actual reduction: " + `act_red`
            print self.print_prefix + "Predicted reduction: " + `pred_red`
            print self.print_prefix + "rho: " + `self.rho`
            print self.print_prefix + "||pk||: " + `self.norm_pk`

        # Rho is close to zero or negative, therefore the trust region is shrunk.
        if self.rho < 0.25 or pred_red < 0.0:
            self.delta = 0.25 * self.delta
            if self.print_flag >= 2:
                print self.print_prefix + "Shrinking the trust region."

        # Rho is close to one and pk has reached the boundary of the trust region, therefore the trust region is expanded.
        elif self.rho > 0.75 and abs(self.norm_pk - self.delta) < 1e-5:
            self.delta = min(2.0*self.delta, self.delta_max)
            if self.print_flag >= 2:
                print self.print_prefix + "Expanding the trust region."

        # Rho is positive but not close to one, therefore the trust region is unaltered.
        else:
            if self.print_flag >= 2:
                print self.print_prefix + "Trust region is unaltered."

        if self.print_flag >= 2:
            print self.print_prefix + "New trust region: " + `self.delta`

        # Choose the position for the next iteration: only accept the step when rho exceeds the
        # eta threshold and the model actually predicted a decrease.
        if self.rho > self.eta and pred_red > 0.0:
            self.shift_flag = 1
            if self.print_flag >= 2:
                print self.print_prefix + "rho > eta, " + `self.rho` + " > " + `self.eta`
                print self.print_prefix + "Moving to, self.xk_new: " + `self.xk_new`
        else:
            self.shift_flag = 0
            if self.print_flag >= 2:
                print self.print_prefix + "rho < eta, " + `self.rho` + " < " + `self.eta`
                print self.print_prefix + "Not moving, self.xk: " + `self.xk`
530 531 532 533 534 535 # The base class containing the generic conjugate gradient functions. 536 ##################################################################### 537
class Conjugate_gradient:
    def __init__(self):
        """Class containing the non-specific conjugate gradient code.

        Subclasses are expected to supply calc_bk() (the beta formula, e.g. Fletcher-Reeves or
        Polak-Ribiere) together with the attributes read below.
        """


    def new_param_func(self):
        """The new parameter function.

        Do a line search then calculate xk+1, fk+1, and gk+1.
        """

        # Line search (bound by Line_search.setup_line_search; sets self.alpha).
        self.line_search()

        # Find the new parameter vector and function value at that point, counting the function
        # and gradient calls.
        self.xk_new = self.xk + self.alpha * self.pk
        self.fk_new, self.f_count = apply(self.func, (self.xk_new,)+self.args), self.f_count + 1
        self.dfk_new, self.g_count = apply(self.dfunc, (self.xk_new,)+self.args), self.g_count + 1

        if self.print_flag >= 2:
            print self.print_prefix + "New param func:"
            print self.print_prefix + "\ta: " + `self.alpha`
            print self.print_prefix + "\tpk: " + `self.pk`
            print self.print_prefix + "\txk: " + `self.xk`
            print self.print_prefix + "\txk+1: " + `self.xk_new`
            print self.print_prefix + "\tfk: " + `self.fk`
            print self.print_prefix + "\tfk+1: " + `self.fk_new`
            print self.print_prefix + "\tgk: " + `self.dfk`
            print self.print_prefix + "\tgk+1: " + `self.dfk_new`


    def old_cg_conv_test(self):
        """Convergence tests.

        This is old code implementing the conjugate gradient convergence test given on page 124 of
        'Numerical Optimization' by Jorge Nocedal and Stephen J. Wright, 1999, 2nd ed.  This
        function is currently unused.

        Returns 1 on convergence, None (falsy) otherwise.
        """

        # Infinity norm of the gradient, scaled by the function value.
        inf_norm = 0.0
        for i in xrange(len(self.dfk)):
            inf_norm = max(inf_norm, abs(self.dfk[i]))
        if inf_norm < self.grad_tol * (1.0 + abs(self.fk)):
            return 1
        elif self.fk_new - self.fk == 0.0:
            self.warning = "Function tol of zero reached."
            return 1


    def update(self):
        """Function to update the function value, gradient vector, and search direction.

        Calculates beta via self.calc_bk(), applies the restart criterion, computes the new
        conjugate direction pk+1, then shifts all k+1 quantities to k.
        """

        # Gradient dot product at k+1.
        self.dot_dfk_new = dot(self.dfk_new, self.dfk_new)

        # Calculate beta at k+1 (formula supplied by the subclass).
        bk_new = self.calc_bk()

        # Restarts: when successive gradients are far from orthogonal, reset to steepest descent
        # by zeroing beta.
        if abs(dot(self.dfk_new, self.dfk)) / self.dot_dfk_new >= 0.1:
            if self.print_flag >= 2:
                print self.print_prefix + "Restarting."
            bk_new = 0

        # Calculate pk+1.
        self.pk_new = -self.dfk_new + bk_new * self.pk

        if self.print_flag >= 2:
            print self.print_prefix + "Update func:"
            print self.print_prefix + "\tpk: " + `self.pk`
            print self.print_prefix + "\tpk+1: " + `self.pk_new`

        # Update.  Multiplying by 1.0 forces a copy of each Numeric array rather than binding a
        # reference — presumably to avoid aliasing between the k and k+1 arrays (TODO confirm).
        self.xk = self.xk_new * 1.0
        self.fk = self.fk_new
        self.dfk = self.dfk_new * 1.0
        self.pk = self.pk_new * 1.0
        self.dot_dfk = self.dot_dfk_new
616 617 618 619 620 # The base class containing the Hessian modifications. 621 ###################################################### 622
class Hessian_mods:
    def __init__(self):
        """Base class containing the generic Hessian modification functions.

        Subclasses are expected to supply the attributes read by the wrappers below: dfk, d2fk,
        I, n, mach_acc, print_prefix and print_flag.
        """


    def cholesky(self, return_matrix=0):
        """Function for running the Cholesky Hessian modification.

        Returns the Newton direction pk (and the modified Hessian too when return_matrix is set).
        """

        return cholesky(self.dfk, self.d2fk, self.I, self.n, self.print_prefix, self.print_flag, return_matrix)


    def eigenvalue(self, return_matrix=0):
        """Function for running the eigenvalue Hessian modification."""

        return eigenvalue(self.dfk, self.d2fk, self.I, self.print_prefix, self.print_flag, return_matrix)


    def gmw(self, return_matrix=0):
        """Function for running the Gill, Murray, and Wright modified Cholesky algorithm."""

        return gmw(self.dfk, self.d2fk, self.I, self.n, self.mach_acc, self.print_prefix, self.print_flag, return_matrix)


    def gmw_old(self, return_matrix=0):
        """Function for running the old implementation of the Gill, Murray, and Wright modified Cholesky algorithm."""

        return gmw_old(self.dfk, self.d2fk, self.I, self.n, self.mach_acc, self.print_prefix, self.print_flag, return_matrix)


    def se99(self, return_matrix=0):
        """Function for running the revised modified Cholesky algorithm of Schnabel and Eskow, 1999."""

        return se99(self.dfk, self.d2fk, self.I, self.n, self.tau, self.tau_bar, self.mu, self.print_prefix, self.print_flag, return_matrix)


    def setup_hessian_mod(self):
        """Initialise the Hessian modification functions.

        Binds self.get_pk to the wrapper matching self.hessian_mod (matched case-insensitively),
        and for SE99 precomputes the tau, tau_bar and mu constants from the machine accuracy.
        """

        # Unmodified Hessian.
        if self.hessian_mod == None or match('[Nn]one', self.hessian_mod):
            if self.print_flag:
                print self.print_prefix + "Hessian modification: Unmodified Hessian."
            self.get_pk = self.unmodified_hessian

        # Eigenvalue modification.
        elif match('^[Ee]igen', self.hessian_mod):
            if self.print_flag:
                print self.print_prefix + "Hessian modification: Eigenvalue modification."
            self.get_pk = self.eigenvalue

        # Cholesky with added multiple of the identity.
        elif match('^[Cc]hol', self.hessian_mod):
            if self.print_flag:
                print self.print_prefix + "Hessian modification: Cholesky with added multiple of the identity."
            self.get_pk = self.cholesky

        # The Gill, Murray, and Wright modified Cholesky algorithm.
        elif match('^[Gg][Mm][Ww]$', self.hessian_mod):
            if self.print_flag:
                print self.print_prefix + "Hessian modification: The Gill, Murray, and Wright modified Cholesky algorithm."
            self.get_pk = self.gmw

        # The old implementation of the Gill, Murray, and Wright modified Cholesky algorithm.
        elif match('^[Gg][Mm][Ww][ -_]old', self.hessian_mod):
            if self.print_flag:
                print self.print_prefix + "Hessian modification: The Gill, Murray, and Wright modified Cholesky algorithm."
            self.get_pk = self.gmw_old

        # The revised modified cholesky factorisation algorithm of Schnabel and Eskow, 99.
        elif match('^[Ss][Ee]99', self.hessian_mod):
            if self.print_flag:
                print self.print_prefix + "Hessian modification: The Schnabel and Eskow 1999 algorithm."
            self.tau = self.mach_acc ** (1.0/3.0)
            self.tau_bar = self.mach_acc ** (2.0/3.0)
            self.mu = 0.1
            self.get_pk = self.se99


    def unmodified_hessian(self, return_matrix=0):
        """Calculate the pure Newton direction.

        pk = -inverse(d2fk) . dfk, with no positive-definiteness safeguard.
        """

        if return_matrix:
            return -matrixmultiply(inverse(self.d2fk), self.dfk), self.d2fk
        else:
            return -matrixmultiply(inverse(self.d2fk), self.dfk)


    def valid_hessian_mod(self, mod):
        """Test if the string 'mod' is a valid Hessian modification.

        Returns 1 for None or a recognised modification name, 0 otherwise.
        """

        if mod == None or match('^[Ee]igen', mod) or match('^[Cc]hol', mod) or match('^[Gg][Mm][Ww]$', mod) or match('^[Gg][Mm][Ww][ -_]old', mod) or match('^[Ss][Ee]99', mod) or match('^[Nn]one', mod) or match('^[Nn]one', mod):
            return 1
        else:
            return 0
717