Package auto_analyses :: Module frame_order

Source Code for Module auto_analyses.frame_order

   1  ############################################################################### 
   2  #                                                                             # 
   3  # Copyright (C) 2011-2015,2017 Edward d'Auvergne                              # 
   4  #                                                                             # 
   5  # This file is part of the program relax (http://www.nmr-relax.com).          # 
   6  #                                                                             # 
   7  # This program is free software: you can redistribute it and/or modify        # 
   8  # it under the terms of the GNU General Public License as published by        # 
   9  # the Free Software Foundation, either version 3 of the License, or           # 
  10  # (at your option) any later version.                                         # 
  11  #                                                                             # 
  12  # This program is distributed in the hope that it will be useful,             # 
  13  # but WITHOUT ANY WARRANTY; without even the implied warranty of              # 
  14  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the               # 
  15  # GNU General Public License for more details.                                # 
  16  #                                                                             # 
  17  # You should have received a copy of the GNU General Public License           # 
  18  # along with this program.  If not, see <http://www.gnu.org/licenses/>.       # 
  19  #                                                                             # 
  20  ############################################################################### 
  21   
  22  # Module docstring. 
  23  """The automated frame order analysis protocol. 
  24   
  25   
  26  Usage 
  27  ===== 
  28   
  29  To use this analysis, you should import the following into your script: 
  30   
  31      - Frame_order_analysis:  This is a Python class which contains the automated protocol.  Initialising the class will execute the full analysis.  See its documentation for all the options it accepts. 
  32      - Optimisation_settings:  This is a Python class which is used to set up and store the optimisation settings used in the automated protocol.  This allows for grid searches, zooming grid searches, minimisation settings, quasi-random Sobol' numerical integration of the PCS, and SciPy quadratic numerical integration of the PCS to be specified. 
  33   
  34  See the sample scripts for examples of how these are used.  In addition, the following two functions provide summaries of the analysis: 
  35   
   36      - count_sobol_points:  This function will summarise the number of quasi-random Sobol' points used in the PCS numerical integration.  The table it creates is very useful for judging the quality of the current optimisation settings. 
  37      - summarise:  This function will summarise all of the current frame order results. 
  38   
   39  Both of these functions are called automatically at the end of the auto-analysis.  However, they can also be used in simple scripts to summarise the results while an analysis is progressing. 
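The following is a minimal sketch of a driver script for the analysis, to be executed with relax itself.  All pipe names, directories and optimisation values are placeholders, and the add_min() method used below is assumed to be defined later in the Optimisation_settings class (it is not part of the code shown on this page); the sample scripts remain the authoritative reference:

    # Import the analysis and settings classes.
    from auto_analyses.frame_order import Frame_order_analysis, Optimisation_settings

    # It is assumed that the 'full' and 'subset' data pipes have already been created and
    # filled with the RDC and PCS data (see the sample scripts for that part of the setup).

    # Settings for the rigid frame order model (no Sobol' integration is needed here).
    opt_rigid = Optimisation_settings()
    opt_rigid.add_grid(inc=21)
    opt_rigid.add_min(min_algor='simplex', func_tol=1e-4)

    # Optimisation settings for the PCS subset:  one grid search and one minimisation step.
    opt_subset = Optimisation_settings()
    opt_subset.add_grid(inc=11, sobol_max_points=100)
    opt_subset.add_min(min_algor='simplex', func_tol=1e-2, sobol_max_points=100)

    # Settings for the full data set and for the Monte Carlo simulation error analysis.
    opt_full = Optimisation_settings()
    opt_full.add_grid(inc=11, zoom=0, sobol_max_points=1000)
    opt_full.add_min(min_algor='simplex', func_tol=1e-4, sobol_max_points=1000)
    opt_mc = Optimisation_settings()
    opt_mc.add_min(min_algor='simplex', func_tol=1e-3, sobol_max_points=100)

    # Initialising the class executes the full analysis.
    Frame_order_analysis(data_pipe_full='full', data_pipe_subset='subset', pipe_bundle='frame order', results_dir='frame_order_results', opt_rigid=opt_rigid, opt_subset=opt_subset, opt_full=opt_full, opt_mc=opt_mc, mc_sim_num=100)

While that analysis is running, the two summary functions can be called from a separate, trivial script executed in the same directory:

    from auto_analyses.frame_order import count_sobol_points, summarise
    count_sobol_points()
    summarise()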
  40   
  41   
  42   
  43  The nested model parameter copying protocol 
  44  =========================================== 
  45   
   46  To allow the analysis to complete in under 1,000,000 years, this auto-analysis uses the trick of copying parameters from simpler nested models.  The protocol is split into four categories:  the average domain position, the pivot point, the motional eigenframe, and the order parameters.  The copying exploits the fact that the free rotor and torsionless models are the two extrema of the models with a restricted torsion angle, with sigma_max being pi and 0 respectively. 
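In practice, the models are therefore optimised in a fixed order so that each model can initialise its parameters from an already optimised, simpler model.  The sketch below simply mirrors the order list of the Frame_order_analysis.reorder_models() method further down this page, with rough, illustrative comments on what each model adds (the comments are not part of the module):

    order = [
        MODEL_RIGID,                        # Average domain position only.
        MODEL_ROTOR,                        # Adds the pivot, a single motional axis, and the torsion half-angle sigma_max.
        MODEL_ISO_CONE,                     # Adds the cone opening half-angle.
        MODEL_PSEUDO_ELLIPSE,               # Adds the full eigenframe and the second cone angle.
        MODEL_ISO_CONE_TORSIONLESS,         # Torsionless limit (sigma_max = 0).
        MODEL_PSEUDO_ELLIPSE_TORSIONLESS,   # Torsionless limit (sigma_max = 0).
        MODEL_FREE_ROTOR,                   # Free rotor limit (sigma_max = pi).
        MODEL_ISO_CONE_FREE_ROTOR,          # Free rotor limit (sigma_max = pi).
        MODEL_PSEUDO_ELLIPSE_FREE_ROTOR,    # Free rotor limit (sigma_max = pi).
        MODEL_DOUBLE_ROTOR,                 # Two motional axes, each with its own torsion angle.
    ]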
  47  """ 
  48   
  49   
  50  # Python module imports. 
  51  from math import pi 
  52  from numpy import float64, zeros 
  53  from os import F_OK, access, getcwd, sep 
  54  import sys 
  55  from time import time 
  56   
  57  # relax module imports. 
  58  from data_store import Relax_data_store; ds = Relax_data_store() 
  59  from lib.arg_check import is_bool, is_float, is_int, is_str 
  60  from lib.errors import RelaxError 
  61  from lib.frame_order.conversions import convert_axis_alpha_to_spherical 
  62  from lib.frame_order.variables import MODEL_DOUBLE_ROTOR, MODEL_FREE_ROTOR, MODEL_ISO_CONE, MODEL_ISO_CONE_FREE_ROTOR, MODEL_ISO_CONE_TORSIONLESS, MODEL_LIST, MODEL_LIST_FREE_ROTORS, MODEL_LIST_ISO_CONE, MODEL_LIST_NONREDUNDANT, MODEL_LIST_PSEUDO_ELLIPSE, MODEL_PSEUDO_ELLIPSE, MODEL_PSEUDO_ELLIPSE_FREE_ROTOR, MODEL_PSEUDO_ELLIPSE_TORSIONLESS, MODEL_RIGID, MODEL_ROTOR 
  63  from lib.geometry.angles import wrap_angles 
  64  from lib.geometry.coord_transform import spherical_to_cartesian 
  65  from lib.io import open_write_file 
  66  from lib.text.sectioning import subtitle, subsubtitle, title 
  67  from lib.text.table import MULTI_COL, format_table 
  68  from lib.timing import print_elapsed_time 
  69  from pipe_control import pipes, results 
  70  from pipe_control.mol_res_spin import return_spin, spin_loop 
  71  from pipe_control.structure.mass import pipe_centre_of_mass 
  72  from prompt.interpreter import Interpreter 
  73  from specific_analyses.frame_order.data import generate_pivot 
  74  from specific_analyses.frame_order import optimisation 
  75  from status import Status; status = Status() 
  76   
  77   
  78   
79 -def count_sobol_points(file_name='sobol_point_count', dir=None, force=True):
80 """Count the number of Sobol' points used for the PCS numerical integration. 81 82 This function can be used while a frame order analysis is running to summarise the results. It will create a table of the number of Sobol' points used, which can then be used to judge the quality of the integration. 83 84 85 @keyword file_name: The file to save the table into. 86 @type file_name: str 87 @keyword dir: The optional directory to place the file into. If specified, the results files will also be searched for in this directory. 88 @type dir: None or str 89 @keyword force: A flag which if True will cause any preexisting file to be overwritten. 90 @type force: bool 91 """ 92 93 # Store the current data pipe name. 94 original_pipe = pipes.cdp_name() 95 96 # The model names, titles and directories, including axis permutations. 97 models = [] 98 model_titles = [] 99 dirs = [] 100 for model in MODEL_LIST: 101 # Add the base model. 102 models.append(model) 103 model_title = model[0].upper() + model[1:] 104 model_titles.append(model_title) 105 dirs.append(model_directory(model, base_dir=dir)) 106 107 # Axis permutations. 108 if model in MODEL_LIST_ISO_CONE + MODEL_LIST_PSEUDO_ELLIPSE: 109 # The A permutation. 110 models.append("%s permutation A" % model) 111 model_titles.append(model_title + ' (perm A)') 112 dirs.append(model_directory(models[-1], base_dir=dir)) 113 114 # The B permutation. 115 if model in MODEL_LIST_PSEUDO_ELLIPSE: 116 models.append("%s permutation B" % model) 117 model_titles.append(model_title + ' (perm B)') 118 dirs.append(model_directory(models[-1], base_dir=dir)) 119 120 # Loop over the models. 121 count = {} 122 count_total = {} 123 percentage = {} 124 for i in range(len(models)): 125 # Skip the rigid model. 126 if models[i] == MODEL_RIGID: 127 continue 128 129 # No file. 130 found = False 131 for ext in ['.bz2', '', 'gz']: 132 file = dirs[i]+sep+'results'+ext 133 if access(file, F_OK): 134 found = True 135 break 136 if not found: 137 continue 138 139 # Create a data pipe. 140 pipe_name = 'temp %s' % models[i] 141 if pipes.has_pipe(pipe_name): 142 pipes.delete(pipe_name) 143 pipes.create(pipe_name, 'frame order') 144 145 # Load the data. 146 results.read(file='results', dir=dirs[i]) 147 148 # SciPy quadratic integration has been used. 149 if hasattr(cdp, 'quad_int') and cdp.quad_int: 150 count[models[i]] = 'Quad int' 151 count_total[models[i]] = '' 152 percentage[models[i]] = '' 153 continue 154 155 # Count the Sobol' points used. 156 if not hasattr(cdp, 'sobol_points_used'): 157 optimisation.count_sobol_points() 158 count[models[i]] = cdp.sobol_points_used 159 count_total[models[i]] = cdp.sobol_max_points 160 percentage[models[i]] = "%10.3f" % (float(cdp.sobol_points_used) / float(cdp.sobol_max_points) * 100.0) + '%' 161 162 # Delete the temporary data pipe. 163 pipes.delete(pipe_name) 164 165 # Initialise the output string. 166 string = "Quasi-random Sobol' numerical PCS integration point counting:\n\n" 167 168 # Assemble the table contents. 169 headings = [["Model", "Total points", "Used points", "Percentage"]] 170 contents = [] 171 for model in models: 172 if model not in count: 173 continue 174 contents.append([model, count_total[model], count[model], percentage[model]]) 175 176 # Add the table to the output string. 177 string += format_table(headings=headings, contents=contents) 178 179 # Stdout output. 180 sys.stdout.write("\n\n\n") 181 sys.stdout.write(string) 182 183 # Save to file. 
184 file = open_write_file(file_name=file_name, dir=dir, force=force) 185 file.write(string) 186 file.close() 187 188 # Switch back to the original data pipe, if it exists. 189 if original_pipe: 190 pipes.switch(original_pipe)
191 192
193 -def model_directory(model, base_dir=None):
 194      """Return the directory to be used for the model.
 195  
 196      @param model:      The frame order model name.
 197      @type model:       str
 198      @keyword base_dir: The optional base directory to prepend to the file name.
 199      @type base_dir:    None or str
 200      """
 201  
 202      # Convert the model name.
 203      dir = model.replace(' ', '_')
 204      dir = dir.replace(',', '')
 205  
 206      # Process the base directory.
 207      if base_dir == None:
 208          base_dir = ''
 209      elif base_dir[-1] != sep:
 210          base_dir += sep
 211  
 212      # Return the full path.
 213      return base_dir + dir
214 215
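# Illustrative only, not part of the module:  on a POSIX system, and for a model name containing
# spaces and a comma, model_directory() maps the name to a file system path as follows:
#
#     >>> model_directory('iso cone, free rotor', base_dir='analysis')
#     'analysis/iso_cone_free_rotor'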
216 -def summarise(file_name='summary', dir=None, force=True):
217 """Summarise the frame order auto-analysis results. 218 219 This function can be used while a frame order analysis is running to summarise the results. It will create a table of the statistics and model parameters for all of the optimised frame order models for which results files currently exist. 220 221 222 @keyword file_name: The file to save the table into. 223 @type file_name: str 224 @keyword dir: The optional directory to place the file into. If specified, the results files will also be searched for in this directory. 225 @type dir: None or str 226 @keyword force: A flag which if True will cause any preexisting file to be overwritten. 227 @type force: bool 228 """ 229 230 # Store the current data pipe name. 231 original_pipe = pipes.cdp_name() 232 233 # The model names, titles and directories, including axis permutations. 234 models = [] 235 model_titles = [] 236 dirs = [] 237 for model in MODEL_LIST: 238 # Add the base model. 239 models.append(model) 240 model_title = model[0].upper() + model[1:] 241 model_titles.append(model_title) 242 dirs.append(model_directory(model, base_dir=dir)) 243 244 # Axis permutations. 245 if model in MODEL_LIST_ISO_CONE + MODEL_LIST_PSEUDO_ELLIPSE: 246 # The A permutation. 247 models.append("%s permutation A" % model) 248 model_titles.append(model_title + ' (perm A)') 249 dirs.append(model_directory(models[-1], base_dir=dir)) 250 251 # The B permutation. 252 if model in MODEL_LIST_PSEUDO_ELLIPSE: 253 models.append("%s permutation B" % model) 254 model_titles.append(model_title + ' (perm B)') 255 dirs.append(model_directory(models[-1], base_dir=dir)) 256 257 # The analysis directory and structures. 258 contents = [] 259 contents.append(["Analysis directory", getcwd()]) 260 string = format_table(contents=contents) 261 262 # Table header. 263 headings1 = [] 264 headings1.append(["Model", "k", "chi2", "AIC", "Motional eigenframe", MULTI_COL, MULTI_COL, "Cone half angles (deg)", MULTI_COL, MULTI_COL, MULTI_COL]) 265 headings1.append([None, None, None, None, "a", "b/th", "g/ph", "thx", "thy", "smax", "smax2"]) 266 267 # 2nd table header. 268 headings2 = [] 269 headings2.append(["Model", "Average position", MULTI_COL, MULTI_COL, MULTI_COL, MULTI_COL, MULTI_COL, "Pivot point", MULTI_COL, MULTI_COL]) 270 headings2.append([None, "x", "y", "z", "a", "b", "g", "x", "y", "z"]) 271 272 # Loop over the models. 273 contents1 = [] 274 contents2 = [] 275 for i in range(len(models)): 276 # No file. 277 found = False 278 for ext in ['.bz2', '', 'gz']: 279 file = dirs[i]+sep+'results'+ext 280 if access(file, F_OK): 281 found = True 282 break 283 if not found: 284 continue 285 286 # Create a data pipe. 287 pipe_name = 'temp %s' % models[i] 288 if pipes.has_pipe(pipe_name): 289 pipes.delete(pipe_name) 290 pipes.create(pipe_name, 'frame order') 291 292 # Load the data. 293 results.read(file='results', dir=dirs[i]) 294 295 # Number of params. 296 k = len(cdp.params) 297 298 # Format the model information. 299 contents1.append([model_titles[i], k, cdp.chi2, cdp.chi2 + 2*k, None, None, None, None, None, None, None]) 300 contents2.append([model_titles[i], 0.0, 0.0, 0.0, None, None, None, None, None, None]) 301 302 # Eigen alpha. 303 if hasattr(cdp, 'eigen_alpha') and cdp.eigen_alpha != None: 304 contents1[-1][4] = cdp.eigen_alpha 305 306 # Eigen beta. 307 if hasattr(cdp, 'eigen_beta') and cdp.eigen_beta != None: 308 contents1[-1][5] = cdp.eigen_beta 309 elif hasattr(cdp, 'axis_theta') and cdp.axis_theta != None: 310 contents1[-1][5] = cdp.axis_theta 311 312 # Eigen gamma. 
313 if hasattr(cdp, 'eigen_gamma') and cdp.eigen_gamma != None: 314 contents1[-1][6] = cdp.eigen_gamma 315 elif hasattr(cdp, 'axis_phi') and cdp.axis_phi != None: 316 contents1[-1][6] = cdp.axis_phi 317 318 # Convert the axis alpha angle to spherical angles for comparison. 319 if hasattr(cdp, 'axis_alpha') and cdp.model in [MODEL_ROTOR, MODEL_FREE_ROTOR]: 320 axis_theta, axis_phi = convert_axis_alpha_to_spherical(alpha=cdp.axis_alpha, pivot=generate_pivot(order=1, pipe_name=pipe_name), point=pipe_centre_of_mass(verbosity=0)) 321 contents1[-1][5] = wrap_angles(axis_theta, 0.0, 2.0*pi) 322 contents1[-1][6] = wrap_angles(axis_phi, 0.0, 2.0*pi) 323 324 # Order x. 325 if hasattr(cdp, 'cone_theta_x') and cdp.cone_theta_x != None: 326 contents1[-1][7] = cdp.cone_theta_x / 2.0 / pi * 360.0 327 elif hasattr(cdp, 'cone_theta') and cdp.cone_theta != None: 328 contents1[-1][7] = cdp.cone_theta / 2.0 / pi * 360.0 329 330 # Order y. 331 if hasattr(cdp, 'cone_theta_y') and cdp.cone_theta_y != None: 332 contents1[-1][8] = cdp.cone_theta_y / 2.0 / pi * 360.0 333 334 # Order torsion. 335 if hasattr(cdp, 'cone_sigma_max') and cdp.cone_sigma_max != None: 336 contents1[-1][9] = cdp.cone_sigma_max / 2.0 / pi * 360.0 337 338 # Order torsion 2. 339 if hasattr(cdp, 'cone_sigma_max_2') and cdp.cone_sigma_max_2 != None: 340 contents1[-1][10] = cdp.cone_sigma_max_2 / 2.0 / pi * 360.0 341 342 # The average position parameters. 343 if hasattr(cdp, 'ave_pos_x') and cdp.ave_pos_x != None: 344 contents2[-1][1] = cdp.ave_pos_x 345 if hasattr(cdp, 'ave_pos_y') and cdp.ave_pos_y != None: 346 contents2[-1][2] = cdp.ave_pos_y 347 if hasattr(cdp, 'ave_pos_z') and cdp.ave_pos_z != None: 348 contents2[-1][3] = cdp.ave_pos_z 349 if hasattr(cdp, 'ave_pos_alpha') and cdp.ave_pos_alpha != None: 350 contents2[-1][4] = cdp.ave_pos_alpha 351 if hasattr(cdp, 'ave_pos_beta') and cdp.ave_pos_beta != None: 352 contents2[-1][5] = cdp.ave_pos_beta 353 if hasattr(cdp, 'ave_pos_gamma') and cdp.ave_pos_gamma != None: 354 contents2[-1][6] = cdp.ave_pos_gamma 355 356 # The pivot point. 357 contents2[-1][7] = cdp.pivot_x 358 contents2[-1][8] = cdp.pivot_y 359 contents2[-1][9] = cdp.pivot_z 360 361 # Delete the temporary data pipe. 362 pipes.delete(pipe_name) 363 364 # Add the tables. 365 string += format_table(headings=headings1, contents=contents1, custom_format=[None, None, "%.2f", "%.2f", "%.3f", "%.3f", "%.3f", "%.2f", "%.2f", "%.2f", "%.2f"]) 366 string += format_table(headings=headings2, contents=contents2, custom_format=[None, "%.3f", "%.3f", "%.3f", "%.3f", "%.3f", "%.3f", "%.3f", "%.3f", "%.3f"]) 367 368 # Stdout output. 369 sys.stdout.write("\n\n\n") 370 sys.stdout.write(string) 371 372 # Save to file. 373 file = open_write_file(file_name=file_name, dir=dir, force=force) 374 file.write(string) 375 file.close() 376 377 # Switch back to the original data pipe, if it exists. 378 if original_pipe: 379 pipes.switch(original_pipe)
380 381 382
383 -class Frame_order_analysis:
 384      """The frame order auto-analysis protocol."""
 385  
 386      # Debugging and test suite variables.
 387      _final_state = True
 388  
389 - def __init__(self, data_pipe_full=None, data_pipe_subset=None, pipe_bundle=None, results_dir=None, pre_run_dir=None, opt_rigid=None, opt_subset=None, opt_full=None, opt_mc=None, mc_sim_num=500, models=MODEL_LIST_NONREDUNDANT, brownian_step_size=2.0, brownian_snapshot=10, brownian_total=1000, dist_total=1000, dist_max_rotations=1000000, results_compress_type=1, rigid_grid_split=False, store_intermediate=True, nested_params_ave_dom_pos=True):
390 """Perform the full frame order analysis. 391 392 @param data_pipe_full: The name of the data pipe containing all of the RDC and PCS data. 393 @type data_pipe_full: str 394 @param data_pipe_subset: The name of the data pipe containing all of the RDC data but only a small subset of ~5 PCS points. This optional argument is used to massively speed up the analysis. 395 @type data_pipe_subset: str or None 396 @keyword pipe_bundle: The data pipe bundle to associate all spawned data pipes with. 397 @type pipe_bundle: str 398 @keyword results_dir: The directory where files are saved in. 399 @type results_dir: str 400 @keyword pre_run_dir: The optional directory containing the frame order auto-analysis results from a previous run. If supplied, then the 'data_pipe_full', 'data_pipe_subset', and 'opt_subset' arguments will be ignored. The results will be loaded from the results files in this directory, and then optimisation starts from there. The model nesting algorithm will also be deactivated. 401 @keyword opt_rigid: The grid search, zooming grid search and minimisation settings object for the rigid frame order model. 402 @type pre_run_dir: None or str 403 @type opt_rigid: Optimisation_settings instance 404 @keyword opt_subset: The grid search, zooming grid search and minimisation settings object for optimisation of all models, excluding the rigid model, for the PCS data subset. 405 @type opt_subset: Optimisation_settings instance 406 @keyword opt_full: The grid search, zooming grid search and minimisation settings object for optimisation of all models, excluding the rigid model, for the full data set. 407 @type opt_full: Optimisation_settings instance 408 @keyword opt_mc: The grid search, zooming grid search and minimisation settings object for optimisation of the Monte Carlo simulations. Any grid search settings will be ignored, as only the minimise.execute user function is run for the simulations. And only the settings for the first iteration of the object will be accessed and used - iterative optimisation will be ignored. 409 @type opt_mc: Optimisation_settings instance 410 @keyword mc_sim_num: The number of Monte Carlo simulations to be used for error analysis at the end of the analysis. 411 @type mc_sim_num: int 412 @keyword models: The frame order models to use in the analysis. The 'rigid' model must be included as this is essential for the analysis. 413 @type models: list of str 414 @keyword brownian_step_size: The step_size argument for the pseudo-Brownian dynamics simulation frame_order.simulate user function. 415 @type brownian_step_size: float 416 @keyword brownian_snapshot: The snapshot argument for the pseudo-Brownian dynamics simulation frame_order.simulate user function. 417 @type brownian_snapshot: int 418 @keyword brownian_total: The total argument for the pseudo-Brownian dynamics simulation frame_order.simulate user function. 419 @type brownian_total: int 420 @keyword dist_total: The total argument for the uniform distribution frame_order.distribute user function. 421 @type dist_total: int 422 @keyword dist_max_rotations: The max_rotations argument for the uniform distribution frame_order.distribute user function. 423 @type dist_max_rotations: int 424 @keyword results_compress_type: The type of compression to use when creating the results files. See the results.write user function for details. 
425 @type results_compress_type: int 426 @keyword rigid_grid_split: A flag which if True will cause the grid search for the rigid model to be split so that the rotation is optimised first followed by the translation. When combined with grid zooming, this can save optimisation time. However it may result in the global minimum being missed. 427 @type rigid_grid_split: bool 428 @keyword store_intermediate: A flag which if True will cause all intermediate optimisation results to be stored in the 'intermediate_results' directory. These can then be used for studying the optimisation settings or loaded for subsequent analyses. 429 @type store_intermediate: bool 430 @keyword nested_params_ave_dom_pos: A flag which if True will cause the average domain position parameters to be taken from the rigid or free-rotor models. If False, then these parameters will be set to zero. 431 @type nested_params_ave_dom_pos: bool 432 """ 433 434 # Execution lock. 435 status.exec_lock.acquire(pipe_bundle, mode='auto-analysis') 436 437 # Execute the full protocol. 438 try: 439 # Initial printout. 440 title(file=sys.stdout, text="Frame order auto-analysis", prespace=7) 441 442 # Store the args. 443 self.data_pipe_full = data_pipe_full 444 self.data_pipe_subset = data_pipe_subset 445 self.pipe_bundle = pipe_bundle 446 self.opt_rigid = opt_rigid 447 self.opt_subset = opt_subset 448 self.opt_full = opt_full 449 self.opt_mc = opt_mc 450 self.mc_sim_num = mc_sim_num 451 self.brownian_step_size = brownian_step_size 452 self.brownian_snapshot = brownian_snapshot 453 self.brownian_total = brownian_total 454 self.dist_total = dist_total 455 self.dist_max_rotations = dist_max_rotations 456 self.results_compress_type = results_compress_type 457 self.rigid_grid_split = rigid_grid_split 458 self.store_intermediate = store_intermediate 459 self.flag_nested_params_ave_dom_pos = nested_params_ave_dom_pos 460 461 # Re-order the models to enable the parameter nesting protocol. 462 self.models = self.reorder_models(models) 463 464 # A dictionary and list of the data pipe names. 465 self.pipe_name_dict = {} 466 self.pipe_name_list = [] 467 468 # Project directory (i.e. directory containing the model-free model results and the newly generated files) 469 if results_dir: 470 self.results_dir = results_dir + sep 471 else: 472 self.results_dir = getcwd() + sep 473 474 # The pre-run directory. 475 self.pre_run_dir = pre_run_dir 476 self.pre_run_flag = False 477 if self.pre_run_dir: 478 self.pre_run_dir += sep 479 self.pre_run_flag = True 480 481 # Data checks. 482 self.check_vars() 483 484 # Load the interpreter. 485 self.interpreter = Interpreter(show_script=False, raise_relax_error=True) 486 self.interpreter.populate_self() 487 self.interpreter.on(verbose=False) 488 489 # Output the starting time. 490 self.interpreter.system.time() 491 492 # The nested model optimisation protocol. 493 self.nested_models() 494 495 # Printout for the final run. 496 subtitle(file=sys.stdout, text="Final results") 497 498 # The final results does not already exist. 499 if not self.read_results(model='final', pipe_name='final'): 500 # Model selection. 501 self.interpreter.model_selection(method='AIC', modsel_pipe='final', pipes=self.pipe_name_list) 502 503 # The numerical optimisation settings. 504 opt = self.opt_mc 505 self.interpreter.frame_order.quad_int(opt.get_min_quad_int(0)) 506 self.sobol_setup(opt.get_min_sobol_info(0)) 507 508 # Monte Carlo simulations. 
509 self.interpreter.monte_carlo.setup(number=self.mc_sim_num) 510 self.interpreter.monte_carlo.create_data() 511 self.interpreter.monte_carlo.initial_values() 512 self.interpreter.minimise.execute(opt.get_min_algor(0), func_tol=opt.get_min_func_tol(0), max_iter=opt.get_min_max_iter(0)) 513 self.interpreter.eliminate() 514 self.interpreter.monte_carlo.error_analysis() 515 516 # Create the output and visualisation files. 517 self.results_output(model='final', dir=self.results_dir+'final', results_file=True) 518 519 # Regenerate the output and visualisation files for the final results. 520 else: 521 self.results_output(model='final', dir=self.results_dir+'final', results_file=False) 522 523 # Output the finishing time. 524 self.interpreter.system.time() 525 526 # Final title printout. 527 subtitle(file=sys.stdout, text="Summaries") 528 529 # Save the final program state. 530 if self._final_state: 531 self.interpreter.state.save('final_state', dir=self.results_dir, force=True) 532 533 # Count the number of Sobol' points and create a summary file. 534 count = False 535 for model in self.models: 536 if model != MODEL_RIGID: 537 count = True 538 break 539 if count: 540 count_sobol_points(dir=self.results_dir, force=True) 541 summarise(dir=self.results_dir, force=True) 542 543 # Clean up. 544 finally: 545 # Final printout. 546 title(file=sys.stdout, text="Completion of the frame order auto-analysis", prespace=7) 547 print_elapsed_time(time() - status.start_time) 548 549 # Finish and unlock execution. 550 status.exec_lock.release()
551 552
553 - def axis_permutation_analysis(self, model=None):
554 """Handle the two local minima in the pseudo-elliptic models. 555 556 This involves creating a new data pipe for the pseudo-elliptic models, permuting the motional parameters, and performing an optimisation. This will explore the second minimum. 557 558 559 @keyword model: The frame order model to visualise. This should match the model of the current data pipe, unless the special value of 'final' is used to indicate the visualisation of the final results. 560 @type model: str 561 """ 562 563 # Nothing to do. 564 if model not in MODEL_LIST_ISO_CONE + MODEL_LIST_PSEUDO_ELLIPSE: 565 return 566 567 # The permutations. 568 perms = ['A'] 569 if model in MODEL_LIST_PSEUDO_ELLIPSE: 570 perms.append('B') 571 572 # Loop over all permutations. 573 for perm in perms: 574 # The title printout. 575 title = model[0].upper() + model[1:] 576 text = "Axis permutation '%s' of the %s frame order model" % (perm, title) 577 subtitle(file=sys.stdout, text=text, prespace=5) 578 579 # Output the model staring time. 580 self.interpreter.system.time() 581 582 # A new model name. 583 perm_model = "%s permutation %s" % (model, perm) 584 585 # The data pipe name. 586 self.pipe_name_dict[perm_model] = '%s permutation %s - %s' % (title, perm, self.pipe_bundle) 587 self.pipe_name_list.append(self.pipe_name_dict[perm_model]) 588 589 # The output directory. 590 dir = model_directory(perm_model, base_dir=self.results_dir) 591 592 # The results file already exists, so read its contents instead. 593 if self.read_results(model=perm_model, pipe_name=self.pipe_name_dict[perm_model]): 594 # Re-perform model elimination just in case. 595 self.interpreter.eliminate() 596 597 # Recreate the output and visualisation files (in case this was not completed correctly). 598 self.results_output(model=perm_model, dir=dir, results_file=False) 599 600 # Jump to the next permutation. 601 continue 602 603 # Copy the data pipe, and add it to the list so it is included in the model selection. 604 self.interpreter.pipe.copy(pipe_from=self.pipe_name_dict[model], pipe_to=self.pipe_name_dict[perm_model]) 605 606 # Switch to the new pipe. 607 self.interpreter.pipe.switch(pipe_name=self.pipe_name_dict[perm_model]) 608 609 # Permute the axes. 610 self.interpreter.frame_order.permute_axes(permutation=perm) 611 612 # Minimise with only the last optimisation settings (for the full data set). 613 opt = self.opt_full 614 for i in opt.loop_min(): 615 pass 616 617 # The numerical optimisation settings. 618 self.interpreter.frame_order.quad_int(opt.get_min_quad_int(i)) 619 self.sobol_setup(opt.get_min_sobol_info(i)) 620 621 # Perform the optimisation. 622 self.interpreter.minimise.execute(min_algor=opt.get_min_algor(i), func_tol=opt.get_min_func_tol(i), max_iter=opt.get_min_max_iter(i)) 623 624 # Results printout. 625 self.print_results() 626 627 # Model elimination. 628 self.interpreter.eliminate() 629 630 # Create the output and visualisation files. 631 self.results_output(model=perm_model, dir=dir, results_file=True)
632 633
634 - def check_vars(self):
 635          """Check that the user has set the variables correctly."""
 636  
 637          # The pipe bundle.
 638          if not isinstance(self.pipe_bundle, str):
 639              raise RelaxError("The pipe bundle name '%s' is invalid." % self.pipe_bundle)
 640  
 641          # Minimisation variables.
 642          if not isinstance(self.mc_sim_num, int):
 643              raise RelaxError("The mc_sim_num user variable '%s' is incorrectly set.  It should be an integer." % self.mc_sim_num)
644 645
646 - def custom_grid_incs(self, model, inc=None, pivot_search=True):
647 """Set up a customised grid search increment number for each model. 648 649 @param model: The frame order model. 650 @type model: str 651 @keyword inc: The number of grid search increments to use for each dimension. 652 @type inc: int 653 @keyword pivot_search: A flag which if False will prevent the pivot point from being included in the grid search. 654 @type pivot_search: bool 655 @return: The list of increment values. 656 @rtype: list of int and None 657 """ 658 659 # Initialise the structure. 660 incs = [] 661 662 # The pivot parameters. 663 if hasattr(cdp, 'pivot_fixed') and not cdp.pivot_fixed: 664 # Optimise the pivot for the rotor model. 665 if pivot_search and model == MODEL_ROTOR: 666 incs += [inc, inc, inc] 667 668 # Otherwise use preset values (copied from other models). 669 else: 670 incs += [None, None, None] 671 672 # The 2nd pivot point parameters - the minimum inter rotor axis distance. 673 if model in [MODEL_DOUBLE_ROTOR]: 674 incs += [inc] 675 676 # The average domain position parameters. 677 if model == MODEL_FREE_ROTOR: 678 incs += [inc, inc, inc, inc, inc] 679 elif model in [MODEL_ISO_CONE_FREE_ROTOR, MODEL_PSEUDO_ELLIPSE_FREE_ROTOR]: 680 incs += [None, None, None, None, None] 681 else: 682 incs += [None, None, None, None, None, None] 683 684 # The motional eigenframe and order parameters - the rotor model. 685 if model == MODEL_ROTOR: 686 incs += [inc, inc] 687 688 # The motional eigenframe and order parameters - the free rotor model. 689 if model == MODEL_FREE_ROTOR: 690 incs += [None] 691 692 # The motional eigenframe and order parameters - the torsionless isotropic cone model. 693 if model == MODEL_ISO_CONE_TORSIONLESS: 694 incs += [None, None, None] 695 696 # The motional eigenframe and order parameters - the free rotor isotropic cone model. 697 if model == MODEL_ISO_CONE_FREE_ROTOR: 698 incs += [None, None, None] 699 700 # The motional eigenframe and order parameters - the isotropic cone model. 701 if model == MODEL_ISO_CONE: 702 incs += [None, None, inc, None] 703 704 # The motional eigenframe and order parameters - the torsionless pseudo-elliptic cone model. 705 if model == MODEL_PSEUDO_ELLIPSE_TORSIONLESS: 706 incs += [None, None, None, None, None] 707 708 # The motional eigenframe and order parameters - the free rotor pseudo-elliptic cone model. 709 if model == MODEL_PSEUDO_ELLIPSE_FREE_ROTOR: 710 incs += [None, None, None, None, None] 711 712 # The motional eigenframe and order parameters - the pseudo-elliptic cone model. 713 if model == MODEL_PSEUDO_ELLIPSE: 714 incs += [inc, inc, inc, None, inc, None] 715 716 # The motional eigenframe and order parameters - the double rotor model. 717 if model == MODEL_DOUBLE_ROTOR: 718 incs += [None, None, None, inc, inc] 719 720 # Return the increment list. 721 return incs
722 723
724 - def nested_params_ave_dom_pos(self, model):
725 """Copy the average domain parameters from simpler nested models for faster optimisation. 726 727 @param model: The frame order model. 728 @type model: str 729 """ 730 731 # No nesting, so set all parameters to zero. 732 if not self.flag_nested_params_ave_dom_pos: 733 self.interpreter.value.set(param='ave_pos_x', val=0.0) 734 self.interpreter.value.set(param='ave_pos_y', val=0.0) 735 self.interpreter.value.set(param='ave_pos_z', val=0.0) 736 self.interpreter.value.set(param='ave_pos_alpha', val=0.0) 737 self.interpreter.value.set(param='ave_pos_beta', val=0.0) 738 self.interpreter.value.set(param='ave_pos_gamma', val=0.0) 739 return 740 741 # Skip the following models to allow for full optimisation. 742 if model in [MODEL_RIGID, MODEL_FREE_ROTOR]: 743 # Printout. 744 print("No nesting of the average domain position parameters for the '%s' model." % model) 745 746 # Exit. 747 return 748 749 # The average position from the rigid model. 750 if model not in MODEL_LIST_FREE_ROTORS: 751 # Printout. 752 print("Obtaining the average position from the rigid model.") 753 754 # Get the rigid data pipe. 755 pipe = pipes.get_pipe(self.pipe_name_dict[MODEL_RIGID]) 756 757 # Copy the average position parameters from the rigid model. 758 cdp.ave_pos_x = pipe.ave_pos_x 759 cdp.ave_pos_y = pipe.ave_pos_y 760 cdp.ave_pos_z = pipe.ave_pos_z 761 cdp.ave_pos_alpha = pipe.ave_pos_alpha 762 cdp.ave_pos_beta = pipe.ave_pos_beta 763 cdp.ave_pos_gamma = pipe.ave_pos_gamma 764 765 # The average position from the free rotor model. 766 else: 767 # Printout. 768 print("Obtaining the average position from the free rotor model.") 769 770 # Get the free rotor data pipe. 771 pipe = pipes.get_pipe(self.pipe_name_dict[MODEL_FREE_ROTOR]) 772 773 # Copy the average position parameters from the free rotor model. 774 cdp.ave_pos_x = pipe.ave_pos_x 775 cdp.ave_pos_y = pipe.ave_pos_y 776 cdp.ave_pos_z = pipe.ave_pos_z 777 cdp.ave_pos_beta = pipe.ave_pos_beta 778 cdp.ave_pos_gamma = pipe.ave_pos_gamma
779 780
781 - def nested_params_eigenframe(self, model):
782 """Copy the eigenframe parameters from simpler nested models for faster optimisation. 783 784 @param model: The frame order model. 785 @type model: str 786 """ 787 788 # Skip the following models to allow for full optimisation. 789 if model in [MODEL_ROTOR, MODEL_PSEUDO_ELLIPSE]: 790 # Printout. 791 print("No nesting of the eigenframe parameters for the '%s' model." % model) 792 793 # Exit. 794 return 795 796 # The cone axis from the rotor model. 797 if model in [MODEL_FREE_ROTOR, MODEL_ISO_CONE]: 798 # Printout. 799 print("Obtaining the cone axis from the rotor model.") 800 801 # Get the rotor data pipe. 802 pipe = pipes.get_pipe(self.pipe_name_dict[MODEL_ROTOR]) 803 804 # The cone axis as the axis alpha angle. 805 if model == MODEL_FREE_ROTOR: 806 cdp.axis_alpha = pipe.axis_alpha 807 808 # The cone axis from the axis alpha angle to spherical angles. 809 if model == MODEL_ISO_CONE: 810 cdp.axis_theta, cdp.axis_phi = convert_axis_alpha_to_spherical(alpha=pipe.axis_alpha, pivot=generate_pivot(order=1, pipe_name=self.pipe_name_dict[MODEL_ROTOR]), point=pipe_centre_of_mass(verbosity=0)) 811 812 # The cone axis from the isotropic cone model. 813 elif model in [MODEL_ISO_CONE_FREE_ROTOR, MODEL_ISO_CONE_TORSIONLESS]: 814 # Printout. 815 print("Obtaining the cone axis from the isotropic cone model.") 816 817 # Get the iso cone data pipe. 818 pipe = pipes.get_pipe(self.pipe_name_dict[MODEL_ISO_CONE]) 819 820 # Copy the cone axis parameters. 821 cdp.axis_theta = pipe.axis_theta 822 cdp.axis_phi = pipe.axis_phi 823 824 # The full eigenframe from the pseudo-ellipse model. 825 elif model in [MODEL_PSEUDO_ELLIPSE_FREE_ROTOR, MODEL_PSEUDO_ELLIPSE_TORSIONLESS, MODEL_DOUBLE_ROTOR]: 826 # Printout. 827 print("Obtaining the full eigenframe from the pseudo-ellipse model.") 828 829 # Get the pseudo-ellipse data pipe. 830 pipe = pipes.get_pipe(self.pipe_name_dict[MODEL_PSEUDO_ELLIPSE]) 831 832 # Copy the three Euler angles. 833 cdp.eigen_alpha = pipe.eigen_alpha 834 cdp.eigen_beta = pipe.eigen_beta 835 cdp.eigen_gamma = pipe.eigen_gamma
836 837
838 - def nested_params_order(self, model):
839 """Copy the order parameters from simpler nested models for faster optimisation. 840 841 @param model: The frame order model. 842 @type model: str 843 """ 844 845 # Skip the following models to allow for full optimisation. 846 if model in [MODEL_ROTOR, MODEL_DOUBLE_ROTOR]: 847 # Printout. 848 print("No nesting of the order parameters for the '%s' model." % model) 849 850 # Exit. 851 return 852 853 # The cone angle from the isotropic cone model. 854 if model in [MODEL_ISO_CONE_TORSIONLESS, MODEL_PSEUDO_ELLIPSE, MODEL_ISO_CONE_FREE_ROTOR]: 855 # Get the iso cone data pipe. 856 pipe = pipes.get_pipe(self.pipe_name_dict[MODEL_ISO_CONE]) 857 858 # Copy the cone angle directly. 859 if model in [MODEL_ISO_CONE_FREE_ROTOR, MODEL_ISO_CONE_TORSIONLESS]: 860 print("Obtaining the cone angle from the isotropic cone model.") 861 cdp.cone_theta = pipe.cone_theta 862 863 # Copy as the X cone angle. 864 elif model == MODEL_PSEUDO_ELLIPSE: 865 print("Obtaining the cone X angle from the isotropic cone model.") 866 cdp.cone_theta_x = pipe.cone_theta 867 868 # The X and Y cone angles from the pseudo-ellipse model. 869 elif model in [MODEL_PSEUDO_ELLIPSE_TORSIONLESS, MODEL_PSEUDO_ELLIPSE_FREE_ROTOR]: 870 # Printout. 871 print("Obtaining the cone X and Y angles from the pseudo-ellipse model.") 872 873 # Get the pseudo-ellipse data pipe. 874 pipe = pipes.get_pipe(self.pipe_name_dict[MODEL_PSEUDO_ELLIPSE]) 875 876 # Copy the cone axis. 877 cdp.cone_theta_x = pipe.cone_theta_x 878 cdp.cone_theta_y = pipe.cone_theta_y 879 880 881 # The torsion from the rotor model. 882 if model in [MODEL_ISO_CONE, MODEL_PSEUDO_ELLIPSE]: 883 # Printout. 884 print("Obtaining the torsion angle from the rotor model.") 885 886 # Get the rotor data pipe. 887 pipe = pipes.get_pipe(self.pipe_name_dict[MODEL_ROTOR]) 888 889 # Copy the cone axis. 890 cdp.cone_sigma_max = pipe.cone_sigma_max
891 892
893 - def nested_params_pivot(self, model):
 894          """Copy the pivot parameters from simpler nested models for faster optimisation.
 895  
 896          @param model:  The frame order model.
 897          @type model:   str
 898          """
 899  
 900          # Skip the following models to allow for full optimisation.
 901          if model in [MODEL_ROTOR]:
 902              # Printout.
 903              print("No nesting of the pivot parameters for the '%s' model." % model)
 904  
 905              # Exit.
 906              return
 907  
 908          # The pivot from the rotor model.
 909          print("Obtaining the pivot point from the rotor model.")
 910  
 911          # Get the rotor data pipe.
 912          pipe = pipes.get_pipe(self.pipe_name_dict[MODEL_ROTOR])
 913  
 914          # Copy the pivot parameters.
 915          cdp.pivot_x = pipe.pivot_x
 916          cdp.pivot_y = pipe.pivot_y
 917          cdp.pivot_z = pipe.pivot_z
918 919
920 - def nested_models(self):
921 """Protocol for the nested optimisation of the frame order models.""" 922 923 # First optimise the rigid model using all data. 924 self.optimise_rigid() 925 926 # Iteratively optimise the frame order models. 927 for model in self.models: 928 # Skip the already optimised rigid model. 929 if model == MODEL_RIGID: 930 continue 931 932 # The model title. 933 title = model[0].upper() + model[1:] 934 935 # Printout. 936 subtitle(file=sys.stdout, text="%s frame order model"%title, prespace=5) 937 938 # Output the model staring time. 939 self.interpreter.system.time() 940 941 # The data pipe name. 942 self.pipe_name_dict[model] = '%s - %s' % (title, self.pipe_bundle) 943 self.pipe_name_list.append(self.pipe_name_dict[model]) 944 945 # The output directory. 946 dir = model_directory(model, base_dir=self.results_dir) 947 948 # The results file already exists, so read its contents instead. 949 if self.read_results(model=model, pipe_name=self.pipe_name_dict[model]): 950 # Re-perform model elimination just in case. 951 self.interpreter.eliminate() 952 953 # Recreate the output files (in case this was not completed correctly). 954 self.results_output(model=model, dir=dir, results_file=False) 955 956 # Perform the axis permutation analysis. 957 self.axis_permutation_analysis(model=model) 958 959 # Skip to the next model. 960 continue 961 962 # Load a pre-run results file. 963 if self.pre_run_dir != None: 964 self.read_results(model=model, pipe_name=self.pipe_name_dict[model], pre_run=True) 965 966 # Otherwise use the base data pipes. 967 else: 968 # Create the data pipe using the full data set, and switch to it. 969 if self.data_pipe_subset != None: 970 self.interpreter.pipe.copy(self.data_pipe_subset, self.pipe_name_dict[model], bundle_to=self.pipe_bundle) 971 else: 972 self.interpreter.pipe.copy(self.data_pipe_full, self.pipe_name_dict[model], bundle_to=self.pipe_bundle) 973 self.interpreter.pipe.switch(self.pipe_name_dict[model]) 974 975 # Select the Frame Order model. 976 self.interpreter.frame_order.select_model(model=model) 977 978 # Copy nested parameters. 979 subsubtitle(file=sys.stdout, text="Parameter nesting") 980 self.nested_params_ave_dom_pos(model) 981 self.nested_params_eigenframe(model) 982 self.nested_params_pivot(model) 983 self.nested_params_order(model) 984 985 # Optimisation using the PCS subset (skipped if a pre-run directory is supplied). 986 if self.data_pipe_subset != None and self.opt_subset != None and not self.pre_run_flag: 987 self.optimisation(model=model, opt=self.opt_subset, pcs_text="PCS subset", intermediate_dir='pcs_subset') 988 989 # Operations if a subset was used, otherwise these are not needed. 990 if self.data_pipe_subset != None and self.data_pipe_full != None: 991 # Copy the PCS data. 992 self.interpreter.pcs.copy(pipe_from=self.data_pipe_full, pipe_to=self.pipe_name_dict[model]) 993 994 # Reset the selection status. 995 for spin, spin_id in spin_loop(return_id=True, skip_desel=False): 996 # Get the spin from the original pipe. 997 spin_orig = return_spin(spin_id=spin_id, pipe=self.data_pipe_full) 998 999 # Reset the spin selection. 1000 spin.select = spin_orig.select 1001 1002 # Optimisation using the full data set. 1003 if self.opt_full != None: 1004 self.optimisation(model=model, opt=self.opt_full, pcs_text="full data set", intermediate_dir='all_data') 1005 1006 # Results printout. 1007 self.print_results() 1008 1009 # Model elimination. 1010 self.interpreter.eliminate() 1011 1012 # Create the output and visualisation files. 
1013 self.results_output(model=model, dir=dir, results_file=True) 1014 1015 # Perform the axis permutation analysis. 1016 self.axis_permutation_analysis(model=model)
1017 1018
1019 - def optimisation(self, model=None, opt=None, pcs_text=None, intermediate_dir=None):
1020 """Perform the grid search and minimisation. 1021 1022 @keyword model: The frame order model to optimise. 1023 @type model: str 1024 @keyword opt: The grid search, zooming grid search and minimisation settings object for optimisation of all models. 1025 @type opt: Optimisation_settings instance 1026 @keyword pcs_text: The text to use in the title. This is either about the PCS subset or full data set. 1027 @type pcs_text: str 1028 @keyword intermediate_dir: The directory for this PCS data set for storing the intermediate results. 1029 @type intermediate_dir: str 1030 """ 1031 1032 # Printout. 1033 subsubtitle(file=sys.stdout, text="Optimisation using the %s" % pcs_text) 1034 1035 # Results directory stub for intermediate results. 1036 intermediate_stub = self.results_dir + sep + 'intermediate_results' + sep + intermediate_dir 1037 1038 # Zooming grid search. 1039 for i in opt.loop_grid(): 1040 # The intermediate results directory. 1041 intermediate_dir = intermediate_stub + '_grid%i' % i 1042 1043 # Set the zooming grid search level. 1044 zoom = opt.get_grid_zoom_level(i) 1045 if zoom != None: 1046 self.interpreter.minimise.grid_zoom(level=zoom) 1047 intermediate_dir += '_zoom%i' % zoom 1048 1049 # Set up the custom grid increments. 1050 incs = self.custom_grid_incs(model, inc=opt.get_grid_inc(i), pivot_search=opt.get_grid_pivot_search(i)) 1051 intermediate_dir += '_inc%i' % opt.get_grid_inc(i) 1052 1053 # The numerical optimisation settings. 1054 quad_int = opt.get_grid_quad_int(i) 1055 if quad_int: 1056 self.interpreter.frame_order.quad_int(True) 1057 intermediate_dir += '_quad_int' 1058 else: 1059 sobol_num = opt.get_grid_sobol_info(i) 1060 self.sobol_setup(sobol_num) 1061 intermediate_dir += '_sobol%i' % sobol_num[0] 1062 1063 # Perform the grid search. 1064 self.interpreter.minimise.grid_search(inc=incs, skip_preset=False) 1065 1066 # Store the intermediate results and statistics. 1067 if self.store_intermediate: 1068 self.results_output(model=model, dir=model_directory(model, base_dir=intermediate_dir), results_file=True, simulation=False) 1069 count_sobol_points(dir=intermediate_dir, force=True) 1070 summarise(dir=intermediate_dir, force=True) 1071 1072 # Minimisation. 1073 for i in opt.loop_min(): 1074 # The intermediate results directory. 1075 func_tol = opt.get_min_func_tol(i) 1076 max_iter = opt.get_min_max_iter(i) 1077 intermediate_dir = intermediate_stub + '_min%i_ftol%g_max_iter%i' % (i, func_tol, max_iter) 1078 1079 # The numerical optimisation settings. 1080 quad_int = opt.get_min_quad_int(i) 1081 if quad_int: 1082 self.interpreter.frame_order.quad_int(True) 1083 intermediate_dir += '_quad_int' 1084 else: 1085 sobol_num = opt.get_min_sobol_info(i) 1086 self.sobol_setup(sobol_num) 1087 intermediate_dir += '_sobol%i' % sobol_num[0] 1088 1089 # Perform the optimisation. 1090 self.interpreter.minimise.execute(min_algor=opt.get_min_algor(i), func_tol=func_tol, max_iter=max_iter) 1091 1092 # Store the intermediate results. 1093 if self.store_intermediate: 1094 self.results_output(model=model, dir=model_directory(model, base_dir=intermediate_dir), results_file=True, simulation=False) 1095 count_sobol_points(dir=intermediate_dir, force=True) 1096 summarise(dir=intermediate_dir, force=True)
1097 1098
1099 - def optimise_rigid(self):
1100 """Optimise the rigid frame order model. 1101 1102 The Sobol' integration is not used here, so the algorithm is different to the other frame order models. 1103 """ 1104 1105 # The model. 1106 model = MODEL_RIGID 1107 title = model[0].upper() + model[1:] 1108 1109 # Print out. 1110 subtitle(file=sys.stdout, text="%s frame order model"%title, prespace=5) 1111 1112 # Output the model staring time. 1113 self.interpreter.system.time() 1114 1115 # The data pipe name. 1116 self.pipe_name_dict[model] = '%s - %s' % (title, self.pipe_bundle) 1117 self.pipe_name_list.append(self.pipe_name_dict[model]) 1118 1119 # The output directory. 1120 dir = model_directory(model, base_dir=self.results_dir) 1121 1122 # The results file already exists, so read its contents instead. 1123 if self.read_results(model=model, pipe_name=self.pipe_name_dict[model]): 1124 # Recreate the output and visualisation files (in case this was not completed correctly). 1125 self.results_output(model=model, dir=dir, results_file=False) 1126 1127 # Nothing more to do. 1128 return 1129 1130 # Load a pre-run results file. 1131 if self.pre_run_flag: 1132 self.read_results(model=model, pipe_name=self.pipe_name_dict[model], pre_run=True) 1133 1134 # Otherwise use the base data pipes. 1135 else: 1136 # Create the data pipe using the full data set, and switch to it. 1137 self.interpreter.pipe.copy(self.data_pipe_full, self.pipe_name_dict[model], bundle_to=self.pipe_bundle) 1138 self.interpreter.pipe.switch(self.pipe_name_dict[model]) 1139 1140 # Select the Frame Order model. 1141 self.interpreter.frame_order.select_model(model=model) 1142 1143 # Optimisation. 1144 opt = self.opt_rigid 1145 if opt != None: 1146 # No grid search. 1147 if not opt.has_grid(): 1148 # Set up the initial parameters. 1149 print("\n\nNo grid search, so setting the translational and rotational parameters to zero.") 1150 self.interpreter.value.set(param='ave_pos_x', val=0.0) 1151 self.interpreter.value.set(param='ave_pos_y', val=0.0) 1152 self.interpreter.value.set(param='ave_pos_z', val=0.0) 1153 self.interpreter.value.set(param='ave_pos_alpha', val=0.0) 1154 self.interpreter.value.set(param='ave_pos_beta', val=0.0) 1155 self.interpreter.value.set(param='ave_pos_gamma', val=0.0) 1156 1157 # Grid search alternation. 1158 elif self.rigid_grid_split: 1159 # Split zooming grid search for the translation. 1160 print("\n\nTranslation active - splitting the grid search and iterating.") 1161 self.interpreter.value.set(param='ave_pos_x', val=0.0) 1162 self.interpreter.value.set(param='ave_pos_y', val=0.0) 1163 self.interpreter.value.set(param='ave_pos_z', val=0.0) 1164 for i in opt.loop_grid(): 1165 # Set the zooming grid search level. 1166 zoom = opt.get_grid_zoom_level(i) 1167 if zoom != None: 1168 self.interpreter.minimise.grid_zoom(level=zoom) 1169 1170 # The numerical optimisation settings. 1171 self.interpreter.frame_order.quad_int(opt.get_grid_quad_int(i)) 1172 self.sobol_setup(opt.get_grid_sobol_info(i)) 1173 1174 # The number of increments. 1175 inc = opt.get_grid_inc(i) 1176 1177 # First optimise the rotation. 1178 self.interpreter.minimise.grid_search(inc=[None, None, None, inc, inc, inc], skip_preset=False) 1179 1180 # Then the translation. 1181 self.interpreter.minimise.grid_search(inc=[inc, inc, inc, None, None, None], skip_preset=False) 1182 1183 # Normal grid search. 1184 else: 1185 for i in opt.loop_grid(): 1186 # Set the zooming grid search level. 
1187 zoom = opt.get_grid_zoom_level(i) 1188 if zoom != None: 1189 self.interpreter.minimise.grid_zoom(level=zoom) 1190 1191 # The numerical optimisation settings. 1192 self.interpreter.frame_order.quad_int(opt.get_grid_quad_int(i)) 1193 self.sobol_setup(opt.get_grid_sobol_info(i)) 1194 1195 # The number of increments. 1196 inc = opt.get_grid_inc(i) 1197 1198 # Grid search 1199 self.interpreter.minimise.grid_search(inc=inc, skip_preset=False) 1200 1201 # Minimise. 1202 for i in opt.loop_min(): 1203 # The numerical optimisation settings. 1204 self.interpreter.frame_order.quad_int(opt.get_min_quad_int(i)) 1205 self.sobol_setup(opt.get_min_sobol_info(i)) 1206 1207 # Perform the optimisation. 1208 self.interpreter.minimise.execute(min_algor=opt.get_min_algor(i), func_tol=opt.get_min_func_tol(i), max_iter=opt.get_min_max_iter(i)) 1209 1210 # Results printout. 1211 self.print_results() 1212 1213 # Create the output and visualisation files. 1214 self.results_output(model=model, dir=dir, results_file=True)
1215 1216
1217 - def print_results(self):
1218 """Print out the optimisation results for the current data pipe.""" 1219 1220 # Header. 1221 sys.stdout.write("\nFinal optimisation results:\n") 1222 1223 # Formatting string. 1224 format_float = " %-20s %20.15f\n" 1225 format_vect = " %-20s %20s\n" 1226 1227 # Pivot. 1228 sys.stdout.write("\nPivot point:\n") 1229 if hasattr(cdp, 'pivot_x'): 1230 sys.stdout.write(format_float % ('x:', cdp.pivot_x)) 1231 if hasattr(cdp, 'pivot_y'): 1232 sys.stdout.write(format_float % ('y:', cdp.pivot_y)) 1233 if hasattr(cdp, 'pivot_z'): 1234 sys.stdout.write(format_float % ('z:', cdp.pivot_z)) 1235 1236 # Average position. 1237 if hasattr(cdp, 'ave_pos_x') or hasattr(cdp, 'ave_pos_alpha') or hasattr(cdp, 'ave_pos_beta') or hasattr(cdp, 'ave_pos_gamma'): 1238 sys.stdout.write("\nAverage moving domain position:\n") 1239 if hasattr(cdp, 'ave_pos_x'): 1240 sys.stdout.write(format_float % ('x:', cdp.ave_pos_x)) 1241 if hasattr(cdp, 'ave_pos_y'): 1242 sys.stdout.write(format_float % ('y:', cdp.ave_pos_y)) 1243 if hasattr(cdp, 'ave_pos_z'): 1244 sys.stdout.write(format_float % ('z:', cdp.ave_pos_z)) 1245 if hasattr(cdp, 'ave_pos_alpha'): 1246 sys.stdout.write(format_float % ('alpha:', cdp.ave_pos_alpha)) 1247 if hasattr(cdp, 'ave_pos_beta'): 1248 sys.stdout.write(format_float % ('beta:', cdp.ave_pos_beta)) 1249 if hasattr(cdp, 'ave_pos_gamma'): 1250 sys.stdout.write(format_float % ('gamma:', cdp.ave_pos_gamma)) 1251 1252 # Frame order eigenframe. 1253 if hasattr(cdp, 'eigen_alpha') or hasattr(cdp, 'eigen_beta') or hasattr(cdp, 'eigen_gamma') or hasattr(cdp, 'axis_theta') or hasattr(cdp, 'axis_phi'): 1254 sys.stdout.write("\nFrame order eigenframe:\n") 1255 if hasattr(cdp, 'eigen_alpha'): 1256 sys.stdout.write(format_float % ('eigen alpha:', cdp.eigen_alpha)) 1257 if hasattr(cdp, 'eigen_beta'): 1258 sys.stdout.write(format_float % ('eigen beta:', cdp.eigen_beta)) 1259 if hasattr(cdp, 'eigen_gamma'): 1260 sys.stdout.write(format_float % ('eigen gamma:', cdp.eigen_gamma)) 1261 1262 # The cone axis. 1263 if hasattr(cdp, 'axis_theta'): 1264 # The angles. 1265 sys.stdout.write(format_float % ('axis theta:', cdp.axis_theta)) 1266 sys.stdout.write(format_float % ('axis phi:', cdp.axis_phi)) 1267 1268 # The axis. 1269 axis = zeros(3, float64) 1270 spherical_to_cartesian([1.0, cdp.axis_theta, cdp.axis_phi], axis) 1271 sys.stdout.write(format_vect % ('axis:', axis)) 1272 1273 # Frame ordering. 1274 if hasattr(cdp, 'cone_theta_x') or hasattr(cdp, 'cone_theta_y') or hasattr(cdp, 'cone_theta') or hasattr(cdp, 'cone_sigma_max'): 1275 sys.stdout.write("\nFrame ordering:\n") 1276 if hasattr(cdp, 'cone_theta_x'): 1277 sys.stdout.write(format_float % ('cone theta_x:', cdp.cone_theta_x)) 1278 if hasattr(cdp, 'cone_theta_y'): 1279 sys.stdout.write(format_float % ('cone theta_y:', cdp.cone_theta_y)) 1280 if hasattr(cdp, 'cone_theta'): 1281 sys.stdout.write(format_float % ('cone theta:', cdp.cone_theta)) 1282 if hasattr(cdp, 'cone_sigma_max'): 1283 sys.stdout.write(format_float % ('sigma_max:', cdp.cone_sigma_max)) 1284 1285 # Minimisation statistics. 1286 if hasattr(cdp, 'chi2'): 1287 sys.stdout.write("\nMinimisation statistics:\n") 1288 if hasattr(cdp, 'chi2'): 1289 sys.stdout.write(format_float % ('chi2:', cdp.chi2)) 1290 1291 # Final spacing. 1292 sys.stdout.write("\n")
1293 1294
1295 - def read_results(self, model=None, pipe_name=None, pre_run=False):
1296 """Attempt to read old results files. 1297 1298 @keyword model: The frame order model. 1299 @type model: str 1300 @keyword pipe_name: The name of the data pipe to use for this model. 1301 @type pipe_name: str 1302 @keyword pre_run: A flag which if True will read the results file from the pre-run directory. 1303 @type pre_run: bool 1304 @return: True if the file exists and has been read, False otherwise. 1305 @rtype: bool 1306 """ 1307 1308 # The file name. 1309 base_dir = self.results_dir 1310 if pre_run: 1311 base_dir = self.pre_run_dir 1312 path = model_directory(model, base_dir=base_dir) + sep + 'results' 1313 1314 # Loop over the compression types. 1315 found = False 1316 for ext in ['.bz2', '', 'gz']: 1317 # The full file name. 1318 full_path = path + ext 1319 1320 # The file does not exist. 1321 if not access(full_path, F_OK): 1322 continue 1323 1324 # Create an empty data pipe. 1325 self.interpreter.pipe.create(pipe_name=pipe_name, pipe_type='frame order') 1326 1327 # Read the results file. 1328 self.interpreter.results.read(full_path) 1329 1330 # Results printout. 1331 self.print_results() 1332 1333 # The flag. 1334 found = True 1335 break 1336 1337 # Success. 1338 return found
1339 1340
1341 - def reorder_models(self, models=None):
1342 """Reorder the frame order models to enable the nested parameter copying protocol. 1343 1344 @keyword models: The frame order models to be used in the auto-analysis. 1345 @type models: list of str 1346 @return: The reordered frame order models. 1347 @rtype: list of str 1348 """ 1349 1350 # The correct order for the nesting protocol. 1351 order = [ 1352 MODEL_RIGID, 1353 MODEL_ROTOR, 1354 MODEL_ISO_CONE, 1355 MODEL_PSEUDO_ELLIPSE, 1356 MODEL_ISO_CONE_TORSIONLESS, 1357 MODEL_PSEUDO_ELLIPSE_TORSIONLESS, 1358 MODEL_FREE_ROTOR, 1359 MODEL_ISO_CONE_FREE_ROTOR, 1360 MODEL_PSEUDO_ELLIPSE_FREE_ROTOR, 1361 MODEL_DOUBLE_ROTOR 1362 ] 1363 1364 # Create the new list. 1365 new = [] 1366 for i in range(len(order)): 1367 if order[i] in models: 1368 new.append(order[i]) 1369 1370 # Sanity check - the models must all be in this list. 1371 for i in range(len(models)): 1372 if models[i] not in order: 1373 raise RelaxError("The frame order model '%s' is unknown." % models[i]) 1374 1375 # Return the reordered list. 1376 return new
1377 1378
1379      def results_output(self, dir=None, model=None, results_file=True, simulation=True):
1380          """Create visual representations of the frame order results for the given model.
1381 
1382          This will call the following user functions:
1383 
1384              - results.write to output a results file (turned off if the results_file argument is False).
1385              - rdc.corr_plot and pcs.corr_plot to visualise the quality of the data and fit as 2D Grace correlation plots.
1386              - frame_order.pdb_model to generate a PDB representation of the frame order motions.
1387              - frame_order.simulate to perform a pseudo-Brownian frame order dynamics simulation.
1388 
1389          A relax script called 'pymol_display.py' will be created for easily visualising the PDB representation from the frame_order.pdb_model user function.
1390 
1391 
1392          The output therefore includes a PDB representation of the motions (the 'cone.pdb' file located in each model directory) together with the 'pymol_display.py' relax script, also created in the model directory, for displaying the average domain positions and the cone/motion representation in PyMOL.
1393 
1394          @keyword dir:           The output directory.
1395          @type dir:              str
1396          @keyword model:         The frame order model.  This should match the model of the current data pipe, unless the special value of 'final' is used to indicate the visualisation of the final results.
1397          @type model:            str
1398          @keyword results_file:  A flag which if True will cause a results file to be created via the results.write user function.
1399          @type results_file:     bool
1400          @keyword simulation:    A flag which if True will allow the pseudo-Brownian frame order dynamics simulation to be run.
1401          @type simulation:       bool
1402          """
1403 
1404          # Printout.
1405          subsubtitle(file=sys.stdout, text="Generating the results and data visualisation files")
1406 
1407          # Sanity check.
1408          if model != 'final' and model.replace(' permutation A', '').replace(' permutation B', '') != cdp.model:
1409              raise RelaxError("The model '%s' does not match the model '%s' of the current data pipe." % (model.replace(' permutation A', '').replace(' permutation B', ''), cdp.model))
1410 
1411          # The results file.
1412          if results_file:
1413              self.interpreter.results.write(dir=dir, compress_type=self.results_compress_type, force=True)
1414 
1415          # The RDC and PCS correlation plots.
1416          self.interpreter.rdc.corr_plot(dir=dir, force=True)
1417          self.interpreter.pcs.corr_plot(dir=dir, force=True)
1418 
1419          # The PDB representation of the model.
1420          self.interpreter.frame_order.pdb_model(dir=dir, force=True)
1421 
1422          # Create the visualisation script for the PDB representation.
1423          script = open_write_file(file_name='pymol_display.py', dir=dir, force=True)
1424          script.write("# relax script for displaying the frame order results of this '%s' model in PyMOL.\n\n" % model)
1425          script.write("# PyMOL visualisation.\n")
1426          script.write("pymol.frame_order()\n")
1427          script.close()
1428 
1429          # The uniform distribution of structures.
1430          if simulation:
1431              self.interpreter.frame_order.distribute(dir=dir, total=self.dist_total, max_rotations=self.dist_max_rotations, force=True)
1432 
1433          # The pseudo-Brownian dynamics simulation.
1434          if simulation:
1435              self.interpreter.frame_order.simulate(dir=dir, step_size=self.brownian_step_size, snapshot=self.brownian_snapshot, total=self.brownian_total, force=True)
1436 
1437 
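For reference, the visualisation script written out by the code above is minimal.  Taking the 'rotor' model as an example, the generated pymol_display.py file contains just:

    # relax script for displaying the frame order results of this 'rotor' model in PyMOL.

    # PyMOL visualisation.
    pymol.frame_order()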
1438      def sobol_setup(self, info=None):
1439          """Correctly handle the frame_order.sobol_setup user function.
1440 
1441          @keyword info:  The information from the Optimisation_settings.get_*_sobol_info() function.
1442          @type info:     tuple of int or None
1443          """
1444 
1445          # Unpack the info.
1446          max_num, oversample = info
1447 
1448          # Nothing to do.
1449          if max_num == None:
1450              return
1451 
1452          # No oversampling specified.
1453          if oversample == None:
1454              self.interpreter.frame_order.sobol_setup(max_num=max_num)
1455 
1456          # Full setup.
1457          else:
1458              self.interpreter.frame_order.sobol_setup(max_num=max_num, oversample=oversample)
1459 
1460 
1461 
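The info tuple is the (max_num, oversample) pair returned by the get_grid_sobol_info() and get_min_sobol_info() methods of the Optimisation_settings class below, so within the protocol the calls presumably take the form shown in this sketch (the pairing with an Optimisation_settings object 'opt' is an assumption about the calling code, not part of this method):

    # Illustrative pairing with the user-supplied optimisation settings object 'opt'.
    self.sobol_setup(info=opt.get_grid_sobol_info(i))    # Before grid search iteration i.
    self.sobol_setup(info=opt.get_min_sobol_info(i))     # Before minimisation iteration i.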
1462  class Optimisation_settings:
1463      """A special object for storing the settings for optimisation.
1464 
1465      This includes grid search information, zooming grid search settings, and settings for the minimisation.
1466      """
1467 
1468      def __init__(self):
1469          """Set up the optimisation settings object."""
1470 
1471          # Initialise some private structures for the grid search.
1472          self._grid_count = 0
1473          self._grid_incs = []
1474          self._grid_zoom = []
1475          self._grid_sobol_max_points = []
1476          self._grid_sobol_oversample = []
1477          self._grid_quad_int = []
1478          self._grid_pivot_search = []
1479 
1480          # Initialise some private structures for the minimisation.
1481          self._min_count = 0
1482          self._min_algor = []
1483          self._min_func_tol = []
1484          self._min_max_iter = []
1485          self._min_sobol_max_points = []
1486          self._min_sobol_oversample = []
1487          self._min_quad_int = []
1488 
1489 
1490      def _check_index(self, i, iter_type=None):
1491          """Check that the user supplied iteration index makes sense.
1492 
1493          @param i:           The iteration index.
1494          @type i:            int
1495          @keyword iter_type: The type of the index.  This can be either 'grid' or 'min'.
1496          @type iter_type:    str
1497          @raises RelaxError: If the iteration index is invalid.
1498          """
1499 
1500          # Check the index.
1501          is_int(i, name='i', can_be_none=False)
1502 
1503          # Is the value too high?
1504          if iter_type == 'grid' and i >= self._grid_count:
1505              raise RelaxError("The iteration index %i is too high, only %i grid searches are set up." % (i, self._grid_count))
1506          if iter_type == 'min' and i >= self._min_count:
1507              raise RelaxError("The iteration index %i is too high, only %i minimisations are set up." % (i, self._min_count))
1508 
1509 
1510      def add_grid(self, inc=None, zoom=None, sobol_max_points=None, sobol_oversample=None, quad_int=False, pivot_search=True):
1511          """Add a grid search step.
1512 
1513          @keyword inc:               The grid search size (the number of increments per dimension).
1514          @type inc:                  int
1515          @keyword zoom:              The grid zoom level for this grid search.
1516          @type zoom:                 None or int
1517          @keyword sobol_max_points:  The maximum number of Sobol' points for the PCS numerical integration to use in the grid search.  See the frame_order.sobol_setup user function for details.  If not supplied, then the previous value will be used.
1518          @type sobol_max_points:     None or int
1519          @keyword sobol_oversample:  The Sobol' oversampling factor.  See the frame_order.sobol_setup user function for details.
1520          @type sobol_oversample:     None or int
1521          @keyword quad_int:          The SciPy quadratic integration flag.  See the frame_order.quad_int user function for details.
1522          @type quad_int:             bool
1523          @keyword pivot_search:      A flag which if False will prevent the pivot point from being included in the grid search.
1524          @type pivot_search:         bool
1525          """
1526 
1527          # Value checking, as this will be set up by a user.
1528          is_int(inc, name='inc', can_be_none=False)
1529          is_int(zoom, name='zoom', can_be_none=True)
1530          is_int(sobol_max_points, name='sobol_max_points', can_be_none=True)
1531          is_int(sobol_oversample, name='sobol_oversample', can_be_none=True)
1532          is_bool(quad_int, name='quad_int')
1533 
1534          # Store the values.
1535          self._grid_incs.append(inc)
1536          self._grid_zoom.append(zoom)
1537          self._grid_sobol_max_points.append(sobol_max_points)
1538          self._grid_sobol_oversample.append(sobol_oversample)
1539          self._grid_quad_int.append(quad_int)
1540          self._grid_pivot_search.append(pivot_search)
1541 
1542          # Increment the count.
1543          self._grid_count += 1
1544 
1545 
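As the method only validates and stores the values, a user script typically just chains a few of these calls to define a zooming grid search series.  A minimal sketch, with purely illustrative numbers:

    # Build the optimisation settings with a zooming grid search (illustrative values only).
    from auto_analyses.frame_order import Optimisation_settings
    opt = Optimisation_settings()
    opt.add_grid(inc=11, zoom=0, sobol_max_points=100)
    opt.add_grid(inc=11, zoom=1, sobol_max_points=100)
    opt.add_grid(inc=11, zoom=2, sobol_max_points=500)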
1546      def add_min(self, min_algor='simplex', func_tol=1e-25, max_iter=1000000, sobol_max_points=None, sobol_oversample=None, quad_int=False):
1547          """Add an optimisation step.
1548 
1549          @keyword min_algor:         The optimisation technique.
1550          @type min_algor:            str
1551          @keyword func_tol:          The minimisation function tolerance cutoff to terminate optimisation (see the minimise.execute user function).
1552          @type func_tol:             float
1553          @keyword max_iter:          The maximum number of iterations for the optimisation.
1554          @type max_iter:             int
1555          @keyword sobol_max_points:  The maximum number of Sobol' points for the PCS numerical integration to use in the optimisations after the grid search.  See the frame_order.sobol_setup user function for details.  If not supplied, then the previous value will be used.
1556          @type sobol_max_points:     None or int
1557          @keyword sobol_oversample:  The Sobol' oversampling factor.  See the frame_order.sobol_setup user function for details.
1558          @type sobol_oversample:     None or int
1559          @keyword quad_int:          The SciPy quadratic integration flag.  See the frame_order.quad_int user function for details.
1560          @type quad_int:             bool
1561          """
1562 
1563          # Value checking, as this will be set up by a user.
1564          is_str(min_algor, name='min_algor', can_be_none=False)
1565          is_float(func_tol, name='func_tol', can_be_none=True)
1566          is_int(max_iter, name='max_iter', can_be_none=True)
1567          is_int(sobol_max_points, name='sobol_max_points', can_be_none=True)
1568          is_int(sobol_oversample, name='sobol_oversample', can_be_none=True)
1569          is_bool(quad_int, name='quad_int')
1570 
1571          # Store the values.
1572          self._min_algor.append(min_algor)
1573          self._min_func_tol.append(func_tol)
1574          self._min_max_iter.append(max_iter)
1575          self._min_sobol_max_points.append(sobol_max_points)
1576          self._min_sobol_oversample.append(sobol_oversample)
1577          self._min_quad_int.append(quad_int)
1578 
1579          # Increment the count.
1580          self._min_count += 1
1581 
1582 
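Minimisation steps accumulate in the same way.  Continuing the sketch from add_grid() above, two progressively stricter simplex steps could be added with (values again purely illustrative):

    # Two simplex minimisation steps with increasing precision and Sobol' point numbers (illustrative).
    opt.add_min(min_algor='simplex', func_tol=1e-2, max_iter=1000, sobol_max_points=100)
    opt.add_min(min_algor='simplex', func_tol=1e-4, max_iter=100000, sobol_max_points=1000)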
1583      def get_grid_inc(self, i):
1584          """Return the grid increments for the given iteration.
1585 
1586          @param i:   The grid search iteration from the loop_grid() method.
1587          @type i:    int
1588          @return:    The grid increments for the iteration.
1589          @rtype:     int
1590          """
1591 
1592          # Check the index.
1593          self._check_index(i, iter_type='grid')
1594 
1595          # Return the value.
1596          return self._grid_incs[i]
1597 
1598 
1599      def get_grid_pivot_search(self, i):
1600          """Return the pivot grid search flag.
1601 
1602          @param i:   The grid search iteration from the loop_grid() method.
1603          @type i:    int
1604          @return:    The pivot grid search flag.
1605          @rtype:     bool
1606          """
1607 
1608          # Check the index.
1609          self._check_index(i, iter_type='grid')
1610 
1611          # Return the value.
1612          return self._grid_pivot_search[i]
1613 
1614 
1615      def get_grid_quad_int(self, i):
1616          """Return the SciPy quadratic integration flag for the given iteration.
1617 
1618          @param i:   The grid search iteration from the loop_grid() method.
1619          @type i:    int
1620          @return:    The SciPy quadratic integration flag for the iteration.
1621          @rtype:     bool
1622          """
1623 
1624          # Check the index.
1625          self._check_index(i, iter_type='grid')
1626 
1627          # Return the value.
1628          return self._grid_quad_int[i]
1629 
1630 
1631      def get_grid_sobol_info(self, i):
1632          """Return the number of numerical integration points and oversampling factor for the given iteration.
1633 
1634          @param i:   The grid search iteration from the loop_grid() method.
1635          @type i:    int
1636          @return:    The number of numerical integration points for the iteration and the oversampling factor.
1637          @rtype:     int, int
1638          """
1639 
1640          # Check the index.
1641          self._check_index(i, iter_type='grid')
1642 
1643          # Return the value.
1644          return self._grid_sobol_max_points[i], self._grid_sobol_oversample[i]
1645 
1646 
1647      def get_grid_zoom_level(self, i):
1648          """Return the grid zoom level for the given iteration.
1649 
1650          @param i:   The grid search iteration from the loop_grid() method.
1651          @type i:    int
1652          @return:    The grid zoom level for the iteration.
1653          @rtype:     None or int
1654          """
1655 
1656          # Check the index.
1657          self._check_index(i, iter_type='grid')
1658 
1659          # Return the value.
1660          return self._grid_zoom[i]
1661 
1662 
1663      def get_min_algor(self, i):
1664          """Return the minimisation algorithm for the given iteration.
1665 
1666          @param i:   The minimisation iteration from the loop_min() method.
1667          @type i:    int
1668          @return:    The minimisation algorithm for the iteration.
1669          @rtype:     str
1670          """
1671 
1672          # Check the index.
1673          self._check_index(i, iter_type='min')
1674 
1675          # Return the value.
1676          return self._min_algor[i]
1677 
1678 
1679      def get_min_func_tol(self, i):
1680          """Return the minimisation function tolerance level for the given iteration.
1681 
1682          @param i:   The minimisation iteration from the loop_min() method.
1683          @type i:    int
1684          @return:    The minimisation function tolerance level for the iteration.
1685          @rtype:     float
1686          """
1687 
1688          # Check the index.
1689          self._check_index(i, iter_type='min')
1690 
1691          # Return the value.
1692          return self._min_func_tol[i]
1693 
1694 
1695      def get_min_max_iter(self, i):
1696          """Return the maximum number of iterations for the optimisation for the given iteration.
1697 
1698          @param i:   The minimisation iteration from the loop_min() method.
1699          @type i:    int
1700          @return:    The maximum number of iterations for the optimisation for the iteration.
1701          @rtype:     int
1702          """
1703 
1704          # Check the index.
1705          self._check_index(i, iter_type='min')
1706 
1707          # Return the value.
1708          return self._min_max_iter[i]
1709 
1710 
1711      def get_min_quad_int(self, i):
1712          """Return the SciPy quadratic integration flag for the given iteration.
1713 
1714          @param i:   The minimisation iteration from the loop_min() method.
1715          @type i:    int
1716          @return:    The SciPy quadratic integration flag for the iteration.
1717          @rtype:     bool
1718          """
1719 
1720          # Check the index.
1721          self._check_index(i, iter_type='min')
1722 
1723          # Return the value.
1724          return self._min_quad_int[i]
1725 
1726 
1727      def get_min_sobol_info(self, i):
1728          """Return the number of numerical integration points and oversampling factor for the given iteration.
1729 
1730          @param i:   The minimisation iteration from the loop_min() method.
1731          @type i:    int
1732          @return:    The number of numerical integration points for the iteration and the oversampling factor.
1733          @rtype:     int, int
1734          """
1735 
1736          # Check the index.
1737          self._check_index(i, iter_type='min')
1738 
1739          # Return the value.
1740          return self._min_sobol_max_points[i], self._min_sobol_oversample[i]
1741 
1742 
1743      def has_grid(self):
1744          """Is a grid search set up?
1745 
1746          @return:    True if a grid search has been set up.
1747          @rtype:     bool
1748          """
1749 
1750          # Grid information is present.
1751          if self._grid_count > 0:
1752              return True
1753 
1754          # No grid.
1755          return False
1756 
1757 
1758      def loop_grid(self):
1759          """Generator method for looping over all grid search iterations.
1760 
1761          @return:    The grid search iteration.
1762          @rtype:     int
1763          """
1764 
1765          # Loop over the grid searches.
1766          for i in range(self._grid_count):
1767              yield i
1768 
1769 
1770      def loop_min(self):
1771          """Generator method for looping over all minimisation iterations.
1772 
1773          @return:    The minimisation iteration.
1774          @rtype:     int
1775          """
1776 
1777          # Loop over the minimisations.
1778          for i in range(self._min_count):
1779              yield i
1780 
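Taken together, the loop_grid() and loop_min() generators and the get_*() accessors above define how a protocol consumes these settings.  A rough sketch of that consumption pattern, with 'opt' being an Optimisation_settings instance as in the earlier examples (the loop bodies are placeholders, not the actual protocol code):

    # Illustrative consumption of the stored settings by a protocol.
    for i in opt.loop_grid():
        inc = opt.get_grid_inc(i)
        zoom = opt.get_grid_zoom_level(i)
        max_points, oversample = opt.get_grid_sobol_info(i)
        # ... set up the Sobol' integration and run the grid search here ...

    for i in opt.loop_min():
        min_algor = opt.get_min_algor(i)
        func_tol = opt.get_min_func_tol(i)
        max_iter = opt.get_min_max_iter(i)
        # ... run the minimisation here ...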