
Source Code for Module auto_analyses.frame_order

   1  ############################################################################### 
   2  #                                                                             # 
   3  # Copyright (C) 2011-2016 Edward d'Auvergne                                   # 
   4  #                                                                             # 
   5  # This file is part of the program relax (http://www.nmr-relax.com).          # 
   6  #                                                                             # 
   7  # This program is free software: you can redistribute it and/or modify        # 
   8  # it under the terms of the GNU General Public License as published by        # 
   9  # the Free Software Foundation, either version 3 of the License, or           # 
  10  # (at your option) any later version.                                         # 
  11  #                                                                             # 
  12  # This program is distributed in the hope that it will be useful,             # 
  13  # but WITHOUT ANY WARRANTY; without even the implied warranty of              # 
  14  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the               # 
  15  # GNU General Public License for more details.                                # 
  16  #                                                                             # 
  17  # You should have received a copy of the GNU General Public License           # 
  18  # along with this program.  If not, see <http://www.gnu.org/licenses/>.       # 
  19  #                                                                             # 
  20  ############################################################################### 
  21   
  22  # Module docstring. 
  23  """The automated frame order analysis protocol. 
  24   
  25   
  26  Usage 
  27  ===== 
  28   
  29  To use this analysis, you should import the following into your script: 
  30   
  31      - Frame_order_analysis:  This is a Python class which contains the automated protocol.  Initialising the class will execute the full analysis.  See its documentation for all the options it accepts. 
  32      - Optimisation_settings:  This is a Python class which is used to set up and store the optimisation settings used in the automated protocol.  This allows for grid searches, zooming grid searches, minimisation settings, quasi-random Sobol' numerical integration of the PCS, and SciPy quadratic numerical integration of the PCS to be specified. 
  33   
  34  See the sample scripts for examples of how these are used.  In addition, the following two functions provide summaries of the analysis: 
  35   
   36      - count_sobol_points:  This function will summarise the number of quasi-random Sobol' points used in the PCS numerical integration.  The table it creates is very useful for judging the quality of the current optimisation settings. 
  37      - summarise:  This function will summarise all of the current frame order results. 
  38   
   39  Both of these functions will be called at the end of the auto-analysis.  However, they can also be used in simple scripts to summarise the results while an analysis is still progressing. 
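
As a rough sketch only (the data pipe names, results directory, and optimisation settings below are purely illustrative, and an add_min() method matching add_grid() is assumed here for the minimisation settings), a driving script would have the general form::

    # relax module imports.
    from auto_analyses.frame_order import Frame_order_analysis, Optimisation_settings

    # The zooming grid search and minimisation settings.
    OPT = Optimisation_settings()
    OPT.add_grid(inc=11, zoom=0, sobol_max_points=100)
    OPT.add_grid(inc=11, zoom=1, sobol_max_points=100)
    OPT.add_min(min_algor='simplex', func_tol=1e-2, sobol_max_points=1000)

    # Execute the full analysis ('data_full' and 'data_subset' are data pipes already loaded with the RDC and PCS data).
    Frame_order_analysis(data_pipe_full='data_full', data_pipe_subset='data_subset', pipe_bundle='frame order', results_dir='frame_order_results', opt_rigid=OPT, opt_subset=OPT, opt_full=OPT, opt_mc=OPT, mc_sim_num=100)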
  40   
  41   
  42   
  43  The nested model parameter copying protocol 
  44  =========================================== 
  45   
   46  To allow the analysis to complete in under 1,000,000 years, the trick of copying parameters from simpler nested models is used in this auto-analysis.  The protocol is split into four categories for the average domain position, the pivot point, the motional eigenframe and the parameters of ordering.  In practice, the models are therefore optimised in a fixed order (rigid, rotor, isotropic cone, pseudo-ellipse, the torsionless variants, the free rotor variants and finally the double rotor), so that each model can copy starting values from data pipes which have already been optimised (see the reorder_models() method below).  These categories use the fact that the free rotor and torsionless models are the two extrema of the models where the torsion angle is restricted, whereby sigma_max is pi and 0 respectively. 
  47  """ 
  48   
  49   
  50  # Python module imports. 
  51  from math import pi 
  52  from numpy import float64, zeros 
  53  from os import F_OK, access, getcwd, sep 
  54  import sys 
  55   
  56  # relax module imports. 
  57  from data_store import Relax_data_store; ds = Relax_data_store() 
  58  from lib.arg_check import is_bool, is_float, is_int, is_str 
  59  from lib.errors import RelaxError 
  60  from lib.frame_order.conversions import convert_axis_alpha_to_spherical 
  61  from lib.frame_order.variables import MODEL_DOUBLE_ROTOR, MODEL_FREE_ROTOR, MODEL_ISO_CONE, MODEL_ISO_CONE_FREE_ROTOR, MODEL_ISO_CONE_TORSIONLESS, MODEL_LIST, MODEL_LIST_FREE_ROTORS, MODEL_LIST_ISO_CONE, MODEL_LIST_NONREDUNDANT, MODEL_LIST_PSEUDO_ELLIPSE, MODEL_PSEUDO_ELLIPSE, MODEL_PSEUDO_ELLIPSE_FREE_ROTOR, MODEL_PSEUDO_ELLIPSE_TORSIONLESS, MODEL_RIGID, MODEL_ROTOR 
  62  from lib.geometry.angles import wrap_angles 
  63  from lib.geometry.coord_transform import spherical_to_cartesian 
  64  from lib.io import open_write_file 
  65  from lib.text.sectioning import subtitle, subsubtitle, title 
  66  from lib.text.table import MULTI_COL, format_table 
  67  from pipe_control import pipes, results 
  68  from pipe_control.mol_res_spin import return_spin, spin_loop 
  69  from pipe_control.structure.mass import pipe_centre_of_mass 
  70  from prompt.interpreter import Interpreter 
  71  from specific_analyses.frame_order.data import generate_pivot 
  72  from specific_analyses.frame_order import optimisation 
  73  from status import Status; status = Status() 
  74   
  75   
  76   
  77  def count_sobol_points(file_name='sobol_point_count', dir=None, force=True):
  78      """Count the number of Sobol' points used for the PCS numerical integration.
  79  
  80      This function can be used while a frame order analysis is running to summarise the results.  It will create a table of the number of Sobol' points used, which can then be used to judge the quality of the integration.
  81  
  82  
  83      @keyword file_name:     The file to save the table into.
  84      @type file_name:        str
  85      @keyword dir:           The optional directory to place the file into.  If specified, the results files will also be searched for in this directory.
  86      @type dir:              None or str
  87      @keyword force:         A flag which if True will cause any preexisting file to be overwritten.
  88      @type force:            bool
  89      """
  90  
  91      # Store the current data pipe name.
  92      original_pipe = pipes.cdp_name()
  93  
  94      # The model names, titles and directories, including axis permutations.
  95      models = []
  96      model_titles = []
  97      dirs = []
  98      for model in MODEL_LIST:
  99          # Add the base model.
 100          models.append(model)
 101          title = model[0].upper() + model[1:]
 102          model_titles.append(title)
 103          dirs.append(model_directory(model, base_dir=dir))
 104  
 105          # Axis permutations.
 106          if model in MODEL_LIST_ISO_CONE + MODEL_LIST_PSEUDO_ELLIPSE:
 107              # The A permutation.
 108              models.append("%s permutation A" % model)
 109              model_titles.append(title + ' (perm A)')
 110              dirs.append(model_directory(models[-1], base_dir=dir))
 111  
 112              # The B permutation.
 113              if model in MODEL_LIST_PSEUDO_ELLIPSE:
 114                  models.append("%s permutation B" % model)
 115                  model_titles.append(title + ' (perm B)')
 116                  dirs.append(model_directory(models[-1], base_dir=dir))
 117  
 118      # Loop over the models.
 119      count = {}
 120      count_total = {}
 121      percentage = {}
 122      for i in range(len(models)):
 123          # Skip the rigid model.
 124          if models[i] == MODEL_RIGID:
 125              continue
 126  
 127          # No file.
 128          found = False
 129          for ext in ['.bz2', '', 'gz']:
 130              file = dirs[i]+sep+'results'+ext
 131              if access(file, F_OK):
 132                  found = True
 133                  break
 134          if not found:
 135              continue
 136  
 137          # Create a data pipe.
 138          pipe_name = 'temp %s' % models[i]
 139          if pipes.has_pipe(pipe_name):
 140              pipes.delete(pipe_name)
 141          pipes.create(pipe_name, 'frame order')
 142  
 143          # Load the data.
 144          results.read(file='results', dir=dirs[i])
 145  
 146          # SciPy quadratic integration has been used.
 147          if hasattr(cdp, 'quad_int') and cdp.quad_int:
 148              count[models[i]] = 'Quad int'
 149              count_total[models[i]] = ''
 150              percentage[models[i]] = ''
 151              continue
 152  
 153          # Count the Sobol' points used.
 154          if not hasattr(cdp, 'sobol_points_used'):
 155              optimisation.count_sobol_points()
 156          count[models[i]] = cdp.sobol_points_used
 157          count_total[models[i]] = cdp.sobol_max_points
 158          percentage[models[i]] = "%10.3f" % (float(cdp.sobol_points_used) / float(cdp.sobol_max_points) * 100.0) + '%'
 159  
 160          # Delete the temporary data pipe.
 161          pipes.delete(pipe_name)
 162  
 163      # Initialise the output string.
 164      string = "Quasi-random Sobol' numerical PCS integration point counting:\n\n"
 165  
 166      # Assemble the table contents.
 167      headings = [["Model", "Total points", "Used points", "Percentage"]]
 168      contents = []
 169      for model in models:
 170          if model not in count:
 171              continue
 172          contents.append([model, count_total[model], count[model], percentage[model]])
 173  
 174      # Add the table to the output string.
 175      string += format_table(headings=headings, contents=contents)
 176  
 177      # Stdout output.
 178      sys.stdout.write("\n\n\n")
 179      sys.stdout.write(string)
 180  
 181      # Save to file.
 182      file = open_write_file(file_name=file_name, dir=dir, force=force)
 183      file.write(string)
 184      file.close()
 185  
 186      # Switch back to the original data pipe, if it exists.
 187      if original_pipe:
 188          pipes.switch(original_pipe)
 189  
 190  
 191  def model_directory(model, base_dir=None):
 192      """Return the directory to be used for the model.
 193  
 194      @param model:       The frame order model name.
 195      @type model:        str
 196      @keyword base_dir:  The optional base directory to prepend to the file name.
 197      @type base_dir:     None or str
 198      """
 199  
 200      # Convert the model name.
 201      dir = model.replace(' ', '_')
 202      dir = dir.replace(',', '')
 203  
 204      # Process the base directory.
 205      if base_dir == None:
 206          base_dir = ''
 207      elif base_dir[-1] != sep:
 208          base_dir += sep
 209  
 210      # Return the full path.
 211      return base_dir + dir
 212  
 213  
 214  def summarise(file_name='summary', dir=None, force=True):
 215      """Summarise the frame order auto-analysis results.
 216  
 217      This function can be used while a frame order analysis is running to summarise the results.  It will create a table of the statistics and model parameters for all of the optimised frame order models for which results files currently exist.
 218  
 219  
 220      @keyword file_name:     The file to save the table into.
 221      @type file_name:        str
 222      @keyword dir:           The optional directory to place the file into.  If specified, the results files will also be searched for in this directory.
 223      @type dir:              None or str
 224      @keyword force:         A flag which if True will cause any preexisting file to be overwritten.
 225      @type force:            bool
 226      """
 227  
 228      # Store the current data pipe name.
 229      original_pipe = pipes.cdp_name()
 230  
 231      # The model names, titles and directories, including axis permutations.
 232      models = []
 233      model_titles = []
 234      dirs = []
 235      for model in MODEL_LIST:
 236          # Add the base model.
 237          models.append(model)
 238          title = model[0].upper() + model[1:]
 239          model_titles.append(title)
 240          dirs.append(model_directory(model, base_dir=dir))
 241  
 242          # Axis permutations.
 243          if model in MODEL_LIST_ISO_CONE + MODEL_LIST_PSEUDO_ELLIPSE:
 244              # The A permutation.
 245              models.append("%s permutation A" % model)
 246              model_titles.append(title + ' (perm A)')
 247              dirs.append(model_directory(models[-1], base_dir=dir))
 248  
 249              # The B permutation.
 250              if model in MODEL_LIST_PSEUDO_ELLIPSE:
 251                  models.append("%s permutation B" % model)
 252                  model_titles.append(title + ' (perm B)')
 253                  dirs.append(model_directory(models[-1], base_dir=dir))
 254  
 255      # The analysis directory and structures.
 256      contents = []
 257      contents.append(["Analysis directory", getcwd()])
 258      string = format_table(contents=contents)
 259  
 260      # Table header.
 261      headings1 = []
 262      headings1.append(["Model", "k", "chi2", "AIC", "Motional eigenframe", MULTI_COL, MULTI_COL, "Cone half angles (deg)", MULTI_COL, MULTI_COL, MULTI_COL])
 263      headings1.append([None, None, None, None, "a", "b/th", "g/ph", "thx", "thy", "smax", "smax2"])
 264  
 265      # 2nd table header.
 266      headings2 = []
 267      headings2.append(["Model", "Average position", MULTI_COL, MULTI_COL, MULTI_COL, MULTI_COL, MULTI_COL, "Pivot point", MULTI_COL, MULTI_COL])
 268      headings2.append([None, "x", "y", "z", "a", "b", "g", "x", "y", "z"])
 269  
 270      # Loop over the models.
 271      contents1 = []
 272      contents2 = []
 273      for i in range(len(models)):
 274          # No file.
 275          found = False
 276          for ext in ['.bz2', '', 'gz']:
 277              file = dirs[i]+sep+'results'+ext
 278              if access(file, F_OK):
 279                  found = True
 280                  break
 281          if not found:
 282              continue
 283  
 284          # Create a data pipe.
 285          pipe_name = 'temp %s' % models[i]
 286          if pipes.has_pipe(pipe_name):
 287              pipes.delete(pipe_name)
 288          pipes.create(pipe_name, 'frame order')
 289  
 290          # Load the data.
 291          results.read(file='results', dir=dirs[i])
 292  
 293          # Number of params.
 294          k = len(cdp.params)
 295  
 296          # Format the model information.
 297          contents1.append([model_titles[i], k, cdp.chi2, cdp.chi2 + 2*k, None, None, None, None, None, None, None])
 298          contents2.append([model_titles[i], 0.0, 0.0, 0.0, None, None, None, None, None, None])
 299  
 300          # Eigen alpha.
 301          if hasattr(cdp, 'eigen_alpha') and cdp.eigen_alpha != None:
 302              contents1[-1][4] = cdp.eigen_alpha
 303  
 304          # Eigen beta.
 305          if hasattr(cdp, 'eigen_beta') and cdp.eigen_beta != None:
 306              contents1[-1][5] = cdp.eigen_beta
 307          elif hasattr(cdp, 'axis_theta') and cdp.axis_theta != None:
 308              contents1[-1][5] = cdp.axis_theta
 309  
 310          # Eigen gamma.
 311          if hasattr(cdp, 'eigen_gamma') and cdp.eigen_gamma != None:
 312              contents1[-1][6] = cdp.eigen_gamma
 313          elif hasattr(cdp, 'axis_phi') and cdp.axis_phi != None:
 314              contents1[-1][6] = cdp.axis_phi
 315  
 316          # Convert the axis alpha angle to spherical angles for comparison.
 317          if hasattr(cdp, 'axis_alpha') and cdp.model in [MODEL_ROTOR, MODEL_FREE_ROTOR]:
 318              axis_theta, axis_phi = convert_axis_alpha_to_spherical(alpha=cdp.axis_alpha, pivot=generate_pivot(order=1, pipe_name=pipe_name), point=pipe_centre_of_mass(verbosity=0))
 319              contents1[-1][5] = wrap_angles(axis_theta, 0.0, 2.0*pi)
 320              contents1[-1][6] = wrap_angles(axis_phi, 0.0, 2.0*pi)
 321  
 322          # Order x.
 323          if hasattr(cdp, 'cone_theta_x') and cdp.cone_theta_x != None:
 324              contents1[-1][7] = cdp.cone_theta_x / 2.0 / pi * 360.0
 325          elif hasattr(cdp, 'cone_theta') and cdp.cone_theta != None:
 326              contents1[-1][7] = cdp.cone_theta / 2.0 / pi * 360.0
 327  
 328          # Order y.
 329          if hasattr(cdp, 'cone_theta_y') and cdp.cone_theta_y != None:
 330              contents1[-1][8] = cdp.cone_theta_y / 2.0 / pi * 360.0
 331  
 332          # Order torsion.
 333          if hasattr(cdp, 'cone_sigma_max') and cdp.cone_sigma_max != None:
 334              contents1[-1][9] = cdp.cone_sigma_max / 2.0 / pi * 360.0
 335  
 336          # Order torsion 2.
 337          if hasattr(cdp, 'cone_sigma_max_2') and cdp.cone_sigma_max_2 != None:
 338              contents1[-1][10] = cdp.cone_sigma_max_2 / 2.0 / pi * 360.0
 339  
 340          # The average position parameters.
 341          if hasattr(cdp, 'ave_pos_x') and cdp.ave_pos_x != None:
 342              contents2[-1][1] = cdp.ave_pos_x
 343          if hasattr(cdp, 'ave_pos_y') and cdp.ave_pos_y != None:
 344              contents2[-1][2] = cdp.ave_pos_y
 345          if hasattr(cdp, 'ave_pos_z') and cdp.ave_pos_z != None:
 346              contents2[-1][3] = cdp.ave_pos_z
 347          if hasattr(cdp, 'ave_pos_alpha') and cdp.ave_pos_alpha != None:
 348              contents2[-1][4] = cdp.ave_pos_alpha
 349          if hasattr(cdp, 'ave_pos_beta') and cdp.ave_pos_beta != None:
 350              contents2[-1][5] = cdp.ave_pos_beta
 351          if hasattr(cdp, 'ave_pos_gamma') and cdp.ave_pos_gamma != None:
 352              contents2[-1][6] = cdp.ave_pos_gamma
 353  
 354          # The pivot point.
 355          contents2[-1][7] = cdp.pivot_x
 356          contents2[-1][8] = cdp.pivot_y
 357          contents2[-1][9] = cdp.pivot_z
 358  
 359          # Delete the temporary data pipe.
 360          pipes.delete(pipe_name)
 361  
 362      # Add the tables.
 363      string += format_table(headings=headings1, contents=contents1, custom_format=[None, None, "%.2f", "%.2f", "%.3f", "%.3f", "%.3f", "%.2f", "%.2f", "%.2f", "%.2f"])
 364      string += format_table(headings=headings2, contents=contents2, custom_format=[None, "%.3f", "%.3f", "%.3f", "%.3f", "%.3f", "%.3f", "%.3f", "%.3f", "%.3f"])
 365  
 366      # Stdout output.
 367      sys.stdout.write("\n\n\n")
 368      sys.stdout.write(string)
 369  
 370      # Save to file.
 371      file = open_write_file(file_name=file_name, dir=dir, force=force)
 372      file.write(string)
 373      file.close()
 374  
 375      # Switch back to the original data pipe, if it exists.
 376      if original_pipe:
 377          pipes.switch(original_pipe)
378 379 380
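# Illustrative note (a hedged sketch, not part of the module):  while an analysis is still running, the
# two functions above can be driven from a trivial monitoring script, with the 'dir' argument pointing
# at the results_dir of the running Frame_order_analysis, for example:
#
#     from auto_analyses.frame_order import count_sobol_points, summarise
#     count_sobol_points(dir='frame_order_results', force=True)
#     summarise(dir='frame_order_results', force=True)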
381 -class Frame_order_analysis:
382 """The frame order auto-analysis protocol.""" 383 384 # Debugging and test suite variables. 385 _final_state = True 386
387 - def __init__(self, data_pipe_full=None, data_pipe_subset=None, pipe_bundle=None, results_dir=None, pre_run_dir=None, opt_rigid=None, opt_subset=None, opt_full=None, opt_mc=None, mc_sim_num=500, models=MODEL_LIST_NONREDUNDANT, brownian_step_size=2.0, brownian_snapshot=10, brownian_total=1000, dist_total=1000, dist_max_rotations=1000000, results_compress_type=1, rigid_grid_split=False, store_intermediate=True, nested_params_ave_dom_pos=True):
388 """Perform the full frame order analysis. 389 390 @param data_pipe_full: The name of the data pipe containing all of the RDC and PCS data. 391 @type data_pipe_full: str 392 @param data_pipe_subset: The name of the data pipe containing all of the RDC data but only a small subset of ~5 PCS points. This optional argument is used to massively speed up the analysis. 393 @type data_pipe_subset: str or None 394 @keyword pipe_bundle: The data pipe bundle to associate all spawned data pipes with. 395 @type pipe_bundle: str 396 @keyword results_dir: The directory where files are saved in. 397 @type results_dir: str 398 @keyword pre_run_dir: The optional directory containing the frame order auto-analysis results from a previous run. If supplied, then the 'data_pipe_full', 'data_pipe_subset', and 'opt_subset' arguments will be ignored. The results will be loaded from the results files in this directory, and then optimisation starts from there. The model nesting algorithm will also be deactivated. 399 @keyword opt_rigid: The grid search, zooming grid search and minimisation settings object for the rigid frame order model. 400 @type pre_run_dir: None or str 401 @type opt_rigid: Optimisation_settings instance 402 @keyword opt_subset: The grid search, zooming grid search and minimisation settings object for optimisation of all models, excluding the rigid model, for the PCS data subset. 403 @type opt_subset: Optimisation_settings instance 404 @keyword opt_full: The grid search, zooming grid search and minimisation settings object for optimisation of all models, excluding the rigid model, for the full data set. 405 @type opt_full: Optimisation_settings instance 406 @keyword opt_mc: The grid search, zooming grid search and minimisation settings object for optimisation of the Monte Carlo simulations. Any grid search settings will be ignored, as only the minimise.execute user function is run for the simulations. And only the settings for the first iteration of the object will be accessed and used - iterative optimisation will be ignored. 407 @type opt_mc: Optimisation_settings instance 408 @keyword mc_sim_num: The number of Monte Carlo simulations to be used for error analysis at the end of the analysis. 409 @type mc_sim_num: int 410 @keyword models: The frame order models to use in the analysis. The 'rigid' model must be included as this is essential for the analysis. 411 @type models: list of str 412 @keyword brownian_step_size: The step_size argument for the pseudo-Brownian dynamics simulation frame_order.simulate user function. 413 @type brownian_step_size: float 414 @keyword brownian_snapshot: The snapshot argument for the pseudo-Brownian dynamics simulation frame_order.simulate user function. 415 @type brownian_snapshot: int 416 @keyword brownian_total: The total argument for the pseudo-Brownian dynamics simulation frame_order.simulate user function. 417 @type brownian_total: int 418 @keyword dist_total: The total argument for the uniform distribution frame_order.distribute user function. 419 @type dist_total: int 420 @keyword dist_max_rotations: The max_rotations argument for the uniform distribution frame_order.distribute user function. 421 @type dist_max_rotations: int 422 @keyword results_compress_type: The type of compression to use when creating the results files. See the results.write user function for details. 
423 @type results_compress_type: int 424 @keyword rigid_grid_split: A flag which if True will cause the grid search for the rigid model to be split so that the rotation is optimised first followed by the translation. When combined with grid zooming, this can save optimisation time. However it may result in the global minimum being missed. 425 @type rigid_grid_split: bool 426 @keyword store_intermediate: A flag which if True will cause all intermediate optimisation results to be stored in the 'intermediate_results' directory. These can then be used for studying the optimisation settings or loaded for subsequent analyses. 427 @type store_intermediate: bool 428 @keyword nested_params_ave_dom_pos: A flag which if True will cause the average domain position parameters to be taken from the rigid or free-rotor models. If False, then these parameters will be set to zero. 429 @type nested_params_ave_dom_pos: bool 430 """ 431 432 # Execution lock. 433 status.exec_lock.acquire(pipe_bundle, mode='auto-analysis') 434 435 # Execute the full protocol. 436 try: 437 # Initial printout. 438 title(file=sys.stdout, text="Frame order auto-analysis", prespace=7) 439 440 # Store the args. 441 self.data_pipe_full = data_pipe_full 442 self.data_pipe_subset = data_pipe_subset 443 self.pipe_bundle = pipe_bundle 444 self.opt_rigid = opt_rigid 445 self.opt_subset = opt_subset 446 self.opt_full = opt_full 447 self.opt_mc = opt_mc 448 self.mc_sim_num = mc_sim_num 449 self.brownian_step_size = brownian_step_size 450 self.brownian_snapshot = brownian_snapshot 451 self.brownian_total = brownian_total 452 self.dist_total = dist_total 453 self.dist_max_rotations = dist_max_rotations 454 self.results_compress_type = results_compress_type 455 self.rigid_grid_split = rigid_grid_split 456 self.store_intermediate = store_intermediate 457 self.flag_nested_params_ave_dom_pos = nested_params_ave_dom_pos 458 459 # Re-order the models to enable the parameter nesting protocol. 460 self.models = self.reorder_models(models) 461 462 # A dictionary and list of the data pipe names. 463 self.pipe_name_dict = {} 464 self.pipe_name_list = [] 465 466 # Project directory (i.e. directory containing the model-free model results and the newly generated files) 467 if results_dir: 468 self.results_dir = results_dir + sep 469 else: 470 self.results_dir = getcwd() + sep 471 472 # The pre-run directory. 473 self.pre_run_dir = pre_run_dir 474 self.pre_run_flag = False 475 if self.pre_run_dir: 476 self.pre_run_dir += sep 477 self.pre_run_flag = True 478 479 # Data checks. 480 self.check_vars() 481 482 # Load the interpreter. 483 self.interpreter = Interpreter(show_script=False, raise_relax_error=True) 484 self.interpreter.populate_self() 485 self.interpreter.on(verbose=False) 486 487 # Output the starting time. 488 self.interpreter.system.time() 489 490 # The nested model optimisation protocol. 491 self.nested_models() 492 493 # Printout for the final run. 494 subtitle(file=sys.stdout, text="Final results") 495 496 # The final results does not already exist. 497 if not self.read_results(model='final', pipe_name='final'): 498 # Model selection. 499 self.interpreter.model_selection(method='AIC', modsel_pipe='final', pipes=self.pipe_name_list) 500 501 # The numerical optimisation settings. 502 opt = self.opt_mc 503 self.interpreter.frame_order.quad_int(opt.get_min_quad_int(0)) 504 self.sobol_setup(opt.get_min_sobol_info(0)) 505 506 # Monte Carlo simulations. 
507 self.interpreter.monte_carlo.setup(number=self.mc_sim_num) 508 self.interpreter.monte_carlo.create_data() 509 self.interpreter.monte_carlo.initial_values() 510 self.interpreter.minimise.execute(opt.get_min_algor(0), func_tol=opt.get_min_func_tol(0), max_iter=opt.get_min_max_iter(0)) 511 self.interpreter.eliminate() 512 self.interpreter.monte_carlo.error_analysis() 513 514 # Create the output and visualisation files. 515 self.results_output(model='final', dir=self.results_dir+'final', results_file=True) 516 517 # Regenerate the output and visualisation files for the final results. 518 else: 519 self.results_output(model='final', dir=self.results_dir+'final', results_file=False) 520 521 # Output the finishing time. 522 self.interpreter.system.time() 523 524 # Final title printout. 525 subtitle(file=sys.stdout, text="Summaries") 526 527 # Save the final program state. 528 if self._final_state: 529 self.interpreter.state.save('final_state', dir=self.results_dir, force=True) 530 531 # Count the number of Sobol' points and create a summary file. 532 count = False 533 for model in self.models: 534 if model != MODEL_RIGID: 535 count = True 536 break 537 if count: 538 count_sobol_points(dir=self.results_dir, force=True) 539 summarise(dir=self.results_dir, force=True) 540 541 # Clean up. 542 finally: 543 # Finish and unlock execution. 544 status.exec_lock.release()
545 546
547 - def axis_permutation_analysis(self, model=None):
548 """Handle the two local minima in the pseudo-elliptic models. 549 550 This involves creating a new data pipe for the pseudo-elliptic models, permuting the motional parameters, and performing an optimisation. This will explore the second minimum. 551 552 553 @keyword model: The frame order model to visualise. This should match the model of the current data pipe, unless the special value of 'final' is used to indicate the visualisation of the final results. 554 @type model: str 555 """ 556 557 # Nothing to do. 558 if model not in MODEL_LIST_ISO_CONE + MODEL_LIST_PSEUDO_ELLIPSE: 559 return 560 561 # The permutations. 562 perms = ['A'] 563 if model in MODEL_LIST_PSEUDO_ELLIPSE: 564 perms.append('B') 565 566 # Loop over all permutations. 567 for perm in perms: 568 # The title printout. 569 title = model[0].upper() + model[1:] 570 text = "Axis permutation '%s' of the %s frame order model" % (perm, title) 571 subtitle(file=sys.stdout, text=text, prespace=5) 572 573 # Output the model staring time. 574 self.interpreter.system.time() 575 576 # A new model name. 577 perm_model = "%s permutation %s" % (model, perm) 578 579 # The data pipe name. 580 self.pipe_name_dict[perm_model] = '%s permutation %s - %s' % (title, perm, self.pipe_bundle) 581 self.pipe_name_list.append(self.pipe_name_dict[perm_model]) 582 583 # The output directory. 584 dir = model_directory(perm_model, base_dir=self.results_dir) 585 586 # The results file already exists, so read its contents instead. 587 if self.read_results(model=perm_model, pipe_name=self.pipe_name_dict[perm_model]): 588 # Re-perform model elimination just in case. 589 self.interpreter.eliminate() 590 591 # Recreate the output and visualisation files (in case this was not completed correctly). 592 self.results_output(model=perm_model, dir=dir, results_file=False) 593 594 # Jump to the next permutation. 595 continue 596 597 # Copy the data pipe, and add it to the list so it is included in the model selection. 598 self.interpreter.pipe.copy(pipe_from=self.pipe_name_dict[model], pipe_to=self.pipe_name_dict[perm_model]) 599 600 # Switch to the new pipe. 601 self.interpreter.pipe.switch(pipe_name=self.pipe_name_dict[perm_model]) 602 603 # Permute the axes. 604 self.interpreter.frame_order.permute_axes(permutation=perm) 605 606 # Minimise with only the last optimisation settings (for the full data set). 607 opt = self.opt_full 608 for i in opt.loop_min(): 609 pass 610 611 # The numerical optimisation settings. 612 self.interpreter.frame_order.quad_int(opt.get_min_quad_int(i)) 613 self.sobol_setup(opt.get_min_sobol_info(i)) 614 615 # Perform the optimisation. 616 self.interpreter.minimise.execute(min_algor=opt.get_min_algor(i), func_tol=opt.get_min_func_tol(i), max_iter=opt.get_min_max_iter(i)) 617 618 # Results printout. 619 self.print_results() 620 621 # Model elimination. 622 self.interpreter.eliminate() 623 624 # Create the output and visualisation files. 625 self.results_output(model=perm_model, dir=dir, results_file=True)
626 627
628 - def check_vars(self):
629 """Check that the user has set the variables correctly.""" 630 631 # The pipe bundle. 632 if not isinstance(self.pipe_bundle, str): 633 raise RelaxError("The pipe bundle name '%s' is invalid." % self.pipe_bundle) 634 635 # Minimisation variables. 636 if not isinstance(self.mc_sim_num, int): 637 raise RelaxError("The mc_sim_num user variable '%s' is incorrectly set. It should be an integer." % self.mc_sim_num)
638 639
640 - def custom_grid_incs(self, model, inc=None, pivot_search=True):
641 """Set up a customised grid search increment number for each model. 642 643 @param model: The frame order model. 644 @type model: str 645 @keyword inc: The number of grid search increments to use for each dimension. 646 @type inc: int 647 @keyword pivot_search: A flag which if False will prevent the pivot point from being included in the grid search. 648 @type pivot_search: bool 649 @return: The list of increment values. 650 @rtype: list of int and None 651 """ 652 653 # Initialise the structure. 654 incs = [] 655 656 # The pivot parameters. 657 if hasattr(cdp, 'pivot_fixed') and not cdp.pivot_fixed: 658 # Optimise the pivot for the rotor model. 659 if pivot_search and model == MODEL_ROTOR: 660 incs += [inc, inc, inc] 661 662 # Otherwise use preset values (copied from other models). 663 else: 664 incs += [None, None, None] 665 666 # The 2nd pivot point parameters - the minimum inter rotor axis distance. 667 if model in [MODEL_DOUBLE_ROTOR]: 668 incs += [inc] 669 670 # The average domain position parameters. 671 if model == MODEL_FREE_ROTOR: 672 incs += [inc, inc, inc, inc, inc] 673 elif model in [MODEL_ISO_CONE_FREE_ROTOR, MODEL_PSEUDO_ELLIPSE_FREE_ROTOR]: 674 incs += [None, None, None, None, None] 675 else: 676 incs += [None, None, None, None, None, None] 677 678 # The motional eigenframe and order parameters - the rotor model. 679 if model == MODEL_ROTOR: 680 incs += [inc, inc] 681 682 # The motional eigenframe and order parameters - the free rotor model. 683 if model == MODEL_FREE_ROTOR: 684 incs += [None] 685 686 # The motional eigenframe and order parameters - the torsionless isotropic cone model. 687 if model == MODEL_ISO_CONE_TORSIONLESS: 688 incs += [None, None, None] 689 690 # The motional eigenframe and order parameters - the free rotor isotropic cone model. 691 if model == MODEL_ISO_CONE_FREE_ROTOR: 692 incs += [None, None, None] 693 694 # The motional eigenframe and order parameters - the isotropic cone model. 695 if model == MODEL_ISO_CONE: 696 incs += [None, None, inc, None] 697 698 # The motional eigenframe and order parameters - the torsionless pseudo-elliptic cone model. 699 if model == MODEL_PSEUDO_ELLIPSE_TORSIONLESS: 700 incs += [None, None, None, None, None] 701 702 # The motional eigenframe and order parameters - the free rotor pseudo-elliptic cone model. 703 if model == MODEL_PSEUDO_ELLIPSE_FREE_ROTOR: 704 incs += [None, None, None, None, None] 705 706 # The motional eigenframe and order parameters - the pseudo-elliptic cone model. 707 if model == MODEL_PSEUDO_ELLIPSE: 708 incs += [inc, inc, inc, None, inc, None] 709 710 # The motional eigenframe and order parameters - the double rotor model. 711 if model == MODEL_DOUBLE_ROTOR: 712 incs += [None, None, None, inc, inc] 713 714 # Return the increment list. 715 return incs
716 717
718 - def nested_params_ave_dom_pos(self, model):
719 """Copy the average domain parameters from simpler nested models for faster optimisation. 720 721 @param model: The frame order model. 722 @type model: str 723 """ 724 725 # No nesting, so set all parameters to zero. 726 if not self.flag_nested_params_ave_dom_pos: 727 self.interpreter.value.set(param='ave_pos_x', val=0.0) 728 self.interpreter.value.set(param='ave_pos_y', val=0.0) 729 self.interpreter.value.set(param='ave_pos_z', val=0.0) 730 self.interpreter.value.set(param='ave_pos_alpha', val=0.0) 731 self.interpreter.value.set(param='ave_pos_beta', val=0.0) 732 self.interpreter.value.set(param='ave_pos_gamma', val=0.0) 733 return 734 735 # Skip the following models to allow for full optimisation. 736 if model in [MODEL_RIGID, MODEL_FREE_ROTOR]: 737 # Printout. 738 print("No nesting of the average domain position parameters for the '%s' model." % model) 739 740 # Exit. 741 return 742 743 # The average position from the rigid model. 744 if model not in MODEL_LIST_FREE_ROTORS: 745 # Printout. 746 print("Obtaining the average position from the rigid model.") 747 748 # Get the rigid data pipe. 749 pipe = pipes.get_pipe(self.pipe_name_dict[MODEL_RIGID]) 750 751 # Copy the average position parameters from the rigid model. 752 cdp.ave_pos_x = pipe.ave_pos_x 753 cdp.ave_pos_y = pipe.ave_pos_y 754 cdp.ave_pos_z = pipe.ave_pos_z 755 cdp.ave_pos_alpha = pipe.ave_pos_alpha 756 cdp.ave_pos_beta = pipe.ave_pos_beta 757 cdp.ave_pos_gamma = pipe.ave_pos_gamma 758 759 # The average position from the free rotor model. 760 else: 761 # Printout. 762 print("Obtaining the average position from the free rotor model.") 763 764 # Get the free rotor data pipe. 765 pipe = pipes.get_pipe(self.pipe_name_dict[MODEL_FREE_ROTOR]) 766 767 # Copy the average position parameters from the free rotor model. 768 cdp.ave_pos_x = pipe.ave_pos_x 769 cdp.ave_pos_y = pipe.ave_pos_y 770 cdp.ave_pos_z = pipe.ave_pos_z 771 cdp.ave_pos_beta = pipe.ave_pos_beta 772 cdp.ave_pos_gamma = pipe.ave_pos_gamma
773 774
775 - def nested_params_eigenframe(self, model):
776 """Copy the eigenframe parameters from simpler nested models for faster optimisation. 777 778 @param model: The frame order model. 779 @type model: str 780 """ 781 782 # Skip the following models to allow for full optimisation. 783 if model in [MODEL_ROTOR, MODEL_PSEUDO_ELLIPSE]: 784 # Printout. 785 print("No nesting of the eigenframe parameters for the '%s' model." % model) 786 787 # Exit. 788 return 789 790 # The cone axis from the rotor model. 791 if model in [MODEL_FREE_ROTOR, MODEL_ISO_CONE]: 792 # Printout. 793 print("Obtaining the cone axis from the rotor model.") 794 795 # Get the rotor data pipe. 796 pipe = pipes.get_pipe(self.pipe_name_dict[MODEL_ROTOR]) 797 798 # The cone axis as the axis alpha angle. 799 if model == MODEL_FREE_ROTOR: 800 cdp.axis_alpha = pipe.axis_alpha 801 802 # The cone axis from the axis alpha angle to spherical angles. 803 if model == MODEL_ISO_CONE: 804 cdp.axis_theta, cdp.axis_phi = convert_axis_alpha_to_spherical(alpha=pipe.axis_alpha, pivot=generate_pivot(order=1, pipe_name=self.pipe_name_dict[MODEL_ROTOR]), point=pipe_centre_of_mass(verbosity=0)) 805 806 # The cone axis from the isotropic cone model. 807 elif model in [MODEL_ISO_CONE_FREE_ROTOR, MODEL_ISO_CONE_TORSIONLESS]: 808 # Printout. 809 print("Obtaining the cone axis from the isotropic cone model.") 810 811 # Get the iso cone data pipe. 812 pipe = pipes.get_pipe(self.pipe_name_dict[MODEL_ISO_CONE]) 813 814 # Copy the cone axis parameters. 815 cdp.axis_theta = pipe.axis_theta 816 cdp.axis_phi = pipe.axis_phi 817 818 # The full eigenframe from the pseudo-ellipse model. 819 elif model in [MODEL_PSEUDO_ELLIPSE_FREE_ROTOR, MODEL_PSEUDO_ELLIPSE_TORSIONLESS, MODEL_DOUBLE_ROTOR]: 820 # Printout. 821 print("Obtaining the full eigenframe from the pseudo-ellipse model.") 822 823 # Get the pseudo-ellipse data pipe. 824 pipe = pipes.get_pipe(self.pipe_name_dict[MODEL_PSEUDO_ELLIPSE]) 825 826 # Copy the three Euler angles. 827 cdp.eigen_alpha = pipe.eigen_alpha 828 cdp.eigen_beta = pipe.eigen_beta 829 cdp.eigen_gamma = pipe.eigen_gamma
830 831
832 - def nested_params_order(self, model):
833 """Copy the order parameters from simpler nested models for faster optimisation. 834 835 @param model: The frame order model. 836 @type model: str 837 """ 838 839 # Skip the following models to allow for full optimisation. 840 if model in [MODEL_ROTOR, MODEL_DOUBLE_ROTOR]: 841 # Printout. 842 print("No nesting of the order parameters for the '%s' model." % model) 843 844 # Exit. 845 return 846 847 # The cone angle from the isotropic cone model. 848 if model in [MODEL_ISO_CONE_TORSIONLESS, MODEL_PSEUDO_ELLIPSE, MODEL_ISO_CONE_FREE_ROTOR]: 849 # Get the iso cone data pipe. 850 pipe = pipes.get_pipe(self.pipe_name_dict[MODEL_ISO_CONE]) 851 852 # Copy the cone angle directly. 853 if model in [MODEL_ISO_CONE_FREE_ROTOR, MODEL_ISO_CONE_TORSIONLESS]: 854 print("Obtaining the cone angle from the isotropic cone model.") 855 cdp.cone_theta = pipe.cone_theta 856 857 # Copy as the X cone angle. 858 elif model == MODEL_PSEUDO_ELLIPSE: 859 print("Obtaining the cone X angle from the isotropic cone model.") 860 cdp.cone_theta_x = pipe.cone_theta 861 862 # The X and Y cone angles from the pseudo-ellipse model. 863 elif model in [MODEL_PSEUDO_ELLIPSE_TORSIONLESS, MODEL_PSEUDO_ELLIPSE_FREE_ROTOR]: 864 # Printout. 865 print("Obtaining the cone X and Y angles from the pseudo-ellipse model.") 866 867 # Get the pseudo-ellipse data pipe. 868 pipe = pipes.get_pipe(self.pipe_name_dict[MODEL_PSEUDO_ELLIPSE]) 869 870 # Copy the cone axis. 871 cdp.cone_theta_x = pipe.cone_theta_x 872 cdp.cone_theta_y = pipe.cone_theta_y 873 874 875 # The torsion from the rotor model. 876 if model in [MODEL_ISO_CONE, MODEL_PSEUDO_ELLIPSE]: 877 # Printout. 878 print("Obtaining the torsion angle from the rotor model.") 879 880 # Get the rotor data pipe. 881 pipe = pipes.get_pipe(self.pipe_name_dict[MODEL_ROTOR]) 882 883 # Copy the cone axis. 884 cdp.cone_sigma_max = pipe.cone_sigma_max
885 886
887 - def nested_params_pivot(self, model):
888 """Copy the pivot parameters from simpler nested models for faster optimisation. 889 890 @param model: The frame order model. 891 @type model: str 892 """ 893 894 # Skip the following models to allow for full optimisation. 895 if model in [MODEL_ROTOR]: 896 # Printout. 897 print("No nesting of the pivot parameters for the '%s' model." % model) 898 899 # Exit. 900 return 901 902 # The pivot from the rotor model. 903 print("Obtaining the pivot point from the rotor model.") 904 905 # Get the iso cone data pipe. 906 pipe = pipes.get_pipe(self.pipe_name_dict[MODEL_ROTOR]) 907 908 # Copy the pivot parameters. 909 cdp.pivot_x = pipe.pivot_x 910 cdp.pivot_y = pipe.pivot_y 911 cdp.pivot_z = pipe.pivot_z
912 913
914 - def nested_models(self):
915 """Protocol for the nested optimisation of the frame order models.""" 916 917 # First optimise the rigid model using all data. 918 self.optimise_rigid() 919 920 # Iteratively optimise the frame order models. 921 for model in self.models: 922 # Skip the already optimised rigid model. 923 if model == MODEL_RIGID: 924 continue 925 926 # The model title. 927 title = model[0].upper() + model[1:] 928 929 # Printout. 930 subtitle(file=sys.stdout, text="%s frame order model"%title, prespace=5) 931 932 # Output the model staring time. 933 self.interpreter.system.time() 934 935 # The data pipe name. 936 self.pipe_name_dict[model] = '%s - %s' % (title, self.pipe_bundle) 937 self.pipe_name_list.append(self.pipe_name_dict[model]) 938 939 # The output directory. 940 dir = model_directory(model, base_dir=self.results_dir) 941 942 # The results file already exists, so read its contents instead. 943 if self.read_results(model=model, pipe_name=self.pipe_name_dict[model]): 944 # Re-perform model elimination just in case. 945 self.interpreter.eliminate() 946 947 # Recreate the output files (in case this was not completed correctly). 948 self.results_output(model=model, dir=dir, results_file=False) 949 950 # Perform the axis permutation analysis. 951 self.axis_permutation_analysis(model=model) 952 953 # Skip to the next model. 954 continue 955 956 # Load a pre-run results file. 957 if self.pre_run_dir != None: 958 self.read_results(model=model, pipe_name=self.pipe_name_dict[model], pre_run=True) 959 960 # Otherwise use the base data pipes. 961 else: 962 # Create the data pipe using the full data set, and switch to it. 963 if self.data_pipe_subset != None: 964 self.interpreter.pipe.copy(self.data_pipe_subset, self.pipe_name_dict[model], bundle_to=self.pipe_bundle) 965 else: 966 self.interpreter.pipe.copy(self.data_pipe_full, self.pipe_name_dict[model], bundle_to=self.pipe_bundle) 967 self.interpreter.pipe.switch(self.pipe_name_dict[model]) 968 969 # Select the Frame Order model. 970 self.interpreter.frame_order.select_model(model=model) 971 972 # Copy nested parameters. 973 subsubtitle(file=sys.stdout, text="Parameter nesting") 974 self.nested_params_ave_dom_pos(model) 975 self.nested_params_eigenframe(model) 976 self.nested_params_pivot(model) 977 self.nested_params_order(model) 978 979 # Optimisation using the PCS subset (skipped if a pre-run directory is supplied). 980 if self.data_pipe_subset != None and self.opt_subset != None and not self.pre_run_flag: 981 self.optimisation(model=model, opt=self.opt_subset, pcs_text="PCS subset", intermediate_dir='pcs_subset') 982 983 # Operations if a subset was used, otherwise these are not needed. 984 if self.data_pipe_subset != None and self.data_pipe_full != None: 985 # Copy the PCS data. 986 self.interpreter.pcs.copy(pipe_from=self.data_pipe_full, pipe_to=self.pipe_name_dict[model]) 987 988 # Reset the selection status. 989 for spin, spin_id in spin_loop(return_id=True, skip_desel=False): 990 # Get the spin from the original pipe. 991 spin_orig = return_spin(spin_id=spin_id, pipe=self.data_pipe_full) 992 993 # Reset the spin selection. 994 spin.select = spin_orig.select 995 996 # Optimisation using the full data set. 997 if self.opt_full != None: 998 self.optimisation(model=model, opt=self.opt_full, pcs_text="full data set", intermediate_dir='all_data') 999 1000 # Results printout. 1001 self.print_results() 1002 1003 # Model elimination. 1004 self.interpreter.eliminate() 1005 1006 # Create the output and visualisation files. 
1007 self.results_output(model=model, dir=dir, results_file=True) 1008 1009 # Perform the axis permutation analysis. 1010 self.axis_permutation_analysis(model=model)
1011 1012
1013 - def optimisation(self, model=None, opt=None, pcs_text=None, intermediate_dir=None):
1014 """Perform the grid search and minimisation. 1015 1016 @keyword model: The frame order model to optimise. 1017 @type model: str 1018 @keyword opt: The grid search, zooming grid search and minimisation settings object for optimisation of all models. 1019 @type opt: Optimisation_settings instance 1020 @keyword pcs_text: The text to use in the title. This is either about the PCS subset or full data set. 1021 @type pcs_text: str 1022 @keyword intermediate_dir: The directory for this PCS data set for storing the intermediate results. 1023 @type intermediate_dir: str 1024 """ 1025 1026 # Printout. 1027 subsubtitle(file=sys.stdout, text="Optimisation using the %s" % pcs_text) 1028 1029 # Results directory stub for intermediate results. 1030 intermediate_stub = self.results_dir + sep + 'intermediate_results' + sep + intermediate_dir 1031 1032 # Zooming grid search. 1033 for i in opt.loop_grid(): 1034 # The intermediate results directory. 1035 intermediate_dir = intermediate_stub + '_grid%i' % i 1036 1037 # Set the zooming grid search level. 1038 zoom = opt.get_grid_zoom_level(i) 1039 if zoom != None: 1040 self.interpreter.minimise.grid_zoom(level=zoom) 1041 intermediate_dir += '_zoom%i' % zoom 1042 1043 # Set up the custom grid increments. 1044 incs = self.custom_grid_incs(model, inc=opt.get_grid_inc(i), pivot_search=opt.get_grid_pivot_search(i)) 1045 intermediate_dir += '_inc%i' % opt.get_grid_inc(i) 1046 1047 # The numerical optimisation settings. 1048 quad_int = opt.get_grid_quad_int(i) 1049 if quad_int: 1050 self.interpreter.frame_order.quad_int(True) 1051 intermediate_dir += '_quad_int' 1052 else: 1053 sobol_num = opt.get_grid_sobol_info(i) 1054 self.sobol_setup(sobol_num) 1055 intermediate_dir += '_sobol%i' % sobol_num[0] 1056 1057 # Perform the grid search. 1058 self.interpreter.minimise.grid_search(inc=incs, skip_preset=False) 1059 1060 # Store the intermediate results and statistics. 1061 if self.store_intermediate: 1062 self.results_output(model=model, dir=model_directory(model, base_dir=intermediate_dir), results_file=True, simulation=False) 1063 count_sobol_points(dir=intermediate_dir, force=True) 1064 summarise(dir=intermediate_dir, force=True) 1065 1066 # Minimisation. 1067 for i in opt.loop_min(): 1068 # The intermediate results directory. 1069 func_tol = opt.get_min_func_tol(i) 1070 max_iter = opt.get_min_max_iter(i) 1071 intermediate_dir = intermediate_stub + '_min%i_ftol%g_max_iter%i' % (i, func_tol, max_iter) 1072 1073 # The numerical optimisation settings. 1074 quad_int = opt.get_min_quad_int(i) 1075 if quad_int: 1076 self.interpreter.frame_order.quad_int(True) 1077 intermediate_dir += '_quad_int' 1078 else: 1079 sobol_num = opt.get_min_sobol_info(i) 1080 self.sobol_setup(sobol_num) 1081 intermediate_dir += '_sobol%i' % sobol_num[0] 1082 1083 # Perform the optimisation. 1084 self.interpreter.minimise.execute(min_algor=opt.get_min_algor(i), func_tol=func_tol, max_iter=max_iter) 1085 1086 # Store the intermediate results. 1087 if self.store_intermediate: 1088 self.results_output(model=model, dir=model_directory(model, base_dir=intermediate_dir), results_file=True, simulation=False) 1089 count_sobol_points(dir=intermediate_dir, force=True) 1090 summarise(dir=intermediate_dir, force=True)
1091 1092
1093 - def optimise_rigid(self):
1094 """Optimise the rigid frame order model. 1095 1096 The Sobol' integration is not used here, so the algorithm is different to the other frame order models. 1097 """ 1098 1099 # The model. 1100 model = MODEL_RIGID 1101 title = model[0].upper() + model[1:] 1102 1103 # Print out. 1104 subtitle(file=sys.stdout, text="%s frame order model"%title, prespace=5) 1105 1106 # Output the model staring time. 1107 self.interpreter.system.time() 1108 1109 # The data pipe name. 1110 self.pipe_name_dict[model] = '%s - %s' % (title, self.pipe_bundle) 1111 self.pipe_name_list.append(self.pipe_name_dict[model]) 1112 1113 # The output directory. 1114 dir = model_directory(model, base_dir=self.results_dir) 1115 1116 # The results file already exists, so read its contents instead. 1117 if self.read_results(model=model, pipe_name=self.pipe_name_dict[model]): 1118 # Recreate the output and visualisation files (in case this was not completed correctly). 1119 self.results_output(model=model, dir=dir, results_file=False) 1120 1121 # Nothing more to do. 1122 return 1123 1124 # Load a pre-run results file. 1125 if self.pre_run_flag: 1126 self.read_results(model=model, pipe_name=self.pipe_name_dict[model], pre_run=True) 1127 1128 # Otherwise use the base data pipes. 1129 else: 1130 # Create the data pipe using the full data set, and switch to it. 1131 self.interpreter.pipe.copy(self.data_pipe_full, self.pipe_name_dict[model], bundle_to=self.pipe_bundle) 1132 self.interpreter.pipe.switch(self.pipe_name_dict[model]) 1133 1134 # Select the Frame Order model. 1135 self.interpreter.frame_order.select_model(model=model) 1136 1137 # Optimisation. 1138 opt = self.opt_rigid 1139 if opt != None: 1140 # No grid search. 1141 if not opt.has_grid(): 1142 # Set up the initial parameters. 1143 print("\n\nNo grid search, so setting the translational and rotational parameters to zero.") 1144 self.interpreter.value.set(param='ave_pos_x', val=0.0) 1145 self.interpreter.value.set(param='ave_pos_y', val=0.0) 1146 self.interpreter.value.set(param='ave_pos_z', val=0.0) 1147 self.interpreter.value.set(param='ave_pos_alpha', val=0.0) 1148 self.interpreter.value.set(param='ave_pos_beta', val=0.0) 1149 self.interpreter.value.set(param='ave_pos_gamma', val=0.0) 1150 1151 # Grid search alternation. 1152 elif self.rigid_grid_split: 1153 # Split zooming grid search for the translation. 1154 print("\n\nTranslation active - splitting the grid search and iterating.") 1155 self.interpreter.value.set(param='ave_pos_x', val=0.0) 1156 self.interpreter.value.set(param='ave_pos_y', val=0.0) 1157 self.interpreter.value.set(param='ave_pos_z', val=0.0) 1158 for i in opt.loop_grid(): 1159 # Set the zooming grid search level. 1160 zoom = opt.get_grid_zoom_level(i) 1161 if zoom != None: 1162 self.interpreter.minimise.grid_zoom(level=zoom) 1163 1164 # The numerical optimisation settings. 1165 self.interpreter.frame_order.quad_int(opt.get_grid_quad_int(i)) 1166 self.sobol_setup(opt.get_grid_sobol_info(i)) 1167 1168 # The number of increments. 1169 inc = opt.get_grid_inc(i) 1170 1171 # First optimise the rotation. 1172 self.interpreter.minimise.grid_search(inc=[None, None, None, inc, inc, inc], skip_preset=False) 1173 1174 # Then the translation. 1175 self.interpreter.minimise.grid_search(inc=[inc, inc, inc, None, None, None], skip_preset=False) 1176 1177 # Normal grid search. 1178 else: 1179 for i in opt.loop_grid(): 1180 # Set the zooming grid search level. 
1181 zoom = opt.get_grid_zoom_level(i) 1182 if zoom != None: 1183 self.interpreter.minimise.grid_zoom(level=zoom) 1184 1185 # The numerical optimisation settings. 1186 self.interpreter.frame_order.quad_int(opt.get_grid_quad_int(i)) 1187 self.sobol_setup(opt.get_grid_sobol_info(i)) 1188 1189 # The number of increments. 1190 inc = opt.get_grid_inc(i) 1191 1192 # Grid search 1193 self.interpreter.minimise.grid_search(inc=inc, skip_preset=False) 1194 1195 # Minimise. 1196 for i in opt.loop_min(): 1197 # The numerical optimisation settings. 1198 self.interpreter.frame_order.quad_int(opt.get_min_quad_int(i)) 1199 self.sobol_setup(opt.get_min_sobol_info(i)) 1200 1201 # Perform the optimisation. 1202 self.interpreter.minimise.execute(min_algor=opt.get_min_algor(i), func_tol=opt.get_min_func_tol(i), max_iter=opt.get_min_max_iter(i)) 1203 1204 # Results printout. 1205 self.print_results() 1206 1207 # Create the output and visualisation files. 1208 self.results_output(model=model, dir=dir, results_file=True)
1209 1210
1211 - def print_results(self):
1212 """Print out the optimisation results for the current data pipe.""" 1213 1214 # Header. 1215 sys.stdout.write("\nFinal optimisation results:\n") 1216 1217 # Formatting string. 1218 format_float = " %-20s %20.15f\n" 1219 format_vect = " %-20s %20s\n" 1220 1221 # Pivot. 1222 sys.stdout.write("\nPivot point:\n") 1223 if hasattr(cdp, 'pivot_x'): 1224 sys.stdout.write(format_float % ('x:', cdp.pivot_x)) 1225 if hasattr(cdp, 'pivot_y'): 1226 sys.stdout.write(format_float % ('y:', cdp.pivot_y)) 1227 if hasattr(cdp, 'pivot_z'): 1228 sys.stdout.write(format_float % ('z:', cdp.pivot_z)) 1229 1230 # Average position. 1231 if hasattr(cdp, 'ave_pos_x') or hasattr(cdp, 'ave_pos_alpha') or hasattr(cdp, 'ave_pos_beta') or hasattr(cdp, 'ave_pos_gamma'): 1232 sys.stdout.write("\nAverage moving domain position:\n") 1233 if hasattr(cdp, 'ave_pos_x'): 1234 sys.stdout.write(format_float % ('x:', cdp.ave_pos_x)) 1235 if hasattr(cdp, 'ave_pos_y'): 1236 sys.stdout.write(format_float % ('y:', cdp.ave_pos_y)) 1237 if hasattr(cdp, 'ave_pos_z'): 1238 sys.stdout.write(format_float % ('z:', cdp.ave_pos_z)) 1239 if hasattr(cdp, 'ave_pos_alpha'): 1240 sys.stdout.write(format_float % ('alpha:', cdp.ave_pos_alpha)) 1241 if hasattr(cdp, 'ave_pos_beta'): 1242 sys.stdout.write(format_float % ('beta:', cdp.ave_pos_beta)) 1243 if hasattr(cdp, 'ave_pos_gamma'): 1244 sys.stdout.write(format_float % ('gamma:', cdp.ave_pos_gamma)) 1245 1246 # Frame order eigenframe. 1247 if hasattr(cdp, 'eigen_alpha') or hasattr(cdp, 'eigen_beta') or hasattr(cdp, 'eigen_gamma') or hasattr(cdp, 'axis_theta') or hasattr(cdp, 'axis_phi'): 1248 sys.stdout.write("\nFrame order eigenframe:\n") 1249 if hasattr(cdp, 'eigen_alpha'): 1250 sys.stdout.write(format_float % ('eigen alpha:', cdp.eigen_alpha)) 1251 if hasattr(cdp, 'eigen_beta'): 1252 sys.stdout.write(format_float % ('eigen beta:', cdp.eigen_beta)) 1253 if hasattr(cdp, 'eigen_gamma'): 1254 sys.stdout.write(format_float % ('eigen gamma:', cdp.eigen_gamma)) 1255 1256 # The cone axis. 1257 if hasattr(cdp, 'axis_theta'): 1258 # The angles. 1259 sys.stdout.write(format_float % ('axis theta:', cdp.axis_theta)) 1260 sys.stdout.write(format_float % ('axis phi:', cdp.axis_phi)) 1261 1262 # The axis. 1263 axis = zeros(3, float64) 1264 spherical_to_cartesian([1.0, cdp.axis_theta, cdp.axis_phi], axis) 1265 sys.stdout.write(format_vect % ('axis:', axis)) 1266 1267 # Frame ordering. 1268 if hasattr(cdp, 'cone_theta_x') or hasattr(cdp, 'cone_theta_y') or hasattr(cdp, 'cone_theta') or hasattr(cdp, 'cone_sigma_max'): 1269 sys.stdout.write("\nFrame ordering:\n") 1270 if hasattr(cdp, 'cone_theta_x'): 1271 sys.stdout.write(format_float % ('cone theta_x:', cdp.cone_theta_x)) 1272 if hasattr(cdp, 'cone_theta_y'): 1273 sys.stdout.write(format_float % ('cone theta_y:', cdp.cone_theta_y)) 1274 if hasattr(cdp, 'cone_theta'): 1275 sys.stdout.write(format_float % ('cone theta:', cdp.cone_theta)) 1276 if hasattr(cdp, 'cone_sigma_max'): 1277 sys.stdout.write(format_float % ('sigma_max:', cdp.cone_sigma_max)) 1278 1279 # Minimisation statistics. 1280 if hasattr(cdp, 'chi2'): 1281 sys.stdout.write("\nMinimisation statistics:\n") 1282 if hasattr(cdp, 'chi2'): 1283 sys.stdout.write(format_float % ('chi2:', cdp.chi2)) 1284 1285 # Final spacing. 1286 sys.stdout.write("\n")
1287 1288
1289 - def read_results(self, model=None, pipe_name=None, pre_run=False):
1290 """Attempt to read old results files. 1291 1292 @keyword model: The frame order model. 1293 @type model: str 1294 @keyword pipe_name: The name of the data pipe to use for this model. 1295 @type pipe_name: str 1296 @keyword pre_run: A flag which if True will read the results file from the pre-run directory. 1297 @type pre_run: bool 1298 @return: True if the file exists and has been read, False otherwise. 1299 @rtype: bool 1300 """ 1301 1302 # The file name. 1303 base_dir = self.results_dir 1304 if pre_run: 1305 base_dir = self.pre_run_dir 1306 path = model_directory(model, base_dir=base_dir) + sep + 'results' 1307 1308 # Loop over the compression types. 1309 found = False 1310 for ext in ['.bz2', '', 'gz']: 1311 # The full file name. 1312 full_path = path + ext 1313 1314 # The file does not exist. 1315 if not access(full_path, F_OK): 1316 continue 1317 1318 # Create an empty data pipe. 1319 self.interpreter.pipe.create(pipe_name=pipe_name, pipe_type='frame order') 1320 1321 # Read the results file. 1322 self.interpreter.results.read(full_path) 1323 1324 # Results printout. 1325 self.print_results() 1326 1327 # The flag. 1328 found = True 1329 break 1330 1331 # Success. 1332 return found
1333 1334
1335 - def reorder_models(self, models=None):
1336 """Reorder the frame order models to enable the nested parameter copying protocol. 1337 1338 @keyword models: The frame order models to be used in the auto-analysis. 1339 @type models: list of str 1340 @return: The reordered frame order models. 1341 @rtype: list of str 1342 """ 1343 1344 # The correct order for the nesting protocol. 1345 order = [ 1346 MODEL_RIGID, 1347 MODEL_ROTOR, 1348 MODEL_ISO_CONE, 1349 MODEL_PSEUDO_ELLIPSE, 1350 MODEL_ISO_CONE_TORSIONLESS, 1351 MODEL_PSEUDO_ELLIPSE_TORSIONLESS, 1352 MODEL_FREE_ROTOR, 1353 MODEL_ISO_CONE_FREE_ROTOR, 1354 MODEL_PSEUDO_ELLIPSE_FREE_ROTOR, 1355 MODEL_DOUBLE_ROTOR 1356 ] 1357 1358 # Create the new list. 1359 new = [] 1360 for i in range(len(order)): 1361 if order[i] in models: 1362 new.append(order[i]) 1363 1364 # Sanity check - the models must all be in this list. 1365 for i in range(len(models)): 1366 if models[i] not in order: 1367 raise RelaxError("The frame order model '%s' is unknown." % models[i]) 1368 1369 # Return the reordered list. 1370 return new
1371 1372
1373      def results_output(self, dir=None, model=None, results_file=True, simulation=True):
1374          """Create visual representations of the frame order results for the given model.
1375
1376          This will call the following user functions:
1377
1378              - results.write to output a results file (turned off if the results_file argument is False).
1379              - rdc.corr_plot and pcs.corr_plot to visualise the quality of the data and fit as 2D Grace correlation plots.
1380              - frame_order.pdb_model to generate a PDB representation of the frame order motions.
1381              - frame_order.simulate to perform a pseudo-Brownian frame order dynamics simulation.
1382
1383          The output for each model includes a PDB representation of the motions (the 'cone.pdb' file located in the model directory).
1384
1385
1386          A relax script for displaying the average domain position together with this cone/motion representation in PyMOL is also created in the model directory (the 'pymol_display.py' file).
1387
1388          @keyword dir:           The output directory.
1389          @type dir:              str
1390          @keyword model:         The frame order model.  This should match the model of the current data pipe, unless the special value of 'final' is used to indicate the visualisation of the final results.
1391          @type model:            str
1392          @keyword results_file:  A flag which if True will cause a results file to be created via the results.write user function.
1393          @type results_file:     bool
1394          @keyword simulation:    A flag which if True will allow the pseudo-Brownian frame order dynamics simulation to be run.
1395          @type simulation:       bool
1396          """
1397
1398          # Printout.
1399          subsubtitle(file=sys.stdout, text="Generating the results and data visualisation files")
1400
1401          # Sanity check.
1402          if model != 'final' and model.replace(' permutation A', '').replace(' permutation B', '') != cdp.model:
1403              raise RelaxError("The model '%s' does not match the model '%s' of the current data pipe." % (model, cdp.model))
1404
1405          # The results file.
1406          if results_file:
1407              self.interpreter.results.write(dir=dir, compress_type=self.results_compress_type, force=True)
1408
1409          # The RDC and PCS correlation plots.
1410          self.interpreter.rdc.corr_plot(dir=dir, force=True)
1411          self.interpreter.pcs.corr_plot(dir=dir, force=True)
1412
1413          # The PDB representation of the model.
1414          self.interpreter.frame_order.pdb_model(dir=dir, force=True)
1415
1416          # Create the visualisation script for the PDB representation.
1417          script = open_write_file(file_name='pymol_display.py', dir=dir, force=True)
1418          script.write("# relax script for displaying the frame order results of this '%s' model in PyMOL.\n\n" % model)
1419          script.write("# PyMOL visualisation.\n")
1420          script.write("pymol.frame_order()\n")
1421          script.close()
1422
1423          # The uniform distribution of structures.
1424          if simulation:
1425              self.interpreter.frame_order.distribute(dir=dir, total=self.dist_total, max_rotations=self.dist_max_rotations, force=True)
1426
1427          # The pseudo-Brownian dynamics simulation.
1428          if simulation:
1429              self.interpreter.frame_order.simulate(dir=dir, step_size=self.brownian_step_size, snapshot=self.brownian_snapshot, total=self.brownian_total, force=True)
1430
1431
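The sanity check above tolerates model names carrying an axis permutation suffix.  A small standalone illustration of that string comparison, using a hypothetical current pipe model name:

    # Illustration of the model name check, with hypothetical values.
    current_pipe_model = 'pseudo-ellipse'

    def matches(model):
        """Mimic the sanity check: strip any permutation suffix, then compare to the pipe model."""
        stripped = model.replace(' permutation A', '').replace(' permutation B', '')
        return model == 'final' or stripped == current_pipe_model

    print(matches('pseudo-ellipse permutation A'))    # True
    print(matches('iso cone'))                        # False - relax would raise RelaxError here.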
1432      def sobol_setup(self, info=None):
1433          """Correctly handle the frame_order.sobol_setup user function.
1434
1435          @keyword info:  The information from the Optimisation_settings.get_*_sobol_info() function.
1436          @type info:     tuple of int or None
1437          """
1438
1439          # Unpack the info.
1440          max_num, oversample = info
1441
1442          # Nothing to do.
1443          if max_num is None:
1444              return
1445
1446          # No oversampling specified.
1447          if oversample is None:
1448              self.interpreter.frame_order.sobol_setup(max_num=max_num)
1449
1450          # Full setup.
1451          else:
1452              self.interpreter.frame_order.sobol_setup(max_num=max_num, oversample=oversample)
1453
1454
1455
1456  class Optimisation_settings:
1457      """A special object for storing the settings for optimisation.
1458
1459      This includes grid search information, zooming grid search settings, and settings for the minimisation.
1460      """
1461
1462      def __init__(self):
1463          """Set up the optimisation settings object."""
1464
1465          # Initialise some private structures for the grid search.
1466          self._grid_count = 0
1467          self._grid_incs = []
1468          self._grid_zoom = []
1469          self._grid_sobol_max_points = []
1470          self._grid_sobol_oversample = []
1471          self._grid_quad_int = []
1472          self._grid_pivot_search = []
1473
1474          # Initialise some private structures for the minimisation.
1475          self._min_count = 0
1476          self._min_algor = []
1477          self._min_func_tol = []
1478          self._min_max_iter = []
1479          self._min_sobol_max_points = []
1480          self._min_sobol_oversample = []
1481          self._min_quad_int = []
1482
1483
1484      def _check_index(self, i, iter_type=None):
1485          """Check that the user supplied iteration index makes sense.
1486
1487          @param i:           The iteration index.
1488          @type i:            int
1489          @keyword iter_type: The type of the index.  This can be either 'grid' or 'min'.
1490          @type iter_type:    str
1491          @raises RelaxError: If the iteration index is invalid.
1492          """
1493
1494          # Check the index.
1495          is_int(i, name='i', can_be_none=False)
1496
1497          # Is the value too high?
1498          if iter_type == 'grid' and i >= self._grid_count:
1499              raise RelaxError("The iteration index %i is too high, only %i grid searches are set up." % (i, self._grid_count))
1500          if iter_type == 'min' and i >= self._min_count:
1501              raise RelaxError("The iteration index %i is too high, only %i minimisations are set up." % (i, self._min_count))
1502
1503
1504      def add_grid(self, inc=None, zoom=None, sobol_max_points=None, sobol_oversample=None, quad_int=False, pivot_search=True):
1505          """Add a grid search step.
1506
1507          @keyword inc:               The grid search size (the number of increments per dimension).
1508          @type inc:                  int
1509          @keyword zoom:              The grid zoom level for this grid search.
1510          @type zoom:                 None or int
1511          @keyword sobol_max_points:  The maximum number of Sobol' points for the PCS numerical integration to use in the grid search.  See the frame_order.sobol_setup user function for details.  If not supplied, then the previous value will be used.
1512          @type sobol_max_points:     None or int
1513          @keyword sobol_oversample:  The Sobol' oversampling factor.  See the frame_order.sobol_setup user function for details.
1514          @type sobol_oversample:     None or int
1515          @keyword quad_int:          The SciPy quadratic integration flag.  See the frame_order.quad_int user function for details.
1516          @type quad_int:             bool
1517          @keyword pivot_search:      A flag which if False will prevent the pivot point from being included in the grid search.
1518          @type pivot_search:         bool
1519          """
1520
1521          # Value checking, as this will be set up by a user.
1522          is_int(inc, name='inc', can_be_none=False)
1523          is_int(zoom, name='zoom', can_be_none=True)
1524          is_int(sobol_max_points, name='sobol_max_points', can_be_none=True)
1525          is_int(sobol_oversample, name='sobol_oversample', can_be_none=True)
1526          is_bool(quad_int, name='quad_int')
1527
1528          # Store the values.
1529          self._grid_incs.append(inc)
1530          self._grid_zoom.append(zoom)
1531          self._grid_sobol_max_points.append(sobol_max_points)
1532          self._grid_sobol_oversample.append(sobol_oversample)
1533          self._grid_quad_int.append(quad_int)
1534          self._grid_pivot_search.append(pivot_search)
1535
1536          # Increment the count.
1537          self._grid_count += 1
1538
1539
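As a usage illustration, a zooming grid search is requested simply by adding several grid steps with increasing zoom levels.  The increments, zoom levels and Sobol' point limits below are arbitrary example values rather than recommendations:

    # Example set up of a zooming grid search (arbitrary illustrative values).
    opt = Optimisation_settings()
    opt.add_grid(inc=11, zoom=0, sobol_max_points=50)
    opt.add_grid(inc=11, zoom=1, sobol_max_points=100)
    opt.add_grid(inc=11, zoom=2, sobol_max_points=200, pivot_search=False)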
1540      def add_min(self, min_algor='simplex', func_tol=1e-25, max_iter=1000000, sobol_max_points=None, sobol_oversample=None, quad_int=False):
1541          """Add an optimisation step.
1542
1543          @keyword min_algor:         The optimisation technique.
1544          @type min_algor:            str
1545          @keyword func_tol:          The minimisation function tolerance cutoff to terminate optimisation (see the minimise.execute user function).
1546          @type func_tol:             None or float
1547          @keyword max_iter:          The maximum number of iterations for the optimisation.
1548          @type max_iter:             int
1549          @keyword sobol_max_points:  The maximum number of Sobol' points for the PCS numerical integration to use in the optimisations after the grid search.  See the frame_order.sobol_setup user function for details.  If not supplied, then the previous value will be used.
1550          @type sobol_max_points:     None or int
1551          @keyword sobol_oversample:  The Sobol' oversampling factor.  See the frame_order.sobol_setup user function for details.
1552          @type sobol_oversample:     None or int
1553          @keyword quad_int:          The SciPy quadratic integration flag.  See the frame_order.quad_int user function for details.
1554          @type quad_int:             bool
1555          """
1556
1557          # Value checking, as this will be set up by a user.
1558          is_str(min_algor, name='min_algor', can_be_none=False)
1559          is_float(func_tol, name='func_tol', can_be_none=True)
1560          is_int(max_iter, name='max_iter', can_be_none=True)
1561          is_int(sobol_max_points, name='sobol_max_points', can_be_none=True)
1562          is_int(sobol_oversample, name='sobol_oversample', can_be_none=True)
1563          is_bool(quad_int, name='quad_int')
1564
1565          # Store the values.
1566          self._min_algor.append(min_algor)
1567          self._min_func_tol.append(func_tol)
1568          self._min_max_iter.append(max_iter)
1569          self._min_sobol_max_points.append(sobol_max_points)
1570          self._min_sobol_oversample.append(sobol_oversample)
1571          self._min_quad_int.append(quad_int)
1572
1573          # Increment the count.
1574          self._min_count += 1
1575
1576
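Minimisation steps are queued in the same way.  Continuing the example above, with arbitrary illustrative values:

    # Example set up of two minimisation steps of increasing accuracy (arbitrary values).
    opt.add_min(min_algor='simplex', func_tol=1e-2, sobol_max_points=100)
    opt.add_min(min_algor='simplex', func_tol=1e-4, sobol_max_points=1000)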
1577      def get_grid_inc(self, i):
1578          """Return the grid increments for the given iteration.
1579
1580          @param i:   The grid search iteration from the loop_grid() method.
1581          @type i:    int
1582          @return:    The grid increments for the iteration.
1583          @rtype:     int
1584          """
1585
1586          # Check the index.
1587          self._check_index(i, iter_type='grid')
1588
1589          # Return the value.
1590          return self._grid_incs[i]
1591
1592
1593      def get_grid_pivot_search(self, i):
1594          """Return the pivot grid search flag.
1595
1596          @param i:   The grid search iteration from the loop_grid() method.
1597          @type i:    int
1598          @return:    The pivot grid search flag.
1599          @rtype:     bool
1600          """
1601
1602          # Check the index.
1603          self._check_index(i, iter_type='grid')
1604
1605          # Return the value.
1606          return self._grid_pivot_search[i]
1607
1608
1609      def get_grid_quad_int(self, i):
1610          """Return the SciPy quadratic integration flag for the given iteration.
1611
1612          @param i:   The grid search iteration from the loop_grid() method.
1613          @type i:    int
1614          @return:    The SciPy quadratic integration flag for the iteration.
1615          @rtype:     bool
1616          """
1617
1618          # Check the index.
1619          self._check_index(i, iter_type='grid')
1620
1621          # Return the value.
1622          return self._grid_quad_int[i]
1623
1624
1625      def get_grid_sobol_info(self, i):
1626          """Return the number of numerical integration points and oversampling factor for the given iteration.
1627
1628          @param i:   The grid search iteration from the loop_grid() method.
1629          @type i:    int
1630          @return:    The number of numerical integration points for the iteration and the oversampling factor.
1631          @rtype:     int, int
1632          """
1633
1634          # Check the index.
1635          self._check_index(i, iter_type='grid')
1636
1637          # Return the value.
1638          return self._grid_sobol_max_points[i], self._grid_sobol_oversample[i]
1639
1640
1641      def get_grid_zoom_level(self, i):
1642          """Return the grid zoom level for the given iteration.
1643
1644          @param i:   The grid search iteration from the loop_grid() method.
1645          @type i:    int
1646          @return:    The grid zoom level for the iteration.
1647          @rtype:     None or int
1648          """
1649
1650          # Check the index.
1651          self._check_index(i, iter_type='grid')
1652
1653          # Return the value.
1654          return self._grid_zoom[i]
1655
1656
1657      def get_min_algor(self, i):
1658          """Return the minimisation algorithm for the given iteration.
1659
1660          @param i:   The minimisation iteration from the loop_min() method.
1661          @type i:    int
1662          @return:    The minimisation algorithm for the iteration.
1663          @rtype:     str
1664          """
1665
1666          # Check the index.
1667          self._check_index(i, iter_type='min')
1668
1669          # Return the value.
1670          return self._min_algor[i]
1671
1672
1673      def get_min_func_tol(self, i):
1674          """Return the minimisation function tolerance level for the given iteration.
1675
1676          @param i:   The minimisation iteration from the loop_min() method.
1677          @type i:    int
1678          @return:    The minimisation function tolerance level for the iteration.
1679          @rtype:     None or float
1680          """
1681
1682          # Check the index.
1683          self._check_index(i, iter_type='min')
1684
1685          # Return the value.
1686          return self._min_func_tol[i]
1687
1688
1689      def get_min_max_iter(self, i):
1690          """Return the maximum number of optimisation iterations for the given minimisation step.
1691
1692          @param i:   The minimisation iteration from the loop_min() method.
1693          @type i:    int
1694          @return:    The maximum number of optimisation iterations for the step.
1695          @rtype:     int
1696          """
1697
1698          # Check the index.
1699          self._check_index(i, iter_type='min')
1700
1701          # Return the value.
1702          return self._min_max_iter[i]
1703
1704
1705      def get_min_quad_int(self, i):
1706          """Return the SciPy quadratic integration flag for the given iteration.
1707
1708          @param i:   The minimisation iteration from the loop_min() method.
1709          @type i:    int
1710          @return:    The SciPy quadratic integration flag for the iteration.
1711          @rtype:     bool
1712          """
1713
1714          # Check the index.
1715          self._check_index(i, iter_type='min')
1716
1717          # Return the value.
1718          return self._min_quad_int[i]
1719
1720
1721      def get_min_sobol_info(self, i):
1722          """Return the number of numerical integration points and oversampling factor for the given iteration.
1723
1724          @param i:   The minimisation iteration from the loop_min() method.
1725          @type i:    int
1726          @return:    The number of numerical integration points for the iteration and the oversampling factor.
1727          @rtype:     int, int
1728          """
1729
1730          # Check the index.
1731          self._check_index(i, iter_type='min')
1732
1733          # Return the value.
1734          return self._min_sobol_max_points[i], self._min_sobol_oversample[i]
1735
1736
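The two-element tuples returned by get_grid_sobol_info() and get_min_sobol_info() are what the sobol_setup() helper of the auto-analysis, shown earlier, expects as its info argument.  A hedged sketch of that hand-off, with illustrative values only:

    # Sketch of how the Sobol' info tuple is consumed (illustrative values only).
    opt = Optimisation_settings()
    opt.add_min(min_algor='simplex', func_tol=1e-4, sobol_max_points=1000, sobol_oversample=10)

    for i in opt.loop_min():
        max_num, oversample = opt.get_min_sobol_info(i)
        if max_num is None:
            continue    # Keep the previous frame_order.sobol_setup settings.
        # In the auto-analysis this pair is passed on via self.sobol_setup(info=...).
        print(max_num, oversample)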
1737      def has_grid(self):
1738          """Is a grid search set up?
1739
1740          @return:    True if a grid search has been set up.
1741          @rtype:     bool
1742          """
1743
1744          # Grid information is present.
1745          if self._grid_count > 0:
1746              return True
1747
1748          # No grid.
1749          return False
1750
1751
1752      def loop_grid(self):
1753          """Generator method for looping over all grid search iterations.
1754
1755          @return:    The grid search iteration.
1756          @rtype:     int
1757          """
1758
1759          # Loop over the grid searches.
1760          for i in range(self._grid_count):
1761              yield i
1762
1763
1764      def loop_min(self):
1765          """Generator method for looping over all minimisation iterations.
1766
1767          @return:    The minimisation iteration.
1768          @rtype:     int
1769          """
1770
1771          # Loop over the minimisations.
1772          for i in range(self._min_count):
1773              yield i
1774
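Putting the pieces together, the following sketch shows how the auto-analysis might loop over these settings.  It is an illustration of the accessor API only, not the actual protocol code, and all numerical values are placeholders:

    # Hedged sketch of consuming the settings object (not the actual protocol code).
    opt = Optimisation_settings()
    opt.add_grid(inc=11, zoom=0, sobol_max_points=50)
    opt.add_grid(inc=11, zoom=1, sobol_max_points=100)
    opt.add_min(min_algor='simplex', func_tol=1e-4, sobol_max_points=1000)

    # The grid search iterations.
    if opt.has_grid():
        for i in opt.loop_grid():
            inc = opt.get_grid_inc(i)
            zoom = opt.get_grid_zoom_level(i)
            # ... the zoom level and increments would feed the grid search user functions here ...

    # The minimisation iterations.
    for i in opt.loop_min():
        algor = opt.get_min_algor(i)
        tol = opt.get_min_func_tol(i)
        # ... minimise.execute(min_algor=algor, func_tol=tol, ...) would be called here ...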