
Source Code for Module specific_analyses.frame_order.api

###############################################################################
#                                                                             #
# Copyright (C) 2009-2011,2013-2014,2017 Edward d'Auvergne                    #
#                                                                             #
# This file is part of the program relax (http://www.nmr-relax.com).          #
#                                                                             #
# This program is free software: you can redistribute it and/or modify        #
# it under the terms of the GNU General Public License as published by        #
# the Free Software Foundation, either version 3 of the License, or           #
# (at your option) any later version.                                         #
#                                                                             #
# This program is distributed in the hope that it will be useful,             #
# but WITHOUT ANY WARRANTY; without even the implied warranty of              #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the               #
# GNU General Public License for more details.                                #
#                                                                             #
# You should have received a copy of the GNU General Public License           #
# along with this program.  If not, see <http://www.gnu.org/licenses/>.       #
#                                                                             #
###############################################################################

# Module docstring.
"""The frame order API object."""

# Python module imports.
from copy import deepcopy
from math import pi
from minfx.grid import grid_split_array
from numpy import float64, zeros
from random import shuffle
from warnings import warn

# relax module imports.
from lib.errors import RelaxError, RelaxNoModelError
from lib.frame_order.variables import MODEL_ISO_CONE_FREE_ROTOR
from lib.warnings import RelaxWarning
from multi import Processor_box
from pipe_control import pipes
from pipe_control.interatomic import interatomic_loop, return_interatom
from pipe_control.mol_res_spin import return_spin, spin_loop
from specific_analyses.api_base import API_base
from specific_analyses.api_common import API_common
from specific_analyses.frame_order.checks import check_pivot
from specific_analyses.frame_order.data import domain_moving
from specific_analyses.frame_order.optimisation import Frame_order_grid_command, Frame_order_memo, Frame_order_minimise_command, count_sobol_points, grid_row, store_bc_data, target_fn_data_setup
from specific_analyses.frame_order.parameter_object import Frame_order_params
from specific_analyses.frame_order.parameters import assemble_param_vector, linear_constraints, param_num, update_model
from target_functions import frame_order

class Frame_order(API_base, API_common):
    """Class containing the specific methods of the Frame Order theories."""

    # Class variable for storing the class instance (for the singleton design pattern).
    instance = None

    def __init__(self):
        """Initialise the class by placing API_common methods into the API."""

        # Place methods into the API.
        self.deselect = self._deselect_global
        self.get_model_container = self._get_model_container_cdp
        self.is_spin_param = self._is_spin_param_false
        self.model_loop = self._model_loop_single_global
        self.model_type = self._model_type_global
        self.print_model_title = self._print_model_title_global
        self.return_conversion_factor = self._return_no_conversion_factor
        self.set_param_values = self._set_param_values_global

        # Place a copy of the parameter list object in the instance namespace.
        self._PARAMS = Frame_order_params()

    def base_data_loop(self):
        """Generator method for looping over the base data - RDCs and PCSs.

        This loop yields the following:

            - The RDC identification data for the interatomic data container and alignment.
            - The PCS identification data for the spin data container and alignment.

        @return: The base data type ('rdc' or 'pcs'), the spin or interatomic data container information (either the single spin ID string or the two spin hashes), and the alignment ID string.
        @rtype: list of str
        """

        # Loop over the interatomic data containers for the moving domain (for the RDC data).
        for interatom in interatomic_loop(selection1=domain_moving()):
            # Skip deselected containers.
            if not interatom.select:
                continue

            # No RDC, so skip.
            if not hasattr(interatom, 'rdc'):
                continue

            # Loop over the alignment IDs.
            for align_id in cdp.rdc_ids:
                # Yield the info set.
                if align_id in interatom.rdc and interatom.rdc[align_id] is not None:
                    yield ['rdc', interatom._spin_hash1, interatom._spin_hash2, align_id]

        # Loop over the spin containers for the moving domain (for the PCS data).
        for spin, spin_id in spin_loop(selection=domain_moving(), return_id=True):
            # Skip deselected spins.
            if not spin.select:
                continue

            # No PCS, so skip.
            if not hasattr(spin, 'pcs'):
                continue

            # Loop over the alignment IDs.
            for align_id in cdp.pcs_ids:
                # Yield the info set.
                if align_id in spin.pcs and spin.pcs[align_id] is not None:
                    yield ['pcs', spin_id, align_id]

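    # Illustrative sketch (not part of the relax sources): the identification sets yielded by
    # base_data_loop() above, assuming a loaded frame order data pipe with RDC and PCS data.
    # The RDC sets carry two spin hashes, the PCS sets a single spin ID string:
    #
    #     api = Frame_order()
    #     for data_id in api.base_data_loop():
    #         if data_id[0] == 'rdc':
    #             data_type, spin_hash1, spin_hash2, align_id = data_id
    #         elif data_id[0] == 'pcs':
    #             data_type, spin_id, align_id = data_id
    #
    # These sets are the 'data_id' arguments expected by create_mc_data(), return_error() and
    # sim_pack_data() below.
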
    def calculate(self, spin_id=None, scaling_matrix=None, verbosity=1, sim_index=None):
        """Calculate the chi-squared value for the current parameter values.

        @keyword spin_id: The spin identification string (unused).
        @type spin_id: None
        @keyword scaling_matrix: The per-model list of diagonal and square scaling matrices.
        @type scaling_matrix: list of numpy rank-2, float64 array or list of None
        @keyword verbosity: The amount of information to print. The higher the value, the greater the verbosity.
        @type verbosity: int
        @keyword sim_index: The optional MC simulation index (unused).
        @type sim_index: None or int
        """

        # Set up the data structures for the target function.
        param_vector, full_tensors, full_in_ref_frame, rdcs, rdc_err, rdc_weight, rdc_vect, rdc_const, pcs, pcs_err, pcs_weight, atomic_pos, temp, frq, paramag_centre, com, ave_pos_pivot, pivot, pivot_opt = target_fn_data_setup(sim_index=sim_index, verbosity=verbosity, unset_fail=True)

        # The numeric integration information.
        if not hasattr(cdp, 'quad_int'):
            cdp.quad_int = False
        sobol_max_points, sobol_oversample = None, None
        if hasattr(cdp, 'sobol_max_points'):
            sobol_max_points = cdp.sobol_max_points
            sobol_oversample = cdp.sobol_oversample

        # Set up the optimisation target function class.
        target_fn = frame_order.Frame_order(model=cdp.model, init_params=param_vector, full_tensors=full_tensors, full_in_ref_frame=full_in_ref_frame, rdcs=rdcs, rdc_errors=rdc_err, rdc_weights=rdc_weight, rdc_vect=rdc_vect, dip_const=rdc_const, pcs=pcs, pcs_errors=pcs_err, pcs_weights=pcs_weight, atomic_pos=atomic_pos, temp=temp, frq=frq, paramag_centre=paramag_centre, com=com, ave_pos_pivot=ave_pos_pivot, pivot=pivot, pivot_opt=pivot_opt, sobol_max_points=sobol_max_points, sobol_oversample=sobol_oversample, quad_int=cdp.quad_int)

        # Make a single function call. This will cause back calculation and the data will be stored in the class instance.
        chi2 = target_fn.func(param_vector)

        # Set the chi2.
        cdp.chi2 = chi2

        # Store the back-calculated data.
        store_bc_data(A_5D_bc=target_fn.A_5D_bc, pcs_theta=target_fn.pcs_theta, rdc_theta=target_fn.rdc_theta)

        # Feedback on the number of integration points used.
        if not cdp.quad_int:
            count_sobol_points(target_fn=target_fn, verbosity=verbosity)

        # Printout.
        if verbosity:
            print("Chi2: %s" % chi2)

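    # Illustrative sketch (not part of the relax sources): a minimal direct use of calculate(),
    # assuming a relax session in which a frame order data pipe has been fully set up (model,
    # pivot, alignment tensors, RDC/PCS data) and is the current data pipe 'cdp'.
    #
    #     api = Frame_order()
    #     api.calculate(verbosity=1)    # Prints "Chi2: ..." and stores cdp.chi2.
    #
    # The back-calculated RDC and PCS values stored by store_bc_data() are what create_mc_data()
    # below reads from the interatomic and spin containers.
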
    def constraint_algorithm(self):
        """Return the 'Log barrier' optimisation constraint algorithm.

        @return: The 'Log barrier' constraint algorithm.
        @rtype: str
        """

        # The log barrier algorithm, as required by minfx.
        return 'Log barrier'

    def create_mc_data(self, data_id=None):
        """Create the Monte Carlo data by back calculating the RDCs or PCSs.

        @keyword data_id: The data set as yielded by the base_data_loop() generator method.
        @type data_id: list of str
        @return: The Monte Carlo simulation data.
        @rtype: list of floats
        """

        # Initialise the MC data structure.
        mc_data = []

        # The RDC data.
        if data_id[0] == 'rdc':
            # Unpack the set.
            data_type, spin_hash1, spin_hash2, align_id = data_id

            # Get the interatomic data container.
            interatom = return_interatom(spin_hash1=spin_hash1, spin_hash2=spin_hash2)

            # Does back-calculated data exist?
            if not hasattr(interatom, 'rdc_bc'):
                self.calculate()

            # The data.
            if not hasattr(interatom, 'rdc_bc') or align_id not in interatom.rdc_bc:
                data = None
            else:
                data = interatom.rdc_bc[align_id]

            # Append the data.
            mc_data.append(data)

        # The PCS data.
        elif data_id[0] == 'pcs':
            # Unpack the set.
            data_type, spin_id, align_id = data_id

            # Get the spin container.
            spin = return_spin(spin_id=spin_id)

            # Does back-calculated data exist?
            if not hasattr(spin, 'pcs_bc'):
                self.calculate()

            # The data.
            if not hasattr(spin, 'pcs_bc') or align_id not in spin.pcs_bc:
                data = None
            else:
                data = spin.pcs_bc[align_id]

            # Append the data.
            mc_data.append(data)

        # Return the data.
        return mc_data

    def duplicate_data(self, pipe_from=None, pipe_to=None, model_info=None, global_stats=False, verbose=True):
        """Duplicate the data specific to a single frame order data pipe.

        @keyword pipe_from: The data pipe to copy the data from.
        @type pipe_from: str
        @keyword pipe_to: The data pipe to copy the data to.
        @type pipe_to: str
        @keyword model_info: The model information from model_loop(). This is unused.
        @type model_info: None
        @keyword global_stats: The global statistics flag.
        @type global_stats: bool
        @keyword verbose: Unused.
        @type verbose: bool
        """

        # Check that the data pipe does not exist.
        if pipes.has_pipe(pipe_to):
            raise RelaxError("The data pipe '%s' already exists." % pipe_to)

        # Create the pipe_to data pipe by copying.
        pipes.copy(pipe_from=pipe_from, pipe_to=pipe_to)

    def eliminate(self, name, value, args, sim=None, model_info=None):
        """Model elimination method.

        @param name: The parameter name.
        @type name: str
        @param value: The parameter value.
        @type value: float
        @param args: The elimination constant overrides.
        @type args: None or tuple of float
        @keyword sim: The Monte Carlo simulation index.
        @type sim: int
        @keyword model_info: The model information from model_loop(). This is unused.
        @type model_info: None
        @return: True if the model is to be eliminated, False otherwise.
        @rtype: bool
        """

        # Text to print out if a model failure occurs.
        text = "The %s parameter of %.5g is %s than %.5g, eliminating "
        if sim is None:
            text += "the model."
        else:
            text += "simulation %i." % sim

        # Isotropic cone angle out of range.
        if name == 'cone_theta' and hasattr(cdp, 'cone_theta'):
            if cdp.cone_theta >= pi:
                print(text % ("cone opening angle theta", cdp.cone_theta, "greater", pi))
                return True
            if cdp.cone_theta < 0.0:
                print(text % ("cone opening angle theta", cdp.cone_theta, "less", 0))
                return True

        # Pseudo-ellipse cone angles out of range (0.001 instead of 0.0 because of truncation in the numerical integration).
        if name == 'cone_theta_x' and hasattr(cdp, 'cone_theta_x'):
            if cdp.cone_theta_x >= pi:
                print(text % ("cone opening angle theta x", cdp.cone_theta_x, "greater", pi))
                return True
            if cdp.cone_theta_x < 0.001:
                print(text % ("cone opening angle theta x", cdp.cone_theta_x, "less", 0.001))
                return True
        if name == 'cone_theta_y' and hasattr(cdp, 'cone_theta_y'):
            if cdp.cone_theta_y >= pi:
                print(text % ("cone opening angle theta y", cdp.cone_theta_y, "greater", pi))
                return True
            if cdp.cone_theta_y < 0.001:
                print(text % ("cone opening angle theta y", cdp.cone_theta_y, "less", 0.001))
                return True

        # Torsion angle out of range.
        if name == 'cone_sigma_max' and hasattr(cdp, 'cone_sigma_max'):
            if cdp.cone_sigma_max >= pi:
                print(text % ("torsion angle sigma_max", cdp.cone_sigma_max, "greater", pi))
                return True
            if cdp.cone_sigma_max < 0.0:
                print(text % ("torsion angle sigma_max", cdp.cone_sigma_max, "less", 0.0))
                return True

        # No failure.
        return False

    def get_param_names(self, model_info=None):
        """Return a vector of parameter names.

        @keyword model_info: The model information from model_loop(). This is unused.
        @type model_info: None
        @return: The vector of parameter names.
        @rtype: list of str
        """

        # First update the model, if needed.
        update_model(verbosity=0)

        # Return the parameter list object.
        return cdp.params

    def get_param_values(self, model_info=None, sim_index=None):
        """Return a vector of parameter values.

        @keyword model_info: The model information from model_loop(). This is unused.
        @type model_info: None
        @keyword sim_index: The Monte Carlo simulation index.
        @type sim_index: int
        @return: The vector of parameter values.
        @rtype: list of float
        """

        # Assemble the values and return them.
        return assemble_param_vector(sim_index=sim_index)

    def grid_search(self, lower=None, upper=None, inc=None, scaling_matrix=None, constraints=False, verbosity=0, sim_index=None):
        """Perform a grid search.

        @keyword lower: The per-model lower bounds of the grid search, the length of which must equal the number of parameters in the model.
        @type lower: list of lists of numbers
        @keyword upper: The per-model upper bounds of the grid search, the length of which must equal the number of parameters in the model.
        @type upper: list of lists of numbers
        @keyword inc: The per-model increments for each dimension of the space for the grid search. The number of elements in the array must equal the number of parameters in the model.
        @type inc: list of lists of int
        @keyword scaling_matrix: The per-model list of diagonal and square scaling matrices.
        @type scaling_matrix: list of numpy rank-2, float64 array or list of None
        @keyword constraints: If True, constraints are applied during the grid search (eliminating parts of the grid). If False, no constraints are used.
        @type constraints: bool
        @keyword verbosity: A flag specifying the amount of information to print. The higher the value, the greater the verbosity.
        @type verbosity: int
        @keyword sim_index: The Monte Carlo simulation index.
        @type sim_index: None or int
        """

        # Test if the Frame Order model has been set up.
        if not hasattr(cdp, 'model'):
            raise RelaxNoModelError('Frame Order')

        # Test if the pivot has been set.
        check_pivot()

        # The number of parameters.
        n = param_num()

        # Alias the single model grid bounds and increments.
        lower = lower[0]
        upper = upper[0]
        inc = inc[0]

        # Initialise the grid increments structures.
        grid = []
        """This structure is a list of lists. The first dimension corresponds to the model parameter. The second dimension is the grid node positions."""

        # Generate the grid.
        for i in range(n):
            # Fixed parameter.
            if inc[i] is None:
                grid.append(None)
                continue

            # Reset.
            dist_type = None
            end_point = True

            # Arccos grid from 0 to pi.
            if cdp.params[i] in ['ave_pos_beta', 'eigen_beta', 'axis_theta']:
                # Change the default increment numbers.
                if not isinstance(inc, list):
                    inc[i] = int(inc[i] / 2) + 1

                # The distribution type and end point.
                dist_type = 'acos'
                end_point = False

            # Append the grid row.
            row = grid_row(inc[i], lower[i], upper[i], dist_type=dist_type, end_point=end_point)
            grid.append(row)

            # Remove an inc if the end point has been removed.
            if not end_point:
                inc[i] -= 1

        # Total number of points.
        total_pts = 1
        for i in range(n):
            # Fixed parameter.
            if grid[i] is None:
                continue

            total_pts = total_pts * len(grid[i])

        # Check the number.
        max_pts = 50e6
        if total_pts > max_pts:
            raise RelaxError("The total number of grid points '%s' exceeds the maximum of '%s'." % (total_pts, int(max_pts)))

        # Build the points array.
        pts = zeros((total_pts, n), float64)
        indices = zeros(n, int)
        for i in range(total_pts):
            # Loop over the dimensions.
            for j in range(n):
                # Fixed parameter.
                if grid[j] is None:
                    # Get the current parameter value.
                    pts[i, j] = getattr(cdp, cdp.params[j]) / scaling_matrix[0][j, j]

                # Add the point coordinate.
                else:
                    pts[i, j] = grid[j][indices[j]] / scaling_matrix[0][j, j]

            # Increment the step positions.
            for j in range(n):
                if inc[j] is not None and indices[j] < inc[j] - 1:
                    indices[j] += 1
                    break    # Exit so that the other step numbers are not incremented.
                else:
                    indices[j] = 0

        # Linear constraints.
        A, b = None, None
        if constraints:
            # Obtain the constraints.
            A, b = linear_constraints(scaling_matrix=scaling_matrix[0])

            # Constraint flag set but no constraints present.
            if A is None:
                if verbosity:
                    warn(RelaxWarning("The '%s' model parameters are not constrained, turning the linear constraint algorithm off." % cdp.model))
                constraints = False

        # The numeric integration information.
        if not hasattr(cdp, 'quad_int'):
            cdp.quad_int = False
        sobol_max_points, sobol_oversample = None, None
        if hasattr(cdp, 'sobol_max_points'):
            sobol_max_points = cdp.sobol_max_points
            sobol_oversample = cdp.sobol_oversample

        # Set up the data structures for the target function.
        param_vector, full_tensors, full_in_ref_frame, rdcs, rdc_err, rdc_weight, rdc_vect, rdc_const, pcs, pcs_err, pcs_weight, atomic_pos, temp, frq, paramag_centre, com, ave_pos_pivot, pivot, pivot_opt = target_fn_data_setup(sim_index=sim_index, verbosity=verbosity)

        # Get the Processor box singleton (it contains the Processor instance) and alias the Processor.
        processor_box = Processor_box()
        processor = processor_box.processor

        # Set up for multi-processor execution.
        if processor.processor_size() > 1:
            # Printout.
            print("Parallelised grid search.")
            print("Randomising the grid points to equalise the time required for each grid subdivision.\n")

            # Randomise the points.
            shuffle(pts)

        # Loop over each grid subdivision, with all points violating constraints being eliminated.
        for subdivision in grid_split_array(divisions=processor.processor_size(), points=pts, A=A, b=b, verbosity=verbosity):
            # Set up the memo for storage on the master.
            memo = Frame_order_memo(sim_index=sim_index, scaling_matrix=scaling_matrix[0])

            # Set up the command object to send to the slave and execute.
            command = Frame_order_grid_command(points=subdivision, scaling_matrix=scaling_matrix[0], sim_index=sim_index, model=cdp.model, param_vector=param_vector, full_tensors=full_tensors, full_in_ref_frame=full_in_ref_frame, rdcs=rdcs, rdc_err=rdc_err, rdc_weight=rdc_weight, rdc_vect=rdc_vect, rdc_const=rdc_const, pcs=pcs, pcs_err=pcs_err, pcs_weight=pcs_weight, atomic_pos=atomic_pos, temp=temp, frq=frq, paramag_centre=paramag_centre, com=com, ave_pos_pivot=ave_pos_pivot, pivot=pivot, pivot_opt=pivot_opt, sobol_max_points=sobol_max_points, sobol_oversample=sobol_oversample, verbosity=verbosity, quad_int=cdp.quad_int)

            # Add the slave command and memo to the processor queue.
            processor.add_to_queue(command, memo)

        # Execute the queued elements.
        processor.run_queue()

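    # Illustrative sketch (not part of the relax sources): the index increment loop in
    # grid_search() above walks the grid like a mixed-radix counter. A standalone Python
    # equivalent for two parameters with 3 and 2 increments:
    #
    #     inc = [3, 2]
    #     indices = [0, 0]
    #     points = []
    #     for i in range(inc[0] * inc[1]):
    #         points.append(list(indices))
    #         for j in range(len(inc)):
    #             if indices[j] < inc[j] - 1:
    #                 indices[j] += 1
    #                 break    # Only the lowest dimension that can still be incremented changes.
    #             else:
    #                 indices[j] = 0
    #
    # This visits [0, 0], [1, 0], [2, 0], [0, 1], [1, 1], [2, 1], matching the order in which
    # the pts array is filled.
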
    def map_bounds(self, param, spin_id=None):
        """Create bounds for the OpenDX mapping function.

        @param param: The name of the parameter to return the lower and upper bounds of.
        @type param: str
        @param spin_id: The spin identification string (unused).
        @type spin_id: None
        @return: The lower and upper bounds of the parameter.
        @rtype: list of float
        """

        # Average domain position.
        if param in ['ave_pos_x', 'ave_pos_y', 'ave_pos_z']:
            return [-100.0, 100.0]
        if param in ['ave_pos_alpha', 'ave_pos_beta', 'ave_pos_gamma']:
            return [0.0, 2*pi]

        # Axis spherical coordinate theta.
        if param == 'axis_theta':
            return [0.0, pi]

        # Axis spherical coordinate phi.
        if param == 'axis_phi':
            return [0.0, 2*pi]

        # Axis alpha angle.
        if param == 'axis_alpha':
            return [0.0, 2*pi]

        # Cone angle.
        if param == 'cone_theta':
            return [0.0, pi]

    def minimise(self, min_algor=None, min_options=None, func_tol=None, grad_tol=None, max_iterations=None, constraints=False, scaling_matrix=None, verbosity=0, sim_index=None, lower=None, upper=None, inc=None):
        """Minimisation function.

        @param min_algor: The minimisation algorithm to use.
        @type min_algor: str
        @param min_options: An array of options to be used by the minimisation algorithm.
        @type min_options: array of str
        @param func_tol: The function tolerance which, when reached, terminates optimisation. Setting this to None turns off the check.
        @type func_tol: None or float
        @param grad_tol: The gradient tolerance which, when reached, terminates optimisation. Setting this to None turns off the check.
        @type grad_tol: None or float
        @param max_iterations: The maximum number of iterations for the algorithm.
        @type max_iterations: int
        @param constraints: If True, constraints are used during optimisation.
        @type constraints: bool
        @keyword scaling_matrix: The per-model list of diagonal and square scaling matrices.
        @type scaling_matrix: list of numpy rank-2, float64 array or list of None
        @param verbosity: A flag specifying the amount of information to print. The higher the value, the greater the verbosity.
        @type verbosity: int
        @param sim_index: The index of the simulation to optimise. This should be None if normal optimisation is desired.
        @type sim_index: None or int
        @keyword lower: The per-model lower bounds of the grid search, the length of which must equal the number of parameters in the model. This optional argument is only used when doing a grid search.
        @type lower: list of lists of numbers
        @keyword upper: The per-model upper bounds of the grid search, the length of which must equal the number of parameters in the model. This optional argument is only used when doing a grid search.
        @type upper: list of lists of numbers
        @keyword inc: The per-model increments for each dimension of the space for the grid search. The number of elements in the array must equal the number of parameters in the model. This argument is only used when doing a grid search.
        @type inc: list of lists of int
        """

        # Check the optimisation algorithm.
        algor = min_algor
        if min_algor == 'Log barrier':
            algor = min_options[0]
        allowed = ['simplex']
        if algor not in allowed:
            raise RelaxError("Only the 'simplex' minimisation algorithm is supported for the frame order analysis as function gradients are not implemented.")

        # Set up the data structures for the target function.
        param_vector, full_tensors, full_in_ref_frame, rdcs, rdc_err, rdc_weight, rdc_vect, rdc_const, pcs, pcs_err, pcs_weight, atomic_pos, temp, frq, paramag_centre, com, ave_pos_pivot, pivot, pivot_opt = target_fn_data_setup(sim_index=sim_index, verbosity=verbosity, unset_fail=True)

        # The numeric integration information.
        if not hasattr(cdp, 'quad_int'):
            cdp.quad_int = False
        sobol_max_points, sobol_oversample = None, None
        if hasattr(cdp, 'sobol_max_points'):
            sobol_max_points = cdp.sobol_max_points
            sobol_oversample = cdp.sobol_oversample

        # Get the Processor box singleton (it contains the Processor instance) and alias the Processor.
        processor_box = Processor_box()
        processor = processor_box.processor

        # Set up the memo for storage on the master.
        memo = Frame_order_memo(sim_index=sim_index, scaling_matrix=scaling_matrix[0])

        # Set up the command object to send to the slave and execute.
        command = Frame_order_minimise_command(min_algor=min_algor, min_options=min_options, func_tol=func_tol, grad_tol=grad_tol, max_iterations=max_iterations, scaling_matrix=scaling_matrix[0], constraints=constraints, sim_index=sim_index, model=cdp.model, param_vector=param_vector, full_tensors=full_tensors, full_in_ref_frame=full_in_ref_frame, rdcs=rdcs, rdc_err=rdc_err, rdc_weight=rdc_weight, rdc_vect=rdc_vect, rdc_const=rdc_const, pcs=pcs, pcs_err=pcs_err, pcs_weight=pcs_weight, atomic_pos=atomic_pos, temp=temp, frq=frq, paramag_centre=paramag_centre, com=com, ave_pos_pivot=ave_pos_pivot, pivot=pivot, pivot_opt=pivot_opt, sobol_max_points=sobol_max_points, sobol_oversample=sobol_oversample, verbosity=verbosity, quad_int=cdp.quad_int)

        # Add the slave command and memo to the processor queue.
        processor.add_to_queue(command, memo)

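    # Illustrative sketch (not part of the relax sources): minimise() above only accepts the
    # simplex algorithm wrapped in the 'Log barrier' constraint method returned by
    # constraint_algorithm(). A direct call might look as follows, assuming a prepared frame
    # order data pipe and illustrative tolerance and iteration values:
    #
    #     api = Frame_order()
    #     api.minimise(min_algor='Log barrier', min_options=('simplex',), func_tol=1e-25,
    #                  grad_tol=None, max_iterations=int(1e7), constraints=True,
    #                  scaling_matrix=[None], verbosity=1)
    #
    # Note that minimise() only queues a Frame_order_minimise_command on the multi-processor
    # framework; the queue is executed by the calling code, not by this method.
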
    def model_desc(self, model_info=None):
        """Return a description of the model.

        @keyword model_info: The model information from model_loop(). This is unused.
        @type model_info: None
        @return: The model description.
        @rtype: str
        """

        return ""

    def model_statistics(self, model_info=None, spin_id=None, global_stats=None):
        """Return the k, n, and chi2 model statistics.

        k - number of parameters.
        n - number of data points.
        chi2 - the chi-squared value.

        @keyword model_info: The model information from model_loop(). This is unused.
        @type model_info: None
        @keyword spin_id: Unused.
        @type spin_id: None
        @keyword global_stats: Unused.
        @type global_stats: None
        @return: The optimisation statistics, in tuple format, of the number of parameters (k), the number of data points (n), and the chi-squared value (chi2).
        @rtype: tuple of (int, int, float)
        """

        # Count the number of parameters.
        k = len(cdp.params)

        # The number of data points (RDCs + PCSs).
        n = 0
        for data in self.base_data_loop():
            n += 1

        # Check for the chi2 value.
        if not hasattr(cdp, 'chi2'):
            raise RelaxError("Statistics are not available, most likely because the model has not been optimised.")

        # Return the data.
        return k, n, cdp.chi2

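    # Illustrative sketch (not part of the relax sources): the k, n and chi2 values returned by
    # model_statistics() above are what frame order model selection operates on. For example,
    # Akaike's Information Criterion can be computed directly from the returned tuple:
    #
    #     api = Frame_order()
    #     k, n, chi2 = api.model_statistics()
    #     aic = chi2 + 2.0*k
    #
    # relax's own model selection is driven through its pipe_control machinery; this snippet
    # only shows how the returned tuple is meant to be consumed.
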
    def overfit_deselect(self, data_check=True, verbose=True):
        """Deselect spins which have insufficient data to support minimisation.

        @keyword data_check: A flag to signal if the presence of base data is to be checked for.
        @type data_check: bool
        @keyword verbose: A flag which if True will allow printouts.
        @type verbose: bool
        """

        # Nothing to do.
        if not data_check:
            return

        # Loop over spin data, checking for PCS data.
        ids = []
        for spin, spin_id in spin_loop(return_id=True, skip_desel=True):
            if not hasattr(spin, 'pcs'):
                spin.select = False
                ids.append(spin_id)
        if verbose and len(ids):
            warn(RelaxWarning("No PCS data is present, deselecting the spins %s." % ids))

        # Loop over the interatomic data containers, checking for RDC data.
        ids = []
        for interatom in interatomic_loop(selection1=domain_moving()):
            if not hasattr(interatom, 'rdc'):
                interatom.select = False
                ids.append("%s - %s" % (interatom.spin_id1, interatom.spin_id2))
        if verbose and len(ids):
            warn(RelaxWarning("No RDC data is present, deselecting the interatomic data containers between spin pairs %s." % ids))

    def return_error(self, data_id):
        """Return the RDC or PCS error structure.

        @param data_id: The data set as yielded by the base_data_loop() generator method.
        @type data_id: list of str
        @return: The array of RDC or PCS error values.
        @rtype: list of float
        """

        # Initialise the MC data structure.
        mc_errors = []

        # The RDC data.
        if data_id[0] == 'rdc':
            # Unpack the set.
            data_type, spin_hash1, spin_hash2, align_id = data_id

            # Get the interatomic data container.
            interatom = return_interatom(spin_hash1=spin_hash1, spin_hash2=spin_hash2)

            # Do errors exist?
            if not hasattr(interatom, 'rdc_err'):
                raise RelaxError("The RDC errors are missing for the interatomic data container between spins '%s' and '%s'." % (interatom.spin_id1, interatom.spin_id2))

            # Handle missing data.
            if align_id not in interatom.rdc_err:
                mc_errors.append(None)

            # Append the data.
            else:
                mc_errors.append(interatom.rdc_err[align_id])

        # The PCS data.
        elif data_id[0] == 'pcs':
            # Unpack the set.
            data_type, spin_id, align_id = data_id

            # Get the spin container.
            spin = return_spin(spin_id=spin_id)

            # Do errors exist?
            if not hasattr(spin, 'pcs_err'):
                raise RelaxError("The PCS errors are missing for spin '%s'." % spin_id)

            # Handle missing data.
            if align_id not in spin.pcs_err:
                mc_errors.append(None)

            # Append the data.
            else:
                mc_errors.append(spin.pcs_err[align_id])

        # Return the errors.
        return mc_errors

    def set_error(self, index, error, model_info=None):
        """Set the parameter errors.

        @param index: The index of the parameter to set the errors for.
        @type index: int
        @param error: The error value.
        @type error: float
        @keyword model_info: The model information from model_loop(). This is unused.
        @type model_info: None
        """

        # Parameter increment counter.
        inc = 0

        # Loop over the residue specific parameters.
        for param in self.data_names(set='params'):
            # Not a parameter of the model.
            if param not in cdp.params:
                continue

            # Set the parameter error.
            if index == inc:
                setattr(cdp, param + "_err", error)

            # Increment.
            inc = inc + 1

        # Add some additional parameters.
        if cdp.model == MODEL_ISO_CONE_FREE_ROTOR and inc == index:
            setattr(cdp, 'cone_theta_err', error)

    def set_selected_sim(self, select_sim, model_info=None):
        """Set the simulation selection flag for the spin.

        @param select_sim: The selection flag for the simulations.
        @type select_sim: bool
        @keyword model_info: The model information from model_loop(). This is unused.
        @type model_info: None
        """

        # Set the array.
        cdp.select_sim = deepcopy(select_sim)

    def sim_init_values(self):
        """Initialise the Monte Carlo parameter values."""

        # Get the parameter object names.
        param_names = self.data_names(set='params')

        # The model parameters.
        model_params = deepcopy(cdp.params)

        # Add some additional parameters.
        if cdp.model == MODEL_ISO_CONE_FREE_ROTOR:
            param_names.append('cone_theta')
            model_params.append('cone_theta')

        # Get the minimisation statistic object names.
        min_names = self.data_names(set='min')

        # Loop over all the data names.
        for object_name in param_names:
            # Not a parameter of the model.
            if object_name not in model_params:
                continue

            # Name for the simulation object.
            sim_object_name = object_name + '_sim'

            # Create the simulation object.
            setattr(cdp, sim_object_name, [])

            # Get the simulation object.
            sim_object = getattr(cdp, sim_object_name)

            # Loop over the simulations.
            for j in range(cdp.sim_number):
                # Copy and append the data.
                sim_object.append(deepcopy(getattr(cdp, object_name)))

        # Loop over all the minimisation object names.
        for object_name in min_names:
            # Name for the simulation object.
            sim_object_name = object_name + '_sim'

            # Create the simulation object.
            setattr(cdp, sim_object_name, [])

            # Get the normal and simulation object.
            object = None
            if hasattr(cdp, object_name):
                object = getattr(cdp, object_name)
            sim_object = getattr(cdp, sim_object_name)

            # Loop over the simulations.
            for j in range(cdp.sim_number):
                # Copy and append the data.
                sim_object.append(deepcopy(object))

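    # Illustrative sketch (not part of the relax sources): sim_init_values() above creates one
    # '<name>_sim' list per model parameter and per minimisation statistic, each seeded with
    # cdp.sim_number copies of the current value. Assuming a model containing cone_sigma_max
    # and cdp.sim_number = 500, the current data pipe would afterwards contain, for example:
    #
    #     cdp.cone_sigma_max_sim    # list of 500 floats, all equal to cdp.cone_sigma_max
    #     cdp.chi2_sim              # list of 500 copies of cdp.chi2 (a minimisation statistic)
    #
    # Each Monte Carlo simulation then overwrites its own element of these lists.
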
    def sim_pack_data(self, data_id, sim_data):
        """Pack the Monte Carlo simulation data.

        @param data_id: The data set as yielded by the base_data_loop() generator method.
        @type data_id: list of str
        @param sim_data: The Monte Carlo simulation data.
        @type sim_data: list of float
        """

        # The RDC data.
        if data_id[0] == 'rdc':
            # Unpack the set.
            data_type, spin_hash1, spin_hash2, align_id = data_id

            # Get the interatomic data container.
            interatom = return_interatom(spin_hash1=spin_hash1, spin_hash2=spin_hash2)

            # Initialise.
            if not hasattr(interatom, 'rdc_sim'):
                interatom.rdc_sim = {}

            # Store the data structure.
            interatom.rdc_sim[align_id] = []
            for i in range(cdp.sim_number):
                interatom.rdc_sim[align_id].append(sim_data[i][0])

        # The PCS data.
        elif data_id[0] == 'pcs':
            # Unpack the set.
            data_type, spin_id, align_id = data_id

            # Get the spin container.
            spin = return_spin(spin_id=spin_id)

            # Initialise.
            if not hasattr(spin, 'pcs_sim'):
                spin.pcs_sim = {}

            # Store the data structure.
            spin.pcs_sim[data_id[2]] = []
            for i in range(cdp.sim_number):
                spin.pcs_sim[data_id[2]].append(sim_data[i][0])

    def sim_return_param(self, index, model_info=None):
        """Return the array of simulation parameter values.

        @param index: The index of the parameter to return the array of values for.
        @type index: int
        @keyword model_info: The model information from model_loop(). This is unused.
        @type model_info: None
        @return: The array of simulation parameter values.
        @rtype: list of float
        """

        # Parameter increment counter.
        inc = 0

        # Get the parameter object names.
        param_names = self.data_names(set='params')

        # Loop over the parameters.
        for param in param_names:
            # Not a parameter of the model.
            if param not in cdp.params:
                continue

            # Return the parameter array.
            if index == inc:
                return getattr(cdp, param + "_sim")

            # Increment.
            inc = inc + 1

        # Add some additional parameters.
        if cdp.model == MODEL_ISO_CONE_FREE_ROTOR and inc == index:
            return getattr(cdp, 'cone_theta_sim')

    def sim_return_selected(self, model_info=None):
        """Return the array of selected simulation flags for the spin.

        @keyword model_info: The model information from model_loop(). This is unused.
        @type model_info: None
        @return: The array of selected simulation flags.
        @rtype: list of int
        """

        # Return the array.
        return cdp.select_sim