
Source Code for Module specific_analyses.n_state_model.api

###############################################################################
#                                                                             #
# Copyright (C) 2007-2014 Edward d'Auvergne                                   #
#                                                                             #
# This file is part of the program relax (http://www.nmr-relax.com).          #
#                                                                             #
# This program is free software: you can redistribute it and/or modify        #
# it under the terms of the GNU General Public License as published by        #
# the Free Software Foundation, either version 3 of the License, or           #
# (at your option) any later version.                                         #
#                                                                             #
# This program is distributed in the hope that it will be useful,             #
# but WITHOUT ANY WARRANTY; without even the implied warranty of              #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the               #
# GNU General Public License for more details.                                #
#                                                                             #
# You should have received a copy of the GNU General Public License           #
# along with this program.  If not, see <http://www.gnu.org/licenses/>.       #
#                                                                             #
###############################################################################

# Module docstring.
"""The N-state model or structural ensemble analysis."""

# Python module imports.
from copy import deepcopy
from math import pi
from minfx.generic import generic_minimise
from minfx.grid import grid
from numpy import dot, float64, zeros
from re import search
from warnings import warn

# relax module imports.
import lib.arg_check
from lib.errors import RelaxError, RelaxInfError, RelaxNaNError, RelaxNoModelError
from lib.float import isNaN, isInf
from lib.optimisation import test_grid_ops
from lib.warnings import RelaxWarning
from pipe_control import align_tensor, pcs, rdc
from pipe_control.align_tensor import opt_uses_tensor
from pipe_control.interatomic import interatomic_loop
from pipe_control.mol_res_spin import spin_loop
from specific_analyses.api_base import API_base
from specific_analyses.api_common import API_common
from specific_analyses.n_state_model.data import base_data_types, calc_ave_dist, num_data_points
from specific_analyses.n_state_model.optimisation import minimise_bc_data, target_fn_setup
from specific_analyses.n_state_model.parameter_object import N_state_params
from specific_analyses.n_state_model.parameters import disassemble_param_vector, linear_constraints, param_num
from target_functions.potential import quad_pot

class N_state_model(API_base, API_common):
    """Class containing functions for the N-state model."""

    # Class variable for storing the class instance (for the singleton design pattern).
    instance = None

    def __init__(self):
        """Initialise the class by placing API_common methods into the API."""

        # Place methods into the API.
        self.model_loop = self._model_loop_single_global
        self.overfit_deselect = self._overfit_deselect_dummy
        self.set_selected_sim = self._set_selected_sim_global
        self.sim_return_selected = self._sim_return_selected_global

        # Place a copy of the parameter list object in the instance namespace.
        self._PARAMS = N_state_params()

    def base_data_loop(self):
        """Loop over the base data of the spins - RDCs, PCSs, and NOESY data.

        This loop iterates for each data point (RDC, PCS, NOESY) for each spin or interatomic data container, returning the identification information.

        @return:    A list of the spin or interatomic data container, the data type ('rdc', 'pcs', 'noesy'), and the alignment ID if required.
        @rtype:     list of [SpinContainer instance, str, str] or [InteratomContainer instance, str, str]
        """

        # Loop over the interatomic data containers.
        for interatom in interatomic_loop():
            # Skip deselected data.
            if not interatom.select:
                continue

            # Re-initialise the data structure.
            data = [interatom, None, None]

            # RDC data.
            if hasattr(interatom, 'rdc'):
                data[1] = 'rdc'

                # Loop over the alignment IDs.
                for id in cdp.rdc_ids:
                    # Add the ID.
                    data[2] = id

                    # Yield the set.
                    yield data

            # NOESY data.
            if hasattr(interatom, 'noesy'):
                data[1] = 'noesy'

                # Loop over the alignment IDs.
                for id in cdp.noesy_ids:
                    # Add the ID.
                    data[2] = id

                    # Yield the set.
                    yield data

        # Loop over the spins.
        for spin in spin_loop():
            # Skip deselected data.
            if not spin.select:
                continue

            # Re-initialise the data structure.
            data = [spin, None, None]

            # PCS data.
            if hasattr(spin, 'pcs'):
                data[1] = 'pcs'

                # Loop over the alignment IDs.
                for id in cdp.pcs_ids:
                    # Add the ID.
                    data[2] = id

                    # Yield the set.
                    yield data

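    # Illustrative sketch (not part of the original module):  consuming the
    # base_data_loop() generator from an analysis script might look like the
    # following, assuming a current data pipe (cdp) already loaded with RDC or
    # PCS data.  The 'Dy' alignment ID is purely hypothetical.
    #
    #     api = N_state_model()
    #     for container, data_type, align_id in api.base_data_loop():
    #         # e.g. container = an interatomic data container, data_type = 'rdc', align_id = 'Dy'.
    #         print(data_type, align_id)
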
    def calculate(self, spin_id=None, verbosity=1, sim_index=None):
        """Calculation function.

        Currently this function simply calculates the NOESY flat-bottom quadratic energy potential, if NOE restraints are available.

        @keyword spin_id:   The spin identification string (unused).
        @type spin_id:      None or str
        @keyword verbosity: The amount of information to print. The higher the value, the greater the verbosity.
        @type verbosity:    int
        @keyword sim_index: The MC simulation index (unused).
        @type sim_index:    None
        """

        # Set up the target function for direct calculation.
        model, param_vector, data_types, scaling_matrix = target_fn_setup()

        # Calculate the chi-squared value.
        if model:
            # Make a function call.
            chi2 = model.func(param_vector)

            # Store the global chi-squared value.
            cdp.chi2 = chi2

            # Store the back-calculated data.
            minimise_bc_data(model)

            # Calculate the RDC Q factors.
            if 'rdc' in data_types:
                rdc.q_factors()

            # Calculate the PCS Q factors.
            if 'pcs' in data_types:
                pcs.q_factors()

        # NOE potential.
        if hasattr(cdp, 'noe_restraints'):
            # Init some numpy arrays.
            num_restraints = len(cdp.noe_restraints)
            dist = zeros(num_restraints, float64)
            pot = zeros(num_restraints, float64)
            lower = zeros(num_restraints, float64)
            upper = zeros(num_restraints, float64)

            # Loop over the NOEs.
            for i in range(num_restraints):
                # Create arrays of the NOEs.
                lower[i] = cdp.noe_restraints[i][2]
                upper[i] = cdp.noe_restraints[i][3]

                # Calculate the average distances, using -6 power averaging.
                dist[i] = calc_ave_dist(cdp.noe_restraints[i][0], cdp.noe_restraints[i][1], exp=-6)

            # Calculate the quadratic potential.
            quad_pot(dist, pot, lower, upper)

            # Store the distance and potential information.
            cdp.ave_dist = []
            cdp.quad_pot = []
            for i in range(num_restraints):
                cdp.ave_dist.append([cdp.noe_restraints[i][0], cdp.noe_restraints[i][1], dist[i]])
                cdp.quad_pot.append([cdp.noe_restraints[i][0], cdp.noe_restraints[i][1], pot[i]])

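    # Illustrative note (not part of the original module):  the flat-bottom
    # quadratic potential filled in by quad_pot() above is, for each restraint
    # with bounds [lower, upper] and up to any force constant, assumed to be
    # zero inside the bounds and quadratic outside:
    #
    #     pot(r) = (r - upper)**2  if r > upper,
    #              (lower - r)**2  if r < lower,
    #              0               otherwise.
    #
    # So a -6 power averaged distance of 6.2 A against an upper bound of 6.0 A
    # would, for example, contribute a penalty of roughly 0.04 to cdp.quad_pot.
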
    def create_mc_data(self, data_id=None):
        """Create the Monte Carlo data by back-calculation.

        @keyword data_id:   The list of spin ID, data type, and alignment ID, as yielded by the base_data_loop() generator method.
        @type data_id:      list of str
        @return:            The Monte Carlo simulation data.
        @rtype:             list of floats
        """

        # Initialise the MC data structure.
        mc_data = []

        # Alias the spin or interatomic data container.
        container = data_id[0]

        # RDC data.
        if data_id[1] == 'rdc' and hasattr(container, 'rdc'):
            # Does back-calculated data exist?
            if not hasattr(container, 'rdc_bc'):
                self.calculate()

            # The data.
            if not hasattr(container, 'rdc_bc') or data_id[2] not in container.rdc_bc:
                data = None
            else:
                data = container.rdc_bc[data_id[2]]

            # Append the data.
            mc_data.append(data)

        # NOESY data.
        elif data_id[1] == 'noesy' and hasattr(container, 'noesy'):
            # Does back-calculated data exist?
            if not hasattr(container, 'noesy_bc'):
                self.calculate()

            # Append the data.
            mc_data.append(container.noesy_bc)

        # PCS data.
        elif data_id[1] == 'pcs' and hasattr(container, 'pcs'):
            # Does back-calculated data exist?
            if not hasattr(container, 'pcs_bc'):
                self.calculate()

            # The data.
            if not hasattr(container, 'pcs_bc') or data_id[2] not in container.pcs_bc:
                data = None
            else:
                data = container.pcs_bc[data_id[2]]

            # Append the data.
            mc_data.append(data)

        # Return the data.
        return mc_data

    def grid_search(self, lower=None, upper=None, inc=None, constraints=False, verbosity=0, sim_index=None):
        """The grid search function.

        @param lower:       The lower bounds of the grid search, the length of which must equal the number of parameters in the model.
        @type lower:        array of numbers
        @param upper:       The upper bounds of the grid search, the length of which must equal the number of parameters in the model.
        @type upper:        array of numbers
        @param inc:         The increments for each dimension of the space for the grid search. The number of elements in the array must equal the number of parameters in the model.
        @type inc:          array of int
        @param constraints: If True, constraints are applied during the grid search (eliminating parts of the grid). If False, no constraints are used.
        @type constraints:  bool
        @param verbosity:   A flag specifying the amount of information to print. The higher the value, the greater the verbosity.
        @type verbosity:    int
        @keyword sim_index: The index of the Monte Carlo simulation to apply the grid search to. If None, the normal model is optimised.
        @type sim_index:    None or int
        """

        # Test if the N-state model has been set up.
        if not hasattr(cdp, 'model'):
            raise RelaxNoModelError('N-state')

        # The number of parameters.
        n = param_num()

        # Make sure that the length of the parameter array is > 0.
        if n == 0:
            print("Cannot run a grid search on a model with zero parameters, skipping the grid search.")
            return

        # Test the grid search options.
        test_grid_ops(lower=lower, upper=upper, inc=inc, n=n)

        # If inc is a single int, convert it into an array of that value.
        if isinstance(inc, int):
            inc = [inc]*n

        # Setup the default bounds.
        if not lower:
            # Init.
            lower = []
            upper = []

            # Loop over the parameters.
            for i in range(n):
                # i is in the parameter array.
                if i < len(cdp.params):
                    # Probabilities (default values).
                    if search('^p', cdp.params[i]):
                        lower.append(0.0)
                        upper.append(1.0)

                    # Angles (default values).
                    if search('^alpha', cdp.params[i]) or search('^gamma', cdp.params[i]):
                        lower.append(0.0)
                        upper.append(2*pi)
                    elif search('^beta', cdp.params[i]):
                        lower.append(0.0)
                        upper.append(pi)

                # The paramagnetic centre.
                elif hasattr(cdp, 'paramag_centre_fixed') and not cdp.paramag_centre_fixed and (n - i) <= 3:
                    lower.append(-100)
                    upper.append(100)

                # Otherwise this must be an alignment tensor component.
                else:
                    lower.append(-1e-3)
                    upper.append(1e-3)

        # Determine the data type.
        data_types = base_data_types()

        # The number of tensors to optimise.
        tensor_num = align_tensor.num_tensors(skip_fixed=True)

        # Custom sub-grid search for when only tensors are optimised (as each tensor is independent, the number of points collapses from inc**(5*N) to N*inc**5).
        if cdp.model == 'fixed' and tensor_num > 1 and ('rdc' in data_types or 'pcs' in data_types) and not align_tensor.all_tensors_fixed() and hasattr(cdp, 'paramag_centre_fixed') and cdp.paramag_centre_fixed:
            # Print out.
            print("Optimising each alignment tensor separately.")

            # Store the alignment tensor fixed flags.
            fixed_flags = []
            for i in range(len(cdp.align_ids)):
                # Get the tensor object.
                tensor = align_tensor.return_tensor(index=i, skip_fixed=False)

                # Store the flag.
                fixed_flags.append(tensor.fixed)

                # Fix the tensor.
                tensor.set('fixed', True)

            # Loop over each sub-grid.
            for i in range(len(cdp.align_ids)):
                # Skip the tensor if originally fixed.
                if fixed_flags[i]:
                    continue

                # Get the tensor object.
                tensor = align_tensor.return_tensor(index=i, skip_fixed=False)

                # Unfix the current tensor.
                tensor.set('fixed', False)

                # Grid search parameter subsets.
                lower_sub = lower[i*5:i*5+5]
                upper_sub = upper[i*5:i*5+5]
                inc_sub = inc[i*5:i*5+5]

                # Minimisation of the sub-grid.
                self.minimise(min_algor='grid', lower=lower_sub, upper=upper_sub, inc=inc_sub, constraints=constraints, verbosity=verbosity, sim_index=sim_index)

                # Fix the tensor again.
                tensor.set('fixed', True)

            # Reset the state of the tensors.
            for i in range(len(cdp.align_ids)):
                # Get the tensor object.
                tensor = align_tensor.return_tensor(index=i, skip_fixed=False)

                # Fix the tensor.
                tensor.set('fixed', fixed_flags[i])

        # All other minimisation.
        else:
            self.minimise(min_algor='grid', lower=lower, upper=upper, inc=inc, constraints=constraints, verbosity=verbosity, sim_index=sim_index)

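    # Illustrative note (not part of the original module):  the saving from the
    # sub-grid search above can be large. For example, with 3 free alignment
    # tensors (5 parameters each) and 11 grid increments per dimension, a full
    # grid would contain 11**15, roughly 4.2e15, points, whereas optimising each
    # tensor separately only requires 3 * 11**5 = 483,153 function calls.
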
    def is_spin_param(self, name):
        """Determine whether the given parameter is spin specific.

        @param name:    The name of the parameter.
        @type name:     str
        @return:        True if the parameter is spin specific, False otherwise.
        @rtype:         bool
        """

        # Spin specific parameters.
        if name in ['r', 'heteronuc_type', 'proton_type']:
            return True

        # All other parameters are global.
        return False

    def map_bounds(self, param, spin_id=None):
        """Create bounds for the OpenDX mapping function.

        @param param:   The name of the parameter to return the lower and upper bounds of.
        @type param:    str
        @param spin_id: The spin identification string (unused).
        @type spin_id:  None
        @return:        The lower and upper bounds of the parameter.
        @rtype:         list of float
        """

        # Paramagnetic centre.
        if search('^paramag_[xyz]$', param):
            return [-100.0, 100.0]

    def minimise(self, min_algor=None, min_options=None, func_tol=None, grad_tol=None, max_iterations=None, constraints=False, scaling=True, verbosity=0, sim_index=None, lower=None, upper=None, inc=None):
        """Minimisation function.

        @param min_algor:       The minimisation algorithm to use.
        @type min_algor:        str
        @param min_options:     An array of options to be used by the minimisation algorithm.
        @type min_options:      array of str
        @param func_tol:        The function tolerance which, when reached, terminates optimisation. Setting this to None turns off the check.
        @type func_tol:         None or float
        @param grad_tol:        The gradient tolerance which, when reached, terminates optimisation. Setting this to None turns off the check.
        @type grad_tol:         None or float
        @param max_iterations:  The maximum number of iterations for the algorithm.
        @type max_iterations:   int
        @param constraints:     If True, constraints are used during optimisation.
        @type constraints:      bool
        @param scaling:         If True, diagonal scaling is enabled during optimisation to allow the problem to be better conditioned.
        @type scaling:          bool
        @param verbosity:       A flag specifying the amount of information to print. The higher the value, the greater the verbosity.
        @type verbosity:        int
        @param sim_index:       The index of the simulation to optimise. This should be None if normal optimisation is desired.
        @type sim_index:        None or int
        @keyword lower:         The lower bounds of the grid search, the length of which must equal the number of parameters in the model. This optional argument is only used when doing a grid search.
        @type lower:            array of numbers
        @keyword upper:         The upper bounds of the grid search, the length of which must equal the number of parameters in the model. This optional argument is only used when doing a grid search.
        @type upper:            array of numbers
        @keyword inc:           The increments for each dimension of the space for the grid search. The number of elements in the array must equal the number of parameters in the model. This argument is only used when doing a grid search.
        @type inc:              array of int
        """

        # Set up the target function for direct calculation.
        model, param_vector, data_types, scaling_matrix = target_fn_setup(sim_index=sim_index, scaling=scaling)

        # Nothing to do!
        if not len(param_vector):
            warn(RelaxWarning("The model has no parameters, minimisation cannot be performed."))
            return

        # Right, constraints cannot be used for the 'fixed' model.
        if constraints and cdp.model == 'fixed':
            warn(RelaxWarning("Turning constraints off. These cannot be used for the 'fixed' model."))
            constraints = False

            # Pop out the Method of Multipliers algorithm.
            if min_algor == 'Method of Multipliers':
                min_algor = min_options[0]
                min_options = min_options[1:]

        # And constraints absolutely must be used for the 'population' model.
        if not constraints and cdp.model == 'population':
            warn(RelaxWarning("Turning constraints on. These absolutely must be used for the 'population' model."))
            constraints = True

            # Add the Method of Multipliers algorithm.
            min_options = (min_algor,) + min_options
            min_algor = 'Method of Multipliers'

        # Disallow Newton optimisation and other Hessian optimisers for the paramagnetic centre position optimisation (the PCS Hessian is not yet implemented).
        if hasattr(cdp, 'paramag_centre_fixed') and not cdp.paramag_centre_fixed:
            if min_algor in ['newton']:
                raise RelaxError("For the paramagnetic centre position, as the Hessians are not yet implemented Newton optimisation cannot be performed.")

        # Linear constraints.
        if constraints:
            A, b = linear_constraints(data_types=data_types, scaling_matrix=scaling_matrix)
        else:
            A, b = None, None

        # Grid search.
        if search('^[Gg]rid', min_algor):
            # Scaling.
            if scaling:
                for i in range(len(param_vector)):
                    lower[i] = lower[i] / scaling_matrix[i, i]
                    upper[i] = upper[i] / scaling_matrix[i, i]

            # The search.
            results = grid(func=model.func, args=(), num_incs=inc, lower=lower, upper=upper, A=A, b=b, verbosity=verbosity)

            # Unpack the results.
            param_vector, func, iter_count, warning = results
            f_count = iter_count
            g_count = 0.0
            h_count = 0.0

        # Minimisation.
        else:
            results = generic_minimise(func=model.func, dfunc=model.dfunc, d2func=model.d2func, args=(), x0=param_vector, min_algor=min_algor, min_options=min_options, func_tol=func_tol, grad_tol=grad_tol, maxiter=max_iterations, A=A, b=b, full_output=1, print_flag=verbosity)

            # Unpack the results.
            if results is None:
                return
            param_vector, func, iter_count, f_count, g_count, h_count, warning = results

        # Catch infinite chi-squared values.
        if isInf(func):
            raise RelaxInfError('chi-squared')

        # Catch chi-squared values of NaN.
        if isNaN(func):
            raise RelaxNaNError('chi-squared')

        # Make a last function call to update the back-calculated RDC and PCS structures to the optimal values.
        chi2 = model.func(param_vector)

        # Scaling.
        if scaling:
            param_vector = dot(scaling_matrix, param_vector)

        # Disassemble the parameter vector.
        disassemble_param_vector(param_vector=param_vector, data_types=data_types, sim_index=sim_index)

        # Monte Carlo minimisation statistics.
        if sim_index is not None:
            # Chi-squared statistic.
            cdp.chi2_sim[sim_index] = func

            # Iterations.
            cdp.iter_sim[sim_index] = iter_count

            # Function evaluations.
            cdp.f_count_sim[sim_index] = f_count

            # Gradient evaluations.
            cdp.g_count_sim[sim_index] = g_count

            # Hessian evaluations.
            cdp.h_count_sim[sim_index] = h_count

            # Warning.
            cdp.warning_sim[sim_index] = warning

        # Normal statistics.
        else:
            # Chi-squared statistic.
            cdp.chi2 = func

            # Iterations.
            cdp.iter = iter_count

            # Function evaluations.
            cdp.f_count = f_count

            # Gradient evaluations.
            cdp.g_count = g_count

            # Hessian evaluations.
            cdp.h_count = h_count

            # Warning.
            cdp.warning = warning

        # Statistical analysis.
        if sim_index is None and ('rdc' in data_types or 'pcs' in data_types):
            # Get the final back-calculated data (for the Q factor calculations).
            minimise_bc_data(model)

            # Calculate the RDC Q factors.
            if 'rdc' in data_types:
                rdc.q_factors()

            # Calculate the PCS Q factors.
            if 'pcs' in data_types:
                pcs.q_factors()

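    # Illustrative sketch (not part of the original module):  a constrained
    # optimisation of the 'population' model might be dispatched as below, with
    # the chosen algorithm wrapped by the Method of Multipliers (augmented
    # Lagrangian) constraint algorithm as handled above. The tolerance and
    # iteration values are typical relax defaults, assumed here purely for
    # illustration.
    #
    #     api = N_state_model()
    #     api.minimise(min_algor='newton', min_options=(), func_tol=1e-25,
    #                  grad_tol=None, max_iterations=10000000, constraints=True,
    #                  scaling=True, verbosity=1)
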
    def model_statistics(self, model_info=None, spin_id=None, global_stats=None):
        """Return the k, n, and chi2 model statistics.

        k - number of parameters.
        n - number of data points.
        chi2 - the chi-squared value.


        @keyword model_info:    The data returned from model_loop() (unused).
        @type model_info:       None
        @keyword spin_id:       The spin identification string. This is ignored in the N-state model.
        @type spin_id:          None or str
        @keyword global_stats:  A parameter which determines if global or local statistics are returned. For the N-state model, this argument is ignored.
        @type global_stats:     None or bool
        @return:                The optimisation statistics, in tuple format, of the number of parameters (k), the number of data points (n), and the chi-squared value (chi2).
        @rtype:                 tuple of (int, int, float)
        """

        # Return the values.
        return param_num(), num_data_points(), cdp.chi2

    def return_data(self, data_id):
        """Return the base data for the given data ID.

        @keyword data_id:   The list of spin ID, data type, and alignment ID, as yielded by the base_data_loop() generator method.
        @type data_id:      list of str
        @return:            The base data.
        @rtype:             list of (float or None)
        """

        # Alias the spin or interatomic data container, data type and alignment ID.
        container = data_id[0]
        data_type = data_id[1]
        align_id = data_id[2]

        # The data structure to return.
        data = []

        # Skip deselected spins.
        if data_type == 'pcs' and not container.select:
            return

        # Return the RDC data.
        if data_type == 'rdc' and hasattr(container, 'rdc'):
            if align_id not in container.rdc:
                data.append(None)
            else:
                data.append(container.rdc[align_id])

        # Return the NOESY data.
        elif data_type == 'noesy' and hasattr(container, 'noesy'):
            data.append(container.noesy)

        # Return the PCS data.
        elif data_type == 'pcs' and hasattr(container, 'pcs'):
            if align_id not in container.pcs:
                data.append(None)
            else:
                data.append(container.pcs[align_id])

        # Return the data.
        return data

    def return_error(self, data_id=None):
        """Create and return the spin specific Monte Carlo data error structure.

        @keyword data_id:   The list of spin ID, data type, and alignment ID, as yielded by the base_data_loop() generator method.
        @type data_id:      list of str
        @return:            The Monte Carlo simulation data errors.
        @rtype:             list of float
        """

        # Initialise the MC data structure.
        mc_errors = []

        # Alias the spin or interatomic data container.
        container = data_id[0]

        # Skip deselected spins.
        if data_id[1] == 'pcs' and not container.select:
            return

        # RDC data.
        if data_id[1] == 'rdc' and hasattr(container, 'rdc'):
            # Do errors exist?
            if not hasattr(container, 'rdc_err'):
                raise RelaxError("The RDC errors are missing for the spin pair '%s' and '%s'." % (container.spin_id1, container.spin_id2))

            # The error.
            if data_id[2] not in container.rdc_err:
                err = None
            else:
                err = container.rdc_err[data_id[2]]

            # Append the data.
            mc_errors.append(err)

        # NOESY data.
        elif data_id[1] == 'noesy' and hasattr(container, 'noesy'):
            # Do errors exist?
            if not hasattr(container, 'noesy_err'):
                raise RelaxError("The NOESY errors are missing for the spin pair '%s' and '%s'." % (container.spin_id1, container.spin_id2))

            # Append the data.
            mc_errors.append(container.noesy_err)

        # PCS data.
        elif data_id[1] == 'pcs' and hasattr(container, 'pcs'):
            # Do errors exist?
            if not hasattr(container, 'pcs_err'):
                raise RelaxError("The PCS errors are missing for spin '%s'." % data_id[0])

            # The error.
            if data_id[2] not in container.pcs_err:
                err = None
            else:
                err = container.pcs_err[data_id[2]]

            # Append the data.
            mc_errors.append(err)

        # Return the errors.
        return mc_errors

    def set_error(self, model_info, index, error):
        """Set the parameter errors.

        @param model_info:  The global model index originating from model_loop().
        @type model_info:   int
        @param index:       The index of the parameter to set the errors for.
        @type index:        int
        @param error:       The error value.
        @type error:        float
        """

        # Align parameters.
        names = ['Axx', 'Ayy', 'Axy', 'Axz', 'Ayz']

        # Alignment tensor parameters.
        if index < len(cdp.align_ids)*5:
            # The tensor and parameter index (integer division to avoid a float index).
            param_index = index % 5
            tensor_index = index // 5

            # Set the error.
            tensor = align_tensor.return_tensor(index=tensor_index, skip_fixed=True)
            tensor.set(param=names[param_index], value=error, category='err')

            # Return the object.
            return getattr(tensor, names[param_index]+'_err')

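    # Illustrative note (not part of the original module):  the index decoding
    # used by set_error() and sim_return_param() maps a flat parameter index
    # onto a (tensor, parameter) pair. For example, index 7 gives
    # param_index = 7 % 5 = 2 and tensor_index = 7 // 5 = 1, i.e. the 'Axy'
    # component of the second optimised alignment tensor.
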
    def set_param_values(self, param=None, value=None, index=None, spin_id=None, error=False, force=True):
        """Set the N-state model parameter values.

        @keyword param:     The parameter name list.
        @type param:        list of str
        @keyword value:     The parameter value list.
        @type value:        list
        @keyword index:     The index for parameters which are of the list-type (probs, alpha, beta, and gamma). This is ignored for all other types.
        @type index:        None or int
        @keyword spin_id:   The spin identification string (unused).
        @type spin_id:      None
        @keyword error:     A flag which if True will allow the parameter errors to be set instead of the values.
        @type error:        bool
        @keyword force:     A flag which if True will cause current values to be overwritten. If False, a RelaxError will be raised if the parameter value is already set.
        @type force:        bool
        """

        # Checks.
        lib.arg_check.is_str_list(param, 'parameter name')
        lib.arg_check.is_list(value, 'parameter value')

        # Loop over the parameters.
        for i in range(len(param)):
            # Is the parameter valid?
            if not param[i]:
                raise RelaxError("The parameter '%s' is not valid for this data pipe type." % param[i])

            # Error object.
            if error:
                param[i] += '_err'

            # Set the indexed parameter.
            if param[i] in ['probs', 'alpha', 'beta', 'gamma']:
                obj = getattr(cdp, param[i])
                obj[index] = value[i]

            # Set the spin parameters.
            else:
                for spin in spin_loop(spin_id):
                    setattr(spin, param[i], value[i])

    def sim_init_values(self):
        """Initialise the Monte Carlo parameter values."""

        # Get the minimisation statistic object names.
        sim_names = self.data_names(set='min')

        # Add the paramagnetic centre, if optimised.
        if hasattr(cdp, 'paramag_centre_fixed') and not cdp.paramag_centre_fixed:
            sim_names += ['paramagnetic_centre']

        # Alignments.
        if hasattr(cdp, 'align_tensors'):
            # The parameter names.
            names = ['Axx', 'Ayy', 'Axy', 'Axz', 'Ayz']

            # Loop over the alignments, adding the alignment tensor parameters to the tensor data container.
            for i in range(len(cdp.align_tensors)):
                # Skip non-optimised tensors.
                if not opt_uses_tensor(cdp.align_tensors[i]):
                    continue

                # Set up the number of simulations.
                cdp.align_tensors[i].set_sim_num(cdp.sim_number)

                # Loop over all the parameter names, setting the initial simulation values to those of the parameter value.
                for object_name in names:
                    for j in range(cdp.sim_number):
                        cdp.align_tensors[i].set(param=object_name, value=deepcopy(getattr(cdp.align_tensors[i], object_name)), category='sim', sim_index=j)

        # Create all other simulation objects.
        for object_name in sim_names:
            # Name for the simulation object.
            sim_object_name = object_name + '_sim'

            # Create the simulation object.
            setattr(cdp, sim_object_name, [])

            # Get the simulation object.
            sim_object = getattr(cdp, sim_object_name)

            # Loop over the simulations.
            for j in range(cdp.sim_number):
                # Append None to fill the structure.
                sim_object.append(None)

        # Set the simulation paramagnetic centre positions to the optimised values.
        if hasattr(cdp, 'paramag_centre_fixed') and not cdp.paramag_centre_fixed:
            for j in range(cdp.sim_number):
                cdp.paramagnetic_centre_sim[j] = deepcopy(cdp.paramagnetic_centre)

    def sim_pack_data(self, data_id, sim_data):
        """Pack the Monte Carlo simulation data.

        @keyword data_id:   The list of spin ID, data type, and alignment ID, as yielded by the base_data_loop() generator method.
        @type data_id:      list of str
        @param sim_data:    The Monte Carlo simulation data.
        @type sim_data:     list of float
        """

        # Alias the spin or interatomic data container.
        container = data_id[0]

        # RDC data.
        if data_id[1] == 'rdc' and hasattr(container, 'rdc'):
            # Initialise.
            if not hasattr(container, 'rdc_sim'):
                container.rdc_sim = {}

            # Store the data structure.
            container.rdc_sim[data_id[2]] = []
            for i in range(cdp.sim_number):
                container.rdc_sim[data_id[2]].append(sim_data[i][0])

        # NOESY data.
        elif data_id[1] == 'noesy' and hasattr(container, 'noesy'):
            # Store the data structure (the NOESY data has no alignment ID, so the list is appended to directly).
            container.noesy_sim = []
            for i in range(cdp.sim_number):
                container.noesy_sim.append(sim_data[i][0])

        # PCS data.
        elif data_id[1] == 'pcs' and hasattr(container, 'pcs'):
            # Initialise.
            if not hasattr(container, 'pcs_sim'):
                container.pcs_sim = {}

            # Store the data structure.
            container.pcs_sim[data_id[2]] = []
            for i in range(cdp.sim_number):
                container.pcs_sim[data_id[2]].append(sim_data[i][0])

    def sim_return_param(self, model_info, index):
        """Return the array of simulation parameter values.

        @param model_info:  The global model index originating from model_loop().
        @type model_info:   int
        @param index:       The index of the parameter to return the array of values for.
        @type index:        int
        @return:            The array of simulation parameter values.
        @rtype:             list of float
        """

        # Align parameters.
        names = ['Axx', 'Ayy', 'Axy', 'Axz', 'Ayz']

        # Alignment tensor parameters.
        if index < align_tensor.num_tensors(skip_fixed=True)*5:
            # The tensor and parameter index (integer division to avoid a float index).
            param_index = index % 5
            tensor_index = index // 5

            # Return the simulation parameter array.
            tensor = align_tensor.return_tensor(index=tensor_index, skip_fixed=True)
            return getattr(tensor, names[param_index]+'_sim')

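
# Illustrative usage sketch (not part of the original module):  within relax the
# class above is normally driven through the specific analysis API rather than
# instantiated directly, but a minimal direct call sequence, assuming an N-state
# model data pipe with RDC or PCS data has already been set up as the current
# data pipe (cdp), might look like the following. The optimisation settings are
# assumptions chosen only for illustration.
#
#     api = N_state_model()
#     api.grid_search(inc=11, constraints=False, verbosity=1)
#     api.minimise(min_algor='newton', min_options=(), func_tol=1e-25, max_iterations=10000000, verbosity=1)
#     k, n, chi2 = api.model_statistics()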