Package specific_fns :: Module n_state_model

Source Code for Module specific_fns.n_state_model

   1  ############################################################################### 
   2  #                                                                             # 
   3  # Copyright (C) 2007-2013 Edward d'Auvergne                                   # 
   4  #                                                                             # 
   5  # This file is part of the program relax (http://www.nmr-relax.com).          # 
   6  #                                                                             # 
   7  # This program is free software: you can redistribute it and/or modify        # 
   8  # it under the terms of the GNU General Public License as published by        # 
   9  # the Free Software Foundation, either version 3 of the License, or           # 
  10  # (at your option) any later version.                                         # 
  11  #                                                                             # 
  12  # This program is distributed in the hope that it will be useful,             # 
  13  # but WITHOUT ANY WARRANTY; without even the implied warranty of              # 
  14  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the               # 
  15  # GNU General Public License for more details.                                # 
  16  #                                                                             # 
  17  # You should have received a copy of the GNU General Public License           # 
  18  # along with this program.  If not, see <http://www.gnu.org/licenses/>.       # 
  19  #                                                                             # 
  20  ############################################################################### 
  21   
  22  # Module docstring. 
  23  """Analysis specific code for the N-state model or structure ensemble analysis.""" 
  24   
  25  # Python module imports. 
  26  from copy import deepcopy 
  27  from math import acos, cos, pi 
  28  from minfx.generic import generic_minimise 
  29  from minfx.grid import grid 
  30  from numpy import array, dot, float64, identity, ones, zeros 
  31  from numpy.linalg import inv, norm 
  32  from re import search 
  33  from warnings import warn 
  34   
  35  # relax module imports. 
  36  import arg_check 
  37  from float import isNaN, isInf 
  38  from generic_fns import align_tensor, pcs, pipes, rdc 
  39  from generic_fns.interatomic import interatomic_loop 
  40  from generic_fns.mol_res_spin import return_spin, spin_loop 
  41  from generic_fns.structure import geometric 
  42  from generic_fns.structure.cones import Iso_cone 
  43  from generic_fns.structure.internal import Internal 
  44  from generic_fns.structure.mass import centre_of_mass 
  45  from maths_fns.n_state_model import N_state_opt 
  46  from maths_fns.potential import quad_pot 
  47  from maths_fns.rotation_matrix import euler_to_R_zyz, two_vect_to_R 
  48  from physical_constants import dipolar_constant, g1H, return_gyromagnetic_ratio 
  49  from relax_errors import RelaxError, RelaxInfError, RelaxNaNError, RelaxNoModelError, RelaxNoValueError, RelaxSpinTypeError 
  50  from relax_io import open_write_file 
  51  from relax_warnings import RelaxWarning 
  52  from specific_fns.api_base import API_base 
  53  from specific_fns.api_common import API_common 
  54  from user_functions.data import Uf_tables; uf_tables = Uf_tables() 
  55  from user_functions.objects import Desc_container 
  56   
  57   
58 -class N_state_model(API_base, API_common):
59 """Class containing functions for the N-state model.""" 60
61 - def __init__(self):
62 """Initialise the class by placing API_common methods into the API.""" 63 64 # Execute the base class __init__ method. 65 super(N_state_model, self).__init__() 66 67 # Place methods into the API. 68 self.model_loop = self._model_loop_single_global 69 self.overfit_deselect = self._overfit_deselect_dummy 70 self.return_conversion_factor = self._return_no_conversion_factor 71 self.set_selected_sim = self._set_selected_sim_global 72 self.sim_return_selected = self._sim_return_selected_global 73 self.test_grid_ops = self._test_grid_ops_general 74 75 # Set up the spin parameters. 76 self.PARAMS.add('csa', scope='spin', units='ppm', desc='CSA value', py_type=float, grace_string='\\qCSA\\Q') 77 78 # Add the minimisation data. 79 self.PARAMS.add_min_data(min_stats_global=False, min_stats_spin=True)
80 81
82 - def _assemble_param_vector(self, sim_index=None):
83 """Assemble all the parameters of the model into a single array. 84 85 @param sim_index: The index of the simulation to optimise. This should be None if normal optimisation is desired. 86 @type sim_index: None or int 87 @return: The parameter vector used for optimisation. 88 @rtype: numpy array 89 """ 90 91 # Test if the model is selected. 92 if not hasattr(cdp, 'model') or not isinstance(cdp.model, str): 93 raise RelaxNoModelError 94 95 # Determine the data type. 96 data_types = self._base_data_types() 97 98 # Initialise the parameter vector. 99 param_vector = [] 100 101 # A RDC or PCS data type requires the alignment tensors to be at the start of the parameter vector (unless the tensors are fixed). 102 if self._opt_uses_align_data(): 103 for i in range(len(cdp.align_tensors)): 104 # Skip non-optimised tensors. 105 if not self._opt_tensor(cdp.align_tensors[i]): 106 continue 107 108 # Add the parameters. 109 param_vector = param_vector + list(cdp.align_tensors[i].A_5D) 110 111 # Monte Carlo simulation data structures. 112 if sim_index != None: 113 # Populations. 114 if cdp.model in ['2-domain', 'population']: 115 probs = cdp.probs_sim[sim_index] 116 117 # Euler angles. 118 if cdp.model == '2-domain': 119 alpha = cdp.alpha_sim[sim_index] 120 beta = cdp.beta_sim[sim_index] 121 gamma = cdp.gamma_sim[sim_index] 122 123 # Normal data structures. 124 else: 125 # Populations. 126 if cdp.model in ['2-domain', 'population']: 127 probs = cdp.probs 128 129 # Euler angles. 130 if cdp.model == '2-domain': 131 alpha = cdp.alpha 132 beta = cdp.beta 133 gamma = cdp.gamma 134 135 # The probabilities (exclude that of state N). 136 if cdp.model in ['2-domain', 'population']: 137 param_vector = param_vector + probs[0:-1] 138 139 # The Euler angles. 140 if cdp.model == '2-domain': 141 for i in range(cdp.N): 142 param_vector.append(alpha[i]) 143 param_vector.append(beta[i]) 144 param_vector.append(gamma[i]) 145 146 # The paramagnetic centre. 147 if hasattr(cdp, 'paramag_centre_fixed') and not cdp.paramag_centre_fixed: 148 if not hasattr(cdp, 'paramagnetic_centre'): 149 for i in range(3): 150 param_vector.append(0.0) 151 elif sim_index != None: 152 if cdp.paramagnetic_centre_sim[sim_index] == None: 153 for i in range(3): 154 param_vector.append(0.0) 155 else: 156 for i in range(3): 157 param_vector.append(cdp.paramagnetic_centre_sim[sim_index][i]) 158 else: 159 for i in range(3): 160 param_vector.append(cdp.paramagnetic_centre[i]) 161 162 # Convert all None values to zero (to avoid conversion to NaN). 163 for i in range(len(param_vector)): 164 if param_vector[i] == None: 165 param_vector[i] = 0.0 166 167 # Return a numpy arrary. 168 return array(param_vector, float64)
169 170
171 - def _assemble_scaling_matrix(self, data_types=None, scaling=True):
172 """Create and return the scaling matrix. 173 174 @keyword data_types: The base data types used in the optimisation. This list can contain 175 the elements 'rdc', 'pcs' or 'tensor'. 176 @type data_types: list of str 177 @keyword scaling: If False, then the identity matrix will be returned. 178 @type scaling: bool 179 @return: The square and diagonal scaling matrix. 180 @rtype: numpy rank-2 array 181 """ 182 183 # Initialise. 184 scaling_matrix = identity(self._param_num(), float64) 185 186 # Return the identity matrix. 187 if not scaling: 188 return scaling_matrix 189 190 # Starting point of the populations. 191 pop_start = 0 192 193 # Loop over the alignments. 194 tensor_num = 0 195 for i in range(len(cdp.align_tensors)): 196 # Skip non-optimised tensors. 197 if not self._opt_tensor(cdp.align_tensors[i]): 198 continue 199 200 # Add the 5 alignment parameters. 201 pop_start = pop_start + 5 202 203 # The alignment parameters. 204 for j in range(5): 205 scaling_matrix[5*tensor_num + j, 5*tensor_num + j] = 1.0 206 207 # Increase the tensor number. 208 tensor_num += 1 209 210 # Loop over the populations, and set the scaling factor. 211 if cdp.model in ['2-domain', 'population']: 212 factor = 0.1 213 for i in range(pop_start, pop_start + (cdp.N-1)): 214 scaling_matrix[i, i] = factor 215 216 # The paramagnetic centre. 217 if hasattr(cdp, 'paramag_centre_fixed') and not cdp.paramag_centre_fixed: 218 for i in range(-3, 0): 219 scaling_matrix[i, i] = 1e2 220 221 # Return the matrix. 222 return scaling_matrix
223 224
225 - def _base_data_types(self):
226 """Determine all the base data types. 227 228 The base data types can include:: 229 - 'rdc', residual dipolar couplings. 230 - 'pcs', pseudo-contact shifts. 231 - 'noesy', NOE restraints. 232 - 'tensor', alignment tensors. 233 234 @return: A list of all the base data types. 235 @rtype: list of str 236 """ 237 238 # Array of data types. 239 list = [] 240 241 # RDC search. 242 for interatom in interatomic_loop(): 243 if hasattr(interatom, 'rdc'): 244 list.append('rdc') 245 break 246 247 # PCS search. 248 for spin in spin_loop(): 249 if hasattr(spin, 'pcs'): 250 list.append('pcs') 251 break 252 253 # Alignment tensor search. 254 if not ('rdc' in list or 'pcs' in list) and hasattr(cdp, 'align_tensors'): 255 list.append('tensor') 256 257 # NOESY data search. 258 if hasattr(cdp, 'noe_restraints'): 259 list.append('noesy') 260 261 # No data is present. 262 if not list: 263 raise RelaxError("Neither RDC, PCS, NOESY nor alignment tensor data is present.") 264 265 # Return the list. 266 return list
267 268
269 - def _calc_ave_dist(self, atom1, atom2, exp=1):
270 """Calculate the average distances. 271 272 The formula used is:: 273 274 _N_ 275 / 1 \ \ 1/exp 276 <r> = | - > |p1i - p2i|^exp | 277 \ N /__ / 278 i 279 280 where i are the members of the ensemble, N is the total number of structural models, and p1 281 and p2 at the two atom positions. 282 283 284 @param atom1: The atom identification string of the first atom. 285 @type atom1: str 286 @param atom2: The atom identification string of the second atom. 287 @type atom2: str 288 @keyword exp: The exponent used for the averaging, e.g. 1 for linear averaging and -6 for 289 r^-6 NOE averaging. 290 @type exp: int 291 @return: The average distance between the two atoms. 292 @rtype: float 293 """ 294 295 # Get the spin containers. 296 spin1 = return_spin(atom1) 297 spin2 = return_spin(atom2) 298 299 # Loop over each model. 300 num_models = len(spin1.pos) 301 ave_dist = 0.0 302 for i in range(num_models): 303 # Distance to the minus sixth power. 304 dist = norm(spin1.pos[i] - spin2.pos[i]) 305 ave_dist = ave_dist + dist**(exp) 306 307 # Average. 308 ave_dist = ave_dist / num_models 309 310 # The exponent. 311 ave_dist = ave_dist**(1.0/exp) 312 313 # Return the average distance. 314 return ave_dist
315 316
317 - def _CoM(self, pivot_point=None, centre=None):
318 """Centre of mass analysis. 319 320 This function does an analysis of the centre of mass (CoM) of the N states. This includes 321 calculating the order parameter associated with the pivot-CoM vector, and the associated 322 cone of motions. The pivot_point argument must be supplied. If centre is None, then the 323 CoM will be calculated from the selected parts of the loaded structure. Otherwise it will 324 be set to the centre arg. 325 326 @param pivot_point: The pivot point in the structural file(s). 327 @type pivot_point: list of float of length 3 328 @param centre: The optional centre of mass vector. 329 @type centre: list of float of length 3 330 """ 331 332 # Test if the current data pipe exists. 333 pipes.test() 334 335 # Set the pivot point. 336 cdp.pivot_point = pivot_point 337 338 # The centre has been supplied. 339 if centre: 340 cdp.CoM = centre 341 342 # Calculate from the structure file. 343 else: 344 cdp.CoM = centre_of_mass() 345 346 # Calculate the vector between the pivot and CoM points. 347 cdp.pivot_CoM = array(cdp.CoM, float64) - array(cdp.pivot_point, float64) 348 349 # Calculate the unit vector between the pivot and CoM points. 350 unit_vect = cdp.pivot_CoM / norm(cdp.pivot_CoM) 351 352 # Initilise some data structures. 353 R = zeros((3, 3), float64) 354 vectors = zeros((cdp.N, 3), float64) 355 356 # Loop over the N states. 357 for c in range(cdp.N): 358 # Generate the rotation matrix. 359 euler_to_R_zyz(cdp.alpha[c], cdp.beta[c], cdp.gamma[c], R) 360 361 # Rotate the unit vector. 362 vectors[c] = dot(R, unit_vect) 363 364 # Multiply by the probability. 365 vectors[c] = vectors[c] * cdp.probs[c] 366 367 # Average of the unit vectors. 368 cdp.ave_unit_pivot_CoM = sum(vectors) 369 370 # The length reduction. 371 cdp.ave_pivot_CoM_red = norm(cdp.ave_unit_pivot_CoM) 372 373 # The aveage pivot-CoM vector. 374 cdp.ave_pivot_CoM = norm(cdp.pivot_CoM) * cdp.ave_unit_pivot_CoM 375 376 # The full length rotated pivot-CoM vector. 377 cdp.full_ave_pivot_CoM = cdp.ave_pivot_CoM / cdp.ave_pivot_CoM_red 378 379 # The cone angle for diffusion on an axially symmetric cone. 380 cdp.theta_diff_on_cone = acos(cdp.ave_pivot_CoM_red) 381 cdp.S_diff_on_cone = (3.0*cos(cdp.theta_diff_on_cone)**2 - 1.0) / 2.0 382 383 # The cone angle and order parameter for diffusion in an axially symmetric cone. 384 cdp.theta_diff_in_cone = acos(2.*cdp.ave_pivot_CoM_red - 1.) 385 cdp.S_diff_in_cone = cos(cdp.theta_diff_in_cone) * (1 + cos(cdp.theta_diff_in_cone)) / 2.0 386 387 # Print out. 
388 print("\n%-40s %-20s" % ("Pivot point:", repr(cdp.pivot_point))) 389 print("%-40s %-20s" % ("Moving domain CoM (prior to rotation):", repr(cdp.CoM))) 390 print("%-40s %-20s" % ("Pivot-CoM vector", repr(cdp.pivot_CoM))) 391 print("%-40s %-20s" % ("Pivot-CoM unit vector:", repr(unit_vect))) 392 print("%-40s %-20s" % ("Average of the unit pivot-CoM vectors:", repr(cdp.ave_unit_pivot_CoM))) 393 print("%-40s %-20s" % ("Average of the pivot-CoM vector:", repr(cdp.ave_pivot_CoM))) 394 print("%-40s %-20s" % ("Full length rotated pivot-CoM vector:", repr(cdp.full_ave_pivot_CoM))) 395 print("%-40s %-20s" % ("Length reduction from unity:", repr(cdp.ave_pivot_CoM_red))) 396 print("%-40s %.5f rad (%.5f deg)" % ("Cone angle (diffusion on a cone)", cdp.theta_diff_on_cone, cdp.theta_diff_on_cone / (2*pi) *360.)) 397 print("%-40s S_cone = %.5f (S^2 = %.5f)" % ("S_cone (diffusion on a cone)", cdp.S_diff_on_cone, cdp.S_diff_on_cone**2)) 398 print("%-40s %.5f rad (%.5f deg)" % ("Cone angle (diffusion in a cone)", cdp.theta_diff_in_cone, cdp.theta_diff_in_cone / (2*pi) *360.)) 399 print("%-40s S_cone = %.5f (S^2 = %.5f)" % ("S_cone (diffusion in a cone)", cdp.S_diff_in_cone, cdp.S_diff_in_cone**2)) 400 print("\n\n")
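A standalone sketch of the two cone order parameter relations computed at the end of the method, assuming a hypothetical length reduction value:

from math import acos, cos, degrees

red = 0.8    # |average unit pivot-CoM vector|, a made-up value in [0, 1]

# Diffusion on an axially symmetric cone.
theta_on = acos(red)
S_on = (3.0*cos(theta_on)**2 - 1.0) / 2.0

# Diffusion in an axially symmetric cone.
theta_in = acos(2.0*red - 1.0)
S_in = cos(theta_in) * (1.0 + cos(theta_in)) / 2.0

print("on cone: theta = %.2f deg, S = %.3f" % (degrees(theta_on), S_on))
print("in cone: theta = %.2f deg, S = %.3f" % (degrees(theta_in), S_in))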
401 402
403 - def _check_rdcs(self, interatom, spin1, spin2):
404 """Check if the RDCs for the given interatomic data container should be used. 405 406 @param interatom: The interatomic data container. 407 @type interatom: InteratomContainer instance 408 @param spin1: The first spin container. 409 @type spin1: SpinContainer instance 410 @param spin2: The second spin container. 411 @type spin2: SpinContainer instance 412 @return: True if the RDCs should be used, False otherwise. 413 """ 414 415 # Skip deselected interatomic data containers. 416 if not interatom.select: 417 return False 418 419 # Skip deselected spins. 420 # FIXME: These checks could be fatal in the future if a user has good RDCs and one of the two spins are deselected! 421 if not spin1.select or not spin2.select: 422 return False 423 424 # Only use interatomic data containers with RDC data. 425 if not hasattr(interatom, 'rdc'): 426 return False 427 428 # RDC data exists but the interatomic vectors are missing? 429 if not hasattr(interatom, 'vector'): 430 # Throw a warning. 431 warn(RelaxWarning("RDC data exists but the interatomic vectors are missing, skipping the spin pair '%s' and '%s'." % (interatom.spin_id1, interatom.spin_id2))) 432 433 # Jump to the next spin. 434 return False 435 436 # Skip non-Me pseudo-atoms for the first spin. 437 if hasattr(spin1, 'members') and len(spin1.members) != 3: 438 warn(RelaxWarning("Only methyl group pseudo atoms are supported due to their fast rotation, skipping the spin pair '%s' and '%s'." % (interatom.spin_id1, interatom.spin_id2))) 439 return False 440 441 # Skip non-Me pseudo-atoms for the second spin. 442 if hasattr(spin2, 'members') and len(spin2.members) != 3: 443 warn(RelaxWarning("Only methyl group pseudo atoms are supported due to their fast rotation, skipping the spin pair '%s' and '%s'." % (interatom.spin_id1, interatom.spin_id2))) 444 return False 445 446 # Checks. 447 if not hasattr(spin1, 'isotope'): 448 raise RelaxSpinTypeError(interatom.spin_id1) 449 if not hasattr(spin2, 'isotope'): 450 raise RelaxSpinTypeError(interatom.spin_id2) 451 if not hasattr(interatom, 'r'): 452 raise RelaxNoValueError("averaged interatomic distance") 453 454 # Everything is ok. 455 return True
456 457
458 - def _cone_pdb(self, cone_type=None, scale=1.0, file=None, dir=None, force=False):
459 """Create a PDB file containing a geometric object representing the various cone models. 460 461 Currently the only cone types supported are 'diff in cone' and 'diff on cone'. 462 463 464 @param cone_type: The type of cone model to represent. 465 @type cone_type: str 466 @param scale: The size of the geometric object is eqaul to the average pivot-CoM 467 vector length multiplied by this scaling factor. 468 @type scale: float 469 @param file: The name of the PDB file to create. 470 @type file: str 471 @param dir: The name of the directory to place the PDB file into. 472 @type dir: str 473 @param force: Flag which if set to True will cause any pre-existing file to be 474 overwritten. 475 @type force: int 476 """ 477 478 # Test if the cone models have been determined. 479 if cone_type == 'diff in cone': 480 if not hasattr(cdp, 'S_diff_in_cone'): 481 raise RelaxError("The diffusion in a cone model has not yet been determined.") 482 elif cone_type == 'diff on cone': 483 if not hasattr(cdp, 'S_diff_on_cone'): 484 raise RelaxError("The diffusion on a cone model has not yet been determined.") 485 else: 486 raise RelaxError("The cone type " + repr(cone_type) + " is unknown.") 487 488 # The number of increments for the filling of the cone objects. 489 inc = 20 490 491 # The rotation matrix. 492 R = zeros((3, 3), float64) 493 two_vect_to_R(array([0, 0, 1], float64), cdp.ave_pivot_CoM/norm(cdp.ave_pivot_CoM), R) 494 495 # The isotropic cone object. 496 if cone_type == 'diff in cone': 497 angle = cdp.theta_diff_in_cone 498 elif cone_type == 'diff on cone': 499 angle = cdp.theta_diff_on_cone 500 cone = Iso_cone(angle) 501 502 # Create the structural object. 503 structure = Internal() 504 505 # Add a structure. 506 structure.add_molecule(name='cone') 507 508 # Alias the single molecule from the single model. 509 mol = structure.structural_data[0].mol[0] 510 511 # Add the pivot point. 512 mol.atom_add(pdb_record='HETATM', atom_num=1, atom_name='R', res_name='PIV', res_num=1, pos=cdp.pivot_point, element='C') 513 514 # Generate the average pivot-CoM vectors. 515 print("\nGenerating the average pivot-CoM vectors.") 516 sim_vectors = None 517 if hasattr(cdp, 'ave_pivot_CoM_sim'): 518 sim_vectors = cdp.ave_pivot_CoM_sim 519 res_num = geometric.generate_vector_residues(mol=mol, vector=cdp.ave_pivot_CoM, atom_name='Ave', res_name_vect='AVE', sim_vectors=sim_vectors, res_num=2, origin=cdp.pivot_point, scale=scale) 520 521 # Generate the cone outer edge. 522 print("\nGenerating the cone outer edge.") 523 cap_start_atom = mol.atom_num[-1]+1 524 geometric.cone_edge(mol=mol, cone=cone, res_name='CON', res_num=3, apex=cdp.pivot_point, R=R, scale=norm(cdp.pivot_CoM), inc=inc) 525 526 # Generate the cone cap, and stitch it to the cone edge. 527 if cone_type == 'diff in cone': 528 print("\nGenerating the cone cap.") 529 cone_start_atom = mol.atom_num[-1]+1 530 geometric.generate_vector_dist(mol=mol, res_name='CON', res_num=3, centre=cdp.pivot_point, R=R, limit_check=cone.limit_check, scale=norm(cdp.pivot_CoM), inc=inc) 531 geometric.stitch_cone_to_edge(mol=mol, cone=cone, dome_start=cone_start_atom, edge_start=cap_start_atom+1, inc=inc) 532 533 # Create the PDB file. 534 print("\nGenerating the PDB file.") 535 pdb_file = open_write_file(file, dir, force=force) 536 structure.write_pdb(pdb_file) 537 pdb_file.close()
538 539
540 - def _disassemble_param_vector(self, param_vector=None, data_types=None, sim_index=None):
541 """Disassemble the parameter vector and place the values into the relevant variables. 542 543 For the 2-domain N-state model, the parameters are stored in the probability and Euler angle data structures. For the population N-state model, only the probabilities are stored. If RDCs are present and alignment tensors are optimised, then these are stored as well. 544 545 @keyword data_types: The base data types used in the optimisation. This list can contain the elements 'rdc', 'pcs' or 'tensor'. 546 @type data_types: list of str 547 @keyword param_vector: The parameter vector returned from optimisation. 548 @type param_vector: numpy array 549 @keyword sim_index: The index of the simulation to optimise. This should be None if normal optimisation is desired. 550 @type sim_index: None or int 551 """ 552 553 # Test if the model is selected. 554 if not hasattr(cdp, 'model') or not isinstance(cdp.model, str): 555 raise RelaxNoModelError 556 557 # Unpack and strip off the alignment tensor parameters. 558 if ('rdc' in data_types or 'pcs' in data_types) and not align_tensor.all_tensors_fixed(): 559 # Loop over the alignments, adding the alignment tensor parameters to the tensor data container. 560 tensor_num = 0 561 for i in range(len(cdp.align_tensors)): 562 # Skip non-optimised tensors. 563 if not self._opt_tensor(cdp.align_tensors[i]): 564 continue 565 566 # Normal tensors. 567 if sim_index == None: 568 cdp.align_tensors[i].set(param='Axx', value=param_vector[5*tensor_num]) 569 cdp.align_tensors[i].set(param='Ayy', value=param_vector[5*tensor_num+1]) 570 cdp.align_tensors[i].set(param='Axy', value=param_vector[5*tensor_num+2]) 571 cdp.align_tensors[i].set(param='Axz', value=param_vector[5*tensor_num+3]) 572 cdp.align_tensors[i].set(param='Ayz', value=param_vector[5*tensor_num+4]) 573 574 # Monte Carlo simulated tensors. 575 else: 576 cdp.align_tensors[i].set(param='Axx', value=param_vector[5*tensor_num], category='sim', sim_index=sim_index) 577 cdp.align_tensors[i].set(param='Ayy', value=param_vector[5*tensor_num+1], category='sim', sim_index=sim_index) 578 cdp.align_tensors[i].set(param='Axy', value=param_vector[5*tensor_num+2], category='sim', sim_index=sim_index) 579 cdp.align_tensors[i].set(param='Axz', value=param_vector[5*tensor_num+3], category='sim', sim_index=sim_index) 580 cdp.align_tensors[i].set(param='Ayz', value=param_vector[5*tensor_num+4], category='sim', sim_index=sim_index) 581 582 # Increase the tensor number. 583 tensor_num += 1 584 585 # Create a new parameter vector without the tensors. 586 param_vector = param_vector[5*tensor_num:] 587 588 # Alias the Monte Carlo simulation data structures. 589 if sim_index != None: 590 # Populations. 591 if cdp.model in ['2-domain', 'population']: 592 probs = cdp.probs_sim[sim_index] 593 594 # Euler angles. 595 if cdp.model == '2-domain': 596 alpha = cdp.alpha_sim[sim_index] 597 beta = cdp.beta_sim[sim_index] 598 gamma = cdp.gamma_sim[sim_index] 599 600 # Alias the normal data structures. 601 else: 602 # Populations. 603 if cdp.model in ['2-domain', 'population']: 604 probs = cdp.probs 605 606 # Euler angles. 607 if cdp.model == '2-domain': 608 alpha = cdp.alpha 609 beta = cdp.beta 610 gamma = cdp.gamma 611 612 # Set the probabilities for states 0 to N-1 in the aliased structures. 613 if cdp.model in ['2-domain', 'population']: 614 for i in range(cdp.N-1): 615 probs[i] = param_vector[i] 616 617 # The probability for state N. 618 probs[-1] = 1 - sum(probs[0:-1]) 619 620 # Set the Euler angles in the aliased structures. 
621 if cdp.model == '2-domain': 622 for i in range(cdp.N): 623 alpha[i] = param_vector[cdp.N-1 + 3*i] 624 beta[i] = param_vector[cdp.N-1 + 3*i + 1] 625 gamma[i] = param_vector[cdp.N-1 + 3*i + 2] 626 627 # The paramagnetic centre. 628 if hasattr(cdp, 'paramag_centre_fixed') and not cdp.paramag_centre_fixed: 629 # Create the structure if needed. 630 if not hasattr(cdp, 'paramagnetic_centre'): 631 cdp.paramagnetic_centre = zeros(3, float64) 632 633 # The position. 634 if sim_index == None: 635 cdp.paramagnetic_centre[0] = param_vector[-3] 636 cdp.paramagnetic_centre[1] = param_vector[-2] 637 cdp.paramagnetic_centre[2] = param_vector[-1] 638 639 # Monte Carlo simulated positions. 640 else: 641 if cdp.paramagnetic_centre_sim[sim_index] == None: 642 cdp.paramagnetic_centre_sim[sim_index] = [None, None, None] 643 cdp.paramagnetic_centre_sim[sim_index][0] = param_vector[-3] 644 cdp.paramagnetic_centre_sim[sim_index][1] = param_vector[-2] 645 cdp.paramagnetic_centre_sim[sim_index][2] = param_vector[-1]
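A short sketch of the probability unpacking above: only the first N-1 probabilities are optimised, and the last state takes the remainder (hypothetical values for N = 4):

param_vector = [0.5, 0.25, 0.125]    # the optimised p0, p1, p2
probs = [None] * 4
for i in range(4 - 1):
    probs[i] = param_vector[i]
probs[-1] = 1 - sum(probs[0:-1])
print(probs)                         # [0.5, 0.25, 0.125, 0.125]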
646 647
648 - def _elim_no_prob(self):
649 """Remove all structures or states which have no probability.""" 650 651 # Test if the current data pipe exists. 652 pipes.test() 653 654 # Test if the model is setup. 655 if not hasattr(cdp, 'model'): 656 raise RelaxNoModelError('N-state') 657 658 # Test if there are populations. 659 if not hasattr(cdp, 'probs'): 660 raise RelaxError("The N-state model populations do not exist.") 661 662 # Create the data structure if needed. 663 if not hasattr(cdp, 'select_models'): 664 cdp.state_select = [True] * cdp.N 665 666 # Loop over the structures. 667 for i in range(len(cdp.N)): 668 # No probability. 669 if cdp.probs[i] < 1e-5: 670 cdp.state_select[i] = False
671 672
673 - def _linear_constraints(self, data_types=None, scaling_matrix=None):
674 """Function for setting up the linear constraint matrices A and b. 675 676 Standard notation 677 ================= 678 679 The N-state model constraints are:: 680 681 0 <= pc <= 1, 682 683 where p is the probability and c corresponds to state c. 684 685 686 Matrix notation 687 =============== 688 689 In the notation A.x >= b, where A is an matrix of coefficients, x is an array of parameter 690 values, and b is a vector of scalars, these inequality constraints are:: 691 692 | 1 0 0 | | 0 | 693 | | | | 694 |-1 0 0 | | -1 | 695 | | | | 696 | 0 1 0 | | 0 | 697 | | | p0 | | | 698 | 0 -1 0 | | | | -1 | 699 | | . | p1 | >= | | 700 | 0 0 1 | | | | 0 | 701 | | | p2 | | | 702 | 0 0 -1 | | -1 | 703 | | | | 704 |-1 -1 -1 | | -1 | 705 | | | | 706 | 1 1 1 | | 0 | 707 708 This example is for a 4-state model, the last probability pn is not included as this 709 parameter does not exist (because the sum of pc is equal to 1). The Euler angle parameters 710 have been excluded here but will be included in the returned A and b objects. These 711 parameters simply add columns of zero to the A matrix and have no effect on b. The last two 712 rows correspond to the inequality:: 713 714 0 <= pN <= 1. 715 716 As:: 717 N-1 718 \ 719 pN = 1 - > pc, 720 /__ 721 c=1 722 723 then:: 724 725 -p1 - p2 - ... - p(N-1) >= -1, 726 727 p1 + p2 + ... + p(N-1) >= 0. 728 729 730 @keyword data_types: The base data types used in the optimisation. This list can 731 contain the elements 'rdc', 'pcs' or 'tensor'. 732 @type data_types: list of str 733 @keyword scaling_matrix: The diagonal scaling matrix. 734 @type scaling_matrix: numpy rank-2 square matrix 735 @return: The matrices A and b. 736 @rtype: tuple of len 2 of a numpy rank-2, size NxM matrix and numpy 737 rank-1, size N array 738 """ 739 740 # Starting point of the populations. 741 pop_start = 0 742 if ('rdc' in data_types or 'pcs' in data_types) and not align_tensor.all_tensors_fixed(): 743 # Loop over the alignments. 744 for i in range(len(cdp.align_tensors)): 745 # Skip non-optimised tensors. 746 if not self._opt_tensor(cdp.align_tensors[i]): 747 continue 748 749 # Add 5 parameters. 750 pop_start += 5 751 752 # Initialisation (0..j..m). 753 A = [] 754 b = [] 755 zero_array = zeros(self._param_num(), float64) 756 i = pop_start 757 j = 0 758 759 # Probability parameters. 760 if cdp.model in ['2-domain', 'population']: 761 # Loop over the prob parameters (N - 1, because the sum of pc is 1). 762 for k in range(cdp.N - 1): 763 # 0 <= pc <= 1. 764 A.append(zero_array * 0.0) 765 A.append(zero_array * 0.0) 766 A[j][i] = 1.0 767 A[j+1][i] = -1.0 768 b.append(0.0) 769 b.append(-1.0 / scaling_matrix[i, i]) 770 j = j + 2 771 772 # Increment i. 773 i = i + 1 774 775 # Add the inequalities for pN. 776 A.append(zero_array * 0.0) 777 A.append(zero_array * 0.0) 778 for i in range(pop_start, pop_start+cdp.N-1): 779 A[-2][i] = -1.0 780 A[-1][i] = 1.0 781 b.append(-1.0 / scaling_matrix[i, i]) 782 b.append(0.0) 783 784 # Convert to numpy data structures. 785 A = array(A, float64) 786 b = array(b, float64) 787 788 # Return the constraint objects. 789 return A, b
790 791
792 - def _minimise_bc_data(self, model):
793 """Extract and unpack the back calculated data. 794 795 @param model: The instantiated class containing the target function. 796 @type model: class instance 797 """ 798 799 # No alignment tensors, so nothing to do. 800 if not hasattr(cdp, 'align_tensors'): 801 return 802 803 # Loop over each alignment. 804 align_index = 0 805 for i in range(len(cdp.align_ids)): 806 # Skip non-optimised tensors. 807 if not self._opt_tensor(cdp.align_tensors[i]): 808 continue 809 810 # The alignment ID. 811 align_id = cdp.align_ids[i] 812 813 # Data flags 814 rdc_flag = False 815 if hasattr(cdp, 'rdc_ids') and align_id in cdp.rdc_ids: 816 rdc_flag = True 817 pcs_flag = False 818 if hasattr(cdp, 'pcs_ids') and align_id in cdp.pcs_ids: 819 pcs_flag = True 820 821 # Spin loop. 822 pcs_index = 0 823 for spin in spin_loop(): 824 # Skip deselected spins. 825 if not spin.select: 826 continue 827 828 # Spins with PCS data. 829 if pcs_flag and hasattr(spin, 'pcs'): 830 # Initialise the data structure if necessary. 831 if not hasattr(spin, 'pcs_bc'): 832 spin.pcs_bc = {} 833 834 # Add the back calculated PCS (in ppm). 835 spin.pcs_bc[align_id] = model.deltaij_theta[align_index, pcs_index] * 1e6 836 837 # Increment the data index if the spin container has data. 838 pcs_index = pcs_index + 1 839 840 # Interatomic data container loop. 841 rdc_index = 0 842 for interatom in interatomic_loop(): 843 # Get the spins. 844 spin1 = return_spin(interatom.spin_id1) 845 spin2 = return_spin(interatom.spin_id2) 846 847 # RDC checks. 848 if not self._check_rdcs(interatom, spin1, spin2): 849 continue 850 851 # Containers with RDC data. 852 if rdc_flag and hasattr(interatom, 'rdc'): 853 # Initialise the data structure if necessary. 854 if not hasattr(interatom, 'rdc_bc'): 855 interatom.rdc_bc = {} 856 857 # Append the back calculated PCS. 858 interatom.rdc_bc[align_id] = model.Dij_theta[align_index, rdc_index] 859 860 # Increment the data index if the interatom container has data. 861 rdc_index = rdc_index + 1 862 863 # Increment the alignment index (for the optimised tensors). 864 align_index += 1
865 866
867 - def _minimise_setup_atomic_pos(self, sim_index=None):
868 """Set up the atomic position data structures for optimisation using PCSs and PREs as base data sets. 869 870 @keyword sim_index: The index of the simulation to optimise. This should be None if normal optimisation is desired. 871 @type sim_index: None or int 872 @return: The atomic positions (the first index is the spins, the second is the structures, and the third is the atomic coordinates) and the paramagnetic centre. 873 @rtype: numpy rank-3 array, numpy rank-1 array. 874 """ 875 876 # Initialise. 877 atomic_pos = [] 878 879 # Store the atomic positions. 880 for spin in spin_loop(): 881 # Skip deselected spins. 882 if not spin.select: 883 continue 884 885 # Only use spins with alignment/paramagnetic data. 886 if not hasattr(spin, 'pcs') and not hasattr(spin, 'pre'): 887 continue 888 889 # The position list. 890 if type(spin.pos[0]) in [float, float64]: 891 atomic_pos.append([spin.pos]) 892 else: 893 atomic_pos.append(spin.pos) 894 895 # Convert to numpy objects. 896 atomic_pos = array(atomic_pos, float64) 897 898 # The paramagnetic centre. 899 if not hasattr(cdp, 'paramagnetic_centre'): 900 paramag_centre = zeros(3, float64) 901 elif sim_index != None and not cdp.paramag_centre_fixed: 902 if not hasattr(cdp, 'paramagnetic_centre_sim') or cdp.paramagnetic_centre_sim[sim_index] == None: 903 paramag_centre = zeros(3, float64) 904 else: 905 paramag_centre = array(cdp.paramagnetic_centre_sim[sim_index]) 906 else: 907 paramag_centre = array(cdp.paramagnetic_centre) 908 909 # Return the data structures. 910 return atomic_pos, paramag_centre
911 912
913 - def _minimise_setup_pcs(self, sim_index=None):
914 """Set up the data structures for optimisation using PCSs as base data sets. 915 916 @keyword sim_index: The index of the simulation to optimise. This should be None if normal optimisation is desired. 917 @type sim_index: None or int 918 @return: The assembled data structures for using PCSs as the base data for optimisation. These include: 919 - the PCS values. 920 - the unit vectors connecting the paramagnetic centre (the electron spin) to 921 - the PCS weight. 922 - the nuclear spin. 923 - the pseudocontact shift constants. 924 @rtype: tuple of (numpy rank-2 array, numpy rank-2 array, numpy rank-2 array, numpy rank-1 array, numpy rank-1 array) 925 """ 926 927 # Data setup tests. 928 if not hasattr(cdp, 'paramagnetic_centre') and (hasattr(cdp, 'paramag_centre_fixed') and cdp.paramag_centre_fixed): 929 raise RelaxError("The paramagnetic centre has not yet been specified.") 930 if not hasattr(cdp, 'temperature'): 931 raise RelaxError("The experimental temperatures have not been set.") 932 if not hasattr(cdp, 'frq'): 933 raise RelaxError("The spectrometer frequencies of the experiments have not been set.") 934 935 # Initialise. 936 pcs = [] 937 pcs_err = [] 938 pcs_weight = [] 939 temp = [] 940 frq = [] 941 942 # The PCS data. 943 for i in range(len(cdp.align_ids)): 944 # Alias the ID. 945 align_id = cdp.align_ids[i] 946 947 # Skip non-optimised data. 948 if not self._opt_uses_align_data(align_id): 949 continue 950 951 # Append empty arrays to the PCS structures. 952 pcs.append([]) 953 pcs_err.append([]) 954 pcs_weight.append([]) 955 956 # Get the temperature for the PCS constant. 957 if align_id in cdp.temperature: 958 temp.append(cdp.temperature[align_id]) 959 960 # The temperature must be given! 961 else: 962 raise RelaxError("The experimental temperature for the alignment ID '%s' has not been set." % align_id) 963 964 # Get the spectrometer frequency in Tesla units for the PCS constant. 965 if align_id in cdp.frq: 966 frq.append(cdp.frq[align_id] * 2.0 * pi / g1H) 967 968 # The frequency must be given! 969 else: 970 raise RelaxError("The spectrometer frequency for the alignment ID '%s' has not been set." % align_id) 971 972 # Spin loop. 973 j = 0 974 for spin in spin_loop(): 975 # Skip deselected spins. 976 if not spin.select: 977 continue 978 979 # Skip spins without PCS data. 980 if not hasattr(spin, 'pcs'): 981 continue 982 983 # Append the PCSs to the list. 984 if align_id in spin.pcs.keys(): 985 if sim_index != None: 986 pcs[-1].append(spin.pcs_sim[align_id][sim_index]) 987 else: 988 pcs[-1].append(spin.pcs[align_id]) 989 else: 990 pcs[-1].append(None) 991 992 # Append the PCS errors. 993 if hasattr(spin, 'pcs_err') and align_id in spin.pcs_err.keys(): 994 pcs_err[-1].append(spin.pcs_err[align_id]) 995 else: 996 pcs_err[-1].append(None) 997 998 # Append the weight. 999 if hasattr(spin, 'pcs_weight') and align_id in spin.pcs_weight.keys(): 1000 pcs_weight[-1].append(spin.pcs_weight[align_id]) 1001 else: 1002 pcs_weight[-1].append(1.0) 1003 1004 # Spin index. 1005 j = j + 1 1006 1007 # Convert to numpy objects. 1008 pcs = array(pcs, float64) 1009 pcs_err = array(pcs_err, float64) 1010 pcs_weight = array(pcs_weight, float64) 1011 1012 # Convert the PCS from ppm to no units. 1013 pcs = pcs * 1e-6 1014 pcs_err = pcs_err * 1e-6 1015 1016 # Return the data structures. 1017 return pcs, pcs_err, pcs_weight, temp, frq
1018 1019
1020 - def _minimise_setup_rdcs(self, sim_index=None):
1021 """Set up the data structures for optimisation using RDCs as base data sets. 1022 1023 @keyword sim_index: The index of the simulation to optimise. This should be None if normal optimisation is desired. 1024 @type sim_index: None or int 1025 @return: The assembled data structures for using RDCs as the base data for optimisation. These include: 1026 - rdc, the RDC values. 1027 - rdc_err, the RDC errors. 1028 - rdc_weight, the RDC weights. 1029 - vectors, the interatomic vectors. 1030 - rdc_const, the dipolar constants. 1031 - absolute, the absolute value flags (as 1's and 0's). 1032 @rtype: tuple of (numpy rank-2 array, numpy rank-2 array, numpy rank-2 array, numpy rank-3 array, numpy rank-2 array, numpy rank-2 array) 1033 """ 1034 1035 # Initialise. 1036 rdc = [] 1037 rdc_err = [] 1038 rdc_weight = [] 1039 unit_vect = [] 1040 rdc_const = [] 1041 absolute = [] 1042 1043 # The unit vectors and RDC constants. 1044 for interatom in interatomic_loop(): 1045 # Get the spins. 1046 spin1 = return_spin(interatom.spin_id1) 1047 spin2 = return_spin(interatom.spin_id2) 1048 1049 # RDC checks. 1050 if not self._check_rdcs(interatom, spin1, spin2): 1051 continue 1052 1053 # Add the vectors. 1054 if arg_check.is_float(interatom.vector[0], raise_error=False): 1055 unit_vect.append([interatom.vector]) 1056 else: 1057 unit_vect.append(interatom.vector) 1058 1059 # Gyromagnetic ratios. 1060 g1 = return_gyromagnetic_ratio(spin1.isotope) 1061 g2 = return_gyromagnetic_ratio(spin2.isotope) 1062 1063 # Calculate the RDC dipolar constant (in Hertz, and the 3 comes from the alignment tensor), and append it to the list. 1064 rdc_const.append(3.0/(2.0*pi) * dipolar_constant(g1, g2, interatom.r)) 1065 1066 # Fix the unit vector data structure. 1067 num = None 1068 for rdc_index in range(len(unit_vect)): 1069 # Number of vectors. 1070 if num == None: 1071 if unit_vect[rdc_index] != None: 1072 num = len(unit_vect[rdc_index]) 1073 continue 1074 1075 # Check. 1076 if unit_vect[rdc_index] != None and len(unit_vect[rdc_index]) != num: 1077 raise RelaxError("The number of interatomic vectors for all no match:\n%s" % unit_vect) 1078 1079 # Missing unit vectors. 1080 if num == None: 1081 raise RelaxError("No interatomic vectors could be found.") 1082 1083 # Update None entries. 1084 for i in range(len(unit_vect)): 1085 if unit_vect[i] == None: 1086 unit_vect[i] = [[None, None, None]]*num 1087 1088 # The RDC data. 1089 for i in range(len(cdp.align_ids)): 1090 # Alias the ID. 1091 align_id = cdp.align_ids[i] 1092 1093 # Skip non-optimised data. 1094 if not self._opt_uses_align_data(align_id): 1095 continue 1096 1097 # Append empty arrays to the RDC structures. 1098 rdc.append([]) 1099 rdc_err.append([]) 1100 rdc_weight.append([]) 1101 absolute.append([]) 1102 1103 # Interatom loop. 1104 for interatom in interatomic_loop(): 1105 # Get the spins. 1106 spin1 = return_spin(interatom.spin_id1) 1107 spin2 = return_spin(interatom.spin_id2) 1108 1109 # Skip deselected spins. 1110 if not spin1.select or not spin2.select: 1111 continue 1112 1113 # Only use interatomic data containers with RDC and vector data. 1114 if not hasattr(interatom, 'rdc') or not hasattr(interatom, 'vector'): 1115 continue 1116 1117 # Defaults of None. 1118 value = None 1119 error = None 1120 1121 # Pseudo-atom set up. 1122 if (hasattr(spin1, 'members') or hasattr(spin2, 'members')) and align_id in interatom.rdc.keys(): 1123 # Skip non-Me groups. 
1124 if (hasattr(spin1, 'members') and len(spin1.members) != 3) or (hasattr(spin2, 'members') and len(spin2.members) != 3): 1125 continue 1126 1127 # The RDC for the Me-pseudo spin where: 1128 # <D> = -1/3 Dpar. 1129 # See Verdier, et al., JMR, 2003, 163, 353-359. 1130 if sim_index != None: 1131 value = -3.0 * interatom.rdc_sim[align_id][sim_index] 1132 else: 1133 value = -3.0 * interatom.rdc[align_id] 1134 1135 # The error. 1136 if hasattr(interatom, 'rdc_err') and align_id in interatom.rdc_err.keys(): 1137 error = -3.0 * interatom.rdc_err[align_id] 1138 1139 # Normal set up. 1140 elif align_id in interatom.rdc.keys(): 1141 # The RDC. 1142 if sim_index != None: 1143 value = interatom.rdc_sim[align_id][sim_index] 1144 else: 1145 value = interatom.rdc[align_id] 1146 1147 # The error. 1148 if hasattr(interatom, 'rdc_err') and align_id in interatom.rdc_err.keys(): 1149 error = interatom.rdc_err[align_id] 1150 1151 # Append the RDCs to the list. 1152 rdc[-1].append(value) 1153 1154 # Append the RDC errors. 1155 rdc_err[-1].append(error) 1156 1157 # Append the weight. 1158 if hasattr(interatom, 'rdc_weight') and align_id in interatom.rdc_weight.keys(): 1159 rdc_weight[-1].append(interatom.rdc_weight[align_id]) 1160 else: 1161 rdc_weight[-1].append(1.0) 1162 1163 # Append the absolute value flag. 1164 if hasattr(interatom, 'absolute_rdc') and align_id in interatom.absolute_rdc.keys(): 1165 absolute[-1].append(interatom.absolute_rdc[align_id]) 1166 else: 1167 absolute[-1].append(False) 1168 1169 # Convert to numpy objects. 1170 rdc = array(rdc, float64) 1171 rdc_err = array(rdc_err, float64) 1172 rdc_weight = array(rdc_weight, float64) 1173 unit_vect = array(unit_vect, float64) 1174 rdc_const = array(rdc_const, float64) 1175 absolute = array(absolute, float64) 1176 1177 # Return the data structures. 1178 return rdc, rdc_err, rdc_weight, unit_vect, rdc_const, absolute
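A one-line illustration of the methyl pseudo-atom conversion above: as the averaged RDC of a fast-rotating methyl group obeys <D> = -1/3 Dpar, the measured value is multiplied by -3 before being used (hypothetical number):

measured_rdc = -4.2                  # averaged methyl RDC in Hz
d_parallel = -3.0 * measured_rdc
print(d_parallel)                    # 12.6 Hz, the value passed to the target function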
1179 1180
1181 - def _minimise_setup_tensors(self, sim_index=None):
1182 """Set up the data structures for optimisation using alignment tensors as base data sets. 1183 1184 @keyword sim_index: The index of the simulation to optimise. This should be None if 1185 normal optimisation is desired. 1186 @type sim_index: None or int 1187 @return: The assembled data structures for using alignment tensors as the base 1188 data for optimisation. These include: 1189 - full_tensors, the data of the full alignment tensors. 1190 - red_tensor_elem, the tensors as concatenated rank-1 5D arrays. 1191 - red_tensor_err, the tensor errors as concatenated rank-1 5D 1192 arrays. 1193 - full_in_ref_frame, flags specifying if the tensor in the reference 1194 frame is the full or reduced tensor. 1195 @rtype: tuple of (list, numpy rank-1 array, numpy rank-1 array, numpy rank-1 1196 array) 1197 """ 1198 1199 # Initialise. 1200 n = len(cdp.align_tensors.reduction) 1201 full_tensors = zeros(n*5, float64) 1202 red_tensors = zeros(n*5, float64) 1203 red_err = ones(n*5, float64) * 1e-5 1204 full_in_ref_frame = zeros(n, float64) 1205 1206 # Loop over the full tensors. 1207 for i, tensor in self._tensor_loop(red=False): 1208 # The full tensor. 1209 full_tensors[5*i + 0] = tensor.Axx 1210 full_tensors[5*i + 1] = tensor.Ayy 1211 full_tensors[5*i + 2] = tensor.Axy 1212 full_tensors[5*i + 3] = tensor.Axz 1213 full_tensors[5*i + 4] = tensor.Ayz 1214 1215 # The full tensor corresponds to the frame of reference. 1216 if cdp.ref_domain == tensor.domain: 1217 full_in_ref_frame[i] = 1 1218 1219 # Loop over the reduced tensors. 1220 for i, tensor in self._tensor_loop(red=True): 1221 # The reduced tensor (simulation data). 1222 if sim_index != None: 1223 red_tensors[5*i + 0] = tensor.Axx_sim[sim_index] 1224 red_tensors[5*i + 1] = tensor.Ayy_sim[sim_index] 1225 red_tensors[5*i + 2] = tensor.Axy_sim[sim_index] 1226 red_tensors[5*i + 3] = tensor.Axz_sim[sim_index] 1227 red_tensors[5*i + 4] = tensor.Ayz_sim[sim_index] 1228 1229 # The reduced tensor. 1230 else: 1231 red_tensors[5*i + 0] = tensor.Axx 1232 red_tensors[5*i + 1] = tensor.Ayy 1233 red_tensors[5*i + 2] = tensor.Axy 1234 red_tensors[5*i + 3] = tensor.Axz 1235 red_tensors[5*i + 4] = tensor.Ayz 1236 1237 # The reduced tensor errors. 1238 if hasattr(tensor, 'Axx_err'): 1239 red_err[5*i + 0] = tensor.Axx_err 1240 red_err[5*i + 1] = tensor.Ayy_err 1241 red_err[5*i + 2] = tensor.Axy_err 1242 red_err[5*i + 3] = tensor.Axz_err 1243 red_err[5*i + 4] = tensor.Ayz_err 1244 1245 # Return the data structures. 1246 return full_tensors, red_tensors, red_err, full_in_ref_frame
1247 1248
1249 - def _minimise_setup_fixed_tensors(self):
1250 """Set up the data structures for the fixed alignment tensors. 1251 1252 @return: The assembled data structures for the fixed alignment tensors. 1253 @rtype: numpy rank-1 array. 1254 """ 1255 1256 # Initialise. 1257 n = align_tensor.num_tensors(skip_fixed=False) - align_tensor.num_tensors(skip_fixed=True) 1258 tensors = zeros(n*5, float64) 1259 1260 # Nothing to do. 1261 if n == 0: 1262 return None 1263 1264 # Loop over the tensors. 1265 index = 0 1266 for i in range(len(cdp.align_tensors)): 1267 # Skip non-optimised data. 1268 if not self._opt_uses_align_data(cdp.align_tensors[i].name): 1269 continue 1270 1271 # The real tensors. 1272 tensors[5*index + 0] = cdp.align_tensors[i].Axx 1273 tensors[5*index + 1] = cdp.align_tensors[i].Ayy 1274 tensors[5*index + 2] = cdp.align_tensors[i].Axy 1275 tensors[5*index + 3] = cdp.align_tensors[i].Axz 1276 tensors[5*index + 4] = cdp.align_tensors[i].Ayz 1277 1278 # Increment the index. 1279 index += 1 1280 1281 # Return the data structure. 1282 return tensors
1283 1284
1285 - def _num_data_points(self):
1286 """Determine the number of data points used in the model. 1287 1288 @return: The number, n, of data points in the model. 1289 @rtype: int 1290 """ 1291 1292 # Determine the data type. 1293 data_types = self._base_data_types() 1294 1295 # Init. 1296 n = 0 1297 1298 # Spin loop. 1299 for spin in spin_loop(): 1300 # Skip deselected spins. 1301 if not spin.select: 1302 continue 1303 1304 # PCS data (skipping array elements set to None). 1305 if 'pcs' in data_types: 1306 if hasattr(spin, 'pcs'): 1307 for pcs in spin.pcs: 1308 if isinstance(pcs, float): 1309 n = n + 1 1310 1311 # Interatomic data loop. 1312 for interatom in interatomic_loop(): 1313 # RDC data (skipping array elements set to None). 1314 if 'rdc' in data_types: 1315 if hasattr(interatom, 'rdc'): 1316 for rdc in interatom.rdc: 1317 if isinstance(rdc, float): 1318 n = n + 1 1319 1320 # Alignment tensors. 1321 if 'tensor' in data_types: 1322 n = n + 5*len(cdp.align_tensors) 1323 1324 # Return the value. 1325 return n
1326 1327
1328 - def _number_of_states(self, N=None):
1329 """Set the number of states in the N-state model. 1330 1331 @param N: The number of states. 1332 @type N: int 1333 """ 1334 1335 # Test if the current data pipe exists. 1336 pipes.test() 1337 1338 # Set the value of N. 1339 cdp.N = N 1340 1341 # Update the model, if set. 1342 if hasattr(cdp, 'model'): 1343 self._update_model()
1344 1345
1346 - def _opt_tensor(self, tensor):
1347 """Determine if the given tensor is to be optimised. 1348 1349 @param tensor: The alignment tensor to check. 1350 @type tensor: AlignmentTensor object. 1351 @return: True if the tensor is to be optimised, False otherwise. 1352 @rtype: bool 1353 """ 1354 1355 # Combine all RDC and PCS IDs. 1356 ids = [] 1357 if hasattr(cdp, 'rdc_ids'): 1358 ids += cdp.rdc_ids 1359 if hasattr(cdp, 'pcs_ids'): 1360 ids += cdp.pcs_ids 1361 1362 # No RDC or PCS data for the alignment, so skip the tensor as it will not be optimised. 1363 if tensor.name not in ids: 1364 return False 1365 1366 # Fixed tensor. 1367 if tensor.fixed: 1368 return False 1369 1370 # The tensor is to be optimised. 1371 return True
1372 1373
1374 - def _opt_uses_align_data(self, align_id=None):
1375 """Determine if the PCS or RDC data for the given alignment ID is needed for optimisation. 1376 1377 @keyword align_id: The optional alignment ID string. 1378 @type align_id: str 1379 @return: True if alignment data is to be used for optimisation, False otherwise. 1380 @rtype: bool 1381 """ 1382 1383 # No alignment IDs. 1384 if not hasattr(cdp, 'align_ids'): 1385 return False 1386 1387 # Convert the align IDs to an array, or take all IDs. 1388 if align_id: 1389 align_ids = [align_id] 1390 else: 1391 align_ids = cdp.align_ids 1392 1393 # Check the PCS and RDC. 1394 for align_id in align_ids: 1395 if self._opt_uses_pcs(align_id) or self._opt_uses_rdc(align_id): 1396 return True 1397 1398 # No alignment data is used for optimisation. 1399 return False
1400 1401
1402 - def _opt_uses_pcs(self, align_id):
1403 """Determine if the PCS data for the given alignment ID is needed for optimisation. 1404 1405 @param align_id: The alignment ID string. 1406 @type align_id: str 1407 @return: True if the PCS data is to be used for optimisation, False otherwise. 1408 @rtype: bool 1409 """ 1410 1411 # No alignment IDs. 1412 if not hasattr(cdp, 'pcs_ids'): 1413 return False 1414 1415 # No PCS data for the alignment. 1416 if align_id not in cdp.pcs_ids: 1417 return False 1418 1419 # Is the tensor optimised? 1420 tensor_flag = self._opt_tensor(align_tensor.get_tensor_object(align_id)) 1421 1422 # Is the paramagnetic position optimised? 1423 pos_flag = False 1424 if hasattr(cdp, 'paramag_centre_fixed') and not cdp.paramag_centre_fixed: 1425 pos_flag = True 1426 1427 # Are the populations optimised? 1428 prob_flag = False 1429 if cdp.model == 'population': 1430 prob_flag = True 1431 1432 # Not used. 1433 if not tensor_flag and not pos_flag and not prob_flag: 1434 return False 1435 1436 # The PCS data is to be used for optimisation. 1437 return True
1438 1439
1440 - def _opt_uses_rdc(self, align_id):
1441 """Determine if the RDC data for the given alignment ID is needed for optimisation. 1442 1443 @param align_id: The alignment ID string. 1444 @type align_id: str 1445 @return: True if the RDC data is to be used for optimisation, False otherwise. 1446 @rtype: bool 1447 """ 1448 1449 # No alignment IDs. 1450 if not hasattr(cdp, 'rdc_ids'): 1451 return False 1452 1453 # No RDC data for the alignment. 1454 if align_id not in cdp.rdc_ids: 1455 return False 1456 1457 # Is the tensor optimised? 1458 tensor_flag = self._opt_tensor(align_tensor.get_tensor_object(align_id)) 1459 1460 # Is the paramagnetic position optimised? 1461 pos_flag = False 1462 if hasattr(cdp, 'paramag_centre_fixed') and not cdp.paramag_centre_fixed: 1463 pos_flag = True 1464 1465 # Are the populations optimised? 1466 prob_flag = False 1467 if cdp.model == 'population': 1468 prob_flag = True 1469 1470 # Not used. 1471 if not tensor_flag and not pos_flag and not prob_flag: 1472 return False 1473 1474 # The RDC data is to be used for optimisation. 1475 return True
1476 1477
1478 - def _param_model_index(self, param):
1479 """Return the N-state model index for the given parameter. 1480 1481 @param param: The N-state model parameter. 1482 @type param: str 1483 @return: The N-state model index. 1484 @rtype: str 1485 """ 1486 1487 # Probability. 1488 if search('^p[0-9]*$', param): 1489 return int(param[1:]) 1490 1491 # Alpha Euler angle. 1492 if search('^alpha', param): 1493 return int(param[5:]) 1494 1495 # Beta Euler angle. 1496 if search('^beta', param): 1497 return int(param[4:]) 1498 1499 # Gamma Euler angle. 1500 if search('^gamma', param): 1501 return int(param[5:]) 1502 1503 # Model independent parameter. 1504 return None
1505 1506
1507 - def _param_num(self):
1508 """Determine the number of parameters in the model. 1509 1510 @return: The number of model parameters. 1511 @rtype: int 1512 """ 1513 1514 # Determine the data type. 1515 data_types = self._base_data_types() 1516 1517 # Init. 1518 num = 0 1519 1520 # Alignment tensor params. 1521 if ('rdc' in data_types or 'pcs' in data_types) and not align_tensor.all_tensors_fixed(): 1522 # Loop over the alignments. 1523 for i in range(len(cdp.align_tensors)): 1524 # Skip non-optimised tensors. 1525 if not self._opt_tensor(cdp.align_tensors[i]): 1526 continue 1527 1528 # Add 5 tensor parameters. 1529 num += 5 1530 1531 # Populations. 1532 if cdp.model in ['2-domain', 'population']: 1533 num = num + (cdp.N - 1) 1534 1535 # Euler angles. 1536 if cdp.model == '2-domain': 1537 num = num + 3*cdp.N 1538 1539 # The paramagnetic centre. 1540 if hasattr(cdp, 'paramag_centre_fixed') and not cdp.paramag_centre_fixed: 1541 num = num + 3 1542 1543 # Return the param number. 1544 return num
1545 1546
1547 - def _ref_domain(self, ref=None):
1548 """Set the reference domain for the '2-domain' N-state model. 1549 1550 @param ref: The reference domain. 1551 @type ref: str 1552 """ 1553 1554 # Test if the current data pipe exists. 1555 pipes.test() 1556 1557 # Test if the model is setup. 1558 if not hasattr(cdp, 'model'): 1559 raise RelaxNoModelError('N-state') 1560 1561 # Test that the correct model is set. 1562 if cdp.model != '2-domain': 1563 raise RelaxError("Setting the reference domain is only possible for the '2-domain' N-state model.") 1564 1565 # Test if the reference domain exists. 1566 exists = False 1567 for tensor_cont in cdp.align_tensors: 1568 if tensor_cont.domain == ref: 1569 exists = True 1570 if not exists: 1571 raise RelaxError("The reference domain cannot be found within any of the loaded tensors.") 1572 1573 # Set the reference domain. 1574 cdp.ref_domain = ref 1575 1576 # Update the model. 1577 self._update_model()
1578 1579
1580 - def _select_model(self, model=None):
1581 """Select the N-state model type. 1582 1583 @param model: The N-state model type. Can be one of '2-domain', 'population', or 'fixed'. 1584 @type model: str 1585 """ 1586 1587 # Test if the current data pipe exists. 1588 pipes.test() 1589 1590 # Test if the model name exists. 1591 if not model in ['2-domain', 'population', 'fixed']: 1592 raise RelaxError("The model name " + repr(model) + " is invalid.") 1593 1594 # Test if the model is setup. 1595 if hasattr(cdp, 'model'): 1596 warn(RelaxWarning("The N-state model has already been set up. Switching from model '%s' to '%s'." % (cdp.model, model))) 1597 1598 # Set the model 1599 cdp.model = model 1600 1601 # Initialise the list of model parameters. 1602 cdp.params = [] 1603 1604 # Update the model. 1605 self._update_model()
1606 1607
1608 - def _target_fn_setup(self, sim_index=None, scaling=True):
1609 """Initialise the target function for optimisation or direct calculation. 1610 1611 @param sim_index: The index of the simulation to optimise. This should be None if normal optimisation is desired. 1612 @type sim_index: None or int 1613 @param scaling: If True, diagonal scaling is enabled during optimisation to allow the problem to be better conditioned. 1614 @type scaling: bool 1615 """ 1616 1617 # Test if the N-state model has been set up. 1618 if not hasattr(cdp, 'model'): 1619 raise RelaxNoModelError('N-state') 1620 1621 # '2-domain' model setup tests. 1622 if cdp.model == '2-domain': 1623 # The number of states. 1624 if not hasattr(cdp, 'N'): 1625 raise RelaxError("The number of states has not been set.") 1626 1627 # The reference domain. 1628 if not hasattr(cdp, 'ref_domain'): 1629 raise RelaxError("The reference domain has not been set.") 1630 1631 # Update the model parameters if necessary. 1632 self._update_model() 1633 1634 # Create the initial parameter vector. 1635 param_vector = self._assemble_param_vector(sim_index=sim_index) 1636 1637 # Determine if alignment tensors or RDCs are to be used. 1638 data_types = self._base_data_types() 1639 1640 # The probabilities. 1641 probs = None 1642 if hasattr(cdp, 'probs') and len(cdp.probs) and cdp.probs[0] != None: 1643 probs = cdp.probs 1644 1645 # Diagonal scaling. 1646 scaling_matrix = None 1647 if len(param_vector): 1648 scaling_matrix = self._assemble_scaling_matrix(data_types=data_types, scaling=scaling) 1649 param_vector = dot(inv(scaling_matrix), param_vector) 1650 1651 # Get the data structures for optimisation using the tensors as base data sets. 1652 full_tensors, red_tensor_elem, red_tensor_err, full_in_ref_frame = None, None, None, None 1653 if 'tensor' in data_types: 1654 full_tensors, red_tensor_elem, red_tensor_err, full_in_ref_frame = self._minimise_setup_tensors(sim_index=sim_index) 1655 1656 # Get the data structures for optimisation using PCSs as base data sets. 1657 pcs, pcs_err, pcs_weight, temp, frq = None, None, None, None, None 1658 if 'pcs' in data_types: 1659 pcs, pcs_err, pcs_weight, temp, frq = self._minimise_setup_pcs(sim_index=sim_index) 1660 1661 # Get the data structures for optimisation using RDCs as base data sets. 1662 rdcs, rdc_err, rdc_weight, rdc_vector, rdc_dj, absolute_rdc = None, None, None, None, None, None 1663 if 'rdc' in data_types: 1664 rdcs, rdc_err, rdc_weight, rdc_vector, rdc_dj, absolute_rdc = self._minimise_setup_rdcs(sim_index=sim_index) 1665 1666 # Get the fixed tensors. 1667 fixed_tensors = None 1668 if 'rdc' in data_types or 'pcs' in data_types: 1669 full_tensors = self._minimise_setup_fixed_tensors() 1670 1671 # The flag list. 1672 fixed_tensors = [] 1673 for i in range(len(cdp.align_tensors)): 1674 # Skip non-optimised data. 1675 if not self._opt_uses_align_data(cdp.align_tensors[i].name): 1676 continue 1677 1678 if cdp.align_tensors[i].fixed: 1679 fixed_tensors.append(True) 1680 else: 1681 fixed_tensors.append(False) 1682 1683 # Get the atomic_positions. 1684 atomic_pos, paramag_centre, centre_fixed = None, None, True 1685 if 'pcs' in data_types or 'pre' in data_types: 1686 atomic_pos, paramag_centre = self._minimise_setup_atomic_pos(sim_index=sim_index) 1687 1688 # Optimisation of the centre. 1689 if hasattr(cdp, 'paramag_centre_fixed'): 1690 centre_fixed = cdp.paramag_centre_fixed 1691 1692 # Set up the class instance containing the target function. 
1693 model = N_state_opt(model=cdp.model, N=cdp.N, init_params=param_vector, probs=probs, full_tensors=full_tensors, red_data=red_tensor_elem, red_errors=red_tensor_err, full_in_ref_frame=full_in_ref_frame, fixed_tensors=fixed_tensors, pcs=pcs, rdcs=rdcs, pcs_errors=pcs_err, rdc_errors=rdc_err, pcs_weights=pcs_weight, rdc_weights=rdc_weight, rdc_vect=rdc_vector, temp=temp, frq=frq, dip_const=rdc_dj, absolute_rdc=absolute_rdc, atomic_pos=atomic_pos, paramag_centre=paramag_centre, scaling_matrix=scaling_matrix, centre_fixed=centre_fixed) 1694 1695 # Return the data. 1696 return model, param_vector, data_types, scaling_matrix
1697 1698
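Diagonal scaling, enabled above via the scaling flag, conditions the optimisation by dividing each parameter by a per-parameter scale factor before minimisation and multiplying the result back afterwards. A minimal numpy sketch of the idea (the scale values here are arbitrary illustrations, not those assembled by _assemble_scaling_matrix()):

from numpy import array, diag, dot, float64
from numpy.linalg import inv

# Example parameters: two population weights and one alignment tensor component.
params = array([0.3, 0.2, 5e-4], float64)

# A diagonal scaling matrix with one entry per parameter.
scaling_matrix = diag(array([1.0, 1.0, 1e-3], float64))

# Scale into the conditioned space prior to optimisation.
scaled = dot(inv(scaling_matrix), params)      # -> [0.3, 0.2, 0.5]

# After optimisation, map back to the original parameter space.
unscaled = dot(scaling_matrix, scaled)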
1699 - def _tensor_loop(self, red=False):
1700 """Generator method for looping over the full or reduced tensors. 1701 1702 @keyword red: A flag which if True causes the reduced tensors to be returned, and if False 1703 the full tensors are returned. 1704 @type red: bool 1705 @return: The tensor index and the tensor. 1706 @rtype: (int, AlignTensorData instance) 1707 """ 1708 1709 # Number of tensor pairs. 1710 n = len(cdp.align_tensors.reduction) 1711 1712 # Alias. 1713 data = cdp.align_tensors 1714 list = data.reduction 1715 1716 # Full or reduced index. 1717 if red: 1718 index = 1 1719 else: 1720 index = 0 1721 1722 # Loop over the reduction list. 1723 for i in range(n): 1724 yield i, data[list[i][index]]
1725 1726
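The generator above simply walks the list of (full, reduced) tensor index pairs and yields one member of each pair. A stripped-down sketch of the same looping pattern, with plain lists standing in for the align_tensors container:

def tensor_loop(tensors, reduction, red=False):
    """Yield (pair index, tensor) for the full (red=False) or reduced (red=True) member."""
    index = 1 if red else 0
    for i, pair in enumerate(reduction):
        yield i, tensors[pair[index]]

tensors = ['full_A', 'red_A', 'full_B', 'red_B']
reduction = [(0, 1), (2, 3)]

for i, tensor in tensor_loop(tensors, reduction, red=True):
    print(i, tensor)    # 0 red_A, then 1 red_B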
1727 - def _update_model(self):
1728 """Update the model parameters as necessary.""" 1729 1730 # Initialise the list of model parameters. 1731 if not hasattr(cdp, 'params'): 1732 cdp.params = [] 1733 1734 # Determine the number of states (loaded as structural models), if not already set. 1735 if not hasattr(cdp, 'N'): 1736 # Set the number. 1737 if hasattr(cdp, 'structure'): 1738 cdp.N = cdp.structure.num_models() 1739 1740 # Otherwise return as the rest cannot be updated without N. 1741 else: 1742 return 1743 1744 # Set up the parameter arrays. 1745 if not cdp.params: 1746 # Add the probability or population weight parameters. 1747 if cdp.model in ['2-domain', 'population']: 1748 for i in range(cdp.N-1): 1749 cdp.params.append('p' + repr(i)) 1750 1751 # Add the Euler angle parameters. 1752 if cdp.model == '2-domain': 1753 for i in range(cdp.N): 1754 cdp.params.append('alpha' + repr(i)) 1755 cdp.params.append('beta' + repr(i)) 1756 cdp.params.append('gamma' + repr(i)) 1757 1758 # Initialise the probability and Euler angle arrays. 1759 if cdp.model in ['2-domain', 'population']: 1760 if not hasattr(cdp, 'probs'): 1761 cdp.probs = [None] * cdp.N 1762 if cdp.model == '2-domain': 1763 if not hasattr(cdp, 'alpha'): 1764 cdp.alpha = [None] * cdp.N 1765 if not hasattr(cdp, 'beta'): 1766 cdp.beta = [None] * cdp.N 1767 if not hasattr(cdp, 'gamma'): 1768 cdp.gamma = [None] * cdp.N 1769 1770 # Determine the data type. 1771 data_types = self._base_data_types() 1772 1773 # Set up tensors for each alignment. 1774 if hasattr(cdp, 'align_ids'): 1775 for id in cdp.align_ids: 1776 # No tensors initialised. 1777 if not hasattr(cdp, 'align_tensors'): 1778 align_tensor.init(tensor=id, params=[0.0, 0.0, 0.0, 0.0, 0.0]) 1779 1780 # Find if the tensor corresponding to the id exists. 1781 exists = False 1782 for tensor in cdp.align_tensors: 1783 if id == tensor.name: 1784 exists = True 1785 1786 # Initialise the tensor. 1787 if not exists: 1788 align_tensor.init(tensor=id, params=[0.0, 0.0, 0.0, 0.0, 0.0])
1789 1790
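For a given model and number of states N, the parameter list assembled by the method above is fully determined: N-1 probabilities for the 'population' and '2-domain' models, plus three Euler angles per state for '2-domain'. A standalone sketch of that bookkeeping (a hypothetical helper, not the relax function itself):

def build_param_names(model, N):
    params = []
    # N-1 independent probabilities (the last is one minus the rest).
    if model in ('2-domain', 'population'):
        params += ['p%i' % i for i in range(N - 1)]
    # Three Euler angles per state for the 2-domain model.
    if model == '2-domain':
        for i in range(N):
            params += ['alpha%i' % i, 'beta%i' % i, 'gamma%i' % i]
    return params

print(build_param_names('population', 4))      # ['p0', 'p1', 'p2']
print(len(build_param_names('2-domain', 4)))   # 3 + 12 = 15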
1791 - def base_data_loop(self):
1792 """Loop over the base data of the spins - RDCs, PCSs, and NOESY data. 1793 1794 This loop iterates for each data point (RDC, PCS, NOESY) for each spin or interatomic data container, returning the identification information. 1795 1796 @return: A list of the spin or interatomic data container, the data type ('rdc', 'pcs', 'noesy'), and the alignment ID if required. 1797 @rtype: list of [SpinContainer instance, str, str] or [InteratomContainer instance, str, str] 1798 """ 1799 1800 # Loop over the interatomic data containers. 1801 for interatom in interatomic_loop(): 1802 # Skip deselected data. 1803 if not interatom.select: 1804 continue 1805 1806 # Re-initialise the data structure. 1807 data = [interatom, None, None] 1808 1809 # RDC data. 1810 if hasattr(interatom, 'rdc'): 1811 data[1] = 'rdc' 1812 1813 # Loop over the alignment IDs. 1814 for id in cdp.rdc_ids: 1815 # Add the ID. 1816 data[2] = id 1817 1818 # Yield the set. 1819 yield data 1820 1821 # NOESY data. 1822 if hasattr(interatom, 'noesy'): 1823 data[1] = 'noesy' 1824 1825 # Loop over the alignment IDs. 1826 for id in cdp.noesy_ids: 1827 # Add the ID. 1828 data[2] = id 1829 1830 # Yield the set. 1831 yield data 1832 1833 # Loop over the spins. 1834 for spin in spin_loop(): 1835 # Skip deselected data. 1836 if not spin.select: 1837 continue 1838 1839 # Re-initialise the data structure. 1840 data = [spin, None, None] 1841 1842 # PCS data. 1843 if hasattr(spin, 'pcs'): 1844 data[1] = 'pcs' 1845 1846 # Loop over the alignment IDs. 1847 for id in cdp.pcs_ids: 1848 # Add the ID. 1849 data[2] = id 1850 1851 # Yield the set. 1852 yield data
1853 1854
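The base data loop therefore yields one [container, data type, alignment ID] triple per measured value, not one per spin. A small sketch of the same generator pattern, using dictionaries as stand-in data containers:

def base_data_loop(interatoms, rdc_ids):
    """Yield [container, 'rdc', align_id] for every RDC of every selected container."""
    for interatom in interatoms:
        if not interatom['select']:
            continue
        if 'rdc' in interatom:
            for align_id in rdc_ids:
                yield [interatom, 'rdc', align_id]

interatoms = [{'select': True, 'rdc': {'Dy': 10.2, 'Tb': -3.1}},
              {'select': False, 'rdc': {'Dy': 1.0}}]

for data in base_data_loop(interatoms, ['Dy', 'Tb']):
    print(data[1], data[2])    # rdc Dy, then rdc Tb (the deselected container is skipped)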
1855 - def calculate(self, spin_id=None, verbosity=1, sim_index=None):
1856 """Calculation function. 1857 1858 Currently this function simply calculates the NOESY flat-bottom quadratic energy potential, 1859 if NOE restraints are available. 1860 1861 @keyword spin_id: The spin identification string (unused). 1862 @type spin_id: None or str 1863 @keyword verbosity: The amount of information to print. The higher the value, the greater the verbosity. 1864 @type verbosity: int 1865 @keyword sim_index: The MC simulation index (unused). 1866 @type sim_index: None 1867 """ 1868 1869 # Set up the target function for direct calculation. 1870 model, param_vector, data_types, scaling_matrix = self._target_fn_setup() 1871 1872 # Calculate the chi-squared value. 1873 if model: 1874 # Make a function call. 1875 chi2 = model.func(param_vector) 1876 1877 # Store the global chi-squared value. 1878 cdp.chi2 = chi2 1879 1880 # Store the back-calculated data. 1881 self._minimise_bc_data(model) 1882 1883 # Calculate the RDC Q-factors. 1884 if 'rdc' in data_types: 1885 rdc.q_factors() 1886 1887 # Calculate the PCS Q-factors. 1888 if 'pcs' in data_types: 1889 pcs.q_factors() 1890 1891 # NOE potential. 1892 if hasattr(cdp, 'noe_restraints'): 1893 # Init some numpy arrays. 1894 num_restraints = len(cdp.noe_restraints) 1895 dist = zeros(num_restraints, float64) 1896 pot = zeros(num_restraints, float64) 1897 lower = zeros(num_restraints, float64) 1898 upper = zeros(num_restraints, float64) 1899 1900 # Loop over the NOEs. 1901 for i in range(num_restraints): 1902 # Create arrays of the NOEs. 1903 lower[i] = cdp.noe_restraints[i][2] 1904 upper[i] = cdp.noe_restraints[i][3] 1905 1906 # Calculate the average distances, using -6 power averaging. 1907 dist[i] = self._calc_ave_dist(cdp.noe_restraints[i][0], cdp.noe_restraints[i][1], exp=-6) 1908 1909 # Calculate the quadratic potential. 1910 quad_pot(dist, pot, lower, upper) 1911 1912 # Store the distance and potential information. 1913 cdp.ave_dist = [] 1914 cdp.quad_pot = [] 1915 for i in range(num_restraints): 1916 cdp.ave_dist.append([cdp.noe_restraints[i][0], cdp.noe_restraints[i][1], dist[i]]) 1917 cdp.quad_pot.append([cdp.noe_restraints[i][0], cdp.noe_restraints[i][1], pot[i]])
1918 1919
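Two pieces of the calculation above are worth spelling out: the r**-6 averaging of a distance over the ensemble of states, and the flat-bottom quadratic potential, which is zero between the lower and upper restraint bounds and rises quadratically outside them. The sketch below restates both formulas directly in numpy; it is illustrative only and not the maths_fns code itself:

from numpy import array, float64, mean

def ave_dist_r6(distances):
    """<r> = (sum_i r_i**-6 / N)**(-1/6), the -6 power average over the states."""
    return mean(array(distances, float64)**-6)**(-1.0/6.0)

def flat_bottom_quad(dist, lower, upper):
    """Zero inside [lower, upper], quadratic penalty outside."""
    if dist < lower:
        return (dist - lower)**2
    if dist > upper:
        return (dist - upper)**2
    return 0.0

dists = [3.2, 3.5, 4.1]    # one distance per state (Angstrom).
r_ave = ave_dist_r6(dists)
print(r_ave, flat_bottom_quad(r_ave, lower=2.0, upper=3.0))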
1920 - def create_mc_data(self, data_id=None):
1921 """Create the Monte Carlo data by back-calculation. 1922 1923 @keyword data_id: The list of spin ID, data type, and alignment ID, as yielded by the base_data_loop() generator method. 1924 @type data_id: str 1925 @return: The Monte Carlo Ri data. 1926 @rtype: list of floats 1927 """ 1928 1929 # Initialise the MC data structure. 1930 mc_data = [] 1931 1932 # Alias the spin or interatomic data container. 1933 container = data_id[0] 1934 1935 # RDC data. 1936 if data_id[1] == 'rdc' and hasattr(container, 'rdc'): 1937 # Does back-calculated data exist? 1938 if not hasattr(container, 'rdc_bc'): 1939 self.calculate() 1940 1941 # The data. 1942 if not hasattr(container, 'rdc_bc') or not data_id[2] in container.rdc_bc: 1943 data = None 1944 else: 1945 data = container.rdc_bc[data_id[2]] 1946 1947 # Append the data. 1948 mc_data.append(data) 1949 1950 # NOESY data. 1951 elif data_id[1] == 'noesy' and hasattr(container, 'noesy'): 1952 # Does back-calculated data exist? 1953 if not hasattr(container, 'noesy_bc'): 1954 self.calculate() 1955 1956 # Append the data. 1957 mc_data.append(container.noesy_bc) 1958 1959 # PCS data. 1960 elif data_id[1] == 'pcs' and hasattr(container, 'pcs'): 1961 # Does back-calculated data exist? 1962 if not hasattr(container, 'pcs_bc'): 1963 self.calculate() 1964 1965 # The data. 1966 if not hasattr(container, 'pcs_bc') or not data_id[2] in container.pcs_bc: 1967 data = None 1968 else: 1969 data = container.pcs_bc[data_id[2]] 1970 1971 # Append the data. 1972 mc_data.append(data) 1973 1974 # Return the data. 1975 return mc_data
1976 1977 1978 default_value_doc = Desc_container("N-state model default values") 1979 _table = uf_tables.add_table(label="table: N-state default values", caption="N-state model default values.") 1980 _table.add_headings(["Data type", "Object name", "Value"]) 1981 _table.add_row(["Probabilities", "'p0', 'p1', 'p2', ..., 'pN'", "1/N"]) 1982 _table.add_row(["Euler angle alpha", "'alpha0', 'alpha1', ...", "(c+1) * pi / (N+1)"]) 1983 _table.add_row(["Euler angle beta", "'beta0', 'beta1', ...", "(c+1) * pi / (N+1)"]) 1984 _table.add_row(["Euler angle gamma", "'gamma0', 'gamma1', ...", "(c+1) * pi / (N+1)"]) 1985 default_value_doc.add_table(_table.label) 1986 default_value_doc.add_paragraph("In this table, N is the total number of states and c is the index of a given state ranging from 0 to N-1. The default probabilities are all set to be equal whereas the angles are given a range of values so that no 2 states are equal at the start of optimisation.") 1987 default_value_doc.add_paragraph("Note that setting the probability for state N will do nothing as it is equal to one minus all the other probabilities.") 1988
1989 - def default_value(self, param):
1990 """The default N-state model parameter values. 1991 1992 @param param: The N-state model parameter. 1993 @type param: str 1994 @return: The default value. 1995 @rtype: float 1996 """ 1997 1998 # Split the parameter into its base name and index. 1999 name = self.return_data_name(param) 2000 index = self._param_model_index(param) 2001 2002 # The number of states as a float. 2003 N = float(cdp.N) 2004 2005 # Probability. 2006 if name == 'probs': 2007 return 1.0 / N 2008 2009 # Euler angles. 2010 elif name == 'alpha' or name == 'beta' or name == 'gamma': 2011 return (float(index)+1) * pi / (N+1.0)
2012 2013
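The default value formulas can be checked numerically: every probability defaults to 1/N, and the Euler angles of state c default to (c+1)*pi/(N+1) so that no two states start from the same orientation. A quick standalone check:

from math import pi

N = 4   # number of states.

# Probabilities: every state starts with an equal weight.
probs = [1.0 / N for c in range(N)]

# Euler angles: state c gets (c+1)*pi/(N+1), spreading the states out.
alphas = [(c + 1) * pi / (N + 1) for c in range(N)]

print(probs)    # [0.25, 0.25, 0.25, 0.25]
print(alphas)   # [0.628..., 1.256..., 1.884..., 2.513...]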
2014 - def grid_search(self, lower=None, upper=None, inc=None, constraints=False, verbosity=0, sim_index=None):
2015 """The grid search function. 2016 2017 @param lower: The lower bounds of the grid search which must be equal to the number of parameters in the model. 2018 @type lower: array of numbers 2019 @param upper: The upper bounds of the grid search which must be equal to the number of parameters in the model. 2020 @type upper: array of numbers 2021 @param inc: The increments for each dimension of the space for the grid search. The number of elements in the array must equal to the number of parameters in the model. 2022 @type inc: array of int 2023 @param constraints: If True, constraints are applied during the grid search (elinating parts of the grid). If False, no constraints are used. 2024 @type constraints: bool 2025 @param verbosity: A flag specifying the amount of information to print. The higher the value, the greater the verbosity. 2026 @type verbosity: int 2027 """ 2028 2029 # Test if the N-state model has been set up. 2030 if not hasattr(cdp, 'model'): 2031 raise RelaxNoModelError('N-state') 2032 2033 # The number of parameters. 2034 n = self._param_num() 2035 2036 # Make sure that the length of the parameter array is > 0. 2037 if n == 0: 2038 print("Cannot run a grid search on a model with zero parameters, skipping the grid search.") 2039 return 2040 2041 # Test the grid search options. 2042 self.test_grid_ops(lower=lower, upper=upper, inc=inc, n=n) 2043 2044 # If inc is a single int, convert it into an array of that value. 2045 if isinstance(inc, int): 2046 inc = [inc]*n 2047 2048 # Setup the default bounds. 2049 if not lower: 2050 # Init. 2051 lower = [] 2052 upper = [] 2053 2054 # Loop over the parameters. 2055 for i in range(n): 2056 # i is in the parameter array. 2057 if i < len(cdp.params): 2058 # Probabilities (default values). 2059 if search('^p', cdp.params[i]): 2060 lower.append(0.0) 2061 upper.append(1.0) 2062 2063 # Angles (default values). 2064 if search('^alpha', cdp.params[i]) or search('^gamma', cdp.params[i]): 2065 lower.append(0.0) 2066 upper.append(2*pi) 2067 elif search('^beta', cdp.params[i]): 2068 lower.append(0.0) 2069 upper.append(pi) 2070 2071 # The paramagnetic centre. 2072 elif hasattr(cdp, 'paramag_centre_fixed') and not cdp.paramag_centre_fixed and (n - i) <= 3: 2073 lower.append(-100) 2074 upper.append(100) 2075 2076 # Otherwise this must be an alignment tensor component. 2077 else: 2078 lower.append(-1e-3) 2079 upper.append(1e-3) 2080 2081 # Determine the data type. 2082 data_types = self._base_data_types() 2083 2084 # The number of tensors to optimise. 2085 tensor_num = align_tensor.num_tensors(skip_fixed=True) 2086 2087 # Custom sub-grid search for when only tensors are optimised (as each tensor is independent, the number of points collapses from inc**(5*N) to N*inc**5). 2088 if cdp.model == 'fixed' and tensor_num > 1 and ('rdc' in data_types or 'pcs' in data_types) and not align_tensor.all_tensors_fixed() and hasattr(cdp, 'paramag_centre_fixed') and cdp.paramag_centre_fixed: 2089 # Print out. 2090 print("Optimising each alignment tensor separately.") 2091 2092 # Store the alignment tensor fixed flags. 2093 fixed_flags = [] 2094 for i in range(len(cdp.align_ids)): 2095 # Get the tensor object. 2096 tensor = align_tensor.return_tensor(index=i, skip_fixed=False) 2097 2098 # Store the flag. 2099 fixed_flags.append(tensor.fixed) 2100 2101 # Fix the tensor. 2102 tensor.set('fixed', True) 2103 2104 # Loop over each sub-grid. 2105 for i in range(len(cdp.align_ids)): 2106 # Skip the tensor if originally fixed. 
2107 if fixed_flags[i]: 2108 continue 2109 2110 # Get the tensor object. 2111 tensor = align_tensor.return_tensor(index=i, skip_fixed=False) 2112 2113 # Unfix the current tensor. 2114 tensor.set('fixed', False) 2115 2116 # Grid search parameter subsets. 2117 lower_sub = lower[i*5:i*5+5] 2118 upper_sub = upper[i*5:i*5+5] 2119 inc_sub = inc[i*5:i*5+5] 2120 2121 # Minimisation of the sub-grid. 2122 self.minimise(min_algor='grid', lower=lower_sub, upper=upper_sub, inc=inc_sub, constraints=constraints, verbosity=verbosity, sim_index=sim_index) 2123 2124 # Fix the tensor again. 2125 tensor.set('fixed', True) 2126 2127 # Reset the state of the tensors. 2128 for i in range(len(cdp.align_ids)): 2129 # Get the tensor object. 2130 tensor = align_tensor.return_tensor(index=i, skip_fixed=False) 2131 2132 # Fix the tensor. 2133 tensor.set('fixed', fixed_flags[i]) 2134 2135 # All other minimisation. 2136 else: 2137 self.minimise(min_algor='grid', lower=lower, upper=upper, inc=inc, constraints=constraints, verbosity=verbosity, sim_index=sim_index)
2138 2139
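The saving from the custom sub-grid search mentioned above is easy to quantify: with N independent alignment tensors of 5 parameters each and inc increments per dimension, a single grid over all tensors costs inc**(5*N) evaluations, while optimising each tensor in its own sub-grid costs only N*inc**5. For example:

# Grid cost for N alignment tensors, 5 parameters each, inc points per dimension.
N, inc = 3, 11

full_grid = inc**(5 * N)    # one grid over all 15 parameters.
sub_grids = N * inc**5      # three independent 5-parameter grids.

print(full_grid)            # 4177248169415651 (about 4.2e15 points).
print(sub_grids)            # 483153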
2140 - def is_spin_param(self, name):
2141 """Determine whether the given parameter is spin specific. 2142 2143 @param name: The name of the parameter. 2144 @type name: str 2145 @return: False 2146 @rtype: bool 2147 """ 2148 2149 # Spin specific parameters. 2150 if name in ['r', 'heteronuc_type', 'proton_type']: 2151 return True 2152 2153 # All other parameters are global. 2154 return False
2155 2156
2157 - def map_bounds(self, param, spin_id=None):
2158 """Create bounds for the OpenDX mapping function. 2159 2160 @param param: The name of the parameter to return the lower and upper bounds of. 2161 @type param: str 2162 @param spin_id: The spin identification string (unused). 2163 @type spin_id: None 2164 @return: The upper and lower bounds of the parameter. 2165 @rtype: list of float 2166 """ 2167 2168 # Paramagnetic centre. 2169 if search('^paramag_[xyz]$', param): 2170 return [-100.0, 100.0]
2171 2172
2173 - def minimise(self, min_algor=None, min_options=None, func_tol=None, grad_tol=None, max_iterations=None, constraints=False, scaling=True, verbosity=0, sim_index=None, lower=None, upper=None, inc=None):
2174 """Minimisation function. 2175 2176 @param min_algor: The minimisation algorithm to use. 2177 @type min_algor: str 2178 @param min_options: An array of options to be used by the minimisation algorithm. 2179 @type min_options: array of str 2180 @param func_tol: The function tolerance which, when reached, terminates optimisation. Setting this to None turns of the check. 2181 @type func_tol: None or float 2182 @param grad_tol: The gradient tolerance which, when reached, terminates optimisation. Setting this to None turns of the check. 2183 @type grad_tol: None or float 2184 @param max_iterations: The maximum number of iterations for the algorithm. 2185 @type max_iterations: int 2186 @param constraints: If True, constraints are used during optimisation. 2187 @type constraints: bool 2188 @param scaling: If True, diagonal scaling is enabled during optimisation to allow the problem to be better conditioned. 2189 @type scaling: bool 2190 @param verbosity: A flag specifying the amount of information to print. The higher the value, the greater the verbosity. 2191 @type verbosity: int 2192 @param sim_index: The index of the simulation to optimise. This should be None if normal optimisation is desired. 2193 @type sim_index: None or int 2194 @keyword lower: The lower bounds of the grid search which must be equal to the number of parameters in the model. This optional argument is only used when doing a grid search. 2195 @type lower: array of numbers 2196 @keyword upper: The upper bounds of the grid search which must be equal to the number of parameters in the model. This optional argument is only used when doing a grid search. 2197 @type upper: array of numbers 2198 @keyword inc: The increments for each dimension of the space for the grid search. The number of elements in the array must equal to the number of parameters in the model. This argument is only used when doing a grid search. 2199 @type inc: array of int 2200 """ 2201 2202 # Set up the target function for direct calculation. 2203 model, param_vector, data_types, scaling_matrix = self._target_fn_setup(sim_index=sim_index, scaling=scaling) 2204 2205 # Nothing to do! 2206 if not len(param_vector): 2207 warn(RelaxWarning("The model has no parameters, minimisation cannot be performed.")) 2208 return 2209 2210 # Right, constraints cannot be used for the 'fixed' model. 2211 if constraints and cdp.model == 'fixed': 2212 warn(RelaxWarning("Turning constraints off. These cannot be used for the 'fixed' model.")) 2213 constraints = False 2214 2215 # Pop out the Method of Multipliers algorithm. 2216 if min_algor == 'Method of Multipliers': 2217 min_algor = min_options[0] 2218 min_options = min_options[1:] 2219 2220 # And constraints absolutely must be used for the 'population' model. 2221 if not constraints and cdp.model == 'population': 2222 warn(RelaxWarning("Turning constraints on. These absolutely must be used for the 'population' model.")) 2223 constraints = True 2224 2225 # Add the Method of Multipliers algorithm. 2226 min_options = (min_algor,) + min_options 2227 min_algor = 'Method of Multipliers' 2228 2229 # Disallow Newton optimisation and other Hessian optimisers for the paramagnetic centre position optimisation (the PCS Hessian is not yet implemented). 2230 if hasattr(cdp, 'paramag_centre_fixed') and not cdp.paramag_centre_fixed: 2231 if min_algor in ['newton']: 2232 raise RelaxError("For the paramagnetic centre position, as the Hessians are not yet implemented Newton optimisation cannot be performed.") 2233 2234 # Linear constraints. 
2235 if constraints: 2236 A, b = self._linear_constraints(data_types=data_types, scaling_matrix=scaling_matrix) 2237 else: 2238 A, b = None, None 2239 2240 # Grid search. 2241 if search('^[Gg]rid', min_algor): 2242 # Scaling. 2243 if scaling: 2244 for i in range(len(param_vector)): 2245 lower[i] = lower[i] / scaling_matrix[i, i] 2246 upper[i] = upper[i] / scaling_matrix[i, i] 2247 2248 # The search. 2249 results = grid(func=model.func, args=(), num_incs=inc, lower=lower, upper=upper, A=A, b=b, verbosity=verbosity) 2250 2251 # Unpack the results. 2252 param_vector, func, iter_count, warning = results 2253 f_count = iter_count 2254 g_count = 0.0 2255 h_count = 0.0 2256 2257 # Minimisation. 2258 else: 2259 results = generic_minimise(func=model.func, dfunc=model.dfunc, d2func=model.d2func, args=(), x0=param_vector, min_algor=min_algor, min_options=min_options, func_tol=func_tol, grad_tol=grad_tol, maxiter=max_iterations, A=A, b=b, full_output=1, print_flag=verbosity) 2260 2261 # Unpack the results. 2262 if results == None: 2263 return 2264 param_vector, func, iter_count, f_count, g_count, h_count, warning = results 2265 2266 # Catch infinite chi-squared values. 2267 if isInf(func): 2268 raise RelaxInfError('chi-squared') 2269 2270 # Catch chi-squared values of NaN. 2271 if isNaN(func): 2272 raise RelaxNaNError('chi-squared') 2273 2274 # Make a last function call to update the back-calculated RDC and PCS structures to the optimal values. 2275 chi2 = model.func(param_vector) 2276 2277 # Scaling. 2278 if scaling: 2279 param_vector = dot(scaling_matrix, param_vector) 2280 2281 # Disassemble the parameter vector. 2282 self._disassemble_param_vector(param_vector=param_vector, data_types=data_types, sim_index=sim_index) 2283 2284 # Monte Carlo minimisation statistics. 2285 if sim_index != None: 2286 # Chi-squared statistic. 2287 cdp.chi2_sim[sim_index] = func 2288 2289 # Iterations. 2290 cdp.iter_sim[sim_index] = iter_count 2291 2292 # Function evaluations. 2293 cdp.f_count_sim[sim_index] = f_count 2294 2295 # Gradient evaluations. 2296 cdp.g_count_sim[sim_index] = g_count 2297 2298 # Hessian evaluations. 2299 cdp.h_count_sim[sim_index] = h_count 2300 2301 # Warning. 2302 cdp.warning_sim[sim_index] = warning 2303 2304 # Normal statistics. 2305 else: 2306 # Chi-squared statistic. 2307 cdp.chi2 = func 2308 2309 # Iterations. 2310 cdp.iter = iter_count 2311 2312 # Function evaluations. 2313 cdp.f_count = f_count 2314 2315 # Gradient evaluations. 2316 cdp.g_count = g_count 2317 2318 # Hessian evaluations. 2319 cdp.h_count = h_count 2320 2321 # Warning. 2322 cdp.warning = warning 2323 2324 # Statistical analysis. 2325 if sim_index == None and ('rdc' in data_types or 'pcs' in data_types): 2326 # Get the final back calculated data (for the Q-factor and 2327 self._minimise_bc_data(model) 2328 2329 # Calculate the RDC Q-factors. 2330 if 'rdc' in data_types: 2331 rdc.q_factors() 2332 2333 # Calculate the PCS Q-factors. 2334 if 'pcs' in data_types: 2335 pcs.q_factors()
2336 2337
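Two details of the minimisation wrapper are generic enough to sketch in isolation: when diagonal scaling is active, the user-supplied grid bounds must be divided by the same per-parameter scale factors as the parameter vector, and the final chi-squared value is checked for infinite and NaN values before being stored. A minimal illustration using plain Python and numpy (not the minfx or relax calls themselves):

from math import isinf, isnan
from numpy import array, diag, float64

lower = array([0.0, -1.0], float64)
upper = array([1.0,  1.0], float64)
scaling_matrix = diag(array([1.0, 1e-3], float64))

# Move the bounds into the same scaled space as the parameter vector.
for i in range(len(lower)):
    lower[i] = lower[i] / scaling_matrix[i, i]
    upper[i] = upper[i] / scaling_matrix[i, i]

# Guard against a meaningless optimisation result.
chi2 = 42.0
if isinf(chi2):
    raise ValueError("Infinite chi-squared value.")
if isnan(chi2):
    raise ValueError("NaN chi-squared value.")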
2338 - def model_statistics(self, model_info=None, spin_id=None, global_stats=None):
2339 """Return the k, n, and chi2 model statistics. 2340 2341 k - number of parameters. 2342 n - number of data points. 2343 chi2 - the chi-squared value. 2344 2345 2346 @keyword model_info: The data returned from model_loop() (unused). 2347 @type model_info: None 2348 @keyword spin_id: The spin identification string. This is ignored in the N-state model. 2349 @type spin_id: None or str 2350 @keyword global_stats: A parameter which determines if global or local statistics are returned. For the N-state model, this argument is ignored. 2351 @type global_stats: None or bool 2352 @return: The optimisation statistics, in tuple format, of the number of parameters (k), the number of data points (n), and the chi-squared value (chi2). 2353 @rtype: tuple of (int, int, float) 2354 """ 2355 2356 # Return the values. 2357 return self._param_num(), self._num_data_points(), cdp.chi2
2358 2359
2360 - def return_data(self, data_id):
2361 """Return the base data for the given data ID. 2362 2363 @keyword data_id: The list of spin ID, data type, and alignment ID, as yielded by the base_data_loop() generator method. 2364 @type data_id: list of str 2365 @return: The base data. 2366 @rtype: list of (float or None) 2367 """ 2368 2369 # Alias the spin or interatomic data container, data type and alignment ID. 2370 container = data_id[0] 2371 data_type = data_id[1] 2372 align_id = data_id[2] 2373 2374 # The data structure to return. 2375 data = [] 2376 2377 # Skip deselected spins. 2378 if data_id[1] == 'pcs' and not container.select: 2379 return 2380 2381 # Return the RDC data. 2382 if data_type == 'rdc' and hasattr(container, 'rdc'): 2383 if align_id not in container.rdc: 2384 data.append(None) 2385 else: 2386 data.append(container.rdc[align_id]) 2387 2388 # Return the NOESY data. 2389 elif data_type == 'noesy' and hasattr(container, 'noesy'): 2390 data.append(container.noesy) 2391 2392 # Return the PCS data. 2393 elif data_id[1] == 'pcs' and hasattr(container, 'pcs'): 2394 if align_id not in container.pcs: 2395 data.append(None) 2396 else: 2397 data.append(container.pcs[align_id]) 2398 2399 # Return the data. 2400 return data
2401 2402 2403 return_data_name_doc = Desc_container("N-state model data type string matching patterns") 2404 _table = uf_tables.add_table(label="table: N-state data type patterns", caption="N-state model data type string matching patterns.") 2405 _table.add_headings(["Data type", "Object name", "Patterns"]) 2406 _table.add_row(["Probabilities", "'probs'", "'p0', 'p1', 'p2', ..., 'pN'"]) 2407 _table.add_row(["Euler angle alpha", "'alpha'", "'alpha0', 'alpha1', ..."]) 2408 _table.add_row(["Euler angle beta", "'beta'", "'beta0', 'beta1', ..."]) 2409 _table.add_row(["Euler angle gamma", "'gamma'", "'gamma0', 'gamma1', ..."]) 2410 _table.add_row(["Bond length", "'r'", "'^r$' or '[Bb]ond[ -_][Ll]ength'"]) 2411 _table.add_row(["Heteronucleus type", "'heteronuc_type'", "'^[Hh]eteronucleus$'"]) 2412 _table.add_row(["Proton type", "'proton_type'", "'^[Pp]roton$'"]) 2413 return_data_name_doc.add_table(_table.label) 2414 return_data_name_doc.add_paragraph("The objects corresponding to the object names are lists (or arrays) with each element corresponding to one state.") 2415 
2416 - def return_data_name(self, param):
2417 """Return a unique identifying string for the N-state model parameter. 2418 2419 @param param: The N-state model parameter. 2420 @type param: str 2421 @return: The unique parameter identifying string. 2422 @rtype: str 2423 """ 2424 2425 # Probability. 2426 if search('^p[0-9]*$', param): 2427 return 'probs' 2428 2429 # Alpha Euler angle. 2430 if search('^alpha', param): 2431 return 'alpha' 2432 2433 # Beta Euler angle. 2434 if search('^beta', param): 2435 return 'beta' 2436 2437 # Gamma Euler angle. 2438 if search('^gamma', param): 2439 return 'gamma' 2440 2441 # Bond length. 2442 if search('^r$', param) or search('[Bb]ond[ -_][Ll]ength', param): 2443 return 'r' 2444 2445 # Heteronucleus type. 2446 if param == 'heteronuc_type': 2447 return 'heteronuc_type' 2448 2449 # Proton type. 2450 if param == 'proton_type': 2451 return 'proton_type' 2452 2453 # Paramagnetic centre. 2454 if search('^paramag_[xyz]$', param): 2455 return param
2456 2457
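The mapping from user-facing parameter strings to base object names is pure regular expression matching, which a few standalone lines can demonstrate (the function below is a hypothetical mirror of return_data_name(), not a replacement for it):

from re import search

def data_name(param):
    if search('^p[0-9]*$', param):
        return 'probs'
    for base in ('alpha', 'beta', 'gamma'):
        if search('^' + base, param):
            return base
    if search('^paramag_[xyz]$', param):
        return param
    return None

print(data_name('p3'))          # probs
print(data_name('gamma2'))      # gamma
print(data_name('paramag_x'))   # paramag_x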
2458 - def return_error(self, data_id=None):
2459 """Create and return the spin specific Monte Carlo Ri error structure. 2460 2461 @keyword data_id: The list of spin ID, data type, and alignment ID, as yielded by the base_data_loop() generator method. 2462 @type data_id: str 2463 @return: The Monte Carlo simulation data errors. 2464 @rtype: list of float 2465 """ 2466 2467 # Initialise the MC data structure. 2468 mc_errors = [] 2469 2470 # Alias the spin or interatomic data container. 2471 container = data_id[0] 2472 2473 # Skip deselected spins. 2474 if data_id[1] == 'pcs' and not container.select: 2475 return 2476 2477 # RDC data. 2478 if data_id[1] == 'rdc' and hasattr(container, 'rdc'): 2479 # Do errors exist? 2480 if not hasattr(container, 'rdc_err'): 2481 raise RelaxError("The RDC errors are missing for the spin pair '%s' and '%s'." % (container.spin_id1, container.spin_id2)) 2482 2483 # The error. 2484 if data_id[2] not in container.rdc_err: 2485 err = None 2486 else: 2487 err = container.rdc_err[data_id[2]] 2488 2489 # Append the data. 2490 mc_errors.append(err) 2491 2492 # NOESY data. 2493 elif data_id[1] == 'noesy' and hasattr(container, 'noesy'): 2494 # Do errors exist? 2495 if not hasattr(container, 'noesy_err'): 2496 raise RelaxError("The NOESY errors are missing for the spin pair '%s' and '%s'." % (container.spin_id1, container.spin_id2)) 2497 2498 # Append the data. 2499 mc_errors.append(container.noesy_err) 2500 2501 # PCS data. 2502 elif data_id[1] == 'pcs' and hasattr(container, 'pcs'): 2503 # Do errors exist? 2504 if not hasattr(container, 'pcs_err'): 2505 raise RelaxError("The PCS errors are missing for spin '%s'." % data_id[0]) 2506 2507 # The error. 2508 if data_id[2] not in container.pcs_err: 2509 err = None 2510 else: 2511 err = container.pcs_err[data_id[2]] 2512 2513 # Append the data. 2514 mc_errors.append(err) 2515 2516 # Return the errors. 2517 return mc_errors
2518 2519
2520 - def return_grace_string(self, param):
2521 """Return the Grace string representation of the parameter. 2522 2523 This is used for axis labelling. 2524 2525 @param param: The specific analysis parameter. 2526 @type param: str 2527 @return: The Grace string representation of the parameter. 2528 @rtype: str 2529 """ 2530 2531 # The measured PCS. 2532 if param == 'pcs': 2533 return "Measured PCS" 2534 2535 # The back-calculated PCS. 2536 if param == 'pcs_bc': 2537 return "Back-calculated PCS" 2538 2539 # The measured RDC. 2540 if param == 'rdc': 2541 return "Measured RDC" 2542 2543 # The back-calculated RDC. 2544 if param == 'rdc_bc': 2545 return "Back-calculated RDC"
2546 2547
2548 - def return_units(self, param):
2549 """Return a string representing the parameters units. 2550 2551 @param param: The name of the parameter to return the units string for. 2552 @type param: str 2553 @return: The parameter units string. 2554 @rtype: str 2555 """ 2556 2557 # PCSs. 2558 if param == 'pcs' or param == 'pcs_bc': 2559 return 'ppm' 2560 2561 # RDCs. 2562 if param == 'rdc' or param == 'rdc_bc': 2563 return 'Hz'
2564 2565 2566 set_doc = Desc_container("N-state model set details") 2567 set_doc.add_paragraph("Setting parameters for the N-state model is a little different from the other types of analyses as each state has a set of parameters with the same names as the other states. To set the parameters for a specific state c (ranging from 0 for the first to N-1 for the last), the number c should be added to the end of the parameter name. So the Euler angle gamma of the third state is specified using the string 'gamma2'.") 2568 2569 
2570 - def set_error(self, model_info, index, error):
2571 """Set the parameter errors. 2572 2573 @param model_info: The global model index originating from model_loop(). 2574 @type model_info: int 2575 @param index: The index of the parameter to set the errors for. 2576 @type index: int 2577 @param error: The error value. 2578 @type error: float 2579 """ 2580 2581 # Align parameters. 2582 names = ['Axx', 'Ayy', 'Axy', 'Axz', 'Ayz'] 2583 2584 # Alignment tensor parameters. 2585 if index < len(cdp.align_ids)*5: 2586 # The tensor and parameter index. 2587 param_index = index % 5 2588 tensor_index = (index - index % 5) / 5 2589 2590 # Set the error. 2591 tensor = align_tensor.return_tensor(index=tensor_index, skip_fixed=True) 2592 tensor.set(param=names[param_index], value=error, category='err') 2593 2594 # Return the object. 2595 return getattr(tensor, names[param_index]+'_err')
2596 2597
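The integer arithmetic in set_error() maps a flat parameter index onto a (tensor, component) pair, with the alignment tensors laid out five parameters at a time in the order Axx, Ayy, Axy, Axz, Ayz. A short standalone check of that mapping, using floor division to match the Python 2 behaviour of the expression above:

names = ['Axx', 'Ayy', 'Axy', 'Axz', 'Ayz']

def split_index(index):
    """Return (tensor_index, parameter_name) for a flat index over all tensors."""
    param_index = index % 5
    tensor_index = index // 5
    return tensor_index, names[param_index]

print(split_index(0))    # (0, 'Axx') - first tensor, first component.
print(split_index(7))    # (1, 'Axy') - second tensor, third component.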
2598 - def set_param_values(self, param=None, value=None, spin_id=None, force=True):
2599 """Set the N-state model parameter values. 2600 2601 @keyword param: The parameter name list. 2602 @type param: list of str 2603 @keyword value: The parameter value list. 2604 @type value: list 2605 @keyword spin_id: The spin identification string (unused). 2606 @type spin_id: None 2607 @keyword force: A flag which if True will cause current values to be overwritten. If False, a RelaxError will raised if the parameter value is already set. 2608 @type force: bool 2609 """ 2610 2611 # Checks. 2612 arg_check.is_str_list(param, 'parameter name') 2613 arg_check.is_list(value, 'parameter value') 2614 2615 # Loop over the parameters. 2616 for i in range(len(param)): 2617 # Get the object's name. 2618 obj_name = self.return_data_name(param[i]) 2619 2620 # Is the parameter is valid? 2621 if not obj_name: 2622 raise RelaxError("The parameter '%s' is not valid for this data pipe type." % param[i]) 2623 2624 # Set the indexed parameter. 2625 if obj_name in ['probs', 'alpha', 'beta', 'gamma']: 2626 # The index. 2627 index = self._param_model_index(param[i]) 2628 2629 # Set. 2630 obj = getattr(cdp, obj_name) 2631 obj[index] = value[i] 2632 2633 # The paramagnetic centre. 2634 if search('^paramag_[xyz]$', obj_name): 2635 # Init. 2636 if not hasattr(cdp, 'paramagnetic_centre'): 2637 cdp.paramagnetic_centre = zeros(3, float64) 2638 2639 # Set the coordinate. 2640 if obj_name == 'paramag_x': 2641 index = 0 2642 elif obj_name == 'paramag_y': 2643 index = 1 2644 else: 2645 index = 2 2646 2647 # Set the value in Angstrom. 2648 cdp.paramagnetic_centre[index] = value[i] 2649 2650 # Set the spin parameters. 2651 else: 2652 for spin in spin_loop(spin_id): 2653 setattr(spin, obj_name, value[i])
2654 2655
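Per-state parameters are addressed by appending the state number to the parameter name, so 'gamma2' is the gamma angle of the third state, and setting it amounts to peeling the trailing integer off the string and indexing the corresponding list. A sketch of that step (a hypothetical helper; relax does this via _param_model_index()):

from re import search

def param_model_index(param):
    """Return the trailing state number of a parameter name, e.g. 'gamma2' -> 2."""
    match = search('[0-9]+$', param)
    return int(match.group()) if match else None

gamma = [None, None, None, None]    # one value per state, N = 4.
gamma[param_model_index('gamma2')] = 1.57

print(gamma)    # [None, None, 1.57, None]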
2656 - def sim_init_values(self):
2657 """Initialise the Monte Carlo parameter values.""" 2658 2659 # Get the minimisation statistic object names. 2660 sim_names = self.data_names(set='min') 2661 2662 # Add the paramagnetic centre, if optimised. 2663 if hasattr(cdp, 'paramag_centre_fixed') and not cdp.paramag_centre_fixed: 2664 sim_names += ['paramagnetic_centre'] 2665 2666 # Alignments. 2667 if hasattr(cdp, 'align_tensors'): 2668 # The parameter names. 2669 names = ['Axx', 'Ayy', 'Axy', 'Axz', 'Ayz'] 2670 2671 # Loop over the alignments, adding the alignment tensor parameters to the tensor data container. 2672 for i in range(len(cdp.align_tensors)): 2673 # Skip non-optimised tensors. 2674 if not self._opt_tensor(cdp.align_tensors[i]): 2675 continue 2676 2677 # Set up the number of simulations. 2678 cdp.align_tensors[i].set_sim_num(cdp.sim_number) 2679 2680 # Loop over all the parameter names, setting the initial simulation values to those of the parameter value. 2681 for object_name in names: 2682 for j in range(cdp.sim_number): 2683 cdp.align_tensors[i].set(param=object_name, value=deepcopy(getattr(cdp.align_tensors[i], object_name)), category='sim', sim_index=j) 2684 2685 # Create all other simulation objects. 2686 for object_name in sim_names: 2687 # Name for the simulation object. 2688 sim_object_name = object_name + '_sim' 2689 2690 # Create the simulation object. 2691 setattr(cdp, sim_object_name, []) 2692 2693 # Get the simulation object. 2694 sim_object = getattr(cdp, sim_object_name) 2695 2696 # Loop over the simulations. 2697 for j in range(cdp.sim_number): 2698 # Append None to fill the structure. 2699 sim_object.append(None) 2700 2701 # Set the simulation paramagnetic centre positions to the optimised values. 2702 if hasattr(cdp, 'paramag_centre_fixed') and not cdp.paramag_centre_fixed: 2703 for j in range(cdp.sim_number): 2704 cdp.paramagnetic_centre_sim[j] = deepcopy(cdp.paramagnetic_centre)
2705 2706
2707 - def sim_pack_data(self, data_id, sim_data):
2708 """Pack the Monte Carlo simulation data. 2709 2710 @keyword data_id: The list of spin ID, data type, and alignment ID, as yielded by the base_data_loop() generator method. 2711 @type data_id: list of str 2712 @param sim_data: The Monte Carlo simulation data. 2713 @type sim_data: list of float 2714 """ 2715 2716 # Alias the spin or interatomic data container. 2717 container = data_id[0] 2718 2719 # RDC data. 2720 if data_id[1] == 'rdc' and hasattr(container, 'rdc'): 2721 # Initialise. 2722 if not hasattr(container, 'rdc_sim'): 2723 container.rdc_sim = {} 2724 2725 # Store the data structure. 2726 container.rdc_sim[data_id[2]] = [] 2727 for i in range(cdp.sim_number): 2728 container.rdc_sim[data_id[2]].append(sim_data[i][0]) 2729 2730 # NOESY data. 2731 elif data_id[1] == 'noesy' and hasattr(container, 'noesy'): 2732 # Store the data structure. 2733 container.noesy_sim = [] 2734 for i in range(cdp.sim_number): 2735 container.noesy_sim[data_id[2]].append(sim_data[i][0]) 2736 2737 # PCS data. 2738 elif data_id[1] == 'pcs' and hasattr(container, 'pcs'): 2739 # Initialise. 2740 if not hasattr(container, 'pcs_sim'): 2741 container.pcs_sim = {} 2742 2743 # Store the data structure. 2744 container.pcs_sim[data_id[2]] = [] 2745 for i in range(cdp.sim_number): 2746 container.pcs_sim[data_id[2]].append(sim_data[i][0])
2747 2748
2749 - def sim_return_param(self, model_info, index):
2750 """Return the array of simulation parameter values. 2751 2752 @param model_info: The global model index originating from model_loop(). 2753 @type model_info: int 2754 @param index: The index of the parameter to return the array of values for. 2755 @type index: int 2756 @return: The array of simulation parameter values. 2757 @rtype: list of float 2758 """ 2759 2760 # Align parameters. 2761 names = ['Axx', 'Ayy', 'Axy', 'Axz', 'Ayz'] 2762 2763 # Alignment tensor parameters. 2764 if index < align_tensor.num_tensors(skip_fixed=True)*5: 2765 # The tensor and parameter index. 2766 param_index = index % 5 2767 tensor_index = (index - index % 5) / 5 2768 2769 # Return the simulation parameter array. 2770 tensor = align_tensor.return_tensor(index=tensor_index, skip_fixed=True) 2771 return getattr(tensor, names[param_index]+'_sim')
2772