
Source Code for Module specific_fns.n_state_model

   1  ############################################################################### 
   2  #                                                                             # 
   3  # Copyright (C) 2007-2012 Edward d'Auvergne                                   # 
   4  #                                                                             # 
   5  # This file is part of the program relax (http://www.nmr-relax.com).          # 
   6  #                                                                             # 
   7  # This program is free software: you can redistribute it and/or modify        # 
   8  # it under the terms of the GNU General Public License as published by        # 
   9  # the Free Software Foundation, either version 3 of the License, or           # 
  10  # (at your option) any later version.                                         # 
  11  #                                                                             # 
  12  # This program is distributed in the hope that it will be useful,             # 
  13  # but WITHOUT ANY WARRANTY; without even the implied warranty of              # 
  14  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the               # 
  15  # GNU General Public License for more details.                                # 
  16  #                                                                             # 
  17  # You should have received a copy of the GNU General Public License           # 
  18  # along with this program.  If not, see <http://www.gnu.org/licenses/>.       # 
  19  #                                                                             # 
  20  ############################################################################### 
  21   
  22  # Module docstring. 
  23  """Module for the specific analysis of the N-state dynamic model.""" 
  24   
  25  # Python module imports. 
  26  from copy import deepcopy 
  27  from math import acos, cos, pi 
  28  from minfx.generic import generic_minimise 
  29  from minfx.grid import grid 
  30  from numpy import array, dot, float64, identity, ones, zeros 
  31  from numpy.linalg import inv, norm 
  32  from re import search 
  33  from warnings import warn 
  34   
  35  # relax module imports. 
  36  import arg_check 
  37  from float import isNaN, isInf 
  38  from generic_fns import align_tensor, pcs, pipes, rdc 
  39  from generic_fns.interatomic import interatomic_loop 
  40  from generic_fns.mol_res_spin import return_spin, spin_loop 
  41  from generic_fns.structure import geometric 
  42  from generic_fns.structure.cones import Iso_cone 
  43  from generic_fns.structure.internal import Internal 
  44  from generic_fns.structure.mass import centre_of_mass 
  45  from maths_fns.n_state_model import N_state_opt 
  46  from maths_fns.potential import quad_pot 
  47  from maths_fns.rotation_matrix import euler_to_R_zyz, two_vect_to_R 
  48  from physical_constants import dipolar_constant, g1H, return_gyromagnetic_ratio 
  49  from relax_errors import RelaxError, RelaxInfError, RelaxNaNError, RelaxNoModelError, RelaxNoValueError, RelaxSpinTypeError 
  50  from relax_io import open_write_file 
  51  from relax_warnings import RelaxWarning 
  52  from specific_fns.api_base import API_base 
  53  from specific_fns.api_common import API_common 
  54  from user_functions.data import Uf_tables; uf_tables = Uf_tables() 
  55  from user_functions.objects import Desc_container 
  56   
  57   
58 -class N_state_model(API_base, API_common):
59 """Class containing functions for the N-state model.""" 60
61 - def __init__(self):
62 """Initialise the class by placing API_common methods into the API.""" 63 64 # Execute the base class __init__ method. 65 super(N_state_model, self).__init__() 66 67 # Place methods into the API. 68 self.model_loop = self._model_loop_single_global 69 self.overfit_deselect = self._overfit_deselect_dummy 70 self.return_conversion_factor = self._return_no_conversion_factor 71 self.set_selected_sim = self._set_selected_sim_global 72 self.sim_return_selected = self._sim_return_selected_global 73 self.test_grid_ops = self._test_grid_ops_general 74 75 # Set up the spin parameters. 76 self.PARAMS.add('csa', scope='spin', units='ppm', desc='CSA value', py_type=float, grace_string='\\qCSA\\Q') 77 78 # Add the minimisation data. 79 self.PARAMS.add_min_data(min_stats_global=False, min_stats_spin=True)
80 81
82 - def _assemble_param_vector(self, sim_index=None):
83 """Assemble all the parameters of the model into a single array. 84 85 @param sim_index: The index of the simulation to optimise. This should be None if 86 normal optimisation is desired. 87 @type sim_index: None or int 88 @return: The parameter vector used for optimisation. 89 @rtype: numpy array 90 """ 91 92 # Test if the model is selected. 93 if not hasattr(cdp, 'model') or not isinstance(cdp.model, str): 94 raise RelaxNoModelError 95 96 # Determine the data type. 97 data_types = self._base_data_types() 98 99 # Initialise the parameter vector. 100 param_vector = [] 101 102 # A RDC or PCS data type requires the alignment tensors to be at the start of the parameter vector (unless the tensors are fixed). 103 if ('rdc' in data_types or 'pcs' in data_types) and not align_tensor.all_tensors_fixed(): 104 # Loop over the alignments, adding the alignment tensor parameters to the parameter vector. 105 for i in range(len(cdp.align_tensors)): 106 # No alignment ID, so skip the tensor as it will not be optimised. 107 if cdp.align_tensors[i].name not in cdp.align_ids: 108 continue 109 110 # Fixed tensor. 111 if cdp.align_tensors[i].fixed: 112 continue 113 114 # Add the parameters. 115 param_vector = param_vector + list(cdp.align_tensors[i].A_5D) 116 117 # Monte Carlo simulation data structures. 118 if sim_index != None: 119 # Populations. 120 if cdp.model in ['2-domain', 'population']: 121 probs = cdp.probs_sim[sim_index] 122 123 # Euler angles. 124 if cdp.model == '2-domain': 125 alpha = cdp.alpha_sim[sim_index] 126 beta = cdp.beta_sim[sim_index] 127 gamma = cdp.gamma_sim[sim_index] 128 129 # Normal data structures. 130 else: 131 # Populations. 132 if cdp.model in ['2-domain', 'population']: 133 probs = cdp.probs 134 135 # Euler angles. 136 if cdp.model == '2-domain': 137 alpha = cdp.alpha 138 beta = cdp.beta 139 gamma = cdp.gamma 140 141 # The probabilities (exclude that of state N). 142 if cdp.model in ['2-domain', 'population']: 143 param_vector = param_vector + probs[0:-1] 144 145 # The Euler angles. 146 if cdp.model == '2-domain': 147 for i in range(cdp.N): 148 param_vector.append(alpha[i]) 149 param_vector.append(beta[i]) 150 param_vector.append(gamma[i]) 151 152 # The paramagnetic centre. 153 if hasattr(cdp, 'paramag_centre_fixed') and not cdp.paramag_centre_fixed: 154 if not hasattr(cdp, 'paramagnetic_centre'): 155 param_vector.append(0.0) 156 param_vector.append(0.0) 157 param_vector.append(0.0) 158 159 else: 160 param_vector.append(cdp.paramagnetic_centre[0]) 161 param_vector.append(cdp.paramagnetic_centre[1]) 162 param_vector.append(cdp.paramagnetic_centre[2]) 163 164 # Convert all None values to zero (to avoid conversion to NaN). 165 for i in range(len(param_vector)): 166 if param_vector[i] == None: 167 param_vector[i] = 0.0 168 169 # Return a numpy arrary. 170 return array(param_vector, float64)
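The method above packs the model parameters into one flat vector in a fixed order: five elements (Axx, Ayy, Axy, Axz, Ayz) per optimised alignment tensor, the first N-1 state probabilities, three Euler angles per state for the '2-domain' model, and finally the paramagnetic centre if it is not fixed. A minimal, self-contained sketch of that ordering, independent of relax's cdp data store (all names below are illustrative):

    from numpy import array, float64

    def pack_params(tensors_5d, probs, euler=None, centre=None):
        """Pack tensors, N-1 probabilities, optional angles and centre into one vector."""
        vector = []
        for A in tensors_5d:             # each A is [Axx, Ayy, Axy, Axz, Ayz]
            vector += list(A)
        vector += list(probs[:-1])       # the last state's probability is excluded
        for angles in (euler or []):     # (alpha, beta, gamma) per state
            vector += list(angles)
        if centre is not None:
            vector += list(centre)       # x, y, z of the paramagnetic centre
        return array([0.0 if v is None else v for v in vector], float64)

    print(pack_params([[1e-4, 2e-4, 0, 0, 0]], [0.7, 0.3]))    # 6 parameters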
171 172
173 - def _assemble_scaling_matrix(self, data_types=None, scaling=True):
174 """Create and return the scaling matrix. 175 176 @keyword data_types: The base data types used in the optimisation. This list can contain 177 the elements 'rdc', 'pcs' or 'tensor'. 178 @type data_types: list of str 179 @keyword scaling: If False, then the identity matrix will be returned. 180 @type scaling: bool 181 @return: The square and diagonal scaling matrix. 182 @rtype: numpy rank-2 array 183 """ 184 185 # Initialise. 186 scaling_matrix = identity(self._param_num(), float64) 187 188 # Return the identity matrix. 189 if not scaling: 190 return scaling_matrix 191 192 # Starting point of the populations. 193 pop_start = 0 194 if ('rdc' in data_types or 'pcs' in data_types) and not align_tensor.all_tensors_fixed(): 195 # Loop over the alignments. 196 tensor_num = 0 197 for i in range(len(cdp.align_tensors)): 198 # Fixed tensor. 199 if cdp.align_tensors[i].fixed: 200 continue 201 202 # Add the 5 alignment parameters. 203 pop_start = pop_start + 5 204 205 # The alignment parameters. 206 for j in range(5): 207 scaling_matrix[5*tensor_num + j, 5*tensor_num + j] = 1.0 208 209 # Increase the tensor number. 210 tensor_num += 1 211 212 # Loop over the populations, and set the scaling factor. 213 if cdp.model in ['2-domain', 'population']: 214 factor = 0.1 215 for i in range(pop_start, pop_start + (cdp.N-1)): 216 scaling_matrix[i, i] = factor 217 218 # The paramagnetic centre. 219 if hasattr(cdp, 'paramag_centre_fixed') and not cdp.paramag_centre_fixed: 220 for i in range(-3, 0): 221 scaling_matrix[i, i] = 1e2 222 223 # Return the matrix. 224 return scaling_matrix
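The scaling matrix built above is diagonal: tensor elements keep a factor of 1.0, each population parameter is scaled by 0.1, and the three paramagnetic centre coordinates (the final three diagonal elements) by 1e2. A small numpy sketch of the same construction (the parameter counts are only an example):

    from numpy import float64, identity

    n_tensor = 10         # e.g. 2 free alignment tensors x 5 elements
    n_pops = 3            # e.g. N - 1 = 3 population parameters
    centre_free = True    # paramagnetic centre optimised

    n = n_tensor + n_pops + (3 if centre_free else 0)
    scaling = identity(n, float64)
    scaling[n_tensor:n_tensor + n_pops] *= 0.1    # population scaling factor
    if centre_free:
        scaling[-3:] *= 1e2                       # paramagnetic centre scaling
    print(scaling.diagonal())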
225 226
227 - def _base_data_types(self):
228 """Determine all the base data types. 229 230 The base data types can include:: 231 - 'rdc', residual dipolar couplings. 232 - 'pcs', pseudo-contact shifts. 233 - 'noesy', NOE restraints. 234 - 'tensor', alignment tensors. 235 236 @return: A list of all the base data types. 237 @rtype: list of str 238 """ 239 240 # Array of data types. 241 list = [] 242 243 # RDC search. 244 for interatom in interatomic_loop(): 245 if hasattr(interatom, 'rdc'): 246 list.append('rdc') 247 break 248 249 # PCS search. 250 for spin in spin_loop(): 251 if hasattr(spin, 'pcs'): 252 list.append('pcs') 253 break 254 255 # Alignment tensor search. 256 if not ('rdc' in list or 'pcs' in list) and hasattr(cdp, 'align_tensors'): 257 list.append('tensor') 258 259 # NOESY data search. 260 if hasattr(cdp, 'noe_restraints'): 261 list.append('noesy') 262 263 # No data is present. 264 if not list: 265 raise RelaxError("Neither RDC, PCS, NOESY nor alignment tensor data is present.") 266 267 # Return the list. 268 return list
269 270
271 - def _calc_ave_dist(self, atom1, atom2, exp=1):
 272          """Calculate the average distances.
 273  
 274          The formula used is::
 275  
 276                            _N_
 277                           / 1  \                    \ 1/exp
 278                     <r> = | -   >  |p1i - p2i|^exp  |
 279                           \ N  /__                  /
 280                                  i
 281  
 282          where i are the members of the ensemble, N is the total number of structural models, and
 283          p1 and p2 are the two atom positions.
 284  
 285  
 286          @param atom1:   The atom identification string of the first atom.
 287          @type atom1:    str
 288          @param atom2:   The atom identification string of the second atom.
 289          @type atom2:    str
 290          @keyword exp:   The exponent used for the averaging, e.g. 1 for linear averaging and -6 for
 291                          r^-6 NOE averaging.
 292          @type exp:      int
 293          @return:        The average distance between the two atoms.
 294          @rtype:         float
 295          """
 296  
 297          # Get the spin containers.
 298          spin1 = return_spin(atom1)
 299          spin2 = return_spin(atom2)
 300  
 301          # Loop over each model.
 302          num_models = len(spin1.pos)
 303          ave_dist = 0.0
 304          for i in range(num_models):
 305              # Distance raised to the averaging exponent.
 306              dist = norm(spin1.pos[i] - spin2.pos[i])
 307              ave_dist = ave_dist + dist**(exp)
 308  
 309          # Average.
 310          ave_dist = ave_dist / num_models
 311  
 312          # The exponent.
 313          ave_dist = ave_dist**(1.0/exp)
 314  
 315          # Return the average distance.
 316          return ave_dist
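As a quick check of the averaging formula in the docstring above, here is a self-contained numpy version; pos1 and pos2 are hypothetical lists of per-model coordinates and exp=-6 gives the r^-6 NOE-style average:

    from numpy import array
    from numpy.linalg import norm

    def ave_dist(pos1, pos2, exp=1):
        """<r> = [ (1/N) * sum_i |p1i - p2i|^exp ]^(1/exp)."""
        dists = [norm(array(p1) - array(p2))**exp for p1, p2 in zip(pos1, pos2)]
        return (sum(dists) / len(dists))**(1.0 / exp)

    # Two models, 2 and 3 Angstrom apart: the r^-6 average leans towards the shorter distance.
    print(ave_dist([[0, 0, 0], [0, 0, 0]], [[2, 0, 0], [3, 0, 0]], exp=-6))    # ~2.21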
317 318
319 - def _CoM(self, pivot_point=None, centre=None):
320 """Centre of mass analysis. 321 322 This function does an analysis of the centre of mass (CoM) of the N states. This includes 323 calculating the order parameter associated with the pivot-CoM vector, and the associated 324 cone of motions. The pivot_point argument must be supplied. If centre is None, then the 325 CoM will be calculated from the selected parts of the loaded structure. Otherwise it will 326 be set to the centre arg. 327 328 @param pivot_point: The pivot point in the structural file(s). 329 @type pivot_point: list of float of length 3 330 @param centre: The optional centre of mass vector. 331 @type centre: list of float of length 3 332 """ 333 334 # Test if the current data pipe exists. 335 pipes.test() 336 337 # Set the pivot point. 338 cdp.pivot_point = pivot_point 339 340 # The centre has been supplied. 341 if centre: 342 cdp.CoM = centre 343 344 # Calculate from the structure file. 345 else: 346 cdp.CoM = centre_of_mass() 347 348 # Calculate the vector between the pivot and CoM points. 349 cdp.pivot_CoM = array(cdp.CoM, float64) - array(cdp.pivot_point, float64) 350 351 # Calculate the unit vector between the pivot and CoM points. 352 unit_vect = cdp.pivot_CoM / norm(cdp.pivot_CoM) 353 354 # Initilise some data structures. 355 R = zeros((3, 3), float64) 356 vectors = zeros((cdp.N, 3), float64) 357 358 # Loop over the N states. 359 for c in range(cdp.N): 360 # Generate the rotation matrix. 361 euler_to_R_zyz(cdp.alpha[c], cdp.beta[c], cdp.gamma[c], R) 362 363 # Rotate the unit vector. 364 vectors[c] = dot(R, unit_vect) 365 366 # Multiply by the probability. 367 vectors[c] = vectors[c] * cdp.probs[c] 368 369 # Average of the unit vectors. 370 cdp.ave_unit_pivot_CoM = sum(vectors) 371 372 # The length reduction. 373 cdp.ave_pivot_CoM_red = norm(cdp.ave_unit_pivot_CoM) 374 375 # The aveage pivot-CoM vector. 376 cdp.ave_pivot_CoM = norm(cdp.pivot_CoM) * cdp.ave_unit_pivot_CoM 377 378 # The full length rotated pivot-CoM vector. 379 cdp.full_ave_pivot_CoM = cdp.ave_pivot_CoM / cdp.ave_pivot_CoM_red 380 381 # The cone angle for diffusion on an axially symmetric cone. 382 cdp.theta_diff_on_cone = acos(cdp.ave_pivot_CoM_red) 383 cdp.S_diff_on_cone = (3.0*cos(cdp.theta_diff_on_cone)**2 - 1.0) / 2.0 384 385 # The cone angle and order parameter for diffusion in an axially symmetric cone. 386 cdp.theta_diff_in_cone = acos(2.*cdp.ave_pivot_CoM_red - 1.) 387 cdp.S_diff_in_cone = cos(cdp.theta_diff_in_cone) * (1 + cos(cdp.theta_diff_in_cone)) / 2.0 388 389 # Print out. 
 390          print("\n%-40s %-20s" % ("Pivot point:", repr(cdp.pivot_point)))
 391          print("%-40s %-20s" % ("Moving domain CoM (prior to rotation):", repr(cdp.CoM)))
 392          print("%-40s %-20s" % ("Pivot-CoM vector:", repr(cdp.pivot_CoM)))
 393          print("%-40s %-20s" % ("Pivot-CoM unit vector:", repr(unit_vect)))
 394          print("%-40s %-20s" % ("Average of the unit pivot-CoM vectors:", repr(cdp.ave_unit_pivot_CoM)))
 395          print("%-40s %-20s" % ("Average of the pivot-CoM vector:", repr(cdp.ave_pivot_CoM)))
 396          print("%-40s %-20s" % ("Full length rotated pivot-CoM vector:", repr(cdp.full_ave_pivot_CoM)))
 397          print("%-40s %-20s" % ("Length reduction from unity:", repr(cdp.ave_pivot_CoM_red)))
 398          print("%-40s %.5f rad (%.5f deg)" % ("Cone angle (diffusion on a cone)", cdp.theta_diff_on_cone, cdp.theta_diff_on_cone / (2*pi) *360.))
 399          print("%-40s S_cone = %.5f (S^2 = %.5f)" % ("S_cone (diffusion on a cone)", cdp.S_diff_on_cone, cdp.S_diff_on_cone**2))
 400          print("%-40s %.5f rad (%.5f deg)" % ("Cone angle (diffusion in a cone)", cdp.theta_diff_in_cone, cdp.theta_diff_in_cone / (2*pi) *360.))
 401          print("%-40s S_cone = %.5f (S^2 = %.5f)" % ("S_cone (diffusion in a cone)", cdp.S_diff_in_cone, cdp.S_diff_in_cone**2))
 402          print("\n\n")
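The two order parameters printed above follow directly from the length reduction of the averaged pivot-CoM unit vector: for diffusion on a cone the reduction equals cos(theta), while for diffusion in a cone it equals (1 + cos(theta))/2. A short numeric sketch (the reduction value of 0.9 is invented):

    from math import acos, cos, degrees

    red = 0.9    # example length reduction of the averaged unit vector

    # Diffusion on an axially symmetric cone.
    theta_on = acos(red)
    S_on = (3.0*cos(theta_on)**2 - 1.0) / 2.0

    # Diffusion in an axially symmetric cone.
    theta_in = acos(2.0*red - 1.0)
    S_in = cos(theta_in) * (1.0 + cos(theta_in)) / 2.0

    print("on a cone:  theta = %5.1f deg, S = %.3f" % (degrees(theta_on), S_on))
    print("in a cone:  theta = %5.1f deg, S = %.3f" % (degrees(theta_in), S_in))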
403 404
405 - def _check_rdcs(self, interatom, spin1, spin2):
406 """Check if the RDCs for the given interatomic data container should be used. 407 408 @param interatom: The interatomic data container. 409 @type interatom: InteratomContainer instance 410 @param spin1: The first spin container. 411 @type spin1: SpinContainer instance 412 @param spin2: The second spin container. 413 @type spin2: SpinContainer instance 414 @return: True if the RDCs should be used, False otherwise. 415 """ 416 417 # Skip deselected spins. 418 if not spin1.select or not spin2.select: 419 return False 420 421 # Only use interatomic data containers with RDC data. 422 if not hasattr(interatom, 'rdc'): 423 return False 424 425 # RDC data exists but the interatomic vectors are missing? 426 if not hasattr(interatom, 'vector'): 427 # Throw a warning. 428 warn(RelaxWarning("RDC data exists but the interatomic vectors are missing, skipping the spin pair '%s' and '%s'." % (interatom.spin_id1, interatom.spin_id2))) 429 430 # Jump to the next spin. 431 return False 432 433 # Skip non-Me pseudo-atoms for the first spin. 434 if hasattr(spin1, 'members') and len(spin1.members) != 3: 435 warn(RelaxWarning("Only methyl group pseudo atoms are supported due to their fast rotation, skipping the spin pair '%s' and '%s'." % (interatom.spin_id1, interatom.spin_id2))) 436 return False 437 438 # Skip non-Me pseudo-atoms for the second spin. 439 if hasattr(spin2, 'members') and len(spin2.members) != 3: 440 warn(RelaxWarning("Only methyl group pseudo atoms are supported due to their fast rotation, skipping the spin pair '%s' and '%s'." % (interatom.spin_id1, interatom.spin_id2))) 441 return False 442 443 # Checks. 444 if not hasattr(spin1, 'isotope'): 445 raise RelaxSpinTypeError(interatom.spin_id1) 446 if not hasattr(spin2, 'isotope'): 447 raise RelaxSpinTypeError(interatom.spin_id2) 448 if not hasattr(interatom, 'r'): 449 raise RelaxNoValueError("averaged interatomic distance") 450 451 # Everything is ok. 452 return True
453 454
455 - def _cone_pdb(self, cone_type=None, scale=1.0, file=None, dir=None, force=False):
456 """Create a PDB file containing a geometric object representing the various cone models. 457 458 Currently the only cone types supported are 'diff in cone' and 'diff on cone'. 459 460 461 @param cone_type: The type of cone model to represent. 462 @type cone_type: str 463 @param scale: The size of the geometric object is eqaul to the average pivot-CoM 464 vector length multiplied by this scaling factor. 465 @type scale: float 466 @param file: The name of the PDB file to create. 467 @type file: str 468 @param dir: The name of the directory to place the PDB file into. 469 @type dir: str 470 @param force: Flag which if set to True will cause any pre-existing file to be 471 overwritten. 472 @type force: int 473 """ 474 475 # Test if the cone models have been determined. 476 if cone_type == 'diff in cone': 477 if not hasattr(cdp, 'S_diff_in_cone'): 478 raise RelaxError("The diffusion in a cone model has not yet been determined.") 479 elif cone_type == 'diff on cone': 480 if not hasattr(cdp, 'S_diff_on_cone'): 481 raise RelaxError("The diffusion on a cone model has not yet been determined.") 482 else: 483 raise RelaxError("The cone type " + repr(cone_type) + " is unknown.") 484 485 # The number of increments for the filling of the cone objects. 486 inc = 20 487 488 # The rotation matrix. 489 R = zeros((3, 3), float64) 490 two_vect_to_R(array([0, 0, 1], float64), cdp.ave_pivot_CoM/norm(cdp.ave_pivot_CoM), R) 491 492 # The isotropic cone object. 493 if cone_type == 'diff in cone': 494 angle = cdp.theta_diff_in_cone 495 elif cone_type == 'diff on cone': 496 angle = cdp.theta_diff_on_cone 497 cone = Iso_cone(angle) 498 499 # Create the structural object. 500 structure = Internal() 501 502 # Add a structure. 503 structure.add_molecule(name='cone') 504 505 # Alias the single molecule from the single model. 506 mol = structure.structural_data[0].mol[0] 507 508 # Add the pivot point. 509 mol.atom_add(pdb_record='HETATM', atom_num=1, atom_name='R', res_name='PIV', res_num=1, pos=cdp.pivot_point, element='C') 510 511 # Generate the average pivot-CoM vectors. 512 print("\nGenerating the average pivot-CoM vectors.") 513 sim_vectors = None 514 if hasattr(cdp, 'ave_pivot_CoM_sim'): 515 sim_vectors = cdp.ave_pivot_CoM_sim 516 res_num = geometric.generate_vector_residues(mol=mol, vector=cdp.ave_pivot_CoM, atom_name='Ave', res_name_vect='AVE', sim_vectors=sim_vectors, res_num=2, origin=cdp.pivot_point, scale=scale) 517 518 # Generate the cone outer edge. 519 print("\nGenerating the cone outer edge.") 520 cap_start_atom = mol.atom_num[-1]+1 521 geometric.cone_edge(mol=mol, cone=cone, res_name='CON', res_num=3, apex=cdp.pivot_point, R=R, scale=norm(cdp.pivot_CoM), inc=inc) 522 523 # Generate the cone cap, and stitch it to the cone edge. 524 if cone_type == 'diff in cone': 525 print("\nGenerating the cone cap.") 526 cone_start_atom = mol.atom_num[-1]+1 527 geometric.generate_vector_dist(mol=mol, res_name='CON', res_num=3, centre=cdp.pivot_point, R=R, limit_check=cone.limit_check, scale=norm(cdp.pivot_CoM), inc=inc) 528 geometric.stitch_cone_to_edge(mol=mol, cone=cone, dome_start=cone_start_atom, edge_start=cap_start_atom+1, inc=inc) 529 530 # Create the PDB file. 531 print("\nGenerating the PDB file.") 532 pdb_file = open_write_file(file, dir, force=force) 533 structure.write_pdb(pdb_file) 534 pdb_file.close()
535 536
537 - def _disassemble_param_vector(self, param_vector=None, data_types=None, sim_index=None):
538 """Disassemble the parameter vector and place the values into the relevant variables. 539 540 For the 2-domain N-state model, the parameters are stored in the probability and Euler angle data structures. For the population N-state model, only the probabilities are stored. If RDCs are present and alignment tensors are optimised, then these are stored as well. 541 542 @keyword data_types: The base data types used in the optimisation. This list can contain the elements 'rdc', 'pcs' or 'tensor'. 543 @type data_types: list of str 544 @keyword param_vector: The parameter vector returned from optimisation. 545 @type param_vector: numpy array 546 @keyword sim_index: The index of the simulation to optimise. This should be None if normal optimisation is desired. 547 @type sim_index: None or int 548 """ 549 550 # Test if the model is selected. 551 if not hasattr(cdp, 'model') or not isinstance(cdp.model, str): 552 raise RelaxNoModelError 553 554 # Unpack and strip off the alignment tensor parameters. 555 if ('rdc' in data_types or 'pcs' in data_types) and not align_tensor.all_tensors_fixed(): 556 # Loop over the alignments, adding the alignment tensor parameters to the tensor data container. 557 tensor_num = 0 558 for i in range(len(cdp.align_tensors)): 559 # No alignment ID, so skip the tensor as it will not be optimised. 560 if cdp.align_tensors[i].name not in cdp.align_ids: 561 continue 562 563 # Fixed tensor. 564 if cdp.align_tensors[i].fixed: 565 continue 566 567 # Normal tensors. 568 if sim_index == None: 569 cdp.align_tensors[i].set(param='Axx', value=param_vector[5*tensor_num]) 570 cdp.align_tensors[i].set(param='Ayy', value=param_vector[5*tensor_num+1]) 571 cdp.align_tensors[i].set(param='Axy', value=param_vector[5*tensor_num+2]) 572 cdp.align_tensors[i].set(param='Axz', value=param_vector[5*tensor_num+3]) 573 cdp.align_tensors[i].set(param='Ayz', value=param_vector[5*tensor_num+4]) 574 575 # Monte Carlo simulated tensors. 576 else: 577 cdp.align_tensors[i].set(param='Axx', value=param_vector[5*tensor_num], category='sim', sim_index=sim_index) 578 cdp.align_tensors[i].set(param='Ayy', value=param_vector[5*tensor_num+1], category='sim', sim_index=sim_index) 579 cdp.align_tensors[i].set(param='Axy', value=param_vector[5*tensor_num+2], category='sim', sim_index=sim_index) 580 cdp.align_tensors[i].set(param='Axz', value=param_vector[5*tensor_num+3], category='sim', sim_index=sim_index) 581 cdp.align_tensors[i].set(param='Ayz', value=param_vector[5*tensor_num+4], category='sim', sim_index=sim_index) 582 583 # Increase the tensor number. 584 tensor_num += 1 585 586 # Create a new parameter vector without the tensors. 587 param_vector = param_vector[5*tensor_num:] 588 589 # Monte Carlo simulation data structures. 590 if sim_index != None: 591 # Populations. 592 if cdp.model in ['2-domain', 'population']: 593 probs = cdp.probs_sim[sim_index] 594 595 # Euler angles. 596 if cdp.model == '2-domain': 597 alpha = cdp.alpha_sim[sim_index] 598 beta = cdp.beta_sim[sim_index] 599 gamma = cdp.gamma_sim[sim_index] 600 601 # Normal data structures. 602 else: 603 # Populations. 604 if cdp.model in ['2-domain', 'population']: 605 probs = cdp.probs 606 607 # Euler angles. 608 if cdp.model == '2-domain': 609 alpha = cdp.alpha 610 beta = cdp.beta 611 gamma = cdp.gamma 612 613 # The probabilities for states 0 to N-1. 614 if cdp.model in ['2-domain', 'population']: 615 for i in range(cdp.N-1): 616 probs[i] = param_vector[i] 617 618 # The probability for state N. 
619 probs[-1] = 1 - sum(probs[0:-1]) 620 621 # The Euler angles. 622 if cdp.model == '2-domain': 623 for i in range(cdp.N): 624 alpha[i] = param_vector[cdp.N-1 + 3*i] 625 beta[i] = param_vector[cdp.N-1 + 3*i + 1] 626 gamma[i] = param_vector[cdp.N-1 + 3*i + 2] 627 628 # The paramagnetic centre. 629 if hasattr(cdp, 'paramag_centre_fixed') and not cdp.paramag_centre_fixed: 630 # Create the structure if needed. 631 if not hasattr(cdp, 'paramagnetic_centre'): 632 cdp.paramagnetic_centre = zeros(3, float64) 633 634 # The position. 635 cdp.paramagnetic_centre[0] = param_vector[-3] 636 cdp.paramagnetic_centre[1] = param_vector[-2] 637 cdp.paramagnetic_centre[2] = param_vector[-1]
638 639
640 - def _elim_no_prob(self):
 641          """Remove all structures or states which have no probability."""
 642  
 643          # Test if the current data pipe exists.
 644          pipes.test()
 645  
 646          # Test if the model is setup.
 647          if not hasattr(cdp, 'model'):
 648              raise RelaxNoModelError('N-state')
 649  
 650          # Test if there are populations.
 651          if not hasattr(cdp, 'probs'):
 652              raise RelaxError("The N-state model populations do not exist.")
 653  
 654          # Create the data structure if needed.
 655          if not hasattr(cdp, 'state_select'):
 656              cdp.state_select = [True] * cdp.N
 657  
 658          # Loop over the structures.
 659          for i in range(cdp.N):
 660              # No probability.
 661              if cdp.probs[i] < 1e-5:
 662                  cdp.state_select[i] = False
663 664
665 - def _linear_constraints(self, data_types=None, scaling_matrix=None):
 666          """Function for setting up the linear constraint matrices A and b.
 667  
 668          Standard notation
 669          =================
 670  
 671          The N-state model constraints are::
 672  
 673              0 <= pc <= 1,
 674  
 675          where pc is the probability of state c.
 676  
 677  
 678          Matrix notation
 679          ===============
 680  
 681          In the notation A.x >= b, where A is a matrix of coefficients, x is an array of parameter
 682          values, and b is a vector of scalars, these inequality constraints are::
 683  
 684              | 1  0  0 |                   |  0 |
 685              |         |                   |    |
 686              |-1  0  0 |                   | -1 |
 687              |         |                   |    |
 688              | 0  1  0 |                   |  0 |
 689              |         |      | p0 |       |    |
 690              | 0 -1  0 |      |    |       | -1 |
 691              |         |   .  | p1 |  >=   |    |
 692              | 0  0  1 |      |    |       |  0 |
 693              |         |      | p2 |       |    |
 694              | 0  0 -1 |                   | -1 |
 695              |         |                   |    |
 696              |-1 -1 -1 |                   | -1 |
 697              |         |                   |    |
 698              | 1  1  1 |                   |  0 |
 699  
 700          This example is for a 4-state model; the last probability pN is not included as this
 701          parameter does not exist (because the sum of pc is equal to 1).  The Euler angle parameters
 702          have been excluded here but will be included in the returned A and b objects.  These
 703          parameters simply add columns of zero to the A matrix and have no effect on b.  The last two
 704          rows correspond to the inequality::
 705  
 706              0 <= pN <= 1.
 707  
 708          As::
 709                        N-1
 710                        \
 711              pN = 1 -   >  pc,
 712                        /__
 713                        c=1
 714  
 715          then::
 716  
 717              -p1 - p2 - ... - p(N-1) >= -1,
 718  
 719               p1 + p2 + ... + p(N-1) >= 0.
 720  
 721  
 722          @keyword data_types:        The base data types used in the optimisation.  This list can
 723                                      contain the elements 'rdc', 'pcs' or 'tensor'.
 724          @type data_types:           list of str
 725          @keyword scaling_matrix:    The diagonal scaling matrix.
 726          @type scaling_matrix:       numpy rank-2 square matrix
 727          @return:                    The matrices A and b.
 728          @rtype:                     tuple of len 2 of a numpy rank-2, size NxM matrix and numpy
 729                                      rank-1, size N array
 730          """
 731  
 732          # Starting point of the populations.
 733          pop_start = 0
 734          if ('rdc' in data_types or 'pcs' in data_types) and not align_tensor.all_tensors_fixed():
 735              # Loop over the alignments.
 736              for i in range(len(cdp.align_tensors)):
 737                  # Fixed tensor.
 738                  if cdp.align_tensors[i].fixed:
 739                      continue
 740  
 741                  # Add 5 parameters.
 742                  pop_start += 5
 743  
 744          # Initialisation (0..j..m).
 745          A = []
 746          b = []
 747          zero_array = zeros(self._param_num(), float64)
 748          i = pop_start
 749          j = 0
 750  
 751          # Probability parameters.
 752          if cdp.model in ['2-domain', 'population']:
 753              # Loop over the prob parameters (N - 1, because the sum of pc is 1).
 754              for k in range(cdp.N - 1):
 755                  # 0 <= pc <= 1.
 756                  A.append(zero_array * 0.0)
 757                  A.append(zero_array * 0.0)
 758                  A[j][i] = 1.0
 759                  A[j+1][i] = -1.0
 760                  b.append(0.0)
 761                  b.append(-1.0 / scaling_matrix[i, i])
 762                  j = j + 2
 763  
 764                  # Increment i.
 765                  i = i + 1
 766  
 767              # Add the inequalities for pN.
 768              A.append(zero_array * 0.0)
 769              A.append(zero_array * 0.0)
 770              for i in range(pop_start, self._param_num()):
 771                  A[-2][i] = -1.0
 772                  A[-1][i] = 1.0
 773              b.append(-1.0 / scaling_matrix[i, i])
 774              b.append(0.0)
 775  
 776          # Convert to numpy data structures.
 777          A = array(A, float64)
 778          b = array(b, float64)
 779  
 780          # Return the constraint objects.
 781          return A, b
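To make the matrix notation concrete, the sketch below builds A and b for a pure population model with N = 4 and an identity scaling matrix, reproducing the 8-row system shown in the docstring, and then checks a feasible set of probabilities against it:

    from numpy import dot, float64, identity, ones, zeros

    N = 4
    scaling = identity(N - 1, float64)    # no diagonal scaling in this example
    A, b = [], []
    for i in range(N - 1):                # 0 <= pc <= 1 for p0, p1 and p2
        lower, upper = zeros(N - 1), zeros(N - 1)
        lower[i], upper[i] = 1.0, -1.0
        A += [lower, upper]
        b += [0.0, -1.0 / scaling[i, i]]
    A += [-ones(N - 1), ones(N - 1)]      # 0 <= pN <= 1, via pN = 1 - sum(pc)
    b += [-1.0, 0.0]

    probs = [0.5, 0.3, 0.1]               # a feasible point (pN = 0.1)
    print(all(dot(A, probs) >= b))        # True: A.x >= b holds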
782 783
784 - def _minimise_bc_data(self, model):
785 """Extract and unpack the back calculated data. 786 787 @param model: The instantiated class containing the target function. 788 @type model: class instance 789 """ 790 791 # No alignment tensors, so nothing to do. 792 if not hasattr(cdp, 'align_tensors'): 793 return 794 795 # Loop over each alignment. 796 for align_index in range(len(cdp.align_ids)): 797 # Fixed tensor. 798 if cdp.align_tensors[align_index].fixed: 799 continue 800 801 # The alignment ID. 802 align_id = cdp.align_ids[align_index] 803 804 # Data flags 805 rdc_flag = False 806 if hasattr(cdp, 'rdc_ids') and align_id in cdp.rdc_ids: 807 rdc_flag = True 808 pcs_flag = False 809 if hasattr(cdp, 'pcs_ids') and align_id in cdp.pcs_ids: 810 pcs_flag = True 811 812 # Spin loop. 813 pcs_index = 0 814 for spin in spin_loop(): 815 # Skip deselected spins. 816 if not spin.select: 817 continue 818 819 # Spins with PCS data. 820 if pcs_flag and hasattr(spin, 'pcs'): 821 # Initialise the data structure if necessary. 822 if not hasattr(spin, 'pcs_bc'): 823 spin.pcs_bc = {} 824 825 # Add the back calculated PCS (in ppm). 826 spin.pcs_bc[align_id] = model.deltaij_theta[align_index, pcs_index] * 1e6 827 828 # Increment the data index if the spin container has data. 829 pcs_index = pcs_index + 1 830 831 # Interatomic data container loop. 832 rdc_index = 0 833 for interatom in interatomic_loop(): 834 # Get the spins. 835 spin1 = return_spin(interatom.spin_id1) 836 spin2 = return_spin(interatom.spin_id2) 837 838 # RDC checks. 839 if not self._check_rdcs(interatom, spin1, spin2): 840 continue 841 842 # Containers with RDC data. 843 if rdc_flag and hasattr(interatom, 'rdc'): 844 # Initialise the data structure if necessary. 845 if not hasattr(interatom, 'rdc_bc'): 846 interatom.rdc_bc = {} 847 848 # Append the back calculated PCS. 849 interatom.rdc_bc[align_id] = model.Dij_theta[align_index, rdc_index] 850 851 # Increment the data index if the interatom container has data. 852 rdc_index = rdc_index + 1
853 854
855 - def _minimise_setup_atomic_pos(self):
856 """Set up the atomic position data structures for optimisation using PCSs and PREs as base data sets. 857 858 @return: The atomic positions (the first index is the spins, the second is the structures, and the third is the atomic coordinates) and the paramagnetic centre. 859 @rtype: numpy rank-3 array, numpy rank-1 array. 860 """ 861 862 # Initialise. 863 atomic_pos = [] 864 865 # Store the atomic positions. 866 for spin in spin_loop(): 867 # Skip deselected spins. 868 if not spin.select: 869 continue 870 871 # Only use spins with alignment/paramagnetic data. 872 if not hasattr(spin, 'pcs') and not hasattr(spin, 'pre'): 873 continue 874 875 # The position list. 876 if type(spin.pos[0]) in [float, float64]: 877 atomic_pos.append([spin.pos]) 878 else: 879 atomic_pos.append(spin.pos) 880 881 # Convert to numpy objects. 882 atomic_pos = array(atomic_pos, float64) 883 884 # The paramagnetic centre. 885 if hasattr(cdp, 'paramagnetic_centre'): 886 paramag_centre = array(cdp.paramagnetic_centre) 887 else: 888 paramag_centre = zeros(3, float64) 889 890 # Return the data structures. 891 return atomic_pos, paramag_centre
892 893
894 - def _minimise_setup_pcs(self, sim_index=None):
895 """Set up the data structures for optimisation using PCSs as base data sets. 896 897 @keyword sim_index: The index of the simulation to optimise. This should be None if normal optimisation is desired. 898 @type sim_index: None or int 899 @return: The assembled data structures for using PCSs as the base data for optimisation. These include: 900 - the PCS values. 901 - the unit vectors connecting the paramagnetic centre (the electron spin) to 902 - the PCS weight. 903 - the nuclear spin. 904 - the pseudocontact shift constants. 905 @rtype: tuple of (numpy rank-2 array, numpy rank-2 array, numpy rank-2 array, numpy rank-1 array, numpy rank-1 array) 906 """ 907 908 # Data setup tests. 909 if not hasattr(cdp, 'paramagnetic_centre') and (hasattr(cdp, 'paramag_centre_fixed') and cdp.paramag_centre_fixed): 910 raise RelaxError("The paramagnetic centre has not yet been specified.") 911 if not hasattr(cdp, 'temperature'): 912 raise RelaxError("The experimental temperatures have not been set.") 913 if not hasattr(cdp, 'frq'): 914 raise RelaxError("The spectrometer frequencies of the experiments have not been set.") 915 916 # Initialise. 917 pcs = [] 918 pcs_err = [] 919 pcs_weight = [] 920 temp = [] 921 frq = [] 922 923 # The PCS data. 924 for align_id in cdp.align_ids: 925 # No RDC or PCS data, so jump to the next alignment. 926 if (hasattr(cdp, 'rdc_ids') and not align_id in cdp.rdc_ids) and (hasattr(cdp, 'pcs_ids') and not align_id in cdp.pcs_ids): 927 continue 928 929 # Append empty arrays to the PCS structures. 930 pcs.append([]) 931 pcs_err.append([]) 932 pcs_weight.append([]) 933 934 # Get the temperature for the PCS constant. 935 if align_id in cdp.temperature: 936 temp.append(cdp.temperature[align_id]) 937 else: 938 temp.append(0.0) 939 940 # Get the spectrometer frequency in Tesla units for the PCS constant. 941 if align_id in cdp.frq: 942 frq.append(cdp.frq[align_id] * 2.0 * pi / g1H) 943 else: 944 frq.append(1e-10) 945 946 # Spin loop. 947 j = 0 948 for spin in spin_loop(): 949 # Skip deselected spins. 950 if not spin.select: 951 continue 952 953 # Skip spins without PCS data. 954 if not hasattr(spin, 'pcs'): 955 continue 956 957 # Append the PCSs to the list. 958 if align_id in spin.pcs.keys(): 959 if sim_index != None: 960 pcs[-1].append(spin.pcs_sim[align_id][sim_index]) 961 else: 962 pcs[-1].append(spin.pcs[align_id]) 963 else: 964 pcs[-1].append(None) 965 966 # Append the PCS errors. 967 if hasattr(spin, 'pcs_err') and align_id in spin.pcs_err.keys(): 968 pcs_err[-1].append(spin.pcs_err[align_id]) 969 else: 970 pcs_err[-1].append(None) 971 972 # Append the weight. 973 if hasattr(spin, 'pcs_weight') and align_id in spin.pcs_weight.keys(): 974 pcs_weight[-1].append(spin.pcs_weight[align_id]) 975 else: 976 pcs_weight[-1].append(1.0) 977 978 # Spin index. 979 j = j + 1 980 981 # Convert to numpy objects. 982 pcs = array(pcs, float64) 983 pcs_err = array(pcs_err, float64) 984 pcs_weight = array(pcs_weight, float64) 985 986 # Convert the PCS from ppm to no units. 987 pcs = pcs * 1e-6 988 pcs_err = pcs_err * 1e-6 989 990 # Return the data structures. 991 return pcs, pcs_err, pcs_weight, temp, frq
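One detail of the PCS setup above is the conversion of the stored spectrometer frequency (the 1H Larmor frequency in Hz) into the magnetic field strength in Tesla needed by the PCS constant, via B0 = 2*pi*frq/gamma_1H. A standalone check with an approximate 1H gyromagnetic ratio and an invented 600 MHz field:

    from math import pi

    g1H = 26.752e7        # approximate 1H gyromagnetic ratio in rad.s^-1.T^-1
    frq_hz = 600.13e6     # hypothetical 600 MHz spectrometer frequency

    B0 = frq_hz * 2.0 * pi / g1H
    print("%.2f T" % B0)  # roughly 14.1 Tesla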
992 993
994 - def _minimise_setup_rdcs(self, sim_index=None):
995 """Set up the data structures for optimisation using RDCs as base data sets. 996 997 @keyword sim_index: The index of the simulation to optimise. This should be None if normal optimisation is desired. 998 @type sim_index: None or int 999 @return: The assembled data structures for using RDCs as the base data for optimisation. These include: 1000 - rdc, the RDC values. 1001 - rdc_err, the RDC errors. 1002 - rdc_weight, the RDC weights. 1003 - vectors, the interatomic vectors. 1004 - rdc_const, the dipolar constants. 1005 - absolute, the absolute value flags (as 1's and 0's). 1006 @rtype: tuple of (numpy rank-2 array, numpy rank-2 array, numpy rank-2 array, numpy rank-3 array, numpy rank-2 array, numpy rank-2 array) 1007 """ 1008 1009 # Initialise. 1010 rdc = [] 1011 rdc_err = [] 1012 rdc_weight = [] 1013 unit_vect = [] 1014 rdc_const = [] 1015 absolute = [] 1016 1017 # The unit vectors and RDC constants. 1018 for interatom in interatomic_loop(): 1019 # Get the spins. 1020 spin1 = return_spin(interatom.spin_id1) 1021 spin2 = return_spin(interatom.spin_id2) 1022 1023 # RDC checks. 1024 if not self._check_rdcs(interatom, spin1, spin2): 1025 continue 1026 1027 # Add the vectors. 1028 if arg_check.is_float(interatom.vector[0], raise_error=False): 1029 unit_vect.append([interatom.vector]) 1030 else: 1031 unit_vect.append(interatom.vector) 1032 1033 # Gyromagnetic ratios. 1034 g1 = return_gyromagnetic_ratio(spin1.isotope) 1035 g2 = return_gyromagnetic_ratio(spin2.isotope) 1036 1037 # Calculate the RDC dipolar constant (in Hertz, and the 3 comes from the alignment tensor), and append it to the list. 1038 rdc_const.append(3.0/(2.0*pi) * dipolar_constant(g1, g2, interatom.r)) 1039 1040 # Fix the unit vector data structure. 1041 num = None 1042 for rdc_index in range(len(unit_vect)): 1043 # Number of vectors. 1044 if num == None: 1045 if unit_vect[rdc_index] != None: 1046 num = len(unit_vect[rdc_index]) 1047 continue 1048 1049 # Check. 1050 if unit_vect[rdc_index] != None and len(unit_vect[rdc_index]) != num: 1051 raise RelaxError("The number of interatomic vectors for all no match:\n%s" % unit_vect) 1052 1053 # Missing unit vectors. 1054 if num == None: 1055 raise RelaxError("No interatomic vectors could be found.") 1056 1057 # Update None entries. 1058 for i in range(len(unit_vect)): 1059 if unit_vect[i] == None: 1060 unit_vect[i] = [[None, None, None]]*num 1061 1062 # The RDC data. 1063 for align_id in cdp.align_ids: 1064 # No RDC or PCS data, so jump to the next alignment. 1065 if (hasattr(cdp, 'rdc_ids') and not align_id in cdp.rdc_ids) and (hasattr(cdp, 'pcs_ids') and not align_id in cdp.pcs_ids): 1066 continue 1067 1068 # Append empty arrays to the RDC structures. 1069 rdc.append([]) 1070 rdc_err.append([]) 1071 rdc_weight.append([]) 1072 absolute.append([]) 1073 1074 # Interatom loop. 1075 for interatom in interatomic_loop(): 1076 # Get the spins. 1077 spin1 = return_spin(interatom.spin_id1) 1078 spin2 = return_spin(interatom.spin_id2) 1079 1080 # Skip deselected spins. 1081 if not spin1.select or not spin2.select: 1082 continue 1083 1084 # Only use interatomic data containers with RDC and vector data. 1085 if not hasattr(interatom, 'rdc') or not hasattr(interatom, 'vector'): 1086 continue 1087 1088 # Defaults of None. 1089 value = None 1090 error = None 1091 1092 # Pseudo-atom set up. 1093 if (hasattr(spin1, 'members') or hasattr(spin2, 'members')) and align_id in interatom.rdc.keys(): 1094 # Skip non-Me groups. 
1095 if len(spin1.members) != 3: 1096 continue 1097 1098 # The RDC for the Me-pseudo spin where: 1099 # <D> = -1/3 Dpar. 1100 # See Verdier, et al., JMR, 2003, 163, 353-359. 1101 if sim_index != None: 1102 value = -3.0 * interatom.rdc_sim[align_id][sim_index] 1103 else: 1104 value = -3.0 * interatom.rdc[align_id] 1105 1106 # The error. 1107 if hasattr(interatom, 'rdc_err') and align_id in interatom.rdc_err.keys(): 1108 error = -3.0 * interatom.rdc_err[align_id] 1109 1110 # Normal set up. 1111 elif align_id in interatom.rdc.keys(): 1112 # The RDC. 1113 if sim_index != None: 1114 value = interatom.rdc_sim[align_id][sim_index] 1115 else: 1116 value = interatom.rdc[align_id] 1117 1118 # The error. 1119 if hasattr(interatom, 'rdc_err') and align_id in interatom.rdc_err.keys(): 1120 error = interatom.rdc_err[align_id] 1121 1122 # Append the RDCs to the list. 1123 rdc[-1].append(value) 1124 1125 # Append the RDC errors. 1126 rdc_err[-1].append(error) 1127 1128 # Append the weight. 1129 if hasattr(interatom, 'rdc_weight') and align_id in interatom.rdc_weight.keys(): 1130 rdc_weight[-1].append(interatom.rdc_weight[align_id]) 1131 else: 1132 rdc_weight[-1].append(1.0) 1133 1134 # Append the absolute value flag. 1135 if hasattr(interatom, 'absolute_rdc') and align_id in interatom.absolute_rdc.keys(): 1136 absolute[-1].append(interatom.absolute_rdc[align_id]) 1137 else: 1138 absolute[-1].append(False) 1139 1140 # Convert to numpy objects. 1141 rdc = array(rdc, float64) 1142 rdc_err = array(rdc_err, float64) 1143 rdc_weight = array(rdc_weight, float64) 1144 unit_vect = array(unit_vect, float64) 1145 rdc_const = array(rdc_const, float64) 1146 absolute = array(absolute, float64) 1147 1148 # Return the data structures. 1149 return rdc, rdc_err, rdc_weight, unit_vect, rdc_const, absolute
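One convention in the RDC setup above deserves a note: for a methyl (Me) pseudo-atom the measured coupling is the average over the three fast-rotating protons, <D> = -1/3 Dpar, so the value handed to the target function is -3 times the stored RDC. A trivial standalone illustration (the measured value is invented):

    # Measured RDC averaged over the three methyl protons (hypothetical value, in Hz).
    measured_me_rdc = -2.5

    # <D> = -1/3 Dpar (Verdier et al., JMR, 2003, 163, 353-359), so recover Dpar as -3 <D>.
    d_parallel = -3.0 * measured_me_rdc
    print(d_parallel)    # 7.5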
1150 1151
1152 - def _minimise_setup_tensors(self, sim_index=None):
1153 """Set up the data structures for optimisation using alignment tensors as base data sets. 1154 1155 @keyword sim_index: The index of the simulation to optimise. This should be None if 1156 normal optimisation is desired. 1157 @type sim_index: None or int 1158 @return: The assembled data structures for using alignment tensors as the base 1159 data for optimisation. These include: 1160 - full_tensors, the data of the full alignment tensors. 1161 - red_tensor_elem, the tensors as concatenated rank-1 5D arrays. 1162 - red_tensor_err, the tensor errors as concatenated rank-1 5D 1163 arrays. 1164 - full_in_ref_frame, flags specifying if the tensor in the reference 1165 frame is the full or reduced tensor. 1166 @rtype: tuple of (list, numpy rank-1 array, numpy rank-1 array, numpy rank-1 1167 array) 1168 """ 1169 1170 # Initialise. 1171 n = len(cdp.align_tensors.reduction) 1172 full_tensors = zeros(n*5, float64) 1173 red_tensors = zeros(n*5, float64) 1174 red_err = ones(n*5, float64) * 1e-5 1175 full_in_ref_frame = zeros(n, float64) 1176 1177 # Loop over the full tensors. 1178 for i, tensor in self._tensor_loop(red=False): 1179 # The full tensor. 1180 full_tensors[5*i + 0] = tensor.Axx 1181 full_tensors[5*i + 1] = tensor.Ayy 1182 full_tensors[5*i + 2] = tensor.Axy 1183 full_tensors[5*i + 3] = tensor.Axz 1184 full_tensors[5*i + 4] = tensor.Ayz 1185 1186 # The full tensor corresponds to the frame of reference. 1187 if cdp.ref_domain == tensor.domain: 1188 full_in_ref_frame[i] = 1 1189 1190 # Loop over the reduced tensors. 1191 for i, tensor in self._tensor_loop(red=True): 1192 # The reduced tensor (simulation data). 1193 if sim_index != None: 1194 red_tensors[5*i + 0] = tensor.Axx_sim[sim_index] 1195 red_tensors[5*i + 1] = tensor.Ayy_sim[sim_index] 1196 red_tensors[5*i + 2] = tensor.Axy_sim[sim_index] 1197 red_tensors[5*i + 3] = tensor.Axz_sim[sim_index] 1198 red_tensors[5*i + 4] = tensor.Ayz_sim[sim_index] 1199 1200 # The reduced tensor. 1201 else: 1202 red_tensors[5*i + 0] = tensor.Axx 1203 red_tensors[5*i + 1] = tensor.Ayy 1204 red_tensors[5*i + 2] = tensor.Axy 1205 red_tensors[5*i + 3] = tensor.Axz 1206 red_tensors[5*i + 4] = tensor.Ayz 1207 1208 # The reduced tensor errors. 1209 if hasattr(tensor, 'Axx_err'): 1210 red_err[5*i + 0] = tensor.Axx_err 1211 red_err[5*i + 1] = tensor.Ayy_err 1212 red_err[5*i + 2] = tensor.Axy_err 1213 red_err[5*i + 3] = tensor.Axz_err 1214 red_err[5*i + 4] = tensor.Ayz_err 1215 1216 # Return the data structures. 1217 return full_tensors, red_tensors, red_err, full_in_ref_frame
1218 1219
1220 - def _minimise_setup_fixed_tensors(self):
1221 """Set up the data structures for the fixed alignment tensors. 1222 1223 @return: The assembled data structures for the fixed alignment tensors. 1224 @rtype: numpy rank-1 array. 1225 """ 1226 1227 # Initialise. 1228 n = align_tensor.num_tensors(skip_fixed=False) - align_tensor.num_tensors(skip_fixed=True) 1229 tensors = zeros(n*5, float64) 1230 1231 # Nothing to do. 1232 if n == 0: 1233 return None 1234 1235 # Loop over the tensors. 1236 index = 0 1237 for i in range(len(cdp.align_tensors)): 1238 # Skip unfixed tensors. 1239 if not cdp.align_tensors[i].fixed: 1240 continue 1241 1242 # The real tensors. 1243 tensors[5*index + 0] = cdp.align_tensors[i].Axx 1244 tensors[5*index + 1] = cdp.align_tensors[i].Ayy 1245 tensors[5*index + 2] = cdp.align_tensors[i].Axy 1246 tensors[5*index + 3] = cdp.align_tensors[i].Axz 1247 tensors[5*index + 4] = cdp.align_tensors[i].Ayz 1248 1249 # Increment the index. 1250 index += 1 1251 1252 # Return the data structure. 1253 return tensors
1254 1255
1256 - def _num_data_points(self):
1257 """Determine the number of data points used in the model. 1258 1259 @return: The number, n, of data points in the model. 1260 @rtype: int 1261 """ 1262 1263 # Determine the data type. 1264 data_types = self._base_data_types() 1265 1266 # Init. 1267 n = 0 1268 1269 # Spin loop. 1270 for spin in spin_loop(): 1271 # Skip deselected spins. 1272 if not spin.select: 1273 continue 1274 1275 # PCS data (skipping array elements set to None). 1276 if 'pcs' in data_types: 1277 if hasattr(spin, 'pcs'): 1278 for pcs in spin.pcs: 1279 if isinstance(pcs, float): 1280 n = n + 1 1281 1282 # Interatomic data loop. 1283 for interatom in interatomic_loop(): 1284 # RDC data (skipping array elements set to None). 1285 if 'rdc' in data_types: 1286 if hasattr(interatom, 'rdc'): 1287 for rdc in interatom.rdc: 1288 if isinstance(rdc, float): 1289 n = n + 1 1290 1291 # Alignment tensors. 1292 if 'tensor' in data_types: 1293 n = n + 5*len(cdp.align_tensors) 1294 1295 # Return the value. 1296 return n
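The count above is simply the number of non-None PCS and RDC values, plus five data points per alignment tensor when tensors themselves are the base data. A toy illustration of the same float-only counting over plain dictionaries (all values are invented):

    pcs_per_spin = [{'Dy': 0.52, 'Tb': None}, {'Dy': -0.11, 'Tb': 0.30}]
    rdc_per_pair = [{'Dy': 5.1}, {'Dy': None}]

    n = 0
    for data in pcs_per_spin + rdc_per_pair:
        n += sum(1 for value in data.values() if isinstance(value, float))
    print(n)    # 4 data points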
1297 1298
1299 - def _number_of_states(self, N=None):
1300 """Set the number of states in the N-state model. 1301 1302 @param N: The number of states. 1303 @type N: int 1304 """ 1305 1306 # Test if the current data pipe exists. 1307 pipes.test() 1308 1309 # Test if the model is setup. 1310 if not hasattr(cdp, 'model'): 1311 raise RelaxNoModelError('N-state') 1312 1313 # Set the value of N. 1314 cdp.N = N 1315 1316 # Update the model. 1317 self._update_model()
1318 1319
1320 - def _param_model_index(self, param):
1321 """Return the N-state model index for the given parameter. 1322 1323 @param param: The N-state model parameter. 1324 @type param: str 1325 @return: The N-state model index. 1326 @rtype: str 1327 """ 1328 1329 # Probability. 1330 if search('^p[0-9]*$', param): 1331 return int(param[1:]) 1332 1333 # Alpha Euler angle. 1334 if search('^alpha', param): 1335 return int(param[5:]) 1336 1337 # Beta Euler angle. 1338 if search('^beta', param): 1339 return int(param[4:]) 1340 1341 # Gamma Euler angle. 1342 if search('^gamma', param): 1343 return int(param[5:]) 1344 1345 # Model independent parameter. 1346 return None
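The parameter names are matched with simple regular expressions: the integer trailing 'p', 'alpha', 'beta' or 'gamma' is the state index, and anything else is treated as model independent. A standalone sketch of the same matching (using a slightly stricter pattern that requires at least one digit):

    from re import search

    def model_index(param):
        for prefix in ('p', 'alpha', 'beta', 'gamma'):
            if search('^%s[0-9]+$' % prefix, param):
                return int(param[len(prefix):])
        return None

    print(model_index('p2'), model_index('gamma10'), model_index('probs'))    # 2 10 None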
1347 1348
1349 - def _param_num(self):
1350 """Determine the number of parameters in the model. 1351 1352 @return: The number of model parameters. 1353 @rtype: int 1354 """ 1355 1356 # Determine the data type. 1357 data_types = self._base_data_types() 1358 1359 # Init. 1360 num = 0 1361 1362 # Alignment tensor params. 1363 if ('rdc' in data_types or 'pcs' in data_types) and not align_tensor.all_tensors_fixed(): 1364 # Loop over the alignments. 1365 for i in range(len(cdp.align_tensors)): 1366 # No alignment ID, so skip the tensor as it is not part of the parameter set. 1367 if cdp.align_tensors[i].name not in cdp.align_ids: 1368 continue 1369 1370 # Fixed tensor. 1371 if cdp.align_tensors[i].fixed: 1372 continue 1373 1374 # Add 5 tensor parameters. 1375 num += 5 1376 1377 # Populations. 1378 if cdp.model in ['2-domain', 'population']: 1379 num = num + (cdp.N - 1) 1380 1381 # Euler angles. 1382 if cdp.model == '2-domain': 1383 num = num + 3*cdp.N 1384 1385 # The paramagnetic centre. 1386 if hasattr(cdp, 'paramag_centre_fixed') and not cdp.paramag_centre_fixed: 1387 num = num + 3 1388 1389 # Return the param number. 1390 return num
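Put as arithmetic, the parameter count is 5 per free alignment tensor, plus N-1 populations for the 'population' and '2-domain' models, plus 3N Euler angles for '2-domain', plus 3 for a free paramagnetic centre. A small standalone version of the same bookkeeping:

    def param_num(n_states, n_free_tensors, model='population', centre_fixed=True):
        num = 5 * n_free_tensors
        if model in ('2-domain', 'population'):
            num += n_states - 1
        if model == '2-domain':
            num += 3 * n_states
        if not centre_fixed:
            num += 3
        return num

    # A '2-domain' model with 4 states and 2 free tensors: 10 + 3 + 12 = 25 parameters.
    print(param_num(4, 2, model='2-domain'))    # 25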
1391 1392
1393 - def _ref_domain(self, ref=None):
1394 """Set the reference domain for the '2-domain' N-state model. 1395 1396 @param ref: The reference domain. 1397 @type ref: str 1398 """ 1399 1400 # Test if the current data pipe exists. 1401 pipes.test() 1402 1403 # Test if the model is setup. 1404 if not hasattr(cdp, 'model'): 1405 raise RelaxNoModelError('N-state') 1406 1407 # Test that the correct model is set. 1408 if cdp.model != '2-domain': 1409 raise RelaxError("Setting the reference domain is only possible for the '2-domain' N-state model.") 1410 1411 # Test if the reference domain exists. 1412 exists = False 1413 for tensor_cont in cdp.align_tensors: 1414 if tensor_cont.domain == ref: 1415 exists = True 1416 if not exists: 1417 raise RelaxError("The reference domain cannot be found within any of the loaded tensors.") 1418 1419 # Set the reference domain. 1420 cdp.ref_domain = ref 1421 1422 # Update the model. 1423 self._update_model()
1424 1425
1426 - def _select_model(self, model=None):
1427 """Select the N-state model type. 1428 1429 @param model: The N-state model type. Can be one of '2-domain', 'population', or 'fixed'. 1430 @type model: str 1431 """ 1432 1433 # Test if the current data pipe exists. 1434 pipes.test() 1435 1436 # Test if the model name exists. 1437 if not model in ['2-domain', 'population', 'fixed']: 1438 raise RelaxError("The model name " + repr(model) + " is invalid.") 1439 1440 # Test if the model is setup. 1441 if hasattr(cdp, 'model'): 1442 warn(RelaxWarning("The N-state model has already been set up. Switching from model '%s' to '%s'." % (cdp.model, model))) 1443 1444 # Set the model 1445 cdp.model = model 1446 1447 # Initialise the list of model parameters. 1448 cdp.params = [] 1449 1450 # Update the model. 1451 self._update_model()
1452 1453
1454 - def _target_fn_setup(self, sim_index=None, scaling=True):
1455 """Initialise the target function for optimisation or direct calculation. 1456 1457 @param sim_index: The index of the simulation to optimise. This should be None if normal optimisation is desired. 1458 @type sim_index: None or int 1459 @param scaling: If True, diagonal scaling is enabled during optimisation to allow the problem to be better conditioned. 1460 @type scaling: bool 1461 """ 1462 1463 # Test if the N-state model has been set up. 1464 if not hasattr(cdp, 'model'): 1465 raise RelaxNoModelError('N-state') 1466 1467 # '2-domain' model setup tests. 1468 if cdp.model == '2-domain': 1469 # The number of states. 1470 if not hasattr(cdp, 'N'): 1471 raise RelaxError("The number of states has not been set.") 1472 1473 # The reference domain. 1474 if not hasattr(cdp, 'ref_domain'): 1475 raise RelaxError("The reference domain has not been set.") 1476 1477 # Update the model parameters if necessary. 1478 self._update_model() 1479 1480 # Create the initial parameter vector. 1481 param_vector = self._assemble_param_vector(sim_index=sim_index) 1482 1483 # Determine if alignment tensors or RDCs are to be used. 1484 data_types = self._base_data_types() 1485 1486 # The probabilities. 1487 probs = None 1488 if hasattr(cdp, 'probs'): 1489 probs = cdp.probs 1490 1491 # Diagonal scaling. 1492 scaling_matrix = None 1493 if len(param_vector): 1494 scaling_matrix = self._assemble_scaling_matrix(data_types=data_types, scaling=scaling) 1495 param_vector = dot(inv(scaling_matrix), param_vector) 1496 1497 # Get the data structures for optimisation using the tensors as base data sets. 1498 full_tensors, red_tensor_elem, red_tensor_err, full_in_ref_frame = None, None, None, None 1499 if 'tensor' in data_types: 1500 full_tensors, red_tensor_elem, red_tensor_err, full_in_ref_frame = self._minimise_setup_tensors(sim_index=sim_index) 1501 1502 # Get the data structures for optimisation using PCSs as base data sets. 1503 pcs, pcs_err, pcs_weight, temp, frq = None, None, None, None, None 1504 if 'pcs' in data_types: 1505 pcs, pcs_err, pcs_weight, temp, frq = self._minimise_setup_pcs(sim_index=sim_index) 1506 1507 # Get the data structures for optimisation using RDCs as base data sets. 1508 rdcs, rdc_err, rdc_weight, rdc_vector, rdc_dj, absolute_rdc = None, None, None, None, None, None 1509 if 'rdc' in data_types: 1510 rdcs, rdc_err, rdc_weight, rdc_vector, rdc_dj, absolute_rdc = self._minimise_setup_rdcs(sim_index=sim_index) 1511 1512 # Get the fixed tensors. 1513 fixed_tensors = None 1514 if 'rdc' in data_types or 'pcs' in data_types: 1515 full_tensors = self._minimise_setup_fixed_tensors() 1516 1517 # The flag list. 1518 fixed_tensors = [] 1519 for i in range(len(cdp.align_tensors)): 1520 if cdp.align_tensors[i].fixed: 1521 fixed_tensors.append(True) 1522 else: 1523 fixed_tensors.append(False) 1524 1525 # Get the atomic_positions. 1526 atomic_pos, paramag_centre, centre_fixed = None, None, True 1527 if 'pcs' in data_types or 'pre' in data_types: 1528 atomic_pos, paramag_centre = self._minimise_setup_atomic_pos() 1529 1530 # Optimisation of the centre. 1531 if hasattr(cdp, 'paramag_centre_fixed'): 1532 centre_fixed = cdp.paramag_centre_fixed 1533 1534 # Set up the class instance containing the target function. 
1535 model = N_state_opt(model=cdp.model, N=cdp.N, init_params=param_vector, probs=probs, full_tensors=full_tensors, red_data=red_tensor_elem, red_errors=red_tensor_err, full_in_ref_frame=full_in_ref_frame, fixed_tensors=fixed_tensors, pcs=pcs, rdcs=rdcs, pcs_errors=pcs_err, rdc_errors=rdc_err, pcs_weights=pcs_weight, rdc_weights=rdc_weight, rdc_vect=rdc_vector, temp=temp, frq=frq, dip_const=rdc_dj, absolute_rdc=absolute_rdc, atomic_pos=atomic_pos, paramag_centre=paramag_centre, scaling_matrix=scaling_matrix, centre_fixed=centre_fixed) 1536 1537 # Return the data. 1538 return model, param_vector, data_types, scaling_matrix
1539 1540
1541 - def _tensor_loop(self, red=False):
1542 """Generator method for looping over the full or reduced tensors. 1543 1544 @keyword red: A flag which if True causes the reduced tensors to be returned, and if False 1545 the full tensors are returned. 1546 @type red: bool 1547 @return: The tensor index and the tensor. 1548 @rtype: (int, AlignTensorData instance) 1549 """ 1550 1551 # Number of tensor pairs. 1552 n = len(cdp.align_tensors.reduction) 1553 1554 # Alias. 1555 data = cdp.align_tensors 1556 list = data.reduction 1557 1558 # Full or reduced index. 1559 if red: 1560 index = 1 1561 else: 1562 index = 0 1563 1564 # Loop over the reduction list. 1565 for i in range(n): 1566 yield i, data[list[i][index]]
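The generator above walks the reduction list of (full, reduced) index pairs stored on the alignment tensor container and yields one tensor of each pair: the full one for red=False and the reduced one for red=True. A minimal stand-in with plain lists (all names are invented):

    tensors = ['full_A', 'red_A', 'full_B', 'red_B']
    reduction = [(0, 1), (2, 3)]    # (full index, reduced index) pairs

    def tensor_loop(red=False):
        index = 1 if red else 0
        for i, pair in enumerate(reduction):
            yield i, tensors[pair[index]]

    print(list(tensor_loop(red=True)))    # [(0, 'red_A'), (1, 'red_B')]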
1567 1568
1569 - def _update_model(self):
1570 """Update the model parameters as necessary.""" 1571 1572 # Initialise the list of model parameters. 1573 if not hasattr(cdp, 'params'): 1574 cdp.params = [] 1575 1576 # Determine the number of states (loaded as structural models), if not already set. 1577 if not hasattr(cdp, 'N'): 1578 # Set the number. 1579 if hasattr(cdp, 'structure'): 1580 cdp.N = cdp.structure.num_models() 1581 1582 # Otherwise return as the rest cannot be updated without N. 1583 else: 1584 return 1585 1586 # Set up the parameter arrays. 1587 if not cdp.params: 1588 # Add the probability or population weight parameters. 1589 if cdp.model in ['2-domain', 'population']: 1590 for i in range(cdp.N-1): 1591 cdp.params.append('p' + repr(i)) 1592 1593 # Add the Euler angle parameters. 1594 if cdp.model == '2-domain': 1595 for i in range(cdp.N): 1596 cdp.params.append('alpha' + repr(i)) 1597 cdp.params.append('beta' + repr(i)) 1598 cdp.params.append('gamma' + repr(i)) 1599 1600 # Initialise the probability and Euler angle arrays. 1601 if cdp.model in ['2-domain', 'population']: 1602 if not hasattr(cdp, 'probs'): 1603 cdp.probs = [None] * cdp.N 1604 if cdp.model == '2-domain': 1605 if not hasattr(cdp, 'alpha'): 1606 cdp.alpha = [None] * cdp.N 1607 if not hasattr(cdp, 'beta'): 1608 cdp.beta = [None] * cdp.N 1609 if not hasattr(cdp, 'gamma'): 1610 cdp.gamma = [None] * cdp.N 1611 1612 # Determine the data type. 1613 data_types = self._base_data_types() 1614 1615 # Set up tensors for each alignment. 1616 if hasattr(cdp, 'align_ids'): 1617 for id in cdp.align_ids: 1618 # No tensors initialised. 1619 if not hasattr(cdp, 'align_tensors'): 1620 align_tensor.init(tensor=id, params=[0.0, 0.0, 0.0, 0.0, 0.0]) 1621 1622 # Find if the tensor corresponding to the id exists. 1623 exists = False 1624 for tensor in cdp.align_tensors: 1625 if id == tensor.name: 1626 exists = True 1627 1628 # Initialise the tensor. 1629 if not exists: 1630 align_tensor.init(tensor=id, params=[0.0, 0.0, 0.0, 0.0, 0.0])
1631 1632
1633 - def base_data_loop(self):
1634 """Loop over the base data of the spins - RDCs, PCSs, and NOESY data. 1635 1636 This loop iterates for each data point (RDC, PCS, NOESY) for each spin or interatomic data container, returning the identification information. 1637 1638 @return: A list of the spin or interatomic data container, the data type ('rdc', 'pcs', 'noesy'), and the alignment ID if required. 1639 @rtype: list of [SpinContainer instance, str, str] or [InteratomContainer instance, str, str] 1640 """ 1641 1642 # Loop over the interatomic data containers. 1643 for interatom in interatomic_loop(): 1644 # Re-initialise the data structure. 1645 data = [interatom, None, None] 1646 1647 # RDC data. 1648 if hasattr(interatom, 'rdc'): 1649 data[1] = 'rdc' 1650 1651 # Loop over the alignment IDs. 1652 for id in cdp.rdc_ids: 1653 # Add the ID. 1654 data[2] = id 1655 1656 # Yield the set. 1657 yield data 1658 1659 # NOESY data. 1660 if hasattr(interatom, 'noesy'): 1661 data[1] = 'noesy' 1662 1663 # Loop over the alignment IDs. 1664 for id in cdp.noesy_ids: 1665 # Add the ID. 1666 data[2] = id 1667 1668 # Yield the set. 1669 yield data 1670 1671 # Loop over the spins. 1672 for spin in spin_loop(): 1673 # Re-initialise the data structure. 1674 data = [spin, None, None] 1675 1676 # PCS data. 1677 if hasattr(spin, 'pcs'): 1678 data[1] = 'pcs' 1679 1680 # Loop over the alignment IDs. 1681 for id in cdp.pcs_ids: 1682 # Add the ID. 1683 data[2] = id 1684 1685 # Yield the set. 1686 yield data
1687 1688
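# Illustrative sketch (standalone, using a stand-in object rather than a real interatomic
# data container): the [container, data type, alignment ID] lists yielded by
# base_data_loop() above, one per RDC, PCS or NOESY data point.
class FakeInteratom(object):
    rdc = {'Dy': 10.2, 'Tb': -3.1}

rdc_ids = ['Dy', 'Tb']
for interatom in [FakeInteratom()]:
    if hasattr(interatom, 'rdc'):
        data = [interatom, 'rdc', None]
        for id in rdc_ids:
            data[2] = id
            print("%s data point for alignment '%s'" % (data[1], data[2]))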
1689 - def calculate(self, spin_id=None, verbosity=1, sim_index=None):
1690 """Calculation function. 1691 1692 This function calculates the chi-squared value for the current parameter values, stores the back-calculated RDC and PCS data, computes the corresponding Q-factors, 1693 and evaluates the NOESY flat-bottom quadratic energy potential if NOE restraints are available. 1694 1695 @keyword spin_id: The spin identification string (unused). 1696 @type spin_id: None or str 1697 @keyword verbosity: The amount of information to print. The higher the value, the greater the verbosity. 1698 @type verbosity: int 1699 @keyword sim_index: The MC simulation index (unused). 1700 @type sim_index: None 1701 """ 1702 1703 # Set up the target function for direct calculation. 1704 model, param_vector, data_types, scaling_matrix = self._target_fn_setup() 1705 1706 # Calculate the chi-squared value. 1707 if model: 1708 # Make a function call. 1709 chi2 = model.func(param_vector) 1710 1711 # Store the global chi-squared value. 1712 cdp.chi2 = chi2 1713 1714 # Store the back-calculated data. 1715 self._minimise_bc_data(model) 1716 1717 # Calculate the RDC Q-factors. 1718 if 'rdc' in data_types: 1719 rdc.q_factors() 1720 1721 # Calculate the PCS Q-factors. 1722 if 'pcs' in data_types: 1723 pcs.q_factors() 1724 1725 # NOE potential. 1726 if hasattr(cdp, 'noe_restraints'): 1727 # Init some numpy arrays. 1728 num_restraints = len(cdp.noe_restraints) 1729 dist = zeros(num_restraints, float64) 1730 pot = zeros(num_restraints, float64) 1731 lower = zeros(num_restraints, float64) 1732 upper = zeros(num_restraints, float64) 1733 1734 # Loop over the NOEs. 1735 for i in range(num_restraints): 1736 # Create arrays of the NOEs. 1737 lower[i] = cdp.noe_restraints[i][2] 1738 upper[i] = cdp.noe_restraints[i][3] 1739 1740 # Calculate the average distances, using -6 power averaging. 1741 dist[i] = self._calc_ave_dist(cdp.noe_restraints[i][0], cdp.noe_restraints[i][1], exp=-6) 1742 1743 # Calculate the quadratic potential. 1744 quad_pot(dist, pot, lower, upper) 1745 1746 # Store the distance and potential information. 1747 cdp.ave_dist = [] 1748 cdp.quad_pot = [] 1749 for i in range(num_restraints): 1750 cdp.ave_dist.append([cdp.noe_restraints[i][0], cdp.noe_restraints[i][1], dist[i]]) 1751 cdp.quad_pot.append([cdp.noe_restraints[i][0], cdp.noe_restraints[i][1], pot[i]])
1752 1753
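# Illustrative sketch (standalone, not the quad_pot() function imported above): the -6
# power distance averaging over the N states and a flat-bottom quadratic penalty of the
# same general form, using made-up distances and restraint bounds.
r_states = [3.0, 3.5, 4.2]                               # one NOE distance per state (Angstrom)
ave = (sum(r**-6 for r in r_states) / len(r_states))**(-1.0/6.0)

lower, upper = 1.8, 3.2                                  # flat-bottom restraint bounds (Angstrom)
if ave < lower:
    pot = (ave - lower)**2
elif ave > upper:
    pot = (ave - upper)**2
else:
    pot = 0.0
print("averaged distance: %.3f, potential: %.3f" % (ave, pot))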
1754 - def create_mc_data(self, data_id=None):
1755 """Create the Monte Carlo data by back-calculation. 1756 1757 @keyword data_id: The list of spin ID, data type, and alignment ID, as yielded by the base_data_loop() generator method. 1758 @type data_id: list of str 1759 @return: The Monte Carlo simulation data. 1760 @rtype: list of floats 1761 """ 1762 1763 # Initialise the MC data structure. 1764 mc_data = [] 1765 1766 # Alias the spin or interatomic data container. 1767 container = data_id[0] 1768 1769 # RDC data. 1770 if data_id[1] == 'rdc' and hasattr(container, 'rdc'): 1771 # Does back-calculated data exist? 1772 if not hasattr(container, 'rdc_bc'): 1773 self.calculate() 1774 1775 # The data. 1776 if not hasattr(container, 'rdc_bc') or not data_id[2] in container.rdc_bc: 1777 data = None 1778 else: 1779 data = container.rdc_bc[data_id[2]] 1780 1781 # Append the data. 1782 mc_data.append(data) 1783 1784 # NOESY data. 1785 elif data_id[1] == 'noesy' and hasattr(container, 'noesy'): 1786 # Does back-calculated data exist? 1787 if not hasattr(container, 'noesy_bc'): 1788 self.calculate() 1789 1790 # Append the data. 1791 mc_data.append(container.noesy_bc) 1792 1793 # PCS data. 1794 elif data_id[1] == 'pcs' and hasattr(container, 'pcs'): 1795 # Does back-calculated data exist? 1796 if not hasattr(container, 'pcs_bc'): 1797 self.calculate() 1798 1799 # The data. 1800 if not hasattr(container, 'pcs_bc') or not data_id[2] in container.pcs_bc: 1801 data = None 1802 else: 1803 data = container.pcs_bc[data_id[2]] 1804 1805 # Append the data. 1806 mc_data.append(data) 1807 1808 # Return the data. 1809 return mc_data
1810 1811 1812 default_value_doc = Desc_container("N-state model default values") 1813 _table = uf_tables.add_table(label="table: N-state default values", caption="N-state model default values.") 1814 _table.add_headings(["Data type", "Object name", "Value"]) 1815 _table.add_row(["Probabilities", "'p0', 'p1', 'p2', ..., 'pN'", "1/N"]) 1816 _table.add_row(["Euler angle alpha", "'alpha0', 'alpha1', ...", "(c+1) * pi / (N+1)"]) 1817 _table.add_row(["Euler angle beta", "'beta0', 'beta1', ...", "(c+1) * pi / (N+1)"]) 1818 _table.add_row(["Euler angle gamma", "'gamma0', 'gamma1', ...", "(c+1) * pi / (N+1)"]) 1819 default_value_doc.add_table(_table.label) 1820 default_value_doc.add_paragraph("In this table, N is the total number of states and c is the index of a given state ranging from 0 to N-1. The default probabilities are all set to be equal whereas the angles are given a range of values so that no 2 states are equal at the start of optimisation.") 1821 default_value_doc.add_paragraph("Note that setting the probability for state N will do nothing as it is equal to one minus all the other probabilities.") 1822
1823 - def default_value(self, param):
1824 """The default N-state model parameter values. 1825 1826 @param param: The N-state model parameter. 1827 @type param: str 1828 @return: The default value. 1829 @rtype: float 1830 """ 1831 1832 # Split the parameter into its base name and index. 1833 name = self.return_data_name(param) 1834 index = self._param_model_index(param) 1835 1836 # The number of states as a float. 1837 N = float(cdp.N) 1838 1839 # Probability. 1840 if name == 'probs': 1841 return 1.0 / N 1842 1843 # Euler angles. 1844 elif name == 'alpha' or name == 'beta' or name == 'gamma': 1845 return (float(index)+1) * pi / (N+1.0)
1846 1847
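# Illustrative sketch (standalone): the default values documented in the table above,
# evaluated for a hypothetical N = 4 state model.
from math import pi
N = 4.0
print("default probability: %s" % (1.0 / N))
for c in range(int(N)):
    print("default angle for state %i: %s" % (c, (c + 1) * pi / (N + 1.0)))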
1848 - def grid_search(self, lower=None, upper=None, inc=None, constraints=False, verbosity=0, sim_index=None):
1849 """The grid search function. 1850 1851 @param lower: The lower bounds of the grid search which must be equal in length to the number of 1852 parameters in the model. 1853 @type lower: array of numbers 1854 @param upper: The upper bounds of the grid search which must be equal in length to the number of 1855 parameters in the model. 1856 @type upper: array of numbers 1857 @param inc: The increments for each dimension of the space for the grid search. The 1858 number of elements in the array must equal the number of parameters 1859 in the model. 1860 @type inc: array of int 1861 @param constraints: If True, constraints are applied during the grid search (eliminating parts 1862 of the grid). If False, no constraints are used. 1863 @type constraints: bool 1864 @param verbosity: A flag specifying the amount of information to print. The higher the 1865 value, the greater the verbosity. 1866 @type verbosity: int 1867 """ 1868 1869 # Test if the N-state model has been set up. 1870 if not hasattr(cdp, 'model'): 1871 raise RelaxNoModelError('N-state') 1872 1873 # The number of parameters. 1874 n = self._param_num() 1875 1876 # Make sure that the length of the parameter array is > 0. 1877 if n == 0: 1878 print("Cannot run a grid search on a model with zero parameters, skipping the grid search.") 1879 return 1880 1881 # Test the grid search options. 1882 self.test_grid_ops(lower=lower, upper=upper, inc=inc, n=n) 1883 1884 # If inc is a single int, convert it into an array of that value. 1885 if isinstance(inc, int): 1886 inc = [inc]*n 1887 1888 # Set up the default bounds. 1889 if not lower: 1890 # Init. 1891 lower = [] 1892 upper = [] 1893 1894 # Loop over the parameters. 1895 for i in range(n): 1896 # i is in the parameter array. 1897 if i < len(cdp.params): 1898 # Probabilities (default values). 1899 if search('^p', cdp.params[i]): 1900 lower.append(0.0) 1901 upper.append(1.0) 1902 1903 # Angles (default values). 1904 if search('^alpha', cdp.params[i]) or search('^gamma', cdp.params[i]): 1905 lower.append(0.0) 1906 upper.append(2*pi) 1907 elif search('^beta', cdp.params[i]): 1908 lower.append(0.0) 1909 upper.append(pi) 1910 1911 # The paramagnetic centre. 1912 elif hasattr(cdp, 'paramag_centre_fixed') and not cdp.paramag_centre_fixed and (n - i) <= 3: 1913 lower.append(-100) 1914 upper.append(100) 1915 1916 # Otherwise this must be an alignment tensor component. 1917 else: 1918 lower.append(-1e-3) 1919 upper.append(1e-3) 1920 1921 # Minimisation. 1922 self.minimise(min_algor='grid', lower=lower, upper=upper, inc=inc, constraints=constraints, verbosity=verbosity, sim_index=sim_index)
1923 1924
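# Illustrative sketch (standalone, not the relax API): the default grid bounds assembled
# by grid_search() above for a hypothetical 2-state 'population' model with a single
# alignment tensor - one probability in [0, 1] followed by five tensor components in
# [-1e-3, 1e-3].
params = ['p0']                      # the model parameter list
n_tensor_params = 5                  # Axx, Ayy, Axy, Axz, Ayz
n = len(params) + n_tensor_params

lower, upper = [], []
for i in range(n):
    if i < len(params) and params[i].startswith('p'):
        lower.append(0.0)
        upper.append(1.0)
    else:
        lower.append(-1e-3)
        upper.append(1e-3)
print(lower)
print(upper)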
1925 - def is_spin_param(self, name):
1926 """Determine whether the given parameter is spin specific. 1927 1928 @param name: The name of the parameter. 1929 @type name: str 1930 @return: True if the parameter is spin specific, False otherwise. 1931 @rtype: bool 1932 """ 1933 1934 # Spin specific parameters. 1935 if name in ['r', 'heteronuc_type', 'proton_type']: 1936 return True 1937 1938 # All other parameters are global. 1939 return False
1940 1941
1942 - def map_bounds(self, param, spin_id=None):
1943 """Create bounds for the OpenDX mapping function. 1944 1945 @param param: The name of the parameter to return the lower and upper bounds of. 1946 @type param: str 1947 @param spin_id: The spin identification string (unused). 1948 @type spin_id: None 1949 @return: The lower and upper bounds of the parameter. 1950 @rtype: list of float 1951 """ 1952 1953 # Paramagnetic centre. 1954 if search('^paramag_[xyz]$', param): 1955 return [-100.0, 100.0]
1956 1957
1958 - def minimise(self, min_algor=None, min_options=None, func_tol=None, grad_tol=None, max_iterations=None, constraints=False, scaling=True, verbosity=0, sim_index=None, lower=None, upper=None, inc=None):
1959 """Minimisation function. 1960 1961 @param min_algor: The minimisation algorithm to use. 1962 @type min_algor: str 1963 @param min_options: An array of options to be used by the minimisation algorithm. 1964 @type min_options: array of str 1965 @param func_tol: The function tolerance which, when reached, terminates optimisation. Setting this to None turns off the check. 1966 @type func_tol: None or float 1967 @param grad_tol: The gradient tolerance which, when reached, terminates optimisation. Setting this to None turns off the check. 1968 @type grad_tol: None or float 1969 @param max_iterations: The maximum number of iterations for the algorithm. 1970 @type max_iterations: int 1971 @param constraints: If True, constraints are used during optimisation. 1972 @type constraints: bool 1973 @param scaling: If True, diagonal scaling is enabled during optimisation to allow the problem to be better conditioned. 1974 @type scaling: bool 1975 @param verbosity: A flag specifying the amount of information to print. The higher the value, the greater the verbosity. 1976 @type verbosity: int 1977 @param sim_index: The index of the simulation to optimise. This should be None if normal optimisation is desired. 1978 @type sim_index: None or int 1979 @keyword lower: The lower bounds of the grid search which must be equal in length to the number of parameters in the model. This optional argument is only used when doing a grid search. 1980 @type lower: array of numbers 1981 @keyword upper: The upper bounds of the grid search which must be equal in length to the number of parameters in the model. This optional argument is only used when doing a grid search. 1982 @type upper: array of numbers 1983 @keyword inc: The increments for each dimension of the space for the grid search. The number of elements in the array must equal the number of parameters in the model. This argument is only used when doing a grid search. 1984 @type inc: array of int 1985 """ 1986 1987 # Set up the target function for direct calculation. 1988 model, param_vector, data_types, scaling_matrix = self._target_fn_setup(sim_index=sim_index, scaling=scaling) 1989 1990 # Nothing to do! 1991 if not len(param_vector): 1992 warn(RelaxWarning("The model has no parameters, minimisation cannot be performed.")) 1993 return 1994 1995 # Right, constraints cannot be used for the 'fixed' model. 1996 if constraints and cdp.model == 'fixed': 1997 warn(RelaxWarning("Turning constraints off. These cannot be used for the 'fixed' model.")) 1998 constraints = False 1999 2000 # Pop out the Method of Multipliers algorithm. 2001 if min_algor == 'Method of Multipliers': 2002 min_algor = min_options[0] 2003 min_options = min_options[1:] 2004 2005 # And constraints absolutely must be used for the 'population' model. 2006 if not constraints and cdp.model == 'population': 2007 warn(RelaxWarning("Turning constraints on. These absolutely must be used for the 'population' model.")) 2008 constraints = True 2009 2010 # Add the Method of Multipliers algorithm. 2011 min_options = (min_algor,) + min_options 2012 min_algor = 'Method of Multipliers' 2013 2014 # Linear constraints. 2015 if constraints: 2016 A, b = self._linear_constraints(data_types=data_types, scaling_matrix=scaling_matrix) 2017 else: 2018 A, b = None, None 2019 2020 # Grid search. 2021 if search('^[Gg]rid', min_algor): 2022 # Scaling. 2023 if scaling: 2024 for i in range(len(param_vector)): 2025 lower[i] = lower[i] / scaling_matrix[i, i] 2026 upper[i] = upper[i] / scaling_matrix[i, i] 2027 2028 # The search.
2029 results = grid(func=model.func, args=(), num_incs=inc, lower=lower, upper=upper, A=A, b=b, verbosity=verbosity) 2030 2031 # Unpack the results. 2032 param_vector, func, iter_count, warning = results 2033 f_count = iter_count 2034 g_count = 0.0 2035 h_count = 0.0 2036 2037 # Minimisation. 2038 else: 2039 results = generic_minimise(func=model.func, dfunc=model.dfunc, d2func=model.d2func, args=(), x0=param_vector, min_algor=min_algor, min_options=min_options, func_tol=func_tol, grad_tol=grad_tol, maxiter=max_iterations, A=A, b=b, full_output=1, print_flag=verbosity) 2040 2041 # Unpack the results. 2042 if results == None: 2043 return 2044 param_vector, func, iter_count, f_count, g_count, h_count, warning = results 2045 2046 # Catch infinite chi-squared values. 2047 if isInf(func): 2048 raise RelaxInfError('chi-squared') 2049 2050 # Catch chi-squared values of NaN. 2051 if isNaN(func): 2052 raise RelaxNaNError('chi-squared') 2053 2054 # Make a last function call to update the back-calculated RDC and PCS structures to the optimal values. 2055 chi2 = model.func(param_vector) 2056 2057 # Scaling. 2058 if scaling: 2059 param_vector = dot(scaling_matrix, param_vector) 2060 2061 # Disassemble the parameter vector. 2062 self._disassemble_param_vector(param_vector=param_vector, data_types=data_types, sim_index=sim_index) 2063 2064 # Monte Carlo minimisation statistics. 2065 if sim_index != None: 2066 # Chi-squared statistic. 2067 cdp.chi2_sim[sim_index] = func 2068 2069 # Iterations. 2070 cdp.iter_sim[sim_index] = iter_count 2071 2072 # Function evaluations. 2073 cdp.f_count_sim[sim_index] = f_count 2074 2075 # Gradient evaluations. 2076 cdp.g_count_sim[sim_index] = g_count 2077 2078 # Hessian evaluations. 2079 cdp.h_count_sim[sim_index] = h_count 2080 2081 # Warning. 2082 cdp.warning_sim[sim_index] = warning 2083 2084 # Normal statistics. 2085 else: 2086 # Chi-squared statistic. 2087 cdp.chi2 = func 2088 2089 # Iterations. 2090 cdp.iter = iter_count 2091 2092 # Function evaluations. 2093 cdp.f_count = f_count 2094 2095 # Gradient evaluations. 2096 cdp.g_count = g_count 2097 2098 # Hessian evaluations. 2099 cdp.h_count = h_count 2100 2101 # Warning. 2102 cdp.warning = warning 2103 2104 # Statistical analysis. 2105 if sim_index == None and ('rdc' in data_types or 'pcs' in data_types): 2106 # Get the final back-calculated data (for the Q-factor calculations below). 2107 self._minimise_bc_data(model) 2108 2109 # Calculate the RDC Q-factors. 2110 if 'rdc' in data_types: 2111 rdc.q_factors() 2112 2113 # Calculate the PCS Q-factors. 2114 if 'pcs' in data_types: 2115 pcs.q_factors()
2116 2117
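# Illustrative sketch (standalone): the diagonal scaling applied to the grid bounds in
# minimise() above - each bound is divided by the matching diagonal element so that the
# optimisation runs in a better conditioned, scaled parameter space.
from numpy import array, identity
scaling_matrix = identity(3)
scaling_matrix[0, 0] = 1e-4                  # e.g. the scale of an alignment tensor component
lower = array([-1e-3, 0.0, 0.0])
upper = array([1e-3, 1.0, 1.0])
for i in range(len(lower)):
    lower[i] = lower[i] / scaling_matrix[i, i]
    upper[i] = upper[i] / scaling_matrix[i, i]
print(lower)                                 # [-10.   0.   0.]
print(upper)                                 # [ 10.   1.   1.]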
2118 - def model_statistics(self, model_info=None, spin_id=None, global_stats=None):
2119 """Return the k, n, and chi2 model statistics. 2120 2121 k - number of parameters. 2122 n - number of data points. 2123 chi2 - the chi-squared value. 2124 2125 2126 @keyword model_info: The data returned from model_loop() (unused). 2127 @type model_info: None 2128 @keyword spin_id: The spin identification string. This is ignored in the N-state model. 2129 @type spin_id: None or str 2130 @keyword global_stats: A parameter which determines if global or local statistics are returned. For the N-state model, this argument is ignored. 2131 @type global_stats: None or bool 2132 @return: The optimisation statistics, in tuple format, of the number of parameters (k), the number of data points (n), and the chi-squared value (chi2). 2133 @rtype: tuple of (int, int, float) 2134 """ 2135 2136 # Return the values. 2137 return self._param_num(), self._num_data_points(), cdp.chi2
2138 2139 2140 return_data_name_doc = Desc_container("N-state model data type string matching patterns") 2141 _table = uf_tables.add_table(label="table: N-state data type patterns", caption="N-state model data type string matching patterns.") 2142 _table.add_headings(["Data type", "Object name", "Patterns"]) 2143 _table.add_row(["Probabilities", "'probs'", "'p0', 'p1', 'p2', ..., 'pN'"]) 2144 _table.add_row(["Euler angle alpha", "'alpha'", "'alpha0', 'alpha1', ..."]) 2145 _table.add_row(["Euler angle beta", "'beta'", "'beta0', 'beta1', ..."]) 2146 _table.add_row(["Euler angle gamma", "'gamma'", "'gamma0', 'gamma1', ..."]) 2147 _table.add_row(["Bond length", "'r'", "'^r$' or '[Bb]ond[ -_][Ll]ength'"]) 2148 _table.add_row(["Heteronucleus type", "'heteronuc_type'", "'^[Hh]eteronucleus$'"]) 2149 _table.add_row(["Proton type", "'proton_type'", "'^[Pp]roton$'"]) 2150 return_data_name_doc.add_table(_table.label) 2151 return_data_name_doc.add_paragraph("The objects corresponding to the object names are lists (or arrays) with each element corresponding to each state.") 2152
2153 - def return_data_name(self, param):
2154 """Return a unique identifying string for the N-state model parameter. 2155 2156 @param param: The N-state model parameter. 2157 @type param: str 2158 @return: The unique parameter identifying string. 2159 @rtype: str 2160 """ 2161 2162 # Probability. 2163 if search('^p[0-9]*$', param): 2164 return 'probs' 2165 2166 # Alpha Euler angle. 2167 if search('^alpha', param): 2168 return 'alpha' 2169 2170 # Beta Euler angle. 2171 if search('^beta', param): 2172 return 'beta' 2173 2174 # Gamma Euler angle. 2175 if search('^gamma', param): 2176 return 'gamma' 2177 2178 # Bond length. 2179 if search('^r$', param) or search('[Bb]ond[ -_][Ll]ength', param): 2180 return 'r' 2181 2182 # Heteronucleus type. 2183 if param == 'heteronuc_type': 2184 return 'heteronuc_type' 2185 2186 # Proton type. 2187 if param == 'proton_type': 2188 return 'proton_type' 2189 2190 # Paramagnetic centre. 2191 if search('^paramag_[xyz]$', param): 2192 return param
2193 2194
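# Illustrative sketch (standalone): the pattern matching performed by return_data_name()
# above, reusing the same regular expressions on a few example parameter strings.
from re import search
for param in ['p2', 'alpha0', 'beta1', 'gamma10', 'r', 'paramag_x']:
    if search('^p[0-9]*$', param):
        print("%s -> probs" % param)
    elif search('^alpha', param):
        print("%s -> alpha" % param)
    elif search('^beta', param):
        print("%s -> beta" % param)
    elif search('^gamma', param):
        print("%s -> gamma" % param)
    elif search('^r$', param):
        print("%s -> r" % param)
    elif search('^paramag_[xyz]$', param):
        print("%s -> %s" % (param, param))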
2195 - def return_error(self, data_id=None):
2196 """Create and return the spin or interatomic data container specific Monte Carlo error structure. 2197 2198 @keyword data_id: The list of spin ID, data type, and alignment ID, as yielded by the base_data_loop() generator method. 2199 @type data_id: list of str 2200 @return: The Monte Carlo simulation data errors. 2201 @rtype: list of floats 2202 """ 2203 2204 # Initialise the MC data structure. 2205 mc_errors = [] 2206 2207 # Alias the spin or interatomic data container. 2208 container = data_id[0] 2209 2210 # Skip deselected spins. 2211 if data_id[1] == 'pcs' and not container.select: 2212 return 2213 2214 # RDC data. 2215 if data_id[1] == 'rdc' and hasattr(container, 'rdc'): 2216 # Do errors exist? 2217 if not hasattr(container, 'rdc_err'): 2218 raise RelaxError("The RDC errors are missing for the spin pair '%s' and '%s'." % (container.spin_id1, container.spin_id2)) 2219 2220 # Append the data. 2221 mc_errors.append(container.rdc_err[data_id[2]]) 2222 2223 # NOESY data. 2224 elif data_id[1] == 'noesy' and hasattr(container, 'noesy'): 2225 # Do errors exist? 2226 if not hasattr(container, 'noesy_err'): 2227 raise RelaxError("The NOESY errors are missing for the spin pair '%s' and '%s'." % (container.spin_id1, container.spin_id2)) 2228 2229 # Append the data. 2230 mc_errors.append(container.noesy_err) 2231 2232 # PCS data. 2233 elif data_id[1] == 'pcs' and hasattr(container, 'pcs'): 2234 # Do errors exist? 2235 if not hasattr(container, 'pcs_err'): 2236 raise RelaxError("The PCS errors are missing for spin '%s'." % data_id[0]) 2237 2238 # Append the data. 2239 mc_errors.append(container.pcs_err[data_id[2]]) 2240 2241 # Return the errors. 2242 return mc_errors
2243 2244
2245 - def return_grace_string(self, param):
2246 """Return the Grace string representation of the parameter. 2247 2248 This is used for axis labelling. 2249 2250 @param param: The specific analysis parameter. 2251 @type param: str 2252 @return: The Grace string representation of the parameter. 2253 @rtype: str 2254 """ 2255 2256 # The measured PCS. 2257 if param == 'pcs': 2258 return "Measured PCS" 2259 2260 # The back-calculated PCS. 2261 if param == 'pcs_bc': 2262 return "Back-calculated PCS" 2263 2264 # The measured RDC. 2265 if param == 'rdc': 2266 return "Measured RDC" 2267 2268 # The back-calculated RDC. 2269 if param == 'rdc_bc': 2270 return "Back-calculated RDC"
2271 2272
2273 - def return_units(self, param):
2274 """Return a string representing the parameter's units. 2275 2276 @param param: The name of the parameter to return the units string for. 2277 @type param: str 2278 @return: The parameter units string. 2279 @rtype: str 2280 """ 2281 2282 # PCSs. 2283 if param == 'pcs' or param == 'pcs_bc': 2284 return 'ppm' 2285 2286 # RDCs. 2287 if param == 'rdc' or param == 'rdc_bc': 2288 return 'Hz'
2289 2290 2291 set_doc = Desc_container("N-state model set details") 2292 set_doc.add_paragraph("Setting parameters for the N-state model is a little different from the other types of analyses as each state has a set of parameters with the same names as the other states. To set the parameters for a specific state c (ranging from 0 for the first to N-1 for the last), the number c should be added to the end of the parameter name. So the Euler angle gamma of the third state is specified using the string 'gamma2'.") 2293 2294
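# Illustrative sketch (standalone, a hypothetical helper rather than relax code): the
# state-suffix convention described in the paragraph above - the trailing number selects
# the state and the rest names the parameter, so 'gamma2' is the gamma Euler angle of the
# third state.
from re import search
def split_state_param(param):
    match = search('^([a-z]+)([0-9]+)$', param)
    if match:
        return match.group(1), int(match.group(2))
    return param, None
print(split_state_param('gamma2'))     # ('gamma', 2)
print(split_state_param('p0'))         # ('p', 0)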
2295 - def set_error(self, model_info, index, error):
2296 """Set the parameter errors. 2297 2298 @param model_info: The global model index originating from model_loop(). 2299 @type model_info: int 2300 @param index: The index of the parameter to set the errors for. 2301 @type index: int 2302 @param error: The error value. 2303 @type error: float 2304 """ 2305 2306 # Align parameters. 2307 names = ['Axx', 'Ayy', 'Axy', 'Axz', 'Ayz'] 2308 2309 # Alignment tensor parameters. 2310 if index < len(cdp.align_ids)*5: 2311 # The tensor and parameter index. 2312 param_index = index % 5 2313 tensor_index = (index - index % 5) / 5 2314 2315 # Set the error. 2316 tensor = align_tensor.return_tensor(index=tensor_index, skip_fixed=True) 2317 tensor.set(param=names[param_index], value=error, category='err') 2318 2319 # Return the object. 2320 return getattr(tensor, names[param_index]+'_err')
2321 2322
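# Illustrative sketch (standalone): how the flat parameter index used by set_error()
# above maps onto an alignment tensor number and one of its five components.
names = ['Axx', 'Ayy', 'Axy', 'Axz', 'Ayz']
for index in range(12):
    param_index = index % 5
    tensor_index = (index - index % 5) // 5          # floor division
    print("index %2i -> tensor %i, parameter %s" % (index, tensor_index, names[param_index]))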
2323 - def set_param_values(self, param=None, value=None, spin_id=None, force=True):
2324 """Set the N-state model parameter values. 2325 2326 @keyword param: The parameter name list. 2327 @type param: list of str 2328 @keyword value: The parameter value list. 2329 @type value: list 2330 @keyword spin_id: The spin identification string (unused). 2331 @type spin_id: None 2332 @keyword force: A flag which if True will cause current values to be overwritten. If False, a RelaxError will be raised if the parameter value is already set. 2333 @type force: bool 2334 """ 2335 2336 # Checks. 2337 arg_check.is_str_list(param, 'parameter name') 2338 arg_check.is_list(value, 'parameter value') 2339 2340 # Loop over the parameters. 2341 for i in range(len(param)): 2342 # Get the object's name. 2343 obj_name = self.return_data_name(param[i]) 2344 2345 # Is the parameter valid? 2346 if not obj_name: 2347 raise RelaxError("The parameter '%s' is not valid for this data pipe type." % param[i]) 2348 2349 # Set the indexed parameter. 2350 if obj_name in ['probs', 'alpha', 'beta', 'gamma']: 2351 # The index. 2352 index = self._param_model_index(param[i]) 2353 2354 # Set. 2355 obj = getattr(cdp, obj_name) 2356 obj[index] = value[i] 2357 2358 # The paramagnetic centre. 2359 elif search('^paramag_[xyz]$', obj_name): 2360 # Init. 2361 if not hasattr(cdp, 'paramagnetic_centre'): 2362 cdp.paramagnetic_centre = zeros(3, float64) 2363 2364 # Set the coordinate. 2365 if obj_name == 'paramag_x': 2366 index = 0 2367 elif obj_name == 'paramag_y': 2368 index = 1 2369 else: 2370 index = 2 2371 2372 # Set the value in Angstrom. 2373 cdp.paramagnetic_centre[index] = value[i] 2374 2375 # Set the spin parameters. 2376 else: 2377 for spin in spin_loop(spin_id): 2378 setattr(spin, obj_name, value[i])
2379 2380
2381 - def sim_init_values(self):
2382 """Initialise the Monte Carlo parameter values.""" 2383 2384 # Get the minimisation statistic object names. 2385 min_names = self.data_names(set='min') 2386 2387 # Alignments. 2388 if hasattr(cdp, 'align_tensors'): 2389 # The parameter names. 2390 names = ['Axx', 'Ayy', 'Axy', 'Axz', 'Ayz'] 2391 2392 # Loop over the alignments, adding the alignment tensor parameters to the tensor data container. 2393 for i in range(len(cdp.align_tensors)): 2394 # Fixed tensor. 2395 if cdp.align_tensors[i].fixed: 2396 continue 2397 2398 # Set up the number of simulations. 2399 cdp.align_tensors[i].set_sim_num(cdp.sim_number) 2400 2401 # Loop over all the parameter names, setting the initial simulation values to those of the parameter value. 2402 for object_name in names: 2403 for j in range(cdp.sim_number): 2404 cdp.align_tensors[i].set(param=object_name, value=deepcopy(getattr(cdp.align_tensors[i], object_name)), category='sim', sim_index=j) 2405 2406 # Loop over all the minimisation object names. 2407 for object_name in min_names: 2408 # Name for the simulation object. 2409 sim_object_name = object_name + '_sim' 2410 2411 # Create the simulation object. 2412 setattr(cdp, sim_object_name, []) 2413 2414 # Get the simulation object. 2415 sim_object = getattr(cdp, sim_object_name) 2416 2417 # Loop over the simulations. 2418 for j in range(cdp.sim_number): 2419 # Append None to fill the structure. 2420 sim_object.append(None)
2421 2422
2423 - def sim_pack_data(self, data_id, sim_data):
2424 """Pack the Monte Carlo simulation data. 2425 2426 @keyword data_id: The list of spin ID, data type, and alignment ID, as yielded by the base_data_loop() generator method. 2427 @type data_id: list of str 2428 @param sim_data: The Monte Carlo simulation data. 2429 @type sim_data: list of float 2430 """ 2431 2432 # Alias the spin or interatomic data container. 2433 container = data_id[0] 2434 2435 # RDC data. 2436 if data_id[1] == 'rdc' and hasattr(container, 'rdc'): 2437 # Initialise. 2438 if not hasattr(container, 'rdc_sim'): 2439 container.rdc_sim = {} 2440 2441 # Store the data structure. 2442 container.rdc_sim[data_id[2]] = [] 2443 for i in range(cdp.sim_number): 2444 container.rdc_sim[data_id[2]].append(sim_data[i][0]) 2445 2446 # NOESY data. 2447 elif data_id[1] == 'noesy' and hasattr(container, 'noesy'): 2448 # Store the data structure. 2449 container.noesy_sim = [] 2450 for i in range(cdp.sim_number): 2451 container.noesy_sim.append(sim_data[i][0]) 2452 2453 # PCS data. 2454 elif data_id[1] == 'pcs' and hasattr(container, 'pcs'): 2455 # Initialise. 2456 if not hasattr(container, 'pcs_sim'): 2457 container.pcs_sim = {} 2458 2459 # Store the data structure. 2460 container.pcs_sim[data_id[2]] = [] 2461 for i in range(cdp.sim_number): 2462 container.pcs_sim[data_id[2]].append(sim_data[i][0])
2463 2464
2465 - def sim_return_param(self, model_info, index):
2466 """Return the array of simulation parameter values. 2467 2468 @param model_info: The global model index originating from model_loop(). 2469 @type model_info: int 2470 @param index: The index of the parameter to return the array of values for. 2471 @type index: int 2472 @return: The array of simulation parameter values. 2473 @rtype: list of float 2474 """ 2475 2476 # Align parameters. 2477 names = ['Axx', 'Ayy', 'Axy', 'Axz', 'Ayz'] 2478 2479 # Alignment tensor parameters. 2480 if index < align_tensor.num_tensors(skip_fixed=True)*5: 2481 # The tensor and parameter index. 2482 param_index = index % 5 2483 tensor_index = (index - index % 5) / 5 2484 2485 # Return the simulation parameter array. 2486 tensor = align_tensor.return_tensor(index=tensor_index, skip_fixed=True) 2487 return getattr(tensor, names[param_index]+'_sim')
2488