Package specific_fns :: Module n_state_model

Source Code for Module specific_fns.n_state_model

   1  ############################################################################### 
   2  #                                                                             # 
   3  # Copyright (C) 2007-2012 Edward d'Auvergne                                   # 
   4  #                                                                             # 
   5  # This file is part of the program relax.                                     # 
   6  #                                                                             # 
   7  # relax is free software; you can redistribute it and/or modify               # 
   8  # it under the terms of the GNU General Public License as published by        # 
   9  # the Free Software Foundation; either version 2 of the License, or           # 
  10  # (at your option) any later version.                                         # 
  11  #                                                                             # 
  12  # relax is distributed in the hope that it will be useful,                    # 
  13  # but WITHOUT ANY WARRANTY; without even the implied warranty of              # 
  14  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the               # 
  15  # GNU General Public License for more details.                                # 
  16  #                                                                             # 
  17  # You should have received a copy of the GNU General Public License           # 
  18  # along with relax; if not, write to the Free Software                        # 
  19  # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA   # 
  20  #                                                                             # 
  21  ############################################################################### 
  22   
  23  # Module docstring. 
  24  """Module for the specific analysis of the N-state dynamic model.""" 
  25   
  26  # Python module imports. 
  27  from math import acos, cos, pi, sqrt 
  28  from minfx.generic import generic_minimise 
  29  from minfx.grid import grid 
  30  from numpy import array, dot, float64, identity, ones, zeros 
  31  from numpy.linalg import inv, norm 
  32  from re import search 
  33  from warnings import warn 
  34   
  35  # relax module imports. 
  36  from api_base import API_base 
  37  from api_common import API_common 
  38  import arg_check 
  39  from float import isNaN, isInf 
  40  import generic_fns 
  41  from generic_fns.align_tensor import all_tensors_fixed, get_tensor_object, num_tensors, return_tensor 
  42  from generic_fns.mol_res_spin import return_spin, spin_loop 
  43  from generic_fns import pcs, pipes, rdc 
  44  from generic_fns.structure.cones import Iso_cone 
  45  import generic_fns.structure.geometric 
  46  from generic_fns.structure.internal import Internal 
  47  import generic_fns.structure.mass 
  48  from maths_fns.n_state_model import N_state_opt 
  49  from maths_fns.potential import quad_pot 
  50  from maths_fns.rotation_matrix import two_vect_to_R, euler_to_R_zyz 
  51  from physical_constants import dipolar_constant, g1H, return_gyromagnetic_ratio 
  52  from relax_errors import RelaxError, RelaxInfError, RelaxModelError, RelaxNaNError, RelaxNoModelError, RelaxNoTensorError, RelaxNoValueError, RelaxProtonTypeError, RelaxSpinTypeError 
  53  from relax_io import open_write_file 
  54  from relax_warnings import RelaxWarning, RelaxDeselectWarning 
  55   
  56   
57 -class N_state_model(API_base, API_common):
58 """Class containing functions for the N-state model.""" 59
60 - def __init__(self):
61 """Initialise the class by placing API_common methods into the API.""" 62 63 # Place methods into the API. 64 self.model_loop = self._model_loop_single_global 65 self.overfit_deselect = self._overfit_deselect_dummy 66 self.return_conversion_factor = self._return_no_conversion_factor 67 self.set_selected_sim = self._set_selected_sim_global 68 self.sim_return_selected = self._sim_return_selected_global 69 self.test_grid_ops = self._test_grid_ops_general
70 71
72 - def _assemble_param_vector(self, sim_index=None):
73 """Assemble all the parameters of the model into a single array. 74 75 @param sim_index: The index of the simulation to optimise. This should be None if 76 normal optimisation is desired. 77 @type sim_index: None or int 78 @return: The parameter vector used for optimisation. 79 @rtype: numpy array 80 """ 81 82 # Test if the model is selected. 83 if not hasattr(cdp, 'model') or not isinstance(cdp.model, str): 84 raise RelaxNoModelError 85 86 # Determine the data type. 87 data_types = self._base_data_types() 88 89 # Initialise the parameter vector. 90 param_vector = [] 91 92 # A RDC or PCS data type requires the alignment tensors to be at the start of the parameter vector (unless the tensors are fixed). 93 if ('rdc' in data_types or 'pcs' in data_types) and not all_tensors_fixed(): 94 # Loop over the alignments, adding the alignment tensor parameters to the parameter vector. 95 for i in xrange(len(cdp.align_tensors)): 96 # No alignment ID, so skip the tensor as it will not be optimised. 97 if cdp.align_tensors[i].name not in cdp.align_ids: 98 continue 99 100 # Fixed tensor. 101 if cdp.align_tensors[i].fixed: 102 continue 103 104 # Add the parameters. 105 param_vector = param_vector + list(cdp.align_tensors[i].A_5D) 106 107 # Monte Carlo simulation data structures. 108 if sim_index != None: 109 # Populations. 110 if cdp.model in ['2-domain', 'population']: 111 probs = cdp.probs_sim[sim_index] 112 113 # Euler angles. 114 if cdp.model == '2-domain': 115 alpha = cdp.alpha_sim[sim_index] 116 beta = cdp.beta_sim[sim_index] 117 gamma = cdp.gamma_sim[sim_index] 118 119 # Normal data structures. 120 else: 121 # Populations. 122 if cdp.model in ['2-domain', 'population']: 123 probs = cdp.probs 124 125 # Euler angles. 126 if cdp.model == '2-domain': 127 alpha = cdp.alpha 128 beta = cdp.beta 129 gamma = cdp.gamma 130 131 # The probabilities (exclude that of state N). 132 if cdp.model in ['2-domain', 'population']: 133 param_vector = param_vector + probs[0:-1] 134 135 # The Euler angles. 136 if cdp.model == '2-domain': 137 for i in xrange(cdp.N): 138 param_vector.append(alpha[i]) 139 param_vector.append(beta[i]) 140 param_vector.append(gamma[i]) 141 142 # The paramagnetic centre. 143 if hasattr(cdp, 'paramag_centre_fixed') and not cdp.paramag_centre_fixed: 144 if not hasattr(cdp, 'paramagnetic_centre'): 145 param_vector.append(0.0) 146 param_vector.append(0.0) 147 param_vector.append(0.0) 148 149 else: 150 param_vector.append(cdp.paramagnetic_centre[0]) 151 param_vector.append(cdp.paramagnetic_centre[1]) 152 param_vector.append(cdp.paramagnetic_centre[2]) 153 154 # Convert all None values to zero (to avoid conversion to NaN). 155 for i in xrange(len(param_vector)): 156 if param_vector[i] == None: 157 param_vector[i] = 0.0 158 159 # Return a numpy arrary. 160 return array(param_vector, float64)
161 162
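As a standalone illustration of the vector assembled above (and disassembled further below), the layout is: the 5 parameters (Axx, Ayy, Axy, Axz, Ayz) of each optimised alignment tensor, then the N-1 state probabilities, then 3 Euler angles per state for the '2-domain' model, and finally the paramagnetic centre if it is not fixed. A minimal sketch with hypothetical numbers, independent of the relax data store:

# Sketch of the parameter vector layout described above (hypothetical values,
# not taken from a relax data pipe).
from numpy import array, float64

tensor_params = [[2.3e-4, -1.1e-4, 5.0e-5, 0.0, 3.2e-5]]   # One unfixed alignment tensor.
probs = [0.2, 0.3, 0.1]                                     # p0..p2 of an N=4 population model.
centre = [1.0, 2.5, -0.7]                                   # Optimised paramagnetic centre position.

param_vector = []
for A in tensor_params:
    param_vector += list(A)
param_vector += probs
param_vector += centre

print(array(param_vector, float64))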
163 - def _assemble_scaling_matrix(self, data_types=None, scaling=True):
164 """Create and return the scaling matrix. 165 166 @keyword data_types: The base data types used in the optimisation. This list can contain 167 the elements 'rdc', 'pcs' or 'tensor'. 168 @type data_types: list of str 169 @keyword scaling: If False, then the identity matrix will be returned. 170 @type scaling: bool 171 @return: The square and diagonal scaling matrix. 172 @rtype: numpy rank-2 array 173 """ 174 175 # Initialise. 176 scaling_matrix = identity(self._param_num(), float64) 177 178 # Return the identity matrix. 179 if not scaling: 180 return scaling_matrix 181 182 # Starting point of the populations. 183 pop_start = 0 184 if ('rdc' in data_types or 'pcs' in data_types) and not all_tensors_fixed(): 185 # Loop over the alignments. 186 tensor_num = 0 187 for i in range(len(cdp.align_tensors)): 188 # Fixed tensor. 189 if cdp.align_tensors[i].fixed: 190 continue 191 192 # Add the 5 alignment parameters. 193 pop_start = pop_start + 5 194 195 # The alignment parameters. 196 for j in range(5): 197 scaling_matrix[5*tensor_num + j, 5*tensor_num + j] = 1.0 198 199 # Increase the tensor number. 200 tensor_num += 1 201 202 # Loop over the populations, and set the scaling factor. 203 if cdp.model in ['2-domain', 'population']: 204 factor = 0.1 205 for i in xrange(pop_start, pop_start + (cdp.N-1)): 206 scaling_matrix[i, i] = factor 207 208 # The paramagnetic centre. 209 if hasattr(cdp, 'paramag_centre_fixed') and not cdp.paramag_centre_fixed: 210 for i in range(-3, 0): 211 scaling_matrix[i, i] = 1e2 212 213 # Return the matrix. 214 return scaling_matrix
215 216
217 - def _base_data_types(self):
218 """Determine all the base data types. 219 220 The base data types can include:: 221 - 'rdc', residual dipolar couplings. 222 - 'pcs', pseudo-contact shifts. 223 - 'noesy', NOE restraints. 224 - 'tensor', alignment tensors. 225 226 @return: A list of all the base data types. 227 @rtype: list of str 228 """ 229 230 # Array of data types. 231 list = [] 232 233 # RDC search. 234 for spin in spin_loop(): 235 if hasattr(spin, 'rdc'): 236 list.append('rdc') 237 break 238 239 # PCS search. 240 for spin in spin_loop(): 241 if hasattr(spin, 'pcs'): 242 list.append('pcs') 243 break 244 245 # Alignment tensor search. 246 if not ('rdc' in list or 'pcs' in list) and hasattr(cdp, 'align_tensors'): 247 list.append('tensor') 248 249 # NOESY data search. 250 if hasattr(cdp, 'noe_restraints'): 251 list.append('noesy') 252 253 # No data is present. 254 if not list: 255 raise RelaxError("Neither RDC, PCS, NOESY nor alignment tensor data is present.") 256 257 # Return the list. 258 return list
259 260
261 - def _calc_ave_dist(self, atom1, atom2, exp=1):
262 """Calculate the average distances. 263 264 The formula used is:: 265 266 _N_ 267 / 1 \ \ 1/exp 268 <r> = | - > |p1i - p2i|^exp | 269 \ N /__ / 270 i 271 272 where i are the members of the ensemble, N is the total number of structural models, and p1 273 and p2 at the two atom positions. 274 275 276 @param atom1: The atom identification string of the first atom. 277 @type atom1: str 278 @param atom2: The atom identification string of the second atom. 279 @type atom2: str 280 @keyword exp: The exponent used for the averaging, e.g. 1 for linear averaging and -6 for 281 r^-6 NOE averaging. 282 @type exp: int 283 @return: The average distance between the two atoms. 284 @rtype: float 285 """ 286 287 # Get the spin containers. 288 spin1 = return_spin(atom1) 289 spin2 = return_spin(atom2) 290 291 # Loop over each model. 292 num_models = len(spin1.pos) 293 ave_dist = 0.0 294 for i in range(num_models): 295 # Distance to the minus sixth power. 296 dist = norm(spin1.pos[i] - spin2.pos[i]) 297 ave_dist = ave_dist + dist**(exp) 298 299 # Average. 300 ave_dist = ave_dist / num_models 301 302 # The exponent. 303 ave_dist = ave_dist**(1.0/exp) 304 305 # Return the average distance. 306 return ave_dist
307 308
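The averaging formula in the _calc_ave_dist() docstring can be reproduced directly with numpy. A minimal sketch, assuming two (N, 3) arrays of ensemble member positions rather than relax spin containers:

# Sketch of the <r> = (1/N * sum |p1i - p2i|^exp)^(1/exp) average used above.
from numpy import array
from numpy.linalg import norm

def ave_dist(pos1, pos2, exp=1):
    # Average the inter-atomic distance over the ensemble members.
    dists = [norm(p1 - p2)**exp for p1, p2 in zip(pos1, pos2)]
    return (sum(dists) / len(dists))**(1.0/exp)

pos1 = array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.1]])
pos2 = array([[3.0, 0.0, 0.0], [3.2, 0.0, 0.0]])
print(ave_dist(pos1, pos2, exp=-6))   # r^-6 NOE-style averaging.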
309 - def _CoM(self, pivot_point=None, centre=None):
310 """Centre of mass analysis. 311 312 This function does an analysis of the centre of mass (CoM) of the N states. This includes 313 calculating the order parameter associated with the pivot-CoM vector, and the associated 314 cone of motions. The pivot_point argument must be supplied. If centre is None, then the 315 CoM will be calculated from the selected parts of the loaded structure. Otherwise it will 316 be set to the centre arg. 317 318 @param pivot_point: The pivot point in the structural file(s). 319 @type pivot_point: list of float of length 3 320 @param centre: The optional centre of mass vector. 321 @type centre: list of float of length 3 322 """ 323 324 # Test if the current data pipe exists. 325 pipes.test() 326 327 # Set the pivot point. 328 cdp.pivot_point = pivot_point 329 330 # The centre has been supplied. 331 if centre: 332 cdp.CoM = centre 333 334 # Calculate from the structure file. 335 else: 336 cdp.CoM = generic_fns.structure.mass.centre_of_mass() 337 338 # Calculate the vector between the pivot and CoM points. 339 cdp.pivot_CoM = array(cdp.CoM, float64) - array(cdp.pivot_point, float64) 340 341 # Calculate the unit vector between the pivot and CoM points. 342 unit_vect = cdp.pivot_CoM / norm(cdp.pivot_CoM) 343 344 # Initilise some data structures. 345 R = zeros((3, 3), float64) 346 vectors = zeros((cdp.N, 3), float64) 347 348 # Loop over the N states. 349 for c in xrange(cdp.N): 350 # Generate the rotation matrix. 351 euler_to_R_zyz(cdp.alpha[c], cdp.beta[c], cdp.gamma[c], R) 352 353 # Rotate the unit vector. 354 vectors[c] = dot(R, unit_vect) 355 356 # Multiply by the probability. 357 vectors[c] = vectors[c] * cdp.probs[c] 358 359 # Average of the unit vectors. 360 cdp.ave_unit_pivot_CoM = sum(vectors) 361 362 # The length reduction. 363 cdp.ave_pivot_CoM_red = norm(cdp.ave_unit_pivot_CoM) 364 365 # The aveage pivot-CoM vector. 366 cdp.ave_pivot_CoM = norm(cdp.pivot_CoM) * cdp.ave_unit_pivot_CoM 367 368 # The full length rotated pivot-CoM vector. 369 cdp.full_ave_pivot_CoM = cdp.ave_pivot_CoM / cdp.ave_pivot_CoM_red 370 371 # The cone angle for diffusion on an axially symmetric cone. 372 cdp.theta_diff_on_cone = acos(cdp.ave_pivot_CoM_red) 373 cdp.S_diff_on_cone = (3.0*cos(cdp.theta_diff_on_cone)**2 - 1.0) / 2.0 374 375 # The cone angle and order parameter for diffusion in an axially symmetric cone. 376 cdp.theta_diff_in_cone = acos(2.*cdp.ave_pivot_CoM_red - 1.) 377 cdp.S_diff_in_cone = cos(cdp.theta_diff_in_cone) * (1 + cos(cdp.theta_diff_in_cone)) / 2.0 378 379 # Print out. 
380 print(("\n%-40s %-20s" % ("Pivot point:", repr(cdp.pivot_point)))) 381 print(("%-40s %-20s" % ("Moving domain CoM (prior to rotation):", repr(cdp.CoM)))) 382 print(("%-40s %-20s" % ("Pivot-CoM vector", repr(cdp.pivot_CoM)))) 383 print(("%-40s %-20s" % ("Pivot-CoM unit vector:", repr(unit_vect)))) 384 print(("%-40s %-20s" % ("Average of the unit pivot-CoM vectors:", repr(cdp.ave_unit_pivot_CoM)))) 385 print(("%-40s %-20s" % ("Average of the pivot-CoM vector:", repr(cdp.ave_pivot_CoM)))) 386 print(("%-40s %-20s" % ("Full length rotated pivot-CoM vector:", repr(cdp.full_ave_pivot_CoM)))) 387 print(("%-40s %-20s" % ("Length reduction from unity:", repr(cdp.ave_pivot_CoM_red)))) 388 print(("%-40s %.5f rad (%.5f deg)" % ("Cone angle (diffusion on a cone)", cdp.theta_diff_on_cone, cdp.theta_diff_on_cone / (2*pi) *360.))) 389 print(("%-40s S_cone = %.5f (S^2 = %.5f)" % ("S_cone (diffusion on a cone)", cdp.S_diff_on_cone, cdp.S_diff_on_cone**2))) 390 print(("%-40s %.5f rad (%.5f deg)" % ("Cone angle (diffusion in a cone)", cdp.theta_diff_in_cone, cdp.theta_diff_in_cone / (2*pi) *360.))) 391 print(("%-40s S_cone = %.5f (S^2 = %.5f)" % ("S_cone (diffusion in a cone)", cdp.S_diff_in_cone, cdp.S_diff_in_cone**2))) 392 print("\n\n")
393 394
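The two cone models in the _CoM() analysis convert the length reduction of the averaged pivot-CoM unit vector into a cone angle and order parameter. A standalone sketch of the same relations, using a hypothetical length reduction value:

# Sketch of the cone angle and order parameter relations used above, for a
# hypothetical length reduction of the averaged unit pivot-CoM vector.
from math import acos, cos, degrees

red = 0.8   # |average unit pivot-CoM vector|, between 0 and 1.

# Diffusion on an axially symmetric cone.
theta_on = acos(red)
S_on = (3.0*cos(theta_on)**2 - 1.0) / 2.0

# Diffusion in an axially symmetric cone.
theta_in = acos(2.0*red - 1.0)
S_in = cos(theta_in) * (1.0 + cos(theta_in)) / 2.0

print("on cone:  theta = %.2f deg, S = %.3f" % (degrees(theta_on), S_on))
print("in cone:  theta = %.2f deg, S = %.3f" % (degrees(theta_in), S_in))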
395 - def _cone_pdb(self, cone_type=None, scale=1.0, file=None, dir=None, force=False):
396 """Create a PDB file containing a geometric object representing the various cone models. 397 398 Currently the only cone types supported are 'diff in cone' and 'diff on cone'. 399 400 401 @param cone_type: The type of cone model to represent. 402 @type cone_type: str 403 @param scale: The size of the geometric object is eqaul to the average pivot-CoM 404 vector length multiplied by this scaling factor. 405 @type scale: float 406 @param file: The name of the PDB file to create. 407 @type file: str 408 @param dir: The name of the directory to place the PDB file into. 409 @type dir: str 410 @param force: Flag which if set to True will cause any pre-existing file to be 411 overwritten. 412 @type force: int 413 """ 414 415 # Test if the cone models have been determined. 416 if cone_type == 'diff in cone': 417 if not hasattr(cdp, 'S_diff_in_cone'): 418 raise RelaxError("The diffusion in a cone model has not yet been determined.") 419 elif cone_type == 'diff on cone': 420 if not hasattr(cdp, 'S_diff_on_cone'): 421 raise RelaxError("The diffusion on a cone model has not yet been determined.") 422 else: 423 raise RelaxError("The cone type " + repr(cone_type) + " is unknown.") 424 425 # The number of increments for the filling of the cone objects. 426 inc = 20 427 428 # The rotation matrix. 429 R = zeros((3, 3), float64) 430 two_vect_to_R(array([0, 0, 1], float64), cdp.ave_pivot_CoM/norm(cdp.ave_pivot_CoM), R) 431 432 # The isotropic cone object. 433 if cone_type == 'diff in cone': 434 angle = cdp.theta_diff_in_cone 435 elif cone_type == 'diff on cone': 436 angle = cdp.theta_diff_on_cone 437 cone = Iso_cone(angle) 438 439 # Create the structural object. 440 structure = Internal() 441 442 # Add a structure. 443 structure.add_molecule(name='cone') 444 445 # Alias the single molecule from the single model. 446 mol = structure.structural_data[0].mol[0] 447 448 # Add the pivot point. 449 mol.atom_add(pdb_record='HETATM', atom_num=1, atom_name='R', res_name='PIV', res_num=1, pos=cdp.pivot_point, element='C') 450 451 # Generate the average pivot-CoM vectors. 452 print("\nGenerating the average pivot-CoM vectors.") 453 sim_vectors = None 454 if hasattr(cdp, 'ave_pivot_CoM_sim'): 455 sim_vectors = cdp.ave_pivot_CoM_sim 456 res_num = generic_fns.structure.geometric.generate_vector_residues(mol=mol, vector=cdp.ave_pivot_CoM, atom_name='Ave', res_name_vect='AVE', sim_vectors=sim_vectors, res_num=2, origin=cdp.pivot_point, scale=scale) 457 458 # Generate the cone outer edge. 459 print("\nGenerating the cone outer edge.") 460 cap_start_atom = mol.atom_num[-1]+1 461 generic_fns.structure.geometric.cone_edge(mol=mol, cone=cone, res_name='CON', res_num=3, apex=cdp.pivot_point, R=R, scale=norm(cdp.pivot_CoM), inc=inc) 462 463 # Generate the cone cap, and stitch it to the cone edge. 464 if cone_type == 'diff in cone': 465 print("\nGenerating the cone cap.") 466 cone_start_atom = mol.atom_num[-1]+1 467 generic_fns.structure.geometric.generate_vector_dist(mol=mol, res_name='CON', res_num=3, centre=cdp.pivot_point, R=R, limit_check=cone.limit_check, scale=norm(cdp.pivot_CoM), inc=inc) 468 generic_fns.structure.geometric.stitch_cone_to_edge(mol=mol, cone=cone, dome_start=cone_start_atom, edge_start=cap_start_atom+1, inc=inc) 469 470 # Create the PDB file. 471 print("\nGenerating the PDB file.") 472 pdb_file = open_write_file(file, dir, force=force) 473 structure.write_pdb(pdb_file) 474 pdb_file.close()
475 476
477 - def _disassemble_param_vector(self, param_vector=None, data_types=None, sim_index=None):
478 """Disassemble the parameter vector and place the values into the relevant variables. 479 480 For the 2-domain N-state model, the parameters are stored in the probability and Euler angle data structures. For the population N-state model, only the probabilities are stored. If RDCs are present and alignment tensors are optimised, then these are stored as well. 481 482 @keyword data_types: The base data types used in the optimisation. This list can contain the elements 'rdc', 'pcs' or 'tensor'. 483 @type data_types: list of str 484 @keyword param_vector: The parameter vector returned from optimisation. 485 @type param_vector: numpy array 486 @keyword sim_index: The index of the simulation to optimise. This should be None if normal optimisation is desired. 487 @type sim_index: None or int 488 """ 489 490 # Test if the model is selected. 491 if not hasattr(cdp, 'model') or not isinstance(cdp.model, str): 492 raise RelaxNoModelError 493 494 # Unpack and strip off the alignment tensor parameters. 495 if ('rdc' in data_types or 'pcs' in data_types) and not all_tensors_fixed(): 496 # Loop over the alignments, adding the alignment tensor parameters to the tensor data container. 497 tensor_num = 0 498 for i in xrange(len(cdp.align_tensors)): 499 # No alignment ID, so skip the tensor as it will not be optimised. 500 if cdp.align_tensors[i].name not in cdp.align_ids: 501 continue 502 503 # Fixed tensor. 504 if cdp.align_tensors[i].fixed: 505 continue 506 507 # Normal tensors. 508 if sim_index == None: 509 cdp.align_tensors[i].Axx = param_vector[5*tensor_num] 510 cdp.align_tensors[i].Ayy = param_vector[5*tensor_num+1] 511 cdp.align_tensors[i].Axy = param_vector[5*tensor_num+2] 512 cdp.align_tensors[i].Axz = param_vector[5*tensor_num+3] 513 cdp.align_tensors[i].Ayz = param_vector[5*tensor_num+4] 514 515 # Monte Carlo simulated tensors. 516 else: 517 cdp.align_tensors[i].Axx_sim[sim_index] = param_vector[5*tensor_num] 518 cdp.align_tensors[i].Ayy_sim[sim_index] = param_vector[5*tensor_num+1] 519 cdp.align_tensors[i].Axy_sim[sim_index] = param_vector[5*tensor_num+2] 520 cdp.align_tensors[i].Axz_sim[sim_index] = param_vector[5*tensor_num+3] 521 cdp.align_tensors[i].Ayz_sim[sim_index] = param_vector[5*tensor_num+4] 522 523 # Increase the tensor number. 524 tensor_num += 1 525 526 # Create a new parameter vector without the tensors. 527 param_vector = param_vector[5*tensor_num:] 528 529 # Monte Carlo simulation data structures. 530 if sim_index != None: 531 # Populations. 532 if cdp.model in ['2-domain', 'population']: 533 probs = cdp.probs_sim[sim_index] 534 535 # Euler angles. 536 if cdp.model == '2-domain': 537 alpha = cdp.alpha_sim[sim_index] 538 beta = cdp.beta_sim[sim_index] 539 gamma = cdp.gamma_sim[sim_index] 540 541 # Normal data structures. 542 else: 543 # Populations. 544 if cdp.model in ['2-domain', 'population']: 545 probs = cdp.probs 546 547 # Euler angles. 548 if cdp.model == '2-domain': 549 alpha = cdp.alpha 550 beta = cdp.beta 551 gamma = cdp.gamma 552 553 # The probabilities for states 0 to N-1. 554 if cdp.model in ['2-domain', 'population']: 555 for i in xrange(cdp.N-1): 556 probs[i] = param_vector[i] 557 558 # The probability for state N. 559 probs[-1] = 1 - sum(probs[0:-1]) 560 561 # The Euler angles. 562 if cdp.model == '2-domain': 563 for i in xrange(cdp.N): 564 alpha[i] = param_vector[cdp.N-1 + 3*i] 565 beta[i] = param_vector[cdp.N-1 + 3*i + 1] 566 gamma[i] = param_vector[cdp.N-1 + 3*i + 2] 567 568 # The paramagnetic centre. 
569 if hasattr(cdp, 'paramag_centre_fixed') and not cdp.paramag_centre_fixed: 570 # Create the structure if needed. 571 if not hasattr(cdp, 'paramagnetic_centre'): 572 cdp.paramagnetic_centre = zeros(3, float64) 573 574 # The position. 575 cdp.paramagnetic_centre[0] = param_vector[-3] 576 cdp.paramagnetic_centre[1] = param_vector[-2] 577 cdp.paramagnetic_centre[2] = param_vector[-1]
578 579
580 - def _elim_no_prob(self):
581 """Remove all structures or states which have no probability.""" 582 583 # Test if the current data pipe exists. 584 pipes.test() 585 586 # Test if the model is setup. 587 if not hasattr(cdp, 'model'): 588 raise RelaxNoModelError('N-state') 589 590 # Test if there are populations. 591 if not hasattr(cdp, 'probs'): 592 raise RelaxError("The N-state model populations do not exist.") 593 594 # Loop over the structures. 595 i = 0 596 while True: 597 # End condition. 598 if i == cdp.N - 1: 599 break 600 601 # No probability. 602 if cdp.probs[i] < 1e-5: 603 # Remove the probability. 604 cdp.probs.pop(i) 605 606 # Remove the structure. 607 cdp.structure.structural_data.pop(i) 608 609 # Eliminate bond vectors. 610 for spin in spin_loop(): 611 # Position info. 612 if hasattr(spin, 'pos'): 613 spin.pos.pop(i) 614 615 # Vector info. 616 if hasattr(spin, 'xh_vect'): 617 spin.xh_vect.pop(i) 618 if hasattr(spin, 'bond_vect'): 619 spin.bond_vect.pop(i) 620 621 # Update N. 622 cdp.N -= 1 623 624 # Start the loop again without incrementing i. 625 continue 626 627 # Increment i. 628 i += 1
629 630
631 - def _linear_constraints(self, data_types=None, scaling_matrix=None):
632 """Function for setting up the linear constraint matrices A and b. 633 634 Standard notation 635 ================= 636 637 The N-state model constraints are:: 638 639 0 <= pc <= 1, 640 641 where p is the probability and c corresponds to state c. 642 643 644 Matrix notation 645 =============== 646 647 In the notation A.x >= b, where A is an matrix of coefficients, x is an array of parameter 648 values, and b is a vector of scalars, these inequality constraints are:: 649 650 | 1 0 0 | | 0 | 651 | | | | 652 |-1 0 0 | | -1 | 653 | | | | 654 | 0 1 0 | | 0 | 655 | | | p0 | | | 656 | 0 -1 0 | | | | -1 | 657 | | . | p1 | >= | | 658 | 0 0 1 | | | | 0 | 659 | | | p2 | | | 660 | 0 0 -1 | | -1 | 661 | | | | 662 |-1 -1 -1 | | -1 | 663 | | | | 664 | 1 1 1 | | 0 | 665 666 This example is for a 4-state model, the last probability pn is not included as this 667 parameter does not exist (because the sum of pc is equal to 1). The Euler angle parameters 668 have been excluded here but will be included in the returned A and b objects. These 669 parameters simply add columns of zero to the A matrix and have no effect on b. The last two 670 rows correspond to the inequality:: 671 672 0 <= pN <= 1. 673 674 As:: 675 N-1 676 \ 677 pN = 1 - > pc, 678 /__ 679 c=1 680 681 then:: 682 683 -p1 - p2 - ... - p(N-1) >= -1, 684 685 p1 + p2 + ... + p(N-1) >= 0. 686 687 688 @keyword data_types: The base data types used in the optimisation. This list can 689 contain the elements 'rdc', 'pcs' or 'tensor'. 690 @type data_types: list of str 691 @keyword scaling_matrix: The diagonal scaling matrix. 692 @type scaling_matrix: numpy rank-2 square matrix 693 @return: The matrices A and b. 694 @rtype: tuple of len 2 of a numpy rank-2, size NxM matrix and numpy 695 rank-1, size N array 696 """ 697 698 # Starting point of the populations. 699 pop_start = 0 700 if ('rdc' in data_types or 'pcs' in data_types) and not all_tensors_fixed(): 701 # Loop over the alignments. 702 for i in range(len(cdp.align_tensors)): 703 # Fixed tensor. 704 if cdp.align_tensors[i].fixed: 705 continue 706 707 # Add 5 parameters. 708 pop_start += 5 709 710 # Initialisation (0..j..m). 711 A = [] 712 b = [] 713 zero_array = zeros(self._param_num(), float64) 714 i = pop_start 715 j = 0 716 717 # Probability parameters. 718 if cdp.model in ['2-domain', 'population']: 719 # Loop over the prob parameters (N - 1, because the sum of pc is 1). 720 for k in xrange(cdp.N - 1): 721 # 0 <= pc <= 1. 722 A.append(zero_array * 0.0) 723 A.append(zero_array * 0.0) 724 A[j][i] = 1.0 725 A[j+1][i] = -1.0 726 b.append(0.0) 727 b.append(-1.0 / scaling_matrix[i, i]) 728 j = j + 2 729 730 # Increment i. 731 i = i + 1 732 733 # Add the inequalities for pN. 734 A.append(zero_array * 0.0) 735 A.append(zero_array * 0.0) 736 for i in xrange(pop_start, self._param_num()): 737 A[-2][i] = -1.0 738 A[-1][i] = 1.0 739 b.append(-1.0 / scaling_matrix[i, i]) 740 b.append(0.0) 741 742 # Convert to numpy data structures. 743 A = array(A, float64) 744 b = array(b, float64) 745 746 # Return the constraint objects. 747 return A, b
748 749
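The A.x >= b inequalities described in the _linear_constraints() docstring are simple to build for the population parameters alone. A minimal sketch for an N-state population model, ignoring the tensor parameter column offset and the scaling matrix handled by the method above:

# Sketch of the 0 <= pc <= 1 constraints in A.x >= b form for the N-1 free
# probabilities of a population model (no tensor offset, no scaling).
from numpy import array, float64, zeros

def population_constraints(N):
    n_params = N - 1
    A, b = [], []
    for i in range(n_params):
        lower = zeros(n_params, float64)
        lower[i] = 1.0                   #  pc >= 0.
        upper = zeros(n_params, float64)
        upper[i] = -1.0                  # -pc >= -1, i.e. pc <= 1.
        A += [lower, upper]
        b += [0.0, -1.0]
    A.append(array([-1.0]*n_params, float64))   # -sum(pc) >= -1, i.e. pN >= 0.
    b.append(-1.0)
    A.append(array([1.0]*n_params, float64))    #  sum(pc) >= 0, i.e. pN <= 1.
    b.append(0.0)
    return array(A, float64), array(b, float64)

A, b = population_constraints(4)
print(A)
print(b)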
750 - def _minimise_bc_data(self, model):
751 """Extract and unpack the back calculated data. 752 753 @param model: The instantiated class containing the target function. 754 @type model: class instance 755 """ 756 757 # No alignment tensors, so nothing to do. 758 if not hasattr(cdp, 'align_tensors'): 759 return 760 761 # Loop over each alignment. 762 for i in xrange(len(cdp.align_ids)): 763 # Fixed tensor. 764 if cdp.align_tensors[i].fixed: 765 continue 766 767 # The alignment ID. 768 align_id = cdp.align_ids[i] 769 770 # Data flags 771 rdc_flag = False 772 if hasattr(cdp, 'rdc_ids') and align_id in cdp.rdc_ids: 773 rdc_flag = True 774 pcs_flag = False 775 if hasattr(cdp, 'pcs_ids') and align_id in cdp.pcs_ids: 776 pcs_flag = True 777 778 # Spin loop. 779 data_index = 0 780 for spin in spin_loop(): 781 # Skip deselected spins. 782 if not spin.select: 783 continue 784 785 # Spins with PCS data. 786 if pcs_flag and hasattr(spin, 'pcs'): 787 # Initialise the data structure if necessary. 788 if not hasattr(spin, 'pcs_bc'): 789 spin.pcs_bc = {} 790 791 # Add the back calculated PCS (in ppm). 792 spin.pcs_bc[align_id] = model.deltaij_theta[i, data_index] * 1e6 793 794 # Spins with RDC data. 795 if rdc_flag and hasattr(spin, 'rdc') and (hasattr(spin, 'xh_vect') or hasattr(spin, 'bond_vect')): 796 # Initialise the data structure if necessary. 797 if not hasattr(spin, 'rdc_bc'): 798 spin.rdc_bc = {} 799 800 # Append the back calculated PCS. 801 spin.rdc_bc[align_id] = model.Dij_theta[i, data_index] 802 803 # Increment the spin index if it contains data. 804 if hasattr(spin, 'pcs') or (hasattr(spin, 'rdc') and (hasattr(spin, 'xh_vect') or hasattr(spin, 'bond_vect'))): 805 data_index = data_index + 1
806 807
808 - def _minimise_setup_atomic_pos(self):
809 """Set up the atomic position data structures for optimisation using PCSs and PREs as base data sets. 810 811 @return: The atomic positions (the first index is the spins, the second is the structures, and the third is the atomic coordinates) and the paramagnetic centre. 812 @rtype: numpy rank-3 array, numpy rank-1 array. 813 """ 814 815 # Initialise. 816 atomic_pos = [] 817 818 # Store the atomic positions. 819 for spin in spin_loop(): 820 # Skip deselected spins. 821 if not spin.select: 822 continue 823 824 # Only use spins with alignment/paramagnetic data. 825 if not hasattr(spin, 'pcs') and not hasattr(spin, 'rdc') and not hasattr(spin, 'pre'): 826 continue 827 828 # The position list. 829 if type(spin.pos[0]) in [float, float64]: 830 atomic_pos.append([spin.pos]) 831 else: 832 atomic_pos.append(spin.pos) 833 834 # Convert to numpy objects. 835 atomic_pos = array(atomic_pos, float64) 836 837 # The paramagnetic centre. 838 if hasattr(cdp, 'paramagnetic_centre'): 839 paramag_centre = array(cdp.paramagnetic_centre) 840 else: 841 paramag_centre = zeros(3, float64) 842 843 # Return the data structures. 844 return atomic_pos, paramag_centre
845 846
847 - def _minimise_setup_pcs(self, sim_index=None):
848 """Set up the data structures for optimisation using PCSs as base data sets. 849 850 @keyword sim_index: The index of the simulation to optimise. This should be None if normal optimisation is desired. 851 @type sim_index: None or int 852 @return: The assembled data structures for using PCSs as the base data for optimisation. These include: 853 - the PCS values. 854 - the unit vectors connecting the paramagnetic centre (the electron spin) to 855 - the PCS weight. 856 - the nuclear spin. 857 - the pseudocontact shift constants. 858 @rtype: tuple of (numpy rank-2 array, numpy rank-2 array, numpy rank-2 array, numpy rank-1 array, numpy rank-1 array) 859 """ 860 861 # Data setup tests. 862 if not hasattr(cdp, 'paramagnetic_centre') and (hasattr(cdp, 'paramag_centre_fixed') and cdp.paramag_centre_fixed): 863 raise RelaxError("The paramagnetic centre has not yet been specified.") 864 if not hasattr(cdp, 'temperature'): 865 raise RelaxError("The experimental temperatures have not been set.") 866 if not hasattr(cdp, 'frq'): 867 raise RelaxError("The spectrometer frequencies of the experiments have not been set.") 868 869 # Initialise. 870 pcs = [] 871 pcs_err = [] 872 pcs_weight = [] 873 temp = [] 874 frq = [] 875 876 # The PCS data. 877 for align_id in cdp.align_ids: 878 # No RDC or PCS data, so jump to the next alignment. 879 if (hasattr(cdp, 'rdc_ids') and not align_id in cdp.rdc_ids) and (hasattr(cdp, 'pcs_ids') and not align_id in cdp.pcs_ids): 880 continue 881 882 # Append empty arrays to the PCS structures. 883 pcs.append([]) 884 pcs_err.append([]) 885 pcs_weight.append([]) 886 887 # Get the temperature for the PCS constant. 888 if cdp.temperature.has_key(align_id): 889 temp.append(cdp.temperature[align_id]) 890 else: 891 temp.append(0.0) 892 893 # Get the spectrometer frequency in Tesla units for the PCS constant. 894 if cdp.frq.has_key(align_id): 895 frq.append(cdp.frq[align_id] * 2.0 * pi / g1H) 896 else: 897 frq.append(1e-10) 898 899 # Spin loop. 900 j = 0 901 for spin in spin_loop(): 902 # Skip deselected spins. 903 if not spin.select: 904 continue 905 906 # Skip spins without PCS data. 907 if not hasattr(spin, 'pcs'): 908 # Add rows of None if other alignment data exists. 909 if hasattr(spin, 'rdc'): 910 pcs[-1].append(None) 911 pcs_err[-1].append(None) 912 pcs_weight[-1].append(None) 913 j = j + 1 914 915 # Jump to the next spin. 916 continue 917 918 # Append the PCSs to the list. 919 if align_id in spin.pcs.keys(): 920 if sim_index != None: 921 pcs[-1].append(spin.pcs_sim[align_id][sim_index]) 922 else: 923 pcs[-1].append(spin.pcs[align_id]) 924 else: 925 pcs[-1].append(None) 926 927 # Append the PCS errors. 928 if hasattr(spin, 'pcs_err') and align_id in spin.pcs_err.keys(): 929 pcs_err[-1].append(spin.pcs_err[align_id]) 930 else: 931 pcs_err[-1].append(None) 932 933 # Append the weight. 934 if hasattr(spin, 'pcs_weight') and align_id in spin.pcs_weight.keys(): 935 pcs_weight[-1].append(spin.pcs_weight[align_id]) 936 else: 937 pcs_weight[-1].append(1.0) 938 939 # Spin index. 940 j = j + 1 941 942 # Convert to numpy objects. 943 pcs = array(pcs, float64) 944 pcs_err = array(pcs_err, float64) 945 pcs_weight = array(pcs_weight, float64) 946 947 # Convert the PCS from ppm to no units. 948 pcs = pcs * 1e-6 949 pcs_err = pcs_err * 1e-6 950 951 # Return the data structures. 952 return pcs, pcs_err, pcs_weight, temp, frq
953 954
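For the PCS constant, the stored proton spectrometer frequency is converted to a field strength in Tesla via B0 = 2*pi*nu/gamma_H, exactly as in the frq list construction above. A standalone sketch (the approximate proton gyromagnetic ratio below stands in for the g1H constant imported from physical_constants):

# Sketch of the frequency-to-field conversion used above: B0 = 2*pi*nu / gamma_H.
from math import pi

g1H = 26.7522212e7      # Approximate proton gyromagnetic ratio in rad.s^-1.T^-1.
frq = 600.0e6           # Proton spectrometer frequency in Hz (hypothetical).

B0 = frq * 2.0 * pi / g1H
print("%.2f T" % B0)    # Roughly 14.1 T for a 600 MHz spectrometer.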
955 - def _minimise_setup_rdcs(self, sim_index=None):
956 """Set up the data structures for optimisation using RDCs as base data sets. 957 958 @keyword sim_index: The index of the simulation to optimise. This should be None if normal optimisation is desired. 959 @type sim_index: None or int 960 @return: The assembled data structures for using RDCs as the base data for optimisation. These include: 961 - rdc, the RDC values. 962 - rdc_err, the RDC errors. 963 - rdc_weight, the RDC weights. 964 - vectors, the heteronucleus to proton vectors. 965 - rdc_const, the dipolar constants. 966 @rtype: tuple of (numpy rank-2 array, numpy rank-2 array, numpy rank-2 array) 967 """ 968 969 # Initialise. 970 rdc = [] 971 rdc_err = [] 972 rdc_weight = [] 973 unit_vect = [] 974 rdc_const = [] 975 976 # The unit vectors and RDC constants. 977 for spin, spin_id in spin_loop(return_id=True): 978 # Skip deselected spins. 979 if not spin.select: 980 continue 981 982 # Only use spins with RDC data. 983 if not hasattr(spin, 'rdc'): 984 # Add rows of None if other alignment data exists. 985 if hasattr(spin, 'pcs'): 986 unit_vect.append(None) 987 rdc_const.append(None) 988 989 # Jump to the next spin. 990 continue 991 992 # RDC data exists but the XH bond vectors are missing? 993 if not hasattr(spin, 'members') and not hasattr(spin, 'xh_vect') and not hasattr(spin, 'bond_vect'): 994 # Throw a warning. 995 warn(RelaxWarning("RDC data exists but the XH bond vectors are missing, skipping spin %s." % spin_id)) 996 997 # Add rows of None if other data exists. 998 if hasattr(spin, 'pcs'): 999 unit_vect.append(None) 1000 rdc_const.append(None) 1001 1002 # Jump to the next spin. 1003 continue 1004 1005 # Pseudo-atom set up. 1006 if hasattr(spin, 'members'): 1007 # Skip non-Me groups. 1008 if len(spin.members) != 3: 1009 warn(RelaxWarning("Only methyl group pseudo atoms are supported due to their fast rotation, skipping spin %s." % spin_id)) 1010 continue 1011 1012 # The summed vector. 1013 vect = zeros(3, float64) 1014 for i in range(3): 1015 # Get the spin. 1016 spin_i = return_spin(spin.members[i]) 1017 1018 # Add the bond vector. 1019 if hasattr(spin_i, 'xh_vect'): 1020 obj = getattr(spin_i, 'xh_vect') 1021 else: 1022 obj = getattr(spin_i, 'bond_vect') 1023 vect = vect + obj 1024 1025 # Normalise. 1026 vect = vect / norm(vect) 1027 1028 # Normal spin set up. 1029 else: 1030 # Append the RDC and XH vectors to the lists. 1031 if hasattr(spin, 'xh_vect'): 1032 vect = getattr(spin, 'xh_vect') 1033 else: 1034 vect = getattr(spin, 'bond_vect') 1035 1036 # Add the bond vectors. 1037 if isinstance(vect[0], float): 1038 unit_vect.append([vect]) 1039 else: 1040 unit_vect.append(vect) 1041 1042 # Checks. 1043 if not hasattr(spin, 'heteronuc_type'): 1044 raise RelaxSpinTypeError 1045 if not hasattr(spin, 'proton_type'): 1046 raise RelaxProtonTypeError 1047 if not hasattr(spin, 'r'): 1048 raise RelaxNoValueError("bond length") 1049 1050 # Gyromagnetic ratios. 1051 gx = return_gyromagnetic_ratio(spin.heteronuc_type) 1052 gh = return_gyromagnetic_ratio(spin.proton_type) 1053 1054 # Calculate the RDC dipolar constant (in Hertz, and the 3 comes from the alignment tensor), and append it to the list. 1055 rdc_const.append(3.0/(2.0*pi) * dipolar_constant(gx, gh, spin.r)) 1056 1057 # Fix the unit vector data structure. 1058 num = None 1059 for i in range(len(unit_vect)): 1060 # Number of vectors. 1061 if num == None: 1062 if unit_vect[i] != None: 1063 num = len(unit_vect[i]) 1064 continue 1065 1066 # Check. 
1067 if unit_vect[i] != None and len(unit_vect[i]) != num: 1068 raise RelaxError, "The number of bond vectors for all spins do no match:\n%s" % unit_vect 1069 1070 # Missing unit vectors. 1071 if num == None: 1072 raise RelaxError, "No bond vectors could be found." 1073 1074 # Update None entries. 1075 for i in range(len(unit_vect)): 1076 if unit_vect[i] == None: 1077 unit_vect[i] = [[None, None, None]]*num 1078 1079 # The RDC data. 1080 for align_id in cdp.align_ids: 1081 # No RDC or PCS data, so jump to the next alignment. 1082 if (hasattr(cdp, 'rdc_ids') and not align_id in cdp.rdc_ids) and (hasattr(cdp, 'pcs_ids') and not align_id in cdp.pcs_ids): 1083 continue 1084 1085 # Append empty arrays to the RDC structures. 1086 rdc.append([]) 1087 rdc_err.append([]) 1088 rdc_weight.append([]) 1089 1090 # Spin loop. 1091 for spin in spin_loop(): 1092 # Skip deselected spins. 1093 if not spin.select: 1094 continue 1095 1096 # Skip spins without RDC data or XH bond vectors. 1097 if not hasattr(spin, 'rdc') or (not hasattr(spin, 'members') and not hasattr(spin, 'xh_vect') and not hasattr(spin, 'bond_vect')): 1098 # Add rows of None if other alignment data exists. 1099 if hasattr(spin, 'pcs'): 1100 rdc[-1].append(None) 1101 rdc_err[-1].append(None) 1102 rdc_weight[-1].append(None) 1103 1104 # Jump to the next spin. 1105 continue 1106 1107 # Defaults of None. 1108 value = None 1109 error = None 1110 1111 # Pseudo-atom set up. 1112 if hasattr(spin, 'members') and align_id in spin.rdc.keys(): 1113 # Skip non-Me groups. 1114 if len(spin.members) != 3: 1115 continue 1116 1117 # The RDC for the Me-pseudo spin where: 1118 # <D> = -1/3 Dpar. 1119 # See Verdier, et al., JMR, 2003, 163, 353-359. 1120 if sim_index != None: 1121 value = -3.0 * spin.rdc_sim[align_id][sim_index] 1122 else: 1123 value = -3.0 * spin.rdc[align_id] 1124 1125 # The error. 1126 if hasattr(spin, 'rdc_err') and align_id in spin.rdc_err.keys(): 1127 error = -3.0 * spin.rdc_err[align_id] 1128 1129 # Normal spin set up. 1130 elif align_id in spin.rdc.keys(): 1131 # The RDC. 1132 if sim_index != None: 1133 value = spin.rdc_sim[align_id][sim_index] 1134 else: 1135 value = spin.rdc[align_id] 1136 1137 # The error. 1138 if hasattr(spin, 'rdc_err') and align_id in spin.rdc_err.keys(): 1139 error = spin.rdc_err[align_id] 1140 1141 # Append the RDCs to the list. 1142 rdc[-1].append(value) 1143 1144 # Append the RDC errors. 1145 rdc_err[-1].append(error) 1146 1147 # Append the weight. 1148 if hasattr(spin, 'rdc_weight') and align_id in spin.rdc_weight.keys(): 1149 rdc_weight[-1].append(spin.rdc_weight[align_id]) 1150 else: 1151 rdc_weight[-1].append(1.0) 1152 1153 # Convert to numpy objects. 1154 rdc = array(rdc, float64) 1155 rdc_err = array(rdc_err, float64) 1156 rdc_weight = array(rdc_weight, float64) 1157 unit_vect = array(unit_vect, float64) 1158 rdc_const = array(rdc_const, float64) 1159 1160 # Return the data structures. 1161 return rdc, rdc_err, rdc_weight, unit_vect, rdc_const
1162 1163
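Two details of the RDC setup above are easy to miss: methyl pseudo-atoms are handled by summing and normalising the three member bond vectors into one effective vector, and their stored RDCs are the averaged value <D> = -1/3 Dpar (Verdier et al., JMR, 2003), so the parallel coupling is recovered as -3 <D>. A standalone sketch with hypothetical numbers:

# Sketch of the methyl pseudo-atom handling described above.
from numpy import array, zeros, float64
from numpy.linalg import norm

member_vects = [array([0.9, 0.3, 0.1]), array([0.8, -0.4, 0.2]), array([1.0, 0.1, -0.3])]
vect = zeros(3, float64)
for v in member_vects:
    vect = vect + v            # Sum the three C-H bond vectors.
vect = vect / norm(vect)       # Normalise to the effective unit vector.

measured = -4.2                # The averaged RDC <D> of the CH3 pseudo-atom, in Hz (hypothetical).
value = -3.0 * measured        # The Dpar value appended to the rdc list above.
print(vect, value)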
1164 - def _minimise_setup_tensors(self, sim_index=None):
1165 """Set up the data structures for optimisation using alignment tensors as base data sets. 1166 1167 @keyword sim_index: The index of the simulation to optimise. This should be None if 1168 normal optimisation is desired. 1169 @type sim_index: None or int 1170 @return: The assembled data structures for using alignment tensors as the base 1171 data for optimisation. These include: 1172 - full_tensors, the data of the full alignment tensors. 1173 - red_tensor_elem, the tensors as concatenated rank-1 5D arrays. 1174 - red_tensor_err, the tensor errors as concatenated rank-1 5D 1175 arrays. 1176 - full_in_ref_frame, flags specifying if the tensor in the reference 1177 frame is the full or reduced tensor. 1178 @rtype: tuple of (list, numpy rank-1 array, numpy rank-1 array, numpy rank-1 1179 array) 1180 """ 1181 1182 # Initialise. 1183 n = len(cdp.align_tensors.reduction) 1184 full_tensors = zeros(n*5, float64) 1185 red_tensors = zeros(n*5, float64) 1186 red_err = ones(n*5, float64) * 1e-5 1187 full_in_ref_frame = zeros(n, float64) 1188 1189 # Loop over the full tensors. 1190 for i, tensor in self._tensor_loop(red=False): 1191 # The full tensor. 1192 full_tensors[5*i + 0] = tensor.Axx 1193 full_tensors[5*i + 1] = tensor.Ayy 1194 full_tensors[5*i + 2] = tensor.Axy 1195 full_tensors[5*i + 3] = tensor.Axz 1196 full_tensors[5*i + 4] = tensor.Ayz 1197 1198 # The full tensor corresponds to the frame of reference. 1199 if cdp.ref_domain == tensor.domain: 1200 full_in_ref_frame[i] = 1 1201 1202 # Loop over the reduced tensors. 1203 for i, tensor in self._tensor_loop(red=True): 1204 # The reduced tensor (simulation data). 1205 if sim_index != None: 1206 red_tensors[5*i + 0] = tensor.Axx_sim[sim_index] 1207 red_tensors[5*i + 1] = tensor.Ayy_sim[sim_index] 1208 red_tensors[5*i + 2] = tensor.Axy_sim[sim_index] 1209 red_tensors[5*i + 3] = tensor.Axz_sim[sim_index] 1210 red_tensors[5*i + 4] = tensor.Ayz_sim[sim_index] 1211 1212 # The reduced tensor. 1213 else: 1214 red_tensors[5*i + 0] = tensor.Axx 1215 red_tensors[5*i + 1] = tensor.Ayy 1216 red_tensors[5*i + 2] = tensor.Axy 1217 red_tensors[5*i + 3] = tensor.Axz 1218 red_tensors[5*i + 4] = tensor.Ayz 1219 1220 # The reduced tensor errors. 1221 if hasattr(tensor, 'Axx_err'): 1222 red_err[5*i + 0] = tensor.Axx_err 1223 red_err[5*i + 1] = tensor.Ayy_err 1224 red_err[5*i + 2] = tensor.Axy_err 1225 red_err[5*i + 3] = tensor.Axz_err 1226 red_err[5*i + 4] = tensor.Ayz_err 1227 1228 # Return the data structures. 1229 return full_tensors, red_tensors, red_err, full_in_ref_frame
1230 1231
1232 - def _minimise_setup_fixed_tensors(self):
1233 """Set up the data structures for the fixed alignment tensors. 1234 1235 @return: The assembled data structures for the fixed alignment tensors. 1236 @rtype: numpy rank-1 array. 1237 """ 1238 1239 # Initialise. 1240 n = num_tensors(skip_fixed=False) - num_tensors(skip_fixed=True) 1241 tensors = zeros(n*5, float64) 1242 1243 # Nothing to do. 1244 if n == 0: 1245 return None 1246 1247 # Loop over the tensors. 1248 index = 0 1249 for i in range(len(cdp.align_tensors)): 1250 # Skip unfixed tensors. 1251 if not cdp.align_tensors[i].fixed: 1252 continue 1253 1254 # The real tensors. 1255 tensors[5*index + 0] = cdp.align_tensors[i].Axx 1256 tensors[5*index + 1] = cdp.align_tensors[i].Ayy 1257 tensors[5*index + 2] = cdp.align_tensors[i].Axy 1258 tensors[5*index + 3] = cdp.align_tensors[i].Axz 1259 tensors[5*index + 4] = cdp.align_tensors[i].Ayz 1260 1261 # Increment the index. 1262 index += 1 1263 1264 # Return the data structure. 1265 return tensors
1266 1267
1268 - def _num_data_points(self):
1269 """Determine the number of data points used in the model. 1270 1271 @return: The number, n, of data points in the model. 1272 @rtype: int 1273 """ 1274 1275 # Determine the data type. 1276 data_types = self._base_data_types() 1277 1278 # Init. 1279 n = 0 1280 1281 # Spin loop. 1282 for spin in spin_loop(): 1283 # Skip deselected spins. 1284 if not spin.select: 1285 continue 1286 1287 # RDC data (skipping array elements set to None). 1288 if 'rdc' in data_types: 1289 if hasattr(spin, 'rdc'): 1290 for rdc in spin.rdc: 1291 if isinstance(rdc, float): 1292 n = n + 1 1293 1294 # PCS data (skipping array elements set to None). 1295 if 'pcs' in data_types: 1296 if hasattr(spin, 'pcs'): 1297 for pcs in spin.pcs: 1298 if isinstance(pcs, float): 1299 n = n + 1 1300 1301 # Alignment tensors. 1302 if 'tensor' in data_types: 1303 n = n + 5*len(cdp.align_tensors) 1304 1305 # Return the value. 1306 return n
1307 1308
1309 - def _number_of_states(self, N=None):
1310 """Set the number of states in the N-state model. 1311 1312 @param N: The number of states. 1313 @type N: int 1314 """ 1315 1316 # Test if the current data pipe exists. 1317 pipes.test() 1318 1319 # Test if the model is setup. 1320 if not hasattr(cdp, 'model'): 1321 raise RelaxNoModelError('N-state') 1322 1323 # Set the value of N. 1324 cdp.N = N 1325 1326 # Update the model. 1327 self._update_model()
1328 1329
1330 - def _param_model_index(self, param):
1331 """Return the N-state model index for the given parameter. 1332 1333 @param param: The N-state model parameter. 1334 @type param: str 1335 @return: The N-state model index. 1336 @rtype: str 1337 """ 1338 1339 # Probability. 1340 if search('^p[0-9]*$', param): 1341 return int(param[1:]) 1342 1343 # Alpha Euler angle. 1344 if search('^alpha', param): 1345 return int(param[5:]) 1346 1347 # Beta Euler angle. 1348 if search('^beta', param): 1349 return int(param[4:]) 1350 1351 # Gamma Euler angle. 1352 if search('^gamma', param): 1353 return int(param[5:]) 1354 1355 # Model independent parameter. 1356 return None
1357 1358
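The index extraction in _param_model_index() relies on the parameter naming convention 'p0', 'alpha0', 'beta0', 'gamma0', etc. A small sketch of the same regular expression logic:

# Sketch of the parameter-name to state-index mapping used above.
from re import search

def model_index(param):
    if search('^p[0-9]*$', param):
        return int(param[1:])
    for prefix in ('alpha', 'beta', 'gamma'):
        if search('^' + prefix, param):
            return int(param[len(prefix):])
    return None    # Model independent parameter.

print(model_index('p3'), model_index('gamma12'), model_index('chi2'))   # 3, 12, None.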
1359 - def _param_num(self):
1360 """Determine the number of parameters in the model. 1361 1362 @return: The number of model parameters. 1363 @rtype: int 1364 """ 1365 1366 # Determine the data type. 1367 data_types = self._base_data_types() 1368 1369 # Init. 1370 num = 0 1371 1372 # Alignment tensor params. 1373 if ('rdc' in data_types or 'pcs' in data_types) and not all_tensors_fixed(): 1374 # Loop over the alignments. 1375 for i in xrange(len(cdp.align_tensors)): 1376 # No alignment ID, so skip the tensor as it is not part of the parameter set. 1377 if cdp.align_tensors[i].name not in cdp.align_ids: 1378 continue 1379 1380 # Fixed tensor. 1381 if cdp.align_tensors[i].fixed: 1382 continue 1383 1384 # Add 5 tensor parameters. 1385 num += 5 1386 1387 # Populations. 1388 if cdp.model in ['2-domain', 'population']: 1389 num = num + (cdp.N - 1) 1390 1391 # Euler angles. 1392 if cdp.model == '2-domain': 1393 num = num + 3*cdp.N 1394 1395 # The paramagnetic centre. 1396 if hasattr(cdp, 'paramag_centre_fixed') and not cdp.paramag_centre_fixed: 1397 num = num + 3 1398 1399 # Return the param number. 1400 return num
1401 1402
1403 - def _ref_domain(self, ref=None):
1404 """Set the reference domain for the '2-domain' N-state model. 1405 1406 @param ref: The reference domain. 1407 @type ref: str 1408 """ 1409 1410 # Test if the current data pipe exists. 1411 pipes.test() 1412 1413 # Test if the model is setup. 1414 if not hasattr(cdp, 'model'): 1415 raise RelaxNoModelError('N-state') 1416 1417 # Test that the correct model is set. 1418 if cdp.model != '2-domain': 1419 raise RelaxError("Setting the reference domain is only possible for the '2-domain' N-state model.") 1420 1421 # Test if the reference domain exists. 1422 exists = False 1423 for tensor_cont in cdp.align_tensors: 1424 if tensor_cont.domain == ref: 1425 exists = True 1426 if not exists: 1427 raise RelaxError("The reference domain cannot be found within any of the loaded tensors.") 1428 1429 # Set the reference domain. 1430 cdp.ref_domain = ref 1431 1432 # Update the model. 1433 self._update_model()
1434 1435
1436 - def _select_model(self, model=None):
1437 """Select the N-state model type. 1438 1439 @param model: The N-state model type. Can be one of '2-domain', 'population', or 'fixed'. 1440 @type model: str 1441 """ 1442 1443 # Test if the current data pipe exists. 1444 pipes.test() 1445 1446 # Test if the model name exists. 1447 if not model in ['2-domain', 'population', 'fixed']: 1448 raise RelaxError("The model name " + repr(model) + " is invalid.") 1449 1450 # Test if the model is setup. 1451 if hasattr(cdp, 'model'): 1452 warn(RelaxWarning("The N-state model has already been set up. Switching from model '%s' to '%s'." % (cdp.model, model))) 1453 1454 # Set the model 1455 cdp.model = model 1456 1457 # Initialise the list of model parameters. 1458 cdp.params = [] 1459 1460 # Update the model. 1461 self._update_model()
1462 1463
1464 - def _target_fn_setup(self, sim_index=None, scaling=True):
1465 """Initialise the target function for optimisation or direct calculation. 1466 1467 @param sim_index: The index of the simulation to optimise. This should be None if normal optimisation is desired. 1468 @type sim_index: None or int 1469 @param scaling: If True, diagonal scaling is enabled during optimisation to allow the problem to be better conditioned. 1470 @type scaling: bool 1471 """ 1472 1473 # Test if the N-state model has been set up. 1474 if not hasattr(cdp, 'model'): 1475 raise RelaxNoModelError('N-state') 1476 1477 # '2-domain' model setup tests. 1478 if cdp.model == '2-domain': 1479 # The number of states. 1480 if not hasattr(cdp, 'N'): 1481 raise RelaxError("The number of states has not been set.") 1482 1483 # The reference domain. 1484 if not hasattr(cdp, 'ref_domain'): 1485 raise RelaxError("The reference domain has not been set.") 1486 1487 # Update the model parameters if necessary. 1488 self._update_model() 1489 1490 # Create the initial parameter vector. 1491 param_vector = self._assemble_param_vector(sim_index=sim_index) 1492 1493 # Determine if alignment tensors or RDCs are to be used. 1494 data_types = self._base_data_types() 1495 1496 # The probabilities. 1497 probs = None 1498 if hasattr(cdp, 'probs'): 1499 probs = cdp.probs 1500 1501 # Diagonal scaling. 1502 scaling_matrix = None 1503 if len(param_vector): 1504 scaling_matrix = self._assemble_scaling_matrix(data_types=data_types, scaling=scaling) 1505 param_vector = dot(inv(scaling_matrix), param_vector) 1506 1507 # Get the data structures for optimisation using the tensors as base data sets. 1508 full_tensors, red_tensor_elem, red_tensor_err, full_in_ref_frame = None, None, None, None 1509 if 'tensor' in data_types: 1510 full_tensors, red_tensor_elem, red_tensor_err, full_in_ref_frame = self._minimise_setup_tensors(sim_index=sim_index) 1511 1512 # Get the data structures for optimisation using PCSs as base data sets. 1513 pcs, pcs_err, pcs_weight, temp, frq = None, None, None, None, None 1514 if 'pcs' in data_types: 1515 pcs, pcs_err, pcs_weight, temp, frq = self._minimise_setup_pcs(sim_index=sim_index) 1516 1517 # Get the data structures for optimisation using RDCs as base data sets. 1518 rdcs, rdc_err, rdc_weight, xh_vect, rdc_dj = None, None, None, None, None 1519 if 'rdc' in data_types: 1520 rdcs, rdc_err, rdc_weight, xh_vect, rdc_dj = self._minimise_setup_rdcs(sim_index=sim_index) 1521 1522 # Get the fixed tensors. 1523 fixed_tensors = None 1524 if 'rdc' in data_types or 'pcs' in data_types: 1525 full_tensors = self._minimise_setup_fixed_tensors() 1526 1527 # The flag list. 1528 fixed_tensors = [] 1529 for i in range(len(cdp.align_tensors)): 1530 if cdp.align_tensors[i].fixed: 1531 fixed_tensors.append(True) 1532 else: 1533 fixed_tensors.append(False) 1534 1535 # Get the atomic_positions. 1536 atomic_pos, paramag_centre, centre_fixed = None, None, True 1537 if 'pcs' in data_types or 'pre' in data_types: 1538 atomic_pos, paramag_centre = self._minimise_setup_atomic_pos() 1539 1540 # Optimisation of the centre. 1541 if hasattr(cdp, 'paramag_centre_fixed'): 1542 centre_fixed = cdp.paramag_centre_fixed 1543 1544 # Set up the class instance containing the target function. 
1545 model = N_state_opt(model=cdp.model, N=cdp.N, init_params=param_vector, probs=probs, full_tensors=full_tensors, red_data=red_tensor_elem, red_errors=red_tensor_err, full_in_ref_frame=full_in_ref_frame, fixed_tensors=fixed_tensors, pcs=pcs, rdcs=rdcs, pcs_errors=pcs_err, rdc_errors=rdc_err, pcs_weights=pcs_weight, rdc_weights=rdc_weight, xh_vect=xh_vect, temp=temp, frq=frq, dip_const=rdc_dj, atomic_pos=atomic_pos, paramag_centre=paramag_centre, scaling_matrix=scaling_matrix, centre_fixed=centre_fixed) 1546 1547 # Return the data. 1548 return model, param_vector, data_types, scaling_matrix
1549 1550
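The diagonal scaling applied in _target_fn_setup() conditions the optimisation by dividing each parameter by its scale factor before minimisation; the scaling matrix is then used to recover the real values afterwards. A minimal sketch with a hypothetical two-parameter vector:

# Sketch of the diagonal scaling round-trip used above.
from numpy import array, diag, dot, float64
from numpy.linalg import inv

scaling_matrix = diag(array([1.0, 0.1], float64))   # E.g. one tensor element and one probability.
param_vector = array([2.3e-4, 0.25], float64)

scaled = dot(inv(scaling_matrix), param_vector)      # Passed to the minimiser.
restored = dot(scaling_matrix, scaled)               # Recovered after optimisation.
print(scaled, restored)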
1551 - def _tensor_loop(self, red=False):
1552 """Generator method for looping over the full or reduced tensors. 1553 1554 @keyword red: A flag which if True causes the reduced tensors to be returned, and if False 1555 the full tensors are returned. 1556 @type red: bool 1557 @return: The tensor index and the tensor. 1558 @rtype: (int, AlignTensorData instance) 1559 """ 1560 1561 # Number of tensor pairs. 1562 n = len(cdp.align_tensors.reduction) 1563 1564 # Alias. 1565 data = cdp.align_tensors 1566 list = data.reduction 1567 1568 # Full or reduced index. 1569 if red: 1570 index = 1 1571 else: 1572 index = 0 1573 1574 # Loop over the reduction list. 1575 for i in range(n): 1576 yield i, data[list[i][index]]
1577 1578
1579 - def _update_model(self):
1580 """Update the model parameters as necessary.""" 1581 1582 # Initialise the list of model parameters. 1583 if not hasattr(cdp, 'params'): 1584 cdp.params = [] 1585 1586 # Determine the number of states (loaded as structural models), if not already set. 1587 if not hasattr(cdp, 'N'): 1588 # Set the number. 1589 if hasattr(cdp, 'structure'): 1590 cdp.N = cdp.structure.num_models() 1591 1592 # Otherwise return as the rest cannot be updated without N. 1593 else: 1594 return 1595 1596 # Set up the parameter arrays. 1597 if not cdp.params: 1598 # Add the probability or population weight parameters. 1599 if cdp.model in ['2-domain', 'population']: 1600 for i in xrange(cdp.N-1): 1601 cdp.params.append('p' + repr(i)) 1602 1603 # Add the Euler angle parameters. 1604 if cdp.model == '2-domain': 1605 for i in xrange(cdp.N): 1606 cdp.params.append('alpha' + repr(i)) 1607 cdp.params.append('beta' + repr(i)) 1608 cdp.params.append('gamma' + repr(i)) 1609 1610 # Initialise the probability and Euler angle arrays. 1611 if cdp.model in ['2-domain', 'population']: 1612 if not hasattr(cdp, 'probs'): 1613 cdp.probs = [None] * cdp.N 1614 if cdp.model == '2-domain': 1615 if not hasattr(cdp, 'alpha'): 1616 cdp.alpha = [None] * cdp.N 1617 if not hasattr(cdp, 'beta'): 1618 cdp.beta = [None] * cdp.N 1619 if not hasattr(cdp, 'gamma'): 1620 cdp.gamma = [None] * cdp.N 1621 1622 # Determine the data type. 1623 data_types = self._base_data_types() 1624 1625 # Set up tensors for each alignment. 1626 if hasattr(cdp, 'align_ids'): 1627 for id in cdp.align_ids: 1628 # No tensors initialised. 1629 if not hasattr(cdp, 'align_tensors'): 1630 generic_fns.align_tensor.init(tensor=id, params=[0.0, 0.0, 0.0, 0.0, 0.0]) 1631 1632 # Find if the tensor corresponding to the id exists. 1633 exists = False 1634 for tensor in cdp.align_tensors: 1635 if id == tensor.name: 1636 exists = True 1637 1638 # Initialise the tensor. 1639 if not exists: 1640 generic_fns.align_tensor.init(tensor=id, params=[0.0, 0.0, 0.0, 0.0, 0.0])
1641 1642
1643 - def base_data_loop(self):
1644 """Loop over the base data of the spins - RDCs, PCSs, and NOESY data. 1645 1646 This loop iterates for each data point (RDC, PCS, NOESY) for each spin, returning the identification information. 1647 1648 @return: A list of the spin ID string, the data type ('rdc', 'pcs', 'noesy'), and the alignment ID if required. 1649 @rtype: list of str 1650 """ 1651 1652 # Loop over the spins. 1653 for spin, spin_id in spin_loop(return_id=True): 1654 # Re-initialise the data structure. 1655 base_ids = [spin_id, None, None] 1656 1657 # Skip deselected spins. 1658 if not spin.select: 1659 continue 1660 1661 # RDC data. 1662 if hasattr(spin, 'rdc'): 1663 base_ids[1] = 'rdc' 1664 1665 # Loop over the alignment IDs. 1666 for id in cdp.rdc_ids: 1667 # Add the ID. 1668 base_ids[2] = id 1669 1670 # Yield the set. 1671 yield base_ids 1672 1673 # PCS data. 1674 if hasattr(spin, 'pcs'): 1675 base_ids[1] = 'pcs' 1676 1677 # Loop over the alignment IDs. 1678 for id in cdp.pcs_ids: 1679 # Add the ID. 1680 base_ids[2] = id 1681 1682 # Yield the set. 1683 yield base_ids 1684 1685 # NOESY data. 1686 if hasattr(spin, 'noesy'): 1687 base_ids[1] = 'noesy' 1688 1689 # Loop over the alignment IDs. 1690 for id in cdp.noesy_ids: 1691 # Add the ID. 1692 base_ids[2] = id 1693 1694 # Yield the set. 1695 yield base_ids
1696 1697
1698 - def calculate(self, spin_id=None, verbosity=1, sim_index=None):
1699 """Calculation function. 1700 1701 Currently this function simply calculates the NOESY flat-bottom quadratic energy potential, 1702 if NOE restraints are available. 1703 1704 @keyword spin_id: The spin identification string (unused). 1705 @type spin_id: None or str 1706 @keyword verbosity: The amount of information to print. The higher the value, the greater the verbosity. 1707 @type verbosity: int 1708 @keyword sim_index: The MC simulation index (unused). 1709 @type sim_index: None 1710 """ 1711 1712 # Set up the target function for direct calculation. 1713 model, param_vector, data_types, scaling_matrix = self._target_fn_setup() 1714 1715 # Calculate the chi-squared value. 1716 if model: 1717 # Make a function call. 1718 chi2 = model.func(param_vector) 1719 1720 # Store the global chi-squared value. 1721 cdp.chi2 = chi2 1722 1723 # Store the back-calculated data. 1724 self._minimise_bc_data(model) 1725 1726 # Calculate the RDC Q-factors. 1727 if 'rdc' in data_types: 1728 rdc.q_factors() 1729 1730 # Calculate the PCS Q-factors. 1731 if 'pcs' in data_types: 1732 pcs.q_factors() 1733 1734 # NOE potential. 1735 if hasattr(cdp, 'noe_restraints'): 1736 # Init some numpy arrays. 1737 num_restraints = len(cdp.noe_restraints) 1738 dist = zeros(num_restraints, float64) 1739 pot = zeros(num_restraints, float64) 1740 lower = zeros(num_restraints, float64) 1741 upper = zeros(num_restraints, float64) 1742 1743 # Loop over the NOEs. 1744 for i in range(num_restraints): 1745 # Create arrays of the NOEs. 1746 lower[i] = cdp.noe_restraints[i][2] 1747 upper[i] = cdp.noe_restraints[i][3] 1748 1749 # Calculate the average distances, using -6 power averaging. 1750 dist[i] = self._calc_ave_dist(cdp.noe_restraints[i][0], cdp.noe_restraints[i][1], exp=-6) 1751 1752 # Calculate the quadratic potential. 1753 quad_pot(dist, pot, lower, upper) 1754 1755 # Store the distance and potential information. 1756 cdp.ave_dist = [] 1757 cdp.quad_pot = [] 1758 for i in range(num_restraints): 1759 cdp.ave_dist.append([cdp.noe_restraints[i][0], cdp.noe_restraints[i][1], dist[i]]) 1760 cdp.quad_pot.append([cdp.noe_restraints[i][0], cdp.noe_restraints[i][1], pot[i]])
1761 1762
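The restraint handling above combines r**-6 ensemble averaging with a flat-bottom quadratic penalty.  The following self-contained numpy sketch reproduces that arithmetic with equal state weights; it is illustrative only and does not call the module's _calc_ave_dist() or quad_pot() functions:

    # Illustrative r**-6 ensemble averaging and flat-bottom quadratic potential,
    # assuming equal weights for the states (a simplification).
    from numpy import array, mean

    def ave_dist(distances, exp=-6):
        """Return the <r^exp>^(1/exp) average of the per-state distances."""
        return mean(array(distances, dtype=float)**exp)**(1.0/exp)

    def flat_bottom_pot(r, lower, upper):
        """Zero inside [lower, upper], quadratic penalty outside."""
        if r < lower:
            return (lower - r)**2
        if r > upper:
            return (r - upper)**2
        return 0.0

    r_ave = ave_dist([3.2, 3.8, 5.1])                      # average over three states
    print(r_ave, flat_bottom_pot(r_ave, lower=1.8, upper=4.0))
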
1763      def create_mc_data(self, data_id=None):
1764 """Create the Monte Carlo data by back-calculation. 1765 1766 @keyword data_id: The list of spin ID, data type, and alignment ID, as yielded by the base_data_loop() generator method. 1767 @type data_id: str 1768 @return: The Monte Carlo Ri data. 1769 @rtype: list of floats 1770 """ 1771 1772 # Initialise the MC data structure. 1773 mc_data = [] 1774 1775 # Get the spin container and global spin index. 1776 spin = return_spin(data_id[0]) 1777 1778 # RDC data. 1779 if data_id[1] == 'rdc' and hasattr(spin, 'rdc'): 1780 # Does back-calculated data exist? 1781 if not hasattr(spin, 'rdc_bc'): 1782 self.calculate() 1783 1784 # The data. 1785 if not hasattr(spin, 'rdc_bc') or not spin.rdc_bc.has_key(data_id[2]): 1786 data = None 1787 else: 1788 data = spin.rdc_bc[data_id[2]] 1789 1790 # Append the data. 1791 mc_data.append(data) 1792 1793 # PCS data. 1794 elif data_id[1] == 'pcs' and hasattr(spin, 'pcs'): 1795 # Does back-calculated data exist? 1796 if not hasattr(spin, 'pcs_bc'): 1797 self.calculate() 1798 1799 # The data. 1800 if not hasattr(spin, 'pcs_bc') or not spin.pcs_bc.has_key(data_id[2]): 1801 data = None 1802 else: 1803 data = spin.pcs_bc[data_id[2]] 1804 1805 # Append the data. 1806 mc_data.append(data) 1807 1808 # NOESY data. 1809 elif data_id[1] == 'noesy' and hasattr(spin, 'noesy'): 1810 # Does back-calculated data exist? 1811 if not hasattr(spin, 'noesy_bc'): 1812 self.calculate() 1813 1814 # Append the data. 1815 mc_data.append(spin.noesy_bc) 1816 1817 # Return the data. 1818 return mc_data
1819 1820
1821      def data_names(self, set='all', error_names=False, sim_names=False):
1822 """Return a list of names of data structures. 1823 1824 Description 1825 =========== 1826 1827 The names are as follows: 1828 1829 - 'chi2', chi-squared value. 1830 - 'iter', iterations. 1831 - 'f_count', function count. 1832 - 'g_count', gradient count. 1833 - 'h_count', hessian count. 1834 - 'warning', minimisation warning. 1835 1836 1837 @keyword set: The set of object names to return. This can be set to 'all' for all names, to 'generic' for generic object names, 'params' for analysis specific parameter names, or to 'min' for minimisation specific object names. 1838 @type set: str 1839 @keyword error_names: A flag which if True will add the error object names as well. 1840 @type error_names: bool 1841 @keyword sim_names: A flag which if True will add the Monte Carlo simulation object names as well. 1842 @type sim_names: bool 1843 @return: The list of object names. 1844 @rtype: list of str 1845 """ 1846 1847 # Initialise. 1848 names = [] 1849 1850 # Generic. 1851 if set == 'all' or set == 'generic': 1852 pass 1853 1854 # Parameters. 1855 if set == 'all' or set == 'params': 1856 pass 1857 1858 # Minimisation statistics. 1859 if set == 'all' or set == 'min': 1860 names.append('chi2') 1861 names.append('iter') 1862 names.append('f_count') 1863 names.append('g_count') 1864 names.append('h_count') 1865 names.append('warning') 1866 1867 # Parameter errors. 1868 if error_names and (set == 'all' or set == 'params'): 1869 pass 1870 1871 # Parameter simulation values. 1872 if sim_names and (set == 'all' or set == 'params'): 1873 pass 1874 1875 # Return the names. 1876 return names
1877  
1878  
1879      default_value_doc = ["N-state model default values", """
1880          ______________________________________________________________________________________
1881          |                             |                             |                        |
1882          | Data type                   | Object name                 | Value                  |
1883          |_____________________________|_____________________________|________________________|
1884          |                             |                             |                        |
1885          | Probabilities               | 'p0', 'p1', 'p2', ..., 'pN' | 1/N                    |
1886          |                             |                             |                        |
1887          | Euler angle alpha           | 'alpha0', 'alpha1', ...     | (c+1) * pi / (N+1)     |
1888          |                             |                             |                        |
1889          | Euler angle beta            | 'beta0', 'beta1', ...       | (c+1) * pi / (N+1)     |
1890          |                             |                             |                        |
1891          | Euler angle gamma           | 'gamma0', 'gamma1', ...     | (c+1) * pi / (N+1)     |
1892          |_____________________________|_____________________________|________________________|
1893  
1894          In this table, N is the total number of states and c is the index of a given state ranging from 0 to N-1.  The default probabilities are all set to be equal, whereas the angles are given a range of values so that no two states are equal at the start of optimisation.
1895  
1896          Note that setting the probability of the last state has no effect, as it is always equal to one minus the sum of all the other probabilities.
1897          """]
1898  
1899      def default_value(self, param):
1900 """The default N-state model parameter values. 1901 1902 @param param: The N-state model parameter. 1903 @type param: str 1904 @return: The default value. 1905 @rtype: float 1906 """ 1907 1908 # Split the parameter into its base name and index. 1909 name = self.return_data_name(param) 1910 index = self._param_model_index(param) 1911 1912 # The number of states as a float. 1913 N = float(cdp.N) 1914 1915 # Probability. 1916 if name == 'probs': 1917 return 1.0 / N 1918 1919 # Euler angles. 1920 elif name == 'alpha' or name == 'beta' or name == 'gamma': 1921 return (float(index)+1) * pi / (N+1.0)
1922 1923
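For example, for an N = 4 state model the defaults tabulated above work out as follows (a quick standalone check, not relax code):

    # Default parameter values for an N = 4 state model, following the table above.
    from math import pi

    N = 4
    probs = [1.0/N for c in range(N)]                  # all equal to 0.25
    alphas = [(c+1) * pi / (N+1) for c in range(N)]    # pi/5, 2*pi/5, 3*pi/5, 4*pi/5
    print(probs, alphas)
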
1924      def grid_search(self, lower=None, upper=None, inc=None, constraints=False, verbosity=0, sim_index=None):
1925 """The grid search function. 1926 1927 @param lower: The lower bounds of the grid search which must be equal to the number of 1928 parameters in the model. 1929 @type lower: array of numbers 1930 @param upper: The upper bounds of the grid search which must be equal to the number of 1931 parameters in the model. 1932 @type upper: array of numbers 1933 @param inc: The increments for each dimension of the space for the grid search. The 1934 number of elements in the array must equal to the number of parameters 1935 in the model. 1936 @type inc: array of int 1937 @param constraints: If True, constraints are applied during the grid search (elinating parts 1938 of the grid). If False, no constraints are used. 1939 @type constraints: bool 1940 @param verbosity: A flag specifying the amount of information to print. The higher the 1941 value, the greater the verbosity. 1942 @type verbosity: int 1943 """ 1944 1945 # Test if the N-state model has been set up. 1946 if not hasattr(cdp, 'model'): 1947 raise RelaxNoModelError('N-state') 1948 1949 # The number of parameters. 1950 n = self._param_num() 1951 1952 # Make sure that the length of the parameter array is > 0. 1953 if n == 0: 1954 print("Cannot run a grid search on a model with zero parameters, skipping the grid search.") 1955 return 1956 1957 # Test the grid search options. 1958 self.test_grid_ops(lower=lower, upper=upper, inc=inc, n=n) 1959 1960 # If inc is a single int, convert it into an array of that value. 1961 if isinstance(inc, int): 1962 inc = [inc]*n 1963 1964 # Setup the default bounds. 1965 if not lower: 1966 # Init. 1967 lower = [] 1968 upper = [] 1969 1970 # Loop over the parameters. 1971 for i in range(n): 1972 # i is in the parameter array. 1973 if i < len(cdp.params): 1974 # Probabilities (default values). 1975 if search('^p', cdp.params[i]): 1976 lower.append(0.0) 1977 upper.append(1.0) 1978 1979 # Angles (default values). 1980 if search('^alpha', cdp.params[i]) or search('^gamma', cdp.params[i]): 1981 lower.append(0.0) 1982 upper.append(2*pi) 1983 elif search('^beta', cdp.params[i]): 1984 lower.append(0.0) 1985 upper.append(pi) 1986 1987 # The paramagnetic centre. 1988 elif hasattr(cdp, 'paramag_centre_fixed') and not cdp.paramag_centre_fixed and (n - i) <= 3: 1989 lower.append(-100) 1990 upper.append(100) 1991 1992 # Otherwise this must be an alignment tensor component. 1993 else: 1994 lower.append(-1e-3) 1995 upper.append(1e-3) 1996 1997 # Minimisation. 1998 self.minimise(min_algor='grid', lower=lower, upper=upper, inc=inc, constraints=constraints, verbosity=verbosity, sim_index=sim_index)
1999 2000
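As a rough standalone illustration of the default bounds constructed above, using a hypothetical parameter name list in place of cdp.params (and a slightly stricter '^p[0-9]' pattern for the probabilities):

    # Standalone sketch of the default grid bound logic above.
    from math import pi
    from re import search

    params = ['p0', 'p1', 'alpha0', 'beta0', 'gamma0']
    lower, upper = [], []
    for name in params:
        if search('^p[0-9]', name):                              # probabilities
            lower.append(0.0); upper.append(1.0)
        elif search('^alpha', name) or search('^gamma', name):   # alpha/gamma angles
            lower.append(0.0); upper.append(2*pi)
        elif search('^beta', name):                              # beta angle
            lower.append(0.0); upper.append(pi)
        else:                                                    # alignment tensor components
            lower.append(-1e-3); upper.append(1e-3)
    print(list(zip(params, lower, upper)))
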
2001      def is_spin_param(self, name):
2002 """Determine whether the given parameter is spin specific. 2003 2004 @param name: The name of the parameter. 2005 @type name: str 2006 @return: False 2007 @rtype: bool 2008 """ 2009 2010 # Spin specific parameters. 2011 if name in ['r', 'heteronuc_type', 'proton_type']: 2012 return True 2013 2014 # All other parameters are global. 2015 return False
2016 2017
2018      def map_bounds(self, param, spin_id=None):
2019 """Create bounds for the OpenDX mapping function. 2020 2021 @param param: The name of the parameter to return the lower and upper bounds of. 2022 @type param: str 2023 @param spin_id: The spin identification string (unused). 2024 @type spin_id: None 2025 @return: The upper and lower bounds of the parameter. 2026 @rtype: list of float 2027 """ 2028 2029 # Paramagnetic centre. 2030 if search('^paramag_[xyz]$', param): 2031 return [-100.0, 100.0]
2032 2033
2034      def minimise(self, min_algor=None, min_options=None, func_tol=None, grad_tol=None, max_iterations=None, constraints=False, scaling=True, verbosity=0, sim_index=None, lower=None, upper=None, inc=None):
2035 """Minimisation function. 2036 2037 @param min_algor: The minimisation algorithm to use. 2038 @type min_algor: str 2039 @param min_options: An array of options to be used by the minimisation algorithm. 2040 @type min_options: array of str 2041 @param func_tol: The function tolerance which, when reached, terminates optimisation. Setting this to None turns of the check. 2042 @type func_tol: None or float 2043 @param grad_tol: The gradient tolerance which, when reached, terminates optimisation. Setting this to None turns of the check. 2044 @type grad_tol: None or float 2045 @param max_iterations: The maximum number of iterations for the algorithm. 2046 @type max_iterations: int 2047 @param constraints: If True, constraints are used during optimisation. 2048 @type constraints: bool 2049 @param scaling: If True, diagonal scaling is enabled during optimisation to allow the problem to be better conditioned. 2050 @type scaling: bool 2051 @param verbosity: A flag specifying the amount of information to print. The higher the value, the greater the verbosity. 2052 @type verbosity: int 2053 @param sim_index: The index of the simulation to optimise. This should be None if normal optimisation is desired. 2054 @type sim_index: None or int 2055 @keyword lower: The lower bounds of the grid search which must be equal to the number of parameters in the model. This optional argument is only used when doing a grid search. 2056 @type lower: array of numbers 2057 @keyword upper: The upper bounds of the grid search which must be equal to the number of parameters in the model. This optional argument is only used when doing a grid search. 2058 @type upper: array of numbers 2059 @keyword inc: The increments for each dimension of the space for the grid search. The number of elements in the array must equal to the number of parameters in the model. This argument is only used when doing a grid search. 2060 @type inc: array of int 2061 """ 2062 2063 # Set up the target function for direct calculation. 2064 model, param_vector, data_types, scaling_matrix = self._target_fn_setup(sim_index=sim_index, scaling=scaling) 2065 2066 # Nothing to do! 2067 if not len(param_vector): 2068 warn(RelaxWarning("The model has no parameters, minimisation cannot be performed.")) 2069 return 2070 2071 # Right, constraints cannot be used for the 'fixed' model. 2072 if constraints and cdp.model == 'fixed': 2073 warn(RelaxWarning("Turning constraints off. These cannot be used for the 'fixed' model.")) 2074 constraints = False 2075 2076 # Pop out the Method of Multipliers algorithm. 2077 if min_algor == 'Method of Multipliers': 2078 min_algor = min_options[0] 2079 min_options = min_options[1:] 2080 2081 # And constraints absolutely must be used for the 'population' model. 2082 if not constraints and cdp.model == 'population': 2083 warn(RelaxWarning("Turning constraints on. These absolutely must be used for the 'population' model.")) 2084 constraints = True 2085 2086 # Add the Method of Multipliers algorithm. 2087 min_options = (min_algor,) + min_options 2088 min_algor = 'Method of Multipliers' 2089 2090 # Linear constraints. 2091 if constraints: 2092 A, b = self._linear_constraints(data_types=data_types, scaling_matrix=scaling_matrix) 2093 else: 2094 A, b = None, None 2095 2096 # Grid search. 2097 if search('^[Gg]rid', min_algor): 2098 # Scaling. 2099 if scaling: 2100 for i in xrange(len(param_vector)): 2101 lower[i] = lower[i] / scaling_matrix[i, i] 2102 upper[i] = upper[i] / scaling_matrix[i, i] 2103 2104 # The search. 
2105 results = grid(func=model.func, args=(), num_incs=inc, lower=lower, upper=upper, A=A, b=b, verbosity=verbosity) 2106 2107 # Unpack the results. 2108 param_vector, func, iter_count, warning = results 2109 f_count = iter_count 2110 g_count = 0.0 2111 h_count = 0.0 2112 2113 # Minimisation. 2114 else: 2115 results = generic_minimise(func=model.func, dfunc=model.dfunc, d2func=model.d2func, args=(), x0=param_vector, min_algor=min_algor, min_options=min_options, func_tol=func_tol, grad_tol=grad_tol, maxiter=max_iterations, A=A, b=b, full_output=1, print_flag=verbosity) 2116 2117 # Unpack the results. 2118 if results == None: 2119 return 2120 param_vector, func, iter_count, f_count, g_count, h_count, warning = results 2121 2122 # Catch infinite chi-squared values. 2123 if isInf(func): 2124 raise RelaxInfError('chi-squared') 2125 2126 # Catch chi-squared values of NaN. 2127 if isNaN(func): 2128 raise RelaxNaNError('chi-squared') 2129 2130 # Make a last function call to update the back-calculated RDC and PCS structures to the optimal values. 2131 chi2 = model.func(param_vector) 2132 2133 # Scaling. 2134 if scaling: 2135 param_vector = dot(scaling_matrix, param_vector) 2136 2137 # Disassemble the parameter vector. 2138 self._disassemble_param_vector(param_vector=param_vector, data_types=data_types, sim_index=sim_index) 2139 2140 # Monte Carlo minimisation statistics. 2141 if sim_index != None: 2142 # Chi-squared statistic. 2143 cdp.chi2_sim[sim_index] = func 2144 2145 # Iterations. 2146 cdp.iter_sim[sim_index] = iter_count 2147 2148 # Function evaluations. 2149 cdp.f_count_sim[sim_index] = f_count 2150 2151 # Gradient evaluations. 2152 cdp.g_count_sim[sim_index] = g_count 2153 2154 # Hessian evaluations. 2155 cdp.h_count_sim[sim_index] = h_count 2156 2157 # Warning. 2158 cdp.warning_sim[sim_index] = warning 2159 2160 # Normal statistics. 2161 else: 2162 # Chi-squared statistic. 2163 cdp.chi2 = func 2164 2165 # Iterations. 2166 cdp.iter = iter_count 2167 2168 # Function evaluations. 2169 cdp.f_count = f_count 2170 2171 # Gradient evaluations. 2172 cdp.g_count = g_count 2173 2174 # Hessian evaluations. 2175 cdp.h_count = h_count 2176 2177 # Warning. 2178 cdp.warning = warning 2179 2180 # Statistical analysis. 2181 if sim_index == None and ('rdc' in data_types or 'pcs' in data_types): 2182 # Get the final back calculated data (for the Q-factor and 2183 self._minimise_bc_data(model) 2184 2185 # Calculate the RDC Q-factors. 2186 if 'rdc' in data_types: 2187 rdc.q_factors() 2188 2189 # Calculate the PCS Q-factors. 2190 if 'pcs' in data_types: 2191 pcs.q_factors()
2192 2193
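The diagonal scaling used above divides the grid bounds by the scaling matrix elements before optimisation and multiplies the optimised vector back afterwards.  A minimal numpy sketch of that round trip, with an invented two-parameter scaling matrix:

    # Minimal sketch of diagonal scaling: optimise in scaled space, then unscale.
    from numpy import array, diag, dot
    from numpy.linalg import inv

    scaling_matrix = diag([1.0, 1e-4])           # invented scaling for two parameters
    params = array([0.5, 2.3e-4])                # parameters in their natural units

    scaled = dot(inv(scaling_matrix), params)    # well conditioned vector passed to the optimiser
    unscaled = dot(scaling_matrix, scaled)       # recovered after minimisation, as in the code above
    print(scaled, unscaled)
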
2194      def model_statistics(self, model_info=None, spin_id=None, global_stats=None):
2195 """Return the k, n, and chi2 model statistics. 2196 2197 k - number of parameters. 2198 n - number of data points. 2199 chi2 - the chi-squared value. 2200 2201 2202 @keyword model_info: The data returned from model_loop() (unused). 2203 @type model_info: None 2204 @keyword spin_id: The spin identification string. This is ignored in the N-state model. 2205 @type spin_id: None or str 2206 @keyword global_stats: A parameter which determines if global or local statistics are returned. For the N-state model, this argument is ignored. 2207 @type global_stats: None or bool 2208 @return: The optimisation statistics, in tuple format, of the number of parameters (k), the number of data points (n), and the chi-squared value (chi2). 2209 @rtype: tuple of (int, int, float) 2210 """ 2211 2212 # Return the values. 2213 return self._param_num(), self._num_data_points(), cdp.chi2
2214  
2215  
2216      return_data_name_doc = ["N-state model data type string matching patterns", """
2217          ____________________________________________________________________________________________
2218          |                        |                             |                                   |
2219          | Data type              | Object name                 | Patterns                          |
2220          |________________________|_____________________________|___________________________________|
2221          |                        |                             |                                   |
2222          | Probabilities          | 'probs'                     | 'p0', 'p1', 'p2', ..., 'pN'       |
2223          |                        |                             |                                   |
2224          | Euler angle alpha      | 'alpha'                     | 'alpha0', 'alpha1', ...           |
2225          |                        |                             |                                   |
2226          | Euler angle beta       | 'beta'                      | 'beta0', 'beta1', ...             |
2227          |                        |                             |                                   |
2228          | Euler angle gamma      | 'gamma'                     | 'gamma0', 'gamma1', ...           |
2229          |                        |                             |                                   |
2230          | Bond length            | 'r'                         | '^r$' or '[Bb]ond[ -_][Ll]ength'  |
2231          |                        |                             |                                   |
2232          | Heteronucleus type     | 'heteronuc_type'            | '^[Hh]eteronucleus$'              |
2233          |                        |                             |                                   |
2234          | Proton type            | 'proton_type'               | '^[Pp]roton$'                     |
2235          |________________________|_____________________________|___________________________________|
2236  
2237          The objects corresponding to the object names are lists (or arrays) with each element corresponding to each state.
2238          """]
2239  
2240      def return_data_name(self, param):
2241 """Return a unique identifying string for the N-state model parameter. 2242 2243 @param param: The N-state model parameter. 2244 @type param: str 2245 @return: The unique parameter identifying string. 2246 @rtype: str 2247 """ 2248 2249 # Probability. 2250 if search('^p[0-9]*$', param): 2251 return 'probs' 2252 2253 # Alpha Euler angle. 2254 if search('^alpha', param): 2255 return 'alpha' 2256 2257 # Beta Euler angle. 2258 if search('^beta', param): 2259 return 'beta' 2260 2261 # Gamma Euler angle. 2262 if search('^gamma', param): 2263 return 'gamma' 2264 2265 # Bond length. 2266 if search('^r$', param) or search('[Bb]ond[ -_][Ll]ength', param): 2267 return 'r' 2268 2269 # Heteronucleus type. 2270 if param == 'heteronuc_type': 2271 return 'heteronuc_type' 2272 2273 # Proton type. 2274 if param == 'proton_type': 2275 return 'proton_type' 2276 2277 # Paramagnetic centre. 2278 if search('^paramag_[xyz]$', param): 2279 return param
2280 2281
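For example, the pattern matching above produces the following user string to internal object name mappings (illustrative values only; the state index itself is recovered separately by _param_model_index()):

    # Example mappings produced by return_data_name().
    examples = {
        'p2': 'probs',              # probability of the third state
        'alpha0': 'alpha',          # Euler angle alpha of the first state
        'gamma10': 'gamma',         # Euler angle gamma of the eleventh state
        'bond_length': 'r',         # matches '[Bb]ond[ -_][Ll]ength'
        'paramag_x': 'paramag_x',   # paramagnetic centre coordinates map to themselves
    }
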
2282      def return_error(self, data_id=None):
2283 """Create and return the spin specific Monte Carlo Ri error structure. 2284 2285 @keyword data_id: The list of spin ID, data type, and alignment ID, as yielded by the base_data_loop() generator method. 2286 @type data_id: str 2287 @return: The Monte Carlo simulation data errors. 2288 @rtype: list of floats 2289 """ 2290 2291 # Initialise the MC data structure. 2292 mc_errors = [] 2293 2294 # Get the spin container and global spin index. 2295 spin = return_spin(data_id[0]) 2296 2297 # Skip deselected spins. 2298 if not spin.select: 2299 return 2300 2301 # RDC data. 2302 if data_id[1] == 'rdc' and hasattr(spin, 'rdc'): 2303 # Do errors exist? 2304 if not hasattr(spin, 'rdc_err'): 2305 raise RelaxError("The RDC errors are missing for spin '%s'." % data_id[0]) 2306 2307 # Append the data. 2308 mc_errors.append(spin.rdc_err[data_id[2]]) 2309 2310 # PCS data. 2311 elif data_id[1] == 'pcs' and hasattr(spin, 'pcs'): 2312 # Do errors exist? 2313 if not hasattr(spin, 'pcs_err'): 2314 raise RelaxError("The PCS errors are missing for spin '%s'." % data_id[0]) 2315 2316 # Append the data. 2317 mc_errors.append(spin.pcs_err[data_id[2]]) 2318 2319 # NOESY data. 2320 elif hasattr(spin, 'noesy'): 2321 # Do errors exist? 2322 if not hasattr(spin, 'noesy_err'): 2323 raise RelaxError("The NOESY errors are missing for spin '%s'." % data_id[0]) 2324 2325 # Append the data. 2326 mc_errors.append(spin.noesy_err) 2327 2328 # Return the errors. 2329 return mc_errors
2330 2331
2332      def return_grace_string(self, param):
2333 """Return the Grace string representation of the parameter. 2334 2335 This is used for axis labelling. 2336 2337 @param param: The specific analysis parameter. 2338 @type param: str 2339 @return: The Grace string representation of the parameter. 2340 @rtype: str 2341 """ 2342 2343 # The measured PCS. 2344 if param == 'pcs': 2345 return "Measured PCS" 2346 2347 # The back-calculated PCS. 2348 if param == 'pcs_bc': 2349 return "Back-calculated PCS" 2350 2351 # The measured RDC. 2352 if param == 'rdc': 2353 return "Measured RDC" 2354 2355 # The back-calculated RDC. 2356 if param == 'rdc_bc': 2357 return "Back-calculated RDC"
2358 2359
2360      def return_units(self, param):
2361 """Return a string representing the parameters units. 2362 2363 @param param: The name of the parameter to return the units string for. 2364 @type param: str 2365 @return: The parameter units string. 2366 @rtype: str 2367 """ 2368 2369 # PCSs. 2370 if param == 'pcs' or param == 'pcs_bc': 2371 return 'ppm' 2372 2373 # RDCs. 2374 if param == 'rdc' or param == 'rdc_bc': 2375 return 'Hz'
2376  
2377  
2378      set_doc = ["N-state model set details", """
2379          Setting parameters for the N-state model is a little different from the other types of analyses, as each state has a set of parameters with the same names as the other states.  To set the parameters for a specific state c (ranging from 0 for the first state to N-1 for the last), the number c should be added to the end of the parameter name.  So the Euler angle gamma of the third state is specified using the string 'gamma2'.
2380          """]
2381  
2382  
2383      def set_error(self, model_info, index, error):
2384 """Set the parameter errors. 2385 2386 @param model_info: The global model index originating from model_loop(). 2387 @type model_info: int 2388 @param index: The index of the parameter to set the errors for. 2389 @type index: int 2390 @param error: The error value. 2391 @type error: float 2392 """ 2393 2394 # Align parameters. 2395 names = ['Axx', 'Ayy', 'Axy', 'Axz', 'Ayz'] 2396 2397 # Alignment tensor parameters. 2398 if index < len(cdp.align_ids)*5: 2399 # The tensor and parameter index. 2400 param_index = index % 5 2401 tensor_index = (index - index % 5) / 5 2402 2403 # Set the error. 2404 tensor = return_tensor(index=tensor_index, skip_fixed=True) 2405 return setattr(tensor, names[param_index]+'_err', error)
2406 2407
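The flat parameter index used above packs five tensor components per alignment; a quick standalone check of the index arithmetic:

    # Index arithmetic used above: five components per alignment tensor, so
    # e.g. index 7 is component 'Axy' of the second tensor.
    names = ['Axx', 'Ayy', 'Axy', 'Axz', 'Ayz']
    for index in range(10):
        param_index = index % 5
        tensor_index = (index - index % 5) // 5    # integer division, as above
        print(index, tensor_index, names[param_index])
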
2408      def set_param_values(self, param=None, value=None, spin_id=None, force=True):
2409 """Set the N-state model parameter values. 2410 2411 @keyword param: The parameter name list. 2412 @type param: list of str 2413 @keyword value: The parameter value list. 2414 @type value: list 2415 @keyword spin_id: The spin identification string (unused). 2416 @type spin_id: None 2417 @keyword force: A flag which if True will cause current values to be overwritten. If False, a RelaxError will raised if the parameter value is already set. 2418 @type force: bool 2419 """ 2420 2421 # Checks. 2422 arg_check.is_str_list(param, 'parameter name') 2423 arg_check.is_list(value, 'parameter value') 2424 2425 # Loop over the parameters. 2426 for i in range(len(param)): 2427 # Get the object's name. 2428 obj_name = self.return_data_name(param[i]) 2429 2430 # Is the parameter is valid? 2431 if not obj_name: 2432 raise RelaxError("The parameter '%s' is not valid for this data pipe type." % param[i]) 2433 2434 # Set the indexed parameter. 2435 if obj_name in ['probs', 'alpha', 'beta', 'gamma']: 2436 # The index. 2437 index = self._param_model_index(param[i]) 2438 2439 # Set. 2440 obj = getattr(cdp, obj_name) 2441 obj[index] = value[i] 2442 2443 # The paramagnetic centre. 2444 if search('^paramag_[xyz]$', obj_name): 2445 # Init. 2446 if not hasattr(cdp, 'paramagnetic_centre'): 2447 cdp.paramagnetic_centre = zeros(3, float64) 2448 2449 # Set the coordinate. 2450 if obj_name == 'paramag_x': 2451 index = 0 2452 elif obj_name == 'paramag_y': 2453 index = 1 2454 else: 2455 index = 2 2456 2457 # Set the value in Angstrom. 2458 cdp.paramagnetic_centre[index] = value[i] 2459 2460 # Set the spin parameters. 2461 else: 2462 for spin in spin_loop(spin_id): 2463 setattr(spin, obj_name, value[i])
2464 2465
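Combining this with the set_doc convention above, setting the gamma angle of the third state and the first probability directly through the class method could look like the following sketch ('api' is a hypothetical instance of this class, and an initialised N-state data pipe with at least three states is assumed):

    # Hypothetical direct use of set_param_values().
    api.set_param_values(param=['gamma2', 'p0'], value=[1.57, 0.2])
    # 'gamma2' -> cdp.gamma[2] (the third state), 'p0' -> cdp.probs[0].
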
2466      def sim_init_values(self):
2467 """Initialise the Monte Carlo parameter values.""" 2468 2469 # Get the minimisation statistic object names. 2470 min_names = self.data_names(set='min') 2471 2472 # Alignments. 2473 if hasattr(cdp, 'align_tensors'): 2474 # The parameter names. 2475 names = ['Axx', 'Ayy', 'Axy', 'Axz', 'Ayz'] 2476 2477 # Loop over the alignments, adding the alignment tensor parameters to the tensor data container. 2478 for i in xrange(len(cdp.align_tensors)): 2479 # Fixed tensor. 2480 if cdp.align_tensors[i].fixed: 2481 continue 2482 2483 # Loop over all the parameter names. 2484 for object_name in names: 2485 # Name for the simulation object. 2486 sim_object_name = object_name + '_sim' 2487 2488 # Create the simulation object. 2489 setattr(cdp.align_tensors[i], sim_object_name, []) 2490 2491 # Get the simulation object. 2492 sim_object = getattr(cdp.align_tensors[i], sim_object_name) 2493 2494 # Set the initial simulation values to the optimised tensor parameter values. 2495 for j in xrange(cdp.sim_number): 2496 sim_object.append(getattr(cdp.align_tensors[i], object_name)) 2497 2498 # Loop over all the minimisation object names. 2499 for object_name in min_names: 2500 # Name for the simulation object. 2501 sim_object_name = object_name + '_sim' 2502 2503 # Create the simulation object. 2504 setattr(cdp, sim_object_name, []) 2505 2506 # Get the simulation object. 2507 sim_object = getattr(cdp, sim_object_name) 2508 2509 # Loop over the simulations. 2510 for j in xrange(cdp.sim_number): 2511 # Append None to fill the structure. 2512 sim_object.append(None)
2513 2514
2515      def sim_pack_data(self, data_id, sim_data):
2516 """Pack the Monte Carlo simulation data. 2517 2518 @keyword data_id: The list of spin ID, data type, and alignment ID, as yielded by the base_data_loop() generator method. 2519 @type data_id: list of str 2520 @param sim_data: The Monte Carlo simulation data. 2521 @type sim_data: list of float 2522 """ 2523 2524 # Get the spin container. 2525 spin = return_spin(data_id[0]) 2526 2527 # RDC data. 2528 if data_id[1] == 'rdc' and hasattr(spin, 'rdc'): 2529 # Initialise. 2530 if not hasattr(spin, 'rdc_sim'): 2531 spin.rdc_sim = {} 2532 2533 # Store the data structure. 2534 spin.rdc_sim[data_id[2]] = [] 2535 for i in range(cdp.sim_number): 2536 spin.rdc_sim[data_id[2]].append(sim_data[i][0]) 2537 2538 # PCS data. 2539 if data_id[1] == 'pcs' and hasattr(spin, 'pcs'): 2540 # Initialise. 2541 if not hasattr(spin, 'pcs_sim'): 2542 spin.pcs_sim = {} 2543 2544 # Store the data structure. 2545 spin.pcs_sim[data_id[2]] = [] 2546 for i in range(cdp.sim_number): 2547 spin.pcs_sim[data_id[2]].append(sim_data[i][0]) 2548 2549 # NOESY data. 2550 if data_id[1] == 'noesy' and hasattr(spin, 'noesy'): 2551 # Store the data structure. 2552 spin.noesy_sim = [] 2553 for i in range(cdp.sim_number): 2554 spin.noesy_sim[data_id[2]].append(sim_data[i][0])
2555 2556
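The sim_data argument above holds one back-calculated value per Monte Carlo simulation, mirroring the single-element lists returned by create_mc_data(); a sketch of its shape with invented numbers:

    # Shape of the simulation data packed above, for one RDC data point and
    # three Monte Carlo simulations (invented numbers).
    sim_data = [[1.02], [0.97], [1.11]]           # one back-calculated value per simulation
    rdc_sim = [sim_data[i][0] for i in range(3)]
    print(rdc_sim)                                # what ends up in spin.rdc_sim[align_id]
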
2557      def sim_return_param(self, model_info, index):
2558 """Return the array of simulation parameter values. 2559 2560 @param model_info: The global model index originating from model_loop(). 2561 @type model_info: int 2562 @param index: The index of the parameter to return the array of values for. 2563 @type index: int 2564 @return: The array of simulation parameter values. 2565 @rtype: list of float 2566 """ 2567 2568 # Align parameters. 2569 names = ['Axx', 'Ayy', 'Axy', 'Axz', 'Ayz'] 2570 2571 # Alignment tensor parameters. 2572 if index < num_tensors(skip_fixed=True)*5: 2573 # The tensor and parameter index. 2574 param_index = index % 5 2575 tensor_index = (index - index % 5) / 5 2576 2577 # Return the simulation parameter array. 2578 tensor = return_tensor(index=tensor_index, skip_fixed=True) 2579 return getattr(tensor, names[param_index]+'_sim')
2580