
Source Code for Module specific_analyses.frame_order.optimisation

###############################################################################
#                                                                             #
# Copyright (C) 2009-2014 Edward d'Auvergne                                   #
#                                                                             #
# This file is part of the program relax (http://www.nmr-relax.com).          #
#                                                                             #
# This program is free software: you can redistribute it and/or modify        #
# it under the terms of the GNU General Public License as published by        #
# the Free Software Foundation, either version 3 of the License, or           #
# (at your option) any later version.                                         #
#                                                                             #
# This program is distributed in the hope that it will be useful,             #
# but WITHOUT ANY WARRANTY; without even the implied warranty of              #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the               #
# GNU General Public License for more details.                                #
#                                                                             #
# You should have received a copy of the GNU General Public License           #
# along with this program.  If not, see <http://www.gnu.org/licenses/>.       #
#                                                                             #
###############################################################################

# Module docstring.
"""Module for the optimisation of the frame order models."""

# Python module imports.
from math import cos, pi
from numpy import arccos, array, dot, float64, ones, zeros
from numpy.linalg import inv, norm
import sys
from warnings import warn

# relax module imports.
from lib.float import isNaN, isInf
from lib.errors import RelaxError, RelaxInfError, RelaxNaNError, RelaxNoPCSError, RelaxNoRDCError
from lib.geometry.angles import wrap_angles
from lib.order import order_parameters
from lib.physical_constants import dipolar_constant, g1H, return_gyromagnetic_ratio
from lib.warnings import RelaxWarning
from pipe_control.interatomic import interatomic_loop
from pipe_control.mol_res_spin import return_spin, spin_loop
from pipe_control.rdc import check_rdcs
from pipe_control.structure.mass import pipe_centre_of_mass
from specific_analyses.frame_order.checks import check_ave_domain_setup
from specific_analyses.frame_order.data import base_data_types, domain_moving, pivot_fixed, tensor_loop, translation_fixed
from specific_analyses.frame_order.parameters import assemble_param_vector, assemble_scaling_matrix
from target_functions import frame_order
 48   
def grid_row(incs, lower, upper, dist_type=None, end_point=True):
    """Set up a row of the grid search for a given parameter.

    @param incs:        The number of increments.
    @type incs:         int
    @param lower:       The lower bounds.
    @type lower:        float
    @param upper:       The upper bounds.
    @type upper:        float
    @keyword dist_type: The spacing or distribution type between grid nodes.  If None, then a linear grid row is returned.  If 'acos', then an inverse cos distribution of points is returned (e.g. for uniform sampling in angular space).
    @type dist_type:    None or str
    @keyword end_point: A flag which if False will cause the end point to be removed.
    @type end_point:    bool
    @return:            The row of the grid.
    @rtype:             list of float
    """

    # Init.
    row = []

    # Linear grid.
    if dist_type == None:
        # Loop over the increments.
        for i in range(incs):
            # The row.
            row.append(lower + i * (upper - lower) / (incs - 1.0))

    # Inverse cos distribution.
    elif dist_type == 'acos':
        # Generate the increment values of v from cos(upper) to cos(lower).
        v = zeros(incs, float64)
        val = (cos(lower) - cos(upper)) / (incs - 1.0)
        for i in range(incs):
            v[-i-1] = cos(upper) + float(i) * val

        # Generate the distribution.
        row = arccos(v)

    # Remove the last point.
    if not end_point:
        row = row[:-1]

    # Return the row (as a list).
    return list(row)
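# Illustrative note (not part of the original relax source):  the 'acos'
# distribution spaces the grid nodes evenly in cos space, which corresponds to
# uniform sampling of a polar angle over the sphere.  For example, the
# hypothetical call
#     grid_row(5, 0.0, pi, dist_type='acos')
# would return approximately
#     [0.0, 1.0472, 1.5708, 2.0944, 3.1416],
# i.e. arccos([1.0, 0.5, 0.0, -0.5, -1.0]).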
def minimise_setup_atomic_pos(sim_index=None):
    """Set up the atomic position data structures for optimisation using PCSs and PREs as base data sets.

    @keyword sim_index: The index of the simulation to optimise.  This should be None if normal optimisation is desired.
    @type sim_index:    None or int
    @return:            The atomic positions (the first index is the spins, the second is the structures, and the third is the atomic coordinates) and the paramagnetic centre.
    @rtype:             numpy rank-3 array, numpy rank-1 array.
    """

    # Initialise.
    atomic_pos = []

    # Store the atomic positions.
    for spin, spin_id in spin_loop(selection=domain_moving(), return_id=True):
        # Skip deselected spins.
        if not spin.select:
            continue

        # Only use spins with alignment/paramagnetic data.
        if not hasattr(spin, 'pcs') and not hasattr(spin, 'pre'):
            continue

        # A single atomic position.
        if spin.pos.shape == (3,):
            atomic_pos.append(spin.pos)

        # A single model (rank-2 array of a single position).
        elif spin.pos.shape == (1, 3):
            atomic_pos.append(spin.pos[0])

        # Average multiple atomic positions.
        else:
            # First throw a warning to tell the user what is happening.
            if sim_index == None:
                warn(RelaxWarning("Averaging the %s atomic positions for the PCS for the spin '%s'." % (len(spin.pos), spin_id)))

            # The average position.
            ave_pos = zeros(3, float64)
            for i in range(len(spin.pos)):
                ave_pos += spin.pos[i]
            ave_pos = ave_pos / len(spin.pos)

            # Store.
            atomic_pos.append(ave_pos)

    # Convert to numpy objects.
    atomic_pos = array(atomic_pos, float64)

    # The paramagnetic centre.
    if not hasattr(cdp, 'paramagnetic_centre'):
        paramag_centre = zeros(3, float64)
    else:
        paramag_centre = array(cdp.paramagnetic_centre)

    # Return the data structures.
    return atomic_pos, paramag_centre
def minimise_setup_pcs(sim_index=None):
    """Set up the data structures for optimisation using PCSs as base data sets.

    @keyword sim_index: The index of the simulation to optimise.  This should be None if normal optimisation is desired.
    @type sim_index:    None or int
    @return:            The assembled data structures for using PCSs as the base data for optimisation.  These include:
                            - the PCS values.
                            - the PCS errors.
                            - the PCS weights.
                            - the experimental temperatures.
                            - the spectrometer frequencies (converted into field strengths in Tesla for the PCS constant).
    @rtype:             tuple of (numpy rank-2 array, numpy rank-2 array, numpy rank-2 array, numpy rank-1 array, numpy rank-1 array)
    """

    # Data setup tests.
    if not hasattr(cdp, 'paramagnetic_centre'):
        raise RelaxError("The paramagnetic centre has not yet been specified.")
    if not hasattr(cdp, 'temperature'):
        raise RelaxError("The experimental temperatures have not been set.")
    if not hasattr(cdp, 'spectrometer_frq'):
        raise RelaxError("The spectrometer frequencies of the experiments have not been set.")

    # Initialise.
    pcs = []
    pcs_err = []
    pcs_weight = []
    temp = []
    frq = []

    # The PCS data.
    for i in range(len(cdp.align_ids)):
        # Alias the ID.
        align_id = cdp.align_ids[i]

        # Skip non-optimised data.
        if not opt_uses_align_data(align_id):
            continue

        # Append empty arrays to the PCS structures.
        pcs.append([])
        pcs_err.append([])
        pcs_weight.append([])

        # Get the temperature for the PCS constant.
        if align_id in cdp.temperature:
            temp.append(cdp.temperature[align_id])

        # The temperature must be given!
        else:
            raise RelaxError("The experimental temperature for the alignment ID '%s' has not been set." % align_id)

        # Get the spectrometer frequency in Tesla units for the PCS constant.
        if align_id in cdp.spectrometer_frq:
            frq.append(cdp.spectrometer_frq[align_id] * 2.0 * pi / g1H)

        # The frequency must be given!
        else:
            raise RelaxError("The spectrometer frequency for the alignment ID '%s' has not been set." % align_id)

        # Spin loop over the domain.
        j = 0
        for spin in spin_loop(selection=domain_moving()):
            # Skip deselected spins.
            if not spin.select:
                continue

            # Skip spins without PCS data.
            if not hasattr(spin, 'pcs'):
                continue

            # Append the PCSs to the list.
            if align_id in spin.pcs.keys():
                if sim_index != None:
                    pcs[-1].append(spin.pcs_sim[align_id][sim_index])
                else:
                    pcs[-1].append(spin.pcs[align_id])
            else:
                pcs[-1].append(None)

            # Append the PCS errors.
            if hasattr(spin, 'pcs_err') and align_id in spin.pcs_err.keys():
                pcs_err[-1].append(spin.pcs_err[align_id])
            else:
                pcs_err[-1].append(None)

            # Append the weight.
            if hasattr(spin, 'pcs_weight') and align_id in spin.pcs_weight.keys():
                pcs_weight[-1].append(spin.pcs_weight[align_id])
            else:
                pcs_weight[-1].append(1.0)

            # Spin index.
            j = j + 1

    # Convert to numpy objects.
    pcs = array(pcs, float64)
    pcs_err = array(pcs_err, float64)
    pcs_weight = array(pcs_weight, float64)

    # Convert the PCS from ppm to no units.
    pcs = pcs * 1e-6
    pcs_err = pcs_err * 1e-6

    # Return the data structures.
    return pcs, pcs_err, pcs_weight, temp, frq
def minimise_setup_rdcs(sim_index=None):
    """Set up the data structures for optimisation using RDCs as base data sets.

    @keyword sim_index: The index of the simulation to optimise.  This should be None if normal optimisation is desired.
    @type sim_index:    None or int
    @return:            The assembled data structures for using RDCs as the base data for optimisation.  These include:
                            - rdc, the RDC values.
                            - rdc_err, the RDC errors.
                            - rdc_weight, the RDC weights.
                            - vectors, the interatomic vectors.
                            - rdc_const, the dipolar constants.
                            - absolute, the absolute value flags (as 1's and 0's).
    @rtype:             tuple of (numpy rank-2 array, numpy rank-2 array, numpy rank-2 array, numpy rank-3 array, numpy rank-2 array, numpy rank-2 array)
    """

    # Initialise.
    rdc = []
    rdc_err = []
    rdc_weight = []
    unit_vect = []
    rdc_const = []
    absolute = []

    # The unit vectors and RDC constants.
    for interatom in interatomic_loop(selection1=domain_moving()):
        # RDC checks.
        if not check_rdcs(interatom):
            continue

        # Get the spins.
        spin1 = return_spin(interatom.spin_id1)
        spin2 = return_spin(interatom.spin_id2)

        # A single unit vector.
        if interatom.vector.shape == (3,):
            unit_vect.append(interatom.vector)

        # Average multiple unit vectors.
        else:
            # First throw a warning to tell the user what is happening.
            if sim_index == None:
                warn(RelaxWarning("Averaging the %s unit vectors for the RDC for the spin pair '%s' and '%s'." % (len(interatom.vector), interatom.spin_id1, interatom.spin_id2)))

            # The average position.
            ave_vector = zeros(3, float64)
            for i in range(len(interatom.vector)):
                ave_vector += interatom.vector[i]

            # Store.
            unit_vect.append(ave_vector)

        # Normalise (to be sure).
        unit_vect[-1] = unit_vect[-1] / norm(unit_vect[-1])

        # Gyromagnetic ratios.
        g1 = return_gyromagnetic_ratio(spin1.isotope)
        g2 = return_gyromagnetic_ratio(spin2.isotope)

        # Calculate the RDC dipolar constant (in Hertz, and the 3 comes from the alignment tensor), and append it to the list.
        rdc_const.append(3.0/(2.0*pi) * dipolar_constant(g1, g2, interatom.r))

    # Fix the unit vector data structure.
    num = None
    for rdc_index in range(len(unit_vect)):
        # Number of vectors.
        if num == None:
            if unit_vect[rdc_index] is not None:
                num = len(unit_vect[rdc_index])
            continue

        # Check.
        if unit_vect[rdc_index] is not None and len(unit_vect[rdc_index]) != num:
            raise RelaxError("The number of interatomic vectors does not match for all data:\n%s" % unit_vect)

    # Missing unit vectors.
    if num == None:
        raise RelaxError("No interatomic vectors could be found.")

    # Update None entries.
    for i in range(len(unit_vect)):
        if unit_vect[i] is None:
            unit_vect[i] = [[None, None, None]]*num

    # The RDC data.
    for i in range(len(cdp.align_ids)):
        # Alias the ID.
        align_id = cdp.align_ids[i]

        # Skip non-optimised data.
        if not opt_uses_align_data(align_id):
            continue

        # Append empty arrays to the RDC structures.
        rdc.append([])
        rdc_err.append([])
        rdc_weight.append([])
        absolute.append([])

        # Interatom loop over the domain.
        for interatom in interatomic_loop(domain_moving()):
            # Get the spins.
            spin1 = return_spin(interatom.spin_id1)
            spin2 = return_spin(interatom.spin_id2)

            # Skip deselected spins.
            if not spin1.select or not spin2.select:
                continue

            # Only use interatomic data containers with RDC and vector data.
            if not hasattr(interatom, 'rdc') or not hasattr(interatom, 'vector'):
                continue

            # Defaults of None.
            value = None
            error = None

            # Pseudo-atom set up.
            if (hasattr(spin1, 'members') or hasattr(spin2, 'members')) and align_id in interatom.rdc.keys():
                raise RelaxError("Pseudo-atoms are currently not supported for the frame order analysis.")

            # Normal set up.
            elif align_id in interatom.rdc.keys():
                # The RDC.
                if sim_index != None:
                    value = interatom.rdc_sim[align_id][sim_index]
                else:
                    value = interatom.rdc[align_id]

                # The error.
                if hasattr(interatom, 'rdc_err') and align_id in interatom.rdc_err.keys():
                    error = interatom.rdc_err[align_id]

            # Append the RDCs to the list.
            rdc[-1].append(value)

            # Append the RDC errors.
            rdc_err[-1].append(error)

            # Append the weight.
            if hasattr(interatom, 'rdc_weight') and align_id in interatom.rdc_weight.keys():
                rdc_weight[-1].append(interatom.rdc_weight[align_id])
            else:
                rdc_weight[-1].append(1.0)

            # Append the absolute value flag.
            if hasattr(interatom, 'absolute_rdc') and align_id in interatom.absolute_rdc.keys():
                absolute[-1].append(interatom.absolute_rdc[align_id])
            else:
                absolute[-1].append(False)

    # Convert to numpy objects.
    rdc = array(rdc, float64)
    rdc_err = array(rdc_err, float64)
    rdc_weight = array(rdc_weight, float64)
    unit_vect = array(unit_vect, float64)
    rdc_const = array(rdc_const, float64)
    absolute = array(absolute, float64)

    # Return the data structures.
    return rdc, rdc_err, rdc_weight, unit_vect, rdc_const, absolute
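# Illustrative note (not part of the original relax source):  dipolar_constant()
# is assumed here to return the standard dipolar coupling constant
#     d = -(mu0 / (4*pi)) * (gamma1 * gamma2 * hbar) / r**3
# in rad.s^-1, so that the 3.0/(2.0*pi) prefactor used above converts the value
# into Hertz, with the factor of 3 relating to the alignment tensor convention
# as stated in the in-line comment.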
def minimise_setup_tensors(sim_index=None):
    """Set up the data structures for optimisation using alignment tensors as base data sets.

    @keyword sim_index: The simulation index.  This should be None if normal optimisation is desired.
    @type sim_index:    None or int
    @return:            The assembled data structures for using alignment tensors as the base data for optimisation.  These include:
                            - full_tensors, the full tensors as concatenated arrays.
                            - full_err, the full tensor errors as concatenated arrays.
                            - full_in_ref_frame, the flags specifying if the tensor is the full or reduced tensor in the non-moving reference domain.
    @rtype:             tuple of 3 numpy rank-1 arrays (the first two are of length 5n, the last of length n)
    """

    # Checks.
    if not hasattr(cdp, 'ref_domain'):
        raise RelaxError("The reference domain has not been set up.")
    if not hasattr(cdp.align_tensors, 'reduction'):
        raise RelaxError("The tensor reductions have not been specified.")
    for i, tensor in tensor_loop():
        if not hasattr(tensor, 'domain'):
            raise RelaxError("The domain that the '%s' tensor is attached to has not been set." % tensor.name)

    # Initialise.
    n = len(cdp.align_tensors.reduction)
    full_tensors = zeros(n*5, float64)
    full_err = ones(n*5, float64) * 1e-5
    full_in_ref_frame = zeros(n, float64)

    # Loop over the full tensors.
    for i, tensor in tensor_loop(red=False):
        # The full tensor.
        full_tensors[5*i + 0] = tensor.Axx
        full_tensors[5*i + 1] = tensor.Ayy
        full_tensors[5*i + 2] = tensor.Axy
        full_tensors[5*i + 3] = tensor.Axz
        full_tensors[5*i + 4] = tensor.Ayz

        # The full tensor corresponds to the frame of reference.
        if cdp.ref_domain == tensor.domain:
            full_in_ref_frame[i] = 1

        # The full tensor errors.
        if hasattr(tensor, 'Axx_err'):
            full_err[5*i + 0] = tensor.Axx_err
            full_err[5*i + 1] = tensor.Ayy_err
            full_err[5*i + 2] = tensor.Axy_err
            full_err[5*i + 3] = tensor.Axz_err
            full_err[5*i + 4] = tensor.Ayz_err

    # Return the data structures.
    return full_tensors, full_err, full_in_ref_frame
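# Illustrative note (not part of the original relax source):  the concatenated
# layout of the returned arrays, e.g. for n = 2 alignment tensors, is
#     full_tensors = [Axx_1, Ayy_1, Axy_1, Axz_1, Ayz_1,
#                     Axx_2, Ayy_2, Axy_2, Axz_2, Ayz_2],
# with full_err laid out identically and full_in_ref_frame holding one flag
# per tensor.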
def opt_uses_align_data(align_id=None):
    """Determine if the PCS or RDC data for the given alignment ID is needed for optimisation.

    @keyword align_id:  The optional alignment ID string.
    @type align_id:     str
    @return:            True if alignment data is to be used for optimisation, False otherwise.
    @rtype:             bool
    """

    # No alignment IDs.
    if not hasattr(cdp, 'align_ids'):
        return False

    # Convert the align IDs to an array, or take all IDs.
    if align_id:
        align_ids = [align_id]
    else:
        align_ids = cdp.align_ids

    # Check the PCS and RDC.
    for align_id in align_ids:
        if opt_uses_pcs(align_id) or opt_uses_rdc(align_id):
            return True

    # No alignment data is used for optimisation.
    return False
def opt_uses_pcs(align_id):
    """Determine if the PCS data for the given alignment ID is needed for optimisation.

    @param align_id:    The alignment ID string.
    @type align_id:     str
    @return:            True if the PCS data is to be used for optimisation, False otherwise.
    @rtype:             bool
    """

    # No alignment IDs.
    if not hasattr(cdp, 'pcs_ids'):
        return False

    # No PCS data for the alignment.
    if align_id not in cdp.pcs_ids:
        return False

    # The PCS data is to be used for optimisation.
    return True
def opt_uses_rdc(align_id):
    """Determine if the RDC data for the given alignment ID is needed for optimisation.

    @param align_id:    The alignment ID string.
    @type align_id:     str
    @return:            True if the RDC data is to be used for optimisation, False otherwise.
    @rtype:             bool
    """

    # No alignment IDs.
    if not hasattr(cdp, 'rdc_ids'):
        return False

    # No RDC data for the alignment.
    if align_id not in cdp.rdc_ids:
        return False

    # The RDC data is to be used for optimisation.
    return True
def store_bc_data(target_fn):
    """Store the back-calculated data.

    @param target_fn:   The frame-order target function class.
    @type target_fn:    class instance
    """

    # Loop over the reduced tensors.
    for i, tensor in tensor_loop(red=True):
        # Store the values.
        tensor.set(param='Axx', value=target_fn.A_5D_bc[5*i + 0])
        tensor.set(param='Ayy', value=target_fn.A_5D_bc[5*i + 1])
        tensor.set(param='Axy', value=target_fn.A_5D_bc[5*i + 2])
        tensor.set(param='Axz', value=target_fn.A_5D_bc[5*i + 3])
        tensor.set(param='Ayz', value=target_fn.A_5D_bc[5*i + 4])

    # The PCS and RDC data.
    for i in range(len(cdp.align_ids)):
        # The alignment ID.
        align_id = cdp.align_ids[i]

        # Data flags.
        rdc_flag = False
        if hasattr(cdp, 'rdc_ids') and align_id in cdp.rdc_ids:
            rdc_flag = True
        pcs_flag = False
        if hasattr(cdp, 'pcs_ids') and align_id in cdp.pcs_ids:
            pcs_flag = True

        # Spin loop over the domain.
        pcs_index = 0
        for spin in spin_loop(domain_moving()):
            # Skip deselected spins.
            if not spin.select:
                continue

            # Spins with PCS data.
            if pcs_flag and hasattr(spin, 'pcs'):
                # Initialise the data structure.
                if not hasattr(spin, 'pcs_bc'):
                    spin.pcs_bc = {}

                # Store the back-calculated value (in ppm).
                spin.pcs_bc[align_id] = target_fn.pcs_theta[i, pcs_index] * 1e6

                # Increment the index.
                pcs_index += 1

        # Interatomic data container loop.
        rdc_index = 0
        for interatom in interatomic_loop(domain_moving()):
            # RDC checks.
            if not check_rdcs(interatom):
                continue

            # Initialise the data structure.
            if not hasattr(interatom, 'rdc_bc'):
                interatom.rdc_bc = {}

            # Store the back-calculated value.
            interatom.rdc_bc[align_id] = target_fn.rdc_theta[i, rdc_index]

            # Increment the index.
            rdc_index += 1
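# Illustrative note (not part of the original relax source):  after calling
# store_bc_data(target_fn), the back-calculated values can be read back from
# the data pipe, e.g. spin.pcs_bc['Dy'] (in ppm) or interatom.rdc_bc['Dy'],
# where 'Dy' stands for a hypothetical alignment ID.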
def target_fn_setup(sim_index=None, verbosity=1, scaling=True):
    """Initialise the target function for optimisation or direct calculation.

    @keyword sim_index: The index of the simulation to optimise.  This should be None if normal optimisation is desired.
    @type sim_index:    None or int
    @keyword verbosity: The amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:    int
    @keyword scaling:   If True, diagonal scaling is enabled during optimisation to allow the problem to be better conditioned.
    @type scaling:      bool
    @return:            The initialised target function instance, the parameter vector, and the scaling matrix.
    @rtype:             Frame_order instance, numpy rank-1 array, numpy rank-2 array
    """

    # Check for the average domain displacement information.
    check_ave_domain_setup()

    # Assemble the parameter vector.
    param_vector = assemble_param_vector(sim_index=sim_index)

    # Determine the base data types (RDCs and/or PCSs).
    data_types = base_data_types()

    # Diagonal scaling.
    scaling_matrix = None
    if len(param_vector):
        scaling_matrix = assemble_scaling_matrix(scaling=scaling)
        param_vector = dot(inv(scaling_matrix), param_vector)

    # Get the data structures for optimisation using the tensors as base data sets.
    full_tensors, full_tensor_err, full_in_ref_frame = minimise_setup_tensors(sim_index)

    # Get the data structures for optimisation using PCSs as base data sets.
    pcs, pcs_err, pcs_weight, temp, frq = None, None, None, None, None
    if 'pcs' in data_types:
        pcs, pcs_err, pcs_weight, temp, frq = minimise_setup_pcs(sim_index=sim_index)

    # Get the data structures for optimisation using RDCs as base data sets.
    rdcs, rdc_err, rdc_weight, rdc_vect, rdc_const, absolute_rdc = None, None, None, None, None, None
    if 'rdc' in data_types:
        rdcs, rdc_err, rdc_weight, rdc_vect, rdc_const, absolute_rdc = minimise_setup_rdcs(sim_index=sim_index)

    # Data checks.
    if pcs != None and not len(pcs):
        raise RelaxNoPCSError
    if rdcs != None and not len(rdcs):
        raise RelaxNoRDCError

    # Get the atomic positions.
    atomic_pos, paramag_centre = None, None
    if 'pcs' in data_types or 'pre' in data_types:
        atomic_pos, paramag_centre = minimise_setup_atomic_pos(sim_index=sim_index)

    # Average domain translation.
    translation_opt = False
    if not translation_fixed():
        translation_opt = True

    # The fixed pivot point.
    pivot = None
    if hasattr(cdp, 'pivot_x'):
        pivot = array([cdp.pivot_x, cdp.pivot_y, cdp.pivot_z])

    # The second pivot.
    pivot2 = None
    if hasattr(cdp, 'pivot_x_2'):
        pivot2 = array([cdp.pivot_x_2, cdp.pivot_y_2, cdp.pivot_z_2])

    # Pivot optimisation.
    pivot_opt = True
    if pivot_fixed():
        pivot_opt = False

    # The number of integration points.
    if not hasattr(cdp, 'num_int_pts'):
        cdp.num_int_pts = 200000

    # The centre of mass, for use in the rotor models.
    com = None
    if cdp.model in ['rotor', 'double rotor']:
        # The centre of mass of all objects in the data pipe.
        com = pipe_centre_of_mass(verbosity=0)
        com = array(com, float64)

        # Printout.
        if verbosity:
            print("The centre of mass reference coordinate for the rotor models is at:\n %s" % list(com))

    # The centre of mass of the moving domain - to use as the centroid for the average domain position rotation.
    if cdp.ave_pos_pivot == 'com':
        ave_pos_pivot = pipe_centre_of_mass(atom_id=domain_moving(), verbosity=0)
        ave_pos_piv_sync = False

    # The motional pivot point - to use as the centroid for the average domain position rotation.
    if cdp.ave_pos_pivot == 'motional':
        ave_pos_pivot = pivot
        ave_pos_piv_sync = True

    # Print outs.
    if sim_index == None:
        if cdp.model != 'rigid':
            if cdp.quad_int:
                sys.stdout.write("Numerical integration via Scipy quadratic integration.\n")
            else:
                sys.stdout.write("Numerical integration via the quasi-random Sobol' sequence.\n")
                sys.stdout.write("Number of integration points: %s\n" % cdp.num_int_pts)
        base_data = []
        if rdcs != None and len(rdcs):
            base_data.append("RDCs")
        if pcs != None and len(pcs):
            base_data.append("PCSs")
        sys.stdout.write("Base data: %s\n" % repr(base_data))

    # Set up the optimisation function.
    target = frame_order.Frame_order(model=cdp.model, init_params=param_vector, full_tensors=full_tensors, full_in_ref_frame=full_in_ref_frame, rdcs=rdcs, rdc_errors=rdc_err, rdc_weights=rdc_weight, rdc_vect=rdc_vect, dip_const=rdc_const, pcs=pcs, pcs_errors=pcs_err, pcs_weights=pcs_weight, atomic_pos=atomic_pos, temp=temp, frq=frq, paramag_centre=paramag_centre, scaling_matrix=scaling_matrix, com=com, ave_pos_pivot=ave_pos_pivot, ave_pos_piv_sync=ave_pos_piv_sync, translation_opt=translation_opt, pivot=pivot, pivot2=pivot2, pivot_opt=pivot_opt, num_int_pts=cdp.num_int_pts, quad_int=cdp.quad_int)

    # Return the data.
    return target, param_vector, scaling_matrix
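# Illustrative sketch (not part of the original relax source, assuming the
# minfx generic_minimise() interface and that the target function exposes a
# func() method):  a typical optimisation flow built on this module might be
#
#     from minfx.generic import generic_minimise
#
#     target, param_vector, scaling_matrix = target_fn_setup(sim_index=None, verbosity=1, scaling=True)
#     results = generic_minimise(func=target.func, x0=param_vector, min_algor='simplex', func_tol=1e-25, full_output=True, print_flag=1)
#     unpack_opt_results(results, scaling=True, scaling_matrix=scaling_matrix, sim_index=None)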
def unpack_opt_results(results, scaling=False, scaling_matrix=None, sim_index=None):
    """Unpack and store the Frame Order optimisation results.

    @param results:             The results tuple returned by the minfx generic_minimise() function.
    @type results:              tuple
    @keyword scaling:           If True, diagonal scaling is enabled during optimisation to allow the problem to be better conditioned.
    @type scaling:              bool
    @keyword scaling_matrix:    The scaling matrix.
    @type scaling_matrix:       numpy rank-2 array
    @keyword sim_index:         The index of the simulation to optimise.  This should be None for normal optimisation.
    @type sim_index:            None or int
    """

    # Disassemble the results.
    if len(results) == 4:    # Grid search.
        param_vector, func, iter_count, warning = results
        f_count = iter_count
        g_count = 0.0
        h_count = 0.0
    else:
        param_vector, func, iter_count, f_count, g_count, h_count, warning = results

    # Catch infinite chi-squared values.
    if isInf(func):
        raise RelaxInfError('chi-squared')

    # Catch chi-squared values of NaN.
    if isNaN(func):
        raise RelaxNaNError('chi-squared')

    # Scaling.
    if scaling:
        param_vector = dot(scaling_matrix, param_vector)

    # The parameters to wrap.
    wrap = [
        'ave_pos_alpha',
        'ave_pos_beta',
        'ave_pos_gamma',
        'eigen_alpha',
        'eigen_beta',
        'eigen_gamma',
        'axis_theta',
        'axis_phi'
    ]

    # Monte Carlo simulation data structures.
    if sim_index != None:
        # Loop over the parameters.
        for i in range(len(cdp.params)):
            # Angle wrapping around the real value.
            if cdp.params[i] in wrap or cdp.params[i] == 'axis_alpha':
                val = getattr(cdp, cdp.params[i])
                param_vector[i] = wrap_angles(param_vector[i], val-pi, val+pi)

            # FIXME: Implement linear constraints via the log-barrier algorithm, then delete this.
            # Handle negative values of the cone_sigma_max parameter.
            if cdp.params[i] == 'cone_sigma_max':
                param_vector[i] = abs(param_vector[i])

            # Store the value.
            obj = getattr(cdp, cdp.params[i]+'_sim')
            obj[sim_index] = param_vector[i]

            # Order parameter to angle conversion.
            if cdp.params[i] == 'cone_s1':
                cdp.cone_theta_sim[sim_index] = order_parameters.iso_cone_S_to_theta(param_vector[i])

        # Optimisation stats.
        cdp.chi2_sim[sim_index] = func
        cdp.iter_sim[sim_index] = iter_count
        cdp.f_count_sim[sim_index] = f_count
        cdp.g_count_sim[sim_index] = g_count
        cdp.h_count_sim[sim_index] = h_count
        cdp.warning_sim[sim_index] = warning

    # Normal data structures.
    else:
        # Loop over the parameters.
        for i in range(len(cdp.params)):
            # Angle wrapping.
            if cdp.params[i] in wrap:
                param_vector[i] = wrap_angles(param_vector[i], 0.0, 2.0*pi)
            if cdp.params[i] == 'axis_alpha':
                param_vector[i] = wrap_angles(param_vector[i], -pi, pi)

            # FIXME: Implement linear constraints via the log-barrier algorithm, then delete this.
            # Handle negative values of the cone_sigma_max parameter.
            if cdp.params[i] == 'cone_sigma_max':
                param_vector[i] = abs(param_vector[i])

            # Store the value.
            setattr(cdp, cdp.params[i], param_vector[i])

            # Order parameter to angle conversion.
            if cdp.params[i] == 'cone_s1':
                cdp.cone_theta = order_parameters.iso_cone_S_to_theta(param_vector[i])

        # Optimisation stats.
        cdp.chi2 = func
        cdp.iter = iter_count
        cdp.f_count = f_count
        cdp.g_count = g_count
        cdp.h_count = h_count
        cdp.warning = warning
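# Illustrative note (not part of the original relax source):  wrap_angles()
# maps an angle into the given interval by adding or subtracting multiples of
# 2*pi, so for a Monte Carlo simulation the optimised angle is wrapped into
# [val-pi, val+pi] around the real value.  For example, the hypothetical call
#     wrap_angles(3.5*pi, 0.0, 2.0*pi)
# would give 1.5*pi.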