1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23 """The N-state model or structural ensemble analysis."""
24
25
26 __all__ = [
27 'data',
28 'parameters'
29 ]
30
31
32 from copy import deepcopy
33 from math import acos, cos, pi
34 from minfx.generic import generic_minimise
35 from minfx.grid import grid
36 from numpy import array, dot, float64, ones, zeros
37 from numpy.linalg import inv, norm
38 from re import search
39 from warnings import warn
40
41
42 import lib.arg_check
43 from lib.errors import RelaxError, RelaxInfError, RelaxNaNError, RelaxNoModelError, RelaxNoValueError, RelaxSpinTypeError
44 from lib.float import isNaN, isInf
45 from lib.geometry.rotations import euler_to_R_zyz, two_vect_to_R
46 from lib.io import open_write_file
47 from lib.structure.cones import Iso_cone
48 from lib.structure.represent.cone import cone_edge, stitch_cone_to_edge
49 from lib.structure.internal.object import Internal
50 from lib.warnings import RelaxWarning
51 from pipe_control import align_tensor, pcs, pipes, rdc
52 from pipe_control.align_tensor import opt_uses_align_data, opt_uses_tensor
53 from pipe_control.interatomic import interatomic_loop
54 from pipe_control.mol_res_spin import return_spin, spin_loop
55 from pipe_control.pcs import return_pcs_data
56 from pipe_control.rdc import check_rdcs, return_rdc_data
57 from pipe_control.structure import geometric
58 from pipe_control.structure.mass import centre_of_mass
59 from specific_analyses.api_base import API_base
60 from specific_analyses.api_common import API_common
61 from specific_analyses.n_state_model.data import base_data_types, calc_ave_dist, num_data_points, tensor_loop
62 from specific_analyses.n_state_model.parameters import assemble_param_vector, assemble_scaling_matrix, disassemble_param_vector, linear_constraints, param_model_index, param_num, update_model
63 from target_functions.n_state_model import N_state_opt
64 from target_functions.potential import quad_pot
65 from user_functions.data import Uf_tables; uf_tables = Uf_tables()
66 from user_functions.objects import Desc_container
67
68
70 """Class containing functions for the N-state model."""
71
91
92
177
178
def _cone_pdb(self, cone_type=None, scale=1.0, file=None, dir=None, force=False):
    """Create a PDB file containing a geometric object representing the various cone models.

    Currently the only cone types supported are 'diff in cone' and 'diff on cone'.


    @param cone_type:   The type of cone model to represent.
    @type cone_type:    str
    @param scale:       The size of the geometric object is equal to the average pivot-CoM
                        vector length multiplied by this scaling factor.
    @type scale:        float
    @param file:        The name of the PDB file to create.
    @type file:         str
    @param dir:         The name of the directory to place the PDB file into.
    @type dir:          str
    @param force:       Flag which if set to True will cause any pre-existing file to be
                        overwritten.
    @type force:        bool
    """

    # Test that the cone type is known and that its order parameter has been determined.
    if cone_type == 'diff in cone':
        if not hasattr(cdp, 'S_diff_in_cone'):
            raise RelaxError("The diffusion in a cone model has not yet been determined.")
    elif cone_type == 'diff on cone':
        if not hasattr(cdp, 'S_diff_on_cone'):
            raise RelaxError("The diffusion on a cone model has not yet been determined.")
    else:
        raise RelaxError("The cone type " + repr(cone_type) + " is unknown.")

    # The number of increments used when generating the geometric objects.
    inc = 20

    # The rotation matrix mapping the z-axis onto the average pivot-CoM vector.
    R = zeros((3, 3), float64)
    two_vect_to_R(array([0, 0, 1], float64), cdp.ave_pivot_CoM/norm(cdp.ave_pivot_CoM), R)

    # Select the cone angle matching the cone type, and build the isotropic cone object.
    if cone_type == 'diff in cone':
        angle = cdp.theta_diff_in_cone
    elif cone_type == 'diff on cone':
        angle = cdp.theta_diff_on_cone
    cone = Iso_cone(angle)

    # Create the structural object to be written out as a PDB file.
    structure = Internal()

    # Add a single molecule to hold the cone representation.
    structure.add_molecule(name='cone')

    # Alias the molecule of the first model.
    mol = structure.structural_data[0].mol[0]

    # Add the pivot point as a single carbon HETATM.
    mol.atom_add(pdb_record='HETATM', atom_num=1, atom_name='R', res_name='PIV', res_num=1, pos=cdp.pivot_point, element='C')

    # Generate the average pivot-CoM vectors (including the Monte Carlo simulation vectors, if present).
    print("\nGenerating the average pivot-CoM vectors.")
    sim_vectors = None
    if hasattr(cdp, 'ave_pivot_CoM_sim'):
        sim_vectors = cdp.ave_pivot_CoM_sim
    res_num = geometric.generate_vector_residues(mol=mol, vector=cdp.ave_pivot_CoM, atom_name='Ave', res_name_vect='AVE', sim_vectors=sim_vectors, res_num=2, origin=cdp.pivot_point, scale=scale)

    # Generate the cone outer edge, remembering where its atoms start.
    print("\nGenerating the cone outer edge.")
    cap_start_atom = mol.atom_num[-1]+1
    cone_edge(mol=mol, cone=cone, res_name='CON', res_num=3, apex=cdp.pivot_point, R=R, scale=norm(cdp.pivot_CoM), inc=inc)

    # For the 'diff in cone' model, generate the cone cap dome and stitch it to the edge.
    if cone_type == 'diff in cone':
        print("\nGenerating the cone cap.")
        cone_start_atom = mol.atom_num[-1]+1
        geometric.generate_vector_dist(mol=mol, res_name='CON', res_num=3, centre=cdp.pivot_point, R=R, limit_check=cone.limit_check, scale=norm(cdp.pivot_CoM), inc=inc)
        stitch_cone_to_edge(mol=mol, cone=cone, dome_start=cone_start_atom, edge_start=cap_start_atom+1, inc=inc)

    # Write out the PDB file.
    print("\nGenerating the PDB file.")
    pdb_file = open_write_file(file, dir, force=force)
    structure.write_pdb(pdb_file)
    pdb_file.close()
259
260
"""Extract and unpack the back calculated data.

@param model:   The instantiated class containing the target function.
@type model:    class instance
"""

# No alignment tensors, so nothing to unpack.
if not hasattr(cdp, 'align_tensors'):
    return

# Loop over each alignment.  The align_index counter only advances for tensors used in
# the optimisation, matching the indexing of the target function's data structures.
align_index = 0
for i in range(len(cdp.align_ids)):
    # Skip non-optimised tensors.
    if not opt_uses_tensor(cdp.align_tensors[i]):
        continue

    # The alignment ID.
    align_id = cdp.align_ids[i]

    # Flags for the presence of RDC and PCS data for this alignment.
    rdc_flag = False
    if hasattr(cdp, 'rdc_ids') and align_id in cdp.rdc_ids:
        rdc_flag = True
    pcs_flag = False
    if hasattr(cdp, 'pcs_ids') and align_id in cdp.pcs_ids:
        pcs_flag = True

    # Spin loop for unpacking the back-calculated PCS values.
    pcs_index = 0
    for spin in spin_loop():
        # Skip deselected spins.
        if not spin.select:
            continue

        # Spins with PCS data.
        if pcs_flag and hasattr(spin, 'pcs'):
            # Initialise the back-calculated PCS data structure if necessary.
            if not hasattr(spin, 'pcs_bc'):
                spin.pcs_bc = {}

            # Store the back-calculated value, scaled by 1e6 (presumably converting
            # back to ppm from the target function's internal units - TODO confirm).
            spin.pcs_bc[align_id] = model.deltaij_theta[align_index, pcs_index] * 1e6

            # Increment the PCS index.
            pcs_index = pcs_index + 1

    # Interatomic data container loop for unpacking the back-calculated RDC values.
    rdc_index = 0
    for interatom in interatomic_loop():
        # Get the two spins of the container.
        spin1 = return_spin(interatom.spin_id1)
        spin2 = return_spin(interatom.spin_id2)

        # Skip containers which cannot contribute RDC data.
        if not check_rdcs(interatom):
            continue

        # Containers with RDC data.
        if rdc_flag and hasattr(interatom, 'rdc'):
            # Initialise the back-calculated RDC data structure if necessary.
            if not hasattr(interatom, 'rdc_bc'):
                interatom.rdc_bc = {}

            # Store the back-calculated RDC value.
            interatom.rdc_bc[align_id] = model.rdc_theta[align_index, rdc_index]

            # Increment the RDC index.
            rdc_index = rdc_index + 1

# Increment the alignment index for the next optimised tensor.
align_index += 1
334
335
"""Set up the atomic position data structures for optimisation using PCSs and PREs as base data sets.

@keyword sim_index: The index of the simulation to optimise.  This should be None if normal optimisation is desired.
@type sim_index:    None or int
@return:            The atomic positions (the first index is the spins, the second is the structures, and the third is the atomic coordinates) and the paramagnetic centre.
@rtype:             numpy rank-3 array, numpy rank-1 array.
"""

# Initialise the position list.
atomic_pos = []

# Store the atomic positions.
for spin in spin_loop():
    # Skip deselected spins.
    if not spin.select:
        continue

    # Only use spins carrying PCS or PRE data.
    if not hasattr(spin, 'pcs') and not hasattr(spin, 'pre'):
        continue

    # A single atomic position (wrap it in a list so all entries are lists of positions).
    if type(spin.pos[0]) in [float, float64]:
        atomic_pos.append([spin.pos])
    # A list of positions, one per structure.
    else:
        atomic_pos.append(spin.pos)

# Convert to a numpy object.
atomic_pos = array(atomic_pos, float64)

# The paramagnetic centre.  Defaults to the origin when not set; for Monte Carlo
# simulations with an optimised (non-fixed) centre, the per-simulation value is used.
if not hasattr(cdp, 'paramagnetic_centre'):
    paramag_centre = zeros(3, float64)
elif sim_index != None and not cdp.paramag_centre_fixed:
    if not hasattr(cdp, 'paramagnetic_centre_sim') or cdp.paramagnetic_centre_sim[sim_index] == None:
        paramag_centre = zeros(3, float64)
    else:
        paramag_centre = array(cdp.paramagnetic_centre_sim[sim_index])
else:
    paramag_centre = array(cdp.paramagnetic_centre)

# Return the assembled data structures.
return atomic_pos, paramag_centre
380
381
"""Set up the data structures for optimisation using alignment tensors as base data sets.

@keyword sim_index: The index of the simulation to optimise.  This should be None if
                    normal optimisation is desired.
@type sim_index:    None or int
@return:            The assembled data structures for using alignment tensors as the base
                    data for optimisation.  These include:
                        - full_tensors, the data of the full alignment tensors.
                        - red_tensor_elem, the tensors as concatenated rank-1 5D arrays.
                        - red_tensor_err, the tensor errors as concatenated rank-1 5D
                          arrays.
                        - full_in_ref_frame, flags specifying if the tensor in the reference
                          frame is the full or reduced tensor.
@rtype:             tuple of (list, numpy rank-1 array, numpy rank-1 array, numpy rank-1
                    array)
"""

# Initialise the data structures (5 unique elements per tensor; errors default to 1e-5).
n = len(cdp.align_tensors.reduction)
full_tensors = zeros(n*5, float64)
red_tensors = zeros(n*5, float64)
red_err = ones(n*5, float64) * 1e-5
full_in_ref_frame = zeros(n, float64)

# Loop over the full tensors.
for i, tensor in tensor_loop(red=False):
    # Pack the 5 unique elements of the full tensor.
    full_tensors[5*i + 0] = tensor.Axx
    full_tensors[5*i + 1] = tensor.Ayy
    full_tensors[5*i + 2] = tensor.Axy
    full_tensors[5*i + 3] = tensor.Axz
    full_tensors[5*i + 4] = tensor.Ayz

    # Flag if the full tensor belongs to the reference domain.
    if cdp.ref_domain == tensor.domain:
        full_in_ref_frame[i] = 1

# Loop over the reduced tensors.
for i, tensor in tensor_loop(red=True):
    # The Monte Carlo simulation values of the reduced tensor.
    if sim_index != None:
        red_tensors[5*i + 0] = tensor.Axx_sim[sim_index]
        red_tensors[5*i + 1] = tensor.Ayy_sim[sim_index]
        red_tensors[5*i + 2] = tensor.Axy_sim[sim_index]
        red_tensors[5*i + 3] = tensor.Axz_sim[sim_index]
        red_tensors[5*i + 4] = tensor.Ayz_sim[sim_index]

    # The normal values of the reduced tensor.
    else:
        red_tensors[5*i + 0] = tensor.Axx
        red_tensors[5*i + 1] = tensor.Ayy
        red_tensors[5*i + 2] = tensor.Axy
        red_tensors[5*i + 3] = tensor.Axz
        red_tensors[5*i + 4] = tensor.Ayz

    # The reduced tensor errors, overriding the defaults when present.
    if hasattr(tensor, 'Axx_err'):
        red_err[5*i + 0] = tensor.Axx_err
        red_err[5*i + 1] = tensor.Ayy_err
        red_err[5*i + 2] = tensor.Axy_err
        red_err[5*i + 3] = tensor.Axz_err
        red_err[5*i + 4] = tensor.Ayz_err

# Return the assembled data structures.
return full_tensors, red_tensors, red_err, full_in_ref_frame
448
449
"""Set up the data structures for the fixed alignment tensors.

@return:    The assembled data structures for the fixed alignment tensors.
@rtype:     numpy rank-1 array.
"""

# The number of fixed tensors (total minus the non-fixed ones).
n = align_tensor.num_tensors(skip_fixed=False) - align_tensor.num_tensors(skip_fixed=True)
tensors = zeros(n*5, float64)

# No fixed tensors, nothing to assemble.
if n == 0:
    return None

# Loop over the tensors, packing the 5 unique elements of each.
# NOTE(review): the array is sized by the fixed tensor count but filled for every
# tensor passing opt_uses_align_data() - confirm these two counts always agree.
index = 0
for i in range(len(cdp.align_tensors)):
    # Skip tensors whose data is not used in the optimisation.
    if not opt_uses_align_data(cdp.align_tensors[i].name):
        continue

    # Pack the tensor elements.
    tensors[5*index + 0] = cdp.align_tensors[i].Axx
    tensors[5*index + 1] = cdp.align_tensors[i].Ayy
    tensors[5*index + 2] = cdp.align_tensors[i].Axy
    tensors[5*index + 3] = cdp.align_tensors[i].Axz
    tensors[5*index + 4] = cdp.align_tensors[i].Ayz

    # Increment the packing index.
    index += 1

# Return the assembled structure.
return tensors
484
485
"""Initialise the target function for optimisation or direct calculation.

@param sim_index:   The index of the simulation to optimise.  This should be None if normal optimisation is desired.
@type sim_index:    None or int
@param scaling:     If True, diagonal scaling is enabled during optimisation to allow the problem to be better conditioned.
@type scaling:      bool
"""

# Test if the N-state model has been set up.
if not hasattr(cdp, 'model'):
    raise RelaxNoModelError('N-state')

# Extra checks for the '2-domain' N-state model.
if cdp.model == '2-domain':
    # The number of states must be known.
    if not hasattr(cdp, 'N'):
        raise RelaxError("The number of states has not been set.")

    # The reference domain must be known.
    if not hasattr(cdp, 'ref_domain'):
        raise RelaxError("The reference domain has not been set.")

# Update the model parameter list.
update_model()

# Create the initial parameter vector.
param_vector = assemble_param_vector(sim_index=sim_index)

# Determine the base data types present.
data_types = base_data_types()

# The probabilities, when already set.
probs = None
if hasattr(cdp, 'probs') and len(cdp.probs) and cdp.probs[0] != None:
    probs = cdp.probs

# Diagonal scaling: scale the parameter vector into the conditioned space.
scaling_matrix = None
if len(param_vector):
    scaling_matrix = assemble_scaling_matrix(data_types=data_types, scaling=scaling)
    param_vector = dot(inv(scaling_matrix), param_vector)

# The alignment tensor base data.
full_tensors, red_tensor_elem, red_tensor_err, full_in_ref_frame = None, None, None, None
if 'tensor' in data_types:
    full_tensors, red_tensor_elem, red_tensor_err, full_in_ref_frame = self._minimise_setup_tensors(sim_index=sim_index)

# The PCS base data.
# NOTE(review): the local name 'pcs' shadows the pipe_control pcs module imported at
# the top of the file for the remainder of this method.
pcs, pcs_err, pcs_weight, temp, frq, pcs_pseudo_flags = None, None, None, None, None, None
if 'pcs' in data_types:
    pcs, pcs_err, pcs_weight, temp, frq, pcs_pseudo_flags = return_pcs_data(sim_index=sim_index)

# The RDC base data.
rdcs, rdc_err, rdc_weight, rdc_vector, rdc_dj, absolute_rdc, T_flags, j_couplings, rdc_pseudo_flags = None, None, None, None, None, None, None, None, None
if 'rdc' in data_types:
    rdcs, rdc_err, rdc_weight, rdc_vector, rdc_dj, absolute_rdc, T_flags, j_couplings, rdc_pseudo_flags = return_rdc_data(sim_index=sim_index)

# The fixed tensor data and flags.
fixed_tensors = None
if 'rdc' in data_types or 'pcs' in data_types:
    # NOTE(review): this overwrites any full_tensors value set above for the 'tensor'
    # data type - confirm that the 'tensor' and 'rdc'/'pcs' types are mutually exclusive.
    full_tensors = self._minimise_setup_fixed_tensors()

    # Build the fixed flag list for the tensors used in the optimisation.
    fixed_tensors = []
    for i in range(len(cdp.align_tensors)):
        # Skip tensors whose data is not used.
        if not opt_uses_align_data(cdp.align_tensors[i].name):
            continue

        if cdp.align_tensors[i].fixed:
            fixed_tensors.append(True)
        else:
            fixed_tensors.append(False)

# The atomic positions and paramagnetic centre, for PCS or PRE base data.
atomic_pos, paramag_centre, centre_fixed = None, None, True
if 'pcs' in data_types or 'pre' in data_types:
    atomic_pos, paramag_centre = self._minimise_setup_atomic_pos(sim_index=sim_index)

    # Whether the paramagnetic centre position is fixed during optimisation.
    if hasattr(cdp, 'paramag_centre_fixed'):
        centre_fixed = cdp.paramag_centre_fixed

# Instantiate the target function class with all assembled data.
model = N_state_opt(model=cdp.model, N=cdp.N, init_params=param_vector, probs=probs, full_tensors=full_tensors, red_data=red_tensor_elem, red_errors=red_tensor_err, full_in_ref_frame=full_in_ref_frame, fixed_tensors=fixed_tensors, pcs=pcs, rdcs=rdcs, pcs_errors=pcs_err, rdc_errors=rdc_err, T_flags=T_flags, j_couplings=j_couplings, rdc_pseudo_flags=rdc_pseudo_flags, pcs_pseudo_flags=pcs_pseudo_flags, pcs_weights=pcs_weight, rdc_weights=rdc_weight, rdc_vect=rdc_vector, temp=temp, frq=frq, dip_const=rdc_dj, absolute_rdc=absolute_rdc, atomic_pos=atomic_pos, paramag_centre=paramag_centre, scaling_matrix=scaling_matrix, centre_fixed=centre_fixed)

# Return the model and the associated data structures.
return model, param_vector, data_types, scaling_matrix
575
576
"""Loop over the base data of the spins - RDCs, PCSs, and NOESY data.

This loop iterates for each data point (RDC, PCS, NOESY) for each spin or interatomic data container, returning the identification information.

@return:    A list of the spin or interatomic data container, the data type ('rdc', 'pcs', 'noesy'), and the alignment ID if required.
@rtype:     list of [SpinContainer instance, str, str] or [InteratomContainer instance, str, str]
"""

# Loop over the interatomic data containers for the RDC and NOESY data.
# NOTE(review): the same list object is mutated and re-yielded on every iteration, so
# consumers must not keep references to previously yielded values.
for interatom in interatomic_loop():
    # Skip deselected containers.
    if not interatom.select:
        continue

    # The container identification structure.
    data = [interatom, None, None]

    # RDC data.
    if hasattr(interatom, 'rdc'):
        data[1] = 'rdc'

        # Loop over the alignment IDs.
        for id in cdp.rdc_ids:
            # Add the ID.
            data[2] = id

            # Yield the identification data.
            yield data

    # NOESY data.
    if hasattr(interatom, 'noesy'):
        data[1] = 'noesy'

        # Loop over the NOESY IDs.
        for id in cdp.noesy_ids:
            # Add the ID.
            data[2] = id

            # Yield the identification data.
            yield data

# Loop over the spins for the PCS data.
for spin in spin_loop():
    # Skip deselected spins.
    if not spin.select:
        continue

    # The spin identification structure.
    data = [spin, None, None]

    # PCS data.
    if hasattr(spin, 'pcs'):
        data[1] = 'pcs'

        # Loop over the alignment IDs.
        for id in cdp.pcs_ids:
            # Add the ID.
            data[2] = id

            # Yield the identification data.
            yield data
639
640
def calculate(self, spin_id=None, verbosity=1, sim_index=None):
    """Calculation function.

    Currently this function simply calculates the NOESY flat-bottom quadratic energy potential,
    if NOE restraints are available.

    @keyword spin_id:   The spin identification string (unused).
    @type spin_id:      None or str
    @keyword verbosity: The amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:    int
    @keyword sim_index: The MC simulation index (unused).
    @type sim_index:    None
    """

    # Assemble the target function and associated data structures.
    model, param_vector, data_types, scaling_matrix = self._target_fn_setup()

    # A target function exists, so evaluate it once at the current parameter values.
    if model:
        # The chi-squared value at the current parameters.
        chi2 = model.func(param_vector)

        # Store it in the current data pipe.
        cdp.chi2 = chi2

        # Unpack the back-calculated data from the target function class.
        self._minimise_bc_data(model)

        # The RDC and PCS Q factors.
        if 'rdc' in data_types:
            rdc.q_factors()
        if 'pcs' in data_types:
            pcs.q_factors()

    # The NOE restraint potential.
    if hasattr(cdp, 'noe_restraints'):
        # Set up the flat-bottom quadratic potential data structures.
        num_restraints = len(cdp.noe_restraints)
        dist = zeros(num_restraints, float64)
        pot = zeros(num_restraints, float64)
        lower = zeros(num_restraints, float64)
        upper = zeros(num_restraints, float64)

        # The restraint bounds and the r^-6 averaged distances.
        for index, restraint in enumerate(cdp.noe_restraints):
            lower[index] = restraint[2]
            upper[index] = restraint[3]
            dist[index] = calc_ave_dist(restraint[0], restraint[1], exp=-6)

        # Evaluate the quadratic potential for all restraints at once.
        quad_pot(dist, pot, lower, upper)

        # Store the per-restraint distances and potential values.
        cdp.ave_dist = [[restraint[0], restraint[1], dist[index]] for index, restraint in enumerate(cdp.noe_restraints)]
        cdp.quad_pot = [[restraint[0], restraint[1], pot[index]] for index, restraint in enumerate(cdp.noe_restraints)]
704
705
"""Create the Monte Carlo data by back-calculation.

@keyword data_id:   The list of spin ID, data type, and alignment ID, as yielded by the base_data_loop() generator method.
@type data_id:      str
@return:            The Monte Carlo Ri data.
@rtype:             list of floats
"""

# Initialise the MC data structure.
mc_data = []

# Alias the spin or interatomic data container.
container = data_id[0]

# RDC data.
if data_id[1] == 'rdc' and hasattr(container, 'rdc'):
    # Back calculate if the data is not yet present.
    if not hasattr(container, 'rdc_bc'):
        self.calculate()

    # The data, or None when no back-calculated value exists for this alignment.
    if not hasattr(container, 'rdc_bc') or not data_id[2] in container.rdc_bc:
        data = None
    else:
        data = container.rdc_bc[data_id[2]]

    # Append the data.
    mc_data.append(data)

# NOESY data.
elif data_id[1] == 'noesy' and hasattr(container, 'noesy'):
    # Back calculate if the data is not yet present.
    if not hasattr(container, 'noesy_bc'):
        self.calculate()

    # Append the data.
    mc_data.append(container.noesy_bc)

# PCS data.
elif data_id[1] == 'pcs' and hasattr(container, 'pcs'):
    # Back calculate if the data is not yet present.
    if not hasattr(container, 'pcs_bc'):
        self.calculate()

    # The data, or None when no back-calculated value exists for this alignment.
    if not hasattr(container, 'pcs_bc') or not data_id[2] in container.pcs_bc:
        data = None
    else:
        data = container.pcs_bc[data_id[2]]

    # Append the data.
    mc_data.append(data)

# Return the MC data.
return mc_data
762
763
# Documentation table describing the N-state model default parameter values, used by the
# user function front end.
default_value_doc = Desc_container("N-state model default values")
_table = uf_tables.add_table(label="table: N-state default values", caption="N-state model default values.")
_table.add_headings(["Data type", "Object name", "Value"])
_table.add_row(["Probabilities", "'p0', 'p1', 'p2', ..., 'pN'", "1/N"])
_table.add_row(["Euler angle alpha", "'alpha0', 'alpha1', ...", "(c+1) * pi / (N+1)"])
_table.add_row(["Euler angle beta", "'beta0', 'beta1', ...", "(c+1) * pi / (N+1)"])
_table.add_row(["Euler angle gamma", "'gamma0', 'gamma1', ...", "(c+1) * pi / (N+1)"])
default_value_doc.add_table(_table.label)
default_value_doc.add_paragraph("In this table, N is the total number of states and c is the index of a given state ranging from 0 to N-1.  The default probabilities are all set to be equal whereas the angles are given a range of values so that no 2 states are equal at the start of optimisation.")
default_value_doc.add_paragraph("Note that setting the probability for state N will do nothing as it is equal to one minus all the other probabilities.")
774
"""The default N-state model parameter values.

@param param:   The N-state model parameter.
@type param:    str
@return:        The default value.
@rtype:         float
"""

# Determine the parameter category and the state index.
name = self.return_data_name(param)
index = param_model_index(param)

# The total number of states, as a float for the divisions below.
N = float(cdp.N)

# Probability parameters default to equal probabilities for all states.
if name == 'probs':
    return 1.0 / N

# Euler angles are spread over their range so that no two states start identical.
elif name == 'alpha' or name == 'beta' or name == 'gamma':
    return (float(index)+1) * pi / (N+1.0)
798
799
def grid_search(self, lower=None, upper=None, inc=None, constraints=False, verbosity=0, sim_index=None):
    """The grid search function.

    @param lower:       The lower bounds of the grid search which must be equal to the number of parameters in the model.
    @type lower:        array of numbers
    @param upper:       The upper bounds of the grid search which must be equal to the number of parameters in the model.
    @type upper:        array of numbers
    @param inc:         The increments for each dimension of the space for the grid search.  The number of elements in the array must equal to the number of parameters in the model.
    @type inc:          array of int
    @param constraints: If True, constraints are applied during the grid search (eliminating parts of the grid).  If False, no constraints are used.
    @type constraints:  bool
    @param verbosity:   A flag specifying the amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:    int
    @keyword sim_index: The Monte Carlo simulation index, or None for a normal grid search.
    @type sim_index:    None or int
    """

    # An N-state model must be set up before a grid search can run.
    if not hasattr(cdp, 'model'):
        raise RelaxNoModelError('N-state')

    # The dimensionality of the grid.
    num_params = param_num()

    # A zero parameter model cannot be optimised.
    if num_params == 0:
        print("Cannot run a grid search on a model with zero parameters, skipping the grid search.")
        return

    # Check the validity of the user supplied bounds and increments.
    self.test_grid_ops(lower=lower, upper=upper, inc=inc, n=num_params)

    # Expand a single increment count to cover every dimension.
    if isinstance(inc, int):
        inc = [inc] * num_params

    # Construct default bounds when none have been supplied.
    if not lower:
        lower = []
        upper = []
        for j in range(num_params):
            # Parameters from the model parameter list.
            if j < len(cdp.params):
                # Probabilities lie within [0, 1].
                if search('^p', cdp.params[j]):
                    lower.append(0.0)
                    upper.append(1.0)

                # The alpha and gamma Euler angles span [0, 2pi], beta spans [0, pi].
                if search('^alpha', cdp.params[j]) or search('^gamma', cdp.params[j]):
                    lower.append(0.0)
                    upper.append(2*pi)
                elif search('^beta', cdp.params[j]):
                    lower.append(0.0)
                    upper.append(pi)

            # The paramagnetic centre coordinates (the final 3 parameters when not fixed).
            elif hasattr(cdp, 'paramag_centre_fixed') and not cdp.paramag_centre_fixed and (num_params - j) <= 3:
                lower.append(-100)
                upper.append(100)

            # Otherwise, alignment tensor parameters.
            else:
                lower.append(-1e-3)
                upper.append(1e-3)

    # The base data types and the number of optimised tensors.
    data_types = base_data_types()
    tensor_num = align_tensor.num_tensors(skip_fixed=True)

    # For the 'fixed' model with multiple tensors and a fixed paramagnetic centre, the
    # grid can be decomposed so each alignment tensor is searched independently.
    if cdp.model == 'fixed' and tensor_num > 1 and ('rdc' in data_types or 'pcs' in data_types) and not align_tensor.all_tensors_fixed() and hasattr(cdp, 'paramag_centre_fixed') and cdp.paramag_centre_fixed:
        # Printout.
        print("Optimising each alignment tensor separately.")

        # Remember the original fixed flags, then fix every tensor.
        fixed_flags = []
        for j in range(len(cdp.align_ids)):
            tensor = align_tensor.return_tensor(index=j, skip_fixed=False)
            fixed_flags.append(tensor.fixed)
            tensor.set('fixed', True)

        # Grid search each originally non-fixed tensor in turn.
        for j in range(len(cdp.align_ids)):
            # Tensors which were fixed from the start are never optimised.
            if fixed_flags[j]:
                continue

            # Unfix only the current tensor.
            tensor = align_tensor.return_tensor(index=j, skip_fixed=False)
            tensor.set('fixed', False)

            # The grid zone covering this tensor's 5 parameters.
            lower_sub = lower[j*5:j*5+5]
            upper_sub = upper[j*5:j*5+5]
            inc_sub = inc[j*5:j*5+5]

            # Minimise over the sub-grid.
            self.minimise(min_algor='grid', lower=lower_sub, upper=upper_sub, inc=inc_sub, constraints=constraints, verbosity=verbosity, sim_index=sim_index)

            # Refix the tensor.
            tensor.set('fixed', True)

        # Restore the original fixed flags.
        for j in range(len(cdp.align_ids)):
            tensor = align_tensor.return_tensor(index=j, skip_fixed=False)
            tensor.set('fixed', fixed_flags[j])

    # Otherwise perform a single grid search over the full space.
    else:
        self.minimise(min_algor='grid', lower=lower, upper=upper, inc=inc, constraints=constraints, verbosity=verbosity, sim_index=sim_index)
924
925
"""Determine whether the given parameter is spin specific.

@param name:    The name of the parameter.
@type name:     str
@return:        False
@rtype:         bool
"""

# The only spin specific parameters.
if name in ['r', 'heteronuc_type', 'proton_type']:
    return True

# All other N-state model parameters are global.
return False
941
942
"""Create bounds for the OpenDX mapping function.

@param param:       The name of the parameter to return the lower and upper bounds of.
@type param:        str
@param spin_id:     The spin identification string (unused).
@type spin_id:      None
@return:            The upper and lower bounds of the parameter.
@rtype:             list of float
"""

# The paramagnetic centre coordinates (implicitly returns None for all other parameters).
if search('^paramag_[xyz]$', param):
    return [-100.0, 100.0]
957
958
def minimise(self, min_algor=None, min_options=None, func_tol=None, grad_tol=None, max_iterations=None, constraints=False, scaling=True, verbosity=0, sim_index=None, lower=None, upper=None, inc=None):
    """Minimisation function.

    @param min_algor:       The minimisation algorithm to use.
    @type min_algor:        str
    @param min_options:     An array of options to be used by the minimisation algorithm.
    @type min_options:      array of str
    @param func_tol:        The function tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
    @type func_tol:         None or float
    @param grad_tol:        The gradient tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
    @type grad_tol:         None or float
    @param max_iterations:  The maximum number of iterations for the algorithm.
    @type max_iterations:   int
    @param constraints:     If True, constraints are used during optimisation.
    @type constraints:      bool
    @param scaling:         If True, diagonal scaling is enabled during optimisation to allow the problem to be better conditioned.
    @type scaling:          bool
    @param verbosity:       A flag specifying the amount of information to print.  The higher the value, the greater the verbosity.
    @type verbosity:        int
    @param sim_index:       The index of the simulation to optimise.  This should be None if normal optimisation is desired.
    @type sim_index:        None or int
    @keyword lower:         The lower bounds of the grid search which must be equal to the number of parameters in the model.  This optional argument is only used when doing a grid search.
    @type lower:            array of numbers
    @keyword upper:         The upper bounds of the grid search which must be equal to the number of parameters in the model.  This optional argument is only used when doing a grid search.
    @type upper:            array of numbers
    @keyword inc:           The increments for each dimension of the space for the grid search.  The number of elements in the array must equal to the number of parameters in the model.  This argument is only used when doing a grid search.
    @type inc:              array of int
    """

    # Set up the target function and associated data structures.
    model, param_vector, data_types, scaling_matrix = self._target_fn_setup(sim_index=sim_index, scaling=scaling)

    # Nothing to do - the model has no parameters.
    if not len(param_vector):
        warn(RelaxWarning("The model has no parameters, minimisation cannot be performed."))
        return

    # Constraints cannot be used with the 'fixed' model.
    if constraints and cdp.model == 'fixed':
        warn(RelaxWarning("Turning constraints off. These cannot be used for the 'fixed' model."))
        constraints = False

        # Pop out the Method of Multipliers algorithm, leaving the wrapped algorithm.
        if min_algor == 'Method of Multipliers':
            min_algor = min_options[0]
            min_options = min_options[1:]

    # Constraints are compulsory for the 'population' model.
    if not constraints and cdp.model == 'population':
        warn(RelaxWarning("Turning constraints on. These absolutely must be used for the 'population' model."))
        constraints = True

        # Wrap the algorithm inside the Method of Multipliers.
        min_options = (min_algor,) + min_options
        min_algor = 'Method of Multipliers'

    # Newton optimisation of the paramagnetic centre position is not possible as the
    # Hessians are not yet implemented.
    if hasattr(cdp, 'paramag_centre_fixed') and not cdp.paramag_centre_fixed:
        if min_algor in ['newton']:
            raise RelaxError("For the paramagnetic centre position, as the Hessians are not yet implemented Newton optimisation cannot be performed.")

    # The linear constraint matrices.
    if constraints:
        A, b = linear_constraints(data_types=data_types, scaling_matrix=scaling_matrix)
    else:
        A, b = None, None

    # Grid search.
    if search('^[Gg]rid', min_algor):
        # Scale the bounds into the diagonally scaled parameter space.  New lists are
        # built rather than dividing element-wise in place, so that the caller's lower
        # and upper sequences are not modified (bug fix: the original mutated them).
        if scaling:
            lower = [lower[i] / scaling_matrix[i, i] for i in range(len(param_vector))]
            upper = [upper[i] / scaling_matrix[i, i] for i in range(len(param_vector))]

        # The search.
        results = grid(func=model.func, args=(), num_incs=inc, lower=lower, upper=upper, A=A, b=b, verbosity=verbosity)

        # Unpack the results (the grid search only counts function calls).
        param_vector, func, iter_count, warning = results
        f_count = iter_count
        g_count = 0.0
        h_count = 0.0

    # Minimisation.
    else:
        results = generic_minimise(func=model.func, dfunc=model.dfunc, d2func=model.d2func, args=(), x0=param_vector, min_algor=min_algor, min_options=min_options, func_tol=func_tol, grad_tol=grad_tol, maxiter=max_iterations, A=A, b=b, full_output=1, print_flag=verbosity)

        # Disassemble the results.
        if results is None:
            return
        param_vector, func, iter_count, f_count, g_count, h_count, warning = results

    # Catch infinite chi-squared values.
    if isInf(func):
        raise RelaxInfError('chi-squared')

    # Catch chi-squared values of NaN.
    if isNaN(func):
        raise RelaxNaNError('chi-squared')

    # A final function call at the optimised parameters (the returned chi2 is unused;
    # presumably this refreshes the target function's internal back-calculated
    # structures - NOTE(review): confirm before removing).
    chi2 = model.func(param_vector)

    # Scale the parameter vector back into the non-scaled space.
    if scaling:
        param_vector = dot(scaling_matrix, param_vector)

    # Disassemble the parameter vector back into the data store.
    disassemble_param_vector(param_vector=param_vector, data_types=data_types, sim_index=sim_index)

    # Monte Carlo minimisation statistics.
    if sim_index is not None:
        # Chi-squared statistic.
        cdp.chi2_sim[sim_index] = func

        # Iterations.
        cdp.iter_sim[sim_index] = iter_count

        # Function evaluations.
        cdp.f_count_sim[sim_index] = f_count

        # Gradient evaluations.
        cdp.g_count_sim[sim_index] = g_count

        # Hessian evaluations.
        cdp.h_count_sim[sim_index] = h_count

        # Warning.
        cdp.warning_sim[sim_index] = warning

    # Normal statistics.
    else:
        # Chi-squared statistic.
        cdp.chi2 = func

        # Iterations.
        cdp.iter = iter_count

        # Function evaluations.
        cdp.f_count = f_count

        # Gradient evaluations.
        cdp.g_count = g_count

        # Hessian evaluations.
        cdp.h_count = h_count

        # Warning.
        cdp.warning = warning

    # Back-calculated data unpacking and Q factors, for normal optimisation only.
    if sim_index is None and ('rdc' in data_types or 'pcs' in data_types):
        # Unpack the data from the target function class.
        self._minimise_bc_data(model)

        # The RDC Q factors.
        if 'rdc' in data_types:
            rdc.q_factors()

        # The PCS Q factors.
        if 'pcs' in data_types:
            pcs.q_factors()
1122
1123
"""Return the k, n, and chi2 model statistics.

k - number of parameters.
n - number of data points.
chi2 - the chi-squared value.


@keyword model_info:    The data returned from model_loop() (unused).
@type model_info:       None
@keyword spin_id:       The spin identification string.  This is ignored in the N-state model.
@type spin_id:          None or str
@keyword global_stats:  A parameter which determines if global or local statistics are returned.  For the N-state model, this argument is ignored.
@type global_stats:     None or bool
@return:                The optimisation statistics, in tuple format, of the number of parameters (k), the number of data points (n), and the chi-squared value (chi2).
@rtype:                 tuple of (int, int, float)
"""

# Return the three model-free statistics from the current data pipe.
return param_num(), num_data_points(), cdp.chi2
1144
1145
1187
1188
# Documentation table describing the parameter name matching patterns, used by the user
# function front end.
return_data_name_doc = Desc_container("N-state model data type string matching patterns")
_table = uf_tables.add_table(label="table: N-state data type patterns", caption="N-state model data type string matching patterns.")
_table.add_headings(["Data type", "Object name", "Patterns"])
_table.add_row(["Probabilities", "'probs'", "'p0', 'p1', 'p2', ..., 'pN'"])
_table.add_row(["Euler angle alpha", "'alpha'", "'alpha0', 'alpha1', ..."])
_table.add_row(["Euler angle beta", "'beta'", "'beta0', 'beta1', ..."])
_table.add_row(["Euler angle gamma", "'gamma'", "'gamma0', 'gamma1', ..."])
_table.add_row(["Bond length", "'r'", "'^r$' or '[Bb]ond[ -_][Ll]ength'"])
_table.add_row(["Heteronucleus type", "'heteronuc_type'", "'^[Hh]eteronucleus$'"])
_table.add_row(["Proton type", "'proton_type'", "'^[Pp]roton$'"])
return_data_name_doc.add_table(_table.label)
return_data_name_doc.add_paragraph("The objects corresponding to the object names are lists (or arrays) with each element corrsponding to each state.")
1201
"""Return a unique identifying string for the N-state model parameter.

@param param:   The N-state model parameter.
@type param:    str
@return:        The unique parameter identifying string.
@rtype:         str
"""

# Probabilities ('p0', 'p1', ...).
if search('^p[0-9]*$', param):
    return 'probs'

# The Euler angle alpha ('alpha0', 'alpha1', ...).
if search('^alpha', param):
    return 'alpha'

# The Euler angle beta ('beta0', 'beta1', ...).
if search('^beta', param):
    return 'beta'

# The Euler angle gamma ('gamma0', 'gamma1', ...).
if search('^gamma', param):
    return 'gamma'

# The bond length.
if search('^r$', param) or search('[Bb]ond[ -_][Ll]ength', param):
    return 'r'

# The heteronucleus type.
if param == 'heteronuc_type':
    return 'heteronuc_type'

# The proton type.
if param == 'proton_type':
    return 'proton_type'

# Paramagnetic centre coordinates are their own identifier.
if search('^paramag_[xyz]$', param):
    return param
1242
1243
def sim_return_errors(self, data_id):
    """Create and return the spin specific Monte Carlo Ri error structure.

    @keyword data_id:   The list of spin ID, data type, and alignment ID, as yielded by the base_data_loop() generator method.
    @type data_id:      str
    @return:            The Monte Carlo simulation data errors.
    @rtype:             list of float
    """

    # Unpack the identifier: the data container, the data type, and the alignment ID.
    container = data_id[0]
    data_type = data_id[1]
    align_id = data_id[2]

    # Deselected spins yield no PCS errors (implicit None return).
    if data_type == 'pcs' and not container.select:
        return

    # The error structure to build up.
    errors = []

    # RDC errors, keyed per alignment ID.
    if data_type == 'rdc' and hasattr(container, 'rdc'):
        if not hasattr(container, 'rdc_err'):
            raise RelaxError("The RDC errors are missing for the spin pair '%s' and '%s'." % (container.spin_id1, container.spin_id2))
        # None when no error exists for this alignment (dict.get default).
        errors.append(container.rdc_err.get(align_id))

    # NOESY errors, a single flat value per container.
    elif data_type == 'noesy' and hasattr(container, 'noesy'):
        if not hasattr(container, 'noesy_err'):
            raise RelaxError("The NOESY errors are missing for the spin pair '%s' and '%s'." % (container.spin_id1, container.spin_id2))
        errors.append(container.noesy_err)

    # PCS errors, keyed per alignment ID.
    elif data_type == 'pcs' and hasattr(container, 'pcs'):
        if not hasattr(container, 'pcs_err'):
            raise RelaxError("The PCS errors are missing for spin '%s'." % data_id[0])
        errors.append(container.pcs_err.get(align_id))

    # Return the list (empty when no matching data is present).
    return errors
1305
def return_grace_string(self, param):
    """Return the Grace string representation of the parameter.

    This is used for axis labelling.

    @param param:   The specific analysis parameter.
    @type param:    str
    @return:        The Grace string representation of the parameter.
    @rtype:         str
    """

    # Direct lookup of the axis label; unknown parameters give None.
    labels = {
        'pcs': "Measured PCS",
        'pcs_bc': "Back-calculated PCS",
        'rdc': "Measured RDC",
        'rdc_bc': "Back-calculated RDC",
    }
    return labels.get(param)
1333
def return_units(self, param):
    """Return a string representing the parameters units.

    @param param:   The name of the parameter to return the units string for.
    @type param:    str
    @return:        The parameter units string.
    @rtype:         str
    """

    # PCS values and back-calculated PCS values are in ppm.
    if param in ('pcs', 'pcs_bc'):
        return 'ppm'

    # RDC values and back-calculated RDC values are in Hz.
    if param in ('rdc', 'rdc_bc'):
        return 'Hz'

    # Unknown parameters have no units (implicit None return).
1351
# User function documentation: how parameter setting works for the N-state model.
set_doc = Desc_container("N-state model set details")
# Typo fix in the user-visible text: the parenthesis after 'N-1 for the last' was never closed.
set_doc.add_paragraph("Setting parameters for the N-state model is a little different from the other type of analyses as each state has a set of parameters with the same names as the other states. To set the parameters for a specific state c (ranging from 0 for the first to N-1 for the last), the number c should be added to the end of the parameter name. So the Euler angle gamma of the third state is specified using the string 'gamma2'.")
1355
def set_error(self, model_info, index, error):
    """Set the parameter errors.

    @param model_info:  The global model index originating from model_loop().
    @type model_info:   int
    @param index:       The index of the parameter to set the errors for.
    @type index:        int
    @param error:       The error value.
    @type error:        float
    """

    # The alignment tensor parameter names, in parameter vector order.
    names = ['Axx', 'Ayy', 'Axy', 'Axz', 'Ayz']

    # The alignment tensor parameters occupy the first 5*N positions of the parameter
    # vector.  Count only the optimised (non-fixed) tensors, matching the indexing used
    # by sim_return_param() and the skip_fixed=True lookup below (the previous
    # len(cdp.align_ids) bound also counted fixed tensors, mis-mapping the index).
    if index < align_tensor.num_tensors(skip_fixed=True)*5:
        # Split the flat index into the parameter and tensor indices.  Floor division
        # ('//') is required so the tensor index stays an int under Python 3 true
        # division ('/' would produce a float, which cannot be used as an index).
        param_index = index % 5
        tensor_index = (index - index % 5) // 5

        # Store the error on the matching tensor object.
        tensor = align_tensor.return_tensor(index=tensor_index, skip_fixed=True)
        tensor.set(param=names[param_index], value=error, category='err')

        # Return the newly set error value.
        return getattr(tensor, names[param_index]+'_err')

    # Non-tensor parameters are not handled here (implicit None return).
1383
def set_param_values(self, param=None, value=None, spin_id=None, error=False, force=True):
    """Set the N-state model parameter values.

    @keyword param:     The parameter name list.
    @type param:        list of str
    @keyword value:     The parameter value list.
    @type value:        list
    @keyword spin_id:   The spin identification string, used to restrict which spins
                        receive spin-specific parameters.
    @type spin_id:      None or str
    @keyword error:     A flag which if True will allow the parameter errors to be set instead of the values.
    @type error:        bool
    @keyword force:     A flag which if True will cause current values to be overwritten.  If False, a RelaxError will raised if the parameter value is already set.
    @type force:        bool
    """

    # Argument checks.
    lib.arg_check.is_str_list(param, 'parameter name')
    lib.arg_check.is_list(value, 'parameter value')

    # Loop over the parameter/value pairs.
    for i in range(len(param)):
        # Map the parameter name onto the data store object name.
        obj_name = self.return_data_name(param[i])

        # Unknown parameter names are fatal.
        if not obj_name:
            raise RelaxError("The parameter '%s' is not valid for this data pipe type." % param[i])

        # Redirect onto the error object when setting errors.
        if error:
            obj_name += '_err'

        # Per-state parameters, stored as lists/arrays on the data pipe with one
        # element per state (the parameter name carries the state index, e.g. 'gamma2').
        if obj_name in ['probs', 'alpha', 'beta', 'gamma']:
            # The state index encoded in the parameter name.
            index = param_model_index(param[i])

            # Set the single element.
            obj = getattr(cdp, obj_name)
            obj[index] = value[i]

        # The paramagnetic centre coordinates.  Bug fix: this must be an elif — with
        # two independent if statements, a per-state parameter set above would also
        # fall through to the else branch and be spuriously set onto every spin.
        elif search('^paramag_[xyz]$', obj_name):
            # Initialise the position structure if needed.
            if not hasattr(cdp, 'paramagnetic_centre'):
                cdp.paramagnetic_centre = zeros(3, float64)

            # Map the axis name onto the coordinate index.
            if obj_name == 'paramag_x':
                index = 0
            elif obj_name == 'paramag_y':
                index = 1
            else:
                index = 2

            # Set the coordinate.
            cdp.paramagnetic_centre[index] = value[i]

        # All remaining parameters are spin specific.
        else:
            for spin in spin_loop(spin_id):
                setattr(spin, obj_name, value[i])
1447
def sim_init_values(self):
    """Initialise the Monte Carlo parameter values."""

    # The names of the minimisation statistic objects to create simulation copies of.
    sim_names = self.data_names(set='min')

    # Include the paramagnetic centre, but only when it is being optimised.
    if hasattr(cdp, 'paramag_centre_fixed') and not cdp.paramag_centre_fixed:
        sim_names += ['paramagnetic_centre']

    # Alignment tensor simulation structures.
    if hasattr(cdp, 'align_tensors'):
        # The alignment tensor parameter names.
        names = ['Axx', 'Ayy', 'Axy', 'Axz', 'Ayz']

        # Loop over all tensors in the data pipe.
        for i in range(len(cdp.align_tensors)):
            # Skip tensors which are excluded from the optimisation.
            if not opt_uses_tensor(cdp.align_tensors[i]):
                continue

            # Allocate the per-tensor simulation storage.
            cdp.align_tensors[i].set_sim_num(cdp.sim_number)

            # Seed every simulation with a copy of the current tensor parameter values.
            for object_name in names:
                for j in range(cdp.sim_number):
                    cdp.align_tensors[i].set(param=object_name, value=deepcopy(getattr(cdp.align_tensors[i], object_name)), category='sim', sim_index=j)

    # Create the '<name>_sim' simulation structures on the data pipe.
    for object_name in sim_names:
        # The simulation object name.
        sim_object_name = object_name + '_sim'

        # Initialise as an empty list (clobbering any pre-existing structure).
        setattr(cdp, sim_object_name, [])

        # Alias the freshly created list.
        sim_object = getattr(cdp, sim_object_name)

        # One None placeholder per simulation.
        for j in range(cdp.sim_number):
            sim_object.append(None)

    # Seed the paramagnetic centre simulations with copies of the current position
    # (the placeholders were created in the loop above, as 'paramagnetic_centre' was
    # added to sim_names when the centre is optimised).
    if hasattr(cdp, 'paramag_centre_fixed') and not cdp.paramag_centre_fixed:
        for j in range(cdp.sim_number):
            cdp.paramagnetic_centre_sim[j] = deepcopy(cdp.paramagnetic_centre)
1498
def sim_pack_data(self, data_id, sim_data):
    """Pack the Monte Carlo simulation data.

    @keyword data_id:   The list of spin ID, data type, and alignment ID, as yielded by the base_data_loop() generator method.
    @type data_id:      list of str
    @param sim_data:    The Monte Carlo simulation data.
    @type sim_data:     list of float
    """

    # The data container is the first element of the identifier.
    container = data_id[0]

    # RDC data, stored per alignment ID.
    if data_id[1] == 'rdc' and hasattr(container, 'rdc'):
        # Initialise the simulation structure if missing.
        if not hasattr(container, 'rdc_sim'):
            container.rdc_sim = {}

        # Pack one value per simulation for this alignment.
        container.rdc_sim[data_id[2]] = []
        for i in range(cdp.sim_number):
            container.rdc_sim[data_id[2]].append(sim_data[i][0])

    # NOESY data, a single flat structure per container (no per-alignment dict).
    elif data_id[1] == 'noesy' and hasattr(container, 'noesy'):
        # Bug fix: noesy_sim is a plain list, so append directly — the old code
        # indexed the freshly created empty list with the alignment ID
        # (container.noesy_sim[data_id[2]].append(...)), which always raised.
        container.noesy_sim = []
        for i in range(cdp.sim_number):
            container.noesy_sim.append(sim_data[i][0])

    # PCS data, stored per alignment ID.
    elif data_id[1] == 'pcs' and hasattr(container, 'pcs'):
        # Initialise the simulation structure if missing.
        if not hasattr(container, 'pcs_sim'):
            container.pcs_sim = {}

        # Pack one value per simulation for this alignment.
        container.pcs_sim[data_id[2]] = []
        for i in range(cdp.sim_number):
            container.pcs_sim[data_id[2]].append(sim_data[i][0])
1540
1542 """Return the array of simulation parameter values.
1543
1544 @param model_info: The global model index originating from model_loop().
1545 @type model_info: int
1546 @param index: The index of the parameter to return the array of values for.
1547 @type index: int
1548 @return: The array of simulation parameter values.
1549 @rtype: list of float
1550 """
1551
1552
1553 names = ['Axx', 'Ayy', 'Axy', 'Axz', 'Ayz']
1554
1555
1556 if index < align_tensor.num_tensors(skip_fixed=True)*5:
1557
1558 param_index = index % 5
1559 tensor_index = (index - index % 5) / 5
1560
1561
1562 tensor = align_tensor.return_tensor(index=tensor_index, skip_fixed=True)
1563 return getattr(tensor, names[param_index]+'_sim')
1564