
Source Code for Module specific_analyses.relax_disp.optimisation

###############################################################################
#                                                                             #
# Copyright (C) 2013-2014 Edward d'Auvergne                                   #
#                                                                             #
# This file is part of the program relax (http://www.nmr-relax.com).          #
#                                                                             #
# This program is free software: you can redistribute it and/or modify        #
# it under the terms of the GNU General Public License as published by        #
# the Free Software Foundation, either version 3 of the License, or           #
# (at your option) any later version.                                         #
#                                                                             #
# This program is distributed in the hope that it will be useful,             #
# but WITHOUT ANY WARRANTY; without even the implied warranty of              #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the               #
# GNU General Public License for more details.                                #
#                                                                             #
# You should have received a copy of the GNU General Public License           #
# along with this program.  If not, see <http://www.gnu.org/licenses/>.       #
#                                                                             #
###############################################################################

# Module docstring.
"""Module for the optimisation of the relaxation dispersion models."""

# Python module imports.
from minfx.generic import generic_minimise
from minfx.grid import grid
from numpy import dot, float64, int32, ones, zeros
from numpy.linalg import inv
from re import search
import sys

# relax module imports.
from lib.check_types import is_float
from lib.errors import RelaxError, RelaxLenError
from lib.text.sectioning import subsection
from multi import Memo, Result_command, Slave_command
from specific_analyses.relax_disp.disp_data import count_spins, has_disp_data, has_proton_mmq_cpmg, loop_exp, loop_exp_frq, loop_exp_frq_offset_point, loop_frq, loop_offset, pack_back_calc_r2eff, return_cpmg_frqs, return_index_from_disp_point, return_index_from_exp_type, return_index_from_frq, return_offset_data, return_param_key_from_data, return_r1_data, return_r2eff_arrays, return_spin_lock_nu1, return_value_from_frq_index
from specific_analyses.relax_disp.parameters import assemble_param_vector, assemble_scaling_matrix, disassemble_param_vector, linear_constraints, loop_parameters, param_conversion, param_num
from specific_analyses.relax_disp.variables import EXP_TYPE_CPMG_PROTON_MQ, EXP_TYPE_CPMG_PROTON_SQ, EXP_TYPE_LIST_CPMG, MODEL_CR72, MODEL_CR72_FULL, MODEL_DPL94, MODEL_LIST_MMQ, MODEL_LM63, MODEL_M61, MODEL_M61B, MODEL_MP05, MODEL_NS_R1RHO_2SITE, MODEL_TAP03, MODEL_TP02
from target_functions.relax_disp import Dispersion

def back_calc_r2eff(spin=None, spin_id=None, cpmg_frqs=None, spin_lock_nu1=None, store_chi2=False):
    """Back-calculation of R2eff/R1rho values for the given spin.

    @keyword spin:          The specific spin data container.
    @type spin:             SpinContainer instance
    @keyword spin_id:       The spin ID string for the spin container.
    @type spin_id:          str
    @keyword cpmg_frqs:     The CPMG frequencies to use instead of the user loaded values - to enable interpolation.
    @type cpmg_frqs:        list of lists of numpy rank-1 float arrays
    @keyword spin_lock_nu1: The spin-lock field strengths to use instead of the user loaded values - to enable interpolation.
    @type spin_lock_nu1:    list of lists of numpy rank-1 float arrays
    @keyword store_chi2:    A flag which if True will cause the spin specific chi-squared value to be stored in the spin container.
    @type store_chi2:       bool
    @return:                The back-calculated R2eff/R1rho value for the given spin.
    @rtype:                 numpy rank-1 float array
    """

    # Skip protons for MMQ data.
    if spin.model in MODEL_LIST_MMQ and spin.isotope == '1H':
        return

    # Create the initial parameter vector.
    param_vector = assemble_param_vector(spins=[spin])

    # Create a scaling matrix.
    scaling_matrix = assemble_scaling_matrix(spins=[spin], scaling=False)

    # Number of spectrometer fields.
    fields = [None]
    field_count = 1
    if hasattr(cdp, 'spectrometer_frq_count'):
        fields = cdp.spectrometer_frq_list
        field_count = cdp.spectrometer_frq_count

    # Initialise the data structures for the target function.
    values, errors, missing, frqs, frqs_H, exp_types, relax_times = return_r2eff_arrays(spins=[spin], spin_ids=[spin_id], fields=fields, field_count=field_count)

    # The offset and R1 data.
    chemical_shifts, offsets, tilt_angles, Delta_omega, w_eff = return_offset_data(spins=[spin], spin_ids=[spin_id], field_count=field_count, fields=spin_lock_nu1)
    r1 = return_r1_data(spins=[spin], spin_ids=[spin_id], field_count=field_count)

    # The dispersion data.
    recalc_tau = True
    if cpmg_frqs == None and spin_lock_nu1 == None:
        cpmg_frqs = return_cpmg_frqs(ref_flag=False)
        spin_lock_nu1 = return_spin_lock_nu1(ref_flag=False)

    # Reconstruct the structures for interpolation.
    else:
        recalc_tau = False
        values = []
        errors = []
        missing = []
        for exp_type, ei in loop_exp(return_indices=True):
            values.append([])
            errors.append([])
            missing.append([])
            for si in range(1):
                values[ei].append([])
                errors[ei].append([])
                missing[ei].append([])
                for frq, mi in loop_frq(return_indices=True):
                    values[ei][si].append([])
                    errors[ei][si].append([])
                    missing[ei][si].append([])
                    for offset, oi in loop_offset(exp_type=exp_type, frq=frq, return_indices=True):
                        if exp_type in EXP_TYPE_LIST_CPMG:
                            num = len(cpmg_frqs[ei][mi][oi])
                        else:
                            num = len(spin_lock_nu1[ei][mi][oi])
                        values[ei][si][mi].append(zeros(num, float64))
                        errors[ei][si][mi].append(ones(num, float64))
                        missing[ei][si][mi].append(zeros(num, int32))

    # Initialise the relaxation dispersion fit functions.
    model = Dispersion(model=spin.model, num_params=param_num(spins=[spin]), num_spins=1, num_frq=field_count, exp_types=exp_types, values=values, errors=errors, missing=missing, frqs=frqs, frqs_H=frqs_H, cpmg_frqs=cpmg_frqs, spin_lock_nu1=spin_lock_nu1, chemical_shifts=chemical_shifts, offset=offsets, tilt_angles=tilt_angles, r1=r1, relax_times=relax_times, scaling_matrix=scaling_matrix, recalc_tau=recalc_tau)

    # Make a single function call.  This will cause back calculation and the data will be stored in the class instance.
    chi2 = model.func(param_vector)

    # Store the chi-squared value.
    if store_chi2:
        spin.chi2 = chi2

    # Return the structure.
    return model.back_calc

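
# Illustrative usage sketch (not part of the original module).  Once a spin has
# been assigned a dispersion model and parameter values, back_calc_r2eff()
# simulates the R2eff/R1rho curve at those values.  The return_spin() accessor
# from pipe_control.mol_res_spin and the spin ID ':2@N' are assumptions made
# only for this example.
#
#     from pipe_control.mol_res_spin import return_spin
#
#     spin_id = ':2@N'
#     spin = return_spin(spin_id)
#     back_calc = back_calc_r2eff(spin=spin, spin_id=spin_id, store_chi2=True)
#     print("Back-calculated R2eff/R1rho values: %s" % back_calc)
#     print("Chi-squared value: %s" % spin.chi2)
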
def grid_search_setup(spins=None, spin_ids=None, param_vector=None, lower=None, upper=None, inc=None, scaling_matrix=None):
    """The grid search setup function.

    @keyword spins:             The list of spin data containers for the block.
    @type spins:                list of SpinContainer instances
    @keyword spin_ids:          The corresponding spin ID strings.
    @type spin_ids:             list of str
    @keyword param_vector:      The parameter vector.
    @type param_vector:         numpy array
    @keyword lower:             The lower bounds of the grid search, the length of which must equal the number of parameters in the model.  This optional argument is only used when doing a grid search.
    @type lower:                array of numbers
    @keyword upper:             The upper bounds of the grid search, the length of which must equal the number of parameters in the model.  This optional argument is only used when doing a grid search.
    @type upper:                array of numbers
    @keyword inc:               The increments for each dimension of the space for the grid search.  The number of elements in the array must equal the number of parameters in the model.  This argument is only used when doing a grid search.
    @type inc:                  array of int
    @keyword scaling_matrix:    The scaling matrix.
    @type scaling_matrix:       numpy diagonal matrix
    @return:                    The total grid size, the number of increments for each dimension, and the scaled lower and upper bounds for each dimension.
    @rtype:                     (int, list of int, list of float, list of float)
    """

    # The length of the parameter array.
    n = len(param_vector)

    # Make sure that the length of the parameter array is > 0.
    if n == 0:
        raise RelaxError("Cannot run a grid search on a model with zero parameters.")

    # Lower bounds.
    if lower != None and len(lower) != n:
        raise RelaxLenError('lower bounds', n)

    # Upper bounds.
    if upper != None and len(upper) != n:
        raise RelaxLenError('upper bounds', n)

    # Increment.
    if isinstance(inc, list) and len(inc) != n:
        raise RelaxLenError('increment', n)
    elif isinstance(inc, int):
        inc = [inc]*n

    # Set up the default bounds.
    if not lower:
        # Init.
        lower = []
        upper = []

        # The R2eff model.
        if cdp.model_type == 'R2eff':
            # Loop over each experiment type, spectrometer frequency, offset and dispersion point.
            for exp_type, frq, offset, point in loop_exp_frq_offset_point():
                # Loop over the parameters.
                for param_name, param_index, si, r20_key in loop_parameters(spins=spins):
                    # R2eff relaxation rate (from 1 to 40 s^-1).
                    if param_name == 'r2eff':
                        lower.append(1.0)
                        upper.append(40.0)

                    # Intensity.
                    elif param_name == 'i0':
                        lower.append(0.0001)
                        upper.append(max(spins[si].intensities.values()))

        # All other models.
        else:
            # Loop over the parameters.
            for param_name, param_index, si, r20_key in loop_parameters(spins=spins):
                # Cluster specific parameter.
                if si == None:
                    si = 0

                # R2 relaxation rates (from 1 to 40 s^-1).
                if param_name in ['r2', 'r2a', 'r2b']:
                    lower.append(1.0)
                    upper.append(40.0)

                # The pA.pB.dw**2 and pA.dw**2 parameters.
                elif param_name in ['phi_ex', 'phi_ex_B', 'phi_ex_C', 'padw2']:
                    lower.append(0.0)
                    upper.append(10.0)

                # Chemical shift difference between states A and B (heteronucleus).
                elif param_name in ['dw', 'dw_AB', 'dw_AC', 'dw_BC']:
                    if spins[si].model in MODEL_LIST_MMQ:
                        lower.append(-10.0)
                    else:
                        lower.append(0.0)
                    upper.append(10.0)

                # Chemical shift difference between states A and B (proton).
                elif param_name in ['dwH', 'dwH_AB', 'dwH_AC', 'dwH_BC']:
                    if spins[si].model in MODEL_LIST_MMQ:
                        lower.append(-3.0)
                    else:
                        lower.append(0.0)
                    upper.append(3.0)

                # The population of state A.
                elif param_name == 'pA':
                    if spins[si].model == MODEL_M61B:
                        lower.append(0.85)
                    else:
                        lower.append(0.5)
                    upper.append(1.0)

                # The population of state B (for 3-site exchange).
                elif param_name == 'pB':
                    lower.append(0.0)
                    upper.append(0.5)

                # Exchange rates.
                elif param_name in ['kex', 'kex_AB', 'kex_AC', 'kex_BC', 'k_AB', 'kB', 'kC']:
                    lower.append(1.0)
                    upper.append(100000.0)

                # Time of exchange.
                elif param_name in ['tex']:
                    lower.append(1/100000.0)
                    upper.append(1.0)

    # Pre-set parameters.
    for param_name, param_index, si, r20_key in loop_parameters(spins=spins):
        # Cluster specific parameter.
        if si == None:
            si = 0

        # Get the parameter.
        if hasattr(spins[si], param_name):
            val = getattr(spins[si], param_name)

            # Value already set.
            if is_float(val) and val != 0.0:
                # Printout.
                print("The spin '%s' parameter '%s' is pre-set to %s, skipping it in the grid search." % (spin_ids[si], param_name, val))

                # Turn off the grid search for this parameter.
                inc[param_index] = 1
                lower[param_index] = val
                upper[param_index] = val

    # The full grid size.
    grid_size = 1
    for i in range(n):
        grid_size *= inc[i]

    # Diagonal scaling of minimisation options.
    lower_new = []
    upper_new = []
    for i in range(n):
        lower_new.append(lower[i] / scaling_matrix[i, i])
        upper_new.append(upper[i] / scaling_matrix[i, i])

    # Return the data structures.
    return grid_size, inc, lower_new, upper_new

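
# Illustrative sketch (not part of the original module) of how the values
# returned by grid_search_setup() fit together.  The spins and spin_ids objects
# are assumed to describe an existing spin cluster; the parameter vector and
# scaling matrix are built with the helpers imported above.
#
#     param_vector = assemble_param_vector(spins=spins)
#     scaling_matrix = assemble_scaling_matrix(spins=spins, scaling=True)
#     grid_size, inc, lower, upper = grid_search_setup(spins=spins, spin_ids=spin_ids, param_vector=param_vector, inc=11, scaling_matrix=scaling_matrix)
#
#     # With an integer inc, each dimension receives the same number of
#     # increments, so grid_size is 11**len(param_vector) unless pre-set
#     # parameters collapse some dimensions to a single increment.  The returned
#     # lower and upper bounds are divided by the diagonal of the scaling
#     # matrix, i.e. they are in the same scaled units as the parameter vector.
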
class Disp_memo(Memo):
    """The relaxation dispersion memo class."""

    def __init__(self, spins=None, spin_ids=None, sim_index=None, scaling_matrix=None, verbosity=None):
        """Initialise the relaxation dispersion memo class.

        This is used for handling the optimisation results returned from a slave processor.  It runs on the master processor and is used to store data which is passed to the slave processor and then passed back to the master via the results command.


        @keyword spins:             The list of spin data containers for the cluster.
        @type spins:                list of SpinContainer instances
        @keyword spin_ids:          The spin ID strings for the cluster.
        @type spin_ids:             list of str
        @keyword sim_index:         The optional MC simulation index.
        @type sim_index:            int
        @keyword scaling_matrix:    The diagonal, square scaling matrix.
        @type scaling_matrix:       numpy diagonal matrix
        @keyword verbosity:         The verbosity level.  This is used by the result command returned to the master for printouts.
        @type verbosity:            int
        """

        # Execute the base class __init__() method.
        super(Disp_memo, self).__init__()

        # Store the arguments.
        self.spins = spins
        self.spin_ids = spin_ids
        self.sim_index = sim_index
        self.scaling_matrix = scaling_matrix
        self.verbosity = verbosity

class Disp_minimise_command(Slave_command):
    """Command class for relaxation dispersion optimisation on the slave processor."""

    def __init__(self, spins=None, spin_ids=None, sim_index=None, scaling_matrix=None, min_algor=None, min_options=None, func_tol=None, grad_tol=None, max_iterations=None, constraints=False, verbosity=0, lower=None, upper=None, inc=None, fields=None, param_names=None):
        """Initialise the base class, storing all the master data to be sent to the slave processor.

        This method is run on the master processor whereas the run() method is run on the slave processor.


        @keyword spins:             The list of spin data containers for the cluster.
        @type spins:                list of SpinContainer instances
        @keyword spin_ids:          The list of spin ID strings corresponding to the spins argument.
        @type spin_ids:             list of str
        @keyword sim_index:         The index of the simulation to optimise.  This should be None if normal optimisation is desired.
        @type sim_index:            None or int
        @keyword scaling_matrix:    The diagonal, square scaling matrix.
        @type scaling_matrix:       numpy diagonal matrix
        @keyword min_algor:         The minimisation algorithm to use.
        @type min_algor:            str
        @keyword min_options:       An array of options to be used by the minimisation algorithm.
        @type min_options:          array of str
        @keyword func_tol:          The function tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
        @type func_tol:             None or float
        @keyword grad_tol:          The gradient tolerance which, when reached, terminates optimisation.  Setting this to None turns off the check.
        @type grad_tol:             None or float
        @keyword max_iterations:    The maximum number of iterations for the algorithm.
        @type max_iterations:       int
        @keyword constraints:       If True, constraints are used during optimisation.
        @type constraints:          bool
        @keyword verbosity:         The amount of information to print.  The higher the value, the greater the verbosity.
        @type verbosity:            int
        @keyword lower:             The lower bounds of the grid search, the length of which must equal the number of parameters in the model.  This optional argument is only used when doing a grid search.
        @type lower:                array of numbers
        @keyword upper:             The upper bounds of the grid search, the length of which must equal the number of parameters in the model.  This optional argument is only used when doing a grid search.
        @type upper:                array of numbers
        @keyword inc:               The increments for each dimension of the space for the grid search.  The number of elements in the array must equal the number of parameters in the model.  This argument is only used when doing a grid search.
        @type inc:                  array of int
        @keyword fields:            The list of unique spectrometer field strengths.
        @type fields:               list of float
        @keyword param_names:       The list of parameter names to use in printouts.
        @type param_names:          list of str
        """

        # Execute the base class __init__() method.
        super(Disp_minimise_command, self).__init__()

        # Store the arguments needed by the run() method.
        self.spins = spins
        self.spin_ids = spin_ids
        self.sim_index = sim_index
        self.scaling_matrix = scaling_matrix
        self.verbosity = verbosity
        self.min_algor = min_algor
        self.min_options = min_options
        self.func_tol = func_tol
        self.grad_tol = grad_tol
        self.max_iterations = max_iterations
        self.fields = fields
        self.param_names = param_names

        # Create the initial parameter vector.
        self.param_vector = assemble_param_vector(spins=self.spins)
        if len(scaling_matrix):
            self.param_vector = dot(inv(scaling_matrix), self.param_vector)

        # Get the grid search minimisation options.
        self.lower_new, self.upper_new = None, None
        if search('^[Gg]rid', min_algor):
            self.grid_size, self.inc_new, self.lower_new, self.upper_new = grid_search_setup(spins=spins, spin_ids=spin_ids, param_vector=self.param_vector, lower=lower, upper=upper, inc=inc, scaling_matrix=self.scaling_matrix)

        # Linear constraints.
        self.A, self.b = None, None
        if constraints:
            self.A, self.b = linear_constraints(spins=spins, scaling_matrix=scaling_matrix)

        # Test if the spectrometer frequencies have been set.
        if spins[0].model in [MODEL_LM63, MODEL_CR72, MODEL_CR72_FULL, MODEL_M61, MODEL_TP02, MODEL_TAP03, MODEL_MP05] and not hasattr(cdp, 'spectrometer_frq'):
            raise RelaxError("The spectrometer frequency information has not been specified.")

        # The R2eff/R1rho data.
        self.values, self.errors, self.missing, self.frqs, self.frqs_H, self.exp_types, self.relax_times = return_r2eff_arrays(spins=spins, spin_ids=spin_ids, fields=fields, field_count=len(fields), sim_index=sim_index)

        # The offset and R1 data.
        self.chemical_shifts, self.offsets, self.tilt_angles, self.Delta_omega, self.w_eff = return_offset_data(spins=spins, spin_ids=spin_ids, field_count=len(fields))
        self.r1 = return_r1_data(spins=spins, spin_ids=spin_ids, field_count=len(fields), sim_index=sim_index)

        # Parameter number.
        self.param_num = param_num(spins=spins)

        # The dispersion data.
        self.dispersion_points = cdp.dispersion_points
        self.cpmg_frqs = return_cpmg_frqs(ref_flag=False)
        self.spin_lock_nu1 = return_spin_lock_nu1(ref_flag=False)

    def run(self, processor, completed):
        """Set up and perform the optimisation."""

        # Print out.
        if self.verbosity >= 1:
            # Individual spin block section.
            top = 2
            if self.verbosity >= 2:
                top += 2
            subsection(file=sys.stdout, text="Fitting to the spin block %s"%self.spin_ids, prespace=top)

            # Grid search printout.
            if search('^[Gg]rid', self.min_algor):
                print("Unconstrained grid search size: %s (constraints may decrease this size).\n" % self.grid_size)

        # Initialise the function to minimise.
        model = Dispersion(model=self.spins[0].model, num_params=self.param_num, num_spins=count_spins(self.spins), num_frq=len(self.fields), exp_types=self.exp_types, values=self.values, errors=self.errors, missing=self.missing, frqs=self.frqs, frqs_H=self.frqs_H, cpmg_frqs=self.cpmg_frqs, spin_lock_nu1=self.spin_lock_nu1, chemical_shifts=self.chemical_shifts, offset=self.offsets, tilt_angles=self.tilt_angles, r1=self.r1, relax_times=self.relax_times, scaling_matrix=self.scaling_matrix)

        # Grid search.
        if search('^[Gg]rid', self.min_algor):
            results = grid(func=model.func, args=(), num_incs=self.inc_new, lower=self.lower_new, upper=self.upper_new, A=self.A, b=self.b, verbosity=self.verbosity)

            # Unpack the results.
            param_vector, chi2, iter_count, warning = results
            f_count = iter_count
            g_count = 0.0
            h_count = 0.0

        # Minimisation.
        else:
            results = generic_minimise(func=model.func, args=(), x0=self.param_vector, min_algor=self.min_algor, min_options=self.min_options, func_tol=self.func_tol, grad_tol=self.grad_tol, maxiter=self.max_iterations, A=self.A, b=self.b, full_output=True, print_flag=self.verbosity)

            # Unpack the results.
            if results == None:
                return
            param_vector, chi2, iter_count, f_count, g_count, h_count, warning = results

        # Optimisation printout.
        if self.verbosity:
            print("\nOptimised parameter values:")
            for i in range(len(param_vector)):
                print("%-20s %25.15f" % (self.param_names[i], param_vector[i]*self.scaling_matrix[i, i]))

        # Printout.
        if self.sim_index != None:
            print("Simulation %s, cluster %s" % (self.sim_index+1, self.spin_ids))

        # Create the result command object to send back to the master.
        processor.return_object(Disp_result_command(processor=processor, memo_id=self.memo_id, param_vector=param_vector, chi2=chi2, iter_count=iter_count, f_count=f_count, g_count=g_count, h_count=h_count, warning=warning, missing=self.missing, back_calc=model.back_calc, completed=False))

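
# Illustrative sketch (not part of the original module) of how Disp_memo and
# Disp_minimise_command are typically paired on the master processor.  The
# Processor_box accessor and the add_to_queue()/run_queue() methods of the
# multi package are assumptions of this sketch, and the spins, spin_ids,
# scaling_matrix and param_names objects are taken to come from the calling
# analysis code.
#
#     from multi import Processor_box
#
#     processor_box = Processor_box()
#     command = Disp_minimise_command(spins=spins, spin_ids=spin_ids, scaling_matrix=scaling_matrix, min_algor='simplex', func_tol=1e-25, max_iterations=10000000, constraints=True, verbosity=1, fields=cdp.spectrometer_frq_list, param_names=param_names)
#     memo = Disp_memo(spins=spins, spin_ids=spin_ids, scaling_matrix=scaling_matrix, verbosity=1)
#     processor_box.processor.add_to_queue(command, memo)
#     processor_box.processor.run_queue()
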
class Disp_result_command(Result_command):
    """Class for processing the dispersion optimisation results.

    This object will be sent from the slave back to the master to have its run() method executed.
    """

    def __init__(self, processor=None, memo_id=None, param_vector=None, chi2=None, iter_count=None, f_count=None, g_count=None, h_count=None, warning=None, missing=None, back_calc=None, completed=True):
        """Set up this class object on the slave, placing the minimisation results here.

        @keyword processor:     The processor object.
        @type processor:        multi.processor.Processor instance
        @keyword memo_id:       The memo identification string.
        @type memo_id:          str
        @keyword param_vector:  The optimised parameter vector.
        @type param_vector:     numpy rank-1 array
        @keyword chi2:          The final target function value.
        @type chi2:             float
        @keyword iter_count:    The number of optimisation iterations.
        @type iter_count:       int
        @keyword f_count:       The total function call count.
        @type f_count:          int
        @keyword g_count:       The total gradient call count.
        @type g_count:          int
        @keyword h_count:       The total Hessian call count.
        @type h_count:          int
        @keyword warning:       Any optimisation warnings.
        @type warning:          str or None
        @keyword missing:       The data structure indicating which R2eff/R1rho' base data is missing.
        @type missing:          numpy rank-3 array
        @keyword back_calc:     The back-calculated R2eff/R1rho' data structure from the target function class.  This will be transferred to the master to be stored in the r2eff_bc data structure.
        @type back_calc:        numpy rank-3 array
        @keyword completed:     A flag which if True signals that the optimisation successfully completed.
        @type completed:        bool
        """

        # Execute the base class __init__() method.
        super(Disp_result_command, self).__init__(processor=processor, completed=completed)

        # Store the arguments (to be sent back to the master).
        self.memo_id = memo_id
        self.param_vector = param_vector
        self.chi2 = chi2
        self.iter_count = iter_count
        self.f_count = f_count
        self.g_count = g_count
        self.h_count = h_count
        self.warning = warning
        self.missing = missing
        self.back_calc = back_calc
        self.completed = completed

    def run(self, processor=None, memo=None):
        """Disassemble the dispersion optimisation results (on the master).

        @param processor:   Unused!
        @type processor:    None
        @param memo:        The dispersion memo.  This holds a lot of the data and objects needed for processing the results from the slave.
        @type memo:         memo
        """

        # Scaling.
        if memo.scaling_matrix != None:
            param_vector = dot(memo.scaling_matrix, self.param_vector)

        # Disassemble the parameter vector.
        disassemble_param_vector(param_vector=param_vector, spins=memo.spins, sim_index=memo.sim_index)
        param_conversion(spins=memo.spins, sim_index=memo.sim_index)

        # Monte Carlo minimisation statistics.
        if memo.sim_index != None:
            for spin in memo.spins:
                # Skip deselected spins.
                if not spin.select:
                    continue

                # Chi-squared statistic.
                spin.chi2_sim[memo.sim_index] = self.chi2

                # Iterations.
                spin.iter_sim[memo.sim_index] = self.iter_count

                # Function evaluations.
                spin.f_count_sim[memo.sim_index] = self.f_count

                # Gradient evaluations.
                spin.g_count_sim[memo.sim_index] = self.g_count

                # Hessian evaluations.
                spin.h_count_sim[memo.sim_index] = self.h_count

                # Warning.
                spin.warning_sim[memo.sim_index] = self.warning

        # Normal statistics.
        else:
            for spin in memo.spins:
                # Skip deselected spins.
                if not spin.select:
                    continue

                # Chi-squared statistic.
                spin.chi2 = self.chi2

                # Iterations.
                spin.iter = self.iter_count

                # Function evaluations.
                spin.f_count = self.f_count

                # Gradient evaluations.
                spin.g_count = self.g_count

                # Hessian evaluations.
                spin.h_count = self.h_count

                # Warning.
                spin.warning = self.warning

        # Store the back-calculated values.
        if memo.sim_index == None:
            # 1H MMQ flag.
            proton_mmq_flag = has_proton_mmq_cpmg()

            # Loop over each spin, packing the data.
            si = 0
            for spin_index in range(len(memo.spins)):
                # Skip deselected spins.
                if not memo.spins[spin_index].select:
                    continue

                # Pack the data.
                pack_back_calc_r2eff(spin=memo.spins[spin_index], spin_id=memo.spin_ids[spin_index], si=si, back_calc=self.back_calc, proton_mmq_flag=proton_mmq_flag)

                # Increment the spin index.
                si += 1
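
# Illustrative sketch (not part of the original module) of the unscaling step
# performed in Disp_result_command.run() above.  The optimiser operates on the
# scaled parameter vector, so multiplying by the diagonal scaling matrix
# recovers the parameters in their natural units before they are disassembled
# back into the spin containers.  The numbers below are invented for the
# example.
#
#     from numpy import array, diag, dot, float64
#
#     scaling_matrix = diag(array([1.0, 1.0, 10000.0], float64))    # e.g. scaling for r2, dw, kex.
#     scaled_vector = array([12.3, 1.7, 0.15], float64)             # As returned by the optimiser.
#     param_vector = dot(scaling_matrix, scaled_vector)             # -> [12.3, 1.7, 1500.0]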