mailr25290 - /trunk/test_suite/shared_data/curve_fitting/profiling/profiling_relax_fit.py



Posted by tlinnet on August 26, 2014 - 14:37:
Author: tlinnet
Date: Tue Aug 26 14:37:30 2014
New Revision: 25290

URL: http://svn.gna.org/viewcvs/relax?rev=25290&view=rev
Log:
Modified profiling script to use the new estimate R2eff module.

Task #7822 (https://gna.org/task/index.php?7822): Implement user function to estimate R2eff and associated errors for exponential curve fitting.
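
For reference, the model being profiled here is the two-parameter exponential decay I(t) = I0 * exp(-R2eff * t), scored against the measured peak intensities by an error-weighted chi-squared. The sketch below is a minimal NumPy-only illustration of that model and target function; the names exp_model and chi2 are illustrative and are not the relax API.

    # Minimal sketch of the exponential model and the error-weighted
    # chi-squared target (illustrative names, not the relax API).
    from numpy import asarray, exp

    def exp_model(params, times):
        # Back-calculate intensities: I(t) = i0 * exp(-r2eff * t).
        r2eff, i0 = params
        return i0 * exp(-r2eff * asarray(times))

    def chi2(params, times, values, errors):
        # Sum of squared, error-weighted residuals.
        back_calc = exp_model(params, times)
        return (((asarray(values) - back_calc) / asarray(errors))**2).sum()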

Modified:
    trunk/test_suite/shared_data/curve_fitting/profiling/profiling_relax_fit.py

Modified: trunk/test_suite/shared_data/curve_fitting/profiling/profiling_relax_fit.py
URL: http://svn.gna.org/viewcvs/relax/trunk/test_suite/shared_data/curve_fitting/profiling/profiling_relax_fit.py?rev=25290&r1=25289&r2=25290&view=diff
==============================================================================
--- trunk/test_suite/shared_data/curve_fitting/profiling/profiling_relax_fit.py (original)
+++ trunk/test_suite/shared_data/curve_fitting/profiling/profiling_relax_fit.py Tue Aug 26 14:37:30 2014
@@ -56,9 +56,9 @@
 
 # relax module imports.
 from status import Status; status = Status()
+from specific_analyses.relax_disp.estimate_r2eff import minimise_leastsq, Exp
 from target_functions.relax_fit import setup, func, dfunc, d2func, back_calc_I
 from target_functions.chi2 import chi2_rankN
-from target_functions.relax_disp_curve_fit import Exponential
 
 
 # Alter setup.
@@ -99,7 +99,7 @@
             print("C_cT=%1.1e C_cF=%1.1e P_cT=%1.1e P_cF=%1.1e" % 
(chi_v_cT-chi_v_cT, chi_v_cF-chi_v_cT, chi_v_pyt_cT-chi_v_cT, 
chi_v_pyt_cF-chi_v_cT) )
 
     # Do verification for Python code, and difference between minfx and 
Scipy optimisation without constraints.
-    if True:
+    if False:
         # Calculate with Python code.
         # Calculate without contraints.
         v_pyt_cF_chi2_list = array(verify_pyt(constraints=False))
@@ -198,7 +198,7 @@
         if verbose:
             v_cF_stats.print_stats(lines_report)
 
-    if True:
+    if False:
         #################
         print("Verify, without constraints, Python")
         constraints = False
@@ -225,7 +225,7 @@
             v_pyt_cF_stats.print_stats(lines_report)
 
 
-    if True:
+    if False:
         #################
         print("Verify, without constraints, Python Scipy")
 
@@ -287,36 +287,6 @@
         self.times_arr = load(self.data_path + "times_arr.npy")
         self.struct_arr = load(self.data_path + "struct_arr.npy")
 
-        #param_vector = array([ 0.,  0.])
-        self.param_vector = array([  8.800000000000001e+00,   2.000000000800000e+05])
-        self.scaling_list = [1.0, 1.0]
-        self.scaling_matrix = array( [ [self.scaling_list[0], 0], [0, self.scaling_list[1]] ])
-
-        self.func_tol = 1e-25
-        self.grad_tol = None
-        #self.max_iterations = 10000000
-        self.max_iterations = 10000000
-        self.verbosity = 0
-
-    def set_options(self, constraints=None):
-        # Define which constraints should be used.
-        self.constraints = constraints
-
-        if self.constraints:
-            self.min_algor = 'Log barrier'
-            self.min_options = ('simplex',)
-        self.A = array([ [ 1.,  0.],
-                        [-1.,  0.],
-                        [ 0.,  1.]] )
-            self.b = array([   0., -200.,    0.])
-
-        else:
-            self.min_algor = 'simplex'
-            self.min_options = ()
-            self.A = None
-            self.b = None
-
-
     def loop_data(self):
         """Generator that yields data from array."""
 
@@ -342,17 +312,39 @@
     # Instantiate class.
     C = Profile()
 
-    # Set the minimising options.
-    C.set_options(constraints=constraints)
+    # Instantiate class.
+    E = Exp(verbosity=0)
 
     # List to store chi2.
     chi2_list = []
 
     for values, errors, times, struct, num_times in C.loop_data():
         # Initialise the function to minimise.
-        setup(num_params=len(C.param_vector), num_times=num_times, values=values, sd=errors, relax_times=times, scaling_matrix=C.scaling_list)
-
-        results = generic_minimise(func=func, dfunc=dfunc, d2func=d2func, args=(), x0=C.param_vector, min_algor=C.min_algor, min_options=C.min_options, func_tol=C.func_tol, grad_tol=C.grad_tol, maxiter=C.max_iterations, A=C.A, b=C.b, full_output=True, print_flag=C.verbosity)
+        E.setup_data(values=values, errors=errors, times=times)
+
+        # Initial guess for minimisation. Solved by linear least squares.
+        x0 = asarray( E.estimate_x0_exp() )
+
+        # Set the min_algor.
+        # Simplex is an algorithm that uses no gradients. It is quite slow, since it needs to take many steps.
+        min_algor='simplex'
+
+        # Steepest descent uses only the gradient. This works, but it is not totally precise.
+        #min_algor = 'Steepest descent'
+        #max_iterations = 1000
+
+        # Quasi-Newton BFGS. Uses only the gradient.
+        # This gets the same results as 2000 Monte-Carlo with simplex.
+        # This is one of the best optimisation techniques when only the function and gradient are present, as it numerically approximates the Hessian matrix, updating it as the algorithm moves along.
+        min_algor = 'BFGS'
+
+        E.set_settings_minfx(min_algor=min_algor, constraints=constraints)
+
+        # Initialise the function to minimise.
+        scaling_list = [1.0, 1.0]
+        setup(num_params=len(x0), num_times=len(E.times), values=E.values, sd=E.errors, relax_times=E.times, scaling_matrix=scaling_list)
+
+        results = generic_minimise(func=func, dfunc=dfunc, d2func=d2func, args=(), x0=x0, min_algor=E.min_algor, min_options=E.min_options, func_tol=E.func_tol, grad_tol=E.grad_tol, maxiter=E.max_iterations, A=E.A, b=E.b, full_output=True, print_flag=E.verbosity)
 
         # Unpack
         param_vector, chi2, iter_count, f_count, g_count, h_count, warning = results
@@ -367,17 +359,39 @@
     # Instantiate class.
     C = Profile()
 
-    # Set the minimising options.
-    C.set_options(constraints=constraints)
+    # Instantiate class.
+    E = Exp(verbosity=0)
 
     # List to store chi2.
     chi2_list = []
 
     for values, errors, times, struct, num_times in C.loop_data():
         # Initialise the function to minimise.
-        exp_class = Exponential(num_params=len(C.param_vector), num_times=num_times, values=values, sd=errors, relax_times=times, scaling_matrix=C.scaling_matrix)
-
-        results = generic_minimise(func=exp_class.func_exp, args=(), x0=C.param_vector, min_algor=C.min_algor, min_options=C.min_options, func_tol=C.func_tol, grad_tol=C.grad_tol, maxiter=C.max_iterations, A=C.A, b=C.b, full_output=True, print_flag=C.verbosity)
+        E.setup_data(values=values, errors=errors, times=times)
+
+        # Initial guess for minimisation. Solved by linear least squares.
+        x0 = asarray( E.estimate_x0_exp() )
+
+        # Set the min_algor.
+        # Simplex is an algorithm that uses no gradients. It is quite slow, since it needs to take many steps.
+        min_algor='simplex'
+
+        # Steepest descent uses only the gradient. This works, but it is not totally precise.
+        #min_algor = 'Steepest descent'
+        #max_iterations = 1000
+
+        # Quasi-Newton BFGS. Uses only the gradient.
+        # This gets the same results as 2000 Monte-Carlo with simplex.
+        # This is one of the best optimisation techniques when only the function and gradient are present, as it numerically approximates the Hessian matrix, updating it as the algorithm moves along.
+        #min_algor = 'BFGS'
+
+        E.set_settings_minfx(min_algor=min_algor, constraints=constraints)
+
+        # Define func.
+        func = E.func_exp
+        dfunc = E.func_exp_grad
+
+        results = generic_minimise(func=func, dfunc=dfunc, d2func=d2func, args=(), x0=x0, min_algor=E.min_algor, min_options=E.min_options, func_tol=E.func_tol, grad_tol=E.grad_tol, maxiter=E.max_iterations, A=E.A, b=E.b, full_output=True, print_flag=E.verbosity)
 
         # Unpack the results.
         param_vector, chi2, iter_count, f_count, g_count, h_count, warning = results
@@ -392,125 +406,29 @@
     # Instantiate class.
     C = Profile()
 
+    # Instantiate class.
+    E = Exp(verbosity=0)
+
     # List to store chi2.
     chi2_list = []
 
     for values, errors, times, struct, num_times in C.loop_data():
         # Initialise the function to minimise.
-        exp_class = Exponential()
-
-        # Do optimisation with scipy.optimize.curve_fit
-        # sigma : None or M-length sequence, optional. If not None, these values are used as weights in the least-squares problem.
-        # absolute_sigma : bool, optional. If False, sigma denotes relative weights of the data points.
-        # The returned covariance matrix pcov is based on estimated errors in the data, and is not affected by the overall magnitude of the values in sigma.
-        # Only the relative magnitudes of the sigma values matter.
-        # If True, sigma describes one standard deviation errors of the input data points. The estimated covariance in pcov is based on these values.
-
-        #Returns:
-        # popt : array
-        # Optimal values for the parameters so that the sum of the squared error of f(xdata, *popt) - ydata is minimized
-        # pcov : 2d array
-        # The estimated covariance of popt. The diagonals provide the variance of the parameter estimate.
-        # To compute one standard deviation errors on the parameters use perr = np.sqrt(np.diag(pcov)).
-        # How the sigma parameter affects the estimated covariance depends on absolute_sigma argument, as described above.
-
-        # With curve_fit. This is just a wrapper to non-linear least squares. This function though lacks input to tolerance.
-        #, sigma=None, absolute_sigma=False
-        # sigma: If not None, these values are used as weights in the least-squares problem.
-        #  if absolute_sigma=True, 'sigma' describes one standard deviation errors of the input data points. The estimated covariance in 'pcov' is based on these values.
-        #popt, pcov, infodict, errmsg, ier = curve_fit(f=exp_class.calc_exp, xdata=times, ydata=values, p0=C.param_vector, full_output=True)
-
-        # 'xtol': Relative error desired in the approximate solution.
-        # 'ftol': Relative error desired in the sum of squares.
-        #xtol = 1.49012e-08
-        xtol = 1e-15
-        ftol = xtol
-
-        # 'factor': float, optional. A parameter determining the initial step bound (``factor * || diag * x||``).  Should be in the interval ``(0.1, 100)``.
-        factor= 100
-
-        # With leastsq directly.
-        # Define function to minimise.
-        use_weights = True
-        # If 'sigma'/erros describes one standard deviation errors of the input data points. The estimated covariance in 'pcov' is based on these values.
-        absolute_sigma = True
-
-        if use_weights:
-            func = exp_class.func_exp_weighted_general
-            weights = 1.0 / asarray(errors)
-            args=(times, values, weights )
-        else:
-            func = exp_class.func_exp_general
-            args=(times, values)
-
-        popt, pcov, infodict, errmsg, ier = leastsq(func=func, x0=C.param_vector, args=args, full_output=True, ftol=ftol, xtol=xtol, maxfev=C.max_iterations, factor=factor)
-
-        ## Unpack the results.
-        #param_vector, chi2, iter_count, f_count, g_count, h_count, warning = results
-
-        param_vector = popt
-
-        # Catch errors:
-        if ier in [1, 2, 3, 4]:
-            warning = None
-        elif ier in [4]:
-            warnings.warn(errmsg, RuntimeWarning)
-            warning = errmsg
-        elif ier in [0, 5, 6, 7, 8]:
-            raise RuntimeWarning(errmsg)
-
-        # 'nfev' number of function calls.
-        f_count = infodict['nfev']
-        g_count = 0
-        h_count = 0
-
-        # 'fvec': function evaluated at the output.
-        fvec = infodict['fvec']
-        #fvec_test = func(popt, times, values)
-
-        # 'fjac': A permutation of the R matrix of a QR factorization of the final approximate Jacobian matrix, stored column wise. Together with ipvt, the covariance of the estimate can be approximated.
-        # fjac = infodict['fjac']
-
-        # 'qtf': The vector (transpose(q) * fvec).
-        # qtf = infodict['qtf']
-
-        # 'ipvt'  An integer array of length N which defines a permutation matrix, p, such that fjac*p = q*r, where r is upper triangular
-        # with diagonal elements of nonincreasing magnitude. Column j of p is column ipvt(j) of the identity matrix.
-
-        # Back calc fitted curve.
-        back_calc = exp_class.calc_exp(times=times, r2eff=popt[0], i0=popt[1])
-
-        # Calculate chi2.
-        chi2 = chi2_rankN(data=values, back_calc_vals=back_calc, errors=errors)
+        E.setup_data(values=values, errors=errors, times=times)
+
+        results = minimise_leastsq(E=E)
+
+        # Unpack results
+        param_vector, param_vector_error, chi2, iter_count, f_count, g_count, h_count, warning = results
+
+        if E.verbosity:
+            r2eff = param_vector[0]
+            i0 = param_vector[1]
+            r2eff_err = param_vector_error[0]
+            i0_err = param_vector_error[1]
+            print("r2eff=%3.3f +/- %3.3f , i0=%3.3f +/- %3.3f" % (r2eff, 
r2eff_err, i0, i0_err) )
 
         chi2_list.append(chi2)
-
-        # 'pcov': The estimated covariance of popt.
-        # The diagonals provide the variance of the parameter estimate.
-        ydata = values
-        p0 = C.param_vector
-
-        if pcov is None:
-            # indeterminate covariance
-            pcov = zeros((len(popt), len(popt)), dtype=float)
-            pcov.fill(inf)
-        elif not absolute_sigma:
-            if len(ydata) > len(p0):
-                s_sq = sum( fvec**2 ) / (len(values) - len(p0))
-                pcov = pcov * s_sq
-            else:
-                pcov.fill(inf)
-
-        # To compute one standard deviation errors on the parameters, take the square root of the diagonal.
-        perr = sqrt(diag(pcov))
-
-        if print_info:
-            r2eff = popt[0]
-            i0 = popt[1]
-            r2eff_err = perr[0]
-            i0_err = perr[1]
-            print("r2eff=%3.3f +/- %3.3f , i0=%3.3f +/- %3.3f" % (r2eff, 
r2eff_err, i0, i0_err) )
-
 
     return chi2_list
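
The new estimate_r2eff code paths referenced above combine two ideas that the diff only hints at: an initial guess "solved by linear least squares" (E.estimate_x0_exp) and, for verify_sci, a scipy.optimize.leastsq fit whose parameter errors come from the square root of the covariance diagonal, as the removed inline comments describe. The sketch below shows one way those two steps can be realised with plain NumPy/SciPy; the helper names estimate_x0, weighted_residuals and fit_with_errors are illustrative stand-ins, not the functions defined in specific_analyses.relax_disp.estimate_r2eff, and the leastsq tolerances are left at their SciPy defaults rather than the script's 1e-15.

    # Minimal sketch (illustrative stand-ins, not the estimate_r2eff module itself).
    from numpy import asarray, diag, exp, inf, log, ones_like, sqrt, vstack
    from numpy.linalg import lstsq
    from scipy.optimize import leastsq

    def estimate_x0(times, values):
        # Log-linearise I(t) = i0 * exp(-r2eff * t) to ln I = ln i0 - r2eff * t
        # and solve the linear least-squares problem for [ln i0, r2eff].
        t = asarray(times, dtype=float)
        y = log(asarray(values, dtype=float))
        A = vstack([ones_like(t), -t]).T
        (ln_i0, r2eff), _, _, _ = lstsq(A, y, rcond=None)
        return [r2eff, exp(ln_i0)]

    def weighted_residuals(params, times, values, errors):
        # Residuals divided by the measurement errors, for weighted least squares.
        r2eff, i0 = params
        return (asarray(values) - i0 * exp(-r2eff * asarray(times))) / asarray(errors)

    def fit_with_errors(times, values, errors):
        # Non-linear least squares started from the linear initial guess.
        x0 = estimate_x0(times, values)
        popt, pcov, infodict, errmsg, ier = leastsq(func=weighted_residuals, x0=x0, args=(times, values, errors), full_output=True)
        # With error-weighted residuals, the square root of the covariance
        # diagonal gives the one-standard-deviation parameter errors.
        perr = [inf, inf] if pcov is None else sqrt(diag(pcov))
        return popt, perr

Under these assumptions, fit_with_errors(times, values, errors) returns a (parameters, errors) pair of the same shape as the param_vector and param_vector_error that verify_sci now unpacks from minimise_leastsq(E=E).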
 



