mailr27868 - in /branches/frame_order_cleanup: ./ auto_analyses/relax_disp_repeat_cpmg.py


Others Months | Index by Date | Thread Index
>>   [Date Prev] [Date Next] [Thread Prev] [Thread Next]

Header


Content

Posted by edward on September 25, 2015 - 14:02:
Author: bugman
Date: Fri Sep 25 14:02:51 2015
New Revision: 27868

URL: http://svn.gna.org/viewcvs/relax?rev=27868&view=rev
Log:
Merged revisions 27834-27840 via svnmerge from 
svn+ssh://bugman@xxxxxxxxxxx/svn/relax/trunk

........
  r27834 | tlinnet | 2015-05-27 03:09:35 +0200 (Wed, 27 May 2015) | 8 lines
  
  Wrote a method to store parameter data and dispersion curves, for the 
protocol of repeated analysis.
  
  This is to prepare for analysis in other programs.
  The method loops through the data pipes, and writes the data out.
  
  It then writes a bash script that will concatenate the data in a matrix 
array style, for reading and processing in other programs.
  
  Task #7826 (https://gna.org/task/?7826): Write a Python class for the 
repeated analysis of dispersion data.
........
  r27835 | tlinnet | 2015-05-27 03:09:38 +0200 (Wed, 27 May 2015) | 3 lines
  
  Added the writing out of a collection script for chi2 and rate parameters.
  
  Task #7826 (https://gna.org/task/?7826): Write a Python class for the 
repeated analysis of dispersion data.
........
  r27836 | tlinnet | 2015-05-27 03:09:40 +0200 (Wed, 27 May 2015) | 3 lines
  
  The collection bash script now removes spins which have not been fitted.
  
  Task #7826 (https://gna.org/task/?7826): Write a Python class for the 
repeated analysis of dispersion data.
........
  r27837 | tlinnet | 2015-05-27 03:09:42 +0200 (Wed, 27 May 2015) | 3 lines
  
  Fix for use of " instead of ' in bash script.
  
  Task #7826 (https://gna.org/task/?7826): Write a Python class for the 
repeated analysis of dispersion data.
........
  r27838 | tlinnet | 2015-05-27 03:09:44 +0200 (Wed, 27 May 2015) | 3 lines
  
  Added an option to the minimise class function to perform Monte Carlo 
error analysis.
  
  Task #7826 (https://gna.org/task/?7826): Write a Python class for the 
repeated analysis of dispersion data.
........
  r27839 | tlinnet | 2015-05-27 03:09:46 +0200 (Wed, 27 May 2015) | 3 lines
  
  Added a printout when minimising Monte Carlo simulations.
  
  Task #7826 (https://gna.org/task/?7826): Write a Python class for the 
repeated analysis of dispersion data.
........
  r27840 | tlinnet | 2015-05-27 03:09:48 +0200 (Wed, 27 May 2015) | 3 lines
  
  Added a keyword for verbosity in multi-processor mode.
  
  Task #7826 (https://gna.org/task/?7826): Write a Python class for the 
repeated analysis of dispersion data.
........

Modified:
    branches/frame_order_cleanup/   (props changed)
    branches/frame_order_cleanup/auto_analyses/relax_disp_repeat_cpmg.py

Propchange: branches/frame_order_cleanup/
------------------------------------------------------------------------------
--- svnmerge-integrated (original)
+++ svnmerge-integrated Fri Sep 25 14:02:51 2015
@@ -1 +1 @@
-/trunk:1-27797,27800-27827
+/trunk:1-27797,27800-27840

Modified: branches/frame_order_cleanup/auto_analyses/relax_disp_repeat_cpmg.py
URL: 
http://svn.gna.org/viewcvs/relax/branches/frame_order_cleanup/auto_analyses/relax_disp_repeat_cpmg.py?rev=27868&r1=27867&r2=27868&view=diff
==============================================================================
--- branches/frame_order_cleanup/auto_analyses/relax_disp_repeat_cpmg.py      
  (original)
+++ branches/frame_order_cleanup/auto_analyses/relax_disp_repeat_cpmg.py      
  Fri Sep 25 14:02:51 2015
@@ -32,10 +32,11 @@
 from copy import deepcopy
 from datetime import datetime
 from glob import glob
-from os import F_OK, access, getcwd, sep
+from os import F_OK, access, chmod, getcwd, sep
 from numpy import any, asarray, arange, concatenate, max, mean, min, sqrt, 
std, sum
 if dep_check.scipy_module:
     from scipy.stats import pearsonr
+from stat import S_IRWXU, S_IRGRP, S_IROTH
 import sys
 from warnings import warn
 
@@ -48,7 +49,7 @@
 from pipe_control.mol_res_spin import display_spin, generate_spin_string, 
return_spin, spin_loop
 from pipe_control import pipes
 from prompt.interpreter import Interpreter
-from specific_analyses.relax_disp.data import generate_r20_key, 
has_exponential_exp_type, is_r1_optimised, loop_exp_frq_offset, 
loop_exp_frq_offset_point, return_param_key_from_data
+from specific_analyses.relax_disp.data import generate_r20_key, 
has_exponential_exp_type, has_cpmg_exp_type, is_r1_optimised, 
loop_exp_frq_offset, loop_exp_frq_offset_point, return_param_key_from_data
 from status import Status; status = Status()
 
 if dep_check.matplotlib_module:
@@ -780,7 +781,7 @@
             print("Clustered spins are:", cdp.clustering)
 
 
-    def minimise_execute(self, verbosity=1, methods=None, model=None, 
model_from=None, analysis=None, analysis_from=None, list_glob_ini=None, 
force=False):
+    def minimise_execute(self, verbosity=1, methods=None, model=None, 
model_from=None, analysis=None, analysis_from=None, list_glob_ini=None, 
force=False, mc_err_analysis=False, mp_verbosity=0):
         """Use value.set on all pipes."""
 
         # Set default
@@ -821,9 +822,15 @@
 
                 # Print
                 subtitle(file=sys.stdout, text="Minimise for pipe='%s'" % 
(pipe_name), prespace=3)
+                if hasattr(cdp, "sim_number"):
+                    subsection(file=sys.stdout, text="Performing Monte-Carlo 
minimisations on %i simulations"%(getattr(cdp, "sim_number")), prespace=0)
 
                 # Do the minimisation.
-                self.interpreter.minimise.execute(min_algor=self.min_algor, 
func_tol=self.opt_func_tol, max_iter=self.opt_max_iterations, 
constraints=self.constraints, scaling=True, verbosity=verbosity)
+                self.interpreter.minimise.execute(min_algor=self.min_algor, 
func_tol=self.opt_func_tol, max_iter=self.opt_max_iterations, 
constraints=self.constraints, scaling=True, verbosity=verbosity, 
mp_verbosity=mp_verbosity)
+
+                # Do Monte-Carlo error analysis
+                if mc_err_analysis:
+                    self.interpreter.monte_carlo.error_analysis()
 
                 # Save results, and store the current settings dic to pipe.
                 cdp.settings = self.settings
@@ -2665,6 +2672,208 @@
             plt.show()
 
 
+    def write_results(self, method=None, model=None, analysis=None, 
list_glob_ini=None, selection=None, write_disp=True):
+
+        for glob_ini in list_glob_ini:
+            # Check previous, and get the pipe name.
+            found, pipe_name, resfile, path = 
self.check_previous_result(method=method, model=model, analysis=analysis, 
glob_ini=glob_ini, bundle=method)
+
+            if pipes.cdp_name() != pipe_name:
+                self.interpreter.pipe.switch(pipe_name)
+
+            # Printout.
+            section(file=sys.stdout, text="Results writing for 
pipe='%s"%(pipe_name), prespace=2, postspace=0)
+            model_params = MODEL_PARAMS[model]
+            subsection(file=sys.stdout, text="Model %s, with 
params='%s"%(model, model_params), prespace=0)
+
+            # Set path
+            model_path = model.replace(" ", "_")
+            analysis_path = analysis.replace(" ", "_")
+            path = self.results_dir+sep+model_path+sep+analysis_path
+
+            # Dispersion curves.
+            if write_disp:
+                path_disp = 
path+sep+"disp_curves"+sep+method+sep+str(glob_ini)
+                self.interpreter.relax_disp.plot_disp_curves(dir=path_disp, 
force=True)
+                self.interpreter.relax_disp.write_disp_curves(dir=path_disp, 
force=True)
+
+            # The selected models for the final run.
+            self.interpreter.value.write(param='model', file='model.out', 
dir=path, force=True)
+
+            models_tested = None
+
+            # For CPMG models.
+            filep = str(glob_ini)+"_"+method+"_"
+            path_par = path+sep+"r2"
+            if has_cpmg_exp_type():
+                # The R20 parameter.
+                self.write_results_test(path=path_par, model=model, 
models_tested=models_tested, param='r2', file_name_ini=filep+'r20')
+
+                # The R20A and R20B parameters.
+                self.write_results_test(path=path_par, model=model, 
models_tested=models_tested, param='r2a', file_name_ini=filep+'r20a')
+                self.write_results_test(path=path_par, model=model, 
models_tested=models_tested, param='r2b', file_name_ini=filep+'r20b')
+
+            # The pA and pB parameters.
+            path_par = path+sep+"pop"
+            search = method+"_"+"pA"
+            self.write_results_test(path=path_par, model=model, 
models_tested=models_tested, search=search, param='pA', 
file_name_ini=filep+'pA')
+            self.write_results_test(path=path_par, model=model, 
models_tested=models_tested, param='pB', file_name_ini=filep+'pB')
+
+            # The dw parameter.
+            path_par = path+sep+"dw"
+            search = method+"_"+"dw"
+            self.write_results_test(path=path_par, model=model, 
models_tested=models_tested, search=search, param='dw', 
file_name_ini=filep+'dw')
+
+            # The k_AB, kex and tex parameters.
+            path_par = path+sep+"rate"
+            params = ['k_AB', 'kex', 'tex']
+            for param in params:
+                search = method+"_"+param
+                self.write_results_test(path=path_par, model=model, 
models_tested=models_tested, search=search, param=param, 
file_name_ini=filep+param)
+
+            # Minimisation statistics.
+            if not (model == MODEL_R2EFF and has_fixed_time_exp_type()):
+                path_par = path+sep+"chi2"
+                self.interpreter.value.write(param='chi2', 
file=filep+'chi2.out', dir=path_par, force=True)
+                search = method+"_"+"chi2"
+                col_file_name="collect_%s.sh"%search
+                self.write_convert_file(file_name=col_file_name, 
path=path_par, search=search)
+
+                self.interpreter.grace.write(y_data_type='chi2', 
file='chi2.agr', dir=path_par+sep+"grace", force=True)
+
+
+    def write_results_test(self, path=None, model=None, models_tested=None, 
search=None, param=None, file_name_ini=None):
+        """Create a set of results, text and Grace files for the current 
data pipe.
+
+        @keyword path:              The directory to place the files into.
+        @type path:                 str
+        @keyword model:             The model tested.
+        @type model:                None or str
+        @keyword model_tested:      List of models tested, if the pipe is 
final.
+        @type model_tested:         None or list of str.
+        @keyword param:             The param to write out.
+        @type param:                None or list of str.
+        @keyword file_name_ini:     The initial part of the file name for 
the grace and text files.
+        @type file_name_ini:        None or str.
+        """
+
+        # If not set, use the name of the parameter.
+        if file_name_ini == None:
+            file_name_ini = param
+
+        # If the model is in the list of models which support the parameter.
+        write_result = False
+        if model != None:
+            # Get the model params.
+            model_params = MODEL_PARAMS[model]
+
+            if param in model_params:
+                write_result = True
+
+        # If this is the final pipe, then check if the model has been tested 
at any time.
+        elif model == None:
+            # Loop through all tested models.
+            for model_tested in models_tested:
+                # If one of the models tested has a parameter which belong 
in the list of models which support the parameter, then write it out.
+                model_params = MODEL_PARAMS[model_tested]
+
+                if param in model_params:
+                    write_result = True
+                    break
+
+        # Write results if some of the models supports the parameter.
+        if write_result:
+            self.interpreter.value.write(param=param, 
file='%s.out'%file_name_ini, dir=path, force=True)
+            # Write convert file
+            if search != None:
+                col_file_name="collect_%s.sh"%search
+                self.write_convert_file(file_name=col_file_name, path=path, 
search=search)
+
+            # Write grace
+            self.interpreter.grace.write(x_data_type='res_num', 
y_data_type=param, file='%s.agr'%file_name_ini, dir=path+sep+"grace", 
force=True)
+
+
+    def write_convert_file(self, file_name=None, path=None, search=None):
+        file_obj, file_path = open_write_file(file_name=file_name, dir=path, 
force=True, compress_type=0, verbosity=1, return_path=True)
+
+        # Write file
+        file_obj.write('#! /bin/bash' + '\n')
+        file_obj.write('SEARCH=%s'%(search) + '\n')
+        file_obj.write('FILES=(*_${SEARCH}.out)' + '\n')
+        file_obj.write('readarray -t FILESSORT < <(for a in "${FILES[@]}"; 
do echo "$a"; done | sort -Vr)' + '\n')
+        file_obj.write('# Skip the first two lines of header' + '\n')
+        file_obj.write("tail -n+3 ${FILESSORT[0]} | sed 's,^# ,,' | grep -v 
'None                    None' | awk '{print $2,$3,$5}' | column -t > 
collect_${SEARCH}.tmp" + '\n')
+        file_obj.write('# Make array' + '\n')
+        file_obj.write('ACUT=(collect_${SEARCH}.tmp)' + '\n')
+        file_obj.write('for f in "${FILESSORT[@]}"; do' + '\n')
+        file_obj.write('    FNAME="${f%.*}"' + '\n')
+        file_obj.write('    NI=`echo $f | cut -d"_" -f1`' + '\n')
+        file_obj.write('    echo "Processing $f with NI=$NI"' + '\n')
+        file_obj.write('    tail -n+3 $f | sed "s,^# ,," | grep -v "None     
               None" | sed "s,value,${NI}," | sed "s,error,${NI}," | awk 
%s{print $6,$7}%s | column -t > ${FNAME}.tmp'%("'","'") + '\n')
+        file_obj.write('    ACUT+=(${FNAME}.tmp)' + '\n')
+        file_obj.write('done' + '\n')
+        file_obj.write('paste "${ACUT[@]}" | column -t > 
collect_${SEARCH}.txt' + '\n')
+        file_obj.write('rm ${ACUT[@]}' + '\n')
+
+
+        # Close the batch script, then make it executable (expanding any ~ 
characters).
+        file_obj.close()
+
+        chmod(file_path, S_IRWXU|S_IRGRP|S_IROTH)
+
+
+    def create_mc_data(self, number=500, distribution="measured", 
fixed_error=None, methods=None, model=None, model_from=None, analysis=None, 
analysis_from=None, list_glob_ini=None, force=False):
+        """Create MC data."""
+
+        # Set default
+        if model_from == None:
+            model_from = model
+        if analysis_from == None:
+            analysis_from = analysis
+
+        # Loop over the methods.
+        for method in methods:
+            # Change the self key.
+            self.set_self(key='method', value=method)
+
+            # Loop over the glob ini:
+            for glob_ini in list_glob_ini:
+                # Check previous, and get the pipe name.
+                found_pipe, pipe_name, resfile, path = 
self.check_previous_result(method=self.method, model=model, 
analysis=analysis, glob_ini=glob_ini, bundle=self.method)
+
+                # Try from analysis
+                if not found_pipe:
+                    # Check previous, and get the pipe name.
+                    found_analysis, pipe_name, resfile, path = 
self.check_previous_result(method=self.method, model=model, 
analysis=analysis_from, glob_ini=glob_ini, bundle=self.method)
+
+                # Print
+                subtitle(file=sys.stdout, text="MC data for pipe='%s'" % 
(pipe_name), prespace=3)
+
+                # Select the model.
+                self.interpreter.relax_disp.select_model(model)
+
+                # Create data
+                self.interpreter.monte_carlo.setup(number=number)
+                
self.interpreter.monte_carlo.create_data(distribution=distribution, 
fixed_error=fixed_error)
+                self.interpreter.monte_carlo.initial_values()
+
+                # Save results, and store the current settings dic to pipe.
+                cdp.settings = self.settings
+
+                # Define new pipe names.
+                pipe_name = self.name_pipe(method=self.method, model=model, 
analysis=analysis, glob_ini=glob_ini)
+                resfile = pipe_name.replace(" ", "_")
+                model_path = model.replace(" ", "_")
+                path = self.results_dir+sep+model_path
+
+                if found_pipe and not force:
+                    file_path = get_file_path(file_name=resfile, dir=path)
+                    text = "The file '%s' already exists.  Set the force 
flag to True to overwrite." % (file_path)
+                    warn(RelaxWarning(text))
+                else:
+                    self.interpreter.results.write(file=resfile, dir=path, 
force=force)
+
+
     def interpreter_start(self):
         # Load the interpreter.
         self.interpreter = Interpreter(show_script=False, 
raise_relax_error=True)




Related Messages


Powered by MHonArc, Updated Fri Sep 25 14:20:25 2015