mailr10133 - in /branches/multi_processor_merge/specific_fns/model_free: mf_minimise.py multi_processor_commands.py



Posted by edward on January 07, 2010 - 15:58:
Author: bugman
Date: Thu Jan  7 15:58:38 2010
New Revision: 10133

URL: http://svn.gna.org/viewcvs/relax?rev=10133&view=rev
Log:
Some clean ups of the model-free code for multi-processing.


Modified:
    branches/multi_processor_merge/specific_fns/model_free/mf_minimise.py
    branches/multi_processor_merge/specific_fns/model_free/multi_processor_commands.py

Modified: branches/multi_processor_merge/specific_fns/model_free/mf_minimise.py
URL: http://svn.gna.org/viewcvs/relax/branches/multi_processor_merge/specific_fns/model_free/mf_minimise.py?rev=10133&r1=10132&r2=10133&view=diff
==============================================================================
--- branches/multi_processor_merge/specific_fns/model_free/mf_minimise.py (original)
+++ branches/multi_processor_merge/specific_fns/model_free/mf_minimise.py Thu Jan  7 15:58:38 2010
@@ -1544,8 +1544,6 @@
             relax_data, relax_error, equations, param_types, param_values, r, csa, num_frq, frq, num_ri, remap_table, noe_r1_table, ri_labels, gx, gh, num_params, xh_unit_vectors, diff_type, diff_params = self._minimise_data_setup(model_type, min_algor, num_data_sets, min_options, spin=spin, sim_index=sim_index)
 
 
-            #test.assert_mf_equivalent(self.mf)
-            ##self.mf=test.mf
             # Setup the minimisation algorithm when constraints are present.
             ################################################################
 
@@ -1577,6 +1575,12 @@
             # Back-calculation.
             ###################
 
+            if min_algor == 'back_calc':
+                # Initialise the model-free class.
+                self.mf = Mf(init_params=param_vector, model_type=model_type, diff_type=diff_type, diff_params=diff_params, scaling_matrix=scaling_matrix, num_spins=num_spins, equations=equations, param_types=param_types, param_values=param_values, relax_data=relax_data, errors=relax_error, bond_length=r, csa=csa, num_frq=num_frq, frq=frq, num_ri=num_ri, remap_table=remap_table, noe_r1_table=noe_r1_table, ri_labels=ri_labels, gx=gx, gh=gh, h_bar=h_bar, mu0=mu0, num_params=num_params, vectors=xh_unit_vectors)
+
+                # Return the back-calculated Ri data.
+                return self.mf.calc_ri()
 
 
             # Minimisation.
@@ -1587,11 +1591,7 @@
             processor = processor_box.processor
 
             # Parallelised grid search for the diffusion parameter space.
-            #FIXME??? strange contraints
             if match('^[Gg]rid', min_algor) and model_type == 'diff' :
-                # Determine the number of processors.
-                processors = processor.processor_size()
-
                 # Split up the grid into chunks for each processor.
                 full_grid_info = Grid_info(min_options)
                 sub_grid_list = full_grid_info.sub_divide(processor.processor_size())
@@ -1626,20 +1626,12 @@
             else:
                 # Minimisation initialisation.
                 command = MF_minimise_command()
+
+                # Set up the model-free data.
                 command.set_mf(init_params=param_vector, model_type=model_type, diff_type=diff_type, diff_params=diff_params, scaling_matrix=scaling_matrix, num_spins=num_spins, equations=equations, param_types=param_types, param_values=param_values, relax_data=relax_data, errors=relax_error, bond_length=r, csa=csa, num_frq=num_frq, frq=frq, num_ri=num_ri, remap_table=remap_table, noe_r1_table=noe_r1_table, ri_labels=ri_labels, gx=gx, gh=gh, h_bar=h_bar, mu0=mu0, num_params=num_params, vectors=xh_unit_vectors)
 
-                # Back calculation.
-                #FIXME could be neater?
-                if min_algor == 'back_calc':
-                    return command.build_mf().calc_ri()
-
-                # Constrained optimisation.
-                if constraints:
-                    command.set_minimise(args=(), x0=param_vector, min_algor=min_algor, min_options=min_options, func_tol=func_tol, grad_tol=grad_tol, maxiter=max_iterations, A=A, b=b, spin_id=spin_id, sim_index=sim_index, full_output=True, print_flag=verbosity)
-
-                # Unconstrained optimisation.
-                else:
-                    command.set_minimise(args=(), x0=param_vector, min_algor=min_algor, min_options=min_options, func_tol=func_tol, grad_tol=grad_tol, maxiter=max_iterations, spin_id=spin_id, sim_index=sim_index, full_output=True, print_flag=verbosity)
+                # Set up for optimisation.
+                command.set_minimise(args=(), x0=param_vector, min_algor=min_algor, min_options=min_options, func_tol=func_tol, grad_tol=grad_tol, maxiter=max_iterations, A=A, b=b, spin_id=spin_id, sim_index=sim_index, full_output=True, print_flag=verbosity)
 
                 # Set up the model-free memo and add it to the processor queue.
                 memo = MF_memo(model_free=self, spin=spin, sim_index=sim_index, model_type=model_type, scaling=scaling, scaling_matrix=scaling_matrix)

Modified: branches/multi_processor_merge/specific_fns/model_free/multi_processor_commands.py
URL: http://svn.gna.org/viewcvs/relax/branches/multi_processor_merge/specific_fns/model_free/multi_processor_commands.py?rev=10133&r1=10132&r2=10133&view=diff
==============================================================================
--- branches/multi_processor_merge/specific_fns/model_free/multi_processor_commands.py (original)
+++ branches/multi_processor_merge/specific_fns/model_free/multi_processor_commands.py Thu Jan  7 15:58:38 2010
@@ -33,19 +33,6 @@
 from maths_fns.mf import Mf
 from minfx.generic import generic_minimise
 from multi.processor import Capturing_exception, Memo, Result_command, Result_string, Slave_command
-
-
-OFFSET_XK = 0      # The array of minimised parameter values
-OFFSET_FK = 1      # The minimised function value,
-OFFSET_K = 2       # The number of iterations,
-OFFSET_F_COUNT = 3 # The number of function calls,
-OFFSET_G_COUNT = 4 # The number of gradient calls,
-OFFSET_H_COUNT = 5 # The number of Hessian calls,
-OFFSET_WARNING = 6 # The warning string.
-
-OFFSET_SHORT_MIN_PARAMS = 0
-OFFSET_SHORT_FK = 1
-OFFSET_SHORT_K = 2
 
 
 class MF_grid_memo(Memo):
@@ -128,13 +115,14 @@
                     print(print_prefix + "Parameter values: " + repr(sgm.short_results))
                 print("")
 
-            m_f = sgm.model_free
-            m_f.iter_count = 0
-            m_f.f_count = 0
-            m_f.g_count = 0
-            m_f.h_count = 0
-            #raise Exception()
-            m_f.disassemble_result(param_vector=sgm.xk, func=sgm.fk, iter=sgm.k, fc=sgm.f_count, gc=sgm.g_count, hc=sgm.h_count, warning=sgm.warning, spin=sgm.spin, sim_index=sgm.sim_index, model_type=sgm.model_type, scaling=sgm.scaling, scaling_matrix=sgm.scaling_matrix)
+            # Initialise the iteration counter and function, gradient, and Hessian call counters.
+            sgm.model_free.iter_count = 0
+            sgm.model_free.f_count = 0
+            sgm.model_free.g_count = 0
+            sgm.model_free.h_count = 0
+
+            # Disassemble the results.
+            sgm.model_free._disassemble_result(param_vector=sgm.xk, func=sgm.fk, iter=sgm.k, fc=sgm.f_count, gc=sgm.g_count, hc=sgm.h_count, warning=sgm.warning, spin=sgm.spin, sim_index=sgm.sim_index, model_type=sgm.model_type, scaling=sgm.scaling, scaling_matrix=sgm.scaling_matrix)
 
 
 class MF_memo(Memo):
@@ -232,6 +220,9 @@
 
 
     def run(self, processor, completed):
+        """Execute the model-free optimisation."""
+
+        # Run catching all errors.
         try:
             self.pre_run(processor)
             self.pre_command_feed_back(processor)
@@ -239,6 +230,8 @@
             self.post_command_feedback(results, processor)
             self.process_results(results, processor, completed)
             self.post_run(processor)
+
+        # An error occurred.
         except Exception, e :
             if isinstance(e, Capturing_exception):
                 raise e
@@ -315,12 +308,22 @@
 
 
     def run(self, processor, memo):
-        m_f = memo.model_free
-        m_f.iter_count = 0
-        m_f.f_count = 0
-        m_f.g_count = 0
-        m_f.h_count = 0
-        m_f._disassemble_result(param_vector=self.param_vector, func=self.func, iter=self.iter, fc=self.fc, gc=self.gc, hc=self.hc, warning=self.warning, spin=memo.spin, sim_index=memo.sim_index, model_type=memo.model_type, scaling=memo.scaling, scaling_matrix=memo.scaling_matrix)
+        """Disassemble the model-free optimisation results.
+        
+        @param processor:   Unused!
+        @type processor:    None
+        @param memo:        The model-free memo.
+        @type memo:         memo
+        """
+
+        # Initialise the iteration counter and function, gradient, and Hessian call counters.
+        memo.model_free.iter_count = 0
+        memo.model_free.f_count = 0
+        memo.model_free.g_count = 0
+        memo.model_free.h_count = 0
+
+        # Disassemble the results.
+        memo.model_free._disassemble_result(param_vector=self.param_vector, func=self.func, iter=self.iter, fc=self.fc, gc=self.gc, hc=self.hc, warning=self.warning, spin=memo.spin, sim_index=memo.sim_index, model_type=memo.model_type, scaling=memo.scaling, scaling_matrix=memo.scaling_matrix)
 
 
 class MF_super_grid_memo(MF_memo):
@@ -337,7 +340,6 @@
         self.grid_size = grid_size
         # aggregated results
         #             min_params, f_min, k
-        short_result = [None, None, 0]
         self.xk = None
         self.fk = 1e300
         self.k = 0
@@ -349,20 +351,29 @@
 
 
     def add_result(self, sub_memo, results):
+
+        # Normal minimisation.
         if self.full_output:
-            if results[OFFSET_FK] < self.fk:
-                self.xk = results[OFFSET_XK]
-                self.fk = results[OFFSET_FK]
-            self.k += results[OFFSET_K]
-            self.f_count += results[OFFSET_F_COUNT]
-
-            self.g_count += results[OFFSET_G_COUNT]
-            self.h_count += results[OFFSET_H_COUNT]
-            if results[OFFSET_WARNING] != None:
-                self.warning.append(results[OFFSET_WARNING])
-
+            # Unpack the results.
+            param_vector, func, iter, fc, gc, hc, warning = results
+
+            if func < self.fk:
+                self.xk = param_vector
+                self.fk = func
+            self.k += iter
+            self.f_count += fc
+
+            self.g_count += gc
+            self.h_count += hc
+            if warning != None:
+                self.warning.append(warning)
+
+        # Grid search.
         #FIXME: TESTME: do we use short results?
         else:
+            # Unpack the results.
+            param_vector, func, iter, warning = results
+
             if results[OFFSET_SHORT_FK] < self.short_result[OFFSET_SHORT_FK]:
                 self.short_result[OFFSET_SHORT_MIN_PARAMS] = results[OFFSET_SHORT_MIN_PARAMS]
                 self.short_result[OFFSET_SHORT_FK] = results[OFFSET_SHORT_FK]
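
The add_result() changes in the last hunk replace indexing into the minimisation results via the deleted OFFSET_* module constants with direct tuple unpacking of the full-output value. The following self-contained sketch shows the same refactoring pattern; the seven-element layout (parameter vector, function value, iteration count, function/gradient/Hessian call counts, warning string) follows the deleted OFFSET_* comments, while the Aggregate class and its method are hypothetical and not part of relax.

# Sketch of the add_result() refactoring: unpack the full-output result tuple
# from the minimiser instead of indexing it with OFFSET_* constants.  The
# 7-element layout mirrors the deleted OFFSET_* comments; the Aggregate class
# and its attribute names are inventions for illustration.

class Aggregate:
    """Accumulate the best result over a set of sub-grid minimisations."""

    def __init__(self):
        self.xk, self.fk, self.k = None, 1e300, 0
        self.f_count = self.g_count = self.h_count = 0
        self.warning = []

    def add_result(self, results):
        # Unpack the results (was: results[OFFSET_XK], results[OFFSET_FK], ...).
        param_vector, func, iter, fc, gc, hc, warning = results

        # Keep the lowest function value and its parameter vector.
        if func < self.fk:
            self.xk, self.fk = param_vector, func

        # Sum the iteration and call counts over all chunks.
        self.k += iter
        self.f_count += fc
        self.g_count += gc
        self.h_count += hc

        # Collect any warnings.
        if warning is not None:
            self.warning.append(warning)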



