mailr15412 - in /1.3: multi/ specific_fns/model_free/



Posted by edward on February 29, 2012 - 16:16:
Author: bugman
Date: Wed Feb 29 16:16:59 2012
New Revision: 15412

URL: http://svn.gna.org/viewcvs/relax?rev=15412&view=rev
Log:
Simplification and abstraction of the Slave_command.run() method.

The Processor now calls the _run() method of the Slave_command base class.  This is a wrapper for run() which performs the exception handling.  Therefore the program code, such as MF_minimise_command, is no longer required to handle the multi-processor specific error handling.
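
In outline, this is a template-method style refactoring:  the base class owns a private _run() wrapper which performs the error handling and then calls the user-supplied run() method.  The following minimal sketch illustrates the idea only - the names CapturingException, SlaveCommand and MinimiseCommand are simplified stand-ins, and Python 3 syntax is used rather than the relax 1.3 code:

class CapturingException(Exception):
    """Stand-in for multi's Capturing_exception - records which slave failed."""

    def __init__(self, rank=None, name=None):
        super().__init__("Error caught on slave %s (%s)." % (rank, name))
        self.rank = rank
        self.name = name


class SlaveCommand:
    """Base class - the Processor calls _run(), program code overrides run()."""

    def _run(self, processor, completed):
        # All multi-processor specific error handling lives here.
        try:
            self.run(processor)
        except CapturingException:
            raise
        except Exception:
            # Wrap any other error with the slave's identity so that the
            # master processor can report where the failure occurred.
            raise CapturingException(rank=processor.rank(), name=processor.get_name())

    def run(self, processor):
        # This must be overridden by the program code.
        raise NotImplementedError("The run() method has not been implemented.")


class MinimiseCommand(SlaveCommand):
    """A program-level command now contains only the calculation itself."""

    def run(self, processor):
        # No try/except needed - failures are wrapped by the base class _run().
        processor.return_object("optimisation results")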


Modified:
    1.3/multi/api.py
    1.3/multi/multi_processor_base.py
    1.3/multi/uni_processor.py
    1.3/specific_fns/model_free/multi_processor_commands.py

Modified: 1.3/multi/api.py
URL: http://svn.gna.org/viewcvs/relax/1.3/multi/api.py?rev=15412&r1=15411&r2=15412&view=diff
==============================================================================
--- 1.3/multi/api.py (original)
+++ 1.3/multi/api.py Wed Feb 29 16:16:59 2012
@@ -33,6 +33,7 @@
 
 # relax module imports.
 from multi.processor_io import Redirect_text
+from multi.misc import raise_unimplemented
 
 
 class Capturing_exception(Exception):
@@ -339,6 +340,26 @@
         self.memo_id = None
 
 
+    def _run(self, processor, completed):
+        """
+        @param processor:   The slave processor the command is running on.  Results from the command are returned via calls to processor.return_object.
+        @type processor:    Processor instance
+        @param completed:   The flag used in batching result returns to indicate that the sequence of batched result commands has completed. This value should be returned via the last result object retuned by this method or methods it calls. All other Result_commands should be initialised with completed=False. This is an optimisation to prevent the sending an extra batched result queue completion result command being sent, it may be an over early optimisation.
+        @type completed:    bool
+        """
+
+        # Execute the user supplied run() method, catching all errors.
+        try:
+            self.run(processor)
+
+        # An error occurred.
+        except Exception, e :
+            if isinstance(e, Capturing_exception):
+                raise e
+            else:
+                raise Capturing_exception(rank=processor.rank(), name=processor.get_name())
+
+
     def run(self, processor, completed):
         '''Run the slave command on the slave processor.
 
@@ -347,20 +368,10 @@
         using the return_object method of the processor passed to the command. Any exceptions raised
         will be caught wrapped and returned to the master processor by the slave processor.
 
-        @param processor:   The slave processor the command is running on.  Results from the command
-                            are returned via calls to processor.return_object.
-        @type processor:    Processor instance
-        @param completed:   The flag used in batching result returns to indicate that the sequence
-                            of batched result commands has completed. This value should be returned
-                            via the last result object retuned by this method or methods it calls.
-                            All other Result_commands should be initialised with completed=False.
-                            This is an optimisation to prevent the sending an extra batched result
-                            queue completion result command being sent, it may be an over early
-                            optimisation.
-        @type completed:    bool
-        '''
-
-        pass
+        '''
+
+        # This must be overridden!
+        raise_unimplemented(self.run)
 
 
     def set_memo_id(self, memo):
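
The raise_unimplemented() function imported from multi.misc above is not part of this diff, so its exact implementation is not shown here.  The following is only a hypothetical stand-in with the same intent - raising an error that names the method which the subclass failed to override (Python 3 syntax):

def raise_unimplemented(method):
    """Hypothetical stand-in for multi.misc.raise_unimplemented()."""
    raise NotImplementedError("The %s.%s() method has not been implemented." % (method.__self__.__class__.__name__, method.__name__))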

Modified: 1.3/multi/multi_processor_base.py
URL: http://svn.gna.org/viewcvs/relax/1.3/multi/multi_processor_base.py?rev=15412&r1=15411&r2=15412&view=diff
==============================================================================
--- 1.3/multi/multi_processor_base.py (original)
+++ 1.3/multi/multi_processor_base.py Wed Feb 29 16:16:59 2012
@@ -217,9 +217,9 @@
                         # Capture the standard IO streams for the slaves.
                         self.stdio_capture()
 
-                        # Execute the calculation.
+                        # Execute the calculation on the slave.
                         completed = (i == last_command)
-                        command.run(self, completed)
+                        command._run(self, completed)
 
                         # Restore the IO.
                         self.stdio_restore()

Modified: 1.3/multi/uni_processor.py
URL: http://svn.gna.org/viewcvs/relax/1.3/multi/uni_processor.py?rev=15412&r1=15411&r2=15412&view=diff
==============================================================================
--- 1.3/multi/uni_processor.py (original)
+++ 1.3/multi/uni_processor.py Wed Feb 29 16:16:59 2012
@@ -143,9 +143,9 @@
 
         last_command = len(self.command_queue)-1
         for i, command  in enumerate(self.command_queue):
+            # Execute the calculation on the slave.
             completed = (i == last_command)
-
-            command.run(self, completed)
+            command._run(self, completed)
 
         #self.run_command_queue()
         #TODO: add cheques for empty queues and maps if now warn
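
For illustration, here is a simplified sketch of the dispatch loop in this hunk, reusing the SlaveCommand and MinimiseCommand stand-ins from the sketch near the top of this message (the UniProcessor class and run_command_queue() name are again stand-ins, not the relax uni_processor code):

class UniProcessor:
    """Minimal single-processor stand-in that drives a command queue."""

    def __init__(self):
        self.command_queue = []

    def rank(self):
        return 0

    def get_name(self):
        return "uni"

    def return_object(self, result):
        print("Result returned: %r" % (result,))

    def run_command_queue(self):
        last_command = len(self.command_queue) - 1
        for i, command in enumerate(self.command_queue):
            # Only the last command carries completed=True, marking the end
            # of the batched result sequence.
            completed = (i == last_command)
            command._run(self, completed)


# Usage:  queue two slave commands and execute them.
processor = UniProcessor()
processor.command_queue = [MinimiseCommand(), MinimiseCommand()]
processor.run_command_queue()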

Modified: 1.3/specific_fns/model_free/multi_processor_commands.py
URL: http://svn.gna.org/viewcvs/relax/1.3/specific_fns/model_free/multi_processor_commands.py?rev=15412&r1=15411&r2=15412&view=diff
==============================================================================
--- 1.3/specific_fns/model_free/multi_processor_commands.py (original)
+++ 1.3/specific_fns/model_free/multi_processor_commands.py Wed Feb 29 16:16:59 2012
@@ -117,32 +117,24 @@
         return results
 
 
-    def run(self, processor, completed):
+    def run(self, processor):
         """Setup and perform the model-free optimisation."""
 
-        # Run catching all errors.
-        try:
-            # Initialise the function to minimise.
-            self.mf = Mf(init_params=self.opt_params.param_vector, model_type=self.data.model_type, diff_type=self.data.diff_type, diff_params=self.data.diff_params, scaling_matrix=self.data.scaling_matrix, num_spins=self.data.num_spins, equations=self.data.equations, param_types=self.data.param_types, param_values=self.data.param_values, relax_data=self.data.ri_data, errors=self.data.ri_data_err, bond_length=self.data.r, csa=self.data.csa, num_frq=self.data.num_frq, frq=self.data.frq, num_ri=self.data.num_ri, remap_table=self.data.remap_table, noe_r1_table=self.data.noe_r1_table, ri_labels=self.data.ri_types, gx=self.data.gx, gh=self.data.gh, h_bar=self.data.h_bar, mu0=self.data.mu0, num_params=self.data.num_params, vectors=self.data.xh_unit_vectors)
-
-            # Print out.
-            if self.opt_params.verbosity >= 1 and (self.data.model_type == 'mf' or self.data.model_type == 'local_tm'):
-                spin_print(self.data.spin_id, self.opt_params.verbosity)
-
-            # Preform optimisation.
-            results = self.optimise()
-
-            # Disassemble the results list.
-            param_vector, func, iter, fc, gc, hc, warning = results
-
-            processor.return_object(MF_result_command(processor, self.memo_id, param_vector, func, iter, fc, gc, hc, warning, completed=False))
-
-        # An error occurred.
-        except Exception, e :
-            if isinstance(e, Capturing_exception):
-                raise e
-            else:
-                raise Capturing_exception(rank=processor.rank(), name=processor.get_name())
+        # Initialise the function to minimise.
+        self.mf = Mf(init_params=self.opt_params.param_vector, model_type=self.data.model_type, diff_type=self.data.diff_type, diff_params=self.data.diff_params, scaling_matrix=self.data.scaling_matrix, num_spins=self.data.num_spins, equations=self.data.equations, param_types=self.data.param_types, param_values=self.data.param_values, relax_data=self.data.ri_data, errors=self.data.ri_data_err, bond_length=self.data.r, csa=self.data.csa, num_frq=self.data.num_frq, frq=self.data.frq, num_ri=self.data.num_ri, remap_table=self.data.remap_table, noe_r1_table=self.data.noe_r1_table, ri_labels=self.data.ri_types, gx=self.data.gx, gh=self.data.gh, h_bar=self.data.h_bar, mu0=self.data.mu0, num_params=self.data.num_params, vectors=self.data.xh_unit_vectors)
+
+        # Print out.
+        if self.opt_params.verbosity >= 1 and (self.data.model_type == 'mf' or self.data.model_type == 'local_tm'):
+            spin_print(self.data.spin_id, self.opt_params.verbosity)
+
+        # Preform optimisation.
+        results = self.optimise()
+
+        # Disassemble the results list.
+        param_vector, func, iter, fc, gc, hc, warning = results
+
+        # Send everything back to the processor.
+        processor.return_object(MF_result_command(processor, self.memo_id, param_vector, func, iter, fc, gc, hc, warning, completed=False))
 
 
     def store_data(self, data, opt_params):



