mailr10162 - /branches/multi_processor_merge/multi/mpi4py_processor.py


Other Months | Index by Date | Thread Index
>>   [Date Prev] [Date Next] [Thread Prev] [Thread Next]

Header


Content

Posted by edward on January 11, 2010 - 10:51:
Author: bugman
Date: Mon Jan 11 10:51:43 2010
New Revision: 10162

URL: http://svn.gna.org/viewcvs/relax?rev=10162&view=rev
Log:
Ported the multi.mpi4py_processor module to the new mpi4py interface.

The sending and receiving of objects is now performed differently.


Modified:
    branches/multi_processor_merge/multi/mpi4py_processor.py

Modified: branches/multi_processor_merge/multi/mpi4py_processor.py
URL: 
http://svn.gna.org/viewcvs/relax/branches/multi_processor_merge/multi/mpi4py_processor.py?rev=10162&r1=10161&r2=10162&view=diff
==============================================================================
--- branches/multi_processor_merge/multi/mpi4py_processor.py (original)
+++ branches/multi_processor_merge/multi/mpi4py_processor.py Mon Jan 11 
10:51:43 2010
@@ -1,6 +1,7 @@
 
###############################################################################
 #                                                                            
 #
 # Copyright (C) 2007 Gary S Thompson (https://gna.org/users/varioustoxins)   
 #
+# Copyright (C) 2010 Edward d'Auvergne                                       
 #
 #                                                                            
 #
 # This file is part of the program relax.                                    
 #
 #                                                                            
 #
@@ -61,14 +62,14 @@
 def broadcast_command(command):
     for i in range(1, MPI.COMM_WORLD.size):
         if i != 0:
-            MPI.COMM_WORLD.Send(buf=command, dest=i)
+            MPI.COMM_WORLD.send(obj=command, dest=i)
 
 
 def ditch_all_results():
     for i in range(1, MPI.COMM_WORLD.size):
         if i != 0:
             while True:
-                result = MPI.COMM_WORLD.Recv(source=i)
+                result = MPI.COMM_WORLD.recv(source=i)
                 if result.completed:
                     break
 
@@ -105,6 +106,8 @@
     """The mpi4py multi-processor class."""
 
     def __init__(self, processor_size, callback):
+        """Initialise the mpi4py processor."""
+
         mpi_processor_size = MPI.COMM_WORLD.size-1
 
         if processor_size == -1:
@@ -148,11 +151,11 @@
 
 
     def master_queue_command(self, command, dest):
-        MPI.COMM_WORLD.Send(buf=command, dest=dest)
+        MPI.COMM_WORLD.send(obj=command, dest=dest)
 
 
     def master_recieve_result(self):
-        return MPI.COMM_WORLD.Recv(source=MPI.ANY_SOURCE)
+        return MPI.COMM_WORLD.recv(source=MPI.ANY_SOURCE)
 
 
     def rank(self):
@@ -160,7 +163,7 @@
 
 
     def return_result_command(self, result_object):
-        MPI.COMM_WORLD.Send(buf=result_object, dest=0)
+        MPI.COMM_WORLD.send(obj=result_object, dest=0)
 
 
     def run(self):
@@ -171,4 +174,4 @@
 
 
     def slave_recieve_commands(self):
-        return MPI.COMM_WORLD.Recv(source=0)
+        return MPI.COMM_WORLD.recv(source=0)




Related Messages


Powered by MHonArc, Updated Mon Jan 11 11:00:02 2010