Author: varioustoxins
Date: Fri Mar 30 09:00:39 2007
New Revision: 3249

URL: http://svn.gna.org/viewcvs/relax?rev=3249&view=rev
Log:
More work to remove spurious calls to the mpi4py module when starting the
uniprocessor version. Feedback now prints correctly.

Modified:
    branches/multi_processor/multi/commands.py
    branches/multi_processor/multi/mpi4py_processor.py
    branches/multi_processor/multi/uni_processor.py
    branches/multi_processor/specific_fns/model_free.py

Modified: branches/multi_processor/multi/commands.py
URL: http://svn.gna.org/viewcvs/relax/branches/multi_processor/multi/commands.py?rev=3249&r1=3248&r2=3249&view=diff
==============================================================================
--- branches/multi_processor/multi/commands.py (original)
+++ branches/multi_processor/multi/commands.py Fri Mar 30 09:00:39 2007
@@ -1,6 +1,7 @@
 from multi.processor import Memo,Slave_command
 from multi.processor import Result_command,Result_string,NULL_RESULT
+from re import match
 
 from maths_fns.mf import Mf
 from minimise.generic import generic_minimise
@@ -24,7 +25,7 @@
         result = Result_string(msg,True)
         processor.return_object(result)
 
-#not quit a momento so a memo
+#not quite a memento, so a memo
 class MF_completion_memo(Memo):
     def __init__(self,model_free,index,sim_index,run,param_set,scaling):
         self.index = index
@@ -78,19 +79,27 @@
                      'csa':None, 'num_frq':0, 'frq':None, 'num_ri':None, 'remap_table':None, 'noe_r1_table':None,
                      'ri_labels':None, 'gx':0, 'gh':0, 'g_ratio':0, 'h_bar':0, 'mu0':0, 'num_params':None, 'vectors':None}
-
+        self.info_map={'res_id':None,'grid_size':1}
 
     #FIXME: bad names
     def set_mf(self, **kwargs):
         self.mf_map.update(**kwargs)
 
     def set_minimise(self,**kwargs):
+        print kwargs
+        if 'res_id' in kwargs:
+            self.info_map['res_id']= kwargs['res_id']
+            del kwargs['res_id']
+        if 'grid_size' in kwargs:
+            self.info_map['grid_size']= kwargs['grid_size']
+            del kwargs['grid_size']
         self.minimise_map.update(**kwargs)
 
     def build_mf(self):
         return Mf(**self.mf_map)
 
     def do_minimise(self,memo):
+
         self.mf = self.build_mf()
         results = generic_minimise(func=self.mf.func, dfunc=self.mf.dfunc, d2func=self.mf.d2func,
                                    **self.minimise_map)
@@ -100,8 +109,29 @@
                                  gc=gc,hc=hc, warning=warning,
                                  run=memo.run, index=memo.index,sim_index=memo.sim_index,
                                  param_set=memo.param_set,scaling=memo.scaling)
+
+    def do_feedback(self):
+        # Print out.
+        #print_flag,param_set,residue_num,residue_name,min_algor,grid_size=None
+        m_m=self.minimise_map
+        m_f=self.mf_map
+        i_m=self.info_map
+        if m_m['print_flag'] >= 1:
+            # Individual residue stuff.
+            if m_f['param_set'] == 'mf' or m_f['param_set'] == 'local_tm':
+                if m_m['print_flag'] >= 2:
+                    print "\n\n"
+                string = "Fitting to residue: " + i_m['res_id']
+                print "\n\n" + string
+                print len(string) * '~'
+            if match('^[Gg]rid', m_m['min_algor']):
+                print "Unconstrained grid search size: " + `i_m['grid_size']` + " (constraints may decrease this size).\n"
+
     def run(self,processor):
         self.mf = self.build_mf()
+
+
+        self.do_feedback()
         results = generic_minimise(func=self.mf.func, dfunc=self.mf.dfunc, d2func=self.mf.d2func,
                                    **self.minimise_map)
         param_vector, func, iter, fc, gc, hc, warning = results
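The set_minimise() change above splits the incoming keyword arguments: 'res_id' and 'grid_size' are only needed for the feedback printing, and generic_minimise() would reject them, so they are diverted into info_map before the remainder is forwarded. A minimal standalone sketch of the same idiom follows; the names and values here are illustrative only and are not part of the patch:

    # Divert feedback-only keys into a separate map before forwarding the
    # rest; dict.pop() removes the key and returns its value.
    def split_kwargs(kwargs, info_keys=('res_id', 'grid_size')):
        info = {}
        for key in info_keys:
            if key in kwargs:
                info[key] = kwargs.pop(key)
        return info, kwargs

    info, minimise_kwargs = split_kwargs({'res_id': '1 GLY', 'grid_size': 11,
                                          'func_tol': 1e-25})
    print info             # the diverted feedback-only values
    print minimise_kwargs  # what the minimiser will actually see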
Modified: branches/multi_processor/multi/mpi4py_processor.py
URL: http://svn.gna.org/viewcvs/relax/branches/multi_processor/multi/mpi4py_processor.py?rev=3249&r1=3248&r2=3249&view=diff
==============================================================================
--- branches/multi_processor/multi/mpi4py_processor.py (original)
+++ branches/multi_processor/multi/mpi4py_processor.py Fri Mar 30 09:00:39 2007
@@ -5,20 +5,31 @@
 import os
 import math
 import time,datetime
+import textwrap
 
 from multi.processor import Memo,Slave_command
 from multi.processor import Result,Result_command,Result_string
 from multi.commands import Exit_command
 
-#FIXME: me move top generic command module
+
 # load mpi
 try:
     from mpi4py import MPI
 except ImportError:
-    raise Exception('test')
-    sys.stderr.write("The dependency 'mpi4py' has not been installed.\n")
+    msg = '''The dependency 'mpi4py' has not been installed. You should either
+
+             1. run without multiprocessor support i.e. remove the
+                --multi mpi4py flag from the command line
+
+             2. install mpi4py
+
+             3. choose another multi processor method to give to the
+                --multi command line flag\n'''
+    msg=textwrap.dedent(msg)
+    sys.stderr.write(msg)
+    sys.stderr.write('exiting...\n\n')
    sys.exit()
 
 # save original sys.exit to call after wrapper
@@ -106,11 +117,6 @@
         MPI.COMM_WORLD.Send(buf=result, dest=0)
 
 
-#    def process_commands(self,commands):
-#        self.assert_on_master()
-#
-#        for i in range(1,MPI.size):
-#            MPI.COMM_WORLD.Send(buf=command,dest=i)
 
     def run_command_globally(self,command):
         queue = [command for i in range(1,MPI.size)]
@@ -119,16 +125,6 @@
     def run_command_queue(self,queue):
         self.assert_on_master()
 
-#        for i in range(1,MPI.size):
-#            MPI.COMM_WORLD.Send(buf=command,dest=i)
-#        for i in range(1,MPI.size):
-#            elem = MPI.COMM_WORLD.Recv(source=i)
-#            if type(elem) == 'object':
-#                elem.run(relax_instance, relax_instance.processor)
-#            else:
-#                #FIXME can't cope with multiple lines
-#                print i,elem
-        #queue = [command for i in range(1,MPI.size*2)]
 
         running_set=set()
         idle_set=set([i for i in range(1,MPI.size)])
 
@@ -172,27 +168,9 @@
             raise Exception(message)
 
 
-#        for i in range(MPI.size):
-#            buf=[]
-#            if i !=0:
-#                print 'try',i
-#                MPI.COMM_WORLD.Recv(buf=buf, source=i)
-#                for i,elem in enumerate(buf):
-#                    if elem.type!='object':
-#                        print i,elem
-#                    else:
-#                        elem.run()
 
     def run(self):
-#        if MPI.rank == 0:
-#            self.relax_instance.multi_mode='multi_master'
-#        else:
-#            self.relax_instance.multi_mode='multi_slave'
-#            self.relax_instance.mode='slave'
-#            self.relax_instance.script_file=None
-#            self.relax_instance.dummy_mode=True
-#        #self.relax_instance.run()
 
         if MPI.rank ==0:
 
@@ -201,12 +179,12 @@
             end_time = time.time()
             time_diff= end_time - start_time
             time_delta = datetime.timedelta(seconds=time_diff)
-            sys.stderr.write('overall runtime: ' + time_delta.__str__() + '\n')
-            sys.stderr.flush()
-            # note this a mdofied exit that kills all MPI processors
+            print 'overall runtime: ' + time_delta.__str__() + '\n'
+
+            # note this is a modified exit that kills all MPI processors
             sys.exit()
         else:
-            #self.relax_instance.run(deamon=True)
+
            while not self.do_quit:
                 command = MPI.COMM_WORLD.Recv(source=0)
                 try:
@@ -216,9 +194,7 @@
 
 
-            #if data=='close':
-            #    exit_mpi()
-            #    return
+
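One caveat with the new error message: textwrap.dedent() strips only whitespace that is common to every non-blank line, and the first line of msg ('The dependency ...') starts immediately after the opening quotes with no indentation, so as written the dedent() call appears to be a no-op. A minimal sketch of the usual pattern, where an escaped newline puts every line at the same indentation (illustrative only, not part of the patch):

    import textwrap, sys

    # The backslash after the opening quotes means the first content line is
    # indented like the rest, so dedent() can strip the common margin.
    msg = textwrap.dedent('''\
        The dependency 'mpi4py' has not been installed. You should either

        1. run without multiprocessor support, i.e. remove the
           --multi mpi4py flag from the command line

        2. install mpi4py

        3. choose another multi processor method to give to the
           --multi command line flag
        ''')
    sys.stderr.write(msg)
    sys.stderr.write('exiting...\n\n')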
Modified: branches/multi_processor/multi/uni_processor.py
URL: http://svn.gna.org/viewcvs/relax/branches/multi_processor/multi/uni_processor.py?rev=3249&r1=3248&r2=3249&view=diff
==============================================================================
--- branches/multi_processor/multi/uni_processor.py (original)
+++ branches/multi_processor/multi/uni_processor.py Fri Mar 30 09:00:39 2007
@@ -44,8 +44,8 @@
         end_time = time.clock()
         time_diff= end_time - start_time
         time_delta = datetime.timedelta(seconds=time_diff)
-        sys.stderr.write('overall runtime: ' + time_delta.__str__() + '\n')
-        sys.stderr.flush()
+        print 'overall runtime: ' + time_delta.__str__() + '\n'
+
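Both processors now report the overall runtime the same way on stdout, although note that uni_processor.py times with time.clock() (processor time on most Unix platforms) while mpi4py_processor.py uses time.time() (wall-clock time), so the two figures are not directly comparable. The shared reporting idiom, as a standalone sketch:

    import time, datetime

    start_time = time.time()
    # ... the queued calculations would run here ...
    end_time = time.time()

    # wrap the elapsed seconds in a timedelta for a readable h:mm:ss display
    time_delta = datetime.timedelta(seconds=end_time - start_time)
    print 'overall runtime: ' + str(time_delta)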
Modified: branches/multi_processor/specific_fns/model_free.py
URL: http://svn.gna.org/viewcvs/relax/branches/multi_processor/specific_fns/model_free.py?rev=3249&r1=3248&r2=3249&view=diff
==============================================================================
--- branches/multi_processor/specific_fns/model_free.py (original)
+++ branches/multi_processor/specific_fns/model_free.py Fri Mar 30 09:00:39 2007
@@ -2168,17 +2168,17 @@
         if constraints:
             A, b = self.linear_constraints(index=index)
 
-        # Print out.
-        if self.print_flag >= 1:
-            # Individual residue stuff.
-            if self.param_set == 'mf' or self.param_set == 'local_tm':
-                if self.print_flag >= 2:
-                    print "\n\n"
-                string = "Fitting to residue: " + `self.relax.data.res[self.run][index].num` + " " + self.relax.data.res[self.run][index].name
-                print "\n\n" + string
-                print len(string) * '~'
-            if match('^[Gg]rid', min_algor):
-                print "Unconstrained grid search size: " + `self.grid_size` + " (constraints may decrease this size).\n"
+#        # Print out.
+#        if self.print_flag >= 1:
+#            # Individual residue stuff.
+#            if self.param_set == 'mf' or self.param_set == 'local_tm':
+#                if self.print_flag >= 2:
+#                    print "\n\n"
+#                string = "Fitting to residue: " + `self.relax.data.res[self.run][index].num` + " " + self.relax.data.res[self.run][index].name
+#                print "\n\n" + string
+#                print len(string) * '~'
+#            if match('^[Gg]rid', min_algor):
+#                print "Unconstrained grid search size: " + `self.grid_size` + " (constraints may decrease this size).\n"
 
         # Initialise the iteration counter and function, gradient, and Hessian call counters.
         #FIXME: move to processor command
@@ -2329,73 +2329,6 @@
 
 
-#        # Initialise the function to minimise.
-#        ######################################
-#        print 'initialise Mf'
-#        print 'init_params',self.param_vector
-#        print 'param_se',self.param_set
-#        print 'diff_type',diff_type
-#        print 'diff_params',diff_params
-#        print 'scaling_matrix',self.scaling_matrix
-#        print 'num_res',num_res
-#        print 'equations',equations
-#        print 'param_types',param_types
-#        print 'param_values',param_values
-#        print 'relax_data',relax_data
-#        print 'errors',relax_error
-#        print 'bond_length',r
-#        print 'csa=',csa
-#        print 'num_frq',num_frq
-#        print 'frq',frq
-#        print 'num_ri',num_ri
-#        print 'remap_table',remap_table
-#        print 'noe_r1_table',noe_r1_table
-#        print 'ri_labels',ri_labels
-#        print 'gx',self.relax.data.gx
-#        print 'gh',self.relax.data.gh
-#        print 'g_ratio',self.relax.data.g_ratio
-#        print 'h_bar',self.relax.data.h_bar
-#        print 'mu0',self.relax.data.mu0
-#        print 'num_params',num_params
-#        print 'vectors',xh_unit_vectors
-#
-#        data_list = [ 'initialise Mf',
-#                      'init_params',self.param_vector,
-#                      'param_set',self.param_set,
-#                      'diff_type',diff_type,
-#                      'diff_params',diff_params,
-#                      'scaling_matrix',self.scaling_matrix,
-#                      'num_res',num_res,
-#                      'equations',equations,
-#                      'param_types',param_types,
-#                      'param_values',param_values,
-#                      'relax_data',relax_data,
-#                      'errors',relax_error,
-#                      'bond_length',r,
-#                      'csa=',csa,
-#                      'num_frq',num_frq,
-#                      'frq',frq,
-#                      'num_ri',num_ri,
-#                      'remap_table',remap_table,
-#                      'noe_r1_table',noe_r1_table,
-#                      'ri_labels',ri_labels,
-#                      'gx',self.relax.data.gx,
-#                      'gh',self.relax.data.gh,\
-#                      'g_ratio',self.relax.data.g_ratio,
-#                      'h_bar',self.relax.data.h_bar,
-#                      'mu0',self.relax.data.mu0,
-#                      'num_params',num_params,
-#                      'vectors',xh_unit_vectors]
-#        for elem in data_list:
-#            marshal.loads(marshal.dumps(elem))
-#        self.mf = Mf(init_params=self.param_vector, param_set=self.param_set, diff_type=diff_type,
-#                     diff_params=diff_params, scaling_matrix=self.scaling_matrix, num_res=num_res,
-#                     equations=equations, param_types=param_types, param_values=param_values,
-#                     relax_data=relax_data, errors=relax_error, bond_length=r, csa=csa, num_frq=num_frq,
-#                     frq=frq, num_ri=num_ri, remap_table=remap_table, noe_r1_table=noe_r1_table,
-#                     ri_labels=ri_labels, gx=self.relax.data.gx, gh=self.relax.data.gh,
-#                     g_ratio=self.relax.data.g_ratio, h_bar=self.relax.data.h_bar,
-#                     mu0=self.relax.data.mu0, num_params=num_params, vectors=xh_unit_vectors)
 
         command=MF_minimise_command()
 
         command.set_mf(init_params=self.param_vector, param_set=self.param_set, diff_type=diff_type,
@@ -2446,34 +2379,25 @@
 
         # Minimisation.
         ###############
 
-        #FIXME remove old version
-#        if constraints:
-#            results = generic_minimise(func=self.mf.func, dfunc=self.mf.dfunc, d2func=self.mf.d2func, args=(), x0=self.param_vector, min_algor=min_algor, min_options=min_options, func_tol=func_tol, grad_tol=grad_tol, maxiter=max_iterations, A=A, b=b, full_output=1, print_flag=print_flag)
-#        else:
-#            results = generic_minimise(func=self.mf.func, dfunc=self.mf.dfunc, d2func=self.mf.d2func, args=(), x0=self.param_vector, min_algor=min_algor, min_options=min_options, func_tol=func_tol, grad_tol=grad_tol, maxiter=max_iterations, full_output=1, print_flag=print_flag)
-#        if results == None:
-#            return #FIXME??? strange contraints
+
+        #residue_num,residue_name,min_algor,grid_size=None
+        res_num = self.relax.data.res[self.run][index].num
+        res_name = self.relax.data.res[self.run][index].name
+        res_id = `res_num` + ' ' + res_name
+
         if constraints:
             command.set_minimise(args=(), x0=self.param_vector, min_algor=min_algor, min_options=min_options, func_tol=func_tol,
                                  grad_tol=grad_tol, maxiter=max_iterations, A=A, b=b, full_output=1,
-                                 print_flag=print_flag)
+                                 print_flag=print_flag,res_id=res_id,grid_size=self.grid_size)
         else:
             command.set_minimise(args=(), x0=self.param_vector, min_algor=min_algor, min_options=min_options, func_tol=func_tol,
                                  grad_tol=grad_tol, maxiter=max_iterations, full_output=1,
-                                 print_flag=print_flag)
+                                 print_flag=print_flag,res_id=res_id,grid_size=self.grid_size)
 
         memo = MF_completion_memo(model_free=self,index=index,sim_index=sim_index,run=self.run,param_set=self.param_set,scaling=scaling)
         self.relax.processor.add_to_queue(command,memo)
-        #self.relax.processor.add_to_queue()
-
-        #command.do_minimise(memo)
-        #command.memo_id
-
-        #param_vector, func, iter, fc, gc, hc, warning = command.results
-        #self.disassemble_result(param_vector=param_vector,func=func,iter=iter,fc=fc,gc=gc,hc=hc,warning=warning,
-        #                        run=memo.run,index=memo.index,sim_index=memo.sim_index, param_set=memo.param_set,scaling=memo.scaling)
+
         self.relax.processor.run_queue()
@@ -2483,12 +2407,7 @@
         self.warning=warning
         self.param_vector=param_vector
 
-        #FIXME something is resetting the count between each calculation!
-#        self.iter_count = iter
-#        self.f_count = fc
-#        self.g_count = gc
-#        self.h_count = hc
-
+        #FIXME this is a fix for old code
         self.iter_count = self.iter_count + iter
         self.f_count = self.f_count + fc
         self.g_count = self.g_count + gc
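The counter change in the last hunk addresses the FIXME it replaces: with the processor queue, a single model_free object receives disassemble_result() callbacks from many queued minimisation commands, so plain assignment would keep only the last command's counts, while accumulation sums across all of them. A stripped-down sketch of the effect; the class and values here are illustrative only, not part of the patch:

    # Accumulate per-command counters across queued results rather than
    # overwriting them with each callback.
    class Counters:
        def __init__(self):
            self.iter_count = 0
            self.f_count = 0

        def disassemble_result(self, iter, fc):
            self.iter_count = self.iter_count + iter
            self.f_count = self.f_count + fc

    c = Counters()
    for iter, fc in [(12, 40), (7, 22)]:   # results from two queued commands
        c.disassemble_result(iter, fc)
    print c.iter_count, c.f_count          # totals over both commands: 19 62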