Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
@@ -1 +1,7 @@
# PyPARIS

This package should be tested using the examples in:

https://github.com/PyCOMPLETE/PyPARIS_CoupledBunch_sim_class

https://github.com/PyCOMPLETE/PyPARIS_sim_class
4 changes: 2 additions & 2 deletions communication_helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,13 +31,13 @@ def split_float_buffers(megabuffer):


def list_of_strings_2_buffer(strlist):
    """Pack a list of strings into a flat integer buffer for MPI transfer.

    Each string is terminated by ';' and the whole payload by the
    sentinel '\nendbuf\n' (so that trailing zero-padding added by a
    fixed-size receive buffer can be stripped by the decoder).

    Parameters
    ----------
    strlist : list of str
        Strings to encode (must not contain ';' or the sentinel).

    Returns
    -------
    numpy.ndarray
        1-D integer array of character codes.
    """
    data = ''.join(map(lambda s: s + ';', strlist)) + '\nendbuf\n'
    # list(...) is required on Python 3: np.array(map(...)) would build a
    # 0-d object array around the map iterator instead of an int array.
    buf_to_send = np.atleast_1d(np.int_(np.array(list(map(ord, data)))))
    return buf_to_send

def buffer_2_list_of_strings(buf):
    """Decode a buffer produced by list_of_strings_2_buffer.

    Everything after the '\nendbuf\n' sentinel (e.g. zero padding of a
    fixed-size receive buffer) is discarded; the payload is then split
    on ';' separators.

    Parameters
    ----------
    buf : sequence of int
        Character codes as produced by list_of_strings_2_buffer.

    Returns
    -------
    list of str
        The decoded strings.
    """
    # chr (not the Python-2-only unichr) so the code runs on Python 3;
    # all codes originate from ord() on the sender side.
    str_received = ''.join(map(chr, list(buf)))
    strlist = list(map(str, str_received.split('\nendbuf\n')[0].split(';')))[:-1]
    return strlist


Expand Down
39 changes: 39 additions & 0 deletions gen_multibunch_beam.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
import os

from scipy.constants import c as clight, e as qe
import numpy as np

Expand Down Expand Up @@ -55,3 +57,40 @@ def gen_matched_multibunch_beam(machine, n_macroparticles_per_bunch, filling_pat
bb.slice_info['i_turn'] = 0

return list_bunches


def load_multibunch_beam(dirname, reset_i_turns=True):
    """Load a multibunch beam previously saved with save_bunch_to_folder.

    Reads bunch0.h5 first to learn the total number of bunches, then
    loads the remaining bunch files from the same folder.

    Parameters
    ----------
    dirname : str
        Folder containing bunch<i>.h5 files.
    reset_i_turns : bool
        If True, reset slice_info['i_turn'] to 0 on every bunch.

    Returns
    -------
    list
        Bunches in reversed order (last bunch in position 0, as
        expected by the ring pipeline).
    """
    import PyPARIS.myfilemanager as mfm
    import PyPARIS.communication_helpers as ch

    print('Loading the beam from %s' % dirname)

    bzero = ch.buffer_2_beam(mfm.dict_of_arrays_and_scalar_from_h5(
        dirname + '/bunch0.h5')['bunchbuffer'])
    N_bunches_tot_beam = bzero.slice_info['N_bunches_tot_beam']
    list_bunches = [bzero]
    # range (not the Python-2-only xrange) keeps this runnable on Python 3.
    for ibun in range(1, N_bunches_tot_beam):
        list_bunches.append(ch.buffer_2_beam(
            mfm.dict_of_arrays_and_scalar_from_h5(
                dirname + '/bunch%d.h5' % ibun)['bunchbuffer']))

    list_bunches = list_bunches[::-1]  # We want the last bunch to be in pos 0
    if reset_i_turns:
        for bb in list_bunches:
            bb.slice_info['i_turn'] = 0

    return list_bunches


def save_bunch_to_folder(bunch, dirname):
    """Serialize one bunch to <dirname>/bunch<i_bunch>.h5.

    The target folder is created if missing. The bunch is encoded with
    communication_helpers.beam_2_buffer and stored under the
    'bunchbuffer' key, the format read back by load_multibunch_beam.

    Parameters
    ----------
    bunch : object
        Beam object exposing slice_info['i_bunch'].
    dirname : str
        Destination folder.
    """
    import errno
    import PyPARIS.myfilemanager as mfm
    import PyPARIS.communication_helpers as ch
    # EAFP: avoids the race between an exists() check and makedirs()
    # when several processes save into the same folder concurrently.
    try:
        os.makedirs(dirname)
    except OSError as err:
        if err.errno != errno.EEXIST:
            raise
    buf = ch.beam_2_buffer(bunch)
    bpath = dirname + '/bunch%d.h5' % bunch.slice_info['i_bunch']
    print('Saving: ' + bpath)
    mfm.dict_to_h5(dict_save={'bunchbuffer': buf},
                   filename=bpath)


25 changes: 15 additions & 10 deletions myfilemanager.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,31 +59,36 @@ def monitorh5_to_dict(filename, key= 'Bunch'):
def monitorh5_to_obj(filename, key= 'Bunch'):
return obj_from_dict(monitorh5_to_dict(filename, key))

def monitorh5list_to_dict(filename_list, key='Bunch', flag_transpose=False, permissive=False):
    """Concatenate monitor data from several HDF5 files into one dict.

    Parameters
    ----------
    filename_list : list of str
        Files to load, in order; the first file defines the keys.
    key : str
        HDF5 group name passed to monitorh5_to_dict.
    flag_transpose : bool
        If True, concatenate along the last axis (arrays stored
        transposed, e.g. one column per record) instead of the first.
    permissive : bool
        If True, files that fail with IOError are skipped with a
        warning instead of aborting the whole load.

    Returns
    -------
    dict
        Per-key arrays concatenated over all readable files.
    """
    monitor_dict = monitorh5_to_dict(filename_list[0], key=key)
    for i_file in range(1, len(filename_list)):
        print('Loading ' + filename_list[i_file])
        try:
            monitor_dict_curr = monitorh5_to_dict(filename_list[i_file], key=key)
            if flag_transpose:
                for kk in monitor_dict.keys():
                    monitor_dict[kk] = np.array(
                        list(monitor_dict[kk].T) + list(monitor_dict_curr[kk].T)).T
            else:
                for kk in monitor_dict.keys():
                    monitor_dict[kk] = np.array(
                        list(monitor_dict[kk]) + list(monitor_dict_curr[kk]))
        except IOError as err:
            print('Got:')
            print(err)
            if not permissive:
                # Bare raise preserves the original traceback
                # (``raise err`` would truncate it on Python 2).
                raise

    return monitor_dict

def monitorh5list_to_obj(filename_list, key='Bunch', flag_transpose=False, permissive=False):
    """Like monitorh5list_to_dict, but wrap the result in an object.

    All arguments are forwarded unchanged; keyword forwarding keeps the
    call correct even if the target signature gains parameters.
    """
    return obj_from_dict(monitorh5list_to_dict(
        filename_list, key=key, flag_transpose=flag_transpose,
        permissive=permissive))


def dict_to_h5(dict_save, filename, compression=None, compression_opts=None):
    """Write each dict entry as a dataset of a new HDF5 file.

    Parameters
    ----------
    dict_save : dict
        Keys become dataset names; values become dataset contents.
    filename : str
        Output path; an existing file is overwritten ('w' mode).
    compression : str or None
        h5py compression filter (e.g. 'gzip'); None disables it.
    compression_opts : int or None
        Filter-specific options (e.g. gzip level).
    """
    import h5py
    with h5py.File(filename, 'w') as fid:
        # items() avoids a second lookup per key.
        for kk, vv in dict_save.items():
            fid.create_dataset(kk, data=vv,
                               compression=compression,
                               compression_opts=compression_opts)



Expand Down
78 changes: 46 additions & 32 deletions ring_of_CPUs_multiturn.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,8 +30,9 @@ def __init__(self, sim_content, N_pieces_per_transfer=1, force_serial = False, c
N_buffer_float_size = 1000000, N_buffer_int_size = 100,
verbose = False, mpi_verbose = False, enable_barriers = False,
enable_orders_from_master = True):



self.iteration = -1
self.sim_content = sim_content
self.N_turns = sim_content.N_turns

Expand Down Expand Up @@ -145,7 +146,15 @@ def __init__(self, sim_content, N_pieces_per_transfer=1, force_serial = False, c
self.comm.Barrier()
self.verbose_mpi_out('After barrier 2 (cpu %d)'%self.comm.Get_rank())

self.sim_content.init_all()
if hasattr(self.sim_content, 'pre_init_master'):
if self.I_am_the_master:
from_master = self.sim_content.pre_init_master()
else:
from_master = None
from_master = self._broadcast_from_master(from_master)
self.sim_content.init_all(from_master)
else:
self.sim_content.init_all()

if self.enable_barriers:
self.verbose_mpi_out('At barrier 3 (cpu %d)'%self.comm.Get_rank())
Expand Down Expand Up @@ -195,7 +204,7 @@ def __init__(self, sim_content, N_pieces_per_transfer=1, force_serial = False, c
def run(self):


iteration = 0
self.iteration = 0
list_received_buffers = [self.sim_content.piece_to_buffer(None)]
while True:

Expand All @@ -219,7 +228,7 @@ def run(self):
if self.myring==0 and self.myid_in_ring == 0:
t_now = time.mktime(time.localtime())
print2logandstdo('%s, iter%03d - cpu %d.%d startin bunch %d/%d turn=%d'%(time.strftime("%d/%m/%Y %H:%M:%S", time.localtime(t_now)),
iteration, self.myring, self.myid_in_ring,
self.iteration, self.myring, self.myid_in_ring,
next_bunch.slice_info['i_bunch'], next_bunch.slice_info['N_bunches_tot_beam'], next_bunch.slice_info['i_turn']))


Expand Down Expand Up @@ -255,7 +264,7 @@ def run(self):
if thisslice is not None:
self.sim_content.treat_piece(thisslice)
t_end = time.mktime(time.localtime())
self._print_some_info_on_comm(thisslice, iteration, t_start, t_end)
self._print_some_info_on_comm(thisslice, t_start, t_end)


#########################
Expand Down Expand Up @@ -301,10 +310,10 @@ def run(self):
self.comm.Barrier()
self.verbose_mpi_out('After barrier L1 (cpu %d)'%self.comm.Get_rank())

self.verbose_mpi_out('At Sendrecv, cpu %d/%d, iter %d'%(self.myid, self.N_nodes, iteration))
self.verbose_mpi_out('At Sendrecv, cpu %d/%d, iter %d'%(self.myid, self.N_nodes, self.iteration))
self.comm.Sendrecv(sendbuf, dest=self.right, sendtag=self.right,
recvbuf=self.buf_float, source=self.left, recvtag=self.myid)
self.verbose_mpi_out('After Sendrecv, cpu %d/%d, iter %d'%(self.myid, self.N_nodes, iteration))
self.verbose_mpi_out('After Sendrecv, cpu %d/%d, iter %d'%(self.myid, self.N_nodes, self.iteration))

if self.enable_barriers:
self.verbose_mpi_out('At barrier L2 (cpu %d)'%self.comm.Get_rank())
Expand All @@ -324,26 +333,7 @@ def run(self):


if self.enable_orders_from_master:
if self.I_am_the_master:
# send orders
buforders = ch.list_of_strings_2_buffer(orders_from_master)
if len(buforders) > self.N_buffer_int_size:
raise ValueError('Int buffer is too small!')
self.buf_int = 0*self.buf_int
self.buf_int[:len(buforders)]=buforders

self.verbose_mpi_out('At Bcast, cpu %d/%d, iter %d'%(self.myid, self.N_nodes, iteration))
self.comm.Bcast(self.buf_int, self.master_id)
self.verbose_mpi_out('After Bcast, cpu %d/%d, iter %d'%(self.myid, self.N_nodes, iteration))

else:
# receive orders from the master
self.verbose_mpi_out('At Bcast, cpu %d/%d, iter %d'%(self.myid, self.N_nodes, iteration))
self.comm.Bcast(self.buf_int, self.master_id)
self.verbose_mpi_out('After Bcast, cpu %d/%d, iter %d'%(self.myid, self.N_nodes, iteration))

orders_from_master = ch.buffer_2_list_of_strings(self.buf_int)

orders_from_master = self._broadcast_from_master(orders_from_master)
# check if simulation has to be ended
if 'stop' in orders_from_master:
break
Expand All @@ -353,23 +343,47 @@ def run(self):
self.comm.Barrier()
self.verbose_mpi_out('After barrier L4 (cpu %d)'%self.comm.Get_rank())

iteration+=1
self.iteration+=1

# (TEMPORARY!) To stop
# if iteration==10000:
# if self.iteration==10000:
# break
# (TEMPORARY!)

def _print_some_info_on_comm(self, thisslice, t_start, t_end):
    """Log one worker iteration (timing plus which slice was treated).

    Only active when self.verbose is set. Uses self.iteration (the
    ring-wide iteration counter kept on the instance) rather than a
    passed-in value, so all log lines agree on the iteration number.

    Parameters
    ----------
    thisslice : object or None
        Slice just treated (None when this CPU had no work).
    t_start, t_end : float
        Wall-clock timestamps (seconds) bracketing the treatment.
    """
    if not self.verbose:
        return
    stamp = time.strftime("%d/%m/%Y %H:%M:%S", time.localtime(t_start))
    if thisslice is not None:
        print2logandstdo('Iter start on %s, iter%05d - I am %02d.%02d and I treated slice %d/%d of bunch %d/%d, lasts %ds'%(
            stamp, self.iteration,
            self.myring, self.myid_in_ring,
            thisslice.slice_info['i_slice'], thisslice.slice_info['N_slices_tot_bunch'],
            thisslice.slice_info['info_parent_bunch']['i_bunch'],
            thisslice.slice_info['info_parent_bunch']['N_bunches_tot_beam'],
            t_end-t_start))
    else:
        print2logandstdo('Iter start on %s, iter%05d - I am %02d.%02d and I treated None, lasts %ds'%(
            stamp, self.iteration,
            self.myring, self.myid_in_ring,
            t_end-t_start))


def _broadcast_from_master(self, list_of_strings):
    """Broadcast a list of strings from the master rank to all CPUs.

    Collective operation: every rank must call this at the same point,
    since all ranks participate in the same MPI Bcast. The strings are
    packed into the shared fixed-size integer buffer self.buf_int.

    Parameters
    ----------
    list_of_strings : list of str or None
        On the master: the strings to broadcast. On other ranks the
        argument is ignored (typically None).

    Returns
    -------
    list of str
        The broadcast strings, identical on every rank.

    Raises
    ------
    ValueError
        If the encoded payload does not fit in the int buffer
        (N_buffer_int_size).
    """
    if self.I_am_the_master:
        # send orders
        buforders = ch.list_of_strings_2_buffer(list_of_strings)
        if len(buforders) > self.N_buffer_int_size:
            raise ValueError('Int buffer is too small!')
        # Zero the buffer first so stale bytes from a previous, longer
        # message cannot leak past the payload.
        self.buf_int = 0*self.buf_int
        self.buf_int[:len(buforders)] = buforders

        self.verbose_mpi_out('At Bcast, cpu %d/%d, iter %d'%(self.myid, self.N_nodes, self.iteration))
        self.comm.Bcast(self.buf_int, self.master_id)
        self.verbose_mpi_out('After Bcast, cpu %d/%d, iter %d'%(self.myid, self.N_nodes, self.iteration))

    else:
        # receive orders from the master
        self.verbose_mpi_out('At Bcast, cpu %d/%d, iter %d'%(self.myid, self.N_nodes, self.iteration))
        self.comm.Bcast(self.buf_int, self.master_id)
        self.verbose_mpi_out('After Bcast, cpu %d/%d, iter %d'%(self.myid, self.N_nodes, self.iteration))

        list_of_strings = ch.buffer_2_list_of_strings(self.buf_int)

    return list_of_strings

This file was deleted.

This file was deleted.

Loading