parallel docs, tests, default config updated to newconfig

1 parent 4b57636 commit 37767329b56fe11e105c6bb0d303feb0511fa97f @minrk committed May 25, 2011
@@ -328,8 +328,8 @@ class ClusterApplication(BaseIPythonApplication):
The cluster directory is resolved as follows:
- * If the ``--cluster-dir`` option is given, it is used.
- * If ``--cluster-dir`` is not given, the application directory is
+ * If the ``cluster_dir`` option is given, it is used.
+ * If ``cluster_dir`` is not given, the application directory is
resolved using the profile name as ``cluster_<profile>``. The search
path for this directory is i) the cwd, if it is found there,
and ii) ipython_dir otherwise.
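
For reference, the resolution order just described can be sketched in a few
lines (the helper name and the IOError are illustrative, not the
application's actual code)::

    import os

    def resolve_cluster_dir(profile, ipython_dir, cluster_dir=None):
        # An explicit cluster_dir always wins.
        if cluster_dir:
            return cluster_dir
        # Otherwise search the cwd first, then ipython_dir.
        dirname = 'cluster_%s' % profile
        for base in (os.getcwd(), ipython_dir):
            candidate = os.path.join(base, dirname)
            if os.path.isdir(candidate):
                return candidate
        raise IOError('no cluster directory found for profile %r' % profile)
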
@@ -46,8 +46,7 @@
default_config_file_name = u'ipcluster_config.py'
-_description = """\
-Start an IPython cluster for parallel computing.\n\n
+_description = """Start an IPython cluster for parallel computing.
An IPython cluster consists of 1 controller and 1 or more engines.
This command automates the startup of these processes using a wide
@@ -78,7 +77,8 @@
#-----------------------------------------------------------------------------
# Main application
#-----------------------------------------------------------------------------
-start_help = """
+start_help = """Start an IPython cluster for parallel computing
+
Start an ipython cluster by its profile name or cluster
directory. Cluster directories contain configuration, log and
security related files and are named using the convention
@@ -88,15 +88,17 @@
using its profile name, 'ipcluster start n=4 profile=<profile>',
otherwise use the 'cluster_dir' option.
"""
-stop_help = """
+stop_help = """Stop a running IPython cluster
+
Stop a running ipython cluster by its profile name or cluster
directory. Cluster directories are named using the convention
'cluster_<profile>'. If your cluster directory is in
the cwd or the ipython directory, you can simply refer to it
using its profile name, 'ipcluster stop profile=<profile>', otherwise
use the 'cluster_dir' option.
"""
-engines_help = """
+engines_help = """Start engines connected to an existing IPython cluster
+
Start one or more engines to connect to an existing Cluster
by profile name or cluster directory.
Cluster directories contain configuration, log and
@@ -107,7 +109,8 @@
using its profile name, 'ipcluster engines n=4 profile=<profile>',
otherwise use the 'cluster_dir' option.
"""
-create_help = """
+create_help = """Create an ipcluster profile by name
+
Create an ipython cluster directory by its profile name or
cluster directory path. Cluster directories contain
configuration, log and security related files and are named
@@ -119,7 +122,9 @@
`ipcluster create profile=mycluster`, which will put the directory
in `<ipython_dir>/cluster_mycluster`.
"""
-list_help = """List all available clusters, by cluster directory, that can
+list_help = """List available cluster profiles
+
+List all available clusters, by cluster directory, that can
be found in the current working directory or in the ipython
directory. Cluster directories are named using the convention
'cluster_<profile>'.
@@ -79,8 +79,8 @@
clients. The controller needs to be started before the engines and can be
configured using command line options or using a cluster directory. Cluster
directories contain config, log and security files and are usually located in
-your ipython directory and named as "cluster_<profile>". See the --profile
-and --cluster-dir options for details.
+your ipython directory and named as "cluster_<profile>". See the `profile`
+and `cluster_dir` options for details.
"""
@@ -92,14 +92,16 @@
flags = {}
flags.update(base_flags)
flags.update({
- 'usethreads' : ( {'IPControllerApp' : {'usethreads' : True}},
+ 'usethreads' : ( {'IPControllerApp' : {'use_threads' : True}},
'Use threads instead of processes for the schedulers'),
- 'sqlitedb' : ({'HubFactory' : {'db_class' : 'IPython.parallel.controller.sqlitedb.SQLiteDB'}},
+ 'sqlitedb' : ({'HubFactory' : Config({'db_class' : 'IPython.parallel.controller.sqlitedb.SQLiteDB'})},
'use the SQLiteDB backend'),
- 'mongodb' : ({'HubFactory' : {'db_class' : 'IPython.parallel.controller.mongodb.MongoDB'}},
+ 'mongodb' : ({'HubFactory' : Config({'db_class' : 'IPython.parallel.controller.mongodb.MongoDB'})},
'use the MongoDB backend'),
- 'dictdb' : ({'HubFactory' : {'db_class' : 'IPython.parallel.controller.dictdb.DictDB'}},
+ 'dictdb' : ({'HubFactory' : Config({'db_class' : 'IPython.parallel.controller.dictdb.DictDB'})},
'use the in-memory DictDB backend'),
+ 'reuse' : ({'IPControllerApp' : Config({'reuse_files' : True})},
+ 'reuse existing json connection files')
})
flags.update()
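
Each flag maps a one-word command-line switch onto a ``{Owner: {trait:
value}}`` override; wrapping the inner dict in ``Config`` (from
``IPython.config.loader`` in this era) lets it merge like any other config
section. A plain-dict sketch of the same mechanics (``apply_flag`` is
hypothetical)::

    # How a newconfig flag is applied: merge its overrides into the
    # nested {class: {trait: value}} config structure.
    flags = {
        'reuse': ({'IPControllerApp': {'reuse_files': True}},
                  'reuse existing json connection files'),
    }

    def apply_flag(config, name):
        overrides, _help = flags[name]
        for cls, traits in overrides.items():
            config.setdefault(cls, {}).update(traits)
        return config

    config = apply_flag({}, 'reuse')
    assert config == {'IPControllerApp': {'reuse_files': True}}
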
@@ -133,15 +135,15 @@ class IPControllerApp(ClusterApplication):
help="import statements to be run at startup. Necessary in some environments"
)
- usethreads = Bool(False, config=True,
+ use_threads = Bool(False, config=True,
help='Use threads instead of processes for the schedulers',
)
# internal
children = List()
mq_class = Unicode('zmq.devices.ProcessMonitoredQueue')
- def _usethreads_changed(self, name, old, new):
+ def _use_threads_changed(self, name, old, new):
self.mq_class = 'zmq.devices.%sMonitoredQueue'%('Thread' if new else 'Process')
aliases = Dict(dict(
@@ -152,7 +154,7 @@ def _usethreads_changed(self, name, old, new):
reuse_files = 'IPControllerApp.reuse_files',
secure = 'IPControllerApp.secure',
ssh = 'IPControllerApp.ssh_server',
- usethreads = 'IPControllerApp.usethreads',
+ use_threads = 'IPControllerApp.use_threads',
import_statements = 'IPControllerApp.import_statements',
location = 'IPControllerApp.location',
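
The renamed ``_use_threads_changed`` handler keeps ``mq_class`` in sync with
the trait; the string it computes can be checked standalone::

    def mq_class_for(use_threads):
        # Mirrors _use_threads_changed: pick the thread- or
        # process-based monitored queue by dotted name.
        return 'zmq.devices.%sMonitoredQueue' % ('Thread' if use_threads else 'Process')

    assert mq_class_for(False) == 'zmq.devices.ProcessMonitoredQueue'
    assert mq_class_for(True) == 'zmq.devices.ThreadMonitoredQueue'
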
@@ -271,7 +273,7 @@ def init_schedulers(self):
mq = import_item(str(self.mq_class))
hub = self.factory
- # maybe_inproc = 'inproc://monitor' if self.usethreads else self.monitor_url
+ # maybe_inproc = 'inproc://monitor' if self.use_threads else self.monitor_url
# IOPub relay (in a Process)
q = mq(zmq.PUB, zmq.SUB, zmq.PUB, 'N/A','iopub')
q.bind_in(hub.client_info['iopub'])
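
For context, this is pyzmq's monitored-queue device; a self-contained sketch
of the same wiring (the tcp addresses are made up, the real ones come from
hub.client_info and hub.engine_info)::

    import zmq
    from zmq.devices import ProcessMonitoredQueue

    # Relay messages between a PUB and a SUB socket, mirroring all
    # traffic on a monitor PUB socket with the b'iopub' prefix.
    q = ProcessMonitoredQueue(zmq.PUB, zmq.SUB, zmq.PUB, b'N/A', b'iopub')
    q.bind_in('tcp://127.0.0.1:10101')      # clients' SUB sockets connect here
    q.bind_out('tcp://127.0.0.1:10102')     # engines' PUB sockets connect here
    q.setsockopt_out(zmq.SUBSCRIBE, b'')    # SUB side must subscribe to receive
    q.connect_mon('tcp://127.0.0.1:10103')  # relayed traffic is mirrored here
    q.start()                               # device runs in its own process
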
@@ -46,7 +46,7 @@
#: The default config file name for this application
default_config_file_name = u'ipengine_config.py'
-_description = """Start an IPython engine for parallel computing.\n\n
+_description = """Start an IPython engine for parallel computing.
IPython engines run in parallel and perform computations on behalf of a client
and controller. A controller needs to be started before the engines. The
@@ -36,14 +36,14 @@
#: The default config file name for this application
default_config_file_name = u'iplogger_config.py'
-_description = """Start an IPython logger for parallel computing.\n\n
+_description = """Start an IPython logger for parallel computing.
IPython controllers and engines (and your own processes) can broadcast log messages
by registering a `zmq.log.handlers.PUBHandler` with the `logging` module. The
logger can be configured using command line options or using a cluster
directory. Cluster directories contain config, log and security files and are
usually located in your ipython directory and named as "cluster_<profile>".
-See the --profile and --cluster-dir options for details.
+See the `profile` and `cluster_dir` options for details.
"""
@@ -141,7 +141,6 @@ def complete_registration(self, msg):
self.kernel.start()
hb_addrs = [ disambiguate_url(addr, self.location) for addr in hb_addrs ]
heart = Heart(*map(str, hb_addrs), heart_id=identity)
- # ioloop.DelayedCallback(heart.start, 1000, self.loop).start()
heart.start()
@@ -48,7 +48,7 @@ def start(self):
def setup():
cp = TestProcessLauncher()
cp.cmd_and_args = ipcontroller_cmd_argv + \
- ['--profile', 'iptest', '--log-level', '99', '-r']
+ ['profile=iptest', 'log_level=50', '--reuse']
cp.start()
launchers.append(cp)
cluster_dir = os.path.join(get_ipython_dir(), 'cluster_iptest')
@@ -70,7 +70,7 @@ def add_engines(n=1, profile='iptest'):
eps = []
for i in range(n):
ep = TestProcessLauncher()
- ep.cmd_and_args = ipengine_cmd_argv + ['--profile', profile, '--log-level', '99']
+ ep.cmd_and_args = ipengine_cmd_argv + ['profile=%s'%profile, 'log_level=50']
ep.start()
launchers.append(ep)
eps.append(ep)
@@ -48,11 +48,11 @@ def test_args(self):
self.assertTrue(s.unpack is ss.default_unpacker)
self.assertEquals(s.username, os.environ.get('USER', 'username'))
- s = ss.StreamSession(username=None)
+ s = ss.StreamSession()
self.assertEquals(s.username, os.environ.get('USER', 'username'))
- self.assertRaises(TypeError, ss.StreamSession, packer='hi')
- self.assertRaises(TypeError, ss.StreamSession, unpacker='hi')
+ self.assertRaises(TypeError, ss.StreamSession, pack='hi')
+ self.assertRaises(TypeError, ss.StreamSession, unpack='hi')
u = str(uuid.uuid4())
s = ss.StreamSession(username='carrot', session=u)
self.assertEquals(s.session, u)
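
As for what the updated assertions exercise: ``pack``/``unpack`` are now the
keyword names, and both must be callables, so a custom serializer is passed
as a pair of functions. A sketch (the import path is assumed from this era's
layout)::

    import json
    from IPython.parallel.streamsession import StreamSession  # assumed path

    # Callables are accepted; strings such as pack='hi' raise TypeError.
    s = StreamSession(pack=lambda obj: json.dumps(obj),
                      unpack=lambda msg: json.loads(msg))
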
@@ -195,7 +195,7 @@ simply start a controller and engines on a single host using the
:command:`ipcluster` command. To start a controller and 4 engines on your
localhost, just do::
- $ ipcluster start -n 4
+ $ ipcluster start n=4
More details about starting the IPython controller and engines can be found
:ref:`here <parallel_process>`.
@@ -53,11 +53,11 @@ these things to happen.
Automatic starting using :command:`mpiexec` and :command:`ipcluster`
--------------------------------------------------------------------
-The easiest approach is to use the `mpiexec` mode of :command:`ipcluster`,
+The easiest approach is to use the `MPIExec` Launchers in :command:`ipcluster`,
which will first start a controller and then a set of engines using
:command:`mpiexec`::
- $ ipcluster mpiexec -n 4
+ $ ipcluster start n=4 elauncher=MPIExecEngineSetLauncher
This approach is best as interrupting :command:`ipcluster` will automatically
stop and clean up the controller and engines.
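
The same launcher can be selected from a cluster directory's config file
instead of the command line; a sketch, assuming this era's trait and class
names::

    # ipcluster_config.py -- sketch only; trait names are assumptions
    c = get_config()

    # Equivalent to passing elauncher=MPIExecEngineSetLauncher above:
    c.IPClusterEngines.engine_launcher = \
        'IPython.parallel.apps.launcher.MPIExecEngineSetLauncher'
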
@@ -68,14 +68,14 @@ Manual starting using :command:`mpiexec`
If you want to start the IPython engines using the :command:`mpiexec`, just
do::
- $ mpiexec -n 4 ipengine --mpi=mpi4py
+ $ mpiexec -n 4 ipengine mpi=mpi4py
This requires that you already have a controller running and that the JSON
connection files for the engines are in place. We also have built-in support for
PyTrilinos [PyTrilinos]_, which can be used (assuming it is installed) by
starting the engines with::
- $ mpiexec -n 4 ipengine --mpi=pytrilinos
+ $ mpiexec -n 4 ipengine mpi=pytrilinos
Automatic starting using PBS and :command:`ipcluster`
------------------------------------------------------
@@ -110,7 +110,7 @@ distributed array. Save the following text in a file called :file:`psum.py`:
Now, start an IPython cluster::
- $ ipcluster start -p mpi -n 4
+ $ ipcluster start profile=mpi n=4
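
Once the cluster is up, connecting and running :file:`psum.py` on every
engine looks roughly like this (a sketch; it assumes the file saved above)::

    from IPython.parallel import Client

    rc = Client(profile='mpi')   # connect using the mpi profile
    view = rc[:]                 # a DirectView on all engines
    view.run('psum.py')          # define psum() on every engine
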
.. note::
@@ -19,7 +19,7 @@ To follow along with this tutorial, you will need to start the IPython
controller and four IPython engines. The simplest way of doing this is to use
the :command:`ipcluster` command::
- $ ipcluster start -n 4
+ $ ipcluster start n=4
For more detailed information about starting the controller and engines, see
our :ref:`introduction <ip1par>` to using IPython for parallel computing.