Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with
or
.
Download ZIP
Browse files

rebase IPython.parallel after removal of IPython.kernel

This commit removes all '*z' suffixes from scripts and docs,
as there is no longer conflict with IPython.kernel.
  • Loading branch information...
commit e950e624a7ac726812b0bd595432db4d62e2f974 1 parent 24641d1
@minrk minrk authored
Showing with 451 additions and 932 deletions.
  1. +90 −33 IPython/config/default/ipcluster_config.py
  2. +0 −241 IPython/config/default/ipclusterz_config.py
  3. +122 −78 IPython/config/default/ipcontroller_config.py
  4. +0 −180 IPython/config/default/ipcontrollerz_config.py
  5. +5 −10 IPython/config/default/ipengine_config.py
  6. +0 −85 IPython/config/default/ipenginez_config.py
  7. +6 −6 IPython/parallel/clusterdir.py
  8. +26 −26 IPython/parallel/ipclusterapp.py
  9. +3 −3 IPython/parallel/ipcontrollerapp.py
  10. +3 −3 IPython/parallel/ipengineapp.py
  11. +1 −1  IPython/parallel/iploggerapp.py
  12. +18 −18 IPython/parallel/launcher.py
  13. +1 −1  IPython/parallel/logwatcher.py
  14. 0  IPython/parallel/scripts/{ipclusterz → ipcluster}
  15. 0  IPython/parallel/scripts/{ipcontrollerz → ipcontroller}
  16. 0  IPython/parallel/scripts/{ipenginez → ipengine}
  17. 0  IPython/parallel/scripts/{iploggerz → iplogger}
  18. +2 −2 IPython/parallel/tests/__init__.py
  19. +1 −1  IPython/parallel/tests/test_newserialized.py
  20. +1 −0  IPython/testing/iptest.py
  21. +1 −2  docs/source/index.txt
  22. +31 −90 docs/source/install/install.txt
  23. 0  docs/source/{parallelz → parallel}/asian_call.pdf
  24. 0  docs/source/{parallelz → parallel}/asian_call.png
  25. 0  docs/source/{parallelz → parallel}/asian_put.pdf
  26. 0  docs/source/{parallelz → parallel}/asian_put.png
  27. 0  docs/source/{parallelz → parallel}/dag_dependencies.txt
  28. 0  docs/source/{parallelz → parallel}/dagdeps.pdf
  29. 0  docs/source/{parallelz → parallel}/dagdeps.png
  30. 0  docs/source/{parallelz → parallel}/hpc_job_manager.pdf
  31. 0  docs/source/{parallelz → parallel}/hpc_job_manager.png
  32. +15 −5 docs/source/parallel/index.txt
  33. 0  docs/source/{parallelz → parallel}/ipcluster_create.pdf
  34. 0  docs/source/{parallelz → parallel}/ipcluster_create.png
  35. 0  docs/source/{parallelz → parallel}/ipcluster_start.pdf
  36. 0  docs/source/{parallelz → parallel}/ipcluster_start.png
  37. 0  docs/source/{parallelz → parallel}/ipython_shell.pdf
  38. 0  docs/source/{parallelz → parallel}/ipython_shell.png
  39. 0  docs/source/{parallelz → parallel}/mec_simple.pdf
  40. 0  docs/source/{parallelz → parallel}/mec_simple.png
  41. +2 −2 docs/source/{parallelz → parallel}/parallel_demos.txt
  42. 0  docs/source/{parallelz → parallel}/parallel_details.txt
  43. +4 −4 docs/source/{parallelz → parallel}/parallel_intro.txt
  44. +10 −10 docs/source/{parallelz → parallel}/parallel_mpi.txt
  45. +3 −3 docs/source/{parallelz → parallel}/parallel_multiengine.txt
  46. 0  docs/source/{parallelz → parallel}/parallel_pi.pdf
  47. 0  docs/source/{parallelz → parallel}/parallel_pi.png
  48. +68 −68 docs/source/{parallelz → parallel}/parallel_process.txt
  49. +1 −1  docs/source/{parallelz → parallel}/parallel_security.txt
  50. +5 −5 docs/source/{parallelz → parallel}/parallel_task.txt
  51. 0  docs/source/{parallelz → parallel}/parallel_transition.txt
  52. +22 −22 docs/source/{parallelz → parallel}/parallel_winhpc.txt
  53. 0  docs/source/{parallelz → parallel}/simpledag.pdf
  54. 0  docs/source/{parallelz → parallel}/simpledag.png
  55. 0  docs/source/{parallelz → parallel}/single_digits.pdf
  56. 0  docs/source/{parallelz → parallel}/single_digits.png
  57. 0  docs/source/{parallelz → parallel}/two_digit_counts.pdf
  58. 0  docs/source/{parallelz → parallel}/two_digit_counts.png
  59. 0  docs/source/{parallelz → parallel}/winhpc_index.txt
  60. +0 −22 docs/source/parallelz/index.txt
  61. +5 −5 setup.py
  62. +5 −5 setupbase.py
View
123 IPython/config/default/ipcluster_config.py
@@ -11,8 +11,8 @@
# - Start as a regular process on localhost.
# - Start using mpiexec.
# - Start using the Windows HPC Server 2008 scheduler
-# - Start using PBS
-# - Start using SSH (currently broken)
+# - Start using PBS/SGE
+# - Start using SSH
# The selected launchers can be configured below.
@@ -21,15 +21,18 @@
# - LocalControllerLauncher
# - MPIExecControllerLauncher
# - PBSControllerLauncher
+# - SGEControllerLauncher
# - WindowsHPCControllerLauncher
-# c.Global.controller_launcher = 'IPython.kernel.launcher.LocalControllerLauncher'
+# c.Global.controller_launcher = 'IPython.parallel.launcher.LocalControllerLauncher'
+# c.Global.controller_launcher = 'IPython.parallel.launcher.PBSControllerLauncher'
# Options are:
# - LocalEngineSetLauncher
# - MPIExecEngineSetLauncher
# - PBSEngineSetLauncher
+# - SGEEngineSetLauncher
# - WindowsHPCEngineSetLauncher
-# c.Global.engine_launcher = 'IPython.kernel.launcher.LocalEngineSetLauncher'
+# c.Global.engine_launcher = 'IPython.parallel.launcher.LocalEngineSetLauncher'
#-----------------------------------------------------------------------------
# Global configuration
@@ -68,23 +71,23 @@
# MPIExec launchers
#-----------------------------------------------------------------------------
-# The mpiexec/mpirun command to use in started the controller.
-# c.MPIExecControllerLauncher.mpi_cmd = ['mpiexec']
+# The mpiexec/mpirun command to use in both the controller and engines.
+# c.MPIExecLauncher.mpi_cmd = ['mpiexec']
# Additional arguments to pass to the actual mpiexec command.
+# c.MPIExecLauncher.mpi_args = []
+
+# The mpiexec/mpirun command and args can be overridden if they should be different
+# for controller and engines.
+# c.MPIExecControllerLauncher.mpi_cmd = ['mpiexec']
# c.MPIExecControllerLauncher.mpi_args = []
+# c.MPIExecEngineSetLauncher.mpi_cmd = ['mpiexec']
+# c.MPIExecEngineSetLauncher.mpi_args = []
# The command line argument to call the controller with.
# c.MPIExecControllerLauncher.controller_args = \
# ['--log-to-file','--log-level', '40']
-
-# The mpiexec/mpirun command to use in started the controller.
-# c.MPIExecEngineSetLauncher.mpi_cmd = ['mpiexec']
-
-# Additional arguments to pass to the actual mpiexec command.
-# c.MPIExecEngineSetLauncher.mpi_args = []
-
# Command line argument passed to the engines.
# c.MPIExecEngineSetLauncher.engine_args = ['--log-to-file','--log-level', '40']
@@ -95,51 +98,105 @@
# SSH launchers
#-----------------------------------------------------------------------------
-# Todo
+# ipclusterz can be used to launch controller and engines remotely via ssh.
+# Note that currently ipclusterz does not do any file distribution, so if
+# machines are not on a shared filesystem, config and json files must be
+# distributed. For this reason, the reuse_files defaults to True on an
+# ssh-launched Controller. This flag can be overridden by the program_args
+# attribute of c.SSHControllerLauncher.
+
+# set the ssh cmd for launching remote commands. The default is ['ssh']
+# c.SSHLauncher.ssh_cmd = ['ssh']
+
+# set the ssh cmd for launching remote commands. The default is ['ssh']
+# c.SSHLauncher.ssh_args = ['tt']
+
+# Set the user and hostname for the controller
+# c.SSHControllerLauncher.hostname = 'controller.example.com'
+# c.SSHControllerLauncher.user = os.environ.get('USER','username')
+
+# Set the arguments to be passed to ipcontrollerz
+# note that remotely launched ipcontrollerz will not get the contents of
+# the local ipcontrollerz_config.py unless it resides on the *remote host*
+# in the location specified by the --cluster_dir argument.
+# c.SSHControllerLauncher.program_args = ['-r', '-ip', '0.0.0.0', '--cluster_dir', '/path/to/cd']
+
+# Set the default args passed to ipenginez for SSH launched engines
+# c.SSHEngineSetLauncher.engine_args = ['--mpi', 'mpi4py']
+# SSH engines are launched as a dict of locations/n-engines.
+# if a value is a tuple instead of an int, it is assumed to be of the form
+# (n, [args]), setting the arguments passed to ipenginez on `host`.
+# otherwise, c.SSHEngineSetLauncher.engine_args will be used as the default.
+
+# In this case, there will be 3 engines at my.example.com, and
+# 2 at you@ipython.scipy.org with a special json connector location.
+# c.SSHEngineSetLauncher.engines = {'my.example.com' : 3,
+# 'you@ipython.scipy.org' : (2, ['-f', '/path/to/ipcontroller-engine.json']}
+# }
#-----------------------------------------------------------------------------
# Unix batch (PBS) schedulers launchers
#-----------------------------------------------------------------------------
+# SGE and PBS are very similar. All configurables in this section called 'PBS*'
+# also exist as 'SGE*'.
+
# The command line program to use to submit a PBS job.
-# c.PBSControllerLauncher.submit_command = 'qsub'
+# c.PBSLauncher.submit_command = ['qsub']
# The command line program to use to delete a PBS job.
-# c.PBSControllerLauncher.delete_command = 'qdel'
+# c.PBSLauncher.delete_command = ['qdel']
+
+# The PBS queue in which the job should run
+# c.PBSLauncher.queue = 'myqueue'
# A regular expression that takes the output of qsub and find the job id.
-# c.PBSControllerLauncher.job_id_regexp = r'\d+'
+# c.PBSLauncher.job_id_regexp = r'\d+'
+
+# If for some reason the Controller and Engines have different options above, they
+# can be set as c.PBSControllerLauncher.<option> etc.
+
+# PBS and SGE have default templates, but you can specify your own, either as strings
+# or from files, as described here:
# The batch submission script used to start the controller. This is where
-# environment variables would be setup, etc. This string is interpolated using
+# environment variables would be setup, etc. This string is interpreted using
# the Itpl module in IPython.external. Basically, you can use ${n} for the
# number of engines and ${cluster_dir} for the cluster_dir.
-# c.PBSControllerLauncher.batch_template = """"""
+# c.PBSControllerLauncher.batch_template = """
+# #PBS -N ipcontroller
+# #PBS -q $queue
+#
+# ipcontrollerz --cluster-dir $cluster_dir
+# """
+
+# You can also load this template from a file
+# c.PBSControllerLauncher.batch_template_file = u"/path/to/my/template.sh"
# The name of the instantiated batch script that will actually be used to
# submit the job. This will be written to the cluster directory.
-# c.PBSControllerLauncher.batch_file_name = u'pbs_batch_script_controller'
-
-
-# The command line program to use to submit a PBS job.
-# c.PBSEngineSetLauncher.submit_command = 'qsub'
-
-# The command line program to use to delete a PBS job.
-# c.PBSEngineSetLauncher.delete_command = 'qdel'
-
-# A regular expression that takes the output of qsub and find the job id.
-# c.PBSEngineSetLauncher.job_id_regexp = r'\d+'
+# c.PBSControllerLauncher.batch_file_name = u'pbs_controller'
# The batch submission script used to start the engines. This is where
-# environment variables would be setup, etc. This string is interpolated using
+# environment variables would be setup, etc. This string is interpreted using
# the Itpl module in IPython.external. Basically, you can use ${n} for the
# number of engines and ${cluster_dir} for the cluster_dir.
-# c.PBSEngineSetLauncher.batch_template = """"""
+# c.PBSEngineSetLauncher.batch_template = """
+# #PBS -N ipcontroller
+# #PBS -l nprocs=$n
+#
+# ipenginez --cluster-dir $cluster_dir$s
+# """
+
+# You can also load this template from a file
+# c.PBSControllerLauncher.batch_template_file = u"/path/to/my/template.sh"
# The name of the instantiated batch script that will actually be used to
# submit the job. This will be written to the cluster directory.
-# c.PBSEngineSetLauncher.batch_file_name = u'pbs_batch_script_engines'
+# c.PBSEngineSetLauncher.batch_file_name = u'pbs_engines'
+
+
#-----------------------------------------------------------------------------
# Windows HPC Server 2008 launcher configuration
View
241 IPython/config/default/ipclusterz_config.py
@@ -1,241 +0,0 @@
-import os
-
-c = get_config()
-
-#-----------------------------------------------------------------------------
-# Select which launchers to use
-#-----------------------------------------------------------------------------
-
-# This allows you to control what method is used to start the controller
-# and engines. The following methods are currently supported:
-# - Start as a regular process on localhost.
-# - Start using mpiexec.
-# - Start using the Windows HPC Server 2008 scheduler
-# - Start using PBS/SGE
-# - Start using SSH
-
-
-# The selected launchers can be configured below.
-
-# Options are:
-# - LocalControllerLauncher
-# - MPIExecControllerLauncher
-# - PBSControllerLauncher
-# - SGEControllerLauncher
-# - WindowsHPCControllerLauncher
-# c.Global.controller_launcher = 'IPython.parallel.launcher.LocalControllerLauncher'
-# c.Global.controller_launcher = 'IPython.parallel.launcher.PBSControllerLauncher'
-
-# Options are:
-# - LocalEngineSetLauncher
-# - MPIExecEngineSetLauncher
-# - PBSEngineSetLauncher
-# - SGEEngineSetLauncher
-# - WindowsHPCEngineSetLauncher
-# c.Global.engine_launcher = 'IPython.parallel.launcher.LocalEngineSetLauncher'
-
-#-----------------------------------------------------------------------------
-# Global configuration
-#-----------------------------------------------------------------------------
-
-# The default number of engines that will be started. This is overridden by
-# the -n command line option: "ipcluster start -n 4"
-# c.Global.n = 2
-
-# Log to a file in cluster_dir/log, otherwise just log to sys.stdout.
-# c.Global.log_to_file = False
-
-# Remove old logs from cluster_dir/log before starting.
-# c.Global.clean_logs = True
-
-# The working directory for the process. The application will use os.chdir
-# to change to this directory before starting.
-# c.Global.work_dir = os.getcwd()
-
-
-#-----------------------------------------------------------------------------
-# Local process launchers
-#-----------------------------------------------------------------------------
-
-# The command line arguments to call the controller with.
-# c.LocalControllerLauncher.controller_args = \
-# ['--log-to-file','--log-level', '40']
-
-# The working directory for the controller
-# c.LocalEngineSetLauncher.work_dir = u''
-
-# Command line argument passed to the engines.
-# c.LocalEngineSetLauncher.engine_args = ['--log-to-file','--log-level', '40']
-
-#-----------------------------------------------------------------------------
-# MPIExec launchers
-#-----------------------------------------------------------------------------
-
-# The mpiexec/mpirun command to use in both the controller and engines.
-# c.MPIExecLauncher.mpi_cmd = ['mpiexec']
-
-# Additional arguments to pass to the actual mpiexec command.
-# c.MPIExecLauncher.mpi_args = []
-
-# The mpiexec/mpirun command and args can be overridden if they should be different
-# for controller and engines.
-# c.MPIExecControllerLauncher.mpi_cmd = ['mpiexec']
-# c.MPIExecControllerLauncher.mpi_args = []
-# c.MPIExecEngineSetLauncher.mpi_cmd = ['mpiexec']
-# c.MPIExecEngineSetLauncher.mpi_args = []
-
-# The command line argument to call the controller with.
-# c.MPIExecControllerLauncher.controller_args = \
-# ['--log-to-file','--log-level', '40']
-
-# Command line argument passed to the engines.
-# c.MPIExecEngineSetLauncher.engine_args = ['--log-to-file','--log-level', '40']
-
-# The default number of engines to start if not given elsewhere.
-# c.MPIExecEngineSetLauncher.n = 1
-
-#-----------------------------------------------------------------------------
-# SSH launchers
-#-----------------------------------------------------------------------------
-
-# ipclusterz can be used to launch controller and engines remotely via ssh.
-# Note that currently ipclusterz does not do any file distribution, so if
-# machines are not on a shared filesystem, config and json files must be
-# distributed. For this reason, the reuse_files defaults to True on an
-# ssh-launched Controller. This flag can be overridded by the program_args
-# attribute of c.SSHControllerLauncher.
-
-# set the ssh cmd for launching remote commands. The default is ['ssh']
-# c.SSHLauncher.ssh_cmd = ['ssh']
-
-# set the ssh cmd for launching remote commands. The default is ['ssh']
-# c.SSHLauncher.ssh_args = ['tt']
-
-# Set the user and hostname for the controller
-# c.SSHControllerLauncher.hostname = 'controller.example.com'
-# c.SSHControllerLauncher.user = os.environ.get('USER','username')
-
-# Set the arguments to be passed to ipcontrollerz
-# note that remotely launched ipcontrollerz will not get the contents of
-# the local ipcontrollerz_config.py unless it resides on the *remote host*
-# in the location specified by the --cluster_dir argument.
-# c.SSHControllerLauncher.program_args = ['-r', '-ip', '0.0.0.0', '--cluster_dir', '/path/to/cd']
-
-# Set the default args passed to ipenginez for SSH launched engines
-# c.SSHEngineSetLauncher.engine_args = ['--mpi', 'mpi4py']
-
-# SSH engines are launched as a dict of locations/n-engines.
-# if a value is a tuple instead of an int, it is assumed to be of the form
-# (n, [args]), setting the arguments to passed to ipenginez on `host`.
-# otherwise, c.SSHEngineSetLauncher.engine_args will be used as the default.
-
-# In this case, there will be 3 engines at my.example.com, and
-# 2 at you@ipython.scipy.org with a special json connector location.
-# c.SSHEngineSetLauncher.engines = {'my.example.com' : 3,
-# 'you@ipython.scipy.org' : (2, ['-f', '/path/to/ipcontroller-engine.json']}
-# }
-
-#-----------------------------------------------------------------------------
-# Unix batch (PBS) schedulers launchers
-#-----------------------------------------------------------------------------
-
-# SGE and PBS are very similar. All configurables in this section called 'PBS*'
-# also exist as 'SGE*'.
-
-# The command line program to use to submit a PBS job.
-# c.PBSLauncher.submit_command = ['qsub']
-
-# The command line program to use to delete a PBS job.
-# c.PBSLauncher.delete_command = ['qdel']
-
-# The PBS queue in which the job should run
-# c.PBSLauncher.queue = 'myqueue'
-
-# A regular expression that takes the output of qsub and find the job id.
-# c.PBSLauncher.job_id_regexp = r'\d+'
-
-# If for some reason the Controller and Engines have different options above, they
-# can be set as c.PBSControllerLauncher.<option> etc.
-
-# PBS and SGE have default templates, but you can specify your own, either as strings
-# or from files, as described here:
-
-# The batch submission script used to start the controller. This is where
-# environment variables would be setup, etc. This string is interpreted using
-# the Itpl module in IPython.external. Basically, you can use ${n} for the
-# number of engine and ${cluster_dir} for the cluster_dir.
-# c.PBSControllerLauncher.batch_template = """
-# #PBS -N ipcontroller
-# #PBS -q $queue
-#
-# ipcontrollerz --cluster-dir $cluster_dir
-# """
-
-# You can also load this template from a file
-# c.PBSControllerLauncher.batch_template_file = u"/path/to/my/template.sh"
-
-# The name of the instantiated batch script that will actually be used to
-# submit the job. This will be written to the cluster directory.
-# c.PBSControllerLauncher.batch_file_name = u'pbs_controller'
-
-# The batch submission script used to start the engines. This is where
-# environment variables would be setup, etc. This string is interpreted using
-# the Itpl module in IPython.external. Basically, you can use ${n} for the
-# number of engine and ${cluster_dir} for the cluster_dir.
-# c.PBSEngineSetLauncher.batch_template = """
-# #PBS -N ipcontroller
-# #PBS -l nprocs=$n
-#
-# ipenginez --cluster-dir $cluster_dir$s
-# """
-
-# You can also load this template from a file
-# c.PBSControllerLauncher.batch_template_file = u"/path/to/my/template.sh"
-
-# The name of the instantiated batch script that will actually be used to
-# submit the job. This will be written to the cluster directory.
-# c.PBSEngineSetLauncher.batch_file_name = u'pbs_engines'
-
-
-
-#-----------------------------------------------------------------------------
-# Windows HPC Server 2008 launcher configuration
-#-----------------------------------------------------------------------------
-
-# c.IPControllerJob.job_name = 'IPController'
-# c.IPControllerJob.is_exclusive = False
-# c.IPControllerJob.username = r'USERDOMAIN\USERNAME'
-# c.IPControllerJob.priority = 'Highest'
-# c.IPControllerJob.requested_nodes = ''
-# c.IPControllerJob.project = 'MyProject'
-
-# c.IPControllerTask.task_name = 'IPController'
-# c.IPControllerTask.controller_cmd = [u'ipcontroller.exe']
-# c.IPControllerTask.controller_args = ['--log-to-file', '--log-level', '40']
-# c.IPControllerTask.environment_variables = {}
-
-# c.WindowsHPCControllerLauncher.scheduler = 'HEADNODE'
-# c.WindowsHPCControllerLauncher.job_file_name = u'ipcontroller_job.xml'
-
-
-# c.IPEngineSetJob.job_name = 'IPEngineSet'
-# c.IPEngineSetJob.is_exclusive = False
-# c.IPEngineSetJob.username = r'USERDOMAIN\USERNAME'
-# c.IPEngineSetJob.priority = 'Highest'
-# c.IPEngineSetJob.requested_nodes = ''
-# c.IPEngineSetJob.project = 'MyProject'
-
-# c.IPEngineTask.task_name = 'IPEngine'
-# c.IPEngineTask.engine_cmd = [u'ipengine.exe']
-# c.IPEngineTask.engine_args = ['--log-to-file', '--log-level', '40']
-# c.IPEngineTask.environment_variables = {}
-
-# c.WindowsHPCEngineSetLauncher.scheduler = 'HEADNODE'
-# c.WindowsHPCEngineSetLauncher.job_file_name = u'ipengineset_job.xml'
-
-
-
-
-
-
-
View
200 IPython/config/default/ipcontroller_config.py
@@ -25,112 +25,156 @@
# be imported in the controller for pickling to work.
# c.Global.import_statements = ['import math']
-# Reuse the controller's FURL files. If False, FURL files are regenerated
+# Reuse the controller's JSON files. If False, JSON files are regenerated
# each time the controller is run. If True, they will be reused, *but*, you
# also must set the network ports by hand. If set, this will override the
# values set for the client and engine connections below.
-# c.Global.reuse_furls = True
+# c.Global.reuse_files = True
-# Enable SSL encryption on all connections to the controller. If set, this
-# will override the values set for the client and engine connections below.
+# Enable exec_key authentication on all messages. Default is True
# c.Global.secure = True
# The working directory for the process. The application will use os.chdir
# to change to this directory before starting.
# c.Global.work_dir = os.getcwd()
+# The log url for logging to an `iploggerz` application. This will override
+# log-to-file.
+# c.Global.log_url = 'tcp://127.0.0.1:20202'
+
+# The specific external IP that is used to disambiguate multi-interface URLs.
+# The default behavior is to guess from external IPs gleaned from `socket`.
+# c.Global.location = '192.168.1.123'
+
+# The ssh server remote clients should use to connect to this controller.
+# It must be a machine that can see the interface specified in client_ip.
+# The default for client_ip is localhost, in which case the sshserver must
+# be an external IP of the controller machine.
+# c.Global.sshserver = 'controller.example.com'
+
+# the url to use for registration. If set, this overrides engine-ip,
+# engine-transport client-ip,client-transport, and regport.
+# c.RegistrationFactory.url = 'tcp://*:12345'
+
+# the port to use for registration. Clients and Engines both use this
+# port for registration.
+# c.RegistrationFactory.regport = 10101
+
#-----------------------------------------------------------------------------
-# Configure the client services
+# Configure the Task Scheduler
#-----------------------------------------------------------------------------
-# Basic client service config attributes
+# The routing scheme. 'pure' will use the pure-ZMQ scheduler. Any other
+# value will use a Python scheduler with various routing schemes.
+# python schemes are: lru, weighted, random, twobin. Default is 'weighted'.
+# Note that the pure ZMQ scheduler does not support many features, such as
+# dying engines, dependencies, or engine-subset load-balancing.
+# c.ControllerFactory.scheme = 'pure'
-# The network interface the controller will listen on for client connections.
-# This should be an IP address or hostname of the controller's host. The empty
-# string means listen on all interfaces.
-# c.FCClientServiceFactory.ip = ''
+# The pure ZMQ scheduler can limit the number of outstanding tasks per engine
+# by using the ZMQ HWM option. This allows engines with long-running tasks
+# to not steal too many tasks from other engines. The default is 0, which
+# means aggressively distribute messages, never waiting for them to finish.
+# c.ControllerFactory.hwm = 1
-# The TCP/IP port the controller will listen on for client connections. If 0
-# a random port will be used. If the controller's host has a firewall running
-# it must allow incoming traffic on this port.
-# c.FCClientServiceFactory.port = 0
+# Whether to use Threads or Processes to start the Schedulers. Threads will
+# use less resources, but potentially reduce throughput. Default is to
+# use processes. Note that a Python scheduler will always be in a Process.
+# c.ControllerFactory.usethreads
-# The client learns how to connect to the controller by looking at the
-# location field embedded in the FURL. If this field is empty, all network
-# interfaces that the controller is listening on will be listed. To have the
-# client connect on a particular interface, list it here.
-# c.FCClientServiceFactory.location = ''
+#-----------------------------------------------------------------------------
+# Configure the Hub
+#-----------------------------------------------------------------------------
+
+# Which class to use for the db backend. Currently supported are DictDB (the
+# default), and MongoDB. Uncomment this line to enable MongoDB, which will
+# slow-down the Hub's responsiveness, but also reduce its memory footprint.
+# c.HubFactory.db_class = 'IPython.parallel.mongodb.MongoDB'
-# Use SSL encryption for the client connection.
-# c.FCClientServiceFactory.secure = True
+# The heartbeat ping frequency. This is the frequency (in ms) at which the
+# Hub pings engines for heartbeats. This determines how quickly the Hub
+# will react to engines coming and going. A lower number means faster response
+# time, but more network activity. The default is 100ms
+# c.HubFactory.ping = 100
-# Reuse the client FURL each time the controller is started. If set, you must
-# also pick a specific network port above (FCClientServiceFactory.port).
-# c.FCClientServiceFactory.reuse_furls = False
+# HubFactory queue port pairs, to set by name: mux, iopub, control, task. Set
+# each as a tuple of length 2 of ints. The default is to find random
+# available ports
+# c.HubFactory.mux = (10102,10112)
#-----------------------------------------------------------------------------
-# Configure the engine services
+# Configure the client connections
#-----------------------------------------------------------------------------
-# Basic config attributes for the engine services.
+# Basic client connection config attributes
-# The network interface the controller will listen on for engine connections.
-# This should be an IP address or hostname of the controller's host. The empty
-# string means listen on all interfaces.
-# c.FCEngineServiceFactory.ip = ''
+# The network interface the controller will listen on for client connections.
+# This should be an IP address or interface on the controller. An asterisk
+# means listen on all interfaces. The transport can be any transport
+# supported by zeromq (tcp,epgm,pgm,ib,ipc):
+# c.HubFactory.client_ip = '*'
+# c.HubFactory.client_transport = 'tcp'
-# The TCP/IP port the controller will listen on for engine connections. If 0
-# a random port will be used. If the controller's host has a firewall running
-# it must allow incoming traffic on this port.
-# c.FCEngineServiceFactory.port = 0
+# individual client ports to configure by name: query_port, notifier_port
+# c.HubFactory.query_port = 12345
-# The engine learns how to connect to the controller by looking at the
-# location field embedded in the FURL. If this field is empty, all network
-# interfaces that the controller is listening on will be listed. To have the
-# client connect on a particular interface, list it here.
-# c.FCEngineServiceFactory.location = ''
+#-----------------------------------------------------------------------------
+# Configure the engine connections
+#-----------------------------------------------------------------------------
-# Use SSL encryption for the engine connection.
-# c.FCEngineServiceFactory.secure = True
+# Basic config attributes for the engine connections.
-# Reuse the client FURL each time the controller is started. If set, you must
-# also pick a specific network port above (FCClientServiceFactory.port).
-# c.FCEngineServiceFactory.reuse_furls = False
+# The network interface the controller will listen on for engine connections.
+# This should be an IP address or interface on the controller. An asterisk
+# means listen on all interfaces. The transport can be any transport
+# supported by zeromq (tcp,epgm,pgm,ib,ipc):
+# c.HubFactory.engine_ip = '*'
+# c.HubFactory.engine_transport = 'tcp'
+
+# set the engine heartbeat ports to use:
+# c.HubFactory.hb = (10303,10313)
#-----------------------------------------------------------------------------
-# Developer level configuration attributes
+# Configure the TaskRecord database backend
#-----------------------------------------------------------------------------
-# You shouldn't have to modify anything in this section. These attributes
-# are more for developers who want to change the behavior of the controller
-# at a fundamental level.
-
-# c.FCClientServiceFactory.cert_file = u'ipcontroller-client.pem'
-
-# default_client_interfaces = Config()
-# default_client_interfaces.Task.interface_chain = [
-# 'IPython.kernel.task.ITaskController',
-# 'IPython.kernel.taskfc.IFCTaskController'
-# ]
-#
-# default_client_interfaces.Task.furl_file = u'ipcontroller-tc.furl'
-#
-# default_client_interfaces.MultiEngine.interface_chain = [
-# 'IPython.kernel.multiengine.IMultiEngine',
-# 'IPython.kernel.multienginefc.IFCSynchronousMultiEngine'
-# ]
-#
-# default_client_interfaces.MultiEngine.furl_file = u'ipcontroller-mec.furl'
-#
-# c.FCEngineServiceFactory.interfaces = default_client_interfaces
-
-# c.FCEngineServiceFactory.cert_file = u'ipcontroller-engine.pem'
-
-# default_engine_interfaces = Config()
-# default_engine_interfaces.Default.interface_chain = [
-# 'IPython.kernel.enginefc.IFCControllerBase'
-# ]
-#
-# default_engine_interfaces.Default.furl_file = u'ipcontroller-engine.furl'
-#
-# c.FCEngineServiceFactory.interfaces = default_engine_interfaces
+# For memory/persistence reasons, tasks can be stored out-of-memory in a database.
+# Currently, only sqlite and mongodb are supported as backends, but the interface
+# is fairly simple, so advanced developers could write their own backend.
+
+# ----- in-memory configuration --------
+# this line restores the default behavior: in-memory storage of all results.
+# c.HubFactory.db_class = 'IPython.parallel.dictdb.DictDB'
+
+# ----- sqlite configuration --------
+# use this line to activate sqlite:
+# c.HubFactory.db_class = 'IPython.parallel.sqlitedb.SQLiteDB'
+
+# You can specify the name of the db-file. By default, this will be located
+# in the active cluster_dir, e.g. ~/.ipython/clusterz_default/tasks.db
+# c.SQLiteDB.filename = 'tasks.db'
+
+# You can also specify the location of the db-file, if you want it to be somewhere
+# other than the cluster_dir.
+# c.SQLiteDB.location = '/scratch/'
+
+# This will specify the name of the table for the controller to use. The default
+# behavior is to use the session ID of the SessionFactory object (a uuid). Overriding
+# this will result in results persisting for multiple sessions.
+# c.SQLiteDB.table = 'results'
+
+# ----- mongodb configuration --------
+# use this line to activate mongodb:
+# c.HubFactory.db_class = 'IPython.parallel.mongodb.MongoDB'
+
+# You can specify the args and kwargs pymongo will use when creating the Connection.
+# For more information on what these options might be, see pymongo documentation.
+# c.MongoDB.connection_kwargs = {}
+# c.MongoDB.connection_args = []
+
+# This will specify the name of the mongo database for the controller to use. The default
+# behavior is to use the session ID of the SessionFactory object (a uuid). Overriding
+# this will result in task results persisting through multiple sessions.
+# c.MongoDB.database = 'ipythondb'
+
+
View
180 IPython/config/default/ipcontrollerz_config.py
@@ -1,180 +0,0 @@
-from IPython.config.loader import Config
-
-c = get_config()
-
-#-----------------------------------------------------------------------------
-# Global configuration
-#-----------------------------------------------------------------------------
-
-# Basic Global config attributes
-
-# Start up messages are logged to stdout using the logging module.
-# These all happen before the twisted reactor is started and are
-# useful for debugging purposes. Can be (10=DEBUG,20=INFO,30=WARN,40=CRITICAL)
-# and smaller is more verbose.
-# c.Global.log_level = 20
-
-# Log to a file in cluster_dir/log, otherwise just log to sys.stdout.
-# c.Global.log_to_file = False
-
-# Remove old logs from cluster_dir/log before starting.
-# c.Global.clean_logs = True
-
-# A list of Python statements that will be run before starting the
-# controller. This is provided because occasionally certain things need to
-# be imported in the controller for pickling to work.
-# c.Global.import_statements = ['import math']
-
-# Reuse the controller's JSON files. If False, JSON files are regenerated
-# each time the controller is run. If True, they will be reused, *but*, you
-# also must set the network ports by hand. If set, this will override the
-# values set for the client and engine connections below.
-# c.Global.reuse_files = True
-
-# Enable exec_key authentication on all messages. Default is True
-# c.Global.secure = True
-
-# The working directory for the process. The application will use os.chdir
-# to change to this directory before starting.
-# c.Global.work_dir = os.getcwd()
-
-# The log url for logging to an `iploggerz` application. This will override
-# log-to-file.
-# c.Global.log_url = 'tcp://127.0.0.1:20202'
-
-# The specific external IP that is used to disambiguate multi-interface URLs.
-# The default behavior is to guess from external IPs gleaned from `socket`.
-# c.Global.location = '192.168.1.123'
-
-# The ssh server remote clients should use to connect to this controller.
-# It must be a machine that can see the interface specified in client_ip.
-# The default for client_ip is localhost, in which case the sshserver must
-# be an external IP of the controller machine.
-# c.Global.sshserver = 'controller.example.com'
-
-# the url to use for registration. If set, this overrides engine-ip,
-# engine-transport client-ip,client-transport, and regport.
-# c.RegistrationFactory.url = 'tcp://*:12345'
-
-# the port to use for registration. Clients and Engines both use this
-# port for registration.
-# c.RegistrationFactory.regport = 10101
-
-#-----------------------------------------------------------------------------
-# Configure the Task Scheduler
-#-----------------------------------------------------------------------------
-
-# The routing scheme. 'pure' will use the pure-ZMQ scheduler. Any other
-# value will use a Python scheduler with various routing schemes.
-# python schemes are: lru, weighted, random, twobin. Default is 'weighted'.
-# Note that the pure ZMQ scheduler does not support many features, such as
-# dying engines, dependencies, or engine-subset load-balancing.
-# c.ControllerFactory.scheme = 'pure'
-
-# The pure ZMQ scheduler can limit the number of outstanding tasks per engine
-# by using the ZMQ HWM option. This allows engines with long-running tasks
-# to not steal too many tasks from other engines. The default is 0, which
-# means agressively distribute messages, never waiting for them to finish.
-# c.ControllerFactory.hwm = 1
-
-# Whether to use Threads or Processes to start the Schedulers. Threads will
-# use less resources, but potentially reduce throughput. Default is to
-# use processes. Note that the a Python scheduler will always be in a Process.
-# c.ControllerFactory.usethreads
-
-#-----------------------------------------------------------------------------
-# Configure the Hub
-#-----------------------------------------------------------------------------
-
-# Which class to use for the db backend. Currently supported are DictDB (the
-# default), and MongoDB. Uncomment this line to enable MongoDB, which will
-# slow-down the Hub's responsiveness, but also reduce its memory footprint.
-# c.HubFactory.db_class = 'IPython.parallel.mongodb.MongoDB'
-
-# The heartbeat ping frequency. This is the frequency (in ms) at which the
-# Hub pings engines for heartbeats. This determines how quickly the Hub
-# will react to engines coming and going. A lower number means faster response
-# time, but more network activity. The default is 100ms
-# c.HubFactory.ping = 100
-
-# HubFactory queue port pairs, to set by name: mux, iopub, control, task. Set
-# each as a tuple of length 2 of ints. The default is to find random
-# available ports
-# c.HubFactory.mux = (10102,10112)
-
-#-----------------------------------------------------------------------------
-# Configure the client connections
-#-----------------------------------------------------------------------------
-
-# Basic client connection config attributes
-
-# The network interface the controller will listen on for client connections.
-# This should be an IP address or interface on the controller. An asterisk
-# means listen on all interfaces. The transport can be any transport
-# supported by zeromq (tcp,epgm,pgm,ib,ipc):
-# c.HubFactory.client_ip = '*'
-# c.HubFactory.client_transport = 'tcp'
-
-# individual client ports to configure by name: query_port, notifier_port
-# c.HubFactory.query_port = 12345
-
-#-----------------------------------------------------------------------------
-# Configure the engine connections
-#-----------------------------------------------------------------------------
-
-# Basic config attributes for the engine connections.
-
-# The network interface the controller will listen on for engine connections.
-# This should be an IP address or interface on the controller. An asterisk
-# means listen on all interfaces. The transport can be any transport
-# supported by zeromq (tcp,epgm,pgm,ib,ipc):
-# c.HubFactory.engine_ip = '*'
-# c.HubFactory.engine_transport = 'tcp'
-
-# set the engine heartbeat ports to use:
-# c.HubFactory.hb = (10303,10313)
-
-#-----------------------------------------------------------------------------
-# Configure the TaskRecord database backend
-#-----------------------------------------------------------------------------
-
-# For memory/persistance reasons, tasks can be stored out-of-memory in a database.
-# Currently, only sqlite and mongodb are supported as backends, but the interface
-# is fairly simple, so advanced developers could write their own backend.
-
-# ----- in-memory configuration --------
-# this line restores the default behavior: in-memory storage of all results.
-# c.HubFactory.db_class = 'IPython.parallel.dictdb.DictDB'
-
-# ----- sqlite configuration --------
-# use this line to activate sqlite:
-# c.HubFactory.db_class = 'IPython.parallel.sqlitedb.SQLiteDB'
-
-# You can specify the name of the db-file. By default, this will be located
-# in the active cluster_dir, e.g. ~/.ipython/clusterz_default/tasks.db
-# c.SQLiteDB.filename = 'tasks.db'
-
-# You can also specify the location of the db-file, if you want it to be somewhere
-# other than the cluster_dir.
-# c.SQLiteDB.location = '/scratch/'
-
-# This will specify the name of the table for the controller to use. The default
-# behavior is to use the session ID of the SessionFactory object (a uuid). Overriding
-# this will result in results persisting for multiple sessions.
-# c.SQLiteDB.table = 'results'
-
-# ----- mongodb configuration --------
-# use this line to activate mongodb:
-# c.HubFactory.db_class = 'IPython.parallel.mongodb.MongoDB'
-
-# You can specify the args and kwargs pymongo will use when creating the Connection.
-# For more information on what these options might be, see pymongo documentation.
-# c.MongoDB.connection_kwargs = {}
-# c.MongoDB.connection_args = []
-
-# This will specify the name of the mongo database for the controller to use. The default
-# behavior is to use the session ID of the SessionFactory object (a uuid). Overriding
-# this will result in task results persisting through multiple sessions.
-# c.MongoDB.database = 'ipythondb'
-
-
View
15 IPython/config/default/ipengine_config.py
@@ -29,10 +29,10 @@
# c.Global.connect_delay = 0.1
# c.Global.connect_max_tries = 15
-# By default, the engine will look for the controller's FURL file in its own
-# cluster directory. Sometimes, the FURL file will be elsewhere and this
-# attribute can be set to the full path of the FURL file.
-# c.Global.furl_file = u''
+# By default, the engine will look for the controller's JSON file in its own
+# cluster directory. Sometimes, the JSON file will be elsewhere and this
+# attribute can be set to the full path of the JSON file.
+# c.Global.url_file = u'/path/to/my/ipcontroller-engine.json'
# The working directory for the process. The application will use os.chdir
# to change to this directory before starting.
@@ -78,12 +78,7 @@
# You should not have to change these attributes.
-# c.Global.shell_class = 'IPython.kernel.core.interpreter.Interpreter'
-
-# c.Global.furl_file_name = u'ipcontroller-engine.furl'
-
-
-
+# c.Global.url_file_name = u'ipcontroller-engine.json'
View
85 IPython/config/default/ipenginez_config.py
@@ -1,85 +0,0 @@
-c = get_config()
-
-#-----------------------------------------------------------------------------
-# Global configuration
-#-----------------------------------------------------------------------------
-
-# Start up messages are logged to stdout using the logging module.
-# These all happen before the twisted reactor is started and are
-# useful for debugging purposes. Can be (10=DEBUG,20=INFO,30=WARN,40=CRITICAL)
-# and smaller is more verbose.
-# c.Global.log_level = 20
-
-# Log to a file in cluster_dir/log, otherwise just log to sys.stdout.
-# c.Global.log_to_file = False
-
-# Remove old logs from cluster_dir/log before starting.
-# c.Global.clean_logs = True
-
-# A list of strings that will be executed in the users namespace on the engine
-# before it connects to the controller.
-# c.Global.exec_lines = ['import numpy']
-
-# The engine will try to connect to the controller multiple times, to allow
-# the controller time to startup and write its FURL file. These parameters
-# control the number of retries (connect_max_tries) and the initial delay
-# (connect_delay) between attemps. The actual delay between attempts gets
-# longer each time by a factor of 1.5 (delay[i] = 1.5*delay[i-1])
-# those attemps.
-# c.Global.connect_delay = 0.1
-# c.Global.connect_max_tries = 15
-
-# By default, the engine will look for the controller's JSON file in its own
-# cluster directory. Sometimes, the JSON file will be elsewhere and this
-# attribute can be set to the full path of the JSON file.
-# c.Global.url_file = u'/path/to/my/ipcontroller-engine.json'
-
-# The working directory for the process. The application will use os.chdir
-# to change to this directory before starting.
-# c.Global.work_dir = os.getcwd()
-
-#-----------------------------------------------------------------------------
-# MPI configuration
-#-----------------------------------------------------------------------------
-
-# Upon starting the engine can be configured to call MPI_Init. This section
-# configures that.
-
-# Select which MPI section to execute to setup MPI. The value of this
-# attribute must match the name of another attribute in the MPI config
-# section (mpi4py, pytrilinos, etc.). This can also be set by the --mpi
-# command line option.
-# c.MPI.use = ''
-
-# Initialize MPI using mpi4py. To use this, set c.MPI.use = 'mpi4py' to use
-# --mpi=mpi4py at the command line.
-# c.MPI.mpi4py = """from mpi4py import MPI as mpi
-# mpi.size = mpi.COMM_WORLD.Get_size()
-# mpi.rank = mpi.COMM_WORLD.Get_rank()
-# """
-
-# Initialize MPI using pytrilinos. To use this, set c.MPI.use = 'pytrilinos'
-# to use --mpi=pytrilinos at the command line.
-# c.MPI.pytrilinos = """from PyTrilinos import Epetra
-# class SimpleStruct:
-# pass
-# mpi = SimpleStruct()
-# mpi.rank = 0
-# mpi.size = 0
-# """
-
-#-----------------------------------------------------------------------------
-# Developer level configuration attributes
-#-----------------------------------------------------------------------------
-
-# You shouldn't have to modify anything in this section. These attributes
-# are more for developers who want to change the behavior of the controller
-# at a fundamental level.
-
-# You should not have to change these attributes.
-
-# c.Global.url_file_name = u'ipcontroller-engine.furl'
-
-
-
-
View
12 IPython/parallel/clusterdir.py
@@ -138,8 +138,8 @@ def copy_config_file(self, config_file, path=None, overwrite=False):
def copy_all_config_files(self, path=None, overwrite=False):
"""Copy all config files into the active cluster directory."""
- for f in [u'ipcontrollerz_config.py', u'ipenginez_config.py',
- u'ipclusterz_config.py']:
+ for f in [u'ipcontroller_config.py', u'ipengine_config.py',
+ u'ipcluster_config.py']:
self.copy_config_file(f, path=path, overwrite=overwrite)
@classmethod
@@ -164,11 +164,11 @@ def create_cluster_dir_by_profile(cls, path, profile=u'default'):
The path (directory) to put the cluster directory in.
profile : str
The name of the profile. The name of the cluster directory will
- be "clusterz_<profile>".
+ be "cluster_<profile>".
"""
if not os.path.isdir(path):
raise ClusterDirError('Directory not found: %s' % path)
- cluster_dir = os.path.join(path, u'clusterz_' + profile)
+ cluster_dir = os.path.join(path, u'cluster_' + profile)
return ClusterDir(location=cluster_dir)
@classmethod
@@ -190,9 +190,9 @@ def find_cluster_dir_by_profile(cls, ipython_dir, profile=u'default'):
The IPython directory to use.
profile : unicode or str
The name of the profile. The name of the cluster directory
- will be "clusterz_<profile>".
+ will be "cluster_<profile>".
"""
- dirname = u'clusterz_' + profile
+ dirname = u'cluster_' + profile
cluster_dir_paths = os.environ.get('IPCLUSTER_DIR_PATH','')
if cluster_dir_paths:
cluster_dir_paths = cluster_dir_paths.split(':')
View
52 IPython/parallel/ipclusterapp.py
@@ -37,7 +37,7 @@
#-----------------------------------------------------------------------------
-default_config_file_name = u'ipclusterz_config.py'
+default_config_file_name = u'ipcluster_config.py'
_description = """\
@@ -47,9 +47,9 @@
This command automates the startup of these processes using a wide
range of startup methods (SSH, local processes, PBS, mpiexec,
Windows HPC Server 2008). To start a cluster with 4 engines on your
-local host simply do 'ipclusterz start -n 4'. For more complex usage
-you will typically do 'ipclusterz create -p mycluster', then edit
-configuration files, followed by 'ipclusterz start -p mycluster -n 4'.
+local host simply do 'ipcluster start -n 4'. For more complex usage
+you will typically do 'ipcluster create -p mycluster', then edit
+configuration files, followed by 'ipcluster start -p mycluster -n 4'.
"""
@@ -108,9 +108,9 @@ def _add_arguments(self):
title='ipcluster subcommands',
description=
"""ipcluster has a variety of subcommands. The general way of
- running ipcluster is 'ipclusterz <cmd> [options]'. To get help
- on a particular subcommand do 'ipclusterz <cmd> -h'."""
- # help="For more help, type 'ipclusterz <cmd> -h'",
+ running ipcluster is 'ipcluster <cmd> [options]'. To get help
+ on a particular subcommand do 'ipcluster <cmd> -h'."""
+ # help="For more help, type 'ipcluster <cmd> -h'",
)
# The "list" subcommand parser
@@ -123,7 +123,7 @@ def _add_arguments(self):
"""List all available clusters, by cluster directory, that can
be found in the current working directly or in the ipython
directory. Cluster directories are named using the convention
- 'clusterz_<profile>'."""
+ 'cluster_<profile>'."""
)
# The "create" subcommand parser
@@ -136,13 +136,13 @@ def _add_arguments(self):
"""Create an ipython cluster directory by its profile name or
cluster directory path. Cluster directories contain
configuration, log and security related files and are named
- using the convention 'clusterz_<profile>'. By default they are
+ using the convention 'cluster_<profile>'. By default they are
located in your ipython directory. Once created, you will
probably need to edit the configuration files in the cluster
directory to configure your cluster. Most users will create a
cluster directory by profile name,
- 'ipclusterz create -p mycluster', which will put the directory
- in '<ipython_dir>/clusterz_mycluster'.
+ 'ipcluster create -p mycluster', which will put the directory
+ in '<ipython_dir>/cluster_mycluster'.
"""
)
paa = parser_create.add_argument
@@ -162,10 +162,10 @@ def _add_arguments(self):
"""Start an ipython cluster by its profile name or cluster
directory. Cluster directories contain configuration, log and
security related files and are named using the convention
- 'clusterz_<profile>' and should be creating using the 'start'
- 'cluster_<profile>' and should be created using the 'create'
subcommand of 'ipcluster'. If your cluster directory is in
the cwd or the ipython directory, you can simply refer to it
- using its profile name, 'ipclusterz start -n 4 -p <profile>`,
+ using its profile name, 'ipcluster start -n 4 -p <profile>`,
otherwise use the '--cluster-dir' option.
"""
)
@@ -200,9 +200,9 @@ def _add_arguments(self):
description=
"""Stop a running ipython cluster by its profile name or cluster
directory. Cluster directories are named using the convention
- 'clusterz_<profile>'. If your cluster directory is in
+ 'cluster_<profile>'. If your cluster directory is in
the cwd or the ipython directory, you can simply refer to it
- using its profile name, 'ipclusterz stop -p <profile>`, otherwise
+ using its profile name, 'ipcluster stop -p <profile>`, otherwise
use the '--cluster-dir' option.
"""
)
@@ -223,10 +223,10 @@ def _add_arguments(self):
by profile name or cluster directory.
Cluster directories contain configuration, log and
security related files and are named using the convention
- 'clusterz_<profile>' and should be creating using the 'start'
- 'cluster_<profile>' and should be created using the 'create'
subcommand of 'ipcluster'. If your cluster directory is in
the cwd or the ipython directory, you can simply refer to it
- using its profile name, 'ipclusterz engines -n 4 -p <profile>`,
+ using its profile name, 'ipcluster engines -n 4 -p <profile>`,
otherwise use the '--cluster-dir' option.
"""
)
@@ -249,7 +249,7 @@ def _add_arguments(self):
class IPClusterApp(ApplicationWithClusterDir):
- name = u'ipclusterz'
+ name = u'ipcluster'
description = _description
usage = None
command_line_loader = IPClusterAppConfigLoader
@@ -286,8 +286,8 @@ def find_resources(self):
except ClusterDirError:
raise ClusterDirError(
"Could not find a cluster directory. A cluster dir must "
- "be created before running 'ipclusterz start'. Do "
- "'ipclusterz create -h' or 'ipclusterz list -h' for more "
+ "be created before running 'ipcluster start'. Do "
+ "'ipcluster create -h' or 'ipcluster list -h' for more "
"information about creating and listing cluster dirs."
)
elif subcommand=='engines':
@@ -297,8 +297,8 @@ def find_resources(self):
except ClusterDirError:
raise ClusterDirError(
"Could not find a cluster directory. A cluster dir must "
- "be created before running 'ipclusterz start'. Do "
- "'ipclusterz create -h' or 'ipclusterz list -h' for more "
+ "be created before running 'ipcluster start'. Do "
+ "'ipcluster create -h' or 'ipcluster list -h' for more "
"information about creating and listing cluster dirs."
)
@@ -322,9 +322,9 @@ def list_cluster_dirs(self):
files = os.listdir(path)
for f in files:
full_path = os.path.join(path, f)
- if os.path.isdir(full_path) and f.startswith('clusterz_'):
+ if os.path.isdir(full_path) and f.startswith('cluster_'):
profile = full_path.split('_')[-1]
- start_cmd = 'ipclusterz start -p %s -n 4' % profile
+ start_cmd = 'ipcluster start -p %s -n 4' % profile
print start_cmd + " ==> " + full_path
def pre_construct(self):
@@ -498,7 +498,7 @@ def start_app_start(self):
else:
self.log.critical(
'Cluster is already running with [pid=%s]. '
- 'use "ipclusterz stop" to stop the cluster.' % pid
+ 'use "ipcluster stop" to stop the cluster.' % pid
)
# Here I exit with a unusual exit status that other processes
# can watch for to learn how I existed.
@@ -506,7 +506,7 @@ def start_app_start(self):
# Now log and daemonize
self.log.info(
- 'Starting ipclusterz with [daemon=%r]' % config.Global.daemonize
+ 'Starting ipcluster with [daemon=%r]' % config.Global.daemonize
)
# TODO: Get daemonize working on Windows or as a Windows Server.
if config.Global.daemonize:
View
6 IPython/parallel/ipcontrollerapp.py
@@ -48,7 +48,7 @@
#: The default config file name for this application
-default_config_file_name = u'ipcontrollerz_config.py'
+default_config_file_name = u'ipcontroller_config.py'
_description = """Start the IPython controller for parallel computing.
@@ -57,7 +57,7 @@
clients. The controller needs to be started before the engines and can be
configured using command line options or using a cluster directory. Cluster
directories contain config, log and security files and are usually located in
-your ipython directory and named as "clusterz_<profile>". See the --profile
+your ipython directory and named as "cluster_<profile>". See the --profile
and --cluster-dir options for details.
"""
@@ -251,7 +251,7 @@ def _add_arguments(self):
class IPControllerApp(ApplicationWithClusterDir):
- name = u'ipcontrollerz'
+ name = u'ipcontroller'
description = _description
command_line_loader = IPControllerAppConfigLoader
default_config_file_name = default_config_file_name
View
6 IPython/parallel/ipengineapp.py
@@ -40,7 +40,7 @@
#-----------------------------------------------------------------------------
#: The default config file name for this application
-default_config_file_name = u'ipenginez_config.py'
+default_config_file_name = u'ipengine_config.py'
mpi4py_init = """from mpi4py import MPI as mpi
@@ -64,7 +64,7 @@ class SimpleStruct:
and controller. A controller needs to be started before the engines. The
engine can be configured using command line options or using a cluster
directory. Cluster directories contain config, log and security files and are
-usually located in your ipython directory and named as "clusterz_<profile>".
+usually located in your ipython directory and named as "cluster_<profile>".
See the --profile and --cluster-dir options for details.
"""
@@ -124,7 +124,7 @@ def _add_arguments(self):
class IPEngineApp(ApplicationWithClusterDir):
- name = u'ipenginez'
+ name = u'ipengine'
description = _description
command_line_loader = IPEngineAppConfigLoader
default_config_file_name = default_config_file_name
View
2  IPython/parallel/iploggerapp.py
@@ -39,7 +39,7 @@
by registering a `zmq.log.handlers.PUBHandler` with the `logging` module. The
logger can be configured using command line options or using a cluster
directory. Cluster directories contain config, log and security files and are
-usually located in your ipython directory and named as "clusterz_<profile>".
+usually located in your ipython directory and named as "cluster_<profile>".
See the --profile and --cluster-dir options for details.
"""
View
36 IPython/parallel/launcher.py
@@ -63,15 +63,15 @@ def check_output(*args, **kwargs):
#-----------------------------------------------------------------------------
-ipclusterz_cmd_argv = pycmd2argv(get_ipython_module_path(
+ipcluster_cmd_argv = pycmd2argv(get_ipython_module_path(
'IPython.parallel.ipclusterapp'
))
-ipenginez_cmd_argv = pycmd2argv(get_ipython_module_path(
+ipengine_cmd_argv = pycmd2argv(get_ipython_module_path(
'IPython.parallel.ipengineapp'
))
-ipcontrollerz_cmd_argv = pycmd2argv(get_ipython_module_path(
+ipcontroller_cmd_argv = pycmd2argv(get_ipython_module_path(
'IPython.parallel.ipcontrollerapp'
))
@@ -304,7 +304,7 @@ def poll(self):
class LocalControllerLauncher(LocalProcessLauncher):
"""Launch a controller as a regular external process."""
- controller_cmd = List(ipcontrollerz_cmd_argv, config=True)
+ controller_cmd = List(ipcontroller_cmd_argv, config=True)
# Command line arguments to ipcontroller.
controller_args = List(['--log-to-file','--log-level', str(logging.INFO)], config=True)
@@ -322,7 +322,7 @@ def start(self, cluster_dir):
class LocalEngineLauncher(LocalProcessLauncher):
"""Launch a single engine as a regular externall process."""
- engine_cmd = List(ipenginez_cmd_argv, config=True)
+ engine_cmd = List(ipengine_cmd_argv, config=True)
# Command line arguments for ipengine.
engine_args = List(
['--log-to-file','--log-level', str(logging.INFO)], config=True
@@ -443,7 +443,7 @@ def start(self, n):
class MPIExecControllerLauncher(MPIExecLauncher):
"""Launch a controller using mpiexec."""
- controller_cmd = List(ipcontrollerz_cmd_argv, config=True)
+ controller_cmd = List(ipcontroller_cmd_argv, config=True)
# Command line arguments to ipcontroller.
controller_args = List(['--log-to-file','--log-level', str(logging.INFO)], config=True)
n = Int(1, config=False)
@@ -462,7 +462,7 @@ def find_args(self):
class MPIExecEngineSetLauncher(MPIExecLauncher):
- program = List(ipenginez_cmd_argv, config=True)
+ program = List(ipengine_cmd_argv, config=True)
# Command line arguments for ipengine.
program_args = List(
['--log-to-file','--log-level', str(logging.INFO)], config=True
@@ -531,13 +531,13 @@ def signal(self, sig):
class SSHControllerLauncher(SSHLauncher):
- program = List(ipcontrollerz_cmd_argv, config=True)
+ program = List(ipcontroller_cmd_argv, config=True)
# Command line arguments to ipcontroller.
program_args = List(['-r', '--log-to-file','--log-level', str(logging.INFO)], config=True)
class SSHEngineLauncher(SSHLauncher):
- program = List(ipenginez_cmd_argv, config=True)
+ program = List(ipengine_cmd_argv, config=True)
# Command line arguments for ipengine.
program_args = List(
['--log-to-file','--log-level', str(logging.INFO)], config=True
@@ -883,9 +883,9 @@ class PBSControllerLauncher(PBSLauncher):
batch_file_name = CUnicode(u'pbs_controller', config=True)
default_template= CUnicode("""#!/bin/sh
#PBS -V
-#PBS -N ipcontrollerz
+#PBS -N ipcontroller
%s --log-to-file --cluster-dir $cluster_dir
-"""%(' '.join(ipcontrollerz_cmd_argv)))
+"""%(' '.join(ipcontroller_cmd_argv)))
def start(self, cluster_dir):
"""Start the controller by profile or cluster_dir."""
@@ -898,9 +898,9 @@ class PBSEngineSetLauncher(PBSLauncher):
batch_file_name = CUnicode(u'pbs_engines', config=True)
default_template= CUnicode(u"""#!/bin/sh
#PBS -V
-#PBS -N ipenginez
+#PBS -N ipengine
%s --cluster-dir $cluster_dir
-"""%(' '.join(ipenginez_cmd_argv)))
+"""%(' '.join(ipengine_cmd_argv)))
def start(self, n, cluster_dir):
"""Start n engines by profile or cluster_dir."""
@@ -922,9 +922,9 @@ class SGEControllerLauncher(SGELauncher):
batch_file_name = CUnicode(u'sge_controller', config=True)
default_template= CUnicode(u"""#$$ -V
#$$ -S /bin/sh
-#$$ -N ipcontrollerz
+#$$ -N ipcontroller
%s --log-to-file --cluster-dir $cluster_dir
-"""%(' '.join(ipcontrollerz_cmd_argv)))
+"""%(' '.join(ipcontroller_cmd_argv)))
def start(self, cluster_dir):
"""Start the controller by profile or cluster_dir."""
@@ -936,9 +936,9 @@ class SGEEngineSetLauncher(SGELauncher):
batch_file_name = CUnicode(u'sge_engines', config=True)
default_template = CUnicode("""#$$ -V
#$$ -S /bin/sh
-#$$ -N ipenginez
+#$$ -N ipengine
%s --cluster-dir $cluster_dir
-"""%(' '.join(ipenginez_cmd_argv)))
+"""%(' '.join(ipengine_cmd_argv)))
def start(self, n, cluster_dir):
"""Start n engines by profile or cluster_dir."""
@@ -954,7 +954,7 @@ def start(self, n, cluster_dir):
class IPClusterLauncher(LocalProcessLauncher):
"""Launch the ipcluster program in an external process."""
- ipcluster_cmd = List(ipclusterz_cmd_argv, config=True)
+ ipcluster_cmd = List(ipcluster_cmd_argv, config=True)
# Command line arguments to pass to ipcluster.
ipcluster_args = List(
['--clean-logs', '--log-to-file', '--log-level', str(logging.INFO)], config=True)
View
2  IPython/parallel/logwatcher.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-"""A simple logger object that consolidates messages incoming from ipclusterz processes."""
+"""A simple logger object that consolidates messages incoming from ipcluster processes."""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
View
0  IPython/parallel/scripts/ipclusterz → IPython/parallel/scripts/ipcluster
File renamed without changes
View
0  IPython/parallel/scripts/ipcontrollerz → IPython/parallel/scripts/ipcontroller
File renamed without changes
View
0  IPython/parallel/scripts/ipenginez → IPython/parallel/scripts/ipengine
File renamed without changes
View
0  IPython/parallel/scripts/iploggerz → IPython/parallel/scripts/iplogger
File renamed without changes
View
4 IPython/parallel/tests/__init__.py
@@ -23,7 +23,7 @@
# nose setup/teardown
def setup():
- cp = Popen('ipcontrollerz --profile iptest -r --log-level 10 --log-to-file'.split(), stdout=blackhole, stderr=STDOUT)
+ cp = Popen('ipcontroller --profile iptest -r --log-level 10 --log-to-file'.split(), stdout=blackhole, stderr=STDOUT)
processes.append(cp)
time.sleep(.5)
add_engines(1)
@@ -38,7 +38,7 @@ def add_engines(n=1, profile='iptest'):
base = len(rc)
eps = []
for i in range(n):
- ep = Popen(['ipenginez']+ ['--profile', profile, '--log-level', '10', '--log-to-file'], stdout=blackhole, stderr=STDOUT)
+ ep = Popen(['ipengine']+ ['--profile', profile, '--log-level', '10', '--log-to-file'], stdout=blackhole, stderr=STDOUT)
# ep.start()
processes.append(ep)
eps.append(ep)
View
2  IPython/parallel/tests/test_newserialized.py
@@ -13,7 +13,7 @@
from unittest import TestCase
-from IPython.testing.parametric import parametric
+from IPython.testing.decorators import parametric
from IPython.utils import newserialized as ns
from IPython.utils.pickleutil import can, uncan, CannedObject, CannedFunction
from IPython.parallel.tests.clienttest import skip_without
View
1  IPython/testing/iptest.py
@@ -185,6 +185,7 @@ def make_exclude():
if not have['zmq']:
exclusions.append(ipjoin('zmq'))
+ exclusions.append(ipjoin('parallel'))
# This is needed for the reg-exp to match on win32 in the ipdoctest plugin.
if sys.platform == 'win32':
View
3  docs/source/index.txt
@@ -19,8 +19,7 @@ Contents
whatsnew/index.txt
install/index.txt
interactive/index.txt
- .. parallel/index.txt
- parallelz/index.txt
+ parallel/index.txt
config/index.txt
development/index.txt
api/index.txt
View
121 docs/source/install/install.txt
@@ -9,16 +9,16 @@ install all of its dependencies.
Please let us know if you have problems installing IPython or any of its
-dependencies. Officially, IPython requires Python version 2.5 or 2.6. We
-have *not* yet started to port IPython to Python 3.0.
+dependencies. Officially, IPython requires Python version 2.6 or 2.7. There
+is an experimental port of IPython for Python3 `on GitHub
+<https://github.com/ipython/ipython-py3k>`_.
.. warning::
- Officially, IPython supports Python versions 2.5 and 2.6.
+ Officially, IPython supports Python versions 2.6 and 2.7.
- IPython 0.10 has only been well tested with Python 2.5 and 2.6. Parts of
- it may work with Python 2.4, but we do not officially support Python 2.4
- anymore. If you need to use 2.4, you can still run IPython 0.9.
+ IPython 0.11 has a hard syntax dependency on 2.6, and will no longer work
+ on Python <= 2.5.
Some of the installation approaches use the :mod:`setuptools` package and its
:command:`easy_install` command line program. In many scenarios, this provides
@@ -38,9 +38,9 @@ optional dependencies:
.. code-block:: bash
- $ easy_install ipython[kernel,security,test]
+ $ easy_install ipython[zmq,test]
-This will get Twisted, zope.interface and Foolscap, which are needed for
+This will get pyzmq, which is needed for
IPython's parallel computing features as well as the nose package, which will
enable you to run IPython's test suite.
@@ -221,8 +221,7 @@ On Windows, you will need the PyReadline module. PyReadline is a separate,
Windows only implementation of readline that uses native Windows calls through
:mod:`ctypes`. The easiest way of installing PyReadline is you use the binary
installer available `here <http://ipython.scipy.org/dist/>`_. The :mod:`ctypes`
-module, which comes with Python 2.5 and greater, is required by PyReadline. It
-is available for Python 2.4 at http://python.net/crew/theller/ctypes.
+module, which comes with Python 2.5 and greater, is required by PyReadline.
nose
----
@@ -267,91 +266,30 @@ The `pexpect <http://www.noah.org/wiki/Pexpect>`_ package is used in IPython's
Windows users are out of luck as pexpect does not run there.
-Dependencies for IPython.kernel (parallel computing)
-====================================================
+Dependencies for IPython.parallel (parallel computing)
+======================================================
-The IPython kernel provides a nice architecture for parallel computing. The
-main focus of this architecture is on interactive parallel computing. These
-features require a number of additional packages:
+:mod:`IPython.kernel` has been replaced by :mod:`IPython.parallel`,
+which uses ZeroMQ for all communication.
-* zope.interface (yep, we use interfaces)
-* Twisted (asynchronous networking framework)
-* Foolscap (a nice, secure network protocol)
-* pyOpenSSL (security for network connections)
+IPython.parallel provides a nice architecture for parallel computing. The
+main focus of this architecture is on interactive parallel computing. These
+features require just one package: pyzmq. See the next section for pyzmq
+details.
On a Unix style platform (including OS X), if you want to use
:mod:`setuptools`, you can just do:
.. code-block:: bash
- $ easy_install ipython[kernel] # the first three
- $ easy_install ipython[security] # pyOpenSSL
-
-zope.interface and Twisted
---------------------------
-
-Twisted [Twisted]_ and zope.interface [ZopeInterface]_ are used for networking
-related things. On Unix style platforms (including OS X), the simplest way of
-getting the these is to use :command:`easy_install`:
-
-.. code-block:: bash
-
- $ easy_install zope.interface
- $ easy_install Twisted
+ $ easy_install ipython[zmq] # will include pyzmq
-Of course, you can also download the source tarballs from the Twisted website
-[Twisted]_ and the
-`zope.interface page at PyPI <http://pypi.python.org/pypi/zope.interface>`_
-and do the usual ``python setup.py install`` if you prefer.
+Security in IPython.parallel is provided by SSH tunnels. By default, Linux
+and OSX clients will use the shell ssh command, but on Windows, we also
+support tunneling with paramiko [paramiko]_.
-Windows is a bit different. For zope.interface and Twisted, simply get the
-latest binary ``.exe`` installer from the Twisted website. This installer
-includes both zope.interface and Twisted and should just work.
-
-Foolscap
---------
-
-Foolscap [Foolscap]_ uses Twisted to provide a very nice secure RPC protocol that we use to implement our parallel computing features.
-
-On all platforms a simple:
-
-.. code-block:: bash
-
- $ easy_install foolscap
-
-should work. You can also download the source tarballs from the `Foolscap
-website <http://foolscap.lothar.com/trac>`_ and do ``python setup.py install``
-if you prefer.
-
-pyOpenSSL
----------
-
-IPython does not work with version 0.7 of pyOpenSSL [pyOpenSSL]_. It is known
-to work with version 0.6 and will likely work with the more recent 0.8 and 0.9
-versions. There are a couple of options for getting this:
-
-1. Most Linux distributions have packages for pyOpenSSL.
-2. The built-in Python 2.5 on OS X 10.5 already has it installed.
-3. There are source tarballs on the pyOpenSSL website. On Unix-like
- platforms, these can be built using ``python seutp.py install``.
-4. There is also a binary ``.exe`` Windows installer on the
- `pyOpenSSL website <http://pyopenssl.sourceforge.net/>`_.
-
-Dependencies for IPython.frontend (the IPython GUI)
-===================================================
-
-wxPython
---------
-
-Starting with IPython 0.9, IPython has a new :mod:`IPython.frontend` package
-that has a nice wxPython based IPython GUI. As you would expect, this GUI
-requires wxPython. Most Linux distributions have wxPython packages available
-and the built-in Python on OS X comes with wxPython preinstalled. For Windows,
-a binary installer is available on the `wxPython website
-<http://www.wxpython.org/>`_.
-
-Dependencies for IPython.zmq (new parallel)
-===========================================
+Dependencies for IPython.zmq
+============================
pyzmq
-----
@@ -359,9 +297,11 @@ pyzmq
IPython 0.11 introduced some new functionality, including a two-process
execution model using ZeroMQ for communication [ZeroMQ]_. The Python bindings
to ZeroMQ are found in the pyzmq project, which is easy_install-able once you
-have ZeroMQ installed. :mod:`IPython.kernel` is also in the process of being
-replaced by :mod:`IPython.zmq.parallel`, which uses ZeroMQ for all
-communication.
+have ZeroMQ installed (or even if you don't).
+
+IPython.zmq depends on pyzmq >= 2.0.10.1, but IPython.parallel requires the more
+recent 2.1.4, which also has binary releases for OSX and Windows that do not
+require prior installation of libzmq.
Dependencies for ipython-qtconsole (new GUI)
============================================
@@ -377,11 +317,12 @@ which can be installed from the
pygments
--------
-The syntax-highlighting in ``ipython-qtconsole`` is done with the pygments project, which is easy_install-able.
+The syntax-highlighting in ``ipython-qtconsole`` is done with the pygments project,
+which is easy_install-able.
.. [Twisted] Twisted matrix. http://twistedmatrix.org
.. [ZopeInterface] http://pypi.python.org/pypi/zope.interface
.. [Foolscap] Foolscap network protocol. http://foolscap.lothar.com/trac
.. [pyOpenSSL] pyOpenSSL. http://pyopenssl.sourceforge.net
.. [ZeroMQ] ZeroMQ. http://www.zeromq.org
-
+.. [paramiko] paramiko. https://github.com/robey/paramiko
View
0  docs/source/parallelz/asian_call.pdf → docs/source/parallel/asian_call.pdf
File renamed without changes
View
0  docs/source/parallelz/asian_call.png → docs/source/parallel/asian_call.png
File renamed without changes
View
0  docs/source/parallelz/asian_put.pdf → docs/source/parallel/asian_put.pdf
File renamed without changes
View
0  docs/source/parallelz/asian_put.png → docs/source/parallel/asian_put.png
File renamed without changes
View
0  docs/source/parallelz/dag_dependencies.txt → docs/source/parallel/dag_dependencies.txt
File renamed without changes
View
0  docs/source/parallelz/dagdeps.pdf → docs/source/parallel/dagdeps.pdf
File renamed without changes
View
0  docs/source/parallelz/dagdeps.png → docs/source/parallel/dagdeps.png
File renamed without changes
View
0  docs/source/parallelz/hpc_job_manager.pdf → docs/source/parallel/hpc_job_manager.pdf
File renamed without changes
View
0  docs/source/parallelz/hpc_job_manager.png → docs/source/parallel/hpc_job_manager.png
File renamed without changes
View
20 docs/source/parallel/index.txt
@@ -4,9 +4,19 @@
Using IPython for parallel computing
====================================
-The twisted-based :mod:`IPython.kernel` has been removed, in favor of
-the new 0MQ-based :mod:`IPython.parallel`, whose merge into master is imminent.
+.. toctree::
+ :maxdepth: 2
+
+ parallel_intro.txt
+ parallel_process.txt
+ parallel_multiengine.txt
+ parallel_task.txt
+ parallel_mpi.txt
+ parallel_security.txt
+ parallel_winhpc.txt
+ parallel_demos.txt
+ dag_dependencies.txt
+ parallel_details.txt
+ parallel_transition.txt
+
-Until that code is merged, it can be found in the `newparallel branch
-<https://github.com/ipython/ipython/tree/newparallel>`_, and its draft documentation can be
-found `here <http://minrk.github.com/ipython-doc/newparallel>`_.
View
0  docs/source/parallelz/ipcluster_create.pdf → docs/source/parallel/ipcluster_create.pdf
File renamed without changes
View
0  docs/source/parallelz/ipcluster_create.png → docs/source/parallel/ipcluster_create.png
File renamed without changes
View
0  docs/source/parallelz/ipcluster_start.pdf → docs/source/parallel/ipcluster_start.pdf
File renamed without changes
View
0  docs/source/parallelz/ipcluster_start.png → docs/source/parallel/ipcluster_start.png
File renamed without changes
View
0  docs/source/parallelz/ipython_shell.pdf → docs/source/parallel/ipython_shell.pdf
File renamed without changes
View
0  docs/source/parallelz/ipython_shell.png → docs/source/parallel/ipython_shell.png
File renamed without changes
View
0  docs/source/parallelz/mec_simple.pdf → docs/source/parallel/mec_simple.pdf
File renamed without changes
View
0  docs/source/parallelz/mec_simple.png → docs/source/parallel/mec_simple.png
File renamed without changes
View
4 docs/source/parallelz/parallel_demos.txt → docs/source/parallel/parallel_demos.txt
@@ -110,7 +110,7 @@ results. The code to run this calculation in parallel is contained in
:file:`docs/examples/newparallel/parallelpi.py`. This code can be run in parallel
using IPython by following these steps:
-1. Use :command:`ipclusterz` to start 15 engines. We used an 8 core (2 quad
+1. Use :command:`ipcluster` to start 15 engines. We used an 8 core (2 quad
core CPUs) cluster with hyperthreading enabled which makes the 8 cores
looks like 16 (1 controller + 15 engines) in the OS. However, the maximum
speedup we can observe is still only 8x.
@@ -230,7 +230,7 @@ plot using Matplotlib.
.. literalinclude:: ../../examples/newparallel/mcdriver.py
:language: python
-To use this code, start an IPython cluster using :command:`ipclusterz`, open
+To use this code, start an IPython cluster using :command:`ipcluster`, open
IPython in the pylab mode with the file :file:`mcdriver.py` in your current
working directory and then type:
View
0  docs/source/parallelz/parallel_details.txt → docs/source/parallel/parallel_details.txt
File renamed without changes
View
8 docs/source/parallelz/parallel_intro.txt → docs/source/parallel/parallel_intro.txt
@@ -156,7 +156,7 @@ To connect and authenticate to the controller an engine or client needs
some information that the controller has stored in a JSON file.
Thus, the JSON files need to be copied to a location where
the clients and engines can find them. Typically, this is the
-:file:`~/.ipython/clusterz_default/security` directory on the host where the
+:file:`~/.ipython/cluster_default/security` directory on the host where the
client/engine is running (which could be a different host than the controller).
Once the JSON files are copied over, everything should work fine.
@@ -192,10 +192,10 @@ Getting Started
To use IPython for parallel computing, you need to start one instance of the
controller and one or more instances of the engine. Initially, it is best to
simply start a controller and engines on a single host using the
-:command:`ipclusterz` command. To start a controller and 4 engines on your
+:command:`ipcluster` command. To start a controller and 4 engines on your
localhost, just do::
- $ ipclusterz start -n 4
+ $ ipcluster start -n 4
More details about starting the IPython controller and engines can be found
:ref:`here <parallel_process>`
@@ -218,7 +218,7 @@ everything is working correctly, try the following commands:
When a client is created with no arguments, the client tries to find the corresponding JSON file
-in the local `~/.ipython/clusterz_default/security` directory. Or if you specified a profile,
+in the local `~/.ipython/cluster_default/security` directory. Or if you specified a profile,
you can use that with the Client. This should cover most cases:
.. sourcecode:: ipython
View
20 docs/source/parallelz/parallel_mpi.txt → docs/source/parallel/parallel_mpi.txt
@@ -50,16 +50,16 @@ To use code that calls MPI, there are typically two things that MPI requires.
There are a couple of ways that you can start the IPython engines and get
these things to happen.
-Automatic starting using :command:`mpiexec` and :command:`ipclusterz`
+Automatic starting using :command:`mpiexec` and :command:`ipcluster`
--------------------------------------------------------------------
-The easiest approach is to use the `mpiexec` mode of :command:`ipclusterz`,
+The easiest approach is to use the `mpiexec` mode of :command:`ipcluster`,
which will first start a controller and then a set of engines using
:command:`mpiexec`::
- $ ipclusterz mpiexec -n 4
+ $ ipcluster mpiexec -n 4
-This approach is best as interrupting :command:`ipclusterz` will automatically
+This approach is best as interrupting :command:`ipcluster` will automatically
stop and clean up the controller and engines.
Manual starting using :command:`mpiexec`
@@ -68,20 +68,20 @@ Manual starting using :command:`mpiexec`
If you want to start the IPython engines using the :command:`mpiexec`, just
do::
- $ mpiexec -n 4 ipenginez --mpi=mpi4py
+ $ mpiexec -n 4 ipengine --mpi=mpi4py
This requires that you already have a controller running and that the FURL
files for the engines are in place. We also have built in support for
PyTrilinos [PyTrilinos]_, which can be used (assuming is installed) by
starting the engines with::
- $ mpiexec -n 4 ipenginez --mpi=pytrilinos
+ $ mpiexec -n 4 ipengine --mpi=pytrilinos
-Automatic starting using PBS and :command:`ipclusterz`
+Automatic starting using PBS and :command:`ipcluster`
------------------------------------------------------
-The :command:`ipclusterz` command also has built-in integration with PBS. For
-more information on this approach, see our documentation on :ref:`ipclusterz
+The :command:`ipcluster` command also has built-in integration with PBS. For
+more information on this approach, see our documentation on :ref:`ipcluster
<parallel_process>`.
Actually using MPI
@@ -110,7 +110,7 @@ distributed array. Save the following text in a file called :file:`psum.py`:
Now, start an IPython cluster::
- $ ipclusterz start -p mpi -n 4
+ $ ipcluster start -p mpi -n 4
.. note::
View
6 ...source/parallelz/parallel_multiengine.txt → .../source/parallel/parallel_multiengine.txt
@@ -17,9 +17,9 @@ Starting the IPython controller and engines
To follow along with this tutorial, you will need to start the IPython
controller and four IPython engines. The simplest way of doing this is to use
-the :command:`ipclusterz` command::
+the :command:`ipcluster` command::
- $ ipclusterz start -n 4
+ $ ipcluster start -n 4
For more detailed information about starting the controller and engines, see
our :ref:`introduction <ip1par>` to using IPython for parallel computing.
@@ -37,7 +37,7 @@ module and then create a :class:`.Client` instance:
In [2]: rc = Client()
This form assumes that the default connection information (stored in
-:file:`ipcontroller-client.json` found in :file:`IPYTHON_DIR/clusterz_default/security`) is
+:file:`ipcontroller-client.json` found in :file:`IPYTHON_DIR/cluster_default/security`) is
accurate. If the controller was started on a remote machine, you must copy that connection
file to the client machine, or enter its contents as arguments to the Client constructor:
View
0  docs/source/parallelz/parallel_pi.pdf → docs/source/parallel/parallel_pi.pdf
File renamed without changes
View
0  docs/source/parallelz/parallel_pi.png → docs/source/parallel/parallel_pi.png
File renamed without changes
View
136 docs/source/parallelz/parallel_process.txt → docs/source/parallel/parallel_process.txt
@@ -11,12 +11,12 @@ Because of this, there are many different possibilities.
Broadly speaking, there are two ways of going about starting a controller and engines:
-* In an automated manner using the :command:`ipclusterz` command.
-* In a more manual way using the :command:`ipcontrollerz` and
- :command:`ipenginez` commands.
+* In an automated manner using the :command:`ipcluster` command.
+* In a more manual way using the :command:`ipcontroller` and
+ :command:`ipengine` commands.
This document describes both of these methods. We recommend that new users
-start with the :command:`ipclusterz` command as it simplifies many common usage
+start with the :command:`ipcluster` command as it simplifies many common usage
cases.
General considerations
@@ -30,29 +30,29 @@ matter which method you use to start your IPython cluster.
Let's say that you want to start the controller on ``host0`` and engines on
hosts ``host1``-``hostn``. The following steps are then required:
-1. Start the controller on ``host0`` by running :command:`ipcontrollerz` on
+1. Start the controller on ``host0`` by running :command:`ipcontroller` on
``host0``.
2. Move the JSON file (:file:`ipcontroller-engine.json`) created by the
controller from ``host0`` to hosts ``host1``-``hostn``.
3. Start the engines on hosts ``host1``-``hostn`` by running
- :command:`ipenginez`. This command has to be told where the JSON file
+ :command:`ipengine`. This command has to be told where the JSON file
(:file:`ipcontroller-engine.json`) is located.
At this point, the controller and engines will be connected. By default, the JSON files
-created by the controller are put into the :file:`~/.ipython/clusterz_default/security`
+created by the controller are put into the :file:`~/.ipython/cluster_default/security`
directory. If the engines share a filesystem with the controller, step 2 can be skipped as
the engines will automatically look at that location.
The final step required to actually use the running controller from a client is to move
the JSON file :file:`ipcontroller-client.json` from ``host0`` to any host where clients
-will be run. If these file are put into the :file:`~/.ipython/clusterz_default/security`
+will be run. If these files are put into the :file:`~/.ipython/cluster_default/security`
directory of the client's host, they will be found automatically. Otherwise, the full path
to them has to be passed to the client's constructor.
-Using :command:`ipclusterz`
+Using :command:`ipcluster`
===========================
-The :command:`ipclusterz` command provides a simple way of starting a
+The :command:`ipcluster` command provides a simple way of starting a
controller and engines in the following situations:
1. When the controller and engines are all run on localhost. This is useful
@@ -67,24 +67,24 @@ controller and engines in the following situations:
.. note::
- Currently :command:`ipclusterz` requires that the
+ Currently :command:`ipcluster` requires that the
:file:`~/.ipython/cluster_<profile>/security` directory live on a shared filesystem that is
seen by both the controller and engines. If you don't have a shared file
- system you will need to use :command:`ipcontrollerz` and
- :command:`ipenginez` directly.
+ system you will need to use :command:`ipcontroller` and
+ :command:`ipengine` directly.
-Under the hood, :command:`ipclusterz` just uses :command:`ipcontrollerz`
-and :command:`ipenginez` to perform the steps described above.
+Under the hood, :command:`ipcluster` just uses :command:`ipcontroller`
+and :command:`ipengine` to perform the steps described above.
-The simplest way to use ipclusterz requires no configuration, and will
+The simplest way to use ipcluster requires no configuration, and will
launch a controller and a number of engines on the local machine. For instance,
to start one controller and 4 engines on localhost, just do::
- $ ipclusterz start -n 4
+ $ ipcluster start -n 4
To see other command line options for the local mode, do::
- $ ipclusterz -h
+ $ ipcluster -h
Configuring an IPython cluster
@@ -92,25 +92,25 @@ Configuring an IPython cluster
Cluster configurations are stored as `profiles`. You can create a new profile with::
- $ ipclusterz create -p myprofile
+ $ ipcluster create -p myprofile
-This will create the directory :file:`IPYTHONDIR/clusterz_myprofile`, and populate it
+This will create the directory :file:`IPYTHONDIR/cluster_myprofile`, and populate it
with the default configuration files for the three IPython cluster commands. Once
-you edit those files, you can continue to call ipclusterz/ipcontrollerz/ipenginez
+you edit those files, you can continue to call ipcluster/ipcontroller/ipengine
with no arguments beyond ``-p myprofile``, and any configuration will be maintained.
There is no limit to the number of profiles you can have, so you can maintain a profile for each
of your common use cases. The default profile will be used whenever the
-profile argument is not specified, so edit :file:`IPYTHONDIR/clusterz_default/*_config.py` to
+profile argument is not specified, so edit :file:`IPYTHONDIR/cluster_default/*_config.py` to
represent your most common use case.
The configuration files are loaded with commented-out settings and explanations,
which should cover most of the available possibilities.
-Using various batch systems with :command:`ipclusterz`
+Using various batch systems with :command:`ipcluster`
------------------------------------------------------
-:command:`ipclusterz` has a notion of Launchers that can start controllers
+:command:`ipcluster` has a notion of Launchers that can start controllers
and engines with various remote execution schemes. Currently supported
models include `mpiexec`, PBS-style (Torque, SGE), and Windows HPC Server.
@@ -120,7 +120,7 @@ models include `mpiexec`, PBS-style (Torque, SGE), and Windows HPC Server.
users can subclass and configure them to fit their own system that we
have not yet supported (such as Condor)
-Using :command:`ipclusterz` in mpiexec/mpirun mode
+Using :command:`ipcluster` in mpiexec/mpirun mode
--------------------------------------------------
@@ -132,11 +132,11 @@ The mpiexec/mpirun mode is useful if you:
If these are satisfied, you can create a new profile::
- $ ipclusterz create -p mpi
+ $ ipcluster create -p mpi
-and edit the file :file:`IPYTHONDIR/clusterz_mpi/ipclusterz_config.py`.
+and edit the file :file:`IPYTHONDIR/cluster_mpi/ipcluster_config.py`.
-There, instruct ipclusterz to use the MPIExec launchers by adding the lines:
+There, instruct ipcluster to use the MPIExec launchers by adding the lines:
.. sourcecode:: python
@@ -144,7 +144,7 @@ There, instruct ipclusterz to use the MPIExec launchers by adding the lines:
If the default MPI configuration is correct, then you can now start your cluster, with::
- $ ipclusterz start -n 4 -p mpi
+ $ ipcluster start -n 4 -p mpi
This does the following:
@@ -166,7 +166,7 @@ On newer MPI implementations (such as OpenMPI), this will work even if you
don't make any calls to MPI or call :func:`MPI_Init`. However, older MPI
implementations actually require each process to call :func:`MPI_Init` upon
starting. The easiest way of having this done is to install the mpi4py
-[mpi4py]_ package and then specify the ``c.MPI.use`` option in :file:`ipenginez_config.py`:
+[mpi4py]_ package and then specify the ``c.MPI.use`` option in :file:`ipengine_config.py`:
.. sourcecode:: python
@@ -177,21 +177,21 @@ having problems with this, you will likely have to use a custom Python
executable that itself calls :func:`MPI_Init` at the appropriate time.
Fortunately, mpi4py comes with such a custom Python executable that is easy to
install and use. However, this custom Python executable approach will not work
-with :command:`ipclusterz` currently.
+with :command:`ipcluster` currently.
More details on using MPI with IPython can be found :ref:`here <parallelmpi>`.
-Using :command:`ipclusterz` in PBS mode
+Using :command:`ipcluster` in PBS mode
---------------------------------------
The PBS mode uses the Portable Batch System [PBS]_ to start the engines.
As usual, we will start by creating a fresh profile::
- $ ipclusterz create -p pbs
+ $ ipcluster create -p pbs
-And in :file:`ipclusterz_config.py`, we will select the PBS launchers for the controller
+And in :file:`ipcluster_config.py`, we will select the PBS launchers for the controller
and engines:
.. sourcecode:: python
@@ -213,7 +213,7 @@ to specify your own. Here is a sample PBS script template:
cd $$PBS_O_WORKDIR
export PATH=$$HOME/usr/local/bin
export PYTHONPATH=$$HOME/usr/local/lib/python2.7/site-packages
- /usr/local/bin/mpiexec -n ${n} ipenginez --cluster_dir=${cluster_dir}
+ /usr/local/bin/mpiexec -n ${n} ipengine --cluster_dir=${cluster_dir}
There are a few important points about this template:
@@ -232,8 +232,8 @@ There are a few important points about this template:
environment variables in the template, or in SGE, where the config lines start
with ``#$``, which will have to be ``#$$``.
-4. Any options to :command:`ipenginez` can be given in the batch script
- template, or in :file:`ipenginez_config.py`.
+4. Any options to :command:`ipengine` can be given in the batch script
+ template, or in :file:`ipengine_config.py`.
5. Depending on the configuration of your system, you may have to set
environment variables in the script template.
@@ -251,11 +251,11 @@ The controller template should be similar, but simpler:
cd $$PBS_O_WORKDIR
export PATH=$$HOME/usr/local/bin
export PYTHONPATH=$$HOME/usr/local/lib/python2.7/site-packages
- ipcontrollerz --cluster_dir=${cluster_dir}
+ ipcontroller --cluster_dir=${cluster_dir}
Once you have created these scripts, save them with names like
-:file:`pbs.engine.template`. Now you can load them into the :file:`ipclusterz_config` with:
+:file:`pbs.engine.template`. Now you can load them into the :file:`ipcluster_config` with:
.. sourcecode:: python
@@ -264,12 +264,12 @@ Once you have created these scripts, save them with names like
c.PBSControllerLauncher.batch_template_file = "pbs.controller.template"
-Alternately, you can just define the templates as strings inside :file:`ipclusterz_config`.
+Alternately, you can just define the templates as strings inside :file:`ipcluster_config`.
Whether you are using your own templates or our defaults, the extra configurables available are
the number of engines to launch (``$n``), and the batch system queue to which the jobs are to be
submitted (``$queue``). These are configurables, and can be specified in
-:file:`ipclusterz_config`:
+:file:`ipcluster_config`:
.. sourcecode:: python
@@ -279,7 +279,7 @@ submitted (``$queue``)). These are configurables, and can be specified in
Note that assuming you are running PBS on a multi-node cluster, the Controller's default behavior
of listening only on localhost is likely too restrictive. In this case, also assuming the
nodes are safely behind a firewall, you can simply instruct the Controller to listen for
-connections on all its interfaces, by adding in :file:`ipcontrollerz_config`:
+connections on all its interfaces, by adding in :file:`ipcontroller_config`:
.. sourcecode:: python
@@ -287,9 +287,9 @@ connections on all its interfaces, by adding in :file:`ipcontrollerz_config`:
You can now run the cluster with::
- $ ipclusterz start -p pbs -n 128
+ $ ipcluster start -p pbs -n 128
-Additional configuration options can be found in the PBS section of :file:`ipclusterz_config`.
+Additional configuration options can be found in the PBS section of :file:`ipcluster_config`.
.. note::
@@ -298,12 +298,12 @@ Additional configuration options can be found in the PBS section of :file:`ipclu
and with further configuration in similar batch systems like Condor.
-Using :command:`ipclusterz` in SSH mode
+Using :command:`ipcluster` in SSH mode
---------------------------------------
-The SSH mode uses :command:`ssh` to execute :command:`ipenginez` on remote
-nodes and :command:`ipcontrollerz` can be run remotely as well, or on localhost.
+The SSH mode uses :command:`ssh` to execute :command:`ipengine` on remote
+nodes and :command:`ipcontroller` can be run remotely as well, or on localhost.