diff --git a/Makefile b/Makefile index 342b5478..05896c41 100644 --- a/Makefile +++ b/Makefile @@ -324,10 +324,21 @@ check: mpicompilechk/mpichk # between types and argument names, e.g., const~MPI\_Aint sdispls[] # The second grep line looks for naked LaTeX commands in section titles, which # can cause problems for the PDF links. +# This check is less strict to encourage more use. The goal should be to +# use ./getlatex chap-*/*.tex , e.g., warn about both pageref and punctuation +# of quotes checklatex: getlatex - ./getlatex chap-*/*.tex - -grep 'mpibind{' chap-*/*.tex | grep '[A-Za-z] [A-Za-z]' - -grep 'section{' chap-*/*.tex | sed -e 's/.*section{//g' | sed -e 's/\\texorpdfstring{.*}//g' | sed -e 's/\\#//g' | sed -e 's/\\ //g' -e 's/\\_/_/g' | grep '\\' + ./getlatex --allowpageref --noquotechk chap-*/*.tex + -@for file in $(wildcard chap-*/*.tex) ; do \ + if grep 'mpibind{' $$file | grep '[A-Za-z] [A-Za-z]' >/dev/null ; then \ + echo "$$file : mpibind missing tie :" ; \ + grep 'mpibind{' $$file | grep '[A-Za-z] [A-Za-z]' ; \ + fi ; \ + if grep 'section{' $$file | sed -e 's/.*section{//g' | sed -e 's/\\texorpdfstring{.*}//g' | sed -e 's/\\#//g' | sed -e 's/\\ //g' -e 's/\\_/_/g' | grep '\\' > /dev/null ; then \ + echo "$$file : section with unprotected LaTeX command :" ; \ + grep 'section{' $$file | sed -e 's/.*section{//g' | sed -e 's/\\texorpdfstring{.*}//g' | sed -e 's/\\#//g' | sed -e 's/\\ //g' -e 's/\\_/_/g' | grep '\\' ; \ + fi ;\ + done # # Create the single chapter versions of the document # These simplify checks for index and bibliographic entries diff --git a/chap-appLang/appLang-Const.tex b/chap-appLang/appLang-Const.tex index eaf90988..b9b61da1 100644 --- a/chap-appLang/appLang-Const.tex +++ b/chap-appLang/appLang-Const.tex @@ -1519,6 +1519,7 @@ The following info keys are reserved. They are strings. 
\\[3pt] \infoskip{mpi\_assert\_no\_any\_source} \\ \infoskip{mpi\_assert\_no\_any\_tag} \\ \infoskip{mpi\_assert\_strict\_start\_ordering} \\ +\infoskip{mpi\_initial\_errhandler} \\ \infoskip{mpi\_optimization\_goal} \\ \infoskip{mpi\_reuse\_count} \\ \infoskip{nb\_proc} \\ @@ -1538,6 +1539,9 @@ The following info keys are reserved. They are strings. \\[3pt] The following info values are reserved. They are strings. \\[3pt] \infoskip{false} \\ +\infoskip{mpi\_errors\_abort} \\ +\infoskip{mpi\_errors\_are\_fatal} \\ +\infoskip{mpi\_errors\_return} \\ \infoskip{random} \\ \infoskip{rar}\\ \infoskip{raw}\\ diff --git a/chap-backcompat/backcompat.tex b/chap-backcompat/backcompat.tex index 76e2e43e..03baf747 100644 --- a/chap-backcompat/backcompat.tex +++ b/chap-backcompat/backcompat.tex @@ -3,7 +3,7 @@ \label{chap:backcompat} \section{Backward Incompatible since \texorpdfstring{\mpiiiidotii/}{MPI-3.2}} -\label{sec:deprecated:since32} +\label{sec:incompatible:since32} The default communicator where errors are raised when not involving a communicator, window, or file was changed from \const{MPI\_COMM\_WORLD} to diff --git a/chap-binding/binding-2.tex b/chap-binding/binding-2.tex index e501c04d..c04bf94f 100644 --- a/chap-binding/binding-2.tex +++ b/chap-binding/binding-2.tex @@ -932,7 +932,7 @@ instead of using the \mpiskipfunc{PMPI} interface. Fortran routines are interceptable as described above. \end{implementors} -\subsection{\MPI/ for Different Fortran Standard Versions} +\subsection{\texorpdfstring{\MPI/}{MPI} for Different Fortran Standard Versions} \label{sec:f90:different-fortran-versions} This section describes which Fortran interface functionality @@ -2847,8 +2847,8 @@ see \sectionref{f90:syncreg}. 
%%SKIP %%ENDHEADER \begin{Verbatim}[commandchars=\\\{\}] - {\rm Example \ref{exa:lang:register:extreme}} {\rm Example \ref{exa:lang:register:isend}} - {\rm can be solved with} {\rm can be solved with} + \textrm{Example \ref{exa:lang:register:extreme}} \textrm{Example \ref{exa:lang:register:isend}} + \textrm{can be solved with} \textrm{can be solved with} call MPI_IRECV(buf,..req) buf = val call MPI_ISEND(buf,..req) copy = buf @@ -2874,8 +2874,8 @@ see \sectionref{f90:syncreg}. %%SKIP %%ENDHEADER \begin{Verbatim}[commandchars=\\\{\}] - {\rm Example \ref{exa:lang:register}} {\rm Example \ref{exa:lang:register:send}} - {\rm can be solved with} {\rm can be solved with} + \textrm{Example \ref{exa:lang:register}} \textrm{Example \ref{exa:lang:register:send}} + \textrm{can be solved with} \textrm{can be solved with} call MPI_F_SYNC_REG(buf) call MPI_F_SYNC_REG(buf) call MPI_RECV(MPI_BOTTOM,...) call MPI_SEND(MPI_BOTTOM,...) call MPI_F_SYNC_REG(buf) call MPI_F_SYNC_REG(buf) @@ -2909,7 +2909,7 @@ see \sectionref{f90:syncreg}. {\tt%%ALLOWLATEX% \begin{tabbing} \rule{3mm}{0mm}\=\rule{60mm}{0mm}\=\kill -\>{\rm\textbf{Source of Process 1}} \>{\rm\textbf{Source of Process 2}}\\[2pt] +\>\textrm{\textbf{Source of Process 1}} \>\textrm{\textbf{Source of Process 2}}\\[2pt] \>bbbb = 777 \> buff = 999 \\ \> \> call MPI\_F\_SYNC\_REG(buff) \\ \>call MPI\_WIN\_FENCE \> call MPI\_WIN\_FENCE \\ @@ -3814,7 +3814,7 @@ into a Fortran \ftype{INTEGER}, \ftype{DIMENSION(MPI\_STATUS\_SIZE)} status arra %\end{rationale} % end of removal -\subsection{\MPI/ Opaque Objects} +\subsection{\texorpdfstring{\MPI/}{MPI} Opaque Objects} \mpitermtitleindex{opaque objects} \label{subsec:mpiopaqueobjects} diff --git a/chap-changes/changes.tex b/chap-changes/changes.tex index 1f58b146..64c593f1 100644 --- a/chap-changes/changes.tex +++ b/chap-changes/changes.tex @@ -29,6 +29,63 @@ in previous versions of this standard. Section~\ref{sec:deprecated:since32} on page~\pageref{sec:deprecated-fortran-sizeof}. 
\newline \mpifunc{MPI\_SIZEOF} was deprecated. + +% 13.--- MPI-3.2-issue #103 +\item +Chapter~\ref{sec:dynamic-2}, +Sections~\ref{sec:terms-errorhandling}, +\ref{subsec:pt2pt-envelope}, +\ref{sec:predef-comms}, +\ref{subsec:context-grpconst}, +\ref{subsec:context-intracomconst}, +\ref{subsec:context-intercomm}, +\ref{subsec:caching:sessions}, +\ref{subsec:topol-overview}, +\ref{subsec:topol-cartesian-constructor}, +\ref{subsec:topol-graph-constructor}, +\ref{subsec:topol-distgraph-constructor}, +\ref{subsec:inquiry-version}, +\ref{subsec:inquiry-inquiry}, +\ref{sec:errorhandler}, +\ref{subsec:inquiry-errhdlr-sessions}, +\ref{sec:ei-error}, +\ref{sec:ei-threads}, +\ref{sec:io-filecntl-open}, +\ref{sec:io-filecntl-get-params}, +\ref{sec:io-errhandlers}, +\ref{sec:mpit:init}, +\ref{sec:misc-handleconvert}, +\ref{subsec:mpiopaqueobjects}, +and +Annex~\ref{sec:lang} +on pages~\pageref{sec:dynamic-2}, +\pageref{sec:terms-errorhandling}, +\pageref{subsec:pt2pt-envelope}, +\pageref{sec:predef-comms}, +\pageref{subsec:context-grpconst}, +\pageref{subsec:context-intracomconst}, +\pageref{subsec:context-intercomm}, +\pageref{subsec:caching:sessions}, +\pageref{subsec:topol-overview}, +\pageref{subsec:topol-cartesian-constructor}, +\pageref{subsec:topol-graph-constructor}, +\pageref{subsec:topol-distgraph-constructor}, +\pageref{subsec:inquiry-version}, +\pageref{subsec:inquiry-inquiry}, +\pageref{sec:errorhandler}, +\pageref{subsec:inquiry-errhdlr-sessions}, +\pageref{sec:ei-error}, +\pageref{sec:ei-threads}, +\pageref{sec:io-filecntl-open}, +\pageref{sec:io-filecntl-get-params}, +\pageref{sec:io-errhandlers}, +\pageref{sec:mpit:init}, +\pageref{sec:misc-handleconvert}, +\pageref{subsec:mpiopaqueobjects}, +and +\pageref{sec:lang} +\newline +The \spm{} was added to the standard. \end{enumerate} \section{Changes from Version 3.1 to Version 3.2} @@ -146,62 +203,18 @@ pages~\pageref{sec:errorhandler} and~\pageref{sec:ei-error-classes}. 
Clarified definition of errors to say that \MPI/ should continue whenever possible and allow the user to recover from errors. -% 13.--- MPI-3.2-issue #103 +% --- MPI-3.2-issue #102 \item -Chapter~\ref{sec:dynamic-2}, -Sections~\ref{sec:terms-errorhandling}, -\ref{subsec:pt2pt-envelope}, -\ref{sec:predef-comms}, -\ref{subsec:context-grpconst}, -\ref{subsec:context-intracomconst}, -\ref{subsec:context-intercomm}, -\ref{subsec:caching:sessions}, -\ref{subsec:topol-overview}, -\ref{subsec:topol-cartesian-constructor}, -\ref{subsec:topol-graph-constructor}, -\ref{subsec:topol-distgraph-constructor}, -\ref{subsec:inquiry-version}, -\ref{subsec:inquiry-inquiry}, -\ref{sec:errorhandler}, -\ref{subsec:inquiry-errhdlr-sessions}, -\ref{sec:ei-error}, -\ref{sec:ei-threads}, -\ref{sec:io-filecntl-open}, -\ref{sec:io-filecntl-get-params}, -\ref{sec:io-errhandlers}, -\ref{sec:mpit:init}, -\ref{sec:misc-handleconvert}, -\ref{subsec:mpiopaqueobjects}, -and -Annex~\ref{sec:lang} -on pages~\pageref{sec:dynamic-2}, -\pageref{sec:terms-errorhandling}, -\pageref{subsec:pt2pt-envelope}, -\pageref{sec:predef-comms}, -\pageref{subsec:context-grpconst}, -\pageref{subsec:context-intracomconst}, -\pageref{subsec:context-intercomm}, -\pageref{subsec:caching:sessions}, -\pageref{subsec:topol-overview}, -\pageref{subsec:topol-cartesian-constructor}, -\pageref{subsec:topol-graph-constructor}, -\pageref{subsec:topol-distgraph-constructor}, -\pageref{subsec:inquiry-version}, -\pageref{subsec:inquiry-inquiry}, -\pageref{sec:errorhandler}, -\pageref{subsec:inquiry-errhdlr-sessions}, -\pageref{sec:ei-error}, -\pageref{sec:ei-threads}, -\pageref{sec:io-filecntl-open}, -\pageref{sec:io-filecntl-get-params}, -\pageref{sec:io-errhandlers}, -\pageref{sec:mpit:init}, -\pageref{sec:misc-handleconvert}, -\pageref{subsec:mpiopaqueobjects}, -and -\pageref{sec:lang} -\newline -The \spm{} was added to the standard. + Section~\ref{sec:inquiry-startup} on page~\pageref{sec:inquiry-startup}. 
+ Section~\ref{subsec:disconnect} on page~\pageref{subsec:disconnect}. \newline + Clarified the semantics of failure and error reporting before (and during) \mpifunc{MPI\_INIT} and after \mpifunc{MPI\_FINALIZE}. +\item + Section~\ref{subsec:spawnkeys} on page~\pageref{subsec:spawnkeys}. \newline + Added the \infokey{mpi\_initial\_errhandler} reserved info key with the + reserved values \infoval{mpi\_errors\_abort}, + \infoval{mpi\_errors\_are\_fatal}, and \infoval{mpi\_errors\_return} to the + launch keys in \mpifunc{MPI\_COMM\_SPAWN}, \mpifunc{MPI\_COMM\_SPAWN\_MULTIPLE}, and \mpifunc{mpiexec}. \end{enumerate} diff --git a/chap-coll/coll.tex b/chap-coll/coll.tex index 67b36991..f2a38727 100644 --- a/chap-coll/coll.tex +++ b/chap-coll/coll.tex @@ -3058,7 +3058,7 @@ to all processes in a group on return. One variant scatters equal-sized blocks to all processes, while another variant scatters blocks that may vary in size for each process. -\subsection{\mpifunc{MPI\_REDUCE\_SCATTER\_BLOCK}} +\subsection{\texorpdfstring{\mpifunc{MPI\_REDUCE\_SCATTER\_BLOCK}}{MPI\_REDUCE\_SCATTER\_BLOCK}} \label{subsec:coll-reduce-scatter-block} % Sect. 5.10.1 p.174 NEWsection \begin{funcdef}{MPI\_REDUCE\_SCATTER\_BLOCK(sendbuf, recvbuf, recvcount, @@ -3117,7 +3117,7 @@ Otherwise, a communication is needed to figure out how many elements are reduced \end{rationale} -\subsection{\mpifunc{MPI\_REDUCE\_SCATTER}} +\subsection{\texorpdfstring{\mpifunc{MPI\_REDUCE\_SCATTER}}{MPI\_REDUCE\_SCATTER}} \label{subsec:coll-reduce-scatter} % Sect. 5.10.1 p.174 NEWsection \mpifunc{MPI\_REDUCE\_SCATTER} extends the functionality of \mpifunc{MPI\_REDUCE\_SCATTER\_BLOCK} @@ -3629,7 +3629,7 @@ integer)} This call starts a nonblocking variant of \mpifunc{MPI\_BCAST} (see Section~\ref{sec:coll-broadcast}).
-\subsubsection{Example using \mpifunc{MPI\_IBCAST}} +\subsubsection{Example using \texorpdfstring{\mpifunc{MPI\_IBCAST}}{MPI\_IBCAST}} The example in this section uses an intracommunicator. diff --git a/chap-context/context.tex b/chap-context/context.tex index 498e2daa..11ca5b43 100644 --- a/chap-context/context.tex +++ b/chap-context/context.tex @@ -673,7 +673,7 @@ in order to obtain better scalability (time and space). The function \mpifunc{MPI\_GROUP\_FROM\_SESSION\_PSET} creates a group \mpiarg{newgroup} using the provided session handle and process set. The process set name must be one returned from an invocation of -\mpifunc{MPI\_SESSION\_GET\_PSET\_NAME} using the supplied \mpiarg{session} handle. +\mpifunc{MPI\_SESSION\_GET\_NTH\_PSET} using the supplied \mpiarg{session} handle. If the \mpiarg{pset\_name} does not exist, \consti{MPI\_GROUP\_NULL} will be returned in the \mpiarg{newgroup} argument. As with other group constructors, \mpifunc{MPI\_GROUP\_FROM\_SESSION\_PSET} is a local function. See section \ref{sec:model_sessions} for more information on sessions and process sets. @@ -1454,8 +1454,6 @@ platform-specific information to the application. \mpifunc{MPI\_COMM\_CREATE\_GROUP}, except that the set of \MPI/ processes involved in the creation of the new intracommunicator is specified by a \mpiarg{group} argument, rather than the group associated with a pre-existing communicator. -\mpifunc{MPI\_COMM\_CREATE\_FROM\_GROUP} must be called by -all \MPI/ processes specified in the \mpiarg{group} argument. If a non-empty \mpiarg{group} is specified, then all \MPI/ processes in that group must call the function and each of these \MPI/ processes must provide the same arguments, @@ -1466,13 +1464,13 @@ the call is a local operation and \const{MPI\_COMM\_NULL} is returned as \mpiarg The \mpiarg{stringtag} argument is analogous to the \mpiarg{tag} used for \mpifunc{MPI\_COMM\_CREATE\_GROUP}; it differentiates concurrent calls within multi-threaded applications. 
The \mpiarg{stringtag} shall not exceed \consti{MPI\_MAX\_FROM\_GROUP\_TAG} characters in length. -For C, this includes space for a NULL terminating character. +For C, this includes space for a null terminating character. The \mpiarg{errhandler} argument specifies an error handler to be attached to the new intracommunicator. This error handler will also be invoked if the \mpifunc{MPI\_COMM\_CREATE\_FROM\_GROUP} function encounters an error. The \mpiarg{info} argument provides hints and assertions, -possibly MPI implementation dependent, +possibly \MPI/ implementation dependent, which indicate desired characteristics and guide communicator creation. \begin{users} @@ -2415,7 +2413,7 @@ Wildcards are not permitted for \begin{funcdef2}{MPI\_INTERCOMM\_CREATE\_FROM\_GROUPS(local\_group, local\_leader, remote\_group,}{remote\_leader, stringtag, info, errhandler, newintercomm)} \funcarg{\IN}{local\_group}{local group (handle)} -\funcarg{\IN}{local\_leader}{rank of local group leader in \mpiarg{local\_group}, significant only at \mpiarg{local\_leader} (integer)} +\funcarg{\IN}{local\_leader}{rank of local group leader in \mpiarg{local\_group} (integer)} \funcarg{\IN}{remote\_group}{remote group, significant only at \mpiarg{local\_leader} (handle)} \funcarg{\IN}{remote\_leader}{rank of remote group leader in \mpiarg{remote\_group}, significant only at \mpiarg{local\_leader} (integer)} \funcarg{\IN}{stringtag}{unique idenitifier for this operation (string)} @@ -2442,7 +2440,7 @@ it differentiates concurrent calls in a multi-threaded environment. %DAN: I think we do not want to mandate this - each individual key may require this but some do not. %The \mpiarg{info} argument must be the same across the members of the \mpiarg{local\_group}, but may be differ from that supplied by members of the \mpiarg{remote\_group}. The \mpiarg{stringtag} shall not exceed \consti{MPI\_MAX\_FROM\_GROUP\_STRINGTAG} characters in length. 
-For C, this includes space for a NULL terminating character. +For C, this includes space for a null terminating character. \begin{funcdef}{MPI\_INTERCOMM\_MERGE(intercomm, high, newintracomm)} \funcarg{\IN}{intercomm}{Inter-Communicator (handle) } diff --git a/chap-datatypes/datatypes.tex b/chap-datatypes/datatypes.tex index a63e4093..e5d55b66 100644 --- a/chap-datatypes/datatypes.tex +++ b/chap-datatypes/datatypes.tex @@ -259,12 +259,12 @@ Then \mpiarg{newtype} has a type map with $\mpicode{count} \cdot \mpicode{n}$ entries defined by: \begin{displaymath} % The \hskip is to keep the equation within the text margins -\hskip-1em\{ (type_0, disp_0), \ldots , (type_{n-1}, disp_{n-1}), (type_0, disp_0 -+ex), \ldots ,(type_{n-1}, disp_{n-1} + ex),\\ +\hskip-1em\{ (type_0, disp_0), \ldots , (type_{n-1}, disp_{n-1}), %ALLOWLATEX% +(type_0, disp_0 +ex), \ldots ,(type_{n-1}, disp_{n-1} + ex),\\ \end{displaymath} % This is *not* the way to display a multiline equation - FIXME \begin{displaymath} -\hskip-1em\ldots,(type_0, disp_0 +ex \cdot(\mpicode{count}-1) ), \ldots , +\hskip-1em\ldots,(type_0, disp_0 +ex \cdot(\mpicode{count}-1) ), \ldots ,%ALLOWLATEX% (type_{n-1} , disp_{n-1} + ex \cdot (\mpicode{count}-1)) \} . \end{displaymath} diff --git a/chap-dynamic/dynamic-2.tex b/chap-dynamic/dynamic-2.tex index 06f30e8b..c131d973 100644 --- a/chap-dynamic/dynamic-2.tex +++ b/chap-dynamic/dynamic-2.tex @@ -171,7 +171,7 @@ In the \wpm{}, an \MPI/ program must contain exactly one call to an \MPI/ initi \mpifunc{MPI\_INIT} or \mpifunc{MPI\_INIT\_THREAD}. \const{MPI\_COMM\_WORLD} and \const{MPI\_COMM\_SELF} are not valid for use as communicators prior to invocation of \mpifunc{MPI\_INIT} or \mpifunc{MPI\_INIT\_THREAD}. Subsequent calls to either of these initialization routines are erroneous. A subset of \MPI/ functions may be invoked -before a \MPI/ initialization routines are called. See Section~\ref{sec:dynamic:common-elements}. +before \MPI/ initialization routines are called. 
See Section~\ref{sec:dynamic:common-elements}. \mpifunc{MPI\_INIT} accepts the \mpiarg{argc} and \mpiarg{argv} that are provided by the arguments to \code{main} or \mpiarg{NULL}: %%HEADER @@ -245,7 +245,7 @@ If \MPI/ is started with a call to \begin{verbatim} mpiexec -n 5 -arch sun ocean : -n 10 -arch rs6000 atmos \end{verbatim} -Then the first 5 processes will have have in their +Then the first 5 processes will have in their \const{MPI\_INFO\_ENV} object the pairs \const{(command, ocean)}, \const{(maxprocs, 5)}, and \const{(arch, sun)}. The next 10 processes will have in @@ -542,7 +542,7 @@ The call to \mpifunc{MPI\_FINALIZE} does not free objects created by \mpifuncindex{MPI\_REQUEST\_FREE}% \mpifuncindex{MPI\_TYPE\_FREE}% \mpifuncindex{MPI\_WIN\_FREE}% -\mpiskipfunc{MPI\_\XXX/\_FREE} calls. +\mpiskipfunc{MPI\_\XXX/\_FREE} calls. \mpifunc{MPI\_FINALIZE} is collective over all connected processes. If no processes were spawned, accepted or connected then this means @@ -550,7 +550,7 @@ over \const{MPI\_COMM\_WORLD}; otherwise it is collective over the union of all processes that have been and continue to be connected, as explained in \sectionref{subsec:disconnect}. -The following examples illustrates these rules +The following examples illustrate these rules. \begin{example} \exindex{MPI\_Finalize} @@ -635,7 +635,7 @@ Without a matching receive, the program is erroneous \exindex{MPI\_Iprobe} \exindex{MPI\_Test\_cancelled} \mpitermindex{cancel} - This program is correct. The cancel operation must succeed, + This program is correct. The cancel operation must succeed, since the send cannot complete normally. The wait operation, after the call to \cfunc{MPI\_Cancel}\mpifuncindex{MPI\_CANCEL}, is local --- no matching \MPI/ call is required on process 1. @@ -719,7 +719,7 @@ provided to tell if \mpi/ had been initialized using \wpm{} methods. In the \wpm{}, once \mpi/ has been finalized it cannot be restarted. 
A library needs to be able to determine this to act accordingly. To achieve this the -\mpifunc{MPI\_FINALIZED} is needed. +function \mpifunc{MPI\_FINALIZED} is needed. \begin{funcdef}{MPI\_INITIALIZED(flag)} \funcarg{\OUT}{flag}Flag is true if \mpifunc{MPI\_INIT} or \mpifunc{MPI\_INIT\_THREAD} has been called and false @@ -732,7 +732,7 @@ otherwise. \mpifbind{MPI\_INITIALIZED(FLAG, IERROR)\fargs LOGICAL FLAG \\ INTEGER IERROR} \mpicppemptybind{MPI::Is\_initialized()}{bool} -This routine may be used to determine whether \mpifunc{MPI\_INIT} \mpifunc{MPI\_INIT\_THREAD} has been +This routine may be used to determine whether \mpifunc{MPI\_INIT} or \mpifunc{MPI\_INIT\_THREAD} has been called. \mpifunc{MPI\_INITIALIZED} returns \mpiarg{true} if the calling process has called either of these methods. Whether \mpifunc{MPI\_FINALIZE} has been @@ -758,7 +758,7 @@ before \mpifunc{MPI\_INIT} and after \mpifunc{MPI\_FINALIZE}. This function must always be thread-safe, as defined in Section~\ref{sec:ei-threads}. -\subsection{Allowing User Functions at MPI Finalization} +\subsection{Allowing User Functions at \texorpdfstring{\mpi/}{MPI} Finalization} \mpitermtitleindex{user functions at process termination} \label{subsec:inquiry-startup-userfunc} @@ -797,13 +797,18 @@ down before the application-level callbacks are made. \section{The \spm{}} \label{sec:model_sessions} -There are a number of the limitations with the \wpm{} described +There are a number of limitations with the \wpm{} described in the preceding section. Among these are the following: \MPI/ cannot be initialized within an \MPI/ process from different application components without \emph{a priori} knowledge or coordination; -\MPI/ cannot be initialized more than once; and \MPI/ cannot be reinitialized after \MPI/ finalize has been called. +\MPI/ cannot be initialized more than once; and \MPI/ cannot be reinitialized after \mpifunc{MPI\_FINALIZE} has been called.
This section describes an alternative approach to \MPI/ initialization - the \spm{}. With this approach, an \MPI/ application, or components of the application, can instantiate \MPI/ resources for the specific communication needs of this component. \const{MPI\_COMM\_WORLD} is not valid for use as a communicator. +\const{MPI\_INFO\_ENV} is not valid for use as an info object when only using the \spm{}. +As described in Section~\ref{sec:inquiry-startup}, \MPI/ must be initialized using the \wpm{} to use this info object. +% +% + In this model, \MPI/ resources can be allocated and freed multiple times in an \MPI/ process. When using the \spm{}, an \MPI/ process instantiates an \emph{MPI Session handle}, which in turn can be used @@ -812,7 +817,7 @@ as other system resources. Using this information, the \MPI/ process can then cr application requirements and available resources, which in turn can be used to create an \MPI/ Communicator, Window, or File. By judicious creation of communicators, an application only needs to allocate \MPI/ resources based on its communication requirements. Although there are existing \MPI/ interfaces for creating communicators which can, -in principal, allow for resource optimizations within an \MPI/ implementation, this can only be done following +in principle, allow for resource optimizations within an \MPI/ implementation, this can only be done following initialization of \MPI/. For multi-threaded applications the \spm{} provides fine-grain control of the thread support level for \MPI/ objects. It is possible to specify different thread support levels @@ -830,7 +835,7 @@ a single \MPI/ function call. Examples include: shall represent non-blocking or persistent operations associated with \MPI/ objects derived from the same \emph{MPI Session handle}. \end{itemize} -This restriction does not apply to generalized requests~\ref{sec:ei-gr} as such requests are not associated directly with communicators or other \MPI/ objects. 
+This restriction does not apply to generalized requests (Section~\ref{sec:ei-gr}) as such requests are not associated directly with communicators or other \MPI/ objects. Note however, the \spm{} does not otherwise change the semantics or behavior of \MPI/ objects. %\begin{implementors} @@ -844,16 +849,16 @@ Note however, the \spm{} does not otherwise change the semantics or behavior of \begin{figure} \centerline{\includegraphics[width=3.50in]{figures/session_flow}} - \caption[Session handle to communicator]{Steps to creating an MPI Communicator from an MPI Session handle} + \caption[Session handle to communicator]{Steps to creating an MPI Communicator from an MPI Session handle.} \label{sessions-fig} \end{figure} \subsection{Session Creation and Destruction Methods} \begin{funcdef}{MPI\_SESSION\_INIT(info, errhandler, session)} -\funcarg{\IN}{info}{info object to specify thread level support, \MPI/ implementation specific resources, etc.} -\funcarg{\IN}{errhandler}{specifies an error handler to invoke in the event that an error is encountered during Session instantiation} -\funcarg{\OUT}{session}{handle to the created MPI Session} +\funcarg{\IN}{info}{info object to specify thread support level, \MPI/ implementation specific resources, etc. 
(handle)} +\funcarg{\IN}{errhandler}{specifies an error handler to invoke in the event that an error is encountered during this function call (handle)} +\funcarg{\OUT}{session}{session object returned by the call (handle)} \end{funcdef} \mpibind{MPI\_Session\_init(MPI\_Info~info, MPI\_Errhandler~errhandler, MPI\_Session~*session)} \mpifnewbind{MPI\_Session\_init(info, errhandler, session, ierror) \fargs TYPE(MPI\_Info), INTENT(IN) :: info \\ TYPE(MPI\_Errhandler), INTENT(IN) :: errhandler\\ TYPE(MPI\_Session), INTENT(OUT) :: session \\ INTEGER, OPTIONAL, INTENT(OUT) :: ierror} @@ -863,7 +868,7 @@ The \mpiarg{info} argument is used to request \MPI/ functionality requirements a \begin{description} \item[\infokey{thread\_support\_level}] used to request the thread support level required for \MPI/ objects -derived from the Session. Allowed values are \const{``MPI\_THREAD\_SINGLE``}, \const{``MPI\_THREAD\_FUNNELED``}, \const{``MPI\_THREAD\_SERIALIZED``}, and \const{``MPI\_THREAD\_MULTIPLE``}. Note that the thread support value is specified by a string rather than the integer +derived from the Session. Allowed values are \const{``MPI\_THREAD\_SINGLE''}, \const{``MPI\_THREAD\_FUNNELED''}, \const{``MPI\_THREAD\_SERIALIZED''}, and \const{``MPI\_THREAD\_MULTIPLE''}. Note that the thread support value is specified by a string rather than the integer values supplied to \mpifunc{MPI\_INIT\_THREAD}. The thread support level actually provided by the \MPI/ implementation can be determined via a subsequent call to \mpifunc{MPI\_SESSION\_GET\_INFO} to return the info object associated with the Session. The default thread support level is \MPI/ implementation dependent. @@ -871,15 +876,20 @@ the Session. The default thread support level is \MPI/ implementation dependent The \mpiarg{errhandler} argument specifies an error handler to invoke in the event that the Session instantiation call encounters an error. Session instantiation is intended to be a lightweight operation.
-An MPI process may instantiate multiple Sessions. \mpifunc{MPI\_SESSION\_INIT} is always thread safe; multiple threads +An \MPI/ process may instantiate multiple Sessions. \mpifunc{MPI\_SESSION\_INIT} is always thread safe; multiple threads within an application may invoke it concurrently. \begin{users} Requesting ``\const{MPI\_THREAD\_SINGLE}'' thread support level is generally not recommended, because this will conflict with other components of an application requesting higher levels of thread support. \end{users} +\begin{implementors} +Owing to the restrictions of the \const{``MPI\_THREAD\_SINGLE''} thread support level, implementors are discouraged from making this the default thread support level for Sessions. +\end{implementors} + + \begin{funcdef}{MPI\_SESSION\_FINALIZE(session)} -\funcarg{\IN}{session}{handle to a previously created MPI Session} +\funcarg{\IN}{session}{session to be finalized (handle)} \end{funcdef} \mpibind{MPI\_Session\_finalize(MPI\_Session~*session)} \mpifnewbind{MPI\_Session\_finalize(session, ierror) \fargs TYPE(MPI\_Session), INTENT(INOUT) :: session \\ INTEGER, OPTIONAL, INTENT(OUT) :: ierror} @@ -908,16 +918,16 @@ The call to \mpifunc{MPI\_SESSION\_FINALIZE} does not free objects created by \mpifuncindex{MPI\_WIN\_FREE}% \mpiskipfunc{MPI\_\XXX/\_FREE} calls. -\mpifunc{MPI\_SESSION\_FINALIZE} is collective over all MPI processes that -are connected via MPI communicators, windows, or files that were created as part of the -session and still exist. If processes were spawned, accepted or connected -using MPI communicators created as part of this session, this operation is +\mpifunc{MPI\_SESSION\_FINALIZE} is collective over all \MPI/ processes that +are connected via \MPI/ Communicators, Windows, or Files that were created as part of the +Session and still exist.
If processes were spawned, accepted, or connected +using \MPI/ Communicators created as part of this session, this operation is collective over the union of all processes that have been and continue to be connected via those objects, as explained in \sectionref{subsec:disconnect}. \begin{implementors} -A high quality \MPI/ implementation should be able to release \MPI/ state associated with a session without synchronization with other \MPI/ processes, provided an application frees all MPI windows and files, and uses \mpifunc{MPI\_COMM\_DISCONNECT} to free all \MPI/ communicators associated with a session prior to invoking \mpifunc{MP\_SESSION\_FINALIZE} on the corresponding session handle. +A high quality \MPI/ implementation should be able to release \MPI/ state associated with a session without synchronization with other \MPI/ processes, provided an application frees all \MPI/ Windows and Files, and uses \mpifunc{MPI\_COMM\_DISCONNECT} to free all \MPI/ Communicators associated with a session prior to invoking \mpifunc{MPI\_SESSION\_FINALIZE} on the corresponding session handle. \end{implementors} @@ -929,70 +939,74 @@ Process sets are identified by process set names. Process set names have a URI format. Two process set names are mandated: \const{mpi://WORLD} and \const{mpi://SELF}. Additional process set names may be defined, -e.g. \const{mpix://UNIVERSE} and \const{mpix://MPI\_COMM\_TYPE\_SHARED} +for example, \const{mpix://UNIVERSE} and \const{mpix://MPI\_COMM\_TYPE\_SHARED} may be defined by the \MPI/ implementation. The \const{mpi://} namespace is reserved for exclusive use by the \MPI/ standard. -Figure~\ref{sessions-pset-fig} depicts process sets that the runtime could associate with an instance of an MPI job. +Figure~\ref{sessions-pset-fig} depicts process sets that the runtime could associate with an instance of an \MPI/ job. In this example, the two mandated process sets are defined, in addition to optional, implementation specific ones. 
Mechanisms for defining process sets and how system resources are assigned to these sets is considered to be implementation dependent. -A process set caches key/value tuples which are accessible to the application via an \type{MPI\_Info} object. +A process set caches key/value tuples that are accessible to the application via an \type{MPI\_Info} object. The \emph{size} key is mandatory for all process sets. \begin{figure} \centerline{\includegraphics[width=4.50in]{figures/figure_pset}} - \caption[Process set examples]{Examples of process sets. Illustrated are the two mandated process sets - \const{mpi://WORLD} and \const{mpi://SELF} - along with several optional ones that a runtime could define. In this example, \mpifunc{MPI\_SESSION\_GET\_NUM\_PSETS} would return five. } + \caption[Process set examples]{Examples of process sets. Illustrated are the two mandated process sets - \const{mpi://WORLD} and \const{mpi://SELF} - along with several optional ones that a runtime could define. In this example, \mpifunc{MPI\_SESSION\_GET\_NUM\_PSETS} would return five at each \MPI/ process. 
} \label{sessions-pset-fig} \end{figure} \subsection{Runtime Query Functions} -\begin{funcdef}{MPI\_SESSION\_GET\_NUM\_PSETS(session, npset\_names)} -\funcarg{\IN}{session}{handle to a previously created MPI Session} -\funcarg{\OUT}{npset\_names}{Number of available process sets} +\begin{funcdef}{MPI\_SESSION\_GET\_NUM\_PSETS(session, info, npset\_names)} +\funcarg{\IN}{session}{session (handle)} +\funcarg{\IN}{info}{info object (handle)} +\funcarg{\OUT}{npset\_names}{number of available process sets} \end{funcdef} -\mpibind{MPI\_Session\_get\_num\_psets(MPI\_Session~session, int~*npset\_names)} -\mpifnewbind{MPI\_Session\_get\_num\_psets(session, npset\_names, ierror) \fargs TYPE(MPI\_Session), INTENT(IN) :: session \\ INTEGER, INTENT(OUT) :: npset\_names \\ INTEGER, OPTIONAL, INTENT(OUT) :: ierror} -\mpifbind{MPI\_SESSION\_GET\_NUM\_PSETS(SESSION, NPSET\_NAMES, IERROR) \fargs INTEGER SESSION, NPSET\_NAMES, IERROR} - -This function is used to query the runtime for the number of available process sets. The number of available process sets returned by this function may increase with subsequent calls to \mpifunc{MPI\_SESSION\_GET\_NUM\_PSETS}, but will not decrease. +\mpibind{MPI\_Session\_get\_num\_psets(MPI\_Session~session, MPI\_Info~info, int~*npset\_names)} +\mpifnewbind{MPI\_Session\_get\_num\_psets(session, info, npset\_names, ierror) \fargs TYPE(MPI\_Session), INTENT(IN) :: session \\ TYPE(MPI\_Info), INTENT(IN) :: info \\ INTEGER, INTENT(OUT) :: npset\_names \\ INTEGER, OPTIONAL, INTENT(OUT) :: ierror} +\mpifbind{MPI\_SESSION\_GET\_NUM\_PSETS(SESSION, INFO, NPSET\_NAMES, IERROR) \fargs INTEGER SESSION, INFO, NPSET\_NAMES, IERROR} + +This function is used to query the runtime for the number of available process sets in which the calling \MPI/ process is a member. +An \MPI/ implementation is allowed to increase the number of available process sets during the execution of an \MPI/ application when new process sets become available. 
+However, \MPI/ implementations are not allowed +to change the index of a particular process set name, +or to change the name of the process set at a particular index, +or to delete a process set name once it has been added. +When a process set becomes invalid, for example, when some processes become unreachable due to failures in the communication system, subsequent usage of the process set name should return an appropriate error code. +% [DAN] possibly advice to users for the next sentence? +For example, creating an \const{MPI\_Group} from such a process set might succeed because it is a local operation, but creating an \const{MPI\_Comm} from that group and attempting collective communication should return an appropriate error code. +% +% TODO: should we say something about non-mandated process sets possibly becoming invalid? +% \begin{implementors} It is anticipated that an \MPI/ implementation may be relying on an external runtime system to provide process sets. Such runtime systems may have the ability to dynamically create process sets during the course of application execution. Requiring the number of process sets returned by \mpifunc{MPI\_SESSION\_GET\_NUM\_PSETS} to be constant over the course of application execution would prevent an application from taking advantage of such capabilities. 
\end{implementors} -\begin{funcdef}{MPI\_SESSION\_GET\_NTH\_PSETLEN(session, n, pset\_len)} -\funcarg{\IN}{session}{handle to a previously created MPI Session} -\funcarg{\IN}{n}{process set name number (integer)} -\funcarg{\OUT}{pset\_len}{length of the nth process set name} -\end{funcdef} -\mpibind{MPI\_Session\_get\_nth\_psetlen(MPI\_Session~session, int~n, int~*pset\_len)} -\mpifnewbind{MPI\_Session\_get\_nth\_psetlen(session, n, pset\_len, ierror) \fargs TYPE(MPI\_Session), INTENT(IN) :: session \\ INTEGER, INTENT(IN) :: n \\ INTEGER, INTENT(OUT) :: pset\_len \\ INTEGER, OPTIONAL, INTENT(OUT) :: ierror} -\mpifbind{MPI\_SESSION\_GET\_NTH\_PSETLEN(SESSION, N, PSET\_LEN, IERROR)\fargs INTEGER SESSION, N, PSET\_LEN, IERROR} - -This function retrieves the length of the name of the \mpiarg{n}th process set name. -Valid values for \mpiarg{n} range from 0 to one minus the number of available process sets for this Session. -The number of available process sets for this session can be determined by calling \mpifunc{MPI\_SESSION\_GET\_NUM\_PSETS}. -The length returned in C includes space for the end-of-string character. 
- -\begin{funcdef}{MPI\_SESSION\_GET\_NTH\_PSET(session, n, pset\_len, pset\_name)} -\funcarg{\IN}{session}{handle to a previously created MPI Session} +\begin{funcdef}{MPI\_SESSION\_GET\_NTH\_PSET(session, info, n, pset\_len, pset\_name)} +\funcarg{\IN}{session}{session (handle)} +\funcarg{\IN}{info}{info object (handle)} \funcarg{\IN}{n}{process set name number (integer)} -\funcarg{\IN}{pset\_len}{length of the pset\_name argument} +\funcarg{\INOUT}{pset\_len}{length of the pset\_name argument} \funcarg{\OUT}{pset\_name}{pset\_name (string)} \end{funcdef} -\mpibind{MPI\_Session\_get\_nth\_pset(MPI\_Session~session, int~n, int~*pset\_len, char~*pset\_name)} -\mpifnewbind{MPI\_Session\_get\_nth\_pset(session, n, pset\_len, pset\_name, ierror) \fargs TYPE(MPI\_Session), INTENT(IN) :: session \\ INTEGER, INTENT(IN) :: n \\ INTEGER, INTENT(IN) :: pset\_len \\ CHARACTER(LEN=pset\_len), INTENT(OUT) :: pset\_name \\ INTEGER, OPTIONAL, INTENT(OUT) :: ierror} -\mpifbind{MPI\_SESSION\_GET\_NTH\_PSET(SESSION, N, PSET\_LEN, PSET\_NAME, IERROR)\fargs INTEGER SESSION, PSET\_LEN, IERROR \\CHARACTER*(*) PSET\_NAME} +\mpibind{MPI\_Session\_get\_nth\_pset(MPI\_Session~session, MPI\_Info~info, int~n, int~*pset\_len, char~*pset\_name)} +\mpifnewbind{MPI\_Session\_get\_nth\_pset(session, info, n, pset\_len, pset\_name, ierror) \fargs TYPE(MPI\_Session), INTENT(IN) :: session \\ TYPE(MPI\_Info), INTENT(IN) :: info \\ INTEGER, INTENT(IN) :: n \\ INTEGER, INTENT(INOUT) :: pset\_len \\ CHARACTER(LEN=pset\_len), INTENT(OUT) :: pset\_name \\ INTEGER, OPTIONAL, INTENT(OUT) :: ierror} +\mpifbind{MPI\_SESSION\_GET\_NTH\_PSET(SESSION, INFO, N, PSET\_LEN, PSET\_NAME, IERROR)\fargs INTEGER SESSION, INFO, PSET\_LEN, IERROR \\CHARACTER*(*) PSET\_NAME} This function returns the name of the \mpiarg{n}th process set in the supplied \mpiarg{pset\_name} buffer. -\mpiarg{pset\_len} is the number of characters available in \mpiarg{pset\_name}. 
If it is less than the actual -size of the process set name, the value returned in \mpiarg{pset\_name} is truncated. In C, \mpiarg{pset\_len} should -be one less than the amount of allocated space to allow for the null terminator. +\mpiarg{pset\_len} is the size of the buffer needed to store the \mpiarg{n}th process set name. If the \mpiarg{pset\_len} passed +into the function is less than the actual buffer size needed for the process set name, +then the string value returned in \mpiarg{pset\_name} is truncated. +If \mpiarg{pset\_len} is set to 0, \mpiarg{pset\_name} is not changed. On return, the value of \mpiarg{pset\_len} will be set +to the required buffer size to hold the process set name. In C, \mpiarg{pset\_len} includes the required space for the null terminator. +In C, this function returns a null terminated string in all cases where the \mpiarg{pset\_len} input value is greater than 0. If two \MPI/ processes get the same process set name, then the intersection of the two process sets shall either be the empty set or identical to the union of the two process sets. +After a successful call to \mpifunc{MPI\_SESSION\_GET\_NTH\_PSET}, subsequent calls to routines that query information about the same process set name and same session handle must return the same information. An MPI implementation is not allowed to alter any of the returned process set names. + Process set names have an implementation-defined maximum length of \const{MPI\_MAX\_PSET\_NAME\_LEN}. \begin{users} @@ -1001,8 +1015,8 @@ might not be wise to declare a string of that size. 
\end{users} \begin{funcdef}{MPI\_SESSION\_GET\_INFO(session, info\_used)} -\funcarg{\IN}{session}{handle to a previously created MPI Session} -\funcarg{\OUT}{info\_used}{see explanation below} +\funcarg{\IN}{session}{session (handle)} +\funcarg{\OUT}{info\_used}{see explanation below (handle)} \end{funcdef} \mpibind{MPI\_Session\_get\_info(MPI\_Session~session, MPI\_Info~*info\_used)} \mpifnewbind{MPI\_Session\_get\_info(session, info\_used, ierror) \fargs TYPE(MPI\_Session), INTENT(IN) :: session \\ TYPE(MPI\_Info), INTENT(OUT) :: info\\ INTEGER, OPTIONAL, INTENT(OUT) :: ierror} @@ -1022,9 +1036,9 @@ contains no key/value pair. The user is responsible for freeing \mpiarg{info\_used} via \mpifunc{MPI\_INFO\_FREE}. \begin{funcdef}{MPI\_SESSION\_GET\_PSET\_INFO(session, pset\_name, info)} -\funcarg{\IN}{session}{handle to a previously created MPI Session} +\funcarg{\IN}{session}{session (handle)} \funcarg{\IN}{set\_name}{Name of process set to query} -\funcarg{\OUT}{info}{info object containing information about the given process set} +\funcarg{\OUT}{info}{info object containing information about the given process set (handle)} \end{funcdef} \mpibind{MPI\_Session\_get\_pset\_info(MPI\_Session~session, char~*pset\_name, MPI\_Info~*info)} \mpifnewbind{MPI\_Session\_get\_pset\_info(session, set\_name, info, ierror) \fargs TYPE(MPI\_Session), INTENT(IN) :: session \\ CHARACTER(len=*), INTENT(IN) :: pset\_name \\ TYPE(MPI\_Info), INTENT(OUT) :: info\\ INTEGER, OPTIONAL, INTENT(OUT) :: ierror} @@ -1035,13 +1049,12 @@ can in turn be queried with existing \MPI/ info object query functions. One key defined, \emph{size}. The value of the size key specifies the number of \MPI/ processes in the process set. The user is responsible for freeing the returned \type{MPI\_Info} object. - \subsection{\spm{} Examples} This section presents several examples of how to use \MPI/ Sessions to create \MPI/ Groups and \MPI/ Communicators. 
\begin{example} -Simple example illustrating creation of an MPI Communicator using the \spm{}. +Simple example illustrating creation of an \MPI/ communicator using the \spm{}. \label{exa:session1} \exindex{MPI\_SESSION\_INIT}% \exindex{MPI\_INFO\_CREATE}% @@ -1115,7 +1128,7 @@ int library_foo_init(void) * get a communicator */ rc = MPI_Comm_create_from_group(wgroup, - "mpi.forum.mpi-v4_0.example-ex10_1", + "mpi.forum.mpi-v4_0.example-ex10_8", MPI_INFO_NULL, MPI_ERRORS_RETURN, &lib_comm); @@ -1147,18 +1160,16 @@ to select a Process Set to use for \MPI/ Group creation. \exindex{MPI\_SESSION\_FINALIZE}% \exindex{MPI\_GROUP\_FROM\_SESSION\_PSET}% \exindex{MPI\_SESSION\_GET\_NUM\_PSETS}% -\exindex{MPI\_SESSION\_GET\_NTH\_PSETLEN}% \exindex{MPI\_SESSION\_GET\_NTH\_PSET}% \begin{verbatim} #include #include #include -#include #include "mpi.h" int main(int argc, char *argv[]) { - int i, n_psets, psetlen, rc, ret psetsize, comm_size, pcomm_rank; + int i, n_psets, psetlen, rc, ret, psetsize, comm_size, pcomm_rank; int valuelen; int ar_out, flag = 0; char *pset_name = NULL; @@ -1179,25 +1190,21 @@ int main(int argc, char *argv[]) exit(-1); } - MPI_Session_get_num_psets(shandle, &n_psets); - - assert(n_psets >= 2); /* WORLD and SELF process - sets are suppose to be available */ + MPI_Session_get_num_psets(shandle, MPI_INFO_NULL, &n_psets); for (i = 0, pset_name = NULL; i < n_psets; i++) { - MPI_Session_get_nth_psetlen(shandle, i, &psetlen); + psetlen = 0; + MPI_Session_get_nth_pset(shandle, MPI_INFO_NULL, i, + &psetlen, NULL); pset_name = (char *)malloc(sizeof(char) * psetlen); - assert(NULL != pset_name); - MPI_Session_get_nth_pset(shandle, i, psetlen, pset_name); - + MPI_Session_get_nth_pset(shandle, MPI_INFO_NULL, i, + &psetlen, pset_name); if (strstr(pset_name, argv[1]) != NULL) break; free(pset_name); pset_name = NULL; } - assert(pset_name); - /* * get instance of an info object for this Session */ @@ -1205,13 +1212,10 @@ int main(int argc, char *argv[]) 
MPI_Session_pset_get_info(shandle, pset_name, &sinfo); MPI_Info_get_valuelen(sinfo, "size", &valuelen, &flag); - assert(flag != 0); info_val = (char *)malloc(valuelen+1); - assert(info_val != NULL); MPI_Info_get(sinfo, "size", valuelen, info_val, &flag); - assert(flag != 0); psetsize = atoi(info_val); free(info_val); @@ -1239,7 +1243,7 @@ int main(int argc, char *argv[]) Example~\ref{exa:session2} illustrates several aspects of the \spm{}. First, the default error handler can be specified when instantiating a Session instance. Second, there must be at least two process sets associated with a Session. Third, the example illustrates use of the Sessions info object and the -one required key - \emph{size}. +one required key -- \emph{size}. \begin{example} @@ -1252,7 +1256,6 @@ from a process set, and subsequently create an \MPI/ Communicator. \exindex{MPI\_COMM\_CREATE\_FROM\_GROUP}% \exindex{MPI\_BARRIER}% \exindex{MPI\_SESSION\_GET\_NUM\_PSETS}% -\exindex{MPI\_SESSION\_GET\_NTH\_PSETLEN}% \exindex{MPI\_SESSION\_GET\_NTH\_PSET}% \begin{verbatim} program main @@ -1271,7 +1274,7 @@ program main ERROR STOP end if - call MPI_Session_get_num_psets(shandle, n_psets) + call MPI_Session_get_num_psets(shandle, MPI_INFO_NULL, n_psets) IF (n_psets .lt. 2) THEN write(*,*) "MPI_Session_get_num_psets didn't return at least 2 psets" ERROR STOP @@ -1282,11 +1285,14 @@ program main ! Note that index values are zero-based, even in Fortran ! - call MPI_Session_get_nth_pset_len(shandle, 1, pset_len) + pset_len = 0 + call MPI_Session_get_nth_pset(shandle, MPI_INFO_NULL, 1, & + pset_len, pset_name) allocate(character(len=pset_len)::pset_name) - call MPI_Session_get_nth_pset(shandle, 1, pset_len, pset_name) + call MPI_Session_get_nth_pset(shandle, MPI_INFO_NULL, 1, & + pset_len, pset_name) ! ! 
create a group from the pset @@ -1322,7 +1328,8 @@ end program main \end{example} Note in this example that the call to \mpifunc{MPI\_SESSION\_FINALIZE} may block in order to -ensure that the call to \mpifunc{MPI\_BARRIER} completes for all MPI processes. If \mpifunc{MPI\_COMM\_DISCONNECT} had been used instead of \mpifunc{MPI\_COMM\_FREE}, the example would have blocked in \mpifunc{MPI\_COMM\_DISCONNECT} rather than \mpifunc{MPI\_SESSION\_FINALIZE}. +ensure that the calling \MPI/ process has completed its involvement in the preceding \mpifunc{MPI\_BARRIER} operation. If \mpifunc{MPI\_COMM\_DISCONNECT} had been used instead of \mpifunc{MPI\_COMM\_FREE}, the example would have blocked in \mpifunc{MPI\_COMM\_DISCONNECT} rather than \mpifunc{MPI\_SESSION\_FINALIZE}. + \section{Common Elements of Both Process Models} \label{sec:dynamic:common-elements} @@ -1360,7 +1367,6 @@ used. These functions can be called concurrently by multiple threads within an \ \mpifunc{MPI\_SESSION\_CALL\_ERRHANDLER}\\ \hline \mpifunc{MPI\_ERRHANDLER\_FREE} \\ -\mpifunc{MPI\_ERRHANDLER\_FREE} \\ \mpifunc{MPI\_ERROR\_STRING} \\ \mpifunc{MPI\_ERROR\_CLASS} \\ \hline @@ -1402,7 +1408,7 @@ When using the \wpm{}, and if no processes were spawned, accepted, or connected then this has the effect of aborting all the processes associated with \const{MPI\_COMM\_WORLD}. In the case of the \spm{}, if an \MPI/ process -has instantiated multiple sessions, the union of the \MPI/ processes in these +has instantiated multiple sessions, the union of the process sets in these sessions are considered connected processes. Thus invoking \mpifunc{MPI\_ABORT} on a communicator derived from one of these sessions will result in all \MPI/ processes in this union being aborted. 
@@ -1485,7 +1491,7 @@ It is suggested that\mpifuncmainindex{mpiexec} mpiexec -n \end{verbatim} be at least one way to start \code{} with an initial -set of \code{} processes, which will be accessible as the process set named "mpi://world" in the \spm{} and/or used to the form the group associated with the built-in communicator, \const{MPI\_COMM\_WORLD} in the \wpm{}. +set of \code{} processes, which will be accessible as the process set named ``mpi://world'' in the \spm{} and/or used to the form the group associated with the built-in communicator, \const{MPI\_COMM\_WORLD} in the \wpm{}. Other arguments to \code{mpiexec} may be implementation-dependent. \begin{implementors} @@ -1747,13 +1753,13 @@ This constraint simplifies implementation. \paragraph{Threads and the \spm{}} The \spm{}\mpitermtitleindexmainsub{MPI process initialization}{\spm{}} provides a finer-grain approach to controlling the interaction -between MPI calls and threads. When using this model, +between \MPI/ calls and threads. When using this model, the desired level of thread support is specified at Session initialization time. See Section~\ref{sec:model_sessions}. -Thus it is possible for communicators and other MPI objects derived from one Session to provide a different level of thread +Thus it is possible for communicators and other \MPI/ objects derived from one Session to provide a different level of thread support than those created from another Session for which a different level of thread support was requested. -Depending on the level of thread support requested at Session initialization time, different threads in a MPI process can make +Depending on the level of thread support requested at Session initialization time, different threads in a \MPI/ process can make concurrent calls to \MPI/ when using \MPI/ objects derived from different \emph{session handles}. 
-Note that the requested and granted level of thread support when creating a Session may influence the +Note that the requested and provided level of thread support when creating a Session may influence the granted level of thread support in a subsequent invocation of \mpifunc{MPI\_SESSION\_INIT}. Likewise, if the application at some point calls \mpifunc{MPI\_INIT\_THREAD}, the requested and granted level of thread support may influence the granted level of thread support for subsequent calls to \mpifunc{MPI\_SESSION\_INIT}. @@ -2604,6 +2610,14 @@ The format of \infokey{path} is determined by the implementation. is specified. The format of the filename and internal format of the file are determined by the implementation. +\item[\infokey{mpi\_initial\_errhandler}] Value is the name of an errhandler that will be +set as the initial error handler. \mpitermindex{error handling!startup}\mpitermindex{error handling!initial error handler} +The \infokey{mpi\_initial\_errhandler} key can take the case insensitive values \infoval{mpi\_errors\_are\_fatal}, \infoval{mpi\_errors\_abort}, +and \infoval{mpi\_errors\_return} representing the predefined \MPI/ error handlers +(\const{MPI\_ERRORS\_ARE\_FATAL}---the default, \const{MPI\_ERRORS\_ABORT}, and +\const{MPI\_ERRORS\_RETURN}, respectively). Other, non-standard values may be +supported by the implementation, which should document the resultant behavior. + \item[\infokey{soft}] Value specifies a set of numbers which are allowed values for the number of processes that \mpifunc{MPI\_COMM\_SPAWN} (et al.) may create. The format of the value is a comma-separated list of Fortran-90 @@ -3450,28 +3464,30 @@ with the runtime system. 
-\subsection{Singleton \texorpdfstring{\mpifunc{MPI\_INIT}}{MPI\_INIT}} +\subsection{Singleton \texorpdfstring{\MPI/}{MPI} Initialization} \mpitermtitleindex{singleton init} \label{subsec:singleton} A high-quality implementation will allow any process (including those not started with a ``parallel application'' mechanism) to become an \MPI/ process by calling -\mpifunc{MPI\_INIT}. Such a process can then +\mpifunc{MPI\_INIT}, \mpifunc{MPI\_INIT\_THREAD}, or \mpifunc{MPI\_SESSION\_INIT}. +Such a process can then connect to other \MPI/ processes using the \mpifunc{MPI\_COMM\_ACCEPT} and \mpifunc{MPI\_COMM\_CONNECT} routines, or spawn other \MPI/ processes. \MPI/ does not mandate this behavior, but strongly encourages it where technically feasible. \begin{implementors} -To start \mpi/ processes belonging to the same \consti{MPI\_COMM\_WORLD} -requires some special coordination. The processes +Special coordiation is required to start \mpi/ processes belonging to the same \consti{MPI\_COMM\_WORLD} in the case of the \wpm{}, +or the same ``mpi://world'' process set in the \spm{}. +The processes must be started at the ``same'' time, they must have a mechanism to establish communication, etc. Either the user or the operating system must take special steps beyond simply starting processes. -When an application enters \mpifunc{MPI\_INIT}, clearly +Considering the \wpm{}, when an application enters \mpifunc{MPI\_INIT}, clearly it must be able to determine if these special steps were taken. If a process enters \mpifunc{MPI\_INIT} and determines @@ -3479,7 +3495,7 @@ that no special steps were taken (i.e., it has not been given the information to form an \consti{MPI\_COMM\_WORLD} with other processes) it succeeds and forms a singleton \MPI/ program, that is, one in which -\consti{MPI\_COMM\_WORLD} has size 1. +\consti{MPI\_COMM\_WORLD} has size 1. 
In some implementations, \MPI/ may not be able to function without an ``\MPI/ environment.'' For example, @@ -3617,6 +3633,19 @@ independent processes are not affected but the effect on connected processes is not defined. \end{itemize} +\begin{implementors} + In practice, it may be difficult to distinguish between an \MPI/ process failure + \mpitermindex{error handling!finalize} + \mpitermindex{error handling!process failure} + and an erroneous program that terminates without calling + an \MPI/ finalization function: an implementation that defines semantics for + process failure management + may have to exhibit the behavior defined for \MPI/ process + failures with such erroneous programs. + A high quality implementation should exhibit a different behavior + for erroneous programs and \MPI/ process failures. +\end{implementors} + \begin{funcdef}{MPI\_COMM\_DISCONNECT(comm)} \funcarg{\INOUT}{comm}{ communicator (handle)} diff --git a/chap-frontm/abstract-cpy.tex b/chap-frontm/abstract-cpy.tex index 70f3b6de..f1b43c8a 100644 --- a/chap-frontm/abstract-cpy.tex +++ b/chap-frontm/abstract-cpy.tex @@ -9,14 +9,14 @@ % topics, process creation and management, one-sided communications, % extended collective operations, external interfaces, I/O, and % additional language bindings. -\ifdraft +\ifdraft%ALLOWLATEX% This document describes a draft version of the Message-Passing Interface (\mpi/) standard, version 4.0, intended for comment. It is not an official version of the standard. -\else +\else%ALLOWLATEX% This document describes the Message-Passing Interface (\mpi/) standard, version 4.0. 
-\fi +\fi%ALLOWLATEX% The \mpi/ standard includes point-to-point message-passing, collective communications, group and communicator concepts, process topologies, environmental management, @@ -50,9 +50,9 @@ Please send comments on \MPI/ to the \MPI/ Forum as follows: \href{mailto:mpi-comments@mpi-forum.org}{mpi-comments@mpi-forum.org}, together with the URL of the version of the \MPI/ standard and the page and line numbers on which you are commenting. -\ifdraft\else +\ifdraft\else%ALLOWLATEX% Only use the official versions. -\fi +\fi%ALLOWLATEX% Your comment will be forwarded to \MPI/ Forum committee members for diff --git a/chap-frontm/credits.tex b/chap-frontm/credits.tex index 2821be7b..7a68e8c0 100644 --- a/chap-frontm/credits.tex +++ b/chap-frontm/credits.tex @@ -1056,3 +1056,256 @@ University of Tokyo \subsection*{\MPIIVDOTO/:} \MPIIVDOTO/ is a major update to the \MPI/ Standard. + +\noindent The editors and organizers of the \MPIIVDOTO/ have been: +\begin{itemize} +\item Martin Schulz, \MPIIVDOTO/ chair, Info Object +\item Wesley Bland, \MPIIVDOTO/ Secretary +\item William Gropp, Steering committee, Front matter, Introduction, +One-Sided Communications, and Bibliography; Overall editor +\item Rolf Rabenseifner, Steering committee, Terms and Conventions, + Deprecated Functions, Removed Interfaces, Annex Change-Log, + and Annex Language Bindings +\item Daniel Holmes, Point-to-Point Communication, Sessions +\item George Bosilca, Datatypes and Environmental Management +\item Torsten Hoefler, Collective Communication and Process Topologies +\item Pavan Balaji, Groups, Contexts, and Communicators, and External Interfaces +\item Howard Pritchard, Process Creation and Management +\item Anthony Skjellum, I/O +\item Kathryn Mohror, Tools +\item Puri Bangalore, Language Bindings +\end{itemize} + +\noindent As part of the development of \MPIIVDOTO/, a number of +working groups were established. +In some cases, the work for these groups overlapped with multiple +chapters. 
+The following describes the major working groups and the leaders of +those groups: +{\raggedright%ALLOWLATEX% +\begin{description} +\item[Collective Communication, Topology, Communicators]Torsten +Hoefler, Andrew Lumsdaine, and Anthony Skjellum +\item[Fault Tolerance]Wesley Bland, Aur\'elien Bouteiller, and Richard +Graham +\item[Hardware-Topologies]Guillaume Mercier +\item[Hybrid]Pavan Balaji and Jim Dinan +\item[Large Counts]Jeff Hammond +\item[Persistence]Anthony Skjellum +\item[Point to Point Communication]Richard Graham and Dan Holmes +\item[Remove Memory Access]William Gropp and Rajeev Thakur +\item[Semantic Terms]Rolf Rabenseifner and Purushotham Bangalore +\item[Sessions]Daniel Holmes +\item[Tools]Kathryn Mohror and Marc-Andr\'e Hermanns +\end{description} +} + +The following list includes some of the active participants who +attended \MPI/ Forum meetings or participated in the e-mail +discussions. % and who are not mentioned above. + +\begin{center} +\begin{tabular}{lll} +Julien Adam\EE +Abdelhalim Amer\EE +Charles Archer\EE +Ammar Ahmad Awan\EE +Pavan Balaji\EE +Marc Gamell Balmana\EE +Purushotham Bangalore\EE +Mohammadreza Bayatpour\EE +Jean-Baptiste Besnard\EE +Claudia Blaas-Schenner\EE +Wesley Bland\EE +Gil Bloch\EE +George Bosilca\EE +Aurelien Bouteiller\EE +Ben Bratu\EE +Alexander Calvert\EE +Nicholas Chaimov\EE +Sourav Chakraborty\EE +Steffen Christgau\EE +Ching-Hsiang Chu\EE +Mikhail Chuvelev\EE +James Clark\EE +Carsten Clauss\EE +Giuseppe Congiu\EE +Brandon Cook\EE +James Custer\EE +Anna Daly\EE +Hoang-Vu Dang\EE +James Dinan\EE +Matthew Dosanjh\EE +Murali Emani\EE +Christian Engelmann\EE +Noah Evans\EE +Ana Gainaru\EE +Esthela Gallardo\EE +Balazs Gerofi\EE +Salvatore Di Girolamo\EE +Brice Goglin\EE +Richard Graham\EE +Ryan Grant\EE +Stanley Graves\EE +William Gropp\EE +Siegmar Gross\EE +Taylor Groves\EE +Yanfei Guo\EE +Khaled Hamidouche\EE +Jeff Hammond\EE +Marc-Andr\'e Hermanns\EE +Nathan Hjelm\EE +Torsten Hoefler\EE +Daniel Holmes\EE +Atsushi 
Hori\EE +Josh Hursey\EE +Ilya Ivanov\EE +Julien Jaeger\EE +Jeannot Jeannot\EE +Sylvain Jeaugey\EE +Jithin Jose\EE +Krishna Kandalla\EE +Takahiro Kawashima\EE +Chulho Kim\EE +Michael Knobloch\EE +Alice Koniges\EE +Sameer Kumar\EE +Kim Kyunghun\EE +Ignacio Laguna\EE +Stefan Lankes\EE +Tonglin Li\EE +Xioyi Lu\EE +Kavitha Madhu\EE +Alexey Malhanov\EE +Ryan Marshall\EE +William Marts\EE +Guillaume Mercier\EE +Kathryn Mohror\EE +Takeshi Nanri\EE +Thomas Naughton\EE +Takafumi Nose\EE +Lena Oden\EE +Steve Oyanagi\EE +Guillaume Papaur\'e\EE +Ivy Peng\EE +Ignacio Laguna Peralta\EE +Antonio Pe\~na\EE +Simon Pickartz\EE +Artem Polyakov\EE +Sreeram Potluri\EE +Howard Pritchard\EE +Martina Prugger\EE +Marc P\'erache\EE +Rolf Rabenseifner\EE +Nicholas Radcliffe\EE +Ken Raffenetti\EE +Craig Rasmussen\EE +Soren Rasmussen\EE +Hubert Ritzdorf\EE +Sergio Rivas-Gomez\EE +Davide Rossetti\EE +Martin Ruefenacht\EE +Amit Ruhela\EE +Joseph Schuchart\EE +Martin Schulz\EE +Sangmin Seo\EE +Sameh Sharkawi\EE +Sameer Shende\EE +Min Si\EE +Anthony Skjellum\EE +Brian Smith\EE +David Solt\EE +Jeff Squyres\EE +Srinivas Sridharan\EE +Hari Subramoni\EE +Nawrin Sultana\EE +Shinji Sumimoto\EE +Sayantan Sur\EE +Hugo Taboada\EE +Keita Teranishi\EE +Rajeev Thakur\EE +Keith Underwood\EE +Isaias Alberto Compres Urena\EE +Geoffroy Vallee\EE +Manjunath Gorentla Venkata\EE +Akshay Venkatesh\EE +Jerome Vienne\EE +Anh Vo\EE +Justin Wozniak\EE +Junchao Zhang\EE +Dong Zhong\EE +Hui Zhou\EE +\FlushEntries +\end{tabular} +\end{center} + +The \MPI/ Forum also acknowledges and appreciates the valuable input +from people via e-mail and in person. + +% People who provided significant input during the public comment +%The \MPI/ Forum also thanks those that provided feedback during the +%public comment period. + +The following institutions supported the \MPIIVDOTO/ effort through +time and travel support for the people listed above. 
+ +\medskip%ALLOWLATEX% + +% From Wesley 9/1/19, with minor edits +\begin{obeylines}\leftskip=\parindent\parindent=0pt %ALLOWLATEX% +ATOS/BULL +Argonne National Laboratory +Arm +Auburn University +Barcelona Supercomputing Center +CEA +Cisco Systems Inc. +Cray Inc. +EPCC, The University Of Edinburgh +ETH Zürich +Forschungszentrum J\"ulich +Fujitsu +Fulda University of Applied Sciences +German Research School for Simulation Sciences +Hewlett Packard Enterprise +International Business Machines +INRIA +Intel Corporation +J\"ulich Aachen Research Alliance, High-Performance Computing (JARA-HPC) +J\"ulich Supercomputing Center +KTH Royal Institute Of Technology +Kyushu University +Lawrence Berkeley National Laboratory +Lawrence Livermore National Laboratory +Lenovo +Los Alamos National Laboratory +Mellanox Technologies, Inc. +Microsoft Corporation +NEC Corporation +NVIDIA Corporation +Oak Ridge National Laboratory +PAR-TEC +Paratools, Inc. +RIKEN AICS (R-CCS as of 2017) +RWTH Aachen University +Rutgers University +Sandia National Laboratories +Silicon Graphics, Inc. +TU Wien +Technical University Of Munich +The HDF Group +The Ohio State University +Texas Advanced Computing Center +Tokyo Institute of Technology +University of Alabama at Birmingham +University of Houston +University of Illinois at Urbana-Champaign and the National Center for Supercomputing Applications +University of Innsbruck +University of Oregon +University of Pottsdam +University of Stuttgart, High Performance Computing Center Stuttgart (HLRS) +University of Tennessee, Chattanooga +University of Tennessee, Knoxville +University of Texas at El Paso +University of Tokyo +\end{obeylines} diff --git a/chap-frontm/history.tex b/chap-frontm/history.tex index d5a01d20..a1d83601 100644 --- a/chap-frontm/history.tex +++ b/chap-frontm/history.tex @@ -2,7 +2,12 @@ % that should be done either in the introduction or through the change log. % Provide only the highest level summary here. 
-\paragraph{Version 4.0: XXXX 2020.} +%\paragraph{Version 4.0: XXXX 2020.} +\paragraph{2019 Draft Specification} +This document includes a number of new features which will be present in the final \mpiivdoto/ +document. The largest changes are the addition of persistent collectives, application info +assertions, and improvements to the definitions of error handling. In addition, there are a number +of smaller improvements and corrections. \paragraph{Version 3.1: June 4, 2015.} This document contains mostly corrections and clarifications to the diff --git a/chap-inquiry/inquiry.tex b/chap-inquiry/inquiry.tex index 2b922e64..7684bafa 100644 --- a/chap-inquiry/inquiry.tex +++ b/chap-inquiry/inquiry.tex @@ -485,7 +485,10 @@ communicators, windows, files, and sessions. The specified error handling routine will be used for any \MPI/ exception that occurs during a call to \MPI/ for the respective object. \MPI/ calls that are not related to any \MPI/ objects are considered to -be attached to the communicator \const{MPI\_COMM\_SELF}. +be attached to the communicator \const{MPI\_COMM\_SELF}. When +\const{MPI\_COMM\_SELF} is not initialized (i.e., +before \mpifunc{MPI\_INIT} / \mpifunc{MPI\_INIT\_THREAD} or after \mpifunc{MPI\_FINALIZE}) +the error raises the initial error handler (set during the launch operation, see~\ref{subsec:spawnkeys}).\mpitermindex{error handling!initial error handler} The attachment of error handlers to objects is purely local: different processes may attach different error handlers to corresponding objects. @@ -493,7 +496,7 @@ to corresponding objects. Several predefined error handlers are available in \MPI/: \begin{description} \item[\const{MPI\_ERRORS\_ARE\_FATAL}] The handler, when called, causes the -program to abort all connected processes. +program to abort all connected \MPI/ processes. 
This is similar to calling \mpifunc{MPI\_ABORT} using a communicator containing all connected processes with an implementation-specific value as the \mpiarg{errorcode} argument. @@ -521,18 +524,20 @@ other than returning the error code to the user. Implementations may provide additional predefined error handlers and programmers can code their own error handlers. -After initialization, the error handler -\const{MPI\_ERRORS\_ARE\_FATAL} is associated by default -with \const{MPI\_COMM\_WORLD}, \const{MPI\_COMM\_SELF}, and the communicator -returned by \mpifunc{MPI\_COMM\_GET\_PARENT} (if any). -Thus if the user chooses not to control error handling, every error that MPI handles is treated as fatal. +Unless otherwise requested, the error handler +\const{MPI\_ERRORS\_ARE\_FATAL} is set as the default initial error +handler and associated with predefined communicators.\mpitermindex{error handling!initial error handler} +Thus, if the user chooses not to control error handling, +every error that \MPI/ handles is treated as fatal. Since (almost) all \MPI/ calls return an error code, a user may choose to handle errors in its main code, by testing the return code of \MPI/ calls and executing a suitable recovery code when the call was not successful. In this case, the error handler \const{MPI\_ERRORS\_RETURN} will be used. Usually it is more convenient and more efficient not to test for errors after each \MPI/ call, and -have such error handled by a non-trivial \MPI/ error handler. +have such error handled by a non-trivial \MPI/ error handler. Note that unlike +predefined communicators, windows and files do not inherit from the initial error handler, +as defined in Sections~\ref{sec:1sided-errhandlers} and~\ref{sec:io-errhandlers} respectively. After an error is detected, \MPI/ will provide the user as much information as possible about that error using error classes. 
Some errors might prevent \MPI/ @@ -997,6 +1002,11 @@ The argument \mpiarg{string} must represent storage that is at least The number of characters actually written is returned in the output argument, \mpiarg{resultlen}. +This function must always be thread-safe, as defined in +Section~\ref{sec:ei-threads}. +It is one of the few routines that may be called before +\MPI/ is initialized or after \MPI/ is finalized. + \begin{rationale} The form of this function was chosen to make the Fortran and C bindings similar. A version that returns a pointer to a string has two @@ -1194,6 +1204,11 @@ allows us to define the error classes this way. Having a known The function \mpifunc{MPI\_ERROR\_CLASS} maps each standard error code (error class) onto itself. +This function must always be thread-safe, as defined in +Section~\ref{sec:ei-threads}. +It is one of the few routines that may be called before +\MPI/ is initialized or after \MPI/ is finalized. + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Error Classes, Error Codes, and Error Handlers} \mpitermtitleindex{error handling!error codes and classes} @@ -1379,7 +1394,8 @@ value in \mpiarg{IERROR} if the error handler was successfully called (assuming the process is not aborted and the error handler returns). \begin{users} -As with communicators, the default error handler for windows is \consti{MPI\_ERRORS\_ARE\_FATAL}. +In contrast to communicators, the error handler \consti{MPI\_ERRORS\_ARE\_FATAL} is associated with a +window when it is created. \end{users} \begin{funcdef}{MPI\_FILE\_CALL\_ERRHANDLER(fh, errorcode)} @@ -1510,4 +1526,3 @@ clock ticks. For example, if the clock is implemented by the hardware as a counter that is incremented every millisecond, the value returned by \mpifunc{MPI\_WTICK} should be \(10^{-3}\). 
- diff --git a/chap-inquiry/startup_section_from_merge_conflict.tex b/chap-inquiry/startup_section_from_merge_conflict.tex new file mode 100644 index 00000000..2a5e0ee5 --- /dev/null +++ b/chap-inquiry/startup_section_from_merge_conflict.tex @@ -0,0 +1,749 @@ +<<<<<<< RearrangeInitialisationTextForSessions +======= + + +\section{Startup} +\mpitermtitleindex{startup} +\label{sec:inquiry-startup} + +\label{sec:misc-init} +One goal of \MPI/ is to achieve \emph{source code portability}. By this we mean +that a program written using \MPI/ and complying with the relevant language +standards is portable as written, and must not require any source code changes +when moved from one system to another. This explicitly does \emph{not} say +anything about how an \MPI/ program is started or launched from the command +line, nor what the user must do to set up the environment in which an \MPI/ +program will run. However, an implementation may require some setup to be +performed before other \MPI/ routines may be called. To provide for this, \MPI/ +includes an initialization routine \mpifunc{MPI\_INIT}. + + +\begin{funcdefna}{MPI\_INIT()} +\end{funcdefna} + +\mpibind{MPI\_Init(int~*argc, char~***argv)} + +\mpifnewbind{MPI\_Init(ierror) \fargs INTEGER, OPTIONAL, INTENT(OUT) :: ierror} +\mpifbind{MPI\_INIT(IERROR)\fargs INTEGER IERROR} +\mpicppemptybind{MPI::Init(int\&~argc, char**\&~argv)}{void} +\mpicppemptybind{MPI::Init()}{void} + + +All \MPI/ programs must contain exactly one call to an \MPI/ initialization routine: +\mpifunc{MPI\_INIT} or \mpifunc{MPI\_INIT\_THREAD}. Subsequent calls to any +initialization routines are erroneous. 
The only \MPI/ functions that may be invoked +before the \MPI/ initialization routines are called are \mpifunc{MPI\_GET\_VERSION}, \mpifunc{MPI\_GET\_LIBRARY\_VERSION}, +\mpifunc{MPI\_INITIALIZED}, \mpifunc{MPI\_FINALIZED}, \mpifunc{MPI\_ERROR\_CLASS}, \mpifunc{MPI\_ERROR\_STRING}, and any function +with the prefix \mpicode{MPI\_T\_} (within the constraints for functions with this prefix listed in Section~\ref{sec:mpit:init}). The version for ISO C +accepts the \mpiarg{argc} and \mpiarg{argv} that are provided by the arguments to +\code{main} or \mpiarg{NULL}: +%%HEADER +%%LANG: C +%%ENDHEADER +\begin{verbatim} +int main(int argc, char *argv[]) +{ + MPI_Init(&argc, &argv); + + /* parse arguments */ + /* main program */ + + MPI_Finalize(); /* see below */ + return 0; +} +\end{verbatim} +The Fortran version takes only \const{IERROR}. + +Conforming implementations of \MPI/ are required to allow +applications to pass \mpiarg{NULL} for both the \mpiarg{argc} and +\mpiarg{argv} arguments of \cfunc{main} in +C. + +\mpitermtitleindex{error handling!startup} +Failures may disrupt the execution of the program before +or during \MPI/ initialization. A high-quality implementation shall +not deadlock during \MPI/ initialization, even in the presence of failures. +Except for functions with the \mpicode{MPI\_T\_} prefix, failures in \MPI/ operations prior to or during \MPI/ initialization are reported by invoking +the initial error handler.\mpitermindex{error handling!initial error handler} Users can use the \infokey{mpi\_initial\_errhandler} info key +during the launch of MPI processes (e.g., \mpifunc{MPI\_COMM\_SPAWN} / \mpifunc{MPI\_COMM\_SPAWN\_MULTIPLE}, or \mpifunc{mpiexec}) to set a non-fatal initial error handler before \MPI/ initialization. +When the initial error handler is set to \const{MPI\_ERRORS\_ABORT}, +raising an error before or during initialization aborts the local +\MPI/ process (i.e., it is similar to calling \mpifunc{MPI\_ABORT} +on \const{MPI\_COMM\_SELF}). 
+% +An implementation may not always be capable of determining, +before \MPI/ initialization, what constitutes the local \MPI/ process, or the set of +connected processes. In this case, errors before initialization may cause a different set +of \MPI/ processes to abort than specified. +% +After \MPI/ initialization, the initial error handler is associated with +\const{MPI\_COMM\_WORLD}, \const{MPI\_COMM\_SELF}, +and the communicator returned by \mpifunc{MPI\_COMM\_GET\_PARENT} (if any). + +\begin{implementors} +Some failures may leave \MPI/ in an undefined state, or raise an error +before the error handling capabilities are fully operational, in which cases +the implementation may be incapable of providing the desired error handling +behavior. Of note, in some implementations, the notion of an \MPI/ process +is not clearly established in the early stages of \MPI/ initialization (for +example, when the implementation considers threads that called \mpifunc{MPI\_INIT} +as independent \MPI/ processes); in this case, before \MPI/ is initialized, +the \const{MPI\_ERRORS\_ABORT} error handler may abort what would have become +multiple \MPI/ processes. + +When a failure occurs during \MPI/ initialization, +the implementation may decide to return \const{MPI\_SUCCESS} from the \MPI/ initialization function +instead of raising an error. +It is recommended that an implementation +masks an initialization error only when it +expects that later \MPI/ calls will result in well specified behavior (i.e., barring +additional failures, either the outcome of any call will be correct, or the call +will raise an appropriate error). +For example, it may be difficult for an implementation to +avoid unspecified behavior + when the group of \const{MPI\_COMM\_WORLD} does not contain +the same set of \MPI/ processes at all members of the communicator, or if the communicator +returned from \mpifunc{MPI\_COMM\_GET\_PARENT} was not initialized correctly. 
+\end{implementors} + +While \MPI/ is initialized, the application can access information +about the execution environment by querying the predefined info object +\const{MPI\_INFO\_ENV}. +The following keys are predefined for this object, corresponding to the +arguments of \mpifunc{MPI\_COMM\_SPAWN} or of \mpifunc{mpiexec}\mpitermindex{mpiexec}: +\begin{description} +\item[\infokey{command}] Name of program executed. +\item[\infokey{argv}] Space separated arguments to command. +\item[\infokey{maxprocs}] Maximum number of \MPI/ processes + to start. +\item[\infokey{mpi\_initial\_errhandler}] Name of the initial errhandler. +\item[\infokey{soft}] Allowed values for number of processors. + +\item[\infokey{host}] Hostname. +\item[\infokey{arch}] Architecture name. +\item[\infokey{wdir}] Working directory of the +\MPI/ process. +\item[\infokey{file}] Value is the name of a file in which additional information +is specified. +\item[\infokey{thread\_level}] Requested level of thread support, if + requested before the program started execution. +\end{description} + +Note that all values are strings. Thus, the maximum number of +processes is represented by a string such as \const{``1024''} and +the requested level is represented by a string such +as \const{``MPI\_THREAD\_SINGLE''}. + +The info object \const{MPI\_INFO\_ENV} need not contain a (key,value) +pair for each of these predefined keys; the set of (key,value) pairs +provided is implementation-dependent. +Implementations may provide additional, implementation specific, +(key,value) pairs. + +In case where the \MPI/ processes were started with +\mpifunc{MPI\_COMM\_SPAWN\_MULTIPLE} or, equivalently, with a +startup mechanism that supports multiple process specifications, then +the values stored in the info object \const{MPI\_INFO\_ENV} at a +process are those values that affect the local \MPI/ process. 
+
+\begin{example}
+\exindex{mpiexec}
+\exindex{MPI\_INFO\_ENV}
+If \MPI/ is started with a call to
+%%HEADER
+%%SKIP
+%%ENDHEADER
+\begin{verbatim}
+    mpiexec -n 5 -arch sun ocean : -n 10 -arch rs6000 atmos
+\end{verbatim}
+Then the first 5 processes will have in their
+\const{MPI\_INFO\_ENV} object the pairs \const{(command, ocean)},
+\const{(maxprocs, 5)},
+and \const{(arch, sun)}. The next 10 processes will have in
+\const{MPI\_INFO\_ENV} \const{(command, atmos)},
+\const{(maxprocs, 10)},
+and \const{(arch, rs6000)}.
+\end{example}
+
+\begin{users}
+The values passed in \const{MPI\_INFO\_ENV} are the values of the
+arguments passed to the mechanism that started the \MPI/ execution ---
+not the actual value provided. Thus, the value associated with
+\const{maxprocs} is the number of \MPI/ processes requested; it can
+be larger than the actual number of processes obtained, if the
+\const{soft} option was used.
+\end{users}
+
+\begin{implementors}
+High-quality implementations will provide a (key,value) pair for each
+parameter that can be passed to the command that starts an \MPI/
+program.
+\end{implementors}
+
+\begin{funcdefna}{MPI\_FINALIZE()}
+\end{funcdefna}
+
+\mpibind{MPI\_Finalize(void)}
+
+\mpifnewbind{MPI\_Finalize(ierror) \fargs INTEGER, OPTIONAL, INTENT(OUT) :: ierror}
+\mpifbind{MPI\_FINALIZE(IERROR)\fargs INTEGER IERROR}
+\mpicppemptybind{MPI::Finalize()}{void}
+
+
+This routine cleans up all \MPI/ state.
+If an \MPI/ program terminates normally (i.e., not due to a call to
+\mpifunc{MPI\_ABORT} or an unrecoverable error) then each process must
+call \mpifunc{MPI\_FINALIZE} before it exits.
+
+
+Before an \MPI/ process invokes \mpifunc{MPI\_FINALIZE}, the process must
+perform all \MPI/ calls needed to complete its
+involvement in \MPI/ communications: It must locally complete all
+\MPI/ operations that it initiated and must execute matching calls needed to complete \MPI/
+communications initiated by other processes.
+For example, if the process executed a nonblocking send, it must +eventually call \mpifunc{MPI\_WAIT}, \mpifunc{MPI\_TEST}, +\mpifunc{MPI\_REQUEST\_FREE}, or any derived function; if the process +is the target of a send, then it must post +the matching receive; if it is part of a group executing a collective +operation, then it must have completed its participation in the +operation. + +The call to \mpifunc{MPI\_FINALIZE} does not free objects created by +\MPI/ calls; these objects are freed using +\mpifuncindex{MPI\_COMM\_FREE}% +\mpifuncindex{MPI\_ERRHANDLER\_FREE}% +\mpifuncindex{MPI\_GROUP\_FREE}% +\mpifuncindex{MPI\_INFO\_FREE}% +\mpifuncindex{MPI\_OP\_FREE}% +\mpifuncindex{MPI\_REQUEST\_FREE}% +\mpifuncindex{MPI\_TYPE\_FREE}% +\mpifuncindex{MPI\_WIN\_FREE}% +\mpiskipfunc{MPI\_\XXX/\_FREE} calls. + +\mpifunc{MPI\_FINALIZE} is collective over all connected processes. +If no processes were spawned, accepted or connected then this means +over \const{MPI\_COMM\_WORLD}; otherwise it is collective over the +union of all processes that have been and continue to be connected, +as explained in \sectionref{subsec:disconnect}. 
+
+The following examples illustrate these rules.
+
+\begin{example}
+\exindex{MPI\_Finalize}
+The following code is correct.
+
+%%HEADER
+%%SKIP
+%%ENDHEADER
+\begin{verbatim}
+    Process 0                 Process 1
+    ---------                 ---------
+    MPI_Init();               MPI_Init();
+    MPI_Send(dest=1);         MPI_Recv(src=0);
+    MPI_Finalize();           MPI_Finalize();
+\end{verbatim}
+\end{example}
+
+\begin{example}
+\exindex{MPI\_Finalize}
+Without a matching receive, the program is erroneous.
+%%HEADER
+%%SKIP
+%%ENDHEADER
+\begin{verbatim}
+    Process 0                 Process 1
+    -----------               -----------
+    MPI_Init();               MPI_Init();
+    MPI_Send(dest=1);
+    MPI_Finalize();           MPI_Finalize();
+\end{verbatim}
+\end{example}
+
+
+\begin{example}
+\exindex{MPI\_Finalize}
+\exindex{MPI\_Request\_free}
+  This program is correct: Process 0 calls
+  \cfunc{MPI\_Finalize}\mpifuncindex{MPI\_FINALIZE} after it has executed
+  the \MPI/ calls that complete the
+  send operation. Likewise, process 1 executes the \MPI/ call
+  that completes the matching receive operation before it calls \cfunc{MPI\_Finalize}\mpifuncindex{MPI\_FINALIZE}.
+%%HEADER
+%%SKIP
+%%ENDHEADER
+\begin{verbatim}
+    Process 0                 Process 1
+    ---------                 ---------
+    MPI_Init();               MPI_Init();
+    MPI_Isend(dest=1);        MPI_Recv(src=0);
+    MPI_Request_free();       MPI_Finalize();
+    MPI_Finalize();           exit();
+    exit();
+\end{verbatim}
+\end{example}
+
+\begin{example}
+\exindex{MPI\_Finalize}
+\exindex{MPI\_Buffer\_attach}
+  This program is correct. The attached buffer is a resource
+  allocated by the user, not by \MPI/; it is available to the user
+  after \MPI/ is finalized.
+%%HEADER
+%%SKIP
+%%ENDHEADER
+\begin{verbatim}
+    Process 0                     Process 1
+    ---------                     ---------
+    MPI_Init();                   MPI_Init();
+    buffer = malloc(1000000);     MPI_Recv(src=0);
+    MPI_Buffer_attach();          MPI_Finalize();
+    MPI_Send(dest=1);             exit();
+    MPI_Finalize();
+    free(buffer);
+    exit();
+\end{verbatim}
+\end{example}
+
+\begin{example}
+\exindex{MPI\_Finalize}
+\exindex{MPI\_Barrier}
+\exindex{MPI\_Cancel}
+\exindex{MPI\_Iprobe}
+\exindex{MPI\_Test\_cancelled}
+\mpitermindex{cancel}
+  This program is correct. The cancel operation must succeed,
+  since the send cannot complete normally. The wait operation, after
+  the call to \cfunc{MPI\_Cancel}\mpifuncindex{MPI\_CANCEL}, is
+  local --- no matching \MPI/ call is required on process 1.
+  Cancelling a send request by calling \mpifunc{MPI\_CANCEL} is deprecated.
+
+%%HEADER
+%%SKIP
+%%ENDHEADER
+\begin{verbatim}
+
+    Process 0                     Process 1
+    ---------                     ---------
+    MPI_Issend(dest=1);           MPI_Finalize();
+    MPI_Cancel();
+    MPI_Wait();
+    MPI_Finalize();
+\end{verbatim}
+
+\end{example}
+
+\begin{implementors}
+Even though a process has
+executed all \MPI/ calls needed to complete the communications
+it is involved with, such
+communication may not yet be completed from the viewpoint of the underlying
+\MPI/ system. For example, a blocking send may have returned, even though the data
+is still buffered at the sender in an \MPI/
+buffer; an \MPI/ process may receive a cancel request for a
+message it has completed receiving. The \MPI/ implementation must ensure that a
+process has completed any involvement in \MPI/ communication before
+\mpifunc{MPI\_FINALIZE} returns. Thus, if a process exits after the call to
+\mpifunc{MPI\_FINALIZE}, this will not cause an ongoing communication to
+fail.
+The \MPI/ implementation should also complete freeing all
+objects marked for deletion by \MPI/ calls that freed them.
+\end{implementors} + +Once \mpifunc{MPI\_FINALIZE} returns, no \mpi/ routine (not even \mpifunc{MPI\_INIT}) may +be called, except for +\mpifunc{MPI\_GET\_VERSION}, \mpifunc{MPI\_GET\_LIBRARY\_VERSION}, +\mpifunc{MPI\_INITIALIZED}, +\mpifunc{MPI\_FINALIZED}, \mpifunc{MPI\_ERROR\_CLASS}, \mpifunc{MPI\_ERROR\_STRING}, and any function +with the prefix \mpicode{MPI\_T\_} (within the constraints for functions with this prefix listed in Section~\ref{sec:mpit:init}). + +\mpitermtitleindex{error handling!finalize} +Failures may disrupt \MPI/ operations during and after \MPI/ finalization. +A high quality implementation shall not deadlock in \MPI/ finalization, even in +the presence of failures. The normal +rules for \MPI/ error handling continue to apply. +After \const{MPI\_COMM\_SELF} has been ``freed'' (see \ref{subsec:inquiry-startup-userfunc}), errors that are not associated +with a communicator, window, or file raise the initial error handler (set during the launch operation, see~\ref{subsec:spawnkeys}).\mpitermindex{error handling!initial error handler} + +Although it is not required that all processes return from +\mpifunc{MPI\_FINALIZE}, it is required that, when it has not failed or aborted, +at least the \MPI/ process that was assigned rank 0 in +\const{MPI\_COMM\_WORLD} returns, so +that users can know that the \MPI/ portion of the computation is over. In +addition, in a POSIX environment, users may desire to supply an exit code for +each process that returns from \mpifunc{MPI\_FINALIZE}. + +\mpitermtitleindex{error handling!finalize} +Note that a failure may terminate the \MPI/ process that was assigned +rank 0 in \const{MPI\_COMM\_WORLD}, in which case it is possible +that no \MPI/ process returns from \mpifunc{MPI\_FINALIZE}. + +\par +\begin{users} + Applications that handle errors are encouraged to implement all rank-specific + code before the call to \mpifunc{MPI\_FINALIZE}. 
In Example~\ref{example:finalize-rank0} + below, the process with rank 0 in \const{MPI\_COMM\_WORLD} may have been terminated before, + during, or after the call to \mpifunc{MPI\_FINALIZE}, possibly leading to the code + after \mpifunc{MPI\_FINALIZE} never being executed. +\end{users} + +\begin{example}\label{example:finalize-rank0} +\exindex{MPI\_Finalize} +The following illustrates the use of requiring that at least one +process return and that it be known that process 0 is one of the processes +that return. One wants code like the following to work no matter how many +processes return. + +%%HEADER +%%SKIPELIPSIS +%%FRAGMENT +%%DECL: int myrank; FILE *resultfile; +%%DECL: void dump_results(FILE *); +%%ENDHEADER +\begin{verbatim} + ... + MPI_Comm_rank(MPI_COMM_WORLD, &myrank); + ... + MPI_Finalize(); + if (myrank == 0) { + resultfile = fopen("outfile", "w"); + dump_results(resultfile); + fclose(resultfile); + } + exit(0); +\end{verbatim} +\end{example} + +\begin{funcdef}{MPI\_INITIALIZED(flag)} +\funcarg{\OUT}{flag}Flag is true if \mpifunc{MPI\_INIT} has been called and false +otherwise. +\end{funcdef} + +\mpibind{MPI\_Initialized(int~*flag)} + +\mpifnewbind{MPI\_Initialized(flag, ierror) \fargs LOGICAL, INTENT(OUT) :: flag \\ INTEGER, OPTIONAL, INTENT(OUT) :: ierror} +\mpifbind{MPI\_INITIALIZED(FLAG, IERROR)\fargs LOGICAL FLAG \\ INTEGER IERROR} +\mpicppemptybind{MPI::Is\_initialized()}{bool} + + + +This routine may be used to determine whether \mpifunc{MPI\_INIT} has been +called. +\mpifunc{MPI\_INITIALIZED} returns \mpiarg{true} if the calling process has +called \mpifunc{MPI\_INIT}. Whether \mpifunc{MPI\_FINALIZE} has been +called does not affect the behavior of \mpifunc{MPI\_INITIALIZED}. +It is one of the few routines that may be called before +\mpifunc{MPI\_INIT} is called. +This function must always be thread-safe, as defined in +Section~\ref{sec:ei-threads}. 
+ +\begin{funcdef}{MPI\_ABORT(comm, errorcode)} +\funcarg{\IN}{comm}{communicator of tasks to abort} +\funcarg{\IN}{errorcode}{error code to return to invoking environment} +\end{funcdef} + +\mpibind{MPI\_Abort(MPI\_Comm~comm, int~errorcode)} + +\mpifnewbind{MPI\_Abort(comm, errorcode, ierror) \fargs TYPE(MPI\_Comm), INTENT(IN) :: comm \\ INTEGER, INTENT(IN) :: errorcode \\ INTEGER, OPTIONAL, INTENT(OUT) :: ierror} +\mpifbind{MPI\_ABORT(COMM, ERRORCODE, IERROR)\fargs INTEGER COMM, ERRORCODE, IERROR} +\mpicppemptybind{MPI::Comm::Abort(int~errorcode)}{void} + + + +This routine makes a ``best attempt'' to abort all tasks in the group +of \mpiarg{comm}. +This function does not require that the invoking environment take any action +with the error code. However, a Unix or POSIX environment should handle this +as a \code{return errorcode} from the main program. + + +It may not be possible for an \MPI/ implementation to abort only the +processes represented by \mpiarg{comm} if this is a subset of the processes. +In this case, the \MPI/ implementation should attempt to abort all the connected +processes but should not abort any unconnected processes. +If no processes were spawned, accepted, or connected then this has the effect +of aborting all the processes associated with \const{MPI\_COMM\_WORLD}. + +\begin{implementors} + After aborting a subset of processes, a high quality implementation should + be able to provide error handling for communicators, windows, and files + involving both aborted and non-aborted processes. As an example, if the + user changes the error handler for \const{MPI\_COMM\_WORLD} to + \const{MPI\_ERRORS\_RETURN} or a custom error handler, when a subset of + \const{MPI\_COMM\_WORLD} is aborted, the remaining processes in + \const{MPI\_COMM\_WORLD} should be able to continue communicating with each + other and receive appropriate error codes when attempting communication + with an aborted process. 
+\end{implementors}
+
+\begin{users}
+Whether the \mpiarg{errorcode} is returned from the executable or from the
+\mpifuncindex{mpiexec}%
+\mpitermindex{mpiexec}%
+\MPI/ process startup mechanism (e.g., \code{mpiexec}), is an aspect of quality
+of the \MPI/ library but not mandatory.
+\end{users}
+\mpifuncindex{mpiexec}%
+\mpitermindex{mpiexec}%
+\begin{implementors}
+Where possible, a high-quality implementation will try to return the
+\mpiarg{errorcode} from the \MPI/ process startup mechanism
+(e.g., \code{mpiexec} or singleton init).
+\end{implementors}
+
+\subsection{Allowing User Functions at Process Termination}
+\mpitermtitleindex{user functions at process termination}
+\label{subsec:inquiry-startup-userfunc}
+
+There are times in which it would be convenient to have actions happen
+when an \mpi/ process finishes. For example, a routine may do
+initializations that are useful until the \mpi/ job (or that part
+of the job that is being terminated in the case of dynamically created
+processes) is finished. This can be accomplished in \mpi/ by
+attaching an attribute to \consti{MPI\_COMM\_SELF} with a callback
+function. When \mpifunc{MPI\_FINALIZE}
+is called, it will first execute the equivalent of an
+\mpifunc{MPI\_COMM\_FREE} on \consti{MPI\_COMM\_SELF}.
+This will
+cause the delete callback function to be executed on all keys
+associated with \consti{MPI\_COMM\_SELF}, in the reverse order that
+they were set on \consti{MPI\_COMM\_SELF}. If no key has been
+attached to \consti{MPI\_COMM\_SELF}, then no callback is invoked.
+The ``freeing'' of \consti{MPI\_COMM\_SELF} occurs before any other parts
+of \MPI/ are affected. Thus, for example, calling
+\mpifunc{MPI\_FINALIZED} will return \constskip{false} in any of these
+callback functions. Once done with \consti{MPI\_COMM\_SELF}, the
+order and rest of the actions taken by \mpifunc{MPI\_FINALIZE}
+are not specified.
+ +\begin{implementors} +Since attributes can be added from any supported language, the \mpi/ +implementation needs to remember the creating language so the correct +callback is made. +Implementations that use the attribute delete callback on \consti{MPI\_COMM\_SELF} +internally should register their internal callbacks before returning from +\mpifunc{MPI\_INIT} / \mpifunc{MPI\_INIT\_THREAD}, so that libraries +or applications will not have portions of the \MPI/ implementation shut +down before the application-level callbacks are made. +\end{implementors} + + +\subsection{Determining Whether \texorpdfstring{\mpi/}{MPI} Has Finished} +\mpitermtitleindex{finished} + +One of the goals of \mpi/ was to allow for layered libraries. In +order for a library to do this cleanly, it needs to know if \mpi/ is +active. In \mpi/ the function \mpifunc{MPI\_INITIALIZED} was +provided to tell if \mpi/ had been initialized. The problem arises in +knowing if \mpi/ has been finalized. Once \mpi/ has been finalized it +is no longer active and cannot be restarted. A library needs to be +able to determine this to act accordingly. To achieve this the +following function is needed: + +\begin{funcdef}{MPI\_FINALIZED(flag)} +\funcarg{\OUT}{flag}{true if \mpi/ was finalized (logical)} +\end{funcdef} + +\mpibind{MPI\_Finalized(int~*flag)} + +\mpifnewbind{MPI\_Finalized(flag, ierror) \fargs LOGICAL, INTENT(OUT) :: flag \\ INTEGER, OPTIONAL, INTENT(OUT) :: ierror} +\mpifbind{MPI\_FINALIZED(FLAG, IERROR)\fargs LOGICAL FLAG\\INTEGER IERROR} + +\mpicppemptybind{MPI::Is\_finalized()}{bool} + +This routine returns \mpiarg{true} if \mpifunc{MPI\_FINALIZE} has completed. +It is valid to call \mpifunc{MPI\_FINALIZED} +before \mpifunc{MPI\_INIT} and after \mpifunc{MPI\_FINALIZE}. +This function must always be thread-safe, as defined in +Section~\ref{sec:ei-threads}. 
+ +\begin{users} +\mpi/ is ``active'' and it is thus safe to call \mpi/ functions if +\mpifunc{MPI\_INIT} \emph{has} completed and \mpifunc{MPI\_FINALIZE} +\emph{has not} completed. If a library has no other way of knowing whether +\mpi/ is active or not, then it can use \mpifunc{MPI\_INITIALIZED} and +\mpifunc{MPI\_FINALIZED} to determine this. For example, \mpi/ is ``active'' +in callback functions that are invoked during \mpifunc{MPI\_FINALIZE}. +\end{users} + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\section{Portable \texorpdfstring{\MPI/}{MPI} Process Startup} +\label{sec:inquiry-mpiexec} +\mpitermtitleindex{startup!portable} + +A number of implementations of \mpi/ provide a startup command for \MPI/ programs +that is of the form +\mpifuncindex{mpirun}% +%%HEADER +%%SKIP +%%ENDHEADER +\begin{verbatim} + mpirun +\end{verbatim} +Separating the command to start the program from the program itself provides +flexibility, particularly for network and heterogeneous implementations. For +example, the startup script need not run on one of the machines that will be +executing the \MPI/ program itself. + + +Having a standard startup mechanism also extends the portability of +\MPI/ programs one step further, to the command lines +and scripts that manage them. For example, a validation suite script +that runs hundreds of programs can be a portable script if it is +written using such a standard starup mechanism. +In order that the ``standard'' command not be confused with existing +practice, which is not standard and not portable among implementations, +\mpifuncindex{mpirun}% +\mpitermindex{mpirun}% +\mpifuncindex{mpiexec}% +instead of \code{mpirun} \MPI/ specifies \code{mpiexec}. + +While a standardized startup mechanism improves the usability of \MPI/, +the range of environments is so diverse (e.g., there may not even be a +command line interface) that \MPI/ cannot mandate such a +mechanism. 
Instead, \MPI/ specifies an \code{mpiexec} startup command and +recommends but does not require it, as advice to implementors. +However, if an implementation does provide a command called +\code{mpiexec}, it must be of the form described below. + + + +\mpitermdefindex{mpiexec}% +It is suggested that\mpifuncmainindex{mpiexec} +%%HEADER +%%SKIP +%%ENDHEADER +\begin{verbatim} + mpiexec -n +\end{verbatim} +be at least one way to start \code{} with an initial +\const{MPI\_COMM\_WORLD} whose group contains \code{} processes. +Other arguments to \code{mpiexec} may be implementation-dependent. + +\begin{implementors} + Implementors, if they do provide a special startup command for \MPI/ programs, + are advised to give it the following form. The syntax is chosen in order + that \code{mpiexec} be able to be viewed as a command-line version of + \mpifunc{MPI\_COMM\_SPAWN} (See Section~\ref{subsec:spawnkeys}). + + Analogous to \mpifunc{MPI\_COMM\_SPAWN}, we + have + +%%HEADER +%%SKIP +%%ENDHEADER +\begin{verbatim} + mpiexec -n + -soft < > + -host < > + -arch < > + -wdir < > + -path < > + -file < > + -initial-errhandler < > + ... + +\end{verbatim} + for the case where a single command line for the application program and its + arguments will suffice. See Section~\ref{subsec:spawnkeys} for the meanings + of these arguments. For the case corresponding to + \mpifunc{MPI\_COMM\_SPAWN\_MULTIPLE} there are two possible formats: + + Form A: + +%%HEADER +%%SKIP +%%ENDHEADER +\begin{verbatim} + mpiexec { } : { ... } : { ... } : ... : { ... } +\end{verbatim} + As with \mpifunc{MPI\_COMM\_SPAWN}, all the arguments are optional. (Even the + \code{-n x } argument is optional; the default is implementation dependent. + It might be \code{1}, it might be taken from an environment variable, or it + might be specified at compile time.) The names and meanings of the arguments are taken + from the keys in the \mpiarg{info} argument to \mpifunc{MPI\_COMM\_SPAWN}. 
There may + be other, implementation-dependent arguments as well. + + Note that Form A, though convenient to type, prevents colons from being + program arguments. Therefore an alternate, file-based form is allowed: + + Form B: + +%%HEADER +%%SKIP +%%ENDHEADER +\begin{verbatim} + mpiexec -configfile +\end{verbatim} +where the lines of \code{$<$filename$>$} are of the form separated by the +colons in Form A. Lines beginning with `\code{\#}' are comments, and lines +may be continued by terminating the partial line with `\code{\char`\\}'.%%ALLOWLATEX% + +\begin{example} +\exindex{mpiexec} +Start 16 instances of \code{myprog} on the current or default machine: +%%HEADER +%%SKIP +%%ENDHEADER +\begin{verbatim} + mpiexec -n 16 myprog +\end{verbatim} +\end{example} +\begin{example} +\exindex{mpiexec} + Start 10 processes on the machine called \code{ferrari}: +%%HEADER +%%SKIP +%%ENDHEADER +\begin{verbatim} + mpiexec -n 10 -host ferrari myprog +\end{verbatim} +\end{example} +\begin{example} +\exindex{mpiexec} + Start three copies of the same program with different command-line + arguments: +%%HEADER +%%SKIP +%%ENDHEADER +\begin{verbatim} + mpiexec myprog infile1 : myprog infile2 : myprog infile3 +\end{verbatim} +\end{example} +\begin{example} +\exindex{mpiexec} + Start the \code{ocean} program on five Suns and the \code{atmos} program on 10 + RS/6000's: +%%HEADER +%%SKIP +%%ENDHEADER +\begin{verbatim} + mpiexec -n 5 -arch sun ocean : -n 10 -arch rs6000 atmos +\end{verbatim} +It is assumed that the implementation in this case has a method for choosing +hosts of the appropriate type. Their ranks are in the order specified. 
+\end{example} +\begin{example} +\exindex{mpiexec} + Start the \code{ocean} program on five Suns and the \code{atmos} program on 10 + RS/6000's (Form B): +%%HEADER +%%SKIP +%%ENDHEADER +\begin{verbatim} + mpiexec -configfile myfile +\end{verbatim} + where \code{myfile} contains +%%HEADER +%%SKIP +%%ENDHEADER +\begin{verbatim} + -n 5 -arch sun ocean + -n 10 -arch rs6000 atmos +\end{verbatim} +\end{example} + + +\end{implementors} +>>>>>>> mpi-4.x diff --git a/chap-intro/intro.tex b/chap-intro/intro.tex index 27010987..b50b01f6 100644 --- a/chap-intro/intro.tex +++ b/chap-intro/intro.tex @@ -296,7 +296,14 @@ collective I/O routines, and routines to get the index value by name for \mpiskipfunc{MPI\_T} performance and control variables. A general index was also added. -\section{Background of \texorpdfstring{\MPIIVDOTO/}{MPI-4.0}} +%\section{Background of \texorpdfstring{\MPIIVDOTO/}{MPI-4.0}} +\section{Background of 2019 Draft Specification} + +The 2019 draft specification is expected to become the \MPIIVDOTO/ specification once all features +have been merged. \MPIIVDOTO/ is a major update to the \MPI/ standard. This update includes a number +of new features which will be present in the final \mpiivdoto/ document. The largest changes are the +addition of persistent collectives, application info assertions, and improvements to the definitions +of error handling. In addition, there are a number of smaller improvements and corrections. \section{Who Should Use This Standard?} diff --git a/chap-io/io-2.tex b/chap-io/io-2.tex index e3c2fa94..118ce2fe 100644 --- a/chap-io/io-2.tex +++ b/chap-io/io-2.tex @@ -205,7 +205,7 @@ reference the same file. \mpiarg{comm} must be an intracommunicator; it is erroneous to pass an intercommunicator to \mpifunc{MPI\_FILE\_OPEN}. 
Errors in \mpifunc{MPI\_FILE\_OPEN} are raised -using the default file error handler +using the default file error handler\mpitermindex{error handling!default file error handler} (see \sectionref{sec:io-errhandlers}). When using the \wpm{}\mpitermtitleindexmainsub{MPI process initialization}{\wpm{}} (Section~\ref{sec:dynamic:introduction}), a process can open a file independently of other processes by using the \consti{MPI\_COMM\_SELF} communicator. Applications using the \spm{} (Section~\ref{sec:model_sessions})\mpitermtitleindexmainsub{MPI process initialization}{Sessions Model} can achieve the same result using communicators created from the \const{mpi://SELF} process set. @@ -430,7 +430,7 @@ If the file is not deleted, an error in the class \error{MPI\_ERR\_FILE\_IN\_USE} or \error{MPI\_ERR\_ACCESS} will be raised. Errors are raised -using the default error handler +using the default file error handler\mpitermindex{error handling!default file error handler} (see \sectionref{sec:io-errhandlers}). \subsection{Resizing a File} @@ -2736,15 +2736,16 @@ Support of optional datatypes (e.g., \consti{MPI\_INTEGER2}) is not required. All floating point values are in big-endian -IEEE format \cite{ieee-754-1985} of the appropriate size. +IEEE format \cite{ieee-754-2008} of the appropriate size. Floating point values are represented by one -of three IEEE formats. These are the IEEE ``Single,'' ``Double,'' and -``Double Extended'' formats, requiring 4, 8, and 16 bytes of storage, +of three IEEE formats. + These are the IEEE ``Single (binary32),'' ``Double (binary64),'' and +``Double Extended (binary128)'' formats, requiring 4, 8, and 16 bytes of storage, respectively. -For the IEEE ``Double Extended'' formats, \MPI/ specifies a +For the IEEE ``Double Extended (binary128)'' formats, \MPI/ specifies a Format Width of 16 bytes, with 15 exponent bits, bias = +16383, -112 fraction bits, and an encoding analogous to the ``Double'' format. 
+112 fraction bits, and an encoding analogous to the ``Double (binary64)'' format. All integral values are in two's complement big-endian format. Big-endian means most significant byte at lowest address byte. For C \ctype{\_Bool}, Fortran \ftype{LOGICAL}, and C++ \ctype{bool}, @@ -2765,14 +2766,13 @@ have the sign bit at the most significant bit. bit of the real and imaginary parts at the most significant bit of each part. %-- weasel words from XDR via Leslie Hart - ies -According to IEEE specifications \cite{ieee-754-1985}, the ``NaN'' +According to IEEE specifications \cite{ieee-754-2008}, the ``NaN'' (not a number) is system dependent. It should not be interpreted within \MPI/ as anything other than ``NaN.'' \begin{implementors} -The \MPI/ treatment of ``NaN'' is similar to the approach used in XDR (see -https://www.ietf.org/rfc/rfc1832.txt). +The \MPI/ treatment of ``NaN'' is similar to the approach used in XDR \cite{IETF-RFC4506}. \end{implementors} All data is byte aligned, regardless of type. @@ -2791,62 +2791,109 @@ a header in the beginning of the pack buffer. \end{users} \begin{table}[htbp] -%% FIXME: DO NOT USE VERBATIM FOR THIS - USE TABULAR -%% NOTE THAT THIS DOES NOT INDEX THESE TYPES!! 
-%%HEADER -%%SKIP -%%ENDHEADER -\begin{Verbatim}[commandchars=\\\{\}] -Type Length Optional Type Length ------------------- ------ ------------------ ------ -MPI_PACKED 1 MPI_INTEGER1 1 -MPI_BYTE 1 MPI_INTEGER2 2 -MPI_CHAR 1 MPI_INTEGER4 4 -MPI_UNSIGNED_CHAR 1 MPI_INTEGER8 8 -MPI_SIGNED_CHAR 1 MPI_INTEGER16 16 -MPI_WCHAR 2 -MPI_SHORT 2 MPI_REAL2 2 -MPI_UNSIGNED_SHORT 2 MPI_REAL4 4 -MPI_INT 4 MPI_REAL8 8 -MPI_UNSIGNED 4 MPI_REAL16 16 -MPI_LONG 4 -MPI_UNSIGNED_LONG 4 MPI_COMPLEX4 2*2 -MPI_LONG_LONG_INT 8 MPI_COMPLEX8 2*4 -MPI_UNSIGNED_LONG_LONG 8 MPI_COMPLEX16 2*8 -MPI_FLOAT 4 MPI_COMPLEX32 2*16 -MPI_DOUBLE 8 -MPI_LONG_DOUBLE 16 - -MPI_C_BOOL 1 -MPI_INT8_T 1 C++ Types Length -MPI_INT16_T 2 ------------------ ------ -MPI_INT32_T 4 MPI_CXX_BOOL 1 -MPI_INT64_T 8 MPI_CXX_FLOAT_COMPLEX 2*4 -MPI_UINT8_T 1 MPI_CXX_DOUBLE_COMPLEX 2*8 -MPI_UINT16_T 2 MPI_CXX_LONG_DOUBLE_COMPLEX 2*16 -MPI_UINT32_T 4 -MPI_UINT64_T 8 -MPI_AINT 8 -MPI_COUNT 8 -MPI_OFFSET 8 -MPI_C_COMPLEX 2*4 -MPI_C_FLOAT_COMPLEX 2*4 -MPI_C_DOUBLE_COMPLEX 2*8 -MPI_C_LONG_DOUBLE_COMPLEX 2*16 - -MPI_CHARACTER 1 -MPI_LOGICAL 4 -MPI_INTEGER 4 -MPI_REAL 4 -MPI_DOUBLE_PRECISION 8 -MPI_COMPLEX 2*4 -MPI_DOUBLE_COMPLEX 2*8 -\end{Verbatim} +\begin{center} +\begin{tabular}{|l|l|} +\hline + Predefined Type & Length \\ +\hline +\const{MPI\_PACKED} & 1 \\ +\const{MPI\_BYTE} & 1 \\ +\const{MPI\_CHAR} & 1 \\ +\const{MPI\_UNSIGNED\_CHAR} & 1 \\ +\const{MPI\_SIGNED\_CHAR} & 1 \\ +\const{MPI\_WCHAR} & 2 \\ +\const{MPI\_SHORT} & 2 \\ +\const{MPI\_UNSIGNED\_SHORT} & 2 \\ +\const{MPI\_INT} & 4 \\ +\const{MPI\_LONG} & 4 \\ +\const{MPI\_UNSIGNED} & 4 \\ +\const{MPI\_UNSIGNED\_LONG} & 4 \\ +\const{MPI\_LONG\_LONG\_INT} & 8 \\ +\const{MPI\_UNSIGNED\_LONG\_LONG} & 8 \\ +\const{MPI\_FLOAT} & 4 \\ +\const{MPI\_DOUBLE} & 8 \\ +\const{MPI\_LONG\_DOUBLE} & 16 \\ +\hline +\const{MPI\_C\_BOOL} & 1 \\ +\const{MPI\_INT8\_T} & 1 \\ +\const{MPI\_INT16\_T} & 2 \\ +\const{MPI\_INT32\_T} & 4 \\ +\const{MPI\_INT64\_T} & 8 \\ +\const{MPI\_UINT8\_T} & 1 \\ 
+\const{MPI\_UINT16\_T} & 2 \\ +\const{MPI\_UINT32\_T} & 4 \\ +\const{MPI\_UINT64\_T} & 8 \\ +\const{MPI\_AINT} & 8 \\ +\const{MPI\_COUNT} & 8 \\ +\const{MPI\_OFFSET} & 8 \\ +\const{MPI\_C\_COMPLEX} & 2*4 \\ +\const{MPI\_C\_FLOAT\_COMPLEX} & 2*4 \\ +\const{MPI\_C\_DOUBLE\_COMPLEX} & 2*8 \\ +\const{MPI\_C\_LONG\_DOUBLE\_COMPLEX} & 2*16 \\ +\hline +\const{MPI\_CHARACTER} & 1 \\ +\const{MPI\_LOGICAL} & 4 \\ +\const{MPI\_INTEGER} & 4 \\ +\const{MPI\_REAL} & 4 \\ +\const{MPI\_DOUBLE\_PRECISION} & 8 \\ +\const{MPI\_COMPLEX} & 2*4 \\ +\const{MPI\_DOUBLE\_COMPLEX} & 2*8 \\ +\hline +\const{MPI\_CXX\_BOOL} & 1 \\ +\const{MPI\_CXX\_FLOAT\_COMPLEX} & 2*4 \\ +\const{MPI\_CXX\_DOUBLE\_COMPLEX} & 2*8 \\ +\const{MPI\_CXX\_LONG\_DOUBLE\_COMPLEX} & 2*16 \\ +\hline +\end{tabular} +\end{center} \caption{``external32'' sizes of predefined datatypes} -\label{publishercomment:i} \label{table:io:extsizes} \end{table} +\begin{table}[htbp] +\begin{center} +\begin{tabular}{|l|l|} +\hline + Predefined Type\hspace{3cm} & Length \\ +\hline +\const{MPI\_INTEGER1} & 1 \\ +\const{MPI\_INTEGER2} & 2 \\ +\const{MPI\_INTEGER4} & 4 \\ +\const{MPI\_INTEGER8} & 8 \\ +\const{MPI\_INTEGER16} & 16 \\ +\const{MPI\_REAL2} & 2 \\ +\const{MPI\_REAL4} & 4 \\ +\const{MPI\_REAL8} & 8 \\ +\const{MPI\_REAL16} & 16 \\ +\const{MPI\_COMPLEX4} & 2*2 \\ +\const{MPI\_COMPLEX8} & 2*4 \\ +\const{MPI\_COMPLEX16} & 2*8 \\ +\const{MPI\_COMPLEX32} & 2*16 \\ +\hline +\end{tabular} +\end{center} +\caption{``external32'' sizes of optional datatypes} +\label{publishercomment:i} +\label{table:io:optional-extsizes} +\end{table} + +\begin{table}[htbp] +\begin{center} +\begin{tabular}{|l|l|} +\hline + C++ Types & Length \\ +\hline +\const{MPI\_CXX\_BOOL} & 1 \\ +\const{MPI\_CXX\_FLOAT\_COMPLEX} & 2*4 \\ +\const{MPI\_CXX\_DOUBLE\_COMPLEX} & 2*8 \\ +\const{MPI\_CXX\_LONG\_DOUBLE\_COMPLEX} & 2*16 \\ +\hline +\end{tabular} +\end{center} +\caption{``external32'' sizes of C++ datatypes} +\label{table:io:cxx-extsizes} +\end{table} + The 
sizes of the predefined datatypes returned from \mpifunc{MPI\_TYPE\_CREATE\_F90\_REAL}, \mpifunc{MPI\_TYPE\_CREATE\_F90\_COMPLEX}, and @@ -2862,7 +2909,10 @@ This allows no conversion errors if the data range is within the range of the smaller size integer. \end{implementors} -Table~\ref{table:io:extsizes} specifies the sizes of predefined datatypes in ``external32'' format. +Tables~\ref{table:io:extsizes}, \ref{table:io:optional-extsizes}, +and~\ref{table:io:cxx-extsizes} specify the sizes of +predefined, optional, and C++ datatypes in ``external32'' format, +respectively. @@ -3981,10 +4031,10 @@ another important aspect. By default, the predefined error handler for file handles is \consti{MPI\_ERRORS\_RETURN}. -The default file error handler has two purposes: +The \mpitermdef{default file error handler}\mpitermdefindex{error handling!default file error handler} has two purposes: when a new file handle is created (by \mpifunc{MPI\_FILE\_OPEN}), the error handler for the new file handle -is initially set to the default error handler, +is initially set to the default file error handler, and I/O routines that have no valid file handle on which to raise an error (e.g., \mpifunc{MPI\_FILE\_OPEN} or \mpifunc{MPI\_FILE\_DELETE}) use the default file error handler. diff --git a/chap-misc/misc-2.tex b/chap-misc/misc-2.tex index 799802d9..b336ed3a 100644 --- a/chap-misc/misc-2.tex +++ b/chap-misc/misc-2.tex @@ -11,7 +11,6 @@ % Moved here from Dynamic -% \section{The \mpiarg{Info} Object} \label{subsec:info} Many of the routines in diff --git a/chap-one-side/one-side-2.tex b/chap-one-side/one-side-2.tex index 087c6584..59058ab6 100644 --- a/chap-one-side/one-side-2.tex +++ b/chap-one-side/one-side-2.tex @@ -3029,8 +3029,8 @@ All other \RMA/ calls have an input \mpiarg{win} argument. When an error occurs during such a call, the error handler currently associated with \mpiarg{win} is invoked. 
-The default error handler associated with \mpiarg{win} is -\consti{MPI\_ERRORS\_ARE\_FATAL}. Users may change this default by +The error handler \consti{MPI\_ERRORS\_ARE\_FATAL} is associated +with \mpiarg{win} during its creation. Users may change this default by explicitly associating a new error handler with \mpiarg{win} (see \sectionref{sec:errorhandler}). @@ -3764,10 +3764,10 @@ sizes or target displacements is undefined. Accumulate calls enable element-wise atomic read and write to remote memory locations. \MPI/ specifies ordering between accumulate operations -from one process to the same (or overlapping) memory locations at -another process on a per-datatype granularity. The default ordering is +from an origin process to the same (or overlapping) memory locations at +a target process on a per-datatype granularity. The default ordering is strict ordering, which guarantees that overlapping updates from the same -source to a remote location are committed in program order and that +origin to a remote location are committed in program order and that reads (e.g., with \mpifunc{MPI\_GET\_ACCUMULATE}) and writes (e.g., with \mpifunc{MPI\_ACCUMULATE}) are executed and committed in program order. Ordering only applies to operations originating at the same origin that diff --git a/chap-terms/terms-2.tex b/chap-terms/terms-2.tex index 9856b684..3932eed6 100644 --- a/chap-terms/terms-2.tex +++ b/chap-terms/terms-2.tex @@ -949,15 +949,19 @@ A message sent is always received correctly, and the user does not need to check for transmission errors, time-outs, or other error conditions. In other words, \MPI/ does not provide mechanisms for -dealing with failures in the communication system. +dealing with \mpitermdef{transmission failures} \mpitermdefindex{error handling!transmission failure} +in the communication system. 
If the \MPI/ implementation is built on an unreliable underlying mechanism, then it is the job of the implementor of the \MPI/ subsystem -to insulate the user from this unreliability, or to reflect unrecoverable -errors as failures. +to insulate the user from this unreliability, and to reflect only unrecoverable +transmission failures. Whenever possible, such failures will be reflected as errors in the relevant communication call. + Similarly, \MPI/ itself provides no mechanisms for -handling processor failures. +handling \MPI/ \mpitermdef{process failures}\mpitermdefindex{error handling!process failure}, +that is, when an \MPI/ process unexpectedly and permanently stops communicating +(e.g., a software or hardware crash results in an \MPI/ process terminating unexpectedly). Of course, \MPI/ programs may still be erroneous. A \mpitermdef{program error}\mpitermdefindex{error handling!program error} can occur when an \MPI/ call is made with an incorrect argument (non-existing @@ -996,8 +1000,12 @@ expensive to detect in normal execution mode; finally some errors may be On the other hand, some errors may be detected after the associated operation has completed; some errors may not have a communicator, window, or file on which an error may be raised. -The manner in which errors are handled in this case depends on the method -used for initializing \MPI/. See Section~\ref{sec:errorhandler}. +In such cases, these errors will be raised on the communicator +\const{MPI\_COMM\_SELF}. 
+When \const{MPI\_COMM\_SELF} is not initialized (i.e., +before \mpifunc{MPI\_INIT} / \mpifunc{MPI\_INIT\_THREAD} or after \mpifunc{MPI\_FINALIZE}) +the error raises the \mpitermdef{initial error handler} (set during the launch operation, see~\ref{subsec:spawnkeys}).\mpitermdefindex{error handling!initial error handler} +\mpitermindex{error handling!startup}\mpitermindex{error handling!finalize} An example of such a case arises because of the nature of asynchronous communications: diff --git a/chap-tools/mpit.tex b/chap-tools/mpit.tex index 19fa5127..40011ddf 100644 --- a/chap-tools/mpit.tex +++ b/chap-tools/mpit.tex @@ -1,6 +1,6 @@ % LLNL-MI-422102-DRAFT -\section{The \MPI/ Tool Information Interface} +\section{The \texorpdfstring{\MPI/}{MPI} Tool Information Interface} \mpitermtitleindex{tool information interface} \label{sec:mpit} @@ -129,7 +129,7 @@ listed in the table; i.e., \const{MPI\_T\_VERBOSITY\_USER\_BASIC} $<$ \end{table} -\subsection{Binding \MPI/ Tool Information Interface Variables to \MPI/ Objects} +\subsection{Binding \texorpdfstring{\MPI/}{MPI} Tool Information Interface Variables to \texorpdfstring{\MPI/}{MPI} Objects} \label{sec:mpit:assoc} Each \MPI/ tool information interface variable provides access to a @@ -1775,7 +1775,7 @@ an arbitrary subset of size len. Otherwise, the entire set of elements is returned in the beginning entries of the array, and any remaining array entries are not modified. 
-\subsection{Return Codes for the \MPI/ Tool Information Interface} +\subsection{Return Codes for the \texorpdfstring{\MPI/}{MPI} Tool Information Interface} \label{sec:mpit:retcodes} All functions defined as part of the \MPI/ tool information interface diff --git a/chap-tools/prof.tex b/chap-tools/prof.tex index ace9a320..fa7e098c 100644 --- a/chap-tools/prof.tex +++ b/chap-tools/prof.tex @@ -190,7 +190,7 @@ int MPI_Send(const void* buffer, int count, MPI_Datatype datatype, \end{verbatim} \end{example} -\subsection{\MPI/ Library Implementation Example} +\subsection{\texorpdfstring{\MPI/}{MPI} Library Implementation Example} If the \MPI/ library is implemented in C on a Unix system, then there are various options, including the two presented here, for supporting the diff --git a/chap-topol/topol.tex b/chap-topol/topol.tex index b2e0e9cd..8cdc3323 100644 --- a/chap-topol/topol.tex +++ b/chap-topol/topol.tex @@ -2314,7 +2314,7 @@ END % Use small here for the lines that end .... first element of u(...) % because those lines are clearest as written, and changing the layout to % fit with the full size fonts sacrifices readability. -\begin{small} +\begin{small}%ALLOWLATEX% %%HEADER %%LANG: FORTRAN90 %%ENDHEADER @@ -2357,7 +2357,7 @@ CALL MPI_NEIGHBOR_ALLTOALLW(u, sndcounts, sdispls, sndtypes, & CALL MPI_TYPE_FREE(type_vec, ierr) END \end{verbatim} -\end{small} +\end{small}%ALLOWLATEX% %............................................................................ 
\caption{Communication routine with sparse neighborhood all-to-all-w and without local data copying.} \label{poisson-end} diff --git a/getlatex b/getlatex index 629adeac..ebc058ef 100755 --- a/getlatex +++ b/getlatex @@ -186,7 +186,7 @@ $quotechk = 1; 'MPIIIIDOTO' => 1, 'MPIIIIDOTI' => 1, 'MPIIIIDOTII' => 1, - 'MPIIVDOTO' => 1, + 'MPIIV' => 1, 'MPIIVDOTO' => 1, 'mpiivdoto' => 1, 'mpi' => 1, 'mpii' => 1, 'mpiii' => 1, 'mpiidoti' => 1, 'mpiidotii' => 1, 'mpiidoto' => 1, 'mpiiidoto' => 1, @@ -274,7 +274,6 @@ $quotechk = 1; 'class' => 1, - 'n' => 1, # Not really a commend, but occurs as \n in examples 'ifbookprinting' => 1, 'fi' => 1, @@ -289,6 +288,10 @@ $quotechk = 1; 'textoutdesc' => 1, # used to simplify MPI_T section 'textoutargs' => 1, + + 'EE' => 1, 'FlushEntries' => 1, + + 'MPIisinitialized' => 1, 'MPIisfinalized' => 1, ); %deprecatedMPI = ( 'status' => 1, # vote status, should be removed @@ -354,9 +357,13 @@ foreach $file (@ARGV) { elsif ($file =~ /^--?noquotechk/) { $quotechk = 0; } - elsif ($file =~ /^--help/ || $file =~ /^--usage/) { + elsif ($file =~ /^--?help/ || $file =~ /^--?usage/) { print "getlatex [--addcmd=name] [--noquotechk] [--usage] filenames\n"; } + elsif ($file =~ /^--?allowpageref/) { + delete $deprecatedTeX{'pageref'}; + $knownTeX{'pageref'} = 1; + } else { print STDERR "Unrecognized argument $file\n"; } diff --git a/mpi-report.tex b/mpi-report.tex index 7ff09fbc..80c16615 100644 --- a/mpi-report.tex +++ b/mpi-report.tex @@ -40,6 +40,11 @@ \drafttrue %\draftfalse +% Use \bindinglabeltrue to issue a language label before every binding +% definition +%\bindinglabeltrue + + %%%%% Packages Depending on whether we are using pdflatex or latex %%%%%%%%%%%%%%%%% \ifpdf \typeout{ } @@ -50,7 +55,6 @@ \DeclareGraphicsExtensions{.pdf} % In case of using pdflatex set ext. 
\ifusecolor \usepackage[pdftex, % For PDF output - a4paper, backref, baseurl={http://www.mpi-forum.org}, colorlinks=true, @@ -75,7 +79,6 @@ ]{hyperref} % For hyperlinks \else \usepackage[pdftex, % For PDF output - a4paper, backref, baseurl={http://www.mpi-forum.org}, colorlinks=true, @@ -104,7 +107,6 @@ \DeclareGraphicsExtensions{.eps} % In case of using pdflatex set ext. \usepackage[ dvips, % For PS output - a4paper, backref, colorlinks=true, filecolor=black, diff --git a/mpi-user-macs.tex b/mpi-user-macs.tex index 1fc66872..5b22ac52 100644 --- a/mpi-user-macs.tex +++ b/mpi-user-macs.tex @@ -145,15 +145,15 @@ % \end{tabular} % This avoids having to keep moving & and \\ markers around as a list is % constructed and updated. -% This is hardwired for 3 columns, but could be parameterized to allow the -% number of columns to be set. This is probably not necessary for the MPI -% document, so for simplicity, it is hardwired for 3. +% This is set up for 3 columns. Redefine \colmax for the number of columns +% if different than 3 is desired. \newcounter{colidx} \setcounter{colidx}{0} +\def\colmax{3} % \EE for ``EndEntry'' -\def\EE{\stepcounter{colidx}\ifnum\value{colidx}=3\setcounter{colidx}{0}\\\else&\fi} +\def\EE{\stepcounter{colidx}\ifnum\value{colidx}=\colmax\setcounter{colidx}{0}\\\else&\fi} % Use \FlushEntries *instead* of \EE after the last name. -\def\FlushEntries{\stepcounter{colidx}\ifnum\value{colidx}=3\\\else&\FlushEntries\fi\setcounter{colidx}{0}} +\def\FlushEntries{\stepcounter{colidx}\ifnum\value{colidx}=\colmax\\\else&\FlushEntries\fi\setcounter{colidx}{0}} % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% @@ -228,7 +228,7 @@ \hangindent 7em\hangafter=1 {\funcNoIndex{#1}}\mpifuncmainindex{#1} \MPIfunclist -}{\end{list} \vspace{\codeSpace}} +}{\end{list} \vspace{\codeSpace}\gdef\bindingcurlang{\bindingdef}} % special for functions that you don't want listed in index. 
% This was added for showing corrections to functions already listed. @@ -239,7 +239,7 @@ \samepage {\func{#1}} \MPIfunclist -}{\end{list} \vspace{\codeSpace}} +}{\end{list} \vspace{\codeSpace}\gdef\bindingcurlang{\bindingdef}} % % This is a hack to force a linebreak @@ -252,7 +252,7 @@ {\funcNoIndex{#1}\hfill\hbox{}\linebreak \funcNoIndex{#2}}\mpifuncmainindex{#1#2} \MPIfunclist -}{\end{list} \vspace{\codeSpace}} +}{\end{list} \vspace{\codeSpace}\gdef\bindingcurlang{\bindingdef}} % Special funcdef for functions with no arguments \newenvironment{funcdefna}[1]{ @@ -260,7 +260,7 @@ \vspace{\codeSpace} \noindent {\funcNoIndex{#1}}\mpifuncmainindex{#1}\par -}{\vspace{\codeSpace}} +}{\vspace{\codeSpace}\gdef\bindingcurlang{\bindingdef}} \newenvironment{cfuncdef}[1]{ \vspace{\codeSpace} @@ -437,13 +437,46 @@ \newenvironment{users}{\begin{list}{}{}\item[]{\it Advice to users.} }{{\rm ({\it End of advice to users.})} \end{list}} -%macros for language binding: mpibind, mpifbind, and fargs: - -\newcommand{\mpibind}[1]{{\raggedright \hangindent 7em\hangafter=1\tt -int #1 \par \vspace{0.1in}}} -\newcommand{\mpibindnotint}[1]{{\raggedright \hangindent 7em\hangafter=1\tt +%macros for language binding: mpibind, mpifnewbind, mpifbind, and fargs: +% This still needs work to align with the same line of the binding +% definition +%\reversemarginpar +%\marginparwidth 1in +%\newcommand{\bindingheading}[1]{\marginpar{\textbf{#1}}} + +\newcommand{\bindingheading}[1]{{\noindent\bf #1\par}} + +\def\bindingcname{C binding} +\def\bindingfname{F binding} +\def\bindingfnewname{F08 binding} + +% This if controls whether there is a heading +\newif\ifbindinglabel +\bindinglabelfalse + +% Because of how these are used in the \if tests, these definitions +% need to be a single character. 
+\def\bindingc{c} +\def\bindingf{f} +\def\bindingfnew{F} +\def\bindingdef{N} +\def\bindingcurlang{\bindingdef} +\def\bindingissue#1#2{\ifbindinglabel\if\bindingcurlang#1\else\gdef\bindingcurlang{#1}#2\fi\fi} + +\newcommand{\bindingbody}[1]{{\raggedright \hangindent 7em\hangafter=1\tt #1 \par \vspace{0.1in}}} + +\newcommand{\mpibind}[1]{{\bindingissue\bindingc{\bindingheading{\bindingcname}}% +\bindingbody{int #1}}} + +%\raggedright \hangindent 7em\hangafter=1\tt +%int #1 \par \vspace{0.1in}}} +\newcommand{\mpibindnotint}[1]{{\bindingissue\bindingc{\bindingheading{\bindingcname}}% +\bindingbody{#1}}} +%\raggedright \hangindent 7em\hangafter=1\tt +%#1 \par \vspace{0.1in}}} + % Ticket #281 removed the C++ bindings %\newcommand{\mpicppbind}[1]{{\raggedright \hangindent %7em\hangafter=1\tt #1 \par \vspace{0.1in}}} @@ -458,47 +491,62 @@ int #1 \par \vspace{0.1in}}} \def\jmsbind#1{{\raggedright \hangindent 7em\hangafter=1 \hspace{1em} \tt #1 \par \vspace{0.1in}}} %special macro for not including C binding in index -\newcommand{\mpiskipbind}[1]{{\raggedright \hangindent -7em\hangafter=1\tt int #1 \par \vspace{0.1in}}} +\newcommand{\mpiskipbind}[1]{{\bindingissue\bindingc{\bindingheading{\bindingcname}}% +\bindingbody{int #1}}} +%\raggedright \hangindent +%7em\hangafter=1\tt int #1 \par \vspace{0.1in}}} \newcommand{\mpicppskipemptybind}[2]{{\raggedright \hangindent 7em\hangafter=1\tt #2 #1 \par \vspace{0.1in}}} % special macro that avoids the int in front % should be used for C functions only that want to be in index -\newcommand{\mpiemptybind}[2]{{\raggedright \hangindent -7em\hangafter=1\tt #2 #1 \par \vspace{0.1in}}} +\newcommand{\mpiemptybind}[2]{{\bindingissue\bindingc{\bindingheading{\bindingcname}}% +\bindingbody{#2 #1}}} +%\raggedright \hangindent +%7em\hangafter=1\tt #2 #1 \par \vspace{0.1in}}} % % This version indexes the name % \mpiemptybindidx{routine}{returnvalue}{indexed name} -\newcommand{\mpiemptybindidx}[3]{{\raggedright \hangindent -7em\hangafter=1\tt #2 
#1 \mpifuncmainindex{#3}\par \vspace{0.1in}}} +\newcommand{\mpiemptybindidx}[3]{{\bindingissue\bindingc{\bindingheading{\bindingcname}}% +\bindingbody{#2 #1\mpifuncmainindex{#3}}}} +%\raggedright \hangindent +%7em\hangafter=1\tt #2 #1 \mpifuncmainindex{#3}\par \vspace{0.1in}}} % % This version does not index the name, but has same argument list -\newcommand{\mpiemptybindNOidx}[3]{{\raggedright \hangindent -7em\hangafter=1\tt #2 #1 \par \vspace{0.1in}}} +\newcommand{\mpiemptybindNOidx}[3]{{\bindingissue\bindingc{\bindingheading{\bindingcname}}% +\bindingbody{#2 #1}}} +%\raggedright \hangindent +%7em\hangafter=1\tt #2 #1 \par \vspace{0.1in}}} % special macro that avoids the int in front % should be used for routines where don't want in index -\newcommand{\mpiskipemptybind}[2]{{\raggedright \hangindent -7em\hangafter=1\tt #2 #1 \par \vspace{0.1in}}} +\newcommand{\mpiskipemptybind}[2]{{\bindingissue\bindingc{\bindingheading{\bindingcname}}% +\bindingbody{#2 #1}}} +%\raggedright \hangindent +%7em\hangafter=1\tt #2 #1 \par \vspace{0.1in}}} % special macro for typedef int in front of SUBROUTINE in C % % Note that this is indexed. 
-\newcommand{\mpitypedefbind}[1]{{\raggedright \hangindent 7em\hangafter=1\tt -typedef int #1; \index{TYPEDEF:#1}\par \vspace{0.1in}}} -\newcommand{\mpitypedefbindvoid}[1]{{\raggedright \hangindent 7em\hangafter=1\tt -typedef void #1; \index{TYPEDEF:#1}\par \vspace{0.1in}}} +\newcommand{\mpitypedefbind}[1]{{\bindingbody{typedef int #1;\index{TYPEDEF:#1}}}} +%\raggedright \hangindent 7em\hangafter=1\tt +%typedef int #1; \index{TYPEDEF:#1}\par \vspace{0.1in}}} +\newcommand{\mpitypedefbindvoid}[1]{{\bindingbody{typedef void #1;\index{TYPEDEF:#1}}}} +%\raggedright \hangindent 7em\hangafter=1\tt +%typedef void #1; \index{TYPEDEF:#1}\par \vspace{0.1in}}} % % Version without index -\newcommand{\mpitypedefskipbind}[1]{{\raggedright \hangindent 7em\hangafter=1\tt -typedef int #1; \par \vspace{0.1in}}} +\newcommand{\mpitypedefskipbind}[1]{{\bindingbody{typedef int #1;}}} +%\raggedright \hangindent 7em\hangafter=1\tt +%typedef int #1; \par \vspace{0.1in}}} % % Use this for typedef (second arg is type) -\newcommand{\mpitypedefemptybind}[2]{{\raggedright \hangindent -7em\hangafter=1\tt typedef #2 #1; \index{TYPEDEF:#1}\par \vspace{0.1in}}} -\newcommand{\mpitypedefemptyskipbind}[2]{{\raggedright \hangindent -7em\hangafter=1\tt typedef #2 #1;\par \vspace{0.1in}}} +\newcommand{\mpitypedefemptybind}[2]{{\bindingbody{typedef #2 #1;\index{TYPEDEF:#1}}}} +%\raggedright \hangindent +%7em\hangafter=1\tt typedef #2 #1; \index{TYPEDEF:#1}\par \vspace{0.1in}}} +\newcommand{\mpitypedefemptyskipbind}[2]{{\bindingbody{typedef #2 #1;}}} +%\raggedright \hangindent +%7em\hangafter=1\tt typedef #2 #1;\par \vspace{0.1in}}} % Ticket #281 removed the C++ bindings %\newcommand{\mpicpptypedefbind}[1]{{\raggedright \hangindent @@ -518,30 +566,38 @@ typedef int #1; \par \vspace{0.1in}}} %7em\hangafter=1\{\texttt{typedef #2 #1; }{\it (binding deprecated, see Section~\ref{sec:deprecated-cxx-bindings})}\}\par \vspace{0.1in}}} \newcommand{\mpicpptypedefemptybind}[2]{}{} 
-\newcommand{\mpifsubbind}[1]{{\raggedright \hangindent -7em\hangafter=1\tt SUBROUTINE #1 \par \vspace{0.1in}}} +\newcommand{\mpifsubbind}[1]{{\bindingbody{SUBROUTINE #1}}} +%\raggedright \hangindent +%7em\hangafter=1\tt SUBROUTINE #1 \par \vspace{0.1in}}} -\newcommand{\mpifnewsubbind}[1]{{\raggedright \hangindent -7em\hangafter=1\tt ABSTRACT INTERFACE \\\advance\leftskip 1em SUBROUTINE #1 \par \vspace{0.1in}}} +\newcommand{\mpifnewsubbind}[1]{{\bindingbody{ABSTRACT INTERFACE\\\advance\leftskip 1em SUBROUTINE #1}}} +%\raggedright \hangindent +%7em\hangafter=1\tt ABSTRACT INTERFACE \\\advance\leftskip 1em SUBROUTINE #1 \par \vspace{0.1in}}} %% - The following version is needed when removing the \MPIupdate{3.0}... macros: %% \newcommand{\mpifnewsubbind}[1]{{\raggedright \hangindent %% 7em\hangafter=1\tt ABSTRACT INTERFACE \\\advance\leftskip 1em SUBROUTINE #1 \par \vspace{0.1in}}} -\newcommand{\mpifbind}[1]{{\raggedright \hangindent 7em\hangafter=1\tt -#1 \par \vspace{0.1in}}} +\newcommand{\mpifbind}[1]{{\bindingissue\bindingf{\bindingheading{\bindingfname}}% +\bindingbody{#1}}} +%\raggedright \hangindent 7em\hangafter=1\tt +%#1 \par \vspace{0.1in}}} -\newcommand{\mpifnewbind}[1]{{\raggedright \hangindent 7em\hangafter=1\tt -#1 \par \vspace{0.1in}}} +\newcommand{\mpifnewbind}[1]{{\bindingissue{\bindingfnew}{\bindingheading{\bindingfnewname}}% +\bindingbody{#1}}} +%\raggedright \hangindent 7em\hangafter=1\tt +%#1 \par \vspace{0.1in}}} %% - The following version is needed when removing the \MPIupdate{3.0}... 
macros: %% \newcommand{\mpifnewbind}[1]{{\raggedright \hangindent 7em\hangafter=1\tt %% #1 \par \vspace{0.1in}}} % special macro for text instead of a binding; used for routines that were deprecated until MPI-2.2: -\newcommand{\mpifnewnonebind}[1]{{\raggedright \hangindent 7em\hangafter=1 -#1 \par \vspace{0.1in}}} +\newcommand{\mpifnewnonebind}[1]{{\bindingbody{#1}}} +%\raggedright \hangindent 7em\hangafter=1 +%#1 \par \vspace{0.1in}}} % special macro to skip fortran binding in index -\newcommand{\mpifskipbind}[1]{{\raggedright \hangindent -7em\hangafter=1\tt #1 \par \vspace{0.1in}}} +\newcommand{\mpifskipbind}[1]{{\bindingbody{#1}}} +%\raggedright \hangindent +%7em\hangafter=1\tt #1 \par \vspace{0.1in}}} \def\fargs{\\\advance\leftskip 2em} diff --git a/refs.bib b/refs.bib index e84f66d4..fe1e9dd5 100644 --- a/refs.bib +++ b/refs.bib @@ -756,6 +756,13 @@ and Manfred Morari", note = "{\tt http://www.mpi-forum.org}" } +@misc{IETF-RFC4506, + author = "The Internet Society", + title = "{XDR: External Data Representation Standard}", + year = 2006, + note = "{\tt http://www.rfc-editor.org/pdfrfc/rfc4506.txt.pdf}" +} + % from lusk % @@ -2747,6 +2754,15 @@ year = 1992} annote = "Standard 754-1985" } +@Manual{ieee-754-2008, + title = "{IEEE} Standard for Floating-Point Arithmetic, + {IEEE} Standard 754-2008", + organization = "Institute of Electrical and Electronics Engineers", + year = 2008, + address = "New York", + annote = "Standard 754-2008" +} + @techreport{sockettutorial, author = "S. J. Lefflet and R. S. Fabry and W. N. Joy and P. Lapsley and S. Miller and C. Torek", title = "An Advanced 4.4{BSD} Interprocess Communication Tutorial, {U}nix Programmer's Supplementary Documents ({PSD}) 21",