diff --git a/docs/Makefile b/docs/Makefile
deleted file mode 100644
index 9b5689e8387..00000000000
--- a/docs/Makefile
+++ /dev/null
@@ -1,22 +0,0 @@
-# Minimal makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line.
-SPHINXOPTS =
-SPHINXBUILD = sphinx-build
-SOURCEDIR = source
-BUILDDIR = build
-
-# Put it first so that "make" without argument is like "make help".
-help:
- @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
-
-.PHONY: help Makefile
-
-# Catch-all target: route all unknown targets to Sphinx using the new
-# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
-%: Makefile
- @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
-
-clean:
- rm -rf $(BUILDDIR)/*
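With `docs/Makefile` deleted, the catch-all `make <target>` entry point is gone; the docs are presumably built by invoking sphinx-build's make-mode directly, which is all the deleted target wrapped. A minimal sketch, assuming the `source`/`build` layout from the deleted file and `sphinx-build` on PATH:

```python
# Equivalent of the deleted "make html": sphinx-build in make-mode ("-M")
# with SOURCEDIR=source and BUILDDIR=build, run from the docs/ directory.
import subprocess

subprocess.run(["sphinx-build", "-M", "html", "source", "build"], check=True)
```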
diff --git a/docs/source/conf.py b/docs/source/conf.py
index e7fad62dc62..acaa71a4646 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -49,7 +49,7 @@
"sphinx.ext.napoleon",
"pydata_sphinx_theme",
"nbsphinx",
- "sphinx.ext.intersphinx",
+ "sphinx.ext.intersphinx"
]
# Intersphinx mapping
@@ -95,7 +95,7 @@
# convert rst to md for ipynb
"pypandoc": True,
# path to your examples scripts
- "examples_dirs": ["../../examples/"],
+ "examples_dirs": ["../../examples"],
# path where to save gallery generated examples
"gallery_dirs": ["examples"],
# Pattern to search for example files
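The two hunks above edit the sphinx-gallery configuration; a hedged sketch of how the visible keys fit together in `conf.py` (only the keys and values shown in the hunks are certain, the rest of the dictionary is an assumption):

```python
# Hypothetical reconstruction of the sphinx_gallery_conf edited above.
sphinx_gallery_conf = {
    # convert rst to md for ipynb (requires pypandoc)
    "pypandoc": True,
    # path to the example scripts, relative to conf.py
    "examples_dirs": ["../../examples"],
    # path where the gallery-generated examples are saved
    "gallery_dirs": ["examples"],
}
```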
diff --git a/docs/source/examples/06-distributed-post/00-distributed_total_disp.ipynb b/docs/source/examples/06-distributed-post/00-distributed_total_disp.ipynb
index f322bc66d79..f940e84127a 100644
--- a/docs/source/examples/06-distributed-post/00-distributed_total_disp.ipynb
+++ b/docs/source/examples/06-distributed-post/00-distributed_total_disp.ipynb
@@ -1,205 +1,169 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "%matplotlib inline"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "\n\n# Distributed post without client connection to remote processes\nThis example shows how distributed files can be read and post processed\non distributed processes. After remote post processing, results a merged\non the local process.\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Import dpf module and its examples files\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "from ansys.dpf import core as dpf\nfrom ansys.dpf.core import examples\nfrom ansys.dpf.core import operators as ops"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Create the template workflow of total displacement\nCreate displacement and norm operators\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "template_workflow = dpf.Workflow()\ndisplacement = ops.result.displacement()\nnorm = ops.math.norm_fc(displacement)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Add the operators to the template workflow and name its inputs and outputs\nOnce workflow's inputs and outputs are named, they can be connected later on\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "template_workflow.add_operators([displacement, norm])\ntemplate_workflow.set_input_name(\"data_sources\", displacement.inputs.data_sources)\ntemplate_workflow.set_output_name(\"out\", norm.outputs.fields_container)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Configure the servers\nMake a list of ip addresses an port numbers on which dpf servers are\nstarted. Workflows instances will be created on each of those servers to\naddress each a different result file.\nIn this example, we will post process an analysis distributed in 2 files,\nwe will consequently require 2 remote processes\nTo make this example easier, we will start local servers here,\nbut we could get connected to any existing servers on the network.\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]\nips = [remote_server.ip for remote_server in remote_servers]\nports = [remote_server.port for remote_server in remote_servers]"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Print the ips and ports\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "print(\"ips:\", ips)\nprint(\"ports:\", ports)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Here we show how we could send files in temporary directory if we were not\nin shared memory\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "files = examples.download_distributed_files()\nserver_file_paths = [dpf.upload_file_in_tmp_folder(files[0], server=remote_servers[0]),\n dpf.upload_file_in_tmp_folder(files[1], server=remote_servers[1])]"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Send workflows on servers\nHere we create new instances on the server by copies of the template workflow\nWe also connect the data sources to those workflows\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "remote_workflows = []\nfor i, server in enumerate(remote_servers):\n remote_workflows.append(template_workflow.create_on_other_server(server))\n ds = dpf.DataSources(server_file_paths[i])\n remote_workflows[i].connect(\"data_sources\", ds)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Create a local workflow able to merge the results\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "local_workflow = dpf.Workflow()\nmerge = ops.utility.merge_fields_containers()\nlocal_workflow.add_operator(merge)\nlocal_workflow.set_input_name(\"in0\", merge, 0)\nlocal_workflow.set_input_name(\"in1\", merge, 1)\nlocal_workflow.set_output_name(\"merged\", merge.outputs.merged_fields_container)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Connect the workflows together and get the output\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "for i, server in enumerate(remote_servers):\n local_workflow.connect_with(remote_workflows[i], (\"out\", \"in\" + str(i)))\n\nfc = local_workflow.get_output(\"merged\", dpf.types.fields_container)\nprint(fc)\nprint(fc[0].min().data)\nprint(fc[0].max().data)"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.8.5"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "%matplotlib inline"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+      "# Post-processing of displacement on distributed processes {#ref_distributed_total_disp}\r\n\r\nTo help understand this example, the following diagram shows the operator\r\nchain used to compute the final result.\r\n\r\n![](00-operator-dep.svg){.align-center width=\"400px\"}\r\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+      "Import the dpf module and its example files\r\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "from ansys.dpf import core as dpf\nfrom ansys.dpf.core import examples\nfrom ansys.dpf.core import operators as ops"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+      "# Configure the servers\r\n\r\nMake a list of ip addresses and port numbers on which dpf servers are\r\nstarted. Operator instances will be created on each of those servers so\r\nthat each addresses a different result file. In this example, we will\r\npost process an analysis distributed in 2 files, so we will require 2\r\nremote processes. To make this example easier, we will start local\r\nservers here, but we could get connected to any existing servers on the\r\nnetwork.\r\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]\nips = [remote_server.ip for remote_server in remote_servers]\nports = [remote_server.port for remote_server in remote_servers]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Print the ips and ports\r\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "print(\"ips:\", ips)\nprint(\"ports:\", ports)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+      "Here we show how we could send files to a temporary directory if we were\r\nnot in shared memory\r\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "files = examples.download_distributed_files()\nserver_file_paths = [dpf.upload_file_in_tmp_folder(files[0], server=remote_servers[0]),\n dpf.upload_file_in_tmp_folder(files[1], server=remote_servers[1])]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+      "# Create the operators on the servers\r\n\r\nOn each server we create two new operators for 'displacement' and 'norm'\r\ncomputations and define their data sources. The displacement operator\r\nreceives data from the data file on its respective server, and the norm\r\noperator, chained to the displacement operator, receives its input from\r\nthe displacement operator's output.\r\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "remote_operators = []\nfor i, server in enumerate(remote_servers):\n displacement = ops.result.displacement(server=server)\n norm = ops.math.norm_fc(displacement, server=server)\n remote_operators.append(norm)\n ds = dpf.DataSources(server_file_paths[i], server=server)\n displacement.inputs.data_sources(ds)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+      "# Create a merge_fields_containers operator able to merge the results\r\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "merge = ops.utility.merge_fields_containers()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Connect the operators together and get the output\r\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "for i, server in enumerate(remote_servers):\n merge.connect(i, remote_operators[i], 0)\n\nfc = merge.get_output(0, dpf.types.fields_container)\nprint(fc)\nprint(fc[0].min().data)\nprint(fc[0].max().data)"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.9.0"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
}
\ No newline at end of file
diff --git a/docs/source/examples/06-distributed-post/00-distributed_total_disp.py b/docs/source/examples/06-distributed-post/00-distributed_total_disp.py
index fc3c7d8ba1e..36ad0aba980 100644
--- a/docs/source/examples/06-distributed-post/00-distributed_total_disp.py
+++ b/docs/source/examples/06-distributed-post/00-distributed_total_disp.py
@@ -1,12 +1,15 @@
"""
.. _ref_distributed_total_disp:
-Distributed post without client connection to remote processes
+Post-processing of displacement on distributed processes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This example shows how distributed files can be read and post processed
-on distributed processes. After remote post processing, results a merged
-on the local process.
+To help understand this example, the following diagram shows the operator
+chain used to compute the final result.
+
+.. image:: 00-operator-dep.svg
+ :align: center
+ :width: 400
"""
###############################################################################
@@ -16,30 +19,14 @@
from ansys.dpf.core import examples
from ansys.dpf.core import operators as ops
-###############################################################################
-# Create the template workflow of total displacement
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# Create displacement and norm operators
-
-template_workflow = dpf.Workflow()
-displacement = ops.result.displacement()
-norm = ops.math.norm_fc(displacement)
-
-###############################################################################
-# Add the operators to the template workflow and name its inputs and outputs
-# Once workflow's inputs and outputs are named, they can be connected later on
-template_workflow.add_operators([displacement, norm])
-template_workflow.set_input_name("data_sources", displacement.inputs.data_sources)
-template_workflow.set_output_name("out", norm.outputs.fields_container)
-
###############################################################################
# Configure the servers
# ~~~~~~~~~~~~~~~~~~~~~~
-# Make a list of ip addresses an port numbers on which dpf servers are
-# started. Workflows instances will be created on each of those servers to
+# Make a list of ip addresses and port numbers on which dpf servers are
+# started. Operator instances will be created on each of those servers to
# address each a different result file.
# In this example, we will post process an analysis distributed in 2 files,
-# we will consequently require 2 remote processes
+# we will consequently require 2 remote processes.
# To make this example easier, we will start local servers here,
# but we could get connected to any existing servers on the network.
@@ -60,35 +47,35 @@
dpf.upload_file_in_tmp_folder(files[1], server=remote_servers[1])]
###############################################################################
-# Send workflows on servers
+# Create the operators on the servers
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# Here we create new instances on the server by copies of the template workflow
-# We also connect the data sources to those workflows
-remote_workflows = []
+# On each server we create two new operators for 'displacement' and 'norm'
+# computations and define their data sources. The displacement operator
+# receives data from the data file on its respective server, and the norm
+# operator, chained to the displacement operator, receives its input from
+# the displacement operator's output.
+remote_operators = []
for i, server in enumerate(remote_servers):
- remote_workflows.append(template_workflow.create_on_other_server(server))
- ds = dpf.DataSources(server_file_paths[i])
- remote_workflows[i].connect("data_sources", ds)
+ displacement = ops.result.displacement(server=server)
+ norm = ops.math.norm_fc(displacement, server=server)
+ remote_operators.append(norm)
+ ds = dpf.DataSources(server_file_paths[i], server=server)
+ displacement.inputs.data_sources(ds)
###############################################################################
-# Create a local workflow able to merge the results
+# Create a merge_fields_containers operator able to merge the results
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-local_workflow = dpf.Workflow()
merge = ops.utility.merge_fields_containers()
-local_workflow.add_operator(merge)
-local_workflow.set_input_name("in0", merge, 0)
-local_workflow.set_input_name("in1", merge, 1)
-local_workflow.set_output_name("merged", merge.outputs.merged_fields_container)
###############################################################################
-# Connect the workflows together and get the output
+# Connect the operators together and get the output
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
for i, server in enumerate(remote_servers):
- local_workflow.connect_with(remote_workflows[i], ("out", "in" + str(i)))
+ merge.connect(i, remote_operators[i], 0)
-fc = local_workflow.get_output("merged", dpf.types.fields_container)
+fc = merge.get_output(0, dpf.types.fields_container)
print(fc)
print(fc[0].min().data)
print(fc[0].max().data)
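The "Configure the servers" comment above notes that we "could get connected to any existing servers on the network" instead of starting local ones. A hedged sketch of that variant, assuming `connect_to_server` from ansys-dpf-core; the addresses and ports are placeholders, not values from this PR:

```python
# Reuse DPF servers already running on the network instead of spawning
# local ones; the ip/port values below are illustrative only.
from ansys.dpf import core as dpf

remote_servers = [
    dpf.connect_to_server(ip="10.0.0.1", port=50052, as_global=False),
    dpf.connect_to_server(ip="10.0.0.2", port=50052, as_global=False),
]
```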
diff --git a/docs/source/examples/06-distributed-post/00-distributed_total_disp.py.md5 b/docs/source/examples/06-distributed-post/00-distributed_total_disp.py.md5
index d9a254b6111..ace82b5eb52 100644
--- a/docs/source/examples/06-distributed-post/00-distributed_total_disp.py.md5
+++ b/docs/source/examples/06-distributed-post/00-distributed_total_disp.py.md5
@@ -1 +1 @@
-4b241fa14d62437e3c4fd342e0bf53c8
\ No newline at end of file
+0e22fe3be833f3477b4ac02eb8f6e4c8
\ No newline at end of file
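The `.py.md5` churn above is how sphinx-gallery decides whether to re-execute an example: it stores a digest of the script and appears to rerun it only when that digest changes. A sketch of the check, using the file names from this PR:

```python
# Re-execute the example iff its stored md5 digest is stale.
import hashlib
from pathlib import Path

script = Path("00-distributed_total_disp.py")
stored = Path("00-distributed_total_disp.py.md5").read_text().strip()
needs_rerun = hashlib.md5(script.read_bytes()).hexdigest() != stored
```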
diff --git a/docs/source/examples/06-distributed-post/00-distributed_total_disp.rst b/docs/source/examples/06-distributed-post/00-distributed_total_disp.rst
index a2509264b2f..f4d397f9fc4 100644
--- a/docs/source/examples/06-distributed-post/00-distributed_total_disp.rst
+++ b/docs/source/examples/06-distributed-post/00-distributed_total_disp.rst
@@ -20,17 +20,21 @@
.. _ref_distributed_total_disp:
-Distributed post without client connection to remote processes
+Post-processing of displacement on distributed processes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This example shows how distributed files can be read and post processed
-on distributed processes. After remote post processing, results a merged
-on the local process.
-.. GENERATED FROM PYTHON SOURCE LINES 13-14
+To help understand this example, the following diagram shows the operator
+chain used to compute the final result.
+
+.. image:: 00-operator-dep.svg
+ :align: center
+ :width: 400
+
+.. GENERATED FROM PYTHON SOURCE LINES 16-17
Import dpf module and its examples files
-.. GENERATED FROM PYTHON SOURCE LINES 14-19
+.. GENERATED FROM PYTHON SOURCE LINES 17-22
.. code-block:: default
@@ -46,61 +50,19 @@ Import dpf module and its examples files
-.. GENERATED FROM PYTHON SOURCE LINES 20-23
-
-Create the template workflow of total displacement
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Create displacement and norm operators
-
-.. GENERATED FROM PYTHON SOURCE LINES 23-28
-
-.. code-block:: default
-
-
- template_workflow = dpf.Workflow()
- displacement = ops.result.displacement()
- norm = ops.math.norm_fc(displacement)
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 29-31
-
-Add the operators to the template workflow and name its inputs and outputs
-Once workflow's inputs and outputs are named, they can be connected later on
-
-.. GENERATED FROM PYTHON SOURCE LINES 31-35
-
-.. code-block:: default
-
- template_workflow.add_operators([displacement, norm])
- template_workflow.set_input_name("data_sources", displacement.inputs.data_sources)
- template_workflow.set_output_name("out", norm.outputs.fields_container)
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 36-45
+.. GENERATED FROM PYTHON SOURCE LINES 23-32
Configure the servers
~~~~~~~~~~~~~~~~~~~~~~
-Make a list of ip addresses an port numbers on which dpf servers are
-started. Workflows instances will be created on each of those servers to
+Make a list of ip addresses and port numbers on which dpf servers are
+started. Operator instances will be created on each of those servers to
address each a different result file.
In this example, we will post process an analysis distributed in 2 files,
-we will consequently require 2 remote processes
+we will consequently require 2 remote processes.
To make this example easier, we will start local servers here,
but we could get connected to any existing servers on the network.
-.. GENERATED FROM PYTHON SOURCE LINES 45-50
+.. GENERATED FROM PYTHON SOURCE LINES 32-37
.. code-block:: default
@@ -116,11 +78,11 @@ but we could get connected to any existing servers on the network.
-.. GENERATED FROM PYTHON SOURCE LINES 51-52
+.. GENERATED FROM PYTHON SOURCE LINES 38-39
Print the ips and ports
-.. GENERATED FROM PYTHON SOURCE LINES 52-55
+.. GENERATED FROM PYTHON SOURCE LINES 39-42
.. code-block:: default
@@ -138,17 +100,17 @@ Print the ips and ports
.. code-block:: none
ips: ['127.0.0.1', '127.0.0.1']
- ports: [50058, 50059]
+ ports: [50054, 50055]
-.. GENERATED FROM PYTHON SOURCE LINES 56-58
+.. GENERATED FROM PYTHON SOURCE LINES 43-45
Here we show how we could send files in temporary directory if we were not
in shared memory
-.. GENERATED FROM PYTHON SOURCE LINES 58-62
+.. GENERATED FROM PYTHON SOURCE LINES 45-49
.. code-block:: default
@@ -163,22 +125,27 @@ in shared memory
-.. GENERATED FROM PYTHON SOURCE LINES 63-67
+.. GENERATED FROM PYTHON SOURCE LINES 50-57
-Send workflows on servers
+Create the operators on the servers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Here we create new instances on the server by copies of the template workflow
-We also connect the data sources to those workflows
+On each server we create two new operators for 'displacement' and 'norm'
+computations and define their data sources. The displacement operator
+receives data from the data file on its respective server, and the norm
+operator, chained to the displacement operator, receives its input from
+the displacement operator's output.
-.. GENERATED FROM PYTHON SOURCE LINES 67-73
+.. GENERATED FROM PYTHON SOURCE LINES 57-65
.. code-block:: default
- remote_workflows = []
+ remote_operators = []
for i, server in enumerate(remote_servers):
- remote_workflows.append(template_workflow.create_on_other_server(server))
- ds = dpf.DataSources(server_file_paths[i])
- remote_workflows[i].connect("data_sources", ds)
+ displacement = ops.result.displacement(server=server)
+ norm = ops.math.norm_fc(displacement, server=server)
+ remote_operators.append(norm)
+ ds = dpf.DataSources(server_file_paths[i], server=server)
+ displacement.inputs.data_sources(ds)
@@ -187,22 +154,17 @@ We also connect the data sources to those workflows
-.. GENERATED FROM PYTHON SOURCE LINES 74-76
+.. GENERATED FROM PYTHON SOURCE LINES 66-68
-Create a local workflow able to merge the results
+Create a merge_fields_containers operator able to merge the results
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. GENERATED FROM PYTHON SOURCE LINES 76-84
+.. GENERATED FROM PYTHON SOURCE LINES 68-71
.. code-block:: default
- local_workflow = dpf.Workflow()
merge = ops.utility.merge_fields_containers()
- local_workflow.add_operator(merge)
- local_workflow.set_input_name("in0", merge, 0)
- local_workflow.set_input_name("in1", merge, 1)
- local_workflow.set_output_name("merged", merge.outputs.merged_fields_container)
@@ -211,20 +173,20 @@ Create a local workflow able to merge the results
-.. GENERATED FROM PYTHON SOURCE LINES 85-87
+.. GENERATED FROM PYTHON SOURCE LINES 72-74
-Connect the workflows together and get the output
+Connect the operators together and get the output
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. GENERATED FROM PYTHON SOURCE LINES 87-95
+.. GENERATED FROM PYTHON SOURCE LINES 74-82
.. code-block:: default
for i, server in enumerate(remote_servers):
- local_workflow.connect_with(remote_workflows[i], ("out", "in" + str(i)))
+ merge.connect(i, remote_operators[i], 0)
- fc = local_workflow.get_output("merged", dpf.types.fields_container)
+ fc = merge.get_output(0, dpf.types.fields_container)
print(fc)
print(fc[0].min().data)
print(fc[0].max().data)
@@ -254,7 +216,7 @@ Connect the workflows together and get the output
.. rst-class:: sphx-glr-timing
- **Total running time of the script:** ( 0 minutes 0.663 seconds)
+ **Total running time of the script:** ( 0 minutes 1.022 seconds)
.. _sphx_glr_download_examples_06-distributed-post_00-distributed_total_disp.py:
diff --git a/docs/source/examples/06-distributed-post/00-distributed_total_disp_codeobj.pickle b/docs/source/examples/06-distributed-post/00-distributed_total_disp_codeobj.pickle
index 50a1c9cb8ac..486a3a6dbba 100644
Binary files a/docs/source/examples/06-distributed-post/00-distributed_total_disp_codeobj.pickle and b/docs/source/examples/06-distributed-post/00-distributed_total_disp_codeobj.pickle differ
diff --git a/docs/source/examples/06-distributed-post/00-operator-dep.dot b/docs/source/examples/06-distributed-post/00-operator-dep.dot
new file mode 100644
index 00000000000..a99365e8260
--- /dev/null
+++ b/docs/source/examples/06-distributed-post/00-operator-dep.dot
@@ -0,0 +1,36 @@
+digraph foo {
+ graph [pad="0", nodesep="0.3", ranksep="0.3"]
+ node [shape=box, style=filled, fillcolor="#ffcc00", margin="0"];
+ rankdir=LR;
+ splines=line;
+
+ disp01 [label="displacement"];
+ disp02 [label="displacement"];
+ norm01 [label="norm"];
+ norm02 [label="norm"];
+
+ subgraph cluster_1 {
+ ds01 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
+
+ ds01 -> disp01 [style=dashed];
+ disp01 -> norm01;
+
+ label="Server 1";
+ style=filled;
+ fillcolor=lightgrey;
+ }
+
+ subgraph cluster_2 {
+ ds02 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
+
+ ds02 -> disp02 [style=dashed];
+ disp02 -> norm02;
+
+ label="Server 2";
+ style=filled;
+ fillcolor=lightgrey;
+ }
+
+ norm01 -> "merge";
+ norm02 -> "merge";
+}
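The `00-operator-dep.svg` added below is presumably rendered from this `.dot` source with Graphviz; a sketch of the conversion, assuming the `dot` binary is installed:

```python
# Render the operator-dependency diagram to the SVG the docs embed.
import subprocess

subprocess.run(
    ["dot", "-Tsvg", "00-operator-dep.dot", "-o", "00-operator-dep.svg"],
    check=True,
)
```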
diff --git a/docs/source/examples/06-distributed-post/00-operator-dep.svg b/docs/source/examples/06-distributed-post/00-operator-dep.svg
new file mode 100644
index 00000000000..f05e0868fc2
--- /dev/null
+++ b/docs/source/examples/06-distributed-post/00-operator-dep.svg
@@ -0,0 +1,101 @@
+
+
+
+
+
diff --git a/docs/source/examples/06-distributed-post/01-distributed_delegate_to_server.ipynb b/docs/source/examples/06-distributed-post/01-distributed_delegate_to_server.ipynb
deleted file mode 100644
index bd70cce0545..00000000000
--- a/docs/source/examples/06-distributed-post/01-distributed_delegate_to_server.ipynb
+++ /dev/null
@@ -1,205 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "%matplotlib inline"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "\n\n# Compute total displacement from distributed files with distributed post\nThis example shows how distributed files can be read and post processed\non distributed processes. After remote post processing of total displacement,\nresults a merged on the local process. In this example, the client is only\nconnected to the coordinator server. Connections to remote processes are only\ndone implicitly through the coordinator.\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Import dpf module and its examples files\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "from ansys.dpf import core as dpf\nfrom ansys.dpf.core import examples\nfrom ansys.dpf.core import operators as ops"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Create the template workflow of total displacement\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "template_workflow = dpf.Workflow()\ndisplacement = ops.result.displacement()\nnorm = ops.math.norm_fc(displacement)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Add the operators to the template workflow and name its inputs and outputs\nOnce workflow's inputs and outputs are named, they can be connected later on\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "template_workflow.add_operators([displacement, norm])\ntemplate_workflow.set_input_name(\"data_sources\", displacement.inputs.data_sources)\ntemplate_workflow.set_output_name(\"out\", norm.outputs.fields_container)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Configure the servers\nMake a list of ip addresses an port numbers on which dpf servers are\nstarted. Workflows instances will be created on each of those servers to\naddress each a different result file.\nIn this example, we will post process an analysis distributed in 2 files,\nwe will consequently require 2 remote processes\nTo make this example easier, we will start local servers here,\nbut we could get connected to any existing servers on the network.\nWe only keep instances of remote_servers to start and keep those servers\nawaik. The purpose of this example is to show that we can do distributed\npost processing without opening channels between this client and\nthe remote processes\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]\nips = [remote_server.ip for remote_server in remote_servers]\nports = [remote_server.port for remote_server in remote_servers]"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Print the ips and ports\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "print(\"ips:\", ips)\nprint(\"ports:\", ports)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Here we show how we could send files in temporary directory if we were not\nin shared memory\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "files = examples.download_distributed_files()\nserver_file_paths = [dpf.upload_file_in_tmp_folder(files[0], server=remote_servers[0]),\n dpf.upload_file_in_tmp_folder(files[1], server=remote_servers[1])]"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Send workflows on servers\nHere we create new instances on the server by copies of the template workflow\nWe also connect the data sources to those workflows.\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "remote_workflows = []\nfor i, ip in enumerate(ips):\n remote_workflows.append(template_workflow.create_on_other_server(ip=ip, port=ports[i]))\n ds = dpf.DataSources(server_file_paths[i])\n remote_workflows[i].connect(\"data_sources\", ds)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Create a local workflow able to merge the results\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "local_workflow = dpf.Workflow()\nmerge = ops.utility.merge_fields_containers()\nlocal_workflow.add_operator(merge)\nlocal_workflow.set_input_name(\"in0\", merge, 0)\nlocal_workflow.set_input_name(\"in1\", merge, 1)\nlocal_workflow.set_output_name(\"merged\", merge.outputs.merged_fields_container)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Connect the workflows together and get the output\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "for i, ip in enumerate(ips):\n local_workflow.connect_with(remote_workflows[i], (\"out\", \"in\" + str(i)))\n\nfc = local_workflow.get_output(\"merged\", dpf.types.fields_container)\nprint(fc)\nprint(fc[0].min().data)\nprint(fc[0].max().data)\n\ndpf.server.shutdown_all_session_servers()"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.8.5"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
\ No newline at end of file
diff --git a/docs/source/examples/06-distributed-post/01-distributed_delegate_to_server.py b/docs/source/examples/06-distributed-post/01-distributed_delegate_to_server.py
deleted file mode 100644
index 5b8595d191d..00000000000
--- a/docs/source/examples/06-distributed-post/01-distributed_delegate_to_server.py
+++ /dev/null
@@ -1,101 +0,0 @@
-"""
-.. _ref_distributed_delegate_to_server:
-
-Compute total displacement from distributed files with distributed post
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This example shows how distributed files can be read and post processed
-on distributed processes. After remote post processing of total displacement,
-results a merged on the local process. In this example, the client is only
-connected to the coordinator server. Connections to remote processes are only
-done implicitly through the coordinator.
-
-"""
-
-###############################################################################
-# Import dpf module and its examples files
-
-from ansys.dpf import core as dpf
-from ansys.dpf.core import examples
-from ansys.dpf.core import operators as ops
-
-###############################################################################
-# Create the template workflow of total displacement
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-template_workflow = dpf.Workflow()
-displacement = ops.result.displacement()
-norm = ops.math.norm_fc(displacement)
-
-###############################################################################
-# Add the operators to the template workflow and name its inputs and outputs
-# Once workflow's inputs and outputs are named, they can be connected later on
-template_workflow.add_operators([displacement, norm])
-template_workflow.set_input_name("data_sources", displacement.inputs.data_sources)
-template_workflow.set_output_name("out", norm.outputs.fields_container)
-
-###############################################################################
-# Configure the servers
-# ~~~~~~~~~~~~~~~~~~~~~~
-# Make a list of ip addresses an port numbers on which dpf servers are
-# started. Workflows instances will be created on each of those servers to
-# address each a different result file.
-# In this example, we will post process an analysis distributed in 2 files,
-# we will consequently require 2 remote processes
-# To make this example easier, we will start local servers here,
-# but we could get connected to any existing servers on the network.
-# We only keep instances of remote_servers to start and keep those servers
-# awaik. The purpose of this example is to show that we can do distributed
-# post processing without opening channels between this client and
-# the remote processes
-
-remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]
-ips = [remote_server.ip for remote_server in remote_servers]
-ports = [remote_server.port for remote_server in remote_servers]
-
-###############################################################################
-# Print the ips and ports
-print("ips:", ips)
-print("ports:", ports)
-
-###############################################################################
-# Here we show how we could send files in temporary directory if we were not
-# in shared memory
-files = examples.download_distributed_files()
-server_file_paths = [dpf.upload_file_in_tmp_folder(files[0], server=remote_servers[0]),
- dpf.upload_file_in_tmp_folder(files[1], server=remote_servers[1])]
-
-###############################################################################
-# Send workflows on servers
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~
-# Here we create new instances on the server by copies of the template workflow
-# We also connect the data sources to those workflows.
-remote_workflows = []
-for i, ip in enumerate(ips):
- remote_workflows.append(template_workflow.create_on_other_server(ip=ip, port=ports[i]))
- ds = dpf.DataSources(server_file_paths[i])
- remote_workflows[i].connect("data_sources", ds)
-
-###############################################################################
-# Create a local workflow able to merge the results
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-local_workflow = dpf.Workflow()
-merge = ops.utility.merge_fields_containers()
-local_workflow.add_operator(merge)
-local_workflow.set_input_name("in0", merge, 0)
-local_workflow.set_input_name("in1", merge, 1)
-local_workflow.set_output_name("merged", merge.outputs.merged_fields_container)
-
-###############################################################################
-# Connect the workflows together and get the output
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-for i, ip in enumerate(ips):
- local_workflow.connect_with(remote_workflows[i], ("out", "in" + str(i)))
-
-fc = local_workflow.get_output("merged", dpf.types.fields_container)
-print(fc)
-print(fc[0].min().data)
-print(fc[0].max().data)
-
-dpf.server.shutdown_all_session_servers()
diff --git a/docs/source/examples/06-distributed-post/01-distributed_delegate_to_server.py.md5 b/docs/source/examples/06-distributed-post/01-distributed_delegate_to_server.py.md5
deleted file mode 100644
index 7ed3615f15c..00000000000
--- a/docs/source/examples/06-distributed-post/01-distributed_delegate_to_server.py.md5
+++ /dev/null
@@ -1 +0,0 @@
-f5894523c93ddb6f1cef3addcefdaaa3
\ No newline at end of file
diff --git a/docs/source/examples/06-distributed-post/01-distributed_delegate_to_server.rst b/docs/source/examples/06-distributed-post/01-distributed_delegate_to_server.rst
deleted file mode 100644
index c909ddae049..00000000000
--- a/docs/source/examples/06-distributed-post/01-distributed_delegate_to_server.rst
+++ /dev/null
@@ -1,293 +0,0 @@
-
-.. DO NOT EDIT.
-.. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY.
-.. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE:
-.. "examples\06-distributed-post\01-distributed_delegate_to_server.py"
-.. LINE NUMBERS ARE GIVEN BELOW.
-
-.. only:: html
-
- .. note::
- :class: sphx-glr-download-link-note
-
- Click :ref:`here `
- to download the full example code
-
-.. rst-class:: sphx-glr-example-title
-
-.. _sphx_glr_examples_06-distributed-post_01-distributed_delegate_to_server.py:
-
-
-.. _ref_distributed_delegate_to_server:
-
-Compute total displacement from distributed files with distributed post
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This example shows how distributed files can be read and post processed
-on distributed processes. After remote post processing of total displacement,
-results a merged on the local process. In this example, the client is only
-connected to the coordinator server. Connections to remote processes are only
-done implicitly through the coordinator.
-
-.. GENERATED FROM PYTHON SOURCE LINES 15-16
-
-Import dpf module and its examples files
-
-.. GENERATED FROM PYTHON SOURCE LINES 16-21
-
-.. code-block:: default
-
-
- from ansys.dpf import core as dpf
- from ansys.dpf.core import examples
- from ansys.dpf.core import operators as ops
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 22-24
-
-Create the template workflow of total displacement
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. GENERATED FROM PYTHON SOURCE LINES 24-29
-
-.. code-block:: default
-
-
- template_workflow = dpf.Workflow()
- displacement = ops.result.displacement()
- norm = ops.math.norm_fc(displacement)
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 30-32
-
-Add the operators to the template workflow and name its inputs and outputs
-Once workflow's inputs and outputs are named, they can be connected later on
-
-.. GENERATED FROM PYTHON SOURCE LINES 32-36
-
-.. code-block:: default
-
- template_workflow.add_operators([displacement, norm])
- template_workflow.set_input_name("data_sources", displacement.inputs.data_sources)
- template_workflow.set_output_name("out", norm.outputs.fields_container)
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 37-50
-
-Configure the servers
-~~~~~~~~~~~~~~~~~~~~~~
-Make a list of ip addresses an port numbers on which dpf servers are
-started. Workflows instances will be created on each of those servers to
-address each a different result file.
-In this example, we will post process an analysis distributed in 2 files,
-we will consequently require 2 remote processes
-To make this example easier, we will start local servers here,
-but we could get connected to any existing servers on the network.
-We only keep instances of remote_servers to start and keep those servers
-awaik. The purpose of this example is to show that we can do distributed
-post processing without opening channels between this client and
-the remote processes
-
-.. GENERATED FROM PYTHON SOURCE LINES 50-55
-
-.. code-block:: default
-
-
- remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]
- ips = [remote_server.ip for remote_server in remote_servers]
- ports = [remote_server.port for remote_server in remote_servers]
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 56-57
-
-Print the ips and ports
-
-.. GENERATED FROM PYTHON SOURCE LINES 57-60
-
-.. code-block:: default
-
- print("ips:", ips)
- print("ports:", ports)
-
-
-
-
-
-.. rst-class:: sphx-glr-script-out
-
- Out:
-
- .. code-block:: none
-
- ips: ['127.0.0.1', '127.0.0.1']
- ports: [50057, 50058]
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 61-63
-
-Here we show how we could send files in temporary directory if we were not
-in shared memory
-
-.. GENERATED FROM PYTHON SOURCE LINES 63-67
-
-.. code-block:: default
-
- files = examples.download_distributed_files()
- server_file_paths = [dpf.upload_file_in_tmp_folder(files[0], server=remote_servers[0]),
- dpf.upload_file_in_tmp_folder(files[1], server=remote_servers[1])]
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 68-72
-
-Send workflows on servers
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-Here we create new instances on the server by copies of the template workflow
-We also connect the data sources to those workflows.
-
-.. GENERATED FROM PYTHON SOURCE LINES 72-78
-
-.. code-block:: default
-
- remote_workflows = []
- for i, ip in enumerate(ips):
- remote_workflows.append(template_workflow.create_on_other_server(ip=ip, port=ports[i]))
- ds = dpf.DataSources(server_file_paths[i])
- remote_workflows[i].connect("data_sources", ds)
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 79-81
-
-Create a local workflow able to merge the results
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. GENERATED FROM PYTHON SOURCE LINES 81-89
-
-.. code-block:: default
-
-
- local_workflow = dpf.Workflow()
- merge = ops.utility.merge_fields_containers()
- local_workflow.add_operator(merge)
- local_workflow.set_input_name("in0", merge, 0)
- local_workflow.set_input_name("in1", merge, 1)
- local_workflow.set_output_name("merged", merge.outputs.merged_fields_container)
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 90-92
-
-Connect the workflows together and get the output
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. GENERATED FROM PYTHON SOURCE LINES 92-102
-
-.. code-block:: default
-
-
- for i, ip in enumerate(ips):
- local_workflow.connect_with(remote_workflows[i], ("out", "in" + str(i)))
-
- fc = local_workflow.get_output("merged", dpf.types.fields_container)
- print(fc)
- print(fc[0].min().data)
- print(fc[0].max().data)
-
- dpf.server.shutdown_all_session_servers()
-
-
-
-
-.. rst-class:: sphx-glr-script-out
-
- Out:
-
- .. code-block:: none
-
- DPF Fields Container
- with 1 field(s)
- defined on labels: time
-
- with:
- - field 0 {time: 1} with Nodal location, 1 components and 432 entities.
-
- [0.]
- [10.03242272]
- ("'NoneType' object has no attribute 'shutdown'",)
-
-
-
-
-
-.. rst-class:: sphx-glr-timing
-
- **Total running time of the script:** ( 0 minutes 0.913 seconds)
-
-
-.. _sphx_glr_download_examples_06-distributed-post_01-distributed_delegate_to_server.py:
-
-
-.. only :: html
-
- .. container:: sphx-glr-footer
- :class: sphx-glr-footer-example
-
-
-
- .. container:: sphx-glr-download sphx-glr-download-python
-
- :download:`Download Python source code: 01-distributed_delegate_to_server.py <01-distributed_delegate_to_server.py>`
-
-
-
- .. container:: sphx-glr-download sphx-glr-download-jupyter
-
- :download:`Download Jupyter notebook: 01-distributed_delegate_to_server.ipynb <01-distributed_delegate_to_server.ipynb>`
-
-
-.. only:: html
-
- .. rst-class:: sphx-glr-signature
-
- `Gallery generated by Sphinx-Gallery `_
diff --git a/docs/source/examples/06-distributed-post/01-distributed_delegate_to_server_codeobj.pickle b/docs/source/examples/06-distributed-post/01-distributed_delegate_to_server_codeobj.pickle
deleted file mode 100644
index 0b97c4862e5..00000000000
Binary files a/docs/source/examples/06-distributed-post/01-distributed_delegate_to_server_codeobj.pickle and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/05-distributed_total_disp_with_operators.ipynb b/docs/source/examples/06-distributed-post/01-distributed_workflows_on_remote.ipynb
similarity index 59%
rename from docs/source/examples/06-distributed-post/05-distributed_total_disp_with_operators.ipynb
rename to docs/source/examples/06-distributed-post/01-distributed_workflows_on_remote.ipynb
index 68f84e8ecea..43578b38870 100644
--- a/docs/source/examples/06-distributed-post/05-distributed_total_disp_with_operators.ipynb
+++ b/docs/source/examples/06-distributed-post/01-distributed_workflows_on_remote.ipynb
@@ -1,176 +1,169 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "%matplotlib inline"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "\n\n# Distributed post without client connection to remote processes with Operators\nThis example shows how distributed files can be read and post processed\non distributed processes. After remote post processing, results a merged\non the local process.\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Import dpf module and its examples files\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "from ansys.dpf import core as dpf\nfrom ansys.dpf.core import examples\nfrom ansys.dpf.core import operators as ops"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Create the template workflow of total displacement\n\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Configure the servers\nMake a list of ip addresses an port numbers on which dpf servers are\nstarted. Workflows instances will be created on each of those servers to\naddress each a different result file.\nIn this example, we will post process an analysis distributed in 2 files,\nwe will consequently require 2 remote processes\nTo make this example easier, we will start local servers here,\nbut we could get connected to any existing servers on the network.\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]\nips = [remote_server.ip for remote_server in remote_servers]\nports = [remote_server.port for remote_server in remote_servers]"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Print the ips and ports\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "print(\"ips:\", ips)\nprint(\"ports:\", ports)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Here we show how we could send files in temporary directory if we were not\nin shared memory\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "files = examples.download_distributed_files()\nserver_file_paths = [dpf.upload_file_in_tmp_folder(files[0], server=remote_servers[0]),\n dpf.upload_file_in_tmp_folder(files[1], server=remote_servers[1])]"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Send workflows on servers\nHere we create new instances on the server by copies of the template workflow\nWe also connect the data sources to those workflows\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "remote_operators = []\nfor i, server in enumerate(remote_servers):\n displacement = ops.result.displacement(server=server)\n norm = ops.math.norm_fc(displacement, server=server)\n remote_operators.append(norm)\n ds = dpf.DataSources(server_file_paths[i], server=server)\n displacement.inputs.data_sources(ds)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Create a local workflow able to merge the results\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "merge = ops.utility.merge_fields_containers()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Connect the workflows together and get the output\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "for i, server in enumerate(remote_servers):\n merge.connect(i, remote_operators[i], 0)\n\nfc = merge.get_output(0, dpf.types.fields_container)\nprint(fc)\nprint(fc[0].min().data)\nprint(fc[0].max().data)"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.8.5"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "%matplotlib inline"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+      "# Create custom workflow on distributed processes {#ref_distributed_workflows_on_remote}\r\n\r\nThis example shows how distributed files can be read and post-processed\r\non distributed processes. After remote post-processing, results are\r\nmerged on the local process. In this example, different operator\r\nsequences are directly created on different servers. These operators are\r\nthen connected together without having to care that they are on remote\r\nprocesses.\r\n\r\n![](01-operator-dep.svg){.align-center width=\"400px\"}\r\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+      "Import the dpf module and its example files\r\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "from ansys.dpf import core as dpf\nfrom ansys.dpf.core import examples\nfrom ansys.dpf.core import operators as ops"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Configure the servers\r\n\r\nTo make this example easier, we will start local servers here, but we\r\ncould get connected to any existing servers on the network.\r\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+      "Here we show how we could send files to a temporary directory if we were\r\nnot in shared memory\r\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "files = examples.download_distributed_files()\nserver_file_paths = [dpf.upload_file_in_tmp_folder(files[0], server=remote_servers[0]),\n dpf.upload_file_in_tmp_folder(files[1], server=remote_servers[1])]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "First operator chain.\r\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "remote_operators = []\n\nstress1 = ops.result.stress(server=remote_servers[0])\nremote_operators.append(stress1)\nds = dpf.DataSources(server_file_paths[0], server=remote_servers[0])\nstress1.inputs.data_sources(ds)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Second operator chain.\r\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "stress2 = ops.result.stress(server=remote_servers[1])\nmul = stress2 * 2.0\nremote_operators.append(mul)\nds = dpf.DataSources(server_file_paths[1], server=remote_servers[1])\nstress2.inputs.data_sources(ds)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Local merge operator.\r\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "merge = ops.utility.merge_fields_containers()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+      "# Connect the operator chains together and get the output\r\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "nodal = ops.averaging.to_nodal_fc(merge)\n\nmerge.connect(0, remote_operators[0], 0)\nmerge.connect(1, remote_operators[1], 0)\n\nfc = nodal.get_output(0, dpf.types.fields_container)\nprint(fc[0])\nfc[0].meshed_region.plot(fc[0])"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.9.0"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
}
\ No newline at end of file
diff --git a/docs/source/examples/06-distributed-post/01-distributed_workflows_on_remote.py b/docs/source/examples/06-distributed-post/01-distributed_workflows_on_remote.py
new file mode 100644
index 00000000000..e52e00f0283
--- /dev/null
+++ b/docs/source/examples/06-distributed-post/01-distributed_workflows_on_remote.py
@@ -0,0 +1,74 @@
+"""
+.. _ref_distributed_workflows_on_remote:
+
+Create custom workflow on distributed processes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+This example shows how distributed files can be read and post-processed
+on distributed processes. After remote post-processing,
+results are merged on the local process. In this example, different operator
+sequences are directly created on different servers. These operators are then
+connected together without having to care that they are on remote processes.
+
+.. image:: 01-operator-dep.svg
+ :align: center
+ :width: 400
+"""
+###############################################################################
+# Import the dpf module and its example files
+
+from ansys.dpf import core as dpf
+from ansys.dpf.core import examples
+from ansys.dpf.core import operators as ops
+
+###############################################################################
+# Configure the servers
+# ~~~~~~~~~~~~~~~~~~~~~~
+# To make this example easier, we will start local servers here,
+# but we could connect to any existing servers on the network.
+
+remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]
+
+###############################################################################
+# Here we show how we could send files to a temporary directory if we were not
+# in shared memory.
+
+files = examples.download_distributed_files()
+server_file_paths = [dpf.upload_file_in_tmp_folder(files[0], server=remote_servers[0]),
+ dpf.upload_file_in_tmp_folder(files[1], server=remote_servers[1])]
+
+###############################################################################
+# First operator chain.
+
+remote_operators = []
+
+stress1 = ops.result.stress(server=remote_servers[0])
+remote_operators.append(stress1)
+ds = dpf.DataSources(server_file_paths[0], server=remote_servers[0])
+stress1.inputs.data_sources(ds)
+
+###############################################################################
+# Second operator chain.
+
+stress2 = ops.result.stress(server=remote_servers[1])
+mul = stress2 * 2.0
+remote_operators.append(mul)
+ds = dpf.DataSources(server_file_paths[1], server=remote_servers[1])
+stress2.inputs.data_sources(ds)
+
+###############################################################################
+# Local merge operator.
+
+merge = ops.utility.merge_fields_containers()
+
+###############################################################################
+# Connect the operator chains together and get the output
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+nodal = ops.averaging.to_nodal_fc(merge)
+
+merge.connect(0, remote_operators[0], 0)
+merge.connect(1, remote_operators[1], 0)
+
+fc = nodal.get_output(0, dpf.types.fields_container)
+print(fc[0])
+fc[0].meshed_region.plot(fc[0])
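Reviewer note: the wiring pattern in this new example generalizes to any number of
remote processes, since input pin ``i`` of the local merge operator receives output
pin 0 of the ``i``-th remote operator. A minimal sketch of that generalization,
reusing only calls that appear in the file above (the per-file upload step assumes
the servers do not share the local file system):

.. code-block:: python

    from ansys.dpf import core as dpf
    from ansys.dpf.core import examples
    from ansys.dpf.core import operators as ops

    files = examples.download_distributed_files()
    # one local server per result file
    servers = [dpf.start_local_server(as_global=False) for _ in files]

    merge = ops.utility.merge_fields_containers()
    for i, (path, server) in enumerate(zip(files, servers)):
        # make the file visible to its server, then read stresses remotely
        remote_path = dpf.upload_file_in_tmp_folder(path, server=server)
        stress = ops.result.stress(server=server)
        stress.inputs.data_sources(dpf.DataSources(remote_path, server=server))
        # local merge pin i <- remote operator output pin 0
        merge.connect(i, stress, 0)

    nodal = ops.averaging.to_nodal_fc(merge)
    fc = nodal.get_output(0, dpf.types.fields_container)
    print(fc)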
diff --git a/docs/source/examples/06-distributed-post/01-distributed_workflows_on_remote.py.md5 b/docs/source/examples/06-distributed-post/01-distributed_workflows_on_remote.py.md5
new file mode 100644
index 00000000000..bc6f19f82c7
--- /dev/null
+++ b/docs/source/examples/06-distributed-post/01-distributed_workflows_on_remote.py.md5
@@ -0,0 +1 @@
+54d1e8dc36efba0170e88f1d3fa1d436
\ No newline at end of file
diff --git a/docs/source/examples/06-distributed-post/01-distributed_workflows_on_remote.rst b/docs/source/examples/06-distributed-post/01-distributed_workflows_on_remote.rst
new file mode 100644
index 00000000000..2cb611f0f6a
--- /dev/null
+++ b/docs/source/examples/06-distributed-post/01-distributed_workflows_on_remote.rst
@@ -0,0 +1,234 @@
+
+.. DO NOT EDIT.
+.. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY.
+.. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE:
+.. "examples\06-distributed-post\01-distributed_workflows_on_remote.py"
+.. LINE NUMBERS ARE GIVEN BELOW.
+
+.. only:: html
+
+ .. note::
+ :class: sphx-glr-download-link-note
+
+      Click :ref:`here <sphx_glr_download_examples_06-distributed-post_01-distributed_workflows_on_remote.py>`
+ to download the full example code
+
+.. rst-class:: sphx-glr-example-title
+
+.. _sphx_glr_examples_06-distributed-post_01-distributed_workflows_on_remote.py:
+
+
+.. _ref_distributed_workflows_on_remote:
+
+Create custom workflow on distributed processes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+This example shows how distributed files can be read and post-processed
+on distributed processes. After remote post-processing, the results are
+merged on the local process. In this example, different operator sequences
+are created directly on different servers. These operators are then
+connected together without having to care that they run on remote processes.
+
+.. image:: 01-operator-dep.svg
+ :align: center
+ :width: 400
+
+.. GENERATED FROM PYTHON SOURCE LINES 17-18
+
+Import the dpf module and its example files
+
+.. GENERATED FROM PYTHON SOURCE LINES 18-23
+
+.. code-block:: default
+
+
+ from ansys.dpf import core as dpf
+ from ansys.dpf.core import examples
+ from ansys.dpf.core import operators as ops
+
+
+
+
+
+
+
+
+.. GENERATED FROM PYTHON SOURCE LINES 24-28
+
+Configure the servers
+~~~~~~~~~~~~~~~~~~~~~~
+To make this example easier, we will start local servers here,
+but we could connect to any existing servers on the network.
+
+.. GENERATED FROM PYTHON SOURCE LINES 28-31
+
+.. code-block:: default
+
+
+ remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]
+
+
+
+
+
+
+
+
+.. GENERATED FROM PYTHON SOURCE LINES 32-34
+
+Here we show how we could send files to a temporary directory if we were not
+in shared memory.
+
+.. GENERATED FROM PYTHON SOURCE LINES 34-39
+
+.. code-block:: default
+
+
+ files = examples.download_distributed_files()
+ server_file_paths = [dpf.upload_file_in_tmp_folder(files[0], server=remote_servers[0]),
+ dpf.upload_file_in_tmp_folder(files[1], server=remote_servers[1])]
+
+
+
+
+
+
+
+
+.. GENERATED FROM PYTHON SOURCE LINES 40-41
+
+First operator chain.
+
+.. GENERATED FROM PYTHON SOURCE LINES 41-49
+
+.. code-block:: default
+
+
+ remote_operators = []
+
+ stress1 = ops.result.stress(server=remote_servers[0])
+ remote_operators.append(stress1)
+ ds = dpf.DataSources(server_file_paths[0], server=remote_servers[0])
+ stress1.inputs.data_sources(ds)
+
+
+
+
+
+
+
+
+.. GENERATED FROM PYTHON SOURCE LINES 50-51
+
+Second operator chain.
+
+.. GENERATED FROM PYTHON SOURCE LINES 51-58
+
+.. code-block:: default
+
+
+ stress2 = ops.result.stress(server=remote_servers[1])
+ mul = stress2 * 2.0
+ remote_operators.append(mul)
+ ds = dpf.DataSources(server_file_paths[1], server=remote_servers[1])
+ stress2.inputs.data_sources(ds)
+
+
+
+
+
+
+
+
+.. GENERATED FROM PYTHON SOURCE LINES 59-60
+
+Local merge operator.
+
+.. GENERATED FROM PYTHON SOURCE LINES 60-63
+
+.. code-block:: default
+
+
+ merge = ops.utility.merge_fields_containers()
+
+
+
+
+
+
+
+
+.. GENERATED FROM PYTHON SOURCE LINES 64-66
+
+Connect the operator chains together and get the output
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. GENERATED FROM PYTHON SOURCE LINES 66-75
+
+.. code-block:: default
+
+
+ nodal = ops.averaging.to_nodal_fc(merge)
+
+ merge.connect(0, remote_operators[0], 0)
+ merge.connect(1, remote_operators[1], 0)
+
+ fc = nodal.get_output(0, dpf.types.fields_container)
+ print(fc[0])
+ fc[0].meshed_region.plot(fc[0])
+
+
+
+.. image-sg:: /examples/06-distributed-post/images/sphx_glr_01-distributed_workflows_on_remote_001.png
+ :alt: 01 distributed workflows on remote
+ :srcset: /examples/06-distributed-post/images/sphx_glr_01-distributed_workflows_on_remote_001.png
+ :class: sphx-glr-single-img
+
+
+.. rst-class:: sphx-glr-script-out
+
+ Out:
+
+ .. code-block:: none
+
+ DPF Field
+ Location: Nodal
+ Unit: Pa
+ 432 entities
+ Data:6 components and 432 elementary data
+
+
+
+
+
+
+.. rst-class:: sphx-glr-timing
+
+ **Total running time of the script:** ( 0 minutes 5.209 seconds)
+
+
+.. _sphx_glr_download_examples_06-distributed-post_01-distributed_workflows_on_remote.py:
+
+
+.. only :: html
+
+ .. container:: sphx-glr-footer
+ :class: sphx-glr-footer-example
+
+
+
+ .. container:: sphx-glr-download sphx-glr-download-python
+
+ :download:`Download Python source code: 01-distributed_workflows_on_remote.py <01-distributed_workflows_on_remote.py>`
+
+
+
+ .. container:: sphx-glr-download sphx-glr-download-jupyter
+
+ :download:`Download Jupyter notebook: 01-distributed_workflows_on_remote.ipynb <01-distributed_workflows_on_remote.ipynb>`
+
+
+.. only:: html
+
+ .. rst-class:: sphx-glr-signature
+
+    `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_
diff --git a/docs/source/examples/06-distributed-post/01-distributed_workflows_on_remote_codeobj.pickle b/docs/source/examples/06-distributed-post/01-distributed_workflows_on_remote_codeobj.pickle
new file mode 100644
index 00000000000..da8f5ceadad
Binary files /dev/null and b/docs/source/examples/06-distributed-post/01-distributed_workflows_on_remote_codeobj.pickle differ
diff --git a/docs/source/examples/06-distributed-post/01-operator-dep.dot b/docs/source/examples/06-distributed-post/01-operator-dep.dot
new file mode 100644
index 00000000000..c39ea927ffd
--- /dev/null
+++ b/docs/source/examples/06-distributed-post/01-operator-dep.dot
@@ -0,0 +1,30 @@
+digraph foo {
+ graph [pad="0", nodesep="0.3", ranksep="0.3"]
+ node [shape=box, style=filled, fillcolor="#ffcc00", margin="0"];
+ rankdir=LR;
+ splines=line;
+
+ subgraph cluster_1 {
+ ds01 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
+
+ ds01 -> stress1 [style=dashed];
+
+ label="Server 1";
+ style=filled;
+ fillcolor=lightgrey;
+ }
+
+ subgraph cluster_2 {
+ ds02 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
+
+ ds02 -> stress2 [style=dashed];
+ stress2 -> mul;
+
+ label="Server 2";
+ style=filled;
+ fillcolor=lightgrey;
+ }
+
+ stress1 -> "merge";
+ mul -> "merge";
+}
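The committed ``01-operator-dep.svg`` below is rendered from this DOT source. One
way to regenerate it, assuming the Graphviz ``dot`` executable is on the PATH
(this is not part of the docs toolchain, just a convenience sketch):

.. code-block:: python

    import subprocess

    # render the operator-dependency diagram shipped with the gallery
    subprocess.run(
        ["dot", "-Tsvg", "01-operator-dep.dot", "-o", "01-operator-dep.svg"],
        check=True,
    )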
diff --git a/docs/source/examples/06-distributed-post/01-operator-dep.svg b/docs/source/examples/06-distributed-post/01-operator-dep.svg
new file mode 100644
index 00000000000..c371cd09e9c
--- /dev/null
+++ b/docs/source/examples/06-distributed-post/01-operator-dep.svg
@@ -0,0 +1,89 @@
[SVG markup omitted: 89 added lines]
diff --git a/docs/source/examples/06-distributed-post/02-distributed-msup_expansion.ipynb b/docs/source/examples/06-distributed-post/02-distributed-msup_expansion.ipynb
new file mode 100644
index 00000000000..44d3f5d1517
--- /dev/null
+++ b/docs/source/examples/06-distributed-post/02-distributed-msup_expansion.ipynb
@@ -0,0 +1,169 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "%matplotlib inline"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+        "# Distributed modal superposition {#ref_distributed_msup}\r\n\r\nThis example shows how distributed files can be read and expanded on\r\ndistributed processes. The modal basis (2 distributed files) is read on\r\n2 remote servers, while the modal response is read and expanded on a\r\nthird server.\r\n\r\nTo help understand this example, the following diagram is provided. It\r\nshows the operator chain used to compute the final result.\r\n\r\n![](02-operator-dep.svg){.align-center width=\"800px\"}\r\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+        "Import the dpf module and its example files.\r\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "from ansys.dpf import core as dpf\nfrom ansys.dpf.core import examples\nfrom ansys.dpf.core import operators as ops"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+        "# Configure the servers\r\n\r\nMake a list of IP addresses and port numbers on which DPF servers are\r\nstarted. Operator instances will be created on each of those servers to\r\neach address a different result file. In this example, we will\r\npost-process an analysis distributed in 2 files, so we will\r\nconsequently require 2 remote processes. To make this example easier,\r\nwe will start local servers here, but we could connect to any existing\r\nservers on the network.\r\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]\nips = [remote_server.ip for remote_server in remote_servers]\nports = [remote_server.port for remote_server in remote_servers]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Print the ips and ports.\r\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "print(\"ips:\", ips)\nprint(\"ports:\", ports)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Choose the file path.\r\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "base_path = examples.distributed_msup_folder\nfiles = [base_path + r'/file0.mode', base_path + r'/file1.mode']\nfiles_aux = [base_path + r'/file0.rst', base_path + r'/file1.rst']"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+        "# Create the operators on the servers\r\n\r\nOn each server we create two new operators, one for 'displacement'\r\ncomputations and a 'mesh_provider' operator, and then define their data\r\nsources. The displacement and mesh_provider operators receive data from\r\ntheir respective data files on each server.\r\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "remote_displacement_operators = []\nremote_mesh_operators = []\nfor i, server in enumerate(remote_servers):\n displacement = ops.result.displacement(server=server)\n mesh = ops.mesh.mesh_provider(server=server)\n remote_displacement_operators.append(displacement)\n remote_mesh_operators.append(mesh)\n ds = dpf.DataSources(files[i], server=server)\n ds.add_file_path(files_aux[i])\n displacement.inputs.data_sources(ds)\n mesh.inputs.data_sources(ds)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+        "# Create a local operator chain for expansion\r\n\r\nIn the following series of operators, we merge the modal basis and the\r\nmeshes, read the modal response, and expand the modal response with the\r\nmodal basis.\r\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "merge_fields = ops.utility.merge_fields_containers()\nmerge_mesh = ops.utility.merge_meshes()\n\nds = dpf.DataSources(base_path + r'/file_load_1.rfrq')\nresponse = ops.result.displacement(data_sources=ds)\nresponse.inputs.mesh(merge_mesh.outputs.merges_mesh)\n\nexpansion = ops.math.modal_superposition(\n solution_in_modal_space=response,\n modal_basis=merge_fields\n)\ncomponent = ops.logic.component_selector_fc(expansion, 1)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+        "# Connect the operator chains together and get the output\r\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "for i, server in enumerate(remote_servers):\n merge_fields.connect(i, remote_displacement_operators[i], 0)\n merge_mesh.connect(i, remote_mesh_operators[i], 0)\n\nfc = component.get_output(0, dpf.types.fields_container)\nmerged_mesh = merge_mesh.get_output(0, dpf.types.meshed_region)\n\nmerged_mesh.plot(fc.get_field_by_time_complex_ids(1, 0))\nmerged_mesh.plot(fc.get_field_by_time_complex_ids(10, 0))\nprint(fc)"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.9.0"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
\ No newline at end of file
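A note on the integer arguments used in the last cell, continuing from the
variables defined in the notebook above: ``component_selector_fc(expansion, 1)``
keeps a single displacement component, and ``get_field_by_time_complex_ids(tset,
cplx)`` takes a time/frequency set id plus a complex-part id. Assuming the usual
DPF convention that complex id 0 is the real part and 1 the imaginary part (an
assumption worth verifying against your server version), the two parts of one set
can be fetched like this:

.. code-block:: python

    # fc and merged_mesh come from the expansion steps above
    real_part = fc.get_field_by_time_complex_ids(1, 0)  # set 1, real part
    imag_part = fc.get_field_by_time_complex_ids(1, 1)  # set 1, imaginary part (assumed id)
    merged_mesh.plot(real_part)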
diff --git a/docs/source/examples/06-distributed-post/02-distributed-msup_expansion.py b/docs/source/examples/06-distributed-post/02-distributed-msup_expansion.py
new file mode 100644
index 00000000000..d3f1cba0c90
--- /dev/null
+++ b/docs/source/examples/06-distributed-post/02-distributed-msup_expansion.py
@@ -0,0 +1,102 @@
+"""
+.. _ref_distributed_msup:
+
+Distributed modal superposition
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+This example shows how distributed files can be read and expanded
+on distributed processes. The modal basis (2 distributed files) is read
+on 2 remote servers, while the modal response is read and expanded
+on a third server.
+
+To help understand this example, the following diagram is provided. It shows
+the operator chain used to compute the final result.
+
+.. image:: 02-operator-dep.svg
+ :align: center
+ :width: 800
+"""
+
+###############################################################################
+# Import the dpf module and its example files.
+
+from ansys.dpf import core as dpf
+from ansys.dpf.core import examples
+from ansys.dpf.core import operators as ops
+
+###############################################################################
+# Configure the servers
+# ~~~~~~~~~~~~~~~~~~~~~~
+# Make a list of IP addresses and port numbers on which DPF servers are
+# started. Operator instances will be created on each of those servers to
+# each address a different result file.
+# In this example, we will post-process an analysis distributed in 2 files,
+# so we will consequently require 2 remote processes.
+# To make this example easier, we will start local servers here,
+# but we could connect to any existing servers on the network.
+
+remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]
+ips = [remote_server.ip for remote_server in remote_servers]
+ports = [remote_server.port for remote_server in remote_servers]
+
+###############################################################################
+# Print the ips and ports.
+print("ips:", ips)
+print("ports:", ports)
+
+###############################################################################
+# Choose the file path.
+
+base_path = examples.distributed_msup_folder
+files = [base_path + r'/file0.mode', base_path + r'/file1.mode']
+files_aux = [base_path + r'/file0.rst', base_path + r'/file1.rst']
+
+###############################################################################
+# Create the operators on the servers
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# On each server we create two new operators, one for 'displacement' computations
+# and a 'mesh_provider' operator, and then define their data sources. The displacement
+# and mesh_provider operators receive data from their respective data files on each server.
+remote_displacement_operators = []
+remote_mesh_operators = []
+for i, server in enumerate(remote_servers):
+ displacement = ops.result.displacement(server=server)
+ mesh = ops.mesh.mesh_provider(server=server)
+ remote_displacement_operators.append(displacement)
+ remote_mesh_operators.append(mesh)
+ ds = dpf.DataSources(files[i], server=server)
+ ds.add_file_path(files_aux[i])
+ displacement.inputs.data_sources(ds)
+ mesh.inputs.data_sources(ds)
+
+###############################################################################
+# Create a local operator chain for expansion
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# In the following series of operators, we merge the modal basis and the meshes,
+# read the modal response, and expand the modal response with the modal basis.
+
+merge_fields = ops.utility.merge_fields_containers()
+merge_mesh = ops.utility.merge_meshes()
+
+ds = dpf.DataSources(base_path + r'/file_load_1.rfrq')
+response = ops.result.displacement(data_sources=ds)
+response.inputs.mesh(merge_mesh.outputs.merges_mesh)
+
+expansion = ops.math.modal_superposition(
+ solution_in_modal_space=response,
+ modal_basis=merge_fields
+)
+component = ops.logic.component_selector_fc(expansion, 1)
+
+###############################################################################
+# Connect the operator chains together and get the output
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+for i, server in enumerate(remote_servers):
+ merge_fields.connect(i, remote_displacement_operators[i], 0)
+ merge_mesh.connect(i, remote_mesh_operators[i], 0)
+
+fc = component.get_output(0, dpf.types.fields_container)
+merged_mesh = merge_mesh.get_output(0, dpf.types.meshed_region)
+
+merged_mesh.plot(fc.get_field_by_time_complex_ids(1, 0))
+merged_mesh.plot(fc.get_field_by_time_complex_ids(10, 0))
+print(fc)
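The previous version of this script ended with
``dpf.server.shutdown_all_session_servers()``; the call was dropped in this
revision, presumably so the servers stay up for the gallery build. If you run the
example standalone and want to release the two local servers started at the top,
the removed one-liner still applies:

.. code-block:: python

    from ansys.dpf import core as dpf

    # stop every server started by this session (local servers included)
    dpf.server.shutdown_all_session_servers()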
diff --git a/docs/source/examples/06-distributed-post/02-distributed-msup_expansion.py.md5 b/docs/source/examples/06-distributed-post/02-distributed-msup_expansion.py.md5
new file mode 100644
index 00000000000..248dc6e392b
--- /dev/null
+++ b/docs/source/examples/06-distributed-post/02-distributed-msup_expansion.py.md5
@@ -0,0 +1 @@
+96aa330285750b41dfed6e28d1fa1f5b
\ No newline at end of file
diff --git a/docs/source/examples/06-distributed-post/03-distributed-msup_expansion.rst b/docs/source/examples/06-distributed-post/02-distributed-msup_expansion.rst
similarity index 56%
rename from docs/source/examples/06-distributed-post/03-distributed-msup_expansion.rst
rename to docs/source/examples/06-distributed-post/02-distributed-msup_expansion.rst
index 7704e4f150e..e0db7eae83f 100644
--- a/docs/source/examples/06-distributed-post/03-distributed-msup_expansion.rst
+++ b/docs/source/examples/06-distributed-post/02-distributed-msup_expansion.rst
@@ -2,7 +2,7 @@
.. DO NOT EDIT.
.. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY.
.. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE:
-.. "examples\06-distributed-post\03-distributed-msup_expansion.py"
+.. "examples\06-distributed-post\02-distributed-msup_expansion.py"
.. LINE NUMBERS ARE GIVEN BELOW.
.. only:: html
@@ -10,12 +10,12 @@
.. note::
:class: sphx-glr-download-link-note
-      Click :ref:`here <sphx_glr_download_examples_06-distributed-post_03-distributed-msup_expansion.py>`
+      Click :ref:`here <sphx_glr_download_examples_06-distributed-post_02-distributed-msup_expansion.py>`
to download the full example code
.. rst-class:: sphx-glr-example-title
-.. _sphx_glr_examples_06-distributed-post_03-distributed-msup_expansion.py:
+.. _sphx_glr_examples_06-distributed-post_02-distributed-msup_expansion.py:
.. _ref_distributed_msup:
@@ -27,11 +27,18 @@ on distributed processes. The modal basis (2 distributed files) is read
on 2 remote servers and the modal response reading and the expansion is
done on a third server.
-.. GENERATED FROM PYTHON SOURCE LINES 13-14
+To help understand this example, the following diagram is provided. It shows
+the operator chain used to compute the final result.
-Import dpf module and its examples files
+.. image:: 02-operator-dep.svg
+ :align: center
+ :width: 800
-.. GENERATED FROM PYTHON SOURCE LINES 14-19
+.. GENERATED FROM PYTHON SOURCE LINES 20-21
+
+Import the dpf module and its example files.
+
+.. GENERATED FROM PYTHON SOURCE LINES 21-26
.. code-block:: default
@@ -47,63 +54,19 @@ Import dpf module and its examples files
-.. GENERATED FROM PYTHON SOURCE LINES 20-23
-
-Create the template workflow
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-this workflow will provide the modal basis and the mesh for each domain
-
-.. GENERATED FROM PYTHON SOURCE LINES 23-28
-
-.. code-block:: default
-
-
- template_workflow = dpf.Workflow()
- displacement = ops.result.displacement()
- mesh = ops.mesh.mesh_provider()
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 29-31
-
-Add the operators to the template workflow and name its inputs and outputs
-Once workflow's inputs and outputs are named, they can be connected later on
-
-.. GENERATED FROM PYTHON SOURCE LINES 31-37
-
-.. code-block:: default
-
- template_workflow.add_operators([displacement])
- template_workflow.set_input_name("data_sources", displacement.inputs.data_sources)
- template_workflow.set_input_name("data_sources", mesh.inputs.data_sources)
- template_workflow.set_output_name("out", displacement.outputs.fields_container)
- template_workflow.set_output_name("outmesh", mesh.outputs.mesh)
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 38-47
+.. GENERATED FROM PYTHON SOURCE LINES 27-36
Configure the servers
~~~~~~~~~~~~~~~~~~~~~~
-Make a list of ip addresses an port numbers on which dpf servers are
-started. Workflows instances will be created on each of those servers to
+Make a list of IP addresses and port numbers on which DPF servers are
+started. Operator instances will be created on each of those servers to
address each a different result file.
In this example, we will post process an analysis distributed in 2 files,
-we will consequently require 2 remote processes
+so we will consequently require 2 remote processes.
To make this example easier, we will start local servers here,
but we could get connected to any existing servers on the network.
-.. GENERATED FROM PYTHON SOURCE LINES 47-52
+.. GENERATED FROM PYTHON SOURCE LINES 36-41
.. code-block:: default
@@ -119,11 +82,11 @@ but we could get connected to any existing servers on the network.
-.. GENERATED FROM PYTHON SOURCE LINES 53-54
+.. GENERATED FROM PYTHON SOURCE LINES 42-43
-Print the ips and ports
+Print the ips and ports.
-.. GENERATED FROM PYTHON SOURCE LINES 54-57
+.. GENERATED FROM PYTHON SOURCE LINES 43-46
.. code-block:: default
@@ -141,16 +104,16 @@ Print the ips and ports
.. code-block:: none
ips: ['127.0.0.1', '127.0.0.1']
- ports: [50058, 50059]
+ ports: [50054, 50055]
-.. GENERATED FROM PYTHON SOURCE LINES 58-59
+.. GENERATED FROM PYTHON SOURCE LINES 47-48
-Choose the file path
+Choose the file path.
-.. GENERATED FROM PYTHON SOURCE LINES 59-64
+.. GENERATED FROM PYTHON SOURCE LINES 48-53
.. code-block:: default
@@ -166,23 +129,29 @@ Choose the file path
-.. GENERATED FROM PYTHON SOURCE LINES 65-69
+.. GENERATED FROM PYTHON SOURCE LINES 54-59
-Send workflows on servers
+Create the operators on the servers
~~~~~~~~~~~~~~~~~~~~~~~~~~
-Here we create new instances on the server by copies of the template workflow
-We also connect the data sources to those workflows
+On each server we create two new operators, one for 'displacement' computations
+and a 'mesh_provider' operator, and then define their data sources. The displacement
+and mesh_provider operators receive data from their respective data files on each server.
-.. GENERATED FROM PYTHON SOURCE LINES 69-76
+.. GENERATED FROM PYTHON SOURCE LINES 59-71
.. code-block:: default
- remote_workflows = []
+ remote_displacement_operators = []
+ remote_mesh_operators = []
for i, server in enumerate(remote_servers):
- remote_workflows.append(template_workflow.create_on_other_server(server))
- ds = dpf.DataSources(files[i])
+ displacement = ops.result.displacement(server=server)
+ mesh = ops.mesh.mesh_provider(server=server)
+ remote_displacement_operators.append(displacement)
+ remote_mesh_operators.append(mesh)
+ ds = dpf.DataSources(files[i], server=server)
ds.add_file_path(files_aux[i])
- remote_workflows[i].connect("data_sources", ds)
+ displacement.inputs.data_sources(ds)
+ mesh.inputs.data_sources(ds)
@@ -191,37 +160,31 @@ We also connect the data sources to those workflows
-.. GENERATED FROM PYTHON SOURCE LINES 77-81
+.. GENERATED FROM PYTHON SOURCE LINES 72-76
-Create a local workflow for expansion
+Create a local operator chain for expansion
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-In this workflow we merge the modal basis, the meshes, read the modal response
-and expand the modal response with the modal basis
+In the following series of operators, we merge the modal basis and the meshes,
+read the modal response, and expand the modal response with the modal basis.
-.. GENERATED FROM PYTHON SOURCE LINES 81-102
+.. GENERATED FROM PYTHON SOURCE LINES 76-90
.. code-block:: default
- local_workflow = dpf.Workflow()
- merge = ops.utility.merge_fields_containers()
+ merge_fields = ops.utility.merge_fields_containers()
merge_mesh = ops.utility.merge_meshes()
ds = dpf.DataSources(base_path + r'/file_load_1.rfrq')
response = ops.result.displacement(data_sources=ds)
response.inputs.mesh(merge_mesh.outputs.merges_mesh)
- expansion = ops.math.modal_superposition(solution_in_modal_space=response, modal_basis=merge)
+ expansion = ops.math.modal_superposition(
+ solution_in_modal_space=response,
+ modal_basis=merge_fields
+ )
component = ops.logic.component_selector_fc(expansion, 1)
- local_workflow.add_operators([merge, response, expansion, merge_mesh, component])
- local_workflow.set_input_name("in0", merge, 0)
- local_workflow.set_input_name("in1", merge, 1)
- local_workflow.set_input_name("inmesh0", merge_mesh, 0)
- local_workflow.set_input_name("inmesh1", merge_mesh, 1)
-
- local_workflow.set_output_name("expanded", component.outputs.fields_container)
- local_workflow.set_output_name("mesh", merge_mesh.outputs.merges_mesh)
@@ -229,29 +192,26 @@ and expand the modal response with the modal basis
+.. GENERATED FROM PYTHON SOURCE LINES 91-93
-.. GENERATED FROM PYTHON SOURCE LINES 103-105
-
-Connect the workflows together and get the output
+Connect the operator chains together and get the output
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. GENERATED FROM PYTHON SOURCE LINES 105-117
+.. GENERATED FROM PYTHON SOURCE LINES 93-103
.. code-block:: default
-
for i, server in enumerate(remote_servers):
- local_workflow.connect_with(remote_workflows[i],
- {"out": "in" + str(i), "outmesh": "inmesh" + str(i)})
+ merge_fields.connect(i, remote_displacement_operators[i], 0)
+ merge_mesh.connect(i, remote_mesh_operators[i], 0)
+
+ fc = component.get_output(0, dpf.types.fields_container)
+ merged_mesh = merge_mesh.get_output(0, dpf.types.meshed_region)
- fc = local_workflow.get_output("expanded", dpf.types.fields_container)
- merged_mesh = local_workflow.get_output("mesh", dpf.types.meshed_region)
merged_mesh.plot(fc.get_field_by_time_complex_ids(1, 0))
merged_mesh.plot(fc.get_field_by_time_complex_ids(10, 0))
print(fc)
- dpf.server.shutdown_all_session_servers()
-
.. rst-class:: sphx-glr-horizontal
@@ -259,16 +219,16 @@ Connect the workflows together and get the output
*
- .. image-sg:: /examples/06-distributed-post/images/sphx_glr_03-distributed-msup_expansion_001.png
- :alt: 03 distributed msup expansion
- :srcset: /examples/06-distributed-post/images/sphx_glr_03-distributed-msup_expansion_001.png
+ .. image-sg:: /examples/06-distributed-post/images/sphx_glr_02-distributed-msup_expansion_001.png
+ :alt: 02 distributed msup expansion
+ :srcset: /examples/06-distributed-post/images/sphx_glr_02-distributed-msup_expansion_001.png
:class: sphx-glr-multi-img
*
- .. image-sg:: /examples/06-distributed-post/images/sphx_glr_03-distributed-msup_expansion_002.png
- :alt: 03 distributed msup expansion
- :srcset: /examples/06-distributed-post/images/sphx_glr_03-distributed-msup_expansion_002.png
+ .. image-sg:: /examples/06-distributed-post/images/sphx_glr_02-distributed-msup_expansion_002.png
+ :alt: 02 distributed msup expansion
+ :srcset: /examples/06-distributed-post/images/sphx_glr_02-distributed-msup_expansion_002.png
:class: sphx-glr-multi-img
@@ -311,10 +271,10 @@ Connect the workflows together and get the output
.. rst-class:: sphx-glr-timing
- **Total running time of the script:** ( 0 minutes 8.612 seconds)
+ **Total running time of the script:** ( 0 minutes 5.601 seconds)
-.. _sphx_glr_download_examples_06-distributed-post_03-distributed-msup_expansion.py:
+.. _sphx_glr_download_examples_06-distributed-post_02-distributed-msup_expansion.py:
.. only :: html
@@ -326,13 +286,13 @@ Connect the workflows together and get the output
.. container:: sphx-glr-download sphx-glr-download-python
- :download:`Download Python source code: 03-distributed-msup_expansion.py <03-distributed-msup_expansion.py>`
+ :download:`Download Python source code: 02-distributed-msup_expansion.py <02-distributed-msup_expansion.py>`
.. container:: sphx-glr-download sphx-glr-download-jupyter
- :download:`Download Jupyter notebook: 03-distributed-msup_expansion.ipynb <03-distributed-msup_expansion.ipynb>`
+ :download:`Download Jupyter notebook: 02-distributed-msup_expansion.ipynb <02-distributed-msup_expansion.ipynb>`
.. only:: html
diff --git a/docs/source/examples/06-distributed-post/02-distributed-msup_expansion_codeobj.pickle b/docs/source/examples/06-distributed-post/02-distributed-msup_expansion_codeobj.pickle
new file mode 100644
index 00000000000..c2bd02b653d
Binary files /dev/null and b/docs/source/examples/06-distributed-post/02-distributed-msup_expansion_codeobj.pickle differ
diff --git a/docs/source/examples/06-distributed-post/02-distributed_workflows_on_remote.ipynb b/docs/source/examples/06-distributed-post/02-distributed_workflows_on_remote.ipynb
deleted file mode 100644
index b552502009f..00000000000
--- a/docs/source/examples/06-distributed-post/02-distributed_workflows_on_remote.ipynb
+++ /dev/null
@@ -1,169 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "%matplotlib inline"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "\n\n# Connect workflows on different processes implicitly\nThis example shows how distributed files can be read and post processed\non distributed processes. After remote post processing,\nresults a merged on the local process. In this example, different workflows are\ndirectly created on different servers. Those workflows are then connected\ntogether without having to care that they are on remote processes.\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Import dpf module and its examples files\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "from ansys.dpf import core as dpf\nfrom ansys.dpf.core import examples\nfrom ansys.dpf.core import operators as ops"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Configure the servers\nTo make this example easier, we will start local servers here,\nbut we could get connected to any existing servers on the network.\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Create template workflows on remote servers\nFor the purpose of this example, we will create 2 workflows computing\nelemental nodal stresses on different servers. The second workflow will\nmultiply by 2.0 the stresses. A last workflow will merge the outputs\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "files = examples.download_distributed_files()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "first workflow S\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "workflow1 = dpf.Workflow(server=remote_servers[0])\nmodel = dpf.Model(files[0], server=remote_servers[0])\nstress1 = model.results.stress()\nworkflow1.add_operator(stress1)\nworkflow1.set_output_name(\"out1\", stress1.outputs.fields_container)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "second workflow S*2.0\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "workflow2 = dpf.Workflow(server=remote_servers[1])\nmodel = dpf.Model(files[1], server=remote_servers[1])\nstress2 = model.results.stress()\nmul = stress2 * 2.0\nworkflow2.add_operator(mul)\nworkflow2.set_output_name(\"out2\", mul.outputs.fields_container)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "third workflow merge\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "local_workflow = dpf.Workflow()\nmerge = ops.utility.merge_fields_containers()\nnodal = ops.averaging.to_nodal_fc(merge)\nlocal_workflow.add_operators([merge, nodal])\nlocal_workflow.set_input_name(\"in1\", merge, 0)\nlocal_workflow.set_input_name(\"in2\", merge, 1)\nlocal_workflow.set_output_name(\"merged\", nodal.outputs.fields_container)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Connect the workflows together and get the output\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "local_workflow.connect_with(workflow1, (\"out1\", \"in1\"))\nlocal_workflow.connect_with(workflow2, (\"out2\", \"in2\"))\n\nfc = local_workflow.get_output(\"merged\", dpf.types.fields_container)\nfc[0].meshed_region.plot(fc[0])"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.8.5"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
\ No newline at end of file
diff --git a/docs/source/examples/06-distributed-post/02-distributed_workflows_on_remote.py b/docs/source/examples/06-distributed-post/02-distributed_workflows_on_remote.py
deleted file mode 100644
index f246f083d94..00000000000
--- a/docs/source/examples/06-distributed-post/02-distributed_workflows_on_remote.py
+++ /dev/null
@@ -1,71 +0,0 @@
-"""
-.. _ref_distributed_workflows_on_remote:
-
-Connect workflows on different processes implicitly
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This example shows how distributed files can be read and post processed
-on distributed processes. After remote post processing,
-results a merged on the local process. In this example, different workflows are
-directly created on different servers. Those workflows are then connected
-together without having to care that they are on remote processes.
-
-"""
-###############################################################################
-# Import dpf module and its examples files
-
-from ansys.dpf import core as dpf
-from ansys.dpf.core import examples
-from ansys.dpf.core import operators as ops
-
-###############################################################################
-# Configure the servers
-# ~~~~~~~~~~~~~~~~~~~~~~
-# To make this example easier, we will start local servers here,
-# but we could get connected to any existing servers on the network.
-
-remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]
-
-###############################################################################
-# Create template workflows on remote servers
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# For the purpose of this example, we will create 2 workflows computing
-# elemental nodal stresses on different servers. The second workflow will
-# multiply by 2.0 the stresses. A last workflow will merge the outputs
-
-files = examples.download_distributed_files()
-
-###############################################################################
-# first workflow S
-workflow1 = dpf.Workflow(server=remote_servers[0])
-model = dpf.Model(files[0], server=remote_servers[0])
-stress1 = model.results.stress()
-workflow1.add_operator(stress1)
-workflow1.set_output_name("out1", stress1.outputs.fields_container)
-
-###############################################################################
-# second workflow S*2.0
-workflow2 = dpf.Workflow(server=remote_servers[1])
-model = dpf.Model(files[1], server=remote_servers[1])
-stress2 = model.results.stress()
-mul = stress2 * 2.0
-workflow2.add_operator(mul)
-workflow2.set_output_name("out2", mul.outputs.fields_container)
-
-###############################################################################
-# third workflow merge
-local_workflow = dpf.Workflow()
-merge = ops.utility.merge_fields_containers()
-nodal = ops.averaging.to_nodal_fc(merge)
-local_workflow.add_operators([merge, nodal])
-local_workflow.set_input_name("in1", merge, 0)
-local_workflow.set_input_name("in2", merge, 1)
-local_workflow.set_output_name("merged", nodal.outputs.fields_container)
-
-###############################################################################
-# Connect the workflows together and get the output
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-local_workflow.connect_with(workflow1, ("out1", "in1"))
-local_workflow.connect_with(workflow2, ("out2", "in2"))
-
-fc = local_workflow.get_output("merged", dpf.types.fields_container)
-fc[0].meshed_region.plot(fc[0])
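For reviewers comparing the two generations of this example: the deleted script
above wires results through named workflow ports (``set_output_name`` and
``connect_with``), while its replacement connects operators directly by pin
number. A condensed single-server sketch of the removed style, assembled only
from calls that appear verbatim in the deleted files:

.. code-block:: python

    from ansys.dpf import core as dpf
    from ansys.dpf.core import examples
    from ansys.dpf.core import operators as ops

    server = dpf.start_local_server(as_global=False)
    files = examples.download_distributed_files()

    # remote workflow publishes its result under the name "out1"
    remote_wf = dpf.Workflow(server=server)
    model = dpf.Model(files[0], server=server)
    stress = model.results.stress()
    remote_wf.add_operator(stress)
    remote_wf.set_output_name("out1", stress.outputs.fields_container)

    # local workflow exposes merge pin 0 under the name "in1"
    local_wf = dpf.Workflow()
    merge = ops.utility.merge_fields_containers()
    nodal = ops.averaging.to_nodal_fc(merge)
    local_wf.add_operators([merge, nodal])
    local_wf.set_input_name("in1", merge, 0)
    local_wf.set_output_name("merged", nodal.outputs.fields_container)

    # name-based wiring; the new example does merge.connect(0, stress, 0) instead
    local_wf.connect_with(remote_wf, ("out1", "in1"))
    fc = local_wf.get_output("merged", dpf.types.fields_container)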
diff --git a/docs/source/examples/06-distributed-post/02-distributed_workflows_on_remote.py.md5 b/docs/source/examples/06-distributed-post/02-distributed_workflows_on_remote.py.md5
deleted file mode 100644
index 5ff689e1757..00000000000
--- a/docs/source/examples/06-distributed-post/02-distributed_workflows_on_remote.py.md5
+++ /dev/null
@@ -1 +0,0 @@
-7c3207c194601b7b5ac43e2a9fea6ca6
\ No newline at end of file
diff --git a/docs/source/examples/06-distributed-post/02-distributed_workflows_on_remote.rst b/docs/source/examples/06-distributed-post/02-distributed_workflows_on_remote.rst
deleted file mode 100644
index f379a1fda9e..00000000000
--- a/docs/source/examples/06-distributed-post/02-distributed_workflows_on_remote.rst
+++ /dev/null
@@ -1,217 +0,0 @@
-
-.. DO NOT EDIT.
-.. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY.
-.. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE:
-.. "examples\06-distributed-post\02-distributed_workflows_on_remote.py"
-.. LINE NUMBERS ARE GIVEN BELOW.
-
-.. only:: html
-
- .. note::
- :class: sphx-glr-download-link-note
-
-      Click :ref:`here <sphx_glr_download_examples_06-distributed-post_02-distributed_workflows_on_remote.py>`
- to download the full example code
-
-.. rst-class:: sphx-glr-example-title
-
-.. _sphx_glr_examples_06-distributed-post_02-distributed_workflows_on_remote.py:
-
-
-.. _ref_distributed_workflows_on_remote:
-
-Connect workflows on different processes implicitly
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This example shows how distributed files can be read and post processed
-on distributed processes. After remote post processing,
-results a merged on the local process. In this example, different workflows are
-directly created on different servers. Those workflows are then connected
-together without having to care that they are on remote processes.
-
-.. GENERATED FROM PYTHON SOURCE LINES 14-15
-
-Import dpf module and its examples files
-
-.. GENERATED FROM PYTHON SOURCE LINES 15-20
-
-.. code-block:: default
-
-
- from ansys.dpf import core as dpf
- from ansys.dpf.core import examples
- from ansys.dpf.core import operators as ops
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 21-25
-
-Configure the servers
-~~~~~~~~~~~~~~~~~~~~~~
-To make this example easier, we will start local servers here,
-but we could get connected to any existing servers on the network.
-
-.. GENERATED FROM PYTHON SOURCE LINES 25-28
-
-.. code-block:: default
-
-
- remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 29-34
-
-Create template workflows on remote servers
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-For the purpose of this example, we will create 2 workflows computing
-elemental nodal stresses on different servers. The second workflow will
-multiply by 2.0 the stresses. A last workflow will merge the outputs
-
-.. GENERATED FROM PYTHON SOURCE LINES 34-37
-
-.. code-block:: default
-
-
- files = examples.download_distributed_files()
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 38-39
-
-first workflow S
-
-.. GENERATED FROM PYTHON SOURCE LINES 39-45
-
-.. code-block:: default
-
- workflow1 = dpf.Workflow(server=remote_servers[0])
- model = dpf.Model(files[0], server=remote_servers[0])
- stress1 = model.results.stress()
- workflow1.add_operator(stress1)
- workflow1.set_output_name("out1", stress1.outputs.fields_container)
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 46-47
-
-second workflow S*2.0
-
-.. GENERATED FROM PYTHON SOURCE LINES 47-54
-
-.. code-block:: default
-
- workflow2 = dpf.Workflow(server=remote_servers[1])
- model = dpf.Model(files[1], server=remote_servers[1])
- stress2 = model.results.stress()
- mul = stress2 * 2.0
- workflow2.add_operator(mul)
- workflow2.set_output_name("out2", mul.outputs.fields_container)
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 55-56
-
-third workflow merge
-
-.. GENERATED FROM PYTHON SOURCE LINES 56-64
-
-.. code-block:: default
-
- local_workflow = dpf.Workflow()
- merge = ops.utility.merge_fields_containers()
- nodal = ops.averaging.to_nodal_fc(merge)
- local_workflow.add_operators([merge, nodal])
- local_workflow.set_input_name("in1", merge, 0)
- local_workflow.set_input_name("in2", merge, 1)
- local_workflow.set_output_name("merged", nodal.outputs.fields_container)
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 65-67
-
-Connect the workflows together and get the output
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. GENERATED FROM PYTHON SOURCE LINES 67-72
-
-.. code-block:: default
-
- local_workflow.connect_with(workflow1, ("out1", "in1"))
- local_workflow.connect_with(workflow2, ("out2", "in2"))
-
- fc = local_workflow.get_output("merged", dpf.types.fields_container)
- fc[0].meshed_region.plot(fc[0])
-
-
-
-.. image-sg:: /examples/06-distributed-post/images/sphx_glr_02-distributed_workflows_on_remote_001.png
- :alt: 02 distributed workflows on remote
- :srcset: /examples/06-distributed-post/images/sphx_glr_02-distributed_workflows_on_remote_001.png
- :class: sphx-glr-single-img
-
-
-
-
-
-
-.. rst-class:: sphx-glr-timing
-
- **Total running time of the script:** ( 0 minutes 2.297 seconds)
-
-
-.. _sphx_glr_download_examples_06-distributed-post_02-distributed_workflows_on_remote.py:
-
-
-.. only :: html
-
- .. container:: sphx-glr-footer
- :class: sphx-glr-footer-example
-
-
-
- .. container:: sphx-glr-download sphx-glr-download-python
-
- :download:`Download Python source code: 02-distributed_workflows_on_remote.py <02-distributed_workflows_on_remote.py>`
-
-
-
- .. container:: sphx-glr-download sphx-glr-download-jupyter
-
- :download:`Download Jupyter notebook: 02-distributed_workflows_on_remote.ipynb <02-distributed_workflows_on_remote.ipynb>`
-
-
-.. only:: html
-
- .. rst-class:: sphx-glr-signature
-
-    `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_
diff --git a/docs/source/examples/06-distributed-post/02-distributed_workflows_on_remote_codeobj.pickle b/docs/source/examples/06-distributed-post/02-distributed_workflows_on_remote_codeobj.pickle
deleted file mode 100644
index 64a28ca6108..00000000000
Binary files a/docs/source/examples/06-distributed-post/02-distributed_workflows_on_remote_codeobj.pickle and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/02-operator-dep.dot b/docs/source/examples/06-distributed-post/02-operator-dep.dot
new file mode 100644
index 00000000000..9f0ae38443b
--- /dev/null
+++ b/docs/source/examples/06-distributed-post/02-operator-dep.dot
@@ -0,0 +1,51 @@
+digraph foo {
+ graph [pad="0", nodesep="0.3", ranksep="0.3"]
+ node [shape=box, style=filled, fillcolor="#ffcc00", margin="0"];
+ rankdir=LR;
+ splines=line;
+
+ disp01 [label="displacement"];
+ disp02 [label="displacement"];
+ mesh01 [label="mesh"];
+ mesh02 [label="mesh"];
+
+ subgraph cluster_1 {
+ ds01 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
+
+ disp01; mesh01;
+
+ ds01 -> disp01 [style=dashed];
+ ds01 -> mesh01 [style=dashed];
+
+ label="Server 1";
+ style=filled;
+ fillcolor=lightgrey;
+ }
+
+ subgraph cluster_2 {
+ ds02 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
+
+
+ disp02; mesh02;
+
+ ds02 -> disp02 [style=dashed];
+ ds02 -> mesh02 [style=dashed];
+
+ label="Server 2";
+ style=filled;
+ fillcolor=lightgrey;
+ }
+
+ disp01 -> "merge_fields";
+ mesh01 -> "merged_mesh";
+ disp02 -> "merge_fields";
+ mesh02 -> "merged_mesh";
+
+ ds03 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
+ ds03 -> "response" [style=dashed];
+
+ "merged_mesh" -> "response";
+ "response" -> "expansion";
+ "merge_fields" -> "expansion";
+ "expansion" -> "component";
+}
diff --git a/docs/source/examples/06-distributed-post/02-operator-dep.svg b/docs/source/examples/06-distributed-post/02-operator-dep.svg
new file mode 100644
index 00000000000..fd04964afca
--- /dev/null
+++ b/docs/source/examples/06-distributed-post/02-operator-dep.svg
@@ -0,0 +1,173 @@
[SVG markup omitted: 173 added lines]
diff --git a/docs/source/examples/06-distributed-post/03-distributed-msup_expansion.ipynb b/docs/source/examples/06-distributed-post/03-distributed-msup_expansion.ipynb
deleted file mode 100644
index 79aecc9e633..00000000000
--- a/docs/source/examples/06-distributed-post/03-distributed-msup_expansion.ipynb
+++ /dev/null
@@ -1,205 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "%matplotlib inline"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "\n\n# Distributed modal superposition\nThis example shows how distributed files can be read and expanded\non distributed processes. The modal basis (2 distributed files) is read\non 2 remote servers and the modal response reading and the expansion is\ndone on a third server.\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Import dpf module and its examples files\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "from ansys.dpf import core as dpf\nfrom ansys.dpf.core import examples\nfrom ansys.dpf.core import operators as ops"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Create the template workflow\nthis workflow will provide the modal basis and the mesh for each domain\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "template_workflow = dpf.Workflow()\ndisplacement = ops.result.displacement()\nmesh = ops.mesh.mesh_provider()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Add the operators to the template workflow and name its inputs and outputs\nOnce workflow's inputs and outputs are named, they can be connected later on\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "template_workflow.add_operators([displacement])\ntemplate_workflow.set_input_name(\"data_sources\", displacement.inputs.data_sources)\ntemplate_workflow.set_input_name(\"data_sources\", mesh.inputs.data_sources)\ntemplate_workflow.set_output_name(\"out\", displacement.outputs.fields_container)\ntemplate_workflow.set_output_name(\"outmesh\", mesh.outputs.mesh)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Configure the servers\nMake a list of ip addresses an port numbers on which dpf servers are\nstarted. Workflows instances will be created on each of those servers to\naddress each a different result file.\nIn this example, we will post process an analysis distributed in 2 files,\nwe will consequently require 2 remote processes\nTo make this example easier, we will start local servers here,\nbut we could get connected to any existing servers on the network.\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]\nips = [remote_server.ip for remote_server in remote_servers]\nports = [remote_server.port for remote_server in remote_servers]"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Print the ips and ports\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "print(\"ips:\", ips)\nprint(\"ports:\", ports)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Choose the file path\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "base_path = examples.distributed_msup_folder\nfiles = [base_path + r'/file0.mode', base_path + r'/file1.mode']\nfiles_aux = [base_path + r'/file0.rst', base_path + r'/file1.rst']"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Send workflows on servers\nHere we create new instances on the server by copies of the template workflow\nWe also connect the data sources to those workflows\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "remote_workflows = []\nfor i, server in enumerate(remote_servers):\n remote_workflows.append(template_workflow.create_on_other_server(server))\n ds = dpf.DataSources(files[i])\n ds.add_file_path(files_aux[i])\n remote_workflows[i].connect(\"data_sources\", ds)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Create a local workflow for expansion\nIn this workflow we merge the modal basis, the meshes, read the modal response\nand expand the modal response with the modal basis\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "local_workflow = dpf.Workflow()\nmerge = ops.utility.merge_fields_containers()\nmerge_mesh = ops.utility.merge_meshes()\n\nds = dpf.DataSources(base_path + r'/file_load_1.rfrq')\nresponse = ops.result.displacement(data_sources=ds)\nresponse.inputs.mesh(merge_mesh.outputs.merges_mesh)\n\nexpansion = ops.math.modal_superposition(solution_in_modal_space=response, modal_basis=merge)\ncomponent = ops.logic.component_selector_fc(expansion, 1)\n\nlocal_workflow.add_operators([merge, response, expansion, merge_mesh, component])\nlocal_workflow.set_input_name(\"in0\", merge, 0)\nlocal_workflow.set_input_name(\"in1\", merge, 1)\nlocal_workflow.set_input_name(\"inmesh0\", merge_mesh, 0)\nlocal_workflow.set_input_name(\"inmesh1\", merge_mesh, 1)\n\nlocal_workflow.set_output_name(\"expanded\", component.outputs.fields_container)\nlocal_workflow.set_output_name(\"mesh\", merge_mesh.outputs.merges_mesh)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Connect the workflows together and get the output\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "for i, server in enumerate(remote_servers):\n local_workflow.connect_with(remote_workflows[i],\n {\"out\": \"in\" + str(i), \"outmesh\": \"inmesh\" + str(i)})\n\nfc = local_workflow.get_output(\"expanded\", dpf.types.fields_container)\nmerged_mesh = local_workflow.get_output(\"mesh\", dpf.types.meshed_region)\nmerged_mesh.plot(fc.get_field_by_time_complex_ids(1, 0))\nmerged_mesh.plot(fc.get_field_by_time_complex_ids(10, 0))\nprint(fc)\n\ndpf.server.shutdown_all_session_servers()"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.8.5"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
\ No newline at end of file
diff --git a/docs/source/examples/06-distributed-post/03-distributed-msup_expansion.py b/docs/source/examples/06-distributed-post/03-distributed-msup_expansion.py
deleted file mode 100644
index 62d8eb164b7..00000000000
--- a/docs/source/examples/06-distributed-post/03-distributed-msup_expansion.py
+++ /dev/null
@@ -1,116 +0,0 @@
-"""
-.. _ref_distributed_msup:
-
-Distributed modal superposition
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This example shows how distributed files can be read and expanded
-on distributed processes. The modal basis (2 distributed files) is read
-on 2 remote servers and the modal response reading and the expansion is
-done on a third server.
-"""
-
-###############################################################################
-# Import dpf module and its examples files
-
-from ansys.dpf import core as dpf
-from ansys.dpf.core import examples
-from ansys.dpf.core import operators as ops
-
-###############################################################################
-# Create the template workflow
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# this workflow will provide the modal basis and the mesh for each domain
-
-template_workflow = dpf.Workflow()
-displacement = ops.result.displacement()
-mesh = ops.mesh.mesh_provider()
-
-###############################################################################
-# Add the operators to the template workflow and name its inputs and outputs
-# Once workflow's inputs and outputs are named, they can be connected later on
-template_workflow.add_operators([displacement])
-template_workflow.set_input_name("data_sources", displacement.inputs.data_sources)
-template_workflow.set_input_name("data_sources", mesh.inputs.data_sources)
-template_workflow.set_output_name("out", displacement.outputs.fields_container)
-template_workflow.set_output_name("outmesh", mesh.outputs.mesh)
-
-###############################################################################
-# Configure the servers
-# ~~~~~~~~~~~~~~~~~~~~~~
-# Make a list of ip addresses an port numbers on which dpf servers are
-# started. Workflows instances will be created on each of those servers to
-# address each a different result file.
-# In this example, we will post process an analysis distributed in 2 files,
-# we will consequently require 2 remote processes
-# To make this example easier, we will start local servers here,
-# but we could get connected to any existing servers on the network.
-
-remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]
-ips = [remote_server.ip for remote_server in remote_servers]
-ports = [remote_server.port for remote_server in remote_servers]
-
-###############################################################################
-# Print the ips and ports
-print("ips:", ips)
-print("ports:", ports)
-
-###############################################################################
-# Choose the file path
-
-base_path = examples.distributed_msup_folder
-files = [base_path + r'/file0.mode', base_path + r'/file1.mode']
-files_aux = [base_path + r'/file0.rst', base_path + r'/file1.rst']
-
-###############################################################################
-# Send workflows on servers
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~
-# Here we create new instances on the server by copies of the template workflow
-# We also connect the data sources to those workflows
-remote_workflows = []
-for i, server in enumerate(remote_servers):
- remote_workflows.append(template_workflow.create_on_other_server(server))
- ds = dpf.DataSources(files[i])
- ds.add_file_path(files_aux[i])
- remote_workflows[i].connect("data_sources", ds)
-
-###############################################################################
-# Create a local workflow for expansion
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# In this workflow we merge the modal basis, the meshes, read the modal response
-# and expand the modal response with the modal basis
-
-local_workflow = dpf.Workflow()
-merge = ops.utility.merge_fields_containers()
-merge_mesh = ops.utility.merge_meshes()
-
-ds = dpf.DataSources(base_path + r'/file_load_1.rfrq')
-response = ops.result.displacement(data_sources=ds)
-response.inputs.mesh(merge_mesh.outputs.merges_mesh)
-
-expansion = ops.math.modal_superposition(solution_in_modal_space=response, modal_basis=merge)
-component = ops.logic.component_selector_fc(expansion, 1)
-
-local_workflow.add_operators([merge, response, expansion, merge_mesh, component])
-local_workflow.set_input_name("in0", merge, 0)
-local_workflow.set_input_name("in1", merge, 1)
-local_workflow.set_input_name("inmesh0", merge_mesh, 0)
-local_workflow.set_input_name("inmesh1", merge_mesh, 1)
-
-local_workflow.set_output_name("expanded", component.outputs.fields_container)
-local_workflow.set_output_name("mesh", merge_mesh.outputs.merges_mesh)
-
-###############################################################################
-# Connect the workflows together and get the output
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-for i, server in enumerate(remote_servers):
- local_workflow.connect_with(remote_workflows[i],
- {"out": "in" + str(i), "outmesh": "inmesh" + str(i)})
-
-fc = local_workflow.get_output("expanded", dpf.types.fields_container)
-merged_mesh = local_workflow.get_output("mesh", dpf.types.meshed_region)
-merged_mesh.plot(fc.get_field_by_time_complex_ids(1, 0))
-merged_mesh.plot(fc.get_field_by_time_complex_ids(10, 0))
-print(fc)
-
-dpf.server.shutdown_all_session_servers()
diff --git a/docs/source/examples/06-distributed-post/03-distributed-msup_expansion.py.md5 b/docs/source/examples/06-distributed-post/03-distributed-msup_expansion.py.md5
deleted file mode 100644
index 6eabf723147..00000000000
--- a/docs/source/examples/06-distributed-post/03-distributed-msup_expansion.py.md5
+++ /dev/null
@@ -1 +0,0 @@
-2d080bc6422913e6d798c13996eeb74b
\ No newline at end of file
diff --git a/docs/source/examples/06-distributed-post/03-distributed-msup_expansion_codeobj.pickle b/docs/source/examples/06-distributed-post/03-distributed-msup_expansion_codeobj.pickle
deleted file mode 100644
index 1d33a71391c..00000000000
Binary files a/docs/source/examples/06-distributed-post/03-distributed-msup_expansion_codeobj.pickle and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/03-distributed-msup_expansion_steps.ipynb b/docs/source/examples/06-distributed-post/03-distributed-msup_expansion_steps.ipynb
new file mode 100644
index 00000000000..6bf7ee588ca
--- /dev/null
+++ b/docs/source/examples/06-distributed-post/03-distributed-msup_expansion_steps.ipynb
@@ -0,0 +1,169 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "%matplotlib inline"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Distributed msup distributed modal response {#ref_distributed_msup_steps}\r\n\r\nThis example shows how distributed files can be read and expanded on\r\ndistributed processes. The modal basis (2 distributed files) is read on\r\n2 remote servers and the modal response (2 distributed files) reading\r\nand the expansion is done on a third server.\r\n\r\nTo help understand this example the following diagram is provided. It\r\nshows the operator chain used to compute the final result.\r\n\r\n{.align-center width=\"800px\"}\r\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Import dpf module and its examples files.\r\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "import os.path\n\nfrom ansys.dpf import core as dpf\nfrom ansys.dpf.core import examples\nfrom ansys.dpf.core import operators as ops"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Configure the servers\r\n\r\nMake a list of ip addresses and port numbers on which dpf servers are\r\nstarted. Operator instances will be created on each of those servers to\r\naddress each a different result file. In this example, we will post\r\nprocess an analysis distributed in 2 files, we will consequently require\r\n2 remote processes To make this example easier, we will start local\r\nservers here, but we could get connected to any existing servers on the\r\nnetwork.\r\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]\nips = [remote_server.ip for remote_server in remote_servers]\nports = [remote_server.port for remote_server in remote_servers]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Print the ips and ports.\r\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "print(\"ips:\", ips)\nprint(\"ports:\", ports)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Choose the file path.\r\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "base_path = examples.distributed_msup_folder\nfiles = [os.path.join(base_path, \"file0.mode\"), os.path.join(base_path, \"file1.mode\")]\nfiles_aux = [os.path.join(base_path, \"file0.rst\"), os.path.join(base_path, \"file1.rst\")]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Create the operators on the servers\r\n\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~ On each server we\r\ncreate two new operators, one for \\'displacement\\' computations and a\r\n\\'mesh_provider\\' operator, and then define their data sources. The\r\ndisplacement and mesh_provider operators receive data from their\r\nrespective data files on each server.\r\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "remote_displacement_operators = []\nremote_mesh_operators = []\nfor i, server in enumerate(remote_servers):\n displacement = ops.result.displacement(server=server)\n mesh = ops.mesh.mesh_provider(server=server)\n remote_displacement_operators.append(displacement)\n remote_mesh_operators.append(mesh)\n ds = dpf.DataSources(files[i], server=server)\n ds.add_file_path(files_aux[i])\n displacement.inputs.data_sources(ds)\n mesh.inputs.data_sources(ds)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Create a local operators chain for expansion\r\n\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\r\nIn the follwing series of operators we merge the modal basis, the\r\nmeshes, read the modal response and expand the modal response with the\r\nmodal basis.\r\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "merge_fields = ops.utility.merge_fields_containers()\nmerge_mesh = ops.utility.merge_meshes()\n\nds = dpf.DataSources(os.path.join(base_path, \"file_load_1.rfrq\"))\nresponse = ops.result.displacement(data_sources=ds)\nresponse.inputs.mesh(merge_mesh.outputs.merges_mesh)\n\nds = dpf.DataSources(os.path.join(base_path, \"file_load_2.rfrq\"))\nfrom os import walk\n\nfor (dirpath, dirnames, filenames) in walk(base_path):\n print(filenames)\nresponse2 = ops.result.displacement(data_sources=ds)\nresponse2fc = response2.outputs.fields_container()\nresponse2fc.time_freq_support.time_frequencies.scoping.set_id(0, 2)\n\nmerge_use_pass = ops.utility.merge_fields_containers()\nmerge_use_pass.inputs.fields_containers1(response)\nmerge_use_pass.inputs.fields_containers2(response2fc)\n\nexpansion = ops.math.modal_superposition(\n solution_in_modal_space=merge_use_pass,\n modal_basis=merge_fields\n )\ncomponent = ops.logic.component_selector_fc(expansion, 1)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Connect the operator chains together and get the output\r\n\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\\~\r\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
+ "source": [
+ "for i, server in enumerate(remote_servers):\n merge_fields.connect(i, remote_displacement_operators[i], 0)\n merge_mesh.connect(i, remote_mesh_operators[i], 0)\n\nfc = component.get_output(0, dpf.types.fields_container)\nmerged_mesh = merge_mesh.get_output(0, dpf.types.meshed_region)\n\nmerged_mesh.plot(fc.get_field_by_time_complex_ids(1, 0))\nmerged_mesh.plot(fc.get_field_by_time_complex_ids(20, 0))\nprint(fc)"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.9.0"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
\ No newline at end of file
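The "Configure the servers" cell above points out that instead of starting local servers, the example could attach to DPF servers already running on the network. A minimal sketch of that variant, assuming servers are already listening at the (hypothetical) addresses below and using the `connect_to_server` entry point of pydpf-core:

    from ansys.dpf import core as dpf

    # Hypothetical addresses; replace with the machines actually running DPF.
    remote_servers = [
        dpf.connect_to_server(ip="192.168.0.10", port=50054, as_global=False),
        dpf.connect_to_server(ip="192.168.0.11", port=50054, as_global=False),
    ]
    print("ips:", [server.ip for server in remote_servers])
    print("ports:", [server.port for server in remote_servers])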
diff --git a/examples/06-distributed-post/04-distributed-msup_expansion_steps.py b/docs/source/examples/06-distributed-post/03-distributed-msup_expansion_steps.py
similarity index 53%
rename from examples/06-distributed-post/04-distributed-msup_expansion_steps.py
rename to docs/source/examples/06-distributed-post/03-distributed-msup_expansion_steps.py
index dce7bd8ac64..1fac648a2f8 100644
--- a/examples/06-distributed-post/04-distributed-msup_expansion_steps.py
+++ b/docs/source/examples/06-distributed-post/03-distributed-msup_expansion_steps.py
@@ -8,81 +8,73 @@
on 2 remote servers and the modal response (2 distributed files) reading and the expansion is
done on a third server.
+To help understand this example, the following diagram is provided. It shows
+the operator chain used to compute the final result.
+
+.. image:: 03-operator-dep.svg
+ :align: center
+ :width: 800
"""
###############################################################################
-# Import dpf module and its examples files
+# Import the dpf module and its example files.
import os.path
from ansys.dpf import core as dpf
from ansys.dpf.core import examples
from ansys.dpf.core import operators as ops
-###############################################################################
-# Create the template workflow
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# this workflow will provide the modal basis and the mesh for each domain
-
-template_workflow = dpf.Workflow()
-displacement = ops.result.displacement()
-mesh = ops.mesh.mesh_provider()
-
-###############################################################################
-# Add the operators to the template workflow and name its inputs and outputs
-# Once workflow's inputs and outputs are named, they can be connected later on
-template_workflow.add_operators([displacement])
-template_workflow.set_input_name("data_sources", displacement.inputs.data_sources)
-template_workflow.set_input_name("data_sources", mesh.inputs.data_sources)
-template_workflow.set_output_name("out", displacement.outputs.fields_container)
-template_workflow.set_output_name("outmesh", mesh.outputs.mesh)
-
###############################################################################
# Configure the servers
# ~~~~~~~~~~~~~~~~~~~~~~
-# Make a list of ip addresses an port numbers on which dpf servers are
-# started. Workflows instances will be created on each of those servers to
+# Make a list of ip addresses and port numbers on which dpf servers are
+# started. Operator instances will be created on each of those servers to
# address each a different result file.
# In this example, we will post process an analysis distributed in 2 files,
# we will consequently require 2 remote processes
# To make this example easier, we will start local servers here,
# but we could get connected to any existing servers on the network.
-
remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]
ips = [remote_server.ip for remote_server in remote_servers]
ports = [remote_server.port for remote_server in remote_servers]
###############################################################################
-# Print the ips and ports
+# Print the ips and ports.
print("ips:", ips)
print("ports:", ports)
###############################################################################
-# Choose the file path
+# Choose the file path.
base_path = examples.distributed_msup_folder
files = [os.path.join(base_path, "file0.mode"), os.path.join(base_path, "file1.mode")]
files_aux = [os.path.join(base_path, "file0.rst"), os.path.join(base_path, "file1.rst")]
###############################################################################
-# Send workflows on servers
+# Create the operators on the servers
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
-# Here we create new instances on the server by copies of the template workflow
-# We also connect the data sources to those workflows
-remote_workflows = []
+# On each server we create two new operators, one for 'displacement' computations
+# and a 'mesh_provider' operator, and then define their data sources. The displacement
+# and mesh_provider operators receive data from their respective data files on each server.
+remote_displacement_operators = []
+remote_mesh_operators = []
for i, server in enumerate(remote_servers):
- remote_workflows.append(template_workflow.create_on_other_server(server))
- ds = dpf.DataSources(files[i])
+ displacement = ops.result.displacement(server=server)
+ mesh = ops.mesh.mesh_provider(server=server)
+ remote_displacement_operators.append(displacement)
+ remote_mesh_operators.append(mesh)
+ ds = dpf.DataSources(files[i], server=server)
ds.add_file_path(files_aux[i])
- remote_workflows[i].connect("data_sources", ds)
+ displacement.inputs.data_sources(ds)
+ mesh.inputs.data_sources(ds)
###############################################################################
-# Create a local workflow for expansion
+# Create a local operator chain for expansion
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# In this workflow we merge the modal basis, the meshes, read the modal response
-# and expand the modal response with the modal basis
+# In the following series of operators we merge the modal basis, the meshes, read
+# the modal response and expand the modal response with the modal basis.
-local_workflow = dpf.Workflow()
-merge = ops.utility.merge_fields_containers()
+merge_fields = ops.utility.merge_fields_containers()
merge_mesh = ops.utility.merge_meshes()
ds = dpf.DataSources(os.path.join(base_path, "file_load_1.rfrq"))
@@ -102,29 +94,22 @@
merge_use_pass.inputs.fields_containers1(response)
merge_use_pass.inputs.fields_containers2(response2fc)
-expansion = ops.math.modal_superposition(solution_in_modal_space=merge_use_pass, modal_basis=merge)
+expansion = ops.math.modal_superposition(
+ solution_in_modal_space=merge_use_pass,
+ modal_basis=merge_fields
+ )
component = ops.logic.component_selector_fc(expansion, 1)
-local_workflow.add_operators([merge, merge_use_pass, expansion, merge_mesh, component])
-local_workflow.set_input_name("in0", merge, 0)
-local_workflow.set_input_name("in1", merge, 1)
-local_workflow.set_input_name("inmesh0", merge_mesh, 0)
-local_workflow.set_input_name("inmesh1", merge_mesh, 1)
-
-local_workflow.set_output_name("expanded", component.outputs.fields_container)
-local_workflow.set_output_name("mesh", merge_mesh.outputs.merges_mesh)
-
###############################################################################
-# Connect the workflows together and get the output
+# Connect the operator chains together and get the output
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
for i, server in enumerate(remote_servers):
- local_workflow.connect_with(remote_workflows[i],
- {"out": "in" + str(i), "outmesh": "inmesh" + str(i)})
+ merge_fields.connect(i, remote_displacement_operators[i], 0)
+ merge_mesh.connect(i, remote_mesh_operators[i], 0)
+
+fc = component.get_output(0, dpf.types.fields_container)
+merged_mesh = merge_mesh.get_output(0, dpf.types.meshed_region)
-fc = local_workflow.get_output("expanded", dpf.types.fields_container)
-merged_mesh = local_workflow.get_output("mesh", dpf.types.meshed_region)
merged_mesh.plot(fc.get_field_by_time_complex_ids(1, 0))
merged_mesh.plot(fc.get_field_by_time_complex_ids(20, 0))
print(fc)
-dpf.server.shutdown_all_session_servers()
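A note on the pin-based API that replaces the named workflow inputs in this script: `Operator.connect(input_pin, source, source_output_pin)` wires data or another operator into an input pin, and nothing executes until `get_output` requests a result. A self-contained sketch of the same merge pattern on a single local server; the scalar fields and the `forward_fields_container` pass-through operator are stand-ins invented for this sketch:

    from ansys.dpf import core as dpf
    from ansys.dpf.core import operators as ops

    def make_fc(value):
        # One-field container standing in for one server's displacement result.
        field = dpf.fields_factory.create_scalar_field(num_entities=1)
        field.append([value], 1)  # one scalar attached to node id 1
        return dpf.fields_container_factory.over_time_freq_fields_container([field])

    fwd = ops.utility.forward_fields_container()
    fwd.connect(0, make_fc(2.0))  # data can be connected to a pin directly

    merge = ops.utility.merge_fields_containers()
    merge.connect(0, make_fc(1.0))  # plain data into input pin 0
    merge.connect(1, fwd, 0)  # output pin 0 of fwd into input pin 1
    merged = merge.get_output(0, dpf.types.fields_container)  # evaluation happens here
    print(merged)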
diff --git a/docs/source/examples/06-distributed-post/03-distributed-msup_expansion_steps.py.md5 b/docs/source/examples/06-distributed-post/03-distributed-msup_expansion_steps.py.md5
new file mode 100644
index 00000000000..b5804f9036d
--- /dev/null
+++ b/docs/source/examples/06-distributed-post/03-distributed-msup_expansion_steps.py.md5
@@ -0,0 +1 @@
+4ae2eb1e2d6070aaa836f8e696bc753c
\ No newline at end of file
diff --git a/docs/source/examples/06-distributed-post/04-distributed-msup_expansion_steps.rst b/docs/source/examples/06-distributed-post/03-distributed-msup_expansion_steps.rst
similarity index 64%
rename from docs/source/examples/06-distributed-post/04-distributed-msup_expansion_steps.rst
rename to docs/source/examples/06-distributed-post/03-distributed-msup_expansion_steps.rst
index 2e3605ae04b..c0ef74bcdef 100644
--- a/docs/source/examples/06-distributed-post/04-distributed-msup_expansion_steps.rst
+++ b/docs/source/examples/06-distributed-post/03-distributed-msup_expansion_steps.rst
@@ -2,7 +2,7 @@
.. DO NOT EDIT.
.. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY.
.. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE:
-.. "examples\06-distributed-post\04-distributed-msup_expansion_steps.py"
+.. "examples\06-distributed-post\03-distributed-msup_expansion_steps.py"
.. LINE NUMBERS ARE GIVEN BELOW.
.. only:: html
@@ -10,12 +10,12 @@
.. note::
:class: sphx-glr-download-link-note
- Click :ref:`here `
+ Click :ref:`here `
to download the full example code
.. rst-class:: sphx-glr-example-title
-.. _sphx_glr_examples_06-distributed-post_04-distributed-msup_expansion_steps.py:
+.. _sphx_glr_examples_06-distributed-post_03-distributed-msup_expansion_steps.py:
.. _ref_distributed_msup_steps:
@@ -27,11 +27,18 @@ on distributed processes. The modal basis (2 distributed files) is read
on 2 remote servers and the modal response (2 distributed files) reading and the expansion is
done on a third server.
-.. GENERATED FROM PYTHON SOURCE LINES 14-15
+To help understand this example, the following diagram is provided. It shows
+the operator chain used to compute the final result.
-Import dpf module and its examples files
+.. image:: 03-operator-dep.svg
+ :align: center
+ :width: 800
-.. GENERATED FROM PYTHON SOURCE LINES 15-21
+.. GENERATED FROM PYTHON SOURCE LINES 20-21
+
+Import the dpf module and its example files.
+
+.. GENERATED FROM PYTHON SOURCE LINES 21-27
.. code-block:: default
@@ -48,67 +55,22 @@ Import dpf module and its examples files
-.. GENERATED FROM PYTHON SOURCE LINES 22-25
-
-Create the template workflow
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-this workflow will provide the modal basis and the mesh for each domain
-
-.. GENERATED FROM PYTHON SOURCE LINES 25-30
-
-.. code-block:: default
-
-
- template_workflow = dpf.Workflow()
- displacement = ops.result.displacement()
- mesh = ops.mesh.mesh_provider()
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 31-33
-
-Add the operators to the template workflow and name its inputs and outputs
-Once workflow's inputs and outputs are named, they can be connected later on
-
-.. GENERATED FROM PYTHON SOURCE LINES 33-39
-
-.. code-block:: default
-
- template_workflow.add_operators([displacement])
- template_workflow.set_input_name("data_sources", displacement.inputs.data_sources)
- template_workflow.set_input_name("data_sources", mesh.inputs.data_sources)
- template_workflow.set_output_name("out", displacement.outputs.fields_container)
- template_workflow.set_output_name("outmesh", mesh.outputs.mesh)
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 40-49
+.. GENERATED FROM PYTHON SOURCE LINES 28-37
Configure the servers
~~~~~~~~~~~~~~~~~~~~~~
-Make a list of ip addresses an port numbers on which dpf servers are
-started. Workflows instances will be created on each of those servers to
+Make a list of ip addresses and port numbers on which dpf servers are
+started. Operator instances will be created on each of those servers to
address each a different result file.
In this example, we will post process an analysis distributed in 2 files,
we will consequently require 2 remote processes
To make this example easier, we will start local servers here,
but we could get connected to any existing servers on the network.
-.. GENERATED FROM PYTHON SOURCE LINES 49-54
+.. GENERATED FROM PYTHON SOURCE LINES 37-41
.. code-block:: default
-
remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]
ips = [remote_server.ip for remote_server in remote_servers]
ports = [remote_server.port for remote_server in remote_servers]
@@ -120,11 +82,11 @@ but we could get connected to any existing servers on the network.
-.. GENERATED FROM PYTHON SOURCE LINES 55-56
+.. GENERATED FROM PYTHON SOURCE LINES 42-43
-Print the ips and ports
+Print the ips and ports.
-.. GENERATED FROM PYTHON SOURCE LINES 56-59
+.. GENERATED FROM PYTHON SOURCE LINES 43-46
.. code-block:: default
@@ -142,16 +104,16 @@ Print the ips and ports
.. code-block:: none
ips: ['127.0.0.1', '127.0.0.1']
- ports: [50058, 50059]
+ ports: [50054, 50055]
-.. GENERATED FROM PYTHON SOURCE LINES 60-61
+.. GENERATED FROM PYTHON SOURCE LINES 47-48
-Choose the file path
+Choose the file path.
-.. GENERATED FROM PYTHON SOURCE LINES 61-66
+.. GENERATED FROM PYTHON SOURCE LINES 48-53
.. code-block:: default
@@ -167,23 +129,29 @@ Choose the file path
-.. GENERATED FROM PYTHON SOURCE LINES 67-71
+.. GENERATED FROM PYTHON SOURCE LINES 54-59
-Send workflows on servers
+Create the operators on the servers
~~~~~~~~~~~~~~~~~~~~~~~~~~
-Here we create new instances on the server by copies of the template workflow
-We also connect the data sources to those workflows
+On each server we create two new operators, one for 'displacement' computations
+and a 'mesh_provider' operator, and then define their data sources. The displacement
+and mesh_provider operators receive data from their respective data files on each server.
-.. GENERATED FROM PYTHON SOURCE LINES 71-78
+.. GENERATED FROM PYTHON SOURCE LINES 59-71
.. code-block:: default
- remote_workflows = []
+ remote_displacement_operators = []
+ remote_mesh_operators = []
for i, server in enumerate(remote_servers):
- remote_workflows.append(template_workflow.create_on_other_server(server))
- ds = dpf.DataSources(files[i])
+ displacement = ops.result.displacement(server=server)
+ mesh = ops.mesh.mesh_provider(server=server)
+ remote_displacement_operators.append(displacement)
+ remote_mesh_operators.append(mesh)
+ ds = dpf.DataSources(files[i], server=server)
ds.add_file_path(files_aux[i])
- remote_workflows[i].connect("data_sources", ds)
+ displacement.inputs.data_sources(ds)
+ mesh.inputs.data_sources(ds)
@@ -192,20 +160,19 @@ We also connect the data sources to those workflows
-.. GENERATED FROM PYTHON SOURCE LINES 79-83
+.. GENERATED FROM PYTHON SOURCE LINES 72-76
-Create a local workflow for expansion
+Create a local operator chain for expansion
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-In this workflow we merge the modal basis, the meshes, read the modal response
-and expand the modal response with the modal basis
+In the following series of operators we merge the modal basis, the meshes, read
+the modal response and expand the modal response with the modal basis.
-.. GENERATED FROM PYTHON SOURCE LINES 83-117
+.. GENERATED FROM PYTHON SOURCE LINES 76-103
.. code-block:: default
- local_workflow = dpf.Workflow()
- merge = ops.utility.merge_fields_containers()
+ merge_fields = ops.utility.merge_fields_containers()
merge_mesh = ops.utility.merge_meshes()
ds = dpf.DataSources(os.path.join(base_path, "file_load_1.rfrq"))
@@ -225,18 +192,12 @@ and expand the modal response with the modal basis
merge_use_pass.inputs.fields_containers1(response)
merge_use_pass.inputs.fields_containers2(response2fc)
- expansion = ops.math.modal_superposition(solution_in_modal_space=merge_use_pass, modal_basis=merge)
+ expansion = ops.math.modal_superposition(
+ solution_in_modal_space=merge_use_pass,
+ modal_basis=merge_fields
+ )
component = ops.logic.component_selector_fc(expansion, 1)
- local_workflow.add_operators([merge, merge_use_pass, expansion, merge_mesh, component])
- local_workflow.set_input_name("in0", merge, 0)
- local_workflow.set_input_name("in1", merge, 1)
- local_workflow.set_input_name("inmesh0", merge_mesh, 0)
- local_workflow.set_input_name("inmesh1", merge_mesh, 1)
-
- local_workflow.set_output_name("expanded", component.outputs.fields_container)
- local_workflow.set_output_name("mesh", merge_mesh.outputs.merges_mesh)
-
@@ -252,26 +213,25 @@ and expand the modal response with the modal basis
-.. GENERATED FROM PYTHON SOURCE LINES 118-120
+.. GENERATED FROM PYTHON SOURCE LINES 104-106
-Connect the workflows together and get the output
+Connect the operator chains together and get the output
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. GENERATED FROM PYTHON SOURCE LINES 120-131
+.. GENERATED FROM PYTHON SOURCE LINES 106-116
.. code-block:: default
-
for i, server in enumerate(remote_servers):
- local_workflow.connect_with(remote_workflows[i],
- {"out": "in" + str(i), "outmesh": "inmesh" + str(i)})
+ merge_fields.connect(i, remote_displacement_operators[i], 0)
+ merge_mesh.connect(i, remote_mesh_operators[i], 0)
+
+ fc = component.get_output(0, dpf.types.fields_container)
+ merged_mesh = merge_mesh.get_output(0, dpf.types.meshed_region)
- fc = local_workflow.get_output("expanded", dpf.types.fields_container)
- merged_mesh = local_workflow.get_output("mesh", dpf.types.meshed_region)
merged_mesh.plot(fc.get_field_by_time_complex_ids(1, 0))
merged_mesh.plot(fc.get_field_by_time_complex_ids(20, 0))
print(fc)
- dpf.server.shutdown_all_session_servers()
@@ -280,16 +240,16 @@ Connect the workflows together and get the output
*
- .. image-sg:: /examples/06-distributed-post/images/sphx_glr_04-distributed-msup_expansion_steps_001.png
- :alt: 04 distributed msup expansion steps
- :srcset: /examples/06-distributed-post/images/sphx_glr_04-distributed-msup_expansion_steps_001.png
+ .. image-sg:: /examples/06-distributed-post/images/sphx_glr_03-distributed-msup_expansion_steps_001.png
+ :alt: 03 distributed msup expansion steps
+ :srcset: /examples/06-distributed-post/images/sphx_glr_03-distributed-msup_expansion_steps_001.png
:class: sphx-glr-multi-img
*
- .. image-sg:: /examples/06-distributed-post/images/sphx_glr_04-distributed-msup_expansion_steps_002.png
- :alt: 04 distributed msup expansion steps
- :srcset: /examples/06-distributed-post/images/sphx_glr_04-distributed-msup_expansion_steps_002.png
+ .. image-sg:: /examples/06-distributed-post/images/sphx_glr_03-distributed-msup_expansion_steps_002.png
+ :alt: 03 distributed msup expansion steps
+ :srcset: /examples/06-distributed-post/images/sphx_glr_03-distributed-msup_expansion_steps_002.png
:class: sphx-glr-multi-img
@@ -345,9 +305,6 @@ Connect the workflows together and get the output
- field 38 {complex: 0, time: 20} with Nodal location, 1 components and 1065 entities.
- field 39 {complex: 1, time: 20} with Nodal location, 1 components and 1065 entities.
- ("'NoneType' object has no attribute 'shutdown'",)
- ("'NoneType' object has no attribute 'shutdown'",)
- ("'NoneType' object has no attribute 'shutdown'",)
@@ -355,10 +312,10 @@ Connect the workflows together and get the output
.. rst-class:: sphx-glr-timing
- **Total running time of the script:** ( 0 minutes 7.266 seconds)
+ **Total running time of the script:** ( 0 minutes 2.743 seconds)
-.. _sphx_glr_download_examples_06-distributed-post_04-distributed-msup_expansion_steps.py:
+.. _sphx_glr_download_examples_06-distributed-post_03-distributed-msup_expansion_steps.py:
.. only :: html
@@ -370,13 +327,13 @@ Connect the workflows together and get the output
.. container:: sphx-glr-download sphx-glr-download-python
- :download:`Download Python source code: 04-distributed-msup_expansion_steps.py <04-distributed-msup_expansion_steps.py>`
+ :download:`Download Python source code: 03-distributed-msup_expansion_steps.py <03-distributed-msup_expansion_steps.py>`
.. container:: sphx-glr-download sphx-glr-download-jupyter
- :download:`Download Jupyter notebook: 04-distributed-msup_expansion_steps.ipynb <04-distributed-msup_expansion_steps.ipynb>`
+ :download:`Download Jupyter notebook: 03-distributed-msup_expansion_steps.ipynb <03-distributed-msup_expansion_steps.ipynb>`
.. only:: html
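One detail worth noting in the output listing above: the expanded container is labeled on both `time` and `complex`, where `complex: 0` holds the real part and `complex: 1` the imaginary part of each harmonic field. A short sketch, assuming the `fc` computed by this example, of combining the two parts at one time id into an amplitude:

    import numpy as np

    real = fc.get_field_by_time_complex_ids(20, 0)  # {complex: 0, time: 20}
    imag = fc.get_field_by_time_complex_ids(20, 1)  # {complex: 1, time: 20}
    amplitude = np.sqrt(np.asarray(real.data) ** 2 + np.asarray(imag.data) ** 2)
    print(amplitude.max())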
diff --git a/docs/source/examples/06-distributed-post/03-distributed-msup_expansion_steps_codeobj.pickle b/docs/source/examples/06-distributed-post/03-distributed-msup_expansion_steps_codeobj.pickle
new file mode 100644
index 00000000000..67e585d9507
Binary files /dev/null and b/docs/source/examples/06-distributed-post/03-distributed-msup_expansion_steps_codeobj.pickle differ
diff --git a/docs/source/examples/06-distributed-post/03-operator-dep.dot b/docs/source/examples/06-distributed-post/03-operator-dep.dot
new file mode 100644
index 00000000000..ce9f093990f
--- /dev/null
+++ b/docs/source/examples/06-distributed-post/03-operator-dep.dot
@@ -0,0 +1,54 @@
+digraph foo {
+ graph [pad="0", nodesep="0.3", ranksep="0.3"]
+ node [shape=box, style=filled, fillcolor="#ffcc00", margin="0"];
+ rankdir=LR;
+ splines=line;
+
+ disp01 [label="displacement"];
+ disp02 [label="displacement"];
+ mesh01 [label="mesh"];
+ mesh02 [label="mesh"];
+
+ subgraph cluster_1 {
+ ds01 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
+
+ disp01; mesh01;
+
+ ds01 -> disp01 [style=dashed];
+ ds01 -> mesh01 [style=dashed];
+
+ label="Server 1";
+ style=filled;
+ fillcolor=lightgrey;
+ }
+
+ subgraph cluster_2 {
+ ds02 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
+
+ disp02; mesh02;
+
+ ds02 -> disp02 [style=dashed];
+ ds02 -> mesh02 [style=dashed];
+
+ label="Server 2";
+ style=filled;
+ fillcolor=lightgrey;
+ }
+
+ disp01 -> "merge_fields";
+ mesh01 -> "merge_mesh";
+ disp02 -> "merge_fields";
+ mesh02 -> "merge_mesh";
+
+ ds03 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
+ ds03 -> "response2" [style=dashed];
+ ds04 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
+ ds04 -> "response" [style=dashed];
+
+ "merge_mesh" -> "response";
+ "response" -> "merge_use_pass";
+ "response2" -> "merge_use_pass";
+ "merge_use_pass" -> "expansion";
+ "merge_fields" -> "expansion";
+ "expansion" -> "component";
+}
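The `03-operator-dep.svg` image referenced by the example (added below) can be regenerated from this `.dot` source. A sketch using the third-party `graphviz` Python package, which is an assumption here; the image may equally have been produced with the plain `dot -Tsvg` CLI:

    from graphviz import Source  # pip install graphviz; needs the Graphviz binaries

    with open("03-operator-dep.dot") as f:
        Source(f.read()).render("03-operator-dep", format="svg", cleanup=True)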
diff --git a/docs/source/examples/06-distributed-post/03-operator-dep.svg b/docs/source/examples/06-distributed-post/03-operator-dep.svg
new file mode 100644
index 00000000000..00f50ac766d
--- /dev/null
+++ b/docs/source/examples/06-distributed-post/03-operator-dep.svg
@@ -0,0 +1,209 @@
+
+
+
+
+
diff --git a/docs/source/examples/06-distributed-post/04-distributed-msup_expansion_steps.ipynb b/docs/source/examples/06-distributed-post/04-distributed-msup_expansion_steps.ipynb
deleted file mode 100644
index c17ec070b67..00000000000
--- a/docs/source/examples/06-distributed-post/04-distributed-msup_expansion_steps.ipynb
+++ /dev/null
@@ -1,205 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "%matplotlib inline"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "\n\n# Distributed msup distributed modal response\nThis example shows how distributed files can be read and expanded\non distributed processes. The modal basis (2 distributed files) is read\non 2 remote servers and the modal response (2 distributed files) reading and the expansion is\ndone on a third server.\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Import dpf module and its examples files\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "import os.path\n\nfrom ansys.dpf import core as dpf\nfrom ansys.dpf.core import examples\nfrom ansys.dpf.core import operators as ops"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Create the template workflow\nthis workflow will provide the modal basis and the mesh for each domain\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "template_workflow = dpf.Workflow()\ndisplacement = ops.result.displacement()\nmesh = ops.mesh.mesh_provider()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Add the operators to the template workflow and name its inputs and outputs\nOnce workflow's inputs and outputs are named, they can be connected later on\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "template_workflow.add_operators([displacement])\ntemplate_workflow.set_input_name(\"data_sources\", displacement.inputs.data_sources)\ntemplate_workflow.set_input_name(\"data_sources\", mesh.inputs.data_sources)\ntemplate_workflow.set_output_name(\"out\", displacement.outputs.fields_container)\ntemplate_workflow.set_output_name(\"outmesh\", mesh.outputs.mesh)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Configure the servers\nMake a list of ip addresses an port numbers on which dpf servers are\nstarted. Workflows instances will be created on each of those servers to\naddress each a different result file.\nIn this example, we will post process an analysis distributed in 2 files,\nwe will consequently require 2 remote processes\nTo make this example easier, we will start local servers here,\nbut we could get connected to any existing servers on the network.\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]\nips = [remote_server.ip for remote_server in remote_servers]\nports = [remote_server.port for remote_server in remote_servers]"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Print the ips and ports\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "print(\"ips:\", ips)\nprint(\"ports:\", ports)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Choose the file path\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "base_path = examples.distributed_msup_folder\nfiles = [os.path.join(base_path, \"file0.mode\"), os.path.join(base_path, \"file1.mode\")]\nfiles_aux = [os.path.join(base_path, \"file0.rst\"), os.path.join(base_path, \"file1.rst\")]"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Send workflows on servers\nHere we create new instances on the server by copies of the template workflow\nWe also connect the data sources to those workflows\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "remote_workflows = []\nfor i, server in enumerate(remote_servers):\n remote_workflows.append(template_workflow.create_on_other_server(server))\n ds = dpf.DataSources(files[i])\n ds.add_file_path(files_aux[i])\n remote_workflows[i].connect(\"data_sources\", ds)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Create a local workflow for expansion\nIn this workflow we merge the modal basis, the meshes, read the modal response\nand expand the modal response with the modal basis\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "local_workflow = dpf.Workflow()\nmerge = ops.utility.merge_fields_containers()\nmerge_mesh = ops.utility.merge_meshes()\n\nds = dpf.DataSources(os.path.join(base_path, \"file_load_1.rfrq\"))\nresponse = ops.result.displacement(data_sources=ds)\nresponse.inputs.mesh(merge_mesh.outputs.merges_mesh)\n\nds = dpf.DataSources(os.path.join(base_path, \"file_load_2.rfrq\"))\nfrom os import walk\n\nfor (dirpath, dirnames, filenames) in walk(base_path):\n print(filenames)\nresponse2 = ops.result.displacement(data_sources=ds)\nresponse2fc = response2.outputs.fields_container()\nresponse2fc.time_freq_support.time_frequencies.scoping.set_id(0, 2)\n\nmerge_use_pass = ops.utility.merge_fields_containers()\nmerge_use_pass.inputs.fields_containers1(response)\nmerge_use_pass.inputs.fields_containers2(response2fc)\n\nexpansion = ops.math.modal_superposition(solution_in_modal_space=merge_use_pass, modal_basis=merge)\ncomponent = ops.logic.component_selector_fc(expansion, 1)\n\nlocal_workflow.add_operators([merge, merge_use_pass, expansion, merge_mesh, component])\nlocal_workflow.set_input_name(\"in0\", merge, 0)\nlocal_workflow.set_input_name(\"in1\", merge, 1)\nlocal_workflow.set_input_name(\"inmesh0\", merge_mesh, 0)\nlocal_workflow.set_input_name(\"inmesh1\", merge_mesh, 1)\n\nlocal_workflow.set_output_name(\"expanded\", component.outputs.fields_container)\nlocal_workflow.set_output_name(\"mesh\", merge_mesh.outputs.merges_mesh)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Connect the workflows together and get the output\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "for i, server in enumerate(remote_servers):\n local_workflow.connect_with(remote_workflows[i],\n {\"out\": \"in\" + str(i), \"outmesh\": \"inmesh\" + str(i)})\n\nfc = local_workflow.get_output(\"expanded\", dpf.types.fields_container)\nmerged_mesh = local_workflow.get_output(\"mesh\", dpf.types.meshed_region)\nmerged_mesh.plot(fc.get_field_by_time_complex_ids(1, 0))\nmerged_mesh.plot(fc.get_field_by_time_complex_ids(20, 0))\nprint(fc)\ndpf.server.shutdown_all_session_servers()"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.8.5"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
\ No newline at end of file
diff --git a/docs/source/examples/06-distributed-post/04-distributed-msup_expansion_steps.py.md5 b/docs/source/examples/06-distributed-post/04-distributed-msup_expansion_steps.py.md5
deleted file mode 100644
index 19f30816656..00000000000
--- a/docs/source/examples/06-distributed-post/04-distributed-msup_expansion_steps.py.md5
+++ /dev/null
@@ -1 +0,0 @@
-f22769e6341e409652b9e14543674dc5
\ No newline at end of file
diff --git a/docs/source/examples/06-distributed-post/04-distributed-msup_expansion_steps_codeobj.pickle b/docs/source/examples/06-distributed-post/04-distributed-msup_expansion_steps_codeobj.pickle
deleted file mode 100644
index e207c42eb3e..00000000000
Binary files a/docs/source/examples/06-distributed-post/04-distributed-msup_expansion_steps_codeobj.pickle and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/05-distributed_total_disp_with_operators.py b/docs/source/examples/06-distributed-post/05-distributed_total_disp_with_operators.py
deleted file mode 100644
index 298404617b6..00000000000
--- a/docs/source/examples/06-distributed-post/05-distributed_total_disp_with_operators.py
+++ /dev/null
@@ -1,78 +0,0 @@
-"""
-.. _ref_distributed_total_disp_op:
-
-Distributed post without client connection to remote processes with Operators
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This example shows how distributed files can be read and post processed
-on distributed processes. After remote post processing, results a merged
-on the local process.
-
-"""
-
-###############################################################################
-# Import dpf module and its examples files
-
-from ansys.dpf import core as dpf
-from ansys.dpf.core import examples
-from ansys.dpf.core import operators as ops
-
-###############################################################################
-# Create the template workflow of total displacement
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-###############################################################################
-# Configure the servers
-# ~~~~~~~~~~~~~~~~~~~~~~
-# Make a list of ip addresses an port numbers on which dpf servers are
-# started. Workflows instances will be created on each of those servers to
-# address each a different result file.
-# In this example, we will post process an analysis distributed in 2 files,
-# we will consequently require 2 remote processes
-# To make this example easier, we will start local servers here,
-# but we could get connected to any existing servers on the network.
-
-remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]
-ips = [remote_server.ip for remote_server in remote_servers]
-ports = [remote_server.port for remote_server in remote_servers]
-
-###############################################################################
-# Print the ips and ports
-print("ips:", ips)
-print("ports:", ports)
-
-###############################################################################
-# Here we show how we could send files in temporary directory if we were not
-# in shared memory
-files = examples.download_distributed_files()
-server_file_paths = [dpf.upload_file_in_tmp_folder(files[0], server=remote_servers[0]),
- dpf.upload_file_in_tmp_folder(files[1], server=remote_servers[1])]
-
-###############################################################################
-# Send workflows on servers
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~
-# Here we create new instances on the server by copies of the template workflow
-# We also connect the data sources to those workflows
-remote_operators = []
-for i, server in enumerate(remote_servers):
- displacement = ops.result.displacement(server=server)
- norm = ops.math.norm_fc(displacement, server=server)
- remote_operators.append(norm)
- ds = dpf.DataSources(server_file_paths[i], server=server)
- displacement.inputs.data_sources(ds)
-
-###############################################################################
-# Create a local workflow able to merge the results
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-merge = ops.utility.merge_fields_containers()
-
-###############################################################################
-# Connect the workflows together and get the output
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-for i, server in enumerate(remote_servers):
- merge.connect(i, remote_operators[i], 0)
-
-fc = merge.get_output(0, dpf.types.fields_container)
-print(fc)
-print(fc[0].min().data)
-print(fc[0].max().data)
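The example above also illustrates the pattern for clusters where the client does not share a filesystem with the remote servers: result files are first pushed into each server's temporary folder with `dpf.upload_file_in_tmp_folder`, and the returned remote paths are what the `DataSources` are built from. Generalized to any number of servers (a sketch, assuming `remote_servers`, `dpf`, and `examples` as defined above):

    files = examples.download_distributed_files()
    server_file_paths = [
        dpf.upload_file_in_tmp_folder(files[i], server=server)
        for i, server in enumerate(remote_servers)
    ]
    print(server_file_paths)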
diff --git a/docs/source/examples/06-distributed-post/05-distributed_total_disp_with_operators.py.md5 b/docs/source/examples/06-distributed-post/05-distributed_total_disp_with_operators.py.md5
deleted file mode 100644
index b565a9eb67b..00000000000
--- a/docs/source/examples/06-distributed-post/05-distributed_total_disp_with_operators.py.md5
+++ /dev/null
@@ -1 +0,0 @@
-b07dd1f07c70956e6613c66f15fb072f
\ No newline at end of file
diff --git a/docs/source/examples/06-distributed-post/05-distributed_total_disp_with_operators.rst b/docs/source/examples/06-distributed-post/05-distributed_total_disp_with_operators.rst
deleted file mode 100644
index 3ca0ab49c4e..00000000000
--- a/docs/source/examples/06-distributed-post/05-distributed_total_disp_with_operators.rst
+++ /dev/null
@@ -1,244 +0,0 @@
-
-.. DO NOT EDIT.
-.. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY.
-.. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE:
-.. "examples\06-distributed-post\05-distributed_total_disp_with_operators.py"
-.. LINE NUMBERS ARE GIVEN BELOW.
-
-.. only:: html
-
- .. note::
- :class: sphx-glr-download-link-note
-
- Click :ref:`here `
- to download the full example code
-
-.. rst-class:: sphx-glr-example-title
-
-.. _sphx_glr_examples_06-distributed-post_05-distributed_total_disp_with_operators.py:
-
-
-.. _ref_distributed_total_disp_op:
-
-Distributed post without client connection to remote processes with Operators
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This example shows how distributed files can be read and post processed
-on distributed processes. After remote post processing, results a merged
-on the local process.
-
-.. GENERATED FROM PYTHON SOURCE LINES 13-14
-
-Import dpf module and its examples files
-
-.. GENERATED FROM PYTHON SOURCE LINES 14-19
-
-.. code-block:: default
-
-
- from ansys.dpf import core as dpf
- from ansys.dpf.core import examples
- from ansys.dpf.core import operators as ops
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 20-22
-
-Create the template workflow of total displacement
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. GENERATED FROM PYTHON SOURCE LINES 24-33
-
-Configure the servers
-~~~~~~~~~~~~~~~~~~~~~~
-Make a list of ip addresses an port numbers on which dpf servers are
-started. Workflows instances will be created on each of those servers to
-address each a different result file.
-In this example, we will post process an analysis distributed in 2 files,
-we will consequently require 2 remote processes
-To make this example easier, we will start local servers here,
-but we could get connected to any existing servers on the network.
-
-.. GENERATED FROM PYTHON SOURCE LINES 33-38
-
-.. code-block:: default
-
-
- remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]
- ips = [remote_server.ip for remote_server in remote_servers]
- ports = [remote_server.port for remote_server in remote_servers]
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 39-40
-
-Print the ips and ports
-
-.. GENERATED FROM PYTHON SOURCE LINES 40-43
-
-.. code-block:: default
-
- print("ips:", ips)
- print("ports:", ports)
-
-
-
-
-
-.. rst-class:: sphx-glr-script-out
-
- Out:
-
- .. code-block:: none
-
- ips: ['127.0.0.1', '127.0.0.1']
- ports: [50057, 50058]
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 44-46
-
-Here we show how we could send files in temporary directory if we were not
-in shared memory
-
-.. GENERATED FROM PYTHON SOURCE LINES 46-50
-
-.. code-block:: default
-
- files = examples.download_distributed_files()
- server_file_paths = [dpf.upload_file_in_tmp_folder(files[0], server=remote_servers[0]),
- dpf.upload_file_in_tmp_folder(files[1], server=remote_servers[1])]
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 51-55
-
-Send workflows on servers
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-Here we create new instances on the server by copies of the template workflow
-We also connect the data sources to those workflows
-
-.. GENERATED FROM PYTHON SOURCE LINES 55-63
-
-.. code-block:: default
-
- remote_operators = []
- for i, server in enumerate(remote_servers):
- displacement = ops.result.displacement(server=server)
- norm = ops.math.norm_fc(displacement, server=server)
- remote_operators.append(norm)
- ds = dpf.DataSources(server_file_paths[i], server=server)
- displacement.inputs.data_sources(ds)
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 64-66
-
-Create a local workflow able to merge the results
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. GENERATED FROM PYTHON SOURCE LINES 66-68
-
-.. code-block:: default
-
- merge = ops.utility.merge_fields_containers()
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 69-71
-
-Connect the workflows together and get the output
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. GENERATED FROM PYTHON SOURCE LINES 71-79
-
-.. code-block:: default
-
-
- for i, server in enumerate(remote_servers):
- merge.connect(i, remote_operators[i], 0)
-
- fc = merge.get_output(0, dpf.types.fields_container)
- print(fc)
- print(fc[0].min().data)
- print(fc[0].max().data)
-
-
-
-
-.. rst-class:: sphx-glr-script-out
-
- Out:
-
- .. code-block:: none
-
- DPF Fields Container
- with 1 field(s)
- defined on labels: time
-
- with:
- - field 0 {time: 1} with Nodal location, 1 components and 432 entities.
-
- [0.]
- [10.03242272]
-
-
-
-
-
-.. rst-class:: sphx-glr-timing
-
- **Total running time of the script:** ( 0 minutes 0.941 seconds)
-
-
-.. _sphx_glr_download_examples_06-distributed-post_05-distributed_total_disp_with_operators.py:
-
-
-.. only :: html
-
- .. container:: sphx-glr-footer
- :class: sphx-glr-footer-example
-
-
-
- .. container:: sphx-glr-download sphx-glr-download-python
-
- :download:`Download Python source code: 05-distributed_total_disp_with_operators.py <05-distributed_total_disp_with_operators.py>`
-
-
-
- .. container:: sphx-glr-download sphx-glr-download-jupyter
-
- :download:`Download Jupyter notebook: 05-distributed_total_disp_with_operators.ipynb <05-distributed_total_disp_with_operators.ipynb>`
-
-
-.. only:: html
-
- .. rst-class:: sphx-glr-signature
-
- `Gallery generated by Sphinx-Gallery `_
diff --git a/docs/source/examples/06-distributed-post/05-distributed_total_disp_with_operators_codeobj.pickle b/docs/source/examples/06-distributed-post/05-distributed_total_disp_with_operators_codeobj.pickle
deleted file mode 100644
index d3d2405febb..00000000000
Binary files a/docs/source/examples/06-distributed-post/05-distributed_total_disp_with_operators_codeobj.pickle and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/images/graphviz-0b490f262404b99215a9bce9bcf28ed202fd712f.png b/docs/source/examples/06-distributed-post/images/graphviz-0b490f262404b99215a9bce9bcf28ed202fd712f.png
new file mode 100644
index 00000000000..0b53d9a6459
Binary files /dev/null and b/docs/source/examples/06-distributed-post/images/graphviz-0b490f262404b99215a9bce9bcf28ed202fd712f.png differ
diff --git a/docs/source/examples/06-distributed-post/images/graphviz-0b490f262404b99215a9bce9bcf28ed202fd712f.png.map b/docs/source/examples/06-distributed-post/images/graphviz-0b490f262404b99215a9bce9bcf28ed202fd712f.png.map
new file mode 100644
index 00000000000..fe5db591e8c
--- /dev/null
+++ b/docs/source/examples/06-distributed-post/images/graphviz-0b490f262404b99215a9bce9bcf28ed202fd712f.png.map
@@ -0,0 +1,2 @@
+
diff --git a/docs/source/examples/06-distributed-post/images/graphviz-6c5c6731afa3cb99496c9582a659820e66e59ad8.png b/docs/source/examples/06-distributed-post/images/graphviz-6c5c6731afa3cb99496c9582a659820e66e59ad8.png
new file mode 100644
index 00000000000..318e494d038
Binary files /dev/null and b/docs/source/examples/06-distributed-post/images/graphviz-6c5c6731afa3cb99496c9582a659820e66e59ad8.png differ
diff --git a/docs/source/examples/06-distributed-post/images/graphviz-6c5c6731afa3cb99496c9582a659820e66e59ad8.png.map b/docs/source/examples/06-distributed-post/images/graphviz-6c5c6731afa3cb99496c9582a659820e66e59ad8.png.map
new file mode 100644
index 00000000000..fe5db591e8c
--- /dev/null
+++ b/docs/source/examples/06-distributed-post/images/graphviz-6c5c6731afa3cb99496c9582a659820e66e59ad8.png.map
@@ -0,0 +1,2 @@
+
diff --git a/docs/source/examples/06-distributed-post/images/graphviz-6d76c45ee399e15a2be231294ffa4ba764e4c442.png b/docs/source/examples/06-distributed-post/images/graphviz-6d76c45ee399e15a2be231294ffa4ba764e4c442.png
new file mode 100644
index 00000000000..8a03883c23d
Binary files /dev/null and b/docs/source/examples/06-distributed-post/images/graphviz-6d76c45ee399e15a2be231294ffa4ba764e4c442.png differ
diff --git a/docs/source/examples/06-distributed-post/images/graphviz-6d76c45ee399e15a2be231294ffa4ba764e4c442.png.map b/docs/source/examples/06-distributed-post/images/graphviz-6d76c45ee399e15a2be231294ffa4ba764e4c442.png.map
new file mode 100644
index 00000000000..fe5db591e8c
--- /dev/null
+++ b/docs/source/examples/06-distributed-post/images/graphviz-6d76c45ee399e15a2be231294ffa4ba764e4c442.png.map
@@ -0,0 +1,2 @@
+
diff --git a/docs/source/examples/06-distributed-post/images/graphviz-8a6d6449ecd7de654cd0b1382ac1a9d333037718.png b/docs/source/examples/06-distributed-post/images/graphviz-8a6d6449ecd7de654cd0b1382ac1a9d333037718.png
new file mode 100644
index 00000000000..229467e5d2d
Binary files /dev/null and b/docs/source/examples/06-distributed-post/images/graphviz-8a6d6449ecd7de654cd0b1382ac1a9d333037718.png differ
diff --git a/docs/source/examples/06-distributed-post/images/graphviz-8a6d6449ecd7de654cd0b1382ac1a9d333037718.png.map b/docs/source/examples/06-distributed-post/images/graphviz-8a6d6449ecd7de654cd0b1382ac1a9d333037718.png.map
new file mode 100644
index 00000000000..fe5db591e8c
--- /dev/null
+++ b/docs/source/examples/06-distributed-post/images/graphviz-8a6d6449ecd7de654cd0b1382ac1a9d333037718.png.map
@@ -0,0 +1,2 @@
+
diff --git a/docs/source/examples/06-distributed-post/images/graphviz-d3a2fbe72465ba55879181d1c4ceccf7a25d2c7a.png b/docs/source/examples/06-distributed-post/images/graphviz-d3a2fbe72465ba55879181d1c4ceccf7a25d2c7a.png
new file mode 100644
index 00000000000..0b53d9a6459
Binary files /dev/null and b/docs/source/examples/06-distributed-post/images/graphviz-d3a2fbe72465ba55879181d1c4ceccf7a25d2c7a.png differ
diff --git a/docs/source/examples/06-distributed-post/images/graphviz-d3a2fbe72465ba55879181d1c4ceccf7a25d2c7a.png.map b/docs/source/examples/06-distributed-post/images/graphviz-d3a2fbe72465ba55879181d1c4ceccf7a25d2c7a.png.map
new file mode 100644
index 00000000000..fe5db591e8c
--- /dev/null
+++ b/docs/source/examples/06-distributed-post/images/graphviz-d3a2fbe72465ba55879181d1c4ceccf7a25d2c7a.png.map
@@ -0,0 +1,2 @@
+
diff --git a/docs/source/examples/06-distributed-post/images/sphx_glr_01-distributed_workflows_on_remote_001.png b/docs/source/examples/06-distributed-post/images/sphx_glr_01-distributed_workflows_on_remote_001.png
new file mode 100644
index 00000000000..c602ffd7ee6
Binary files /dev/null and b/docs/source/examples/06-distributed-post/images/sphx_glr_01-distributed_workflows_on_remote_001.png differ
diff --git a/docs/source/examples/06-distributed-post/images/sphx_glr_02-distributed-msup_expansion_001.png b/docs/source/examples/06-distributed-post/images/sphx_glr_02-distributed-msup_expansion_001.png
new file mode 100644
index 00000000000..e151862cfdc
Binary files /dev/null and b/docs/source/examples/06-distributed-post/images/sphx_glr_02-distributed-msup_expansion_001.png differ
diff --git a/docs/source/examples/06-distributed-post/images/sphx_glr_02-distributed-msup_expansion_002.png b/docs/source/examples/06-distributed-post/images/sphx_glr_02-distributed-msup_expansion_002.png
new file mode 100644
index 00000000000..16a66991b60
Binary files /dev/null and b/docs/source/examples/06-distributed-post/images/sphx_glr_02-distributed-msup_expansion_002.png differ
diff --git a/docs/source/examples/06-distributed-post/images/sphx_glr_03-distributed-msup_expansion_steps_001.png b/docs/source/examples/06-distributed-post/images/sphx_glr_03-distributed-msup_expansion_steps_001.png
new file mode 100644
index 00000000000..e151862cfdc
Binary files /dev/null and b/docs/source/examples/06-distributed-post/images/sphx_glr_03-distributed-msup_expansion_steps_001.png differ
diff --git a/docs/source/examples/06-distributed-post/images/sphx_glr_03-distributed-msup_expansion_steps_002.png b/docs/source/examples/06-distributed-post/images/sphx_glr_03-distributed-msup_expansion_steps_002.png
new file mode 100644
index 00000000000..becc7dc356e
Binary files /dev/null and b/docs/source/examples/06-distributed-post/images/sphx_glr_03-distributed-msup_expansion_steps_002.png differ
diff --git a/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_01-distributed_workflows_on_remote_thumb.png b/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_01-distributed_workflows_on_remote_thumb.png
new file mode 100644
index 00000000000..8760d4e48e2
Binary files /dev/null and b/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_01-distributed_workflows_on_remote_thumb.png differ
diff --git a/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_02-distributed-msup_expansion_thumb.png b/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_02-distributed-msup_expansion_thumb.png
new file mode 100644
index 00000000000..7f40444f283
Binary files /dev/null and b/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_02-distributed-msup_expansion_thumb.png differ
diff --git a/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_02-distributed_workflows_on_remote_thumb.png b/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_02-distributed_workflows_on_remote_thumb.png
deleted file mode 100644
index c5c7068fdc3..00000000000
Binary files a/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_02-distributed_workflows_on_remote_thumb.png and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_03-distributed-msup_expansion_steps_thumb.png b/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_03-distributed-msup_expansion_steps_thumb.png
new file mode 100644
index 00000000000..7f40444f283
Binary files /dev/null and b/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_03-distributed-msup_expansion_steps_thumb.png differ
diff --git a/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_03-distributed-msup_expansion_thumb.png b/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_03-distributed-msup_expansion_thumb.png
deleted file mode 100644
index d46cbf832e6..00000000000
Binary files a/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_03-distributed-msup_expansion_thumb.png and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_04-distributed-msup_expansion_steps_thumb.png b/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_04-distributed-msup_expansion_steps_thumb.png
deleted file mode 100644
index d46cbf832e6..00000000000
Binary files a/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_04-distributed-msup_expansion_steps_thumb.png and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_04-distributed_total_disp_with_operator_thumb.png b/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_04-distributed_total_disp_with_operator_thumb.png
new file mode 100644
index 00000000000..b06c4e6a177
Binary files /dev/null and b/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_04-distributed_total_disp_with_operator_thumb.png differ
diff --git a/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_01-distributed_delegate_to_server_thumb.png b/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_04-distributed_total_disp_with_operators_thumb.png
similarity index 100%
rename from docs/source/examples/06-distributed-post/images/thumb/sphx_glr_01-distributed_delegate_to_server_thumb.png
rename to docs/source/examples/06-distributed-post/images/thumb/sphx_glr_04-distributed_total_disp_with_operators_thumb.png
diff --git a/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_05-distributed_total_disp_with_operators_thumb.png b/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_05-distributed_total_disp_with_operators_thumb.png
deleted file mode 100644
index 8a5fed589d1..00000000000
Binary files a/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_05-distributed_total_disp_with_operators_thumb.png and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/sg_execution_times.rst b/docs/source/examples/06-distributed-post/sg_execution_times.rst
index 2daaaf744e0..4ac04379a1c 100644
--- a/docs/source/examples/06-distributed-post/sg_execution_times.rst
+++ b/docs/source/examples/06-distributed-post/sg_execution_times.rst
@@ -5,18 +5,14 @@
Computation times
=================
-**00:00.913** total execution time for **examples_06-distributed-post** files:
+**00:05.209** total execution time for **examples_06-distributed-post** files:
-+--------------------------------------------------------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_examples_06-distributed-post_01-distributed_delegate_to_server.py` (``01-distributed_delegate_to_server.py``) | 00:00.913 | 0.0 MB |
-+--------------------------------------------------------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_examples_06-distributed-post_00-distributed_total_disp.py` (``00-distributed_total_disp.py``) | 00:00.000 | 0.0 MB |
-+--------------------------------------------------------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_examples_06-distributed-post_02-distributed_workflows_on_remote.py` (``02-distributed_workflows_on_remote.py``) | 00:00.000 | 0.0 MB |
-+--------------------------------------------------------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_examples_06-distributed-post_03-distributed-msup_expansion.py` (``03-distributed-msup_expansion.py``) | 00:00.000 | 0.0 MB |
-+--------------------------------------------------------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_examples_06-distributed-post_04-distributed-msup_expansion_steps.py` (``04-distributed-msup_expansion_steps.py``) | 00:00.000 | 0.0 MB |
-+--------------------------------------------------------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_examples_06-distributed-post_05-distributed_total_disp_with_operators.py` (``05-distributed_total_disp_with_operators.py``) | 00:00.000 | 0.0 MB |
-+--------------------------------------------------------------------------------------------------------------------------------------------+-----------+--------+
++----------------------------------------------------------------------------------------------------------------------------------+-----------+--------+
+| :ref:`sphx_glr_examples_06-distributed-post_01-distributed_workflows_on_remote.py` (``01-distributed_workflows_on_remote.py``) | 00:05.209 | 0.0 MB |
++----------------------------------------------------------------------------------------------------------------------------------+-----------+--------+
+| :ref:`sphx_glr_examples_06-distributed-post_00-distributed_total_disp.py` (``00-distributed_total_disp.py``) | 00:00.000 | 0.0 MB |
++----------------------------------------------------------------------------------------------------------------------------------+-----------+--------+
+| :ref:`sphx_glr_examples_06-distributed-post_02-distributed-msup_expansion.py` (``02-distributed-msup_expansion.py``) | 00:00.000 | 0.0 MB |
++----------------------------------------------------------------------------------------------------------------------------------+-----------+--------+
+| :ref:`sphx_glr_examples_06-distributed-post_03-distributed-msup_expansion_steps.py` (``03-distributed-msup_expansion_steps.py``) | 00:00.000 | 0.0 MB |
++----------------------------------------------------------------------------------------------------------------------------------+-----------+--------+
diff --git a/examples/06-distributed-post/00-distributed_total_disp.py b/examples/06-distributed-post/00-distributed_total_disp.py
index fc3c7d8ba1e..36ad0aba980 100644
--- a/examples/06-distributed-post/00-distributed_total_disp.py
+++ b/examples/06-distributed-post/00-distributed_total_disp.py
@@ -1,12 +1,15 @@
"""
.. _ref_distributed_total_disp:
-Distributed post without client connection to remote processes
+Post processing of displacement on distributed processes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This example shows how distributed files can be read and post processed
-on distributed processes. After remote post processing, results a merged
-on the local process.
+To help understand this example, the following diagram is provided. It shows
+the operator chain used to compute the final result.
+
+.. image:: 00-operator-dep.svg
+ :align: center
+ :width: 400
"""
###############################################################################
@@ -16,30 +19,14 @@
from ansys.dpf.core import examples
from ansys.dpf.core import operators as ops
-###############################################################################
-# Create the template workflow of total displacement
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# Create displacement and norm operators
-
-template_workflow = dpf.Workflow()
-displacement = ops.result.displacement()
-norm = ops.math.norm_fc(displacement)
-
-###############################################################################
-# Add the operators to the template workflow and name its inputs and outputs
-# Once workflow's inputs and outputs are named, they can be connected later on
-template_workflow.add_operators([displacement, norm])
-template_workflow.set_input_name("data_sources", displacement.inputs.data_sources)
-template_workflow.set_output_name("out", norm.outputs.fields_container)
-
###############################################################################
# Configure the servers
# ~~~~~~~~~~~~~~~~~~~~~~
-# Make a list of ip addresses an port numbers on which dpf servers are
-# started. Workflows instances will be created on each of those servers to
+# Make a list of ip addresses and port numbers on which dpf servers are
+# started. Operator instances will be created on each of those servers to
# address each a different result file.
# In this example, we will post process an analysis distributed in 2 files,
-# we will consequently require 2 remote processes
+# we will consequently require 2 remote processes.
# To make this example easier, we will start local servers here,
# but we could get connected to any existing servers on the network.
@@ -60,35 +47,35 @@
dpf.upload_file_in_tmp_folder(files[1], server=remote_servers[1])]
###############################################################################
-# Send workflows on servers
+# Create the operators on the servers
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
-# Here we create new instances on the server by copies of the template workflow
-# We also connect the data sources to those workflows
-remote_workflows = []
+# On each server we create two new operators, one for 'displacement' and one
+# for 'norm' computations, and define their data sources. The displacement
+# operator reads the data file on its respective server, and the norm
+# operator, chained to the displacement operator, takes the displacement
+# operator's output as its input.
+remote_operators = []
for i, server in enumerate(remote_servers):
- remote_workflows.append(template_workflow.create_on_other_server(server))
- ds = dpf.DataSources(server_file_paths[i])
- remote_workflows[i].connect("data_sources", ds)
+ displacement = ops.result.displacement(server=server)
+ norm = ops.math.norm_fc(displacement, server=server)
+ remote_operators.append(norm)
+ ds = dpf.DataSources(server_file_paths[i], server=server)
+ displacement.inputs.data_sources(ds)
###############################################################################
-# Create a local workflow able to merge the results
+# Create a merge_fields_containers operator able to merge the results
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-local_workflow = dpf.Workflow()
merge = ops.utility.merge_fields_containers()
-local_workflow.add_operator(merge)
-local_workflow.set_input_name("in0", merge, 0)
-local_workflow.set_input_name("in1", merge, 1)
-local_workflow.set_output_name("merged", merge.outputs.merged_fields_container)
###############################################################################
-# Connect the workflows together and get the output
+# Connect the operators together and get the output
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
for i, server in enumerate(remote_servers):
- local_workflow.connect_with(remote_workflows[i], ("out", "in" + str(i)))
+ merge.connect(i, remote_operators[i], 0)
-fc = local_workflow.get_output("merged", dpf.types.fields_container)
+fc = merge.get_output(0, dpf.types.fields_container)
print(fc)
print(fc[0].min().data)
print(fc[0].max().data)
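To see the refactor above as a whole, here is a condensed sketch of the new pattern: operators are instantiated directly on each remote server, chained there, and merged locally by pin index. This is a minimal sketch under the same assumptions as the example (two local DPF servers can be started and the distributed example files can be downloaded), not the shipped script itself.

from ansys.dpf import core as dpf
from ansys.dpf.core import examples
from ansys.dpf.core import operators as ops

# one local server per distributed result file
remote_servers = [dpf.start_local_server(as_global=False),
                  dpf.start_local_server(as_global=False)]
files = examples.download_distributed_files()
server_file_paths = [dpf.upload_file_in_tmp_folder(f, server=s)
                     for f, s in zip(files, remote_servers)]

# displacement -> norm chained on each remote server
remote_operators = []
for path, server in zip(server_file_paths, remote_servers):
    displacement = ops.result.displacement(server=server)
    norm = ops.math.norm_fc(displacement, server=server)
    displacement.inputs.data_sources(dpf.DataSources(path, server=server))
    remote_operators.append(norm)

# local merge: pin i of merge receives output pin 0 of the i-th remote norm
merge = ops.utility.merge_fields_containers()
for i, op in enumerate(remote_operators):
    merge.connect(i, op, 0)

fc = merge.get_output(0, dpf.types.fields_container)
print(fc[0].min().data, fc[0].max().data)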
diff --git a/examples/06-distributed-post/01-distributed_delegate_to_server.py b/examples/06-distributed-post/01-distributed_delegate_to_server.py
deleted file mode 100644
index 5b8595d191d..00000000000
--- a/examples/06-distributed-post/01-distributed_delegate_to_server.py
+++ /dev/null
@@ -1,101 +0,0 @@
-"""
-.. _ref_distributed_delegate_to_server:
-
-Compute total displacement from distributed files with distributed post
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This example shows how distributed files can be read and post processed
-on distributed processes. After remote post processing of total displacement,
-results a merged on the local process. In this example, the client is only
-connected to the coordinator server. Connections to remote processes are only
-done implicitly through the coordinator.
-
-"""
-
-###############################################################################
-# Import dpf module and its examples files
-
-from ansys.dpf import core as dpf
-from ansys.dpf.core import examples
-from ansys.dpf.core import operators as ops
-
-###############################################################################
-# Create the template workflow of total displacement
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-template_workflow = dpf.Workflow()
-displacement = ops.result.displacement()
-norm = ops.math.norm_fc(displacement)
-
-###############################################################################
-# Add the operators to the template workflow and name its inputs and outputs
-# Once workflow's inputs and outputs are named, they can be connected later on
-template_workflow.add_operators([displacement, norm])
-template_workflow.set_input_name("data_sources", displacement.inputs.data_sources)
-template_workflow.set_output_name("out", norm.outputs.fields_container)
-
-###############################################################################
-# Configure the servers
-# ~~~~~~~~~~~~~~~~~~~~~~
-# Make a list of ip addresses an port numbers on which dpf servers are
-# started. Workflows instances will be created on each of those servers to
-# address each a different result file.
-# In this example, we will post process an analysis distributed in 2 files,
-# we will consequently require 2 remote processes
-# To make this example easier, we will start local servers here,
-# but we could get connected to any existing servers on the network.
-# We only keep instances of remote_servers to start and keep those servers
-# awaik. The purpose of this example is to show that we can do distributed
-# post processing without opening channels between this client and
-# the remote processes
-
-remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]
-ips = [remote_server.ip for remote_server in remote_servers]
-ports = [remote_server.port for remote_server in remote_servers]
-
-###############################################################################
-# Print the ips and ports
-print("ips:", ips)
-print("ports:", ports)
-
-###############################################################################
-# Here we show how we could send files in temporary directory if we were not
-# in shared memory
-files = examples.download_distributed_files()
-server_file_paths = [dpf.upload_file_in_tmp_folder(files[0], server=remote_servers[0]),
- dpf.upload_file_in_tmp_folder(files[1], server=remote_servers[1])]
-
-###############################################################################
-# Send workflows on servers
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~
-# Here we create new instances on the server by copies of the template workflow
-# We also connect the data sources to those workflows.
-remote_workflows = []
-for i, ip in enumerate(ips):
- remote_workflows.append(template_workflow.create_on_other_server(ip=ip, port=ports[i]))
- ds = dpf.DataSources(server_file_paths[i])
- remote_workflows[i].connect("data_sources", ds)
-
-###############################################################################
-# Create a local workflow able to merge the results
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-local_workflow = dpf.Workflow()
-merge = ops.utility.merge_fields_containers()
-local_workflow.add_operator(merge)
-local_workflow.set_input_name("in0", merge, 0)
-local_workflow.set_input_name("in1", merge, 1)
-local_workflow.set_output_name("merged", merge.outputs.merged_fields_container)
-
-###############################################################################
-# Connect the workflows together and get the output
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-for i, ip in enumerate(ips):
- local_workflow.connect_with(remote_workflows[i], ("out", "in" + str(i)))
-
-fc = local_workflow.get_output("merged", dpf.types.fields_container)
-print(fc)
-print(fc[0].min().data)
-print(fc[0].max().data)
-
-dpf.server.shutdown_all_session_servers()
diff --git a/examples/06-distributed-post/01-distributed_workflows_on_remote.py b/examples/06-distributed-post/01-distributed_workflows_on_remote.py
new file mode 100644
index 00000000000..e52e00f0283
--- /dev/null
+++ b/examples/06-distributed-post/01-distributed_workflows_on_remote.py
@@ -0,0 +1,74 @@
+"""
+.. _ref_distributed_workflows_on_remote:
+
+Create a custom workflow on distributed processes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+This example shows how distributed files can be read and post processed
+on distributed processes. After remote post processing,
+results are merged on the local process. In this example, different operator
+sequences are created directly on different servers. These operators are then
+connected together without having to account for the fact that they run on
+remote processes.
+
+.. image:: 01-operator-dep.svg
+ :align: center
+ :width: 400
+"""
+###############################################################################
+# Import dpf module and its examples files
+
+from ansys.dpf import core as dpf
+from ansys.dpf.core import examples
+from ansys.dpf.core import operators as ops
+
+###############################################################################
+# Configure the servers
+# ~~~~~~~~~~~~~~~~~~~~~~
+# To make this example easier, we will start local servers here,
+# but we could get connected to any existing servers on the network.
+
+remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]
+
+###############################################################################
+# Here we show how we could send files to a temporary directory if we were
+# not running in shared memory.
+
+files = examples.download_distributed_files()
+server_file_paths = [dpf.upload_file_in_tmp_folder(files[0], server=remote_servers[0]),
+ dpf.upload_file_in_tmp_folder(files[1], server=remote_servers[1])]
+
+###############################################################################
+# First operator chain.
+
+remote_operators = []
+
+stress1 = ops.result.stress(server=remote_servers[0])
+remote_operators.append(stress1)
+ds = dpf.DataSources(server_file_paths[0], server=remote_servers[0])
+stress1.inputs.data_sources(ds)
+
+###############################################################################
+# Second operator chain.
+
+stress2 = ops.result.stress(server=remote_servers[1])
+mul = stress2 * 2.0
+remote_operators.append(mul)
+ds = dpf.DataSources(server_file_paths[1], server=remote_servers[1])
+stress2.inputs.data_sources(ds)
+
+###############################################################################
+# Local merge operator.
+
+merge = ops.utility.merge_fields_containers()
+
+###############################################################################
+# Connect the operator chains together and get the output
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+nodal = ops.averaging.to_nodal_fc(merge)
+
+merge.connect(0, remote_operators[0], 0)
+merge.connect(1, remote_operators[1], 0)
+
+fc = nodal.get_output(0, dpf.types.fields_container)
+print(fc[0])
+fc[0].meshed_region.plot(fc[0])
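One detail worth calling out in the chains above: 'stress2 * 2.0' uses Python operator overloading to build the scaling step, presumably on the same server as stress2 (an assumption here; the diff only shows the overloaded syntax). The cross-process wiring then reduces to a few connect calls, sketched below assuming stress1 and mul from the two chains above.

# local merge and nodal averaging, chained lazily
merge = ops.utility.merge_fields_containers()
nodal = ops.averaging.to_nodal_fc(merge)

merge.connect(0, stress1, 0)  # pin 0 <- output pin 0 of the first chain
merge.connect(1, mul, 0)      # pin 1 <- output pin 0 of the scaled chain

# requesting the output triggers evaluation of the remote chains
fc = nodal.get_output(0, dpf.types.fields_container)
fc[0].meshed_region.plot(fc[0])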
diff --git a/examples/06-distributed-post/02-distributed-msup_expansion.py b/examples/06-distributed-post/02-distributed-msup_expansion.py
new file mode 100644
index 00000000000..d3f1cba0c90
--- /dev/null
+++ b/examples/06-distributed-post/02-distributed-msup_expansion.py
@@ -0,0 +1,102 @@
+"""
+.. _ref_distributed_msup:
+
+Distributed modal superposition
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+This example shows how distributed files can be read and expanded
+on distributed processes. The modal basis (2 distributed files) is read
+on 2 remote servers, while the modal response is read and the expansion
+is done on a third server.
+
+To help understand this example, the following diagram is provided. It shows
+the operator chain used to compute the final result.
+
+.. image:: 02-operator-dep.svg
+ :align: center
+ :width: 800
+"""
+
+###############################################################################
+# Import dpf module and its examples files.
+
+from ansys.dpf import core as dpf
+from ansys.dpf.core import examples
+from ansys.dpf.core import operators as ops
+
+###############################################################################
+# Configure the servers
+# ~~~~~~~~~~~~~~~~~~~~~~
+# Make a list of ip addresses and port numbers on which dpf servers are
+# started. Operator instances will be created on each of those servers,
+# each addressing a different result file.
+# In this example, we will post process an analysis distributed in 2 files,
+# so we will require 2 remote processes.
+# To make this example easier, we will start local servers here,
+# but we could get connected to any existing servers on the network.
+
+remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]
+ips = [remote_server.ip for remote_server in remote_servers]
+ports = [remote_server.port for remote_server in remote_servers]
+
+###############################################################################
+# Print the ips and ports.
+print("ips:", ips)
+print("ports:", ports)
+
+###############################################################################
+# Choose the file path.
+
+base_path = examples.distributed_msup_folder
+files = [base_path + r'/file0.mode', base_path + r'/file1.mode']
+files_aux = [base_path + r'/file0.rst', base_path + r'/file1.rst']
+
+###############################################################################
+# Create the operators on the servers
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# On each server we create two new operators, one for 'displacement'
+# computations and one for 'mesh_provider', and then define their data
+# sources. The displacement and mesh_provider operators each receive data
+# from the data files on their respective server.
+remote_displacement_operators = []
+remote_mesh_operators = []
+for i, server in enumerate(remote_servers):
+ displacement = ops.result.displacement(server=server)
+ mesh = ops.mesh.mesh_provider(server=server)
+ remote_displacement_operators.append(displacement)
+ remote_mesh_operators.append(mesh)
+ ds = dpf.DataSources(files[i], server=server)
+ ds.add_file_path(files_aux[i])
+ displacement.inputs.data_sources(ds)
+ mesh.inputs.data_sources(ds)
+
+###############################################################################
+# Create a local operator chain for expansion
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# In the following series of operators we merge the modal basis and the
+# meshes, read the modal response, and expand the modal response with the
+# modal basis.
+
+merge_fields = ops.utility.merge_fields_containers()
+merge_mesh = ops.utility.merge_meshes()
+
+ds = dpf.DataSources(base_path + r'/file_load_1.rfrq')
+response = ops.result.displacement(data_sources=ds)
+response.inputs.mesh(merge_mesh.outputs.merges_mesh)
+
+expansion = ops.math.modal_superposition(
+ solution_in_modal_space=response,
+ modal_basis=merge_fields
+)
+component = ops.logic.component_selector_fc(expansion, 1)
+
+###############################################################################
+# Connect the operator chains together and get the output
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+for i, server in enumerate(remote_servers):
+ merge_fields.connect(i, remote_displacement_operators[i], 0)
+ merge_mesh.connect(i, remote_mesh_operators[i], 0)
+
+fc = component.get_output(0, dpf.types.fields_container)
+merged_mesh = merge_mesh.get_output(0, dpf.types.meshed_region)
+
+merged_mesh.plot(fc.get_field_by_time_complex_ids(1, 0))
+merged_mesh.plot(fc.get_field_by_time_complex_ids(10, 0))
+print(fc)
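The per-server loop above generalizes beyond two files. As a small illustration, the hypothetical helper below (not part of the example) builds the same displacement and mesh_provider pairs for any number of distributed files, using only calls already shown in this example.

def make_remote_providers(servers, mode_files, rst_files):
    """Hypothetical helper: one displacement/mesh_provider pair per server."""
    disp_ops, mesh_ops = [], []
    for server, mode_f, rst_f in zip(servers, mode_files, rst_files):
        ds = dpf.DataSources(mode_f, server=server)
        ds.add_file_path(rst_f)  # auxiliary .rst next to the .mode file
        disp = ops.result.displacement(server=server)
        mesh = ops.mesh.mesh_provider(server=server)
        disp.inputs.data_sources(ds)
        mesh.inputs.data_sources(ds)
        disp_ops.append(disp)
        mesh_ops.append(mesh)
    return disp_ops, mesh_ops

# usage mirroring the loop above
disp_ops, mesh_ops = make_remote_providers(remote_servers, files, files_aux)
for i, (d, m) in enumerate(zip(disp_ops, mesh_ops)):
    merge_fields.connect(i, d, 0)
    merge_mesh.connect(i, m, 0)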
diff --git a/examples/06-distributed-post/02-distributed_workflows_on_remote.py b/examples/06-distributed-post/02-distributed_workflows_on_remote.py
deleted file mode 100644
index f246f083d94..00000000000
--- a/examples/06-distributed-post/02-distributed_workflows_on_remote.py
+++ /dev/null
@@ -1,71 +0,0 @@
-"""
-.. _ref_distributed_workflows_on_remote:
-
-Connect workflows on different processes implicitly
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This example shows how distributed files can be read and post processed
-on distributed processes. After remote post processing,
-results a merged on the local process. In this example, different workflows are
-directly created on different servers. Those workflows are then connected
-together without having to care that they are on remote processes.
-
-"""
-###############################################################################
-# Import dpf module and its examples files
-
-from ansys.dpf import core as dpf
-from ansys.dpf.core import examples
-from ansys.dpf.core import operators as ops
-
-###############################################################################
-# Configure the servers
-# ~~~~~~~~~~~~~~~~~~~~~~
-# To make this example easier, we will start local servers here,
-# but we could get connected to any existing servers on the network.
-
-remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]
-
-###############################################################################
-# Create template workflows on remote servers
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# For the purpose of this example, we will create 2 workflows computing
-# elemental nodal stresses on different servers. The second workflow will
-# multiply by 2.0 the stresses. A last workflow will merge the outputs
-
-files = examples.download_distributed_files()
-
-###############################################################################
-# first workflow S
-workflow1 = dpf.Workflow(server=remote_servers[0])
-model = dpf.Model(files[0], server=remote_servers[0])
-stress1 = model.results.stress()
-workflow1.add_operator(stress1)
-workflow1.set_output_name("out1", stress1.outputs.fields_container)
-
-###############################################################################
-# second workflow S*2.0
-workflow2 = dpf.Workflow(server=remote_servers[1])
-model = dpf.Model(files[1], server=remote_servers[1])
-stress2 = model.results.stress()
-mul = stress2 * 2.0
-workflow2.add_operator(mul)
-workflow2.set_output_name("out2", mul.outputs.fields_container)
-
-###############################################################################
-# third workflow merge
-local_workflow = dpf.Workflow()
-merge = ops.utility.merge_fields_containers()
-nodal = ops.averaging.to_nodal_fc(merge)
-local_workflow.add_operators([merge, nodal])
-local_workflow.set_input_name("in1", merge, 0)
-local_workflow.set_input_name("in2", merge, 1)
-local_workflow.set_output_name("merged", nodal.outputs.fields_container)
-
-###############################################################################
-# Connect the workflows together and get the output
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-local_workflow.connect_with(workflow1, ("out1", "in1"))
-local_workflow.connect_with(workflow2, ("out2", "in2"))
-
-fc = local_workflow.get_output("merged", dpf.types.fields_container)
-fc[0].meshed_region.plot(fc[0])
diff --git a/examples/06-distributed-post/03-distributed-msup_expansion.py b/examples/06-distributed-post/03-distributed-msup_expansion.py
deleted file mode 100644
index 62d8eb164b7..00000000000
--- a/examples/06-distributed-post/03-distributed-msup_expansion.py
+++ /dev/null
@@ -1,116 +0,0 @@
-"""
-.. _ref_distributed_msup:
-
-Distributed modal superposition
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This example shows how distributed files can be read and expanded
-on distributed processes. The modal basis (2 distributed files) is read
-on 2 remote servers and the modal response reading and the expansion is
-done on a third server.
-"""
-
-###############################################################################
-# Import dpf module and its examples files
-
-from ansys.dpf import core as dpf
-from ansys.dpf.core import examples
-from ansys.dpf.core import operators as ops
-
-###############################################################################
-# Create the template workflow
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# this workflow will provide the modal basis and the mesh for each domain
-
-template_workflow = dpf.Workflow()
-displacement = ops.result.displacement()
-mesh = ops.mesh.mesh_provider()
-
-###############################################################################
-# Add the operators to the template workflow and name its inputs and outputs
-# Once workflow's inputs and outputs are named, they can be connected later on
-template_workflow.add_operators([displacement])
-template_workflow.set_input_name("data_sources", displacement.inputs.data_sources)
-template_workflow.set_input_name("data_sources", mesh.inputs.data_sources)
-template_workflow.set_output_name("out", displacement.outputs.fields_container)
-template_workflow.set_output_name("outmesh", mesh.outputs.mesh)
-
-###############################################################################
-# Configure the servers
-# ~~~~~~~~~~~~~~~~~~~~~~
-# Make a list of ip addresses an port numbers on which dpf servers are
-# started. Workflows instances will be created on each of those servers to
-# address each a different result file.
-# In this example, we will post process an analysis distributed in 2 files,
-# we will consequently require 2 remote processes
-# To make this example easier, we will start local servers here,
-# but we could get connected to any existing servers on the network.
-
-remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]
-ips = [remote_server.ip for remote_server in remote_servers]
-ports = [remote_server.port for remote_server in remote_servers]
-
-###############################################################################
-# Print the ips and ports
-print("ips:", ips)
-print("ports:", ports)
-
-###############################################################################
-# Choose the file path
-
-base_path = examples.distributed_msup_folder
-files = [base_path + r'/file0.mode', base_path + r'/file1.mode']
-files_aux = [base_path + r'/file0.rst', base_path + r'/file1.rst']
-
-###############################################################################
-# Send workflows on servers
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~
-# Here we create new instances on the server by copies of the template workflow
-# We also connect the data sources to those workflows
-remote_workflows = []
-for i, server in enumerate(remote_servers):
- remote_workflows.append(template_workflow.create_on_other_server(server))
- ds = dpf.DataSources(files[i])
- ds.add_file_path(files_aux[i])
- remote_workflows[i].connect("data_sources", ds)
-
-###############################################################################
-# Create a local workflow for expansion
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# In this workflow we merge the modal basis, the meshes, read the modal response
-# and expand the modal response with the modal basis
-
-local_workflow = dpf.Workflow()
-merge = ops.utility.merge_fields_containers()
-merge_mesh = ops.utility.merge_meshes()
-
-ds = dpf.DataSources(base_path + r'/file_load_1.rfrq')
-response = ops.result.displacement(data_sources=ds)
-response.inputs.mesh(merge_mesh.outputs.merges_mesh)
-
-expansion = ops.math.modal_superposition(solution_in_modal_space=response, modal_basis=merge)
-component = ops.logic.component_selector_fc(expansion, 1)
-
-local_workflow.add_operators([merge, response, expansion, merge_mesh, component])
-local_workflow.set_input_name("in0", merge, 0)
-local_workflow.set_input_name("in1", merge, 1)
-local_workflow.set_input_name("inmesh0", merge_mesh, 0)
-local_workflow.set_input_name("inmesh1", merge_mesh, 1)
-
-local_workflow.set_output_name("expanded", component.outputs.fields_container)
-local_workflow.set_output_name("mesh", merge_mesh.outputs.merges_mesh)
-
-###############################################################################
-# Connect the workflows together and get the output
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-for i, server in enumerate(remote_servers):
- local_workflow.connect_with(remote_workflows[i],
- {"out": "in" + str(i), "outmesh": "inmesh" + str(i)})
-
-fc = local_workflow.get_output("expanded", dpf.types.fields_container)
-merged_mesh = local_workflow.get_output("mesh", dpf.types.meshed_region)
-merged_mesh.plot(fc.get_field_by_time_complex_ids(1, 0))
-merged_mesh.plot(fc.get_field_by_time_complex_ids(10, 0))
-print(fc)
-
-dpf.server.shutdown_all_session_servers()
diff --git a/docs/source/examples/06-distributed-post/04-distributed-msup_expansion_steps.py b/examples/06-distributed-post/03-distributed-msup_expansion_steps.py
similarity index 53%
rename from docs/source/examples/06-distributed-post/04-distributed-msup_expansion_steps.py
rename to examples/06-distributed-post/03-distributed-msup_expansion_steps.py
index dce7bd8ac64..1fac648a2f8 100644
--- a/docs/source/examples/06-distributed-post/04-distributed-msup_expansion_steps.py
+++ b/examples/06-distributed-post/03-distributed-msup_expansion_steps.py
@@ -8,81 +8,73 @@
on 2 remote servers and the modal response (2 distributed files) reading and the expansion is
done on a third server.
+To help understand this example, the following diagram is provided. It shows
+the operator chain used to compute the final result.
+
+.. image:: 03-operator-dep.svg
+ :align: center
+ :width: 800
"""
###############################################################################
-# Import dpf module and its examples files
+# Import dpf module and its examples files.
import os.path
from ansys.dpf import core as dpf
from ansys.dpf.core import examples
from ansys.dpf.core import operators as ops
-###############################################################################
-# Create the template workflow
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# this workflow will provide the modal basis and the mesh for each domain
-
-template_workflow = dpf.Workflow()
-displacement = ops.result.displacement()
-mesh = ops.mesh.mesh_provider()
-
-###############################################################################
-# Add the operators to the template workflow and name its inputs and outputs
-# Once workflow's inputs and outputs are named, they can be connected later on
-template_workflow.add_operators([displacement])
-template_workflow.set_input_name("data_sources", displacement.inputs.data_sources)
-template_workflow.set_input_name("data_sources", mesh.inputs.data_sources)
-template_workflow.set_output_name("out", displacement.outputs.fields_container)
-template_workflow.set_output_name("outmesh", mesh.outputs.mesh)
-
###############################################################################
# Configure the servers
# ~~~~~~~~~~~~~~~~~~~~~~
-# Make a list of ip addresses an port numbers on which dpf servers are
-# started. Workflows instances will be created on each of those servers to
+# Make a list of ip addresses and port numbers on which dpf servers are
+# started. Operator instances will be created on each of those servers to
# address each a different result file.
# In this example, we will post process an analysis distributed in 2 files,
# we will consequently require 2 remote processes
# To make this example easier, we will start local servers here,
# but we could get connected to any existing servers on the network.
-
remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]
ips = [remote_server.ip for remote_server in remote_servers]
ports = [remote_server.port for remote_server in remote_servers]
###############################################################################
-# Print the ips and ports
+# Print the ips and ports.
print("ips:", ips)
print("ports:", ports)
###############################################################################
-# Choose the file path
+# Choose the file path.
base_path = examples.distributed_msup_folder
files = [os.path.join(base_path, "file0.mode"), os.path.join(base_path, "file1.mode")]
files_aux = [os.path.join(base_path, "file0.rst"), os.path.join(base_path, "file1.rst")]
###############################################################################
-# Send workflows on servers
+# Create the operators on the servers
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
-# Here we create new instances on the server by copies of the template workflow
-# We also connect the data sources to those workflows
-remote_workflows = []
+# On each server we create two new operators, one for 'displacement'
+# computations and one for 'mesh_provider', and then define their data
+# sources. The displacement and mesh_provider operators each receive data
+# from the data files on their respective server.
+remote_displacement_operators = []
+remote_mesh_operators = []
for i, server in enumerate(remote_servers):
- remote_workflows.append(template_workflow.create_on_other_server(server))
- ds = dpf.DataSources(files[i])
+ displacement = ops.result.displacement(server=server)
+ mesh = ops.mesh.mesh_provider(server=server)
+ remote_displacement_operators.append(displacement)
+ remote_mesh_operators.append(mesh)
+ ds = dpf.DataSources(files[i], server=server)
ds.add_file_path(files_aux[i])
- remote_workflows[i].connect("data_sources", ds)
+ displacement.inputs.data_sources(ds)
+ mesh.inputs.data_sources(ds)
###############################################################################
-# Create a local workflow for expansion
+# Create a local operator chain for expansion
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# In this workflow we merge the modal basis, the meshes, read the modal response
-# and expand the modal response with the modal basis
+# In the following series of operators we merge the modal basis and the
+# meshes, read the modal response, and expand the modal response with the
+# modal basis.
-local_workflow = dpf.Workflow()
-merge = ops.utility.merge_fields_containers()
+merge_fields = ops.utility.merge_fields_containers()
merge_mesh = ops.utility.merge_meshes()
ds = dpf.DataSources(os.path.join(base_path, "file_load_1.rfrq"))
@@ -102,29 +94,22 @@
merge_use_pass.inputs.fields_containers1(response)
merge_use_pass.inputs.fields_containers2(response2fc)
-expansion = ops.math.modal_superposition(solution_in_modal_space=merge_use_pass, modal_basis=merge)
+expansion = ops.math.modal_superposition(
+ solution_in_modal_space=merge_use_pass,
+ modal_basis=merge_fields
+)
component = ops.logic.component_selector_fc(expansion, 1)
-local_workflow.add_operators([merge, merge_use_pass, expansion, merge_mesh, component])
-local_workflow.set_input_name("in0", merge, 0)
-local_workflow.set_input_name("in1", merge, 1)
-local_workflow.set_input_name("inmesh0", merge_mesh, 0)
-local_workflow.set_input_name("inmesh1", merge_mesh, 1)
-
-local_workflow.set_output_name("expanded", component.outputs.fields_container)
-local_workflow.set_output_name("mesh", merge_mesh.outputs.merges_mesh)
-
###############################################################################
-# Connect the workflows together and get the output
+# Connect the operator chains together and get the output
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
for i, server in enumerate(remote_servers):
- local_workflow.connect_with(remote_workflows[i],
- {"out": "in" + str(i), "outmesh": "inmesh" + str(i)})
+ merge_fields.connect(i, remote_displacement_operators[i], 0)
+ merge_mesh.connect(i, remote_mesh_operators[i], 0)
+
+fc = component.get_output(0, dpf.types.fields_container)
+merged_mesh = merge_mesh.get_output(0, dpf.types.meshed_region)
-fc = local_workflow.get_output("expanded", dpf.types.fields_container)
-merged_mesh = local_workflow.get_output("mesh", dpf.types.meshed_region)
merged_mesh.plot(fc.get_field_by_time_complex_ids(1, 0))
merged_mesh.plot(fc.get_field_by_time_complex_ids(20, 0))
print(fc)
-dpf.server.shutdown_all_session_servers()
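The step-wise variant differs from the previous expansion example mainly in one stage: the modal responses of the two load steps are merged by an extra merge_fields_containers operator (merge_use_pass) before expansion. A sketch of that stage, assuming response and response2fc hold the two load-step responses (their setup sits in an unchanged part of the file not shown in this diff):

merge_use_pass = ops.utility.merge_fields_containers()
merge_use_pass.inputs.fields_containers1(response)     # load step 1 response
merge_use_pass.inputs.fields_containers2(response2fc)  # load step 2 response

expansion = ops.math.modal_superposition(
    solution_in_modal_space=merge_use_pass,  # merged responses over steps
    modal_basis=merge_fields                 # merged modal basis
)
component = ops.logic.component_selector_fc(expansion, 1)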
diff --git a/examples/06-distributed-post/05-distributed_total_disp_with_operators.py b/examples/06-distributed-post/05-distributed_total_disp_with_operators.py
deleted file mode 100644
index 298404617b6..00000000000
--- a/examples/06-distributed-post/05-distributed_total_disp_with_operators.py
+++ /dev/null
@@ -1,78 +0,0 @@
-"""
-.. _ref_distributed_total_disp_op:
-
-Distributed post without client connection to remote processes with Operators
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This example shows how distributed files can be read and post processed
-on distributed processes. After remote post processing, results a merged
-on the local process.
-
-"""
-
-###############################################################################
-# Import dpf module and its examples files
-
-from ansys.dpf import core as dpf
-from ansys.dpf.core import examples
-from ansys.dpf.core import operators as ops
-
-###############################################################################
-# Create the template workflow of total displacement
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-###############################################################################
-# Configure the servers
-# ~~~~~~~~~~~~~~~~~~~~~~
-# Make a list of ip addresses an port numbers on which dpf servers are
-# started. Workflows instances will be created on each of those servers to
-# address each a different result file.
-# In this example, we will post process an analysis distributed in 2 files,
-# we will consequently require 2 remote processes
-# To make this example easier, we will start local servers here,
-# but we could get connected to any existing servers on the network.
-
-remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]
-ips = [remote_server.ip for remote_server in remote_servers]
-ports = [remote_server.port for remote_server in remote_servers]
-
-###############################################################################
-# Print the ips and ports
-print("ips:", ips)
-print("ports:", ports)
-
-###############################################################################
-# Here we show how we could send files in temporary directory if we were not
-# in shared memory
-files = examples.download_distributed_files()
-server_file_paths = [dpf.upload_file_in_tmp_folder(files[0], server=remote_servers[0]),
- dpf.upload_file_in_tmp_folder(files[1], server=remote_servers[1])]
-
-###############################################################################
-# Send workflows on servers
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~
-# Here we create new instances on the server by copies of the template workflow
-# We also connect the data sources to those workflows
-remote_operators = []
-for i, server in enumerate(remote_servers):
- displacement = ops.result.displacement(server=server)
- norm = ops.math.norm_fc(displacement, server=server)
- remote_operators.append(norm)
- ds = dpf.DataSources(server_file_paths[i], server=server)
- displacement.inputs.data_sources(ds)
-
-###############################################################################
-# Create a local workflow able to merge the results
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-merge = ops.utility.merge_fields_containers()
-
-###############################################################################
-# Connect the workflows together and get the output
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-for i, server in enumerate(remote_servers):
- merge.connect(i, remote_operators[i], 0)
-
-fc = merge.get_output(0, dpf.types.fields_container)
-print(fc)
-print(fc[0].min().data)
-print(fc[0].max().data)
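The deleted scripts ended by shutting down the session-scoped servers; when running the sketches above interactively, the same cleanup applies.

# release all servers started during this session
dpf.server.shutdown_all_session_servers()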