diff --git a/ansys/dpf/core/operators/serialization/__init__.py b/ansys/dpf/core/operators/serialization/__init__.py
index d092fbb42fc..cd613b0578c 100644
--- a/ansys/dpf/core/operators/serialization/__init__.py
+++ b/ansys/dpf/core/operators/serialization/__init__.py
@@ -14,6 +14,7 @@
from .vtk_export import vtk_export
from .vtk_to_fields import vtk_to_fields
from .migrate_file_to_vtk import migrate_file_to_vtk
+from .serialize_to_hdf5 import serialize_to_hdf5
from .workflow_import_json import workflow_import_json
from .workflow_export_json import workflow_export_json
from .vtu_export import vtu_export
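Note on the newly exported operator: a minimal usage sketch is shown below. It assumes pin 0 of serialize_to_hdf5 takes the target file path and the following pins take the DPF entities to write; the exact pin layout and any server-side HDF5 plugin requirement should be checked against the generated operator's documentation.

    import os
    import tempfile

    from ansys.dpf import core as dpf
    from ansys.dpf.core import examples
    from ansys.dpf.core import operators as ops

    # Read a displacement fields container from a bundled example result file.
    model = dpf.Model(examples.static_rst)
    disp = model.results.displacement().outputs.fields_container()

    # Assumed pin layout: pin 0 = output file path, pin 1 = first entity to serialize.
    h5_path = os.path.join(tempfile.mkdtemp(), "displacement.h5")
    h5_op = ops.serialization.serialize_to_hdf5()
    h5_op.connect(0, h5_path)
    h5_op.connect(1, disp)
    h5_op.run()  # evaluate the operator so the HDF5 file is written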
diff --git a/docs/source/examples/06-distributed-post/00-distributed_total_disp.ipynb b/docs/source/examples/06-distributed-post/00-distributed_total_disp.ipynb
deleted file mode 100644
index e9d2d394154..00000000000
--- a/docs/source/examples/06-distributed-post/00-distributed_total_disp.ipynb
+++ /dev/null
@@ -1,169 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "%matplotlib inline"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "\n\n# Post processing of displacement on distributed processes\n\nTo help understand this example the following diagram is provided. It shows\nthe operator chain used to compute the final result.\n\n
\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Import dpf module and its examples files\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "from ansys.dpf import core as dpf\nfrom ansys.dpf.core import examples\nfrom ansys.dpf.core import operators as ops"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Configure the servers\nMake a list of ip addresses and port numbers on which dpf servers are\nstarted. Operator instances will be created on each of those servers to\naddress each a different result file.\nIn this example, we will post process an analysis distributed in 2 files,\nwe will consequently require 2 remote processes.\nTo make this example easier, we will start local servers here,\nbut we could get connected to any existing servers on the network.\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]\nips = [remote_server.ip for remote_server in remote_servers]\nports = [remote_server.port for remote_server in remote_servers]"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Print the ips and ports\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "print(\"ips:\", ips)\nprint(\"ports:\", ports)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Here we show how we could send files in temporary directory if we were not\nin shared memory\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "files = examples.download_distributed_files()\nserver_file_paths = [dpf.upload_file_in_tmp_folder(files[0], server=remote_servers[0]),\n dpf.upload_file_in_tmp_folder(files[1], server=remote_servers[1])]"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Create the operators on the servers\nOn each server we create two new operators for 'displacement' and 'norm'\ncomputations and define their data sources. The displacement operator\nreceives data from the data file in its respective server. And the norm\noperator, being chained to the displacement operator, receives input from the\noutput of this one.\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "remote_operators = []\nfor i, server in enumerate(remote_servers):\n displacement = ops.result.displacement(server=server)\n norm = ops.math.norm_fc(displacement, server=server)\n remote_operators.append(norm)\n ds = dpf.DataSources(server_file_paths[i], server=server)\n displacement.inputs.data_sources(ds)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Create a merge_fields_containers operator able to merge the results\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "merge = ops.utility.merge_fields_containers()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Connect the operators together and get the output\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "for i, server in enumerate(remote_servers):\n merge.connect(i, remote_operators[i], 0)\n\nfc = merge.get_output(0, dpf.types.fields_container)\nprint(fc)\nprint(fc[0].min().data)\nprint(fc[0].max().data)"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.9.13"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
\ No newline at end of file
diff --git a/docs/source/examples/06-distributed-post/00-distributed_total_disp.py b/docs/source/examples/06-distributed-post/00-distributed_total_disp.py
deleted file mode 100644
index 49f6055799d..00000000000
--- a/docs/source/examples/06-distributed-post/00-distributed_total_disp.py
+++ /dev/null
@@ -1,81 +0,0 @@
-"""
-.. _ref_distributed_total_disp:
-
-Post processing of displacement on distributed processes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To help understand this example the following diagram is provided. It shows
-the operator chain used to compute the final result.
-
-.. image:: 00-operator-dep.svg
- :align: center
- :width: 400
-"""
-
-###############################################################################
-# Import dpf module and its examples files
-
-from ansys.dpf import core as dpf
-from ansys.dpf.core import examples
-from ansys.dpf.core import operators as ops
-
-###############################################################################
-# Configure the servers
-# ~~~~~~~~~~~~~~~~~~~~~~
-# Make a list of ip addresses and port numbers on which dpf servers are
-# started. Operator instances will be created on each of those servers to
-# address each a different result file.
-# In this example, we will post process an analysis distributed in 2 files,
-# we will consequently require 2 remote processes.
-# To make this example easier, we will start local servers here,
-# but we could get connected to any existing servers on the network.
-
-remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]
-ips = [remote_server.ip for remote_server in remote_servers]
-ports = [remote_server.port for remote_server in remote_servers]
-
-###############################################################################
-# Print the ips and ports
-print("ips:", ips)
-print("ports:", ports)
-
-###############################################################################
-# Here we show how we could send files in temporary directory if we were not
-# in shared memory
-files = examples.download_distributed_files()
-server_file_paths = [dpf.upload_file_in_tmp_folder(files[0], server=remote_servers[0]),
- dpf.upload_file_in_tmp_folder(files[1], server=remote_servers[1])]
-
-###############################################################################
-# Create the operators on the servers
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# On each server we create two new operators for 'displacement' and 'norm'
-# computations and define their data sources. The displacement operator
-# receives data from the data file in its respective server. And the norm
-# operator, being chained to the displacement operator, receives input from the
-# output of this one.
-remote_operators = []
-for i, server in enumerate(remote_servers):
- displacement = ops.result.displacement(server=server)
- norm = ops.math.norm_fc(displacement, server=server)
- remote_operators.append(norm)
- ds = dpf.DataSources(server_file_paths[i], server=server)
- displacement.inputs.data_sources(ds)
-
-###############################################################################
-# Create a merge_fields_containers operator able to merge the results
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-merge = ops.utility.merge_fields_containers()
-
-###############################################################################
-# Connect the operators together and get the output
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-for i, server in enumerate(remote_servers):
- merge.connect(i, remote_operators[i], 0)
-
-fc = merge.get_output(0, dpf.types.fields_container)
-print(fc)
-print(fc[0].min().data)
-print(fc[0].max().data)
diff --git a/docs/source/examples/06-distributed-post/00-distributed_total_disp.py.md5 b/docs/source/examples/06-distributed-post/00-distributed_total_disp.py.md5
deleted file mode 100644
index 75df16d1fc1..00000000000
--- a/docs/source/examples/06-distributed-post/00-distributed_total_disp.py.md5
+++ /dev/null
@@ -1 +0,0 @@
-c7db4566a3ddd7b357d5277698504001
\ No newline at end of file
diff --git a/docs/source/examples/06-distributed-post/00-distributed_total_disp.rst b/docs/source/examples/06-distributed-post/00-distributed_total_disp.rst
deleted file mode 100644
index 326e07a8141..00000000000
--- a/docs/source/examples/06-distributed-post/00-distributed_total_disp.rst
+++ /dev/null
@@ -1,247 +0,0 @@
-
-.. DO NOT EDIT.
-.. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY.
-.. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE:
-.. "examples\06-distributed-post\00-distributed_total_disp.py"
-.. LINE NUMBERS ARE GIVEN BELOW.
-
-.. only:: html
-
- .. note::
- :class: sphx-glr-download-link-note
-
- Click :ref:`here <sphx_glr_download_examples_06-distributed-post_00-distributed_total_disp.py>`
- to download the full example code
-
-.. rst-class:: sphx-glr-example-title
-
-.. _sphx_glr_examples_06-distributed-post_00-distributed_total_disp.py:
-
-
-.. _ref_distributed_total_disp:
-
-Post processing of displacement on distributed processes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To help understand this example the following diagram is provided. It shows
-the operator chain used to compute the final result.
-
-.. image:: 00-operator-dep.svg
- :align: center
- :width: 400
-
-.. GENERATED FROM PYTHON SOURCE LINES 16-17
-
-Import dpf module and its examples files
-
-.. GENERATED FROM PYTHON SOURCE LINES 17-22
-
-.. code-block:: default
-
-
- from ansys.dpf import core as dpf
- from ansys.dpf.core import examples
- from ansys.dpf.core import operators as ops
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 23-32
-
-Configure the servers
-~~~~~~~~~~~~~~~~~~~~~~
-Make a list of ip addresses and port numbers on which dpf servers are
-started. Operator instances will be created on each of those servers to
-address each a different result file.
-In this example, we will post process an analysis distributed in 2 files,
-we will consequently require 2 remote processes.
-To make this example easier, we will start local servers here,
-but we could get connected to any existing servers on the network.
-
-.. GENERATED FROM PYTHON SOURCE LINES 32-37
-
-.. code-block:: default
-
-
- remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]
- ips = [remote_server.ip for remote_server in remote_servers]
- ports = [remote_server.port for remote_server in remote_servers]
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 38-39
-
-Print the ips and ports
-
-.. GENERATED FROM PYTHON SOURCE LINES 39-42
-
-.. code-block:: default
-
- print("ips:", ips)
- print("ports:", ports)
-
-
-
-
-
-.. rst-class:: sphx-glr-script-out
-
- Out:
-
- .. code-block:: none
-
- ips: ['127.0.0.1', '127.0.0.1']
- ports: [50057, 50058]
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 43-45
-
-Here we show how we could send files in temporary directory if we were not
-in shared memory
-
-.. GENERATED FROM PYTHON SOURCE LINES 45-49
-
-.. code-block:: default
-
- files = examples.download_distributed_files()
- server_file_paths = [dpf.upload_file_in_tmp_folder(files[0], server=remote_servers[0]),
- dpf.upload_file_in_tmp_folder(files[1], server=remote_servers[1])]
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 50-57
-
-Create the operators on the servers
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-On each server we create two new operators for 'displacement' and 'norm'
-computations and define their data sources. The displacement operator
-receives data from the data file in its respective server. And the norm
-operator, being chained to the displacement operator, receives input from the
-output of this one.
-
-.. GENERATED FROM PYTHON SOURCE LINES 57-65
-
-.. code-block:: default
-
- remote_operators = []
- for i, server in enumerate(remote_servers):
- displacement = ops.result.displacement(server=server)
- norm = ops.math.norm_fc(displacement, server=server)
- remote_operators.append(norm)
- ds = dpf.DataSources(server_file_paths[i], server=server)
- displacement.inputs.data_sources(ds)
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 66-68
-
-Create a merge_fields_containers operator able to merge the results
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. GENERATED FROM PYTHON SOURCE LINES 68-71
-
-.. code-block:: default
-
-
- merge = ops.utility.merge_fields_containers()
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 72-74
-
-Connect the operators together and get the output
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. GENERATED FROM PYTHON SOURCE LINES 74-82
-
-.. code-block:: default
-
-
- for i, server in enumerate(remote_servers):
- merge.connect(i, remote_operators[i], 0)
-
- fc = merge.get_output(0, dpf.types.fields_container)
- print(fc)
- print(fc[0].min().data)
- print(fc[0].max().data)
-
-
-
-
-.. rst-class:: sphx-glr-script-out
-
- Out:
-
- .. code-block:: none
-
- DPF Fields Container
- with 1 field(s)
- defined on labels: time
-
- with:
- - field 0 {time: 1} with Nodal location, 1 components and 432 entities.
-
- [0.]
- [10.03242272]
-
-
-
-
-
-.. rst-class:: sphx-glr-timing
-
- **Total running time of the script:** ( 0 minutes 0.711 seconds)
-
-
-.. _sphx_glr_download_examples_06-distributed-post_00-distributed_total_disp.py:
-
-
-.. only :: html
-
- .. container:: sphx-glr-footer
- :class: sphx-glr-footer-example
-
-
-
- .. container:: sphx-glr-download sphx-glr-download-python
-
- :download:`Download Python source code: 00-distributed_total_disp.py <00-distributed_total_disp.py>`
-
-
-
- .. container:: sphx-glr-download sphx-glr-download-jupyter
-
- :download:`Download Jupyter notebook: 00-distributed_total_disp.ipynb <00-distributed_total_disp.ipynb>`
-
-
-.. only:: html
-
- .. rst-class:: sphx-glr-signature
-
- `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_
diff --git a/docs/source/examples/06-distributed-post/00-distributed_total_disp_codeobj.pickle b/docs/source/examples/06-distributed-post/00-distributed_total_disp_codeobj.pickle
deleted file mode 100644
index 0f2c875d000..00000000000
Binary files a/docs/source/examples/06-distributed-post/00-distributed_total_disp_codeobj.pickle and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/00-operator-dep.dot b/docs/source/examples/06-distributed-post/00-operator-dep.dot
deleted file mode 100644
index a99365e8260..00000000000
--- a/docs/source/examples/06-distributed-post/00-operator-dep.dot
+++ /dev/null
@@ -1,36 +0,0 @@
-digraph foo {
- graph [pad="0", nodesep="0.3", ranksep="0.3"]
- node [shape=box, style=filled, fillcolor="#ffcc00", margin="0"];
- rankdir=LR;
- splines=line;
-
- disp01 [label="displacement"];
- disp02 [label="displacement"];
- norm01 [label="norm"];
- norm02 [label="norm"];
-
- subgraph cluster_1 {
- ds01 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
-
- ds01 -> disp01 [style=dashed];
- disp01 -> norm01;
-
- label="Server 1";
- style=filled;
- fillcolor=lightgrey;
- }
-
- subgraph cluster_2 {
- ds02 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
-
- ds02 -> disp02 [style=dashed];
- disp02 -> norm02;
-
- label="Server 2";
- style=filled;
- fillcolor=lightgrey;
- }
-
- norm01 -> "merge";
- norm02 -> "merge";
-}
diff --git a/docs/source/examples/06-distributed-post/00-operator-dep.svg b/docs/source/examples/06-distributed-post/00-operator-dep.svg
deleted file mode 100644
index f05e0868fc2..00000000000
--- a/docs/source/examples/06-distributed-post/00-operator-dep.svg
+++ /dev/null
@@ -1,101 +0,0 @@
-
-
-
-
-
diff --git a/docs/source/examples/06-distributed-post/01-distributed_workflows_on_remote.ipynb b/docs/source/examples/06-distributed-post/01-distributed_workflows_on_remote.ipynb
deleted file mode 100644
index a0ec25fd289..00000000000
--- a/docs/source/examples/06-distributed-post/01-distributed_workflows_on_remote.ipynb
+++ /dev/null
@@ -1,169 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "%matplotlib inline"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "\n\n# Create custom workflow on distributed processes\nThis example shows how distributed files can be read and post processed\non distributed processes. After remote post processing,\nresults are merged on the local process. In this example, different operator\nsequences are directly created on different servers. These operators are then\nconnected together without having to care that they are on remote processes.\n\n
\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Import dpf module and its examples files\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "from ansys.dpf import core as dpf\nfrom ansys.dpf.core import examples\nfrom ansys.dpf.core import operators as ops"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Configure the servers\nTo make this example easier, we will start local servers here,\nbut we could get connected to any existing servers on the network.\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Here we show how we could send files in temporary directory if we were not\nin shared memory\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "files = examples.download_distributed_files()\nserver_file_paths = [dpf.upload_file_in_tmp_folder(files[0], server=remote_servers[0]),\n dpf.upload_file_in_tmp_folder(files[1], server=remote_servers[1])]"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "First operator chain.\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "remote_operators = []\n\nstress1 = ops.result.stress(server=remote_servers[0])\nremote_operators.append(stress1)\nds = dpf.DataSources(server_file_paths[0], server=remote_servers[0])\nstress1.inputs.data_sources(ds)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Second operator chain.\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "stress2 = ops.result.stress(server=remote_servers[1])\nmul = stress2 * 2.0\nremote_operators.append(mul)\nds = dpf.DataSources(server_file_paths[1], server=remote_servers[1])\nstress2.inputs.data_sources(ds)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Local merge operator.\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "merge = ops.utility.merge_fields_containers()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Connect the operator chains together and get the output\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "nodal = ops.averaging.to_nodal_fc(merge)\n\nmerge.connect(0, remote_operators[0], 0)\nmerge.connect(1, remote_operators[1], 0)\n\nfc = nodal.get_output(0, dpf.types.fields_container)\nprint(fc[0])\nfc[0].meshed_region.plot(fc[0])"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.9.13"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
\ No newline at end of file
diff --git a/docs/source/examples/06-distributed-post/01-distributed_workflows_on_remote.py b/docs/source/examples/06-distributed-post/01-distributed_workflows_on_remote.py
deleted file mode 100644
index aef10f94549..00000000000
--- a/docs/source/examples/06-distributed-post/01-distributed_workflows_on_remote.py
+++ /dev/null
@@ -1,74 +0,0 @@
-"""
-.. _ref_distributed_workflows_on_remote:
-
-Create custom workflow on distributed processes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This example shows how distributed files can be read and post processed
-on distributed processes. After remote post processing,
-results are merged on the local process. In this example, different operator
-sequences are directly created on different servers. These operators are then
-connected together without having to care that they are on remote processes.
-
-.. image:: 01-operator-dep.svg
- :align: center
- :width: 400
-"""
-###############################################################################
-# Import dpf module and its examples files
-
-from ansys.dpf import core as dpf
-from ansys.dpf.core import examples
-from ansys.dpf.core import operators as ops
-
-###############################################################################
-# Configure the servers
-# ~~~~~~~~~~~~~~~~~~~~~
-# To make this example easier, we will start local servers here,
-# but we could get connected to any existing servers on the network.
-
-remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]
-
-###############################################################################
-# Here we show how we could send files in temporary directory if we were not
-# in shared memory
-
-files = examples.download_distributed_files()
-server_file_paths = [dpf.upload_file_in_tmp_folder(files[0], server=remote_servers[0]),
- dpf.upload_file_in_tmp_folder(files[1], server=remote_servers[1])]
-
-###############################################################################
-# First operator chain.
-
-remote_operators = []
-
-stress1 = ops.result.stress(server=remote_servers[0])
-remote_operators.append(stress1)
-ds = dpf.DataSources(server_file_paths[0], server=remote_servers[0])
-stress1.inputs.data_sources(ds)
-
-###############################################################################
-# Second operator chain.
-
-stress2 = ops.result.stress(server=remote_servers[1])
-mul = stress2 * 2.0
-remote_operators.append(mul)
-ds = dpf.DataSources(server_file_paths[1], server=remote_servers[1])
-stress2.inputs.data_sources(ds)
-
-###############################################################################
-# Local merge operator.
-
-merge = ops.utility.merge_fields_containers()
-
-###############################################################################
-# Connect the operator chains together and get the output
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-nodal = ops.averaging.to_nodal_fc(merge)
-
-merge.connect(0, remote_operators[0], 0)
-merge.connect(1, remote_operators[1], 0)
-
-fc = nodal.get_output(0, dpf.types.fields_container)
-print(fc[0])
-fc[0].meshed_region.plot(fc[0])
diff --git a/docs/source/examples/06-distributed-post/01-distributed_workflows_on_remote.py.md5 b/docs/source/examples/06-distributed-post/01-distributed_workflows_on_remote.py.md5
deleted file mode 100644
index a34a8b54e1d..00000000000
--- a/docs/source/examples/06-distributed-post/01-distributed_workflows_on_remote.py.md5
+++ /dev/null
@@ -1 +0,0 @@
-fe17cd1215c2b1ca07b7355552feed3a
\ No newline at end of file
diff --git a/docs/source/examples/06-distributed-post/01-distributed_workflows_on_remote.rst b/docs/source/examples/06-distributed-post/01-distributed_workflows_on_remote.rst
deleted file mode 100644
index f9fc58d87cb..00000000000
--- a/docs/source/examples/06-distributed-post/01-distributed_workflows_on_remote.rst
+++ /dev/null
@@ -1,234 +0,0 @@
-
-.. DO NOT EDIT.
-.. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY.
-.. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE:
-.. "examples\06-distributed-post\01-distributed_workflows_on_remote.py"
-.. LINE NUMBERS ARE GIVEN BELOW.
-
-.. only:: html
-
- .. note::
- :class: sphx-glr-download-link-note
-
- Click :ref:`here <sphx_glr_download_examples_06-distributed-post_01-distributed_workflows_on_remote.py>`
- to download the full example code
-
-.. rst-class:: sphx-glr-example-title
-
-.. _sphx_glr_examples_06-distributed-post_01-distributed_workflows_on_remote.py:
-
-
-.. _ref_distributed_workflows_on_remote:
-
-Create custom workflow on distributed processes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This example shows how distributed files can be read and post processed
-on distributed processes. After remote post processing,
-results are merged on the local process. In this example, different operator
-sequences are directly created on different servers. These operators are then
-connected together without having to care that they are on remote processes.
-
-.. image:: 01-operator-dep.svg
- :align: center
- :width: 400
-
-.. GENERATED FROM PYTHON SOURCE LINES 17-18
-
-Import dpf module and its examples files
-
-.. GENERATED FROM PYTHON SOURCE LINES 18-23
-
-.. code-block:: default
-
-
- from ansys.dpf import core as dpf
- from ansys.dpf.core import examples
- from ansys.dpf.core import operators as ops
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 24-28
-
-Configure the servers
-~~~~~~~~~~~~~~~~~~~~~
-To make this example easier, we will start local servers here,
-but we could get connected to any existing servers on the network.
-
-.. GENERATED FROM PYTHON SOURCE LINES 28-31
-
-.. code-block:: default
-
-
- remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 32-34
-
-Here we show how we could send files in temporary directory if we were not
-in shared memory
-
-.. GENERATED FROM PYTHON SOURCE LINES 34-39
-
-.. code-block:: default
-
-
- files = examples.download_distributed_files()
- server_file_paths = [dpf.upload_file_in_tmp_folder(files[0], server=remote_servers[0]),
- dpf.upload_file_in_tmp_folder(files[1], server=remote_servers[1])]
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 40-41
-
-First operator chain.
-
-.. GENERATED FROM PYTHON SOURCE LINES 41-49
-
-.. code-block:: default
-
-
- remote_operators = []
-
- stress1 = ops.result.stress(server=remote_servers[0])
- remote_operators.append(stress1)
- ds = dpf.DataSources(server_file_paths[0], server=remote_servers[0])
- stress1.inputs.data_sources(ds)
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 50-51
-
-Second operator chain.
-
-.. GENERATED FROM PYTHON SOURCE LINES 51-58
-
-.. code-block:: default
-
-
- stress2 = ops.result.stress(server=remote_servers[1])
- mul = stress2 * 2.0
- remote_operators.append(mul)
- ds = dpf.DataSources(server_file_paths[1], server=remote_servers[1])
- stress2.inputs.data_sources(ds)
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 59-60
-
-Local merge operator.
-
-.. GENERATED FROM PYTHON SOURCE LINES 60-63
-
-.. code-block:: default
-
-
- merge = ops.utility.merge_fields_containers()
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 64-66
-
-Connect the operator chains together and get the output
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. GENERATED FROM PYTHON SOURCE LINES 66-75
-
-.. code-block:: default
-
-
- nodal = ops.averaging.to_nodal_fc(merge)
-
- merge.connect(0, remote_operators[0], 0)
- merge.connect(1, remote_operators[1], 0)
-
- fc = nodal.get_output(0, dpf.types.fields_container)
- print(fc[0])
- fc[0].meshed_region.plot(fc[0])
-
-
-
-.. image-sg:: /examples/06-distributed-post/images/sphx_glr_01-distributed_workflows_on_remote_001.png
- :alt: 01 distributed workflows on remote
- :srcset: /examples/06-distributed-post/images/sphx_glr_01-distributed_workflows_on_remote_001.png
- :class: sphx-glr-single-img
-
-
-.. rst-class:: sphx-glr-script-out
-
- Out:
-
- .. code-block:: none
-
- DPF stress_7491.964387Hz Field
- Location: Nodal
- Unit: Pa
- 432 entities
- Data:6 components and 432 elementary data
-
-
-
-
-
-
-.. rst-class:: sphx-glr-timing
-
- **Total running time of the script:** ( 0 minutes 1.630 seconds)
-
-
-.. _sphx_glr_download_examples_06-distributed-post_01-distributed_workflows_on_remote.py:
-
-
-.. only :: html
-
- .. container:: sphx-glr-footer
- :class: sphx-glr-footer-example
-
-
-
- .. container:: sphx-glr-download sphx-glr-download-python
-
- :download:`Download Python source code: 01-distributed_workflows_on_remote.py <01-distributed_workflows_on_remote.py>`
-
-
-
- .. container:: sphx-glr-download sphx-glr-download-jupyter
-
- :download:`Download Jupyter notebook: 01-distributed_workflows_on_remote.ipynb <01-distributed_workflows_on_remote.ipynb>`
-
-
-.. only:: html
-
- .. rst-class:: sphx-glr-signature
-
- `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_
diff --git a/docs/source/examples/06-distributed-post/01-distributed_workflows_on_remote_codeobj.pickle b/docs/source/examples/06-distributed-post/01-distributed_workflows_on_remote_codeobj.pickle
deleted file mode 100644
index 43a9c2e54cb..00000000000
Binary files a/docs/source/examples/06-distributed-post/01-distributed_workflows_on_remote_codeobj.pickle and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/01-operator-dep.dot b/docs/source/examples/06-distributed-post/01-operator-dep.dot
deleted file mode 100644
index c39ea927ffd..00000000000
--- a/docs/source/examples/06-distributed-post/01-operator-dep.dot
+++ /dev/null
@@ -1,30 +0,0 @@
-digraph foo {
- graph [pad="0", nodesep="0.3", ranksep="0.3"]
- node [shape=box, style=filled, fillcolor="#ffcc00", margin="0"];
- rankdir=LR;
- splines=line;
-
- subgraph cluster_1 {
- ds01 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
-
- ds01 -> stress1 [style=dashed];
-
- label="Server 1";
- style=filled;
- fillcolor=lightgrey;
- }
-
- subgraph cluster_2 {
- ds02 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
-
- ds02 -> stress2 [style=dashed];
- stress2 -> mul;
-
- label="Server 2";
- style=filled;
- fillcolor=lightgrey;
- }
-
- stress1 -> "merge";
- mul -> "merge";
-}
diff --git a/docs/source/examples/06-distributed-post/01-operator-dep.svg b/docs/source/examples/06-distributed-post/01-operator-dep.svg
deleted file mode 100644
index c371cd09e9c..00000000000
--- a/docs/source/examples/06-distributed-post/01-operator-dep.svg
+++ /dev/null
@@ -1,89 +0,0 @@
-
-
-
-
-
diff --git a/docs/source/examples/06-distributed-post/02-distributed-msup_expansion.ipynb b/docs/source/examples/06-distributed-post/02-distributed-msup_expansion.ipynb
deleted file mode 100644
index c2eb2b0bcd4..00000000000
--- a/docs/source/examples/06-distributed-post/02-distributed-msup_expansion.ipynb
+++ /dev/null
@@ -1,169 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "%matplotlib inline"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "\n\n# Distributed modal superposition\nThis example shows how distributed files can be read and expanded\non distributed processes. The modal basis (2 distributed files) is read\non 2 remote servers and the modal response reading and the expansion is\ndone on a third server.\n\nTo help understand this example the following diagram is provided. It shows\nthe operator chain used to compute the final result.\n\n
\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Import dpf module and its examples files.\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "from ansys.dpf import core as dpf\nfrom ansys.dpf.core import examples\nfrom ansys.dpf.core import operators as ops"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Configure the servers\nMake a list of ip addresses and port numbers on which dpf servers are\nstarted. Operator instances will be created on each of those servers to\naddress each a different result file.\nIn this example, we will post process an analysis distributed in 2 files,\nwe will consequently require 2 remote processes.\nTo make this example easier, we will start local servers here,\nbut we could get connected to any existing servers on the network.\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]\nips = [remote_server.ip for remote_server in remote_servers]\nports = [remote_server.port for remote_server in remote_servers]"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Print the ips and ports.\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "print(\"ips:\", ips)\nprint(\"ports:\", ports)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Choose the file path.\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "base_path = examples.distributed_msup_folder\nfiles = [base_path + r'/file0.mode', base_path + r'/file1.mode']\nfiles_aux = [base_path + r'/file0.rst', base_path + r'/file1.rst']"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Create the operators on the servers\nOn each server we create two new operators, one for 'displacement' computations\nand a 'mesh_provider' operator and then define their data sources. The displacement\nand mesh_provider operators receive data from their respective data files on each server.\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "remote_displacement_operators = []\nremote_mesh_operators = []\nfor i, server in enumerate(remote_servers):\n displacement = ops.result.displacement(server=server)\n mesh = ops.mesh.mesh_provider(server=server)\n remote_displacement_operators.append(displacement)\n remote_mesh_operators.append(mesh)\n ds = dpf.DataSources(files[i], server=server)\n ds.add_file_path(files_aux[i])\n displacement.inputs.data_sources(ds)\n mesh.inputs.data_sources(ds)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Create a local operators chain for expansion\nIn the following series of operators we merge the modal basis, the meshes, read\nthe modal response and expand the modal response with the modal basis.\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "merge_fields = ops.utility.merge_fields_containers()\nmerge_mesh = ops.utility.merge_meshes()\n\nds = dpf.DataSources(base_path + r'/file_load_1.rfrq')\nresponse = ops.result.displacement(data_sources=ds)\nresponse.inputs.mesh(merge_mesh.outputs.merges_mesh)\n\nexpansion = ops.math.modal_superposition(\n solution_in_modal_space=response,\n modal_basis=merge_fields\n)\ncomponent = ops.logic.component_selector_fc(expansion, 1)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Connect the operator chains together and get the output\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "for i, server in enumerate(remote_servers):\n merge_fields.connect(i, remote_displacement_operators[i], 0)\n merge_mesh.connect(i, remote_mesh_operators[i], 0)\n\nfc = component.get_output(0, dpf.types.fields_container)\nmerged_mesh = merge_mesh.get_output(0, dpf.types.meshed_region)\n\nmerged_mesh.plot(fc.get_field_by_time_complex_ids(1, 0))\nmerged_mesh.plot(fc.get_field_by_time_complex_ids(10, 0))\nprint(fc)"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.9.13"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
\ No newline at end of file
diff --git a/docs/source/examples/06-distributed-post/02-distributed-msup_expansion.py b/docs/source/examples/06-distributed-post/02-distributed-msup_expansion.py
deleted file mode 100644
index bbf4e0862dd..00000000000
--- a/docs/source/examples/06-distributed-post/02-distributed-msup_expansion.py
+++ /dev/null
@@ -1,102 +0,0 @@
-"""
-.. _ref_distributed_msup:
-
-Distributed modal superposition
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This example shows how distributed files can be read and expanded
-on distributed processes. The modal basis (2 distributed files) is read
-on 2 remote servers and the modal response reading and the expansion is
-done on a third server.
-
-To help understand this example the following diagram is provided. It shows
-the operator chain used to compute the final result.
-
-.. image:: 02-operator-dep.svg
- :align: center
- :width: 800
-"""
-
-###############################################################################
-# Import dpf module and its examples files.
-
-from ansys.dpf import core as dpf
-from ansys.dpf.core import examples
-from ansys.dpf.core import operators as ops
-
-###############################################################################
-# Configure the servers
-# ~~~~~~~~~~~~~~~~~~~~~
-# Make a list of ip addresses and port numbers on which dpf servers are
-# started. Operator instances will be created on each of those servers to
-# address each a different result file.
-# In this example, we will post process an analysis distributed in 2 files,
-# we will consequently require 2 remote processes.
-# To make this example easier, we will start local servers here,
-# but we could get connected to any existing servers on the network.
-
-remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]
-ips = [remote_server.ip for remote_server in remote_servers]
-ports = [remote_server.port for remote_server in remote_servers]
-
-###############################################################################
-# Print the ips and ports.
-print("ips:", ips)
-print("ports:", ports)
-
-###############################################################################
-# Choose the file path.
-
-base_path = examples.distributed_msup_folder
-files = [base_path + r'/file0.mode', base_path + r'/file1.mode']
-files_aux = [base_path + r'/file0.rst', base_path + r'/file1.rst']
-
-###############################################################################
-# Create the operators on the servers
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# On each server we create two new operators, one for 'displacement' computations
-# and a 'mesh_provider' operator and then define their data sources. The displacement
-# and mesh_provider operators receive data from their respective data files on each server.
-remote_displacement_operators = []
-remote_mesh_operators = []
-for i, server in enumerate(remote_servers):
- displacement = ops.result.displacement(server=server)
- mesh = ops.mesh.mesh_provider(server=server)
- remote_displacement_operators.append(displacement)
- remote_mesh_operators.append(mesh)
- ds = dpf.DataSources(files[i], server=server)
- ds.add_file_path(files_aux[i])
- displacement.inputs.data_sources(ds)
- mesh.inputs.data_sources(ds)
-
-###############################################################################
-# Create a local operators chain for expansion
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# In the following series of operators we merge the modal basis, the meshes, read
-# the modal response and expand the modal response with the modal basis.
-
-merge_fields = ops.utility.merge_fields_containers()
-merge_mesh = ops.utility.merge_meshes()
-
-ds = dpf.DataSources(base_path + r'/file_load_1.rfrq')
-response = ops.result.displacement(data_sources=ds)
-response.inputs.mesh(merge_mesh.outputs.merges_mesh)
-
-expansion = ops.math.modal_superposition(
- solution_in_modal_space=response,
- modal_basis=merge_fields
-)
-component = ops.logic.component_selector_fc(expansion, 1)
-
-###############################################################################
-# Connect the operator chains together and get the output
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-for i, server in enumerate(remote_servers):
- merge_fields.connect(i, remote_displacement_operators[i], 0)
- merge_mesh.connect(i, remote_mesh_operators[i], 0)
-
-fc = component.get_output(0, dpf.types.fields_container)
-merged_mesh = merge_mesh.get_output(0, dpf.types.meshed_region)
-
-merged_mesh.plot(fc.get_field_by_time_complex_ids(1, 0))
-merged_mesh.plot(fc.get_field_by_time_complex_ids(10, 0))
-print(fc)
diff --git a/docs/source/examples/06-distributed-post/02-distributed-msup_expansion.py.md5 b/docs/source/examples/06-distributed-post/02-distributed-msup_expansion.py.md5
deleted file mode 100644
index 64b9c205aac..00000000000
--- a/docs/source/examples/06-distributed-post/02-distributed-msup_expansion.py.md5
+++ /dev/null
@@ -1 +0,0 @@
-12fa856147254622aeff3ffed4456802
\ No newline at end of file
diff --git a/docs/source/examples/06-distributed-post/02-distributed-msup_expansion.rst b/docs/source/examples/06-distributed-post/02-distributed-msup_expansion.rst
deleted file mode 100644
index b1d90c0ce9d..00000000000
--- a/docs/source/examples/06-distributed-post/02-distributed-msup_expansion.rst
+++ /dev/null
@@ -1,302 +0,0 @@
-
-.. DO NOT EDIT.
-.. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY.
-.. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE:
-.. "examples\06-distributed-post\02-distributed-msup_expansion.py"
-.. LINE NUMBERS ARE GIVEN BELOW.
-
-.. only:: html
-
- .. note::
- :class: sphx-glr-download-link-note
-
- Click :ref:`here <sphx_glr_download_examples_06-distributed-post_02-distributed-msup_expansion.py>`
- to download the full example code
-
-.. rst-class:: sphx-glr-example-title
-
-.. _sphx_glr_examples_06-distributed-post_02-distributed-msup_expansion.py:
-
-
-.. _ref_distributed_msup:
-
-Distributed modal superposition
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This example shows how distributed files can be read and expanded
-on distributed processes. The modal basis (2 distributed files) is read
-on 2 remote servers and the modal response reading and the expansion is
-done on a third server.
-
-To help understand this example the following diagram is provided. It shows
-the operator chain used to compute the final result.
-
-.. image:: 02-operator-dep.svg
- :align: center
- :width: 800
-
-.. GENERATED FROM PYTHON SOURCE LINES 20-21
-
-Import dpf module and its examples files.
-
-.. GENERATED FROM PYTHON SOURCE LINES 21-26
-
-.. code-block:: default
-
-
- from ansys.dpf import core as dpf
- from ansys.dpf.core import examples
- from ansys.dpf.core import operators as ops
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 27-36
-
-Configure the servers
-~~~~~~~~~~~~~~~~~~~~~
-Make a list of ip addresses and port numbers on which dpf servers are
-started. Operator instances will be created on each of those servers to
-address each a different result file.
-In this example, we will post process an analysis distributed in 2 files,
-we will consequently require 2 remote processes.
-To make this example easier, we will start local servers here,
-but we could get connected to any existing servers on the network.
-
-.. GENERATED FROM PYTHON SOURCE LINES 36-41
-
-.. code-block:: default
-
-
- remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]
- ips = [remote_server.ip for remote_server in remote_servers]
- ports = [remote_server.port for remote_server in remote_servers]
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 42-43
-
-Print the ips and ports.
-
-.. GENERATED FROM PYTHON SOURCE LINES 43-46
-
-.. code-block:: default
-
- print("ips:", ips)
- print("ports:", ports)
-
-
-
-
-
-.. rst-class:: sphx-glr-script-out
-
- Out:
-
- .. code-block:: none
-
- ips: ['127.0.0.1', '127.0.0.1']
- ports: [50057, 50058]
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 47-48
-
-Choose the file path.
-
-.. GENERATED FROM PYTHON SOURCE LINES 48-53
-
-.. code-block:: default
-
-
- base_path = examples.distributed_msup_folder
- files = [base_path + r'/file0.mode', base_path + r'/file1.mode']
- files_aux = [base_path + r'/file0.rst', base_path + r'/file1.rst']
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 54-59
-
-Create the operators on the servers
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-On each server we create two new operators, one for 'displacement' computations
-and a 'mesh_provider' operator and then define their data sources. The displacement
-and mesh_provider operators receive data from their respective data files on each server.
-
-.. GENERATED FROM PYTHON SOURCE LINES 59-71
-
-.. code-block:: default
-
- remote_displacement_operators = []
- remote_mesh_operators = []
- for i, server in enumerate(remote_servers):
- displacement = ops.result.displacement(server=server)
- mesh = ops.mesh.mesh_provider(server=server)
- remote_displacement_operators.append(displacement)
- remote_mesh_operators.append(mesh)
- ds = dpf.DataSources(files[i], server=server)
- ds.add_file_path(files_aux[i])
- displacement.inputs.data_sources(ds)
- mesh.inputs.data_sources(ds)
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 72-76
-
-Create a local operators chain for expansion
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-In the following series of operators we merge the modal basis, the meshes, read
-the modal response and expand the modal response with the modal basis.
-
-.. GENERATED FROM PYTHON SOURCE LINES 76-90
-
-.. code-block:: default
-
-
- merge_fields = ops.utility.merge_fields_containers()
- merge_mesh = ops.utility.merge_meshes()
-
- ds = dpf.DataSources(base_path + r'/file_load_1.rfrq')
- response = ops.result.displacement(data_sources=ds)
- response.inputs.mesh(merge_mesh.outputs.merges_mesh)
-
- expansion = ops.math.modal_superposition(
- solution_in_modal_space=response,
- modal_basis=merge_fields
- )
- component = ops.logic.component_selector_fc(expansion, 1)
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 91-93
-
-Connect the operator chains together and get the output
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. GENERATED FROM PYTHON SOURCE LINES 93-103
-
-.. code-block:: default
-
- for i, server in enumerate(remote_servers):
- merge_fields.connect(i, remote_displacement_operators[i], 0)
- merge_mesh.connect(i, remote_mesh_operators[i], 0)
-
- fc = component.get_output(0, dpf.types.fields_container)
- merged_mesh = merge_mesh.get_output(0, dpf.types.meshed_region)
-
- merged_mesh.plot(fc.get_field_by_time_complex_ids(1, 0))
- merged_mesh.plot(fc.get_field_by_time_complex_ids(10, 0))
- print(fc)
-
-
-
-.. rst-class:: sphx-glr-horizontal
-
-
- *
-
- .. image-sg:: /examples/06-distributed-post/images/sphx_glr_02-distributed-msup_expansion_001.png
- :alt: 02 distributed msup expansion
- :srcset: /examples/06-distributed-post/images/sphx_glr_02-distributed-msup_expansion_001.png
- :class: sphx-glr-multi-img
-
- *
-
- .. image-sg:: /examples/06-distributed-post/images/sphx_glr_02-distributed-msup_expansion_002.png
- :alt: 02 distributed msup expansion
- :srcset: /examples/06-distributed-post/images/sphx_glr_02-distributed-msup_expansion_002.png
- :class: sphx-glr-multi-img
-
-
-.. rst-class:: sphx-glr-script-out
-
- Out:
-
- .. code-block:: none
-
- DPF Fields Container
- with 20 field(s)
- defined on labels: complex time
-
- with:
- - field 0 {complex: 0, time: 1} with Nodal location, 1 components and 1065 entities.
- - field 1 {complex: 1, time: 1} with Nodal location, 1 components and 1065 entities.
- - field 2 {complex: 0, time: 2} with Nodal location, 1 components and 1065 entities.
- - field 3 {complex: 1, time: 2} with Nodal location, 1 components and 1065 entities.
- - field 4 {complex: 0, time: 3} with Nodal location, 1 components and 1065 entities.
- - field 5 {complex: 1, time: 3} with Nodal location, 1 components and 1065 entities.
- - field 6 {complex: 0, time: 4} with Nodal location, 1 components and 1065 entities.
- - field 7 {complex: 1, time: 4} with Nodal location, 1 components and 1065 entities.
- - field 8 {complex: 0, time: 5} with Nodal location, 1 components and 1065 entities.
- - field 9 {complex: 1, time: 5} with Nodal location, 1 components and 1065 entities.
- - field 10 {complex: 0, time: 6} with Nodal location, 1 components and 1065 entities.
- - field 11 {complex: 1, time: 6} with Nodal location, 1 components and 1065 entities.
- - field 12 {complex: 0, time: 7} with Nodal location, 1 components and 1065 entities.
- - field 13 {complex: 1, time: 7} with Nodal location, 1 components and 1065 entities.
- - field 14 {complex: 0, time: 8} with Nodal location, 1 components and 1065 entities.
- - field 15 {complex: 1, time: 8} with Nodal location, 1 components and 1065 entities.
- - field 16 {complex: 0, time: 9} with Nodal location, 1 components and 1065 entities.
- - field 17 {complex: 1, time: 9} with Nodal location, 1 components and 1065 entities.
- - field 18 {complex: 0, time: 10} with Nodal location, 1 components and 1065 entities.
- - field 19 {complex: 1, time: 10} with Nodal location, 1 components and 1065 entities.
-
-
-
-
-
-
-.. rst-class:: sphx-glr-timing
-
- **Total running time of the script:** ( 0 minutes 5.095 seconds)
-
-
-.. _sphx_glr_download_examples_06-distributed-post_02-distributed-msup_expansion.py:
-
-
-.. only :: html
-
- .. container:: sphx-glr-footer
- :class: sphx-glr-footer-example
-
-
-
- .. container:: sphx-glr-download sphx-glr-download-python
-
- :download:`Download Python source code: 02-distributed-msup_expansion.py <02-distributed-msup_expansion.py>`
-
-
-
- .. container:: sphx-glr-download sphx-glr-download-jupyter
-
- :download:`Download Jupyter notebook: 02-distributed-msup_expansion.ipynb <02-distributed-msup_expansion.ipynb>`
-
-
-.. only:: html
-
- .. rst-class:: sphx-glr-signature
-
- `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_
diff --git a/docs/source/examples/06-distributed-post/02-distributed-msup_expansion_codeobj.pickle b/docs/source/examples/06-distributed-post/02-distributed-msup_expansion_codeobj.pickle
deleted file mode 100644
index 511b6402715..00000000000
Binary files a/docs/source/examples/06-distributed-post/02-distributed-msup_expansion_codeobj.pickle and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/02-operator-dep.dot b/docs/source/examples/06-distributed-post/02-operator-dep.dot
deleted file mode 100644
index 9f0ae38443b..00000000000
--- a/docs/source/examples/06-distributed-post/02-operator-dep.dot
+++ /dev/null
@@ -1,51 +0,0 @@
-digraph foo {
- graph [pad="0", nodesep="0.3", ranksep="0.3"]
- node [shape=box, style=filled, fillcolor="#ffcc00", margin="0"];
- rankdir=LR;
- splines=line;
-
- disp01 [label="displacement"];
- disp02 [label="displacement"];
- mesh01 [label="mesh"];
- mesh02 [label="mesh"];
-
- subgraph cluster_1 {
- ds01 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
-
- disp01; mesh01;
-
- ds01 -> disp01 [style=dashed];
- ds01 -> mesh01 [style=dashed];
-
- label="Server 1";
- style=filled;
- fillcolor=lightgrey;
- }
-
- subgraph cluster_2 {
- ds02 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
-
-
- disp02; mesh02;
-
- ds02 -> disp02 [style=dashed];
- ds02 -> mesh02 [style=dashed];
-
- label="Server 2";
- style=filled;
- fillcolor=lightgrey;
- }
-
- disp01 -> "merge_fields";
- mesh01 -> "merged_mesh";
- disp02 -> "merge_fields";
- mesh02 -> "merged_mesh";
-
- ds03 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
- ds03 -> "response" [style=dashed];
-
- "merged_mesh" -> "response";
- "response" -> "expansion";
- "merge_fields" -> "expansion";
- "expansion" -> "component";
-}
diff --git a/docs/source/examples/06-distributed-post/02-operator-dep.svg b/docs/source/examples/06-distributed-post/02-operator-dep.svg
deleted file mode 100644
index fd04964afca..00000000000
--- a/docs/source/examples/06-distributed-post/02-operator-dep.svg
+++ /dev/null
@@ -1,173 +0,0 @@
-
-
-
-
-
diff --git a/docs/source/examples/06-distributed-post/03-distributed-msup_expansion_steps.ipynb b/docs/source/examples/06-distributed-post/03-distributed-msup_expansion_steps.ipynb
deleted file mode 100644
index 300e4d539bb..00000000000
--- a/docs/source/examples/06-distributed-post/03-distributed-msup_expansion_steps.ipynb
+++ /dev/null
@@ -1,169 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "%matplotlib inline"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "\n\n# Distributed msup distributed modal response\nThis example shows how distributed files can be read and expanded\non distributed processes. The modal basis (2 distributed files) is read\non 2 remote servers and the modal response (2 distributed files) reading and the expansion is\ndone on a third server.\n\nTo help understand this example the following diagram is provided. It shows\nthe operator chain used to compute the final result.\n\n
\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Import dpf module and its examples files.\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "import os.path\n\nfrom ansys.dpf import core as dpf\nfrom ansys.dpf.core import examples\nfrom ansys.dpf.core import operators as ops"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Configure the servers\nMake a list of ip addresses and port numbers on which dpf servers are\nstarted. Operator instances will be created on each of those servers to\naddress each a different result file.\nIn this example, we will post process an analysis distributed in 2 files,\nwe will consequently require 2 remote processes\nTo make this example easier, we will start local servers here,\nbut we could get connected to any existing servers on the network.\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]\nips = [remote_server.ip for remote_server in remote_servers]\nports = [remote_server.port for remote_server in remote_servers]"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Print the ips and ports.\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "print(\"ips:\", ips)\nprint(\"ports:\", ports)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Choose the file path.\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "base_path = examples.distributed_msup_folder\nfiles = [os.path.join(base_path, \"file0.mode\"), os.path.join(base_path, \"file1.mode\")]\nfiles_aux = [os.path.join(base_path, \"file0.rst\"), os.path.join(base_path, \"file1.rst\")]"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Create the operators on the servers\nOn each server we create two new operators, one for 'displacement' computations\nand a 'mesh_provider' operator, and then define their data sources. The displacement\nand mesh_provider operators receive data from their respective data files on each server.\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "remote_displacement_operators = []\nremote_mesh_operators = []\nfor i, server in enumerate(remote_servers):\n displacement = ops.result.displacement(server=server)\n mesh = ops.mesh.mesh_provider(server=server)\n remote_displacement_operators.append(displacement)\n remote_mesh_operators.append(mesh)\n ds = dpf.DataSources(files[i], server=server)\n ds.add_file_path(files_aux[i])\n displacement.inputs.data_sources(ds)\n mesh.inputs.data_sources(ds)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Create a local operators chain for expansion\nIn the following series of operators we merge the modal basis, the meshes, read\nthe modal response and expand the modal response with the modal basis.\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "merge_fields = ops.utility.merge_fields_containers()\nmerge_mesh = ops.utility.merge_meshes()\n\nds = dpf.DataSources(os.path.join(base_path, \"file_load_1.rfrq\"))\nresponse = ops.result.displacement(data_sources=ds)\nresponse.inputs.mesh(merge_mesh.outputs.merges_mesh)\n\nds = dpf.DataSources(os.path.join(base_path, \"file_load_2.rfrq\"))\nfrom os import walk\n\nfor (dirpath, dirnames, filenames) in walk(base_path):\n print(filenames)\nresponse2 = ops.result.displacement(data_sources=ds)\nresponse2fc = response2.outputs.fields_container()\nresponse2fc.time_freq_support.time_frequencies.scoping.set_id(0, 2)\n\nmerge_use_pass = ops.utility.merge_fields_containers()\nmerge_use_pass.inputs.fields_containers1(response)\nmerge_use_pass.inputs.fields_containers2(response2fc)\n\nexpansion = ops.math.modal_superposition(\n solution_in_modal_space=merge_use_pass,\n modal_basis=merge_fields\n )\ncomponent = ops.logic.component_selector_fc(expansion, 1)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Connect the operator chains together and get the output\n\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
- "source": [
- "for i, server in enumerate(remote_servers):\n merge_fields.connect(i, remote_displacement_operators[i], 0)\n merge_mesh.connect(i, remote_mesh_operators[i], 0)\n\nfc = component.get_output(0, dpf.types.fields_container)\nmerged_mesh = merge_mesh.get_output(0, dpf.types.meshed_region)\n\nmerged_mesh.plot(fc.get_field_by_time_complex_ids(1, 0))\nmerged_mesh.plot(fc.get_field_by_time_complex_ids(20, 0))\nprint(fc)"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.9.13"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
\ No newline at end of file
diff --git a/docs/source/examples/06-distributed-post/03-distributed-msup_expansion_steps.py b/docs/source/examples/06-distributed-post/03-distributed-msup_expansion_steps.py
deleted file mode 100644
index 860e08c9e4b..00000000000
--- a/docs/source/examples/06-distributed-post/03-distributed-msup_expansion_steps.py
+++ /dev/null
@@ -1,115 +0,0 @@
-"""
-.. _ref_distributed_msup_steps:
-
-Distributed MSUP with distributed modal response
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This example shows how distributed files can be read and expanded
-on distributed processes. The modal basis (2 distributed files) is read
-on 2 remote servers, while the modal response (2 distributed files) is read
-and the expansion is done on a third server.
-
-To help understand this example the following diagram is provided. It shows
-the operator chain used to compute the final result.
-
-.. image:: 03-operator-dep.svg
- :align: center
- :width: 800
-"""
-
-###############################################################################
-# Import dpf module and its examples files.
-import os.path
-
-from ansys.dpf import core as dpf
-from ansys.dpf.core import examples
-from ansys.dpf.core import operators as ops
-
-###############################################################################
-# Configure the servers
-# ~~~~~~~~~~~~~~~~~~~~~
-# Make a list of ip addresses and port numbers on which dpf servers are
-# started. Operator instances will be created on each of those servers so
-# that each addresses a different result file.
-# In this example, we will post-process an analysis distributed in 2 files,
-# so we will need 2 remote processes.
-# To make this example easier, we will start local servers here,
-# but we could connect to any existing servers on the network.
-remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]
-ips = [remote_server.ip for remote_server in remote_servers]
-ports = [remote_server.port for remote_server in remote_servers]
-
-###############################################################################
-# Print the ips and ports.
-print("ips:", ips)
-print("ports:", ports)
-
-###############################################################################
-# Choose the file path.
-
-base_path = examples.distributed_msup_folder
-files = [os.path.join(base_path, "file0.mode"), os.path.join(base_path, "file1.mode")]
-files_aux = [os.path.join(base_path, "file0.rst"), os.path.join(base_path, "file1.rst")]
-
-###############################################################################
-# Create the operators on the servers
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# On each server we create two new operators, one for 'displacement' computations
-# and a 'mesh_provider' operator, and then define their data sources. The displacement
-# and mesh_provider operators receive data from their respective data files on each server.
-remote_displacement_operators = []
-remote_mesh_operators = []
-for i, server in enumerate(remote_servers):
- displacement = ops.result.displacement(server=server)
- mesh = ops.mesh.mesh_provider(server=server)
- remote_displacement_operators.append(displacement)
- remote_mesh_operators.append(mesh)
- ds = dpf.DataSources(files[i], server=server)
- ds.add_file_path(files_aux[i])
- displacement.inputs.data_sources(ds)
- mesh.inputs.data_sources(ds)
-
-###############################################################################
-# Create a local operators chain for expansion
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# In the following series of operators we merge the modal basis, the meshes, read
-# the modal response and expand the modal response with the modal basis.
-
-merge_fields = ops.utility.merge_fields_containers()
-merge_mesh = ops.utility.merge_meshes()
-
-ds = dpf.DataSources(os.path.join(base_path, "file_load_1.rfrq"))
-response = ops.result.displacement(data_sources=ds)
-response.inputs.mesh(merge_mesh.outputs.merges_mesh)
-
-ds = dpf.DataSources(os.path.join(base_path, "file_load_2.rfrq"))
-from os import walk
-
-for (dirpath, dirnames, filenames) in walk(base_path):
- print(filenames)
-response2 = ops.result.displacement(data_sources=ds)
-response2fc = response2.outputs.fields_container()
-response2fc.time_freq_support.time_frequencies.scoping.set_id(0, 2)
-
-merge_use_pass = ops.utility.merge_fields_containers()
-merge_use_pass.inputs.fields_containers1(response)
-merge_use_pass.inputs.fields_containers2(response2fc)
-
-expansion = ops.math.modal_superposition(
- solution_in_modal_space=merge_use_pass,
- modal_basis=merge_fields
- )
-component = ops.logic.component_selector_fc(expansion, 1)
-
-###############################################################################
-# Connect the operator chains together and get the output
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-for i, server in enumerate(remote_servers):
- merge_fields.connect(i, remote_displacement_operators[i], 0)
- merge_mesh.connect(i, remote_mesh_operators[i], 0)
-
-fc = component.get_output(0, dpf.types.fields_container)
-merged_mesh = merge_mesh.get_output(0, dpf.types.meshed_region)
-
-merged_mesh.plot(fc.get_field_by_time_complex_ids(1, 0))
-merged_mesh.plot(fc.get_field_by_time_complex_ids(20, 0))
-print(fc)
diff --git a/docs/source/examples/06-distributed-post/03-distributed-msup_expansion_steps.py.md5 b/docs/source/examples/06-distributed-post/03-distributed-msup_expansion_steps.py.md5
deleted file mode 100644
index 3b174cdcdd1..00000000000
--- a/docs/source/examples/06-distributed-post/03-distributed-msup_expansion_steps.py.md5
+++ /dev/null
@@ -1 +0,0 @@
-3b3383f39064e84e39438992e3964441
\ No newline at end of file
diff --git a/docs/source/examples/06-distributed-post/03-distributed-msup_expansion_steps.rst b/docs/source/examples/06-distributed-post/03-distributed-msup_expansion_steps.rst
deleted file mode 100644
index a7fc7455a62..00000000000
--- a/docs/source/examples/06-distributed-post/03-distributed-msup_expansion_steps.rst
+++ /dev/null
@@ -1,343 +0,0 @@
-
-.. DO NOT EDIT.
-.. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY.
-.. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE:
-.. "examples\06-distributed-post\03-distributed-msup_expansion_steps.py"
-.. LINE NUMBERS ARE GIVEN BELOW.
-
-.. only:: html
-
- .. note::
- :class: sphx-glr-download-link-note
-
- Click :ref:`here `
- to download the full example code
-
-.. rst-class:: sphx-glr-example-title
-
-.. _sphx_glr_examples_06-distributed-post_03-distributed-msup_expansion_steps.py:
-
-
-.. _ref_distributed_msup_steps:
-
-Distributed MSUP with distributed modal response
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This example shows how distributed files can be read and expanded
-on distributed processes. The modal basis (2 distributed files) is read
-on 2 remote servers, while the modal response (2 distributed files) is read
-and the expansion is done on a third server.
-
-To help understand this example the following diagram is provided. It shows
-the operator chain used to compute the final result.
-
-.. image:: 03-operator-dep.svg
- :align: center
- :width: 800
-
-.. GENERATED FROM PYTHON SOURCE LINES 20-21
-
-Import dpf module and its examples files.
-
-.. GENERATED FROM PYTHON SOURCE LINES 21-27
-
-.. code-block:: default
-
- import os.path
-
- from ansys.dpf import core as dpf
- from ansys.dpf.core import examples
- from ansys.dpf.core import operators as ops
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 28-37
-
-Configure the servers
-~~~~~~~~~~~~~~~~~~~~~
-Make a list of ip addresses and port numbers on which dpf servers are
-started. Operator instances will be created on each of those servers so that
-each addresses a different result file.
-In this example, we will post-process an analysis distributed in 2 files,
-so we will need 2 remote processes.
-To make this example easier, we will start local servers here,
-but we could connect to any existing servers on the network.
-
-.. GENERATED FROM PYTHON SOURCE LINES 37-41
-
-.. code-block:: default
-
- remote_servers = [dpf.start_local_server(as_global=False), dpf.start_local_server(as_global=False)]
- ips = [remote_server.ip for remote_server in remote_servers]
- ports = [remote_server.port for remote_server in remote_servers]
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 42-43
-
-Print the ips and ports.
-
-.. GENERATED FROM PYTHON SOURCE LINES 43-46
-
-.. code-block:: default
-
- print("ips:", ips)
- print("ports:", ports)
-
-
-
-
-
-.. rst-class:: sphx-glr-script-out
-
- Out:
-
- .. code-block:: none
-
- ips: ['127.0.0.1', '127.0.0.1']
- ports: [50057, 50058]
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 47-48
-
-Choose the file path.
-
-.. GENERATED FROM PYTHON SOURCE LINES 48-53
-
-.. code-block:: default
-
-
- base_path = examples.distributed_msup_folder
- files = [os.path.join(base_path, "file0.mode"), os.path.join(base_path, "file1.mode")]
- files_aux = [os.path.join(base_path, "file0.rst"), os.path.join(base_path, "file1.rst")]
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 54-59
-
-Create the operators on the servers
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-On each server we create two new operators, one for 'displacement' computations
-and a 'mesh_provider' operator, and then define their data sources. The displacement
-and mesh_provider operators receive data from their respective data files on each server.
-
-.. GENERATED FROM PYTHON SOURCE LINES 59-71
-
-.. code-block:: default
-
- remote_displacement_operators = []
- remote_mesh_operators = []
- for i, server in enumerate(remote_servers):
- displacement = ops.result.displacement(server=server)
- mesh = ops.mesh.mesh_provider(server=server)
- remote_displacement_operators.append(displacement)
- remote_mesh_operators.append(mesh)
- ds = dpf.DataSources(files[i], server=server)
- ds.add_file_path(files_aux[i])
- displacement.inputs.data_sources(ds)
- mesh.inputs.data_sources(ds)
-
-
-
-
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 72-76
-
-Create a local operators chain for expansion
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-In the following series of operators we merge the modal basis, the meshes, read
-the modal response and expand the modal response with the modal basis.
-
-.. GENERATED FROM PYTHON SOURCE LINES 76-103
-
-.. code-block:: default
-
-
- merge_fields = ops.utility.merge_fields_containers()
- merge_mesh = ops.utility.merge_meshes()
-
- ds = dpf.DataSources(os.path.join(base_path, "file_load_1.rfrq"))
- response = ops.result.displacement(data_sources=ds)
- response.inputs.mesh(merge_mesh.outputs.merges_mesh)
-
- ds = dpf.DataSources(os.path.join(base_path, "file_load_2.rfrq"))
- from os import walk
-
- for (dirpath, dirnames, filenames) in walk(base_path):
- print(filenames)
- response2 = ops.result.displacement(data_sources=ds)
- response2fc = response2.outputs.fields_container()
- response2fc.time_freq_support.time_frequencies.scoping.set_id(0, 2)
-
- merge_use_pass = ops.utility.merge_fields_containers()
- merge_use_pass.inputs.fields_containers1(response)
- merge_use_pass.inputs.fields_containers2(response2fc)
-
- expansion = ops.math.modal_superposition(
- solution_in_modal_space=merge_use_pass,
- modal_basis=merge_fields
- )
- component = ops.logic.component_selector_fc(expansion, 1)
-
-
-
-
-
-.. rst-class:: sphx-glr-script-out
-
- Out:
-
- .. code-block:: none
-
- ['file0.mode', 'file0.rst', 'file1.mode', 'file1.rst', 'file_load_1.rfrq', 'file_load_2.rfrq']
-
-
-
-
-.. GENERATED FROM PYTHON SOURCE LINES 104-106
-
-Connect the operator chains together and get the output
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. GENERATED FROM PYTHON SOURCE LINES 106-116
-
-.. code-block:: default
-
- for i, server in enumerate(remote_servers):
- merge_fields.connect(i, remote_displacement_operators[i], 0)
- merge_mesh.connect(i, remote_mesh_operators[i], 0)
-
- fc = component.get_output(0, dpf.types.fields_container)
- merged_mesh = merge_mesh.get_output(0, dpf.types.meshed_region)
-
- merged_mesh.plot(fc.get_field_by_time_complex_ids(1, 0))
- merged_mesh.plot(fc.get_field_by_time_complex_ids(20, 0))
- print(fc)
-
-
-
-.. rst-class:: sphx-glr-horizontal
-
-
- *
-
- .. image-sg:: /examples/06-distributed-post/images/sphx_glr_03-distributed-msup_expansion_steps_001.png
- :alt: 03 distributed msup expansion steps
- :srcset: /examples/06-distributed-post/images/sphx_glr_03-distributed-msup_expansion_steps_001.png
- :class: sphx-glr-multi-img
-
- *
-
- .. image-sg:: /examples/06-distributed-post/images/sphx_glr_03-distributed-msup_expansion_steps_002.png
- :alt: 03 distributed msup expansion steps
- :srcset: /examples/06-distributed-post/images/sphx_glr_03-distributed-msup_expansion_steps_002.png
- :class: sphx-glr-multi-img
-
-
-.. rst-class:: sphx-glr-script-out
-
- Out:
-
- .. code-block:: none
-
- DPF Fields Container
- with 40 field(s)
- defined on labels: complex time
-
- with:
- - field 0 {complex: 0, time: 1} with Nodal location, 1 components and 1065 entities.
- - field 1 {complex: 1, time: 1} with Nodal location, 1 components and 1065 entities.
- - field 2 {complex: 0, time: 2} with Nodal location, 1 components and 1065 entities.
- - field 3 {complex: 1, time: 2} with Nodal location, 1 components and 1065 entities.
- - field 4 {complex: 0, time: 3} with Nodal location, 1 components and 1065 entities.
- - field 5 {complex: 1, time: 3} with Nodal location, 1 components and 1065 entities.
- - field 6 {complex: 0, time: 4} with Nodal location, 1 components and 1065 entities.
- - field 7 {complex: 1, time: 4} with Nodal location, 1 components and 1065 entities.
- - field 8 {complex: 0, time: 5} with Nodal location, 1 components and 1065 entities.
- - field 9 {complex: 1, time: 5} with Nodal location, 1 components and 1065 entities.
- - field 10 {complex: 0, time: 6} with Nodal location, 1 components and 1065 entities.
- - field 11 {complex: 1, time: 6} with Nodal location, 1 components and 1065 entities.
- - field 12 {complex: 0, time: 7} with Nodal location, 1 components and 1065 entities.
- - field 13 {complex: 1, time: 7} with Nodal location, 1 components and 1065 entities.
- - field 14 {complex: 0, time: 8} with Nodal location, 1 components and 1065 entities.
- - field 15 {complex: 1, time: 8} with Nodal location, 1 components and 1065 entities.
- - field 16 {complex: 0, time: 9} with Nodal location, 1 components and 1065 entities.
- - field 17 {complex: 1, time: 9} with Nodal location, 1 components and 1065 entities.
- - field 18 {complex: 0, time: 10} with Nodal location, 1 components and 1065 entities.
- - field 19 {complex: 1, time: 10} with Nodal location, 1 components and 1065 entities.
- - field 20 {complex: 0, time: 11} with Nodal location, 1 components and 1065 entities.
- - field 21 {complex: 1, time: 11} with Nodal location, 1 components and 1065 entities.
- - field 22 {complex: 0, time: 12} with Nodal location, 1 components and 1065 entities.
- - field 23 {complex: 1, time: 12} with Nodal location, 1 components and 1065 entities.
- - field 24 {complex: 0, time: 13} with Nodal location, 1 components and 1065 entities.
- - field 25 {complex: 1, time: 13} with Nodal location, 1 components and 1065 entities.
- - field 26 {complex: 0, time: 14} with Nodal location, 1 components and 1065 entities.
- - field 27 {complex: 1, time: 14} with Nodal location, 1 components and 1065 entities.
- - field 28 {complex: 0, time: 15} with Nodal location, 1 components and 1065 entities.
- - field 29 {complex: 1, time: 15} with Nodal location, 1 components and 1065 entities.
- - field 30 {complex: 0, time: 16} with Nodal location, 1 components and 1065 entities.
- - field 31 {complex: 1, time: 16} with Nodal location, 1 components and 1065 entities.
- - field 32 {complex: 0, time: 17} with Nodal location, 1 components and 1065 entities.
- - field 33 {complex: 1, time: 17} with Nodal location, 1 components and 1065 entities.
- - field 34 {complex: 0, time: 18} with Nodal location, 1 components and 1065 entities.
- - field 35 {complex: 1, time: 18} with Nodal location, 1 components and 1065 entities.
- - field 36 {complex: 0, time: 19} with Nodal location, 1 components and 1065 entities.
- - field 37 {complex: 1, time: 19} with Nodal location, 1 components and 1065 entities.
- - field 38 {complex: 0, time: 20} with Nodal location, 1 components and 1065 entities.
- - field 39 {complex: 1, time: 20} with Nodal location, 1 components and 1065 entities.
-
-
-
-
-
-
-.. rst-class:: sphx-glr-timing
-
- **Total running time of the script:** ( 0 minutes 2.859 seconds)
-
-
-.. _sphx_glr_download_examples_06-distributed-post_03-distributed-msup_expansion_steps.py:
-
-
-.. only :: html
-
- .. container:: sphx-glr-footer
- :class: sphx-glr-footer-example
-
-
-
- .. container:: sphx-glr-download sphx-glr-download-python
-
- :download:`Download Python source code: 03-distributed-msup_expansion_steps.py <03-distributed-msup_expansion_steps.py>`
-
-
-
- .. container:: sphx-glr-download sphx-glr-download-jupyter
-
- :download:`Download Jupyter notebook: 03-distributed-msup_expansion_steps.ipynb <03-distributed-msup_expansion_steps.ipynb>`
-
-
-.. only:: html
-
- .. rst-class:: sphx-glr-signature
-
- `Gallery generated by Sphinx-Gallery `_
diff --git a/docs/source/examples/06-distributed-post/03-distributed-msup_expansion_steps_codeobj.pickle b/docs/source/examples/06-distributed-post/03-distributed-msup_expansion_steps_codeobj.pickle
deleted file mode 100644
index 60274fa6aad..00000000000
Binary files a/docs/source/examples/06-distributed-post/03-distributed-msup_expansion_steps_codeobj.pickle and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/03-operator-dep.dot b/docs/source/examples/06-distributed-post/03-operator-dep.dot
deleted file mode 100644
index ce9f093990f..00000000000
--- a/docs/source/examples/06-distributed-post/03-operator-dep.dot
+++ /dev/null
@@ -1,54 +0,0 @@
-digraph foo {
- graph [pad="0", nodesep="0.3", ranksep="0.3"]
- node [shape=box, style=filled, fillcolor="#ffcc00", margin="0"];
- rankdir=LR;
- splines=line;
-
- disp01 [label="displacement"];
- disp02 [label="displacement"];
- mesh01 [label="mesh"];
- mesh02 [label="mesh"];
-
- subgraph cluster_1 {
- ds01 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
-
- disp01; mesh01;
-
- ds01 -> disp01 [style=dashed];
- ds01 -> mesh01 [style=dashed];
-
- label="Server 1";
- style=filled;
- fillcolor=lightgrey;
- }
-
- subgraph cluster_2 {
- ds02 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
-
- disp02; mesh02;
-
- ds02 -> disp02 [style=dashed];
- ds02 -> mesh02 [style=dashed];
-
- label="Server 2";
- style=filled;
- fillcolor=lightgrey;
- }
-
- disp01 -> "merge_fields";
- mesh01 -> "merge_mesh";
- disp02 -> "merge_fields";
- mesh02 -> "merge_mesh";
-
- ds03 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
- ds03 -> "response2" [style=dashed];
- ds04 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
- ds04 -> "response" [style=dashed];
-
- "merge_mesh" -> "response";
- "response" -> "merge_use_pass";
- "response2" -> "merge_use_pass";
- "merge_use_pass" -> "expansion";
- "merge_fields" -> "expansion";
- "expansion" -> "component";
-}
diff --git a/docs/source/examples/06-distributed-post/03-operator-dep.svg b/docs/source/examples/06-distributed-post/03-operator-dep.svg
deleted file mode 100644
index 00f50ac766d..00000000000
--- a/docs/source/examples/06-distributed-post/03-operator-dep.svg
+++ /dev/null
@@ -1,209 +0,0 @@
-
-
-
-
-
diff --git a/docs/source/examples/06-distributed-post/images/graphviz-0b490f262404b99215a9bce9bcf28ed202fd712f.png b/docs/source/examples/06-distributed-post/images/graphviz-0b490f262404b99215a9bce9bcf28ed202fd712f.png
deleted file mode 100644
index 0b53d9a6459..00000000000
Binary files a/docs/source/examples/06-distributed-post/images/graphviz-0b490f262404b99215a9bce9bcf28ed202fd712f.png and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/images/graphviz-0b490f262404b99215a9bce9bcf28ed202fd712f.png.map b/docs/source/examples/06-distributed-post/images/graphviz-0b490f262404b99215a9bce9bcf28ed202fd712f.png.map
deleted file mode 100644
index fe5db591e8c..00000000000
--- a/docs/source/examples/06-distributed-post/images/graphviz-0b490f262404b99215a9bce9bcf28ed202fd712f.png.map
+++ /dev/null
@@ -1,2 +0,0 @@
-
diff --git a/docs/source/examples/06-distributed-post/images/graphviz-6c5c6731afa3cb99496c9582a659820e66e59ad8.png b/docs/source/examples/06-distributed-post/images/graphviz-6c5c6731afa3cb99496c9582a659820e66e59ad8.png
deleted file mode 100644
index 318e494d038..00000000000
Binary files a/docs/source/examples/06-distributed-post/images/graphviz-6c5c6731afa3cb99496c9582a659820e66e59ad8.png and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/images/graphviz-6c5c6731afa3cb99496c9582a659820e66e59ad8.png.map b/docs/source/examples/06-distributed-post/images/graphviz-6c5c6731afa3cb99496c9582a659820e66e59ad8.png.map
deleted file mode 100644
index fe5db591e8c..00000000000
--- a/docs/source/examples/06-distributed-post/images/graphviz-6c5c6731afa3cb99496c9582a659820e66e59ad8.png.map
+++ /dev/null
@@ -1,2 +0,0 @@
-
diff --git a/docs/source/examples/06-distributed-post/images/graphviz-6d76c45ee399e15a2be231294ffa4ba764e4c442.png b/docs/source/examples/06-distributed-post/images/graphviz-6d76c45ee399e15a2be231294ffa4ba764e4c442.png
deleted file mode 100644
index 8a03883c23d..00000000000
Binary files a/docs/source/examples/06-distributed-post/images/graphviz-6d76c45ee399e15a2be231294ffa4ba764e4c442.png and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/images/graphviz-6d76c45ee399e15a2be231294ffa4ba764e4c442.png.map b/docs/source/examples/06-distributed-post/images/graphviz-6d76c45ee399e15a2be231294ffa4ba764e4c442.png.map
deleted file mode 100644
index fe5db591e8c..00000000000
--- a/docs/source/examples/06-distributed-post/images/graphviz-6d76c45ee399e15a2be231294ffa4ba764e4c442.png.map
+++ /dev/null
@@ -1,2 +0,0 @@
-
diff --git a/docs/source/examples/06-distributed-post/images/graphviz-8a6d6449ecd7de654cd0b1382ac1a9d333037718.png b/docs/source/examples/06-distributed-post/images/graphviz-8a6d6449ecd7de654cd0b1382ac1a9d333037718.png
deleted file mode 100644
index 229467e5d2d..00000000000
Binary files a/docs/source/examples/06-distributed-post/images/graphviz-8a6d6449ecd7de654cd0b1382ac1a9d333037718.png and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/images/graphviz-8a6d6449ecd7de654cd0b1382ac1a9d333037718.png.map b/docs/source/examples/06-distributed-post/images/graphviz-8a6d6449ecd7de654cd0b1382ac1a9d333037718.png.map
deleted file mode 100644
index fe5db591e8c..00000000000
--- a/docs/source/examples/06-distributed-post/images/graphviz-8a6d6449ecd7de654cd0b1382ac1a9d333037718.png.map
+++ /dev/null
@@ -1,2 +0,0 @@
-
diff --git a/docs/source/examples/06-distributed-post/images/graphviz-d3a2fbe72465ba55879181d1c4ceccf7a25d2c7a.png b/docs/source/examples/06-distributed-post/images/graphviz-d3a2fbe72465ba55879181d1c4ceccf7a25d2c7a.png
deleted file mode 100644
index 0b53d9a6459..00000000000
Binary files a/docs/source/examples/06-distributed-post/images/graphviz-d3a2fbe72465ba55879181d1c4ceccf7a25d2c7a.png and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/images/graphviz-d3a2fbe72465ba55879181d1c4ceccf7a25d2c7a.png.map b/docs/source/examples/06-distributed-post/images/graphviz-d3a2fbe72465ba55879181d1c4ceccf7a25d2c7a.png.map
deleted file mode 100644
index fe5db591e8c..00000000000
--- a/docs/source/examples/06-distributed-post/images/graphviz-d3a2fbe72465ba55879181d1c4ceccf7a25d2c7a.png.map
+++ /dev/null
@@ -1,2 +0,0 @@
-
diff --git a/docs/source/examples/06-distributed-post/images/sphx_glr_01-distributed_workflows_on_remote_001.png b/docs/source/examples/06-distributed-post/images/sphx_glr_01-distributed_workflows_on_remote_001.png
deleted file mode 100644
index 6e156dc71ec..00000000000
Binary files a/docs/source/examples/06-distributed-post/images/sphx_glr_01-distributed_workflows_on_remote_001.png and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/images/sphx_glr_02-distributed-msup_expansion_001.png b/docs/source/examples/06-distributed-post/images/sphx_glr_02-distributed-msup_expansion_001.png
deleted file mode 100644
index d32ad0bb532..00000000000
Binary files a/docs/source/examples/06-distributed-post/images/sphx_glr_02-distributed-msup_expansion_001.png and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/images/sphx_glr_02-distributed-msup_expansion_002.png b/docs/source/examples/06-distributed-post/images/sphx_glr_02-distributed-msup_expansion_002.png
deleted file mode 100644
index 937eaf8f4db..00000000000
Binary files a/docs/source/examples/06-distributed-post/images/sphx_glr_02-distributed-msup_expansion_002.png and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/images/sphx_glr_02-distributed_workflows_on_remote_001.png b/docs/source/examples/06-distributed-post/images/sphx_glr_02-distributed_workflows_on_remote_001.png
deleted file mode 100644
index c87a7eb7cbf..00000000000
Binary files a/docs/source/examples/06-distributed-post/images/sphx_glr_02-distributed_workflows_on_remote_001.png and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/images/sphx_glr_03-distributed-msup_expansion_001.png b/docs/source/examples/06-distributed-post/images/sphx_glr_03-distributed-msup_expansion_001.png
deleted file mode 100644
index d854d8fa4f3..00000000000
Binary files a/docs/source/examples/06-distributed-post/images/sphx_glr_03-distributed-msup_expansion_001.png and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/images/sphx_glr_03-distributed-msup_expansion_002.png b/docs/source/examples/06-distributed-post/images/sphx_glr_03-distributed-msup_expansion_002.png
deleted file mode 100644
index 4b874f64b85..00000000000
Binary files a/docs/source/examples/06-distributed-post/images/sphx_glr_03-distributed-msup_expansion_002.png and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/images/sphx_glr_03-distributed-msup_expansion_steps_001.png b/docs/source/examples/06-distributed-post/images/sphx_glr_03-distributed-msup_expansion_steps_001.png
deleted file mode 100644
index d32ad0bb532..00000000000
Binary files a/docs/source/examples/06-distributed-post/images/sphx_glr_03-distributed-msup_expansion_steps_001.png and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/images/sphx_glr_03-distributed-msup_expansion_steps_002.png b/docs/source/examples/06-distributed-post/images/sphx_glr_03-distributed-msup_expansion_steps_002.png
deleted file mode 100644
index fcf8fd2d9af..00000000000
Binary files a/docs/source/examples/06-distributed-post/images/sphx_glr_03-distributed-msup_expansion_steps_002.png and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/images/sphx_glr_04-distributed-msup_expansion_steps_001.png b/docs/source/examples/06-distributed-post/images/sphx_glr_04-distributed-msup_expansion_steps_001.png
deleted file mode 100644
index d854d8fa4f3..00000000000
Binary files a/docs/source/examples/06-distributed-post/images/sphx_glr_04-distributed-msup_expansion_steps_001.png and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/images/sphx_glr_04-distributed-msup_expansion_steps_002.png b/docs/source/examples/06-distributed-post/images/sphx_glr_04-distributed-msup_expansion_steps_002.png
deleted file mode 100644
index ac8b80ce9fc..00000000000
Binary files a/docs/source/examples/06-distributed-post/images/sphx_glr_04-distributed-msup_expansion_steps_002.png and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_00-distributed_total_disp_thumb.png b/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_00-distributed_total_disp_thumb.png
deleted file mode 100644
index 8a5fed589d1..00000000000
Binary files a/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_00-distributed_total_disp_thumb.png and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_01-distributed_workflows_on_remote_thumb.png b/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_01-distributed_workflows_on_remote_thumb.png
deleted file mode 100644
index 3748dd87e97..00000000000
Binary files a/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_01-distributed_workflows_on_remote_thumb.png and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_02-distributed-msup_expansion_thumb.png b/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_02-distributed-msup_expansion_thumb.png
deleted file mode 100644
index cb42d0be0ec..00000000000
Binary files a/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_02-distributed-msup_expansion_thumb.png and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_03-distributed-msup_expansion_steps_thumb.png b/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_03-distributed-msup_expansion_steps_thumb.png
deleted file mode 100644
index cb42d0be0ec..00000000000
Binary files a/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_03-distributed-msup_expansion_steps_thumb.png and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_04-distributed_total_disp_with_operator_thumb.png b/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_04-distributed_total_disp_with_operator_thumb.png
deleted file mode 100644
index b06c4e6a177..00000000000
Binary files a/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_04-distributed_total_disp_with_operator_thumb.png and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_04-distributed_total_disp_with_operators_thumb.png b/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_04-distributed_total_disp_with_operators_thumb.png
deleted file mode 100644
index 8a5fed589d1..00000000000
Binary files a/docs/source/examples/06-distributed-post/images/thumb/sphx_glr_04-distributed_total_disp_with_operators_thumb.png and /dev/null differ
diff --git a/docs/source/examples/06-distributed-post/sg_execution_times.rst b/docs/source/examples/06-distributed-post/sg_execution_times.rst
deleted file mode 100644
index 28d83aa6724..00000000000
--- a/docs/source/examples/06-distributed-post/sg_execution_times.rst
+++ /dev/null
@@ -1,18 +0,0 @@
-
-:orphan:
-
-.. _sphx_glr_examples_06-distributed-post_sg_execution_times:
-
-Computation times
-=================
-**00:10.295** total execution time for **examples_06-distributed-post** files:
-
-+----------------------------------------------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_examples_06-distributed-post_02-distributed-msup_expansion.py` (``02-distributed-msup_expansion.py``) | 00:05.095 | 0.0 MB |
-+----------------------------------------------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_examples_06-distributed-post_03-distributed-msup_expansion_steps.py` (``03-distributed-msup_expansion_steps.py``) | 00:02.859 | 0.0 MB |
-+----------------------------------------------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_examples_06-distributed-post_01-distributed_workflows_on_remote.py` (``01-distributed_workflows_on_remote.py``) | 00:01.630 | 0.0 MB |
-+----------------------------------------------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_examples_06-distributed-post_00-distributed_total_disp.py` (``00-distributed_total_disp.py``) | 00:00.711 | 0.0 MB |
-+----------------------------------------------------------------------------------------------------------------------------------+-----------+--------+
diff --git a/docs/source/examples/07-averaging/00-compute_and_average.dot b/docs/source/examples/07-averaging/00-compute_and_average.dot
deleted file mode 100644
index 1e8b61ac97f..00000000000
--- a/docs/source/examples/07-averaging/00-compute_and_average.dot
+++ /dev/null
@@ -1,39 +0,0 @@
-digraph foo {
- graph [pad="0", nodesep="0.3", ranksep="0.3"]
- node [shape=box, style=filled, fillcolor="#ffcc0", margin="0"];
- rankdir=LR;
- splines=line;
- node [fixedsize=true,width=2.5]
-
- stress01 [label="stress"];
- stress02 [label="stress"];
- vm01 [label="von_mises_eqv"];
- vm02 [label="von_mises_eqv"];
- avg01 [label="elemental_nodal_to_nodal", width=2.5];
- avg02 [label="elemental_nodal_to_nodal", width=2.5];
-
- subgraph cluster_1 {
- ds01 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
-
- ds01 -> stress01 [style=dashed];
- stress01 -> vm01;
- vm01 -> avg01
-
- label="Compute Von Mises then average stresses";
- style=filled;
- fillcolor=lightgrey;
- }
-
- subgraph cluster_2 {
- ds02 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
-
- ds02 -> stress02 [style=dashed];
- stress02 -> avg02;
- avg02 -> vm02
-
- label="Average stresses then compute Von Mises";
- style=filled;
- fillcolor=lightgrey;
- }
-
-}
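The deleted ``00-compute_and_average.dot`` above contrasted two operator orderings: computing the Von Mises equivalent before averaging, versus averaging the elemental-nodal stresses first. As a hedged sketch of that comparison (the ``_fc`` operator variants and the ``static_rst`` example file are assumptions here, chosen only to make the snippet self-contained):

.. code-block:: python

    from ansys.dpf import core as dpf
    from ansys.dpf.core import examples, operators as ops

    # Placeholder result file, only to make the sketch self-contained.
    model = dpf.Model(examples.static_rst)

    # Chain 1: Von Mises on the elemental-nodal stresses, then average to nodes.
    vm_first = ops.averaging.elemental_nodal_to_nodal_fc(
        ops.invariant.von_mises_eqv_fc(model.results.stress())
    ).eval()

    # Chain 2: average the stress tensor to nodes, then take Von Mises.
    avg_first = ops.invariant.von_mises_eqv_fc(
        ops.averaging.elemental_nodal_to_nodal_fc(model.results.stress())
    ).eval()

    # The two orderings generally produce slightly different nodal values.
    print(vm_first[0].min().data, avg_first[0].min().data)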
diff --git a/docs/source/examples/07-averaging/00-compute_and_average.svg b/docs/source/examples/07-averaging/00-compute_and_average.svg
deleted file mode 100644
index d0f542d2bcd..00000000000
--- a/docs/source/examples/07-averaging/00-compute_and_average.svg
+++ /dev/null
@@ -1,107 +0,0 @@
-
-
-
-
-
diff --git a/docs/source/examples/07-python-operators/images/thumb/out02.glb b/docs/source/examples/07-python-operators/images/thumb/out02.glb
deleted file mode 100644
index b9ab1d7d495..00000000000
Binary files a/docs/source/examples/07-python-operators/images/thumb/out02.glb and /dev/null differ
diff --git a/examples/00-basic/09-results_over_space_subset.py b/examples/00-basic/09-results_over_space_subset.py
index 18b54dd3ea3..30b256f252a 100644
--- a/examples/00-basic/09-results_over_space_subset.py
+++ b/examples/00-basic/09-results_over_space_subset.py
@@ -141,9 +141,10 @@
print(stress)
for body_id in stress.get_mat_scoping().ids:
- field = stress.get_field_by_mat_id(body_id)
- if field.elementary_data_count > 0:
- model.metadata.meshed_region.plot(field)
+ fields = stress.get_fields_by_mat_id(body_id)
+ for field in fields:
+ if field.elementary_data_count > 0:
+ model.metadata.meshed_region.plot(field)
###############################################################################
# Create a custom spatial split
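The hunk above replaces the single ``get_field_by_mat_id`` call with a loop over ``get_fields_by_mat_id``, since one material ID can now map to several fields. A hedged, self-contained sketch of the new pattern (the result file and the ``split_by_body`` helper are assumptions; the real example builds ``stress`` a few lines before the hunk shown here):

.. code-block:: python

    from ansys.dpf import core as dpf
    from ansys.dpf.core import examples

    # Assumed setup: stresses split by body, labeled by material ID.
    model = dpf.Model(examples.download_all_kinds_of_complexity())
    stress = model.results.stress.split_by_body.eval()

    for body_id in stress.get_mat_scoping().ids:
        # One body may now yield several fields (for example one per element
        # shape), hence the inner loop introduced by this change.
        for field in stress.get_fields_by_mat_id(body_id):
            if field.elementary_data_count > 0:
                model.metadata.meshed_region.plot(field)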
diff --git a/examples/06-distributed-post/00-distributed_total_disp.py b/examples/06-distributed-post/00-distributed_total_disp.py
index 761f13f7fac..5d753d65457 100644
--- a/examples/06-distributed-post/00-distributed_total_disp.py
+++ b/examples/06-distributed-post/00-distributed_total_disp.py
@@ -7,9 +7,44 @@
To help understand this example the following diagram is provided. It shows
the operator chain used to compute the final result.
-.. image:: 00-operator-dep.svg
- :align: center
- :width: 400
+.. graphviz::
+
+ digraph foo {
+ graph [pad="0", nodesep="0.3", ranksep="0.3"]
+ node [shape=box, style=filled, fillcolor="#ffcc00", margin="0"];
+ rankdir=LR;
+ splines=line;
+
+ disp01 [label="displacement"];
+ disp02 [label="displacement"];
+ norm01 [label="norm"];
+ norm02 [label="norm"];
+
+ subgraph cluster_1 {
+ ds01 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
+
+ ds01 -> disp01 [style=dashed];
+ disp01 -> norm01;
+
+ label="Server 1";
+ style=filled;
+ fillcolor=lightgrey;
+ }
+
+ subgraph cluster_2 {
+ ds02 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
+
+ ds02 -> disp02 [style=dashed];
+ disp02 -> norm02;
+
+ label="Server 2";
+ style=filled;
+ fillcolor=lightgrey;
+ }
+
+ norm01 -> "merge";
+ norm02 -> "merge";
+ }
"""
###############################################################################
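The graphviz block that replaces the static SVG describes a per-server displacement/norm chain merged locally. A minimal sketch of that chain (assuming the local servers and downloaded files used throughout these examples, and the ``norm_fc`` operator for the per-server norm):

.. code-block:: python

    from ansys.dpf import core as dpf
    from ansys.dpf.core import examples, operators as ops

    # Two local servers stand in for remote machines.
    remote_servers = [dpf.start_local_server(as_global=False),
                      dpf.start_local_server(as_global=False)]
    files = examples.download_distributed_files()

    merge = ops.utility.merge_fields_containers()
    for i, server in enumerate(remote_servers):
        ds = dpf.DataSources(files[i], server=server)
        displacement = ops.result.displacement(data_sources=ds, server=server)
        norm = ops.math.norm_fc(displacement, server=server)
        # Each remote norm feeds one input pin of the local merge operator.
        merge.connect(i, norm, 0)

    print(merge.get_output(0, dpf.types.fields_container))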
diff --git a/examples/06-distributed-post/01-distributed_workflows_on_remote.py b/examples/06-distributed-post/01-distributed_workflows_on_remote.py
index e3feb129ed4..65c0182d80d 100644
--- a/examples/06-distributed-post/01-distributed_workflows_on_remote.py
+++ b/examples/06-distributed-post/01-distributed_workflows_on_remote.py
@@ -9,9 +9,39 @@
sequences are directly created on different servers. These operators are then
connected together without having to care that they are on remote processes.
-.. image:: 01-operator-dep.svg
- :align: center
- :width: 400
+.. graphviz::
+
+ digraph foo {
+ graph [pad="0", nodesep="0.3", ranksep="0.3"]
+ node [shape=box, style=filled, fillcolor="#ffcc00", margin="0"];
+ rankdir=LR;
+ splines=line;
+
+ subgraph cluster_1 {
+ ds01 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
+
+ ds01 -> stress1 [style=dashed];
+
+ label="Server 1";
+ style=filled;
+ fillcolor=lightgrey;
+ }
+
+ subgraph cluster_2 {
+ ds02 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
+
+ ds02 -> stress2 [style=dashed];
+ stress2 -> mul;
+
+ label="Server 2";
+ style=filled;
+ fillcolor=lightgrey;
+ }
+
+ stress1 -> "merge";
+ mul -> "merge";
+ }
+
"""
###############################################################################
# Import dpf module and its examples files
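For the workflow variant, the same idea can be expressed with the ``Workflow`` API: each remote server exposes a named output, and a local workflow pulls them in with ``connect_with``. This is only a rough sketch, assuming ``set_input_name``/``set_output_name``/``connect_with`` behave as in the library's distributed examples; the scaling step shown on Server 2 in the diagram is omitted for brevity:

.. code-block:: python

    from ansys.dpf import core as dpf
    from ansys.dpf.core import examples, operators as ops

    files = examples.download_distributed_files()
    remote_servers = [dpf.start_local_server(as_global=False),
                      dpf.start_local_server(as_global=False)]

    # One small workflow per remote server, each exposing a "stress" output.
    remote_workflows = []
    for file, server in zip(files, remote_servers):
        stress = ops.result.stress(data_sources=dpf.DataSources(file, server=server),
                                   server=server)
        wf = dpf.Workflow(server=server)
        wf.add_operators([stress])
        wf.set_output_name("stress", stress.outputs.fields_container)
        remote_workflows.append(wf)

    # Local workflow that merges whatever the remote workflows expose.
    merge = ops.utility.merge_fields_containers()
    local_wf = dpf.Workflow()
    local_wf.add_operators([merge])
    local_wf.set_input_name("fc1", merge.inputs.fields_containers1)
    local_wf.set_input_name("fc2", merge.inputs.fields_containers2)
    local_wf.set_output_name("merged", merge.outputs.merged_fields_container)

    local_wf.connect_with(remote_workflows[0], output_input_names={"stress": "fc1"})
    local_wf.connect_with(remote_workflows[1], output_input_names={"stress": "fc2"})
    print(local_wf.get_output("merged", dpf.types.fields_container))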
diff --git a/examples/06-distributed-post/02-distributed-msup_expansion.py b/examples/06-distributed-post/02-distributed-msup_expansion.py
index 92188c1f1fe..1c0846c5088 100644
--- a/examples/06-distributed-post/02-distributed-msup_expansion.py
+++ b/examples/06-distributed-post/02-distributed-msup_expansion.py
@@ -11,9 +11,60 @@
To help understand this example the following diagram is provided. It shows
the operator chain used to compute the final result.
-.. image:: 02-operator-dep.svg
- :align: center
- :width: 800
+.. graphviz::
+
+ digraph foo {
+ graph [pad="0", nodesep="0.3", ranksep="0.3"]
+ node [shape=box, style=filled, fillcolor="#ffcc00", margin="0"];
+ rankdir=LR;
+ splines=line;
+
+ disp01 [label="displacement"];
+ disp02 [label="displacement"];
+ mesh01 [label="mesh"];
+ mesh02 [label="mesh"];
+
+ subgraph cluster_1 {
+ ds01 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
+
+ disp01; mesh01;
+
+ ds01 -> disp01 [style=dashed];
+ ds01 -> mesh01 [style=dashed];
+
+ label="Server 1";
+ style=filled;
+ fillcolor=lightgrey;
+ }
+
+ subgraph cluster_2 {
+ ds02 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
+
+
+ disp02; mesh02;
+
+ ds02 -> disp02 [style=dashed];
+ ds02 -> mesh02 [style=dashed];
+
+ label="Server 2";
+ style=filled;
+ fillcolor=lightgrey;
+ }
+
+ disp01 -> "merge_fields";
+ mesh01 -> "merged_mesh";
+ disp02 -> "merge_fields";
+ mesh02 -> "merged_mesh";
+
+ ds03 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
+ ds03 -> "response" [style=dashed];
+
+ "merged_mesh" -> "response";
+ "response" -> "expansion";
+ "merge_fields" -> "expansion";
+ "expansion" -> "component";
+ }
+
"""
###############################################################################
diff --git a/examples/06-distributed-post/03-distributed-msup_expansion_steps.py b/examples/06-distributed-post/03-distributed-msup_expansion_steps.py
index 99c0a6531f1..487dda9c822 100644
--- a/examples/06-distributed-post/03-distributed-msup_expansion_steps.py
+++ b/examples/06-distributed-post/03-distributed-msup_expansion_steps.py
@@ -11,9 +11,63 @@
To help understand this example the following diagram is provided. It shows
the operator chain used to compute the final result.
-.. image:: 03-operator-dep.svg
- :align: center
- :width: 800
+.. graphviz::
+
+ digraph foo {
+ graph [pad="0", nodesep="0.3", ranksep="0.3"]
+ node [shape=box, style=filled, fillcolor="#ffcc00", margin="0"];
+ rankdir=LR;
+ splines=line;
+
+ disp01 [label="displacement"];
+ disp02 [label="displacement"];
+ mesh01 [label="mesh"];
+ mesh02 [label="mesh"];
+
+ subgraph cluster_1 {
+ ds01 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
+
+ disp01; mesh01;
+
+ ds01 -> disp01 [style=dashed];
+ ds01 -> mesh01 [style=dashed];
+
+ label="Server 1";
+ style=filled;
+ fillcolor=lightgrey;
+ }
+
+ subgraph cluster_2 {
+ ds02 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
+
+ disp02; mesh02;
+
+ ds02 -> disp02 [style=dashed];
+ ds02 -> mesh02 [style=dashed];
+
+ label="Server 2";
+ style=filled;
+ fillcolor=lightgrey;
+ }
+
+ disp01 -> "merge_fields";
+ mesh01 -> "merge_mesh";
+ disp02 -> "merge_fields";
+ mesh02 -> "merge_mesh";
+
+ ds03 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
+ ds03 -> "response2" [style=dashed];
+ ds04 [label="data_src", shape=box, style=filled, fillcolor=cadetblue2];
+ ds04 -> "response" [style=dashed];
+
+ "merge_mesh" -> "response";
+ "response" -> "merge_use_pass";
+ "response2" -> "merge_use_pass";
+ "merge_use_pass" -> "expansion";
+ "merge_fields" -> "expansion";
+ "expansion" -> "component";
+ }
+
"""
###############################################################################
diff --git a/examples/06-distributed-post/06-distributed_stress_averaging.py b/examples/06-distributed-post/06-distributed_stress_averaging.py
index 32fa934e43b..8a52763e7ee 100644
--- a/examples/06-distributed-post/06-distributed_stress_averaging.py
+++ b/examples/06-distributed-post/06-distributed_stress_averaging.py
@@ -28,7 +28,8 @@
files = examples.download_distributed_files()
-remote_servers = [dpf.start_local_server(as_global=False) for file in files]
+config = dpf.ServerConfig(protocol=dpf.server.CommunicationProtocols.gRPC)
+remote_servers = [dpf.start_local_server(as_global=False, config=config) for file in files]
ips = [remote_server.ip for remote_server in remote_servers]
ports = [remote_server.port for remote_server in remote_servers]
@@ -129,8 +130,9 @@
ds.set_domain_result_file_path(files[0], 0)
ds.set_domain_result_file_path(files[1], 1)
-stress = dpf.Model(ds).results.stress()
-fc_single_process = ops.averaging.to_nodal_fc(stress).outputs.fields_container()
+model = dpf.Model(ds)
+stress = model.results.stress()
+fc_single_process = ops.averaging.to_nodal_fc(stress).eval()
fc_single_process[0].plot()
print(fc_single_process[0].min().data)
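Two small points in the hunk above may be worth spelling out: the explicit ``ServerConfig`` pins the started servers to gRPC, and ``.eval()`` is a shorthand that runs the operator and returns its main output. A minimal sketch combining both (the single-file ``Model`` below is only an illustration, not the full distributed setup):

.. code-block:: python

    from ansys.dpf import core as dpf
    from ansys.dpf.core import examples, operators as ops

    # Force the gRPC protocol, mirroring the change above.
    config = dpf.ServerConfig(protocol=dpf.server.CommunicationProtocols.gRPC)
    server = dpf.start_local_server(as_global=False, config=config)

    # Illustration only: a model on one of the distributed files.
    model = dpf.Model(examples.download_distributed_files()[0], server=server)
    stress = model.results.stress()

    # .eval() runs the operator and returns its main output, here a
    # FieldsContainer, instead of going through .outputs.fields_container().
    fc = ops.averaging.to_nodal_fc(stress, server=server).eval()
    print(fc[0].min().data)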
diff --git a/tests/testfiles/pythonPlugins/all_types/dpf_types_op.py b/tests/testfiles/pythonPlugins/all_types/dpf_types_op.py
index e6b4dd0132b..a790c07d1bc 100644
--- a/tests/testfiles/pythonPlugins/all_types/dpf_types_op.py
+++ b/tests/testfiles/pythonPlugins/all_types/dpf_types_op.py
@@ -1,6 +1,6 @@
from ansys.dpf.core.custom_operator import CustomOperatorBase
from ansys.dpf.core import field, scoping, fields_container, meshes_container, scopings_container,\
- property_field, data_sources, types, workflow, data_tree, string_field
+ property_field, data_sources, types, workflow, data_tree
class ForwardFieldOperator(CustomOperatorBase):
@@ -101,6 +101,7 @@ def name(self):
class ForwardStringFieldOperator(CustomOperatorBase):
def run(self):
+ from ansys.dpf.core import string_field
f = self.get_input(0, string_field.StringField)
f = self.get_input(0, types.string_field)
self.set_output(0, f)
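Finally, the plugin change moves the ``string_field`` import from module scope into ``run()``. A hedged illustration of the pattern follows (a simplified sketch with a hypothetical operator name, not the actual test operator; registration and the specification property are omitted). Presumably the lazy import keeps the plugin importable against servers or library versions where ``StringField`` is unavailable:

.. code-block:: python

    from ansys.dpf.core.custom_operator import CustomOperatorBase


    class ForwardStringFieldSketch(CustomOperatorBase):
        """Simplified sketch of an operator forwarding a string field."""

        def run(self):
            # Lazy import: only evaluated when the operator actually runs,
            # so the rest of the plugin keeps loading if StringField is missing.
            from ansys.dpf.core import string_field

            f = self.get_input(0, string_field.StringField)
            self.set_output(0, f)

        @property
        def name(self):
            return "forward_string_field_sketch"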