From 44b2913196fe6c357b280f978532ae52b0a7a999 Mon Sep 17 00:00:00 2001 From: Federico Negri Date: Tue, 14 Feb 2023 14:30:30 +0100 Subject: [PATCH 1/2] Clean up unused files --- .flake8 | 2 +- README.rst | 47 +-- README_dcs_py_client.md | 105 ----- doc/source/conf_templ.py | 81 ---- doc/source/examples/ex_adding_file.rst | 49 --- doc/source/examples/ex_cfx_static_mixer.rst | 5 +- doc/source/examples/ex_download.rst | 229 ----------- doc/source/examples/ex_fluent_nozzle.rst | 5 +- doc/source/quickstart.rst | 2 - examples/README.md | 1 - examples/cfx_static_mixer/project_setup.py | 2 +- examples/exec_scripts/exec_cfx.py | 285 -------------- examples/exec_scripts/exec_fluent.py | 361 ------------------ examples/exec_scripts/exec_mapdl.py | 49 --- examples/exec_scripts/exec_python.py | 48 --- examples/fluent_nozzle/project_setup.py | 2 +- .../mapdl_motorbike_frame/project_setup.py | 2 +- .../project_setup.py | 2 +- prepare_documentation.py | 10 +- 19 files changed, 28 insertions(+), 1259 deletions(-) delete mode 100644 README_dcs_py_client.md delete mode 100644 doc/source/conf_templ.py delete mode 100644 doc/source/examples/ex_adding_file.rst delete mode 100644 doc/source/examples/ex_download.rst delete mode 100644 examples/README.md delete mode 100644 examples/exec_scripts/exec_cfx.py delete mode 100644 examples/exec_scripts/exec_fluent.py delete mode 100644 examples/exec_scripts/exec_mapdl.py delete mode 100644 examples/exec_scripts/exec_python.py diff --git a/.flake8 b/.flake8 index 41573c4a4..4496a9aef 100644 --- a/.flake8 +++ b/.flake8 @@ -1,6 +1,6 @@ [flake8] exclude = venv, __init__.py, doc/_build, .venv -select = W191, W291, W293, W391, E115, E117, E122, E124, E125, E225, E231, E301, E303, E501, F401, F403 +select = W191, W291, W293, W391, E115, E117, E122, E124, E125, E225, E231, E301, E303, E501, F401, F403, N801, N802, N803, N804, N805, N806, N807, N815, N816 count = True max-complexity = 10 max-line-length = 100 diff --git a/README.rst b/README.rst index b1495d0f6..1710371dc 100644 --- a/README.rst +++ b/README.rst @@ -1,4 +1,4 @@ -pyrep +PyREP ===== |pyansys| |python| |pypi| |GH-CI| |codecov| |MIT| |black| @@ -33,26 +33,17 @@ pyrep A Python client for Ansys REP - Remote Execution Platform -Note: The following README instructions are not yet fully tested and adapted for pyrep. - How to install -------------- -.. TODO: Update installation instructions, try to follow this general scheme as much as possible. - -At least two installation modes are provided: user and developer. - -For users -^^^^^^^^^ - -In order to install Pyrep rep, make sure you +In order to install PyREP, make sure you have the latest version of `pip`_. To do so, run: .. code:: bash python -m pip install -U pip -Then, as long as pyrep is a private pyAnsys module not published to pypi yet, you can execute: +Then, as long as PyREP is a private pyAnsys module not published to pypi yet, you can execute: .. code:: bash @@ -60,22 +51,21 @@ Then, as long as pyrep is a private pyAnsys module not published to pypi yet, yo .. TODO: Enable this once pyrep is published: python -m pip install ansys-rep -For developers -^^^^^^^^^^^^^^ +Contribute +---------- -Installing pyrep in developer mode allows -you to modify the source and enhance it. - -Before contributing to the project, please refer to the `PyAnsys Developer's guide`_. You will +Before contributing to the project, ensure that you are thoroughly +familiar with the `PyAnsys Developer's guide`_. You will need to follow these steps: -#. Start by cloning this repository: +#. 
Clone this repository: .. code:: bash git clone https://github.com/pyansys/pyrep + cd pyrep -#. Create a fresh-clean Python environment and activate it: +#. Create a new Python environment and activate it: .. code:: bash @@ -106,13 +96,6 @@ need to follow these steps: .. code:: bash python -m pip install --editable . - - #. Finally, verify your development installation by running: - - .. code:: bash - - tox - How to testing -------------- @@ -131,7 +114,7 @@ order to guarantee project's integrity. The following environments commands are - **tox -e style**: will check for coding style quality. - **tox -e py**: checks for unit tests. - **tox -e py-coverage**: checks for unit testing and code coverage. -- **tox -e doc**: checs for documentation building process. +- **tox -e doc**: checks for documentation building process. Raw testing @@ -157,14 +140,14 @@ encouraged to install this tool via: Documentation ------------- -For building documentation, you can either run the usual rules provided in the -`Sphinx`_ Makefile, such us: +For building documentation, you can manually run: .. code:: bash - make -C doc/ html && your_browser_name doc/html/index.html + python prepare_documentation.py + python -m sphinx -b html doc/source build/sphinx/html -However, the recommended way of checking documentation integrity is using: +The recommended way of checking documentation integrity is using: .. code:: bash diff --git a/README_dcs_py_client.md b/README_dcs_py_client.md deleted file mode 100644 index 33cea9278..000000000 --- a/README_dcs_py_client.md +++ /dev/null @@ -1,105 +0,0 @@ -# Ansys pyrep - REP Python client # - -Python clients for Ansys Distributed Compute Services (DCS). - -## Installation - -A wheel is available on the ANSYS PyPI repository. Installing it is as simple as: - -``` -pip install ansys-dcs-client --extra-index-url http://:@canartifactory.ansys.com:8080/artifactory/api/pypi/pypi/simple --trusted-host canartifactory.ansys.com -``` - -The `--extra-index-url` allows `pip` to retrieve ``ansys-dcs-client`` from ANSYS private PyPI repository but still install public packages ``ansys-dcs-client`` depend on that may not be available there. - -To install the latest development version add the `--pre` flag to the command above. - - -## Example Usage - -```python -from ansys.rep.client.jms import Client - -client = Client(rep_url="https://127.0.0.1:8443/rep", username="repadmin", password="repadmin") - -# query a project -project = client.get_project(id="demo_project") - -# get design points -jobs = project.get_jobs() - -# set failed design points to pending -failed_dps = [dp for dp in jobs if dp.eval_status == "failed"] -for dp in failed_dps: - dp.eval_status = "pending" -failed_dps = project.update_jobs(failed_dps) -``` - -## Development - -This package is currently developed with standard Python 3 -and standard packages downloaed from https://pypi.org/. - -The source code is available [here](ttps://tfs.ansys.com:8443/tfs/ANSYS_Development/ANSYS-CH/_git/dcs-client). The TFS build definition can be found [here](https://tfs.ansys.com:8443/tfs/ANSYS_Development/ANSYS-CH/ANSYS-CH%20Team/_build/index?definitionId=2540&_a=completed). - -### Setup - -* Install standard Python 3 -* Setup the project dev environment - ``` - python build.py venv - ``` - Remember to activate the virtual env. 
- -### Run tests - -From within the dev env, using `pytest` - -``` -python -m pytest -``` - -Or, using the build script -``` -python build.py tests -``` - -which will run something similar to - -``` -python -m pytest -v --junitxml test_results.xml --cov=ansys --cov-report=xml --cov-report=html -``` - -By default the integration tests try to connect to the local DCS server at `https:/127.0.0.1/dcs/` with default username and password. To specify different ones (e.g. on a build machine), please set the following environment variables: - -| Variable | Example Values on Windows | Description | -|-----------------------|--------------------------------------|------------------------------------| -| DCS_TEST_URL | https://212.126.163.153/dcs/ | DCS server URL | -| DCS_TEST_USERNAME | tfsbuild | Username | -| DCS_TEST_PASSWORD | tfsbuild | Password | - -Few test assumes that the project `mapdl_motorbike_frame` already exists on the DCS server. -In case, you can create such project running the script `examples/mapdl_motorbike_frame/project_setup.py`. - -### Create wheel package - - ``` - python build.py wheel - ``` - -### Generate the documentation - - To generate the HTML documentation: - - ``` - python build.py documentation - ``` - -## Update of the Ansys Version - -There is a Python script `update_ansys_version_number.py` in the root folder of the repo that updates the version numbers. -Here is an example how to run it. The first argument is the internal ID, second argument is the external ID: - -``` bash ->> python update_ansys_version_number.py 23.1 "2023 R1" -``` \ No newline at end of file diff --git a/doc/source/conf_templ.py b/doc/source/conf_templ.py deleted file mode 100644 index ae110ca2b..000000000 --- a/doc/source/conf_templ.py +++ /dev/null @@ -1,81 +0,0 @@ -"""Sphinx documentation configuration file.""" -from datetime import datetime - -from ansys_sphinx_theme import pyansys_logo_black as logo - -# Project information -project = "ansys-rep-rep" -copyright = f"(c) {datetime.now().year} ANSYS, Inc. All rights reserved" -author = "ANSYS, Inc." -release = version = "0.1.dev0" - -# Select desired logo, theme, and declare the html title -html_logo = logo -html_theme = "ansys_sphinx_theme" -html_short_title = html_title = "pyrep" - -# specify the location of your github repo -html_theme_options = { - "github_url": "[https://github.com/pyansys/pyrep", - "show_prev_next": False, - "show_breadcrumbs": True, - "additional_breadcrumbs": [ - ("PyAnsys", "https://docs.pyansys.com/"), - ], -} - -# Sphinx extensions -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.autosummary", - "numpydoc", - "sphinx.ext.intersphinx", - "sphinx_copybutton", -] - -# Intersphinx mapping -intersphinx_mapping = { - "python": ("https://docs.python.org/dev", None), - # kept here as an example - # "scipy": ("https://docs.scipy.org/doc/scipy/reference", None), - # "numpy": ("https://numpy.org/devdocs", None), - # "matplotlib": ("https://matplotlib.org/stable", None), - # "pandas": ("https://pandas.pydata.org/pandas-docs/stable", None), - # "pyvista": ("https://docs.pyvista.org/", None), - # "grpc": ("https://grpc.github.io/grpc/python/", None), -} - -# numpydoc configuration -numpydoc_show_class_members = False -numpydoc_xref_param_type = True - -# Consider enabling numpydoc validation. See: -# https://numpydoc.readthedocs.io/en/latest/validation.html# -numpydoc_validate = True -numpydoc_validation_checks = { - "GL06", # Found unknown section - "GL07", # Sections are in the wrong order. 
- "GL08", # The object does not have a docstring - "GL09", # Deprecation warning should precede extended summary - "GL10", # reST directives {directives} must be followed by two colons - "SS01", # No summary found - "SS02", # Summary does not start with a capital letter - # "SS03", # Summary does not end with a period - "SS04", # Summary contains heading whitespaces - # "SS05", # Summary must start with infinitive verb, not third person - "RT02", # The first line of the Returns section should contain only the - # type, unless multiple values are being returned" -} - - -# static path -html_static_path = ["_static"] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# The suffix(es) of source filenames. -source_suffix = ".rst" - -# The master toctree document. -master_doc = "index" diff --git a/doc/source/examples/ex_adding_file.rst b/doc/source/examples/ex_adding_file.rst deleted file mode 100644 index f80b01610..000000000 --- a/doc/source/examples/ex_adding_file.rst +++ /dev/null @@ -1,49 +0,0 @@ -.. _example_adding_files: - -Adding a file to a project -============================================================ - -This example shows how to add one or more output files to an existing project job_definition. -This is a typical scenario when the DCS project is automatically generated by Ansys Workbench and you would like to further customize it. - -As a starting point, we consider the project included in the DCS tutorial :ansys_dcs_tutorial:`Bicycle Wheel – Parameter Study `. -The project consists of a parametric model of a bicycle wheel and uses Ansys Mechanical for the structural simulation. - -Here we show you how to modify the project in order to pick up the Mechanical ``solve.out`` file for every design point. -To begin with, you need to copy the ``solve.out`` file to the directory of the project. -To this end, we create two simple APDL command snippets as shown in the screenshot (the same is done for the radial local case). - -.. image:: ../_static/bicycle_command_snippet.png - :alt: bicycle project command snippet - -Then, once the project is sent to the DCS server via Workbench, we can add the ``solve.out`` files to the list of collected output files using the Python client. - -.. 
code-block:: python - - from ansys.rep.client.jms import Client, File - client = Client(rep_url="https://127.0.0.1:8443/rep", username="repadmin", password="repadmin") - - # Get the project and its current job_definition - proj = client.get_project(id="DPS_Mechanical_Bicycle_Wheel_2019R3") - job_def = proj.get_job_definitions(id=1)[0] - - # Define new file objects - additional_files = [] - additional_files.append(File( name="solve_lateral_force_out", evaluation_path="lateral_force.out", type="text/plain", collect=True ) ) - additional_files.append(File( name="solve_radial_force_out", evaluation_path="radial_force.out", type="text/plain", collect=True ) ) - - # Add new files to the list of project files - files = proj.create_files(additional_files) - - # Get file IDs - solve_1_out = files[0] - solve_2_out = files[1] - - # Add the new files to the list of existing output files - job_def.task_definitions[0].output_file_ids.extend([solve_1_out.id, solve_2_out.id]) - - # Send the modified project job_definition to the server - job_def = proj.update_job_definitions([job_def]) - -Once the design points are evaluated, you will be able to view the solve output files in the DPS Web APP -as well as to download them using the Python client as shown in the :ref:`previous example `. \ No newline at end of file diff --git a/doc/source/examples/ex_cfx_static_mixer.rst b/doc/source/examples/ex_cfx_static_mixer.rst index b3cdf93bf..f5dcb35bd 100644 --- a/doc/source/examples/ex_cfx_static_mixer.rst +++ b/doc/source/examples/ex_cfx_static_mixer.rst @@ -9,13 +9,12 @@ This example shows how to submit a CFX Static Mixer model to be solved on REP. The project setup script as well as the data files can be downloaded here :download:`CFX Static Mixer Example <../../../build/cfx_static_mixer.zip>`. - The project uses an execution script exec_cfx.py instead of a solver command line. - The execution script is located in this zip file :download:`Example Execution Scripts <../../../build/exec_scripts.zip>`. + The project uses an execution script `exec_cfx.py` instead of a solver command line. .. literalinclude:: ../../../examples/cfx_static_mixer/project_setup.py :language: python :caption: project_setup.py -.. literalinclude:: ../../../examples/exec_scripts/exec_cfx.py +.. literalinclude:: ../../../examples/cfx_static_mixer/exec_cfx.py :language: python :caption: exec_cfx.py \ No newline at end of file diff --git a/doc/source/examples/ex_download.rst b/doc/source/examples/ex_download.rst deleted file mode 100644 index 5e4ccff21..000000000 --- a/doc/source/examples/ex_download.rst +++ /dev/null @@ -1,229 +0,0 @@ -.. _example_download_files: - -Download of output files -================================== - -This example shows how to download the output files of design points. -We assume you have already completed the :ref:`example_mapdl_motorbike_frame` tutorial and have the MAPDL Motorbike Frame project at hand. - -To begin with, we connect to a DCS server running on the localhost with default username and password -and specify the local path where the files are going to be saved. - -.. warning:: - Before executing the code, make sure to adjust the ``download_path`` variable. - -.. 
code-block:: python - - import os - from ansys.rep.client.jms import Client - - # Connect to the DCS server and query the project - client = Client(rep_url="https://127.0.0.1:8443/rep", username="repadmin", password="repadmin") - proj = client.get_project(id="mapdl_motorbike_frame") - - # Specify the base path where to download files - download_path = #e.g. 'C:\\Users\\your_username\\Documents\\mapdl_motorbike_frame\\download' - if not os.path.exists(download_path): - os.makedirs(download_path) - - -Log files ------------------------------- - -We first show how to download the log files. To this end, let's first get all evaluated and failed design points. -Note that we specify the ``fields`` query parameter to restrict the returned fields to the ones we actually need for this example. - -.. code-block:: python - - dps = proj.get_jobs(fields=["id", "files", "eval_status"], eval_status=["evaluated", "failed"]) - -Also note that each design point holds a list of file IDs linked to it. - -.. code-block:: python - - print(dps[0].file_ids) - #Output (numbers could be different) - #[6, 7, 8, 36, 1] - -For each design point, we now download the content of its ``file.out`` log file (which contains the APDL output) and save it to disk. - -.. code-block:: python - - for dp in dps: - - print(f"Downloading log file of design point {dp.id} ...") - f = proj.get_files(id=dp.file_ids, evaluation_path="file.out", content=True)[0] - - # If some content is found (it's not the case if the design point fails - # even before starting the simulation) we save it to disk. - if f.content: - path = os.path.join(download_path, f"{f.name}_dp{dp.id}.txt") - print(f"Saving {f.name} to {path}") - with open(path, "w") as tf: - tf.write(f.content.decode('utf-8')) - -.. note:: - With the ``id`` and ``evaluation path`` query parameters in the ``get_files`` function we specify that we are only - interested in the ``file.out`` produced by the current design point. - Moreover, by setting ``content=True`` we effectively download the file content and - not only its metadata. - -Images -------------------------------------- - -Similarly, we can download the images associated to the design points. In this example, for each design point -we download and save all JPEG images. - -.. code-block:: python - - for dp in dps: - - print(f"Downloading image files of design point {dp.id} ... ") - files = proj.get_files(id=dp.file_ids, type="image/jpeg", content=True) - - for f in files: - if f.content: - path = os.path.join(download_path, f"{f.name}_dp{dp.id}.jpg") - print(f"Saving {f.name} to {path}") - with open(path, "wb") as bf: - bf.write(f.content) - - -Parsing of log files ----------------------------------- - -The possibility to download and parse log files can be particularly useful when design point failures need to be investigated. -Consider for instance the case where design points are submitted to DPS from a Workbench project. -Suppose that unexpected failures are observed for different reasons. For instance, some might be due to parameter values resulting in invalid job_definitions, -others could be related to non-convergence of some intermediate steps, some others could instead be caused by hardware failure. - -To systematically investigate such cases, one could for instance download the log files of failed design points and parse the content looking for specific -failure indicators. - -Downloading the log files goes along the lines of the examples above. 
Note however the different query parameters -used in the ``get_files`` function. - -.. code-block:: python - - import os - from ansys.rep.client.jms import Client - - # Connect to the DCS server and query the project - client = Client(rep_url="https://127.0.0.1:8443/rep", username="repadmin", password="repadmin") - proj = client.get_project(id="project_id") - - # Specify the base path where to download files - download_path = #e.g. 'C:\\Users\\your_username\\Documents\\project_id\\download' - if not os.path.exists(download_path): - os.makedirs(download_path) - - # Get all failed design points - failed_dps = proj.get_jobs(eval_status=['failed']) - print("%d failed design points" % len(failed_dps)) - - # for each design point, download its workbench log file - for dp in failed_dps: - - print("Downloading log file of design point %d ... " % dp.id) - - query_params = {"id": dp.file_ids, "name.contains": "log_Workbench_Project"} - f = proj.get_files(**query_params, content=True)[0] - - if f.content: - path = os.path.join(download_path, f"{f.name}_dp{dp.id}.txt") - print(f"Saving {f.name} to {path}") - with open(path, "w") as tf: - tf.write(f.content.decode('utf-8')) - -The actual parsing of the log files could be done for instance by first defining a dictionary of -failure strings to look for. -These could have been identified by prior knowledge of possible failure reasons or by manually inspecting some of the log files. - -.. code-block:: python - - failure_dict = [ - { - "failure_string": "Could not find file", - "label": "Missing file" - }, - { - "failure_string": "no longer available in the geometry", - "label": "Missing geometry" - }, - { - "failure_string": "Can't initialize Addin", - "label": "Installation" - }, - { - "failure_string": "One or more entities failed to mesh", - "label": "Meshing failure" - }, - { - "failure_string": "license", - "label": "No License" - } - ] - -For each design point, we then open the corresponding log file and test whether it contains any failure strings. -The results are collected in a ``pandas`` dataframe object which can be easily queried or exported in ``csv`` or ``xlsx`` format. - -.. code-block:: python - - import pandas - df = pandas.DataFrame(columns=["Id", "Name", "Eval Status", "Elapsed Time", "Failures"]) - - for dp in failed_dps: - log_file_path = os.path.join(download_path, "log_Workbench_Project_dp%d.txt" % dp.id) - - errors = set() - - if os.path.exists(log_file_path): - with open(log_file_path, 'r') as lf: - content = lf.read() - - errors = set() - for row in failure_dict: - if row["failure_string"] in content: - errors.add(row["label"]) - - if not errors: - errors.add("Unknown") - else: - errors.add("Missing Log File") - - df = df.append({ - "Id": dp.id, - "Name": dp.name, - "Eval Status": dp.eval_status, - "Elapsed Time": dp.elapsed_time, - "Failures": '; '.join(errors) - }, ignore_index=True) - - - # Example export to Excel or CSV - df.to_excel(os.path.join(download_path, "failures.xlsx"), index=False) # requires the openpyxl package to be available - df.to_csv(os.path.join(download_path, "failures.csv"), index=False) - -The dataframe will look like this - -.. code-block:: python - - Id Name Eval Status Elapsed Time Failures - 0 6 8 failed 137.543538 Missing geometry - 1 7 9 failed 86.561933 Unknown - 2 10 12 failed 112.935375 Missing geometry; Missing file - 3 11 13 failed 138.019429 License - 4 18 20 failed 133.204345 Missing geometry - ... - -By suitably querying the dataframe, one could then easily e.g. 
set to pending all design points which failed because of a specific reason. - -.. code-block:: python - - from ansys.rep.client.jms import Job - - # get the ID of design points which failed because of license issues - ids = df[df["Failures"].str.contains("License")]["Id"].to_list() - - dps_of_interest = [Job(id=id, eval_status="pending") for id in ids] - dps_of_interest = proj.update_jobs(dps_of_interest) \ No newline at end of file diff --git a/doc/source/examples/ex_fluent_nozzle.rst b/doc/source/examples/ex_fluent_nozzle.rst index ec6879fd6..33c29ed3b 100644 --- a/doc/source/examples/ex_fluent_nozzle.rst +++ b/doc/source/examples/ex_fluent_nozzle.rst @@ -9,13 +9,12 @@ This example shows how to submit a Fluent nozzle model to be solved on REP. The project setup script as well as the data files can be downloaded here :download:`Fluent Nozzle Example <../../../build/fluent_nozzle.zip>`. - The project uses an execution script exec_fluent.py instead of a solver command line. - The execution script is located in this zip file :download:`Example Execution Scripts <../../../build/exec_scripts.zip>`. + The project uses an execution script `exec_fluent.py` instead of a solver command line. .. literalinclude:: ../../../examples/fluent_nozzle/project_setup.py :language: python :caption: project_setup.py -.. literalinclude:: ../../../examples/exec_scripts/exec_fluent.py +.. literalinclude:: ../../../examples/fluent_nozzle/exec_fluent.py :language: python :caption: exec_fluent.py \ No newline at end of file diff --git a/doc/source/quickstart.rst b/doc/source/quickstart.rst index 08e307683..490b250e0 100644 --- a/doc/source/quickstart.rst +++ b/doc/source/quickstart.rst @@ -247,8 +247,6 @@ Get file definitions from an existing project Job Definition and replace the fir file.src = r"D:\local_folder\my_project\input_file.xyz" project.update_files([file]) -For instructions on how to add a new file to an existing project job_definition, see :ref:`Adding a file to a project `. - Modify and create users ------------------------------------------ diff --git a/examples/README.md b/examples/README.md deleted file mode 100644 index df635b4e6..000000000 --- a/examples/README.md +++ /dev/null @@ -1 +0,0 @@ -# Examples diff --git a/examples/cfx_static_mixer/project_setup.py b/examples/cfx_static_mixer/project_setup.py index 95b9df840..f730beb83 100644 --- a/examples/cfx_static_mixer/project_setup.py +++ b/examples/cfx_static_mixer/project_setup.py @@ -67,7 +67,7 @@ def create_project(client, name, num_jobs=20, version=__external_version__): name="exec_cfx", evaluation_path="exec_cfx.py", type="application/x-python-code", - src=os.path.join(cwd, "..", "exec_scripts", "exec_cfx.py"), + src=os.path.join(cwd, "exec_cfx.py"), ) ) diff --git a/examples/exec_scripts/exec_cfx.py b/examples/exec_scripts/exec_cfx.py deleted file mode 100644 index 171f993fe..000000000 --- a/examples/exec_scripts/exec_cfx.py +++ /dev/null @@ -1,285 +0,0 @@ -""" -Copyright (C) 2021 ANSYS, Inc. and its subsidiaries. All Rights Reserved. 
-""" -import _thread -import json -import logging -import os -from os import path -import platform -import re -import shlex -import subprocess -import time -import traceback - -from ansys.rep.common.logging import log -from ansys.rep.evaluator.task_manager import ApplicationExecution -from ansys.rep.evaluator.task_manager.context import SubmitContext - - -class CfxExecution(ApplicationExecution): - isLinux = platform.platform().startswith("Linux") - - def __init__(self, context): - self.active_run_name = None - self.putative_run_name = None - self.withSoftInterrupt = True - ApplicationExecution.__init__(self, context) - - def publish_to_default_log(self, msg): - log.info(msg) - - def publish_to_debug_log(self, msg): - log.debug(msg) - - def execute(self): - log.info("Start CFX execution script") - - try: - log.info("Evaluator Platform: " + platform.platform()) - - num_cores = self.context.resource_requirements["num_cores"] - log.info(f"Requested cores: {num_cores}") - - # create defaults for inputs not provided - inputs = { - "cfx_additionalArgs": self.context.execution_context.get("cfx_additionalArgs", "") - } - inputs["cfx_solverFile"] = self.context.execution_context.get("cfx_solverFile", None) - inputs["cfx_definitionFile"] = self.context.execution_context.get( - "cfx_definitionFile", None - ) - inputs["cfx_iniFile"] = self.context.execution_context.get("cfx_iniFile", None) - inputs["cfx_cclFile"] = self.context.execution_context.get("cfx_cclFile", None) - inputs["cfx_contFile"] = self.context.execution_context.get("cfx_contFile", None) - inputs["cfx_mcontFile"] = self.context.execution_context.get("cfx_mcontFile", None) - inputs["cfx_mdefFile"] = self.context.execution_context.get("cfx_mdefFile", None) - inputs["cfx_parFile"] = self.context.execution_context.get("cfx_parFile", None) - inputs["cfx_indirectPath"] = self.context.execution_context.get( - "cfx_indirectPath", None - ) - inputs["cfx_version"] = self.context.execution_context.get("cfx_version", None) - inputs["cfx_useAAS"] = self.context.execution_context.get("cfx_useAAS", False) - inputs["cfx_startMethod"] = self.context.execution_context.get("cfx_startMethod", None) - inputs["cfx_runName"] = self.context.execution_context.get("cfx_runName", None) - - cclFile = next((f for f in self.context.input_files if f["name"] == "ccl"), None) - if cclFile != None: - inputs["cfx_cclFile"] = cclFile["path"] - log.info("ccl file path: " + cclFile["path"]) - - defFile = next((f for f in self.context.input_files if f["name"] == "def"), None) - if defFile != None: - inputs["cfx_definitionFile"] = defFile["path"] - log.info("def file path: " + defFile["path"]) - - self.publish_to_default_log( - "Task inputs after applying default values to missing inputs:" - ) - for name in inputs.keys(): - if inputs[name] == None: - continue - self.publish_to_default_log("\t-" + name + ":<" + str(inputs[name]) + ">") - - # Check existence of files which must exist if specified - inputs_existchk = [ - "cclFile", - "contFile", - "definitionFile", - "iniFile", - "mcontFile", - "mdefFile", - "parFile", - "solverFile", - ] - - self.publish_to_default_log("Checking if provided files exist in the storage...") - for i in inputs_existchk: - k = "cfx_" + i - if not inputs[k] == None: - if not os.path.isfile(inputs[k]): - raise Exception("Required file does not exist!\n" + inputs[k]) - - if not inputs["cfx_indirectPath"] == None: - # Special check for indirect startup and set active name for later use - rundir = inputs["cfx_indirectPath"] + ".dir" - if not 
os.path.isdir(rundir): - raise Exception("Required directory does not exist!\n" + rundir) - startup_ccl = rundir + "/startup.ccl" - if not os.path.isfile(startup_ccl): - raise Exception(startup_ccl) - self.active_run_name = inputs["cfx_indirectPath"] - else: - # Set putative run name from input file - for i in ["definitionFile", "mdefFile", "contFile", "iniFile", "mcontFile"]: - k = "cfx_" + i - if not inputs[k] == None: - probname = re.sub("(_\d{3})?\.[^\.]+$", "", inputs[k]) - self.set_putative_run_name(probname) - break - - if self.putative_run_name == None and inputs["cfx_runName"] != None: - self.set_putative_run_name(inputs["cfx_runName"]) - - # Set putative run name from -eg or -name value (-name always wins) - if ( - not inputs["cfx_additionalArgs"] == "" - and not inputs["cfx_additionalArgs"] == None - ): - for opt in ["-eg", "-example", "-name"]: - m = re.search(opt + "\s+([^\s-]+)", inputs["cfx_additionalArgs"]) - if m: - self.set_putative_run_name(m.group(1)) - - # Identify application - app_name = "Ansys CFX" - app = next((a for a in self.context.software if a["name"] == app_name), None) - assert app, f"{app_name} is required for execution" - - log.info("Using " + app["name"] + " " + app["version"]) - log.info("Current directory: " + os.getcwd()) - - files = [f for f in os.listdir(".") if os.path.isfile(f)] - for f in files: - log.info(" " + f) - - # Determine CFX root directory, solver command and hosts - self.publish_to_default_log("CFX Root directory = " + app["install_path"]) - - exe = app["executable"] # should already be platform specific - self.publish_to_default_log("CFX Solver command: " + exe) - - # Create command line - # Add parallel options - cmd = [os.path.basename(exe)] - cmd.append("-fullname") - cmd.append(self.active_run_name) - cmd.append("-batch") - cmd.append("-serial") - - # Add options requiring an argument - options_arg = { - "-ccl": "cclFile", - "-continue-from-file": "contFile", - "-def": "definitionFile", - "-indirect-startup-path": "indirectPath", - "-initial-file": "iniFile", - "-mcont": "mcontFile", - "-mdef": "mdefFile", - "-parfile-read": "parFile", - "-solver": "solverFile", - } - for opt, i in sorted(options_arg.items()): - k = "cfx_" + i - if not inputs[k] == None: - cmd.append(opt) - cmd.append(inputs[k]) - - # Add additional options - if not inputs["cfx_additionalArgs"] == "" and not inputs["cfx_additionalArgs"] == None: - cmd.extend(shlex.split(inputs["cfx_additionalArgs"])) - - # Start the solver - self.publish_to_default_log("CFX solver command line = " + str(cmd)) - - rc = None - self.CFXOutputFile = None - self.CFXMonFile = None - cfx_env = os.environ.copy() - - with subprocess.Popen( - cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=cfx_env, executable=exe - ) as self.proc: - if self.proc == None: - raise Exception("CFX Solver did not start") - self.publish_to_default_log("CFX solver started\npid:" + format(self.proc.pid)) - t1 = _thread.start_new_thread(self.process_output, (self.proc,)) - t2 = _thread.start_new_thread(self.process_error, (self.proc,)) - - while rc is None: - rc = self.proc.poll() - time.sleep(1) - - # Post solution actions - for msg in ["Finished CFX solve"]: - self.publish_to_default_log(msg) - - if rc != 0: - self.publish_to_default_log(f"Error: Solver exited with errors ({rc}).") - raise Exception("Solver exited with errors.") - - return - - except Exception as e: - self.publish_to_debug_log(traceback.print_exc()) - self.publish_to_default_log(str(e)) - raise e - - # Set putative run name from problem 
name (to be called BEFORE the run is started) - def set_putative_run_name(self, probname): - if self.active_run_name != None: - return - imax = 0 - for dI in os.listdir(os.getcwd()): - m = re.match("^" + probname + "_(\d+)(\.(ansys|dir|out|res|mres|trn|cfx))?$", dI) - if m: - i = int(m.group(1)) - if i > imax: - imax = i - prob_ext = str(imax + 1) - self.putative_run_name = probname + "_" + prob_ext.zfill(3) - self.active_run_name = self.putative_run_name - self.publish_to_default_log("Set putative run name = " + self.putative_run_name) - - # Find active run name from putative run name (to be called AFTER the run is started) - def find_active_run_name(self): - # Putative run name set: Wait for output or run directory or output file to exist - if self.active_run_name == None: - if self.putative_run_name == None: - raise Exception("Unable to find active run name. Putative run name not set.") - outdir = path.join(os.getcwd(), self.putative_run_name) - rundir = outdir + ".dir" - outfile = outdir + ".out" - while self.active_run_name == None: - if path.isdir(outdir) or path.isdir(rundir) or path.isfile(outfile): - self.active_run_name = self.putative_run_name - else: - time.sleep(1) - return self.active_run_name - - # Monitor the stdout of the main process. If present, create log and log data. - def process_output(self, proc): - for line in iter(proc.stdout.readline, b""): - msg = line.decode("utf-8").rstrip() - self.publish_to_default_log(msg) - proc.stdout.close() - - # Monitor the stderr of the main process. If present, create log and log data. - def process_error(self, proc): - for line in iter(proc.stderr.readline, b""): - msg = line.decode("utf-8").rstrip() - self.publish_to_default_log(msg) - proc.stderr.close() - - -# EXAMPLE: this function will only be called if this script is run at the command line. -if __name__ == "__main__": - log = logging.getLogger() - logging.basicConfig(format="%(message)s", level=logging.DEBUG) - - try: - log.info("Loading sample CFX context...") - - with open("cfx_context.json", "r") as f: - context = json.load(f) - print(context) - - submit_context = SubmitContext(**context) - - log.info("Executing...") - ex = CfxExecution(submit_context).execute() - log.info("Execution ended.") - - except Exception as e: - log.error(str(e)) diff --git a/examples/exec_scripts/exec_fluent.py b/examples/exec_scripts/exec_fluent.py deleted file mode 100644 index 41063415a..000000000 --- a/examples/exec_scripts/exec_fluent.py +++ /dev/null @@ -1,361 +0,0 @@ -""" -Copyright (C) 2021 ANSYS, Inc. and its subsidiaries. All Rights Reserved. 
-""" -import _thread -import json -import logging -import os -import platform -import subprocess -import time -import traceback - -from ansys.rep.common.logging import log -from ansys.rep.evaluator.task_manager import ApplicationExecution -from ansys.rep.evaluator.task_manager.context import SubmitContext -import psutil - - -class FluentExecution(ApplicationExecution): - isLinux = platform.platform().startswith("Linux") - - def __init__(self, context): - self.CleanupScript = None - self.FluentTranscript = None - self.error_detected = False - self.fluent_children = [] - ApplicationExecution.__init__(self, context) - - def execute(self): - try: - log.info("Start FLUENT execution script") - - pythoncode_version = "0.1" - log.info("python code version " + pythoncode_version) - - log.info("Evaluator Platform: " + platform.platform()) - - num_cores = self.context.resource_requirements["num_cores"] - log.info(f"Requested cores: {num_cores}") - - # self.environmentInfo.defaultMpi - defaultMpi = "intel" - - # create defaults for inputs not provided - inputs = { - "fluent_dimension": self.context.execution_context.get("fluent_dimension", "2d") - } - inputs["fluent_precision"] = self.context.execution_context.get( - "fluent_precision", "dp" - ) - inputs["fluent_meshing"] = self.context.execution_context.get("fluent_meshing", False) - inputs["fluent_numGPGPUsPerMachine"] = self.context.execution_context.get( - "fluent_numGPGPUsPerMachine", 0 - ) - inputs["fluent_defaultFluentVersion"] = self.context.execution_context.get( - "fluent_defaultFluentVersion", None - ) - inputs["fluent_MPIType"] = self.context.execution_context.get( - "fluent_MPIType", defaultMpi - ) - inputs["fluent_otherEnvironment"] = self.context.execution_context.get( - "fluent_otherEnvironment", "{}" - ) - inputs["fluent_UDFBat"] = self.context.execution_context.get("fluent_UDFBat", None) - inputs["fluent_useGUI"] = self.context.execution_context.get("fluent_useGUI", False) - inputs["fluent_additionalArgs"] = self.context.execution_context.get( - "fluent_additionalArgs", "" - ) - - log.info("Task inputs ") - for name in inputs.keys(): - if inputs[name] == None: - continue - log.info("\t-" + name + ":<" + str(inputs[name]) + ">") - - log.info("Checking if required inputs are provided...") - - valid_launcher_dimensions = ["2d", "3d"] - if not inputs["fluent_dimension"] in valid_launcher_dimensions: - raise Exception( - "Required Input is invalid! fluent_dimension(" - + inputs["fluent_dimension"] - + ")\nValid values are " - + format(valid_launcher_dimensions) - ) - - valid_launcher_precisions = ["sp", "dp"] - if not inputs["fluent_precision"] in valid_launcher_precisions: - raise Exception( - "Required Input is invalid! 
fluent_precision(" - + inputs["fluent_precision"] - + ")\nValid values are " - + format(valid_launcher_precisions) - ) - - # Identify application - app_name = "Ansys Fluent" - app = next((a for a in self.context.software if a["name"] == app_name), None) - assert app, f"{app_name} is required for execution" - - log.info("Using " + app["name"] + " " + app["version"]) - log.info("Current directory: " + os.getcwd()) - - files = [f for f in os.listdir(".") if os.path.isfile(f)] - for f in files: - log.info(" " + f) - - jouFile = next((f for f in self.context.input_files if f["name"] == "jou"), None) - log.info("journal file path: " + jouFile["path"]) - - if jouFile == None or not os.path.isfile(jouFile["path"]): - raise Exception("File " + jouFile["path"] + " does not exist!") - - # Add " around exe if needed for Windows - exe = app["executable"] # should already be platform specific - log.info("Fluent executable: " + exe) - - if inputs["fluent_UDFBat"] == None: - if self.isLinux: - pass # no need in Linux, None is OK - else: - inputs["fluent_UDFBat"] = os.path.join(os.path.dirname(exe), "udf.bat") - log.info("Setting fluent_UDFBat to " + inputs["fluent_UDFBat"]) - - otherEnvironment = json.loads(inputs["fluent_otherEnvironment"]) - noGuiOptions = None - if not inputs["fluent_useGUI"]: - if self.isLinux: - noGuiOptions = " -gu -driver null" - else: - noGuiOptions = " -hidden -driver null" - - log.debug(f"exe: {exe}") - args = inputs["fluent_dimension"] - args += inputs["fluent_precision"] if inputs["fluent_precision"] == "dp" else "" - args += " -meshing" if inputs["fluent_meshing"] else "" - args += " -t" + format(num_cores) - if inputs["fluent_MPIType"] != None and inputs["fluent_MPIType"] != "": - args += " -mpi=" + format(inputs["fluent_MPIType"]) - if inputs["fluent_numGPGPUsPerMachine"] > 0: - args += " -gpgp=" + format(inputs["fluent_numGPGPUsPerMachine"]) - args += " -i " + jouFile["path"] - # args+= cnf - if not noGuiOptions == None: - args += noGuiOptions - args += " " + inputs["fluent_additionalArgs"] + " " - - cmd = [os.path.basename(exe)] - cmd.extend(args.split(" ")) - - rc = None - firstchild = None - - fluent_env = os.environ.copy() - - for oenv in otherEnvironment: - if "FLUENT_GUI" == oenv["Name"]: - continue - # if "FLUENT_AAS"==oenv['Name']:continue - fluent_env[oenv["Name"]] = oenv["Value"] - log.info("Fluent environment:") - for k in fluent_env: - try: - log.info("\t- " + k + "\n\t\t " + fluent_env[k]) - except: - log.info("\t- error while printing " + k) - - log.info(" ".join(cmd)) - - max_wait_time = 120 - tried_time = 0 - self.error_detected = False - with subprocess.Popen( - cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=fluent_env, executable=exe - ) as self.proc: - log.info("Fluent started\npid:" + format(self.proc.pid)) - log.info("TODO: start new thread to monitor process children") - t3 = _thread.start_new_thread(self.monitor_children, (self.proc,)) - log.info("Fluent started a new thread to monitor its children") - t4 = _thread.start_new_thread(self.monitor_transcript, (self.proc,)) - log.info("Fluent started a new thread to monitor its transcript") - - t1 = _thread.start_new_thread(self.process_output, (self.proc,)) - log.info("Fluent started a new thread for stdout log") - t2 = _thread.start_new_thread(self.process_error, (self.proc,)) - log.info("Fluent started a new thread for stderr log") - while True: - if self.error_detected: - log.info("Error: Solver exited with error") - log.info("TODO: implement child process kill") - for child in 
self.fluent_children: - pToKill = psutil.Process(child) - pToKill.kill() - raise Exception("Solver exited with errors.") - if rc is None: - rc = self.proc.poll() - elif firstchild is None: - time.sleep(3) - tried_time = tried_time + 3 - if len(self.fluent_children) == 0: - if tried_time < max_wait_time: - log.info("\t- no fluent children process found, continue") - continue - else: - log.info( - "\t- can not start fluent in " - + format(max_wait_time) - + "seconds, quit the process" - ) - break - firstchild = self.fluent_children[0] - log.info("rc:" + format(rc) + " ,firstchild:" + format(firstchild)) - elif not psutil.pid_exists(firstchild): - log.info("\t- fluent exits normally") - break - - log.info("Finished Fluent solve") - if rc != 0: - log.info(f"Error: Solver exited with errors ({rc}).") - raise Exception("Solver exited with errors.") - - except Exception as e: - log.info("====== error in execute =========") - log.debug(traceback.print_exc()) - log.info(str(e)) - log.info("====== error in execute =========") - raise e - - # monitor the children of the main process - def monitor_children(self, proc): - starting_process = psutil.Process(proc.pid) - try: - while True: - for child in starting_process.children(): - if not child.pid in self.fluent_children: - self.fluent_children.append(child.pid) - time.sleep(0.001) - except Exception as e: - if not "psutil.NoSuchProcess" in format(e): - errormessage = traceback.format_exc() - log.info(errormessage) - log.info("<" + format(e) + ">") - - # monitor creation and content of transcript files and record content to corresponding logs - def monitor_transcript(self, proc): - try: - while True: - log.info("Looking for fluent automatically generated transcript file...") - if not self.FluentTranscript == None: - break - time.sleep(1) - for fn in os.listdir("."): - if not fn.endswith(".trn"): - continue - if fn.endswith(format(self.proc.pid) + ".trn"): - self.FluentTranscript = fn - for childpid in self.fluent_children: - if fn.endswith(format(childpid) + ".trn"): - log.info( - "Warning: a fluent child process generated transcript <" - + format(fn) - + "> is found!" 
- ) - self.FluentTranscript = fn - if not self.FluentTranscript == None: - break - log.info("Fluent transcript detected: <" + format(self.FluentTranscript) + ">") - - current_line = 0 - while True: - time.sleep(1) - with open(self.FluentTranscript) as f: - for _ in range(current_line): - next(f) - for line in f: - log.info(line.rstrip()) - current_line = current_line + 1 - msg = line.rstrip() - if msg.startswith("ANSYS LICENSE STDOUT ERROR"): - self.error_detected = True - log.info("License error detected in fluent") - if msg.startswith("Unexpected license problem"): - self.error_detected = True - log.info("Unexpected license error detected in fluent") - if msg.startswith( - "Warning: An error or interrupt occurred while reading the journal file" - ): - self.error_detected = True - log.info("An error detected in fluent, killing fluent...") - if msg.startswith("Error:"): - self.error_detected = True - log.info("An error detected in fluent, killing fluent...") - if msg.startswith("Cleanup script file is"): - self.CleanupScript = msg.replace("Cleanup script file is ", "") - log.debug("Execute kills script is : " + self.CleanupScript) - if msg.startswith('Opening input/output transcript to file "'): - self.FluentTranscript = msg.replace( - 'Opening input/output transcript to file "', "" - ).replace('".', "") - log.debug("Fluent transcript is : " + self.FluentTranscript) - except Exception as e: - errormessage = traceback.format_exc() - log.info(errormessage) - log.info("<" + format(e) + ">") - - # monitor the stdout of the main process and log information to corresponding logs - def process_output(self, proc): - for line in iter(proc.stdout.readline, b""): - msg = line.decode("utf-8").rstrip() - log.info(msg) - if msg.startswith("ANSYS LICENSE MANAGER ERROR"): - self.error_detected = True - if msg.startswith("Cleanup script file is"): - self.CleanupScript = msg.replace("Cleanup script file is ", "") - log.debug("Execute kills script is : " + self.CleanupScript) - if msg.startswith('Opening input/output transcript to file "'): - self.FluentTranscript = msg.replace( - 'Opening input/output transcript to file "', "" - ).replace('".', "") - log.debug("Fluent transcript is : " + self.FluentTranscript) - # log.info(msg) - if self.error_detected: - log.debug(msg) - proc.stdout.close() - - # monitor the stderr of the main process and log information to corresponding logs - def process_error(self, proc): - for line in iter(proc.stderr.readline, b""): - msg = line.decode("utf-8").rstrip() - log.error(msg) - if msg.startswith("Fatal error in MPI_Init: Internal MPI error!"): - if self.CleanupScript == None: - self.proc.kill() - else: - p = subprocess.Popen( - self.CleanupScript, stdout=subprocess.PIPE, stderr=subprocess.PIPE - ) - stdout, stderr = p.communicate() - proc.stderr.close() - - -# EXAMPLE: this function will only be called if this script is run at the command line. 
-if __name__ == "__main__": - log = logging.getLogger() - logging.basicConfig(format="%(message)s", level=logging.DEBUG) - - try: - log.info("Loading sample Fluent context...") - - with open("fluent_context.json", "r") as f: - context = json.load(f) - print(context) - - submit_context = SubmitContext(**context) - - log.info("Executing...") - ex = FluentExecution(submit_context).execute() - log.info("Execution ended.") - - except Exception as e: - log.error(str(e)) diff --git a/examples/exec_scripts/exec_mapdl.py b/examples/exec_scripts/exec_mapdl.py deleted file mode 100644 index a1bf9483e..000000000 --- a/examples/exec_scripts/exec_mapdl.py +++ /dev/null @@ -1,49 +0,0 @@ -""" -Basic execution script for MAPDL. - -Command formed: ansys.exe -b -i -o -np 4 - -""" -import os -import subprocess - -from ansys.rep.common.logging import log -from ansys.rep.evaluator.task_manager import ApplicationExecution - - -class MAPDLExecution(ApplicationExecution): - def execute(self): - - log.info("Starting MAPDL execution script") - - # Identify files - inp_file = next((f for f in self.context.input_files if f["name"] == "inp"), None) - assert inp_file, "Input file inp missing" - out_file = next((f for f in self.context.output_files if f["name"] == "out"), None) - assert out_file, "Output file out missing" - - # Identify application - app_name = "Ansys Mechanical APDL" - app = next((a for a in self.context.software if a["name"] == app_name), None) - assert app, f"Cannot find app {app_name}" - - # Add " around exe if needed for Windows - exe = app["executable"] - if " " in exe and not exe.startswith('"'): - exe = '"%s"' % exe - - # Use properties from resource requirements - num_cores = self.context.resource_requirements["num_cores"] - - # Pass env vars correctly - env = dict(os.environ) - env.update(self.context.environment) - - # Form command - cmd = f"{exe} -b -i {inp_file['path']} -o {out_file['path']} -np {num_cores}" - - # Execute command - log.info(f"Executing: {cmd}") - subprocess.run(cmd, shell=True, check=True, env=env) - - log.info("End MAPDL execution script") diff --git a/examples/exec_scripts/exec_python.py b/examples/exec_scripts/exec_python.py deleted file mode 100644 index 182ef6f3f..000000000 --- a/examples/exec_scripts/exec_python.py +++ /dev/null @@ -1,48 +0,0 @@ -""" -Simplistic execution script for Python. 
- -Command formed: python -""" -import os -import subprocess - -from ansys.rep.common.logging import log -from ansys.rep.evaluator.task_manager import ApplicationExecution - - -class PythonExecution(ApplicationExecution): - def execute(self): - - log.info("Start Python execution script") - - # Identify files - script_file = next((f for f in self.context.input_files if f["name"] == "script"), None) - assert script_file, "Python script file script missing" - inp_file = next((f for f in self.context.input_files if f["name"] == "inp"), None) - assert inp_file, "Input file inp missing" - - # Identify application - app_name = "Python" - app = next((a for a in self.context.software if a["name"] == app_name), None) - assert app, f"Cannot find app {app_name}" - - # Add " around exe if needed for Windows - exe = app["executable"] - if " " in exe and not exe.startswith('"'): - exe = '"%s"' % exe - - # Use properties from resource requirements - # None currently - - # Pass env vars correctly - env = dict(os.environ) - env.update(self.context.environment) - - # Form command - cmd = f"{exe} {script_file['path']} {inp_file['path']}" - - # Execute - log.info(f"Executing: {cmd}") - subprocess.run(cmd, shell=True, check=True, env=env) - - log.info("End Python execution script") diff --git a/examples/fluent_nozzle/project_setup.py b/examples/fluent_nozzle/project_setup.py index 1e3bc76fe..f36ddc10e 100644 --- a/examples/fluent_nozzle/project_setup.py +++ b/examples/fluent_nozzle/project_setup.py @@ -59,7 +59,7 @@ def create_project(client, name, num_jobs=20, version=__external_version__): name="exec_fluent", evaluation_path="exec_fluent.py", type="application/x-python-code", - src=os.path.join(cwd, "..", "exec_scripts", "exec_fluent.py"), + src=os.path.join(cwd, "exec_fluent.py"), ) ) diff --git a/examples/mapdl_motorbike_frame/project_setup.py b/examples/mapdl_motorbike_frame/project_setup.py index 85ab25002..35053b836 100644 --- a/examples/mapdl_motorbike_frame/project_setup.py +++ b/examples/mapdl_motorbike_frame/project_setup.py @@ -89,7 +89,7 @@ def create_project( name="exec_mapdl", evaluation_path="exec_mapdl.py", type="application/x-python-code", - src=os.path.join(cwd, "..", "exec_scripts", "exec_mapdl.py"), + src=os.path.join(cwd, "exec_mapdl.py"), ) ) diff --git a/examples/python_two_bar_truss_problem/project_setup.py b/examples/python_two_bar_truss_problem/project_setup.py index cecef50b6..27aa4e390 100644 --- a/examples/python_two_bar_truss_problem/project_setup.py +++ b/examples/python_two_bar_truss_problem/project_setup.py @@ -70,7 +70,7 @@ def main(client, num_jobs, use_exec_script) -> Project: name="exec_python", evaluation_path="exec_python.py", type="application/x-python-code", - src=os.path.join(cwd, "..", "exec_scripts", "exec_python.py"), + src=os.path.join(cwd, "exec_python.py"), ) ) diff --git a/prepare_documentation.py b/prepare_documentation.py index b0915ee4d..9350f36ae 100644 --- a/prepare_documentation.py +++ b/prepare_documentation.py @@ -134,6 +134,7 @@ def archive_examples(): "mapdl_motorbike_frame": [ "project_setup.py", "project_query.py", + "exec_mapdl.py", "motorbike_frame_results.txt", "motorbike_frame.mac", ], @@ -155,6 +156,7 @@ def archive_examples(): ], "python_two_bar_truss_problem": [ "project_setup.py", + "exec_python.py", "evaluate.py", "input_parameters.json", ], @@ -165,21 +167,17 @@ def archive_examples(): ], "fluent_nozzle": [ "project_setup.py", + "exec_fluent.py", "solve.jou", "nozzle.cas", ], "cfx_static_mixer": [ "project_setup.py", + "exec_cfx.py", 
"runInput.ccl", "StaticMixer_001.cfx", "StaticMixer_001.def", ], - "exec_scripts": [ - "exec_cfx.py", - "exec_mapdl.py", - "exec_fluent.py", - "exec_python.py", - ], } os.makedirs("build", exist_ok=True) From bc244f2de0d9d1bf2bf0649d09a67c7a015e7eac Mon Sep 17 00:00:00 2001 From: Federico Negri Date: Tue, 14 Feb 2023 14:37:35 +0100 Subject: [PATCH 2/2] Add missing files --- examples/cfx_static_mixer/exec_cfx.py | 285 ++++++++++++++ examples/fluent_nozzle/exec_fluent.py | 361 ++++++++++++++++++ examples/mapdl_motorbike_frame/exec_mapdl.py | 49 +++ .../exec_python.py | 48 +++ 4 files changed, 743 insertions(+) create mode 100644 examples/cfx_static_mixer/exec_cfx.py create mode 100644 examples/fluent_nozzle/exec_fluent.py create mode 100644 examples/mapdl_motorbike_frame/exec_mapdl.py create mode 100644 examples/python_two_bar_truss_problem/exec_python.py diff --git a/examples/cfx_static_mixer/exec_cfx.py b/examples/cfx_static_mixer/exec_cfx.py new file mode 100644 index 000000000..171f993fe --- /dev/null +++ b/examples/cfx_static_mixer/exec_cfx.py @@ -0,0 +1,285 @@ +""" +Copyright (C) 2021 ANSYS, Inc. and its subsidiaries. All Rights Reserved. +""" +import _thread +import json +import logging +import os +from os import path +import platform +import re +import shlex +import subprocess +import time +import traceback + +from ansys.rep.common.logging import log +from ansys.rep.evaluator.task_manager import ApplicationExecution +from ansys.rep.evaluator.task_manager.context import SubmitContext + + +class CfxExecution(ApplicationExecution): + isLinux = platform.platform().startswith("Linux") + + def __init__(self, context): + self.active_run_name = None + self.putative_run_name = None + self.withSoftInterrupt = True + ApplicationExecution.__init__(self, context) + + def publish_to_default_log(self, msg): + log.info(msg) + + def publish_to_debug_log(self, msg): + log.debug(msg) + + def execute(self): + log.info("Start CFX execution script") + + try: + log.info("Evaluator Platform: " + platform.platform()) + + num_cores = self.context.resource_requirements["num_cores"] + log.info(f"Requested cores: {num_cores}") + + # create defaults for inputs not provided + inputs = { + "cfx_additionalArgs": self.context.execution_context.get("cfx_additionalArgs", "") + } + inputs["cfx_solverFile"] = self.context.execution_context.get("cfx_solverFile", None) + inputs["cfx_definitionFile"] = self.context.execution_context.get( + "cfx_definitionFile", None + ) + inputs["cfx_iniFile"] = self.context.execution_context.get("cfx_iniFile", None) + inputs["cfx_cclFile"] = self.context.execution_context.get("cfx_cclFile", None) + inputs["cfx_contFile"] = self.context.execution_context.get("cfx_contFile", None) + inputs["cfx_mcontFile"] = self.context.execution_context.get("cfx_mcontFile", None) + inputs["cfx_mdefFile"] = self.context.execution_context.get("cfx_mdefFile", None) + inputs["cfx_parFile"] = self.context.execution_context.get("cfx_parFile", None) + inputs["cfx_indirectPath"] = self.context.execution_context.get( + "cfx_indirectPath", None + ) + inputs["cfx_version"] = self.context.execution_context.get("cfx_version", None) + inputs["cfx_useAAS"] = self.context.execution_context.get("cfx_useAAS", False) + inputs["cfx_startMethod"] = self.context.execution_context.get("cfx_startMethod", None) + inputs["cfx_runName"] = self.context.execution_context.get("cfx_runName", None) + + cclFile = next((f for f in self.context.input_files if f["name"] == "ccl"), None) + if cclFile != None: + inputs["cfx_cclFile"] = 
cclFile["path"] + log.info("ccl file path: " + cclFile["path"]) + + defFile = next((f for f in self.context.input_files if f["name"] == "def"), None) + if defFile != None: + inputs["cfx_definitionFile"] = defFile["path"] + log.info("def file path: " + defFile["path"]) + + self.publish_to_default_log( + "Task inputs after applying default values to missing inputs:" + ) + for name in inputs.keys(): + if inputs[name] == None: + continue + self.publish_to_default_log("\t-" + name + ":<" + str(inputs[name]) + ">") + + # Check existence of files which must exist if specified + inputs_existchk = [ + "cclFile", + "contFile", + "definitionFile", + "iniFile", + "mcontFile", + "mdefFile", + "parFile", + "solverFile", + ] + + self.publish_to_default_log("Checking if provided files exist in the storage...") + for i in inputs_existchk: + k = "cfx_" + i + if not inputs[k] == None: + if not os.path.isfile(inputs[k]): + raise Exception("Required file does not exist!\n" + inputs[k]) + + if not inputs["cfx_indirectPath"] == None: + # Special check for indirect startup and set active name for later use + rundir = inputs["cfx_indirectPath"] + ".dir" + if not os.path.isdir(rundir): + raise Exception("Required directory does not exist!\n" + rundir) + startup_ccl = rundir + "/startup.ccl" + if not os.path.isfile(startup_ccl): + raise Exception(startup_ccl) + self.active_run_name = inputs["cfx_indirectPath"] + else: + # Set putative run name from input file + for i in ["definitionFile", "mdefFile", "contFile", "iniFile", "mcontFile"]: + k = "cfx_" + i + if not inputs[k] == None: + probname = re.sub("(_\d{3})?\.[^\.]+$", "", inputs[k]) + self.set_putative_run_name(probname) + break + + if self.putative_run_name == None and inputs["cfx_runName"] != None: + self.set_putative_run_name(inputs["cfx_runName"]) + + # Set putative run name from -eg or -name value (-name always wins) + if ( + not inputs["cfx_additionalArgs"] == "" + and not inputs["cfx_additionalArgs"] == None + ): + for opt in ["-eg", "-example", "-name"]: + m = re.search(opt + "\s+([^\s-]+)", inputs["cfx_additionalArgs"]) + if m: + self.set_putative_run_name(m.group(1)) + + # Identify application + app_name = "Ansys CFX" + app = next((a for a in self.context.software if a["name"] == app_name), None) + assert app, f"{app_name} is required for execution" + + log.info("Using " + app["name"] + " " + app["version"]) + log.info("Current directory: " + os.getcwd()) + + files = [f for f in os.listdir(".") if os.path.isfile(f)] + for f in files: + log.info(" " + f) + + # Determine CFX root directory, solver command and hosts + self.publish_to_default_log("CFX Root directory = " + app["install_path"]) + + exe = app["executable"] # should already be platform specific + self.publish_to_default_log("CFX Solver command: " + exe) + + # Create command line + # Add parallel options + cmd = [os.path.basename(exe)] + cmd.append("-fullname") + cmd.append(self.active_run_name) + cmd.append("-batch") + cmd.append("-serial") + + # Add options requiring an argument + options_arg = { + "-ccl": "cclFile", + "-continue-from-file": "contFile", + "-def": "definitionFile", + "-indirect-startup-path": "indirectPath", + "-initial-file": "iniFile", + "-mcont": "mcontFile", + "-mdef": "mdefFile", + "-parfile-read": "parFile", + "-solver": "solverFile", + } + for opt, i in sorted(options_arg.items()): + k = "cfx_" + i + if not inputs[k] == None: + cmd.append(opt) + cmd.append(inputs[k]) + + # Add additional options + if not inputs["cfx_additionalArgs"] == "" and not 
inputs["cfx_additionalArgs"] == None: + cmd.extend(shlex.split(inputs["cfx_additionalArgs"])) + + # Start the solver + self.publish_to_default_log("CFX solver command line = " + str(cmd)) + + rc = None + self.CFXOutputFile = None + self.CFXMonFile = None + cfx_env = os.environ.copy() + + with subprocess.Popen( + cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=cfx_env, executable=exe + ) as self.proc: + if self.proc == None: + raise Exception("CFX Solver did not start") + self.publish_to_default_log("CFX solver started\npid:" + format(self.proc.pid)) + t1 = _thread.start_new_thread(self.process_output, (self.proc,)) + t2 = _thread.start_new_thread(self.process_error, (self.proc,)) + + while rc is None: + rc = self.proc.poll() + time.sleep(1) + + # Post solution actions + for msg in ["Finished CFX solve"]: + self.publish_to_default_log(msg) + + if rc != 0: + self.publish_to_default_log(f"Error: Solver exited with errors ({rc}).") + raise Exception("Solver exited with errors.") + + return + + except Exception as e: + self.publish_to_debug_log(traceback.print_exc()) + self.publish_to_default_log(str(e)) + raise e + + # Set putative run name from problem name (to be called BEFORE the run is started) + def set_putative_run_name(self, probname): + if self.active_run_name != None: + return + imax = 0 + for dI in os.listdir(os.getcwd()): + m = re.match("^" + probname + "_(\d+)(\.(ansys|dir|out|res|mres|trn|cfx))?$", dI) + if m: + i = int(m.group(1)) + if i > imax: + imax = i + prob_ext = str(imax + 1) + self.putative_run_name = probname + "_" + prob_ext.zfill(3) + self.active_run_name = self.putative_run_name + self.publish_to_default_log("Set putative run name = " + self.putative_run_name) + + # Find active run name from putative run name (to be called AFTER the run is started) + def find_active_run_name(self): + # Putative run name set: Wait for output or run directory or output file to exist + if self.active_run_name == None: + if self.putative_run_name == None: + raise Exception("Unable to find active run name. Putative run name not set.") + outdir = path.join(os.getcwd(), self.putative_run_name) + rundir = outdir + ".dir" + outfile = outdir + ".out" + while self.active_run_name == None: + if path.isdir(outdir) or path.isdir(rundir) or path.isfile(outfile): + self.active_run_name = self.putative_run_name + else: + time.sleep(1) + return self.active_run_name + + # Monitor the stdout of the main process. If present, create log and log data. + def process_output(self, proc): + for line in iter(proc.stdout.readline, b""): + msg = line.decode("utf-8").rstrip() + self.publish_to_default_log(msg) + proc.stdout.close() + + # Monitor the stderr of the main process. If present, create log and log data. + def process_error(self, proc): + for line in iter(proc.stderr.readline, b""): + msg = line.decode("utf-8").rstrip() + self.publish_to_default_log(msg) + proc.stderr.close() + + +# EXAMPLE: this function will only be called if this script is run at the command line. 
+if __name__ == "__main__": + log = logging.getLogger() + logging.basicConfig(format="%(message)s", level=logging.DEBUG) + + try: + log.info("Loading sample CFX context...") + + with open("cfx_context.json", "r") as f: + context = json.load(f) + print(context) + + submit_context = SubmitContext(**context) + + log.info("Executing...") + ex = CfxExecution(submit_context).execute() + log.info("Execution ended.") + + except Exception as e: + log.error(str(e)) diff --git a/examples/fluent_nozzle/exec_fluent.py b/examples/fluent_nozzle/exec_fluent.py new file mode 100644 index 000000000..41063415a --- /dev/null +++ b/examples/fluent_nozzle/exec_fluent.py @@ -0,0 +1,361 @@ +""" +Copyright (C) 2021 ANSYS, Inc. and its subsidiaries. All Rights Reserved. +""" +import _thread +import json +import logging +import os +import platform +import subprocess +import time +import traceback + +from ansys.rep.common.logging import log +from ansys.rep.evaluator.task_manager import ApplicationExecution +from ansys.rep.evaluator.task_manager.context import SubmitContext +import psutil + + +class FluentExecution(ApplicationExecution): + isLinux = platform.platform().startswith("Linux") + + def __init__(self, context): + self.CleanupScript = None + self.FluentTranscript = None + self.error_detected = False + self.fluent_children = [] + ApplicationExecution.__init__(self, context) + + def execute(self): + try: + log.info("Start FLUENT execution script") + + pythoncode_version = "0.1" + log.info("python code version " + pythoncode_version) + + log.info("Evaluator Platform: " + platform.platform()) + + num_cores = self.context.resource_requirements["num_cores"] + log.info(f"Requested cores: {num_cores}") + + # self.environmentInfo.defaultMpi + defaultMpi = "intel" + + # create defaults for inputs not provided + inputs = { + "fluent_dimension": self.context.execution_context.get("fluent_dimension", "2d") + } + inputs["fluent_precision"] = self.context.execution_context.get( + "fluent_precision", "dp" + ) + inputs["fluent_meshing"] = self.context.execution_context.get("fluent_meshing", False) + inputs["fluent_numGPGPUsPerMachine"] = self.context.execution_context.get( + "fluent_numGPGPUsPerMachine", 0 + ) + inputs["fluent_defaultFluentVersion"] = self.context.execution_context.get( + "fluent_defaultFluentVersion", None + ) + inputs["fluent_MPIType"] = self.context.execution_context.get( + "fluent_MPIType", defaultMpi + ) + inputs["fluent_otherEnvironment"] = self.context.execution_context.get( + "fluent_otherEnvironment", "{}" + ) + inputs["fluent_UDFBat"] = self.context.execution_context.get("fluent_UDFBat", None) + inputs["fluent_useGUI"] = self.context.execution_context.get("fluent_useGUI", False) + inputs["fluent_additionalArgs"] = self.context.execution_context.get( + "fluent_additionalArgs", "" + ) + + log.info("Task inputs ") + for name in inputs.keys(): + if inputs[name] == None: + continue + log.info("\t-" + name + ":<" + str(inputs[name]) + ">") + + log.info("Checking if required inputs are provided...") + + valid_launcher_dimensions = ["2d", "3d"] + if not inputs["fluent_dimension"] in valid_launcher_dimensions: + raise Exception( + "Required Input is invalid! fluent_dimension(" + + inputs["fluent_dimension"] + + ")\nValid values are " + + format(valid_launcher_dimensions) + ) + + valid_launcher_precisions = ["sp", "dp"] + if not inputs["fluent_precision"] in valid_launcher_precisions: + raise Exception( + "Required Input is invalid! 
fluent_precision(" + + inputs["fluent_precision"] + + ")\nValid values are " + + format(valid_launcher_precisions) + ) + + # Identify application + app_name = "Ansys Fluent" + app = next((a for a in self.context.software if a["name"] == app_name), None) + assert app, f"{app_name} is required for execution" + + log.info("Using " + app["name"] + " " + app["version"]) + log.info("Current directory: " + os.getcwd()) + + files = [f for f in os.listdir(".") if os.path.isfile(f)] + for f in files: + log.info(" " + f) + + jouFile = next((f for f in self.context.input_files if f["name"] == "jou"), None) + log.info("journal file path: " + jouFile["path"]) + + if jouFile == None or not os.path.isfile(jouFile["path"]): + raise Exception("File " + jouFile["path"] + " does not exist!") + + # Add " around exe if needed for Windows + exe = app["executable"] # should already be platform specific + log.info("Fluent executable: " + exe) + + if inputs["fluent_UDFBat"] == None: + if self.isLinux: + pass # no need in Linux, None is OK + else: + inputs["fluent_UDFBat"] = os.path.join(os.path.dirname(exe), "udf.bat") + log.info("Setting fluent_UDFBat to " + inputs["fluent_UDFBat"]) + + otherEnvironment = json.loads(inputs["fluent_otherEnvironment"]) + noGuiOptions = None + if not inputs["fluent_useGUI"]: + if self.isLinux: + noGuiOptions = " -gu -driver null" + else: + noGuiOptions = " -hidden -driver null" + + log.debug(f"exe: {exe}") + args = inputs["fluent_dimension"] + args += inputs["fluent_precision"] if inputs["fluent_precision"] == "dp" else "" + args += " -meshing" if inputs["fluent_meshing"] else "" + args += " -t" + format(num_cores) + if inputs["fluent_MPIType"] != None and inputs["fluent_MPIType"] != "": + args += " -mpi=" + format(inputs["fluent_MPIType"]) + if inputs["fluent_numGPGPUsPerMachine"] > 0: + args += " -gpgp=" + format(inputs["fluent_numGPGPUsPerMachine"]) + args += " -i " + jouFile["path"] + # args+= cnf + if not noGuiOptions == None: + args += noGuiOptions + args += " " + inputs["fluent_additionalArgs"] + " " + + cmd = [os.path.basename(exe)] + cmd.extend(args.split(" ")) + + rc = None + firstchild = None + + fluent_env = os.environ.copy() + + for oenv in otherEnvironment: + if "FLUENT_GUI" == oenv["Name"]: + continue + # if "FLUENT_AAS"==oenv['Name']:continue + fluent_env[oenv["Name"]] = oenv["Value"] + log.info("Fluent environment:") + for k in fluent_env: + try: + log.info("\t- " + k + "\n\t\t " + fluent_env[k]) + except: + log.info("\t- error while printing " + k) + + log.info(" ".join(cmd)) + + max_wait_time = 120 + tried_time = 0 + self.error_detected = False + with subprocess.Popen( + cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=fluent_env, executable=exe + ) as self.proc: + log.info("Fluent started\npid:" + format(self.proc.pid)) + log.info("TODO: start new thread to monitor process children") + t3 = _thread.start_new_thread(self.monitor_children, (self.proc,)) + log.info("Fluent started a new thread to monitor its children") + t4 = _thread.start_new_thread(self.monitor_transcript, (self.proc,)) + log.info("Fluent started a new thread to monitor its transcript") + + t1 = _thread.start_new_thread(self.process_output, (self.proc,)) + log.info("Fluent started a new thread for stdout log") + t2 = _thread.start_new_thread(self.process_error, (self.proc,)) + log.info("Fluent started a new thread for stderr log") + while True: + if self.error_detected: + log.info("Error: Solver exited with error") + log.info("TODO: implement child process kill") + for child in 
self.fluent_children: + pToKill = psutil.Process(child) + pToKill.kill() + raise Exception("Solver exited with errors.") + if rc is None: + rc = self.proc.poll() + elif firstchild is None: + time.sleep(3) + tried_time = tried_time + 3 + if len(self.fluent_children) == 0: + if tried_time < max_wait_time: + log.info("\t- no fluent children process found, continue") + continue + else: + log.info( + "\t- can not start fluent in " + + format(max_wait_time) + + "seconds, quit the process" + ) + break + firstchild = self.fluent_children[0] + log.info("rc:" + format(rc) + " ,firstchild:" + format(firstchild)) + elif not psutil.pid_exists(firstchild): + log.info("\t- fluent exits normally") + break + + log.info("Finished Fluent solve") + if rc != 0: + log.info(f"Error: Solver exited with errors ({rc}).") + raise Exception("Solver exited with errors.") + + except Exception as e: + log.info("====== error in execute =========") + log.debug(traceback.print_exc()) + log.info(str(e)) + log.info("====== error in execute =========") + raise e + + # monitor the children of the main process + def monitor_children(self, proc): + starting_process = psutil.Process(proc.pid) + try: + while True: + for child in starting_process.children(): + if not child.pid in self.fluent_children: + self.fluent_children.append(child.pid) + time.sleep(0.001) + except Exception as e: + if not "psutil.NoSuchProcess" in format(e): + errormessage = traceback.format_exc() + log.info(errormessage) + log.info("<" + format(e) + ">") + + # monitor creation and content of transcript files and record content to corresponding logs + def monitor_transcript(self, proc): + try: + while True: + log.info("Looking for fluent automatically generated transcript file...") + if not self.FluentTranscript == None: + break + time.sleep(1) + for fn in os.listdir("."): + if not fn.endswith(".trn"): + continue + if fn.endswith(format(self.proc.pid) + ".trn"): + self.FluentTranscript = fn + for childpid in self.fluent_children: + if fn.endswith(format(childpid) + ".trn"): + log.info( + "Warning: a fluent child process generated transcript <" + + format(fn) + + "> is found!" 
+ ) + self.FluentTranscript = fn + if not self.FluentTranscript == None: + break + log.info("Fluent transcript detected: <" + format(self.FluentTranscript) + ">") + + current_line = 0 + while True: + time.sleep(1) + with open(self.FluentTranscript) as f: + for _ in range(current_line): + next(f) + for line in f: + log.info(line.rstrip()) + current_line = current_line + 1 + msg = line.rstrip() + if msg.startswith("ANSYS LICENSE STDOUT ERROR"): + self.error_detected = True + log.info("License error detected in fluent") + if msg.startswith("Unexpected license problem"): + self.error_detected = True + log.info("Unexpected license error detected in fluent") + if msg.startswith( + "Warning: An error or interrupt occurred while reading the journal file" + ): + self.error_detected = True + log.info("An error detected in fluent, killing fluent...") + if msg.startswith("Error:"): + self.error_detected = True + log.info("An error detected in fluent, killing fluent...") + if msg.startswith("Cleanup script file is"): + self.CleanupScript = msg.replace("Cleanup script file is ", "") + log.debug("Execute kills script is : " + self.CleanupScript) + if msg.startswith('Opening input/output transcript to file "'): + self.FluentTranscript = msg.replace( + 'Opening input/output transcript to file "', "" + ).replace('".', "") + log.debug("Fluent transcript is : " + self.FluentTranscript) + except Exception as e: + errormessage = traceback.format_exc() + log.info(errormessage) + log.info("<" + format(e) + ">") + + # monitor the stdout of the main process and log information to corresponding logs + def process_output(self, proc): + for line in iter(proc.stdout.readline, b""): + msg = line.decode("utf-8").rstrip() + log.info(msg) + if msg.startswith("ANSYS LICENSE MANAGER ERROR"): + self.error_detected = True + if msg.startswith("Cleanup script file is"): + self.CleanupScript = msg.replace("Cleanup script file is ", "") + log.debug("Execute kills script is : " + self.CleanupScript) + if msg.startswith('Opening input/output transcript to file "'): + self.FluentTranscript = msg.replace( + 'Opening input/output transcript to file "', "" + ).replace('".', "") + log.debug("Fluent transcript is : " + self.FluentTranscript) + # log.info(msg) + if self.error_detected: + log.debug(msg) + proc.stdout.close() + + # monitor the stderr of the main process and log information to corresponding logs + def process_error(self, proc): + for line in iter(proc.stderr.readline, b""): + msg = line.decode("utf-8").rstrip() + log.error(msg) + if msg.startswith("Fatal error in MPI_Init: Internal MPI error!"): + if self.CleanupScript == None: + self.proc.kill() + else: + p = subprocess.Popen( + self.CleanupScript, stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) + stdout, stderr = p.communicate() + proc.stderr.close() + + +# EXAMPLE: this function will only be called if this script is run at the command line. 
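+# For reference: with the defaults above (2d, dp, no meshing, intel MPI, no GUI) and, say,
+# num_cores=4 and a journal file named "nozzle.jou", the command assembled in execute()
+# resolves on Linux to roughly (illustrative only; the exact form depends on the
+# execution_context values and on the platform):
+#
+#     fluent 2ddp -t4 -mpi=intel -i nozzle.jou -gu -driver null
+#
+# The "fluent_context.json" file read below is not shipped with this example; it must
+# provide the same fields as the submit context used in execute() (resource_requirements,
+# execution_context with the fluent_* keys, input_files with a "jou" entry, software).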
+if __name__ == "__main__": + log = logging.getLogger() + logging.basicConfig(format="%(message)s", level=logging.DEBUG) + + try: + log.info("Loading sample Fluent context...") + + with open("fluent_context.json", "r") as f: + context = json.load(f) + print(context) + + submit_context = SubmitContext(**context) + + log.info("Executing...") + ex = FluentExecution(submit_context).execute() + log.info("Execution ended.") + + except Exception as e: + log.error(str(e)) diff --git a/examples/mapdl_motorbike_frame/exec_mapdl.py b/examples/mapdl_motorbike_frame/exec_mapdl.py new file mode 100644 index 000000000..a1bf9483e --- /dev/null +++ b/examples/mapdl_motorbike_frame/exec_mapdl.py @@ -0,0 +1,49 @@ +""" +Basic execution script for MAPDL. + +Command formed: ansys.exe -b -i -o -np 4 + +""" +import os +import subprocess + +from ansys.rep.common.logging import log +from ansys.rep.evaluator.task_manager import ApplicationExecution + + +class MAPDLExecution(ApplicationExecution): + def execute(self): + + log.info("Starting MAPDL execution script") + + # Identify files + inp_file = next((f for f in self.context.input_files if f["name"] == "inp"), None) + assert inp_file, "Input file inp missing" + out_file = next((f for f in self.context.output_files if f["name"] == "out"), None) + assert out_file, "Output file out missing" + + # Identify application + app_name = "Ansys Mechanical APDL" + app = next((a for a in self.context.software if a["name"] == app_name), None) + assert app, f"Cannot find app {app_name}" + + # Add " around exe if needed for Windows + exe = app["executable"] + if " " in exe and not exe.startswith('"'): + exe = '"%s"' % exe + + # Use properties from resource requirements + num_cores = self.context.resource_requirements["num_cores"] + + # Pass env vars correctly + env = dict(os.environ) + env.update(self.context.environment) + + # Form command + cmd = f"{exe} -b -i {inp_file['path']} -o {out_file['path']} -np {num_cores}" + + # Execute command + log.info(f"Executing: {cmd}") + subprocess.run(cmd, shell=True, check=True, env=env) + + log.info("End MAPDL execution script") diff --git a/examples/python_two_bar_truss_problem/exec_python.py b/examples/python_two_bar_truss_problem/exec_python.py new file mode 100644 index 000000000..182ef6f3f --- /dev/null +++ b/examples/python_two_bar_truss_problem/exec_python.py @@ -0,0 +1,48 @@ +""" +Simplistic execution script for Python. 
+
+Command formed: python <script file> <input file>
+"""
+import os
+import subprocess
+
+from ansys.rep.common.logging import log
+from ansys.rep.evaluator.task_manager import ApplicationExecution
+
+
+class PythonExecution(ApplicationExecution):
+    def execute(self):
+
+        log.info("Start Python execution script")
+
+        # Identify files
+        script_file = next((f for f in self.context.input_files if f["name"] == "script"), None)
+        assert script_file, "Python script file script missing"
+        inp_file = next((f for f in self.context.input_files if f["name"] == "inp"), None)
+        assert inp_file, "Input file inp missing"
+
+        # Identify application
+        app_name = "Python"
+        app = next((a for a in self.context.software if a["name"] == app_name), None)
+        assert app, f"Cannot find app {app_name}"
+
+        # Add " around exe if needed for Windows
+        exe = app["executable"]
+        if " " in exe and not exe.startswith('"'):
+            exe = '"%s"' % exe
+
+        # Use properties from resource requirements
+        # None currently
+
+        # Pass env vars from the task context on top of the current environment
+        env = dict(os.environ)
+        env.update(self.context.environment)
+
+        # Form command
+        cmd = f"{exe} {script_file['path']} {inp_file['path']}"
+
+        # Execute
+        log.info(f"Executing: {cmd}")
+        subprocess.run(cmd, shell=True, check=True, env=env)
+
+        log.info("End Python execution script")
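+
+
+# Unlike exec_cfx.py and exec_fluent.py, this script has no standalone entry point. A
+# minimal one could mirror those scripts (sketch only; it assumes a "python_context.json"
+# file with the same structure as the submit context used above):
+#
+#     if __name__ == "__main__":
+#         import json
+#         import logging
+#
+#         from ansys.rep.evaluator.task_manager.context import SubmitContext
+#
+#         logging.basicConfig(format="%(message)s", level=logging.DEBUG)
+#
+#         with open("python_context.json", "r") as f:
+#             context = json.load(f)
+#
+#         PythonExecution(SubmitContext(**context)).execute()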