diff --git a/daliuge-engine/dlg/apps/pyfunc.py b/daliuge-engine/dlg/apps/pyfunc.py
index 31bc301d3..25490184c 100644
--- a/daliuge-engine/dlg/apps/pyfunc.py
+++ b/daliuge-engine/dlg/apps/pyfunc.py
@@ -39,10 +39,8 @@
 from dlg.drop import BarrierAppDROP
 from dlg.exceptions import InvalidDropException
 from dlg.meta import (
-    dlg_bool_param,
     dlg_string_param,
     dlg_enum_param,
-    dlg_float_param,
     dlg_dict_param,
     dlg_component,
     dlg_batch_input,
@@ -116,10 +114,10 @@ def import_using_code(code):
 class DropParser(Enum):
     PICKLE = 'pickle'
     EVAL = 'eval'
-    PATH = 'path'
-    DATAURL = 'dataurl'
     NPY = 'npy'
     #JSON = "json"
+    PATH = 'path' # input only
+    DATAURL = 'dataurl' # input only
 
 ##
 # @brief PyFuncApp
@@ -148,9 +146,9 @@ class DropParser(Enum):
 # \~English Python function name
 # @param[in] aparam/func_code Function Code//String/readwrite/False//False/
 # \~English Python function code, e.g. 'def function_name(args): return args'
-# @param[in] aparam/input_parser Input Parser/pickle/Select/readwrite/False/pickle,eval,path,dataurl,npy/False/
+# @param[in] aparam/input_parser Input Parser/pickle/Select/readwrite/False/pickle,eval,npy,path,dataurl/False/
 # \~English Input port parsing technique
-# @param[in] aparam/output_parser Output Parser/pickle/Select/readwrite/False/pickle,eval,path,dataurl,npy/False/
+# @param[in] aparam/output_parser Output Parser/pickle/Select/readwrite/False/pickle,eval,npy/False/
 # \~English output port parsing technique
 # @param[in] aparam/func_defaults Function Defaults//String/readwrite/False//False/
 # \~English Mapping from argname to default value. Should match only the last part of the argnames list.
@@ -243,7 +241,7 @@ def _init_func_defaults(self):
                 + "{self.f.__name__}: {self.func_defaults}, {type(self.func_defaults)}"
             )
             raise ValueError
-        if DropParser(self.input_parser) is DropParser.PICKLE:
+        if self.input_parser is DropParser.PICKLE:
             # only values are pickled, get them unpickled
             for name, value in self.func_defaults.items():
                 self.func_defaults[name] = deserialize_data(value)
@@ -382,18 +380,21 @@ def run(self):
 
         # Inputs are un-pickled and treated as the arguments of the function
         # Their order must be preserved, so we use an OrderedDict
-        if DropParser(self.input_parser) is DropParser.PICKLE:
-            all_contents = lambda x: pickle.loads(droputils.allDropContents(x))
-        elif DropParser(self.input_parser) is DropParser.EVAL:
-            def astparse(x):
+        if self.input_parser is DropParser.PICKLE:
+            #all_contents = lambda x: pickle.loads(droputils.allDropContents(x))
+            all_contents = droputils.load_pickle
+        elif self.input_parser is DropParser.EVAL:
+            def optionalEval(x):
                 # Null and Empty Drops will return an empty byte string
                 # which should propogate back to None
-                content: bytes = droputils.allDropContents(x)
-                return ast.literal_eval(content.decode('utf-8')) if content else None
-            all_contents = astparse
-        elif DropParser(self.input_parser) is DropParser.PATH:
+                content: str = droputils.allDropContents(x).decode('utf-8')
+                return ast.literal_eval(content) if len(content) > 0 else None
+            all_contents = optionalEval
+        elif self.input_parser is DropParser.NPY:
+            all_contents = droputils.load_npy
+        elif self.input_parser is DropParser.PATH:
             all_contents = lambda x: x.path
-        elif DropParser(self.input_parser) is DropParser.DATAURL:
+        elif self.input_parser is DropParser.DATAURL:
             all_contents = lambda x: x.dataurl
         else:
             raise ValueError(self.input_parser.__repr__())
@@ -547,11 +548,13 @@ def write_results(self, result):
             if len(outputs) == 1:
                 result = [result]
             for r, o in zip(result, outputs):
-                if DropParser(self.output_parser) is DropParser.PICKLE:
+                if self.output_parser is DropParser.PICKLE:
                     logger.debug(f"Writing pickeled result {type(r)} to {o}")
                     o.write(pickle.dumps(r))
-                elif DropParser(self.output_parser) is DropParser.EVAL:
+                elif self.output_parser is DropParser.EVAL:
                     o.write(repr(r).encode('utf-8'))
+                elif self.output_parser is DropParser.NPY:
+                    droputils.save_npy(o, r)
                 else:
                     ValueError(self.output_parser.__repr__())
diff --git a/daliuge-engine/dlg/droputils.py b/daliuge-engine/dlg/droputils.py
index 2c21042fb..4ce78c231 100644
--- a/daliuge-engine/dlg/droputils.py
+++ b/daliuge-engine/dlg/droputils.py
@@ -117,7 +117,7 @@ def __exit__(self, typ, value, tb):
         )
 
 
-def allDropContents(drop, bufsize=4096):
+def allDropContents(drop, bufsize=4096) -> bytes:
     """
     Returns all the data contained in a given DROP
     """
@@ -267,24 +267,24 @@ def listify(o):
         return [o]
 
 
-# def save_pickle(drop: DataDROP, data: Any):
-#     """Saves a python object in pkl format"""
-#     pickle.dump(data, drop)
+def save_pickle(drop: DataDROP, data: Any):
+    """Saves a python object in pkl format"""
+    pickle.dump(data, drop)
 
 
-# def load_pickle(drop: DataDROP) -> Any:
-#     """Loads a pkl formatted data object stored in a DataDROP.
-#     Note: does not support streaming mode.
-#     """
-#     buf = io.BytesIO()
-#     desc = drop.open()
-#     while True:
-#         data = drop.read(desc)
-#         if not data:
-#             break
-#         buf.write(data)
-#     drop.close(desc)
-#     return pickle.loads(buf.getbuffer())
+def load_pickle(drop: DataDROP) -> Any:
+    """Loads a pkl formatted data object stored in a DataDROP.
+    Note: does not support streaming mode.
+    """
+    buf = io.BytesIO()
+    desc = drop.open()
+    while True:
+        data = drop.read(desc)
+        if not data:
+            break
+        buf.write(data)
+    drop.close(desc)
+    return pickle.loads(buf.getbuffer())
 
 
 # async def save_pickle_iter(drop: DataDROP, data: Iterable[Any]):
@@ -298,7 +298,7 @@ def listify(o):
 #         yield pickle.load(p)
 
 
-def save_numpy(drop: DataDROP, ndarray: np.ndarray, allow_pickle=False):
+def save_npy(drop: DataDROP, ndarray: np.ndarray, allow_pickle=False):
     """
     Saves a numpy ndarray to a drop in npy format
     """
@@ -312,7 +312,11 @@ def save_numpy(drop: DataDROP, ndarray: np.ndarray, allow_pickle=False):
     dropio.close()
 
 
-def load_numpy(drop: DataDROP, allow_pickle=False) -> np.ndarray:
+def save_numpy(drop: DataDROP, ndarray: np.ndarray):
+    save_npy(drop, ndarray)
+
+
+def load_npy(drop: DataDROP, allow_pickle=False) -> np.ndarray:
     """
     Loads a numpy ndarray from a drop in npy format
     """
@@ -323,6 +327,10 @@ def load_numpy(drop: DataDROP, allow_pickle=False) -> np.ndarray:
     return res
 
 
+def load_numpy(drop: DataDROP):
+    return load_npy(drop)
+
+
 # def save_jsonp(drop: PathBasedDrop, data: Dict[str, object]):
 #     with open(drop.path, 'r') as f:
 #         json.dump(data, f)
diff --git a/daliuge-engine/test/apps/test_pyfunc.py b/daliuge-engine/test/apps/test_pyfunc.py
index 4f7448e43..7a9181fbb 100644
--- a/daliuge-engine/test/apps/test_pyfunc.py
+++ b/daliuge-engine/test/apps/test_pyfunc.py
@@ -27,6 +27,7 @@
 import unittest
 import pkg_resources
 import json
+import numpy
 
 from ..manager import test_dm
 from dlg import droputils, graph_loader
@@ -79,8 +80,6 @@ def _PyFuncApp(oid, uid, f, **kwargs):
         func_name=fname,
         func_code=fcode,
         func_defaults=fdefaults,
-        input_parser=pyfunc.DropParser.PICKLE,
-        output_parser=pyfunc.DropParser.PICKLE,
         **kwargs
     )
 
@@ -124,22 +123,84 @@ def inner_function(x, y):
 
         _PyFuncApp("a", "a", inner_function)
 
-    def _test_simple_functions(self, f, input_data, output_data):
+    def test_pickle_func(self, f = lambda x: x, input_data="hello", output_data="hello"):
+        a = InMemoryDROP("a", "a")
+        b = _PyFuncApp("b", "b", f)
+        c = InMemoryDROP("c", "c")
+
+        b.addInput(a)
+        b.addOutput(c)
+
+        with DROPWaiterCtx(self, c, 5):
+            droputils.save_pickle(a, input_data)
+            a.setCompleted()
+        for drop in a, b, c:
+            self.assertEqual(DROPStates.COMPLETED, drop.status)
+        self.assertEqual(
+            output_data, droputils.load_pickle(c)
+        )
+
+    def test_eval_func(self, f = lambda x: x, input_data=None, output_data=None):
+        input_data = [2,2] if input_data is None else input_data
+        output_data = [2,2] if output_data is None else output_data
+
+        a = InMemoryDROP("a", "a")
+        b = _PyFuncApp("b", "b", f,
+            input_parser=pyfunc.DropParser.EVAL,
+            output_parser=pyfunc.DropParser.EVAL
+        )
+        c = InMemoryDROP("c", "c")
+
+        b.addInput(a)
+        b.addOutput(c)
+
+        with DROPWaiterCtx(self, c, 5):
+            a.write(repr(input_data).encode('utf-8'))
+            a.setCompleted()
+        for drop in a, b, c:
+            self.assertEqual(DROPStates.COMPLETED, drop.status)
+        self.assertEqual(
+            output_data, eval(droputils.allDropContents(c).decode('utf-8'), {}, {})
+        )
+
+    def test_npy_func(self, f = lambda x: x, input_data=None, output_data=None):
+        input_data = numpy.ones([2,2]) if input_data is None else input_data
+        output_data = numpy.ones([2,2]) if output_data is None else output_data
+        a = InMemoryDROP("a", "a")
+        b = _PyFuncApp("b", "b", f,
+            input_parser=pyfunc.DropParser.NPY,
+            output_parser=pyfunc.DropParser.NPY
+        )
+        c = InMemoryDROP("c", "c")
+
+        b.addInput(a)
+        b.addOutput(c)
+
+        with DROPWaiterCtx(self, c, 5):
+            droputils.save_npy(a, input_data)
+            a.setCompleted()
+        for drop in a, b, c:
+            self.assertEqual(DROPStates.COMPLETED, drop.status)
+        numpy.testing.assert_equal(
+            output_data, droputils.load_npy(c)
+        )
+
+    def _test_simple_functions(self, f, input_data, output_data):
 
         a, c = [InMemoryDROP(x, x) for x in ("a", "c")]
         b = _PyFuncApp("b", "b", f)
 
         b.addInput(a)
         b.addOutput(c)
         with DROPWaiterCtx(self, c, 5):
-            a.write(pickle.dumps(input_data))  # @UndefinedVariable
+            a.write(pickle.dumps(input_data))
             a.setCompleted()
 
         for drop in a, b, c:
             self.assertEqual(DROPStates.COMPLETED, drop.status)
         self.assertEqual(
             output_data, pickle.loads(droputils.allDropContents(c))
-        )  # @UndefinedVariable
+        )
 
     def test_func1(self):
         """Checks that func1 in this module works when wrapped"""
diff --git a/daliuge-engine/test/test_droputils.py b/daliuge-engine/test/test_droputils.py
index 109ef2596..529ec9c33 100644
--- a/daliuge-engine/test/test_droputils.py
+++ b/daliuge-engine/test/test_droputils.py
@@ -25,11 +25,14 @@
 @author: rtobar
 """
 
+import subprocess
 import unittest
 
+import numpy
+
 from dlg import droputils
 from dlg.common import dropdict, Categories
-from dlg.drop import InMemoryDROP, FileDROP, BarrierAppDROP
+from dlg.drop import InMemoryDROP, FileDROP, BarrierAppDROP, PlasmaDROP
 from dlg.droputils import DROPFile
 
 
@@ -141,6 +144,43 @@ def testGetEndNodes(self):
         endNodes = droputils.getLeafNodes(a)
         self.assertSetEqual(set([j, f]), set(endNodes))
 
+    def _test_datadrop_function(self, test_function, input_data):
+        # basic datadrop
+        for drop_type in (InMemoryDROP,FileDROP):
+            test_function(drop_type, input_data)
+
+        #plasma datadrop
+        store = None
+        try:
+            store = subprocess.Popen(
+                ["plasma_store", "-m", "1000000", "-s", "/tmp/plasma"]
+            )
+            test_function(PlasmaDROP, input_data)
+        finally:
+            if store:
+                store.terminate()
+
+    def _test_save_load_pickle(self, drop_type, data):
+        drop = drop_type("a", "a")
+        droputils.save_pickle(drop, data)
+        drop.setCompleted()
+        output_data = droputils.load_pickle(drop)
+        self.assertEqual(data, output_data)
+
+    def test_save_load_pickle(self):
+        input_data = {'nested': {'data': {'object': {}}}}
+        self._test_datadrop_function(self._test_save_load_pickle, input_data)
+
+    def _test_save_load_npy(self, drop_type, data):
+        drop = drop_type("a", "a")
+        droputils.save_npy(drop, data)
+        output_data = droputils.load_npy(drop)
+        numpy.testing.assert_equal(data, output_data)
+
+    def test_save_load_npy(self):
+        input_data = numpy.ones([3,5])
+        self._test_datadrop_function(self._test_save_load_npy, input_data)
+
     def test_DROPFile(self):
         """
         This test exercises the DROPFile mechanism to read the data represented by
diff --git a/docs/development/app_development/app_index.rst b/docs/development/app_development/app_index.rst
index b15e5ed45..a0959985b 100644
--- a/docs/development/app_development/app_index.rst
+++ b/docs/development/app_development/app_index.rst
@@ -25,6 +25,7 @@ integration and testing during component development. As mentioned already, for
    dynlib_components
    docker_components
    service_components
+   pyfunc_components
    datadrop_io
    wrap_existing
    test_and_debug
diff --git a/docs/development/app_development/datadrop_io.rst b/docs/development/app_development/datadrop_io.rst
index b9a8fc57b..b76feb82f 100644
--- a/docs/development/app_development/datadrop_io.rst
+++ b/docs/development/app_development/datadrop_io.rst
@@ -1,3 +1,5 @@
+.. _datadrop_io:
+
 DataDROP I/O
 ============
 
@@ -24,15 +26,18 @@
 Writing data into an output drop is similar but simpler. Application authors need to call
 one or more times the :attr:`write ` method with the data that needs to be written.
 
-String Serialization
---------------------
+Serialization
+-------------
 
-Many data drops are capable of storing data in different formats managed by the app drop.
+Many data components are capable of storing data in multiple formats determined by the drop component. The common data io interface allows app components to be compatible with many data component types, however different app components connected to the same data component must use compatible serialization and deserialization types and utilities.
+
+String Serialization
+^^^^^^^^^^^^^^^^^^^^
 
 Raw String
 """"""""""
 
-The simplest serialization format supported directly by `DataDrop.write` and `DataDrop.read`.
+The simplest deserialization format supported directly by `DataDrop.write` and `DataDrop.read`.
 
 JSON (.json)
 """"""""""""
@@ -59,26 +64,34 @@
 XML (.xml)
 """"""""""
 
 Markup format with similar features to YAML but with the addition of attributes. Serialization can be performed
-using `dicttoxml` or both serialization and deserialiation using `xml.etree.ElementTree`.
+using `dicttoxml` or both serialization and deserialization using `xml.etree.ElementTree`.
+
+Python Eval (.py)
+"""""""""""""""""
+
+Python expressions and literals are valid string serialization formats whereby the string data is interpreted as Python code. Serialization is typically performed using the `__repr__` instance method and deserialization using `eval` or `ast.literal_eval`.
 
 Binary Serialization
---------------------
+^^^^^^^^^^^^^^^^^^^^
 
 Data drops may also store binary formats that are typically more efficient than string formats and may utilize
 the python buffer protocol.
 
+Raw Bytes
+"""""""""
+
+Data drops can always be read as raw bytes using `droputils.allDropContents` and written to using `DataDROP.write`. Reading as a bytes object creates a readonly in-memory data copy that may not be as performant as other drop utilities.
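+
+For example, a raw-byte round trip through an in-memory drop can be written directly against this interface. The snippet below is only a sketch, modelled on the engine's unit tests:
+
+.. code-block:: python
+
+    from dlg import droputils
+    from dlg.drop import InMemoryDROP
+
+    drop = InMemoryDROP("a", "a")
+    drop.write(b"hello")                     # producer side
+    drop.setCompleted()
+    data = droputils.allDropContents(drop)   # consumer side, returns bytes
+    assert data == b"hello"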
+
 Pickle (.pkl)
 """""""""""""
 
-Default serialazation format. Use `save_pickle` for serialization to this format and
-`allDropContents` or `load_pickle` for deserialization.
-
+Default serialization format capable of serializing any Python object. Use `save_pickle` for serialization to this format and `load_pickle` for deserialization.
 
 Numpy (.npy)
 """"""""""""
 
-Portable numpy serialization format. Use `save_numpy`
+Portable numpy serialization format. Use `save_numpy` for serialization and `load_numpy` for deserialization.
 
 Numpy Zipped (.npz)
 """""""""""""""""""
@@ -87,15 +100,15 @@ Portable zipped numpy serialization format. Consists of a .zip directory holding
 files.
 
 Table Serialization
--------------------
+^^^^^^^^^^^^^^^^^^^
 
-parquet (.parquet) 
+parquet (.parquet)
 """""""""""""""""""
 
 Open source column-based relational data format from Apache.
 
-Drop Specialized Serialization
-------------------------------
+Specialized Serialization
+^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Data drops such as RDBMSDrop drops manage their own record format and are interfaced
 using relational data objects such `dict`, `pyarrow.RecordBatch` or `pandas.DataFrame`.
\ No newline at end of file
diff --git a/docs/development/app_development/pyfunc_components.rst b/docs/development/app_development/pyfunc_components.rst
new file mode 100644
index 000000000..d6e4dd19c
--- /dev/null
+++ b/docs/development/app_development/pyfunc_components.rst
@@ -0,0 +1,24 @@
+.. _pyfunc_components:
+
+Pyfunc Components
+=================
+
+Pyfunc components are generalized Python components that can be configured to behave as a custom Python component entirely through component parameters and application arguments. A pyfunc component
+maps directly to an existing Python function or a lambda expression: named application arguments and input ports are mapped to the function's keyword arguments, and the result is mapped to the output port.
+
+Port Parsers
+------------
+
+When interfacing with data drops, Pyfunc components may utilize one of several built-in port parsers.
+
+* Pickle - Reads and writes data in pickle format
+* Eval - Reads data using `ast.literal_eval` and writes the `repr()` of the result
+* Npy - Reads and writes data in .npy format
+* Path - Reads the drop path rather than its data
+* Url - Reads the drop data URL rather than its data
+
+
+Note
+""""
+
+Only a single port parser can currently be used for all input ports of a Pyfunc. This is subject to change in the future.
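+
+As an illustration, the sketch below (adapted from the engine's unit tests; the wrapped function, OIDs and parser choice are only examples) wires a Pyfunc component up with npy parsers on both ports:
+
+.. code-block:: python
+
+    import numpy
+    from dlg import droputils
+    from dlg.apps.pyfunc import DropParser, PyFuncApp
+    from dlg.drop import InMemoryDROP
+
+    a, c = InMemoryDROP("a", "a"), InMemoryDROP("c", "c")
+    b = PyFuncApp("b", "b", func_name="numpy.sum",
+                  input_parser=DropParser.NPY,
+                  output_parser=DropParser.NPY)
+    b.addInput(a)
+    b.addOutput(c)
+
+    droputils.save_npy(a, numpy.ones([2, 2]))   # feed the input port
+    a.setCompleted()                            # the app runs once all inputs complete
+    # when c has completed, droputils.load_npy(c) returns the function's result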
diff --git a/docs/development/app_development/python_components.rst b/docs/development/app_development/python_components.rst
index 5a9fee90c..f51d07c2c 100644
--- a/docs/development/app_development/python_components.rst
+++ b/docs/development/app_development/python_components.rst
@@ -57,18 +57,19 @@ GREAT! In exactly the same manner you can work along to change the functionality
 Obviously you can add more than one component class to the file ``app_components.py``, or add multiple files to the directory. Just don't forget to update the file ``__init__.py`` accordingly as well.
 
-Remove boylerplate and add your documentation
+Remove boilerplate and add your documentation
 ---------------------------------------------
 Next step is to clean up the mess from the boylerplate template and update the documentation of our new |daliuge| component. The first thing is to remove the files `ABOUT_THIS_TEMPLATE.md` and `CONTRIBUTING.md`. The next step is to update the file `README.md`.
 Open that file and remove everything above ```` and then do exactly what is written on that line: *Write your project README below!*. Then save the file. Make sure the LICENSE file contains a license you (and your employer) are happy with. If you had to install any additional Python packages, make sure they are listed in the ``requriements-test.txt`` and ``requirements.txt`` files and modify the file ``setup.py`` as required. Finally add more detailed documentation to the docs directory. This will then also be published on readthedocs whenever you push to the main branch. After that you will have a pretty nice and presentable component package already.
 
-Using parameters
-----------------
-Typically your code allows some user inputs in the form of parameters and/or keywords. |daliuge| supports that as well and the end user of your component will be able to populate the values for such parameters in EAGLE during the development of the workflows using your component. In order to make this happen you will need to expose the parameters through the component interface and also document them appropriately so that EAGLE can display that information to the end user. Since the end-users of your component will want to specify the values of these parameters through the EAGLE editor there are a few tricks required to enable that. For you as the developer of a component this is pretty much invisible, but you need to use the API. |daliuge| is currently offering six types of parameters:
+Adding Parameters and App Arguments
+-----------------------------------
+Typically workflows require some user configuration in addition to data. |daliuge| supports this in the form of parameters and/or app arguments, and the end user of your component will be able to populate the values for such parameters in EAGLE during the development of the workflows. In order to make this happen you will need to declare the parameters through the component interface and also document them appropriately so that EAGLE can provide the parameters in the component palette to the end user. Since the end-users of your component will want to specify the values of these parameters through the EAGLE editor there are a few tricks required to enable that. For you as the developer of a component this is pretty much invisible, but you need to use the API. |daliuge| is currently offering seven types of parameters:
 
 #. dlg_string_param
 #. dlg_int_param
 #. dlg_float_param
 #. dlg_bool_param
+#. dlg_enum_param
 #. dlg_list_param
 #. dlg_dict_param
 
@@ -76,23 +77,25 @@ For example to define a greeting parameter for a HelloWorld application you can
 
 .. code-block:: python
 
-    greet = dlg_string_param("greet", "World")
+    index = dlg_int_param("index", 0)
 
-in the global block of your application class. This will give you access to the parameters passed on through the graph to your component at run time. Another example is shown below, if you have a parameter called ``index`` you can get the value from the graph at run time by adding a single line to your ``initialize`` method:
+as a member of the custom component class. At runtime the param will be passed on through the graph to the component and converted to the declared type after class initialization. Another example is shown below, if you have a parameter called ``index`` you can get the value from the graph at run time by adding a single line to your ``initialize`` method:
 
 .. _graphs.figs.tmpl_params1.png:
 .. figure:: ../../images/tmpl_params1.png
 
-you should always do that before calling the initialize of the base class, in the example the ``BarrierAppDROP`` class and add an appropriate variable to the object's name space (``self.index``). In that way all other methods will have access to the index parameter's value. Then you should also add a line to the doxygen in-line documentation like this:
+you should always do that before calling the initialize of the base class, in the example the ``BarrierAppDROP`` class, and add an appropriate variable to the object (``self.index``) such that all other methods will have access to the index parameter's value. Then you should also add a line to the doxygen in-line documentation like this:
 
 .. _graphs.figs.tmpl_params2:
 .. figure:: ../../images/tmpl_params2.png
 
 see chapter :doc:`eagle_app_integration` for more details on the syntax. When you now checkin your code to the github repo a github action will generate the palette (JSON description of your components) automatically and you can load it into EAGLE to construct a workflow.
 
-Adding input and output ports
+Adding Input and Output Ports
 -----------------------------
-Ports are another way of getting data and information in and out of your component. Ports are always connected to data components and provide the application component with a homogeneous I/O interface. You write whatever you want to an output port, but be aware that other components, maybe not developed by yourself, will need to be able to understand and interpret correctly. In the same spirit you might not be responsible for what is presented to your component on the input ports, but you certainly need to be able to read and use that information. The first step to make sure this will fit in a workflow, is to document your own inputs and outputs and check the data on the inputs for compliance with what you are expecting. |daliuge|, or more precisely EAGLE is using that information to guide the users developing a workflow and by default allows connections only between matching ports. Again this is based on the doxygen description of your components ports, which look like this:
+Ports are how runtime data and information move in and out of your component. Ports are always connected to data components and provide the application component with a homogeneous I/O interface. App components can write whatever data they like to an output port, but be aware that other components, maybe not developed by yourself, will need a compatible reader to interpret the data. In the same spirit you might not be responsible for what is presented to your component on the input ports, but you certainly need to be able to read and use that information. See chapter :doc:`datadrop_io` for more details.
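+
+For instance, a minimal ``run`` method that simply copies the bytes from its first input port to its first output port could look like the sketch below (the class name is only illustrative, using the drop utilities described in :doc:`datadrop_io`):
+
+.. code-block:: python
+
+    from dlg import droputils
+    from dlg.drop import BarrierAppDROP
+
+    class CopyApp(BarrierAppDROP):
+        def run(self):
+            # read all bytes from the first input and write them to the first output
+            data = droputils.allDropContents(self.inputs[0])
+            self.outputs[0].write(data)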
+
+The first step to make sure this will fit in a workflow, is to document your own inputs and outputs and check the data on the inputs for compliance with what you are expecting. |daliuge|, or more precisely EAGLE, is using that information to guide the users developing a workflow and by default allows connections only between matching ports. Again this is based on the doxygen description of your component's ports, which look like this:
 
 .. _graphs.figs.tmpl_ports1:
 .. figure:: ../../images/tmpl_ports1.png
@@ -112,4 +115,6 @@ Your ``run`` method could look very simple and essentially always the same, but
 
 Consider Granularity and Parallelism
 ------------------------------------
-You can put very complex and even complete applications inside a component, but that is not the idea. In fact components should perform quite limited tasks, which should in general be useful for other, ideally many workflows. There is always a trade-off between overhead and functionality as well. Although the template makes the development of components quite easy, it still is an overhead, compared to just adding a few lines of code in some existing component. One of the driving requirements to write a new component might thus be whether the functionality of the new component is generic enough to be useful. There might also be other ways of implementing that same functionality and thus there might be a choice of components providing that. The other, really important consideration is parallelism. In general you should never do that inside a component, but leave that to the developer of the workflow itself. |daliuge| is mainly about distributing and optimizing the distribution of such parallel tasks (instances of components). You should aim to give the |daliuge| engine as many degrees of freedom as possible to deploy the final workflow on the available platform. When developing a component you won't know in what kind of workflows it is going to be used, nor will you know how big and complex those workflows are. Thus, don't assume anything and implement just the functionality to deal with a single, atomic entity of the data the component has to deal with. That also makes the implementation easier and much more straight forward.
+You can put very complex and even complete applications inside a component, but this limits code reusability, and |daliuge| only provides scheduling and deployment parallelism down to the component level. In fact components should perform quite limited tasks, which should in general be useful for other, ideally many workflows. There is always a trade-off between overhead and functionality as well. Although the template makes the development of components quite easy, it still is an overhead, compared to just adding a few lines of code in some existing component. One of the driving requirements to write a new component might thus be whether the functionality of the new component is generic enough to be useful. There might also be other ways of implementing that same functionality and thus there might be a choice of components providing that.
+
+The other, really important consideration is parallelism. In general you should never do that inside a component, but leave that to the developer of the workflow itself. |daliuge| is mainly about distributing and optimizing the distribution of such parallel tasks (instances of components). You should aim to give the |daliuge| engine as many degrees of freedom as possible to deploy the final workflow on the available platform. When developing a component you won't know in what kind of workflows it is going to be used, nor will you know how big and complex those workflows are. Thus, don't assume anything and implement just the functionality to deal with a single, atomic entity of the data the component has to deal with. That also makes the implementation easier and much more straightforward.
diff --git a/docs/development/app_development/special_components.rst b/docs/development/app_development/special_components.rst
index cdd1e44f2..23549419c 100644
--- a/docs/development/app_development/special_components.rst
+++ b/docs/development/app_development/special_components.rst
@@ -7,7 +7,6 @@ In addition users can develop a number of specialized components, which are base
 #. Start and Stop Components
 #. Branch Components
 #. MPI Components
-#. Python-function Components
 #. Archiving/store Components
 
 Descriptions TODO