diff --git a/.gitignore b/.gitignore index 51eea8609..120f1f2ce 100644 --- a/.gitignore +++ b/.gitignore @@ -8,3 +8,5 @@ nosetests.xml .coverage tests/coverage.xml .c9revisions +.idea +/venv diff --git a/.travis.yml b/.travis.yml index c53ffd6c8..bd833b12f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,10 +1,8 @@ language: python python: - - "2.7" - - "3.3" - - "3.4" - - "3.5" - "3.6" + - "3.7" + - "3.8" install: - "pip install -r requirements.txt" - "pip install celery" diff --git a/AUTHORS b/AUTHORS index 1f5964648..05623d342 100644 --- a/AUTHORS +++ b/AUTHORS @@ -1,3 +1,5 @@ Samuel Abels Ziad Sawalha Matthew Hampton +Kelly McDonald +Dan Funk diff --git a/CONTRIB b/CONTRIB index ed4edbf88..89481f8ff 100644 --- a/CONTRIB +++ b/CONTRIB @@ -8,7 +8,7 @@ Coding style: Testing: Non-public classes and methods MUST be prefixed by _. This is also important - because the test and API documentation machinery makes assumtions based on + because the test and API documentation machinery makes assumptions based on this convention. Every added public class MUST have a corresponding unit test. The tests are diff --git a/Dockerfile b/Dockerfile index 5781d8ea3..ca5f172f6 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM python:2-wheezy +FROM python:3.6 RUN apt-get -y update && apt-get upgrade -yu COPY . 
/tmp/SpiffWorkflow RUN cd /tmp/SpiffWorkflow && make wheel && pip install dist/SpiffWorkflow*.whl diff --git a/SpiffWorkflow/__init__.py b/SpiffWorkflow/__init__.py index 6640bd15f..2b32e48c3 100644 --- a/SpiffWorkflow/__init__.py +++ b/SpiffWorkflow/__init__.py @@ -5,6 +5,12 @@ from .workflow import Workflow from .task import Task from .exceptions import WorkflowException +from .navigation import NavItem +from .bpmn.specs.BpmnSpecMixin import BpmnSpecMixin, SequenceFlow +from .bpmn.specs.UnstructuredJoin import UnstructuredJoin +from .bpmn.specs.MultiInstanceTask import MultiInstanceTask +from .bpmn.specs.CallActivity import CallActivity +from .bpmn.specs.BoundaryEvent import _BoundaryEventParent import inspect __all__ = [name for name, obj in list(locals().items()) diff --git a/SpiffWorkflow/bpmn/BpmnFeelScriptEngine.py b/SpiffWorkflow/bpmn/BpmnFeelScriptEngine.py new file mode 100644 index 000000000..b10ee6b83 --- /dev/null +++ b/SpiffWorkflow/bpmn/BpmnFeelScriptEngine.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +from __future__ import division, absolute_import +from builtins import object +# Copyright (C) 2012 Matthew Hampton +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301 USA +from SpiffWorkflow.exceptions import WorkflowTaskExecException + +from .FeelLikeScriptEngine import FeelLikeScriptEngine +from ..operators import Operator + + +class BpmnFeelScriptEngine(FeelLikeScriptEngine): + """ + Used during execution of a BPMN workflow to evaluate condition / value + expressions. These are used by Gateways, and by Catching Events + (non-message ones). + + Also used to execute scripts. + + If you are uncomfortable with the use of eval() and exec, then you should + provide a specialised subclass that parses and executes the scripts / + expressions in a mini-language of your own. + """ + + def evaluate(self, task, expression): + """ + Evaluate the given expression, within the context of the given task and + return the result. + """ + try: + if isinstance(expression, Operator): + # I am assuming that this takes care of some kind of XML + # expression judging from the contents of operators.py + return expression._matches(task) + else: + return super().evaluate(expression, **task.data) + except Exception as e: + raise WorkflowTaskExecException(task, + "Error evaluating expression " + "'%s', %s" % (expression, str(e))) + diff --git a/SpiffWorkflow/bpmn/BpmnScriptEngine.py b/SpiffWorkflow/bpmn/BpmnScriptEngine.py index d35700c04..a66d4399a 100644 --- a/SpiffWorkflow/bpmn/BpmnScriptEngine.py +++ b/SpiffWorkflow/bpmn/BpmnScriptEngine.py @@ -17,11 +17,13 @@ # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301 USA +from SpiffWorkflow.exceptions import WorkflowTaskExecException +from .PythonScriptEngine import PythonScriptEngine from ..operators import Operator -class BpmnScriptEngine(object): +class BpmnScriptEngine(PythonScriptEngine): """ 
Used during execution of a BPMN workflow to evaluate condition / value expressions. These are used by Gateways, and by Catching Events @@ -39,18 +41,15 @@ def evaluate(self, task, expression): Evaluate the given expression, within the context of the given task and return the result. """ - if isinstance(expression, Operator): - return expression._matches(task) - else: - return self._eval(task, expression, **task.data) + try: + if isinstance(expression, Operator): + # I am assuming that this takes care of some kind of XML + # expression judging from the contents of operators.py + return expression._matches(task) + else: + return super().evaluate(expression, **task.data) + except Exception as e: + raise WorkflowTaskExecException(task, + "Error evaluating expression " + "'%s', %s" % (expression, str(e))) - def execute(self, task, script, **kwargs): - """ - Execute the script, within the context of the specified task - """ - locals().update(kwargs) - exec(script) - - def _eval(self, task, expression, **kwargs): - locals().update(kwargs) - return eval(expression) diff --git a/SpiffWorkflow/bpmn/DMNPythonScriptEngine.py b/SpiffWorkflow/bpmn/DMNPythonScriptEngine.py new file mode 100644 index 000000000..1981b60d1 --- /dev/null +++ b/SpiffWorkflow/bpmn/DMNPythonScriptEngine.py @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- +from builtins import object +import ast +import re +import operator +import datetime +from datetime import timedelta +from decimal import Decimal +from SpiffWorkflow.workflow import WorkflowException +from .PythonScriptEngine import PythonScriptEngine, Box + +# Copyright (C) 2020 Kelly McDonald +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. 
+# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301 USA + +class DMNPythonScriptEngine(PythonScriptEngine): + """ + This simply overrides the python script engine to do some specific things for + DMN - I left the underlying PythonScriptEngine with everything in place so it + will play nice with the existing FeelLikeScriptEngine + """ + def __init__(self): + super().__init__() + + def eval_dmn_expression(self, inputExpr, matchExpr, **kwargs): + """ + Here we need to handle a few things such as if it is an equality or if + the equality has already been taken care of. For now, we just assume it is equality. + """ + + + + if matchExpr is None: + return True + + nolhs = False + if '?' 
in matchExpr: + nolhs = True + matchExpr = matchExpr.replace('?', 'dmninputexpr') + + rhs, needsEquals = self.validateExpression(matchExpr) + + extra = { + 'datetime': datetime, + 'timedelta': timedelta, + 'Decimal': Decimal, + 'Box': Box + } + + lhs, lhsNeedsEquals = self.validateExpression(inputExpr) + if not lhsNeedsEquals: + raise WorkflowException("Input Expression '%s' is malformed"%inputExpr) + if nolhs: + dmninputexpr = self.evaluate(lhs, externalMethods= extra, **kwargs) + extra = {'dmninputexpr':dmninputexpr, + 'datetime':datetime, + 'timedelta':timedelta, + 'Decimal':Decimal, + 'Box':Box + } + return self.evaluate(rhs,externalMethods=extra, **kwargs) + if needsEquals: + expression = lhs + ' == ' + rhs + else: + expression = lhs + rhs + + return self.evaluate(expression, externalMethods=extra, **kwargs) + diff --git a/SpiffWorkflow/bpmn/FeelLikeScriptEngine.py b/SpiffWorkflow/bpmn/FeelLikeScriptEngine.py new file mode 100644 index 000000000..668988089 --- /dev/null +++ b/SpiffWorkflow/bpmn/FeelLikeScriptEngine.py @@ -0,0 +1,338 @@ +# -*- coding: utf-8 -*- +from builtins import object +import ast +import re +import datetime +import operator +from datetime import timedelta +from decimal import Decimal +from SpiffWorkflow.workflow import WorkflowException +from .PythonScriptEngine import PythonScriptEngine + +# Copyright (C) 2020 Kelly McDonald +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301 USA + +def feelConvertTime(datestr,parsestr): + return datetime.datetime.strptime(datestr,parsestr) + +class FeelInterval(): + def __init__(self, begin, end, leftOpen=False, rightOpen=False): + # pesky thing with python floats and Decimal comparison + if isinstance(begin,float): + begin = Decimal("%0.5f"%begin) + if isinstance(end, float): + end = Decimal("%0.5f" % end) + + self.startInterval = begin + self.endInterval = end + self.leftOpen = leftOpen + self.rightOpen = rightOpen + + def __eq__(self, other): + if self.leftOpen: + lhs = other > self.startInterval + else: + lhs = other >= self.startInterval + if self.rightOpen: + rhs = other < self.endInterval + else: + rhs = other <= self.endInterval + return lhs and rhs + +class FeelContains(): + def __init__(self, testItem,invert=False ): + self.test = testItem + self.invert = invert + def __eq__(self, other): + has = False + if isinstance(other,dict): + has = self.test in list(other.keys()) + else: + has = self.test in list(other) + if self.invert: + return not has + else: + return has + +class FeelNot(): + def __init__(self, testItem): + self.test = testItem + + def __eq__(self, other): + if other == self.test: + return False + else: + return True + +def feelConcatenate(*lst): + ilist = [] + for l in lst: + ilist = ilist + l + return ilist + +def feelAppend(lst,item): + newlist = lst[:] # get a copy + newlist.append(item) + return newlist + +def feelNow(): + return datetime.datetime.now() + +def feelGregorianDOW(date): + # we assume date is either date in Y-m-d format + # or it is of datetime class + if isinstance(date,str): + date = datetime.datetime.strptime(date,'%Y-%m-%d') + return date.isoweekday()%7 + + +def transformDuration(duration,td): + if duration: + return td * float(duration) + 
else: + return timedelta(seconds=0) + +def lookupPart(code,base): + x= re.search("([0-9.]+)"+code,base) + if x: + return x.group(1) + else: + return None + +def feelFilter(var,a,b,op,column=None): + """ + here we are trying to cover some of the basic test cases, + dict, list of dicts and list. + """ + opmap = {'=':operator.eq, + '<':operator.lt, + '>':operator.gt, + '<=':operator.le, + '>=':operator.ge, + '!=':operator.ne} + #print('eval b',a,b,op,column) + b = eval(b) + # if it is a list and we are referring to 'item' then we + # expect the variable to be a simple list + if (isinstance(var,list)) and a == 'item': + return [x for x in var if opmap[op](x,b)] + # if it is a dictionary, and the keys refer to dictionaries, + # then we convert it to a list of dictionaries with the elements + # all having {'key':key,} + # if it is a dictionary and the key refers to a non-dict, then + # we conver to a dict having {'key':key,'value':value} + if (isinstance(var,dict)): + newvar = [] + for key in var.keys(): + if isinstance(var[key],dict): + newterm = var[key] + newterm.update({'key':key}) + newvar.append(newterm) + else: + newvar.append({'key':key,'value':var[key]}) + var = newvar + + #print (var) + #print(column) + if column!=None: + return [x.get(column) for x in var if opmap[op](x.get(a), b)] + else: + return [x for x in var if opmap[op](x.get(a), b)] + + + +def feelParseISODuration(input): + """ + Given an ISO duration designation + such as : + P0Y1M2DT3H2S + and convert it into a python timedelta + + Abbreviations may be made as in : + + PT30S + + NB: + Months are defined as 30 days currently - as I am dreading getting into + Date arithmetic edge cases. 
+ + """ + if input[0] != 'P': + raise Exception("Oh Crap!") + input = input[1:] + days, time = input.split("T") + lookups = [("Y",days,timedelta(days=365)), + ("M", days, timedelta(days=30)), + ("W", days, timedelta(days=7)), + ("D", days, timedelta(days=1)), + ("H", time, timedelta(seconds=60*60)), + ("M", time, timedelta(seconds=60)), + ("S", time, timedelta(seconds=1)), + ] + totaltime = [transformDuration(lookupPart(x[0],x[1]),x[2]) for x in lookups] + return sum(totaltime,timedelta(seconds=0)) + + + + + + + +# Order Matters!! +fixes = [(r'string\s+length\((.+?)\)','len(\\1)'), + (r'count\((.+?)\)','len(\\1)'), + (r'concatenate\((.+?)\)','feelConcatenate(\\1)'), + (r'append\((.+?),(.+?)\)','feelAppend(\\1,\\2)'), # again will not work with literal list + (r'list\s+contains\((.+?),(.+?)\)','\\2 in \\1'), # list contains(['a','b','stupid,','c'],'stupid,') will break + (r'contains\((.+?),(.+?)\)','\\2 in \\1'), # contains('my stupid, stupid comment','stupid') will break + (r'not\s+?contains\((.+?)\)','FeelContains(\\1,invert=True)'), # not contains('something') + (r'not\((.+?)\)','FeelNot(\\1)'), # not('x') + + (r'now\(\)','feelNow()'), + (r'contains\((.+?)\)', 'FeelContains(\\1)'), # contains('x') + # date and time () + (r'date\s+?and\s+?time\s*\((.+?)\)', 'feelConvertTime(\\1,"%Y-%m-%dT%H:%M:%S")'), + (r'date\s*\((.+?)\)', 'feelConvertTime(\\1,"%Y-%m-%d")'), # date () + (r'day\s+of\s+\week\((.+?)\)','feelGregorianDOW(\\1)'), + (r'\[([^\[\]]+?)[.]{2}([^\[\]]+?)\]','FeelInterval(\\1,\\2)'), # closed interval on both sides + (r'[\]\(]([^\[\]\(\)]+?)[.]{2}([^\[\]\)\(]+?)\]','FeelInterval(\\1,\\2,leftOpen=True)'), # open lhs + (r'\[([^\[\]\(\)]+?)[.]{2}([^\[\]\(\)]+?)[\[\)]','FeelInterval(\\1,\\2,rightOpen=True)'), # open rhs + # I was having problems with this matching a "P" somewhere in another expression + # so I added a bunch of different cases that should isolate this. 
+ (r'^(P(([0-9.]+Y)?([0-9.]+M)?([0-9.]+W)?([0-9.]+D)?)?(T([0-9.]+H)?([0-9.]+M)?([0-9.]+S)?)?)$', + 'feelParseISODuration("\\1")'), ## Parse ISO Duration convert to timedelta - standalone + (r'^(P(([0-9.]+Y)?([0-9.]+M)?([0-9.]+W)?([0-9.]+D)?)?(T([0-9.]+H)?([0-9.]+M)?([0-9.]+S)?)?)\s', + 'feelParseISODuration("\\1") '), ## Parse ISO Duration convert to timedelta beginning + (r'\s(P(([0-9.]+Y)?([0-9.]+M)?([0-9.]+W)?([0-9.]+D)?)?(T([0-9.]+H)?([0-9.]+M)?([0-9.]+S)?)?)\s', + ' feelParseISODuration("\\1") '), ## Parse ISO Duration convert to timedelta in context + (r'\s(P(([0-9.]+Y)?([0-9.]+M)?([0-9.]+W)?([0-9.]+D)?)?(T([0-9.]+H)?([0-9.]+M)?([0-9.]+S)?)?)$', + ' feelParseISODuration("\\1")'), ## Parse ISO Duration convert to timedelta end + + (r'(.+)\[(\S+)?(<=)(.+)]\.(\S+)', 'feelFilter(\\1,"\\2","\\4","\\3","\\5")'), # implement a simple filter + (r'(.+)\[(\S+)?(>=)(.+)]\.(\S+)', 'feelFilter(\\1,"\\2","\\4","\\3","\\5")'), # implement a simple filter + (r'(.+)\[(\S+)?(!=)(.+)]\.(\S+)', 'feelFilter(\\1,"\\2","\\4","\\3","\\5")'), # implement a simple filter + (r'(.+)\[(\S+)?([=<>])(.+)]\.(\S+)', 'feelFilter(\\1,"\\2",\\4,"\\3","\\5")'), # implement a simple filter + (r'(.+)\[(\S+)?(<=)(.+)]', 'feelFilter(\\1,"\\2","\\4","\\3")'), # implement a simple filter + (r'(.+)\[(\S+)?(>=)(.+)]', 'feelFilter(\\1,"\\2","\\4","\\3")'), # implement a simple filter + (r'(.+)\[(\S+)?(!=)(.+)]', 'feelFilter(\\1,"\\2","\\4","\\3")'), # implement a simple filter + (r'(.+)\[(\S+)?([=<>])(.+)]','feelFilter(\\1,"\\2","\\4","\\3")'), # implement a simple filter + (r'[\]\(]([^\[\]\(\)]+?)[.]{2}([^\[\]\(\)]+?)[\[\)]', + 'FeelInterval(\\1,\\2,rightOpen=True,leftOpen=True)'), # open both + + + # parse dot.dict for several different edge cases + # make sure that it begins with a letter character - otherwise we + # may get float numbers. 
+ # will not work for cases where we do something like: + # x contains(this.dotdict.item) + # and it may be difficult, because we do not want to replace for the case of + # somedict.keys() - because that is actually in the tests. + # however, it would be fixed by doing: + # x contains( this.dotdict.item ) + + ('true','True'), + ('false','False') + ] +externalFuncs = { + 'feelConvertTime':feelConvertTime, + 'FeelInterval':FeelInterval, + 'FeelNot':FeelNot, + 'Decimal':Decimal, + 'feelConcatenate': feelConcatenate, + 'feelAppend': feelAppend, + 'feelFilter': feelFilter, + 'feelNow': feelNow, + 'FeelContains': FeelContains, + 'datetime':datetime, + 'feelParseISODuration': feelParseISODuration, + 'feelGregorianDOW':feelGregorianDOW, +} + +default_header = """ + + + +""" +class FeelLikeScriptEngine(PythonScriptEngine): + """ + This should serve as a base for all scripting & expression evaluation + operations that are done within both BPMN and BMN. Eventually it will also + serve as a base for FEEL expressions as well + + If you are uncomfortable with the use of eval() and exec, then you should + provide a specialised subclass that parses and executes the scripts / + expressions in a mini-language of your own. + """ + def __init__(self): + super().__init__() + + def patch_expression(self,invalid_python,lhs=''): + if invalid_python is None: + return None + proposed_python = invalid_python + for transformation in fixes: + if isinstance(transformation[1],str): + proposed_python = re.sub(transformation[0],transformation[1],proposed_python) + else: + for x in re.findall(transformation[0],proposed_python): + if '.' 
in(x): + proposed_python = proposed_python.replace(x,transformation[1](x)) + if lhs is not None: + proposed_python = lhs + proposed_python + return proposed_python + + def validateExpression (self,text): + if text is None: + return + try: + # this should work if we can just do a straight equality + revised_text = self.patch_expression(text) + ast.parse(revised_text) + return revised_text,True + except: + try: + revised_text = self.patch_expression(text, 's ') # if we have problems parsing, then we introduce a + # variable on the left hand side and try that and see if that parses. If so, then we know that + # we do not need to introduce an equality operator later in the dmn + ast.parse(revised_text) + return revised_text[2:],False + except Exception as e: + raise Exception("error parsing expression "+text + " " + + str(e)) + + + + + def evaluate(self, expression,externalMethods={}, **kwargs): + """ + Evaluate the given expression, within the context of the given task and + return the result. 
+ """ + externalMethods.update(externalFuncs) + return super().evaluate(expression,externalMethods,**kwargs) + + + def execute(self, task, script, data,externalMethods={}): + """ + Execute the script, within the context of the specified task + """ + externalMethods.update(externalFuncs) + super().execute(task,script,data,externalMethods) + + + + diff --git a/SpiffWorkflow/bpmn/PythonScriptEngine.py b/SpiffWorkflow/bpmn/PythonScriptEngine.py new file mode 100644 index 000000000..4f4a71643 --- /dev/null +++ b/SpiffWorkflow/bpmn/PythonScriptEngine.py @@ -0,0 +1,203 @@ +# -*- coding: utf-8 -*- +import copy +from builtins import object +import ast +import datetime +from datetime import timedelta +from SpiffWorkflow.workflow import WorkflowException + +# Copyright (C) 2020 Kelly McDonald +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301 USA +class Box(dict): + """ + Example: + m = Box({'first_name': 'Eduardo'}, last_name='Pool', age=24, sports=['Soccer']) + """ + def __init__(self, *args, **kwargs): + super(Box, self).__init__(*args, **kwargs) + for arg in args: + if isinstance(arg, dict): + for k, v in arg.items(): + if isinstance(v,dict): + self[k] = Box(v) + else: + self[k] = v + + if kwargs: + for k, v in kwargs.items(): + if isinstance(v, dict): + self[k] = Box(v) + else: + self[k] = v + + def __deepcopy__(self, memodict={}): + my_copy = Box() + for k,v in self.items(): + my_copy[k] = copy.deepcopy(v) + return my_copy + + def __getattr__(self, attr): + try: + output = self[attr] + except: + raise AttributeError + return output + + def __setattr__(self, key, value): + self.__setitem__(key, value) + + def __setitem__(self, key, value): + super(Box, self).__setitem__(key, value) + self.__dict__.update({key: value}) + def __getstate__(self): + return self.__dict__ + + def __setstate__(self, state): + self.__init__(state) + + + + def __delattr__(self, item): + self.__delitem__(item) + + def __delitem__(self, key): + super(Box, self).__delitem__(key) + del self.__dict__[key] + + +default_header = """ + + + +""" +class PythonScriptEngine(object): + """ + This should serve as a base for all scripting & expression evaluation + operations that are done within both BPMN and BMN. Eventually it will also + serve as a base for FEEL expressions as well + + If you are uncomfortable with the use of eval() and exec, then you should + provide a specialised subclass that parses and executes the scripts / + expressions in a mini-language of your own. 
+ """ + def __init__(self,scriptingAdditions = {}): + self.globals = {'timedelta':timedelta, + 'datetime':datetime, + 'Box':Box, + } + self.globals.update(scriptingAdditions) + + def validateExpression (self,text): + if text is None: + return + try: + # this should work if we can just do a straight equality + ast.parse(text) + return text,True + except: + try: + revised_text = 's ' + text # if we have problems parsing, + # then we introduce a + # variable on the left hand side and try that and see if that parses. If so, then we know that + # we do not need to introduce an equality operator later in the dmn + ast.parse(revised_text) + return revised_text[2:],False + except Exception as e: + raise Exception("error parsing expression "+text + " " + + str(e)) + + def eval_dmn_expression(self, inputExpr, matchExpr, **kwargs): + """ + Here we need to handle a few things such as if it is an equality or if + the equality has already been taken care of. For now, we just assume it is equality. + """ + if matchExpr is None: + return True + rhs, needsEquals = self.validateExpression(matchExpr) + lhs, lhsNeedsEquals = self.validateExpression(inputExpr) + if not lhsNeedsEquals: + raise WorkflowException("Input Expression '%s' is malformed"%inputExpr) + if needsEquals: + expression = lhs + ' == ' + rhs + else: + expression = lhs + rhs + + return self.evaluate(default_header + expression, do_convert=False, **kwargs) + + def evaluate(self, expression,externalMethods={}, do_convert=True, **kwargs): + """ + Evaluate the given expression, within the context of the given task and + return the result. 
+ """ + exp,valid = self.validateExpression(expression) + return self._eval(exp, **kwargs,do_convert=do_convert, externalMethods=externalMethods) + + def convertToBoxSub(self,data): + if isinstance(data,list): + for x in range(len(data)): + data[x] = self.convertToBoxSub(data[x]) + return data + if isinstance(data,dict): + for x in data.keys(): + if isinstance(data[x],dict): + data[x] = self.convertToBoxSub(data[x]) + return Box(data) + return data + + + def convertToBox(self,data): + for key in data.keys(): + data[key] = self.convertToBoxSub(data[key]) + + def convertFromBoxSub(self,data): + if isinstance(data,list): + return [self.convertFromBoxSub(x) for x in data] + if isinstance(data,(dict,Box)): + return {k:self.convertFromBoxSub(v) for k,v in data.items()} + return data + + def convertFromBox(self,data): + for k in data.keys(): + data[k] = self.convertFromBoxSub(data[k]) + + + def execute(self, task, script, data,externalMethods={}): + """ + Execute the script, within the context of the specified task + """ + globals = self.globals + + self.convertToBox(data) + #data.update({'task':task}) # one of our legacy tests is looking at task. + # this may cause a problem down the road if we + # actually have a variable named 'task' + globals.update(data) # dict comprehensions cause problems when the variables are not viable. 
+ globals.update(externalMethods) + exec(script,globals,data) + self.convertFromBox(data) + + + def _eval(self, expression,externalMethods={}, **kwargs): + lcls = {} + lcls.update(kwargs) + globals = self.globals + for x in lcls.keys(): + if isinstance(lcls[x], dict): + lcls[x] = Box(lcls[x]) + globals.update(lcls) + globals.update(externalMethods) + return eval(expression,globals,lcls) diff --git a/SpiffWorkflow/bpmn/parser/BpmnParser.py b/SpiffWorkflow/bpmn/parser/BpmnParser.py index adf179e8e..d2842245a 100644 --- a/SpiffWorkflow/bpmn/parser/BpmnParser.py +++ b/SpiffWorkflow/bpmn/parser/BpmnParser.py @@ -26,6 +26,7 @@ from ..specs.ExclusiveGateway import ExclusiveGateway from ..specs.InclusiveGateway import InclusiveGateway from ..specs.IntermediateCatchEvent import IntermediateCatchEvent +from ..specs.IntermediateThrowEvent import IntermediateThrowEvent from ..specs.ManualTask import ManualTask from ..specs.NoneTask import NoneTask from ..specs.ParallelGateway import ParallelGateway @@ -40,9 +41,10 @@ ExclusiveGatewayParser, ParallelGatewayParser, InclusiveGatewayParser, CallActivityParser, ScriptTaskParser, IntermediateCatchEventParser, - BoundaryEventParser) -import xml.etree.ElementTree as ET - + IntermediateThrowEventParser, + BoundaryEventParser,SubWorkflowParser) +from lxml import etree +CAMUNDA_MODEL_NS = 'http://camunda.org/schema/1.0/bpmn' class BpmnParser(object): """ @@ -61,6 +63,8 @@ class BpmnParser(object): full_tag('endEvent'): (EndEventParser, EndEvent), full_tag('userTask'): (UserTaskParser, UserTask), full_tag('task'): (NoneTaskParser, NoneTask), + full_tag('subProcess'): (SubWorkflowParser, CallActivity), + full_tag('manualTask'): (ManualTaskParser, ManualTask), full_tag('exclusiveGateway'): (ExclusiveGatewayParser, ExclusiveGateway), @@ -71,6 +75,8 @@ class BpmnParser(object): full_tag('scriptTask'): (ScriptTaskParser, ScriptTask), full_tag('intermediateCatchEvent'): (IntermediateCatchEventParser, IntermediateCatchEvent), + 
full_tag('intermediateThrowEvent'): (IntermediateThrowEventParser, + IntermediateThrowEvent), full_tag('boundaryEvent'): (BoundaryEventParser, BoundaryEvent), } @@ -123,7 +129,7 @@ def add_bpmn_files(self, filenames): for filename in filenames: f = open(filename, 'r') try: - self.add_bpmn_xml(ET.parse(f), filename=filename) + self.add_bpmn_xml(etree.parse(f), filename=filename) finally: f.close() @@ -136,6 +142,20 @@ def add_bpmn_xml(self, bpmn, svg=None, filename=None): :param filename: Optionally, provide the source filename. """ xpath = xpath_eval(bpmn) + # do a check on our bpmn to ensure that no id appears twice + # this *should* be taken care of by our modeler - so this test + # should never fail. + ids = [x for x in xpath('.//bpmn:*[@id]')] + foundids = {} + for node in ids: + id = node.get('id') + if foundids.get(id,None) is not None: + raise ValidationException( + 'The bpmn document should have no repeating ids but (%s) repeats'%id, + node=node, + filename=filename) + else: + foundids[id] = 1 processes = xpath('.//bpmn:process') for process in processes: @@ -172,6 +192,16 @@ def parse_condition(self, condition_expression, outgoing_task, """ return condition_expression + def parse_extensions(self, node, task_parser=None, xpath=None): + extensions = {} + xpath = xpath or xpath_eval(node) + extension_nodes = xpath( + './/bpmn:extensionElements/{%s}properties/{%s}property'%( + CAMUNDA_MODEL_NS,CAMUNDA_MODEL_NS)) + for node in extension_nodes: + extensions[node.get('name')] = node.get('value') + return extensions + def _parse_documentation(self, node, task_parser=None, xpath=None): xpath = xpath or xpath_eval(node) documentation_node = first(xpath('.//bpmn:documentation')) diff --git a/SpiffWorkflow/bpmn/parser/ProcessParser.py b/SpiffWorkflow/bpmn/parser/ProcessParser.py index d1d528ffa..e4f2e0a96 100644 --- a/SpiffWorkflow/bpmn/parser/ProcessParser.py +++ b/SpiffWorkflow/bpmn/parser/ProcessParser.py @@ -20,7 +20,7 @@ from .ValidationException import 
ValidationException from ..specs.BpmnProcessSpec import BpmnProcessSpec -from .util import xpath_eval +from .util import xpath_eval, DIAG_COMMON_NS class ProcessParser(object): @@ -53,6 +53,10 @@ def __init__(self, p, node, svg=None, filename=None, doc_xpath=None): self.filename = filename self.id_to_lane_lookup = None self._init_lane_lookup() + self.id_to_coords_lookup = None # Dictionary of positional arguments for each node. + self._init_coord_lookup() + self.message_lookup = {} # Dictionary of positional arguments for each node. + self._init_message_lookup() def get_id(self): """ @@ -102,17 +106,49 @@ def _init_lane_lookup(self): if id: self.id_to_lane_lookup[id] = name + + def get_coord(self, id): + """ + Return the x,y coordinates of the given task, if available. + """ + return self.id_to_coords_lookup.get(id, {'x':0, 'y':0}) + + def _init_message_lookup(self): + """Creates a lookup table for the name/id of all messages in the workflow + """ + self.message_lookup = {} + for message in self.doc_xpath('.//bpmn:message'): + self.message_lookup[message.attrib['id']] = message.attrib['name'] + for message in self.doc_xpath('.//bpmn:signal'): + self.message_lookup[message.attrib['id']] = message.attrib['name'] + + def _init_coord_lookup(self): + """Creates a lookup table with the x/y coordinates of each shape. 
+ Only tested with the output from the Camunda modeler, which provides + these details in the bpmndi / and dc namespaces.""" + self.id_to_coords_lookup = {} + for position in self.doc_xpath('.//bpmndi:BPMNShape'): + bounds = xpath_eval(position)("dc:Bounds") + if len(bounds) > 0 and 'bpmnElement' in position.attrib: + bound = bounds[0] + self.id_to_coords_lookup[position.attrib['bpmnElement']] = \ + {'x': float(bound.attrib['x']), 'y': float(bound.attrib['y'])} + def _parse(self): - start_node_list = self.xpath('.//bpmn:startEvent') + # here we only look in the top level, We will have another + # bpmn:startEvent if we have a subworkflow task + start_node_list = self.xpath('./bpmn:startEvent') + if not start_node_list: raise ValidationException( "No start event found", node=self.node, filename=self.filename) - elif len(start_node_list) != 1: - raise ValidationException( - "Only one Start Event is supported in each process", - node=self.node, filename=self.filename) + # elif len(start_node_list) != 1: + # raise ValidationException( + # "Only one Start Event is supported in each process", + # node=self.node, filename=self.filename) self.parsing_started = True - self.parse_node(start_node_list[0]) + for node in start_node_list: + self.parse_node(node) self.is_parsed = True def get_spec(self): diff --git a/SpiffWorkflow/bpmn/parser/TaskParser.py b/SpiffWorkflow/bpmn/parser/TaskParser.py index 697c98384..e7145a48e 100644 --- a/SpiffWorkflow/bpmn/parser/TaskParser.py +++ b/SpiffWorkflow/bpmn/parser/TaskParser.py @@ -22,11 +22,20 @@ import sys import traceback from .ValidationException import ValidationException +from ..specs.ScriptTask import ScriptTask +from ..specs.UserTask import UserTask from ..specs.BoundaryEvent import _BoundaryEventParent +from ..specs.MultiInstanceTask import getDynamicMIClass +from ...dmn.specs.BusinessRuleTask import BusinessRuleTask +from ...operators import Attrib, PathAttrib from .util import xpath_eval, one LOG = logging.getLogger(__name__) 
+STANDARDLOOPCOUNT = '25' + +CAMUNDA_MODEL_NS = 'http://camunda.org/schema/1.0/bpmn' + class TaskParser(object): """ @@ -55,6 +64,105 @@ def __init__(self, process_parser, spec_class, node): self.node = node self.xpath = xpath_eval(node) + def _detect_multiinstance(self): + + # get special task decorators from XML + multiinstanceElement = self.process_xpath( + './/*[@id="%s"]/bpmn:multiInstanceLoopCharacteristics' % self.get_id()) + standardLoopElement = self.process_xpath( + './/*[@id="%s"]/bpmn:standardLoopCharacteristics' % self.get_id()) + + # initialize variables + isMultiInstance = len(multiinstanceElement) > 0 + isLoop = len(standardLoopElement) > 0 + multiinstance = False + isSequential = False + completecondition = None + collectionText = None + elementVarText = None + self.task.loopTask = False + + # Fix up MultiInstance mixin to take care of both + # MultiInstance and standard Looping task + if isMultiInstance or isLoop: + multiinstance = True + if isMultiInstance: + sequentialText = multiinstanceElement[0].get('isSequential') + collectionText = multiinstanceElement[0].attrib.get( + '{' + CAMUNDA_MODEL_NS + '}collection') + elementVarText = multiinstanceElement[0].attrib.get( + '{' + CAMUNDA_MODEL_NS + '}elementVariable') + + if sequentialText == 'true': + isSequential = True + loopCardinality = self.process_xpath( + './/*[@id="%s"]/bpmn:multiInstanceLoopCharacteristics/bpmn:loopCardinality' % self.get_id()) + if len(loopCardinality) > 0: + loopcount = loopCardinality[0].text + elif collectionText is not None: + loopcount = collectionText + else: + loopcount = '1' + completionCondition = self.process_xpath( + './/*[@id="%s"]/bpmn:multiInstanceLoopCharacteristics/bpmn:completionCondition' % self.get_id()) + if len(completionCondition) > 0: + completecondition = completionCondition[0].text + + else: # must be loop + isSequential = True + loopcount = STANDARDLOOPCOUNT # here we default to a sane numer of loops + self.task.loopTask = True + LOG.debug("Task 
Name: %s - class %s" % ( + self.get_id(), self.task.__class__)) + LOG.debug(" Task is MultiInstance: %s" % multiinstance) + LOG.debug(" MultiInstance is Sequential: %s" % isSequential) + LOG.debug(" Task has loopcount of: %s" % loopcount) + LOG.debug(" Class has name of : " + "%s" % self.task.__class__.__name__) + # currently a safeguard that this isn't applied in any condition + # that we do not expect. This list can be exapanded at a later + # date To handle other use cases - don't forget the overridden + # test classes! + if multiinstance and isinstance(self.task, (UserTask,BusinessRuleTask,ScriptTask)): + loopcount = loopcount.replace('.', + '/') # make dot notation compatible + # with bmpmn path notation. + + if loopcount.find('/') >= 0: + self.task.times = PathAttrib(loopcount) + else: + self.task.times = Attrib(loopcount) + + if collectionText is not None: + collectionText = collectionText.replace('.', '/') # make dot + # notation compatible + # with bmpmn path notation. + if collectionText.find('/') >= 0: + self.task.collection = PathAttrib(collectionText) + else: + self.task.collection = Attrib(collectionText) + else: + self.task.collection = None + + # self.task.collection = collectionText + self.task.elementVar = elementVarText + self.task.completioncondition = completecondition # we need to define what this is + self.task.isSequential = isSequential + # add some kind of limits here in terms of what kinds of classes + # we will allow to be multiinstance + + self.task.prevtaskclass = self.task.__module__ + "." 
+ self.task.__class__.__name__ + newtaskclass = getDynamicMIClass(self.get_id(),self.task.__class__) + self.task.__class__ = newtaskclass + # self.task.__class__ = type(self.get_id() + '_class', ( + # MultiInstanceTask,self.task.__class__ ), {}) + self.task.multiInstance = multiinstance + self.task.isSequential = isSequential + if isLoop: + self.task.expanded = 25 + else: + self.task.expanded = 1 + def parse_node(self): """ Parse this node, and all children, returning the connected task spec. @@ -63,9 +171,14 @@ def parse_node(self): try: self.task = self.create_task() + self.task.extensions = self.parser.parse_extensions(self.node, + xpath=self.xpath, + task_parser=self) self.task.documentation = self.parser._parse_documentation( self.node, xpath=self.xpath, task_parser=self) + self._detect_multiinstance() + boundary_event_nodes = self.process_xpath( './/bpmn:boundaryEvent[@attachedToRef="%s"]' % self.get_id()) if boundary_event_nodes: @@ -74,7 +187,6 @@ def parse_node(self): self.task, lane=self.task.lane) self.process_parser.parsed_nodes[ self.node.get('id')] = parent_task - parent_task.connect_outgoing( self.task, '%s.FromBoundaryEventParent' % self.get_id(), None, None) @@ -100,18 +212,31 @@ def parse_node(self): filename=self.process_parser.filename) for sequence_flow in outgoing: target_ref = sequence_flow.get('targetRef') - target_node = one( - self.process_xpath('.//*[@id="%s"]' % target_ref)) + try: + target_node = one( + self.process_xpath('.//bpmn:*[@id="%s"]'% \ + target_ref)) + except: + raise ValidationException( + 'When looking for a task spec, we found two items, ' + 'perhaps a form has the same ID? (%s)' % target_ref, + node=self.node, + filename=self.process_parser.filename) + c = self.process_parser.parse_node(target_node) - children.append((c, target_node, sequence_flow)) + position = c.position + children.append((position, c, target_node, sequence_flow)) if children: + # Sort children by their y coordinate. 
+ children = sorted(children, key=lambda tup: float(tup[0]["y"])) + default_outgoing = self.node.get('default') if not default_outgoing: - (c, target_node, sequence_flow) = children[0] + (position, c, target_node, sequence_flow) = children[0] default_outgoing = sequence_flow.get('id') - for (c, target_node, sequence_flow) in children: + for (position, c, target_node, sequence_flow) in children: self.connect_outgoing( c, target_node, sequence_flow, sequence_flow.get('id') == default_outgoing) @@ -153,7 +278,8 @@ def create_task(self): """ return self.spec_class(self.spec, self.get_task_spec_name(), lane=self.get_lane(), - description=self.node.get('name', None)) + description=self.node.get('name', None), + position=self.process_parser.get_coord(self.get_id())) def connect_outgoing(self, outgoing_task, outgoing_task_node, sequence_flow_node, is_default): diff --git a/SpiffWorkflow/bpmn/parser/task_parsers.py b/SpiffWorkflow/bpmn/parser/task_parsers.py index 2ed650b3a..e162308ec 100644 --- a/SpiffWorkflow/bpmn/parser/task_parsers.py +++ b/SpiffWorkflow/bpmn/parser/task_parsers.py @@ -19,11 +19,17 @@ from .ValidationException import ValidationException from .TaskParser import TaskParser +from ..workflow import BpmnWorkflow from .util import first, one from ..specs.event_definitions import (TimerEventDefinition, - MessageEventDefinition) -import xml.etree.ElementTree as ET - + MessageEventDefinition, + SignalEventDefinition, + CancelEventDefinition) +from lxml import etree +import copy +from SpiffWorkflow.exceptions import WorkflowException +from SpiffWorkflow.bpmn.specs.IntermediateCatchEvent import IntermediateCatchEvent +CAMUNDA_MODEL_NS = 'http://camunda.org/schema/1.0/bpmn' class StartEventParser(TaskParser): @@ -32,6 +38,21 @@ class StartEventParser(TaskParser): """ def create_task(self): + + isMessageCatchingEvent = self.xpath('.//bpmn:messageEventDefinition') + isSignalCatchingEvent = self.xpath('.//bpmn:signalEventDefinition') + isCancelCatchingEvent = 
self.xpath('.//bpmn:cancelEventDefinition') + if (len(isMessageCatchingEvent) > 0)\ + or (len(isSignalCatchingEvent) > 0)\ + or (len(isCancelCatchingEvent) > 0): + # we need to fix this up to wait on an event + + self.__class__ = type(self.get_id() + '_class', ( + self.__class__, IntermediateCatchEventParser), {}) + self.spec_class = IntermediateCatchEvent + t = IntermediateCatchEventParser.create_task(self) + self.spec.start.connect(t) + return t t = super(StartEventParser, self).create_task() self.spec.start.connect(t) return t @@ -50,6 +71,11 @@ def create_task(self): terminateEventDefinition = self.xpath( './/bpmn:terminateEventDefinition') + if terminateEventDefinition: + terminateEventDefinition = True # here it is just assigning the lxml object, I couldn't see where it was + # ever using this other than just a boolean + else: + terminateEventDefinition = False task = self.spec_class(self.spec, self.get_task_spec_name(), is_terminate_event=terminateEventDefinition, description=self.node.get('name', None)) @@ -84,6 +110,9 @@ class NoneTaskParser(UserTaskParser): pass + + + class ExclusiveGatewayParser(TaskParser): """ Parses an Exclusive Gateway, setting up the outgoing conditions @@ -164,6 +193,66 @@ def get_subprocess_parser(self): return self.parser.get_process_parser(calledElement) +class SubWorkflowParser(CallActivityParser): + + """ + Base class for parsing unspecified Tasks. Currently assumes that such Tasks + should be treated the same way as User Tasks. 
+ """ + def create_task(self): + wf_spec = self.get_subprocess_parser() + return self.spec_class( + self.spec, self.get_task_spec_name(), bpmn_wf_spec=wf_spec, + bpmn_wf_class=self.parser.WORKFLOW_CLASS, + description=self.node.get('name', None)) + + + def get_subprocess_parser(self): + thisTask = self.process_xpath('.//*[@id="%s"]'% self.get_id())[0] + workflowStartEvent = self.process_xpath('.//*[@id="%s"]/bpmn:startEvent' % self.get_id()) + workflowEndEvent = self.process_xpath('.//*[@id="%s"]/bpmn:endEvent' % self.get_id()) + if len(workflowStartEvent) != 1: + raise ValidationException( + 'Multiple Start points are not allowed in SubWorkflow Task', + node=self.node, + filename=self.process_parser.filename) + if len(workflowEndEvent) != 1: + raise ValidationException( + 'Multiple End points are not allowed in SubWorkflow Task', + node=self.node, + filename=self.process_parser.filename) + thisTaskCopy = copy.deepcopy(thisTask) + definitions = {'bpmn':"http://www.omg.org/spec/BPMN/20100524/MODEL", + 'bpmndi':"http://www.omg.org/spec/BPMN/20100524/DI", + 'dc':"http://www.omg.org/spec/DD/20100524/DC", + 'camunda':"http://camunda.org/schema/1.0/bpmn", + 'di':"http://www.omg.org/spec/DD/20100524/DI"} + # Create wrapper xml for the subworkflow + for ns in definitions.keys(): + etree.register_namespace(ns,definitions[ns]) + #root = etree.Element('bpmn:definitions') + root = etree.Element('{'+definitions['bpmn']+'}definitions') + + # Change the subProcess into a new bpmn:process & change the ID + thisTaskCopy.tag='{'+definitions['bpmn']+'}process' + thisTaskCopy.set('id',thisTaskCopy.get('id')+"_process") + thisTaskCopy.set('isExecutable','true') + #inject the subWorkflow process into the header + root.append(thisTaskCopy) + # we have to put xml into our taskspec because + # the actual workflow spec will not serialize to + # json, but the XML is just a string + + xml = etree.tostring(root).decode('ascii') + workflow_name = thisTaskCopy.get('id') + + 
self.parser.add_bpmn_xml(etree.fromstring(xml)) + wf_spec = self.parser.get_spec(workflow_name) + wf_spec.file = self.process_parser.filename + return wf_spec + + + class ScriptTaskParser(TaskParser): """ Parses a script task @@ -172,6 +261,7 @@ class ScriptTaskParser(TaskParser): def create_task(self): script = self.get_script() return self.spec_class(self.spec, self.get_task_spec_name(), script, + lane=self.get_lane(), description=self.node.get('name', None)) def get_script(self): @@ -185,7 +275,7 @@ def get_script(self): class IntermediateCatchEventParser(TaskParser): """ - Parses an Intermediate Catch Event. This currently onlt supports Message + Parses an Intermediate Catch Event. This currently only supports Message and Timer event definitions. """ @@ -193,6 +283,7 @@ def create_task(self): event_definition = self.get_event_definition() return self.spec_class( self.spec, self.get_task_spec_name(), event_definition, + lane = self.get_lane(), description=self.node.get('name', None)) def get_event_definition(self): @@ -204,23 +295,74 @@ def get_event_definition(self): if messageEventDefinition is not None: return self.get_message_event_definition(messageEventDefinition) + signalEventDefinition = first( + self.xpath('.//bpmn:signalEventDefinition')) + if signalEventDefinition is not None: + return self.get_signal_event_definition(signalEventDefinition) + + cancelEventDefinition = first( + self.xpath('.//bpmn:cancelEventDefinition')) + if cancelEventDefinition is not None: + return self.get_cancel_event_definition(cancelEventDefinition) + timerEventDefinition = first( self.xpath('.//bpmn:timerEventDefinition')) if timerEventDefinition is not None: return self.get_timer_event_definition(timerEventDefinition) - raise NotImplementedError( - 'Unsupported Intermediate Catch Event: %r', ET.tostring(self.node)) + raise NotImplementedError( + 'Unsupported Intermediate Catch Event: %r', etree.tostring(self.node)) def get_message_event_definition(self, 
messageEventDefinition): """ Parse the messageEventDefinition node and return an instance of MessageEventDefinition """ + # we have two different modelers that handle messages + # in different ways. + # first the Signavio : messageRef = first(self.xpath('.//bpmn:messageRef')) - message = messageRef.get( - 'name') if messageRef is not None else self.node.get('name') - return MessageEventDefinition(message) + if messageRef is not None: + message = messageRef.get('name') + elif messageEventDefinition is not None: + message = messageEventDefinition.get('messageRef') + if message is None: + message = self.node.get('name') + return MessageEventDefinition(message,name=self.process_parser.message_lookup.get(message,'')) + + def get_signal_event_definition(self, signalEventDefinition): + """ + Parse the messageEventDefinition node and return an instance of + MessageEventDefinition + """ + # we have two different modelers that handle messages + # in different ways. + # first the Signavio : + signalRef = first(self.xpath('.//bpmn:signalRef')) + if signalRef is not None: + message = signalRef.get('name') + elif signalEventDefinition is not None: + message = signalEventDefinition.get('signalRef') + if message is None: + message = self.node.get('name') + return SignalEventDefinition(message,name=self.process_parser.message_lookup.get(message,'')) + + def get_cancel_event_definition(self, cancelEventDefinition): + """ + Parse the messageEventDefinition node and return an instance of + MessageEventDefinition + """ + # we have two different modelers that handle messages + # in different ways. 
+ # first the Signavio : + cancelRef = first(self.xpath('.//bpmn:cancelRef')) + if cancelRef is not None: + message = cancelRef.get('name') + elif cancelEventDefinition is not None: + message = cancelEventDefinition.get('cancelRef') + if message is None: + message = self.node.get('name') + return CancelEventDefinition(message,name=self.process_parser.message_lookup.get(message,'')) def get_timer_event_definition(self, timerEventDefinition): """ @@ -229,12 +371,128 @@ def get_timer_event_definition(self, timerEventDefinition): This currently only supports the timeDate node for specifying an expiry time for the timer. + + ============================= + WIP: add other definitions such as timeDuration and timeCycle + for both timeDuration and timeCycle - from when?? certainly not from the time + we parse the document, so how do we define total time inside a process or subprocess? + + + Furthermore . . . + + do we add a start time for any process that we are working on, i.e. do I need to add a begin time for a + subprocess to use a timer boundary event? or do we start the timer on entry? + + What about a process that has a duration of 5 days and no one actually touches the workflow for 7 days? How + does this get triggered when no one is using the workflow on a day-to-day basis - do we need a cron job to go + discover waiting tasks and fire them if no one is doing anything? 
+ """ timeDate = first(self.xpath('.//bpmn:timeDate')) - return TimerEventDefinition( - self.node.get('name', timeDate.text), - self.parser.parse_condition( - timeDate.text, None, None, None, None, self)) + + if timeDate is not None: + return TimerEventDefinition( + self.node.get('name'), + timeDate.text) +# self.parser.parse_condition( +# timeDate.text, None, None, None, None, self)) + # in the case that it is a duration + timeDuration = first(self.xpath('.//bpmn:timeDuration')) + if timeDuration is not None: + return TimerEventDefinition( + self.node.get('name'), + timeDuration.text) +# self.parser.parse_condition( +# timeDuration.text, None, None, None, None, self)) + + # in the case that it is a cycle - for now, it is an error + timeCycle = first(self.xpath('.//bpmn:timeCycle')) + if timeCycle is not None: + raise NotImplementedError('Cycle Time Definition is not currently supported.') + return TimerEventDefinition( + self.node.get('name'),timeCycle.text) +# self.parser.parse_condition( +# timeCycle.text, None, None, None, None, self)) + raise ValidationException("Unknown Time Specification", + node=self.node, + filename=self.process_parser.filename) + +class IntermediateThrowEventParser(TaskParser): + """ + Parses an Intermediate Catch Event. This currently onlt supports Message + and Timer event definitions. 
+ """ + + def create_task(self): + event_definition = self.get_event_definition() + return self.spec_class( + self.spec, self.get_task_spec_name(), event_definition, + lane=self.get_lane(), + description=self.node.get('name', None)) + + def get_event_definition(self): + """ + Parse the event definition node, and return an instance of Event + """ + messageEventDefinition = first( + self.xpath('.//bpmn:messageEventDefinition')) + if messageEventDefinition is not None: + return self.get_message_event_definition(messageEventDefinition) + + signalEventDefinition = first( + self.xpath('.//bpmn:signalEventDefinition')) + if signalEventDefinition is not None: + return self.get_signal_event_definition(signalEventDefinition) + + cancelEventDefinition = first( + self.xpath('.//bpmn:cancelEventDefinition')) + if cancelEventDefinition is not None: + return self.get_cancel_event_definition(cancelEventDefinition) + + raise NotImplementedError( + 'Unsupported Intermediate Catch Event: %r', etree.tostring(self.node)) + + def get_message_event_definition(self, messageEventDefinition): + """ + Parse the messageEventDefinition node and return an instance of + MessageEventDefinition + """ + #messageRef = first(self.xpath('.//bpmn:messageEventDefinition')) + name = self.node.get('name') + message = messageEventDefinition.get( + 'messageRef') if messageEventDefinition is not None else name + + payload = messageEventDefinition.attrib.get('{' + CAMUNDA_MODEL_NS + '}expression') + resultVar = messageEventDefinition.attrib.get('{' + CAMUNDA_MODEL_NS + '}resultVariable') + + return MessageEventDefinition(message,payload,resultVar=resultVar) + + + def get_signal_event_definition(self, signalEventDefinition): + """ + Parse the signalEventDefinition node and return an instance of + SignalEventDefinition + """ + + message = signalEventDefinition.get( + 'signalRef') if signalEventDefinition is not None else self.node.get('name') + # camunda doesn't have payload for signals evidently + #payload = 
signalEventDefinition.attrib.get('{'+ CAMUNDA_MODEL_NS +'}expression') + return SignalEventDefinition(message) + + def get_cancel_event_definition(self, cancelEventDefinition): + """ + Parse the cancelEventDefinition node and return an instance of + cancelEventDefinition + """ + + message = cancelEventDefinition.get( + 'cancelRef') if cancelEventDefinition is not None else self.node.get('name') + # camunda doesn't have payload for cancels evidently + #payload = cancelEventDefinition.attrib.get('{'+ CAMUNDA_MODEL_NS +'}expression') + return CancelEventDefinition(message) + + class BoundaryEventParser(IntermediateCatchEventParser): diff --git a/SpiffWorkflow/bpmn/parser/util.py b/SpiffWorkflow/bpmn/parser/util.py index 3a08b143b..6909dcbc7 100644 --- a/SpiffWorkflow/bpmn/parser/util.py +++ b/SpiffWorkflow/bpmn/parser/util.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- from __future__ import division + # Copyright (C) 2012 Matthew Hampton # # This library is free software; you can redistribute it and/or @@ -19,6 +20,8 @@ BPMN_MODEL_NS = 'http://www.omg.org/spec/BPMN/20100524/MODEL' +DIAG_INTERCHANGE_NS = "http://www.omg.org/spec/BPMN/20100524/DI" +DIAG_COMMON_NS = "http://www.omg.org/spec/DD/20100524/DC" def one(nodes, or_none=False): @@ -47,7 +50,9 @@ def xpath_eval(node, extra_ns=None): Returns an XPathEvaluator, with namespace prefixes 'bpmn' for http://www.omg.org/spec/BPMN/20100524/MODEL, and additional specified ones """ - namespaces = {'bpmn': BPMN_MODEL_NS} + namespaces = {'bpmn': BPMN_MODEL_NS, + 'dc': DIAG_COMMON_NS, + 'bpmndi': DIAG_INTERCHANGE_NS} if extra_ns: namespaces.update(extra_ns) return lambda path: node.findall(path, namespaces) diff --git a/SpiffWorkflow/bpmn/serializer/BpmnSerializer.py b/SpiffWorkflow/bpmn/serializer/BpmnSerializer.py index 25a30ed10..202e7fce0 100644 --- a/SpiffWorkflow/bpmn/serializer/BpmnSerializer.py +++ b/SpiffWorkflow/bpmn/serializer/BpmnSerializer.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- from __future__ import division, 
absolute_import -# Copyright (C) 2012 Matthew Hampton +# Copyright (C) 2020 Matthew Hampton, Dan Funk # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public @@ -19,15 +19,19 @@ import configparser from io import BytesIO, TextIOWrapper -import xml.etree.ElementTree as ET +from lxml import etree import zipfile import os -from ...serializer.base import Serializer +from json import loads + +from SpiffWorkflow.bpmn.specs.CallActivity import CallActivity +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from SpiffWorkflow.serializer import json as spiff_json from ..parser.BpmnParser import BpmnParser from .Packager import Packager -class BpmnSerializer(Serializer): +class BpmnSerializer(spiff_json.JSONSerializer): """ The BpmnSerializer class provides support for deserializing a Bpmn Workflow Spec from a BPMN package. The BPMN package must have been created using the @@ -35,22 +39,68 @@ class BpmnSerializer(Serializer): It will also use the appropriate subclass of BpmnParser, if one is included in the metadata.ini file. + + It can further serialize and deserialize a running workflow into a json + data structure. The json structure will not include the Spec, this must + be passed as an argument when deserializing. """ - def serialize_workflow_spec(self, wf_spec, **kwargs): - raise NotImplementedError( - "The BpmnSerializer class cannot be used to serialize. 
" - "BPMN authoring should be done using a supported editor.") def serialize_workflow(self, workflow, **kwargs): - raise NotImplementedError( - "The BPMN standard does not provide a specification for " - "serializing a running workflow.") - - def deserialize_workflow(self, s_state, **kwargs): - raise NotImplementedError( - "The BPMN standard does not provide a specification for " - "serializing a running workflow.") + """ + Serializes the workflow data and task tree, but not the specification + That must be passed in when deserializing the data structure. + """ + assert isinstance(workflow, BpmnWorkflow) + include_spec = kwargs.get('include_spec',True) + return super().serialize_workflow(workflow, include_spec=include_spec) + + def serialize_task(self, task, skip_children=False, **kwargs): + return super().serialize_task(task, + skip_children=skip_children, + allow_subs=True) + + def deserialize_workflow(self, s_state, workflow_spec=None, + read_only=False, **kwargs): + + return super().deserialize_workflow(s_state, + wf_class=BpmnWorkflow, + wf_spec=workflow_spec, + read_only=read_only, + **kwargs) + + def _deserialize_task_children(self, task, s_state): + """Reverses the internal process that will merge children from a + sub-workflow in the top level workflow. This copies the states + back into the sub-workflow after generating it from the base spec""" + if not isinstance(task.task_spec, CallActivity): + return super()._deserialize_task_children(task, s_state) + else: + + sub_workflow = task.task_spec.create_sub_workflow(task) + children = [] + for c in s_state['children']: + if sub_workflow.get_tasks_from_spec_name(c['task_spec']): + start_task = self.deserialize_task(sub_workflow, c) + children.append(start_task) + start_task.parent = task.id + sub_workflow.task_tree = start_task + # get a list of tasks in reverse order of change + # our last task should be on the top. 
+ tasks = sub_workflow.get_tasks(task.COMPLETED) + tasks.sort(key=lambda x: x.last_state_change,reverse=True) + if len(tasks)>0: + last_task = tasks[0] + sub_workflow.last_task = last_task + else: + resume_task = self.deserialize_task(task.workflow, c) + resume_task.parent = task.id + children.append(resume_task) + return children + + def deserialize_task(self, workflow, s_state): + assert isinstance(workflow, BpmnWorkflow) + return super().deserialize_task(workflow, s_state) def deserialize_workflow_spec(self, s_state, filename=None): """ @@ -59,7 +109,11 @@ def deserialize_workflow_spec(self, s_state, filename=None): :param filename: the name of the package file. """ - if isinstance(s_state, (str, bytes)): + if isinstance(s_state,dict): + return super().deserialize_workflow_spec(s_state) + if isinstance(s_state,str): + return super().deserialize_workflow_spec(s_state) + if isinstance(s_state, bytes): s_state = BytesIO(s_state) package_zip = zipfile.ZipFile( @@ -73,14 +127,8 @@ def deserialize_workflow_spec(self, s_state, filename=None): ini_fp.close() parser_class = BpmnParser - - try: - parser_class_module = config.get( - 'MetaData', 'parser_class_module', fallback=None) - except TypeError: - # unfortunately the fallback= does not exist on python 2 - parser_class_module = config.get( - 'MetaData', 'parser_class_module', None) + parser_class_module = config.get( + 'MetaData', 'parser_class_module', fallback=None) if parser_class_module: mod = __import__(parser_class_module, fromlist=[ @@ -101,12 +149,12 @@ def deserialize_workflow_spec(self, s_state, filename=None): bpmn_fp = package_zip.open(info) try: - bpmn = ET.parse(bpmn_fp) + bpmn = etree.parse(bpmn_fp) finally: bpmn_fp.close() parser.add_bpmn_xml( bpmn, svg=svg, filename='%s:%s' % (filename, info.filename)) - - return parser.get_spec(config.get('MetaData', 'entry_point_process')) + spec_name = config.get('MetaData', 'entry_point_process') + return parser.get_spec(spec_name) diff --git 
a/SpiffWorkflow/bpmn/serializer/Packager.py b/SpiffWorkflow/bpmn/serializer/Packager.py index 54e2f0fd6..952cf2b13 100644 --- a/SpiffWorkflow/bpmn/serializer/Packager.py +++ b/SpiffWorkflow/bpmn/serializer/Packager.py @@ -23,14 +23,13 @@ import glob import hashlib import inspect -import xml.etree.ElementTree as ET import zipfile from io import StringIO from optparse import OptionParser, OptionGroup from ..parser.BpmnParser import BpmnParser from ..parser.ValidationException import ValidationException from ..parser.util import xpath_eval, one - +from lxml import etree SIGNAVIO_NS = 'http://www.signavio.com' CONFIG_SECTION_NAME = "Packager Options" @@ -139,7 +138,7 @@ def create_package(self): # Parse all of the XML: self.bpmn = {} for filename in self.input_files: - bpmn = ET.parse(filename) + bpmn = etree.parse(filename) self.bpmn[os.path.abspath(filename)] = bpmn # Now run through pre-parsing and validation: @@ -150,7 +149,9 @@ def create_package(self): # Now check that we can parse it fine: for filename, bpmn in list(self.bpmn.items()): self.parser.add_bpmn_xml(bpmn, filename=filename) - + # at this point, we have a item in self.wf_spec.get_specs_depth_first() + # that has a filename of None and a bpmn that needs to be added to the + # list below in for spec. self.wf_spec = self.parser.get_spec(self.entry_point_process) # Now package everything: @@ -158,16 +159,23 @@ def create_package(self): self.package_file, "w", compression=zipfile.ZIP_DEFLATED) done_files = set() + for spec in self.wf_spec.get_specs_depth_first(): filename = spec.file + if filename is None: + # This is for when we are doing a subworkflow, and it + # creates something in the bpmn spec list, but it really has + # no file. In this case, it is safe to skip the add to the + # zip file. 
+ continue if filename not in done_files: done_files.add(filename) bpmn = self.bpmn[os.path.abspath(filename)] self.write_to_package_zip( - "%s.bpmn" % spec.name, ET.tostring(bpmn.getroot())) + "%s.bpmn" % spec.name, etree.tostring(bpmn.getroot())) - self.write_file_to_package_zip( + self.write_to_package_zip( "src/" + self._get_zip_path(filename), filename) self._call_editor_hook('package_for_editor', spec, filename) @@ -298,7 +306,7 @@ def _fix_call_activities_signavio(self, bpmn, filename): for b in list(self.bpmn.values()): for p in xpath_eval(b)(".//bpmn:process"): if (p.get('name', p.get('id', None)) == - subprocess_reference): + subprocess_reference): matches.append(p) if not matches: raise ValidationException( @@ -330,7 +338,7 @@ def package_for_editor_signavio(self, spec, filename): f = open(signavio_file, 'r') try: - signavio_tree = ET.parse(f) + signavio_tree = etree.parse(f) finally: f.close() svg_node = one(signavio_tree.findall('.//svg-representation')) diff --git a/SpiffWorkflow/bpmn/specs/BoundaryEvent.py b/SpiffWorkflow/bpmn/specs/BoundaryEvent.py index de4d17db0..985a7a757 100644 --- a/SpiffWorkflow/bpmn/specs/BoundaryEvent.py +++ b/SpiffWorkflow/bpmn/specs/BoundaryEvent.py @@ -61,6 +61,12 @@ def _should_cancel(self, task_spec): return (issubclass(task_spec.__class__, BoundaryEvent) and task_spec._cancel_activity) + def serialize(self, serializer): + return serializer.serialize_boundary_event_parent(self) + @classmethod + def deserialize(self, serializer, wf_spec, s_state): + return serializer.deserialize_boundary_event_parent(wf_spec, s_state, _BoundaryEventParent) + class BoundaryEvent(IntermediateCatchEvent): """ @@ -77,3 +83,9 @@ def __init__(self, wf_spec, name, cancel_activity=None, super(BoundaryEvent, self).__init__( wf_spec, name, event_definition=event_definition, **kwargs) self._cancel_activity = cancel_activity + + def serialize(self, serializer): + return serializer.serialize_boundary_event(self) + @classmethod + def deserialize(self, 
serializer, wf_spec, s_state): + return serializer.deserialize_boundary_event(wf_spec, s_state, BoundaryEvent) diff --git a/SpiffWorkflow/bpmn/specs/BpmnProcessSpec.py b/SpiffWorkflow/bpmn/specs/BpmnProcessSpec.py index 1f833ba92..dd694d4af 100644 --- a/SpiffWorkflow/bpmn/specs/BpmnProcessSpec.py +++ b/SpiffWorkflow/bpmn/specs/BpmnProcessSpec.py @@ -21,8 +21,7 @@ from .UnstructuredJoin import UnstructuredJoin from ...specs.Simple import Simple from ...specs.WorkflowSpec import WorkflowSpec -import xml.etree.ElementTree as ET - +import lxml.etree as ET LOG = logging.getLogger(__name__) @@ -62,6 +61,13 @@ def _on_complete_hook(self, my_task): super(_EndJoin, self)._on_complete_hook(my_task) my_task.workflow.data.update(my_task.data) + def serialize(self, serializer): + return serializer.serialize_join(self) + + @classmethod + def deserialize(self, serializer, wf_spec, s_state): + return serializer.deserialize_join(wf_spec, s_state, _EndJoin) + class BpmnProcessSpec(WorkflowSpec): """ diff --git a/SpiffWorkflow/bpmn/specs/BpmnSpecMixin.py b/SpiffWorkflow/bpmn/specs/BpmnSpecMixin.py index 9f7ab0a65..a9913dfcf 100644 --- a/SpiffWorkflow/bpmn/specs/BpmnSpecMixin.py +++ b/SpiffWorkflow/bpmn/specs/BpmnSpecMixin.py @@ -49,6 +49,12 @@ def __init__(self, id, name, documentation, target_task_spec): self.documentation = documentation self.target_task_spec = target_task_spec + def serialize(self): + return {'id':self.id, + 'name':self.name, + 'documentation':self.documentation, + 'target_task_spec':self.target_task_spec.id} + class BpmnSpecMixin(TaskSpec): """ @@ -66,9 +72,16 @@ def __init__(self, wf_spec, name, lane=None, **kwargs): super(BpmnSpecMixin, self).__init__(wf_spec, name, **kwargs) self.outgoing_sequence_flows = {} self.outgoing_sequence_flows_by_id = {} + self.loopTask = False self.lane = lane self.documentation = None + def is_loop_task(self): + """ + Returns true if this task is a BPMN looping task + """ + return self.loopTask + def connect_outgoing(self, 
taskspec, sequence_flow_id, sequence_flow_name, documentation): """ @@ -178,8 +191,6 @@ def entering_cancelled_state(self, my_task): """ pass - # - def _on_complete_hook(self, my_task): super(BpmnSpecMixin, self)._on_complete_hook(my_task) if isinstance(my_task.parent.task_spec, BpmnSpecMixin): diff --git a/SpiffWorkflow/bpmn/specs/CallActivity.py b/SpiffWorkflow/bpmn/specs/CallActivity.py index 9b2e5476c..3cec5176c 100644 --- a/SpiffWorkflow/bpmn/specs/CallActivity.py +++ b/SpiffWorkflow/bpmn/specs/CallActivity.py @@ -16,6 +16,7 @@ # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301 USA +from SpiffWorkflow import Task from .BpmnSpecMixin import BpmnSpecMixin from ...specs.SubWorkflow import SubWorkflow @@ -39,17 +40,24 @@ def __init__(self, wf_spec, name, bpmn_wf_spec=None, bpmn_wf_class=None, super(CallActivity, self).__init__(wf_spec, name, None, **kwargs) self.spec = bpmn_wf_spec self.wf_class = bpmn_wf_class + self.sub_workflow = None def test(self): TaskSpec.test(self) - def _create_subworkflow(self, my_task): - return self.get_workflow_class()( + def create_sub_workflow(self, my_task): + + sub_workflow = self.get_workflow_class()( self.spec, name=self.name, read_only=my_task.workflow.read_only, script_engine=my_task.workflow.outer_workflow.script_engine, parent=my_task.workflow) + sub_workflow.completed_event.connect( + self._on_subworkflow_completed, my_task) + sub_workflow.data = my_task.workflow.data + return sub_workflow + def get_workflow_class(self): """ Returns the workflow class to instantiate for the sub workflow @@ -61,3 +69,26 @@ def _on_subworkflow_completed(self, subworkflow, my_task): subworkflow, my_task) if isinstance(my_task.parent.task_spec, BpmnSpecMixin): my_task.parent.task_spec._child_complete_hook(my_task) + + def _on_ready_before_hook(self, my_task): + self.sub_workflow = self.create_sub_workflow(my_task) + 
self._integrate_subworkflow_tree(my_task, self.sub_workflow) + + def _on_ready_hook(self, my_task): + # Assign variables, if so requested. + for child in self.sub_workflow.task_tree.children: + for assignment in self.in_assign: + assignment.assign(my_task, child) + + self._predict(my_task) + for child in self.sub_workflow.task_tree.children: + child.task_spec._update(child) + + def serialize(self, serializer): + return serializer.serialize_call_activity(self) + @classmethod + def deserialize(self, serializer, wf_spec, s_state): + return serializer.deserialize_call_activity(wf_spec, s_state, CallActivity) + + + diff --git a/SpiffWorkflow/bpmn/specs/EndEvent.py b/SpiffWorkflow/bpmn/specs/EndEvent.py index b72bdda5b..2f98846e7 100644 --- a/SpiffWorkflow/bpmn/specs/EndEvent.py +++ b/SpiffWorkflow/bpmn/specs/EndEvent.py @@ -71,3 +71,11 @@ def _on_complete_hook(self, my_task): my_task.workflow.refresh_waiting_tasks() super(EndEvent, self)._on_complete_hook(my_task) + + + def serialize(self, serializer): + return serializer.serialize_end_event(self) + + @classmethod + def deserialize(self, serializer, wf_spec, s_state): + return serializer.deserialize_end_event(wf_spec, s_state, EndEvent) diff --git a/SpiffWorkflow/bpmn/specs/ExclusiveGateway.py b/SpiffWorkflow/bpmn/specs/ExclusiveGateway.py index 17e06d106..f93bad3cd 100644 --- a/SpiffWorkflow/bpmn/specs/ExclusiveGateway.py +++ b/SpiffWorkflow/bpmn/specs/ExclusiveGateway.py @@ -50,3 +50,11 @@ def test(self): continue if self.default_task_spec is None: raise WorkflowException(self, 'A default output is required.') + + def serialize(self, serializer): + return serializer.serialize_exclusive_gateway(self) + + + @classmethod + def deserialize(self, serializer, wf_spec, s_state): + return serializer.deserialize_exclusive_gateway(wf_spec, s_state) diff --git a/SpiffWorkflow/bpmn/specs/InclusiveGateway.py b/SpiffWorkflow/bpmn/specs/InclusiveGateway.py index 7cf83c4bc..ecc388b47 100644 --- 
a/SpiffWorkflow/bpmn/specs/InclusiveGateway.py +++ b/SpiffWorkflow/bpmn/specs/InclusiveGateway.py @@ -116,3 +116,7 @@ def _has_directed_path_to(self, task, task_spec, done.add(child) q.append(child) return False + + @classmethod + def deserialize(self, serializer, wf_spec, s_state): + return serializer.deserialize_generic(wf_spec, s_state, InclusiveGateway) diff --git a/SpiffWorkflow/bpmn/specs/IntermediateCatchEvent.py b/SpiffWorkflow/bpmn/specs/IntermediateCatchEvent.py index d1e354154..3b8073470 100644 --- a/SpiffWorkflow/bpmn/specs/IntermediateCatchEvent.py +++ b/SpiffWorkflow/bpmn/specs/IntermediateCatchEvent.py @@ -20,7 +20,8 @@ from ...task import Task from .BpmnSpecMixin import BpmnSpecMixin from ...specs.Simple import Simple - +from SpiffWorkflow.bpmn.specs.StartEvent import StartEvent +from SpiffWorkflow.specs.StartTask import StartTask class IntermediateCatchEvent(Simple, BpmnSpecMixin): @@ -39,7 +40,18 @@ def __init__(self, wf_spec, name, event_definition=None, **kwargs): def _update_hook(self, my_task): target_state = getattr(my_task, '_bpmn_load_target_state', None) - if target_state == Task.READY or ( + message = self.event_definition._message_ready(my_task) + if message: + if message[1] != None: + resultVar = message[1] + else: + resultVar = my_task.task_spec.name + '_Response' + my_task.data[resultVar] = message[0] + # this next line actually matters for some start events. 
+ my_task.children = [] + my_task._sync_children(my_task.task_spec.outputs) + super(IntermediateCatchEvent, self)._update_hook(my_task) + elif target_state == Task.READY or ( not my_task.workflow._is_busy_with_restore() and self.event_definition.has_fired(my_task)): super(IntermediateCatchEvent, self)._update_hook(my_task) @@ -54,9 +66,24 @@ def _update_hook(self, my_task): def _on_ready_hook(self, my_task): self._predict(my_task) + def _on_complete_hook(self, my_task): + super(IntermediateCatchEvent, self)._on_complete_hook(my_task) + if isinstance(my_task.parent.task_spec, StartTask): + my_task._set_state(Task.WAITING) + + def accept_message(self, my_task, message): if (my_task.state == Task.WAITING and self.event_definition._accept_message(my_task, message)): self._update(my_task) return True return False + + def serialize(self, serializer): + return serializer.serialize_generic_event(self) + + @classmethod + def deserialize(self, serializer, wf_spec, s_state): + return serializer.deserialize_generic_event(wf_spec, s_state,IntermediateCatchEvent) + + diff --git a/SpiffWorkflow/bpmn/specs/IntermediateThrowEvent.py b/SpiffWorkflow/bpmn/specs/IntermediateThrowEvent.py new file mode 100644 index 000000000..18d426f64 --- /dev/null +++ b/SpiffWorkflow/bpmn/specs/IntermediateThrowEvent.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- +from __future__ import division +# Copyright (C) 2012 Matthew Hampton +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301 USA + +from ...task import Task +from .BpmnSpecMixin import BpmnSpecMixin +from ...specs.Simple import Simple + + +class IntermediateThrowEvent(Simple, BpmnSpecMixin): + + """ + Task Spec for a bpmn:intermediateCatchEvent node. + """ + + def __init__(self, wf_spec, name, event_definition=None, **kwargs): + """ + Constructor. + + :param event_definition: the EventDefinition that we must wait for. + """ + super(IntermediateThrowEvent, self).__init__(wf_spec, name, **kwargs) + self.event_definition = event_definition + self.name = name + + def _update_hook(self, my_task): + target_state = getattr(my_task, '_bpmn_load_target_state', None) + if target_state == Task.READY or ( + not my_task.workflow._is_busy_with_restore() and + self.event_definition.has_fired(my_task)): + super(IntermediateThrowEvent, self)._update_hook(my_task) + else: + if not my_task.parent._is_finished(): + return + # here we diverge from the previous + # and we just send the message + if hasattr(self.event_definition,'resultVar'): + self.event_definition._send_message(my_task, self.event_definition.resultVar) + else: + self.event_definition._send_message(my_task) + # if we throw the message, then we need to be completed. 
+ if not my_task.state == Task.READY: + my_task._set_state(Task.READY) + + def _on_ready_hook(self, my_task): + self._predict(my_task) + + def serialize(self, serializer): + return serializer.serialize_generic_event(self) + + @classmethod + def deserialize(self, serializer, wf_spec, s_state): + return serializer.deserialize_generic_event(wf_spec, s_state,IntermediateThrowEvent) + + + diff --git a/SpiffWorkflow/bpmn/specs/ManualTask.py b/SpiffWorkflow/bpmn/specs/ManualTask.py index a80e89b0d..ab634475f 100644 --- a/SpiffWorkflow/bpmn/specs/ManualTask.py +++ b/SpiffWorkflow/bpmn/specs/ManualTask.py @@ -16,13 +16,19 @@ # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301 USA +from SpiffWorkflow.bpmn.specs.BpmnSpecMixin import BpmnSpecMixin + +from SpiffWorkflow.specs import Simple from .UserTask import UserTask -class ManualTask(UserTask): +class ManualTask(Simple, BpmnSpecMixin): + + @classmethod + def deserialize(self, serializer, wf_spec, s_state): + return serializer.deserialize_generic(wf_spec, s_state, ManualTask) + - """ - Task Spec for a bpmn:manualTask node. 
- """ - pass + def is_engine_task(self): + return False diff --git a/SpiffWorkflow/bpmn/specs/MultiInstanceTask.py b/SpiffWorkflow/bpmn/specs/MultiInstanceTask.py new file mode 100644 index 000000000..922b4b04b --- /dev/null +++ b/SpiffWorkflow/bpmn/specs/MultiInstanceTask.py @@ -0,0 +1,565 @@ +# -*- coding: utf-8 -*- +from __future__ import division, absolute_import + +import copy +import logging +import random +import string +from builtins import range +from uuid import uuid4 +import re +from .ParallelGateway import ParallelGateway +from .ScriptTask import ScriptTask +from ...dmn.specs.BusinessRuleTask import BusinessRuleTask +from ...exceptions import WorkflowException, WorkflowTaskExecException +from ...operators import valueof, is_number +from ...specs.base import TaskSpec +from ...util.impl import get_class +# Copyright (C) 2020 Sartography +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301 USA +from ...task import Task +from ...util.deep_merge import DeepMerge + +LOG = logging.getLogger(__name__) + + +def gendict(path, d): + if len(path) == 0: + return d + else: + return gendict(path[:-1], {path[-1]: d}) + + + +class MultiInstanceTask(TaskSpec): + """ + When executed, this task performs a split on the current task. 
+ The number of outgoing tasks depends on the runtime value of a + specified data field. + If more than one input is connected, the task performs an implicit + multi merge. + + This task has one or more inputs and may have any number of outputs. + """ + + def __init__(self, wf_spec, name, times, **kwargs): + """ + Constructor. + + :type wf_spec: WorkflowSpec + :param wf_spec: A reference to the workflow specification. + :type name: str + :param name: The name of the task spec. + :type times: int or :class:`SpiffWorkflow.operators.Term` + :param times: The number of tasks to create. + :type kwargs: dict + :param kwargs: See :class:`SpiffWorkflow.specs.TaskSpec`. + """ + if times is None: + raise ValueError('times argument is required') + self.times = times + self.elementVar = None + self.collection = None + self.expanded = 1 # this code never gets run + TaskSpec.__init__(self, wf_spec, name, **kwargs) + + def _find_my_task(self, task): + for thetask in task.workflow.task_tree: + if thetask.thread_id != task.thread_id: + continue + if thetask.task_spec == self: + return thetask + return None + + def _on_trigger(self, task_spec): + """ + May be called after execute() was already completed to create an + additional outbound task. + """ + + # Find a Task for this TaksSpec. 
+ + my_task = self._find_my_task(task_spec) + LOG.debug(my_task.get_name() + 'trigger') + if my_task._has_state(Task.COMPLETED): + state = Task.READY + else: + state = Task.FUTURE + for output in self.outputs: + new_task = my_task._add_child(output, state) + new_task.triggered = True + output._predict(new_task) + + def _check_inputs(self, my_task): + if self.collection is None: + return + variable = valueof(my_task, self.times, + 1) # look for variable in context, if we don't find it, default to 1 + if self.times.name == self.collection.name and type(variable) == type( + []): + raise WorkflowTaskExecException(my_task, + 'If we are updating a collection, then the collection must be a dictionary.') + + def _get_count(self, my_task): + """ + self.times has the text entered in the BPMN model. + It could be just a number - in this case return the number + it could be a variable name - so we get the variable value from my_task + the variable could be a number (text representation??) - in this case return the integer value of the number + it could be a list of records - in this case return the cardinality of the list + it could be a dict with a bunch of keys - it this case return the cardinality of the keys + """ + + if is_number(self.times.name): + return int(self.times.name) + variable = valueof(my_task, self.times, + 1) # look for variable in context, if we don't find it, default to 1 + + if is_number(variable): + return int(variable) + if isinstance(variable,list): + return len(variable) + if isinstance(variable,dict): + return len(variable.keys()) + return 1 # we shouldn't ever get here, but just in case return a sane value. 
+ + def _get_current_var(self, my_task, pos): + variable = valueof(my_task, self.times, 1) + if is_number(variable): + return pos + if isinstance(variable,list) and len(variable) >= pos: + return variable[pos - 1] + elif isinstance(variable,dict) and len(list(variable.keys())) >= pos: + return variable[list(variable.keys())[pos - 1]] + else: + return pos + + def _get_predicted_outputs(self, my_task): + split_n = self._get_count(my_task) + + # Predict the outputs. + outputs = [] + for i in range(split_n): + outputs += self.outputs + + return outputs + + def _build_gateway_name(self,position): + """ + Build a unique name for each task - need to be the + same over save/restore of the workflow spec. + """ + base = 'Gateway_for_' + str(self.name) + "_" + position + LOG.debug("MI New Gateway " + base ) + return base + + def _add_gateway(self, my_task): + """ Generate parallel gateway tasks on either side of the current task. + This emulates a standard BPMN pattern of having parallel tasks between + two parallel gateways. + Once we have set up the gateways, we write a note into our internal data so that + we don't do it again. + """ + + if my_task.parent.task_spec.name[:11] == 'Gateway_for': + LOG.debug("MI Recovering from save/restore") + return + split_n = self._get_count(my_task) + expanded = getattr(self, 'expanded', 1) + if split_n >= expanded: + setattr(self, 'expanded', split_n) + + LOG.debug("MI being augmented") + # build the gateway specs and the tasks. + # Spiff wants a distinct spec for each task + # that it has in the workflow or it will throw an error + + + # I've encountered a case where the task_spec tree has already been expanded + # such as a workflow that has multiple exclusive gateways - + # in this case I try to detect it and use the task_spec tree as it already + # is without fixing things up. 
+ + startgatewayname = self._build_gateway_name('start') + start_gw_spec = my_task.workflow.get_task_spec_from_name(startgatewayname) + if start_gw_spec is not None: + return + start_gw_spec = ParallelGateway(self._wf_spec, + self._build_gateway_name('start'), + triggered=False, + description="Begin Gateway") + start_gw = Task(my_task.workflow, task_spec=start_gw_spec) + + endgatewayname = self._build_gateway_name('end') + + + gw_spec = ParallelGateway(self._wf_spec, endgatewayname, + triggered=False, description="End Gateway") + end_gw = Task(my_task.workflow, task_spec=gw_spec) + + # Set up the parent task and insert it into the workflow + + my_task.parent.task_spec.outputs = [x for x in my_task.parent.task_spec.outputs if x != my_task.task_spec] + # in the case that our parent is a gateway with a default route, + # we need to ensure that the default route is empty + # so that connect can set it up properly + if hasattr(my_task.parent.task_spec,'default_task_spec') and \ + my_task.parent.task_spec.default_task_spec == my_task.task_spec.name: + my_task.parent.task_spec.default_task_spec = None + my_task.parent.task_spec.connect(start_gw_spec) + else: + my_task.parent.task_spec.outputs.append(start_gw_spec) + start_gw_spec.inputs.append(my_task.parent.task_spec) + # here we had assumed that the only child of the parent was us - + # this is an error - we need to find any child of the parent that is us + # and replace it with the start gateway. 
+ newchildren = [] + for child in my_task.parent.children: + if child == my_task: + newchildren.append(start_gw) + else: + newchildren.append(child) + my_task.parent.children = newchildren + start_gw.parent = my_task.parent + my_task.parent = start_gw + start_gw_spec.connect(self) + start_gw.children = [my_task] + + # transfer my outputs to the ending gateway and set up the + # child parent links + gw_spec.outputs = self.outputs.copy() + self.connect(gw_spec) + self.outputs = [gw_spec] + end_gw.parent = my_task + my_task.children = [end_gw] + + def multiinstance_info(self, my_task): + split_n = self._get_count(my_task) + + runtimes = int(my_task._get_internal_data('runtimes', + 1)) # set a default if not already run + loop = False + parallel = False + sequential = False + + if my_task.task_spec.loopTask: + loop = True + elif my_task.task_spec.isSequential: + sequential = True + else: + parallel = True + + return {'is_looping': loop, + 'is_sequential_mi': sequential, + 'is_parallel_mi': parallel, + 'mi_count': split_n, + 'mi_index': runtimes} + + def _fix_task_spec_tree(self,my_task): + """ + Make sure the task spec tree aligns with our children. 
+ """ + for x in range(len(my_task.parent.children)-1): + new_task_spec = self._make_new_task_spec(my_task.task_spec,my_task,x) + #new_task_spec = copy.copy(my_task.task_spec) + self.outputs[0].inputs.append(new_task_spec) + + def _make_new_task_spec(self,proto_task_spec,my_task,suffix): + + new_task_spec = copy.copy(proto_task_spec) + new_task_spec.name = new_task_spec.name + "_%d" % suffix + new_task_spec.id = str(new_task_spec.id) + "_%d" % suffix + my_task.workflow.spec.task_specs[new_task_spec.name] = new_task_spec # add to registry + return new_task_spec + + def _predict_hook(self, my_task): + + LOG.debug(my_task.get_name() + 'pre hook') + + split_n = self._get_count(my_task) + runtimes = int(my_task._get_internal_data('runtimes', + 1)) # set a default if not already run + + my_task._set_internal_data(splits=split_n, runtimes=runtimes) + if not self.elementVar: + self.elementVar = my_task.task_spec.name + "_CurrentVar" + + my_task.data[self.elementVar] = copy.copy( + self._get_current_var(my_task, runtimes)) + + # Create the outgoing tasks. + outputs = [] + # The MultiInstance class that this was based on actually + # duplicates the outputs - this caused our use case problems + + # In the special case that this is a Parallel multiInstance, we need + # to expand the children in the middle. This method gets called + # during every pass through the tree, so we need to wait until our + # real cardinality gets updated to expand the tree. + if (not self.isSequential): + # Each time we call _add_gateway - the contents should only + # happen once + self._add_gateway(my_task) + + + for tasknum in range(len(my_task.parent.children)): + task = my_task.parent.children[tasknum] + # we had an error on save/restore that was causing a problem down the line + # basically every task that we have expanded out needs its own task_spec. + # the save restore gets the right thing in the child, but not on each of the + # intermediate tasks. 
+ if task.task_spec != task.task_spec.outputs[0].inputs[tasknum]: + LOG.debug("fix up save/restore in predict") + task.task_spec = task.task_spec.outputs[0].inputs[tasknum] + + if len(my_task.parent.children) < split_n: + # expand the tree + for x in range(split_n - len(my_task.parent.children)): + # here we generate a distinct copy of our original task and spec for each + # parallel instance, and hook them up into the task tree + LOG.debug("MI creating new child & task spec") + new_child = copy.copy(my_task) + new_child.id = uuid4() + # I think we will need to update both every variables + # internal data and the copy of the public data to get the + # variables correct + new_child.internal_data = copy.copy(my_task.internal_data) + + new_child.internal_data[ + 'runtimes'] = x + 2 # working with base 1 and we already have one done + + new_child.data = copy.copy(my_task.data) + new_child.data[self.elementVar] = self._get_current_var(my_task, + x + 2) + + new_child.children = [] # make sure we have a distinct list of children for + # each child. The copy is not a deep copy, and + # I was having problems with tasks sharing + # their child list. + + # NB - at this point, many of the tasks have a null children, but + # Spiff will actually generate the child when it rolls through and + # does a sync children - it is enough at this point to + # have the task spec in the right place. 
+ new_task_spec = self._make_new_task_spec(my_task.task_spec,my_task,x) + + new_child.task_spec = new_task_spec + + self.outputs[0].inputs.append(new_task_spec) + my_task.parent.children.append(new_child) + my_task.parent.task_spec.outputs.append(new_task_spec) + else: + LOG.debug("parent child length:" + str( + len(my_task.task_spec.outputs))) + elif not self.loopTask: + # this should be only for SMI and not looping tasks - + # we need to patch up the children and make sure they chain correctly + # this is different from PMI because the children all link together, not to + # the gateways on both ends. + # first let's check for a task in the task spec tree + expanded = getattr(self, 'expanded', 1) + if split_n >= expanded: + setattr(self, 'expanded', split_n) + + + if not (expanded == split_n): + + + # # this next part is only critical for when we are re-loading the task_tree + # # but not the task_spec tree - it gets hooked up incorrectly. It would be great + # # if I can make them both work the same + # # as it is, it will need to be re-factored so that it works correctly with SMI + # for tasknum in range(len(my_task.parent.children)): + # task = my_task.parent.children[tasknum] + # # we had an error on save/restore that was causing a problem down the line + # # basically every task that we have expanded out needs its own task_spec. + # # the save restore gets the right thing in the child, but not on each of the + # # intermediate tasks. 
+ # if task.task_spec != task.task_spec.outputs[0].inputs[tasknum]: + # LOG.debug("fix up save/restore in predict") + # task.task_spec = task.task_spec.outputs[0].inputs[tasknum] + my_task_copy = copy.copy(my_task) +# my_task_children = my_task.children + current_task = my_task + current_task_spec = self + proto_task_spec = copy.copy(self) + + + + if expanded < split_n: + # expand the tree + + for x in range(split_n - expanded): + # here we generate a distinct copy of our original task and spec for each + # parallel instance, and hook them up into the task tree + LOG.debug("MI creating new child & task spec") + new_child = copy.copy(my_task_copy) + new_child.id = uuid4() + # I think we will need to update both every variables + # internal data and the copy of the public data to get the + # variables correct + new_child.internal_data = copy.copy(my_task_copy.internal_data) + + new_child.internal_data[ + 'runtimes'] = x + 2 # working with base 1 and we already have one done + + new_child.data = copy.copy(my_task_copy.data) + new_child.data[self.elementVar] = self._get_current_var(my_task_copy, + x + 2) + + new_child.children = copy.copy(my_task_copy.children) # make sure we have a distinct list of + for child in new_child.children: + child.parent=new_child + # children for + + # NB - at this point, many of the tasks have a null children, but + # Spiff will actually generate the child when it rolls through and + # does a sync children - it is enough at this point to + # have the task spec in the right place. 
+ new_task_spec = self._make_new_task_spec(proto_task_spec,my_task,x) + new_child.task_spec = new_task_spec + new_child._set_state(Task.MAYBE) + + + #for nextitem in current_task_spec.outputs: + # nextitem.inputs = [new_task_spec] + current_task_spec.outputs = [new_task_spec] + new_task_spec.inputs = [current_task_spec] + current_task.children = [new_child] + new_child.parent = current_task + + current_task = new_child + current_task_spec = new_task_spec + + outputs += self.outputs + if my_task._is_definite(): + my_task._sync_children(outputs, Task.FUTURE) + else: + my_task._sync_children(outputs, Task.LIKELY) + + def _build_class_names(self): + classes = [BusinessRuleTask,ScriptTask] + return {x.__module__ + "."+x.__name__:x for x in classes} + + def _on_complete_hook(self, my_task): + classes = self._build_class_names() + if my_task.task_spec.prevtaskclass in classes.keys(): + super()._on_complete_hook(my_task) + + + self._check_inputs(my_task) + runcount = self._get_count(my_task) + runtimes = int(my_task._get_internal_data('runtimes', 1)) + + if self.collection is not None: + colvarname = self.collection.name + else: + colvarname = my_task.task_spec.name + + collect = valueof(my_task, self.collection, {}) + + # if we are updating the same collection as was our loopcardinality + # then all the keys should be there and we can use the sorted keylist + # if not, we use an integer - we should be guaranteed that the + # collection is a dictionary + if self.collection is not None and \ + self.times.name == self.collection.name: + keys = list(collect.keys()) + if len(keys)= len(self.inputs), waiting_tasks) + + @classmethod + def deserialize(self, serializer, wf_spec, s_state): + return serializer.deserialize_generic(wf_spec, s_state, ParallelGateway) diff --git a/SpiffWorkflow/bpmn/specs/ScriptTask.py b/SpiffWorkflow/bpmn/specs/ScriptTask.py index accd9b1c2..387b0b862 100644 --- a/SpiffWorkflow/bpmn/specs/ScriptTask.py +++ b/SpiffWorkflow/bpmn/specs/ScriptTask.py @@ 
-46,8 +46,8 @@ def _on_complete_hook(self, task): return assert not task.workflow.read_only try: - task.workflow.script_engine.execute(task, self.script, **task.data) - except Exception: + task.workflow.script_engine.execute(task, self.script, task.data) + except Exception as e: LOG.error('Error executing ScriptTask; task=%r', task, exc_info=True) # set state to WAITING (because it is definitely not COMPLETED) @@ -55,5 +55,13 @@ def _on_complete_hook(self, task): # maybe upstream someone will be able to handle this situation task._setstate(Task.WAITING, force=True) raise WorkflowTaskExecException( - task, 'Error during script execution') + task, 'Error during script execution:' + str(e)) super(ScriptTask, self)._on_complete_hook(task) + + def serialize(self, serializer): + return serializer.serialize_script_task(self) + + @classmethod + def deserialize(self, serializer, wf_spec, s_state): + return serializer.deserialize_script_task(wf_spec, s_state) + diff --git a/SpiffWorkflow/bpmn/specs/StartEvent.py b/SpiffWorkflow/bpmn/specs/StartEvent.py index 579a3c302..cc8f979e9 100644 --- a/SpiffWorkflow/bpmn/specs/StartEvent.py +++ b/SpiffWorkflow/bpmn/specs/StartEvent.py @@ -27,5 +27,14 @@ class StartEvent(Simple, BpmnSpecMixin): Task Spec for a bpmn:startEvent node. 
""" - def __init__(self, wf_spec, name, **kwargs): + def __init__(self, wf_spec, name, event_definition=None, **kwargs): super(StartEvent, self).__init__(wf_spec, name, **kwargs) + self.event_definition = event_definition + + def serialize(self, serializer): + return serializer.serialize_generic_event(self) + + @classmethod + def deserialize(self, serializer, wf_spec, s_state): + return serializer.deserialize_generic_event(wf_spec, s_state,StartEvent) + diff --git a/SpiffWorkflow/bpmn/specs/UnstructuredJoin.py b/SpiffWorkflow/bpmn/specs/UnstructuredJoin.py index cba33ec45..61bbfbd54 100644 --- a/SpiffWorkflow/bpmn/specs/UnstructuredJoin.py +++ b/SpiffWorkflow/bpmn/specs/UnstructuredJoin.py @@ -17,6 +17,9 @@ # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301 USA import logging + +from ... import WorkflowException + from ...task import Task from .BpmnSpecMixin import BpmnSpecMixin from ...specs.Join import Join @@ -54,9 +57,10 @@ def _get_inputs_with_tokens(self, my_task): if task.parent._has_state(Task.COMPLETED) and ( task._has_state(Task.WAITING) or task == my_task): if task.parent.task_spec in completed_inputs: - raise NotImplementedError( - "Unsupported looping behaviour: two threads waiting " - "on the same sequence flow.") + raise(WorkflowException + (task.task_spec, + "Unsupported looping behaviour: two threads waiting" + " on the same sequence flow.")) completed_inputs.add(task.parent.task_spec) else: waiting_tasks.append(task.parent) @@ -122,6 +126,11 @@ def _do_join(self, my_task): or changed > last_changed.parent.last_state_change: last_changed = task + # Update data from all the same thread tasks. + thread_tasks.sort(key=lambda t: t.parent.last_state_change) + for task in thread_tasks: + self.data.update(task.data) + # Mark the identified task instances as COMPLETED. The exception # is the most recently changed task, for which we assume READY. 
# By setting the state to READY only, we allow for calling @@ -129,12 +138,14 @@ def _do_join(self, my_task): # (re)built underneath the node. for task in thread_tasks: if task == last_changed: + task.data.update(self.data) self.entered_event.emit(my_task.workflow, my_task) task._ready() else: task.state = Task.COMPLETED task._drop_children() + def _update_hook(self, my_task): if my_task._is_predicted(): diff --git a/SpiffWorkflow/bpmn/specs/event_definitions.py b/SpiffWorkflow/bpmn/specs/event_definitions.py index d9ca1b1a6..19068f6c4 100644 --- a/SpiffWorkflow/bpmn/specs/event_definitions.py +++ b/SpiffWorkflow/bpmn/specs/event_definitions.py @@ -1,6 +1,8 @@ # -*- coding: utf-8 -*- from __future__ import division +from SpiffWorkflow.bpmn.PythonScriptEngine import PythonScriptEngine from builtins import object +import datetime # Copyright (C) 2012 Matthew Hampton # # This library is free software; you can redistribute it and/or @@ -34,6 +36,9 @@ def has_fired(self, my_task): """ return my_task._get_internal_data('event_fired', False) + def _message_ready(self, my_task): + return False + def _accept_message(self, my_task, message): return False @@ -46,7 +51,8 @@ class ThrowingEventDefinition(object): This class is for future functionality. It will define the methods needed on an event definition that can be Thrown. """ - + def _send_message(self, my_task, message): + return False class MessageEventDefinition(CatchingEventDefinition, ThrowingEventDefinition): """ @@ -54,13 +60,16 @@ class MessageEventDefinition(CatchingEventDefinition, ThrowingEventDefinition): for Message Events. """ - def __init__(self, message): + def __init__(self, message,payload="",name="",resultVar=None): """ Constructor. :param message: The message to wait for. 
""" self.message = message + self.payload = payload + self.resultVar = resultVar + self.name = name def has_fired(self, my_task): """ @@ -69,12 +78,138 @@ def has_fired(self, my_task): """ return my_task._get_internal_data('event_fired', False) + def _message_ready(self, my_task): + waiting_messages = my_task.workflow.task_tree.internal_data.get('messages',{}) + if (self.message in waiting_messages.keys()): + evaledpayload = waiting_messages[self.message] + del(waiting_messages[self.message]) + return evaledpayload + return False + + def _send_message(self, my_task,resultVar): + payload = PythonScriptEngine().evaluate(self.payload, **my_task.data) + my_task.workflow.message(self.message,payload,resultVar=resultVar) + return True + def _accept_message(self, my_task, message): if message != self.message: return False self._fire(my_task) return True + @classmethod + def deserialize(self, dct): + return MessageEventDefinition(dct['message'],dct['payload'],dct['name'],dct['resultVar']) + + def serialize(self): + retdict = {} + module_name = self.__class__.__module__ + retdict['classname'] = module_name + '.' + self.__class__.__name__ + retdict['message'] = self.message + retdict['payload'] = self.payload + retdict['resultVar'] = self.resultVar + retdict['name'] = self.name + return retdict + +class SignalEventDefinition(CatchingEventDefinition, ThrowingEventDefinition): + """ + The MessageEventDefinition is the implementation of event definition used + for Message Events. + """ + + def __init__(self, message,name=''): + """ + Constructor. + + :param message: The message to wait for. + """ + # breakpoint() + self.message = message + self.name = name + + + def has_fired(self, my_task): + """ + Returns true if the message was received while the task was in a + WAITING state. 
+ """ + return my_task._get_internal_data('event_fired', False) + + def _message_ready(self, my_task): + waiting_messages = my_task.workflow.task_tree.internal_data.get('signals',{}) + if (self.message in waiting_messages.keys()) : + return (self.message,None) + return False + + def _send_message(self, my_task): + my_task.workflow.signal(self.message) + return True + + def _accept_message(self, my_task, message): + if message != self.message: + return False + self._fire(my_task) + return True + + @classmethod + def deserialize(self, dct): + return SignalEventDefinition(dct['message'],dct['name']) + + def serialize(self): + retdict = {} + module_name = self.__class__.__module__ + retdict['classname'] = module_name + '.' + self.__class__.__name__ + retdict['message'] = self.message + retdict['name'] = self.name + return retdict + + +class CancelEventDefinition(CatchingEventDefinition): + """ + The CancelEventDefinition is the implementation of event definition used + for Cancel Events. + """ + + def __init__(self, message, name=''): + """ + Constructor. + + :param message: The message to wait for. + """ + self.message = message + self.name = name + + def has_fired(self, my_task): + """ + Returns true if the message was received while the task was in a + WAITING state. + """ + return my_task._get_internal_data('event_fired', False) + + def _message_ready(self, my_task): + waiting_messages = my_task.workflow.task_tree.internal_data.get('cancels',{}) + if ('TokenReset' in waiting_messages.keys()) : + return ('TokenReset', None) + return False + + def _accept_message(self, my_task, message): + if message != self.message: + return False + self._fire(my_task) + return True + + @classmethod + def deserialize(self, dct): + return CancelEventDefinition(dct['message'],dct['name']) + + def serialize(self): + retdict = {} + module_name = self.__class__.__module__ + retdict['classname'] = module_name + '.' 
+ self.__class__.__name__ + retdict['message'] = self.message + retdict['name'] = self.name + return retdict + class TimerEventDefinition(CatchingEventDefinition): """ @@ -101,6 +236,16 @@ def has_fired(self, my_task): expression is before datetime.datetime.now() """ dt = my_task.workflow.script_engine.evaluate(my_task, self.dateTime) + if isinstance(dt,datetime.timedelta): + if my_task._get_internal_data('start_time',None) is not None: + start_time = datetime.datetime.strptime(my_task._get_internal_data('start_time',None),'%Y-%m-%d ' + '%H:%M:%S.%f') + elapsed = datetime.datetime.now() - start_time + return elapsed > dt + else: + my_task.internal_data['start_time'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f') + return False + if dt is None: return False if dt.tzinfo: @@ -109,3 +254,16 @@ def has_fired(self, my_task): else: now = datetime.datetime.now() return now > dt + + @classmethod + def deserialize(self, dct): + return TimerEventDefinition(dct['label'],dct['dateTime']) + + def serialize(self): + retdict = {} + module_name = self.__class__.__module__ + retdict['classname'] = module_name + '.' 
+ self.__class__.__name__ + retdict['label'] = self.label + retdict['dateTime'] = self.dateTime + return retdict + diff --git a/SpiffWorkflow/bpmn/workflow.py b/SpiffWorkflow/bpmn/workflow.py index d5c6b3d86..543efae34 100644 --- a/SpiffWorkflow/bpmn/workflow.py +++ b/SpiffWorkflow/bpmn/workflow.py @@ -87,12 +87,17 @@ def refresh_waiting_tasks(self): for my_task in self.get_tasks(Task.WAITING): my_task.task_spec._update(my_task) - def get_ready_user_tasks(self): + def get_ready_user_tasks(self,lane=None): """ Returns a list of User Tasks that are READY for user action """ - return [t for t in self.get_tasks(Task.READY) - if not self._is_engine_task(t.task_spec)] + if lane is not None: + return [t for t in self.get_tasks(Task.READY) + if (not self._is_engine_task(t.task_spec)) + and (t.task_spec.lane == lane)] + else: + return [t for t in self.get_tasks(Task.READY) + if not self._is_engine_task(t.task_spec)] def get_waiting_tasks(self): """ @@ -111,6 +116,7 @@ def _is_engine_task(self, task_spec): def _task_completed_notify(self, task): assert (not self.read_only) or self._is_busy_with_restore() + self.last_task = task super(BpmnWorkflow, self)._task_completed_notify(task) def _task_cancelled_notify(self, task): diff --git a/SpiffWorkflow/camunda/README.md b/SpiffWorkflow/camunda/README.md new file mode 100644 index 000000000..daa746ce2 --- /dev/null +++ b/SpiffWorkflow/camunda/README.md @@ -0,0 +1,9 @@ +# BPMN Parser for the Camunda Modeler + +This package provides support for parsing BPMN diagrams provided by Camunda +It is a thin layer on top of the SpiffWorkflow BPMN package with additional +tools for handling User Forms. + +The [Camunda Modeler](https://camunda.com/download/modeler/) is a feature-rich +open source visual editor for authoring and modifying BPMN Diagrams. 
+ diff --git a/SpiffWorkflow/camunda/__init__.py b/SpiffWorkflow/camunda/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/SpiffWorkflow/camunda/parser/CamundaParser.py b/SpiffWorkflow/camunda/parser/CamundaParser.py new file mode 100644 index 000000000..488f4eb22 --- /dev/null +++ b/SpiffWorkflow/camunda/parser/CamundaParser.py @@ -0,0 +1,11 @@ +from SpiffWorkflow.camunda.specs.UserTask import UserTask +from SpiffWorkflow.camunda.parser.UserTaskParser import UserTaskParser +from SpiffWorkflow.bpmn.parser.BpmnParser import BpmnParser, full_tag + + +class CamundaParser(BpmnParser): + OVERRIDE_PARSER_CLASSES = { + full_tag('userTask'): (UserTaskParser, UserTask), + } + + diff --git a/SpiffWorkflow/camunda/parser/UserTaskParser.py b/SpiffWorkflow/camunda/parser/UserTaskParser.py new file mode 100644 index 000000000..580bc0529 --- /dev/null +++ b/SpiffWorkflow/camunda/parser/UserTaskParser.py @@ -0,0 +1,60 @@ +from SpiffWorkflow.bpmn.parser.TaskParser import TaskParser, xpath_eval +from SpiffWorkflow.camunda.specs.UserTask import Form, FormField, EnumFormField + +CAMUNDA_MODEL_NS = 'http://camunda.org/schema/1.0/bpmn' + + +class UserTaskParser(TaskParser): + + def __init__(self, process_parser, spec_class, node): + super(UserTaskParser, self).__init__(process_parser, spec_class, node) + self.xpath = xpath_eval(node, extra_ns={'camunda': CAMUNDA_MODEL_NS}) + + """ + Base class for parsing User Tasks + """ + pass + + def create_task(self): + form = self.get_form() + return self.spec_class(self.spec, self.get_task_spec_name(), form, + lane=self.get_lane(), description=self.node.get('name', None)) + + def get_form(self): + """Camunda provides a simple form builder, this will extract the + details from that form and construct a form model from it. 
""" + form = Form() + try: + form.key = self.node.attrib['{' + CAMUNDA_MODEL_NS + '}formKey'] + except (KeyError): + return form + for xml_field in self.xpath('.//camunda:formData/camunda:formField'): + if xml_field.get('type') == 'enum': + field = self.get_enum_field(xml_field) + else: + field = FormField() + + field.id = xml_field.get('id') + field.type = xml_field.get('type') + field.label = xml_field.get('label') + field.default_value = xml_field.get('defaultValue') + + for child in xml_field: + if child.tag == '{' + CAMUNDA_MODEL_NS + '}properties': + for p in child: + field.add_property(p.get('id'), p.get('value')) + + if child.tag == '{' + CAMUNDA_MODEL_NS + '}validation': + for v in child: + field.add_validation(v.get('name'), v.get('config')) + + form.add_field(field) + return form + + def get_enum_field(self, xml_field): + field = EnumFormField() + + for child in xml_field: + if child.tag == '{' + CAMUNDA_MODEL_NS + '}value': + field.add_option(child.get('id'), child.get('name')) + return field diff --git a/SpiffWorkflow/camunda/parser/__init__.py b/SpiffWorkflow/camunda/parser/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/SpiffWorkflow/camunda/serializer/__init__.py b/SpiffWorkflow/camunda/serializer/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/SpiffWorkflow/camunda/specs/UserTask.py b/SpiffWorkflow/camunda/specs/UserTask.py new file mode 100644 index 000000000..b9285fe0c --- /dev/null +++ b/SpiffWorkflow/camunda/specs/UserTask.py @@ -0,0 +1,144 @@ +# -*- coding: utf-8 -*- +from __future__ import division + +from SpiffWorkflow.bpmn.specs.UserTask import UserTask +from SpiffWorkflow.bpmn.specs.BpmnSpecMixin import BpmnSpecMixin +from SpiffWorkflow.specs import Simple + + +class UserTask(UserTask, BpmnSpecMixin): + + def __init__(self, wf_spec, name, form, **kwargs): + """ + Constructor. 
+ :param form: the information that needs to be provided by the user, + as parsed from the camunda xml file's form details. + """ + super(UserTask, self).__init__(wf_spec, name, **kwargs) + self.form = form + + + """ + Task Spec for a bpmn:userTask node. + """ + + def _on_trigger(self, my_task): + pass + + def is_engine_task(self): + return False + + def serialize(self, serializer): + return serializer.serialize_user_task(self) + + @classmethod + def deserialize(self, serializer, wf_spec, s_state): + return serializer.deserialize_user_task(wf_spec, s_state) + + +class FormField(object): + def __init__(self, form_type="text"): + self.id = "" + self.type = form_type + self.label = "" + self.default_value = "" + self.properties = [] + self.validation = [] + + def add_property(self, property_id, value): + self.properties.append(FormFieldProperty(property_id, value)) + + def add_validation(self, name, config): + self.validation.append(FormFieldValidation(name, config)) + + def get_property(self, property_id): + for prop in self.properties: + if prop.id == property_id: + return prop.value + + def has_property(self, property_id): + return self.get_property(property_id) is not None + + def get_validation(self, name): + for v in self.validation: + if v.name == name: + return v.config + + def has_validation(self, name): + return self.get_validation(name) is not None + + def jsonable(self): + return self.__dict__ + +class EnumFormField(FormField): + def __init__(self): + super(EnumFormField, self).__init__("enum") + self.options = [] + + def add_option(self, option_id, name): + self.options.append(EnumFormFieldOption(option_id, name)) + + def jsonable(self): + return self.__dict__ + + +class EnumFormFieldOption: + def __init__(self, option_id, name): + self.id = option_id + self.name = name + + def jsonable(self): + return self.__dict__ + + +class FormFieldProperty: + def __init__(self, property_id, value): + self.id = property_id + self.value = value + + def jsonable(self): 
+ return self.__dict__ + + +class FormFieldValidation: + def __init__(self, name, config): + self.name = name + self.config = config + + def jsonable(self): + return self.__dict__ + + +class Form: + def __init__(self,init=None): + self.key = "" + self.fields = [] + if init: + self.from_dict(init) + + def add_field(self, field): + self.fields.append(field) + + def jsonable(self): + return self.__dict__ + + def from_dict(self,formdict): + self.key = formdict['key'] + for field in formdict['fields']: + if field.get('options',None): + newfield = EnumFormField() + for option in field['options']: + newfield.add_option(option['id'],option['name']) + else: + newfield = FormField() + newfield.id = field['id'] + newfield.default_value = field['default_value'] + newfield.label = field['label'] + newfield.type = field['type'] + for prop in field['properties']: + newfield.add_property(prop['id'],prop['value']) + for validation in field['validation']: + newfield.add_validation(validation['name'],validation['config']) + self.add_field(newfield) + + diff --git a/SpiffWorkflow/camunda/specs/__init__.py b/SpiffWorkflow/camunda/specs/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/SpiffWorkflow/dmn/__init__.py b/SpiffWorkflow/dmn/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/SpiffWorkflow/dmn/engine/DMNEngine.py b/SpiffWorkflow/dmn/engine/DMNEngine.py new file mode 100644 index 000000000..27dc707e8 --- /dev/null +++ b/SpiffWorkflow/dmn/engine/DMNEngine.py @@ -0,0 +1,99 @@ +import logging +import Levenshtein +import re +from SpiffWorkflow.bpmn.DMNPythonScriptEngine import DMNPythonScriptEngine + + +class DMNEngine: + """ + Handles the processing of a decision table. 
+ """ + + + def __init__(self, decisionTable, debug=None): + self.decisionTable = decisionTable + self.debug = debug + self.scriptEngine = DMNPythonScriptEngine() + self.logger = logging.getLogger('DMNEngine') + if not self.logger.handlers: + self.logger.addHandler(logging.StreamHandler()) + self.logger.setLevel(getattr(logging, 'DEBUG' if debug else 'INFO')) + + def decide(self, *inputArgs, **inputKwargs): + for rule in self.decisionTable.rules: + self.logger.debug('Checking rule %s (%s)...' % (rule.id, rule.description)) + + res = self.__checkRule(rule, *inputArgs, **inputKwargs) + self.logger.debug(' Match? %s' % (res)) + if res: + self.logger.debug(' Return %s (%s)' % (rule.id, rule.description)) + return rule + + def __checkRule(self, rule, *inputData, **inputKwargs): + for idx, inputEntry in enumerate(rule.inputEntries): + input = self.decisionTable.inputs[idx] + + self.logger.debug(' Checking input entry %s (%s: %s)...' % (inputEntry.id, input.label, inputEntry.lhs)) + # if inputData: + # self.logger.debug('inputData:', inputData) + # if inputKwargs: + # self.logger.debug('inputKwargs:', inputKwargs) + local_data = {} + local_data.update(inputKwargs) + if inputData and isinstance(inputData[idx], dict): + local_data.update(inputData[idx]) + + for lhs in inputEntry.lhs: + if lhs is not None: + inputVal = DMNEngine.__getInputVal(inputEntry, idx, *inputData, **inputKwargs) + else: + inputVal = None + try: + #PythonScriptEngine.convertToBox(DMNPythonScriptEngine(),local_data) + if not input.scriptEngine.eval_dmn_expression(inputVal, lhs, **local_data): + return False + except NameError as e: + x = re.match("name '(.+)' is not defined",str(e)) + name = x.group(1) + distances = [(key,Levenshtein.distance(name,key)) for key in local_data.keys()] + distances.sort(key=lambda x: x[1]) + + raise NameError("Failed to execute " + "expression: '%s' is '%s' in the " + "Row with annotation '%s'. The following " + "value does not exist: %s - did you mean one of %s?" 
% ( + inputVal, lhs, rule.description, str(e),str([x[0] for x in distances[:3]]))) + except Exception as e: + raise Exception("Failed to execute " + "expression: '%s' is '%s' in the " + "Row with annotation '%s', %s" % ( + inputVal, lhs, rule.description, str(e))) + else: + # Empty means ignore decision value + self.logger.debug(' Value not defined') + continue # Check the other operators/columns + + self.logger.debug(' All inputs checked') + return True + + @staticmethod + def __getInputVal(inputEntry, idx, *inputData, **inputKwargs): + """ + The input of the decision method can be an expression, args or kwargs. + It prefers an input expression per the Specification, but will fallback + to using inputData if available. Finally it will fall back to the + likely very bad idea of trying to use the label. + + :param inputEntry: + :param idx: + :param inputData: + :param inputKwargs: + :return: + """ + if inputEntry.input.expression: + return inputEntry.input.expression + elif inputData: + return "%r" % inputData[idx] + else: + # Backwards compatibility + return "%r" % inputKwargs[inputEntry.input.label] diff --git a/SpiffWorkflow/dmn/engine/__init__.py b/SpiffWorkflow/dmn/engine/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/SpiffWorkflow/dmn/parser/BpmnDmnParser.py b/SpiffWorkflow/dmn/parser/BpmnDmnParser.py new file mode 100644 index 000000000..d35cf0241 --- /dev/null +++ b/SpiffWorkflow/dmn/parser/BpmnDmnParser.py @@ -0,0 +1,55 @@ +import glob + +from SpiffWorkflow.bpmn.parser.util import xpath_eval + +from SpiffWorkflow.bpmn.parser.BpmnParser import BpmnParser, full_tag +from SpiffWorkflow.dmn.parser.BusinessRuleTaskParser import BusinessRuleTaskParser +from SpiffWorkflow.dmn.parser.DMNParser import DMNParser +from SpiffWorkflow.dmn.specs.BusinessRuleTask import BusinessRuleTask +from lxml import etree + +class BpmnDmnParser(BpmnParser): + + OVERRIDE_PARSER_CLASSES = { + full_tag('businessRuleTask'): (BusinessRuleTaskParser, + 
BusinessRuleTask) + } + + def __init__(self): + super().__init__() + self.dmn_parsers = {} + self.dmn_parsers_by_name = {} + + def add_dmn_xml(self, node, svg=None, filename=None): + """ + Add the given lxml representation of the DMN file to the parser's set. + """ + xpath = xpath_eval(node) + dmn_parser = DMNParser( + self, node, svg, filename=filename, doc_xpath=xpath) + self.dmn_parsers[dmn_parser.get_id()] = dmn_parser + self.dmn_parsers_by_name[dmn_parser.get_name()] = dmn_parser + + def add_dmn_file(self, filename): + """ + Add the given DMN filename to the parser's set. + """ + self.add_dmn_files([filename]) + + def add_dmn_files_by_glob(self, g): + """ + Add all filenames matching the provided pattern (e.g. *.bpmn) to the + parser's set. + """ + self.add_dmn_files(glob.glob(g)) + + def add_dmn_files(self, filenames): + """ + Add all filenames in the given list to the parser's set. + """ + for filename in filenames: + f = open(filename, 'r') + try: + self.add_dmn_xml(etree.parse(f).getroot(), filename=filename) + finally: + f.close() diff --git a/SpiffWorkflow/dmn/parser/BusinessRuleTaskParser.py b/SpiffWorkflow/dmn/parser/BusinessRuleTaskParser.py new file mode 100644 index 000000000..011111337 --- /dev/null +++ b/SpiffWorkflow/dmn/parser/BusinessRuleTaskParser.py @@ -0,0 +1,51 @@ +from SpiffWorkflow.bpmn.parser.ValidationException import ValidationException + +from SpiffWorkflow.bpmn.parser.util import xpath_eval + +from SpiffWorkflow.bpmn.specs.BpmnSpecMixin import BpmnSpecMixin + +from SpiffWorkflow.bpmn.parser.TaskParser import TaskParser +from SpiffWorkflow.dmn.engine.DMNEngine import DMNEngine +from SpiffWorkflow.dmn.specs.BusinessRuleTask import BusinessRuleTask + +CAMUNDA_MODEL_NS = 'http://camunda.org/schema/1.0/bpmn' + + +class BusinessRuleTaskParser(TaskParser, BpmnSpecMixin): + dmn_debug = None + + def __init__(self, process_parser, spec_class, node): + super(BusinessRuleTaskParser, self).__init__(process_parser, + spec_class, node) + self.xpath 
= xpath_eval(self.node, extra_ns={'camunda': CAMUNDA_MODEL_NS}) + self.dmnEngine = self._get_engine() + + def _get_engine(self): + decision_ref = self.node.attrib['{' + CAMUNDA_MODEL_NS + '}decisionRef'] + if decision_ref not in self.process_parser.parser.dmn_parsers: + options = ', '.join(list(self.process_parser.parser.dmn_parsers.keys())) + raise ValidationException( + 'No DMN Diagram available with id "%s", Available DMN ids are: %s' %(decision_ref, options), + node=self.node, filename='') + dmnParser = self.parser.dmn_parsers[decision_ref] + dmnParser.parse() + decision = dmnParser.decision + return DMNEngine(decision.decisionTables[0]) + + def create_task(self): + return BusinessRuleTask(self.spec, self.get_task_spec_name(), + dmnEngine=self.dmnEngine, + lane=self.get_lane(), + description=self.node.get('name', None), + ) + + def _on_trigger(self, my_task): + pass + + def serialize(self, serializer, **kwargs): + pass + + @classmethod + def deserialize(cls, serializer, wf_spec, s_state, **kwargs): + pass + diff --git a/SpiffWorkflow/dmn/parser/DMNParser.py b/SpiffWorkflow/dmn/parser/DMNParser.py new file mode 100644 index 000000000..ef90a64c1 --- /dev/null +++ b/SpiffWorkflow/dmn/parser/DMNParser.py @@ -0,0 +1,205 @@ +import re +from decimal import Decimal +from ast import literal_eval +from datetime import datetime + +from SpiffWorkflow.bpmn.parser.util import xpath_eval + +from SpiffWorkflow.dmn.specs.model import Decision, DecisionTable, InputEntry, \ + OutputEntry, Input, Output, Rule +from SpiffWorkflow.bpmn.PythonScriptEngine import PythonScriptEngine +from SpiffWorkflow.bpmn.FeelLikeScriptEngine import FeelLikeScriptEngine +from SpiffWorkflow.bpmn.DMNPythonScriptEngine import DMNPythonScriptEngine + +def get_dmn_ns(node): + """ + Returns the namespace definition for the current DMN + + :param node: the XML node for the DMN document + """ + if 'http://www.omg.org/spec/DMN/20151101/dmn.xsd' in node.nsmap.values(): + return 
'http://www.omg.org/spec/DMN/20151101/dmn.xsd' + elif 'https://www.omg.org/spec/DMN/20191111/MODEL/' in node.nsmap.values(): + return 'https://www.omg.org/spec/DMN/20191111/MODEL/' + return None + + +class DMNParser(object): + """ + Please note this DMN Parser still needs a lot of work. A few key areas + that need to be addressed: + 1. it assumes that only one decision table exists within a decision + 2. it is not always name space aware (I fixed the top level, but could be + cleaner all the way through. + """ + + DT_FORMAT = '%Y-%m-%dT%H:%M:%S' + + def __init__(self, p, node, svg=None, filename=None, doc_xpath=None): + """ + Constructor. + + :param p: the owning BpmnParser instance + :param node: the XML node for the DMN document + :param svg: the SVG representation of this process as a string + (optional) + :param filename: the source BMN filename (optional) + """ + self.parser = p + self.node = node + self.decision = None + self.svg = svg + self.filename = filename + self.doc_xpath = doc_xpath + self.dmn_ns = get_dmn_ns(self.node) + self.xpath = xpath_eval(self.node, {'dmn': self.dmn_ns}) + self.scriptEngine = DMNPythonScriptEngine() + + def parse(self): + self.decision = self._parse_decision(self.node) + + def get_id(self): + """ + Returns the process ID + """ + return self.xpath('dmn:decision[1]')[0].get('id') + + def get_name(self): + """ + Returns the process name (or ID, if no name is included in the file) + """ + return self.xpath('dmn:decision[1]')[0].get('name') + + def _parse_decision(self, root): + decisionElements = list(root) + if len(decisionElements) == 0: + raise Exception('No decisions found') + + if len(decisionElements) > 1: + raise Exception('Multiple decisions found') + + decisionElement = decisionElements[0] + assert decisionElement.tag.endswith( + 'decision'), 'Element %r is not of type "decision"' % ( + decisionElement.tag) + + decision = Decision(decisionElement.attrib['id'], + decisionElement.attrib.get('name', '')) + + # Parse decision 
tables + try: + self._parseDecisionTables(decision, decisionElement) + except Exception as e: + raise Exception("Error in Decision '%s': %s" % (decision.name, str(e))) + + return decision + + def _parseDecisionTables(self, decision, decisionElement): + xpath = xpath_eval(decisionElement, {'dmn': self.dmn_ns}) + for decisionTableElement in xpath('dmn:decisionTable'): + decisionTable = DecisionTable(decisionTableElement.attrib['id'], + decisionTableElement.attrib.get( + 'name', '')) + decision.decisionTables.append(decisionTable) + + # parse inputs + self._parseInputsOutputs(decision, decisionTable, + decisionTableElement) + + def _parseInputsOutputs(self, decision, decisionTable, decisionTableElement): + for element in decisionTableElement: + if element.tag.endswith('input'): + input = self._parseInput(element) + decisionTable.inputs.append(input) + elif element.tag.endswith('output'): + output = self._parseOutput(element) + decisionTable.outputs.append(output) + elif element.tag.endswith('rule'): + rule = self._parseRule(decision, decisionTable, element) + decisionTable.rules.append(rule) + else: + raise Exception( + 'Unknown type in decision table: %r' % (element.tag)) + + def _parseInput(self, inputElement): + typeRef = None + xpath = xpath_eval(inputElement, {'dmn': self.dmn_ns}) + for inputExpression in xpath('dmn:inputExpression'): + + typeRef = inputExpression.attrib.get('typeRef', '') + scriptEngine = self.scriptEngine + engine = inputExpression.attrib.get('expressionLanguage') + if engine == 'feel': + scriptEngine = FeelLikeScriptEngine() + + expressionNode = inputExpression.find('{' + self.dmn_ns + '}text') + if expressionNode is not None: + expression = expressionNode.text + else: + expression = None + + input = Input(inputElement.attrib['id'], + inputElement.attrib.get('label', ''), + inputElement.attrib.get('name', ''), + expression, + scriptEngine, + typeRef) + return input + + def _parseOutput(self, outputElement): + output = 
Output(outputElement.attrib['id'], + outputElement.attrib.get('label', ''), + outputElement.attrib.get('name', ''), + outputElement.attrib.get('typeRef', '')) + return output + + def _parseRule(self, decision, decisionTable, ruleElement): + rule = Rule(ruleElement.attrib['id']) + + inputIdx = 0 + outputIdx = 0 + for child in ruleElement: + # Load description + if child.tag.endswith('description'): + rule.description = child.text + + # Load input entries + elif child.tag.endswith('inputEntry'): + inputEntry = self._parseInputOutputElement(decisionTable, + child, + InputEntry, + inputIdx) + rule.inputEntries.append(inputEntry) + inputIdx += 1 + + # Load output entries + elif child.tag.endswith('outputEntry'): + outputEntry = self._parseInputOutputElement(decisionTable, + child, + OutputEntry, + outputIdx) + rule.outputEntries.append(outputEntry) + outputIdx += 1 + + return rule + + def _parseInputOutputElement(self, decision_table, element, cls, idx): + inputOrOutput = ( + decision_table.inputs if cls == InputEntry else decision_table.outputs if cls == OutputEntry else None)[ + idx] + entry = cls(element.attrib['id'], inputOrOutput) + for child in element: + if child.tag.endswith('description'): + entry.description = child.text + elif child.tag.endswith('text'): + entry.text = child.text + if cls == InputEntry: + entry.lhs.append(entry.text) + elif cls == OutputEntry: + if entry.text and entry.text != '': + py, needsEquals = self.scriptEngine.validateExpression(entry.text) + if not needsEquals: + raise Exception("Malformed Output Expression '%s' " % entry.text) + else: + entry.parsedRef = py + return entry diff --git a/SpiffWorkflow/dmn/parser/__init__.py b/SpiffWorkflow/dmn/parser/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/SpiffWorkflow/dmn/specs/BusinessRuleTask.py b/SpiffWorkflow/dmn/specs/BusinessRuleTask.py new file mode 100644 index 000000000..38128830d --- /dev/null +++ b/SpiffWorkflow/dmn/specs/BusinessRuleTask.py @@ -0,0 +1,47 
@@ +from SpiffWorkflow.exceptions import WorkflowTaskExecException + +from SpiffWorkflow.specs import Simple + +from SpiffWorkflow.bpmn.specs.BpmnSpecMixin import BpmnSpecMixin + + +from ...bpmn.PythonScriptEngine import PythonScriptEngine +from ...util.deep_merge import DeepMerge + + +class BusinessRuleTask(Simple, BpmnSpecMixin): + """ + Task Spec for a bpmn:businessTask (DMB Decision Reference) node. + """ + + def _on_trigger(self, my_task): + pass + + def __init__(self, wf_spec, name, dmnEngine=None, **kwargs): + super().__init__(wf_spec, name, **kwargs) + + self.dmnEngine = dmnEngine + self.res = None + self.resDict = None + + def _on_complete_hook(self, my_task): + try: + convert = PythonScriptEngine() + convert.convertToBox(my_task.data) + self.res = self.dmnEngine.decide(**my_task.data) + if self.res is not None: # it is conceivable that no rules fire. + self.resDict = self.res.outputAsDict(my_task.data) + my_task.data = DeepMerge.merge(my_task.data,self.resDict) + convert.convertFromBox(my_task.data) + super(BusinessRuleTask, self)._on_complete_hook(my_task) + except Exception as e: + raise WorkflowTaskExecException(my_task, str(e)) + + def serialize(self, serializer): + return serializer.serialize_business_rule_task(self) + + @classmethod + def deserialize(self, serializer, wf_spec, s_state): + return serializer.deserialize_business_rule_task(wf_spec, s_state) + + diff --git a/SpiffWorkflow/dmn/specs/__init__.py b/SpiffWorkflow/dmn/specs/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/SpiffWorkflow/dmn/specs/model.py b/SpiffWorkflow/dmn/specs/model.py new file mode 100644 index 000000000..28c7eb7df --- /dev/null +++ b/SpiffWorkflow/dmn/specs/model.py @@ -0,0 +1,193 @@ +from collections import OrderedDict + +from SpiffWorkflow.bpmn.DMNPythonScriptEngine import DMNPythonScriptEngine +from SpiffWorkflow.bpmn.PythonScriptEngine import PythonScriptEngine +from SpiffWorkflow.util.deep_merge import DeepMerge + + +class Decision: + def 
__init__(self, id, name): + self.id = id + self.name = name + + self.decisionTables = [] + +class DecisionTable: + def __init__(self, id, name): + self.id = id + self.name = name + + self.inputs = [] + self.outputs = [] + self.rules = [] + + def serialize(self): + out = {} + out['id'] = self.id + out['name'] = self.name + out['inputs'] = [x.serialize() for x in self.inputs] + out['outputs'] = [x.serialize() for x in self.outputs] + out['rules'] = [x.serialize() for x in self.rules] + return out + + def deserialize(self,indict): + self.id = indict['id'] + self.name = indict['name'] + self.inputs = [Input(**x) for x in indict['inputs']] + list(map(lambda x, y: x.deserialize(y), self.inputs, indict['inputs'])) + self.outputs = [Output(**x) for x in indict['outputs']] + self.rules = [Rule(None) for x in indict['rules']] + list(map(lambda x, y: x.deserialize(y),self.rules,indict['rules'])) + + + + +class Input: + def __init__(self, id, label, name, expression, scriptEngine, typeRef): + self.id = id + self.label = label + self.name = name + self.expression = expression + self.scriptEngine = scriptEngine + self.typeRef = typeRef + + def serialize(self): + out = {} + out['id'] = self.id + out['label'] = self.label + out['name'] = self.name + out['expression'] = self.expression + out['scriptEngine'] = self.scriptEngine.__class__.__name__ + out['typeRef'] = self.typeRef + return out + + def deserialize(self,indict): + #for key in indict.keys(): + # setattr(self,key,indict[key]) + self.scriptEngine = DMNPythonScriptEngine() # this is the only one we deal with now, + # later we will want to look at the classname + # that is in indct['scriptEngine'] and instantiate + # the right class + + + + +class InputEntry: + def __init__(self, id, input): + self.id = id + self.input = input + + self.description = '' + self.lhs = [] + + def serialize(self): + out = {} + out['id'] = self.id + out['input'] = self.input.serialize() + out['description'] = self.description + out['lhs'] = 
self.lhs + return out + + def deserialize(self, indict): + self.id = indict['id'] + self.description = indict['description'] + self.lhs = indict['lhs'] + self.input = Input(**indict['input']) + self.input.deserialize(indict['input']) + +class Output: + def __init__(self, id, label, name, typeRef): + self.id = id + self.label = label + self.name = name + self.typeRef = typeRef + + def serialize(self): + out = {} + out['id'] = self.id + out['label'] = self.label + out['name'] = self.name + out['typeRef'] = self.typeRef + return out + + +class OutputEntry: + def __init__(self, id, output): + self.id = id + self.output = output + + self.description = '' + self.text = '' + + def serialize(self): + out = {} + out['id'] = self.id + out['output'] = self.output.serialize() + out['description'] = self.description + out['text'] = self.text + if hasattr(self,'parsedRef'): + out['parsedRef'] = self.parsedRef + return out + + def deserialize(self, indict): + self.id = indict['id'] + self.description = indict['description'] + self.text = indict['text'] + if 'parsedRef' in indict: + self.parsedRef = indict['parsedRef'] + self.output = Output(**indict['output']) + + + +class Rule: + def __init__(self, id): + self.id = id + + self.description = '' + self.inputEntries = [] + self.outputEntries = [] + + def serialize(self): + out = {} + out['id'] = self.id + out['description'] = self.description + out['inputEntries'] = [x.serialize() for x in self.inputEntries] + out['outputEntries'] = [x.serialize() for x in self.outputEntries] + return out + + def deserialize(self,indict): + self.id = indict['id'] + self.description = indict['description'] + self.inputEntries = [InputEntry(None,None) for x in indict['inputEntries']] + list(map(lambda x,y : x.deserialize(y), self.inputEntries, indict['inputEntries'])) + self.outputEntries = [OutputEntry(None, None) for x in indict['outputEntries']] + list(map(lambda x, y: x.deserialize(y), self.outputEntries, indict['outputEntries'])) + + + + + def 
outputAsDict(self, data): + out = OrderedDict() + for outputEntry in self.outputEntries: + # try to use the id, but fall back to label if no name is provided. + key = outputEntry.output.name or outputEntry.output.label + if hasattr(outputEntry, "parsedRef"): + outvalue = PythonScriptEngine().evaluate(outputEntry.parsedRef,**data) + else: + outvalue = "" + if '.' in key: # we need to allow for dot notation in the DMN - + # I would use box to do this, but they didn't have a feature to build + # a dict based on a dot notation withoug eval + # so we build up a dictionary structure based on the key, and let the parent + # do a deep merge + currentout = {} + subkeylist = list(reversed(key.split('.'))) + for subkey in subkeylist[:-1]: + currentout[subkey] = outvalue + outvalue = currentout + currentout = {} + basekey = subkeylist[-1] + out[basekey] = DeepMerge.merge(out.get(basekey,{}),outvalue) + else: + out[key] = outvalue + return out diff --git a/SpiffWorkflow/navigation.py b/SpiffWorkflow/navigation.py new file mode 100644 index 000000000..340da1262 --- /dev/null +++ b/SpiffWorkflow/navigation.py @@ -0,0 +1,428 @@ +import copy + +from . 
import WorkflowException +from .bpmn.specs.EndEvent import EndEvent +from .bpmn.specs.ExclusiveGateway import ExclusiveGateway +from .bpmn.specs.ManualTask import ManualTask +from .bpmn.specs.NoneTask import NoneTask +from .bpmn.specs.ParallelGateway import ParallelGateway +from .bpmn.specs.ScriptTask import ScriptTask +from .bpmn.specs.StartEvent import StartEvent +from .bpmn.specs.UserTask import UserTask +from .dmn.specs.BusinessRuleTask import BusinessRuleTask +from .specs import CancelTask, StartTask +from .task import Task +from .bpmn.specs.BpmnSpecMixin import BpmnSpecMixin, SequenceFlow +from .bpmn.specs.UnstructuredJoin import UnstructuredJoin +from .bpmn.specs.MultiInstanceTask import MultiInstanceTask +from .bpmn.specs.CallActivity import CallActivity +from .bpmn.specs.BoundaryEvent import _BoundaryEventParent, BoundaryEvent + + +class NavItem(object): + """ + A waypoint in a workflow along with some key metrics + - Each list item has : + spec_id - TaskSpec or Sequence flow id + name - The name of the task spec (or sequence) + spec_type - The type of task spec (it's class name) + task_id - The uuid of the actual task instance, if it exists + description - Text description + backtrack_to - The spec_id of the task this will back track to. + indent - A hint for indentation + lane - This is the lane for the task if indicated. 
+ state - State of the task + """ + + def __init__(self, spec_id, name, description, + lane=None, backtrack_to=None, indent=0): + self.spec_id = spec_id + self.name = name + self.spec_type = "None" + self.description = description + self.lane = lane + self.backtrack_to = backtrack_to + self.indent = indent + self.task_id = None + self.state = None + self.children = [] + + def set_spec_type(self, spec): + types = [UserTask, ManualTask, BusinessRuleTask, CancelTask, + ScriptTask, StartTask, EndEvent, StartEvent, + MultiInstanceTask, StartEvent, SequenceFlow, + ExclusiveGateway, ParallelGateway, CallActivity, + UnstructuredJoin, NoneTask, BoundaryEvent] + spec_type = None + for t in types: + if isinstance(spec, t): + spec_type = t.__name__ + break + if spec_type: + self.spec_type = spec_type + elif spec.__class__.__name__.startswith('_'): + # These should be removed at some point in the process. + self.spec_type = spec.__class__.__name__ + else: + raise WorkflowException(spec, "Unknown spec: " + + spec.__class__.__name__) + + @classmethod + def from_spec(cls, spec: BpmnSpecMixin, backtrack_to=None, indent=None): + instance = cls( + spec_id=spec.id, + name=spec.name, + description=spec.description, + lane=spec.lane, + backtrack_to=backtrack_to, + indent=indent + ) + instance.set_spec_type(spec) + return instance + + @classmethod + def from_flow(cls, flow: SequenceFlow, lane, backtrack_to, indent): + """We include flows in the navigation if we hit a conditional gateway, + as in do this if x, do this if y....""" + instance = cls( + spec_id=flow.id, + name=flow.name, + description=flow.name, + lane=lane, + backtrack_to=backtrack_to, + indent=indent + ) + instance.set_spec_type(flow) + return instance + + def __eq__(self, other): + if isinstance(other, NavItem): + return self.spec_id == other.spec_id and \ + self.name == other.name and \ + self.spec_type == other.spec_type and \ + self.description == other.description and \ + self.lane == other.lane and \ + 
self.backtrack_to == other.backtrack_to and \ + self.indent == other.indent + return False + + def __str__(self): + text = self.description + if self.spec_type == "StartEvent": + text = "O" + elif self.spec_type == "TaskEndEvent": + text = "@" + elif self.spec_type == "ExclusiveGateway": + text = f"X {text} X" + elif self.spec_type == "ParallelGateway": + text = f"+ {text}" + elif self.spec_type == "SequenceFlow": + text = f"-> {text}" + elif self.spec_type[-4:] == "Task": + text = f"[{text}] TASK ID: {self.task_id}" + else: + text = f"({self.spec_type}) {text}" + + result = f' {"..," * self.indent} STATE: {self.state} {text}' + if self.lane: + result = f'|{self.lane}| {result}' + if self.backtrack_to: + result += f" (BACKTRACK to {self.backtrack_to}" + + return result + +def get_deep_nav_list(workflow): + # converts a flat nav into a hierarchical list, that is easier to render + # in some cases. This assumes that we never jump more than one indent + # forward at a time, which should always be the case, but that we might + # un-indent any amount. + nav_items = [] + flat_navs = get_flat_nav_list(workflow) + parents = [] + for nav_item in flat_navs: + if nav_item.indent == 0: + nav_items.append(nav_item) + parents = [nav_item] + else: + parents[nav_item.indent - 1].children.append(nav_item) + if len(parents) > nav_item.indent: + parents = parents[:nav_item.indent] # Trim back to branch point. + parents.append(nav_item) + + # With this navigation now deep, we can work back through tasks, and set + # states with a little more clarity + set_deep_state(nav_items) + + return nav_items + + +def set_deep_state(nav_items): + # recursive, in a deeply nested navigation, use the state of children to + # inform the state of the parent, so we have some idea what is going on at + # that deeper level. This may override the state of a gateway, which + # may be completed, but contain children that are not. 
+ state_precedence = ['READY', 'LIKELY', 'FUTURE', 'MAYBE', 'WAITING', + 'COMPLETED', 'CANCELLED'] + for nav_item in nav_items: + if len(nav_item.children) > 0: + child_states = [] + for child in nav_item.children: + child_states.append(set_deep_state([child])) + for state in state_precedence: + if state in child_states: + nav_item.state = state + return state + return nav_item.state + +def get_flat_nav_list(workflow): + # This takes the flat navigation returned from the follow_tree, and + # adds task states, producing a full flat navigation list. + nav_items = [] + for top in workflow.task_tree.children[0].task_spec.outputs: + nav_items.extend(follow_tree(top, output=[], + found=set(), workflow=workflow)) + task_list = workflow.get_tasks() + + # look up task status for each item in the list + used_tasks = set() # set of tasks already used to get state. + for nav_item in nav_items: + # get a list of statuses for the current task_spec + # we may have more than one task for each + tasks = [x for x in task_list if + x.task_spec.id == nav_item.spec_id and + x.task_spec.name == nav_item.name and + x not in used_tasks] + + if len(tasks) == 0: + # There is no task associated with this nav item, so we don't + # know its state here. + nav_item.state = None + nav_item.task_id = None + else: + if len(tasks) == 1: + task = tasks[0] + else: + # Something has caused us to loop back around in some way to + # this task spec again, and so there are multiple states for + # this navigation item. Opt for returning the last state. + # the first ready task, + # if available, then fall back to the last completed task. + ready_task = next((t for t in tasks + if t.state == Task.READY), None) + comp_task = next((t for t in reversed(tasks) + if t.state == Task.COMPLETED), None) + if ready_task: + task = ready_task + elif comp_task: + task = comp_task + else: + task = tasks[0] # Not sure what else to do here yet. 
+ used_tasks.add(task) + nav_item.state = task.state_names[task.state] + nav_item.task_id = task.id + + return nav_items + + +def same_ending_length(nav_with_children): + """ + return the length of the endings of each child that match each other + """ + # go get a modified list of just the ids in each child. + endings = [[leaf.spec_id for leaf in branch.children] for branch in + nav_with_children] + # the longest identical ending will be equal to the length of the + # shortest list + if len(endings) == 0: + shortest_list = 0 + else: + shortest_list = min([len(x) for x in endings]) + # walk through the list and determine if they are all the same + # for each. If they are not the same, then we back off the snip point + snip_point = shortest_list + for x in reversed(range(shortest_list)): + current_pos = -(x + 1) + end_ids = [el[current_pos] for el in endings] + if not len(set(end_ids)) <= 1: + snip_point = snip_point - 1 + return snip_point + + +def snip_same_ending(nav_with_children, length): + """ + shorten each child task list to be only its unique children, + return a list of the same endings so we can tack it on the + parent tree. + """ + if len(nav_with_children) == 0 or length == 0: + return [] + retlist = nav_with_children[0].children[-length:] + for branch in nav_with_children: + branch.children = branch.children[:-length] + return retlist + + +def conditional_task_add(output, task_spec, indent, backtrack_to=None): + #if task_spec.id not in [x.spec_id for x in output]: + output.append(NavItem.from_spec(spec=task_spec, + backtrack_to=backtrack_to, + indent=indent)) + + +def follow_tree(tree, output=[], found=set(), level=0, workflow=None): + """RECURSIVE - follows the tree returning a list of NavItem objects""" + + # I had an issue with a test being nondeterministic the yes/no + # were in an alternate order in some cases. 
To be 100% correct, this should + # probably also use the X/Y information that we are parsing elsewhere, but + # I did not see that information in the task spec. + # At a bare minimum, this should fix the problem where keys in a dict are + # flip-flopping. + # After I'm done, you should be able to manage the order of the sequence flows by + # naming the Flow_xxxxx names in the order you want them to appear. + + outputs = list(tree.outgoing_sequence_flows.keys()) + idlinks = [(x, tree.outgoing_sequence_flows[x]) for x in outputs] + idlinks.sort(key=lambda x: x[1].target_task_spec.position['y']) + outputs = [x[0] for x in idlinks] + + # --------------------- + # Endpoint, no children + # --------------------- + if len(outputs) == 0: + # This has no children, so we append it and terminate the recursion + conditional_task_add(output, tree, + backtrack_to=False, indent=level) + found.add(tree.id) + return output + + # --------------------- + # Call Activity - follow subtree + # --------------------- + if isinstance(tree, CallActivity): + tsk = workflow.get_tasks_from_spec_name(tree.name)[0] + x = tree.create_sub_workflow(tsk) + + output.append( NavItem.from_spec(tree, indent=level)) + + sublist_outputs = [ + follow_tree(top, output=[], found=set(), level=level + 1, workflow=x) + for top in x.task_tree.children[0].task_spec.outputs] + for lst in sublist_outputs: + for item in lst: + output.append(item) + for key in tree.outgoing_sequence_flows.keys(): + link = tree.outgoing_sequence_flows[key] + output = follow_tree(link.target_task_spec, output, found, + level, workflow) + return output + + if isinstance(tree, MultiInstanceTask) and not tree.isSequential: + # When we have expanded the tree, we'll have multiple tasks, and + # need this special case. 
If the tree has not yet been expanded + # it should just run through logic lower on in this function, + if len(tree.inputs) > 1: + # we have expanded the tree: + outputs = tree.inputs[1].outputs + for task_spec in outputs: + last_spec = task_spec + linkkey = list(task_spec.outgoing_sequence_flows.keys())[0] + link = task_spec.outgoing_sequence_flows[linkkey] + conditional_task_add(output, task_spec, indent=level) + if task_spec.id not in found: + found.add(task_spec.id) + + if last_spec: + output = follow_tree(link.target_task_spec, output, found, + level, workflow) + return output + else: + # Don't treat this like a multi-instance yet, and continue. + pass + + # ------------------ + # Simple case - no branching + # ----------------- + + if len(outputs) == 1: + # there are no branching points here, so our job is simple + # add to the tree and descend into the tree some more + link = tree.outgoing_sequence_flows[outputs[0]] + if tree.id not in found: + conditional_task_add(output, tree, indent=level) + found.add(tree.id) + output = follow_tree(link.target_task_spec, output, found, + level, workflow) + else: + conditional_task_add(output, tree, indent=level, backtrack_to=tree.name) + return output + + if isinstance(tree, _BoundaryEventParent): + for task in outputs: + link = tree.outgoing_sequence_flows[task] + conditional_task_add(output, tree, indent=level) + if link.target_task_spec.id not in found: + found.add(link.target_task_spec.id) + output = follow_tree(link.target_task_spec, output, found, + level + 1, workflow) + return output + + # if we are here, then we assume that we have a gateway of some form, + # where more than one path will exist, we need to follow multiple paths + # and then sync those paths and realign. 
+ task_children = [] + structured = not (isinstance(tree, UnstructuredJoin)) + for key in outputs: + f = copy.copy(found) + flow = tree.outgoing_sequence_flows[key] + my_children = [] + level_increment = 1 + if structured: level_increment = 2 + if flow.target_task_spec.id not in found: + my_children = follow_tree(flow.target_task_spec, my_children, f, + level + level_increment, workflow) + backtrack_link = None + else: + my_children = [] # This is backtracking, no need to follow it. + backtrack_link = flow.target_task_spec.name + + # Note that we append the NavWithChildren here, not just nav + item = NavItem.from_flow(flow=flow, lane=tree.lane, + backtrack_to=backtrack_link, indent=level + 1) + item.children = my_children + task_children.append(item) + + """ we know we have several decision points which may merge in the future. + The lists should be identical except for the levels. + essentially, we want to find the list of ID's that are in each of the + task_children's children and remove that from each list. + in addition, this should be the same length on each end because + of the sort above. now that we have our children lists, we can remove + the intersection of the group """ + if tree.id not in [x.spec_id for x in output]: + snip_lists = same_ending_length(task_children) + merge_list = snip_same_ending(task_children, snip_lists) + output.append(NavItem.from_spec(spec=tree, + backtrack_to=None, indent=level)) + for child in task_children: + # Add the flow nav item, but only if this is an exclusive gateway, + # and not an unstructured join + if structured: + output.append(child) + for descendent in child.children: + output.append(descendent) + child.children = [] # Remove internal children, as the results + # of this should be flat. 
+ + if len(merge_list) > 0: + # This bit gets the indentation right, we are merging back out + # to the current level, so the first child here should be moved + # out to current level, and any subsequent children should be + # reduced by that same difference. + indent_correction = merge_list[0].indent - level + for child in merge_list: + child.indent -= indent_correction + output.append(child) + + found.add(tree.id) + return output diff --git a/SpiffWorkflow/operators.py b/SpiffWorkflow/operators.py index b44e98c17..081304531 100644 --- a/SpiffWorkflow/operators.py +++ b/SpiffWorkflow/operators.py @@ -31,6 +31,15 @@ class Term(object): """ pass +class DotDict(dict): + """dot.notation access to dictionary attributes""" + def __getattr__(*args): + val = dict.get(*args) + return DotDict(val) if type(val) is dict else val + __setattr__ = dict.__setitem__ + __delattr__ = dict.__delitem__ + + class Attrib(Term): @@ -75,6 +84,7 @@ class PathAttrib(Term): def __init__(self, path): self.path = path + self.name = path def serialize(self, serializer): """ @@ -191,6 +201,13 @@ def valueof(scope, op, default=None): else: return op +def is_number(text): + try: + x = int(text) + except: + return False + return True + class Operator(Term): diff --git a/SpiffWorkflow/serializer/dict.py b/SpiffWorkflow/serializer/dict.py index 1babde4d5..92ef1c2d0 100644 --- a/SpiffWorkflow/serializer/dict.py +++ b/SpiffWorkflow/serializer/dict.py @@ -1,5 +1,7 @@ # -*- coding: utf-8 -*- from __future__ import division, absolute_import + +import codecs from builtins import str # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public @@ -18,6 +20,10 @@ import pickle from base64 import b64encode, b64decode from .. 
import Workflow +from ..bpmn.specs.BpmnSpecMixin import SequenceFlow +from ..dmn.engine.DMNEngine import DMNEngine +from ..dmn.specs.BusinessRuleTask import BusinessRuleTask +from ..dmn.specs.model import DecisionTable from ..util.impl import get_class from ..task import Task from ..operators import (Attrib, PathAttrib, Equal, NotEqual, @@ -26,10 +32,16 @@ ExclusiveChoice, Execute, Gate, Join, MultiChoice, MultiInstance, ReleaseMutex, Simple, WorkflowSpec, TaskSpec, SubWorkflow, StartTask, ThreadMerge, - ThreadSplit, ThreadStart, Merge, Trigger) + ThreadSplit, ThreadStart, Merge, Trigger, ) from .base import Serializer -from .exceptions import TaskNotSupportedError +from ..bpmn.specs.MultiInstanceTask import MultiInstanceTask +from ..camunda.specs.UserTask import UserTask +from ..bpmn.specs.ExclusiveGateway import ExclusiveGateway +from ..bpmn.specs.ScriptTask import ScriptTask +from .exceptions import TaskNotSupportedError, MissingSpecError import warnings +import copy + class DictionarySerializer(Serializer): @@ -119,7 +131,11 @@ def deserialize_arg(self, s_state): elif arg_type == 'value': return arg arg_cls = get_class(arg_type) - return arg_cls.deserialize(self, arg) + ret = arg_cls.deserialize(self, arg) + if isinstance(ret,list): + return arg_cls(*ret) + else: + return ret def serialize_task_spec(self, spec): s_state = dict(id=spec.id, @@ -130,9 +146,24 @@ def serialize_task_spec(self, spec): lookahead=spec.lookahead) module_name = spec.__class__.__module__ s_state['class'] = module_name + '.' 
+ spec.__class__.__name__ - s_state['inputs'] = [t.name for t in spec.inputs] - s_state['outputs'] = [t.name for t in spec.outputs] + x = all([hasattr(t,'id') for t in spec.inputs]) + s_state['inputs'] = [t.id for t in spec.inputs] + s_state['outputs'] = [t.id for t in spec.outputs] s_state['data'] = self.serialize_dict(spec.data) + if hasattr(spec,'documentation'): + s_state['documentation'] = spec.documentation + if hasattr(spec,'extensions'): + s_state['extensions'] = self.serialize_dict(spec.extensions) + s_state['position'] = self.serialize_dict(spec.position) + if hasattr(spec,'lane'): + s_state['lane'] = spec.lane + + if hasattr(spec,'outgoing_sequence_flows'): + s_state['outgoing_sequence_flows'] = {x:spec.outgoing_sequence_flows[x].serialize() for x in + spec.outgoing_sequence_flows.keys()} + s_state['outgoing_sequence_flows_by_id'] = {x:spec.outgoing_sequence_flows_by_id[x].serialize() for x in + spec.outgoing_sequence_flows_by_id.keys()} + s_state['defines'] = self.serialize_dict(spec.defines) s_state['pre_assign'] = self.serialize_list(spec.pre_assign) s_state['post_assign'] = self.serialize_list(spec.post_assign) @@ -149,8 +180,18 @@ def deserialize_task_spec(self, wf_spec, s_state, spec): spec.manual = s_state.get('manual', False) spec.internal = s_state.get('internal', False) spec.lookahead = s_state.get('lookahead', 2) + # I would use the s_state.get('extensions',{}) inside of the deserialize + # but many tasks have no extensions on them. 
+ if s_state.get('extensions',None) != None: + spec.extensions = self.deserialize_dict(s_state['extensions']) + if 'documentation' in s_state.keys(): + spec.documentation = s_state['documentation'] + spec.data = self.deserialize_dict(s_state.get('data', {})) + if 'lane' in s_state.keys(): + spec.lane = s_state.get('lane',None) spec.defines = self.deserialize_dict(s_state.get('defines', {})) + spec.position = self.deserialize_dict(s_state.get('position', {})) spec.pre_assign = self.deserialize_list(s_state.get('pre_assign', [])) spec.post_assign = self.deserialize_list( s_state.get('post_assign', [])) @@ -159,6 +200,10 @@ def deserialize_task_spec(self, wf_spec, s_state, spec): # deserialized yet. So keep the names, and resolve them in the end. spec.inputs = s_state.get('inputs', [])[:] spec.outputs = s_state.get('outputs', [])[:] + if s_state.get('outgoing_sequence_flows',None): + spec.outgoing_sequence_flows = s_state.get('outgoing_sequence_flows', {}) + spec.outgoing_sequence_flows_by_id = s_state.get('outgoing_sequence_flows_by_id', {}) + return spec def serialize_acquire_mutex(self, spec): @@ -233,6 +278,19 @@ def deserialize_choose(self, wf_spec, s_state): self.deserialize_task_spec(wf_spec, s_state, spec=spec) return spec + def serialize_exclusive_gateway(self, spec): + s_state = self.serialize_multi_choice(spec) + s_state['default_task_spec'] = spec.default_task_spec + return s_state + + def deserialize_exclusive_gateway(self, wf_spec, s_state): + spec = ExclusiveGateway(wf_spec, s_state['name']) + self.deserialize_multi_choice(wf_spec, s_state, spec=spec) + spec.default_task_spec = s_state['default_task_spec'] + return spec + + + def serialize_exclusive_choice(self, spec): s_state = self.serialize_multi_choice(spec) s_state['default_task_spec'] = spec.default_task_spec @@ -264,6 +322,130 @@ def deserialize_gate(self, wf_spec, s_state): self.deserialize_task_spec(wf_spec, s_state, spec=spec) return spec + def serialize_script_task(self, spec): + s_state = 
self.serialize_task_spec(spec) + s_state['script'] = spec.script + return s_state + + def deserialize_script_task(self, wf_spec, s_state): + spec = ScriptTask(wf_spec, s_state['name'], s_state['script']) + self.deserialize_task_spec(wf_spec, s_state, spec=spec) + return spec + + + def serialize_call_activity(self, spec): + s_state = self.serialize_task_spec(spec) + s_state['wf_class'] = spec.wf_class.__module__ + "." + spec.wf_class.__name__ + s_state['spec'] = self.serialize_workflow_spec(spec.spec) + return s_state + + def deserialize_call_activity(self, wf_spec, s_state, cls): + spec = cls(wf_spec, s_state['name']) + spec.wf_class = get_class(s_state['wf_class']) + spec.spec = self.deserialize_workflow_spec(s_state['spec']) + self.deserialize_task_spec(wf_spec, s_state, spec=spec) + return spec + + + def serialize_generic_event(self, spec): + s_state = self.serialize_task_spec(spec) + if spec.event_definition: + s_state['event_definition'] = spec.event_definition.serialize() + else: + s_state['event_definition'] = None + return s_state + + def deserialize_generic_event(self, wf_spec, s_state, cls): + if s_state.get('event_definition',None): + evtcls = get_class(s_state['event_definition']['classname']) + event = evtcls.deserialize(s_state['event_definition']) + else: + event = None + spec = cls(wf_spec, s_state['name'], event) + self.deserialize_task_spec(wf_spec, s_state, spec=spec) + return spec + + + def serialize_boundary_event_parent(self, spec): + s_state = self.serialize_task_spec(spec) + s_state['main_child_task_spec'] = spec.main_child_task_spec.id + return s_state + + def deserialize_boundary_event_parent(self, wf_spec, s_state, cls): + + main_child_task_spec = wf_spec.get_task_spec_from_id(s_state['main_child_task_spec']) + spec = cls(wf_spec, s_state['name'], main_child_task_spec) + self.deserialize_task_spec(wf_spec, s_state, spec=spec) + return spec + + + + def serialize_boundary_event(self, spec): + s_state = self.serialize_task_spec(spec) + if 
spec._cancel_activity: + s_state['cancel_activity'] = spec._cancel_activity + else: + s_state['cancel_activity'] = None + if spec.event_definition: + s_state['event_definition'] = spec.event_definition.serialize() + else: + s_state['event_definition'] = None + return s_state + + def deserialize_boundary_event(self, wf_spec, s_state, cls): + cancel_activity = s_state.get('cancel_activity',None) + if s_state['event_definition']: + eventclass = get_class(s_state['event_definition']['classname']) + event = eventclass.deserialize(s_state['event_definition']) + else: + event = None + spec = cls(wf_spec, s_state['name'], cancel_activity=cancel_activity,event_definition=event) + self.deserialize_task_spec(wf_spec, s_state, spec=spec) + return spec + + + + def serialize_end_event(self, spec): + s_state = self.serialize_task_spec(spec) + s_state['is_terminate_event'] = spec.is_terminate_event + return s_state + + def deserialize_end_event(self, wf_spec, s_state, cls): + terminateEvent = s_state.get('is_terminate_event',None) + spec = cls(wf_spec, s_state['name'],terminateEvent) + self.deserialize_task_spec(wf_spec, s_state, spec=spec) + return spec + + + def serialize_user_task(self, spec): + s_state = self.serialize_task_spec(spec) + s_state['form'] = spec.form + return s_state + + def deserialize_user_task(self, wf_spec, s_state): + spec = UserTask(wf_spec, s_state['name'], s_state['form']) + self.deserialize_task_spec(wf_spec, s_state, spec=spec) + return spec + + + def serialize_business_rule_task(self, spec): + s_state = self.serialize_task_spec(spec) + dictrep = spec.dmnEngine.decisionTable.serialize() + # future + s_state['dmn'] = dictrep + return s_state + + def deserialize_business_rule_task(self, wf_spec, s_state): + dt = DecisionTable(None,None) + dt.deserialize(s_state['dmn']) + dmnEngine = DMNEngine(dt) + spec = BusinessRuleTask(wf_spec, s_state['name'], dmnEngine) + self.deserialize_task_spec(wf_spec, s_state, spec=spec) + return spec + + + + def 
serialize_join(self, spec): s_state = self.serialize_task_spec(spec) s_state['split_task'] = spec.split_task @@ -272,11 +454,15 @@ def serialize_join(self, spec): s_state['cancel_remaining'] = spec.cancel_remaining return s_state - def deserialize_join(self, wf_spec, s_state): - spec = Join(wf_spec, + def deserialize_join(self, wf_spec, s_state, cls=Join): + if isinstance(s_state['threshold'],dict): + byte_payload = s_state['threshold']['__bytes__'] + else: + byte_payload = s_state['threshold'] + spec = cls(wf_spec, s_state['name'], split_task=s_state['split_task'], - threshold=pickle.loads(b64decode(s_state['threshold'])), + threshold=pickle.loads(b64decode(byte_payload)), cancel=s_state['cancel_remaining']) self.deserialize_task_spec(wf_spec, s_state, spec=spec) return spec @@ -308,15 +494,51 @@ def deserialize_multi_choice(self, wf_spec, s_state, spec=None): def serialize_multi_instance(self, spec): s_state = self.serialize_task_spec(spec) + # here we need to add in all of the things that would get serialized + # for other classes that the MultiInstance could be - + # + if hasattr(spec,'form'): + s_state['form'] = spec.form + + if isinstance(spec,MultiInstanceTask): + s_state['collection'] = self.serialize_arg(spec.collection) + s_state['elementVar'] = self.serialize_arg(spec.elementVar) + s_state['isSequential'] = self.serialize_arg(spec.isSequential) + s_state['loopTask'] = self.serialize_arg(spec.loopTask) + if (hasattr(spec,'expanded')): + s_state['expanded'] = self.serialize_arg(spec.expanded) + if isinstance(spec,BusinessRuleTask): + brState = self.serialize_business_rule_task(spec) + s_state['dmn'] = brState['dmn'] s_state['times'] = self.serialize_arg(spec.times) + s_state['prevtaskclass'] = spec.prevtaskclass return s_state - def deserialize_multi_instance(self, wf_spec, s_state): - spec = MultiInstance(wf_spec, - s_state['name'], - times=self.deserialize_arg(s_state['times'])) - self.deserialize_task_spec(wf_spec, s_state, spec=spec) - return spec + 
def deserialize_multi_instance(self, wf_spec, s_state, cls=None): + if cls == None: + cls = MultiInstance(wf_spec, + s_state['name'], + times=self.deserialize_arg(s_state['times'])) + if isinstance(s_state['times'],list): + s_state['times'] = self.deserialize_arg(s_state['times']) + cls.times = s_state['times'] + if isinstance(cls,MultiInstanceTask): + cls.isSequential = self.deserialize_arg(s_state['isSequential']) + cls.loopTask = self.deserialize_arg(s_state['loopTask']) + cls.elementVar = self.deserialize_arg(s_state['elementVar']) + cls.collection = self.deserialize_arg(s_state['collection']) + if s_state.get('expanded',None): + cls.expanded = self.deserialize_arg(s_state['expanded']) + if isinstance(cls,BusinessRuleTask): + dt = DecisionTable(None,None) + dt.deserialize(s_state['dmn']) + dmnEngine = DMNEngine(dt) + cls.dmnEngine=dmnEngine + if s_state.get('form',None): + cls.form = s_state['form'] + + self.deserialize_task_spec(wf_spec, s_state, spec=cls) + return cls def serialize_release_mutex(self, spec): s_state = self.serialize_task_spec(spec) @@ -338,6 +560,13 @@ def deserialize_simple(self, wf_spec, s_state): self.deserialize_task_spec(wf_spec, s_state, spec=spec) return spec + + def deserialize_generic(self, wf_spec, s_state,newclass): + assert isinstance(wf_spec, WorkflowSpec) + spec = newclass(wf_spec, s_state['name']) + self.deserialize_task_spec(wf_spec, s_state, spec=spec) + return spec + def serialize_start_task(self, spec): return self.serialize_task_spec(spec) @@ -422,8 +651,21 @@ def serialize_workflow_spec(self, spec, **kwargs): s_state = dict(name=spec.name, description=spec.description, file=spec.file) - s_state['task_specs'] = dict((k, v.serialize(self)) - for k, v in list(spec.task_specs.items())) + + if 'Root' not in spec.task_specs: + # This is to fix up the case when we + # load in a task spec and there is no root object. + # it causes problems when we deserialize and then re-serialize + # because the deserialize process adds a root. 
+ root = Simple(spec, 'Root') + spec.task_specs['Root'] = root + + mylista = [v for k, v in list(spec.task_specs.items())] + mylist = [(k, v.serialize(self)) + for k, v in list(spec.task_specs.items())] + if hasattr(spec,'end'): + s_state['end']=spec.end.id + s_state['task_specs'] = dict(mylist) return s_state def deserialize_workflow_spec(self, s_state, **kwargs): @@ -437,26 +679,51 @@ def deserialize_workflow_spec(self, s_state, **kwargs): self, spec, start_task_spec_state) spec.start = start_task_spec spec.task_specs['Start'] = start_task_spec - for name, task_spec_state in list(s_state['task_specs'].items()): if name == 'Start': continue - task_spec_cls = get_class(task_spec_state['class']) + prevtask = task_spec_state.get('prevtaskclass',None) + if prevtask: + oldtask = get_class(prevtask) + task_spec_cls = type(task_spec_state['class'], ( + MultiInstanceTask,oldtask ), {}) + else: + task_spec_cls = get_class(task_spec_state['class']) task_spec = task_spec_cls.deserialize(self, spec, task_spec_state) spec.task_specs[name] = task_spec + for name, task_spec in list(spec.task_specs.items()): - task_spec.inputs = [spec.get_task_spec_from_name(t) + if hasattr(task_spec,'outgoing_sequence_flows'): + for entry,value in task_spec.outgoing_sequence_flows.items(): + task_spec.outgoing_sequence_flows[entry] = \ + SequenceFlow(value['id'], + value['name'], + value['documentation'], + spec.get_task_spec_from_id(value['target_task_spec'])) + for entry, value in task_spec.outgoing_sequence_flows_by_id.items(): + task_spec.outgoing_sequence_flows_by_id[entry] = \ + SequenceFlow(value['id'], + value['name'], + value['documentation'], + spec.get_task_spec_from_id(value['target_task_spec'])) + + task_spec.inputs = [spec.get_task_spec_from_id(t) for t in task_spec.inputs] - task_spec.outputs = [spec.get_task_spec_from_name(t) + task_spec.outputs = [spec.get_task_spec_from_id(t) for t in task_spec.outputs] + if s_state.get('end', None): + spec.end = 
spec.get_task_spec_from_id(s_state['end']) + assert spec.start is spec.get_task_spec_from_name('Start') return spec - def serialize_workflow(self, workflow, **kwargs): + def serialize_workflow(self, workflow, include_spec=True, **kwargs): + assert isinstance(workflow, Workflow) s_state = dict() - s_state['wf_spec'] = self.serialize_workflow_spec(workflow.spec, - **kwargs) + if include_spec: + s_state['wf_spec'] = self.serialize_workflow_spec(workflow.spec, + **kwargs) # data s_state['data'] = self.serialize_dict(workflow.data) @@ -476,9 +743,17 @@ def serialize_workflow(self, workflow, **kwargs): return s_state - def deserialize_workflow(self, s_state, **kwargs): - wf_spec = self.deserialize_workflow_spec(s_state['wf_spec'], **kwargs) - workflow = Workflow(wf_spec) + def deserialize_workflow(self, s_state, wf_class=Workflow, + read_only=False, wf_spec=None, **kwargs): + """It is possible to override the workflow class, and specify a + workflow_spec, otherwise the spec is assumed to be serialized in the + s_state['wf_spec']""" + + if wf_spec is None: + wf_spec = self.deserialize_workflow_spec(s_state['wf_spec'], **kwargs) + workflow = wf_class(wf_spec) + + workflow.read_only = read_only # data workflow.data = self.deserialize_dict(s_state['data']) @@ -498,18 +773,25 @@ def deserialize_workflow(self, s_state, **kwargs): workflow, s_state['task_tree']) # Re-connect parents - for task in workflow.get_tasks(): - task.parent = workflow.get_task(task.parent) + tasklist = list(workflow.get_tasks()) + for task in tasklist: + task.parent = workflow.get_task(task.parent,tasklist) # last_task - workflow.last_task = workflow.get_task(s_state['last_task']) + workflow.last_task = workflow.get_task(s_state['last_task'],tasklist) return workflow - def serialize_task(self, task, skip_children=False): + + def serialize_task(self, task, skip_children=False, allow_subs=False): + """ + :param allow_subs: Allows sub-serialization to take place, otherwise + assumes that the subworkflow is 
stored in internal data and raises an error. + """ + assert isinstance(task, Task) - if isinstance(task.task_spec, SubWorkflow): + if not allow_subs and isinstance(task.task_spec, SubWorkflow): raise TaskNotSupportedError( "Subworkflow tasks cannot be serialized (due to their use of" + " internal_data to store the subworkflow).") @@ -548,12 +830,109 @@ def serialize_task(self, task, skip_children=False): return s_state + # comments for the if statement below: + # -------------------------------------- + # if s_state['internal_data'].get('runtimes',None) is not None \ # <-- if we are in a multiinstance + # and s_state['internal_data']['runtimes'] > 1 and \ # <-- and the cardinality has gone above one, + # \ # presumably because we are using a collection + # len(task_spec.inputs) > 1 and \ #<- and we have previously patched up the task spec tree + # # which should happen as soon as we load the original task spec + # len(task_spec.inputs[1].outputs) < s_state['internal_data']['runtimes'] : # <-- and we haven't expanded + # # the tree yet (i.e. we are + # # re-loading the task spec) + + + def deserialize_task(self, workflow, s_state): assert isinstance(workflow, Workflow) - # task_spec - task_spec = workflow.get_task_spec_from_name(s_state['task_spec']) + splits = s_state['task_spec'].split('_') + oldtaskname = s_state['task_spec'] + task_spec = workflow.get_task_spec_from_name(oldtaskname) + # if task_spec is None and \ + # splits[-1].isdigit(): + # num = int(splits[-1]) + # if num == 0: + # newtaskname = '_'.join(splits[:-1]) + # else: + # newtaskname = '_'.join(splits[:-1]) + "_%d"%(num-1) + # # in order to patch things up correctly, we need to append in the correct order + # # I'm really hoping that things get added to save list in saved order + # # otherwise I'll have some problems. 
+ # task_spec = workflow.get_task_spec_from_name(newtaskname) + # new_task_spec = copy.copy(task_spec) + # new_task_spec.name = oldtaskname + # if isinstance(new_task_spec.id,int): + # new_task_spec.id = "%d_%d"%(new_task_spec.id,num) + # else: + # new_task_spec.id = '_'.join(new_task_spec.id.split('_')[:-1])+"_%d"%num + # workflow.spec.task_specs[oldtaskname] = new_task_spec + # inter_out = task_spec.outputs + # task_spec.outputs=[new_task_spec] + # new_task_spec.outputs = inter_out + # for item in inter_out: + # item.inputs = [new_task_spec] + # task_spec = new_task_spec + # if s_state['internal_data'].get('runtimes', None) is not None \ + # and s_state['internal_data']['runtimes'] > 1 and \ + # len(task_spec.inputs) > 1 and \ + # len(task_spec.inputs[1].outputs) < s_state['internal_data']['runtimes']: + # task_spec = copy.copy(task_spec) + # task_spec.id = str(task_spec.id) + '_%d'%(s_state['internal_data']['runtimes']-1) + # #FIXME: we should only have 1 input, not 2 + # task_spec.inputs[1].outputs.append(task_spec) + # task_spec.outputs[0].inputs.append(task_spec) + # if task_spec is None and \ + # splits[0] == 'Gateway' and \ + # splits[-1] == 'start': + # # if we are here, then we have a task tree that has an expanded PMI - but the task + # # spec tree that we are importing is not expanded - we are dealing with a lot of other + # # parallel multi-instance stuff here, so I'm going to try to do this as well. 
+ # newtaskname = '_'.join(splits[2:-1]) + # endtaskname = '_'.join(splits[:-1])+"_end" + # task_spec = workflow.get_task_spec_from_name(newtaskname) + # #-------------------------------------------------------------- + # # it may be best to try and use the code from MultiInstanceTask._add_gateway + # # instead of re-create it here, the problem is that + # # it requires a task to be created for it and I don't actually have the + # # task yet, because I've got a chicken/egg problem - the code below outlines + # # what I need to do but it doesn't accurately represent the way the + # # PMI instances are getting set up. + # #-------------------------------------------------------------- + # # create the beginning gateway + # task_spec._wf_spec + # newtaskspec = ParallelGateway(task_spec._wf_spec, + # oldtaskname, + # triggered=False, + # description="Begin Gateway") + # + # endtaskspec = ParallelGateway(task_spec._wf_spec, endtaskname, + # triggered=False, description="End Gateway") + # + # + # # patch middle task into begin gateway + # newtaskspec.outputs = [task_spec] + # task_spec.inputs.append(newtaskspec) + # # patch middle into end gateway + # endtaskspec.inputs = [task_spec] + # endtaskspec.outputs = copy.copy(task_spec.outputs) + # task_spec.outputs = [endtaskspec] + # # inform registry about new task specs + # workflow.spec.task_specs[oldtaskname] = newtaskspec + # workflow.spec.task_specs[endtaskname] = endtaskspec + # + # task_spec = newtaskspec # change to our beginning gateway + # + + + if task_spec is None: + raise MissingSpecError("Unknown task spec: " + oldtaskname) task = Task(workflow, task_spec) + if getattr(task_spec,'isSequential',False) and \ + s_state['internal_data'].get('splits') is not None: + task.task_spec.expanded = s_state['internal_data']['splits'] + + # id task.id = s_state['id'] @@ -563,8 +942,7 @@ def deserialize_task(self, workflow, s_state): task.parent = s_state['parent'] # children - task.children = [self.deserialize_task(workflow, 
c) - for c in s_state['children']] + task.children = self._deserialize_task_children(task, s_state) # state task._state = s_state['state'] @@ -578,5 +956,10 @@ def deserialize_task(self, workflow, s_state): # internal_data task.internal_data = s_state['internal_data'] - return task + + def _deserialize_task_children(self, task, s_state): + """This may need to be overridden if you need to support + deserialization of sub-workflows""" + return [self.deserialize_task(task.workflow, c) + for c in s_state['children']] diff --git a/SpiffWorkflow/serializer/exceptions.py b/SpiffWorkflow/serializer/exceptions.py index babc69600..73d9e4b4b 100644 --- a/SpiffWorkflow/serializer/exceptions.py +++ b/SpiffWorkflow/serializer/exceptions.py @@ -4,3 +4,7 @@ class TaskSpecNotSupportedError(ValueError): class TaskNotSupportedError(ValueError): pass + + +class MissingSpecError(ValueError): + pass diff --git a/SpiffWorkflow/serializer/json.py b/SpiffWorkflow/serializer/json.py index 69cad7c8d..0468a51e4 100644 --- a/SpiffWorkflow/serializer/json.py +++ b/SpiffWorkflow/serializer/json.py @@ -18,7 +18,8 @@ import uuid from .dict import DictionarySerializer from ..operators import Attrib - +from SpiffWorkflow.camunda.specs.UserTask import Form +from ..util.impl import get_class def object_hook(dct): if '__uuid__' in dct: @@ -30,9 +31,24 @@ def object_hook(dct): if '__attrib__' in dct: return Attrib(dct['__attrib__']) + if '__form__' in dct: + return Form(init=json.loads(dct['__form__'])) + + return dct +def JsonableHandler(Obj): + if hasattr(Obj, 'jsonable'): + return Obj.jsonable() + else: + raise 'Object of type %s with value of %s is not JSON serializable' % ( + type(Obj), repr(Obj)) + + + + + def default(obj): if isinstance(obj, uuid.UUID): return {'__uuid__': obj.hex} @@ -43,6 +59,9 @@ def default(obj): if isinstance(obj, Attrib): return {'__attrib__': obj.name} + if isinstance(obj,Form): + return {'__form__': json.dumps(obj, default=JsonableHandler)} + raise TypeError('%r is not 
JSON serializable' % obj) diff --git a/SpiffWorkflow/serializer/xml.py b/SpiffWorkflow/serializer/xml.py index bc0c3f1d3..9d44b345a 100644 --- a/SpiffWorkflow/serializer/xml.py +++ b/SpiffWorkflow/serializer/xml.py @@ -522,8 +522,11 @@ def serialize_multi_instance(self, spec): self.serialize_value(SubElement(elem, 'times'), spec.times) return self.serialize_task_spec(spec, elem) - def deserialize_multi_instance(self, wf_spec, elem, cls=MultiInstance, + def deserialize_multi_instance(self, wf_spec, elem, cls=None, **kwargs): + if cls == None: + cls = MultiInstance + #cls = MultiInstance(wf_spec,elem.find('name'),elem.find('times')) times = self.deserialize_value(elem.find('times')) return self.deserialize_task_spec(wf_spec, elem, cls, times=times, **kwargs) diff --git a/SpiffWorkflow/specs/Join.py b/SpiffWorkflow/specs/Join.py index a0204b976..49592b441 100644 --- a/SpiffWorkflow/specs/Join.py +++ b/SpiffWorkflow/specs/Join.py @@ -281,6 +281,8 @@ def _do_join(self, my_task): task.state = Task.COMPLETED task._drop_children() + + def _on_trigger(self, my_task): """ May be called to fire the Join before the incoming branches are diff --git a/SpiffWorkflow/specs/MultiInstance.py b/SpiffWorkflow/specs/MultiInstance.py index 22131b287..486eaa516 100644 --- a/SpiffWorkflow/specs/MultiInstance.py +++ b/SpiffWorkflow/specs/MultiInstance.py @@ -51,6 +51,7 @@ def __init__(self, wf_spec, name, times, **kwargs): raise ValueError('times argument is required') TaskSpec.__init__(self, wf_spec, name, **kwargs) self.times = times + self.prevtaskclass = None def _find_my_task(self, task): for thetask in task.workflow.task_tree: diff --git a/SpiffWorkflow/specs/SubWorkflow.py b/SpiffWorkflow/specs/SubWorkflow.py index bf9997b81..f0bc1ecd6 100644 --- a/SpiffWorkflow/specs/SubWorkflow.py +++ b/SpiffWorkflow/specs/SubWorkflow.py @@ -100,7 +100,10 @@ def _on_ready_before_hook(self, my_task): subworkflow = self._create_subworkflow(my_task) subworkflow.completed_event.connect( 
self._on_subworkflow_completed, my_task) + self._integrate_subworkflow_tree(my_task, subworkflow) + my_task._set_internal_data(subworkflow=subworkflow) + def _integrate_subworkflow_tree(self, my_task, subworkflow): # Integrate the tree of the subworkflow into the tree of this workflow. my_task._sync_children(self.outputs, Task.FUTURE) for child in my_task.children: @@ -110,8 +113,6 @@ def _on_ready_before_hook(self, my_task): my_task.children.insert(0, child) child.parent = my_task - my_task._set_internal_data(subworkflow=subworkflow) - def _on_ready_hook(self, my_task): # Assign variables, if so requested. subworkflow = my_task._get_internal_data('subworkflow') @@ -126,6 +127,8 @@ def _on_ready_hook(self, my_task): def _on_subworkflow_completed(self, subworkflow, my_task): # Assign variables, if so requested. for child in my_task.children: + if subworkflow.last_task is not None: + child.data.update(subworkflow.last_task.data) if child.task_spec in self.outputs: for assignment in self.out_assign: assignment.assign(subworkflow, child) diff --git a/SpiffWorkflow/specs/WorkflowSpec.py b/SpiffWorkflow/specs/WorkflowSpec.py index 343a81c13..cc84fd4ed 100644 --- a/SpiffWorkflow/specs/WorkflowSpec.py +++ b/SpiffWorkflow/specs/WorkflowSpec.py @@ -62,7 +62,23 @@ def get_task_spec_from_name(self, name): :type name: str :param name: The name of the task spec. :rtype: TaskSpec :returns: The task spec with the given name. """ - return self.task_specs[name] + return self.task_specs.get(name) + + def get_task_spec_from_id(self, id): + """ + Returns the task spec with the given id. + + :type id: object + :param id: The id of the task spec. + :rtype: TaskSpec + :returns: The task spec with the given id, or None if absent. + """ + ret_spec = None + for x in self.task_specs: + if self.task_specs[x].id == id: + ret_spec = self.task_specs[x] + return ret_spec + def validate(self): """Checks integrity of workflow and reports any problems with it. 
@@ -133,6 +149,7 @@ def deserialize(cls, serializer, s_state, **kwargs): :rtype: WorkflowSpec :returns: The resulting instance. """ + return serializer.deserialize_workflow_spec(s_state, **kwargs) def get_dump(self, verbose=False): diff --git a/SpiffWorkflow/specs/base.py b/SpiffWorkflow/specs/base.py index e001b01b5..5801c49b4 100644 --- a/SpiffWorkflow/specs/base.py +++ b/SpiffWorkflow/specs/base.py @@ -56,7 +56,7 @@ class TaskSpec(object): (cancelled may happen at any time) The only events where implementing something other than state tracking - may be useful are the following: + may be useful are the following: - Reached: You could mess with the pre-assign variables here, for example. Other then that, there is probably no need in a real application. @@ -95,6 +95,10 @@ def __init__(self, wf_spec, name, **kwargs): :param pre_assign: a list of name/value pairs :type post_assign: list((str, object)) :param post_assign: a list of name/value pairs + :type position: dict((str, object)) + :param position: a dict containing an 'x' and 'y' with coordinates + that describe where the element occurred in the + diagram. """ assert wf_spec is not None assert name is not None @@ -111,6 +115,7 @@ def __init__(self, wf_spec, name, **kwargs): self.pre_assign = kwargs.get('pre_assign', []) self.post_assign = kwargs.get('post_assign', []) self.locks = kwargs.get('lock', []) + self.position = kwargs.get('position', {'x': 0, 'y': 0}) self.lookahead = 2 # Maximum number of MAYBE predictions. # Events. @@ -438,6 +443,26 @@ def serialize(self, serializer, **kwargs): :rtype: object :returns: The serialized object. """ + module = self.__class__.__module__ + class_name = module + '.' 
+ self.__class__.__name__ + + return { + 'id':self.id, + 'class': class_name, + 'name':self.name, + 'description':self.description, + 'inputs':[x.id for x in self.inputs], + 'outputs':[x.id for x in self.outputs], + 'manual':self.manual, + 'internal':self.internal, + 'data':self.data, + 'defines':self.defines, + 'pre_assign':self.pre_assign, + 'post_assign':self.post_assign, + 'locks':self.locks, + 'position':self.position, + 'lookahead':self.lookahead, + } raise NotImplementedError @classmethod @@ -462,4 +487,22 @@ def deserialize(cls, serializer, wf_spec, s_state, **kwargs): :rtype: TaskSpec :returns: The task specification instance. """ - raise NotImplementedError + print(s_state) + print(wf_spec) + out = cls(wf_spec,s_state.get('name')) + out.id = s_state.get('id') + out.name = s_state.get('name') + out.description = s_state.get('description') + out.inputs = s_state.get('inputs') + out.outputs = s_state.get('outputs') + out.manual = s_state.get('manual') + out.internal = s_state.get('internal') + out.data = s_state.get('data') + out.defines = s_state.get('defines') + out.pre_assign = s_state.get('pre_assign') + out.post_assign = s_state.get('post_assign') + out.locks = s_state.get('locks') + out.position = s_state.get('position') + out.lookahead = s_state.get('lookahead') + return out + #raise NotImplementedError diff --git a/SpiffWorkflow/task.py b/SpiffWorkflow/task.py index 27e28b9a5..5b7463cf1 100644 --- a/SpiffWorkflow/task.py +++ b/SpiffWorkflow/task.py @@ -20,16 +20,29 @@ # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301 USA +from SpiffWorkflow.exceptions import WorkflowException import logging import time from uuid import uuid4 -from .exceptions import WorkflowException +import random + +from .util.deep_merge import DeepMerge LOG = logging.getLogger(__name__) +def updateDotDict(dict,id,value): + x = id.split('.') + print(x) + if len(x) == 1: + 
dict[x[0]]=value + elif dict.get(x[0]): + dict[x[0]][x[1]] = value + else: + dict[x[0]] = {x[1]:value} + -class Task(object): +class Task(object): """ Used internally for composing a tree that represents the path that is taken (or predicted) within the workflow. @@ -95,16 +108,18 @@ class Task(object): NOT_FINISHED_MASK = PREDICTED_MASK | WAITING | READY ANY_MASK = FINISHED_MASK | NOT_FINISHED_MASK - state_names = {FUTURE: 'FUTURE', - WAITING: 'WAITING', - READY: 'READY', + state_names = {FUTURE: 'FUTURE', + WAITING: 'WAITING', + READY: 'READY', CANCELLED: 'CANCELLED', COMPLETED: 'COMPLETED', - LIKELY: 'LIKELY', - MAYBE: 'MAYBE'} + LIKELY: 'LIKELY', + MAYBE: 'MAYBE'} class Iterator(object): + MAX_ITERATIONS = 10000 + """ This is a tree iterator that supports filtering such that a client may walk through all tasks that have a specific state. @@ -116,20 +131,31 @@ def __init__(self, current, filter=None): """ self.filter = filter self.path = [current] + self.count = 1 + def __iter__(self): return self def _next(self): + # Make sure that the end is not yet reached. if len(self.path) == 0: raise StopIteration() + current = self.path[-1] + + # Assure we don't recurse forever. + self.count += 1 + if self.count > self.MAX_ITERATIONS: + raise WorkflowException(current, + "Task Iterator entered infinite recursion loop" ) + + # If the current task has children, the first child is the next # item. If the current task is LIKELY, and predicted tasks are not # specificly searched, we can ignore the children, because # predicted tasks should only have predicted children. 
- current = self.path[-1] ignore_task = False if self.filter is not None: search_predicted = self.filter & Task.LIKELY != 0 @@ -187,11 +213,13 @@ def __init__(self, workflow, task_spec, parent=None, state=MAYBE): self.state_history = [state] self.log = [] self.task_spec = task_spec - self.id = uuid4() + self.id = uuid4() #UUID(int=random.getrandbits(128),version=4) self.thread_id = self.__class__.thread_id_pool self.last_state_change = time.time() self.data = {} + self.terminate_current_loop = False self.internal_data = {} + self.mi_collect_data = {} if parent is not None: self.parent._child_added_notify(self) @@ -201,6 +229,117 @@ def __repr__(self): self.get_state_name(), hex(id(self))) + def update_data_var(self, fieldid, value): + model = {} + updateDotDict(model,fieldid, value) + self.update_data(model) + + def update_data(self, data): + """ + If the task.data needs to be updated from a UserTask form or + a Script task then use this function rather than updating task.data + directly. It will handle deeper merges of data, + and MultiInstance tasks will be updated correctly. + """ + self.data = DeepMerge.merge(self.data, data) + + def task_info(self): + """ + Returns a dictionary of information about the current task, so that + we can give hints to the user about what kind of task we are working + with such as a looping task or a Parallel MultiInstance task + :returns: dictionary + """ + default = {'is_looping': False, + 'is_sequential_mi': False, + 'is_parallel_mi': False, + 'mi_count': 0, + 'mi_index': 0} + + miInfo = getattr(self.task_spec, "multiinstance_info", None) + if callable(miInfo): + return miInfo(self) + else: + return default + + def terminate_loop(self): + """ + Used in the case that we are working with a BPMN 'loop' task. 
+ The task will loop, repeatedly asking for input until terminate_loop + is called on the task + """ + + def raiseError(): + raise WorkflowException(self.task_spec, + 'The method terminate_loop should only be called in the case of a BPMN Loop Task') + + islooping = getattr(self.task_spec, "is_loop_task", None) + if callable(islooping): + if not (self.task_spec.is_loop_task()): + raiseError() + else: + raiseError() + self.terminate_current_loop = True + + def set_children_future(self): + """ + for a parallel gateway, we need to set up our + children so that the gateway figures out that it needs to join up + the inputs - otherwise our child process never gets marked as + 'READY' + """ + from .bpmn.specs.UnstructuredJoin import UnstructuredJoin + + if (self.state != self.COMPLETED and self.state != self.READY) and \ + not (isinstance(self.task_spec,UnstructuredJoin)): + return + + if isinstance(self.task_spec, UnstructuredJoin): + # go find all of the gateways with the same name as this one, + # drop children and set state to WAITING + for t in list(self.workflow.task_tree): + if t.task_spec.name == self.task_spec.name and \ + t.state == self.COMPLETED: + t._set_state(self.WAITING) + # now we set this one to execute + + self._set_state(self.MAYBE) + self._sync_children(self.task_spec.outputs) + for child in self.children: + child.set_children_future() + + + def find_children_by_name(self,name): + """ + for debugging + """ + return [x for x in self.workflow.task_tree if x.task_spec.name == name] + + def reset_token(self, reset_data=False): + """ + Resets the token to this task. This should allow a trip 'back in time' + as it were to items that have already been completed. 
+ :type reset_data: bool + :param reset_data: Do we want to have the data be where we left off in + this task or not + """ + from .bpmn.specs.CallActivity import CallActivity + taskinfo = self.task_info() + if not reset_data: + self.data = self.workflow.last_task.data + if taskinfo['is_looping'] or taskinfo['is_sequential_mi']: + # if looping or sequential, we want to start from the beginning + self.internal_data['runtimes'] = 1 + for child in self.children: + if isinstance(child.task_spec,CallActivity): + self.children = [] # if we have a call activity, + # force reset of children. + self.workflow.last_task = self.parent + self.set_children_future() # this method actually fixes the problem + self._set_state(self.READY) + self.task_spec._predict(self) + self._sync_children(self.task_spec.outputs) + def _getstate(self): return self._state @@ -267,10 +406,10 @@ def _child_added_notify(self, child): assert child is not None self.children.append(child) - def _drop_children(self): + def _drop_children(self, force=False): drop = [] for child in self.children: - if not child._is_finished(): + if force or (not child._is_finished()): drop.append(child) else: child._drop_children() @@ -282,8 +421,10 @@ def _set_state(self, state, force=True): Setting force to True allows for changing a state after it COMPLETED. This would otherwise be invalid. """ + orig_state = self.state self._setstate(state, True) - self.last_state_change = time.time() + if state != orig_state: + self.last_state_change = time.time() def _has_state(self, state): """ diff --git a/SpiffWorkflow/util/deep_merge.py b/SpiffWorkflow/util/deep_merge.py new file mode 100644 index 000000000..f7464d143 --- /dev/null +++ b/SpiffWorkflow/util/deep_merge.py @@ -0,0 +1,41 @@ + +class DeepMerge(object): + # Merges two deeply nested json-like dictionaries, + # useful for updating things like task data. + # I know in my heart, that this isn't completely correct. 
+ # But I don't want to create a dependency, and this is passing + # all the failure points I've found so far. So I'll just + # keep plugging away at it. + + @staticmethod + def merge(a, b, path=None, update=True): + "merges b into a" + if path is None: path = [] + for key in b: + if key in a: + if a[key] == b[key]: + continue + elif isinstance(a[key], dict) and isinstance(b[key], dict): + DeepMerge.merge(a[key], b[key], path + [str(key)]) + elif isinstance(a[key], list) and isinstance(b[key], list): + DeepMerge.merge_array(a[key], b[key], path + [str(key)]) + else: + a[key] = b[key] # Just overwrite the value in a. + else: + a[key] = b[key] + return a + + @staticmethod + def merge_array(a, b, path=None): + + for idx, val in enumerate(b): + if isinstance(b[idx], dict): # Recurse back on dictionaries. + # If lists of dictionaries get out of order, this might + # cause us some pain. + if len(a) > idx: + a[idx] = DeepMerge.merge(a[idx], b[idx], path + [str(idx)]) + else: + a.append(b[idx]) + else: # Just merge whatever it is back in. + a.extend(x for x in b if x not in a) + diff --git a/SpiffWorkflow/workflow.py b/SpiffWorkflow/workflow.py index cac799192..a168e4a96 100644 --- a/SpiffWorkflow/workflow.py +++ b/SpiffWorkflow/workflow.py @@ -1,6 +1,8 @@ # -*- coding: utf-8 -*- from __future__ import division, absolute_import from __future__ import print_function + +import copy from builtins import next from builtins import object # Copyright (C) 2007 Samuel Abels @@ -25,10 +27,11 @@ from .util.compat import mutex from .util.event import Event from .exceptions import WorkflowException - +from .bpmn.specs.BoundaryEvent import _BoundaryEventParent LOG = logging.getLogger(__name__) + class Workflow(object): """ @@ -89,7 +92,7 @@ def is_completed(self): mask = Task.NOT_FINISHED_MASK iter = Task.Iterator(self.task_tree, mask) try: - next(iter) + nexttask = next(iter) except StopIteration: # No waiting tasks found. 
return True @@ -158,19 +161,23 @@ def get_task_spec_from_name(self, name): """ return self.spec.get_task_spec_from_name(name) - def get_task(self, id): + def get_task(self, id,tasklist=None): """ Returns the task with the given id. :type id:integer :param id: The id of a task. + :param tasklist: Optional cache of get_tasks for operations + where we are calling multiple times as when we + are deserializing the workflow :rtype: Task :returns: The task with the given id. """ - for task in self.get_tasks_iterator(): - if task.id == id: - return task - return None + if tasklist: + tasks = [task for task in tasklist if task.id == id] + else: + tasks = [task for task in self.get_tasks() if task.id == id] + return tasks[0] if len(tasks) == 1 else None def get_tasks_from_spec_name(self, name): """ @@ -184,6 +191,89 @@ def get_tasks_from_spec_name(self, name): return [task for task in self.get_tasks_iterator() if task.task_spec.name == name] + def empty(self,str): + if str == None: + return True + if str == '': + return True + return False + + def get_message_name_xlate(self): + message_name_xlate = {} + + alltasks = self.get_tasks() + tasks = [x for x in alltasks if (x.state == x.READY or x.state== x.WAITING or x.state==x.COMPLETED) + and hasattr(x.parent,'task_spec')] + #tasks = self.get_tasks(state=Task.READY) + + for task in tasks: + parent = task.parent + if hasattr(task.task_spec,'event_definition') \ + and hasattr(task.task_spec.event_definition,'message'): + message_name_xlate[task.task_spec.event_definition.name] = task.task_spec.event_definition.message + if isinstance(parent.task_spec,_BoundaryEventParent): + for sibling in parent.children: + if hasattr(sibling.task_spec,'event_definition') \ + and sibling.task_spec.event_definition is not None: + message_name_xlate[sibling.task_spec.event_definition.name] = \ + sibling.task_spec.event_definition.message + # doing this for the case that we have triggered the event and it is now completed + # but the task is still 
active, so we would like to be able to re-trigger the event + if sibling.state == Task.COMPLETED and task.state == Task.READY: + sibling._setstate(Task.WAITING, force=True) + return message_name_xlate + + def message(self,message_name,payload,resultVar): + + message_name_xlate = self.get_message_name_xlate() + + if message_name in message_name_xlate.keys() or \ + message_name in message_name_xlate.values(): + if message_name in message_name_xlate.keys(): + message_name = message_name_xlate[message_name] + self.task_tree.internal_data['messages'] = self.task_tree.internal_data.get('messages',{}) # ensure + self.task_tree.internal_data['messages'][message_name] = (payload,resultVar) + self.refresh_waiting_tasks() + self.do_engine_steps() + self.task_tree.internal_data['messages'] = {} + + def signal(self, message_name): + # breakpoint() + message_name_xlate = self.get_message_name_xlate() + + if message_name in message_name_xlate.keys() or \ + message_name in message_name_xlate.values(): + if message_name in message_name_xlate.keys(): + message_name = message_name_xlate[message_name] + self.task_tree.internal_data['signals'] = self.task_tree.internal_data.get('signals',{}) # ensure + self.task_tree.internal_data['signals'][message_name] = True + LOG.debug("signal Workflow instance: %s" % self.task_tree.internal_data) + self.refresh_waiting_tasks() + LOG.debug("signal Workflow instance: %s" % self.task_tree.internal_data) + self.do_engine_steps() + self.task_tree.internal_data['signals'] = {} + + def cancel_notify(self): + self.task_tree.internal_data['cancels'] = \ + self.task_tree.internal_data.get('cancels', {}) # ensure + self.task_tree.internal_data['cancels']['TokenReset'] = True + self.refresh_waiting_tasks() + self.do_engine_steps() + self.task_tree.internal_data['cancels'] = {} + + def get_flat_nav_list(self): + """Returns a navigation list with indentation hints, but the list + is completely flat, and a nav item has no children.""" + from . 
import navigation + return navigation.get_flat_nav_list(self) + + def get_deep_nav_list(self): + """Returns a nested navigation list, where indentation hints are + applied to recreate a deep structure.""" + from . import navigation + return navigation.get_deep_nav_list(self) + + def get_tasks(self, state=Task.ANY_MASK): """ Returns a list of Task objects with the given state. @@ -195,6 +285,21 @@ def get_tasks(self, state=Task.ANY_MASK): """ return [t for t in Task.Iterator(self.task_tree, state)] + def reset_task_from_id(self, task_id): + """ + Runs the task with the given id. + + :type task_id: integer + :param task_id: The id of the Task object. + """ + if task_id is None: + raise WorkflowException(self.spec, 'task_id is None') + for task in self.task_tree: + if task.id == task_id: + return task.reset_token() + msg = 'A task with the given task_id (%s) was not found' % task_id + raise WorkflowException(self.spec, msg) + def get_tasks_iterator(self, state=Task.ANY_MASK): """ Returns a iterator of Task objects with the given state. diff --git a/doc/Makefile b/doc/Makefile index ba2950fec..0bf4a3052 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -22,6 +22,7 @@ I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . .PHONY: help help: @echo "Please use \`make ' where is one of" + @echo " apidoc to build in the api documentation" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" diff --git a/doc/bpmn.rst b/doc/bpmn.rst index f758eb1d0..7fa5560b0 100644 --- a/doc/bpmn.rst +++ b/doc/bpmn.rst @@ -1,32 +1,140 @@ -.. _bpmn_page: - -Business Process Model and Notation (BPMN) -========================================== - -Business Process Model and Notation (BPMN) is a standard for business process modeling that -provides a graphical notation for specifying business processes, based on a flowcharting technique. 
-The objective of BPMN is to support business process management, for both technical users and business users, -by providing a notation that is intuitive to business users, yet able to represent complex -process semantics. The BPMN specification also provides a standard XML serialization format, which -is what Spiff Workflow parses. - -A reasonable subset of the BPMN notation is supported, including the following elements: - - 1. Call Activity - 2. Start Event - 3. End Event (including interrupting) - 4. User and Manual Tasks - 5. Script Task - 6. Exclusive Gateway - 7. Inclusive Gateway (converging only) - 8. Parallel Gateway - 9. Intermediate Catch Events (Timer and Message) - 10. Boundary Events (Timer and Message, interrupting and non-interrupting) - -.. figure:: figures/action-management.png - :alt: Example BPMN Workflow - - Example BPMN Workflow - -Please refer to http://www.bpmn.org/ for details on BPMN and to the API documentation for instructions on the -use of the BPMN implementation. +.. _bpmn_page: + +Business Process Model and Notation (BPMN) +========================================== + +Business Process Model and Notation (BPMN) is a standard for business process modeling that +provides a graphical notation for specifying business processes, based on a flowcharting technique. +The objective of BPMN is to support business process management, for both technical users and business users, +by providing a notation that is intuitive to business users, yet able to represent complex +process semantics. The BPMN specification also provides a standard XML serialization format, which +is what Spiff Workflow parses. + +A reasonable subset of the BPMN notation is supported, including the following elements: + + 1. Call Activity + 2. Start Event + 3. End Event (including interrupting) + 4. User and Manual Tasks + 5. Script Task + 6. Exclusive Gateway + 7. Inclusive Gateway (converging only) + 8. Parallel Gateway + 9. MultiInstance & Variants + 10. 
Intermediate Catch Events (Timer and Message) + 11. Boundary Events (Timer and Message, interrupting and non-interrupting) + +.. figure:: figures/action-management.png + :alt: Example BPMN Workflow + + Example BPMN Workflow + +Please refer to http://www.bpmn.org/ for details on BPMN and to the API documentation for instructions on the +use of the BPMN implementation. + +MultiInstance Notes +------------------- + +A subset of MultiInstance and Looping Tasks are supported. Notably, +the completion condition is not currently supported. + +The following definitions should prove helpful + +**loopCardinality** - This variable can be a text representation of a +number - for example '2' or it can be the name of a variable in +task.data that resolves to a text representation of a number. +It can also be a collection such as a list or a dictionary. In the +case that it is a list, the loop cardinality is equal to the length of +the list and in the case of a dictionary, it is equal to the list of +the keys of the dictionary. + +If loopCardinality is left blank and the Collection is defined, or if +loopCardinality and Collection are the same collection, then the +Multiinstnace will loop over the collection and update each element of +that collection with the new information. In this case, it is assumed +that the incoming collection is a dictionary, currently behavior for +working with a list in this manner is not defined and will raise an error. + +**Collection** This is the name of the collection that is created from +the data generated when the task is run. Examples of this would be +form data that is generated from a UserTask or data that is generated +from a script that is run. Currently the collection is built up to be +a dictionary with a numeric key that corresponds to the place in the +loopCardinality. 
For example, if we set the loopCardinality to be a +list such as ['a','b','c] the resulting collection would be {1:'result +from a',2:'result from b',3:'result from c'} - and this would be true +even if it is a parallel MultiInstance where it was filled out in a +different order. + +**Element Variable** This is the variable name for the current +iteration of the MultiInstance. In the case of the loopCardinality +being just a number, this would be 1,2,3, . . . If the +loopCardinality variable is mapped to a collection it would be either +the list value from that position, or it would be the value from the +dictionary where the keys are in sorted order. It is the content of the +element variable that should be updated in the task.data. This content +will then be added to the collection each time the task is completed. + +Example: + In a sequential MultiInstance, loop cardinality is ['a','b','c'] and elementVariable is 'myvar' + then in the case of a sequential multiinstance the first call would + have 'myvar':'a' in the first run of the task and 'myvar':'b' in the + second. + +Example: + In a Parallel MultiInstance, Loop cardinality is a variable that contains + {'a':'A','b':'B','c':'C'} and elementVariable is 'myvar' - when the multiinstance is ready, there + will be 3 tasks. If we choose the second task, the task.data will + contain 'myvar':'B'. + +Updating Data +------------ + +While there may be some MultiInstances that will not result in any +data, most of the time there will be some kind of data generated that +will be collected from the MultiInstance. A good example of this is a +UserTask that has an associated form or a script that will do a lookup +on a variable. + +Each time the MultiInstance task generates data, the method +task.update_data(data) should be called where data is the data +generated. The 'data' variable that is passed in is assumed to be a +dictionary that contains the element variable. Calling task.update_data(...) 
+will ensure that the MultiInstance gets the correct data to include in the +collection. The task.data is also updated with the dictionary passed to +this method. + +Example: + In a Parallel MultiInstance, Loop cardinality is a variable that contains + {'a':'A','b':'B','c':'C'} and elementVariable is 'myvar'. + If we choose the second task, the task.data will contain 'myvar':{'b':'B'}. + If we wish to update the data, we would call task.update_data('myvar':{'b':'B2'}) + When the task is completed, the task.data will now contain: + {'a':'A','b':'B2','c':'C'} + +Looping Tasks +------------- + +A looping task sets the cardinality to 25 which is assumed to be a +sane maximum value. The looping task will add to the collection each +time it is processed assuming data is updated as outlined in the +previous paragraph. + +To halt the looping the task.terminate_loop() + +Each time task.complete() is called (or +workflow.complete_task_by_id(task.id) ), the task will again present +as READY until either the cardinality is exausted, or +task.terminate_loop() is called. + +**Caveats** +----------- + +At the current time a sequential MultiInstance behaves more like a +Looping Task than a MultiInstance - A true MultiInstace would actually +create multiple copies of the task in the task tree - currently only +one task is created and it is repeated the number of the +loopCardinality + + + diff --git a/doc/conf.py b/doc/conf.py index a6ec0a776..0e06893ee 100755 --- a/doc/conf.py +++ b/doc/conf.py @@ -16,11 +16,12 @@ # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. + sys.path.insert(0, os.path.abspath('..')) # Auto-generate API documentation. 
-from sphinx.apidoc import main -main(['-d5', '-Mef', '-o', '.', '../SpiffWorkflow']) +#from sphinx.apidoc import main +#sphinx.apidoc(['-d5', '-Mef', '-o', '.', '../SpiffWorkflow']) # -- General configuration ----------------------------------------------------- diff --git a/doc/figures/nuclear_strike.bpmn b/doc/figures/nuclear_strike.bpmn new file mode 100644 index 000000000..d3fbd7a94 --- /dev/null +++ b/doc/figures/nuclear_strike.bpmn @@ -0,0 +1,71 @@ + + + + + + + + + + SequenceFlow_1xrbp0m + + + SequenceFlow_1xrbp0m + SequenceFlow_1vwfrws + SequenceFlow_0x0u589 + + + SequenceFlow_1vwfrws + + + SequenceFlow_0x0u589 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/tutorial/nuclear.json b/doc/tutorial/nuclear.json index d8b6f84f0..7f0efb2a8 100644 --- a/doc/tutorial/nuclear.json +++ b/doc/tutorial/nuclear.json @@ -2,21 +2,23 @@ "task_specs": { "Start": { "class": "SpiffWorkflow.specs.StartTask.StartTask", + "id" : 1, "manual": false, "outputs": [ - "general" + 2 ] }, "general": { "class": "SpiffWorkflow.specs.ExclusiveChoice.ExclusiveChoice", "name": "general", + "id" : 2, "manual": true, "inputs": [ - "Start" + 1 ], "outputs": [ - "workflow_aborted", - "president" + 5, + 3 ], "choice": null, "default_task_spec": "workflow_aborted", @@ -42,13 +44,14 @@ "president": { "class": "SpiffWorkflow.specs.ExclusiveChoice.ExclusiveChoice", "name": "president", + "id" : 3, "manual": true, "inputs": [ - "general" + 2 ], "outputs": [ - "workflow_aborted", - "nuclear_strike" + 5, + 4 ], "choice": null, "default_task_spec": "workflow_aborted", @@ -72,18 +75,20 @@ ] }, "nuclear_strike": { + "id" : 4, "class": "SpiffWorkflow.specs.Simple.Simple", "name": "nuclear_strike", "inputs": [ - "president" + 3 ] }, "workflow_aborted": { + "id" : 5, "class": "SpiffWorkflow.specs.Cancel.Cancel", "name": "workflow_aborted", "inputs": [ - "general", - "president" + 2, + 3 ] } }, diff --git a/requirements.txt 
b/requirements.txt index 9c558e357..2f5f7369d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1 +1,4 @@ +celery +python-Levenshtein +lxml . diff --git a/setup.py b/setup.py index 58ab99b32..81036c576 100644 --- a/setup.py +++ b/setup.py @@ -23,7 +23,8 @@ author_email = 'cheeseshop.python.org@debain.org', license = 'lGPLv2', packages = find_packages(exclude=['tests', 'tests.*']), - install_requires = ['future', 'configparser', 'lxml'], + install_requires = ['future', 'configparser', 'lxml', 'celery', + 'python-levenshtein', 'python-box'], keywords = 'spiff workflow bpmn engine', url = 'https://github.com/knipknap/SpiffWorkflow', classifiers = [ diff --git a/tests/SpiffWorkflow/PatternTest.py b/tests/SpiffWorkflow/PatternTest.py index 6902b70a2..c0ec8153a 100644 --- a/tests/SpiffWorkflow/PatternTest.py +++ b/tests/SpiffWorkflow/PatternTest.py @@ -11,7 +11,7 @@ from SpiffWorkflow.specs import * from SpiffWorkflow import Task from SpiffWorkflow.serializer.prettyxml import XmlSerializer -from util import run_workflow +from tests.SpiffWorkflow.util import run_workflow class WorkflowTestData(object): diff --git a/tests/SpiffWorkflow/bpmn/AntiLoopTaskTest.py b/tests/SpiffWorkflow/bpmn/AntiLoopTaskTest.py new file mode 100644 index 000000000..7fb0acdc8 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/AntiLoopTaskTest.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import unittest +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from SpiffWorkflow.exceptions import WorkflowException +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'kellym' + + +class AntiLoopTaskTest(BpmnWorkflowTestCase): + """The example bpmn is actually a MultiInstance. 
It should not report that it is a looping task and + it should fail when we try to terminate the loop""" + + def setUp(self): + self.spec = self.load_workflow1_spec() + + def load_workflow1_spec(self): + return self.load_workflow_spec('bpmnAntiLoopTask.bpmn','LoopTaskTest') + + def testRunThroughHappy(self): + + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_ready_user_tasks() + self.assertTrue(len(ready_tasks) ==1) + self.assertFalse(ready_tasks[0].task_spec.is_loop_task()) + try: + ready_tasks[0].terminate_loop() + self.fail("Terminate Loop should throw and error when called on a non-loop MultiInstance") + except WorkflowException as ex: + self.assertTrue( + 'The method terminate_loop should only be called in the case of a BPMN Loop Task' in ( + '%r' % ex), + '\'The method terminate_loop should only be called in the case of a BPMN Loop Task\' should be a substring of error message: \'%r\'' % ex) + + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(AntiLoopTaskTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/BpmnLoaderForTests.py b/tests/SpiffWorkflow/bpmn/BpmnLoaderForTests.py index 98891502f..b9af66d33 100644 --- a/tests/SpiffWorkflow/bpmn/BpmnLoaderForTests.py +++ b/tests/SpiffWorkflow/bpmn/BpmnLoaderForTests.py @@ -32,6 +32,10 @@ def do_choice(self, task, choice): task.set_data(choice=choice) task.complete() + @classmethod + def deserialize(self, serializer, wf_spec, s_state): + return serializer.deserialize_generic(wf_spec, s_state, TestUserTask) + class TestEndEvent(EndEvent): @@ -40,12 +44,29 @@ def _on_complete_hook(self, my_task): super(TestEndEvent, self)._on_complete_hook(my_task) + def serialize(self, serializer): + return serializer.serialize_end_event(self) + + + @classmethod + def deserialize(self, serializer, wf_spec, s_state): + return serializer.deserialize_end_event(wf_spec, s_state, 
TestEndEvent) + + + + + + class TestCallActivity(CallActivity): def __init__(self, parent, name, **kwargs): super(TestCallActivity, self).__init__(parent, name, out_assign=[Assign('choice', 'end_event')], **kwargs) + @classmethod + def deserialize(self, serializer, wf_spec, s_state): + return serializer.deserialize_call_activity(wf_spec, s_state, TestCallActivity) + class TestBpmnParser(BpmnParser): OVERRIDE_PARSER_CLASSES = { diff --git a/tests/SpiffWorkflow/bpmn/BpmnSerializerTest.py b/tests/SpiffWorkflow/bpmn/BpmnSerializerTest.py new file mode 100644 index 000000000..8b539d2d7 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/BpmnSerializerTest.py @@ -0,0 +1,95 @@ +import os +import unittest +from SpiffWorkflow.bpmn.serializer.BpmnSerializer import BpmnSerializer +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.PackagerForTests import PackagerForTests + + + +class BpmnSerializerTest(unittest.TestCase): + CORRELATE = BpmnSerializer + + def load_workflow_spec(self, filename, process_name): + f = os.path.join(os.path.dirname(__file__), 'data', filename) + + return BpmnSerializer().deserialize_workflow_spec( + PackagerForTests.package_in_memory(process_name, f)) + + def setUp(self): + super(BpmnSerializerTest, self).setUp() + self.serializer = BpmnSerializer() + self.spec = self.load_workflow_spec('random_fact.bpmn', 'random_fact') + self.workflow = BpmnWorkflow(self.spec) + self.return_type = str + + def testDeserializeWorkflowSpec(self): + self.assertIsNotNone(self.spec) + + # def testSerializeWorkflowSpec(self): + # with self.assertRaises(NotImplementedError): + # self.serializer.serialize_workflow_spec(self.spec) + + def testSerializeWorkflow(self): + json = self.serializer.serialize_workflow(self.workflow) + print(json) + + def testDeserializeWorkflow(self): + self._compare_with_deserialized_copy(self.workflow) + + def testDeserializeCallActivityChildren(self): + """Tested as a part of deserialize workflow.""" + pass + + def 
testSerializeTask(self): + json = self.serializer.serialize_workflow(self.workflow) + print(json) + + def testDeserializeTask(self): + self._compare_with_deserialized_copy(self.workflow) + + def testDeserializeActiveWorkflow(self): + self.workflow.do_engine_steps() + self._compare_with_deserialized_copy(self.workflow) + + def testDeserializeWithData(self): + self.workflow.data["test"] = "my_test" + json = self.serializer.serialize_workflow(self.workflow) + wf2 = self.serializer.deserialize_workflow(json, workflow_spec=self.spec) + self.assertEqual('my_test', wf2.get_data("test")) + + def testDeserializeWithDataOnTask(self): + self.workflow.do_engine_steps() + user_task = self.workflow.get_ready_user_tasks()[0] + user_task.data = {"test":"my_test"} + self._compare_with_deserialized_copy(self.workflow) + + def testLastTaskIsSetAndWorksThroughRestore(self): + self.workflow.do_engine_steps() + json = self.serializer.serialize_workflow(self.workflow) + wf2 = self.serializer.deserialize_workflow(json, workflow_spec=self.spec) + self.assertIsNotNone(self.workflow.last_task) + self.assertIsNotNone(wf2.last_task) + self._compare_workflows(self.workflow, wf2) + + def _compare_with_deserialized_copy(self, wf): + json = self.serializer.serialize_workflow(wf) + wf2 = self.serializer.deserialize_workflow(json, workflow_spec=self.spec) + self._compare_workflows(wf, wf2) + + def _compare_workflows(self, w1, w2): + self.assertIsInstance(w1, BpmnWorkflow) + self.assertIsInstance(w2, BpmnWorkflow) + self.assertEqual(w1.data, w2.data) + self.assertEqual(w1.name, w2.name) + for task in w1.get_ready_user_tasks(): + w2_task = w2.get_task(task.id) + self.assertIsNotNone(w2_task) + self.assertEqual(task.data, w2_task.data) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(BpmnSerializerTest) + + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/BpmnWorkflowTestCase.py 
b/tests/SpiffWorkflow/bpmn/BpmnWorkflowTestCase.py index 8e9c91fce..eff12e71e 100644 --- a/tests/SpiffWorkflow/bpmn/BpmnWorkflowTestCase.py +++ b/tests/SpiffWorkflow/bpmn/BpmnWorkflowTestCase.py @@ -1,11 +1,14 @@ # -*- coding: utf-8 -*- from __future__ import print_function, absolute_import, division + +import json import logging import os import unittest + +from SpiffWorkflow import NavItem from SpiffWorkflow.task import Task from SpiffWorkflow.bpmn.serializer.BpmnSerializer import BpmnSerializer -from SpiffWorkflow.bpmn.serializer.CompactWorkflowSerializer import CompactWorkflowSerializer from tests.SpiffWorkflow.bpmn.PackagerForTests import PackagerForTests __author__ = 'matth' @@ -21,7 +24,7 @@ def load_workflow_spec(self, filename, process_name): def do_next_exclusive_step(self, step_name, with_save_load=False, set_attribs=None, choice=None): if with_save_load: - self.save_restore() + self.save_restore_all() self.workflow.do_engine_steps() tasks = self.workflow.get_tasks(Task.READY) @@ -83,28 +86,56 @@ def _do_single_step(self, step_name, tasks, set_attribs=None, choice=None, only_ tasks[0].set_data(**set_attribs) tasks[0].complete() - def save_restore(self): - state = self._get_workflow_state() + def save_restore(self,spec_from_state=True): + + state = self._get_workflow_state(include_spec=spec_from_state) logging.debug('Saving state: %s', state) before_dump = self.workflow.get_dump() - self.restore(state) + self.restore(state,spec_from_state=spec_from_state) # We should still have the same state: after_dump = self.workflow.get_dump() - after_state = self._get_workflow_state() + after_state = self._get_workflow_state(do_steps=False,include_spec=spec_from_state) + if state != after_state: logging.debug("Before save:\n%s", before_dump) logging.debug("After save:\n%s", after_dump) + self.maxDiff = None + self.assertEqual(before_dump, after_dump) self.assertEqual(state, after_state) - def restore(self, state): - self.workflow = 
CompactWorkflowSerializer().deserialize_workflow( - state, workflow_spec=self.spec) + + def restore(self, state, spec_from_state=False): + if spec_from_state: + self.workflow = BpmnSerializer().deserialize_workflow( + state, workflow_spec=None) + else: + self.workflow = BpmnSerializer().deserialize_workflow( + state, workflow_spec=self.spec) def get_read_only_workflow(self): state = self._get_workflow_state() - return CompactWorkflowSerializer().deserialize_workflow(state, workflow_spec=self.spec, read_only=True) - - def _get_workflow_state(self): - self.workflow.do_engine_steps() - self.workflow.refresh_waiting_tasks() - return CompactWorkflowSerializer().serialize_workflow(self.workflow, include_spec=False) + return BpmnSerializer().deserialize_workflow(state, workflow_spec=self.spec, read_only=True) + + def _get_workflow_state(self,do_steps=True,include_spec=True): + if do_steps: + self.workflow.do_engine_steps() + self.workflow.refresh_waiting_tasks() + return BpmnSerializer().serialize_workflow(self.workflow, include_spec=include_spec) + + def assertNav(self, nav_item: NavItem, name=None, description=None, + spec_type=None, indent=None, state=None, lane=None, + backtrack_to=None): + if name: + self.assertEqual(name, nav_item.name) + if description: + self.assertEqual(description, nav_item.description) + if spec_type: + self.assertEqual(spec_type, nav_item.spec_type) + if indent: + self.assertEqual(indent, nav_item.indent) + if state: + self.assertEqual(state, nav_item.state) + if lane: + self.assertEqual(lane, nav_item.lane) + if backtrack_to: + self.assertEqual(backtrack_to, nav_item.backtrack_to) diff --git a/tests/SpiffWorkflow/bpmn/CallActivitySubProcessPropTest.py b/tests/SpiffWorkflow/bpmn/CallActivitySubProcessPropTest.py new file mode 100644 index 000000000..1d679ab51 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/CallActivitySubProcessPropTest.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division 
+ +from __future__ import division, absolute_import +import sys +import os +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + + +class CallActivitySubProcessPropTest(BpmnWorkflowTestCase): + """ + Make sure that workflow.data propagates to the subworkflows + in a BPMN + """ + + def setUp(self): + self.filename = 'proptest-*.bpmn' + self.process_name = 'TopLevel' + self.spec = self.load_workflow1_spec() + + + + def load_workflow1_spec(self): + return self.load_workflow_spec(self.filename, self.process_name) + + def testSaveRestore(self): + self.actualTest(True) + + def actualTest(self, save_restore=False): + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + if save_restore: + self.save_restore() + self.assertTrue(self.workflow.is_completed()) + self.assertEqual(self.workflow.data['valA'],1) + self.assertEqual(self.workflow.data['valB'],1) + self.assertEqual(self.workflow.data['valC'],1) + self.assertEqual(self.workflow.data['valD'],1) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(CallActivitySubProcessPropTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/CancelBoundaryEventTest.py b/tests/SpiffWorkflow/bpmn/CancelBoundaryEventTest.py new file mode 100644 index 000000000..1a2105cc8 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/CancelBoundaryEventTest.py @@ -0,0 +1,88 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import unittest +from SpiffWorkflow.task import Task +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'michaelc' + + +class 
CancelBoundaryTest(BpmnWorkflowTestCase): + + def setUp(self): + self.spec = self.load_spec() + + def load_spec(self): + return self.load_workflow_spec('message_event.bpmn', 'Process_1dagb7t') + + def testBoundaryNavigation(self): + # get the workflow + self.workflow = BpmnWorkflow(self.spec) + # do engine steps + self.workflow.do_engine_steps() + nav = self.workflow.get_flat_nav_list() + nav_deep = self.workflow.get_deep_nav_list() + self.assertEquals(7, len(nav)) + self.assertNav(nav_item=nav[4], state="MAYBE", description="TokenReset") + + ready_tasks = self.workflow.get_tasks(Task.READY) + ready_tasks[0].update_data(data={'formdata': 'asdf'}) + self.workflow.complete_task_from_id(ready_tasks[0].id) + self.workflow.do_engine_steps() + nav = self.workflow.get_flat_nav_list() + print(nav) + self.assertEquals(7, len(nav)) + self.assertNav(nav_item=nav[4], state="WAITING", description="TokenReset") + + def testCancelEvent(self): + # get the workflow + self.workflow = BpmnWorkflow(self.spec) + # do engine steps + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_tasks(Task.READY) + # task = ready_tasks[0] + # task is Activity_GetData which has a form + ready_tasks[0].update_data(data={'formdata': 'asdf'}) + self.workflow.complete_task_from_id(ready_tasks[0].id) + self.save_restore() + # refresh and do engine steps again + self.workflow.refresh_waiting_tasks() + self.workflow.do_engine_steps() + # this gets us to Activity_HowMany, where we cancel the workflow + self.workflow.cancel_notify() + self.assertEqual(self.workflow.last_task.data['title'], 'New Title') + # assert that Activity_TestMessage state is Completed + self.assertEqual(self.workflow.last_task.get_name(), 'Activity_TestMessage') + self.assertEqual(self.workflow.last_task.get_state(), 32) + + def testNoCancelEvent(self): + # get the workflow + self.workflow = BpmnWorkflow(self.spec) + # do engine steps + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_tasks(Task.READY) + 
# task = ready_tasks[0] + # task is Activity_GetData which has a form + ready_tasks[0].update_data(data={'formdata': 'asdf'}) + self.workflow.complete_task_from_id(ready_tasks[0].id) + # refresh and do engine steps again + self.workflow.refresh_waiting_tasks() + self.workflow.do_engine_steps() + # this time we don't cancel + # 'title' should not be in last_task.data + self.assertNotIn('title', self.workflow.last_task.data) + # and Activity_HowMany should be Completed + self.assertEqual(self.workflow.last_task.get_name(), 'Activity_HowMany.BoundaryEventParent') + self.assertEqual(self.workflow.last_task.get_state(), 32) + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(CancelBoundaryTest) + + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/ClashingName2Test.py b/tests/SpiffWorkflow/bpmn/ClashingName2Test.py new file mode 100644 index 000000000..87ca4e979 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/ClashingName2Test.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import sys +import os +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) + +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase +from SpiffWorkflow.bpmn.parser.ValidationException import ValidationException +__author__ = 'kellym' + + + +class ClashingNameTest2(BpmnWorkflowTestCase): + """The example bpmn diagram tests both a set cardinality from user input + as well as looping over an existing array.""" + + def setUp(self): + pass + def loadWorkflow(self): + self.spec = self.load_workflow_spec( + 'Approvals_bad.bpmn', + 'Approvals') + def testRunThroughHappy(self): + # make sure we raise an exception + # when validating a workflow with multiple + # same IDs in the BPMN workspace + 
self.assertRaises(ValidationException,self.loadWorkflow) + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ClashingNameTest2) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/ExclusiveGatewayIntoMultiInstanceTest.py b/tests/SpiffWorkflow/bpmn/ExclusiveGatewayIntoMultiInstanceTest.py new file mode 100644 index 000000000..79b77ae7b --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/ExclusiveGatewayIntoMultiInstanceTest.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import sys +import os +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + + +class ExclusiveGatewayIntoMultiInstanceTest(BpmnWorkflowTestCase): + """In the example BPMN Diagram we set x = 0, then we have an + exclusive gateway that should skip over a parallel multi-instance + class, so it should run straight through and complete without issue.""" + + def setUp(self): + self.spec = self.load_workflow1_spec() + + def load_workflow1_spec(self): + return self.load_workflow_spec('exclusive_into_multi.bpmn','ExclusiveToMulti') + + def testRunThroughHappy(self): + + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + self.assertTrue(self.workflow.is_completed()) + + def testSaveRestore(self): + + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + self.assertTrue(self.workflow.is_completed()) + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ExclusiveGatewayIntoMultiInstanceTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/ExternalMessageBoundaryEventTest.py 
b/tests/SpiffWorkflow/bpmn/ExternalMessageBoundaryEventTest.py new file mode 100644 index 000000000..1800a6cac --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/ExternalMessageBoundaryEventTest.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import unittest +import datetime +import time +from SpiffWorkflow.task import Task +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'kellym' + + +class ExternalMessageBoundaryTest(BpmnWorkflowTestCase): + + def setUp(self): + self.spec = self.load_spec() + + def load_spec(self): + return self.load_workflow_spec('external_message.bpmn', 'ExternalMessage') + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testThroughSaveRestore(self): + self.actual_test(save_restore=True) + + + + def actual_test(self,save_restore = False): + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_tasks(Task.READY) + self.assertEqual(1, len(ready_tasks),'Expected to have only one ready task') + self.workflow.message('Interrupt','SomethingImportant','interrupt_var') + ready_tasks = self.workflow.get_tasks(Task.READY) + self.assertEqual(2,len(ready_tasks),'Expected to have only one ready task') + # here because the thread just dies and doesn't lead to a task, we expect the data + # to die with it. 
+ # item 1 should be at 'Pause' + self.assertEqual('Pause',ready_tasks[1].task_spec.description) + self.assertEqual('SomethingImportant', ready_tasks[1].data['interrupt_var']) + self.assertEqual(True, ready_tasks[1].data['caughtinterrupt']) + self.assertEqual('Meaningless User Task',ready_tasks[0].task_spec.description) + self.assertEqual(False, ready_tasks[0].data['caughtinterrupt']) + self.workflow.complete_task_from_id(ready_tasks[1].id) + self.workflow.do_engine_steps() + # what I think is going on here is that when we hit the reset, it is updating the + # last_task and appending the data to whatever happened there, so it would make sense that + # we have the extra variables that happened in 'pause' + # if on the other hand, we went on from 'meaningless task' those variables would not get added. + self.workflow.message('reset','SomethingDrastic','reset_var') + ready_tasks = self.workflow.get_tasks(Task.READY) + self.assertEqual(1, len(ready_tasks),'Expected to have only one ready task') + self.assertEqual('SomethingDrastic', ready_tasks[0].data['reset_var']) + self.assertEqual(False, ready_tasks[0].data['caughtinterrupt']) + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ExternalMessageBoundaryTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/InvalidWorkflowsTest.py b/tests/SpiffWorkflow/bpmn/InvalidWorkflowsTest.py index a2dd90eb1..51e542297 100644 --- a/tests/SpiffWorkflow/bpmn/InvalidWorkflowsTest.py +++ b/tests/SpiffWorkflow/bpmn/InvalidWorkflowsTest.py @@ -56,28 +56,29 @@ def testNoStartEvent(self): self.assertTrue('No Start Event' in ('%r' % ex), '\'No Start Event\' should be a substring of error message: \'%r\'' % ex) - def testMultipleStartEvents(self): - try: - self.load_workflow_spec( - 'Invalid-Workflows/Multiple-Start-Events.bpmn20.xml', 'Multiple Start Events') - self.fail( - "self.load_workflow_spec('Invalid-Workflows/Multiple-Start-Events.bpmn20.xml', 
'Multiple Start Events') should fail.") - except ValidationException as ex: - self.assertTrue( - 'Only one Start Event is supported in each process' in ( - '%r' % ex), - '\'Only one Start Event is supported in each process\' should be a substring of error message: \'%r\'' % ex) -# self.assertTrue('line 10' in ('%r'%ex), -# '\'line 10\' should be a substring of error message: \'%r\'' % ex) - self.assertTrue('Multiple-Start-Events.bpmn20.xml' in ('%r' % ex), - '\'Multiple-Start-Events.bpmn20.xml\' should be a substring of error message: \'%r\'' % ex) - self.assertTrue('process' in ('%r' % ex), - '\'process\' should be a substring of error message: \'%r\'' % ex) - self.assertTrue( - 'sid-1e457abc-2ee3-4d60-a4df-d2ddf5b18c2b' in ('%r' % ex), - '\'sid-1e457abc-2ee3-4d60-a4df-d2ddf5b18c2b\' should be a substring of error message: \'%r\'' % ex) - self.assertTrue('Multiple Start Events' in ('%r' % ex), - '\'Multiple Start Events\' should be a substring of error message: \'%r\'' % ex) +# Removed as we now allow a start catching message +# def testMultipleStartEvents(self): +# try: +# self.load_workflow_spec( +# 'Invalid-Workflows/Multiple-Start-Events.bpmn20.xml', 'Multiple Start Events') +# self.fail( +# "self.load_workflow_spec('Invalid-Workflows/Multiple-Start-Events.bpmn20.xml', 'Multiple Start Events') should fail.") +# except ValidationException as ex: +# self.assertTrue( +# 'Only one Start Event is supported in each process' in ( +# '%r' % ex), +# '\'Only one Start Event is supported in each process\' should be a substring of error message: \'%r\'' % ex) +# # self.assertTrue('line 10' in ('%r'%ex), +# # '\'line 10\' should be a substring of error message: \'%r\'' % ex) +# self.assertTrue('Multiple-Start-Events.bpmn20.xml' in ('%r' % ex), +# '\'Multiple-Start-Events.bpmn20.xml\' should be a substring of error message: \'%r\'' % ex) +# self.assertTrue('process' in ('%r' % ex), +# '\'process\' should be a substring of error message: \'%r\'' % ex) +# self.assertTrue( +# 
'sid-1e457abc-2ee3-4d60-a4df-d2ddf5b18c2b' in ('%r' % ex), +# '\'sid-1e457abc-2ee3-4d60-a4df-d2ddf5b18c2b\' should be a substring of error message: \'%r\'' % ex) +# self.assertTrue('Multiple Start Events' in ('%r' % ex), +# '\'Multiple Start Events\' should be a substring of error message: \'%r\'' % ex) def testSubprocessNotFound(self): try: diff --git a/tests/SpiffWorkflow/bpmn/LoopBackNavTest.py b/tests/SpiffWorkflow/bpmn/LoopBackNavTest.py new file mode 100644 index 000000000..4012a4104 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/LoopBackNavTest.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import unittest +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'kellym' + + +class LoopBackNavTest(BpmnWorkflowTestCase): + """The example bpmn diagram follows a looping structure where a gateway + may send the token back to a previously executed task. 
This test assures + that navigation works correctly in that circumstance.""" + + def setUp(self): + self.spec = self.load_workflow1_spec() + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + + def load_workflow1_spec(self): + return self.load_workflow_spec('LoopBackNav.bpmn', 'LoopBackNav') + + def testRunThroughHappy(self): + + ready_tasks = self.workflow.get_ready_user_tasks() + + self.assertTrue(len(ready_tasks) == 1) + task = ready_tasks[0] + # + nav = self.workflow.get_flat_nav_list() + self.assertEqual(9, len(nav), "Navigation should include 6 elements, " + "start, the task, gateway, and true," + " false, and end paths.") + + self.assertEqual("Loop Again?", nav[1].description) + self.assertEqual("READY", nav[1].state) + + task.data = {"loop_again":True} + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + self.assertFalse(self.workflow.is_completed()) + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual("Why?", task.task_spec.description) + self.workflow.complete_task_from_id(task.id) + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual("Loop Again?", task.task_spec.description) + + nav = self.workflow.get_flat_nav_list() + self.assertEqual("Loop Again?", nav[1].description) + self.assertEqual("READY", nav[1].state) + + task = self.workflow.get_ready_user_tasks()[0] + task.data = {"loop_again": False} + self.workflow.complete_task_from_id(task.id) + nav = self.workflow.get_flat_nav_list() + self.assertEqual("COMPLETED", nav[1].state) + + self.workflow.do_engine_steps() + self.assertTrue(self.workflow.is_completed()) + + def testDeepNavigation(self): + nav = self.workflow.get_deep_nav_list() + print(nav) + self.assertNav(nav[1], description="Loop Again?", state="READY") + self.assertNav(nav[2], spec_type="ExclusiveGateway", state="LIKELY") + self.assertNav(nav[2].children[1].children[1], description="Loop Again?", + backtrack_to=nav[1].name) + pass + +def suite(): + return 
unittest.TestLoader().loadTestsFromTestCase(LoopBackNavTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/LoopTaskTest.py b/tests/SpiffWorkflow/bpmn/LoopTaskTest.py new file mode 100644 index 000000000..f2a25e865 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/LoopTaskTest.py @@ -0,0 +1,67 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import unittest +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'kellym' + + +class LoopTaskTest(BpmnWorkflowTestCase): + """The example bpmn diagram has a single task with a loop cardinality of 5. + It should repeat 5 times before termination.""" + + def setUp(self): + self.spec = self.load_workflow1_spec() + + def load_workflow1_spec(self): + return self.load_workflow_spec('bpmnLoopTask.bpmn','LoopTaskTest') + + def testRunThroughHappy(self): + + self.workflow = BpmnWorkflow(self.spec) + for i in range(5): + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_ready_user_tasks() + self.assertTrue(len(ready_tasks) ==1) + self.assertTrue(ready_tasks[0].task_spec.is_loop_task()) + self.assertFalse(self.workflow.is_completed()) + last_task = self.workflow.last_task + + self.do_next_exclusive_step('Activity_TestLoop') + + ready_tasks = self.workflow.get_ready_user_tasks() + self.assertTrue(len(ready_tasks) ==1) + ready_tasks[0].terminate_loop() + self.do_next_exclusive_step('Activity_TestLoop') + self.workflow.do_engine_steps() + self.assertTrue(self.workflow.is_completed()) + + + def testSaveRestore(self): + + + self.workflow = BpmnWorkflow(self.spec) + for i in range(5): + self.save_restore() + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_ready_user_tasks() + self.assertTrue(len(ready_tasks) ==1) + 
self.assertTrue(ready_tasks[0].task_spec.is_loop_task()) + self.assertFalse(self.workflow.is_completed()) + self.do_next_exclusive_step('Activity_TestLoop') + + ready_tasks = self.workflow.get_ready_user_tasks() + self.assertTrue(len(ready_tasks) ==1) + ready_tasks[0].terminate_loop() + self.do_next_exclusive_step('Activity_TestLoop') + self.workflow.do_engine_steps() + self.assertTrue(self.workflow.is_completed()) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(LoopTaskTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/MessageBoundaryEventTest.py b/tests/SpiffWorkflow/bpmn/MessageBoundaryEventTest.py new file mode 100644 index 000000000..39bab2e61 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/MessageBoundaryEventTest.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import unittest +import datetime +import time +from SpiffWorkflow.task import Task +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'kellym' + + +class MessageBoundaryTest(BpmnWorkflowTestCase): + + def setUp(self): + self.spec = self.load_spec() + + def load_spec(self): + return self.load_workflow_spec('MessageBoundary.bpmn', 'MessageBoundary') + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testThroughSaveRestore(self): + self.actual_test(save_restore=True) + + + + def actual_test(self,save_restore = False): + steps = [('Activity_Interrupt', {'interrupt_task':'No'}), + ('Activity_Interrupt', {'interrupt_task': 'No'}), + ('Activity_Interrupt', {'interrupt_task': 'Yes'}), + ] + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_tasks(Task.READY) + self.assertEqual(2, len(ready_tasks),'Expected to have two 
ready tasks') + for step in steps: + for task in ready_tasks: + if task.task_spec.name == step[0]: + task.update_data(step[1]) + + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + time.sleep(.05) + self.workflow.refresh_waiting_tasks() + if save_restore: self.save_restore() + ready_tasks = self.workflow.get_tasks(Task.READY) + time.sleep(.1) + self.workflow.refresh_waiting_tasks() + self.workflow.do_engine_steps() + self.assertEqual(self.workflow.is_completed(),True,'Expected the workflow to be complete at this point') + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(MessageBoundaryTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/MessagesTest.py b/tests/SpiffWorkflow/bpmn/MessagesTest.py index c95d115ae..271eff878 100644 --- a/tests/SpiffWorkflow/bpmn/MessagesTest.py +++ b/tests/SpiffWorkflow/bpmn/MessagesTest.py @@ -3,8 +3,6 @@ from __future__ import division, absolute_import import unittest -import datetime -import time from SpiffWorkflow.task import Task from SpiffWorkflow.bpmn.workflow import BpmnWorkflow from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase diff --git a/tests/SpiffWorkflow/bpmn/MultiInstanceParallelCondTest.py b/tests/SpiffWorkflow/bpmn/MultiInstanceParallelCondTest.py new file mode 100644 index 000000000..a62b5fbeb --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/MultiInstanceParallelCondTest.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import sys +import os +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + + +class MultiInstanceCondTest(BpmnWorkflowTestCase): + """The 
example bpmn diagram has a single task set to be a parallel + multi-instance with a loop cardinality of 5. + It should repeat 5 times before termination, and it should + have a navigation list with 7 items in it - one for start, one for end, + and five items for the repeating section. """ + + def setUp(self): + self.filename = 'MultiInstanceParallelTaskCond.bpmn' + self.process_name = 'MultiInstance' + self.spec = self.load_workflow1_spec() + + + def reload_save_restore(self): + state = self._get_workflow_state() + #self.spec = self.load_workflow1_spec() + #self.workflow = BpmnWorkflow(self.spec) + self.restore(state,spec_from_state=True) + # We should still have the same state: + after_state = self._get_workflow_state(do_steps=False) + self.maxDiff = None + #self.assertEqual(before_dump, after_dump) + self.assertEqual(state, after_state) + + + + def load_workflow1_spec(self): + return self.load_workflow_spec(self.filename, self.process_name) + + def testRunThroughHappy(self): + self.actualTest() + + def testSaveRestore(self): + self.actualTest(True) + + def actualTest(self, save_restore=False): + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + self.assertEqual(1, len(self.workflow.get_ready_user_tasks())) + task = self.workflow.get_ready_user_tasks()[0] + task.data['collection'] = {'a':{'a':'test'}, + 'b':{'b':'test'}} + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + + for task in self.workflow.get_ready_user_tasks(): + self.assertFalse(self.workflow.is_completed()) + self.workflow.complete_task_from_id(task.id) + nav_list = self.workflow.get_flat_nav_list() + self.assertNotEqual(None, nav_list[1].task_id) + if(save_restore): + self.reload_save_restore() + self.workflow.do_engine_steps() + self.assertTrue(self.workflow.is_completed()) + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(MultiInstanceCondTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) 
diff --git a/tests/SpiffWorkflow/bpmn/MultiInstanceParallelTest.py b/tests/SpiffWorkflow/bpmn/MultiInstanceParallelTest.py new file mode 100644 index 000000000..934d610b0 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/MultiInstanceParallelTest.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import sys +import os +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + + +class MultiInstanceTest(BpmnWorkflowTestCase): + """The example bpmn diagram has a single task set to be a parallel + multi-instance with a loop cardinality of 5. + It should repeat 5 times before termination, and it should + have a navigation list with 7 items in it - one for start, one for end, + and five items for the repeating section. 
""" + + def setUp(self): + self.filename = 'MultiInstanceParallelTask.bpmn' + self.process_name = 'MultiInstance' + self.spec = self.load_workflow1_spec() + + + def reload_save_restore(self): + #self.spec = self.load_workflow1_spec() + self.save_restore() + + def load_workflow1_spec(self): + return self.load_workflow_spec(self.filename, self.process_name) + + def testRunThroughHappy(self): + self.actualTest() + + def testSaveRestore(self): + self.actualTest(True) + + def actualTest(self, save_restore=False): + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + self.assertEqual(1, len(self.workflow.get_ready_user_tasks())) + task = self.workflow.get_ready_user_tasks()[0] + task.data['collection'] = [1,2,3,4,5] + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + for task in self.workflow.get_ready_user_tasks(): + self.assertFalse(self.workflow.is_completed()) + self.workflow.complete_task_from_id(task.id) + nav_list = self.workflow.get_flat_nav_list() + self.assertNotEqual(None, nav_list[4].task_id) + if(save_restore): + self.reload_save_restore() + self.workflow.do_engine_steps() + self.assertTrue(self.workflow.is_completed()) + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(MultiInstanceTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/MultiInstanceTest.py b/tests/SpiffWorkflow/bpmn/MultiInstanceTest.py new file mode 100644 index 000000000..637a699a0 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/MultiInstanceTest.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import sys +import os +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import 
BpmnWorkflowTestCase + +__author__ = 'matth' + + +class MultiInstanceTest(BpmnWorkflowTestCase): + """The example bpmn diagram has a single task with a loop cardinality of 5. + It should repeat 5 times before termination.""" + + def setUp(self): + self.spec = self.load_workflow1_spec() + + def load_workflow1_spec(self): + return self.load_workflow_spec('bpmnMultiUserTask.bpmn','MultiInstance') + + def testRunThroughHappy(self): + + self.workflow = BpmnWorkflow(self.spec) + + for i in range(5): + self.workflow.do_engine_steps() + self.assertFalse(self.workflow.is_completed()) + self.do_next_exclusive_step('Activity_Loop') + + self.workflow.do_engine_steps() + self.assertTrue(self.workflow.is_completed()) + + def testSaveRestore(self): + + self.workflow = BpmnWorkflow(self.spec) + for i in range(5): + self.save_restore() + self.workflow.do_engine_steps() + self.assertFalse(self.workflow.is_completed()) + self.do_next_exclusive_step('Activity_Loop') + + self.workflow.do_engine_steps() + self.assertTrue(self.workflow.is_completed()) + + def testNav(self): + self.workflow = BpmnWorkflow(self.spec) + nav = self.workflow.get_flat_nav_list() + print(nav) + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(MultiInstanceTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/NIMessageBoundaryTest.py b/tests/SpiffWorkflow/bpmn/NIMessageBoundaryTest.py new file mode 100644 index 000000000..dff8bc044 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/NIMessageBoundaryTest.py @@ -0,0 +1,106 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import unittest +import datetime +import time +from SpiffWorkflow.task import Task +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'kellym' + + +class 
NIMessageBoundaryTest(BpmnWorkflowTestCase): + """ + Non-Interrupting Timer boundary test + """ + def setUp(self): + self.spec = self.load_spec() + + def load_spec(self): + return self.load_workflow_spec('noninterrupting-MessageBoundary.bpmn', 'MessageBoundary') + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testThroughSaveRestore(self): + self.actual_test(save_restore=True) + + + def actual_test(self,save_restore = False): + self.workflow = BpmnWorkflow(self.spec) + ready_tasks = self.workflow.get_tasks(Task.READY) + self.assertEqual(1, len(ready_tasks)) + self.workflow.complete_task_from_id(ready_tasks[0].id) + self.workflow.do_engine_steps() + + # first we run through a couple of steps where we answer No to each + # question + answers = {'Activity_WorkLate':('flag_task','No'), + 'Activity_DoWork': ('work_done','No')} + for x in range(3): + ready_tasks = self.workflow.get_tasks(Task.READY) + for task in ready_tasks: + response = answers.get(task.task_spec.name,None) + self.assertEqual(response==None, + False, + 'We got a ready task that we did not expect - %s'%( + task.task_spec.name)) + task.data[response[0]] = response[1] + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + # if we have a list of tasks - that list becomes invalid + # after we do a save restore, so I'm completing the list + # before doing the save restore. 
+ if save_restore: self.save_restore() + + + answers = {'Activity_WorkLate':('flag_task','Yes'), + 'Activity_DoWork': ('work_done','No'), + 'Activity_WorkLateReason':('work_late_reason','covid-19')} + for x in range(3): + ready_tasks = self.workflow.get_tasks(Task.READY) + for task in ready_tasks: + response = answers.get(task.task_spec.name,None) + self.assertEqual(response==None, + False, + 'We got a ready task that we did not expect - %s'%( + task.task_spec.name)) + task.data[response[0]] = response[1] + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + ready_tasks = self.workflow.get_tasks(Task.READY) + self.assertEqual(len(ready_tasks),1) + task = ready_tasks[0] + self.assertEqual(task.task_spec.name,'Activity_DoWork') + task.data['work_done'] = 'Yes' + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_tasks(Task.READY) + self.assertEqual(len(ready_tasks), 1) + task = ready_tasks[0] + self.assertEqual(task.task_spec.name, 'Activity_WorkCompleted') + task.data['work_completed'] = 'Lots of Stuff' + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + self.assertEqual(self.workflow.is_completed(),True) + self.assertEqual(self.workflow.last_task.data,{'Event_InterruptBoundary_Response': 'Youre late!', + 'flag_task': 'Yes', + 'work_done': 'Yes', + 'work_completed': 'Lots of Stuff', + 'work_late_reason': 'covid-19', + 'end_event': None}) # end event gets appended + # by the test framework + + + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(NIMessageBoundaryTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/NITimerDurationBoundaryTest.py b/tests/SpiffWorkflow/bpmn/NITimerDurationBoundaryTest.py new file mode 100644 index 000000000..dbe0f9a75 --- /dev/null +++ 
b/tests/SpiffWorkflow/bpmn/NITimerDurationBoundaryTest.py @@ -0,0 +1,88 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import unittest +import datetime +import time +from SpiffWorkflow.task import Task +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'kellym' + + +class NITimerDurationTest(BpmnWorkflowTestCase): + """ + Non-Interrupting Timer boundary test + """ + def setUp(self): + self.spec = self.load_spec() + + def load_spec(self): + return self.load_workflow_spec('timer-non-interrupt-boundary.bpmn', 'NonInterruptTimer') + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testThroughSaveRestore(self): + self.actual_test(save_restore=True) + + + def actual_test(self,save_restore = False): + self.workflow = BpmnWorkflow(self.spec) + ready_tasks = self.workflow.get_tasks(Task.READY) + self.assertEqual(1, len(ready_tasks)) + self.workflow.complete_task_from_id(ready_tasks[0].id) + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_tasks(Task.READY) + self.assertEqual(1, len(ready_tasks)) + ready_tasks[0].data['work_done'] = 'No' + self.workflow.complete_task_from_id(ready_tasks[0].id) + self.workflow.do_engine_steps() + + loopcount = 0 + # test bpmn has a timeout of .25s + # we should terminate loop before that. 
+ starttime = datetime.datetime.now() + while loopcount < 10: + ready_tasks = self.workflow.get_tasks(Task.READY) + if len(ready_tasks) > 1: + break + if save_restore: self.save_restore() + #self.assertEqual(1, len(self.workflow.get_tasks(Task.WAITING))) + time.sleep(0.1) + self.workflow.complete_task_from_id(ready_tasks[0].id) + self.workflow.refresh_waiting_tasks() + self.workflow.do_engine_steps() + loopcount = loopcount +1 + endtime = datetime.datetime.now() + duration = endtime-starttime + # appropriate time here is .5 seconds + # due to the .3 seconds that we loop and then + # the two conditions that we complete after the timer completes. + self.assertEqual(duration<datetime.timedelta(seconds=.5),True) + self.assertEqual(duration>datetime.timedelta(seconds=.2),True) + for task in ready_tasks: + if task.task_spec == 'GetReason': + task.data['delay_reason'] = 'Just Because' + else: + task.data['work_done'] = 'Yes' + self.workflow.complete_task_from_id(task.id) + self.workflow.refresh_waiting_tasks() + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_tasks(Task.READY) + self.assertEqual(1, len(ready_tasks)) + ready_tasks[0].data['experience'] = 'Great!' 
+ self.workflow.complete_task_from_id(ready_tasks[0].id) + self.workflow.do_engine_steps() + self.assertEqual(self.workflow.is_completed(),True) + self.assertEqual(self.workflow.last_task.data,{'work_done': 'Yes', 'end_event': None, 'experience': 'Great!'}) + print (self.workflow.last_task.data) + print(duration) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(NITimerDurationTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/NavDoubleGatewayTest.py b/tests/SpiffWorkflow/bpmn/NavDoubleGatewayTest.py new file mode 100644 index 000000000..afb7b99ad --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/NavDoubleGatewayTest.py @@ -0,0 +1,66 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import unittest +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'kellym' + + +class NavDoubleGateway(BpmnWorkflowTestCase): + """The example bpmn diagram looks roughly like this, a gateway + that leads to two different end points + + [Step 1] -> + -> 'False' -> [Return Step 1] + -> 'True' -> [Step 2] -> END + """ + + def setUp(self): + self.spec = self.load_workflow1_spec() + + def load_workflow1_spec(self): + return self.load_workflow_spec('DoubleGatewayNavigation.bpmn','DoubleGatewayNavigation') + + def testRunThroughHappy(self): + + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + nav_list = self.workflow.get_flat_nav_list() + self.assertEqual(14, len(nav_list)) + + self.assertEqual("StartEvent", nav_list[0].spec_type) + self.assertEqual("Task 1", nav_list[1].description) + self.assertEqual("Decide Which Branch?", nav_list[2].description) + self.assertEqual("a", nav_list[3].description) + self.assertEqual("Enter Task 2a", nav_list[4].description) + self.assertEqual("flow b 
or c", nav_list[5].description) + self.assertEqual(None, nav_list[6].description) + self.assertEqual("flow b", nav_list[7].description) + self.assertEqual("Enter Task 2b", nav_list[8].description) + self.assertEqual("flow_c", nav_list[9].description) + self.assertEqual("Enter Task 2c", nav_list[10].description) + self.assertEqual("Enter Task 3", nav_list[11].description) + + for nav_item in nav_list: + if nav_item.spec_type[-4:] == "Task": + self.assertIsNotNone(nav_item.task_id) + + + # Sanity check on deep nav. + nav_list = self.workflow.get_deep_nav_list() + self.assertNav(nav_list[0], spec_type="StartEvent", state="COMPLETED") + self.assertNav(nav_list[1], description="Task 1", state="READY") + self.assertNav(nav_list[2], description="Decide Which Branch?") + self.assertNav(nav_list[2].children[0], description="a") + self.assertNav(nav_list[2].children[0].children[0], description="Enter Task 2a") + self.assertNav(nav_list[2].children[1], description="flow b or c") + self.assertNav(nav_list[3], description="Enter Task 3") + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(NavDoubleGateway) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/NavLeadFrogTest.py b/tests/SpiffWorkflow/bpmn/NavLeadFrogTest.py new file mode 100644 index 000000000..fc897c71c --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/NavLeadFrogTest.py @@ -0,0 +1,97 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import unittest +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'kellym' + + +class NavLeapfrogTest(BpmnWorkflowTestCase): + """The example bpmn diagram looks roughly like this, a gateway + that leads to two different end points + + [Step 1] -> + -> 'False' -> [Return Step 1] + -> 'True' -> [Step 2] -> END + 
""" + + def setUp(self): + self.spec = self.load_workflow1_spec() + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + task = self.workflow.get_ready_user_tasks()[0] + task.data = {"cats": 10} + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + + def load_workflow1_spec(self): + return self.load_workflow_spec('NavLeapFrog.bpmn','NavLeapFrog') + + def testRunThroughFlatNav(self): + + # Complete a little bit, so we can see the states in action. + + nav_list = self.workflow.get_flat_nav_list() + self.assertEqual(21, len(nav_list)) + + self.assertNav(nav_list[0], name="StartEvent_1", indent=0, state="COMPLETED") + self.assertNav(nav_list[1], description="Get Data", indent=0, state="COMPLETED") + self.assertNav(nav_list[2], description="how many cats", indent=0) + self.assertNav(nav_list[3], description="many a cat", indent=1) + self.assertNav(nav_list[4], description="Tell me bout da cats", indent=2, state="READY") + self.assertNav(nav_list[5], description="no cats", indent=1) + self.assertNav(nav_list[6], description="Get som dem cats", indent=2) + self.assertNav(nav_list[7], description="how many cows", indent=0) + self.assertNav(nav_list[8], description="1 or more cows", indent=1) + self.assertNav(nav_list[9], description="Tell me bout dem cows", indent=2) + self.assertNav(nav_list[10], description="no cows", indent=1) + self.assertNav(nav_list[11], description="How many chickens", indent=0) + self.assertNav(nav_list[12], description="1 or more chicks", indent=1) + self.assertNav(nav_list[13], description="Tell me bout da Chikens", indent=2) + self.assertNav(nav_list[14], description="no chickens", indent=1) + self.assertNav(nav_list[15], description="How many Pigs?", indent=0) + self.assertNav(nav_list[16], description="no pigs", indent=1) + self.assertNav(nav_list[17], description="1 or more pigs", indent=1) + self.assertNav(nav_list[18], description="Tell me boud dem Pigs", indent=2) + 
self.assertNav(nav_list[19], spec_type="EndEvent", indent=0) + + def testRunThroughDeepNav(self): + + nav_list = self.workflow.get_deep_nav_list() + self.assertEqual(8, len(nav_list)) + self.assertNav(nav_list[0], name="StartEvent_1", indent=0, state="COMPLETED") + self.assertNav(nav_list[1], description="Get Data", indent=0, state="COMPLETED") + self.assertNav(nav_list[2], description="how many cats", indent=0, state="READY") + self.assertNav(nav_list[3], description="how many cows", indent=0, state="LIKELY") + self.assertNav(nav_list[4], description="How many chickens", indent=0, state="MAYBE") + self.assertNav(nav_list[5], description="How many Pigs?", indent=0, state=None) + self.assertNav(nav_list[6], spec_type="EndEvent", indent=0, state=None) + + # Cats + self.assertNav(nav_list[2].children[0], description="many a cat", state="READY") + self.assertNav(nav_list[2].children[0].children[0], description="Tell me bout da cats", state="READY") + self.assertNav(nav_list[2].children[1], description="no cats", indent=1, state=None) + self.assertNav(nav_list[2].children[1].children[0], description="Get som dem cats") + + # Cows + self.assertNav(nav_list[3].children[0], description="1 or more cows", state=None) + self.assertNav(nav_list[3].children[0].children[0], description="Tell me bout dem cows", state=None) + self.assertNav(nav_list[3].children[1], description="no cows", indent=1, state=None) + + # Chickens + self.assertNav(nav_list[4].children[0], description="1 or more chicks", state=None) + self.assertNav(nav_list[4].children[0].children[0], description="Tell me bout da Chikens", state=None) + self.assertNav(nav_list[4].children[1], description="no chickens", indent=1) + + # Pigs + self.assertNav(nav_list[5].children[0], description="no pigs", state=None) + self.assertNav(nav_list[5].children[1], description="1 or more pigs", state=None) + self.assertNav(nav_list[5].children[1].children[0], description="Tell me boud dem Pigs", state=None) + +def suite(): + 
return unittest.TestLoader().loadTestsFromTestCase(NavLeapfrogTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/NavListBacktrackTest.py b/tests/SpiffWorkflow/bpmn/NavListBacktrackTest.py new file mode 100644 index 000000000..4f07cb802 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/NavListBacktrackTest.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import unittest +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'kellym' + + +class NavListBacktrackTest(BpmnWorkflowTestCase): + """The example bpmn diagram looks roughly like this, a gateway + that leads to two different end points + + [Step 1] -> + -> 'False' -> [Return Step 1] + -> 'True' -> [Step 2] -> END + """ + + def setUp(self): + self.spec = self.load_workflow1_spec() + + def load_workflow1_spec(self): + return self.load_workflow_spec('BacktrackNavigation.bpmn','BacktrackNavigation') + + def testRunThroughHappy(self): + + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + nav_list = self.workflow.get_flat_nav_list() + self.assertEqual(9, len(nav_list)) + + self.assertNav(nav_list[0], name="StartEvent_1", indent=0) + self.assertNav(nav_list[1], description="Step 1", indent=0) + self.assertNav(nav_list[2], description="Gateway", indent=0) + self.assertNav(nav_list[3], description="True", indent=1) + self.assertNav(nav_list[4], description="Step 2", indent=2) + self.assertNav(nav_list[5], description="Step 3", indent=2) + self.assertNav(nav_list[6], spec_type="EndEvent", indent=2) + self.assertNav(nav_list[8], description="False", indent=1) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(NavListBacktrackTest) +if __name__ == '__main__': + 
unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/NavListExclusiveGatewayTest.py b/tests/SpiffWorkflow/bpmn/NavListExclusiveGatewayTest.py new file mode 100644 index 000000000..1b7c10e4e --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/NavListExclusiveGatewayTest.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import unittest +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'kellym' + + +class NavListExclusiveGatewayTest(BpmnWorkflowTestCase): + """The example bpmn diagram looks roughtly like this + + [Task 1] -> + -> 'a' -> [Task 2a] + -> 'b' -> [Task 2b] + -> [Task 3] + + """ + + def setUp(self): + self.spec = self.load_workflow1_spec() + + def load_workflow1_spec(self): + return self.load_workflow_spec('ExclusiveGatewayNavigation.bpmn', + 'ExclusiveGatewayNavigation') + + def testRunThroughHappy(self): + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + nav_list = self.workflow.get_flat_nav_list() + self.assertEquals(11, len(nav_list)) + + self.assertNav(nav_list[1], description="Enter Task 1", indent=0) + self.assertNav(nav_list[2], description="Decide Which Branch?", + indent=0) + self.assertNav(nav_list[3], description="a", indent=1) + self.assertNav(nav_list[4], description="Enter Task 2a", indent=2) + self.assertNav(nav_list[5], description="b", indent=1) + self.assertNav(nav_list[6], description="Enter Task 2b", indent=2) + self.assertNav(nav_list[7], spec_type="ExclusiveGateway", indent=0) + self.assertNav(nav_list[8], description="Enter Task 3", indent=0) + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase( + NavListExclusiveGatewayTest) + + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git 
a/tests/SpiffWorkflow/bpmn/NavListExclusiveGatewayTwiceTest.py b/tests/SpiffWorkflow/bpmn/NavListExclusiveGatewayTwiceTest.py new file mode 100644 index 000000000..2d6c926ba --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/NavListExclusiveGatewayTwiceTest.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import unittest +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'kellym' + + +class NavListExclusiveGatewayTest(BpmnWorkflowTestCase): + """The example bpmn diagram looks roughtly like this + + [Task 1] -> Are you a viking? + -> 'yes' -> [Choose Helmet] + -> 'no' -> + Do you eat spam? + -> 'yes' -> [Eat plate of spam] + -> 'no' -> + + -> [Examine your life] + + """ + + def setUp(self): + self.spec = self.load_workflow1_spec() + + def load_workflow1_spec(self): + return self.load_workflow_spec('ExclusiveGatewayTwiceNavigation.bpmn','ExclusiveGatewayTwiceNavigation') + + def testRunThroughHappy(self): + + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + nav_list = self.workflow.get_flat_nav_list() + self.assertEqual(13, len(nav_list)) + + self.assertNav(nav_list[0], name="StartEvent_1", indent=0) + self.assertNav(nav_list[1], description="Make Choices", indent=0) + self.assertNav(nav_list[2], description="Are you a viking?", indent=0) + self.assertNav(nav_list[3], description="Yes", indent=1) + self.assertNav(nav_list[4], description="Select a helmet", indent=2) + self.assertNav(nav_list[5], description="No", indent=1) + self.assertNav(nav_list[6], description="Do you eat Spam?", indent=0) + self.assertNav(nav_list[7], description="Yes", indent=1) + self.assertNav(nav_list[8], description="Eat plate of spam", indent=2) + self.assertNav(nav_list[9], description="No", indent=1) + self.assertNav(nav_list[10], description="Examine your 
life", indent=0) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(NavListExclusiveGatewayTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/NavListMulipleEnds.py b/tests/SpiffWorkflow/bpmn/NavListMulipleEnds.py new file mode 100644 index 000000000..ad2787f50 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/NavListMulipleEnds.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import unittest +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'kellym' + + +class NavListExclusiveGatewayTest(BpmnWorkflowTestCase): + """The example bpmn diagram looks roughly like this, a gateway + that leads to two different end points + + [Step 1] -> + -> 'False' -> [Alternate End] -> END A + -> 'True' -> [Step 2] -> END B + """ + + def setUp(self): + self.spec = self.load_workflow1_spec() + + def load_workflow1_spec(self): + return self.load_workflow_spec('ExclusiveGatewayMultipleEndNavigation.bpmn','ExclusiveGatewayMultipleEndNavigation') + + def testRunThroughHappy(self): + + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + nav_list = self.workflow.get_nav_list() + self.assertEqual(6, len(nav_list)) + + self.assertEqual("Step 1", nav_list[0]["description"]) + self.assertEqual("GatewayToEnd", nav_list[1]["description"]) + self.assertEqual("False", nav_list[2]["description"]) + self.assertEqual("Step End", nav_list[3]["description"]) + self.assertEqual("True", nav_list[4]["description"]) + self.assertEqual("Step 2", nav_list[5]["description"]) + + self.assertEqual(0, nav_list[0]["indent"]) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(NavListExclusiveGatewayTest) +if __name__ == '__main__': + 
unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/NavListParallelGatewayTest.py b/tests/SpiffWorkflow/bpmn/NavListParallelGatewayTest.py new file mode 100644 index 000000000..ddf037b46 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/NavListParallelGatewayTest.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import unittest +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'kellym' + + +class NavListParallelGatewayTest(BpmnWorkflowTestCase): + """The example bpmn diagram looks roughtly like this + + [Task 1] -> + -> 'yes' -> + noop + -> 'no' -> + <+> -> + [Task 2a] + [Task 2b] + [Task 2c] + -> + -> [Task 3] + + """ + + def setUp(self): + self.spec = self.load_workflow1_spec() + + def load_workflow1_spec(self): + return self.load_workflow_spec('ParallelTasksNavigation.bpmn','ParallelTasksNavigation') + + def testRunThroughHappy(self): + + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + nav_list = self.workflow.get_flat_nav_list() + self.assertNav(nav_list[0], name="StartEvent_1", indent=0) + self.assertNav(nav_list[1], description="Enter Task 1", indent=0) + self.assertNav(nav_list[2], description="Skip to Task 3?", indent=0) + self.assertNav(nav_list[3], description="Yes", indent=1) + self.assertNav(nav_list[4], description="No", indent=1) + self.assertNav(nav_list[5], spec_type="ParallelGateway", indent=2) + self.assertNav(nav_list[6], description="Enter Task 2a", indent=3) + self.assertNav(nav_list[7], description="Enter Task 2b", indent=3) + self.assertNav(nav_list[8], description="Enter Task 2b_2", indent=3) + self.assertNav(nav_list[9], description="Enter Task 2c", indent=3) + self.assertNav(nav_list[10], spec_type="ParallelGateway", indent=2) + self.assertNav(nav_list[11], 
description="Enter Task 3", indent=0) + self.assertNav(nav_list[12], spec_type="EndEvent", indent=0) + + x = self.workflow.get_ready_user_tasks() + x[0].data['skip_to_task_3'] = False + self.workflow.complete_task_from_id(x[0].id) + self.workflow.do_engine_steps() + self.save_restore() + nav_list = self.workflow.get_flat_nav_list() + self.assertNav(nav_list[2], description="Skip to Task 3?", indent=0, state="COMPLETED") + self.assertNav(nav_list[6], description="Enter Task 2a", indent=0, state="READY") + self.assertNav(nav_list[7], description="Enter Task 2b", indent=0, state="READY") + self.assertNav(nav_list[8], description="Enter Task 2b_2", indent=0, state="MAYBE") + self.assertNav(nav_list[9], description="Enter Task 2c", indent=0, state="READY") + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(NavListParallelGatewayTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/ParallelOrderTest.py b/tests/SpiffWorkflow/bpmn/ParallelOrderTest.py new file mode 100644 index 000000000..e226f6dad --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/ParallelOrderTest.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import sys +import os +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + + +class MultiInstanceTest(BpmnWorkflowTestCase): + """The example bpmn diagram has a 4 parallel workflows, this + verifies that the parallel tasks have a natural order that follows + the visual layout of the diagram, rather than just the order in which + they were created. 
""" + + def setUp(self): + self.spec = self.load_workflow1_spec() + + def load_workflow1_spec(self): + return self.load_workflow_spec('ParallelOrder.bpmn','ParallelOrder') + + def testRunThroughHappy(self): + + self.workflow = BpmnWorkflow(self.spec) + + self.workflow.do_engine_steps() + self.assertFalse(self.workflow.is_completed()) + self.assertEquals(4, len(self.workflow.get_ready_user_tasks())) + tasks = self.workflow.get_ready_user_tasks() + self.assertEquals("Task 1", tasks[0].get_description()) + self.assertEquals("Task 2", tasks[1].get_description()) + self.assertEquals("Task 3", tasks[2].get_description()) + self.assertEquals("Task 4", tasks[3].get_description()) + + nav = self.workflow.get_flat_nav_list() + self.assertNav(nav[2], description="Task 1") + self.assertNav(nav[3], description="Task 2") + self.assertNav(nav[4], description="Task 3") + self.assertNav(nav[5], description="Task 4") + + nav = self.workflow.get_deep_nav_list() + self.assertNav(nav[1], spec_type="ParallelGateway") + self.assertNav(nav[1].children[0], description="Task 1") + self.assertNav(nav[1].children[1], description="Task 2") + self.assertNav(nav[1].children[2], description="Task 3") + self.assertNav(nav[1].children[3], description="Task 4") + self.assertNav(nav[2], spec_type="ParallelGateway") + self.assertNav(nav[3], spec_type="EndEvent") + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(MultiInstanceTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/ParallelTest.py b/tests/SpiffWorkflow/bpmn/ParallelTest.py index 50283c468..f04f2c660 100644 --- a/tests/SpiffWorkflow/bpmn/ParallelTest.py +++ b/tests/SpiffWorkflow/bpmn/ParallelTest.py @@ -81,6 +81,66 @@ def testRunThroughThread1First(self): 0, len(self.workflow.get_tasks(Task.READY | Task.WAITING))) +class ParallelFromCamunda(BpmnWorkflowTestCase): + + def setUp(self): + self.spec = self.load_spec() + + def load_spec(self): + return 
self.load_workflow_spec('Test-Workflows/Parallel.camunda.bpmn20.xml', 'Process_1hb021r') + + def testRunThroughParallelTaskFirst(self): + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + + # 1 first task + self.assertEqual(1, len(self.workflow.get_tasks(Task.READY))) + self.do_next_named_step('First Task') + self.save_restore() + self.workflow.do_engine_steps() + self.assertRaises(AssertionError, self.do_next_named_step, 'Done') + + # 3 parallel tasks + self.assertEqual(3, len(self.workflow.get_tasks(Task.READY))) + self.do_next_named_step('Parallel Task A') + self.save_restore() + self.workflow.do_engine_steps() + self.assertRaises(AssertionError, self.do_next_named_step, 'Done') + self.do_next_named_step('Parallel Task B') + self.save_restore() + self.assertRaises(AssertionError, self.do_next_named_step, 'Done') + self.do_next_named_step('Parallel Task C') + self.save_restore() + self.workflow.do_engine_steps() + self.save_restore() + + # 1 last task + self.assertEqual(1, len(self.workflow.get_tasks(Task.READY))) + self.do_next_named_step('Last Task') + self.save_restore() + self.workflow.do_engine_steps() + self.assertRaises(AssertionError, self.do_next_named_step, 'Done') + + def testAllParallelDataMakesItIntoGatewayTask(self): + """It should be true that data collected across parallel tasks + is all available in the join task.""" + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + self.do_next_named_step('First Task') + self.do_next_named_step('Parallel Task A', + set_attribs={"taskA": "taskA"}) + self.do_next_named_step('Parallel Task B', + set_attribs={"taskB": "taskB"}) + self.do_next_named_step('Parallel Task C', + set_attribs={"taskC": "taskC"}) + self.workflow.do_engine_steps() + self.do_next_named_step('Last Task') + self.assertEquals("taskA", self.workflow.last_task.data["taskA"]) + self.assertEquals("taskB", self.workflow.last_task.data["taskB"]) + self.assertEquals("taskC", 
self.workflow.last_task.data["taskC"]) + + + class ParallelJoinLongInclusiveTest(ParallelJoinLongTest): def load_spec(self): diff --git a/tests/SpiffWorkflow/bpmn/PythonExpressionEngineTest.py b/tests/SpiffWorkflow/bpmn/PythonExpressionEngineTest.py new file mode 100644 index 000000000..2e6cae3cc --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/PythonExpressionEngineTest.py @@ -0,0 +1,221 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import sys +import os +import unittest + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from SpiffWorkflow.bpmn.FeelLikeScriptEngine import FeelLikeScriptEngine, FeelInterval +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase +import datetime + +__author__ = 'matth' + + +class PythonExpressionTest(BpmnWorkflowTestCase): + """The example bpmn diagram has a single task with a loop cardinality of 5. 
+ It should repeat 5 times before termination.""" + + def setUp(self): + self.data = { + "ApprovalReceived": { + "ApprovalNotificationReceived": True + }, + "ApprvlApprovrRole1": "Supervisor", + "ApprvlApprvr1": "sf4d", + "ApprvlApprvr2": "mas3x", + "ApprvlApprvrName1": "Steven K Funkhouser", + "ApprvlApprvrName2": "Margaret Shupnik", + "ApprvlApprvrRole2": "Associate Research Dean", + "ApprvlSchool": "Medicine", + "CIDR_MaxPersonnel": 23, + "CIDR_TotalSqFt": 2345, + "CoreResources": None, + "ExclusiveSpaceRoomIDBuilding": None, + "IRBApprovalRelevantNumbers": None, + "LabPlan": [ + 18 + ], + "NeededSupplies": { + "NeededSupplies": True + }, + "NonUVASpaces": None, + "PIComputingID": { + "data": { + "display_name": "Alex Herron", + "given_name": "Alex", + "email_address": "cah3us@virginia.edu", + "telephone_number": "5402712904", + "title": "", + "department": "", + "affiliation": "sponsored", + "sponsor_type": "Contractor", + "uid": "cah3us" + }, + "label": "Alex Herron (cah3us)", + "value": "cah3us" + }, + "PIPrimaryDeptArchitecture": None, + "PIPrimaryDeptArtsSciences": None, + "PIPrimaryDeptEducation": None, + "PIPrimaryDeptEngineering": None, + "PIPrimaryDeptMedicine": "Pediatrics", + "PIPrimaryDeptOther": None, + "PIPrimaryDeptProvostOffice": None, + "PISchool": "Medicine", + "PISupervisor": { + "data": { + "display_name": "Steven K Funkhouser", + "given_name": "Steven", + "email_address": "sf4d@virginia.edu", + "telephone_number": "+1 (434) 243-2634", + "title": "E1:Surgical Tech Sat Elective, E0:Supv Endoscopy Surg Techs", + "department": "E1:Sterile Processing, E0:Sterile Processing", + "affiliation": "staff", + "sponsor_type": "", + "uid": "sf4d" + }, + "label": "Steven K Funkhouser (sf4d)", + "value": "sf4d" + }, + "PWADescribe": "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Mauris nibh nulla, ultricies non tempor a, tincidunt a libero. Praesent eu felis eget tellus congue vulputate eget nec elit. 
Aliquam in diam at risus gravida tempor sed sed ex. Aliquam eros sapien, facilisis vel enim sed, vestibulum blandit augue. Suspendisse potenti. Mauris in blandit metus, eget pellentesque augue. Nam risus nisl, hendrerit ut ligula vel, fermentum convallis nisl. Etiam ornare neque massa. Fusce auctor lorem ipsum. Suspendisse eget facilisis risus. Fusce augue libero, maximus quis maximus vitae, euismod quis turpis. Morbi fringilla magna iaculis dolor rutrum convallis.\n\nQuisque eget urna ac orci ultrices pellentesque hendrerit nec nisl. Sed interdum lorem pellentesque, aliquam sem eget, luctus leo. Aliquam ut pretium neque. In porttitor dignissim tellus, nec vehicula risus. Vestibulum bibendum quis nibh at maximus. Nulla facilisi. Suspendisse suscipit enim ipsum, iaculis interdum erat suscipit at. Praesent commodo fermentum mauris, vel ullamcorper leo faucibus eu.", + "PWAFiles": [ + 17 + ], + "PersonnelWeeklySchedule": [ + 16 + ], + "RequiredTraining": { + "AllRequiredTraining": True + }, + "ShareSpaceRoomIDBuilding": None, + "SupplyList": None, + "exclusive": [ + { + "ExclusiveSpaceRoomID": "121", + "ExclusiveSpaceType": "Lab", + "ExclusiveSpaceSqFt": 400, + "ExclusiveSpacePercentUsable": 50, + "ExclusiveSpaceMaxPersonnel": 5, + "ExclusiveSpaceBuilding": { + "data": "{\"Value\":\"Pinn Hall\",\"Building Name\":\"Pinn Hall\"}", + "label": "Pinn Hall", + "id": 557, + "value": "Pinn Hall" + }, + "ExclusiveSpaceAMComputingID": { + "data": { + "display_name": "Emily L Funk", + "given_name": "Emily", + "email_address": "elf6m@virginia.edu", + "telephone_number": "", + "title": "S1:Grad McIntire, S0:Graduate Student Worker", + "department": "S1:MC-Dean's Admin, S0:PV-Admission-Undergrad", + "affiliation": "grace_student", + "sponsor_type": "", + "uid": "elf6m" + }, + "label": "Emily L Funk (elf6m)", + "value": "elf6m" + } + }, + { + "ExclusiveSpaceRoomID": "345", + "ExclusiveSpaceType": "Lab", + "ExclusiveSpaceSqFt": 300, + "ExclusiveSpacePercentUsable": 80, + 
"ExclusiveSpaceMaxPersonnel": 6, + "ExclusiveSpaceBuilding": { + "data": "{\"Value\":\"Pinn Hall\",\"Building Name\":\"Pinn Hall\"}", + "label": "Pinn Hall", + "id": 557, + "value": "Pinn Hall" + }, + "ExclusiveSpaceAMComputingID": None + } + ], + "isAnimalResearch": False, + "isCoreResourcesUse": False, + "isHumanSubjects": False, + "isNecessarySupplies": True, + "isNonUVASpaces": False, + "personnel": [ + { + "PersonnelType": "Faculty", + "PersonnelSpace": "121 Pinn Hall", + "PersonnelJustification": "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Mauris nibh nulla, ultricies non tempor a, tincidunt a libero. Praesent eu felis eget tellus congue vulputate eget nec elit. Aliquam in diam at risus gravida tempor sed sed ex. Aliquam eros sapien, facilisis vel enim sed, vestibulum blandit augue. Suspendisse potenti. Mauris in blandit metus, eget pellentesque augue. Nam risus nisl, hendrerit ut ligula vel, fermentum convallis nisl. Etiam ornare neque massa. Fusce auctor lorem ipsum. Suspendisse eget facilisis risus. Fusce augue libero, maximus quis maximus vitae, euismod quis turpis. Morbi fringilla magna iaculis dolor rutrum convallis.\n\nQuisque eget urna ac orci ultrices pellentesque hendrerit nec nisl. Sed interdum lorem pellentesque, aliquam sem eget, luctus leo. Aliquam ut pretium neque. In porttitor dignissim tellus, nec vehicula risus. Vestibulum bibendum quis nibh at maximus. Nulla facilisi. Suspendisse suscipit enim ipsum, iaculis interdum erat suscipit at. 
Praesent commodo fermentum mauris, vel ullamcorper leo faucibus eu.", + "PersonnelComputingID": { + "data": { + "display_name": "Steven K Funkhouser", + "given_name": "Steven", + "email_address": "sf4d@virginia.edu", + "telephone_number": "+1 (434) 243-2634", + "title": "E1:Surgical Tech Sat Elective, E0:Supv Endoscopy Surg Techs", + "department": "E1:Sterile Processing, E0:Sterile Processing", + "affiliation": "staff", + "sponsor_type": "", + "uid": "sf4d" + }, + "label": "Steven K Funkhouser (sf4d)", + "value": "sf4d" + } + } + ], + "shared": [] + } + self.expressionEngine = FeelLikeScriptEngine() + + def testRunThroughExpressions(self): + tests = [("string length('abcd')", 4, {}), + ("contains('abcXYZdef','XYZ')", True, {}), + ("list contains(x,'b')", True, {'x': ['a', 'b', 'c']}), + ("list contains(x,'z')", False, {'x': ['a', 'b', 'c']}), + # ("list contains(['a','b','c'],'b')",True,{}), # fails due to parse error + ("all ([True,True,True])", True, {}), + ("all ([True,False,True])", False, {}), + ("any ([False,False,False])", False, {}), + ("any ([True,False,True])", True, {}), + ("PT3S", datetime.timedelta(seconds=3), {}), + ("d[item>1]",[2,3,4],{'d':[1,2,3,4]}), + ("d[x>=2].y",[2,3,4],{'d':[{'x':1,'y':1}, + {'x': 2, 'y': 2}, + {'x': 3, 'y': 3}, + {'x': 4, 'y': 4}, + ]}), + ("concatenate(a,b,c)", ['a', 'b', 'c'], {'a': ['a'], + 'b': ['b'], + 'c': ['c'], + }), + ("append(a,'c')", ['a', 'b', 'c'], {'a': ['a', 'b']}), + ("now()", FeelInterval(datetime.datetime.now() - datetime.timedelta(seconds=1), + datetime.datetime.now() + datetime.timedelta(seconds=1)), + {}), + ("day of week('2020-05-07')", 4, {}), + ("day of week(a)", 0, {'a': datetime.datetime(2020, 5, 3)}), + ("list contains(a.b,'x')", True, {'a': {'b': ['a', 'x']}}), # combo + ("list contains(a.b,'c')", False, {'a': {'b': ['a', 'x']}}), + ("list contains(a.keys(),'b')", True, {'a': {'b': ['a', 'x']}}), + ("list contains(a.keys(),'c')", False, {'a': {'b': ['a', 'x']}}), + ] + for test in tests: + 
print(test[0]) + self.assertEqual(self.expressionEngine.evaluate(test[0], **test[2]), + test[1], "test --> %s <-- with variables ==> %s <==Fail!" % (test[0], str(test[2]))) + + def testRunThroughDMNExpression(self): + """ + Real world test + """ + x = self.expressionEngine.eval_dmn_expression("""sum([1 for x in exclusive if x.get( + 'ExclusiveSpaceAMComputingID',None)==None])""", '0', **self.data) + self.assertEqual(x, False) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(PythonExpressionTest) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/ResetSubProcessTest.py b/tests/SpiffWorkflow/bpmn/ResetSubProcessTest.py new file mode 100644 index 000000000..62a5c0aec --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/ResetSubProcessTest.py @@ -0,0 +1,86 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import sys +import os +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + + +class ResetSubProcessTest(BpmnWorkflowTestCase): + """The example bpmn diagram has a single task set to be a parallel + multi-instance with a loop cardinality of 5. + It should repeat 5 times before termination, and it should + have a navigation list with 7 items in it - one for start, one for end, + and five items for the repeating section. 
""" + + def setUp(self): + self.filename = 'resetworkflowA-*.bpmn' + self.process_name = 'TopLevel' + self.spec = self.load_workflow1_spec() + + + def reload_save_restore(self): + self.filename = 'resetworkflowB-*.bpmn' + self.spec = self.load_workflow1_spec() + self.workflow.do_engine_steps() + self.save_restore(spec_from_state=False) + + def load_workflow1_spec(self): + return self.load_workflow_spec(self.filename, self.process_name) + + def testSaveRestore(self): + self.actualTest(True) + + def actualTest(self, save_restore=False): + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + self.assertEqual(1, len(self.workflow.get_ready_user_tasks())) + task = self.workflow.get_ready_user_tasks()[0] + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + navlist = self.workflow.get_flat_nav_list() + self.assertEqual(len(navlist),10) + self.assertNav(navlist[4], name="SubTask2", state="READY") + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual(task.get_name(),'SubTask2') + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + task = self.workflow.get_tasks_from_spec_name('Task1')[0] + task.reset_token() + self.workflow.do_engine_steps() + self.reload_save_restore() + task = self.workflow.get_ready_user_tasks()[0] + navlist = self.workflow.get_flat_nav_list() + self.assertEqual(len(navlist), 11) + self.assertNav(navlist[4], name="Subtask2", state=None) + + self.assertEqual(task.get_name(),'Task1') + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual(task.get_name(),'Subtask2') + navlist = self.workflow.get_flat_nav_list() + self.assertNav(navlist[4], name="Subtask2", state="READY") + + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual(task.get_name(),'Subtask2A') + 
self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual(task.get_name(),'Task2') + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + self.assertTrue(self.workflow.is_completed()) + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ResetSubProcessTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/ScriptTest.py b/tests/SpiffWorkflow/bpmn/ScriptTest.py new file mode 100644 index 000000000..d517016c6 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/ScriptTest.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import unittest +from SpiffWorkflow.task import Task +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + + +class InlineScriptTest(BpmnWorkflowTestCase): + + def setUp(self): + self.spec = self.load_spec() + + def load_spec(self): + return self.load_workflow_spec('ScriptTest.bpmn', 'ScriptTest') + + def testRunThroughHappy(self): + + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + data = self.workflow.last_task.data + self.assertEqual(data,{'testvar': {'a': 1, 'b': 2, 'new': 'Test'}, + 'testvar2': [{'x': 1, 'y': 'a'}, + {'x': 2, 'y': 'b'}, + {'x': 3, 'y': 'c'}], + 'sample': ['b', 'c'], 'end_event': None}) + + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(InlineScriptTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/ScriptTestBox.py b/tests/SpiffWorkflow/bpmn/ScriptTestBox.py new file mode 100644 index 000000000..83a6a60cb --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/ScriptTestBox.py @@ -0,0 +1,38 @@ +# -*- coding: 
utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import unittest +from SpiffWorkflow.task import Task +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' + + +class InlineScriptTest(BpmnWorkflowTestCase): + + def setUp(self): + self.spec = self.load_spec() + + def load_spec(self): + return self.load_workflow_spec('ScriptTestBox.bpmn', 'ScriptTest') + + def testRunThroughHappy(self): + + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + data = self.workflow.last_task.data + self.assertEqual(data,{'testvar': {'a': 1, 'b': 2, 'new': 'Test'}, + 'testvar2': [{'x': 1, 'y': 'a'}, + {'x': 2, 'y': 'b'}, + {'x': 3, 'y': 'c'}], + 'sample': ['b', 'c'], 'end_event': None}) + + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(InlineScriptTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/StartMessageEventTest.py b/tests/SpiffWorkflow/bpmn/StartMessageEventTest.py new file mode 100644 index 000000000..869a0e441 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/StartMessageEventTest.py @@ -0,0 +1,66 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import unittest +import datetime +import time +from SpiffWorkflow.task import Task +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'kellym' + + +class StartMessageTest(BpmnWorkflowTestCase): + + def setUp(self): + self.spec = self.load_spec() + + def load_spec(self): + return self.load_workflow_spec('message_test.bpmn', 'ThrowCatch') + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testThroughSaveRestore(self): + 
self.actual_test(save_restore=True) + + + + def actual_test(self,save_restore = False): + steps = [('Activity_EnterPlan',{'plan_details':'Bad'}), + ('Activity_ApproveOrDeny', {'approved':'No'}), + ('Activity_EnterPlan', {'plan_details':'Better'}), + ('Activity_ApproveOrDeny', {'approved':'No'}), + ('Activity_EnterPlan', {'plan_details':'Best'}), + ('Activity_ApproveOrDeny', {'approved':'Yes'}), + ('Activity_EnablePlan',{'Done':'OK!'}) + ] + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() # get around start task + ready_tasks = self.workflow.get_tasks(Task.READY) + self.assertEqual(1, len(ready_tasks),'Expected to have one ready task') + waiting_tasks = self.workflow.get_tasks(Task.WAITING) + self.assertEqual(1, len(waiting_tasks), 'Expected to have one waiting task') + + for step in steps: + current_task = ready_tasks[0] + self.assertEqual(current_task.task_spec.name,step[0]) + current_task.update_data(step[1]) + self.workflow.complete_task_from_id(current_task.id) + self.workflow.do_engine_steps() + self.workflow.refresh_waiting_tasks() + if save_restore: self.save_restore() + ready_tasks = self.workflow.get_tasks(Task.READY) + self.assertEqual(self.workflow.is_completed(),True,'Expected the workflow to be complete at this point') + self.assertEqual(self.workflow.last_task.data,{'plan_details': 'Best', + 'ApprovalResult': 'Yes', + 'Done': 'OK!', + 'end_event': None}) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(StartMessageTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/SwimLaneTest.py b/tests/SpiffWorkflow/bpmn/SwimLaneTest.py new file mode 100644 index 000000000..a01f0130e --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/SwimLaneTest.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import unittest +from 
SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'kellym' + + +class SwimLaneTest(BpmnWorkflowTestCase): + """ + Test sample bpmn document to make sure the nav list + contains the correct swimlane in the 'lane' component + and make sure that our waiting tasks accept a lane parameter + and that it picks up the correct tasks. + """ + + def setUp(self): + self.spec = self.load_workflow1_spec() + + def load_workflow1_spec(self): + return self.load_workflow_spec('lanes.bpmn','lanes') + + def testRunThroughHappy(self): + + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + nav_list = self.workflow.get_flat_nav_list() + self.assertNav(nav_list[1], description="Request Feature", lane="A") + self.assertNav(nav_list[2], description="Clarifying Questions?", lane="B") + self.assertNav(nav_list[5], description="Clarify Request", lane="A") + self.assertNav(nav_list[7], description="Implement Feature", lane="B") + atasks = self.workflow.get_ready_user_tasks(lane="A") + btasks = self.workflow.get_ready_user_tasks(lane="B") + self.assertEqual(1, len(atasks)) + self.assertEqual(0, len(btasks)) + task = atasks[0] + self.assertEqual('Activity_A1', task.task_spec.name) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + atasks = self.workflow.get_ready_user_tasks(lane="A") + btasks = self.workflow.get_ready_user_tasks(lane="B") + self.assertEqual(0, len(atasks)) + self.assertEqual(1, len(btasks)) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(SwimLaneTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/TimerDurationBoundaryTest.py b/tests/SpiffWorkflow/bpmn/TimerDurationBoundaryTest.py new file mode 100644 index 000000000..89b79c931 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/TimerDurationBoundaryTest.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 
-*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import unittest +import datetime +import time +from SpiffWorkflow.task import Task +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase +from SpiffWorkflow.bpmn.BpmnFeelScriptEngine import BpmnFeelScriptEngine +__author__ = 'kellym' + + +class TimerDurationTest(BpmnWorkflowTestCase): + + def setUp(self): + self.spec = self.load_spec() + + def load_spec(self): + return self.load_workflow_spec('boundary.bpmn', 'boundary_event') + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testThroughSaveRestore(self): + self.actual_test(save_restore=True) + + + def actual_test(self,save_restore = False): + self.workflow = BpmnWorkflow(self.spec) + self.workflow.script_engine = BpmnFeelScriptEngine() + ready_tasks = self.workflow.get_tasks(Task.READY) + self.assertEqual(1, len(ready_tasks)) + self.workflow.complete_task_from_id(ready_tasks[0].id) + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_tasks(Task.READY) + self.assertEqual(1, len(ready_tasks)) + ready_tasks[0].data['answer']='No' + self.workflow.complete_task_from_id(ready_tasks[0].id) + self.workflow.do_engine_steps() + + loopcount = 0 + # test bpmn has a timeout of .25s + # we should terminate loop before that. 
+ starttime = datetime.datetime.now() + while loopcount < 11: + ready_tasks = self.workflow.get_tasks(Task.READY) + if len(ready_tasks) < 1: + break + if save_restore: + self.save_restore() + self.workflow.script_engine = BpmnFeelScriptEngine() + #self.assertEqual(1, len(self.workflow.get_tasks(Task.WAITING))) + time.sleep(0.1) + self.workflow.complete_task_from_id(ready_tasks[0].id) + self.workflow.refresh_waiting_tasks() + self.workflow.do_engine_steps() + loopcount = loopcount +1 + endtime = datetime.datetime.now() + duration = endtime-starttime + # Assure that the loopcount is less than 10, and the timer interrupt fired, rather + # than allowing us to continue to loop the full 10 times. + self.assertTrue(loopcount < 10) + print(duration) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(TimerDurationTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/TimerDurationTest.py b/tests/SpiffWorkflow/bpmn/TimerDurationTest.py new file mode 100644 index 000000000..97e89886e --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/TimerDurationTest.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import unittest +import datetime +import time +from SpiffWorkflow.task import Task +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'kellym' + + +class TimerDurationTest(BpmnWorkflowTestCase): + + def setUp(self): + self.spec = self.load_spec() + + def load_spec(self): + return self.load_workflow_spec('timer.bpmn', 'timer') + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testThroughSaveRestore(self): + self.actual_test(save_restore=True) + + + def actual_test(self,save_restore = False): + self.workflow = BpmnWorkflow(self.spec) + ready_tasks = 
self.workflow.get_tasks(Task.READY) + self.assertEqual(1, len(ready_tasks)) + self.workflow.complete_task_from_id(ready_tasks[0].id) + self.workflow.do_engine_steps() + ready_tasks = self.workflow.get_tasks(Task.READY) + self.assertEqual(1, len(ready_tasks)) + self.workflow.complete_task_from_id(ready_tasks[0].id) + self.workflow.do_engine_steps() + + loopcount = 0 + # test bpmn has a timeout of .25s + # we should terminate loop before that. + starttime = datetime.datetime.now() + while loopcount < 10: + if len(self.workflow.get_tasks(Task.READY)) >= 1: + break + if save_restore: self.save_restore() + self.assertEqual(1, len(self.workflow.get_tasks(Task.WAITING))) + time.sleep(0.1) + self.workflow.refresh_waiting_tasks() + loopcount = loopcount +1 + endtime = datetime.datetime.now() + duration = endtime-starttime + self.assertEqual(durationdatetime.timedelta(seconds=.25),True) + print(duration) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(TimerDurationTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/bpmn/data/Approvals.bpmn b/tests/SpiffWorkflow/bpmn/data/Approvals.bpmn index 73a4e06d7..152f7785d 100644 --- a/tests/SpiffWorkflow/bpmn/data/Approvals.bpmn +++ b/tests/SpiffWorkflow/bpmn/data/Approvals.bpmn @@ -1,6 +1,6 @@ - + Person who takes the first action to start the process @@ -400,4 +400,4 @@ - \ No newline at end of file + diff --git a/tests/SpiffWorkflow/bpmn/data/Approvals_bad.bpmn b/tests/SpiffWorkflow/bpmn/data/Approvals_bad.bpmn new file mode 100644 index 000000000..5e33ada61 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Approvals_bad.bpmn @@ -0,0 +1,403 @@ + + + + + + Person who takes the first action to start the process + + + + + + + + + + + Start1 + First_Approval_Wins + End1 + First_Approval_Wins_Done + Parallel_Approvals_Done + Parallel_SP + Parallel_SP_Done + + + + Supervisor_Approval__P_ + Gateway4 + Gateway5 + + + + Manager_Approval__P_ + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Supervisor_Approval + Start2 + Supervisor_Approved + + + + Manager_Approval + Manager_Approved + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Start3 + Supervisor_Approval + End2 + + + + Manager_Approval + + + + Step1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/BacktrackNavigation.bpmn b/tests/SpiffWorkflow/bpmn/data/BacktrackNavigation.bpmn new file mode 100644 index 000000000..453533e29 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/BacktrackNavigation.bpmn @@ -0,0 +1,138 @@ + + + + + Flow_0kcrx5l + + + ##### Please confirm Primary Investigator entered in Protocol Builder is correct and if so, provide additional information: +### **{{ pi.display_name }}** +***Email:*** {{ pi.email_address }} + +**Primary Appointment** +***School:*** {{ pi.E0.schoolName }} +***Department:*** {{ pi.E0.deptName }} + + +{% if is_me_pi %} +Since you are the person entering this information, you already have access and will receive all emails. 
+{% endif %} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Flow_147b9li + Flow_0s60u0u + + + + + Flow_1dcsioh + Flow_147b9li + Flow_00prawo + + + tru + + + false + + + Flow_0kcrx5l + Flow_00prawo + Flow_1dcsioh + + + Flow_00lzhsh + + + Flow_0s60u0u + Flow_00lzhsh + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/ComplexNavigation.bpmn b/tests/SpiffWorkflow/bpmn/data/ComplexNavigation.bpmn new file mode 100644 index 000000000..87b743e07 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/ComplexNavigation.bpmn @@ -0,0 +1,746 @@ + + + + + Flow_0kcrx5l + + + + Flow_0kcrx5l + Flow_1seuuie + + + The Study's Responsible Organization is needed in order to confirm the Department Chair. If it is the same as the Primary Investigator's Primary Department show below, we have all the information needed to determine the Department Chair. + + +**Primary Investigator's Primary Appointment** +***School:*** {{ pi.E0.schoolName }} +***Department:*** {{ pi.E0.deptName }} + + + + + + + + + + + + + Flow_12obxbo + Flow_1y4gjsg + + + Flow_02614fd + Flow_0c4tt8e + ro.chair = {} +ro.chair.uid = RO_Chair_CID +ro.chair.name_degree = RO_Chair_Name_Degree +ro.chair.title = RO_Chair_Title +ro.chair.sig_block = RO_Chair_Sig_Block + + + Flow_1seuuie + Flow_1ni06mz + Flow_1y9edqt + + + Flow_1y9edqt + Flow_1oriwwz + Flow_185jvp3 + + + Flow_185jvp3 + Flow_1dh8c45 + sch_enum = [] +if pi.E0.schoolAbbrv != "MD": + sch_enum_md = [ + { + "value": "MD", + "label": "Medicine" + }, + ] +else: + sch_enum_md = [] +if pi.E0.schoolAbbrv != "AS": + sch_enum_as = [ + { + "value": "AS", + "label": "Arts & Science" + }, + ] +else: + sch_enum_as = [] +if pi.E0.schoolAbbrv != "CU": + sch_enum_cu = [ + { + "value": "CU", + "label": "Education" + }, + ] +else: + sch_enum_cu = [] +if pi.E0.schoolAbbrv != "NR": + sch_enum_nr = [ + { + "value": "NR", + "label": "Nursing" + }, + ] +else: + 
sch_enum_nr = [] +sch_enum = sch_enum_md + sch_enum_as + sch_enum_cu + sch_enum_nr +del(sch_enum_md) +del(sch_enum_as) +del(sch_enum_cu) +del(sch_enum_nr) + + + + + + + + + + + + + + Flow_1dh8c45 + Flow_0mf9npl + + + Flow_1oriwwz + Flow_0nmpxmc + Flow_12obxbo + Flow_03s8gvx + Flow_0nzochy + Flow_0h955ao + + + Flow_1y4gjsg + Flow_0lnb8jw + Flow_1fqtd41 + Flow_0a626ba + + + Flow_0a626ba + Flow_0ssrpqx + if PIsPrimaryDepartmentSameAsRO.value == "diffSchool": + ro.schoolName = RO_StudySchool.label + ro.schoolAbbrv = RO_StudySchool.value + +if PIsPrimaryDepartmentSameAsRO.value != "yes": + if ro.schoolAbbrv == "MD": + ro.deptName = RO_StudyDeptMedicine.label + ro.deptAbbrv = RO_StudyDeptMedicine.value + elif ro.schoolAbbrv == "AS": + ro.deptName = RO_StudyDeptArtsSciences.label + ro.deptAbbrv = RO_StudyDeptArtsSciences.value + elif ro.schoolAbbrv == "CU": + ro.deptName = RO_StudyDeptEducation.label + ro.deptAbbrv = RO_StudyDeptEducation.value + else: + ro.deptName = "" + ro.deptAbbrv = "" + + + The Study's Responsible Organization is needed in order to confirm the Department Chair. If it is the same as the Primary Investigator's Primary Department show below, we have all the information needed to determine the Department Chair. + + +**Primary Investigator's Primary Appointment** +***School:*** {{ pi.E0.schoolName }} +***Department:*** {{ pi.E0.deptName }} + + + + + + + + + + + + + Flow_0nzochy + Flow_0lnb8jw + + + The Study's Responsible Organization is needed in order to confirm the Department Chair. If it is the same as the Primary Investigator's Primary Department show below, we have all the information needed to determine the Department Chair. 
+ + +**Primary Investigator's Primary Appointment** +***School:*** {{ pi.E0.schoolName }} +***Department:*** {{ pi.E0.deptName }} + + + + + + + + + + + + + Flow_0h955ao + Flow_1fqtd41 + + + Flow_0mf9npl + Flow_0nmpxmc + ro.schoolName = RO_StudySchool.label +ro.schoolAbbrv = RO_StudySchool.value + + + Flow_03s8gvx + Flow_0ssrpqx + Flow_0tnnt3b + + + ro.schoolAbbrv == "CU" + + + + + + + PIsPrimaryDepartmentSameAsRO.value != "yes" + + + + PIsPrimaryDepartmentSameAsRO.value == 'diffSchool' + + + + + + ro.schoolAbbrv not in ["MD", "AS", "CU"] + + + + ro.schoolAbbrv == "AS" + + + + + + + + Flow_1ni06mz + Flow_0tnnt3b + Flow_02614fd + + + temp + Flow_15xpsq8 + Flow_1g7q28p + + + Flow_0cqbu1f + Flow_1d4sb3d + Flow_12oux1f + Flow_0ygr7cu + + + The following Primary Coordinators were entered in Protocol Builder: +{%+ for key, value in pcs.items() %}{{value.display_name}} ({{key}}){% if loop.index is lt cnt_pcs %}, {% endif %}{% endfor %} +To Save the current settings for all Primary Coordinators, select Save All. + + +Otherwise, edit each Coordinator as necessary and select the Save button for each. + +### Please provide supplemental information for: + #### {{ pc.display_name }} + ##### Title: {{ pc.title }} + + ##### Department: {{ pc.department }} + ##### Affiliation: {{ pc.affiliation }} + + + + + + + Flow_12oux1f + Flow_1ik148z + + + + Flow_0c4tt8e + Flow_05g7d16 + Flow_13zasb1 + + + The PI is also the RO Chair + Flow_13zasb1 + Flow_0cqbu1f + + + Flow_0efu6u1 + Flow_0a3fjzp + Flow_0ljn2v6 + Flow_0pdoc38 + + + The following Sub-Investigators were entered in Protocol Builder: +{%+ for key, value in subs.items() %}{{value.display_name}} ({{key}}){% if loop.index is lt cnt_subs %}, {% endif %}{% endfor %} +To Save the current settings for all Sub-Investigators, select Save All. + + +Otherwise, edit each Sub-Investigator as necessary and select the Save button for each. 
+ + +### Please provide supplemental information for: + #### {{ sub.display_name }} + ##### Title: {{ sub.title }} + + ##### Department: {{ sub.department }} + ##### Affiliation: {{ sub.affiliation }} + + + + + + + Flow_0ljn2v6 + Flow_07vu2b0 + + + + Flow_1ik148z + Flow_0ygr7cu + Flow_0a3fjzp + Flow_0rstqv5 + + + The following Additional Coordinators were entered in Protocol Builder: +{%+ for key, value in acs.items() %}{{value.display_name}} ({{key}}){% if loop.index is lt cnt_acs %}, {% endif %}{% endfor %} +To Save the current settings for all Additional Coordinators, select Save All. + + + + +Otherwise, edit each Coordinator as necessary and select the Save button for each. + + +### Please provide supplemental information for: + #### {{ acs.display_name }} + ##### Title: {{ acs.title }} + + + ##### Department: {{ acs.department }} + ##### Affiliation: {{ acs.affiliation }} + Flow_0rstqv5 + Flow_0efu6u1 + + + + Flow_0pdoc38 + Flow_07vu2b0 + Flow_1g7q28p + Flow_0qti1ms + + + The following Additional Personnel were entered in Protocol Builder: +{%+ for key, value in aps.items() %}{{value.display_name}} ({{key}}){% if loop.index is lt cnt_aps %}, {% endif %}{% endfor %} +To Save the current settings for all Additional Personnel, select Save All. + + + + +Otherwise, edit each Additional Personnel as necessary and select the Save button for each. 
+ + + + +### Please provide supplemental information for: + #### {{ ap.display_name }} + ##### Title: {{ ap.title }} + + + ##### Department: {{ ap.department }} + ##### Affiliation: {{ ap.affiliation }} + + + + + + + Flow_0qti1ms + Flow_15xpsq8 + + + + ***Name & Degree:*** {{ RO_Chair_Name_Degree }} +***School:*** {{ RO_School }} +***Department:*** {{ RO_Department }} +***Title:*** {{ RO_Chair_Title }} +***Email:*** {{ RO_Chair_CID }} + + +{% if RO_Chair_CID != dc.uid %} + *Does not match the Department Chair specified in Protocol Builder, {{ dc.display_name }}* +{% endif %} + + + + + + + + + + Flow_05g7d16 + Flow_1d4sb3d + + + + + + + + + + + RO_Chair_CID == pi.uid + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/DoubleGatewayNavigation.bpmn b/tests/SpiffWorkflow/bpmn/data/DoubleGatewayNavigation.bpmn new file mode 100644 index 000000000..182f46359 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/DoubleGatewayNavigation.bpmn @@ -0,0 +1,173 @@ + + + + + Flow_1ux3ndu + + + + Flow_1ut95vk + Flow_a + Flow_1na0ra6 + + + + which_branch == 'a' + + + + + + + + + + + Flow_1ux3ndu + Flow_1ut95vk + + + + + + + + Flow_a + Flow_0ivuim0 + + + + + + + + Flow_1ogjot9 + Flow_17lkj34 + + + + + + + + Flow_17lkj34 + Flow_0ivuim0 + Flow_1c10q6f + Flow_0kr8pvy + + + Flow_0kr8pvy + + + + Flow_1na0ra6 + Flow_1ogjot9 + Flow_18bte4w + 
+ + + true + + + false + + + Flow_18bte4w + Flow_1c10q6f + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/ExclusiveGatewayMultipleEndNavigation.bpmn b/tests/SpiffWorkflow/bpmn/data/ExclusiveGatewayMultipleEndNavigation.bpmn new file mode 100644 index 000000000..28c4a5538 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/ExclusiveGatewayMultipleEndNavigation.bpmn @@ -0,0 +1,143 @@ + + + + + Flow_0kcrx5l + + + ##### Please confirm Primary Investigator entered in Protocol Builder is correct and if so, provide additional information: +### **{{ pi.display_name }}** +***Email:*** {{ pi.email_address }} + +**Primary Appointment** +***School:*** {{ pi.E0.schoolName }} +***Department:*** {{ pi.E0.deptName }} + + +{% if is_me_pi %} +Since you are the person entering this information, you already have access and will receive all emails. 
+{% endif %} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Flow_147b9li + Flow_0xnj2rp + + + + + Flow_1dcsioh + Flow_147b9li + Flow_00prawo + + + tru + + + false + + + Flow_16qr5jf + + + + Flow_0kcrx5l + Flow_1dcsioh + + + No PI entered in PB + Flow_00prawo + Flow_16qr5jf + + + Flow_0xnj2rp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/ExclusiveGatewayNavigation.bpmn b/tests/SpiffWorkflow/bpmn/data/ExclusiveGatewayNavigation.bpmn new file mode 100644 index 000000000..fc3fecd83 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/ExclusiveGatewayNavigation.bpmn @@ -0,0 +1,143 @@ + + + + + Flow_1ux3ndu + + + + Flow_1ut95vk + Flow_a + Flow_b + + + + which_branch == 'a' + + + which_branch == 'b' + + + Flow_03ddkww + Flow_0ozlczo + Flow_1ph05b1 + + + + + + + + + + + + + + Flow_1ux3ndu + Flow_1ut95vk + + + + + + + + Flow_a + Flow_03ddkww + + + + + + + + Flow_b + Flow_0ozlczo + + + + + + + + Flow_1ph05b1 + Flow_0kr8pvy + + + Flow_0kr8pvy + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/ExclusiveGatewayTwiceNavigation.bpmn b/tests/SpiffWorkflow/bpmn/data/ExclusiveGatewayTwiceNavigation.bpmn new file mode 100644 index 000000000..b6b266668 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/ExclusiveGatewayTwiceNavigation.bpmn @@ -0,0 +1,148 @@ + + + + + SequenceFlow_05ja25w + + + + ### {{ LabName }} +#### Lab details + + +Your response to these questions will determine if you do or do not provide additional information regarding each topic later. + SequenceFlow_05ja25w + Flow_0scfmzc + + + Flow_1e2qi9s + + + + Review plan, make changes if needed, continue of ready to submit. 
+ Flow_S2 + SequenceFlow_1b4non2 + Flow_1e2qi9s + + + Flow_0scfmzc + Flow_V1 + Flow_V2 + + + isViking == True + + + Flow_V2 + SequenceFlow_1yi9lig + Flow_S1 + Flow_S2 + + + eatSpam == True + + + + isViking == False + + + + Flow_S1 + SequenceFlow_1b4non2 + + + Flow_V1 + SequenceFlow_1yi9lig + + + eatSpam == False + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/LoopBackNav.bpmn b/tests/SpiffWorkflow/bpmn/data/LoopBackNav.bpmn new file mode 100644 index 000000000..fa6a13573 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/LoopBackNav.bpmn @@ -0,0 +1,81 @@ + + + + + Flow_167u8pq + + + + Flow_0fi72hf + Flow_0x2ct8h + Flow_0hq66qs + + + + Flow_0x2ct8h + + + loop_again==False + + + loop_again==True + + + Flow_167u8pq + Flow_1jwf95z + Flow_0fi72hf + + + + Flow_0hq66qs + Flow_1jwf95z + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/MessageBoundary.bpmn b/tests/SpiffWorkflow/bpmn/data/MessageBoundary.bpmn new file mode 100644 index 000000000..77bf2a35c --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/MessageBoundary.bpmn @@ -0,0 +1,243 @@ + + + + + + + + + Event_0d3xq5q + Activity_Interrupt + Gateway_0ncff13 + Event_0l8sadb + Event_0g8w85g + + + Event_12moz8m + Event_0j702hl + Activity_1m4766l + Event_InterruptBoundary + + + + Flow_0bvln2b + + + + + + + + + Flow_0bvln2b + Flow_1t2ocwk + Flow_1ya6ran + + + Flow_1ya6ran + Flow_0saykw5 + Flow_1t2ocwk + + + + interrupt_task == 'Yes' + + + Flow_0saykw5 + Flow_0lekhj5 + + + + Flow_0lekhj5 + + + + + Flow_1gd7a2h + + + + + + Flow_0o0l113 + Flow_093roev + + + Flow_1gd7a2h + Flow_093roev + + Flow_1gs89vo + + + Flow_1gs89vo + Flow_0wuxluk + Flow_11u0pgk + + + + Flow_11u0pgk + Flow_1rqk2v9 + + timedelta(seconds=.1) + + + + + Flow_1rqk2v9 + Flow_18d90uu + Flow_0wuxluk + + 
+ Flow_18d90uu + + + False + + + + + + Flow_0o0l113 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/MultiInstanceParallelTask.bpmn b/tests/SpiffWorkflow/bpmn/data/MultiInstanceParallelTask.bpmn new file mode 100644 index 000000000..6b02f0961 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/MultiInstanceParallelTask.bpmn @@ -0,0 +1,59 @@ + + + + + Flow_0t6p1sb + + + + Flow_0ugjw69 + + + + Flow_0ds4mp0 + Flow_0ugjw69 + + collection + + + + Flow_0t6p1sb + Flow_0ds4mp0 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/MultiInstanceParallelTaskCond.bpmn b/tests/SpiffWorkflow/bpmn/data/MultiInstanceParallelTaskCond.bpmn new file mode 100644 index 000000000..2945c381a --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/MultiInstanceParallelTaskCond.bpmn @@ -0,0 +1,145 @@ + + + + + Flow_0t6p1sb + + + + Flow_0ugjw69 + Flow_1oo4mpj + + + + Flow_0u92n7b + Flow_0ugjw69 + + + + Flow_0t6p1sb + Flow_0ds4mp0 + + + + Flow_1sx7n9u + Flow_1oo4mpj + Flow_0u92n7b + + + len(collection.keys())==0 + + + + + Flow_0ds4mp0 + Flow_1dah8xt + Flow_0i1bv5g + + + + Flow_1dah8xt + Flow_0io0g18 + + + Flow_0io0g18 + Flow_0i1bv5g + Flow_1sx7n9u + + + + 1==1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/NavLeapFrog.bpmn b/tests/SpiffWorkflow/bpmn/data/NavLeapFrog.bpmn new file mode 100644 index 000000000..b24aadb8b --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/NavLeapFrog.bpmn @@ -0,0 +1,279 @@ + + + + + Flow_1d5ya6o + + + temp + Flow_1g7q28p + Flow_15xpsq8 + + + 
Flow_0pdoc38 + Flow_07vu2b0 + Flow_1g7q28p + Flow_0qti1ms + + + The following Additional Personnel were entered in Protocol Builder: +{%+ for key, value in aps.items() %}{{value.display_name}} ({{key}}){% if loop.index is lt cnt_aps %}, {% endif %}{% endfor %} +To Save the current settings for all Additional Personnel, select Save All. + + + + +Otherwise, edit each Additional Personnel as necessary and select the Save button for each. + + + + +### Please provide supplemental information for: + #### {{ ap.display_name }} + ##### Title: {{ ap.title }} + + + ##### Department: {{ ap.department }} + ##### Affiliation: {{ ap.affiliation }} + + + + + + + Flow_0qti1ms + Flow_15xpsq8 + + + + + Flow_1d5ya6o + Flow_1j25zj6 + + + Flow_1284s5t + Flow_011udmb + Flow_1qfn1re + Flow_15w791d + + + + Flow_1qfn1re + Flow_1c4kg6v + + cows + + + + Flow_1j25zj6 + Flow_0yq9y65 + Flow_0rdfr06 + + + Flow_0yq9y65 + Flow_1284s5t + + cats + + + + + cats>0 + + + + cats==0 + + + + + Flow_1c4kg6v + Flow_15w791d + Flow_0pdoc38 + Flow_0ljn2v6 + + + + The following Sub-Investigators were entered in Protocol Builder: +{%+ for key, value in subs.items() %}{{value.display_name}} ({{key}}){% if loop.index is lt cnt_subs %}, {% endif %}{% endfor %} +To Save the current settings for all Sub-Investigators, select Save All. + + +Otherwise, edit each Sub-Investigator as necessary and select the Save button for each. 
+ + +### Please provide supplemental information for: + #### {{ sub.display_name }} + ##### Title: {{ sub.title }} + + ##### Department: {{ sub.department }} + ##### Affiliation: {{ sub.affiliation }} + + + + + + + Flow_0ljn2v6 + Flow_07vu2b0 + + + + + + + + + + Flow_0rdfr06 + Flow_011udmb + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/NavLeapFrogLong.bpmn b/tests/SpiffWorkflow/bpmn/data/NavLeapFrogLong.bpmn new file mode 100644 index 000000000..6a92338e8 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/NavLeapFrogLong.bpmn @@ -0,0 +1,1209 @@ + + + + + Flow_0kcrx5l + + + Flow_0kcrx5l + Flow_1dcsioh + current_user = ldap() +investigators = study_info('investigators') +# Primary Investigator +pi = investigators.get('PI', None) +is_cu_pi = False +if pi != None: + hasPI = True + study_data_set("PiUid",pi['uid']) + if pi.get('uid', None) != None: + pi_invalid_uid = False + if pi['uid'] == current_user['uid']: + is_cu_pi = True + else: + pi_invalid_uid = True +else: + hasPI = False + +# Department Chair +dc = investigators.get('DEPT_CH', None) +if dc != None: + if dc.get('uid', None) != None: + dc_invalid_uid = False + else: + dc_invalid_uid = True +else: + dc_invalid_uid = False + +# Primary Coordinators +pcs = {} +is_cu_pc = False +cnt_pcs_uid = 0 +for k in investigators.keys(): + if k in ['SC_I','SC_II','IRBC']: + investigator = investigators.get(k) + if investigator.get('uid', None) != None: + if investigator['uid'] != current_user['uid']: + pcs[k] = investigator + cnt_pcs_uid = cnt_pcs_uid + 1 + else: + is_cu_pc = True + is_cu_pc_role = investigator['label'] + else: + pcs[k] = investigator +cnt_pcs = len(pcs.keys()) +if cnt_pcs != cnt_pcs_uid: + pcs_invalid_uid = True +else: 
+ pcs_invalid_uid = False +if cnt_pcs > 0: + del(k) + del(investigator) + +# Additional Coordinators +acs = {} +is_cu_ac = False +cnt_acs_uid = 0 +for k in investigators.keys(): + if k == 'AS_C': + investigator = investigators.get(k) + if investigator.get('uid', None) != None: + if investigator['uid'] != current_user['uid']: + acs[k] = investigator + cnt_acs_uid = cnt_acs_uid + 1 + else: + is_cu_ac = True + is_cu_ac_role = investigator['label'] + else: + acs[k] = investigator +cnt_acs = len(acs.keys()) +if cnt_pcs != cnt_pcs_uid: + acs_invalid_uid = True +else: + acs_invalid_uid = False +if cnt_acs > 0: + del(k) + del(investigator) + +# Sub-Investigatoers +subs = {} +is_cu_subs = False +cnt_subs_uid = 0 +for k in investigators.keys(): + if k[:2] == 'SI': + investigator = investigators.get(k) + if investigator.get('uid', None) != None: + if investigator['uid'] != current_user['uid']: + subs[k] = investigator + cnt_subs_uid = cnt_subs_uid + 1 + else: + is_cu_subs = True + else: + subs[k] = investigator +cnt_subs = len(subs.keys()) +if cnt_subs != cnt_subs_uid: + subs_invalid_uid = True +else: + subs_invalid_uid = False +if cnt_subs > 0: + del(k) + del(investigator) + +# Additional Personnel +aps = {} +is_cu_ap = False +cnt_aps_uid = 0 +for k in investigators.keys(): + if k in ['SCI','DC']: + investigator = investigators.get(k) + if investigator.get('uid', None) != None: + if investigator['uid'] != current_user['uid']: + aps[k] = investigator + cnt_aps_uid = cnt_aps_uid + 1 + else: + is_cu_ap = True + is_cu_ap_role = investigator['label'] + else: + aps[k] = investigator +cnt_aps = len(aps.keys()) +if cnt_aps != cnt_aps_uid: + aps_invalid_uid = True +else: + aps_invalid_uid = False +if cnt_aps > 0: + del(k) + del(investigator) +del(investigators) + + + temp + Flow_10zn0h1 + Flow_0kp47dz + + + ##### Please confirm Primary Investigator entered in Protocol Builder is correct and if so, provide additional information: +### **{{ pi.display_name }}** +***Email:*** {{ 
pi.email_address }} + +**Primary Appointment** +***School:*** {{ pi.E0.schoolName }} +***Department:*** {{ pi.E0.deptName }} + + +{% if is_me_pi %} +Since you are the person entering this information, you already have access and will receive all emails. +{% endif %} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Flow_1kg5jot + Flow_1mplloa + + + + + Flow_1dcsioh + Flow_147b9li + Flow_00prawo + + + + not(hasPI) or (hasPI and pi_invalid_uid) + + + No PI entered in PB + Flow_00prawo + Flow_16qr5jf + + + Flow_0kpe12r + Flow_1ayisx2 + Flow_0xifvai + Flow_1oqem42 + + + + + The following Primary Coordinators were entered in Protocol Builder: +{%+ for key, value in pcs.items() %}{{value.display_name}} ({{key}}){% if loop.index is lt cnt_pcs %}, {% endif %}{% endfor %} +To Save the current settings for all Primary Coordinators, select Save All. + + +Otherwise, edit each Coordinator as necessary and select the Save button for each. + +### Please provide supplemental information for: + #### {{ pc.display_name }} + ##### Title: {{ pc.title }} + + ##### Department: {{ pc.department }} + ##### Affiliation: {{ pc.affiliation }} + + + + + + + Flow_0xifvai + Flow_1n0k4pd + + + + cnt_pcs == 0 + + + Flow_0tfprc8 + Flow_0tsdclr + Flow_1grahhv + LDAP_dept = pi.department +length_LDAP_dept = len(LDAP_dept) +pi.E0 = {} +if length_LDAP_dept > 0: + E0_start = LDAP_dept.find("E0:") + 3 + E0_slice = LDAP_dept[E0_start:length_LDAP_dept] + E0_first_hyphen = E0_slice.find("-") + E0_dept_start = E0_first_hyphen + 1 + pi.E0.schoolAbbrv = E0_slice[0:E0_first_hyphen] + isSpace = " " in E0_slice + if isSpace: + E0_first_space = E0_slice.find(" ") + E0_spec_start = E0_first_space + 1 + E0_spec_end = len(E0_slice) + pi.E0.deptAbbrv = E0_slice[E0_dept_start:E0_first_space] + pi.E0.specName = E0_slice[E0_spec_start:E0_spec_end] + else: + pi.E0.specName = "" +else: + pi.E0.schoolAbbrv = "Not in LDAP" + pi.E0.deptAbbrv = "Not in LDAP" + pi.E0.specName = "Not in LDAP" + + + + The Study's 
Responsible Organization is needed in order to confirm the Department Chair. If it is the same as the Primary Investigator's Primary Department show below, we have all the information needed to determine the Department Chair. + + +**Primary Investigator's Primary Appointment** +***School:*** {{ pi.E0.schoolName }} +***Department:*** {{ pi.E0.deptName }} + + + + + + + + + + + + + Flow_0iuzu7j + Flow_0whqr3p + + + + Flow_070j5fg + Flow_0vi6thu + Flow_00yhlrq + + + + RO_Chair_CID == pi.uid + + + The PI is also the RO Chair + Flow_00yhlrq + Flow_0kpe12r + + + + Flow_12ss6u8 + Flow_0dt3pjw + Flow_05rqrlf + Flow_0jxzqw1 + + + + + cnt_subs == 0 + + + The following Sub-Investigators were entered in Protocol Builder: +{%+ for key, value in subs.items() %}{{value.display_name}} ({{key}}){% if loop.index is lt cnt_subs %}, {% endif %}{% endfor %} +To Save the current settings for all Sub-Investigators, select Save All. + + +Otherwise, edit each Sub-Investigator as necessary and select the Save button for each. + + +### Please provide supplemental information for: + #### {{ sub.display_name }} + ##### Title: {{ sub.title }} + + ##### Department: {{ sub.department }} + ##### Affiliation: {{ sub.affiliation }} + + + + + + + Flow_05rqrlf + Flow_0ofpgml + + + + Please enter the Private Investigator in Protocol Builder. 
+ Flow_16qr5jf + + + + + Flow_1grahhv + Flow_1kg5jot + pi.E0.schoolName = PI_E0_schoolName +pi.E0.deptName = PI_E0_deptName +pi_experience_key = "pi_experience_" + pi.user_id +pi.experience = user_data_get(pi_experience_key,"") +ro = {} +ro['chair'] = {} + + + + Flow_1oo0ijr + Flow_070j5fg + ro.chair = {} +ro.chair.uid = RO_Chair_CID +ro.chair.name_degree = RO_Chair_Name_Degree +ro.chair.title = RO_Chair_Title +ro.chair.sig_block = RO_Chair_Sig_Block + + + Flow_1n0k4pd + Flow_1oqem42 + Flow_1gtl2o3 + Flow_0dt3pjw + + + + + The following Additional Coordinators were entered in Protocol Builder: +{%+ for key, value in acs.items() %}{{value.display_name}} ({{key}}){% if loop.index is lt cnt_acs %}, {% endif %}{% endfor %} +To Save the current settings for all Additional Coordinators, select Save All. + + + + +Otherwise, edit each Coordinator as necessary and select the Save button for each. + + +### Please provide supplemental information for: + #### {{ acs.display_name }} + ##### Title: {{ acs.title }} + + + ##### Department: {{ acs.department }} + ##### Affiliation: {{ acs.affiliation }} + Flow_1gtl2o3 + Flow_12ss6u8 + + + + cnt_acs == 0 + + + Flow_1va8c15 + Flow_1yd7kbi + Flow_0w4d2bz + + + Flow_1yd7kbi + Flow_13la8l3 + Flow_0ycdxbl + + + PIsPrimaryDepartmentSameAsRO.value != "yes" + + + + PIsPrimaryDepartmentSameAsRO.value == 'diffSchool' + + + Flow_0ycdxbl + Flow_1fj9iz0 + sch_enum = [] +if pi.E0.schoolAbbrv != "MD": + sch_enum_md = [ + { + "value": "MD", + "label": "Medicine" + }, + ] +else: + sch_enum_md = [] +if pi.E0.schoolAbbrv != "AS": + sch_enum_as = [ + { + "value": "AS", + "label": "Arts & Science" + }, + ] +else: + sch_enum_as = [] +if pi.E0.schoolAbbrv != "CU": + sch_enum_cu = [ + { + "value": "CU", + "label": "Education" + }, + ] +else: + sch_enum_cu = [] +if pi.E0.schoolAbbrv != "NR": + sch_enum_nr = [ + { + "value": "NR", + "label": "Nursing" + }, + ] +else: + sch_enum_nr = [] +sch_enum = sch_enum_md + sch_enum_as + sch_enum_cu + sch_enum_nr 
+del(sch_enum_md) +del(sch_enum_as) +del(sch_enum_cu) +del(sch_enum_nr) + + + + + + + + + + + + + + + Flow_1fj9iz0 + Flow_1yz8k2a + + + + + Flow_13la8l3 + Flow_0mdjaid + Flow_0fw4rck + Flow_1azfvtx + Flow_0giqf35 + Flow_0iuzu7j + + + ro.schoolAbbrv not in ["MD", "AS", "CU"] + + + Flow_0whqr3p + Flow_0zc01f9 + Flow_1vyg8ir + Flow_0m9peiz + + + + Flow_0m9peiz + Flow_1vv63qa + if PIsPrimaryDepartmentSameAsRO.value == "diffSchool": + ro.schoolName = RO_StudySchool.label + ro.schoolAbbrv = RO_StudySchool.value + +if PIsPrimaryDepartmentSameAsRO.value != "yes": + if ro.schoolAbbrv == "MD": + ro.deptName = RO_StudyDeptMedicine.label + ro.deptAbbrv = RO_StudyDeptMedicine.value + elif ro.schoolAbbrv == "AS": + ro.deptName = RO_StudyDeptArtsSciences.label + ro.deptAbbrv = RO_StudyDeptArtsSciences.value + elif ro.schoolAbbrv == "CU": + ro.deptName = RO_StudyDeptEducation.label + ro.deptAbbrv = RO_StudyDeptEducation.value + else: + ro.deptName = "" + ro.deptAbbrv = "" + + + The Study's Responsible Organization is needed in order to confirm the Department Chair. If it is the same as the Primary Investigator's Primary Department show below, we have all the information needed to determine the Department Chair. + + +**Primary Investigator's Primary Appointment** +***School:*** {{ pi.E0.schoolName }} +***Department:*** {{ pi.E0.deptName }} + + + + + + + + + + + + + Flow_1azfvtx + Flow_0zc01f9 + + + Flow_1e0yt3v + Flow_0shnt6k + Flow_1va8c15 + ro = {} +ro['schoolName'] = PI_E0_schoolName +ro['schoolAbbrv'] = pi.E0.schoolAbbrv +ro['deptName'] = pi.E0.deptName +ro['deptAbbrv'] = pi.E0.deptAbbrv + + + + + + The Study's Responsible Organization is needed in order to confirm the Department Chair. If it is the same as the Primary Investigator's Primary Department show below, we have all the information needed to determine the Department Chair. 
+ + +**Primary Investigator's Primary Appointment** +***School:*** {{ pi.E0.schoolName }} +***Department:*** {{ pi.E0.deptName }} + + + + + + + + + + + + + Flow_0giqf35 + Flow_1vyg8ir + + + + + + + Flow_1yz8k2a + Flow_0mdjaid + ro.schoolName = RO_StudySchool.label +ro.schoolAbbrv = RO_StudySchool.value + + + + ro.schoolAbbrv == "AS" + + + ro.schoolAbbrv == "CU" + + + Flow_1vv63qa + Flow_0fw4rck + Flow_0vff9k5 + + + + Flow_0ofpgml + Flow_0jxzqw1 + Flow_0q56tn8 + Flow_0kp47dz + + + + + cnt_aps == 0 + + + The following Additional Personnel were entered in Protocol Builder: +{%+ for key, value in aps.items() %}{{value.display_name}} ({{key}}){% if loop.index is lt cnt_aps %}, {% endif %}{% endfor %} +To Save the current settings for all Additional Personnel, select Save All. + + + + +Otherwise, edit each Additional Personnel as necessary and select the Save button for each. + + + + +### Please provide supplemental information for: + #### {{ ap.display_name }} + ##### Title: {{ ap.title }} + + + ##### Department: {{ ap.department }} + ##### Affiliation: {{ ap.affiliation }} + + + + + + + Flow_0q56tn8 + Flow_10zn0h1 + + + + Flow_147b9li + Flow_0tfprc8 + Flow_0nz62mu + + + + dc_invalid_uid or pcs_invalid_uid or acs_invalid_uid or subs_invalid_uid or aps_invalid_uid + + + Select No if all displayed invalid Computing IDs do not need system access and/or receive emails. If they do, correct in Protocol Builder first and then select Yes. 
+ + +{% if dc_invalid_uid %} +Department Chair + {{ dc.error }} +{% endif %} +{% if pcs_invalid_uid %} +Primary Coordinators +{% for k, pc in pcs.items() %} + {% if pc.get('uid', None) == None: %} + {{ pc.error }} + {% endif %} +{% endfor %} +{% endif %} +{% if acs_invalid_uid %} +Additional Coordinators +{% for k, ac in acs.items() %} + {% if ac.get('uid', None) == None: %} + {{ ac.error }} + {% endif %} +{% endfor %} +{% endif %} +{% if subs_invalid_uid %} +Sub-Investigators +{% for k, sub in subs.items() %} + {% if sub.get('uid', None) == None: %} + {{ sub.error }} + {% endif %} +{% endfor %} +{% endif %} +{% if aps_invalid_uid %} +Additional Personnnel +{% for k, ap in aps.items() %} + {% if ap.get('uid', None) == None: %} + {{ ap.error }} + {% endif %} +{% endfor %} +{% endif %} + + + + + + + + + + Flow_0nz62mu + Flow_16bkbuc + + + Flow_16bkbuc + Flow_0tsdclr + Flow_1mtwuyq + + + + not(FixInvalidUIDs) + + + ***Name & Degree:*** {{ RO_Chair_Name_Degree }} +***School:*** {{ RO_School }} +***Department:*** {{ RO_Department }} +***Title:*** {{ RO_Chair_Title }} +***Email:*** {{ RO_Chair_CID }} + + +{% if RO_Chair_CID != dc.uid %} + *Does not match the Department Chair specified in Protocol Builder, {{ dc.display_name }}* +{% endif %} + + + + + + + + + + Flow_0vi6thu + Flow_1ayisx2 + + + + Flow_07ur9cc + Flow_0shnt6k + user_data_set(pi_experience_key, pi.experience) + + + Flow_1mplloa + Flow_07ur9cc + Flow_1e0yt3v + + + pi.experience != user_data_get(pi_experience_key,"") + + + + + Flow_0vff9k5 + Flow_0w4d2bz + Flow_1oo0ijr + + + Flow_1mtwuyq + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/ParallelOrder.bpmn b/tests/SpiffWorkflow/bpmn/data/ParallelOrder.bpmn new file mode 100644 index 000000000..b37ac39fc --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/ParallelOrder.bpmn @@ -0,0 +1,138 @@ + + + + + Flow_0a440h9 + + + + Flow_0a440h9 + Flow_0fyg6vt + Flow_1hxdywg + Flow_1knmmur + Flow_1hchuue + + + Flow_0fyg6vt + Flow_0vkxjxc + + + + Flow_1hxdywg + Flow_1l55egz + + + Flow_1knmmur + Flow_1drgguy + + + Flow_1hchuue + Flow_0rxk9n3 + + + + + + + Flow_1l55egz + Flow_0vkxjxc + Flow_1drgguy + Flow_0rxk9n3 + Flow_0mckkuv + + + + + + Flow_0mckkuv + + + + These tasks were created in the opposite order in which they are displayed.  In the XML, Task4 happens first, then 3, 2, and 1. When Parsed, the order of these tasks should be 1,2,3 and 4. 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/ParallelTasksNavigation.bpmn b/tests/SpiffWorkflow/bpmn/data/ParallelTasksNavigation.bpmn new file mode 100644 index 000000000..b735e7f9f --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/ParallelTasksNavigation.bpmn @@ -0,0 +1,233 @@ + + + + + Flow_0onk49f + + + + + + + + + Flow_04fhyzu + Flow_ST2 + Flow_ST1 + + + + + Flow_ST2 + Flow_A + Flow_B + Flow_0xgjtz5 + Flow_C + + + + + + Flow_0w1fyeg + + + + skip_to_task_3 == False + + + + + + + + + + + + + + + + + + + + + Flow_0onk49f + Flow_04fhyzu + + + + + + + + Flow_A + Flow_0o3lota + + + + + + + + Flow_B + Flow_0fecjum + + + + + + + + + Flow_C + Flow_11ksiq9 + + + + + + + + + Flow_ST1 + Flow_00sur6q + Flow_0w1fyeg + + + + skip_to_task_3 == True + + + Flow_0xgjtz5 + Flow_0o3lota + Flow_11ksiq9 + Flow_13ggrxo + Flow_00sur6q + + + + Flow_0fecjum + Flow_13ggrxo + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/ScriptTest.bpmn b/tests/SpiffWorkflow/bpmn/data/ScriptTest.bpmn new file mode 100644 index 000000000..f97d40ec7 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/ScriptTest.bpmn @@ -0,0 +1,56 @@ + + + + + Flow_0dsbqk4 + + + + Flow_0dsbqk4 + Flow_1izwhjx + testvar = {'a':1,'b':2} +testvar2 = [{'x':1,'y':'a'}, + {'x':2,'y':'b'}, + {'x':3,'y':'c'}] + + + Flow_1rbktuo + + + + + Flow_1izwhjx + Flow_1rbktuo + testvar['new'] = 'Test' +sample = [x['y'] for x in testvar2 if x['x'] > 1] + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Action-Management.bpmn20.xml 
b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Action-Management.bpmn20.xml index 38355eeba..85d39b530 100644 --- a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Action-Management.bpmn20.xml +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Action-Management.bpmn20.xml @@ -97,7 +97,7 @@ sid-4B3460BF-1433-4961-BEA6-CD4766A5F509 sid-C06ACF4A-E241-4E40-B283-F35060801420 - + @@ -107,7 +107,7 @@ sid-CA558A2D-1F6E-4BEB-B04F-6868529FCC24 sid-6B67CBBC-E314-4DCF-B12F-968AEA30B05D - + @@ -125,7 +125,7 @@ sid-2F70B74A-5D28-4D73-9C3B-540E7F9723F2 sid-2DE53FE2-6F50-4EF5-9B11-E3733E2BD494 - + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Action-Management.signavio.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Action-Management.signavio.xml index 47b79711c..a53e48590 100644 --- a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Action-Management.signavio.xml +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Action-Management.signavio.xml @@ -404,7 +404,7 @@ "messageref": "", "operationref": "", "instantiate": "", - "script": "task.set_data(script_output\u003d\u0027NEW ACTION\u0027)", + "script": "script_output\u003d\u0027NEW ACTION\u0027", "scriptformat": "", "bgcolor": "#ffffcc", "looptype": "None", @@ -480,7 +480,7 @@ "messageref": "", "operationref": "", "instantiate": "", - "script": "task.set_data(script_output\u003d\u0027ACTION CANCELLED\u0027)", + "script": "script_output\u003d\u0027ACTION CANCELLED\u0027", "scriptformat": "", "bgcolor": "#ffffcc", "looptype": "None", @@ -659,7 +659,7 @@ "messageref": "", "operationref": "", "instantiate": "", - "script": "task.set_data(script_output\u003d \u0027ACTION OVERDUE\u0027)", + "script": "script_output\u003d \u0027ACTION OVERDUE\u0027", "scriptformat": "", "bgcolor": "#ffffcc", "looptype": "None", diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Multi-Instance.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Multi-Instance.bpmn20.xml new file mode 100644 index 000000000..4c480fbd7 --- /dev/null +++ 
b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Multi-Instance.bpmn20.xml @@ -0,0 +1,88 @@ + + + + + SequenceFlow_1svgzrn + + + # {{person.id}} +Please answer a few questions about this person. +loo + + + + + + SequenceFlow_066wkms + SequenceFlow_1qz7vsd + + 3 + + + + # Thank you for completeing the user information. + +{% for person in personnel %} +  * Person {{person.uid}} does {% if not person.dog_friendly %} NOT {% endif %} likes dogs. +{% endfor %} + SequenceFlow_1qz7vsd + + + + SequenceFlow_1svgzrn + SequenceFlow_066wkms + + + + + Loops through each person and asks if they like dogs. + + + + Collects the set of personnel from the Protocol Builder + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel.camunda.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel.camunda.bpmn20.xml new file mode 100644 index 000000000..620cf25b8 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Parallel.camunda.bpmn20.xml @@ -0,0 +1,158 @@ + + + + + SequenceFlow_1vv685e + + + + + SequenceFlow_160ihio + SequenceFlow_01s4u0j + SequenceFlow_1x5zcdu + SequenceFlow_1g26zbi + + + + + + + SequenceFlow_11uv01u + SequenceFlow_0z80s5o + SequenceFlow_0cquzxd + SequenceFlow_0uq97wv + + + + + + SequenceFlow_1a97zm5 + + + + + + + + + + + + + + SequenceFlow_1vv685e + SequenceFlow_160ihio + + + + + + + + SequenceFlow_01s4u0j + SequenceFlow_11uv01u + + + + + + + + SequenceFlow_1x5zcdu + SequenceFlow_0z80s5o + + + + + + + + SequenceFlow_1g26zbi + SequenceFlow_0cquzxd + + + + + + + + SequenceFlow_0uq97wv + SequenceFlow_1a97zm5 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Test-Workflows.bpmn20.xml b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Test-Workflows.bpmn20.xml index 8a1f122b0..b9ab28f94 100644 
--- a/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Test-Workflows.bpmn20.xml +++ b/tests/SpiffWorkflow/bpmn/data/Test-Workflows/Test-Workflows.bpmn20.xml @@ -1,194 +1,209 @@ - - - - - - - - - - - - - - - sid-2C69C7A6-BD02-428F-B5AA-0BB10EC514C2 - sid-6A83C24E-9609-47CD-B595-BCFC30BBF790 - sid-1192E625-131C-46FE-B872-20C4533EA7DE - sid-464B8E64-10B4-4158-BDEE-11144CE20306 - sid-7C7227E8-087F-4CB6-9B60-200B5D495886 - sid-983D0282-05EA-48F6-B02F-41BBD8DF538E - sid-5A3ED200-8278-4D1F-9F84-B5E53B268133 - sid-60507DE6-5A96-4C88-A322-E3451526EB33 - - - - - - - sid-6469A686-F148-4E90-81BB-5D3305421758 - - - - - - sid-6469A686-F148-4E90-81BB-5D3305421758 - sid-0B133E7E-E6B3-4578-8F8F-1C9DA5D8A015 - - - - - - sid-0B133E7E-E6B3-4578-8F8F-1C9DA5D8A015 - sid-85975D4B-1DC8-4998-A7DF-6F9C31861EE7 - sid-ECE4F718-B986-45F1-8B0C-C0C1DAE66DB3 - sid-488CD0F1-E280-4BDE-B794-64CAF8C4FCA8 - sid-9AD6C46B-BC40-4D3A-91E2-8E933EF9ADF0 - sid-BFCB2CAC-CE5B-4EA2-8A4D-339D7D5894A4 - - - - - - sid-36EA1BBD-FB84-4C19-AEFA-5C731F9C8789 - sid-979356AC-A00F-456E-9790-39D512F50D3C - sid-9AD6C46B-BC40-4D3A-91E2-8E933EF9ADF0 - sid-B7DB2642-79A3-4D7B-9CB6-06C4854E4C9D - sid-EC51B1C2-2A2D-4515-90CF-9CDCF31A8A44 - - - - - - - sid-85975D4B-1DC8-4998-A7DF-6F9C31861EE7 - sid-36EA1BBD-FB84-4C19-AEFA-5C731F9C8789 - - - - - - - sid-488CD0F1-E280-4BDE-B794-64CAF8C4FCA8 - sid-979356AC-A00F-456E-9790-39D512F50D3C - - - - - - - sid-ECE4F718-B986-45F1-8B0C-C0C1DAE66DB3 - sid-B7DB2642-79A3-4D7B-9CB6-06C4854E4C9D - - - - - - - sid-BFCB2CAC-CE5B-4EA2-8A4D-339D7D5894A4 - sid-EC51B1C2-2A2D-4515-90CF-9CDCF31A8A44 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + sid-2C69C7A6-BD02-428F-B5AA-0BB10EC514C2 + USER_INPUT_SELECT_TEST + GATEWAY_SELECTED_TEST + sid-464B8E64-10B4-4158-BDEE-11144CE20306 + MESSAGES_SUB + SCRIPTS_SUB + 
MESSAGE_INTERRUPTS_SUB + MESSAGE_NON_INTERRUPT_SUB + + + + + + + sid-6469A686-F148-4E90-81BB-5D3305421758 + + + + + + sid-6469A686-F148-4E90-81BB-5D3305421758 + sid-0B133E7E-E6B3-4578-8F8F-1C9DA5D8A015 + + + + + + sid-0B133E7E-E6B3-4578-8F8F-1C9DA5D8A015 + sid-85975D4B-1DC8-4998-A7DF-6F9C31861EE7 + sid-ECE4F718-B986-45F1-8B0C-C0C1DAE66DB3 + sid-488CD0F1-E280-4BDE-B794-64CAF8C4FCA8 + sid-9AD6C46B-BC40-4D3A-91E2-8E933EF9ADF0 + sid-BFCB2CAC-CE5B-4EA2-8A4D-339D7D5894A4 + + + + + + sid-36EA1BBD-FB84-4C19-AEFA-5C731F9C8789 + sid-979356AC-A00F-456E-9790-39D512F50D3C + sid-9AD6C46B-BC40-4D3A-91E2-8E933EF9ADF0 + sid-B7DB2642-79A3-4D7B-9CB6-06C4854E4C9D + sid-EC51B1C2-2A2D-4515-90CF-9CDCF31A8A44 + + + + + + + sid-85975D4B-1DC8-4998-A7DF-6F9C31861EE7 + sid-36EA1BBD-FB84-4C19-AEFA-5C731F9C8789 + + + + + + + sid-488CD0F1-E280-4BDE-B794-64CAF8C4FCA8 + sid-979356AC-A00F-456E-9790-39D512F50D3C + + + + + + + sid-ECE4F718-B986-45F1-8B0C-C0C1DAE66DB3 + sid-B7DB2642-79A3-4D7B-9CB6-06C4854E4C9D + + + + + + + sid-BFCB2CAC-CE5B-4EA2-8A4D-339D7D5894A4 + sid-EC51B1C2-2A2D-4515-90CF-9CDCF31A8A44 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tests/SpiffWorkflow/bpmn/data/boundary.bpmn b/tests/SpiffWorkflow/bpmn/data/boundary.bpmn new file mode 100644 index 000000000..6a3a29bfd --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/boundary.bpmn @@ -0,0 +1,163 @@ + + + + + Flow_1pbxbk9 + + + Flow_1jnwt7c + Flow_0f0f7wg + + Flow_0vmzw8v + + + + + + + + Flow_0vmzw8v + Flow_0hkqchr + Flow_0axldsu + + + + Flow_0q65w45 + + + Flow_0axldsu + Flow_0q65w45 + Flow_0hkqchr + + + + answer == 'Yes' + + + + + Flow_0yzqey7 + Flow_1v53za5 + + + + + + + Flow_0jqkm6y + + + + Flow_0f0f7wg + Flow_1v53za5 + Flow_0jqkm6y + + + + + + + + + Flow_1pbxbk9 + Flow_1jnwt7c + + + Flow_0yzqey7 + + 
PT0.3S + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/bpmnAntiLoopTask.bpmn b/tests/SpiffWorkflow/bpmn/data/bpmnAntiLoopTask.bpmn new file mode 100644 index 000000000..4893f713a --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/bpmnAntiLoopTask.bpmn @@ -0,0 +1,47 @@ + + + + + Flow_0q33jmj + + + Enter Name for member {{ Activity_TestLoop_CurrentVar }} + + + + + + Flow_0q33jmj + Flow_13213ce + + 5 + + + + + Flow_13213ce + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/bpmnLoopTask.bpmn b/tests/SpiffWorkflow/bpmn/data/bpmnLoopTask.bpmn new file mode 100644 index 000000000..c35b237e2 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/bpmnLoopTask.bpmn @@ -0,0 +1,45 @@ + + + + + Flow_0q33jmj + + + Enter Name for member {{ Activity_TestLoop_CurrentVar }} + + + + + + Flow_0q33jmj + Flow_13213ce + + + + + Flow_13213ce + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/bpmnMultiUserTask.bpmn b/tests/SpiffWorkflow/bpmn/data/bpmnMultiUserTask.bpmn new file mode 100644 index 000000000..d9a723161 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/bpmnMultiUserTask.bpmn @@ -0,0 +1,49 @@ + + + + + Flow_0t6p1sb + + + + Flow_0ugjw69 + + + + Flow_0t6p1sb + Flow_0ugjw69 + + 5 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/exclusive_into_multi.bpmn b/tests/SpiffWorkflow/bpmn/data/exclusive_into_multi.bpmn new file mode 100644 index 000000000..0fb12a411 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/exclusive_into_multi.bpmn @@ -0,0 +1,83 @@ + + + + + Flow_163toj3 + + + Flow_163toj3 + Flow_1rakb4c + x = 0 + + + + Flow_1rakb4c + Flow_04bjhw6 + Flow_0340se7 + + + + x != 0 + + + + + Flow_04bjhw6 + Flow_073oado + + 1 + + + + Flow_073oado + Flow_0340se7 + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/external_message.bpmn b/tests/SpiffWorkflow/bpmn/data/external_message.bpmn new file mode 100644 index 000000000..ce3a8f5a0 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/external_message.bpmn @@ -0,0 +1,109 @@ + + + + + Flow_1a0tyih + + + + + + + + Flow_081mykh + Flow_08pe1c9 + + + + Flow_18jn2xj + + + + Flow_18jn2xj + Flow_0q62iou + caughtinterrupt = True + + + + Flow_1pv9l9r + + + + + Flow_1a0tyih + Flow_1pv9l9r + Flow_081mykh + caughtinterrupt = False + + + + + + + + + Flow_0q62iou + + + + Flow_08pe1c9 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/lanes.bpmn b/tests/SpiffWorkflow/bpmn/data/lanes.bpmn new file mode 100644 index 000000000..0dcc70262 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/lanes.bpmn @@ -0,0 +1,148 @@ + + + + + + + + + StartEvent_1 + Activity_A1 + Activity_A2 + + + Activity_B1 + Gateway_askQuestion + Event_0j610d6 + Activity_B2 + + + + Flow_0jwejm5 + + + + + + + + Flow_0jwejm5 + Flow_140vffb + + + + + + + + Flow_140vffb + Flow_1k9gsm1 + + + Flow_1k9gsm1 + Flow_0okhwy0 + Flow_182bqvo + + + Flow_0nr14q3 + + + Flow_182bqvo + Flow_17rng3c + Flow_0nr14q3 + + + + + + + + Flow_0okhwy0 + Flow_17rng3c + + + + + + + NeedClarification == 'Yes' + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/message_event.bpmn b/tests/SpiffWorkflow/bpmn/data/message_event.bpmn new file mode 100644 index 000000000..f473c9655 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/message_event.bpmn @@ -0,0 +1,123 @@ + + + + + Flow_0xym55y + + + + Flow_16q1uec + print('New Title') +title = 'New Title' + + + + + + + + Flow_1rvh899 + Flow_1n1fs6z + + + Flow_07i0gvv + 
Flow_1c2tudh + [print(formdata) for _ in range(how_many)] +printdata = formdata + + + Flow_1c2tudh + + + + + + Flow_0xym55y + Flow_1rvh899 + print('Hello'); printdata=''; test_message='' + + + + + + + + + Flow_1n1fs6z + Flow_07i0gvv + + + + Flow_16q1uec + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/message_test.bpmn b/tests/SpiffWorkflow/bpmn/data/message_test.bpmn new file mode 100644 index 000000000..932a4ad6a --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/message_test.bpmn @@ -0,0 +1,210 @@ + + + + + + + + + Activity_ApproveOrDeny + Event_SendRequestResponse + Event_EndEvent2 + Event_GotRequest + + + Event_NormalStart + Gateway_Approved + Activity_EnterPlan + Event_SendRequest + Event_GetRequest + Event_EndEvent1 + Activity_EnablePlan + + + + Flow_060cfic + + + Flow_1nsjil4 + Flow_1t3bhky + + + + Flow_1t3bhky + Flow_1jdfc06 + + + + Flow_1jdfc06 + Flow_0jqxt85 + Flow_1tfirpy + + + + + + + ApprovalResult=="Yes" + + + + + + + + + + + Flow_1ym5g7r + Flow_0m1dzpq + + + + + + + + + Flow_1tfirpy + Flow_060cfic + Flow_1nsjil4 + + + + Flow_0m1dzpq + Flow_0abuvsx + + + + Flow_0abuvsx + + + Flow_1ym5g7r + + + + Flow_1ync7ek + + + + + + + + + Flow_0jqxt85 + Flow_1ync7ek + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/noninterrupting-MessageBoundary.bpmn b/tests/SpiffWorkflow/bpmn/data/noninterrupting-MessageBoundary.bpmn new file mode 100644 index 000000000..3ea3f311f --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/noninterrupting-MessageBoundary.bpmn @@ -0,0 +1,287 @@ + + + + + + + + + Event_12moz8m + Gateway_0mlgg9d + Activity_WorkLateReason + Event_1w6cide + Activity_1m4766l + Event_InterruptBoundary + + + 
Event_0d3xq5q + Activity_WorkLate + Gateway_0ncff13 + Event_0g8w85g + Event_0l8sadb + + + + Flow_0bvln2b + + + + + + + + Flow_0bvln2b + Flow_1t2ocwk + Flow_1ya6ran + + + Flow_1ya6ran + Flow_0saykw5 + Flow_1t2ocwk + + + Flow_0saykw5 + Flow_0lekhj5 + + + + Flow_0lekhj5 + + + Flow_1gd7a2h + + + Flow_1g8u810 + Flow_1firdqj + Flow_10gq9an + + + + + + + + Flow_0o0l113 + Flow_1g8u810 + + + Flow_10gq9an + + + Flow_1gd7a2h + Flow_1firdqj + + Flow_1gs89vo + + + + + Flow_11u0pgk + Flow_18d90uu + Flow_0wuxluk + + + Flow_1x6ji2h + + + work_done == 'Yes' + + + + + + + + + Flow_1gs89vo + Flow_0wuxluk + Flow_11u0pgk + + + + + + + + + Flow_18d90uu + Flow_1x6ji2h + + + + Flow_0o0l113 + + + + + + + + flag_task == 'Yes' + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/proptest-Sub.bpmn b/tests/SpiffWorkflow/bpmn/data/proptest-Sub.bpmn new file mode 100644 index 000000000..84f987fa7 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/proptest-Sub.bpmn @@ -0,0 +1,40 @@ + + + + + Flow_0wro40z + + + + Flow_0061o90 + + + + Flow_0wro40z + Flow_0061o90 + valC=valB +valD=valA + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/proptest-Top.bpmn b/tests/SpiffWorkflow/bpmn/data/proptest-Top.bpmn new file mode 100644 index 000000000..323ace022 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/proptest-Top.bpmn @@ -0,0 +1,111 @@ + + + + + Flow_0du1rjv + + + Flow_0du1rjv + Flow_0gl3cli + valA = 1 + + + + Flow_0gl3cli + Flow_0qkplpb + + Flow_0i4qk1g + + + + Flow_001zehj + + + + Flow_0i4qk1g + Flow_001zehj + valB = valA + + + + + Flow_0qkplpb + Flow_1rptzfw + + + + + Flow_1u6sv80 + + + + Flow_1rptzfw + Flow_1u6sv80 + #print(valA) 
+#print(valB) +#print(valC) +#print(valD) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/random_fact.bpmn b/tests/SpiffWorkflow/bpmn/data/random_fact.bpmn new file mode 100644 index 000000000..da2b52546 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/random_fact.bpmn @@ -0,0 +1,127 @@ + + + + + SequenceFlow_0ik56h0 + + + + Here's some documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SequenceFlow_0ik56h0 + SequenceFlow_1291h6i + + + + + + + + SequenceFlow_1291h6i + SequenceFlow_0am07in + #! scripts.FactService + + + SequenceFlow_0am07in + + + + + User sets the Fact.type to cat, norris, or buzzword + + + + Makes an API  call to get a fact of the required type. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/resetworkflowA-sublevel.bpmn b/tests/SpiffWorkflow/bpmn/data/resetworkflowA-sublevel.bpmn new file mode 100644 index 000000000..c4f5267fe --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/resetworkflowA-sublevel.bpmn @@ -0,0 +1,38 @@ + + + + + Flow_1mp4u8z + + + Flow_1mp4u8z + Flow_1ud341z + + + + Flow_1ud341z + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tests/SpiffWorkflow/bpmn/data/resetworkflowA-toplevel.bpmn b/tests/SpiffWorkflow/bpmn/data/resetworkflowA-toplevel.bpmn new file mode 100644 index 000000000..9a8c22bd2 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/resetworkflowA-toplevel.bpmn @@ -0,0 +1,64 @@ + + + + + Flow_1mskfwg + + + Flow_083r7tz + Flow_07sx36q + + + + + + Flow_1ydceye + + + + Flow_1mskfwg + Flow_083r7tz + + + Flow_07sx36q + Flow_1ydceye + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/resetworkflowB-sublevel.bpmn 
b/tests/SpiffWorkflow/bpmn/data/resetworkflowB-sublevel.bpmn new file mode 100644 index 000000000..b6cc411e3 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/resetworkflowB-sublevel.bpmn @@ -0,0 +1,50 @@ + + + + + Flow_1mp4u8z + + + Flow_1mp4u8z + Flow_1ud341z + + + + Flow_1qhjty2 + + + + Flow_1ud341z + Flow_1qhjty2 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tests/SpiffWorkflow/bpmn/data/resetworkflowB-toplevel.bpmn b/tests/SpiffWorkflow/bpmn/data/resetworkflowB-toplevel.bpmn new file mode 100644 index 000000000..9a8c22bd2 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/resetworkflowB-toplevel.bpmn @@ -0,0 +1,64 @@ + + + + + Flow_1mskfwg + + + Flow_083r7tz + Flow_07sx36q + + + + + + Flow_1ydceye + + + + Flow_1mskfwg + Flow_083r7tz + + + Flow_07sx36q + Flow_1ydceye + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/rrt.bpmn b/tests/SpiffWorkflow/bpmn/data/rrt.bpmn new file mode 100644 index 000000000..e6d1afb27 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/rrt.bpmn @@ -0,0 +1,336 @@ + + + + + SequenceFlow_05ja25w + + + ### UNIVERSITY OF VIRGINIA RESEARCH +#### Research Ramp-up Plan + + +As we plan for the resumption of on-grounds research, PIs are required to develop a Research Ramp-up Plan. Please use the ramp-up guidance provided to lay out your plan(s) to manage operations while prioritizing physical distancing, staggered work shifts to reduce group size, remote work, and other exposure-reducing measures. + + +Plans must be submitted to the Office of Research by Monday, May ?? for consideration in the first round of approvals. Plans will then be reviewed on a rolling basis going forward. + + +Instructions for Submitting: + + +1. Add a Request for each lab space you manage in a building. If your lab spans multiple rooms or floors in a single building, one request will be required for that lab. 
If your lab spans multipe buildings, one request for each building will be required for that lab. The primary reason for this differentiation is that in addition to obtaining approval to restart operations, this information will also be used after start up to assist with any contact tracing that may be needed. + + +2. Select each Request added and step through each form presented, responding to all required and applicable fields. You may be presented with different questions if activities in each lab differ. + + +3. After all forms have been completed, you will be presented with the option to create your Research Recovery Plan in Word format. Download the document and review it. If you see any corrections that need to be made, return to the coresponding form and make the correction. + + +4. Once the generated Research Recovery Plan is finalize, use the web site to submit it to the Office of the Vice President for Research for review. + + +Please submit questions on the Research Support website. + SequenceFlow_05ja25w + SequenceFlow_0h50bp3 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SequenceFlow_0h50bp3 + SequenceFlow_0bqu7pp + + + + + + ### {{ LabName }} +#### Lab details + + +Your response to these questions will determine if you do or do not provide additional information regarding each topic later. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SequenceFlow_0bqu7pp + Flow_0scfmzc + + + SequenceFlow_1qtrgbv + + + + Review plan, make changes if needed, continue of ready to submit. 
+ Flow_1b6vbkk + Flow_1e2qi9s + + + + Flow_1e2qi9s + SequenceFlow_1qtrgbv + CompleteTemplate ResearchRecoveryPlan.docx RESEARCH_RECOVERY + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Flow_0so3402 + SequenceFlow_1yi9lig + + + Flow_0scfmzc + Flow_0so3402 + Flow_0141rp3 + + + isAnimalUse == True + + + + + + + + + + + + + + Flow_1121pfu + SequenceFlow_1b4non2 + + + Flow_0141rp3 + SequenceFlow_1yi9lig + Flow_1121pfu + SequenceFlow_1wp5zmg + + + isGrantSupport == True + + + SequenceFlow_1b4non2 + SequenceFlow_1wp5zmg + Flow_1b6vbkk + + + + + isGrantSupport == False + + + + isAnimalUse == False + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/timer-non-interrupt-boundary.bpmn b/tests/SpiffWorkflow/bpmn/data/timer-non-interrupt-boundary.bpmn new file mode 100644 index 000000000..25d750e27 --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/timer-non-interrupt-boundary.bpmn @@ -0,0 +1,169 @@ + + + + + Flow_1hyztad + + + Flow_1hyztad + Flow_07l1pau + + Flow_1ls93l9 + + + + Flow_1ku6me6 + Flow_06jd2h7 + Flow_10bimyk + + + + + Flow_10bimyk + + + work_done == 'Yes' + + + + + + + + Flow_1ls93l9 + Flow_06jd2h7 + Flow_1ku6me6 + + + + Flow_03e1mfr + + timedelta(seconds=.2) + + + + + + + + + Flow_03e1mfr + Flow_0tlkkap + + + + + + + Flow_07l1pau + Flow_0tlkkap + Flow_0vper9q + + + + Flow_0or6odg + + + + + + + + + Flow_0vper9q + Flow_0or6odg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/bpmn/data/timer.bpmn b/tests/SpiffWorkflow/bpmn/data/timer.bpmn new file mode 100644 index 000000000..fa26d75fb --- /dev/null +++ b/tests/SpiffWorkflow/bpmn/data/timer.bpmn @@ 
-0,0 +1,68 @@ + + + + + Flow_1pahvlr + + + Flow_1pahvlr + Flow_1pvkgnu + + + Flow_1pvkgnu + Flow_1elbn9u + + timedelta(seconds=.25) + + + + Flow_1elbn9u + Flow_1ekgt3x + + + Flow_1ekgt3x + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/BaseTestCase.py b/tests/SpiffWorkflow/camunda/BaseTestCase.py new file mode 100644 index 000000000..f4bc5c0aa --- /dev/null +++ b/tests/SpiffWorkflow/camunda/BaseTestCase.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import + +import os +import unittest + +from SpiffWorkflow.bpmn.serializer.BpmnSerializer import BpmnSerializer + +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from SpiffWorkflow.camunda.parser.CamundaParser import CamundaParser +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'danfunk' + +from tests.SpiffWorkflow.bpmn.PackagerForTests import PackagerForTests + + +class PackagerForCamundaTests(PackagerForTests): + PARSER_CLASS = CamundaParser + + +class BaseTestCase(BpmnWorkflowTestCase): + """ Provides some basic tools for loading up and parsing camunda BPMN files """ + + def load_workflow_spec(self, filename, process_name): + f = os.path.join(os.path.dirname(__file__), filename) + return BpmnSerializer().deserialize_workflow_spec( + PackagerForCamundaTests.package_in_memory(process_name, f)) + + def reload_save_restore(self): + #self.spec = self.load_workflow_spec( + # 'data/multi_instance_array_parallel.bpmn', + # 'MultiInstanceArray') + self.save_restore() diff --git a/tests/SpiffWorkflow/camunda/ClashingNameTest.py b/tests/SpiffWorkflow/camunda/ClashingNameTest.py new file mode 100644 index 000000000..b56fdb266 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/ClashingNameTest.py @@ -0,0 +1,97 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, 
absolute_import, division + +from __future__ import division, absolute_import +import sys +import os +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase +from SpiffWorkflow.exceptions import WorkflowException +__author__ = 'kellym' + +from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase + + +class ClashingNameTest(BaseTestCase): + """The example bpmn diagram tests both a set cardinality from user input + as well as looping over an existing array.""" + + def setUp(self): + self.spec = self.load_workflow_spec( + 'data/token_trial_camunda_clash.bpmn', + 'token') + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testRunThroughSaveRestore(self): + self.actual_test(save_restore=True) + + def testRunThroughHappyReset(self): + self.actual_test(save_restore=False,reset_data=True,expected={'do_step':False,'C':'c'}) + + def testRunThroughSaveRestoreReset(self): + self.actual_test(save_restore=True,reset_data=True,expected={'do_step':False,'C':'c'}) + + + + def actual_test(self, save_restore=False,reset_data=False,expected={'do_step':False,'A':'a','B':'b','C':'c'}): + + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + firsttaskid = None + steps = [{'taskname':'First', + 'formvar': 'do_step', + 'answer': True}, + {'taskname': 'FormA', + 'formvar': 'A', + 'answer': 'a'}, + {'taskname': 'FormB', + 'formvar': 'B', + 'answer': 'b'}, + ] + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + if firsttaskid == None: + firsttaskid = task.id + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.workflow.reset_task_from_id(firsttaskid) + 
steps = [{'taskname':'First', + 'formvar': 'do_step', + 'answer': False}, + {'taskname': 'FormC', + 'formvar': 'C', + 'answer': 'c'}, + ] + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.assertTrue(self.workflow.is_completed()) + + self.assertEqual({'do_step':False,'A':'a','B':'b','C':'c'}, + self.workflow.last_task.data) + + + + + + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ClashingNameTest) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/DefaultGatewayPMITest.py b/tests/SpiffWorkflow/camunda/DefaultGatewayPMITest.py new file mode 100644 index 000000000..e37c3ea08 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/DefaultGatewayPMITest.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import sys +import os +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase +from SpiffWorkflow.exceptions import WorkflowException +__author__ = 'matth' + +from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase + + +class DefaultGatewayPMITest(BaseTestCase): + """The example bpmn diagram tests both a set cardinality from user input + as well as looping over an existing array.""" + + def setUp(self): + self.spec = self.load_workflow_spec( + 'data/default_gateway_pmi.bpmn', + 'DefaultGateway') + + def testRunThroughHappy(self): + self.actual_test(False) + + def testRunThroughSaveRestore(self): + self.actual_test(True) + + + + + def 
actual_test(self, save_restore=False): + + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + + # Set initial array size to 3 in the first user form. + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual("DoStuff", task.task_spec.name) + task.update_data({"morestuff": 'Yep'}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + # Set the names of the 3 family members. + for i in range(3): + task = self.workflow.get_ready_user_tasks()[0] + if i == 0: + self.assertEqual("GetMoreStuff", task.task_spec.name) + else: + self.assertEqual("GetMoreStuff_%d"%(i-1), task.task_spec.name) + + + task.update_data({"stuff.addstuff": "Stuff %d"%i}) + self.workflow.complete_task_from_id(task.id) + if save_restore: self.save_restore() + self.workflow.do_engine_steps() + + if save_restore: self.save_restore() + self.assertTrue(self.workflow.is_completed()) + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(DefaultGatewayPMITest) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/ExclusiveGatewayPMITest.py b/tests/SpiffWorkflow/camunda/ExclusiveGatewayPMITest.py new file mode 100644 index 000000000..08280e129 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/ExclusiveGatewayPMITest.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import sys +import os +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase +from SpiffWorkflow.exceptions import WorkflowException +__author__ = 'matth' + +from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase + + +class ExclusiveGatewayPMITest(BaseTestCase): + 
"""The example bpmn diagram tests both a set cardinality from user input + as well as looping over an existing array.""" + + def setUp(self): + self.spec = self.load_workflow_spec( + 'data/default_gateway_pmi.bpmn', + 'DefaultGateway') + + def testRunThroughHappy(self): + self.actual_test(False) + + def testRunThroughSaveRestore(self): + self.actual_test(True) + + def testRunThroughHappyNo(self): + self.actual_test(False,'No') + + def testRunThroughSaveRestoreNo(self): + self.actual_test(True,'No') + + + + def actual_test(self, save_restore=False,response='Yes'): + + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + + # Set initial array size to 3 in the first user form. + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual("DoStuff", task.task_spec.name) + task.update_data({"morestuff": response}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + # Set the names of the 3 family members. 
+ if response == 'Yes': + for i in range(3): + task = self.workflow.get_ready_user_tasks()[0] + if i == 0: + self.assertEqual("GetMoreStuff", task.task_spec.name) + else: + self.assertEqual("GetMoreStuff_%d"%(i-1), task.task_spec.name) + + + task.update_data({"stuff.addstuff": "Stuff %d"%i}) + self.workflow.complete_task_from_id(task.id) + if save_restore: self.save_restore() + self.workflow.do_engine_steps() + + if save_restore: self.save_restore() + self.assertTrue(self.workflow.is_completed()) + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ExclusiveGatewayPMITest) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/MultiInstanceArrayTest.py b/tests/SpiffWorkflow/camunda/MultiInstanceArrayTest.py new file mode 100644 index 000000000..190569054 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/MultiInstanceArrayTest.py @@ -0,0 +1,239 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import sys +import os +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase +from SpiffWorkflow.exceptions import WorkflowException +__author__ = 'matth' + +from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase + + +class MultiInstanceArrayTest(BaseTestCase): + """The example bpmn diagram tests both a set cardinality from user input + as well as looping over an existing array.""" + + def setUp(self): + self.spec = self.load_workflow_spec( + 'data/multi_instance_array.bpmn', + 'MultiInstanceArray') + + def testRunThroughHappy(self): + self.actual_test(False) + + def testRunThroughSaveRestore(self): + self.actual_test(True) + + + def testRunThroughHappyList(self): + self.actual_test2(False) + + def 
testRunThroughSaveRestoreList(self): + self.actual_test2(True) + + def testRunThroughHappyDict(self): + self.actual_test_with_dict(False) + + def testRunThroughSaveRestoreDict(self): + self.actual_test_with_dict(True) + + def testGetTaskExtensions(self): + self.actual_test_for_extensions(False) + + + def actual_test(self, save_restore=False): + + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + + # Set initial array size to 3 in the first user form. + task = self.workflow.get_ready_user_tasks()[0] + taskinfo = task.task_info() + self.assertEqual(taskinfo,{'is_looping':False, + 'is_sequential_mi':False, + 'is_parallel_mi':False, + 'mi_count':0, + 'mi_index':0}) + self.assertEqual("Activity_FamSize", task.task_spec.name) + task.update_data({"Family": {"Size": 3}}) + self.workflow.complete_task_from_id(task.id) + if save_restore: self.save_restore() + + # Set the names of the 3 family members. + for i in range(3): + task = self.workflow.get_ready_user_tasks()[0] + taskinfo = task.task_info() + self.assertEqual(taskinfo, {'is_looping': False, + 'is_sequential_mi': True, + 'is_parallel_mi': False, + 'mi_count': 3, + 'mi_index': i+1}) + + if i > 0: + self.assertEqual("FamilyMemberTask"+"_%d"%(i-1), task.task_spec.name) + else: + self.assertEqual("FamilyMemberTask", task.task_spec.name) + + + task.update_data({"FamilyMember": {"FirstName": "The Funk #%i" % i}}) + self.workflow.complete_task_from_id(task.id) + if save_restore: self.save_restore() + self.workflow.do_engine_steps() + + self.assertEqual({1: {'FirstName': 'The Funk #0'}, + 2: {'FirstName': 'The Funk #1'}, + 3: {'FirstName': 'The Funk #2'}}, + task.data["Family"]["Members"]) + #### NB - start here + ### Data is not correctly getting to the next task upon complete of the last task + ### after do_engine_steps, the next task in the list should be the same as task.data + ### but it is not. + + ### invalid copy of data?? 
## appears that parent is not hooked up correctly + + # Set the birthdays of the 3 family members. + for i in range(3): + task = self.workflow.get_ready_user_tasks()[0] + if i > 0: + self.assertEqual("FamilyMemberBday"+"_%d"%(i-1), task.task_spec.name) + else: + self.assertEqual("FamilyMemberBday", task.task_spec.name) + task.update_data({"CurrentFamilyMember": {"Birthdate": "10/0%i/1985" % i}}) + self.workflow.complete_task_from_id(task.id) + if save_restore: self.save_restore() + self.workflow.do_engine_steps() + + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + self.assertTrue(self.workflow.is_completed()) + + self.assertEqual({1: {'FirstName': 'The Funk #0', "Birthdate": "10/00/1985"}, + 2: {'FirstName': 'The Funk #1', "Birthdate": "10/01/1985"}, + 3: {'FirstName': 'The Funk #2', "Birthdate": "10/02/1985"}}, + self.workflow.last_task.data["Family"]["Members"]) + + + + + def actual_test2(self, save_restore=False): + + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + + # Set initial array size to 3 in the first user form. + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual("Activity_FamSize", task.task_spec.name) + task.update_data({"Family":{"Size": 3}}) + self.workflow.complete_task_from_id(task.id) + if save_restore: self.save_restore() + + # Set the names of the 3 family members. 
+ for i in range(3): + task = self.workflow.get_ready_user_tasks()[0] + if i > 0: + self.assertEqual("FamilyMemberTask"+"_%d"%(i-1), task.task_spec.name) + else: + self.assertEqual("FamilyMemberTask", task.task_spec.name) + task.update_data({"FamilyMember": {"FirstName": "The Funk #%i" % i}}) + self.workflow.complete_task_from_id(task.id) + if save_restore: self.save_restore() + + self.assertEqual({1: {'FirstName': 'The Funk #0'}, + 2: {'FirstName': 'The Funk #1'}, + 3: {'FirstName': 'The Funk #2'}}, + task.data["Family"]["Members"]) + + + # Make sure that if we have a list as both input and output + # collection, that we raise an exception + + task = self.workflow.get_ready_user_tasks()[0] + task.data['Family']['Members'] = ['The Funk #0','The Funk #1','The Funk #2'] + self.assertEqual("FamilyMemberBday", task.task_spec.name) + task.update_data( + {"CurrentFamilyMember": {"Birthdate": "10/0%i/1985" % i}}) + with self.assertRaises(WorkflowException) as context: + self.workflow.complete_task_from_id(task.id) + + + def actual_test_with_dict(self, save_restore=False): + + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + + # Set initial array size to 3 in the first user form. + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual("Activity_FamSize", task.task_spec.name) + task.update_data({"Family":{"Size": 3}}) + self.workflow.complete_task_from_id(task.id) + if save_restore: self.save_restore() + + # Set the names of the 3 family members. 
+ for i in range(3): + task = self.workflow.get_ready_user_tasks()[0] + if i > 0: + self.assertEqual("FamilyMemberTask"+"_%d"%(i-1), task.task_spec.name) + else: + self.assertEqual("FamilyMemberTask", task.task_spec.name) + task.update_data({"FamilyMember": {"FirstName": "The Funk #%i" % i}}) + self.workflow.complete_task_from_id(task.id) + if save_restore: self.save_restore() + + self.assertEqual({1: {'FirstName': 'The Funk #0'}, + 2: {'FirstName': 'The Funk #1'}, + 3: {'FirstName': 'The Funk #2'}}, + task.data["Family"]["Members"]) + + + + # Set the birthdays of the 3 family members. + for i in range(3): + task = self.workflow.get_ready_user_tasks()[0] + if i == 0: + # Modify so that the dict keys are alpha rather than int + task.data["Family"]["Members"] = { + "a": {'FirstName': 'The Funk #0'}, + "b": {'FirstName': 'The Funk #1'}, + "c": {'FirstName': 'The Funk #2'}} + if (i > 0): + self.assertEqual("FamilyMemberBday"+"_%d"%(i-1), task.task_spec.name) + else: + self.assertEqual("FamilyMemberBday", task.task_spec.name) + task.update_data( + {"CurrentFamilyMember": {"Birthdate": "10/0%i/1985" % i}}) + self.workflow.complete_task_from_id(task.id) +# if save_restore: self.save_restore() + + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + self.assertTrue(self.workflow.is_completed()) + + self.assertEqual({"a": {'FirstName': 'The Funk #0', "Birthdate": "10/00/1985"}, + "b": {'FirstName': 'The Funk #1', "Birthdate": "10/01/1985"}, + "c": {'FirstName': 'The Funk #2', "Birthdate": "10/02/1985"}}, + self.workflow.last_task.data["Family"]["Members"]) + + + + def actual_test_for_extensions(self, save_restore=False): + + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + + # Set initial array size to 3 in the first user form. 
+ task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual("Activity_FamSize", task.task_spec.name) + extensions = task.task_spec.extensions # assume bpmn + self.assertEqual(extensions,{'Test1':'Value1','Test2':'Value2'}) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(MultiInstanceArrayTest) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/MultiInstanceDeepDictEdit.py b/tests/SpiffWorkflow/camunda/MultiInstanceDeepDictEdit.py new file mode 100644 index 000000000..9b42b6d9d --- /dev/null +++ b/tests/SpiffWorkflow/camunda/MultiInstanceDeepDictEdit.py @@ -0,0 +1,115 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import + +import copy +import sys +import os +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase +from SpiffWorkflow.exceptions import WorkflowException +__author__ = 'matth' + +from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase + + +class MultiInstanceDeepDictTest(BaseTestCase): + """The example bpmn diagram tests both a set cardinality from user input + as well as looping over an existing array.""" + + deep_dict = { + "StudyInfo": { + "investigators": { + "PI": { + "affiliation": "", + "department": "", + "display_name": "Daniel Harold Funk", + "email": "dhf8r@virginia.edu", + "given_name": "Daniel", + "sponsor_type": "Contractor", + "telephone_number": "", + "title": "", + "type_full": "Primary Investigator", + "user_id": "dhf8r" + }, + "DC": { + "type_full": "Department Contact", + "user_id": "John Smith" + } + } + } + } + + expected_result = copy.copy(deep_dict) + expected_result["StudyInfo"]["investigators"]["DC"]["email"] = "john.smith@gmail.com" + 
expected_result["StudyInfo"]["investigators"]["PI"]["email"] = "dan.funk@gmail.com" + + def setUp(self): + self.spec = self.load_workflow_spec( + 'data/multi_instance_parallel_deep_data_edit.bpmn', + 'MultiInstance') + + def testRunThroughHappy(self): + self.actual_test(False) + + def testRunThroughSaveRestore(self): + self.actual_test(True) + + def actual_test(self, save_restore=False): + + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + # The initial task is a script task. Set the data there + # and move one. + task = self.workflow.get_ready_user_tasks()[0] + task.data = self.deep_dict + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + task = self.workflow.get_ready_user_tasks()[0] + taskinfo = task.task_info() + self.assertEqual(taskinfo,{'is_looping':False, + 'is_sequential_mi':False, + 'is_parallel_mi':True, + 'mi_count':2, + 'mi_index':1}) + self.assertEqual("MultiInstanceTask", task.task_spec.name) + self.assertTrue("investigator" in task.data) + data = copy.copy(task.data) + data['investigator']['email'] = "john.smith@gmail.com" + task.update_data(data) + self.workflow.complete_task_from_id(task.id) + if save_restore: self.save_restore() + + + task = self.workflow.get_ready_user_tasks()[0] + taskinfo = task.task_info() + self.assertEqual(taskinfo,{'is_looping':False, + 'is_sequential_mi':False, + 'is_parallel_mi':True, + 'mi_count':2, + 'mi_index':2}) + self.assertEqual("MultiInstanceTask", task.task_spec.name) + self.assertTrue("investigator" in task.data) + data = copy.copy(task.data) + data['investigator']['email'] = "dan.funk@gmail.com" + task.update_data(data) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.assertTrue(self.workflow.is_completed()) + task = self.workflow.last_task + self.assertEqual(self.expected_result, task.data) + 
+def suite(): + return unittest.TestLoader().loadTestsFromTestCase(MultiInstanceDeepDictTest) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/MultiInstanceParallelArrayTest.py b/tests/SpiffWorkflow/camunda/MultiInstanceParallelArrayTest.py new file mode 100644 index 000000000..dda804b95 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/MultiInstanceParallelArrayTest.py @@ -0,0 +1,113 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import unittest +import sys +import os +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) + + +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'matth' +import random + +debug = True + +from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase + + +class MultiInstanceParallelArrayTest(BaseTestCase): + """The example bpmn diagram tests both a set cardinality from user input + as well as looping over an existing array.""" + + def setUp(self): + + self.spec = self.load_workflow_spec( + 'data/multi_instance_array_parallel.bpmn', + 'MultiInstanceArray') + + def testRunThroughHappy(self): + self.actual_test(False) + + def testRunThroughSaveRestore(self): + self.actual_test(True) + + + def actual_test(self, save_restore=False): + + self.workflow = BpmnWorkflow(self.spec) + first_task = self.workflow.task_tree + + # A previous task (in this case the root task) will set the data + # so it must be found later. + first_task.update_data({"FamilySize": 3}) + self.workflow.do_engine_steps() + if save_restore: self.reload_save_restore() + # Set initial array size to 3 in the first user form. 
+ task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual("Activity_FamSize", task.task_spec.name) + task.update_data({"FamilySize": 3}) + self.workflow.complete_task_from_id(task.id) + if save_restore: self.reload_save_restore() + self.workflow.do_engine_steps() + + # Set the names of the 3 family members. + for i in range(3): + + tasks = self.workflow.get_ready_user_tasks() + self.assertEqual(len(tasks),1) # still with sequential MI + task = tasks[0] + if i > 0: + self.assertEqual("FamilyMemberTask"+"_%d"%(i-1), task.task_spec.name) + else: + self.assertEqual("FamilyMemberTask", task.task_spec.name) + task.update_data({"FamilyMember": {"FirstName": "The Funk #%i" % i}}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: + self.reload_save_restore() + tasks = self.workflow.get_ready_user_tasks() + + self.assertEqual(3,len(tasks)) + # Set the birthdays of the 3 family members. + for i in range(3): # emulate random Access + task = random.choice(tasks) + x = task.internal_data['runtimes'] -1 + self.assertEqual("FamilyMemberBday", task.task_spec.name[:16]) + self.assertEqual({"FirstName": "The Funk #%i" % x}, + task.data["CurrentFamilyMember"]) + task.update_data( + {"CurrentFamilyMember": {"Birthdate": "10/05/1985" + str(x)}}) + self.workflow.do_engine_steps() + self.workflow.complete_task_from_id(task.id) + # The data should still be available on the current task. 
+ self.assertEqual({'FirstName': "The Funk #%i" % x, + 'Birthdate': '10/05/1985' + str(x)}, + self.workflow.get_task(task.id) + .data['CurrentFamilyMember']) + self.workflow.do_engine_steps() + if save_restore: + self.reload_save_restore() + self.workflow.do_engine_steps() + + tasks = self.workflow.get_ready_user_tasks() + + self.workflow.do_engine_steps() + if save_restore: + self.reload_save_restore() + + names = task.data['FamilyMembers'] + bdays = task.data['FamilyMemberBirthday'] + for x in list(names.keys()): + self.assertEqual(str(names[x]['FirstName'][-1]),str(bdays[x]['Birthdate'][-1])) + self.assertTrue(self.workflow.is_completed()) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(MultiInstanceParallelArrayTest) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/ResetTokenMIParallelTest.py b/tests/SpiffWorkflow/camunda/ResetTokenMIParallelTest.py new file mode 100644 index 000000000..e4ba1c57c --- /dev/null +++ b/tests/SpiffWorkflow/camunda/ResetTokenMIParallelTest.py @@ -0,0 +1,98 @@ +# -*- coding: utf-8 -*- +from __future__ import division, absolute_import +from __future__ import print_function, absolute_import, division + +import os +import sys +import unittest + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow + +__author__ = 'kellym' + +from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase + + +class ResetTokenTestMIParallel(BaseTestCase): + """The example bpmn diagram tests both a set cardinality from user input + as well as looping over an existing array.""" + + def setUp(self): + self.spec = self.load_workflow_spec( + 'data/token_trial_MIParallel.bpmn', + 'token') + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testRunThroughSaveRestore(self): + self.actual_test(save_restore=True) + + + + + def actual_test(self, 
save_restore=False,reset_data=False): + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + firsttaskid = None + steps = [{'taskname':'First', + 'task_data': {'do_step':'Yes'}}, + {'taskname': 'FormA', + 'task_data': {'current': {'A' : 'x'}}}, + {'taskname': 'FormA', + 'task_data': {'current': {'A' : 'y'}}}, + {'taskname': 'FormA', + 'task_data': {'current': {'A' : 'z'}}} + ] + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + if firsttaskid == None and step['taskname']=='FormA': + firsttaskid = task.id + self.assertEqual(step['taskname'], task.task_spec.name[:len(step['taskname'])]) + task.update_data(step['task_data']) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.assertEqual({'current': {'A': 'y'}, + 'do_step': 'Yes', + 'output': {1: {'A': 'x'}, 2: {'A': 'y'}, 3: {'A': 'z'}}}, + self.workflow.last_task.data) + + self.workflow.reset_task_from_id(firsttaskid) + #NB - this won't test random access + steps = [{'taskname': 'FormA', + 'task_data': {'current': {'A' : 'a1'}}}, + {'taskname': 'FormC', + 'task_data': {'C' : 'c'}}, + ] + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data(step['task_data']) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.assertTrue(self.workflow.is_completed()) + + self.assertEqual({'current': {'A': 'x'}, + 'do_step': 'Yes', + 'C': 'c', + 'output': {1: {'A': 'a1'}, + 2: {'A': 'y'}, + 3: {'A': 'z'}}}, + self.workflow.last_task.data) + + + + + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ResetTokenTestMIParallel) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/ResetTokenMITest.py b/tests/SpiffWorkflow/camunda/ResetTokenMITest.py new file mode 
100644 index 000000000..d9366b31e --- /dev/null +++ b/tests/SpiffWorkflow/camunda/ResetTokenMITest.py @@ -0,0 +1,98 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import sys +import os +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase +from SpiffWorkflow.exceptions import WorkflowException +__author__ = 'kellym' + +from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase + + +class ResetTokenTestMI(BaseTestCase): + """The example bpmn diagram tests both a set cardinality from user input + as well as looping over an existing array.""" + + def setUp(self): + self.spec = self.load_workflow_spec( + 'data/token_trial_MI.bpmn', + 'token') + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testRunThroughSaveRestore(self): + self.actual_test(save_restore=True) + + + + + def actual_test(self, save_restore=False,reset_data=False): + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + firsttaskid = None + steps = [{'taskname':'First', + 'task_data': {'do_step':'Yes'}}, + {'taskname': 'FormA', + 'task_data': {'current': {'A' : 'x'}}}, + {'taskname': 'FormA_0', + 'task_data': {'current': {'A' : 'y'}}}, + {'taskname': 'FormA_1', + 'task_data': {'current': {'A' : 'z'}}} + ] + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + if firsttaskid == None and step['taskname']=='FormA': + firsttaskid = task.id + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data(step['task_data']) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.workflow.reset_task_from_id(firsttaskid) + + steps = [{'taskname': 'FormA', + 'task_data': {'current': 
{'A': 'a1'}}}, + {'taskname': 'FormA_0', + 'task_data': {'current': {'A': 'a2'}}}, + {'taskname': 'FormA_1', + 'task_data': {'current': {'A': 'a3'}}}, + {'taskname': 'FormC', + 'task_data': {'C': 'c'}} + ] + + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data(step['task_data']) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.assertTrue(self.workflow.is_completed()) + self.assertEqual({'do_step': 'Yes', + 'output': {1: {'A': 'a1'}, + 2: {'A': 'a2'}, + 3: {'A': 'a3'}}, + 'C': 'c'}, + self.workflow.last_task.data) + + + + + + + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ResetTokenTestMI) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/ResetTokenNestedParallelTest.py b/tests/SpiffWorkflow/camunda/ResetTokenNestedParallelTest.py new file mode 100644 index 000000000..5d4ab0056 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/ResetTokenNestedParallelTest.py @@ -0,0 +1,222 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import sys +import os +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +__author__ = 'kellym' + +from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase + + +class ResetTokenTestNestedParallel(BaseTestCase): + """The example bpmn diagram tests both a set cardinality from user input + as well as looping over an existing array.""" + + def setUp(self): + self.spec = self.load_workflow_spec( + 'data/token_trial_nested_parallel.bpmn', + 'token') + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testRunThroughSaveRestore(self): + 
self.actual_test(save_restore=True) + + def testRunThroughHappyAlt(self): + self.actual_test2(save_restore=False) + + def testRunThroughSaveRestoreAlt(self): + self.actual_test2(save_restore=True) + + def actual_test(self, save_restore=False,reset_data=False): + """ + Test a complicated parallel matrix, complete the matrix and + Reset somewhere in the middle. It should complete the row that we + Reset to, and retain all previous answers. + """ + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + firsttaskid = None + steps = [{'taskname':'First', + 'formvar': 'First', + 'answer': 'Yes'}, + {'taskname': 'FormA1', + 'formvar': 'A1', + 'answer': 'xa1'}, + {'taskname': 'FormA2', + 'formvar': 'A2', + 'answer': 'xa2'}, + {'taskname': 'FormA3', + 'formvar': 'A3', + 'answer': 'xa3'}, + {'taskname': 'FormB1', + 'formvar': 'B1', + 'answer': 'xb1'}, + {'taskname': 'FormB2', + 'formvar': 'B2', + 'answer': 'xb2'}, + {'taskname': 'FormB3', + 'formvar': 'B3', + 'answer': 'xb3'}, + {'taskname': 'FormC1', + 'formvar': 'C1', + 'answer': 'xc1'}, + {'taskname': 'FormC2', + 'formvar': 'C2', + 'answer': 'xc2'}, + {'taskname': 'FormC3', + 'formvar': 'C3', + 'answer': 'xc3'}, + + ] + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + if firsttaskid == None and step['taskname']=='FormB2': + firsttaskid = task.id + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + self.workflow.reset_task_from_id(firsttaskid) + self.workflow.do_engine_steps() + #NB - this won't test random access + steps = [{'taskname': 'FormB2', + 'formvar': 'B2', + 'answer': 'b2'}, + {'taskname': 'FormD', + 'formvar': 'D', + 'answer': 'd'}, + ] + + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual(step['taskname'], task.task_spec.name) + 
task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + notworking = self.workflow.get_ready_user_tasks() + self.assertTrue(self.workflow.is_completed()) + self.assertEqual({'First': 'Yes', + 'A1': 'xa1', + 'A2': 'xa2', + 'A3': 'xa3', + 'B1': 'xb1', + 'B2': 'b2', + 'B3': 'xb3', + 'C1': 'xc1', + 'C2': 'xc2', + 'C3': 'xc3', + 'D': 'd'}, + + self.workflow.last_task.data) + + + def actual_test2(self, save_restore=False,reset_data=False): + """ + Test a complicated parallel matrix, + Complete several items in the parallel matrix, but do not complete it, + Reset to a previous version on another branch of the parallel, it should + complete that branch and then pick up where we left off. + Also, after we reset the branch, there should then be three tasks ready, + A2,B3,and C1 + """ + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + firsttaskid = None + steps = [{'taskname':'First', + 'formvar': 'First', + 'answer': 'Yes'}, + {'taskname': 'FormA1', + 'formvar': 'A1', + 'answer': 'xa1'}, + {'taskname': 'FormA2', + 'formvar': 'A2', + 'answer': 'xa2'}, + {'taskname': 'FormA3', + 'formvar': 'A3', + 'answer': 'xa3'}, + {'taskname': 'FormB1', + 'formvar': 'B1', + 'answer': 'xb1'}, + {'taskname': 'FormB2', + 'formvar': 'B2', + 'answer': 'xb2'}, + + ] + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + if firsttaskid == None and step['taskname']=='FormA2': + firsttaskid = task.id + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.workflow.reset_task_from_id(firsttaskid) + #NB - this won't test random access + steps = [{'taskname': 'FormA2', + 'formvar': 'A2', + 'answer': 'a2'}, + + {'taskname': 'FormB3', + 'formvar': 'B3', + 
'answer': 'b3'}, + + {'taskname': 'FormC1', + 'formvar': 'C1', + 'answer': 'c1'}, + {'taskname': 'FormC2', + 'formvar': 'C2', + 'answer': 'c2'}, + {'taskname': 'FormC3', + 'formvar': 'C3', + 'answer': 'c3'}, + + {'taskname': 'FormD', + 'formvar': 'D', + 'answer': 'd'}, + ] + readytasks = [t.task_spec.name for t in self.workflow.get_ready_user_tasks()] + self.assertEqual(readytasks,['FormA2','FormB3','FormC1','FormC2','FormC3']) + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.assertTrue(self.workflow.is_completed()) + + self.assertEqual({'First': 'Yes', + 'A1': 'xa1', + 'A2': 'a2', + 'A3': 'xa3', + 'B1': 'xb1', + 'B2': 'xb2', + 'B3': 'b3', + 'C1': 'c1', + 'C2': 'c2', + 'C3': 'c3', + 'D': 'd'}, + + self.workflow.last_task.data) + + + + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ResetTokenTestNestedParallel) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/ResetTokenParallelMatrixTest.py b/tests/SpiffWorkflow/camunda/ResetTokenParallelMatrixTest.py new file mode 100644 index 000000000..7d8650040 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/ResetTokenParallelMatrixTest.py @@ -0,0 +1,231 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import sys +import os +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase +from SpiffWorkflow.exceptions import WorkflowException +__author__ = 'kellym' + +from tests.SpiffWorkflow.camunda.BaseTestCase 
import BaseTestCase + + +class ResetTokenTestParallelMatrix(BaseTestCase): + """The example bpmn diagram tests both a set cardinality from user input + as well as looping over an existing array.""" + + def setUp(self): + self.spec = self.load_workflow_spec( + 'data/token_trial_parallel_matrix.bpmn', + 'token') + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testRunThroughSaveRestore(self): + self.actual_test(save_restore=True) + + def testRunThroughHappyAlt(self): + self.actual_test2(save_restore=False) + + def testRunThroughSaveRestoreAlt(self): + self.actual_test2(save_restore=True) + + + + def actual_test(self, save_restore=False,reset_data=False): + """ + Test a complicated parallel matrix, complete the matrix and + Reset somewhere in the middle. It should complete the row that we + Reset to, and retain all previous answers. + """ + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + firsttaskid = None + steps = [{'taskname':'First', + 'formvar': 'First', + 'answer': 'Yes'}, + {'taskname': 'FormA1', + 'formvar': 'A1', + 'answer': 'xa1'}, + {'taskname': 'FormA2', + 'formvar': 'A2', + 'answer': 'xa2'}, + {'taskname': 'FormA3', + 'formvar': 'A3', + 'answer': 'xa3'}, + {'taskname': 'FormB1', + 'formvar': 'B1', + 'answer': 'xb1'}, + {'taskname': 'FormB2', + 'formvar': 'B2', + 'answer': 'xb2'}, + {'taskname': 'FormB3', + 'formvar': 'B3', + 'answer': 'xb3'}, + {'taskname': 'FormC1', + 'formvar': 'C1', + 'answer': 'xc1'}, + {'taskname': 'FormC2', + 'formvar': 'C2', + 'answer': 'xc2'}, + {'taskname': 'FormC3', + 'formvar': 'C3', + 'answer': 'xc3'}, + + ] + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + if firsttaskid == None and step['taskname']=='FormB2': + firsttaskid = task.id + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: 
self.save_restore() + + self.workflow.reset_task_from_id(firsttaskid) + #NB - this won't test random access + steps = [{'taskname': 'FormB2', + 'formvar': 'B2', + 'answer': 'b2'}, + {'taskname': 'FormB3', + 'formvar': 'B3', + 'answer': 'b3'}, + {'taskname': 'FormD', + 'formvar': 'D', + 'answer': 'd'}, + ] + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.assertTrue(self.workflow.is_completed()) + + self.assertEqual({'First': 'Yes', + 'A1': 'xa1', + 'A2': 'xa2', + 'A3': 'xa3', + 'B1': 'xb1', + 'B2': 'b2', + 'B3': 'b3', + 'C1': 'xc1', + 'C2': 'xc2', + 'C3': 'xc3', + 'D': 'd'}, + + self.workflow.last_task.data) + + + def actual_test2(self, save_restore=False,reset_data=False): + """ + Test a complicated parallel matrix, + Complete several items in the parallel matrix, but do not complete it, + Reset to a previous version on another branch of the parallel, it should + complete that branch and then pick up where we left off. 
+ Also, after we reset the branch, there should then be three tasks ready, + A2,B3,and C1 + """ + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + firsttaskid = None + steps = [{'taskname':'First', + 'formvar': 'First', + 'answer': 'Yes'}, + {'taskname': 'FormA1', + 'formvar': 'A1', + 'answer': 'xa1'}, + {'taskname': 'FormA2', + 'formvar': 'A2', + 'answer': 'xa2'}, + {'taskname': 'FormA3', + 'formvar': 'A3', + 'answer': 'xa3'}, + {'taskname': 'FormB1', + 'formvar': 'B1', + 'answer': 'xb1'}, + {'taskname': 'FormB2', + 'formvar': 'B2', + 'answer': 'xb2'}, + + ] + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + if firsttaskid == None and step['taskname']=='FormA2': + firsttaskid = task.id + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.workflow.reset_task_from_id(firsttaskid) + #NB - this won't test random access + steps = [{'taskname': 'FormA2', + 'formvar': 'A2', + 'answer': 'a2'}, + {'taskname': 'FormA3', + 'formvar': 'A3', + 'answer': 'a3'}, + + {'taskname': 'FormB3', + 'formvar': 'B3', + 'answer': 'b3'}, + {'taskname': 'FormC1', + 'formvar': 'C1', + 'answer': 'c1'}, + {'taskname': 'FormC2', + 'formvar': 'C2', + 'answer': 'c2'}, + {'taskname': 'FormC3', + 'formvar': 'C3', + 'answer': 'c3'}, + + {'taskname': 'FormD', + 'formvar': 'D', + 'answer': 'd'}, + ] + readytasks = [t.task_spec.name for t in self.workflow.get_ready_user_tasks()] + self.assertEqual(readytasks,['FormA2','FormB3','FormC1']) + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + 
self.assertTrue(self.workflow.is_completed()) + + self.assertEqual({'First': 'Yes', + 'A1': 'xa1', + 'A2': 'a2', + 'A3': 'a3', + 'B1': 'xb1', + 'B2': 'xb2', + 'B3': 'b3', + 'C1': 'c1', + 'C2': 'c2', + 'C3': 'c3', + 'D': 'd'}, + + self.workflow.last_task.data) + + + + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ResetTokenTestParallelMatrix) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/ResetTokenParallelTaskCountTest.py b/tests/SpiffWorkflow/camunda/ResetTokenParallelTaskCountTest.py new file mode 100644 index 000000000..e2484fce2 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/ResetTokenParallelTaskCountTest.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import sys +import os +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase +from SpiffWorkflow.exceptions import WorkflowException +__author__ = 'kellym' + +from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase + + +class ResetTokenParallelTaskCountTest(BaseTestCase): + """Assure that setting the token does not effect the overall task + count. Added this when we discovered that this was growing + exponentially in some cases..""" + + def setUp(self): + self.spec = self.load_workflow_spec( + 'data/token_trial_parallel_simple.bpmn', + 'token_trial_parallel_simple') + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testRunThroughSaveRestore(self): + self.actual_test(save_restore=True) + + def actual_test(self, save_restore=False): + total = 10 # I would expect there to be 9 tasks, but we get 10. 
+ + # Set the workflow in motion, and assure we have the right + # number of tasks + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + self.assertEquals(total, len(self.workflow.get_tasks())) + + # Tell the exclusive gateway to skip the parallel tasks section. + # We should still have the same number of tasks. + data = {'skipParallel': True} + task = self.workflow.get_ready_user_tasks()[0] + task.data = data + self.workflow.complete_task_from_id(task.id) + self.assertEquals(total, len(self.workflow.get_tasks())) + + # Reset the token to the first user task. + # We should still have the same number of tasks. + self.workflow.task_tree.dump() + task.reset_token(reset_data=True) + print('=-----') + self.workflow.task_tree.dump() + self.assertEquals(total, len(self.workflow.get_tasks())) + self.assertEquals(1, len(self.workflow.get_ready_user_tasks())) + ready_nav = [item for item in self.workflow.get_flat_nav_list() if item.state == "READY"] + self.assertEquals(1, len(ready_nav)) + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ResetTokenParallelTaskCountTest) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/ResetTokenSubWorkflowTest.py b/tests/SpiffWorkflow/camunda/ResetTokenSubWorkflowTest.py new file mode 100644 index 000000000..077b559e9 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/ResetTokenSubWorkflowTest.py @@ -0,0 +1,213 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import sys +import os +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase +from SpiffWorkflow.exceptions import WorkflowException +__author__ = 'kellym' + +from tests.SpiffWorkflow.camunda.BaseTestCase 
import BaseTestCase + + +class ResetTokenTestSubProcess(BaseTestCase): + """The example bpmn diagram tests both a set cardinality from user input + as well as looping over an existing array.""" + + def setUp(self): + self.spec = self.load_workflow_spec( + 'data/token_trial_subprocess.bpmn', + 'token') + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testRunThroughSaveRestore(self): + self.actual_test(save_restore=True) + +# def testRunThroughHappyAlt(self): +# self.actual_test2(save_restore=False) + + # def testRunThroughSaveRestoreAlt(self): + # self.actual_test2(save_restore=True) + + + + def actual_test(self, save_restore=False): + """ + Test a complicated parallel matrix, complete the matrix and + Reset somewhere in the middle. It should complete the row that we + Reset to, and retain all previous answers. + """ + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + firsttaskid = None + steps = [{'taskname':'First', + 'formvar': 'First', + 'answer': 'Yes'}, + {'taskname': 'FormA1', + 'formvar': 'A1', + 'answer': 'xa1'}, + {'taskname': 'FormA2', + 'formvar': 'A2', + 'answer': 'xa2'}, + {'taskname': 'FormA3', + 'formvar': 'A3', + 'answer': 'xa3'}, + + + ] + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + if firsttaskid == None and step['taskname']=='FormA1': + firsttaskid = task.id + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: + self.save_restore() + + self.workflow.reset_task_from_id(firsttaskid) + #NB - this won't test random access + steps = [{'taskname': 'FormA1', + 'formvar': 'A1', + 'answer': 'a1'}, + {'taskname': 'FormA2', + 'formvar': 'A2', + 'answer': 'a2'}, + {'taskname': 'FormA3', + 'formvar': 'A3', + 'answer': 'a3'}, + {'taskname': 'FormD', + 'formvar': 'D', + 'answer': 'd'}, + ] + for step in steps: + task = 
self.workflow.get_ready_user_tasks()[0] + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: + self.save_restore() + + self.assertTrue(self.workflow.is_completed()) + + self.assertEqual({'First': 'Yes', + 'A1': 'a1', + 'A2': 'a2', + 'A3': 'a3', + 'D': 'd'}, + + self.workflow.last_task.data) + + + def actual_test2(self, save_restore=False,reset_data=False): + """ + Test a complicated parallel matrix, + Complete several items in the parallel matrix, but do not complete it, + Reset to a previous version on another branch of the parallel, it should + complete that branch and then pick up where we left off. + Also, after we reset the branch, there should then be three tasks ready, + A2,B3,and C1 + """ + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + firsttaskid = None + steps = [{'taskname':'First', + 'formvar': 'First', + 'answer': 'Yes'}, + {'taskname': 'FormA1', + 'formvar': 'A1', + 'answer': 'xa1'}, + {'taskname': 'FormA2', + 'formvar': 'A2', + 'answer': 'xa2'}, + {'taskname': 'FormA3', + 'formvar': 'A3', + 'answer': 'xa3'}, + {'taskname': 'FormB1', + 'formvar': 'B1', + 'answer': 'xb1'}, + {'taskname': 'FormB2', + 'formvar': 'B2', + 'answer': 'xb2'}, + + ] + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + if firsttaskid == None and step['taskname']=='FormA2': + firsttaskid = task.id + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.workflow.reset_task_from_id(firsttaskid) + #NB - this won't test random access + steps = [{'taskname': 'FormA2', + 'formvar': 'A2', + 'answer': 'a2'}, + {'taskname': 'FormA3', + 'formvar': 'A3', + 'answer': 'a3'}, + + {'taskname': 'FormB3', + 
'formvar': 'B3', + 'answer': 'b3'}, + {'taskname': 'FormC1', + 'formvar': 'C1', + 'answer': 'c1'}, + {'taskname': 'FormC2', + 'formvar': 'C2', + 'answer': 'c2'}, + {'taskname': 'FormC3', + 'formvar': 'C3', + 'answer': 'c3'}, + + {'taskname': 'FormD', + 'formvar': 'D', + 'answer': 'd'}, + ] + readytasks = [t.task_spec.name for t in self.workflow.get_ready_user_tasks()] + self.assertEqual(readytasks,['FormA2','FormB3','FormC1']) + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.assertTrue(self.workflow.is_completed()) + + self.assertEqual({'First': 'Yes', + 'A1': 'xa1', + 'A2': 'a2', + 'A3': 'a3', + 'B1': 'xb1', + 'B2': 'xb2', + 'B3': 'b3', + 'C1': 'c1', + 'C2': 'c2', + 'C3': 'c3', + 'D': 'd'}, + + self.workflow.last_task.data) + + + + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ResetTokenTestSubProcess) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) \ No newline at end of file diff --git a/tests/SpiffWorkflow/camunda/ResetTokenTest.py b/tests/SpiffWorkflow/camunda/ResetTokenTest.py new file mode 100644 index 000000000..105dfdae5 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/ResetTokenTest.py @@ -0,0 +1,97 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import sys +import os +import unittest +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase +from SpiffWorkflow.exceptions import WorkflowException +__author__ = 'kellym' + +from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase + + 
+class ResetTokenTest(BaseTestCase): + """The example bpmn diagram tests both a set cardinality from user input + as well as looping over an existing array.""" + + def setUp(self): + self.spec = self.load_workflow_spec( + 'data/token_trial.bpmn', + 'token') + + def testRunThroughHappy(self): + self.actual_test(save_restore=False) + + def testRunThroughSaveRestore(self): + self.actual_test(save_restore=True) + + def testRunThroughHappyReset(self): + self.actual_test(save_restore=False,reset_data=True,expected={'do_step':False,'C':'c'}) + + def testRunThroughSaveRestoreReset(self): + self.actual_test(save_restore=True,reset_data=True,expected={'do_step':False,'C':'c'}) + + + + def actual_test(self, save_restore=False,reset_data=False,expected={'do_step':False,'A':'a','B':'b','C':'c'}): + + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + firsttaskid = None + steps = [{'taskname':'First', + 'formvar': 'do_step', + 'answer': True}, + {'taskname': 'FormA', + 'formvar': 'A', + 'answer': 'a'}, + {'taskname': 'FormB', + 'formvar': 'B', + 'answer': 'b'}, + ] + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + if firsttaskid == None: + firsttaskid = task.id + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.workflow.reset_task_from_id(firsttaskid) + steps = [{'taskname':'First', + 'formvar': 'do_step', + 'answer': False}, + {'taskname': 'FormC', + 'formvar': 'C', + 'answer': 'c'}, + ] + for step in steps: + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual(step['taskname'], task.task_spec.name) + task.update_data({step['formvar']: step['answer']}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.assertTrue(self.workflow.is_completed()) + + 
self.assertEqual({'do_step':False,'A':'a','B':'b','C':'c'}, + self.workflow.last_task.data) + + + + + + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ResetTokenTest) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/SubWorkflowTest.py b/tests/SpiffWorkflow/camunda/SubWorkflowTest.py new file mode 100644 index 000000000..894df04c7 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/SubWorkflowTest.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division, absolute_import +import unittest +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +__author__ = 'kellym' + +from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase + + +class SubWorkflowTest(BaseTestCase): + """The tests a somewhat complex subworkflow and verifies that it does + what we expect""" + + def setUp(self): + self.spec = self.load_workflow_spec( + 'data/subWorkflowComplex.bpmn', + 'SubWorkflow') + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + self.answers = ['A','A1','A2','B'] + + def testRunThroughHappy(self): + self.actual_test(False) + + def testRunThroughSaveRestore(self): + self.actual_test(True) + + def actual_test(self, save_restore=False): + + + # Set initial array size to 3 in the first user form. 
+ for answer in self.answers: + task = self.workflow.get_ready_user_tasks()[0] + self.assertEqual("Activity_"+answer, task.task_spec.name) + task.update_data({"Field"+answer: answer}) + self.workflow.complete_task_from_id(task.id) + self.workflow.do_engine_steps() + if save_restore: self.save_restore() + + self.assertEqual(self.workflow.last_task.data,{'FieldA': 'A', + 'FieldA1': 'A1', + 'FieldA2': 'A2', + 'FieldB': 'B'}) + self.assertTrue(self.workflow.is_completed()) + + def testSubWorkflowNav(self): + flat = self.workflow.get_flat_nav_list() + nav = self.workflow.get_deep_nav_list() + self.assertNav(nav[0], spec_type="StartEvent") + self.assertNav(nav[1], spec_type="CallActivity", state="READY") + self.assertNav(nav[1].children[0], spec_type="StartEvent", state="COMPLETED") + self.assertNav(nav[1].children[1], description="FormA", state="READY") + self.assertNav(nav[1].children[2], spec_type="ParallelGateway") + self.assertNav(nav[1].children[2].children[0], description="Form A1") + self.assertNav(nav[1].children[2].children[1], description="Form A2") + self.assertNav(nav[1].children[4], spec_type="EndEvent") + self.assertNav(nav[2], spec_type="UserTask", description="FormB", state="FUTURE") + self.assertNav(nav[3], spec_type="EndEvent", state="FUTURE") + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(SubWorkflowTest) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/__init__.py b/tests/SpiffWorkflow/camunda/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/SpiffWorkflow/camunda/data/default_gateway_pmi.bpmn b/tests/SpiffWorkflow/camunda/data/default_gateway_pmi.bpmn new file mode 100644 index 000000000..d98baf5d8 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/default_gateway_pmi.bpmn @@ -0,0 +1,89 @@ + + + + + Flow_1wis1un + + + + + + + + Flow_1wis1un + Flow_144jxvd + + + + Flow_144jxvd + Flow_1riszc2 + Flow_0xdvee4 + + + + + 
Flow_13ncefd + Flow_0xdvee4 + + + + + + + + + Flow_1riszc2 + Flow_13ncefd + + 3 + + + + morestuff == 'No' + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/exclusive_gateway_pmi.bpmn b/tests/SpiffWorkflow/camunda/data/exclusive_gateway_pmi.bpmn new file mode 100644 index 000000000..4acb9f8e3 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/exclusive_gateway_pmi.bpmn @@ -0,0 +1,94 @@ + + + + + Flow_1wis1un + + + + + + + + Flow_1wis1un + Flow_144jxvd + + + + Flow_144jxvd + Flow_1riszc2 + Flow_0xdvee4 + + + + morestuff == 'Yes' + + + Flow_13ncefd + Flow_0xdvee4 + + + + + + + + + Flow_1riszc2 + Flow_13ncefd + + 3 + + + + morestuff == 'No' + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/multi_instance_array.bpmn b/tests/SpiffWorkflow/camunda/data/multi_instance_array.bpmn new file mode 100644 index 000000000..bf5b0391c --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/multi_instance_array.bpmn @@ -0,0 +1,100 @@ + + + + + Flow_0bplvtg + + + Please enter family size: + + + + + + + + + + + + + + + + + + + + Flow_0bplvtg + Flow_0zpm0rc + + + + Please enter information for family member {{ FamilyMember }}: + + + + + + Flow_0zpm0rc + Flow_0659lqh + + Family.Size + test completion + + + + + Enter Birthday for {{ CurrentFamilyMember['FamilyMember.FormField_FirstName'] }} + + + + + + Flow_0659lqh + Flow_0ncqf54 + + + + + + XXX + Flow_0ncqf54 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/multi_instance_array_parallel.bpmn b/tests/SpiffWorkflow/camunda/data/multi_instance_array_parallel.bpmn new file mode 100644 index 000000000..b12a16ea9 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/multi_instance_array_parallel.bpmn @@ -0,0 +1,100 @@ + + + + + Flow_0bplvtg + + + Please enter family size: + + + + + + + + + 
+ + + + + + + Flow_0bplvtg + Flow_0zpm0rc + + + + Please enter information for family member {{ FamilyMember }}: + + + + + + Flow_0zpm0rc + Flow_0659lqh + + FamilySize + test completion + + + + + Enter Birthday for {{ CurrentFamilyMember['FamilyMember.FormField_FirstName'] }} + + + + + + Flow_0659lqh + Flow_0ncqf54 + + FamilyMembers + + + + + + XXX + Flow_0ncqf54 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/multi_instance_parallel_deep_data_edit.bpmn b/tests/SpiffWorkflow/camunda/data/multi_instance_parallel_deep_data_edit.bpmn new file mode 100644 index 000000000..7ce994be9 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/multi_instance_parallel_deep_data_edit.bpmn @@ -0,0 +1,66 @@ + + + + + Flow_0t6p1sb + + + + Flow_0ugjw69 + + + + # Please provide addtional information about: +## Investigator ID: {{investigator.user_id}} +## Role: {{investigator.type_full}} + + + + + + SequenceFlow_1p568pp + Flow_0ugjw69 + + + + + Imagine a script task here that loads a complex data set. 
+ Flow_0t6p1sb + SequenceFlow_1p568pp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/no_form.bpmn b/tests/SpiffWorkflow/camunda/data/no_form.bpmn new file mode 100644 index 000000000..20dfc7f5f --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/no_form.bpmn @@ -0,0 +1,54 @@ + + + + + SequenceFlow_0ik56h0 + + + + + + + + + SequenceFlow_0ik56h0 + SequenceFlow_1de4q40 + + + SequenceFlow_1de4q40 + + + + This is a user task with no form in it, It should not error + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/random_fact.bpmn b/tests/SpiffWorkflow/camunda/data/random_fact.bpmn new file mode 100644 index 000000000..1c897ad19 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/random_fact.bpmn @@ -0,0 +1,84 @@ + + + + + SequenceFlow_0ik56h0 + + + + + + + + + + + + + + + + + + + + + + SequenceFlow_0ik56h0 + SequenceFlow_1wl4cli + + + + + + + + SequenceFlow_1wl4cli + scripts.FactService + + + + User sets the Fact.type to cat, norris, or buzzword + + + + Makes an API  call to get a fact of the required type. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/random_fact.svg b/tests/SpiffWorkflow/camunda/data/random_fact.svg new file mode 100644 index 000000000..3078ea0e3 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/random_fact.svg @@ -0,0 +1,4 @@ + + + +Set TypeDisplay FactUser sets the Fact.type to cat,norris, or buzzwordMakes an API call to get a factof the required type. 
\ No newline at end of file diff --git a/tests/SpiffWorkflow/camunda/data/subWorkflowComplex.bpmn b/tests/SpiffWorkflow/camunda/data/subWorkflowComplex.bpmn new file mode 100644 index 000000000..5f8651526 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/subWorkflowComplex.bpmn @@ -0,0 +1,162 @@ + + + + + Flow_1 + + + Flow_1 + Flow_4 + + Flow_2 + + + + Enter Form1 + + + + + + Flow_2 + Flow_0j8meqp + + + + + Enter Form A1 + + + + + + Flow_1jbvpss + Flow_1w00bbg + + + + Enter Form A2 + + + + + + Flow_0vl1ixa + Flow_0kzcljc + + + + + Flow_1cnvx4h + + + + Flow_0j8meqp + Flow_1jbvpss + Flow_0vl1ixa + + + Flow_1w00bbg + Flow_0kzcljc + Flow_1cnvx4h + + + + + + Enter form 2 + + + + + + Flow_4 + Flow_5 + + + Flow_5 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/token_trial.bpmn b/tests/SpiffWorkflow/camunda/data/token_trial.bpmn new file mode 100644 index 000000000..89cc6dc0b --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/token_trial.bpmn @@ -0,0 +1,144 @@ + + + + + Flow_03vnrmv + + + Flow_0g2wjhu + Flow_0ya87hl + Flow_1qgke9w + + + Do you want to do the next steps? 
+ + + + + + Flow_03vnrmv + Flow_13qpm6f + + + + Flow_13qpm6f + Flow_04bpvfu + Flow_0g2wjhu + + + Yes + + + do_step == True + + + do_step== False + + + + + + FormA + + + + + + Flow_04bpvfu + Flow_0ahlz50 + + + FormB + + + + + + Flow_0ahlz50 + Flow_0ya87hl + + + FormC + + + + + + Flow_1qgke9w + Flow_039y4lk + + + + Flow_039y4lk + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/token_trial_MI.bpmn b/tests/SpiffWorkflow/camunda/data/token_trial_MI.bpmn new file mode 100644 index 000000000..368cd4f08 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/token_trial_MI.bpmn @@ -0,0 +1,83 @@ + + + + + Flow_03vnrmv + + + Do you want to do the next steps? + + + + + + Flow_03vnrmv + Flow_10pdq2v + + + FormC + + + + + + Flow_0ztfesh + Flow_039y4lk + + + + Flow_039y4lk + + + + MI item + + + + + + Flow_10pdq2v + Flow_0ztfesh + + 3 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/token_trial_MIParallel.bpmn b/tests/SpiffWorkflow/camunda/data/token_trial_MIParallel.bpmn new file mode 100644 index 000000000..3e2f1b19b --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/token_trial_MIParallel.bpmn @@ -0,0 +1,83 @@ + + + + + Flow_03vnrmv + + + Do you want to do the next steps? 
+ + + + + + Flow_03vnrmv + Flow_10pdq2v + + + FormC + + + + + + Flow_0ztfesh + Flow_039y4lk + + + + Flow_039y4lk + + + + MI item + + + + + + Flow_10pdq2v + Flow_0ztfesh + + 3 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/token_trial_camunda_clash.bpmn b/tests/SpiffWorkflow/camunda/data/token_trial_camunda_clash.bpmn new file mode 100644 index 000000000..ee11331ba --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/token_trial_camunda_clash.bpmn @@ -0,0 +1,142 @@ + + + + + Flow_03vnrmv + + + Flow_0g2wjhu + Flow_0ya87hl + Flow_1qgke9w + + + Do you want to do the next steps? + + + + + + Flow_03vnrmv + Flow_13qpm6f + + + + Flow_13qpm6f + Flow_04bpvfu + Flow_0g2wjhu + + + Yes + do_step == True + + + do_step== False + + + + + + FormA + + + + + + Flow_04bpvfu + Flow_0ahlz50 + + + FormB + + + + + + Flow_0ahlz50 + Flow_0ya87hl + + + FormC + + + + + + Flow_1qgke9w + Flow_039y4lk + + + + Flow_039y4lk + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/token_trial_nested_parallel.bpmn b/tests/SpiffWorkflow/camunda/data/token_trial_nested_parallel.bpmn new file mode 100644 index 000000000..1ff66eaa9 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/token_trial_nested_parallel.bpmn @@ -0,0 +1,402 @@ + + + + + Flow_03vnrmv + + + Do you want to do the next steps? 
+ + + + + + Flow_03vnrmv + Flow_10pdq2v + + + FormD + + + + + + Flow_08nd97v + Flow_039y4lk + + + + Flow_039y4lk + + + + + + Flow_10pdq2v + Flow_0rg1whs + Flow_0pyul3k + Flow_1l934p1 + + + Flow_093ce35 + Flow_0c4mntn + Flow_0knduft + Flow_08nd97v + + + FormA1 + + + + + + Flow_1uw6r98 + Flow_0n71r7a + + + FormA2 + + + + + + Flow_1rihpzh + Flow_0p30bun + + + FormA3 + + + + + + Flow_0098ozb + Flow_04lzszv + + + FormB1 + + + + + + Flow_1tpkm1k + Flow_0zndavy + + + FormB2 + + + + + + Flow_0oz5j4d + Flow_1u9tezs + + + FormB3 + + + + + + Flow_11diihw + Flow_1xgsff0 + + + FormC1 + + + + + + Flow_04yup8h + Flow_0v6ozza + + + FormC2 + + + + + + Flow_1ay413y + Flow_1d1kroa + + + FormC3 + + + + + + Flow_0etaqvr + Flow_0kahsqi + + + Flow_0rg1whs + Flow_1uw6r98 + Flow_1rihpzh + Flow_0098ozb + + + Flow_0n71r7a + Flow_0p30bun + Flow_04lzszv + Flow_0c4mntn + + + + + + + + + + Flow_0pyul3k + Flow_1tpkm1k + Flow_0oz5j4d + Flow_11diihw + + + Flow_0zndavy + Flow_1u9tezs + Flow_1xgsff0 + Flow_093ce35 + + + + + + + + + + + + Flow_1l934p1 + Flow_04yup8h + Flow_1ay413y + Flow_0etaqvr + + + Flow_0v6ozza + Flow_1d1kroa + Flow_0kahsqi + Flow_0knduft + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/token_trial_parallel_matrix.bpmn b/tests/SpiffWorkflow/camunda/data/token_trial_parallel_matrix.bpmn new file mode 100644 index 000000000..276897834 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/token_trial_parallel_matrix.bpmn @@ -0,0 +1,272 @@ + + + + + Flow_03vnrmv + + + Do you want to do the next steps? 
+ + + + + + Flow_03vnrmv + Flow_10pdq2v + + + FormD + + + + + + Flow_08nd97v + Flow_039y4lk + + + + Flow_039y4lk + + + + + + Flow_10pdq2v + Flow_0fme0gm + Flow_0cmqr9j + Flow_0dv8nod + + + Flow_1pf4svr + Flow_0y1tqui + Flow_1h5wu4u + Flow_08nd97v + + + + + FormA1 + + + + + + Flow_0fme0gm + Flow_05w8299 + + + FormA2 + + + + + + Flow_05w8299 + Flow_16gvr7i + + + + FormA3 + + + + + + Flow_16gvr7i + Flow_1pf4svr + + + + FormB1 + + + + + + Flow_0cmqr9j + Flow_0ae6rzq + + + FormB2 + + + + + + Flow_0ae6rzq + Flow_0643kan + + + FormB3 + + + + + + Flow_0643kan + Flow_0y1tqui + + + + + + + FormC1 + + + + + + Flow_0dv8nod + Flow_0mtw6yv + + + FormC2 + + + + + + Flow_0mtw6yv + Flow_0rqbd7e + + + FormC3 + + + + + + Flow_0rqbd7e + Flow_1h5wu4u + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/token_trial_parallel_simple.bpmn b/tests/SpiffWorkflow/camunda/data/token_trial_parallel_simple.bpmn new file mode 100644 index 000000000..5c3658575 --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/token_trial_parallel_simple.bpmn @@ -0,0 +1,216 @@ + + + + + Flow_1w2tcdp + + + Flow_1vtdwmy + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SequenceFlow_1242uxm + SequenceFlow_09c4dnr + + + #### Please plan to ensure adequate supplies for staff cleaning before and after patients + + + + + + + + + + + + + + + + + + + + + + + + + SequenceFlow_1ylxjys + SequenceFlow_0rwnquq + + + Flow_0f6q83k + SequenceFlow_1242uxm + SequenceFlow_1ylxjys + + + + + SequenceFlow_09c4dnr + SequenceFlow_0rwnquq + SequenceFlow_00fpfhi + + + + + + SequenceFlow_00fpfhi + Flow_0wycgzo + Flow_1vtdwmy + + + Flow_00zjlx7 + Flow_0f6q83k + Flow_0wycgzo + + + skipParallel == False + + + skipParallel == True + + + + + 
Flow_1w2tcdp + Flow_00zjlx7 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/data/token_trial_subprocess.bpmn b/tests/SpiffWorkflow/camunda/data/token_trial_subprocess.bpmn new file mode 100644 index 000000000..20610a94c --- /dev/null +++ b/tests/SpiffWorkflow/camunda/data/token_trial_subprocess.bpmn @@ -0,0 +1,151 @@ + + + + + Flow_03vnrmv + + + Do you want to do the next steps? + + + + + + Flow_03vnrmv + Flow_13362mb + + + FormD + + + + + + Flow_0e9x16w + Flow_039y4lk + + + + Flow_039y4lk + + + + Flow_13362mb + Flow_0e9x16w + + Flow_1sy7h5y + + + FormA1 + + + + + + Flow_1sy7h5y + Flow_0f89gdk + + + FormA2 + + + + + + Flow_0f89gdk + Flow_0nudpra + + + FormA3 + + + + + + Flow_0nudpra + Flow_0zucva0 + + + + + + Flow_0zucva0 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/camunda/serializer/CamundaParserTest.py b/tests/SpiffWorkflow/camunda/serializer/CamundaParserTest.py new file mode 100644 index 000000000..29760e9db --- /dev/null +++ b/tests/SpiffWorkflow/camunda/serializer/CamundaParserTest.py @@ -0,0 +1,28 @@ +import unittest + +from SpiffWorkflow.camunda.parser.UserTaskParser import UserTaskParser +from SpiffWorkflow.camunda.specs.UserTask import UserTask +from SpiffWorkflow.camunda.parser.CamundaParser import CamundaParser + + +class CamundaParserTest(unittest.TestCase): + CORRELATE = CamundaParser + + def setUp(self): + self.parser = CamundaParser() + + def test_overrides(self): + expected_key = "{http://www.omg.org/spec/BPMN/20100524/MODEL}userTask" + self.assertIn(expected_key, + self.parser.OVERRIDE_PARSER_CLASSES) + + self.assertEqual((UserTaskParser, UserTask), + self.parser.OVERRIDE_PARSER_CLASSES.get(expected_key)) + + +def suite(): + 
return unittest.TestLoader().loadTestsFromTestCase(CamundaParserTest) + + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/serializer/UserTaskParserTest.py b/tests/SpiffWorkflow/camunda/serializer/UserTaskParserTest.py new file mode 100644 index 000000000..d352d8afd --- /dev/null +++ b/tests/SpiffWorkflow/camunda/serializer/UserTaskParserTest.py @@ -0,0 +1,52 @@ +import unittest + +from SpiffWorkflow.camunda.parser.UserTaskParser import UserTaskParser +from tests.SpiffWorkflow.camunda.BaseTestCase import BaseTestCase + + +class UserTaskParserTest(BaseTestCase): + CORRELATE = UserTaskParser + + def setUp(self): + self.spec = self.load_workflow_spec('data/random_fact.bpmn', 'random_fact') + + def testConstructor(self): + pass # this is accomplished through setup. + + def testGetForm(self): + form = self.spec.task_specs['Task_User_Select_Type'].form + self.assertIsNotNone(form) + + def testGetEnumField(self): + form = self.spec.task_specs['Task_User_Select_Type'].form + self.assertEquals("Fact", form.key) + self.assertEquals(1, len(form.fields)) + self.assertEquals("type", form.fields[0].id) + self.assertEquals(3, len(form.fields[0].options)) + + def testGetFieldProperties(self): + form = self.spec.task_specs['Task_User_Select_Type'].form + self.assertEquals(1, len(form.fields[0].properties)) + self.assertEquals('description', form.fields[0].properties[0].id) + self.assertEquals('Choose from the list of available types of random facts', form.fields[0].properties[0].value) + + def testGetFieldValidation(self): + form = self.spec.task_specs['Task_User_Select_Type'].form + self.assertEquals(1, len(form.fields[0].validation)) + self.assertEquals('maxlength', form.fields[0].validation[0].name) + self.assertEquals('25', form.fields[0].validation[0].config) + + def testNoFormDoesNotBombOut(self): + self.load_workflow_spec('data/no_form.bpmn', 'no_form') + self.assertTrue(True) # You can load a user task 
that has no form and you can still get here. + + def testCreateTask(self): + pass + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(UserTaskParserTest) + + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/serializer/__init__.py b/tests/SpiffWorkflow/camunda/serializer/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/SpiffWorkflow/camunda/specs/UserTaskSpecTest.py b/tests/SpiffWorkflow/camunda/specs/UserTaskSpecTest.py new file mode 100644 index 000000000..2d3c16d7f --- /dev/null +++ b/tests/SpiffWorkflow/camunda/specs/UserTaskSpecTest.py @@ -0,0 +1,152 @@ +import json +import unittest + +from SpiffWorkflow.camunda.specs.UserTask import FormField, UserTask, Form, \ + EnumFormField +from SpiffWorkflow.specs import WorkflowSpec, TaskSpec + + +class UserTaskSpecTest(unittest.TestCase): + CORRELATE = UserTask + + def create_instance(self): + if 'testtask' in self.wf_spec.task_specs: + del self.wf_spec.task_specs['testtask'] + task_spec = TaskSpec(self.wf_spec, 'testtask', description='foo') + self.form = Form() + return UserTask(self.wf_spec, 'userTask', self.form) + + def setUp(self): + self.wf_spec = WorkflowSpec() + self.user_spec = self.create_instance() + + def testConstructor(self): + self.assertEquals(self.user_spec.name, 'userTask') + self.assertEqual(self.user_spec.data, {}) + self.assertEqual(self.user_spec.defines, {}) + self.assertEqual(self.user_spec.pre_assign, []) + self.assertEqual(self.user_spec.post_assign, []) + self.assertEqual(self.user_spec.locks, []) + + def test_set_form(self): + self.assertEqual(self.form, self.user_spec.form) + + def testSerialize(self): + pass + + def test_text_field(self): + form_field = FormField(form_type="text") + form_field.id = "1234" + self.form.add_field(form_field) + self.assertEqual(form_field, self.user_spec.form.fields[0]) + + def test_enum_field(self): + enum_field = EnumFormField() + 
enum_field.label = "Which kind of fool are you" + enum_field.add_option('old fool', 'This is old, therefor it is good.') + enum_field.add_option('new fool', + 'This is new, therefor it is better.') + self.form.add_field(enum_field) + self.assertEqual(enum_field, self.user_spec.form.fields[-1]) + + def test_properties(self): + form_field = FormField(form_type="text") + self.assertFalse(form_field.has_property("wilma")) + form_field.add_property("wilma", "flintstone") + self.assertTrue(form_field.has_property("wilma")) + self.assertEquals("flintstone", form_field.get_property("wilma")) + + def test_validations(self): + form_field = FormField(form_type="text") + self.assertFalse(form_field.has_validation("barney")) + form_field.add_validation("barney", "rubble") + self.assertTrue(form_field.has_validation("barney")) + self.assertEquals("rubble", form_field.get_validation("barney")) + + def testIsEngineTask(self): + self.assertFalse(self.user_spec.is_engine_task()) + + def test_convert_to_dict(self): + form = Form() + + field1 = FormField(form_type="text") + field1.id = "quest" + field1.label = "What is your quest?" + field1.default_value = "I seek the grail!" + + field2 = EnumFormField() + field2.id = "color" + field2.label = "What is your favorite color?" 
+ field2.add_option("red", "Red") + field2.add_option("orange", "Green") + field2.add_option("yellow", "Yellow") + field2.add_option("green", "Green") + field2.add_option("blue", "Blue") + field2.add_option("indigo", "Indigo") + field2.add_option("violet", "Violet") + field2.add_option("other", "Other") + field2.add_property("description", "You know what to do.") + field2.add_validation("maxlength", "25") + + form.key = "formKey" + form.add_field(field1) + form.add_field(field2) + + def JsonableHandler(Obj): + if hasattr(Obj, 'jsonable'): + return Obj.jsonable() + else: + raise 'Object of type %s with value of %s is not JSON serializable' % ( + type(Obj), repr(Obj)) + + json_form = json.dumps(form, default=JsonableHandler) + actual = json.loads(json_form) + + expected = { + "fields": [ + { + "default_value": "I seek the grail!", + "label": "What is your quest?", + "id": "quest", + "properties": [], + "type": "text", + "validation": [], + }, + { + "default_value": "", + "id": "color", + "label": "What is your favorite color?", + "options": [ + {"id": "red", "name": "Red"}, + {"id": "orange", "name": "Green"}, + {"id": "yellow", "name": "Yellow"}, + {"id": "green", "name": "Green"}, + {"id": "blue", "name": "Blue"}, + {"id": "indigo", "name": "Indigo"}, + {"id": "violet", "name": "Violet"}, + {"id": "other", "name": "Other"}, + ], + "properties": [ + {"id": "description", "value": "You know what to do."}, + ], + "type": "enum", + "validation": [ + {"name": "maxlength", "config": "25"}, + ], + } + ], + "key": "formKey", + } + + expected_parsed = json.loads(json.dumps(expected)) + + self.maxDiff = None + self.assertDictEqual(actual, expected_parsed) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(UserTaskSpecTest) + + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/camunda/specs/__init__.py b/tests/SpiffWorkflow/camunda/specs/__init__.py new file mode 100644 index 
000000000..e69de29bb diff --git a/tests/SpiffWorkflow/dmn/BoolDecisionTest.py b/tests/SpiffWorkflow/dmn/BoolDecisionTest.py new file mode 100644 index 000000000..b07ce7e14 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/BoolDecisionTest.py @@ -0,0 +1,31 @@ +import unittest + +from tests.SpiffWorkflow.dmn.DecisionRunner import DecisionRunner + + +class BoolDecisionTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.runner = DecisionRunner('bool_decision.dmn', debug='DEBUG') + + def test_bool_decision_string_output1(self): + res = self.runner.decide(True) + self.assertEqual(res.description, 'Y Row Annotation') + + def test_bool_decision_string_output2(self): + res = self.runner.decide(False) + self.assertEqual(res.description, 'N Row Annotation') + + def test_bool_decision_string_output3(self): + res = self.runner.decide(None) + self.assertEqual(res.description, 'ELSE Row Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(BoolDecisionTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/BoxDeepCopyTest.py b/tests/SpiffWorkflow/dmn/BoxDeepCopyTest.py new file mode 100644 index 000000000..17eaf6d70 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/BoxDeepCopyTest.py @@ -0,0 +1,27 @@ +import unittest + +from tests.SpiffWorkflow.dmn.DecisionRunner import DecisionRunner +from SpiffWorkflow.bpmn.PythonScriptEngine import Box + + +class BoxDeepCopyTest(unittest.TestCase): + + def test_deep_copy_of_box(self): + data = {"foods": { + "spam": {"delicious": False} + }, + "hamsters": ['your', 'mother'] + } + data = Box(data) + data2 = data.__deepcopy__() + self.assertEqual(data, data2) + data.foods.spam.delicious = True + data.hamsters = ['your', 'father'] + self.assertFalse(data2.foods.spam.delicious) + self.assertEquals(['your', 'mother'], data2.hamsters) + +def suite(): + return 
unittest.TestLoader().loadTestsFromTestCase(BoxDeepCopyTest) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/BpmnDmnParserTest.py b/tests/SpiffWorkflow/dmn/BpmnDmnParserTest.py new file mode 100644 index 000000000..78e2a1f5f --- /dev/null +++ b/tests/SpiffWorkflow/dmn/BpmnDmnParserTest.py @@ -0,0 +1,27 @@ +import unittest + +from SpiffWorkflow.dmn.parser.BusinessRuleTaskParser import BusinessRuleTaskParser +from SpiffWorkflow.dmn.parser.BpmnDmnParser import BpmnDmnParser +from SpiffWorkflow.dmn.specs.BusinessRuleTask import BusinessRuleTask + + +class BpmnDmnParserTest(unittest.TestCase): + + def setUp(self): + self.parser = BpmnDmnParser() + + def test_overrides(self): + expected_key = "{http://www.omg.org/spec/BPMN/20100524/MODEL}businessRuleTask" + self.assertIn(expected_key, + self.parser.OVERRIDE_PARSER_CLASSES) + + self.assertEqual((BusinessRuleTaskParser, BusinessRuleTask), + self.parser.OVERRIDE_PARSER_CLASSES.get(expected_key)) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(BpmnDmnParserTest) + + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/BusinessRuleTaskParserTest.py b/tests/SpiffWorkflow/dmn/BusinessRuleTaskParserTest.py new file mode 100644 index 000000000..021664fa0 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/BusinessRuleTaskParserTest.py @@ -0,0 +1,52 @@ +import os +import unittest + +from SpiffWorkflow import Task + +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow + +from SpiffWorkflow.dmn.parser.BpmnDmnParser import BpmnDmnParser +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +class BusinessRuleTaskParserTest(BpmnWorkflowTestCase): + PARSER_CLASS = BpmnDmnParser + + def setUp(self): + parser = BpmnDmnParser() + bpmn = os.path.join(os.path.dirname(__file__), 'data', 'BpmnDmn', + 'ExclusiveGatewayIfElseAndDecision.bpmn') + dmn = 
os.path.join(os.path.dirname(__file__), 'data', 'BpmnDmn', + 'test_integer_decision.dmn') + parser.add_bpmn_file(bpmn) + parser.add_dmn_file(dmn) + self.spec = parser.get_spec('Process_1') + self.workflow = BpmnWorkflow(self.spec) + + def testConstructor(self): + pass # this is accomplished through setup. + + def testDmnHappy(self): + self.workflow = BpmnWorkflow(self.spec) + self.workflow.get_tasks(Task.READY)[0].set_data(x=3) + self.workflow.do_engine_steps() + self.assertDictEqual(self.workflow.data, {'x': 3, 'y': 'A'}) + self.assertDictEqual(self.workflow.last_task.data, {'x': 3, 'y': 'A'}) + + def testDmnSaveRestore(self): + self.workflow = BpmnWorkflow(self.spec) + self.workflow.get_tasks(Task.READY)[0].set_data(x=3) + self.save_restore() + self.workflow.do_engine_steps() + self.save_restore() + self.assertDictEqual(self.workflow.data, {'x': 3, 'y': 'A'}) + self.assertDictEqual(self.workflow.last_task.data, {'x': 3, 'y': 'A'}) + + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(BusinessRuleTaskParserTest) + + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/DMNDictTest.py b/tests/SpiffWorkflow/dmn/DMNDictTest.py new file mode 100644 index 000000000..c05d9d162 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/DMNDictTest.py @@ -0,0 +1,55 @@ +import os +import unittest + +from SpiffWorkflow import Task + +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow + +from SpiffWorkflow.dmn.parser.BpmnDmnParser import BpmnDmnParser +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +class DMNDictTest(BpmnWorkflowTestCase): + PARSER_CLASS = BpmnDmnParser + + def setUp(self): + parser = BpmnDmnParser() + bpmn = os.path.join(os.path.dirname(__file__), 'data', 'BpmnDmn', + 'dmndict.bpmn') + dmn = os.path.join(os.path.dirname(__file__), 'data', 'BpmnDmn', + 'dmndict.dmn') + parser.add_bpmn_file(bpmn) + parser.add_dmn_file(dmn) + self.expectedResult = 
{'inputvar': 1, 'pi': {'test': {'me': 'yup it worked'}, 'test2': {'other': 'yes'}}} + self.spec = parser.get_spec('start') + self.workflow = BpmnWorkflow(self.spec) + + def testConstructor(self): + pass # this is accomplished through setup. + + def testDmnHappy(self): + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + x = self.workflow.get_ready_user_tasks() + self.workflow.complete_task_from_id(x[0].id) + self.workflow.do_engine_steps() + self.assertDictEqual(self.workflow.last_task.data, self.expectedResult) + + def testDmnSaveRestore(self): + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + self.save_restore() + x = self.workflow.get_ready_user_tasks() + self.workflow.complete_task_from_id(x[0].id) + self.workflow.do_engine_steps() + self.save_restore() + self.assertDictEqual(self.workflow.last_task.data, self.expectedResult) + + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(DMNDictTest) + + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/DateDecisionTest.py b/tests/SpiffWorkflow/dmn/DateDecisionTest.py new file mode 100644 index 000000000..75f4e6ddd --- /dev/null +++ b/tests/SpiffWorkflow/dmn/DateDecisionTest.py @@ -0,0 +1,41 @@ +import unittest +from datetime import datetime + +from SpiffWorkflow.dmn.parser.DMNParser import DMNParser +from tests.SpiffWorkflow.dmn.DecisionRunner import DecisionRunner + + +class DateDecisionTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.runner = DecisionRunner('date_decision.dmn', debug='DEBUG') + + def test_date_decision_string_output1(self): + res = self.runner.decide(datetime.strptime('2017-11-01T10:00:00', DMNParser.DT_FORMAT)) + self.assertEqual(res.description, '111 Row Annotation') + + def test_date_decision_string_output2(self): + res = 
self.runner.decide(datetime.strptime('2017-11-03T00:00:00', DMNParser.DT_FORMAT)) + self.assertEqual(res.description, '311 Row Annotation') + + def test_date_decision_string_output3(self): + res = self.runner.decide(datetime.strptime('2017-11-02T00:00:00', DMNParser.DT_FORMAT)) + self.assertEqual(res.description, '<3.11 Row Annotation') + + def test_date_decision_string_output4(self): + res = self.runner.decide(datetime.strptime('2017-11-04T00:00:00', DMNParser.DT_FORMAT)) + self.assertEqual(res.description, '>3.11 Row Annotation') + + def test_date_decision_string_output5(self): + res = self.runner.decide(datetime.strptime('2017-11-13T12:00:00', DMNParser.DT_FORMAT)) + self.assertEqual(res.description, '>13.11<14.11 Row Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(DateDecisionTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/DecisionRunner.py b/tests/SpiffWorkflow/dmn/DecisionRunner.py new file mode 100644 index 000000000..6b8c0bbb0 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/DecisionRunner.py @@ -0,0 +1,32 @@ +import os + +from lxml import etree + +from SpiffWorkflow.bpmn.PythonScriptEngine import PythonScriptEngine +from SpiffWorkflow.dmn.engine.DMNEngine import DMNEngine +from SpiffWorkflow.dmn.parser.DMNParser import DMNParser + + +class DecisionRunner: + def __init__(self, path, debug=None): + self.path = os.path.join(os.path.dirname(__file__), + 'data', + path) + + f = open(self.path, 'r') + try: + node = etree.parse(f) + finally: + f.close() + self.dmnParser = DMNParser(None, node.getroot()) + self.dmnParser.parse() + + decision = self.dmnParser.decision + assert len(decision.decisionTables) == 1, \ + 'Exactly one decision table should exist! 
(%s)' \ + % (len(decision.decisionTables)) + + self.dmnEngine = DMNEngine(decision.decisionTables[0], debug=debug) + + def decide(self, *inputArgs, **inputKwargs): + return self.dmnEngine.decide(*inputArgs, **inputKwargs) diff --git a/tests/SpiffWorkflow/dmn/DictDecisionTest.py b/tests/SpiffWorkflow/dmn/DictDecisionTest.py new file mode 100644 index 000000000..4bd7dd259 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/DictDecisionTest.py @@ -0,0 +1,36 @@ +import unittest + +from tests.SpiffWorkflow.dmn.DecisionRunner import DecisionRunner + + +class DictDecisionTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.runner = DecisionRunner('dict_decision.dmn', debug='DEBUG') + + def test_string_decision_string_output1(self): + data = {"allergies": { + "PEANUTS": {"delicious": True}, + "SPAM": {"delicious": False} + }} + res = self.runner.decide(data) + self.assertEqual(res.description, 'They are allergic to peanuts') + + def test_string_decision_string_output2(self): + data = {"allergies": { + "SpAm": {"delicious": False}, + "SPAM": {"delicious": False} + }} + res = self.runner.decide(data) + self.assertEqual(res.description, 'They are not allergic to peanuts') + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(DictDecisionTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/DictDotNotationDecisionTest.py b/tests/SpiffWorkflow/dmn/DictDotNotationDecisionTest.py new file mode 100644 index 000000000..406b13f6a --- /dev/null +++ b/tests/SpiffWorkflow/dmn/DictDotNotationDecisionTest.py @@ -0,0 +1,47 @@ +import unittest + +from tests.SpiffWorkflow.dmn.DecisionRunner import DecisionRunner +from SpiffWorkflow.bpmn.PythonScriptEngine import Box + + +class DictDotNotationDecisionTestClass(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.runner = 
DecisionRunner('dict_dot_notation_decision.dmn', debug='DEBUG') + + def test_string_decision_string_output1(self): + data = {"foods": { + "spam": {"delicious": False} + }} + data = Box(data) + res = self.runner.decide(data) + self.assertEqual(res.description, 'This person has a tongue, brain ' + 'or sense of smell.') + + data = Box({"foods": { + "spam": {"delicious": False} + }}) + def test_string_decision_string_output2(self): + data = {"foods": { + "spam": {"delicious": True} + }} + res = self.runner.decide(Box(data)) + self.assertEqual(res.description, 'This person is lacking many ' + 'critical decision making skills, ' + 'or is a viking.') + + def test_string_decision_with_kwargs(self): + data = {"foods": { + "spam": {"delicious": False} + }} + res = self.runner.decide({}, **Box(data)) + self.assertEqual(res.description, 'This person has a tongue, brain ' + 'or sense of smell.') + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(DictDotNotationDecisionTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/DictDotNotationDecisionWeirdCharactersTest.py b/tests/SpiffWorkflow/dmn/DictDotNotationDecisionWeirdCharactersTest.py new file mode 100644 index 000000000..4f8fc767a --- /dev/null +++ b/tests/SpiffWorkflow/dmn/DictDotNotationDecisionWeirdCharactersTest.py @@ -0,0 +1,30 @@ +import unittest + +from SpiffWorkflow.bpmn.PythonScriptEngine import Box +from tests.SpiffWorkflow.dmn.DecisionRunner import DecisionRunner + + +class DictDotNotationDecisionWeirdCharactersTestClass(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.runner = DecisionRunner('dict_dot_notation_decision_weird_characters.dmn', debug='DEBUG') + + def test_string_decision_string_output1(self): + data = {"odd_foods": { + "SPAM_LIKE": {"delicious": False} + }} + res = self.runner.decide(Box(data)) + self.assertEqual(res.description, 'This person has a tongue, brain ' + 'or sense of smell.') 
+ + data = {"foods": { + "spam": {"delicious": False} + }} + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase( + DictDotNotationDecisionWeirdCharactersTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/Dmn20151101VersionTest.py b/tests/SpiffWorkflow/dmn/Dmn20151101VersionTest.py new file mode 100644 index 000000000..68be28b54 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/Dmn20151101VersionTest.py @@ -0,0 +1,28 @@ +import os +import unittest + +from SpiffWorkflow import Task + +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow + +from SpiffWorkflow.dmn.parser.BpmnDmnParser import BpmnDmnParser +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +class DmnVersionTest(BpmnWorkflowTestCase): + PARSER_CLASS = BpmnDmnParser + + def setUp(self): + self.parser = BpmnDmnParser() + + def testLoad(self): + dmn = os.path.join(os.path.dirname(__file__), 'data', + 'dmn_version_20191111_test.dmn') + self.assertIsNone(self.parser.add_dmn_file(dmn)) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(DmnVersionTest) + + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/Dmn20191111VersionTest.py b/tests/SpiffWorkflow/dmn/Dmn20191111VersionTest.py new file mode 100644 index 000000000..68be28b54 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/Dmn20191111VersionTest.py @@ -0,0 +1,28 @@ +import os +import unittest + +from SpiffWorkflow import Task + +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow + +from SpiffWorkflow.dmn.parser.BpmnDmnParser import BpmnDmnParser +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +class DmnVersionTest(BpmnWorkflowTestCase): + PARSER_CLASS = BpmnDmnParser + + def setUp(self): + self.parser = BpmnDmnParser() + + def testLoad(self): + dmn = os.path.join(os.path.dirname(__file__), 'data', + 
'dmn_version_20191111_test.dmn') + self.assertIsNone(self.parser.add_dmn_file(dmn)) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(DmnVersionTest) + + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/DmnFailVersionTest.py b/tests/SpiffWorkflow/dmn/DmnFailVersionTest.py new file mode 100644 index 000000000..a5412b6e7 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/DmnFailVersionTest.py @@ -0,0 +1,28 @@ +import os +import unittest + +from SpiffWorkflow import Task + +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow + +from SpiffWorkflow.dmn.parser.BpmnDmnParser import BpmnDmnParser +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +class DmnVersionTest(BpmnWorkflowTestCase): + PARSER_CLASS = BpmnDmnParser + + def setUp(self): + self.parser = BpmnDmnParser() + + def testLoad(self): + dmn = os.path.join(os.path.dirname(__file__), 'data', + 'dmn_version_fail_test.dmn') + with self.assertRaises(IndexError): + self.parser.add_dmn_file(dmn) + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(DmnVersionTest) + + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/FeelBoolDecisionTest.py b/tests/SpiffWorkflow/dmn/FeelBoolDecisionTest.py new file mode 100644 index 000000000..30f3da04c --- /dev/null +++ b/tests/SpiffWorkflow/dmn/FeelBoolDecisionTest.py @@ -0,0 +1,31 @@ +import unittest + +from tests.SpiffWorkflow.dmn.DecisionRunner import DecisionRunner + + +class FeelBoolDecisionTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.runner = DecisionRunner('bool_decision_feel.dmn', debug='DEBUG') + + def test_bool_decision_string_output1(self): + res = self.runner.decide(True) + self.assertEqual(res.description, 'Y Row Annotation') + + def 
test_bool_decision_string_output2(self): + res = self.runner.decide(False) + self.assertEqual(res.description, 'N Row Annotation') + + def test_bool_decision_string_output3(self): + res = self.runner.decide(None) + self.assertEqual(res.description, 'ELSE Row Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(FeelBoolDecisionTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/FeelBusinessRuleTaskParserTest.py b/tests/SpiffWorkflow/dmn/FeelBusinessRuleTaskParserTest.py new file mode 100644 index 000000000..24586cd59 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/FeelBusinessRuleTaskParserTest.py @@ -0,0 +1,52 @@ +import os +import unittest + +from SpiffWorkflow import Task + +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow + +from SpiffWorkflow.dmn.parser.BpmnDmnParser import BpmnDmnParser +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +class FeelBusinessRuleTaskParserTest(BpmnWorkflowTestCase): + PARSER_CLASS = BpmnDmnParser + + def setUp(self): + parser = BpmnDmnParser() + bpmn = os.path.join(os.path.dirname(__file__), 'data', 'BpmnDmn', + 'ExclusiveGatewayIfElseAndDecision.bpmn') + dmn = os.path.join(os.path.dirname(__file__), 'data', 'BpmnDmn', + 'test_integer_decision_feel.dmn') + parser.add_bpmn_file(bpmn) + parser.add_dmn_file(dmn) + self.spec = parser.get_spec('Process_1') + self.workflow = BpmnWorkflow(self.spec) + + def testConstructor(self): + pass # this is accomplished through setup. 
+ + def testDmnHappy(self): + self.workflow = BpmnWorkflow(self.spec) + self.workflow.get_tasks(Task.READY)[0].set_data(x=3) + self.workflow.do_engine_steps() + self.assertDictEqual(self.workflow.data, {'x': 3, 'y': 'A'}) + self.assertDictEqual(self.workflow.last_task.data, {'x': 3, 'y': 'A'}) + + def testDmnSaveRestore(self): + self.workflow = BpmnWorkflow(self.spec) + self.workflow.get_tasks(Task.READY)[0].set_data(x=3) + self.save_restore() + self.workflow.do_engine_steps() + self.save_restore() + self.assertDictEqual(self.workflow.data, {'x': 3, 'y': 'A'}) + self.assertDictEqual(self.workflow.last_task.data, {'x': 3, 'y': 'A'}) + + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(FeelBusinessRuleTaskParserTest) + + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/FeelDateDecisionTest.py b/tests/SpiffWorkflow/dmn/FeelDateDecisionTest.py new file mode 100644 index 000000000..a57eff81b --- /dev/null +++ b/tests/SpiffWorkflow/dmn/FeelDateDecisionTest.py @@ -0,0 +1,41 @@ +import unittest +from datetime import datetime + +from SpiffWorkflow.dmn.parser.DMNParser import DMNParser +from tests.SpiffWorkflow.dmn.DecisionRunner import DecisionRunner + + +class FeelDateDecisionTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.runner = DecisionRunner('date_decision_feel.dmn', debug='DEBUG') + + def test_date_decision_string_output1(self): + res = self.runner.decide(datetime.strptime('2017-11-01T10:00:00', DMNParser.DT_FORMAT)) + self.assertEqual(res.description, '111 Row Annotation') + + def test_date_decision_string_output2(self): + res = self.runner.decide(datetime.strptime('2017-11-03T00:00:00', DMNParser.DT_FORMAT)) + self.assertEqual(res.description, '311 Row Annotation') + + def test_date_decision_string_output3(self): + res = 
self.runner.decide(datetime.strptime('2017-11-02T00:00:00', DMNParser.DT_FORMAT)) + self.assertEqual(res.description, '<3.11 Row Annotation') + + def test_date_decision_string_output4(self): + res = self.runner.decide(datetime.strptime('2017-11-04T00:00:00', DMNParser.DT_FORMAT)) + self.assertEqual(res.description, '>3.11 Row Annotation') + + def test_date_decision_string_output5(self): + res = self.runner.decide(datetime.strptime('2017-11-13T12:00:00', DMNParser.DT_FORMAT)) + self.assertEqual(res.description, '>13.11<14.11 Row Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(FeelDateDecisionTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/FeelDictDecisionTest.py b/tests/SpiffWorkflow/dmn/FeelDictDecisionTest.py new file mode 100644 index 000000000..cb9746adf --- /dev/null +++ b/tests/SpiffWorkflow/dmn/FeelDictDecisionTest.py @@ -0,0 +1,38 @@ +import unittest + +from SpiffWorkflow.bpmn.PythonScriptEngine import PythonScriptEngine +from tests.SpiffWorkflow.dmn.DecisionRunner import DecisionRunner + + +class FeelDictDecisionTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.runner = DecisionRunner('dict_decision_feel.dmn', debug='DEBUG') + + def test_string_decision_string_output1(self): + data = {"allergies": { + "PEANUTS": {"delicious": True}, + "SPAM": {"delicious": False} + }} + PythonScriptEngine.convertToBox(PythonScriptEngine(),data) + res = self.runner.decide(data) + self.assertEqual(res.description, 'They are allergic to peanuts') + + def test_string_decision_string_output2(self): + data = {"allergies": { + "SpAm": {"delicious": False}, + "SPAM": {"delicious": False} + }} + res = self.runner.decide(data) + self.assertEqual(res.description, 'They are not allergic to peanuts') + + +def suite(): + return 
unittest.TestLoader().loadTestsFromTestCase(FeelDictDecisionTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/FeelDictDotNotationDecisionTest.py b/tests/SpiffWorkflow/dmn/FeelDictDotNotationDecisionTest.py new file mode 100644 index 000000000..69671f669 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/FeelDictDotNotationDecisionTest.py @@ -0,0 +1,46 @@ +import unittest + +from SpiffWorkflow.bpmn.PythonScriptEngine import Box +from tests.SpiffWorkflow.dmn.DecisionRunner import DecisionRunner + + +class FeelDictDotNotationDecisionTestClass(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.runner = DecisionRunner('dict_dot_notation_decision_feel.dmn', debug='DEBUG') + + def test_string_decision_string_output1(self): + data = {"foods": { + "spam": {"delicious": False} + }} + res = self.runner.decide(Box(data)) + self.assertEqual(res.description, 'This person has a tongue, brain ' + 'or sense of smell.') + + data = {"foods": { + "spam": {"delicious": False} + }} + def test_string_decision_string_output2(self): + data = {"foods": { + "spam": {"delicious": True} + }} + res = self.runner.decide(Box(data)) + self.assertEqual(res.description, 'This person is lacking many ' + 'critical decision making skills, ' + 'or is a viking.') + + def test_string_decision_with_kwargs(self): + data = {"foods": { + "spam": {"delicious": False} + }} + res = self.runner.decide({}, **Box(data)) + self.assertEqual(res.description, 'This person has a tongue, brain ' + 'or sense of smell.') + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(FeelDictDotNotationDecisionTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/FeelIntegerDecisionComparisonTest.py b/tests/SpiffWorkflow/dmn/FeelIntegerDecisionComparisonTest.py new file mode 100644 index 000000000..656099bc9 --- /dev/null +++ 
b/tests/SpiffWorkflow/dmn/FeelIntegerDecisionComparisonTest.py @@ -0,0 +1,31 @@ +import unittest + +from tests.SpiffWorkflow.dmn.DecisionRunner import DecisionRunner + + +class FeelIntegerDecisionComparisonTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.runner = DecisionRunner('integer_decision_comparison_feel.dmn', debug='DEBUG') + + def test_integer_decision_string_output1(self): + res = self.runner.decide(30) + self.assertEqual(res.description, '30 Row Annotation') + + def test_integer_decision_string_output2(self): + res = self.runner.decide(24) + self.assertEqual(res.description, 'L Row Annotation') + + def test_integer_decision_string_output3(self): + res = self.runner.decide(25) + self.assertEqual(res.description, 'H Row Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(FeelIntegerDecisionComparisonTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/FeelIntegerDecisionRangeTest.py b/tests/SpiffWorkflow/dmn/FeelIntegerDecisionRangeTest.py new file mode 100644 index 000000000..1ed6459f5 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/FeelIntegerDecisionRangeTest.py @@ -0,0 +1,75 @@ +import unittest + +from tests.SpiffWorkflow.dmn.DecisionRunner import DecisionRunner + + +class FeelIntegerDecisionRangeTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + def test_integer_decision_string_output_inclusive(self): + runner = DecisionRunner('integer_decision_range_inclusive_feel.dmn', debug='DEBUG') + + res = runner.decide(100) + self.assertEqual(res.description, '100-110 Inclusive Annotation') + + res = runner.decide(99) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide(110) + self.assertEqual(res.description, '100-110 Inclusive Annotation') + + res = 
runner.decide(111) + self.assertEqual(res.description, 'ELSE Row Annotation') + + def test_integer_decision_string_output_exclusive(self): + runner = DecisionRunner('integer_decision_range_exclusive_feel.dmn', debug='DEBUG') + + res = runner.decide(100) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide(101) + self.assertEqual(res.description, '100-110 Exclusive Annotation') + + res = runner.decide(110) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide(109) + self.assertEqual(res.description, '100-110 Exclusive Annotation') + + def test_integer_decision_string_output_excl_inclusive(self): + runner = DecisionRunner('integer_decision_range_excl_inclusive_feel.dmn', debug='DEBUG') + + res = runner.decide(100) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide(101) + self.assertEqual(res.description, '100-110 ExclInclusive Annotation') + + res = runner.decide(110) + self.assertEqual(res.description, '100-110 ExclInclusive Annotation') + + res = runner.decide(111) + self.assertEqual(res.description, 'ELSE Row Annotation') + + def test_integer_decision_string_output_incl_exclusive(self): + runner = DecisionRunner('integer_decision_range_incl_exclusive_feel.dmn', debug='DEBUG') + + res = runner.decide(100) + self.assertEqual(res.description, '100-110 InclExclusive Annotation') + + res = runner.decide(99) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide(110) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide(109) + self.assertEqual(res.description, '100-110 InclExclusive Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(FeelIntegerDecisionRangeTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/FeelKwargsParameterTest.py b/tests/SpiffWorkflow/dmn/FeelKwargsParameterTest.py new file mode 100644 index 
000000000..6e2444031 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/FeelKwargsParameterTest.py @@ -0,0 +1,23 @@ +import unittest + +from tests.SpiffWorkflow.dmn.DecisionRunner import DecisionRunner + + +class FeelStringDecisionTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.runner = DecisionRunner('kwargs_parameter_feel.dmn', debug='DEBUG') + + def test_string_decision_string_output1(self): + res = self.runner.decide(Gender='m') + self.assertEqual(res.description, 'm Row Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(FeelStringDecisionTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/FeelListDecisionTest.py b/tests/SpiffWorkflow/dmn/FeelListDecisionTest.py new file mode 100644 index 000000000..a97dec8c3 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/FeelListDecisionTest.py @@ -0,0 +1,28 @@ +import unittest + +from tests.SpiffWorkflow.dmn.DecisionRunner import DecisionRunner + + +class FeelListDecisionTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.runner = DecisionRunner('list_decision_feel.dmn', debug='DEBUG') + + def test_string_decision_string_output1(self): + res = self.runner.decide(["PEANUTS", "SPAM"]) + self.assertEqual(res.description, 'They are allergic to peanuts') + + def test_string_decision_string_output2(self): + res = self.runner.decide(["SPAM", "SPAM"]) + self.assertEqual(res.description, 'They are not allergic to peanuts') + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(FeelListDecisionTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/FeelLongDoubleComparisonTest.py b/tests/SpiffWorkflow/dmn/FeelLongDoubleComparisonTest.py new file 
mode 100644 index 000000000..00b252c6b --- /dev/null +++ b/tests/SpiffWorkflow/dmn/FeelLongDoubleComparisonTest.py @@ -0,0 +1,33 @@ +import unittest + +from decimal import Decimal + +from tests.SpiffWorkflow.dmn.DecisionRunner import DecisionRunner + + +class FeelLongOrDoubleDecisionTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.runner = DecisionRunner('long_or_double_decision_comparison_feel.dmn', debug='DEBUG') + + def test_long_or_double_decision_string_output1(self): + res = self.runner.decide(Decimal('30.5')) + self.assertEqual(res.description, '30.5 Row Annotation') + + def test_long_or_double_decision_string_output2(self): + res = self.runner.decide(Decimal('25.3')) + self.assertEqual(res.description, 'L Row Annotation') + + def test_long_or_double_decision_string_output3(self): + res = self.runner.decide(Decimal('25.4')) + self.assertEqual(res.description, 'H Row Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(FeelLongOrDoubleDecisionTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/FeelLongOrDoubleRangeTest.py b/tests/SpiffWorkflow/dmn/FeelLongOrDoubleRangeTest.py new file mode 100644 index 000000000..a2f85eca1 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/FeelLongOrDoubleRangeTest.py @@ -0,0 +1,77 @@ +import unittest + +from decimal import Decimal + +from tests.SpiffWorkflow.dmn.DecisionRunner import DecisionRunner + + +class FeelLongOrDoubleDecisionRangeTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + def test_long_or_double_decision_string_output_inclusive(self): + runner = DecisionRunner('long_or_double_decision_range_inclusive_feel.dmn', debug='DEBUG') + + res = runner.decide(Decimal('100.05')) + self.assertEqual(res.description, '100.05-110.05 Inclusive Annotation') + + 
res = runner.decide(Decimal('99')) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide(Decimal('110.05')) + self.assertEqual(res.description, '100.05-110.05 Inclusive Annotation') + + res = runner.decide(Decimal('111')) + self.assertEqual(res.description, 'ELSE Row Annotation') + + def test_long_or_double_decision_string_output_exclusive(self): + runner = DecisionRunner('long_or_double_decision_range_exclusive_feel.dmn', debug='DEBUG') + + res = runner.decide(Decimal('100.05')) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide(Decimal('101')) + self.assertEqual(res.description, '100.05-110.05 Exclusive Annotation') + + res = runner.decide(Decimal('110.05')) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide(Decimal('109')) + self.assertEqual(res.description, '100.05-110.05 Exclusive Annotation') + + def test_long_or_double_decision_string_output_excl_inclusive(self): + runner = DecisionRunner('long_or_double_decision_range_excl_inclusive_feel.dmn', debug='DEBUG') + + res = runner.decide(Decimal('100.05')) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide(Decimal('101')) + self.assertEqual(res.description, '100.05-110.05 ExclInclusive Annotation') + + res = runner.decide(Decimal('110.05')) + self.assertEqual(res.description, '100.05-110.05 ExclInclusive Annotation') + + res = runner.decide(Decimal('111')) + self.assertEqual(res.description, 'ELSE Row Annotation') + + def test_long_or_double_decision_string_output_incl_exclusive(self): + runner = DecisionRunner('long_or_double_decision_range_incl_exclusive_feel.dmn', debug='DEBUG') + + res = runner.decide(Decimal('100.05')) + self.assertEqual(res.description, '100.05-110.05 InclExclusive Annotation') + + res = runner.decide(Decimal('99')) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide(Decimal('110.05')) + self.assertEqual(res.description, 'ELSE Row 
Annotation') + + res = runner.decide(Decimal('109')) + self.assertEqual(res.description, '100.05-110.05 InclExclusive Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(FeelLongOrDoubleDecisionRangeTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/FeelNearMissNameTest.py b/tests/SpiffWorkflow/dmn/FeelNearMissNameTest.py new file mode 100644 index 000000000..45a2bc408 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/FeelNearMissNameTest.py @@ -0,0 +1,53 @@ +import unittest + +from tests.SpiffWorkflow.dmn.DecisionRunner import DecisionRunner + + +class FeelNearMissTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.data = { + "Exclusive": [ + { + "ExclusiveSpaceRoomID": "121", + } + ], + "eXclusive": [ + { + "ExclusiveSpaceRoomID": "121", + } + ], + "EXCLUSIVE": [ + { + "ExclusiveSpaceRoomID": "121", + } + ], + "personnel": [ + { + "PersonnelType": "Faculty", + "label": "Steven K Funkhouser (sf4d)", + "value": "sf4d" + } + ], + + "shared": [] + } + + cls.runner = DecisionRunner('exclusive_feel.dmn', debug='DEBUG') + + def test_string_decision_string_output1(self): + self.assertRaisesRegex(NameError, + ".+\['Exclusive', 'eXclusive', 'EXCLUSIVE'\]\?", + self.runner.decide, + **self.data) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(FeelNearMissTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/FeelStringDecisionTest.py b/tests/SpiffWorkflow/dmn/FeelStringDecisionTest.py new file mode 100644 index 000000000..42204b2aa --- /dev/null +++ b/tests/SpiffWorkflow/dmn/FeelStringDecisionTest.py @@ -0,0 +1,35 @@ +import unittest + +from tests.SpiffWorkflow.dmn.DecisionRunner import DecisionRunner + + +class FeelStringDecisionTestClass(unittest.TestCase): + """ + Doc: 
https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.runner = DecisionRunner('string_decision_feel.dmn', debug='DEBUG') + + def test_string_decision_string_output1(self): + res = self.runner.decide('m') + self.assertEqual(res.description, 'm Row Annotation') + + def test_string_decision_string_output2(self): + res = self.runner.decide('f') + self.assertEqual(res.description, 'f Row Annotation') + + def test_string_decision_string_output3(self): + res = self.runner.decide('y') + self.assertEqual(res.description, 'NOT x Row Annotation') + + def test_string_decision_string_output4(self): + res = self.runner.decide('x') + self.assertEqual(res.description, 'ELSE Row Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(FeelStringDecisionTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/FeelStringIntegerDecisionTest.py b/tests/SpiffWorkflow/dmn/FeelStringIntegerDecisionTest.py new file mode 100644 index 000000000..17fe5b40a --- /dev/null +++ b/tests/SpiffWorkflow/dmn/FeelStringIntegerDecisionTest.py @@ -0,0 +1,39 @@ +import unittest + +from tests.SpiffWorkflow.dmn.DecisionRunner import DecisionRunner + + +class FeelStringIntegerDecisionTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.runner = DecisionRunner('string_integer_decision_feel.dmn', debug='DEBUG') + + def test_string_integer_decision_string_output1(self): + res = self.runner.decide('m', 30) + self.assertEqual(res.description, 'm30 Row Annotation') + + def test_string_integer_decision_string_output2(self): + res = self.runner.decide('m', 24) + self.assertEqual(res.description, 'mL Row Annotation') + + def test_string_integer_decision_string_output3(self): + res = self.runner.decide('m', 25) + self.assertEqual(res.description, 'mH Row Annotation') 
+ + def test_string_integer_decision_string_output4(self): + res = self.runner.decide('f', -1) + self.assertEqual(res.description, 'fL Row Annotation') + + def test_string_integer_decision_string_output5(self): + res = self.runner.decide('x', 0) + self.assertEqual(res.description, 'ELSE Row Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(FeelStringIntegerDecisionTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/IntegerDecisionComparisonTest.py b/tests/SpiffWorkflow/dmn/IntegerDecisionComparisonTest.py new file mode 100644 index 000000000..bcbe305be --- /dev/null +++ b/tests/SpiffWorkflow/dmn/IntegerDecisionComparisonTest.py @@ -0,0 +1,31 @@ +import unittest + +from tests.SpiffWorkflow.dmn.DecisionRunner import DecisionRunner + + +class IntegerDecisionComparisonTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.runner = DecisionRunner('integer_decision_comparison.dmn', debug='DEBUG') + + def test_integer_decision_string_output1(self): + res = self.runner.decide(30) + self.assertEqual(res.description, '30 Row Annotation') + + def test_integer_decision_string_output2(self): + res = self.runner.decide(24) + self.assertEqual(res.description, 'L Row Annotation') + + def test_integer_decision_string_output3(self): + res = self.runner.decide(25) + self.assertEqual(res.description, 'H Row Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(IntegerDecisionComparisonTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/IntegerDecisionRangeTest.py b/tests/SpiffWorkflow/dmn/IntegerDecisionRangeTest.py new file mode 100644 index 000000000..2c1126f24 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/IntegerDecisionRangeTest.py @@ -0,0 +1,75 @@ +import unittest + +from 
tests.SpiffWorkflow.dmn.DecisionRunner import DecisionRunner + + +class IntegerDecisionRangeTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + def test_integer_decision_string_output_inclusive(self): + runner = DecisionRunner('integer_decision_range_inclusive.dmn', debug='DEBUG') + + res = runner.decide(100) + self.assertEqual(res.description, '100-110 Inclusive Annotation') + + res = runner.decide(99) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide(110) + self.assertEqual(res.description, '100-110 Inclusive Annotation') + + res = runner.decide(111) + self.assertEqual(res.description, 'ELSE Row Annotation') + + def test_integer_decision_string_output_exclusive(self): + runner = DecisionRunner('integer_decision_range_exclusive.dmn', debug='DEBUG') + + res = runner.decide(100) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide(101) + self.assertEqual(res.description, '100-110 Exclusive Annotation') + + res = runner.decide(110) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide(109) + self.assertEqual(res.description, '100-110 Exclusive Annotation') + + def test_integer_decision_string_output_excl_inclusive(self): + runner = DecisionRunner('integer_decision_range_excl_inclusive.dmn', debug='DEBUG') + + res = runner.decide(100) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide(101) + self.assertEqual(res.description, '100-110 ExclInclusive Annotation') + + res = runner.decide(110) + self.assertEqual(res.description, '100-110 ExclInclusive Annotation') + + res = runner.decide(111) + self.assertEqual(res.description, 'ELSE Row Annotation') + + def test_integer_decision_string_output_incl_exclusive(self): + runner = DecisionRunner('integer_decision_range_incl_exclusive.dmn', debug='DEBUG') + + res = runner.decide(100) + self.assertEqual(res.description, '100-110 InclExclusive 
Annotation') + + res = runner.decide(99) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide(110) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide(109) + self.assertEqual(res.description, '100-110 InclExclusive Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(IntegerDecisionRangeTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/InvalidBusinessRuleTaskParserTest.py b/tests/SpiffWorkflow/dmn/InvalidBusinessRuleTaskParserTest.py new file mode 100644 index 000000000..2657a7800 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/InvalidBusinessRuleTaskParserTest.py @@ -0,0 +1,51 @@ +import os +import unittest + +from SpiffWorkflow.exceptions import WorkflowTaskExecException + +from SpiffWorkflow import Task, WorkflowException + +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow + +from SpiffWorkflow.dmn.parser.BpmnDmnParser import BpmnDmnParser +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +class BusinessRuleTaskParserTest(BpmnWorkflowTestCase): + PARSER_CLASS = BpmnDmnParser + + def setUp(self): + parser = BpmnDmnParser() + bpmn = os.path.join(os.path.dirname(__file__), 'data', 'InvalidBpmnDmn', + 'InvalidDecision.bpmn') + dmn = os.path.join(os.path.dirname(__file__), 'data', 'InvalidBpmnDmn', + 'invalid_decision.dmn') + parser.add_bpmn_file(bpmn) + parser.add_dmn_file(dmn) + self.spec = parser.get_spec('Process_1') + self.workflow = BpmnWorkflow(self.spec) + + def testConstructor(self): + pass # this is accomplished through setup. 
+ + def testDmnRaisesTaskErrors(self): + self.workflow = BpmnWorkflow(self.spec) + self.workflow.get_tasks(Task.READY)[0].set_data(x=3) + try: + self.workflow.do_engine_steps() + self.assertTrue(False, "An error should have been raised.") + except WorkflowTaskExecException as we: + self.assertTrue(True, "An error was raised..") + self.assertEqual("InvalidDecisionTaskId", we.sender.name) + self.maxDiff = 1000 + self.assertEqual("InvalidDecisionTaskId: Failed to execute " + "expression: 'spam' is '= 1' in the Row with " + "annotation 'This is complletely wrong.'" + ", invalid syntax (, line 1)", + str(we)) + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(BusinessRuleTaskParserTest) + + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/KwargsParameterTest.py b/tests/SpiffWorkflow/dmn/KwargsParameterTest.py new file mode 100644 index 000000000..0ed94fbb5 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/KwargsParameterTest.py @@ -0,0 +1,23 @@ +import unittest + +from tests.SpiffWorkflow.dmn.DecisionRunner import DecisionRunner + + +class StringDecisionTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.runner = DecisionRunner('kwargs_parameter.dmn', debug='DEBUG') + + def test_string_decision_string_output1(self): + res = self.runner.decide(Gender='m') + self.assertEqual(res.description, 'm Row Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(StringDecisionTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/ListDecisionTest.py b/tests/SpiffWorkflow/dmn/ListDecisionTest.py new file mode 100644 index 000000000..2cc0bcb8e --- /dev/null +++ b/tests/SpiffWorkflow/dmn/ListDecisionTest.py @@ -0,0 +1,28 @@ +import unittest + +from tests.SpiffWorkflow.dmn.DecisionRunner import 
DecisionRunner + + +class ListDecisionTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.runner = DecisionRunner('list_decision.dmn', debug='DEBUG') + + def test_string_decision_string_output1(self): + res = self.runner.decide(["PEANUTS", "SPAM"]) + self.assertEqual(res.description, 'They are allergic to peanuts') + + def test_string_decision_string_output2(self): + res = self.runner.decide(["SPAM", "SPAM"]) + self.assertEqual(res.description, 'They are not allergic to peanuts') + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(ListDecisionTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/LongDoubleComparisonTest.py b/tests/SpiffWorkflow/dmn/LongDoubleComparisonTest.py new file mode 100644 index 000000000..3d02cbe63 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/LongDoubleComparisonTest.py @@ -0,0 +1,33 @@ +import unittest + +from decimal import Decimal + +from tests.SpiffWorkflow.dmn.DecisionRunner import DecisionRunner + + +class LongOrDoubleDecisionTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.runner = DecisionRunner('long_or_double_decision_comparison.dmn', debug='DEBUG') + + def test_long_or_double_decision_string_output1(self): + res = self.runner.decide(Decimal('30.5')) + self.assertEqual(res.description, '30.5 Row Annotation') + + def test_long_or_double_decision_string_output2(self): + res = self.runner.decide(Decimal('25.3')) + self.assertEqual(res.description, 'L Row Annotation') + + def test_long_or_double_decision_string_output3(self): + res = self.runner.decide(Decimal('25.4')) + self.assertEqual(res.description, 'H Row Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(LongOrDoubleDecisionTestClass) + +if __name__ == 
'__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/LongOrDoubleRangeTest.py b/tests/SpiffWorkflow/dmn/LongOrDoubleRangeTest.py new file mode 100644 index 000000000..fdeac13d6 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/LongOrDoubleRangeTest.py @@ -0,0 +1,77 @@ +import unittest + +from decimal import Decimal + +from tests.SpiffWorkflow.dmn.DecisionRunner import DecisionRunner + + +class LongOrDoubleDecisionRangeTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + def test_long_or_double_decision_string_output_inclusive(self): + runner = DecisionRunner('long_or_double_decision_range_inclusive.dmn', debug='DEBUG') + + res = runner.decide(Decimal('100.05')) + self.assertEqual(res.description, '100.05-110.05 Inclusive Annotation') + + res = runner.decide(Decimal('99')) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide(Decimal('110.05')) + self.assertEqual(res.description, '100.05-110.05 Inclusive Annotation') + + res = runner.decide(Decimal('111')) + self.assertEqual(res.description, 'ELSE Row Annotation') + + def test_long_or_double_decision_string_output_exclusive(self): + runner = DecisionRunner('long_or_double_decision_range_exclusive.dmn', debug='DEBUG') + + res = runner.decide(Decimal('100.05')) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide(Decimal('101')) + self.assertEqual(res.description, '100.05-110.05 Exclusive Annotation') + + res = runner.decide(Decimal('110.05')) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide(Decimal('109')) + self.assertEqual(res.description, '100.05-110.05 Exclusive Annotation') + + def test_long_or_double_decision_string_output_excl_inclusive(self): + runner = DecisionRunner('long_or_double_decision_range_excl_inclusive.dmn', debug='DEBUG') + + res = runner.decide(Decimal('100.05')) + self.assertEqual(res.description, 'ELSE 
Row Annotation') + + res = runner.decide(Decimal('101')) + self.assertEqual(res.description, '100.05-110.05 ExclInclusive Annotation') + + res = runner.decide(Decimal('110.05')) + self.assertEqual(res.description, '100.05-110.05 ExclInclusive Annotation') + + res = runner.decide(Decimal('111')) + self.assertEqual(res.description, 'ELSE Row Annotation') + + def test_long_or_double_decision_string_output_incl_exclusive(self): + runner = DecisionRunner('long_or_double_decision_range_incl_exclusive.dmn', debug='DEBUG') + + res = runner.decide(Decimal('100.05')) + self.assertEqual(res.description, '100.05-110.05 InclExclusive Annotation') + + res = runner.decide(Decimal('99')) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide(Decimal('110.05')) + self.assertEqual(res.description, 'ELSE Row Annotation') + + res = runner.decide(Decimal('109')) + self.assertEqual(res.description, '100.05-110.05 InclExclusive Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(LongOrDoubleDecisionRangeTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/MultiInstanceDMNTest.py b/tests/SpiffWorkflow/dmn/MultiInstanceDMNTest.py new file mode 100644 index 000000000..6f86288c4 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/MultiInstanceDMNTest.py @@ -0,0 +1,60 @@ +import os +import unittest + +from box import Box + +from SpiffWorkflow import Task + +from SpiffWorkflow.bpmn.workflow import BpmnWorkflow + +from SpiffWorkflow.dmn.parser.BpmnDmnParser import BpmnDmnParser +from tests.SpiffWorkflow.bpmn.BpmnWorkflowTestCase import BpmnWorkflowTestCase + +class MultiInstanceDMNTest(BpmnWorkflowTestCase): + PARSER_CLASS = BpmnDmnParser + + def setUp(self): + parser = BpmnDmnParser() + bpmn = os.path.join(os.path.dirname(__file__), 'data', 'BpmnDmn', + 'DMNMultiInstance.bpmn') + dmn = os.path.join(os.path.dirname(__file__), 'data', 'BpmnDmn', + 
'test_integer_decision_multi.dmn') + parser.add_bpmn_file(bpmn) + parser.add_dmn_file(dmn) + self.spec = parser.get_spec('Process_1') + self.workflow = BpmnWorkflow(self.spec) + + def testConstructor(self): + pass # this is accomplished through setup. + + def testDmnHappy(self): + self.workflow = BpmnWorkflow(self.spec) + self.workflow.do_engine_steps() + self.workflow.complete_next() + self.workflow.do_engine_steps() + self.workflow.complete_next() + self.workflow.do_engine_steps() + self.assertEqual(self.workflow.data['stuff']['E']['y'], 'D') + + + def testDmnSaveRestore(self): + self.workflow = BpmnWorkflow(self.spec) + self.save_restore() + self.workflow.do_engine_steps() + self.workflow.complete_next() + self.save_restore() + self.workflow.do_engine_steps() + self.workflow.complete_next() + self.save_restore() + self.workflow.do_engine_steps() + self.save_restore() + self.assertEqual(self.workflow.data['stuff']['E']['y'], 'D') + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(MultiInstanceDMNTest) + + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/NearMissNameTest.py b/tests/SpiffWorkflow/dmn/NearMissNameTest.py new file mode 100644 index 000000000..3841631fe --- /dev/null +++ b/tests/SpiffWorkflow/dmn/NearMissNameTest.py @@ -0,0 +1,53 @@ +import unittest + +from tests.SpiffWorkflow.dmn.DecisionRunner import DecisionRunner + + +class NearMissTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.data = { + "Exclusive": [ + { + "ExclusiveSpaceRoomID": "121", + } + ], + "eXclusive": [ + { + "ExclusiveSpaceRoomID": "121", + } + ], + "EXCLUSIVE": [ + { + "ExclusiveSpaceRoomID": "121", + } + ], + "personnel": [ + { + "PersonnelType": "Faculty", + "label": "Steven K Funkhouser (sf4d)", + "value": "sf4d" + } + ], + + "shared": [] + } + + cls.runner = 
DecisionRunner('exclusive.dmn', debug='DEBUG') + + def test_string_decision_string_output1(self): + self.assertRaisesRegex(NameError, + ".+\['Exclusive', 'eXclusive', 'EXCLUSIVE'\]\?", + self.runner.decide, + **self.data) + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(NearMissTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/StringDecisionTest.py b/tests/SpiffWorkflow/dmn/StringDecisionTest.py new file mode 100644 index 000000000..92eb04b08 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/StringDecisionTest.py @@ -0,0 +1,35 @@ +import unittest + +from tests.SpiffWorkflow.dmn.DecisionRunner import DecisionRunner + + +class StringDecisionTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.runner = DecisionRunner('string_decision.dmn', debug='DEBUG') + + def test_string_decision_string_output1(self): + res = self.runner.decide('m') + self.assertEqual(res.description, 'm Row Annotation') + + def test_string_decision_string_output2(self): + res = self.runner.decide('f') + self.assertEqual(res.description, 'f Row Annotation') + + def test_string_decision_string_output3(self): + res = self.runner.decide('y') + self.assertEqual(res.description, 'NOT x Row Annotation') + + def test_string_decision_string_output4(self): + res = self.runner.decide('x') + self.assertEqual(res.description, 'ELSE Row Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(StringDecisionTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/StringIntegerDecisionTest.py b/tests/SpiffWorkflow/dmn/StringIntegerDecisionTest.py new file mode 100644 index 000000000..d67d8cffa --- /dev/null +++ b/tests/SpiffWorkflow/dmn/StringIntegerDecisionTest.py @@ -0,0 +1,39 @@ +import unittest + +from 
tests.SpiffWorkflow.dmn.DecisionRunner import DecisionRunner + + +class StringIntegerDecisionTestClass(unittest.TestCase): + """ + Doc: https://docs.camunda.org/manual/7.7/user-guide/dmn-engine/ + """ + + @classmethod + def setUpClass(cls): + cls.runner = DecisionRunner('string_integer_decision.dmn', debug='DEBUG') + + def test_string_integer_decision_string_output1(self): + res = self.runner.decide('m', 30) + self.assertEqual(res.description, 'm30 Row Annotation') + + def test_string_integer_decision_string_output2(self): + res = self.runner.decide('m', 24) + self.assertEqual(res.description, 'mL Row Annotation') + + def test_string_integer_decision_string_output3(self): + res = self.runner.decide('m', 25) + self.assertEqual(res.description, 'mH Row Annotation') + + def test_string_integer_decision_string_output4(self): + res = self.runner.decide('f', -1) + self.assertEqual(res.description, 'fL Row Annotation') + + def test_string_integer_decision_string_output5(self): + res = self.runner.decide('x', 0) + self.assertEqual(res.description, 'ELSE Row Annotation') + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(StringIntegerDecisionTestClass) + +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/dmn/__init__.py b/tests/SpiffWorkflow/dmn/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/SpiffWorkflow/dmn/data/BpmnDmn/DMNMultiInstance.bpmn b/tests/SpiffWorkflow/dmn/data/BpmnDmn/DMNMultiInstance.bpmn new file mode 100644 index 000000000..ddf2c44e5 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/BpmnDmn/DMNMultiInstance.bpmn @@ -0,0 +1,105 @@ + + + + + Flow_1b29lxw + + + Flow_0fusz9y + + + + Flow_0z7tfh1 + SequenceFlow_06fnqj2 + + + + + + Flow_066d5e1 + Flow_0fusz9y + print('EndScript') +print(stuff) + + + + This is a test +of documentation + Flow_1b29lxw + Flow_09ciw49 + stuff={'A': {'x': 3}, + 'B': {'x': 4}, + 'C': {'x': 5}, + 'D': {'x': 6}, + 'E': {'x': 7}} 
+ + + + Flow_09ciw49 + Flow_0z7tfh1 + + + + SequenceFlow_06fnqj2 + Flow_066d5e1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/BpmnDmn/ExclusiveGatewayIfElseAndDecision.bpmn b/tests/SpiffWorkflow/dmn/data/BpmnDmn/ExclusiveGatewayIfElseAndDecision.bpmn new file mode 100644 index 000000000..82f761d26 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/BpmnDmn/ExclusiveGatewayIfElseAndDecision.bpmn @@ -0,0 +1,112 @@ + + + + + SequenceFlow_0b7whlk + + + SequenceFlow_15emspo + + + SequenceFlow_0b7whlk + SequenceFlow_15emspo + SequenceFlow_030p6mf + SequenceFlow_14jk7cm + + + SequenceFlow_030p6mf + + + + x==1 + + + x==2 + + + SequenceFlow_06fnqj2 + + + + + SequenceFlow_14jk7cm + SequenceFlow_06fnqj2 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/BpmnDmn/dmndict.bpmn b/tests/SpiffWorkflow/dmn/data/BpmnDmn/dmndict.bpmn new file mode 100644 index 000000000..4778f57b3 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/BpmnDmn/dmndict.bpmn @@ -0,0 +1,64 @@ + + + + + Flow_0k348ph + + + + Flow_132mhgo + Flow_03rcoxc + pi = {'test':{'me':'stupid var'}} +inputvar = 1 + + + Flow_03rcoxc + Flow_0pvahf7 + + + + Flow_0pvahf7 + + + + + Flow_0k348ph + Flow_132mhgo + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/BpmnDmn/dmndict.dmn b/tests/SpiffWorkflow/dmn/data/BpmnDmn/dmndict.dmn new file mode 100644 index 000000000..2670ed14c --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/BpmnDmn/dmndict.dmn @@ -0,0 +1,61 @@ + + + + + + + inputvar + + + + + + + + + + + + 1 + + + + + + 'yup it worked' + + + "yes" + + + + + 2 + + + + + + 'didnt expect this' + + + "No" + + + + + + + + + + + + + + + + + + + diff --git 
a/tests/SpiffWorkflow/dmn/data/BpmnDmn/test_integer_decision.dmn b/tests/SpiffWorkflow/dmn/data/BpmnDmn/test_integer_decision.dmn new file mode 100644 index 000000000..8f85db1b1 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/BpmnDmn/test_integer_decision.dmn @@ -0,0 +1,49 @@ + + + + + + + + + + + + A Annotation + + 3 + + + "A" + + + + B Annotation + + 4 + + + "B" + + + + C Annotation + + 5 + + + "C" + + + + D Annotation + + >= 6 + + + "D" + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/BpmnDmn/test_integer_decision_feel.dmn b/tests/SpiffWorkflow/dmn/data/BpmnDmn/test_integer_decision_feel.dmn new file mode 100644 index 000000000..8f85db1b1 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/BpmnDmn/test_integer_decision_feel.dmn @@ -0,0 +1,49 @@ + + + + + + + + + + + + A Annotation + + 3 + + + "A" + + + + B Annotation + + 4 + + + "B" + + + + C Annotation + + 5 + + + "C" + + + + D Annotation + + >= 6 + + + "D" + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/BpmnDmn/test_integer_decision_multi.dmn b/tests/SpiffWorkflow/dmn/data/BpmnDmn/test_integer_decision_multi.dmn new file mode 100644 index 000000000..7565b4c0e --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/BpmnDmn/test_integer_decision_multi.dmn @@ -0,0 +1,49 @@ + + + + + + + item.x + + + + + A Annotation + + 3 + + + "A" + + + + B Annotation + + 4 + + + "B" + + + + C Annotation + + 5 + + + "C" + + + + D Annotation + + >= 6 + + + "D" + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/InvalidBpmnDmn/InvalidDecision.bpmn b/tests/SpiffWorkflow/dmn/data/InvalidBpmnDmn/InvalidDecision.bpmn new file mode 100644 index 000000000..56b4c1c1e --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/InvalidBpmnDmn/InvalidDecision.bpmn @@ -0,0 +1,112 @@ + + + + + SequenceFlow_0b7whlk + + + SequenceFlow_06fnqj2 + + + + SequenceFlow_14jk7cm + SequenceFlow_06fnqj2 + + + SequenceFlow_15emspo + + + SequenceFlow_030p6mf + + + SequenceFlow_0b7whlk + SequenceFlow_14jk7cm + SequenceFlow_15emspo + SequenceFlow_030p6mf + + + + x==1 + + 
+ + x==2 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/InvalidBpmnDmn/invalid_decision.dmn b/tests/SpiffWorkflow/dmn/data/InvalidBpmnDmn/invalid_decision.dmn new file mode 100644 index 000000000..cbafd50c1 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/InvalidBpmnDmn/invalid_decision.dmn @@ -0,0 +1,35 @@ + + + + + + + + + + spam + + + + + This is complletely wrong. + + mGender Description + = 1 + + + "wrong" + + + + so is this. + + >= 100 + + + "My cat's breath smells like cat food." + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/InvalidBpmnDmn/invalid_decision_feel.dmn b/tests/SpiffWorkflow/dmn/data/InvalidBpmnDmn/invalid_decision_feel.dmn new file mode 100644 index 000000000..cbafd50c1 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/InvalidBpmnDmn/invalid_decision_feel.dmn @@ -0,0 +1,35 @@ + + + + + + + + + + spam + + + + + This is complletely wrong. + + mGender Description + = 1 + + + "wrong" + + + + so is this. + + >= 100 + + + "My cat's breath smells like cat food." 
+ + + + + diff --git a/tests/SpiffWorkflow/dmn/data/__init__.py b/tests/SpiffWorkflow/dmn/data/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/SpiffWorkflow/dmn/data/bool_decision.dmn b/tests/SpiffWorkflow/dmn/data/bool_decision.dmn new file mode 100644 index 000000000..904393be7 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/bool_decision.dmn @@ -0,0 +1,38 @@ + + + + + + + + + + Y Row Annotation + + True + + + "Yesss" + + + + N Row Annotation + + False + + + "Noooo" + + + + ELSE Row Annotation + + + + + "ELSE" + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/bool_decision_feel.dmn b/tests/SpiffWorkflow/dmn/data/bool_decision_feel.dmn new file mode 100644 index 000000000..5a0d9f457 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/bool_decision_feel.dmn @@ -0,0 +1,32 @@ + + + + + + + + + + Y Row Annotation + true + + + + + + N Row Annotation + false + + + + + + ELSE Row Annotation + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/date_decision.dmn b/tests/SpiffWorkflow/dmn/data/date_decision.dmn new file mode 100644 index 000000000..04e0e7802 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/date_decision.dmn @@ -0,0 +1,56 @@ + + + + + + + + + + >13.11<14.11 Row Annotation + + datetime.datetime(2017,11,13) <= ? 
<= datetime.datetime(2017,11,14,23,59,59) + + + "between 13.11 and 14.11" + + + + 111 Row Annotation + + datetime.datetime(2017,11,1,10) + + + "01.11" + + + + 311 Row Annotation + + datetime.datetime(2017,11,3) + + + "03.11" + + + + <3.11 Row Annotation + + < datetime.datetime(2017,11,3) + + + "before 03.11" + + + + >3.11 Row Annotation + + > datetime.datetime(2017,11,3) + + + "after 03.11" + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/date_decision_feel.dmn b/tests/SpiffWorkflow/dmn/data/date_decision_feel.dmn new file mode 100644 index 000000000..f8bac3d3c --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/date_decision_feel.dmn @@ -0,0 +1,46 @@ + + + + + + + + + + 13.11<14.11 Row Annotation]]> + + + + + + + 111 Row Annotation + + + + + + + 311 Row Annotation + + + + + + + + + + + + + + 3.11 Row Annotation]]> + date and time("2017-11-03T00:00:00")]]> + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/dict_decision.dmn b/tests/SpiffWorkflow/dmn/data/dict_decision.dmn new file mode 100644 index 000000000..4c26b6151 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/dict_decision.dmn @@ -0,0 +1,35 @@ + + + + + + + + + + allergies.keys() + + + + + They are allergic to peanuts + + mGender Description + "PEANUTS" in ? + + + "isPeanuts" + + + + They are not allergic to peanuts + + "PEANUTS" not in ? 
+ + + "IsNotPeanuts" + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/dict_decision_feel.dmn b/tests/SpiffWorkflow/dmn/data/dict_decision_feel.dmn new file mode 100644 index 000000000..22d550476 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/dict_decision_feel.dmn @@ -0,0 +1,35 @@ + + + + + + + + + + allergies.keys() + + + + + They are allergic to peanuts + + mGender Description + contains("PEANUTS") + + + "isPeanuts" + + + + They are not allergic to peanuts + + not contains("PEANUTS") + + + "IsNotPeanuts" + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/dict_dot_notation_decision.dmn b/tests/SpiffWorkflow/dmn/data/dict_dot_notation_decision.dmn new file mode 100644 index 000000000..42e75fbf1 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/dict_dot_notation_decision.dmn @@ -0,0 +1,35 @@ + + + + + + + + + + foods.spam.delicious + + + + + This person is lacking many critical decision making skills, or is a viking. + + mGender Description + True + + + "wrong" + + + + This person has a tongue, brain or sense of smell. + + False + + + "correct, spam is not delicious" + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/dict_dot_notation_decision_feel.dmn b/tests/SpiffWorkflow/dmn/data/dict_dot_notation_decision_feel.dmn new file mode 100644 index 000000000..81aca89f3 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/dict_dot_notation_decision_feel.dmn @@ -0,0 +1,35 @@ + + + + + + + + + + foods.spam.delicious + + + + + This person is lacking many critical decision making skills, or is a viking. + + mGender Description + True + + + "wrong" + + + + This person has a tongue, brain or sense of smell. 
+ + False + + + "correct, spam is not delicious" + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/dict_dot_notation_decision_weird_characters.dmn b/tests/SpiffWorkflow/dmn/data/dict_dot_notation_decision_weird_characters.dmn new file mode 100644 index 000000000..3ebb9dc4e --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/dict_dot_notation_decision_weird_characters.dmn @@ -0,0 +1,35 @@ + + + + + + + + + + odd_foods.SPAM_LIKE.delicious + + + + + This person is lacking many critical decision making skills, or is a viking. + + mGender Description + True + + + "wrong" + + + + This person has a tongue, brain or sense of smell. + + False + + + "correct, spam is not delicious" + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/dict_dot_notation_decision_weird_characters_feel.dmn b/tests/SpiffWorkflow/dmn/data/dict_dot_notation_decision_weird_characters_feel.dmn new file mode 100644 index 000000000..c7a9602ec --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/dict_dot_notation_decision_weird_characters_feel.dmn @@ -0,0 +1,35 @@ + + + + + + + + + + odd_foods.SPAM_LIKE.delicious + + + + + This person is lacking many critical decision making skills, or is a viking. + + mGender Description + true + + + "wrong" + + + + This person has a tongue, brain or sense of smell. + + false + + + "correct, spam is not delicious" + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/dmn_version_20151101_test.dmn b/tests/SpiffWorkflow/dmn/data/dmn_version_20151101_test.dmn new file mode 100644 index 000000000..c3ecf312e --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/dmn_version_20151101_test.dmn @@ -0,0 +1,32 @@ + + + + + + + + + + + + They are allergic to peanuts + + mGender Description + "PEANUTS" in ? + + + "isPeanuts" + + + + They are not allergic to peanuts + + "PEANUTS" not in ? 
+ + + "IsNotPeanuts" + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/dmn_version_20191111_test.dmn b/tests/SpiffWorkflow/dmn/data/dmn_version_20191111_test.dmn new file mode 100644 index 000000000..4d5c253c5 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/dmn_version_20191111_test.dmn @@ -0,0 +1,32 @@ + + + + + + + + + + + + They are allergic to peanuts + + mGender Description + "PEANUTS" in ? + + + "isPeanuts" + + + + They are not allergic to peanuts + + "PEANUTS" not in ? + + + "IsNotPeanuts" + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/dmn_version_fail_test.dmn b/tests/SpiffWorkflow/dmn/data/dmn_version_fail_test.dmn new file mode 100644 index 000000000..6bcee64f5 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/dmn_version_fail_test.dmn @@ -0,0 +1,32 @@ + + + + + + + + + + + + They are allergic to peanuts + + mGender Description + "PEANUTS" in ? + + + "isPeanuts" + + + + They are not allergic to peanuts + + "PEANUTS" not in ? + + + "IsNotPeanuts" + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/exclusive.dmn b/tests/SpiffWorkflow/dmn/data/exclusive.dmn new file mode 100644 index 000000000..95065d0e4 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/exclusive.dmn @@ -0,0 +1,31 @@ + + + + + + + sum([1 for x in exclusive if x.ExclusiveSpaceAMComputingID is None]) + + + + + No exclusive spaces without Area Monitor + + 0 + + + true + + + + More than one exclusive space without an Area Monitor + + > 0 + + + false + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/exclusive_feel.dmn b/tests/SpiffWorkflow/dmn/data/exclusive_feel.dmn new file mode 100644 index 000000000..902140306 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/exclusive_feel.dmn @@ -0,0 +1,31 @@ + + + + + + + sum([1 for x in exclusive if x.ExclusiveSpaceAMComputingID is None]) + + + + + No exclusive spaces without Area Monitor + + 0 + + + true + + + + More than one exclusive space without an Area Monitor + + > 0 + + + false + + + + + diff --git 
a/tests/SpiffWorkflow/dmn/data/integer_decision_comparison.dmn b/tests/SpiffWorkflow/dmn/data/integer_decision_comparison.dmn new file mode 100644 index 000000000..6ef46d6af --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/integer_decision_comparison.dmn @@ -0,0 +1,39 @@ + + + + + + + + + + 30 Row Annotation + 30 + + + + + + L Row Annotation + + + + + + + H Row Annotation + = 25]]> + + + + + + ELSE Row Annotation + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/integer_decision_comparison_feel.dmn b/tests/SpiffWorkflow/dmn/data/integer_decision_comparison_feel.dmn new file mode 100644 index 000000000..60ab51d1d --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/integer_decision_comparison_feel.dmn @@ -0,0 +1,39 @@ + + + + + + + + + + 30 Row Annotation + 30 + + + + + + L Row Annotation + + + + + + + H Row Annotation + = 25]]> + + + + + + ELSE Row Annotation + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/integer_decision_range_excl_inclusive.dmn b/tests/SpiffWorkflow/dmn/data/integer_decision_range_excl_inclusive.dmn new file mode 100644 index 000000000..8e6403643 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/integer_decision_range_excl_inclusive.dmn @@ -0,0 +1,29 @@ + + + + + + + + + + 100-110 ExclInclusive Annotation + + 100 < ? 
<= 110 + + + "100-110 ExclInclusive" + + + + ELSE Row Annotation + + + + + "ELSE" + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/integer_decision_range_excl_inclusive_feel.dmn b/tests/SpiffWorkflow/dmn/data/integer_decision_range_excl_inclusive_feel.dmn new file mode 100644 index 000000000..a7eb20279 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/integer_decision_range_excl_inclusive_feel.dmn @@ -0,0 +1,29 @@ + + + + + + + + + + 100-110 ExclInclusive Annotation + + ]100..110] + + + "100-110 ExclInclusive" + + + + ELSE Row Annotation + + + + + "ELSE" + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/integer_decision_range_exclusive.dmn b/tests/SpiffWorkflow/dmn/data/integer_decision_range_exclusive.dmn new file mode 100644 index 000000000..53dfb5217 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/integer_decision_range_exclusive.dmn @@ -0,0 +1,29 @@ + + + + + + + + + + 100-110 Exclusive Annotation + + 100 < ? < 110 + + + "100-110 Exclusive" + + + + ELSE Row Annotation + + + + + "ELSE" + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/integer_decision_range_exclusive_feel.dmn b/tests/SpiffWorkflow/dmn/data/integer_decision_range_exclusive_feel.dmn new file mode 100644 index 000000000..74200deef --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/integer_decision_range_exclusive_feel.dmn @@ -0,0 +1,25 @@ + + + + + + + + + + 100-110 Exclusive Annotation + ]100..110[ + + + + + + ELSE Row Annotation + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/integer_decision_range_incl_exclusive.dmn b/tests/SpiffWorkflow/dmn/data/integer_decision_range_incl_exclusive.dmn new file mode 100644 index 000000000..34d739bd2 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/integer_decision_range_incl_exclusive.dmn @@ -0,0 +1,29 @@ + + + + + + + + + + 100-110 InclExclusive Annotation + + 100 <= ? 
< 110 + + + "100-110 InclExclusive" + + + + ELSE Row Annotation + + + + + "ELSE" + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/integer_decision_range_incl_exclusive_feel.dmn b/tests/SpiffWorkflow/dmn/data/integer_decision_range_incl_exclusive_feel.dmn new file mode 100644 index 000000000..792d74793 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/integer_decision_range_incl_exclusive_feel.dmn @@ -0,0 +1,25 @@ + + + + + + + + + + 100-110 InclExclusive Annotation + [100..110[ + + + + + + ELSE Row Annotation + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/integer_decision_range_inclusive.dmn b/tests/SpiffWorkflow/dmn/data/integer_decision_range_inclusive.dmn new file mode 100644 index 000000000..27afab635 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/integer_decision_range_inclusive.dmn @@ -0,0 +1,29 @@ + + + + + + + + + + 100-110 Inclusive Annotation + + 100 <= ? <= 110 + + + "100-110 Inclusive" + + + + ELSE Row Annotation + + + + + "ELSE" + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/integer_decision_range_inclusive_feel.dmn b/tests/SpiffWorkflow/dmn/data/integer_decision_range_inclusive_feel.dmn new file mode 100644 index 000000000..d667fd18b --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/integer_decision_range_inclusive_feel.dmn @@ -0,0 +1,25 @@ + + + + + + + + + + 100-110 Inclusive Annotation + [100..110] + + + + + + ELSE Row Annotation + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/kwargs_parameter.dmn b/tests/SpiffWorkflow/dmn/data/kwargs_parameter.dmn new file mode 100644 index 000000000..351397cf4 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/kwargs_parameter.dmn @@ -0,0 +1,41 @@ + + + + + + + Gender + + + + + m Row Annotation + + mGender Description + "m" + + + "isM" + + + + f Row Annotation + + "f" + + + "isF" + + + + ELSE Row Annotation + + + + + "ELSE" + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/kwargs_parameter_feel.dmn b/tests/SpiffWorkflow/dmn/data/kwargs_parameter_feel.dmn new file mode 100644 index 
000000000..d470b04b0 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/kwargs_parameter_feel.dmn @@ -0,0 +1,41 @@ + + + + + + + Gender + + + + + m Row Annotation + + mGender Description + "m" + + + "isM" + + + + f Row Annotation + + "f" + + + "isF" + + + + ELSE Row Annotation + + + + + "ELSE" + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/list_decision.dmn b/tests/SpiffWorkflow/dmn/data/list_decision.dmn new file mode 100644 index 000000000..c3ecf312e --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/list_decision.dmn @@ -0,0 +1,32 @@ + + + + + + + + + + + + They are allergic to peanuts + + mGender Description + "PEANUTS" in ? + + + "isPeanuts" + + + + They are not allergic to peanuts + + "PEANUTS" not in ? + + + "IsNotPeanuts" + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/list_decision_feel.dmn b/tests/SpiffWorkflow/dmn/data/list_decision_feel.dmn new file mode 100644 index 000000000..7d993b612 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/list_decision_feel.dmn @@ -0,0 +1,32 @@ + + + + + + + + + + + + They are allergic to peanuts + + mGender Description + contains("PEANUTS") + + + "isPeanuts" + + + + They are not allergic to peanuts + + not contains("PEANUTS") + + + "IsNotPeanuts" + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/long_or_double_decision_comparison.dmn b/tests/SpiffWorkflow/dmn/data/long_or_double_decision_comparison.dmn new file mode 100644 index 000000000..ef74e30b6 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/long_or_double_decision_comparison.dmn @@ -0,0 +1,39 @@ + + + + + + + + + + 30.5 Row Annotation + 30.5 + + + + + + L Row Annotation + + + + + + + H Row Annotation + = 25.4]]> + + + + + + ELSE Row Annotation + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/long_or_double_decision_comparison_feel.dmn b/tests/SpiffWorkflow/dmn/data/long_or_double_decision_comparison_feel.dmn new file mode 100644 index 000000000..0ccf4a30d --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/long_or_double_decision_comparison_feel.dmn @@ 
-0,0 +1,39 @@ + + + + + + + + + + 30.5 Row Annotation + 30.5 + + + + + + L Row Annotation + + + + + + + H Row Annotation + = 25.4]]> + + + + + + ELSE Row Annotation + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/long_or_double_decision_range_excl_inclusive.dmn b/tests/SpiffWorkflow/dmn/data/long_or_double_decision_range_excl_inclusive.dmn new file mode 100644 index 000000000..d095d2147 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/long_or_double_decision_range_excl_inclusive.dmn @@ -0,0 +1,29 @@ + + + + + + + + + + 100.05-110.05 ExclInclusive Annotation + + Decimal('100.05') < ? <= Decimal('110.05') + + + "100.05-110.05 ExclInclusive" + + + + ELSE Row Annotation + + + + + "ELSE" + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/long_or_double_decision_range_excl_inclusive_feel.dmn b/tests/SpiffWorkflow/dmn/data/long_or_double_decision_range_excl_inclusive_feel.dmn new file mode 100644 index 000000000..7c16ef3d8 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/long_or_double_decision_range_excl_inclusive_feel.dmn @@ -0,0 +1,25 @@ + + + + + + + + + + 100.05-110.05 ExclInclusive Annotation + ]100.05..110.05] + + + + + + ELSE Row Annotation + + + + + + + + \ No newline at end of file diff --git a/tests/SpiffWorkflow/dmn/data/long_or_double_decision_range_exclusive.dmn b/tests/SpiffWorkflow/dmn/data/long_or_double_decision_range_exclusive.dmn new file mode 100644 index 000000000..1a8e79b5f --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/long_or_double_decision_range_exclusive.dmn @@ -0,0 +1,29 @@ + + + + + + + + + + 100.05-110.05 Exclusive Annotation + + Decimal('100.05') < ? 
< Decimal('110.05') + + + "100.05-110.05 Exclusive" + + + + ELSE Row Annotation + + + + + "ELSE" + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/long_or_double_decision_range_exclusive_feel.dmn b/tests/SpiffWorkflow/dmn/data/long_or_double_decision_range_exclusive_feel.dmn new file mode 100644 index 000000000..cd689a935 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/long_or_double_decision_range_exclusive_feel.dmn @@ -0,0 +1,25 @@ + + + + + + + + + + 100.05-110.05 Exclusive Annotation + ]100.05..110.05[ + + + + + + ELSE Row Annotation + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/long_or_double_decision_range_incl_exclusive.dmn b/tests/SpiffWorkflow/dmn/data/long_or_double_decision_range_incl_exclusive.dmn new file mode 100644 index 000000000..3cb396b28 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/long_or_double_decision_range_incl_exclusive.dmn @@ -0,0 +1,29 @@ + + + + + + + + + + 100.05-110.05 InclExclusive Annotation + + 100.05 <= ? < 110.05 + + + "100.05-110.05 InclExclusive" + + + + ELSE Row Annotation + + + + + "ELSE" + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/long_or_double_decision_range_incl_exclusive_feel.dmn b/tests/SpiffWorkflow/dmn/data/long_or_double_decision_range_incl_exclusive_feel.dmn new file mode 100644 index 000000000..ed30196af --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/long_or_double_decision_range_incl_exclusive_feel.dmn @@ -0,0 +1,25 @@ + + + + + + + + + + 100.05-110.05 InclExclusive Annotation + [100.05..110.05[ + + + + + + ELSE Row Annotation + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/long_or_double_decision_range_inclusive.dmn b/tests/SpiffWorkflow/dmn/data/long_or_double_decision_range_inclusive.dmn new file mode 100644 index 000000000..325d6134b --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/long_or_double_decision_range_inclusive.dmn @@ -0,0 +1,29 @@ + + + + + + + + + + 100.05-110.05 Inclusive Annotation + + Decimal('100.05') <= ? 
<= Decimal('110.05') + + + "100.05-110.05 Inclusive" + + + + ELSE Row Annotation + + + + + "ELSE" + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/long_or_double_decision_range_inclusive_feel.dmn b/tests/SpiffWorkflow/dmn/data/long_or_double_decision_range_inclusive_feel.dmn new file mode 100644 index 000000000..04a5515ab --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/long_or_double_decision_range_inclusive_feel.dmn @@ -0,0 +1,25 @@ + + + + + + + + + + 100.05-110.05 Inclusive Annotation + [100.05..110.05] + + + + + + ELSE Row Annotation + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/string_decision.dmn b/tests/SpiffWorkflow/dmn/data/string_decision.dmn new file mode 100644 index 000000000..4860f018c --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/string_decision.dmn @@ -0,0 +1,50 @@ + + + + + + + + + + + + m Row Annotation + + mGender Description + "m" + + + "isM" + + + + f Row Annotation + + "f" + + + "isF" + + + + NOT x Row Annotation + + ? != "x" + + + "notX" + + + + ELSE Row Annotation + + + + + "ELSE" + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/string_decision_feel.dmn b/tests/SpiffWorkflow/dmn/data/string_decision_feel.dmn new file mode 100644 index 000000000..b18d9b053 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/string_decision_feel.dmn @@ -0,0 +1,43 @@ + + + + + + + + + + + + m Row Annotation + + mGender Description + + + + + + + f Row Annotation + + + + + + + NOT x Row Annotation + + + + + + + ELSE Row Annotation + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/string_integer_decision.dmn b/tests/SpiffWorkflow/dmn/data/string_integer_decision.dmn new file mode 100644 index 000000000..3fb05c367 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/string_integer_decision.dmn @@ -0,0 +1,71 @@ + + + + + + + + + + + + + + m30 Row Annotation + + mGender Description + + + 30 + + + + + + mL Row Annotation + + + + + + + + + mH Row Annotation + + + = 25]]> + + + + + + fL Row Annotation + + + + + + + + + fH Row Annotation + + + = 
20]]> + + + + + + ELSE Row Annotation + + + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/string_integer_decision_feel.dmn b/tests/SpiffWorkflow/dmn/data/string_integer_decision_feel.dmn new file mode 100644 index 000000000..e21f1ca05 --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/string_integer_decision_feel.dmn @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + m30 Row Annotation + + mGender Description + + + 30 + + + + + + mL Row Annotation + + + + + + + + + mH Row Annotation + + + = 25]]> + + + + + + fL Row Annotation + + + + + + + + + fH Row Annotation + + + = 20]]> + + + + + + ELSE Row Annotation + + + + + + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/test_integer_decision.dmn b/tests/SpiffWorkflow/dmn/data/test_integer_decision.dmn new file mode 100644 index 000000000..9986d16ad --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/test_integer_decision.dmn @@ -0,0 +1,49 @@ + + + + + + + x + + + + + A Annotation + + 3 + + + "A" + + + + B Annotation + + 4 + + + "B" + + + + C Annotation + + 5 + + + "C" + + + + D Annotation + + >= 6 + + + "D" + + + + + diff --git a/tests/SpiffWorkflow/dmn/data/test_integer_decision_feel.dmn b/tests/SpiffWorkflow/dmn/data/test_integer_decision_feel.dmn new file mode 100644 index 000000000..86b41068c --- /dev/null +++ b/tests/SpiffWorkflow/dmn/data/test_integer_decision_feel.dmn @@ -0,0 +1,49 @@ + + + + + + + x + + + + + A Annotation + + 3 + + + "A" + + + + B Annotation + + 4 + + + "B" + + + + C Annotation + + 5 + + + "C" + + + + D Annotation + + >= 6 + + + "D" + + + + + diff --git a/tests/SpiffWorkflow/run_suite.py b/tests/SpiffWorkflow/run_suite.py index 07d8282a8..64c201169 100755 --- a/tests/SpiffWorkflow/run_suite.py +++ b/tests/SpiffWorkflow/run_suite.py @@ -93,21 +93,26 @@ def suite(): return load_suite([os.path.basename(f) for f in files]) -def recursive_suite(): - return load_suite(find('.', '*Test.py')) +def recursive_suite(glob): + return load_suite(find('.', glob)) if __name__ == '__main__': # Parse CLI 
options. if len(sys.argv) == 1: + filename = '*Test.py' verbosity = 2 elif len(sys.argv) == 2: - verbosity = int(sys.argv[1]) + filename = sys.argv[1] + verbosity = 2 + elif len(sys.argv) == 3: + filename = sys.argv[1] + verbosity = int(sys.argv[2]) else: - print('Syntax:', sys.argv[0], '[verbosity]') + print('Syntax:', sys.argv[0], '[testfile] [verbosity]') print('Default verbosity is 2') sys.exit(2) # Run. results = unittest.TextTestRunner( - verbosity=verbosity).run(recursive_suite()) + verbosity=verbosity).run(recursive_suite(filename)) sys.exit(0 if results.wasSuccessful() else 1) diff --git a/tests/SpiffWorkflow/serializer/baseTest.py b/tests/SpiffWorkflow/serializer/baseTest.py index ae9e9c46e..8241f9e90 100644 --- a/tests/SpiffWorkflow/serializer/baseTest.py +++ b/tests/SpiffWorkflow/serializer/baseTest.py @@ -45,7 +45,6 @@ def _test_roundtrip_serialization(self, obj): except TaskNotSupportedError as e: warnings.warn('unsupported task spec: ' + str(e)) return - self.assertIsInstance(serialized1, self.return_type) self.assertIsInstance(serialized2, self.return_type) serialized1 = self._prepare_result(serialized1) diff --git a/tests/SpiffWorkflow/specs/DeepMergeTest.py b/tests/SpiffWorkflow/specs/DeepMergeTest.py new file mode 100644 index 000000000..d39ffb8ee --- /dev/null +++ b/tests/SpiffWorkflow/specs/DeepMergeTest.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, absolute_import, division + +from __future__ import division +import os +import sys +import unittest + +from SpiffWorkflow.util.deep_merge import DeepMerge + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) + +from tests.SpiffWorkflow.util import run_workflow +from .TaskSpecTest import TaskSpecTest +from SpiffWorkflow.specs import Transform, Simple + + +class DeepMergeTest(TaskSpecTest): + CORRELATE = DeepMerge + + def testBasicMerge(self): + """ + Tests that we can merge one dictionary into another dictionary deeply + and that 
dot-notation is correctly parsed and processed. + """ + a = {"fruit": {"apples": "tasty"}} + b = {"fruit": {"oranges": "also tasty"}} + c = DeepMerge.merge(a, b) + self.assertEqual({"fruit": + {"apples": "tasty", + "oranges": "also tasty" + } + }, c) + + + def testOutOfOrderMerge(self): + a = {"foods": [{"fruit": {"apples": "tasty", "oranges": "also tasty"}}]} + b = {"foods": [{"fruit": {"oranges": "also tasty", "apples": "tasty"}}, + {"canned meats": {"spam": "nope."}}]} + c = DeepMerge.merge(a, b) + self.assertEqual({"foods": [ + {"fruit": + {"apples": "tasty", + "oranges": "also tasty" + } + }, + {"canned meats": + {"spam": "nope."} + } + ]}, c) + + def testMixOfArrayTypes(self): + a = {"foods": [{"fruit": {"apples": "tasty", "oranges": "also tasty"}}, + {"canned_meats":["spam", "more spam"]}]} + b = {"foods": [{"fruit": {"apples": "tasty", "oranges": "also tasty"}}, + {"canned_meats":["wonderful spam", "spam", "more spam"]}]} + + c = DeepMerge.merge(a, b) + + self.assertEqual({"foods": [{"fruit": {"apples": "tasty", "oranges": "also tasty"}}, + {"canned_meats":["spam", "more spam", "wonderful spam"]}]}, c) + + + +def suite(): + return unittest.TestLoader().loadTestsFromTestCase(DeepMergeTest) +if __name__ == '__main__': + unittest.TextTestRunner(verbosity=2).run(suite()) diff --git a/tests/SpiffWorkflow/specs/WorkflowSpecTest.py b/tests/SpiffWorkflow/specs/WorkflowSpecTest.py index 71b5f866d..aaff83b32 100644 --- a/tests/SpiffWorkflow/specs/WorkflowSpecTest.py +++ b/tests/SpiffWorkflow/specs/WorkflowSpecTest.py @@ -110,6 +110,8 @@ def testValidate(self): self.assertIn("Found loop with 'First': First->Second then 'First' " "again", results) + def testGetTaskSpecFromId(self): + pass def suite(): return unittest.TestLoader().loadTestsFromTestCase(WorkflowSpecTest)