diff --git a/.env b/.env
index f8be5532..73fbdf63 100755
--- a/.env
+++ b/.env
@@ -70,7 +70,7 @@ SHUFFLE_SWARM_BRIDGE_DEFAULT_MTU=1500 # 1500 by default
# Used for auto-cleanup of containers. REALLY important at scale. Set to false to see all container info.
SHUFFLE_MEMCACHED=
SHUFFLE_CONTAINER_AUTO_CLEANUP=true
-SHUFFLE_ORBORUS_EXECUTION_CONCURRENCY=3 # The amount of concurrent executions Orborus can handle. This is a soft limit, but it's recommended to keep it low.
+SHUFFLE_ORBORUS_EXECUTION_CONCURRENCY=5 # The amount of concurrent executions Orborus can handle. This is a soft limit, but it's recommended to keep it low.
SHUFFLE_HEALTHCHECK_DISABLED=false
SHUFFLE_ELASTIC=true
SHUFFLE_LOGS_DISABLED=false
@@ -93,4 +93,4 @@ SHUFFLE_OPENSEARCH_PROXY=
SHUFFLE_OPENSEARCH_INDEX_PREFIX=
SHUFFLE_OPENSEARCH_SKIPSSL_VERIFY=true
-DEBUG_MODE=false
\ No newline at end of file
+DEBUG_MODE=false
diff --git a/.github/workflows/dockerbuild.yaml b/.github/workflows/dockerbuild.yaml
index b3c546f1..ca750fa5 100644
--- a/.github/workflows/dockerbuild.yaml
+++ b/.github/workflows/dockerbuild.yaml
@@ -3,7 +3,7 @@ name: dockerbuild
on:
push:
branches:
- - main
+ - 1.4.0
paths:
- "**"
- "!.github/**"
@@ -77,9 +77,9 @@ jobs:
cache-to: type=local,dest=/tmp/.buildx-cache
tags: |
ghcr.io/shuffle/shuffle-${{ matrix.app }}:${{ matrix.version }}
- ghcr.io/shuffle/shuffle-${{ matrix.app }}:latest
+ ghcr.io/shuffle/shuffle-${{ matrix.app }}:nightly
${{ secrets.DOCKERHUB_USERNAME }}/shuffle-${{ matrix.app }}:${{ matrix.version }}
- ${{ secrets.DOCKERHUB_USERNAME }}/shuffle-${{ matrix.app }}:latest
+ ${{ secrets.DOCKERHUB_USERNAME }}/shuffle-${{ matrix.app }}:nightly
- name: Image digest
run: echo ${{ steps.docker_build.outputs.digest }}
diff --git a/backend/app_sdk/app_base.py b/backend/app_sdk/app_base.py
index 298be08f..003f4459 100755
--- a/backend/app_sdk/app_base.py
+++ b/backend/app_sdk/app_base.py
@@ -302,9 +302,11 @@ def __init__(self, redis=None, logger=None, console_logger=None):#, docker_clien
self.authorization = os.getenv("AUTHORIZATION", "")
self.current_execution_id = os.getenv("EXECUTIONID", "")
self.full_execution = os.getenv("FULL_EXECUTION", "")
- self.start_time = int(time.time())
self.result_wrapper_count = 0
+        # Record start time with nanosecond precision (time_ns), not seconds
+        self.start_time = int(time.time_ns())
+
self.action_result = {
"action": self.action,
"authorization": self.authorization,
@@ -312,9 +314,24 @@ def __init__(self, redis=None, logger=None, console_logger=None):#, docker_clien
"result": f"",
"started_at": self.start_time,
"status": "",
- "completed_at": int(time.time()),
+ "completed_at": int(time.time_ns()),
+ }
+
+ self.proxy_config = {
+ "http": os.getenv("HTTP_PROXY", ""),
+ "https": os.getenv("HTTPS_PROXY", ""),
+ "no_proxy": os.getenv("NO_PROXY", ""),
}
+ if len(os.getenv("SHUFFLE_INTERNAL_HTTP_PROXY", "")) > 0:
+ self.proxy_config["http"] = os.getenv("SHUFFLE_INTERNAL_HTTP_PROXY", "")
+
+ if len(os.getenv("SHUFFLE_INTERNAL_HTTPS_PROXY", "")) > 0:
+            self.proxy_config["https"] = os.getenv("SHUFFLE_INTERNAL_HTTPS_PROXY", "")
+
+ if len(os.getenv("SHUFFLE_INTERNAL_NO_PROXY", "")) > 0:
+ self.proxy_config["no_proxy"] = os.getenv("SHUFFLE_INTERNAL_NO_PROXY", "")
+
if isinstance(self.action, str):
try:
self.action = json.loads(self.action)
@@ -468,7 +485,7 @@ def send_result(self, action_result, headers, stream_path):
# Try it with some magic
- action_result["completed_at"] = int(time.time())
+ action_result["completed_at"] = int(time.time_ns())
self.logger.info(f"""[DEBUG] Inside Send result with status {action_result["status"]}""")
#if isinstance(action_result,
@@ -512,7 +529,7 @@ def send_result(self, action_result, headers, stream_path):
sleeptime = float(random.randint(0, 10) / 10)
try:
- ret = requests.post(url, headers=headers, json=action_result, timeout=10, verify=False)
+ ret = requests.post(url, headers=headers, json=action_result, timeout=10, verify=False, proxies=self.proxy_config)
self.logger.info(f"[DEBUG] Result: {ret.status_code} (break on 200 or 201)")
if ret.status_code == 200 or ret.status_code == 201:
@@ -520,11 +537,29 @@ def send_result(self, action_result, headers, stream_path):
break
else:
self.logger.info(f"[ERROR] Bad resp {ret.status_code}: {ret.text}")
+ time.sleep(sleeptime)
+
+                # Proxy error: drop the proxy config and retry without it
+ except requests.exceptions.ProxyError as e:
+ self.logger.info(f"[ERROR] Proxy error: {e}")
+ self.proxy_config = {}
+ continue
except requests.exceptions.RequestException as e:
self.logger.info(f"[DEBUG] Request problem: {e}")
time.sleep(sleeptime)
+ # Check if we have a read timeout. If we do, exit as we most likely sent the result without getting a good result
+ if "Read timed out" in str(e):
+ self.logger.warning(f"[WARNING] Read timed out: {e}")
+ finished = True
+ break
+
+ if "Max retries exceeded with url" in str(e):
+ self.logger.warning(f"[WARNING] Max retries exceeded with url: {e}")
+ finished = True
+ break
+
#time.sleep(5)
continue
except TimeoutError as e:
@@ -560,7 +595,6 @@ def send_result(self, action_result, headers, stream_path):
action_result["result"] = json.dumps({"success": False, "reason": "POST error: Failed connecting to %s over 10 retries to the backend" % url})
self.logger.info(f"[ERROR] Before typeerror stream result - NOT finished after 10 requests")
- #ret = requests.post("%s%s" % (self.base_url, stream_path), headers=headers, json=action_result, verify=False)
self.send_result(action_result, {"Content-Type": "application/json", "Authorization": "Bearer %s" % self.authorization}, "/api/v1/streams")
return
@@ -572,7 +606,7 @@ def send_result(self, action_result, headers, stream_path):
action_result["result"] = json.dumps({"success": False, "reason": "Typeerror when sending to backend URL %s" % url})
self.logger.info(f"[DEBUG] Before typeerror stream result: {e}")
- ret = requests.post("%s%s" % (self.base_url, stream_path), headers=headers, json=action_result, verify=False)
+ ret = requests.post("%s%s" % (self.base_url, stream_path), headers=headers, json=action_result, verify=False, proxies=self.proxy_config)
#self.logger.info(f"[DEBUG] Result: {ret.status_code}")
#if ret.status_code != 200:
# pr
@@ -701,7 +735,7 @@ def validate_unique_fields(self, params):
#self.logger.info(f"RET: {ret.text}")
#self.logger.info(f"ID: {ret.status_code}")
url = f"{self.url}/api/v1/orgs/{org_id}/validate_app_values"
- ret = requests.post(url, json=data, verify=False)
+ ret = requests.post(url, json=data, verify=False, proxies=self.proxy_config)
if ret.status_code == 200:
json_value = ret.json()
if len(json_value["found"]) > 0:
@@ -1011,7 +1045,7 @@ def run_recursed_items(self, func, baseparams, loop_wrapper):
"result": f"All {len(param_multiplier)} values were non-unique",
"started_at": self.start_time,
"status": "SKIPPED",
- "completed_at": int(time.time()),
+ "completed_at": int(time.time_ns()),
}
self.send_result(self.action_result, {"Content-Type": "application/json", "Authorization": "Bearer %s" % self.authorization}, "/api/v1/streams")
@@ -1175,7 +1209,7 @@ def get_file_category_ids(self, category):
"User-Agent": "Shuffle 1.1.0",
}
- ret = requests.get("%s%s" % (self.url, get_path), headers=headers, verify=False)
+ ret = requests.get("%s%s" % (self.url, get_path), headers=headers, verify=False, proxies=self.proxy_config)
return ret.json()
#if ret1.status_code != 200:
# return {
@@ -1201,7 +1235,7 @@ def get_file_namespace(self, namespace):
"User-Agent": "Shuffle 1.1.0",
}
- ret1 = requests.get("%s%s" % (self.url, get_path), headers=headers, verify=False)
+ ret1 = requests.get("%s%s" % (self.url, get_path), headers=headers, verify=False, proxies=self.proxy_config)
if ret1.status_code != 200:
return None
@@ -1264,7 +1298,7 @@ def get_file(self, value):
"User-Agent": "Shuffle 1.1.0",
}
- ret1 = requests.get("%s%s" % (self.url, get_path), headers=headers, verify=False)
+ ret1 = requests.get("%s%s" % (self.url, get_path), headers=headers, verify=False, proxies=self.proxy_config)
self.logger.info("RET1 (file get): %s" % ret1.text)
if ret1.status_code != 200:
returns.append({
@@ -1275,7 +1309,7 @@ def get_file(self, value):
continue
content_path = "/api/v1/files/%s/content?execution_id=%s" % (item, full_execution["execution_id"])
- ret2 = requests.get("%s%s" % (self.url, content_path), headers=headers, verify=False)
+ ret2 = requests.get("%s%s" % (self.url, content_path), headers=headers, verify=False, proxies=self.proxy_config)
self.logger.info("RET2 (file get) done")
if ret2.status_code == 200:
tmpdata = ret1.json()
@@ -1311,7 +1345,7 @@ def delete_cache(self, key):
"key": key,
}
- response = requests.post(url, json=data, verify=False)
+ response = requests.post(url, json=data, verify=False, proxies=self.proxy_config)
try:
allvalues = response.json()
return json.dumps(allvalues)
@@ -1332,7 +1366,7 @@ def set_cache(self, key, value):
"value": str(value),
}
- response = requests.post(url, json=data, verify=False)
+ response = requests.post(url, json=data, verify=False, proxies=self.proxy_config)
try:
allvalues = response.json()
allvalues["key"] = key
@@ -1354,7 +1388,7 @@ def get_cache(self, key):
"key": key,
}
- value = requests.post(url, json=data, verify=False)
+ value = requests.post(url, json=data, verify=False, proxies=self.proxy_config)
try:
allvalues = value.json()
self.logger.info("VAL1: ", allvalues)
@@ -1408,7 +1442,7 @@ def set_files(self, infiles):
self.logger.info(f"KeyError in file setup: {e}")
pass
- ret = requests.post("%s%s" % (self.url, create_path), headers=headers, json=data, verify=False)
+ ret = requests.post("%s%s" % (self.url, create_path), headers=headers, json=data, verify=False, proxies=self.proxy_config)
#self.logger.info(f"Ret CREATE: {ret.text}")
cur_id = ""
if ret.status_code == 200:
@@ -1440,7 +1474,7 @@ def set_files(self, infiles):
files={"shuffle_file": (filename, curfile["data"])}
#open(filename,'rb')}
- ret = requests.post("%s%s" % (self.url, upload_path), files=files, headers=new_headers, verify=False)
+ ret = requests.post("%s%s" % (self.url, upload_path), files=files, headers=new_headers, verify=False, proxies=self.proxy_config)
self.logger.info("Ret UPLOAD: %s" % ret.text)
self.logger.info("Ret2 UPLOAD: %d" % ret.status_code)
@@ -1457,7 +1491,7 @@ def execute_action(self, action):
"authorization": self.authorization,
"execution_id": self.current_execution_id,
"result": "",
- "started_at": int(time.time()),
+ "started_at": int(time.time_ns()),
"status": "EXECUTING"
}
@@ -1537,7 +1571,8 @@ def execute_action(self, action):
"%s/api/v1/streams/results" % (self.base_url),
headers=headers,
json=tmpdata,
- verify=False
+ verify=False,
+ proxies=self.proxy_config,
)
if ret.status_code == 200:
@@ -1964,6 +1999,8 @@ def parse_wrapper_start(data, self):
# Parses JSON loops and such down to the item you're looking for
# $nodename.#.id
# $nodename.data.#min-max.info.id
+ # $nodename.data.#1-max.info.id
+ # $nodename.data.#min-1.info.id
def recurse_json(basejson, parsersplit):
match = "#([0-9a-z]+):?-?([0-9a-z]+)?#?"
try:
@@ -2080,6 +2117,8 @@ def recurse_json(basejson, parsersplit):
if (basejson[value].endswith("}") and basejson[value].endswith("}")) or (basejson[value].startswith("[") and basejson[value].endswith("]")):
basejson = json.loads(basejson[value])
else:
+ # Should we sanitize here?
+ self.logger.info("[DEBUG] VALUE TO SANITIZE?: %s" % basejson[value])
return str(basejson[value]), False
except json.decoder.JSONDecodeError as e:
return str(basejson[value]), False
@@ -2128,7 +2167,6 @@ def get_json_value(execution_data, input_data):
actionname_lower = parsersplit[0][1:].lower()
#Actionname: Start_node
- #print(f"\n[INFO] Actionname: {actionname_lower}")
# 1. Find the action
baseresult = ""
@@ -2466,7 +2504,7 @@ def parse_liquid(template, self):
self.action_result["result"] = f"Failed to parse LiquidPy: {error_msg}"
print("[WARNING] Failed to set LiquidPy result")
- self.action_result["completed_at"] = int(time.time())
+ self.action_result["completed_at"] = int(time.time_ns())
self.send_result(self.action_result, headers, stream_path)
self.logger.info(f"[ERROR] Sent FAILURE response to backend due to : {e}")
@@ -2558,6 +2596,27 @@ def recurse_cleanup_script(data):
return data
+ # Makes JSON string values into valid strings in JSON
+ # Mainly by removing newlines and such
+ def fix_json_string_value(value):
+ try:
+ value = value.replace("\r\n", "\\r\\n")
+ value = value.replace("\n", "\\n")
+ value = value.replace("\r", "\\r")
+
+ # Fix quotes in the string
+ value = value.replace("\\\"", "\"")
+ value = value.replace("\"", "\\\"")
+
+ value = value.replace("\\\'", "\'")
+ value = value.replace("\'", "\\\'")
+ except Exception as e:
+ print(f"[WARNING] Failed to fix json string value: {e}")
+
+ return value
+
+
+
# Parses parameters sent to it and returns whether it did it successfully with the values found
def parse_params(action, fullexecution, parameter, self):
# Skip if it starts with $?
@@ -2621,6 +2680,20 @@ def parse_params(action, fullexecution, parameter, self):
value, is_loop = get_json_value(fullexecution, to_be_replaced)
#self.logger.info(f"\n\nType of value: {type(value)}")
if isinstance(value, str):
+ # Could we take it here?
+ self.logger.info(f"[DEBUG] Got value %s for parameter {paramname}" % value)
+ # Should check if there is are quotes infront of and after the to_be_replaced
+ # If there are, then we need to sanitize the value
+ # 1. Look for the to_be_replaced in the data
+ # 2. Check if there is a quote infront of it and also if there are {} in the data to validate JSON
+ # 3. If there are, sanitize!
+ #if data.find(f'"{to_be_replaced}"') != -1 and data.find("{") != -1 and data.find("}") != -1:
+ # print(f"[DEBUG] Found quotes infront of and after {to_be_replaced}! This probably means it's JSON and should be sanitized.")
+ # returnvalue = fix_json_string_value(value)
+ # value = returnvalue
+
+
+
parameter["value"] = parameter["value"].replace(to_be_replaced, value)
elif isinstance(value, dict) or isinstance(value, list):
# Changed from JSON dump to str() 28.05.2021
@@ -2633,7 +2706,7 @@ def parse_params(action, fullexecution, parameter, self):
# parameter["value"] = parameter["value"].replace(to_be_replaced, json.dumps(value))
# self.logger.info("Failed parsing value as string?")
else:
- self.logger.info("[WARNING] Unknown type %s" % type(value))
+ self.logger.error("[ERROR] Unknown type %s" % type(value))
try:
parameter["value"] = parameter["value"].replace(to_be_replaced, json.dumps(value))
except json.decoder.JSONDecodeError as e:
@@ -2740,7 +2813,7 @@ def parse_params(action, fullexecution, parameter, self):
return "", parameter["value"], is_loop
def run_validation(sourcevalue, check, destinationvalue):
- print("[DEBUG] Checking %s %s %s" % (sourcevalue, check, destinationvalue))
+ self.logger.info("[DEBUG] Checking %s '%s' %s" % (sourcevalue, check, destinationvalue))
if check == "=" or check.lower() == "equals":
if str(sourcevalue).lower() == str(destinationvalue).lower():
@@ -2758,15 +2831,16 @@ def run_validation(sourcevalue, check, destinationvalue):
if destinationvalue.lower() in sourcevalue.lower():
return True
- elif check.lower() == "is empty":
- if len(sourcevalue) == 0:
- return True
+ elif check.lower() == "is empty" or check.lower() == "is_empty":
+ try:
+ if len(json.loads(sourcevalue)) == 0:
+ return True
+ except Exception as e:
+ self.logger.info(f"[WARNING] Failed to check if empty as list: {e}")
- if str(sourcevalue) == 0:
+ if len(str(sourcevalue)) == 0:
return True
- return False
-
elif check.lower() == "contains_any_of":
newvalue = [destinationvalue.lower()]
if "," in destinationvalue:
@@ -2782,7 +2856,6 @@ def run_validation(sourcevalue, check, destinationvalue):
print("[INFO] Found %s in %s" % (item, sourcevalue))
return True
- return False
elif check.lower() == "larger than" or check.lower() == "bigger than":
try:
if str(sourcevalue).isdigit() and str(destinationvalue).isdigit():
@@ -2790,9 +2863,23 @@ def run_validation(sourcevalue, check, destinationvalue):
return True
except AttributeError as e:
- print("[WARNING] Condition larger than failed with values %s and %s: %s" % (sourcevalue, destinationvalue, e))
- return False
+ self.logger.info("[WARNING] Condition larger than failed with values %s and %s: %s" % (sourcevalue, destinationvalue, e))
+
+ try:
+ destinationvalue = len(json.loads(destinationvalue))
+ except Exception as e:
+ self.logger.info(f"[WARNING] Failed to convert destination to list: {e}")
+ try:
+ # Check if it's a list in autocast and if so, check the length
+ if len(json.loads(sourcevalue)) > int(destinationvalue):
+ return True
+ except Exception as e:
+ self.logger.info(f"[WARNING] Failed to check if larger than as list: {e}")
+
+
elif check.lower() == "smaller than" or check.lower() == "less than":
+ self.logger.info("In smaller than check: %s %s" % (sourcevalue, destinationvalue))
+
try:
if str(sourcevalue).isdigit() and str(destinationvalue).isdigit():
if int(sourcevalue) < int(destinationvalue):
@@ -2800,12 +2887,27 @@ def run_validation(sourcevalue, check, destinationvalue):
except AttributeError as e:
print("[WARNING] Condition smaller than failed with values %s and %s: %s" % (sourcevalue, destinationvalue, e))
- return False
+
+ try:
+ destinationvalue = len(json.loads(destinationvalue))
+ except Exception as e:
+ self.logger.info(f"[WARNING] Failed to convert destination to list: {e}")
+
+ try:
+ # Check if it's a list in autocast and if so, check the length
+ if len(json.loads(sourcevalue)) < int(destinationvalue):
+ return True
+ except Exception as e:
+ self.logger.info(f"[WARNING] Failed to check if smaller than as list: {e}")
+
elif check.lower() == "re" or check.lower() == "matches regex":
try:
- found = re.search(destinationvalue, sourcevalue)
+ found = re.search(str(destinationvalue), str(sourcevalue))
except re.error as e:
- print("[WARNING] Regex error in condition: %s" % e)
+ print("[WARNING] Regex error in condition (re.error): %s" % e)
+ return False
+ except Exception as e:
+ print("[WARNING] Regex error in condition (catchall): %s" % e)
return False
if found == None:
@@ -2813,7 +2915,7 @@ def run_validation(sourcevalue, check, destinationvalue):
return True
else:
- print("[DEBUG] Condition: can't handle %s yet. Setting to true" % check)
+ self.logger.error("[DEBUG] Condition: can't handle %s yet. Setting to true" % check)
return False
@@ -2826,12 +2928,40 @@ def check_branch_conditions(action, fullexecution, self):
return True, ""
# Startnode should always run - no need to check incoming
+ # Removed November 2023 due to people wanting startnode to also check
+    # This is to make it possible to evaluate branch conditions into the startnode as well
try:
if action["id"] == fullexecution["start"]:
return True, ""
+
+ # Need to validate if the source is a trigger or not
+ # need to remove branches that are not from trigger to the startnode to make it all work
+ #if "workflow" in fullexecution["workflow"] and "triggers" in fullexecution["workflow"]:
+ # cnt = 0
+ # found_branch_indexes = []
+ # for branch in fullexecution["workflow"]["branches"]:
+ # if branch["destination_id"] != action["id"]:
+ # continue
+
+ # # Check if the source is a trigger
+ # # if we can't find it as trigger, remove the branch
+ # print("Found relevant branch: %s" % branch)
+ # for action in fullexecution["workflow"]["actions"]:
+ # if action["id"] == branch["source_id"]:
+ # found_branch_indexes.append(branch["source_id"])
+ # break
+
+ # if len(found_branch_indexes) > 0:
+ # for i in sorted(found_branch_indexes, reverse=True):
+ # fullexecution["workflow"]["branches"].pop(i)
+
+ # print("Removed %d branches" % len(found_branch_indexes))
+ #else:
+ # print("[WARNING] No branches or triggers found in fullexecution for startnode")
except Exception as error:
self.logger.info(f"[WARNING] Failed checking startnode: {error}")
- return True, ""
+ #return True, ""
+ #return True, ""
available_checks = [
"=",
@@ -2850,6 +2980,8 @@ def check_branch_conditions(action, fullexecution, self):
"contains_any_of",
"re",
"matches regex",
+ "is empty",
+ "is_empty",
]
relevantbranches = []
@@ -2909,7 +3041,7 @@ def check_branch_conditions(action, fullexecution, self):
destinationvalue = parse_wrapper_start(destinationvalue, self)
if not condition["condition"]["value"] in available_checks:
- self.logger.warning("Skipping %s %s %s because %s is invalid." % (sourcevalue, condition["condition"]["value"], destinationvalue, condition["condition"]["value"]))
+ self.logger.error("[ERROR] Skipping '%s' -> %s -> '%s' because %s is invalid." % (sourcevalue, condition["condition"]["value"], destinationvalue, condition["condition"]["value"]))
continue
# Configuration = negated because of WorkflowAppActionParam..
@@ -2974,7 +3106,7 @@ def check_branch_conditions(action, fullexecution, self):
self.logger.info("Failed one or more branch conditions.")
self.action_result["result"] = tmpresult
self.action_result["status"] = "SKIPPED"
- self.action_result["completed_at"] = int(time.time())
+ self.action_result["completed_at"] = int(time.time_ns())
self.send_result(self.action_result, headers, stream_path)
return
@@ -3460,7 +3592,7 @@ def check_branch_conditions(action, fullexecution, self):
self.logger.info("[WARNING] SHOULD STOP EXECUTION BECAUSE FIELDS AREN'T UNIQUE")
self.action_result["status"] = "SKIPPED"
self.action_result["result"] = f"A non-unique value was found"
- self.action_result["completed_at"] = int(time.time())
+ self.action_result["completed_at"] = int(time.time_ns())
self.send_result(self.action_result, headers, stream_path)
return
@@ -3522,6 +3654,7 @@ def check_branch_conditions(action, fullexecution, self):
timeout_env = os.getenv("SHUFFLE_APP_SDK_TIMEOUT", timeout)
try:
timeout = int(timeout_env)
+ self.logger.info(f"[DEBUG] Timeout set to {timeout} seconds")
except Exception as e:
self.logger.info(f"[WARNING] Failed parsing timeout to int: {e}")
@@ -3537,7 +3670,7 @@ def check_branch_conditions(action, fullexecution, self):
future.cancel()
newres = json.dumps({
"success": False,
- "reason": "Timeout error within %d seconds. This happens if we can't reach or use the API you're trying to use within the time limit." % timeout,
+ "reason": "Timeout error within %d seconds (1). This happens if we can't reach or use the API you're trying to use within the time limit. Configure SHUFFLE_APP_SDK_TIMEOUT=100 in Orborus to increase it to 100 seconds. Not changeable for cloud." % timeout,
"exception": str(e),
})
@@ -3550,40 +3683,13 @@ def check_branch_conditions(action, fullexecution, self):
except concurrent.futures.TimeoutError as e:
newres = json.dumps({
"success": False,
- "reason": "Timeout error within %d seconds (2). This happens if we can't reach or use the API you're trying to use within the time limit" % timeout
+ "reason": "Timeout error within %d seconds (2). This happens if we can't reach or use the API you're trying to use within the time limit. Configure SHUFFLE_APP_SDK_TIMEOUT=100 in Orborus to increase it to 100 seconds. Not changeable for cloud." % timeout,
})
break
-
-
-
- #thread = threading.Thread(target=func, args=(**params,))
- #thread.start()
-
- #thread.join(timeout)
-
- #if thread.is_alive():
- # # The thread is still running, so we need to stop it
- # # You can handle this as needed, such as raising an exception
- # timeout_handler()
-
-
- #with Timeout(timeout):
- # newres = func(**params)
- # break
- #except Timeout.Timeout as e:
- # self.logger.info(f"[DEBUG] Timeout error: {e}")
- # newres = json.dumps({
- # "success": False,
- # "reason": "Timeout error within %d seconds. This typically happens if we can't reach the API you're trying to reach." % timeout,
- # "exception": str(e),
- # })
-
- # break
-
except TypeError as e:
newres = ""
- self.logger.info(f"[DEBUG] Got exec type error: {e}")
+ self.logger.info(f"[ERROR] Got function exec type error: {e}")
try:
e = json.loads(f"{e}")
except:
@@ -3814,7 +3920,7 @@ async def parse_value(newres):
})
# Send the result :)
- self.action_result["completed_at"] = int(time.time())
+ self.action_result["completed_at"] = int(time.time_ns())
self.send_result(self.action_result, headers, stream_path)
#try:
diff --git a/backend/go-app/go.mod b/backend/go-app/go.mod
index 46cf8516..af4b2f8b 100644
--- a/backend/go-app/go.mod
+++ b/backend/go-app/go.mod
@@ -18,8 +18,8 @@ require (
github.com/gorilla/mux v1.8.0
github.com/h2non/filetype v1.1.3
github.com/satori/go.uuid v1.2.0
- github.com/shuffle/shuffle-shared v0.4.88
- golang.org/x/crypto v0.9.0
+ github.com/shuffle/shuffle-shared v0.5.30
+ golang.org/x/crypto v0.14.0
google.golang.org/api v0.125.0
google.golang.org/grpc v1.55.0
gopkg.in/src-d/go-git.v4 v4.13.1
diff --git a/backend/go-app/go.sum b/backend/go-app/go.sum
index e55ea13a..bea5b3c6 100644
--- a/backend/go-app/go.sum
+++ b/backend/go-app/go.sum
@@ -406,6 +406,22 @@ github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shuffle/shuffle-shared v0.4.66 h1:Aw4qOp0VsVJrRzW1sJhEy4OY4fRGlFErUD5+93RXL6g=
github.com/shuffle/shuffle-shared v0.4.66/go.mod h1:X613gbo0dT3fnYvXDRwjQZyLC+T49T2nSQOrCV5QMlI=
+github.com/shuffle/shuffle-shared v0.4.80 h1:03OL+O8prwL9zq6Gnb9SRORPWi5+ThO0jPoxk+xctOo=
+github.com/shuffle/shuffle-shared v0.4.80/go.mod h1:X613gbo0dT3fnYvXDRwjQZyLC+T49T2nSQOrCV5QMlI=
+github.com/shuffle/shuffle-shared v0.4.95 h1:xr92/03/uQeJiDme9S8/vgF1KWyQgJ1KQXVE7nQMKis=
+github.com/shuffle/shuffle-shared v0.4.95/go.mod h1:X613gbo0dT3fnYvXDRwjQZyLC+T49T2nSQOrCV5QMlI=
+github.com/shuffle/shuffle-shared v0.4.96 h1:iaIB/HP9eKpw9DMMJZhSLDbKdHJt075kFYLHg9AaiiM=
+github.com/shuffle/shuffle-shared v0.4.96/go.mod h1:X613gbo0dT3fnYvXDRwjQZyLC+T49T2nSQOrCV5QMlI=
+github.com/shuffle/shuffle-shared v0.4.97 h1:1c8LdNteMykKNEV97vwP63oSP2tV/Uso3O4TC+oxdFQ=
+github.com/shuffle/shuffle-shared v0.4.97/go.mod h1:X613gbo0dT3fnYvXDRwjQZyLC+T49T2nSQOrCV5QMlI=
+github.com/shuffle/shuffle-shared v0.4.98 h1:pgsLdWUpxZ/q+eHpAjOCH9icOsmuO5u2olmirOldy5A=
+github.com/shuffle/shuffle-shared v0.4.98/go.mod h1:X613gbo0dT3fnYvXDRwjQZyLC+T49T2nSQOrCV5QMlI=
+github.com/shuffle/shuffle-shared v0.5.11 h1:Eqbs9o8E49QAL5/6aV6BfFtWSjLIvgET7AL3fa4OQTg=
+github.com/shuffle/shuffle-shared v0.5.11/go.mod h1:X613gbo0dT3fnYvXDRwjQZyLC+T49T2nSQOrCV5QMlI=
+github.com/shuffle/shuffle-shared v0.5.14 h1:d14u1e4k+qKgnf4Insq4x2S+0MMKlDqdyTTyVP3puRA=
+github.com/shuffle/shuffle-shared v0.5.14/go.mod h1:X613gbo0dT3fnYvXDRwjQZyLC+T49T2nSQOrCV5QMlI=
+github.com/shuffle/shuffle-shared v0.5.29 h1:n4vThl7v3mFVXbrIW71XREFdmZZo7mOBAWxnsdiNjDk=
+github.com/shuffle/shuffle-shared v0.5.29/go.mod h1:X613gbo0dT3fnYvXDRwjQZyLC+T49T2nSQOrCV5QMlI=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
diff --git a/backend/go-app/main.go b/backend/go-app/main.go
index 3fbd4f70..571e8422 100755
--- a/backend/go-app/main.go
+++ b/backend/go-app/main.go
@@ -1965,6 +1965,7 @@ func executeCloudAction(action shuffle.CloudSyncJob, apikey string) error {
return err
}
+ defer newresp.Body.Close()
respBody, err := ioutil.ReadAll(newresp.Body)
if err != nil {
return err
@@ -3513,15 +3514,13 @@ func remoteOrgJobHandler(org shuffle.Org, interval int) error {
)
req.Header.Add("Authorization", fmt.Sprintf(`Bearer %s`, org.SyncConfig.Apikey))
-
- //log.Printf("[INFO] Sending org sync with autho %s", org.SyncConfig.Apikey)
-
newresp, err := client.Do(req)
if err != nil {
//log.Printf("Failed request in org sync: %s", err)
return err
}
+ defer newresp.Body.Close()
respBody, err := ioutil.ReadAll(newresp.Body)
if err != nil {
log.Printf("[ERROR] Failed body read in job sync: %s", err)
@@ -3574,6 +3573,12 @@ func runInitEs(ctx context.Context) {
log.Printf("[DEBUG] Getting organizations for Elasticsearch/Opensearch")
activeOrgs, err := shuffle.GetAllOrgs(ctx)
+ log.Printf("[DEBUG] Got %d organizations to look into. If this is 0, we wait 10 more seconds until DB is ready and try again.", len(activeOrgs))
+ if len(activeOrgs) == 0 {
+ time.Sleep(10 * time.Second)
+ activeOrgs, err = shuffle.GetAllOrgs(ctx)
+ }
+
setUsers := false
_ = setUsers
if err != nil {
@@ -3655,7 +3660,6 @@ func runInitEs(ctx context.Context) {
log.Printf("Successfully updated org to have users!")
}
}
-
}
}
}
@@ -3688,6 +3692,10 @@ func runInitEs(ctx context.Context) {
orgId = activeOrgs[0].Id
}
+ if len(schedule.Org) == 36 {
+ orgId = schedule.Org
+ }
+
_, _, err := handleExecution(schedule.WorkflowId, shuffle.Workflow{}, request, orgId)
if err != nil {
log.Printf("[WARNING] Failed to execute %s: %s", schedule.WorkflowId, err)
@@ -3697,15 +3705,21 @@ func runInitEs(ctx context.Context) {
for _, schedule := range schedules {
if strings.ToLower(schedule.Environment) == "cloud" {
- log.Printf("Skipping cloud schedule")
+ log.Printf("[DEBUG] Skipping cloud schedule")
continue
}
+ // FIXME: Add a randomized timer to avoid all schedules running at the same time
+ // Many are at 5 minutes / 1 hour. The point is to spread these out
+ // a bit instead of all of them starting at the exact same time
+
//log.Printf("Schedule: %#v", schedule)
//log.Printf("Schedule time: every %d seconds", schedule.Seconds)
jobret, err := newscheduler.Every(schedule.Seconds).Seconds().NotImmediately().Run(job(schedule))
if err != nil {
- log.Printf("Failed to schedule workflow: %s", err)
+ log.Printf("[ERROR] Failed to start schedule for workflow %s: %s", schedule.WorkflowId, err)
+ } else {
+ log.Printf("[DEBUG] Successfully started schedule for workflow %s", schedule.WorkflowId)
}
scheduledJobs[schedule.Id] = jobret
@@ -3977,7 +3991,7 @@ func runInitEs(ctx context.Context) {
r, err := git.Clone(storer, fs, cloneOptions)
if err != nil {
- log.Printf("[WARNING] Failed loading repo into memory (init): %s", err)
+ log.Printf("[ERROR] Failed loading repo into memory (init): %s", err)
}
dir, err := fs.ReadDir("")
@@ -4014,7 +4028,7 @@ func runInitEs(ctx context.Context) {
}
_, err = git.Clone(storer, fs, cloneOptions)
if err != nil {
- log.Printf("[WARNING] Failed loading repo %s into memory: %s", apis, err)
+ log.Printf("[ERROR] Failed loading repo %s into memory: %s", apis, err)
} else if err == nil && len(workflowapps) < 10 {
log.Printf("[INFO] Finished git clone. Looking for updates to the repo.")
dir, err := fs.ReadDir("")
@@ -4030,7 +4044,7 @@ func runInitEs(ctx context.Context) {
if os.Getenv("SHUFFLE_HEALTHCHECK_DISABLED") != "true" {
- healthcheckInterval := 15
+ healthcheckInterval := 30
log.Printf("[INFO] Starting healthcheck job every %d minute. Stats available on /api/v1/health/stats. Disable with SHUFFLE_HEALTHCHECK_DISABLED=true", healthcheckInterval)
job := func() {
// Prepare a fake http.responsewriter
@@ -4725,7 +4739,7 @@ func initHandlers() {
log.Printf("[DEBUG] Initialized Shuffle database connection. Setting up environment.")
if elasticConfig == "elasticsearch" {
- time.Sleep(5 * time.Second)
+ time.Sleep(10 * time.Second)
go runInitEs(ctx)
} else {
//go shuffle.runInit(ctx)
@@ -4787,6 +4801,7 @@ func initHandlers() {
// App specific
// From here down isnt checked for org specific
r.HandleFunc("/api/v1/apps/{key}/execute", executeSingleAction).Methods("POST", "OPTIONS")
+ r.HandleFunc("/api/v1/apps/{key}/run", executeSingleAction).Methods("POST", "OPTIONS")
r.HandleFunc("/api/v1/apps/categories", shuffle.GetActiveCategories).Methods("GET", "OPTIONS")
r.HandleFunc("/api/v1/apps/categories/run", shuffle.RunCategoryAction).Methods("POST", "OPTIONS")
r.HandleFunc("/api/v1/apps/upload", handleAppZipUpload).Methods("POST", "OPTIONS")
@@ -4824,11 +4839,14 @@ func initHandlers() {
/* Everything below here increases the counters*/
r.HandleFunc("/api/v1/workflows", shuffle.GetWorkflows).Methods("GET", "OPTIONS")
r.HandleFunc("/api/v1/workflows", shuffle.SetNewWorkflow).Methods("POST", "OPTIONS")
+ r.HandleFunc("/api/v1/workflows/search", shuffle.HandleWorkflowRunSearch).Methods("POST", "OPTIONS")
r.HandleFunc("/api/v1/workflows/schedules", shuffle.HandleGetSchedules).Methods("GET", "OPTIONS")
r.HandleFunc("/api/v1/workflows/{key}/executions", shuffle.GetWorkflowExecutions).Methods("GET", "OPTIONS")
+ r.HandleFunc("/api/v1/workflows/{key}/executions/{key}/rerun", checkUnfinishedExecution).Methods("GET", "POST", "OPTIONS")
r.HandleFunc("/api/v1/workflows/{key}/executions/{key}/abort", shuffle.AbortExecution).Methods("GET", "OPTIONS")
r.HandleFunc("/api/v1/workflows/{key}/schedule", scheduleWorkflow).Methods("POST", "OPTIONS")
r.HandleFunc("/api/v1/workflows/download_remote", loadSpecificWorkflows).Methods("POST", "OPTIONS")
+ r.HandleFunc("/api/v1/workflows/{key}/run", executeWorkflow).Methods("GET", "POST", "OPTIONS")
r.HandleFunc("/api/v1/workflows/{key}/execute", executeWorkflow).Methods("GET", "POST", "OPTIONS")
r.HandleFunc("/api/v1/workflows/{key}/schedule/{schedule}", stopSchedule).Methods("DELETE", "OPTIONS")
r.HandleFunc("/api/v1/workflows/{key}/stream", shuffle.HandleStreamWorkflow).Methods("GET", "OPTIONS")
diff --git a/backend/go-app/walkoff.go b/backend/go-app/walkoff.go
index 32fb67ef..dafbcdc7 100755
--- a/backend/go-app/walkoff.go
+++ b/backend/go-app/walkoff.go
@@ -705,7 +705,8 @@ func handleWorkflowQueue(resp http.ResponseWriter, request *http.Request) {
// Will make sure transactions are always ran for an execution. This is recursive if it fails. Allowed to fail up to 5 times
func runWorkflowExecutionTransaction(ctx context.Context, attempts int64, workflowExecutionId string, actionResult shuffle.ActionResult, resp http.ResponseWriter) {
- log.Printf("[DEBUG] Running workflow execution transaction for %s", workflowExecutionId)
+ log.Printf("[DEBUG][%s] Running workflow execution update", workflowExecutionId)
+
// Should start a tx for the execution here
workflowExecution, err := shuffle.GetWorkflowExecution(ctx, workflowExecutionId)
@@ -1063,10 +1064,6 @@ func handleExecution(id string, workflow shuffle.Workflow, request *http.Request
}
}
- err = shuffle.SetWorkflowExecution(ctx, workflowExecution, true)
- if err != nil {
- log.Printf("[ERROR] Failed setting workflow execution during init (2): %s", err)
- }
err = imageCheckBuilder(execInfo.ImageNames)
if err != nil {
@@ -1573,6 +1570,11 @@ func handleExecution(id string, workflow shuffle.Workflow, request *http.Request
workflowExecution.ExecutionOrg = workflow.ExecutingOrg.Id
}
+ err = shuffle.SetWorkflowExecution(ctx, workflowExecution, true)
+ if err != nil {
+ log.Printf("[ERROR] Failed setting workflow execution during init (2): %s", err)
+ }
+
var allEnvs []shuffle.Environment
if len(workflowExecution.ExecutionOrg) > 0 {
//log.Printf("[INFO] Executing ORG: %s", workflowExecution.ExecutionOrg)
@@ -1665,7 +1667,7 @@ func handleExecution(id string, workflow shuffle.Workflow, request *http.Request
// FIXME - tmp name based on future companyname-companyId
// This leads to issues with overlaps. Should set limits and such instead
for _, environment := range execInfo.Environments {
- log.Printf("[INFO] Execution: %s should execute onprem with execution environment \"%s\". Workflow: %s", workflowExecution.ExecutionId, environment, workflowExecution.Workflow.ID)
+ log.Printf("[INFO][%s] Execution: should execute onprem with execution environment \"%s\". Workflow: %s", workflowExecution.ExecutionId, environment, workflowExecution.Workflow.ID)
executionRequest := shuffle.ExecutionRequest{
ExecutionId: workflowExecution.ExecutionId,
@@ -3358,11 +3360,21 @@ func executeSingleAction(resp http.ResponseWriter, request *http.Request) {
return
}
- workflowExecution.Priority = 10
+
+ workflowExecution.Priority = 11
environments, err := shuffle.GetEnvironments(ctx, user.ActiveOrg.Id)
environment := "Shuffle"
if len(environments) >= 1 {
+ // Find default one
environment = environments[0].Name
+
+ for _, env := range environments {
+ if env.Default {
+ environment = env.Name
+ break
+ }
+ }
+
} else {
log.Printf("[ERROR] No environments found for org %s. Exiting", user.ActiveOrg.Id)
resp.WriteHeader(401)
@@ -3370,6 +3382,14 @@ func executeSingleAction(resp http.ResponseWriter, request *http.Request) {
return
}
+ // Enforcing same env for job + run to be default
+ // FIXME: Should use environment that is in the source workflow if it exists
+ for i, _ := range workflowExecution.Workflow.Actions {
+ workflowExecution.Workflow.Actions[i].Environment = environment
+ workflowExecution.Workflow.Actions[i].Label = "TMP"
+ }
+ shuffle.SetWorkflowExecution(ctx, workflowExecution, false)
+
log.Printf("[INFO] Execution (single action): %s should execute onprem with execution environment \"%s\". Workflow: %s", workflowExecution.ExecutionId, environment, workflowExecution.Workflow.ID)
executionRequest := shuffle.ExecutionRequest{
@@ -3377,6 +3397,7 @@ func executeSingleAction(resp http.ResponseWriter, request *http.Request) {
WorkflowId: workflowExecution.Workflow.ID,
Authorization: workflowExecution.Authorization,
Environments: []string{environment},
+ Priority: 11,
}
executionRequest.Priority = workflowExecution.Priority
@@ -3397,6 +3418,9 @@ func executeSingleAction(resp http.ResponseWriter, request *http.Request) {
log.Printf("[ERROR] Failed to marshal retStruct in single execution: %s", err)
}
+ // Deleting as this is a single action and doesn't need to be stored
+ shuffle.DeleteKey(ctx, "workflowexecution", executionRequest.ExecutionId)
+
resp.WriteHeader(200)
resp.Write([]byte(returnBytes))
}
@@ -3993,3 +4017,160 @@ func checkWorkflowApp(workflowApp shuffle.WorkflowApp) error {
return nil
}
+
+// checkUnfinishedExecution re-queues a workflow execution that is still in
+// EXECUTING state so a worker can pick it up again. Exposed on
+// /api/v1/workflows/{key}/executions/{key}/rerun. Authorized either via the
+// execution's own Authorization token (automatic reruns) or a logged-in user
+// in the execution's org (or with support access).
+func checkUnfinishedExecution(resp http.ResponseWriter, request *http.Request) {
+	cors := shuffle.HandleCors(resp, request)
+	if cors {
+		return
+	}
+
+	// URL shape: /api/v1/workflows/{fileId}/executions/{executionId}/rerun
+	location := strings.Split(request.URL.String(), "/")
+	var fileId string
+	if location[1] == "api" {
+		if len(location) <= 4 {
+			resp.WriteHeader(401)
+			resp.Write([]byte(`{"success": false}`))
+			return
+		}
+
+		fileId = location[4]
+	}
+
+	if len(fileId) != 36 {
+		resp.WriteHeader(401)
+		resp.Write([]byte(`{"success": false, "reason": "Workflow ID to abort is not valid"}`))
+		return
+	}
+
+	executionId := location[6]
+	if len(executionId) != 36 {
+		resp.WriteHeader(401)
+		resp.Write([]byte(`{"success": false, "reason": "ExecutionID not valid"}`))
+		return
+	}
+
+	ctx := shuffle.GetContext(request)
+	exec, err := shuffle.GetWorkflowExecution(ctx, executionId)
+	if err != nil {
+		log.Printf("[ERROR] Failed getting execution (rerun workflow - 1) %s: %s", executionId, err)
+		resp.WriteHeader(401)
+		resp.Write([]byte(fmt.Sprintf(`{"success": false, "reason": "Failed getting execution ID %s because it doesn't exist (abort)."}`, executionId)))
+		return
+	}
+
+	// Extract a bare token from "Bearer <token>" for the self-auth check below
+	apikey := request.Header.Get("Authorization")
+	parsedKey := ""
+	if strings.HasPrefix(apikey, "Bearer ") {
+		apikeyCheck := strings.Split(apikey, " ")
+		if len(apikeyCheck) == 2 {
+			parsedKey = apikeyCheck[1]
+		}
+	}
+
+	// ONLY allowed to run automatically with the same auth (july 2022)
+	if exec.Authorization != parsedKey {
+		user, err := shuffle.HandleApiAuthentication(resp, request)
+		if err != nil {
+			log.Printf("[ERROR][%s] Bad authorization key for execution (rerun workflow - 3): %s", executionId, err)
+			resp.WriteHeader(403)
+			resp.Write([]byte(`{"success": false, "reason": "Failed because you're not authorized to see this workflow (3)."}`))
+			return
+		}
+
+		// Check if user is in the correct org
+		if user.ActiveOrg.Id == exec.ExecutionOrg && user.Role != "org-reader" {
+			log.Printf("[AUDIT][%s] User %s (%s) is force continuing execution from org access", executionId, user.Username, user.Id)
+		} else if user.SupportAccess {
+			log.Printf("[AUDIT][%s] User %s (%s) is force continuing execution with support access", executionId, user.Username, user.Id)
+		} else {
+			// err is nil on this path (authentication succeeded; the org check
+			// failed), so log the rejected user instead of a nil error.
+			log.Printf("[ERROR][%s] User %s (%s) not authorized for continue execution (rerun workflow - 2)", executionId, user.Username, user.Id)
+			resp.WriteHeader(403)
+			resp.Write([]byte(`{"success": false, "reason": "Failed because you're not authorized to see this workflow (2)."}`))
+			return
+		}
+	}
+
+	// Meant as a function that periodically checks whether previous executions have finished or not.
+	// Should probably be based on executedIds and finishedIds
+	// Schedule a check in the future instead?
+
+	// Count triggers that also report results so the finished-count log below
+	// reflects the real expected total.
+	extraInputs := 0
+	for _, trigger := range exec.Workflow.Triggers {
+		if trigger.Name == "User Input" && trigger.AppName == "User Input" {
+			extraInputs += 1
+		} else if trigger.Name == "Shuffle Workflow" && trigger.AppName == "Shuffle Workflow" {
+			extraInputs += 1
+		}
+	}
+
+	if exec.Status != "ABORTED" && exec.Status != "FINISHED" && exec.Status != "FAILURE" {
+		log.Printf("[DEBUG][%s] Rechecking execution and its status to send to backend IF the status is EXECUTING (%s - %d/%d finished)", exec.ExecutionId, exec.Status, len(exec.Results), len(exec.Workflow.Actions)+extraInputs)
+	}
+
+	// Usually caused by issue during startup
+	if exec.Status == "" {
+		resp.WriteHeader(401)
+		resp.Write([]byte(`{"success": false, "reason": "No status for the execution"}`))
+		return
+	}
+
+	if exec.Status != "EXECUTING" {
+		resp.WriteHeader(200)
+		resp.Write([]byte(`{"success": true, "reason": "Already finished"}`))
+		return
+	}
+
+	// Force it back in the queue to be executed
+	if len(exec.Workflow.Actions) == 0 {
+		resp.WriteHeader(200)
+		resp.Write([]byte(`{"success": true, "reason": "Not a cloud env workflow. Only rerunning cloud env."}`))
+		return
+	}
+
+	log.Printf("[DEBUG][%s] Workflow: %s (%s)", exec.ExecutionId, exec.Workflow.Name, exec.Workflow.ID)
+	if exec.Workflow.ID == "" || exec.Workflow.Name == "" {
+		log.Printf("[ERROR][%s] No workflow ID found for execution", exec.ExecutionId)
+		shuffle.DeleteKey(ctx, "workflowexecution", exec.ExecutionId)
+		resp.WriteHeader(200)
+		resp.Write([]byte(`{"success": true, "reason": "No workflow name / ID found. Can't run. Contact support@shuffler.io if this persists."}`))
+		return
+	}
+
+	environment := exec.Workflow.Actions[0].Environment
+	log.Printf("[DEBUG][%s] Not a cloud env workflow. Re-adding job in queue for env %s.", exec.ExecutionId, environment)
+
+	// Queue name convention: lowercased env with spaces/underscores folded to
+	// dashes, suffixed with the org id.
+	parsedEnv := fmt.Sprintf("%s_%s", strings.ToLower(strings.ReplaceAll(strings.ReplaceAll(environment, " ", "-"), "_", "-")), exec.ExecutionOrg)
+	log.Printf("[DEBUG][%s] Adding new run job to env (2): %s", exec.ExecutionId, parsedEnv)
+
+	executionRequest := shuffle.ExecutionRequest{
+		ExecutionId:   exec.ExecutionId,
+		WorkflowId:    exec.Workflow.ID,
+		Authorization: exec.Authorization,
+		Environments:  []string{environment},
+	}
+
+	// Increase priority on reruns to catch up
+	executionRequest.Priority = 11
+	err = shuffle.SetWorkflowQueue(ctx, executionRequest, parsedEnv)
+	if err != nil {
+		log.Printf("[ERROR][%s] Failed adding execution to db: %s", exec.ExecutionId, err)
+	}
+
+	resp.WriteHeader(200)
+	resp.Write([]byte(fmt.Sprintf(`{"success": true, "reason": "Reran workflow in %s"}`, parsedEnv)))
+}
diff --git a/docker-compose.yml b/docker-compose.yml
index 80b2b60f..727f3e65 100755
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,7 +1,7 @@
version: '3'
services:
frontend:
- image: ghcr.io/shuffle/shuffle-frontend:latest
+ image: ghcr.io/shuffle/shuffle-frontend:nightly
container_name: shuffle-frontend
hostname: shuffle-frontend
ports:
@@ -15,7 +15,7 @@ services:
depends_on:
- backend
backend:
- image: ghcr.io/shuffle/shuffle-backend:latest
+ image: ghcr.io/shuffle/shuffle-backend:nightly
container_name: shuffle-backend
hostname: ${BACKEND_HOSTNAME}
# Here for debugging:
@@ -34,7 +34,7 @@ services:
- SHUFFLE_FILE_LOCATION=/shuffle-files
restart: unless-stopped
orborus:
- image: ghcr.io/shuffle/shuffle-orborus:latest
+ image: ghcr.io/shuffle/shuffle-orborus:nightly
container_name: shuffle-orborus
hostname: shuffle-orborus
networks:
@@ -42,7 +42,8 @@ services:
volumes:
- /var/run/docker.sock:/var/run/docker.sock
environment:
- - SHUFFLE_APP_SDK_TIMEOUT=300 # New SDK default timeout
+ - SHUFFLE_APP_SDK_TIMEOUT=300
+ - SHUFFLE_ORBORUS_EXECUTION_CONCURRENCY=5 # The amount of concurrent executions Orborus can handle.
#- DOCKER_HOST=tcp://docker-socket-proxy:2375
- ENVIRONMENT_NAME=${ENVIRONMENT_NAME}
- BASE_URL=http://${OUTER_HOSTNAME}:5001
diff --git a/frontend/package.json b/frontend/package.json
index 0cb2e764..fbababec 100755
--- a/frontend/package.json
+++ b/frontend/package.json
@@ -14,6 +14,7 @@
"@mui/styles": "^5.14.0",
"@mui/x-data-grid": "^5.17.11",
"@mui/x-date-pickers": "^6.11.1",
+ "@uiw/codemirror-theme-vscode": "^4.21.20",
"@uiw/codemirror-themes": "^4.21.9",
"@uiw/react-codemirror": "^4.21.9",
"@use-it/interval": "^1.0.0",
diff --git a/frontend/src/components/AppSearchButtons.jsx b/frontend/src/components/AppSearchButtons.jsx
index dbd2c055..553b7e41 100644
--- a/frontend/src/components/AppSearchButtons.jsx
+++ b/frontend/src/components/AppSearchButtons.jsx
@@ -47,8 +47,6 @@ const AppSearchButtons = (props) => {
const [newSelectedApp, setNewSelectedApp] = useState(undefined)
useEffect(() => {
- console.log("AppSearchButtons: newSelectedApp: " + JSON.stringify(newSelectedApp))
-
if (newSelectedApp !== undefined && setMissing != undefined) {
console.log("AppSearchButtons: setMissing is defined!")
@@ -131,9 +129,12 @@ const AppSearchButtons = (props) => {
//setFrameworkLoaded(true)
})
}
+
const icon = foundApp.large_image
- console.log("index:", moreButton)
- console.log("totalApps:", totalApps)
+ var foundAppImage = AppImage
+ if (foundApp.name !== undefined && foundApp.name !== null && !foundApp.name.includes(":default")) {
+ foundAppImage = foundApp.large_image
+ }
let xsValue = 12;
if (index === totalApps - 1 || index === totalApps - 2 || index === totalApps - 3 || index === totalApps - 4) {
@@ -142,6 +143,8 @@ const AppSearchButtons = (props) => {
if (index === totalApps - 5) {
xsValue = 12;
}
+
+ // This is silly huh
if (moreButton) {
switch (index) {
case totalApps - 1:
@@ -159,7 +162,6 @@ const AppSearchButtons = (props) => {
xsValue = 12;
break;
default:
- // Handle other cases if needed
}
}
@@ -221,10 +223,11 @@ const AppSearchButtons = (props) => {
>
{
e.preventDefault();
setLocalSearchOpen(false)
- setDefaultSearch("")
+
const submitDeletedApp = {
"description": "",
"id": "remove",
@@ -233,15 +236,23 @@ const AppSearchButtons = (props) => {
}
setFrameworkItem(submitDeletedApp)
setNewSelectedApp({})
+
+ if (setDefaultSearch !== undefined) {
+ setDefaultSearch("")
+ }
+
setTimeout(() => {
- setDiscoveryData({})
+ if (setDiscoveryData !== undefined) {
+ setDiscoveryData({})
+ }
+
setFrameworkItem(submitDeletedApp)
//setNewSelectedApp({})
}, 1000)
//setAppName(discoveryData.cases.name)
}}
>
-
+
@@ -283,12 +294,12 @@ const AppSearchButtons = (props) => {
}}
>
- {AppImage === undefined || AppImage === null || AppImage.length === 0 ?
-
+ {foundAppImage === undefined || foundAppImage === null || foundAppImage.length === 0 ?
+
:
-
+
}
{
/>
- {
- console.log("Chip: ", chip)
- //newWorkflowTags.push(chip);
- setNewWorkflowTags(chip);
- }}
- onAdd={(chip) => {
- newWorkflowTags.push(chip);
- setNewWorkflowTags(newWorkflowTags);
- }}
- onDelete={(chip, index) => {
- console.log("Deleting: ", chip, index)
- newWorkflowTags.splice(index, 1);
- setNewWorkflowTags(newWorkflowTags);
- setUpdate(Math.random());
- }}
- />
{usecases !== null && usecases !== undefined && usecases.length > 0 ?
-
+
Usecases
{
: null}
+ {
+ console.log("Chip: ", chip)
+ //newWorkflowTags.push(chip);
+ setNewWorkflowTags(chip);
+ }}
+ onAdd={(chip) => {
+ newWorkflowTags.push(chip);
+ setNewWorkflowTags(newWorkflowTags);
+ }}
+ onDelete={(chip, index) => {
+ console.log("Deleting: ", chip, index)
+ newWorkflowTags.splice(index, 1);
+ setNewWorkflowTags(newWorkflowTags);
+ setUpdate(Math.random());
+ }}
+ />
{showMoreClicked === true ?
@@ -365,7 +365,8 @@ const EditWorkflow = (props) => {
onChange={(e) => {
console.log("Data: ", e.target.value)
- innerWorkflow.workflow_type = e.target.value
+ //innerWorkflow.workflow_type = e.target.value
+ innerWorkflow.status = e.target.value
setInnerWorkflow(innerWorkflow)
}}
>
diff --git a/frontend/src/components/Header.jsx b/frontend/src/components/Header.jsx
index 2acc229f..dbf68fe2 100644
--- a/frontend/src/components/Header.jsx
+++ b/frontend/src/components/Header.jsx
@@ -216,44 +216,44 @@ const { globalUrl, setNotifications, notifications, isLoggedIn, removeCookie, ho
const NotificationItem = (props) => {
const {data} = props
- var image = "";
- var orgName = "";
- var orgId = "";
- if (userdata.orgs !== undefined) {
- const foundOrg = userdata.orgs.find((org) => org.id === data["org_id"]);
- if (foundOrg !== undefined && foundOrg !== null) {
- //position: "absolute", bottom: 5, right: -5,
- const imageStyle = {
- width: imagesize,
- height: imagesize,
- pointerEvents: "none",
- marginLeft: data.creator_org !== undefined && data.creator_org.length > 0 ? 20 : 0,
- borderRadius: 10,
- border: foundOrg.id === userdata.active_org.id ? `3px solid ${boxColor}` : null,
- cursor: "pointer",
- marginRight: 10,
- };
-
- image =
- foundOrg.image === "" ? (
-
- ) : (
-
{}}
- />
- );
-
- orgName = foundOrg.name;
- orgId = foundOrg.id;
- }
- }
+ var image = "";
+ var orgName = "";
+ var orgId = "";
+ if (userdata.orgs !== undefined) {
+ const foundOrg = userdata.orgs.find((org) => org.id === data["org_id"]);
+ if (foundOrg !== undefined && foundOrg !== null) {
+ //position: "absolute", bottom: 5, right: -5,
+ const imageStyle = {
+ width: imagesize,
+ height: imagesize,
+ pointerEvents: "none",
+ marginLeft: data.creator_org !== undefined && data.creator_org.length > 0 ? 20 : 0,
+ borderRadius: 10,
+ border: foundOrg.id === userdata.active_org.id ? `3px solid ${boxColor}` : null,
+ cursor: "pointer",
+ marginRight: 10,
+ };
+
+ image =
+ foundOrg.image === "" ? (
+
+ ) : (
+
{}}
+ />
+ );
+
+ orgName = foundOrg.name;
+ orgId = foundOrg.id;
+ }
+ }
return (
diff --git a/frontend/src/components/NewHeader.jsx b/frontend/src/components/NewHeader.jsx
index 7b72f0bd..77cabb46 100644
--- a/frontend/src/components/NewHeader.jsx
+++ b/frontend/src/components/NewHeader.jsx
@@ -106,6 +106,8 @@ const Header = (props) => {
const clearNotifications = () => {
// Don't really care about the logout
+
+ toast("Clearing notifications")
fetch(`${globalUrl}/api/v1/notifications/clear`, {
credentials: "include",
method: "GET",
@@ -294,49 +296,26 @@ const Header = (props) => {
borderBottom: "1px solid rgba(255,255,255,0.4)",
}}
>
- {/*
- {new Date(data.updated_at).toISOString()}
- */}
- {data.reference_url !== undefined &&
- data.reference_url !== null &&
- data.reference_url.length > 0 ? (
-
- {data.title}
-
- ) : (
-
- {data.title}
-
- )}
+ {data.reference_url !== undefined && data.reference_url !== null && data.reference_url.length > 0 ?
+
+
+ {data.title} ({data.amount})
+
+
+ :
+
+ {data.title}
+
+ }
- {data.image !== undefined &&
- data.image !== null &&
- data.image.length > 0 ? (
-
- ) : null}
- {data.description}
- {/*data.tags !== undefined && data.tags !== null && data.tags.length > 0 ?
- data.tags.map((tag, index) => {
- return (
- {
- }}
- variant="outlined"
- color="primary"
- />
- )
- })
- : null */}
+ {data.image !== undefined && data.image !== null && data.image.length > 0 ?
+
+ :
+ null
+ }
+
+ {data.description}
+
{data.read === false ? (
{
setAnchorEl(event.currentTarget);
}}
>
-
+ n.read === false).length} color="primary">
{
>
- Your Notifications ({notifications.length})
+ Your Notifications ({notifications.filter((data) => !data.read).length})
{notifications.length > 1 ? (
{
) : null}
- Notifications are made by Shuffle to help you discover issues or
- improvements.
+ Notifications are generated by Shuffle to help you discover issues or
+ improvements.
+ Learn more
{notifications.map((data, index) => {
- return ;
+ if (data.read) {
+ return null
+ }
+
+ return ;
})}
@@ -626,6 +610,11 @@ const Header = (props) => {
>
Logout
+
+
+
+ Version: 1.3.1
+
);
@@ -1352,14 +1341,17 @@ const Header = (props) => {
);
//
- return !isMobile ?
- isLoggedIn ?
+ //
+ /*
+ !isLoggedIn ?
{loginTextBrowser}
:
+ */
+ return !isMobile ?
{
authenticationType.client_secret.length > 0
);
- const [clientId, setClientId] = React.useState(
- defaultConfigSet ? authenticationType.client_id : ""
- );
- const [clientSecret, setClientSecret] = React.useState(
- defaultConfigSet ? authenticationType.client_secret : ""
- );
+ const [clientId, setClientId] = React.useState(defaultConfigSet ? authenticationType.client_id : "");
+ const [clientSecret, setClientSecret] = React.useState(defaultConfigSet ? authenticationType.client_secret : "");
+
+ const [username, setUsername] = React.useState("");
+ const [password, setPassword] = React.useState("");
+
const [oauthUrl, setOauthUrl] = React.useState("");
const [buttonClicked, setButtonClicked] = React.useState(false);
-
const [offlineAccess, setOfflineAccess] = React.useState(true);
- const allscopes = authenticationType.scope !== undefined ? authenticationType.scope : [];
+ const allscopes = authenticationType.scope !== undefined && authenticationType.scope !== null ? authenticationType.scope : [];
+ const [selectedScopes, setSelectedScopes] = React.useState(allscopes !== null && allscopes !== undefined ? allscopes.length > 0 && allscopes.length <= 3 ? allscopes : [] : [])
- const [selectedScopes, setSelectedScopes] = React.useState(allscopes.length > 0 && allscopes.length <= 3 ? [allscopes[0]] : [])
const [manuallyConfigure, setManuallyConfigure] = React.useState(
defaultConfigSet ? false : true
);
@@ -158,6 +157,7 @@ const AuthenticationOauth2 = (props) => {
return null;
}
+
const startOauth2Request = (admin_consent) => {
// Admin consent also means to add refresh tokens
console.log("Inside oauth2 request for app: ", selectedApp.name)
@@ -301,6 +301,23 @@ const AuthenticationOauth2 = (props) => {
if ((authenticationType.redirect_uri === undefined || authenticationType.redirect_uri === null || authenticationType.redirect_uri.length === 0) && (authenticationType.token_uri !== undefined && authenticationType.token_uri !== null && authenticationType.token_uri.length > 0)) {
console.log("No redirect URI found, and token URI found. Assuming client credentials flow and saving directly in the database")
+
+ var tokenUri = authenticationType.token_uri;
+ if (oauthUrl !== undefined && oauthUrl !== null && oauthUrl.length > 0 && selectedApp !== undefined && selectedApp !== null) {
+ var same = false
+ for (var i = 0; i < selectedApp.authentication.parameters.length; i++) {
+ const param = selectedApp.authentication.parameters[i];
+ if (param.name === "url" && (param.value === oauthUrl || param.example === oauthUrl)) {
+ same = true
+ break
+ }
+ }
+
+ if (!same) {
+ tokenUri = oauthUrl
+ }
+ }
+
// Find app.configuration=true fields in the app.paramters
var parsedFields = [{
"key": "client_id",
@@ -316,9 +333,35 @@ const AuthenticationOauth2 = (props) => {
},
{
"key": "token_uri",
- "value": authenticationType.token_uri,
+ "value": tokenUri,
}]
+ if (authenticationType.grant_type !== undefined && authenticationType.grant_type !== null && authenticationType.grant_type.length > 0) {
+ if (authenticationType.grant_type === "client_credentials") {
+ parsedFields.push({
+ "key": "grant_type",
+ "value": authenticationType.grant_type,
+ })
+ } else if (authenticationType.grant_type === "password") {
+ parsedFields.push({
+ "key": "grant_type",
+ "value": authenticationType.grant_type,
+ })
+
+ parsedFields.push({
+ "key": "username",
+ "value": username,
+ })
+
+ parsedFields.push({
+ "key": "password",
+ "value": password,
+ })
+ } else {
+ toast("Unknown grant type: " + authenticationType.grant_type)
+ }
+ }
+
const appAuthData = {
"label": "OAuth2 for " + selectedApp.name,
"app": {
@@ -332,14 +375,18 @@ const AuthenticationOauth2 = (props) => {
"reference_workflow": workflowId,
}
- setNewAppAuth(appAuthData)
+ if (setNewAppAuth !== undefined) {
+ setNewAppAuth(appAuthData, true)
+ } else {
+ console.log("setNewAppAuth is undefined")
+ }
+
// Wait 1 second, then get app auth with update
- //
- if (getAppAuthentication !== undefined) {
- setTimeout(() => {
- getAppAuthentication(true, true, true);
- }, 1000)
- }
+ //if (getAppAuthentication !== undefined) {
+ // setTimeout(() => {
+ // getAppAuthentication(true, true, true);
+ // }, 1000)
+ //}
return
}
@@ -369,8 +416,6 @@ const AuthenticationOauth2 = (props) => {
}
const authentication_url = authenticationType.token_uri;
- //console.log("AUTH: ", authenticationType)
- //console.log("SCOPES2: ", resources)
const redirectUri = `${window.location.protocol}//${window.location.host}/set_authentication`;
const workflowId = workflow !== undefined ? workflow.id : "";
var state = `workflow_id%3D${workflowId}%26reference_action_id%3d${selectedAction.app_id}%26app_name%3d${selectedAction.app_name}%26app_id%3d${selectedAction.app_id}%26app_version%3d${selectedAction.app_version}%26authentication_url%3d${authentication_url}%26scope%3d${resources}%26client_id%3d${client_id}%26client_secret%3d${client_secret}`;
@@ -478,8 +523,6 @@ const AuthenticationOauth2 = (props) => {
}
return;
- //do {
- //} while (
};
authenticationOption.app.actions = [];
@@ -497,7 +540,6 @@ const AuthenticationOauth2 = (props) => {
}
const handleSubmitCheck = () => {
- console.log("NEW AUTH: ", authenticationOption);
if (authenticationOption.label.length === 0) {
authenticationOption.label = `Auth for ${selectedApp.name}`;
//toast("Label can't be empty")
@@ -564,7 +606,10 @@ const AuthenticationOauth2 = (props) => {
console.log("FIELDS: ", newFields);
newAuthOption.fields = newFields;
- setNewAppAuth(newAuthOption);
+
+ if (setNewAppAuth !== undefined) {
+ setNewAppAuth(newAuthOption);
+ }
//appAuthentication.push(newAuthOption)
//setAppAuthentication(appAuthentication)
//
@@ -647,7 +692,7 @@ const AuthenticationOauth2 = (props) => {
)}
- if (authButtonOnly === true) {
+ if (authButtonOnly === true && (authenticationType.redirect_uri !== undefined && authenticationType.redirect_uri !== null && authenticationType.redirect_uri.length > 0) && (authenticationType.token_uri !== undefined && authenticationType.token_uri !== null && authenticationType.token_uri.length > 0)) {
return autoAuthButton
}
@@ -751,10 +796,14 @@ const AuthenticationOauth2 = (props) => {
setOauthUrl(data.value);
}
+ const defaultValue = data.name === "url" && authenticationType.token_uri !== undefined && authenticationType.token_uri !== null && authenticationType.token_uri.length > 0 && (authenticationType.authorizationUrl === undefined || authenticationType.authorizationUrl === null || authenticationType.authorizationUrl.length === 0) ? authenticationType.token_uri : data.value === undefined || data.value === null ? "" : data.value
+ const fieldname = data.name === "url" && authenticationType.grant_type !== undefined && authenticationType.grant_type !== null && authenticationType.grant_type.length > 0 ? "Token URL" : data.name
+
return (
-
{data.name}
+
+
{fieldname}
{data.schema !== undefined &&
data.schema !== null &&
@@ -767,6 +816,7 @@ const AuthenticationOauth2 = (props) => {
}}
defaultValue={"false"}
fullWidth
+ label={fieldname}
onChange={(e) => {
console.log("Value: ", e.target.value);
authenticationOption.fields[data.name] = e.target.value;
@@ -816,16 +866,11 @@ const AuthenticationOauth2 = (props) => {
: "text"
}
color="primary"
- defaultValue={
- data.value !== undefined && data.value !== null
- ? data.value
- : ""
- }
+ defaultValue={defaultValue}
placeholder={data.example}
onChange={(event) => {
- authenticationOption.fields[data.name] =
- event.target.value;
- console.log("Setting oauth url");
+ authenticationOption.fields[data.name] = event.target.value;
+ console.log("Setting oauth url: ", event.target.value);
setOauthUrl(event.target.value);
//const [oauthUrl, setOauthUrl] = React.useState("")
}}
@@ -872,8 +917,51 @@ const AuthenticationOauth2 = (props) => {
//authenticationOption.label = event.target.value
}}
/>
- {allscopes.length === 0 ? null : "Scopes (access rights)"}
- {allscopes.length === 0 ? null : (
+
+ {authenticationType.grant_type !== "password" ? null :
+
+ {
+ setUsername(event.target.value);
+ //authenticationOption.label = event.target.value
+ }}
+ />
+ {
+ setPassword(event.target.value);
+ //authenticationOption.label = event.target.value
+ }}
+ />
+
+ }
+
+ {allscopes === undefined || allscopes === null || allscopes.length === 0 ? null : "Scopes (access rights)"}
+ {allscopes === undefined || allscopes === null || allscopes.length === 0 ? null : (
{
borderRadius: theme.palette.borderRadius,
}}
disabled={
- clientSecret.length === 0 || clientId.length === 0 || buttonClicked || selectedScopes.length === 0
+ clientSecret.length === 0 || clientId.length === 0 || buttonClicked || (allscopes.length !== 0 && selectedScopes.length === 0)
}
variant="contained"
fullWidth
diff --git a/frontend/src/components/ParsedAction.jsx b/frontend/src/components/ParsedAction.jsx
index c069499f..e0abbe42 100755
--- a/frontend/src/components/ParsedAction.jsx
+++ b/frontend/src/components/ParsedAction.jsx
@@ -170,6 +170,7 @@ const ParsedAction = (props) => {
setEditorData,
setcodedata,
+ setAiQueryModalOpen,
} = props;
const classes = useStyles();
@@ -181,6 +182,8 @@ const ParsedAction = (props) => {
const [fieldCount, setFieldCount] = React.useState(0);
const [hiddenDescription, setHiddenDescription] = React.useState(true);
+ const [autoCompleting, setAutocompleting] = React.useState(false);
+
useEffect(() => {
if (setLastSaved !== undefined) {
@@ -796,7 +799,6 @@ const ParsedAction = (props) => {
selectedActionParameters[count].value = splitparsed[0]
selectedAction.parameters[count].value = splitparsed[0]
- //changeActionParameter({target: {value: splitparsed[1]}},
selectedActionParameters[1].value = splitparsed[1]
selectedAction.parameters[1].value = splitparsed[1]
forceUpdate = true
@@ -2909,17 +2911,26 @@ const ParsedAction = (props) => {
marginLeft: 15,
paddingRight: 0,
}}
+ disabled={autoCompleting}
onClick={() => {
- // aiSubmit(aiMsg, undefined, undefined, newSelectedAction)
- aiSubmit("Fill based on previous values", undefined, undefined, selectedAction)
+ //if (setAiQueryModalOpen !== undefined) {
+ // setAiQueryModalOpen(true)
+ //} else {
+ aiSubmit("Fill based on previous values", undefined, undefined, selectedAction)
+ //}
+ setAutocompleting(true)
}}
>
+ {autoCompleting ?
+
+ :
+ }
@@ -3409,6 +3420,12 @@ const ParsedAction = (props) => {
setSelectedActionEnvironment(env);
selectedAction.environment = env.Name;
setSelectedAction(selectedAction);
+
+ for (let actionkey in workflow.actions) {
+ workflow.actions[actionkey].environment = env.Name
+ }
+ setWorkflow(workflow)
+ toast("Set environment for ALL actions to " + env.Name)
}}
style={{
backgroundColor: theme.palette.inputColor,
diff --git a/frontend/src/components/Priorities.jsx b/frontend/src/components/Priorities.jsx
index b588fdf8..63c06515 100644
--- a/frontend/src/components/Priorities.jsx
+++ b/frontend/src/components/Priorities.jsx
@@ -1,24 +1,30 @@
import React, { useState, useEffect } from "react";
+import { toast } from "react-toastify";
import theme from "../theme.jsx";
import {
Paper,
- Typography,
+ Tooltip,
+ Typography,
Divider,
Button,
+ ButtonGroup,
Grid,
Card,
- Switch,
+ Chip,
+ Switch,
} from "@mui/material";
+import { useNavigate, Link } from "react-router-dom";
import Priority from "../components/Priority.jsx";
//import { useAlert
const Priorities = (props) => {
- const { globalUrl, userdata, serverside, billingInfo, stripeKey, checkLogin, setAdminTab, setCurTab, } = props;
+ const { globalUrl, userdata, serverside, billingInfo, stripeKey, checkLogin, setAdminTab, setCurTab, notifications, setNotifications, } = props;
const [showDismissed, setShowDismissed] = React.useState(false);
const [showRead, setShowRead] = React.useState(false);
const [appFramework, setAppFramework] = React.useState({});
+ let navigate = useNavigate();
useEffect(() => {
getFramework()
@@ -61,6 +67,182 @@ const Priorities = (props) => {
})
}
+ const dismissNotification = (alert_id) => {
+ // Don't really care about the logout
+ fetch(`${globalUrl}/api/v1/notifications/${alert_id}/markasread`, {
+ credentials: "include",
+ method: "GET",
+ headers: {
+ "Content-Type": "application/json",
+ },
+ })
+ .then(function (response) {
+ if (response.status !== 200) {
+ console.log("Error in response");
+ }
+
+ return response.json();
+ })
+ .then(function (responseJson) {
+ if (responseJson.success === true) {
+ const newNotifications = notifications.filter(
+ (data) => data.id !== alert_id
+ );
+ console.log("NEW NOTIFICATIONS: ", newNotifications);
+
+ if (setNotifications !== undefined) {
+ setNotifications(newNotifications)
+ }
+ } else {
+ toast("Failed dismissing notification. Please try again later.");
+ }
+ })
+ .catch((error) => {
+ console.log("error in notification dismissal: ", error);
+ //removeCookie("session_token", {path: "/"})
+ })
+ }
+
+
+ const notificationWidth = "100%"
+ const imagesize = 22
+ const boxColor = "#86c142"
+ const NotificationItem = (props) => {
+ const {data} = props
+
+ var image = "";
+ var orgName = "";
+ var orgId = "";
+ if (userdata.orgs !== undefined) {
+ const foundOrg = userdata.orgs.find((org) => org.id === data["org_id"]);
+ if (foundOrg !== undefined && foundOrg !== null) {
+ //position: "absolute", bottom: 5, right: -5,
+ const imageStyle = {
+ width: imagesize,
+ height: imagesize,
+ pointerEvents: "none",
+ marginLeft:
+ data.creator_org !== undefined && data.creator_org.length > 0
+ ? 20
+ : 0,
+ borderRadius: 10,
+ border:
+ foundOrg.id === userdata.active_org.id
+ ? `3px solid ${boxColor}`
+ : null,
+ cursor: "pointer",
+ marginRight: 10,
+ };
+
+ image =
+ foundOrg.image === "" ? (
+
+ ) : (
+
{}}
+ />
+ );
+
+ orgName = foundOrg.name;
+ orgId = foundOrg.id;
+ }
+ }
+
+ return (
+
+
+ {data.amount === 1 && data.read === false ?
+
+ : null}
+ {data.read === false ?
+
+ :
+
+ }
+
+ {data.title}
+
+
+
+ {data.image !== undefined && data.image !== null && data.image.length > 0 ?
+
+ :
+ null
+ }
+
+ {data.description}
+
+
+
+ {
+ window.open(data.reference_url, "_blank")
+ }}
+ >
+ Explore
+
+ {data.read === false ? (
+ {
+ dismissNotification(data.id);
+ }}
+ >
+ Dismiss
+
+ ) : null}
+
+
+
+ First seen : {new Date(data.created_at * 1000).toISOString().slice(0, 19)}
+
+
+ Last seen : {new Date(data.updated_at * 1000).toISOString().slice(0, 19)}
+
+
+ Times seen : {data.amount}
+
+
+
+ );
+ }
+
return (
Suggestions
@@ -125,6 +307,20 @@ const Priorities = (props) => {
setShowRead(!showRead);
}}
/> Show read
+ {notifications === null || notifications === undefined || notifications.length === 0 ? null :
+
+ {notifications.map((notification, index) => {
+ if (showRead === false && notification.read === true) {
+ return null
+ }
+
+ return (
+
+ )
+ })}
+
+ }
+
)
}
diff --git a/frontend/src/components/RuntimeDebugger.jsx b/frontend/src/components/RuntimeDebugger.jsx
index fa149570..5810cbab 100644
--- a/frontend/src/components/RuntimeDebugger.jsx
+++ b/frontend/src/components/RuntimeDebugger.jsx
@@ -4,6 +4,7 @@ import {
TextField,
Link,
Button,
+ ButtonGroup,
CircularProgress,
Select,
MenuList,
@@ -13,6 +14,7 @@ import {
Autocomplete,
Tooltip,
Typography,
+ IconButton,
} from '@mui/material';
import { toast } from "react-toastify"
@@ -21,6 +23,7 @@ import theme from '../theme.jsx';
import dayjs from 'dayjs';
import { AdapterDayjs } from '@mui/x-date-pickers/AdapterDayjs'
import Pagination from '@mui/material/Pagination';
+import { triggers as alltriggers } from "../views/AngularWorkflow.jsx"
import {
DatePicker,
DateTimePicker,
@@ -29,6 +32,9 @@ import {
import {
OpenInNew as OpenInNewIcon,
+ PlayArrow as PlayArrowIcon,
+ Insights as InsightsIcon,
+ Replay as ReplayIcon,
} from '@mui/icons-material';
import { DataGrid, GridColDef, GridValueGetterParams } from '@mui/x-data-grid'
@@ -44,21 +50,18 @@ const RuntimeDebugger = (props) => {
const classes = useStyles();
- //const [workflowId, setWorkflowId] = useState("");
- //const [status, setStatus] = useState("FINISHED");
- //const [endTime, setEndTime] = useState(dayjs().subtract(0, 'day'))
- //const [startTime, setStartTime] = useState(dayjs().subtract(30, 'day'))
-
const [workflowId, setWorkflowId] = useState("")
const [status, setStatus] = useState("")
const [endTime, setEndTime] = useState("")
const [startTime, setStartTime] = useState("")
const [workflow, setWorkflow] = useState({})
+ const [ignoreOrg, setIgnoreOrg] = useState(false)
const [searchLoading, setSearchLoading] = useState(false)
const [rowCursor, setCursor] = useState("")
const [rowsPerPage, setRowsPerPage] = useState(10)
const [resultRows, setResultRows] = useState([])
+ const [selectedWorkflowExecutions, setSelectedWorkflowExecutions] = useState([])
const [workflows, setWorkflows] = useState([
{"id": "", "name": "All Workflows",}
])
@@ -73,7 +76,7 @@ const RuntimeDebugger = (props) => {
const submitSearch = (workflowId, status, startTime, endTime, cursor, limit) => {
- setResultRows([])
+ //setResultRows([])
setSearchLoading(true)
const fetchData = {
workflow_id: workflowId,
@@ -83,6 +86,7 @@ const RuntimeDebugger = (props) => {
status: status.toUpperCase(),
start_time: startTime,
end_time: endTime,
+ ignore_org: ignoreOrg,
}
fetch(`${globalUrl}/api/v1/workflows/search`, {
@@ -105,22 +109,29 @@ const RuntimeDebugger = (props) => {
//data.runs[key].endTimestamp = data.runs[key].ended_at.toISOString().slice(0, 19).replace('T', ' ')
const startTimestamp = new Date(data.runs[key].started_at*1000)
data.runs[key].startTimestamp = startTimestamp.toISOString().slice(0, 19).replace('T', ' ')
+
const endTimestamp = new Date(data.runs[key].completed_at*1000)
data.runs[key].endTimestamp = endTimestamp.toISOString().slice(0, 19).replace('T', ' ')
+ if (data.runs[key].completed_at === 0 || data.runs[key].completed_at === null) {
+ data.runs[key].endTimestamp = ""
+ }
}
// Add 20 empty rows to the end of the resultRows array
// This is to make sure that the scrollbar is always visible
setResultRows(data.runs)
+ } else {
+ toast("No results found. Keeping old runs")
}
} else {
- console.error("Search error: ", data.reason)
+ toast("Failed to search for runs. Please try again.")
}
})
.catch((error) => {
setSearchLoading(false)
console.error("Error:", error);
+ toast("Failed to search for runs. Please try again (2)")
})
}
@@ -176,11 +187,78 @@ const RuntimeDebugger = (props) => {
}
}, [])
+ const forceContinue = (execution) => {
+ console.log(`FORCE CONTINUE execution ${execution.execution_id} for workflow ${execution.workflow.id}`)
+
+ fetch(`${globalUrl}/api/v1/workflows/${execution.workflow.id}/executions/${execution.execution_id}/rerun`, {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ },
+ credentials: "include",
+ })
+ .then((response) => response.json())
+ .then((data) => {
+ if (data.success) {
+ if (data.reason !== undefined && data.reason !== null && data.reason !== "") {
+ toast("Successful response: " + data.reason)
+ } else {
+ toast("Successfully forced continue")
+ }
+ } else {
+ if (data.reason !== undefined && data.reason !== null && data.reason !== "") {
+ toast(`Failed to force continue: ${data.reason}`)
+ } else {
+ toast("Failed to force continue")
+ }
+ }
+ })
+ .catch((error) => {
+ console.error("Error:", error);
+ toast(`Failed to force continue: ${error}`)
+ })
+ }
+
+ const imageSize = 30
+ const timenowUnix = Math.floor(Date.now() / 1000)
const columns: GridColDef[] = [
+ {
+ field: 'execution_source',
+ headerName: 'Source',
+ width: 75,
+ renderCell: (params) => {
+ var foundSource =
+
+ var source = params.row.execution_source
+ if (source === "schedule") {
+ foundSource =
+ } else if (source === "webhook") {
+ foundSource =
+ } else if (source === "subflow" || source.length === 36) {
+ foundSource =
+ source = "subflow"
+ } else if (source === "rerun" || source.length === 36) {
+ foundSource =
+ source = "rerun of a previous run"
+ } else {
+ source = "manual"
+ }
+
+ return (
+
{
+ //setStatus(params.row.status)
+ }}>
+
+ {foundSource}
+
+
+ )
+ },
+ },
{
field: 'status',
headerName: 'Status',
- width: 150,
+ width: 100,
renderCell: (params) => (
{
setStatus(params.row.status)
@@ -208,6 +286,7 @@ const RuntimeDebugger = (props) => {
),
},
+
{
field: 'workflow results',
headerName: 'Results',
@@ -235,17 +314,212 @@ const RuntimeDebugger = (props) => {
)
},
},
- { field: 'startTimestamp', headerName: 'Start time', width: 160, },
- { field: 'endTimestamp', headerName: 'End time', width: 160, },
+ {
+ field: 'finished',
+ headerName: 'Finished',
+ width: 75,
+ renderCell: (params) => {
+ var foundItems = 0
+ var foundSkipped = 0
+ if (params.row.results !== null && params.row.results !== undefined) {
+ for (let key in params.row.results) {
+ if (params.row.results[key].status === "SUCCESS") {
+ foundItems += 1
+ }
+
+ if (params.row.results[key].status === "SKIPPED") {
+ foundSkipped += 1
+ }
+ }
+ }
+
+ var foundError = ""
+ if (foundItems + foundSkipped < params.row.workflow.actions.length && params.row.status === "FINISHED") {
+ foundError = "Workflow is done, but all nodes are not finished. This most likely indicates a problem with the workflow"
+ }
+
+ return (
+
+ 0 ? "rgba(244,0,0,0.6)" : "inherit"}} onClick={() => {
+ }}>
+ {foundItems}
+
+
+ )
+ },
+ },
+ {
+ field: 'skipped',
+ headerName: 'Skipped',
+ width: 75,
+ renderCell: (params) => {
+ var foundItems = 0
+ if (params.row.results !== null && params.row.results !== undefined) {
+ for (let key in params.row.results) {
+ if (params.row.results[key].status === "SKIPPED") {
+ foundItems += 1
+ }
+ }
+ }
+
+ return (
+
{
+ }}>
+ {foundItems}
+
+ )
+ },
+ },
+ { field: 'startTimestamp', headerName: 'Start time (UTC)', width: 160,
+ renderCell: (params) => {
+ const comparisonTimestamp = params.row.completed_at === 0 ? timenowUnix : params.row.completed_at
+ const hasError = comparisonTimestamp-params.row.started_at > 300
+
+ return (
+
+ {
+ console.log("Zoom in on end timestamp is this one: ", params.row.endTimestamp)
+ //setEndTimestamp(params.row.endTimestamp)
+
+ // Make a new Date() from params.row.startTimestamp and set it in the endTime
+ const newEndTime = new Date(params.row.startTimestamp)
+ if (newEndTime !== null && newEndTime !== undefined && newEndTime !== "" && newEndTime !== "Invalid Date") {
+ // Translate newEndTime to UTC no matter what timezone we are in. Based it on local()
+ // Plus 1 minute to make sure it comes in
+ setEndTime(dayjs(newEndTime.setMinutes(newEndTime.getMinutes()+1)))
+
+ // Use dayjs to translate it into something useful
+
+ // Remove 5 minutes from it and set startTime
+ //newEndTime.setMinutes(newEndTime.getMinutes()-5)
+ //setStartTime(dayjs(newEndTime))
+ }
+ }}>
+ {params.row.startTimestamp}
+
+
+ )
+ }
+ },
+ { field: 'endTimestamp', headerName: 'End time (UTC)', width: 160, },
{
field: 'id',
headerName: 'Explore',
- width: 65,
- renderCell: (params) => (
-
-
-
- ),
+ width: 120,
+ renderCell: (params) => {
+ const parsedResult = params.row.result === null || params.row.result === undefined || params.row.result === "" ? "" : params.row.result
+
+ var errorReason = ""
+ var hasError = parsedResult !== null && parsedResult !== undefined && parsedResult !== "" ? parsedResult.includes("{%") && parsedResult.includes("%}") : false
+
+ if (hasError) {
+ errorReason = "Liquid parsing error"
+ }
+
+ // if success: false
+ // if node == FAILURE or ABORTED
+ if (parsedResult.includes(`\"success\": false`)) {
+ errorReason = "success: false in last result"
+ hasError = true
+ }
+
+ if (!hasError && parsedResult.includes(`\"status\":`)) {
+ // Look for any status that is 300 or higher
+ const statusSplit = parsedResult.split(`\"status\":`)
+ if (statusSplit.length > 1) {
+ var foundStatus = statusSplit[1].trim()
+ // Check if pattern is \d,
+ if (foundStatus.includes(",")) {
+ const foundStatusSplit = foundStatus.split(",")
+
+ if (foundStatusSplit.length > 1) {
+ foundStatus = foundStatusSplit[0].trim()
+ // Check if it's a number
+ }
+ } else {
+ foundStatus = ""
+ }
+
+ if (!isNaN(foundStatus) && foundStatus >= 300) {
+ errorReason = "Status code: "+foundStatus
+ hasError = true
+ }
+ }
+ }
+
+ if (!hasError) {
+ // Find last node that isn't skipped and check status
+ var lastresult = {}
+ for (var key in params.row.results) {
+ const result = params.row.results[key]
+ if (result.status === "SKIPPED") {
+ continue
+ }
+
+ if (result.completed_at === undefined || result.completed_at === null) {
+ continue
+ }
+
+ if (result.completed_at >= lastresult.completed_at) {
+ lastresult = result
+ }
+ }
+
+ if (lastresult.id !== undefined && lastresult.status !== "SUCCESS" && lastresult.status !== "SKIPPED") {
+ errorReason = "Bad status for last node: "+lastresult.status
+ hasError = true
+ }
+ }
+
+ if (!hasError && params.row.notifications_created !== null && params.row.notifications_created !== undefined && params.row.notifications_created !== 0) {
+ hasError = true
+ errorReason = "Generated notifications: "+params.row.notifications_created
+ }
+
+ return (
+
+
+ Workflow result: {errorReason}
+ {params.row.result !== null && params.row.result !== undefined && params.row.result !== "" ?
+ params.row.result
+ :
+ null
+ }
+
+ }>
+
+
+
+
+
+
+
+ {
+ forceContinue(params.row)
+
+ }}
+ >
+
+
+
+
+ {
+ window.open(`${globalUrl}/api/v1/workflows/search/${params.row.id}`, "_blank")
+ }}
+ disabled={!userdata.support}
+ >
+
+
+
+
+ )
+ }
},
]
@@ -253,7 +527,6 @@ const RuntimeDebugger = (props) => {
// Check if the user is currently focusing a texxtfield or not
// If they are, don't submit the search
if (document.activeElement.tagName === "INPUT") {
- console.log("User is focusing a textfield, not submitting search")
return
}
@@ -273,6 +546,29 @@ const RuntimeDebugger = (props) => {
setEndTime(date)
}
+ const abortExecution = (workflowId, executionId) => {
+ fetch(`${globalUrl}/api/v1/workflows/${workflowId}/executions/${executionId}/abort`,
+ {
+ method: "GET",
+ headers: {
+ "Content-Type": "application/json",
+ Accept: "application/json",
+ },
+ credentials: "include",
+ }
+ )
+ .then((response) => {
+ if (response.status !== 200) {
+ console.log("Status not 200 for ABORT EXECUTION :O!");
+ }
+
+ return response.json();
+ })
+ .catch((error) => {
+ toast.error("Error aborting execution: "+error.toString())
+ });
+ };
+
const handleWorkflowSelectionUpdate = (e, isUserinput) => {
if (e.target.value === undefined || e.target.value === null || e.target.value.id === undefined) {
console.log("Returning as there's no id")
@@ -287,9 +583,119 @@ const RuntimeDebugger = (props) => {
submitSearch(e.target.value.id, status, startTime, endTime, rowCursor, rowsPerPage)
}
+ const executeWorkflow = (execution) => {
+ const data = {
+ execution_argument: execution.execution_argument,
+ start: execution.start,
+ execution_source: "rerun",
+ };
+
+ fetch(`${globalUrl}/api/v1/workflows/${execution.workflow.id}/execute?start=${execution.start}`,
+ {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ Accept: "application/json",
+ },
+ credentials: "include",
+ body: JSON.stringify(data),
+ }
+ )
+ .then((response) => {
+ if (response.status !== 200) {
+ console.log("Status not 200 for WORKFLOW EXECUTION :O!");
+ }
+
+ return response.json();
+ })
+ .then((responseJson) => {
+ if (!responseJson.success) {
+ toast("Error executing workflow: "+responseJson.error)
+ } else {
+ console.log("Executed workflow: ", responseJson)
+ }
+ })
+ .catch((error) => {
+ toast("Failed to execute workflow: "+error.toString())
+ });
+ }
+
return (
-
-
Workflow Run Debugger
+
+
+
+
Workflow Run Debugger
+ {selectedWorkflowExecutions.length > 0 ?
+
+
+ {
+
+ for (var i = 0; i < selectedWorkflowExecutions.length; i++) {
+ const selected = selectedWorkflowExecutions[i]
+ executeWorkflow(selected)
+ }
+
+ toast("Reran "+selectedWorkflowExecutions.length+" workflow run!")
+ setSelectedWorkflowExecutions([])
+
+ }}
+ >
+ Rerun Selected ({selectedWorkflowExecutions.length})
+
+
+
+ {
+ toast("Attempting to abort "+selectedWorkflowExecutions.length+" workflow runs...")
+
+ var aborted = 0
+ for (var i = 0; i < selectedWorkflowExecutions.length; i++) {
+ const selected = selectedWorkflowExecutions[i]
+ if (selected.status === "EXECUTING") {
+ abortExecution(selected.workflow.id, selected.execution_id)
+ aborted += 1
+ }
+ }
+
+ if (aborted === 0) {
+ toast("No workflows were aborted as they are not executing.")
+ } else {
+ toast("Aborted "+aborted+" workflows.")
+ // Research
+ submitSearch(workflowId, status, startTime, endTime, rowCursor, rowsPerPage)
+
+ setSelectedWorkflowExecutions([])
+ }
+
+ }}
+ >
+ Abort Selected ({selectedWorkflowExecutions.length})
+
+
+
+
+ : null}
+
+ {userdata.support === true ?
+ {
+ setIgnoreOrg(!ignoreOrg)
+ }}
+ >
+ {ignoreOrg ? "Ignoring Org" : "Ignore Org"}
+
+ : null}
+
diff --git a/frontend/src/components/SearchData.jsx b/frontend/src/components/SearchData.jsx
index cc9c1551..cb6d156c 100644
--- a/frontend/src/components/SearchData.jsx
+++ b/frontend/src/components/SearchData.jsx
@@ -50,6 +50,7 @@ const SearchData = props => {
const [value, setValue] = useState("");
const [userTyped, setUserTyped] = useState(false)
+
if (serverside === true) {
return null
}
@@ -59,25 +60,16 @@ const SearchData = props => {
//}
const isCloud = window.location.host === "localhost:3002" || window.location.host === "shuffler.io";
-
- //if (window.location.pathname !== oldPath) {
- // setSearchOpen(false)
- // setOldPath(window.location.pathname)
- //}
-
- //if (window.location.pathname === "/search") {
- // setModalOpen(true)
- //}
// if (window.location.pathname === "/docs" || window.location.pathname === "/apps" || window.location.pathname === "/usecases" ) {
// setModalOpen(false)
// }
- //useEffect(() => {
- // if (searchOpen) {
- // var tarfield = document.getElementById("shuffle_search_field")
- // tarfield.focus()
- // }
- //}, searchOpen)
+ // useEffect(() => {
+ // if (searchOpen) {
+ // var tarfield = document.getElementById("shuffle_search_field")
+ // tarfield.focus()
+ // }
+ // }, searchOpen)
const SearchBox = ({ currentRefinement, refine, isSearchStalled, }) => {
const keyPressHandler = (e) => {
@@ -85,9 +77,11 @@ const SearchData = props => {
if (e.which === 13) {
// alert("You pressed enter!");
navigate("/search?q=" + currentRefinement, { state: value, replace: true });
- setSearchOpen(false)
- setModalOpen(false)
- return
+
+ setSearchOpen(false)
+ setModalOpen(false)
+ return
+
}
};
/*
@@ -104,10 +98,11 @@ const SearchData = props => {
return (
-
+
@@ -749,29 +714,6 @@ const SearchData = props => {
- {/*
-
-
-
- Popular searches
-
-
-
-
- Apps
-
-
-
- Workflows
-
-
-
- Creator
-
-
-
-
- */}
{ window.location = "/search"; }} >
@@ -779,12 +721,38 @@ const SearchData = props => {
+ ): null
+
+ const CustomSearchBox = connectSearchBox(SearchBox)
+ const CustomAppHits = connectHits(AppHits)
+ const CustomWorkflowHits = connectHits(WorkflowHits)
+ const CustomDocHits = connectHits(DocHits)
+
+ const modalView = (
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
)
return (
-
+
{
- console.log("CLICKED 1")
}}>
{
diff --git a/frontend/src/components/Searchfield.jsx b/frontend/src/components/Searchfield.jsx
index 49c28f64..693d096e 100644
--- a/frontend/src/components/Searchfield.jsx
+++ b/frontend/src/components/Searchfield.jsx
@@ -90,8 +90,8 @@ const SearchField = props => {
},
}}
>
- {isHeader ?
-
Search Shuffle
+ {isHeader ?
+
Search for docs, apps, workflows and more
{
setModalOpen(false);
}}>
@@ -101,12 +101,11 @@ const SearchField = props => {
-
-
- {/*
+
+ {/*
+
Discord
- */}
- {/*
+
@@ -118,8 +117,9 @@ const SearchField = props => {
- */}
+ */}
+
);
diff --git a/frontend/src/components/ShuffleCodeEditor.jsx b/frontend/src/components/ShuffleCodeEditor.jsx
index 5fb9b152..39b1a26c 100644
--- a/frontend/src/components/ShuffleCodeEditor.jsx
+++ b/frontend/src/components/ShuffleCodeEditor.jsx
@@ -22,6 +22,7 @@ import { isMobile } from "react-device-detect"
import { NestedMenuItem } from "mui-nested-menu"
import { GetParsedPaths, FindJsonPath } from "../views/Apps.jsx";
import { SetJsonDotnotation } from "../views/AngularWorkflow.jsx";
+import { vscodeDark, vscodeDarkInit } from '@uiw/codemirror-theme-vscode';
import {
FullscreenExit as FullscreenExitIcon,
@@ -82,6 +83,7 @@ const pythonFilters = [
{"name": "Handle JSON", "value": `{% python %}\nimport json\njsondata = json.loads(r"""$nodename""")\n{% endpython %}`, "example": ``},
]
+/*
const shuffleTheme = createTheme({
theme: 'dark',
settings: {
@@ -110,6 +112,7 @@ const shuffleTheme = createTheme({
{ tag: t.attributeName, color: '#5c6166' },
],
});
+*/
const CodeEditor = (props) => {
const {
@@ -547,7 +550,6 @@ const CodeEditor = (props) => {
var code_lines = localcodedata.split('\n')
for (var i = 0; i < code_lines.length; i++){
var current_code_line = code_lines[i]
- // console.log(current_code_line)
var variable_occurence = current_code_line.match(/[\\]{0,1}[$]{1}([a-zA-Z0-9_-]+\.?){1}([a-zA-Z0-9#_-]+\.?){0,}/g)
@@ -610,8 +612,7 @@ const CodeEditor = (props) => {
var correctVariable = availableVariables.includes(fixedVariable)
if(!correctVariable) {
value.markText({line:i, ch:dollar_occurence[occ]}, {line:i, ch:dollar_occurence_len[occ]+dollar_occurence[occ]}, {"css": "background-color: rgb(248, 106, 62, 0.9); padding-top: 2px; padding-bottom: 2px; color: white"})
- }
- else{
+ } else {
value.markText({line:i, ch:dollar_occurence[occ]}, {line:i, ch:dollar_occurence_len[occ]+dollar_occurence[occ]}, {"css": "background-color: #8b8e26; padding-top: 2px; padding-bottom: 2px; color: white"})
}
// console.log(correctVariables)
@@ -660,6 +661,23 @@ const CodeEditor = (props) => {
setlocalcodedata(updatedCode)
}
+ const fixStringInput = (new_input) => {
+ // Newline fixes
+ new_input = new_input.replace(/\r\n/g, "\\n")
+ new_input = new_input.replace(/\n/g, "\\n")
+
+ // Quote fixes
+ new_input = new_input.replace(/\\"/g, '"')
+ new_input = new_input.replace(/"/g, '\\"')
+
+ new_input = new_input.replace(/\\'/g, "'")
+ new_input = new_input.replace(/'/g, "\\'")
+
+
+ return new_input
+ }
+
+
const expectedOutput = (input) => {
//const found = input.match(/[$]{1}([a-zA-Z0-9_-]+\.?){1}([a-zA-Z0-9#_-]+\.?){0,}/g)
@@ -674,36 +692,39 @@ const CodeEditor = (props) => {
try {
for (var i = 0; i < found.length; i++) {
try {
- // For found specifically, should replace .#\d with .# with regex
-
-
- //found[i] = found[i].toLowerCase()
const fixedVariable = fixVariable(found[i])
- //var correctVariable = availableVariables.includes(fixedVariable)
-
- //
var valuefound = false
for (var j = 0; j < actionlist.length; j++) {
- if(fixedVariable.slice(1,).toLowerCase() === actionlist[j].autocomplete.toLowerCase()){
- valuefound = true
+ if(fixedVariable.slice(1,).toLowerCase() !== actionlist[j].autocomplete.toLowerCase()){
+ continue
+ }
- try {
- if (typeof actionlist[j].example === "object") {
- input = input.replace(found[i], JSON.stringify(actionlist[j].example), -1);
+ valuefound = true
- } else if (actionlist[j].example.trim().startsWith("{") || actionlist[j].example.trim().startsWith("[")) {
- input = input.replace(found[i], JSON.stringify(actionlist[j].example), -1);
- } else {
- input = input.replace(found[i], actionlist[j].example, -1)
- }
- } catch (e) {
- input = input.replace(found[i], actionlist[j].example, -1)
+ console.log("Here. Checking if we got an example?")
+ try {
+ if (typeof actionlist[j].example === "object") {
+
+ input = input.replace(found[i], JSON.stringify(actionlist[j].example), -1);
+
+ } else if (actionlist[j].example.trim().startsWith("{") || actionlist[j].example.trim().startsWith("[")) {
+ input = input.replace(found[i], JSON.stringify(actionlist[j].example), -1);
+ } else {
+ console.log("This?")
+
+ const newExample = fixStringInput(actionlist[j].example)
+ input = input.replace(found[i], newExample, -1)
}
- } else {
- // Couldn't find the correct example value
+ } catch (e) {
+ input = input.replace(found[i], actionlist[j].example, -1)
}
}
+
+ //if (!valuefound) {
+ // console.log("Couldn't find value "+fixedVariable)
+ //}
+
if (!valuefound && availableVariables.includes(fixedVariable)) {
var shouldbreak = false
for (var k=0; k < actionlist.length; k++){
@@ -714,46 +735,50 @@ const CodeEditor = (props) => {
for (var key in parsedPaths) {
const fullpath = "$"+actionlist[k].autocomplete.toLowerCase()+parsedPaths[key].autocomplete
- if (fullpath === fixedVariable) {
- //if (actionlist[k].example === undefined) {
- // actionlist[k].example = "TMP"
- //}
-
- var new_input = ""
- try {
- new_input = FindJsonPath(fullpath, actionlist[k].example)
- } catch (e) {
- console.log("ERR IN INPUT: ", e)
- }
+ if (fullpath !== fixedVariable) {
+ continue
+ }
+
+ //if (actionlist[k].example === undefined) {
+ // actionlist[k].example = "TMP"
+ //}
+
+ var new_input = ""
+ try {
+ new_input = FindJsonPath(fullpath, actionlist[k].example)
+ } catch (e) {
+ console.log("ERR IN INPUT: ", e)
+ }
+
+ console.log("Got output for: ", fullpath, new_input, actionlist[k].example, typeof new_input)
- //console.log("Got output for: ", fullpath, new_input, actionlist[k].example, typeof new_input)
+ if (typeof new_input === "object") {
+ new_input = JSON.stringify(new_input)
+ } else {
+ if (typeof new_input === "string") {
+ // Check if it contains any newlines, and replace them with raw newlines
+ new_input = fixStringInput(new_input)
- if (typeof new_input === "object") {
- new_input = JSON.stringify(new_input)
+ // Replace quotes with nothing
} else {
- if (typeof new_input === "string") {
- new_input = new_input
- } else {
- console.log("NO TYPE? ", typeof new_input)
- try {
- new_input = new_input.toString()
- } catch (e) {
- new_input = ""
- }
+ console.log("NO TYPE? ", typeof new_input)
+ try {
+ new_input = new_input.toString()
+ } catch (e) {
+ new_input = ""
}
}
+ }
- //console.log("FOUND2: ", fixedVariable, actionlist[j].example)
- input = input.replace(fixedVariable, new_input, -1)
- input = input.replace(found[i], new_input, -1)
+ input = input.replace(fixedVariable, new_input, -1)
+ input = input.replace(found[i], new_input, -1)
- //} catch (e) {
- // input = input.replace(found[i], actionlist[k].example)
- //}
+ //} catch (e) {
+ // input = input.replace(found[i], actionlist[k].example)
+ //}
- shouldbreak = true
- break
- }
+ shouldbreak = true
+ break
}
if (shouldbreak) {
@@ -766,7 +791,7 @@ const CodeEditor = (props) => {
}
}
} catch (e) {
- //console.log("Outer replace error: ", e)
+ console.log("Outer replace error: ", e)
}
}
@@ -895,7 +920,7 @@ const CodeEditor = (props) => {
aria-labelledby="draggable-code-modal"
disableBackdropClick={true}
disableEnforceFocus={true}
- //style={{ pointerEvents: "none" }}
+ //style={{ pointerEvents: "none" }}
hideBackdrop={true}
open={expansionModalOpen}
onClose={() => {
@@ -964,6 +989,7 @@ const CodeEditor = (props) => {
}}
>
+ {/*
{
>
Code Editor
-
{
-
- }}
- >
-
-
-
-
-
-
-
{
- autoFormat(localcodedata)
- }}
- >
-
- {isAiLoading ?
-
- :
-
- }
-
-
-
-
- }
-
-
+ */}
{ isFileEditor ? null :
-
+
{
variant="outlined"
color="secondary"
style={{
- textTransform: "none",
+ textTransform: "none",
width: 100,
}}
onClick={(event) => {
@@ -1143,9 +1119,9 @@ const CodeEditor = (props) => {
variant="outlined"
color="secondary"
style={{
- textTransform: "none",
+ textTransform: "none",
width: 130,
- marginLeft: 170,
+ marginLeft: 20,
}}
onClick={(event) => {
setMenuPosition({
@@ -1393,49 +1369,105 @@ const CodeEditor = (props) => {
}
-
{
+
+ }}
+ >
+
+
+
+
+
+
+ {
+ autoFormat(localcodedata)
+ }}
+ >
+
+ {isAiLoading ?
+
+ :
+
+ }
+
+
+
+
+ }
+
+
+
+
{
- // console.log(value.getCursor())
+ console.log("CURSOR: ", value.getCursor())
setCurrentCharacter(value.getCursor().ch)
setCurrentLine(value.getCursor().line)
// console.log(value.getCursor().ch, value.getCursor().line)
findIndex(value.getCursor().line, value.getCursor().ch)
+
highlight_variables(value)
}}
onChange={(value, viewUpdate) => {
- console.log("Value: ", value, viewUpdate)
setlocalcodedata(value)
expectedOutput(value)
+ highlight_variables(value)
+
//if(value.display.input.prevInput.startsWith('$') || value.display.input.prevInput.endsWith('$')){
// setEditorPopupOpen(true)
//}
}}
- extensions={[]}//indentWithTab]}
- theme={shuffleTheme}
options={{
- styleSelectedText: true,
- keyMap: 'sublime',
mode: validation === true ? "json" : "python",
lineWrapping: linewrap,
+ theme: vscodeDark,
}}
/>
-
+
{/*editorPopupOpen ?
{
-
+
{isFileEditor ? null :
{isMobile ? null :
@@ -1564,14 +1597,29 @@ const CodeEditor = (props) => {
Expected Output
- {
- executeSingleAction(expOutput)
- }}>
-
- {executing ? : }
-
-
-
+
+ {
+ executeSingleAction(expOutput)
+ }}
+ >
+ {executing ?
+
+ :
+ Try it
+ }
+
+
}
@@ -1613,8 +1661,8 @@ const CodeEditor = (props) => {
borderRadius: theme.palette.borderRadius,
maxHeight: 500,
minHeight: 500,
- minWidth: 500,
- maxWidth: 500,
+ minWidth: 580,
+ maxWidth: 580,
overflow: "auto",
whiteSpace: "pre-wrap",
}}
diff --git a/frontend/src/components/WorkflowTemplatePopup.jsx b/frontend/src/components/WorkflowTemplatePopup.jsx
index ff20ad36..7b7e2944 100644
--- a/frontend/src/components/WorkflowTemplatePopup.jsx
+++ b/frontend/src/components/WorkflowTemplatePopup.jsx
@@ -44,7 +44,6 @@ const WorkflowTemplatePopup = (props) => {
const [missingDestination, setMissingDestination] = React.useState(undefined);
useEffect(() => {
- console.log("Source & Dest check:", missingSource, missingDestination)
}, [missingSource, missingDestination])
const isCloud = window.location.host === "localhost:3002" || window.location.host === "shuffler.io";
@@ -392,7 +391,7 @@ const WorkflowTemplatePopup = (props) => {
appFramework={appFramework}
appType={missingSource.type}
- appImage={missingSource.image}
+ AppImage={missingSource.image}
setMissing={setMissingSource}
/>
@@ -406,7 +405,7 @@ const WorkflowTemplatePopup = (props) => {
appFramework={appFramework}
appType={missingDestination.type}
- appImage={missingDestination.image}
+ AppImage={missingDestination.image}
setMissing={setMissingDestination}
/>
@@ -443,7 +442,7 @@ const WorkflowTemplatePopup = (props) => {
if (title.length > maxlength) {
parsedTitle = title.substring(0, maxlength) + "..."
}
- console.log("isHomePage", isHomePage)
+
parsedTitle = parsedTitle.replaceAll("_", " ")
const parsedDescription = description !== undefined && description !== null ? description.replaceAll("_", " ") : ""
diff --git a/frontend/src/defaultCytoscapeStyle.jsx b/frontend/src/defaultCytoscapeStyle.jsx
index 01c389f8..7e7132fd 100644
--- a/frontend/src/defaultCytoscapeStyle.jsx
+++ b/frontend/src/defaultCytoscapeStyle.jsx
@@ -4,11 +4,10 @@ const data = [
css: {
label: "data(label)",
"text-valign": "center",
- "font-family":
- "Segoe UI, Tahoma, Geneva, Verdana, sans-serif, sans-serif",
+ "font-family": "Segoe UI, Tahoma, Geneva, Verdana, sans-serif, sans-serif",
"font-weight": "lighter",
- "margin-right": "10px",
"font-size": "18px",
+ "margin-right": "10px",
width: "80px",
height: "80px",
color: "white",
diff --git a/frontend/src/views/Admin.jsx b/frontend/src/views/Admin.jsx
index 9631fd1e..9a7884b0 100755
--- a/frontend/src/views/Admin.jsx
+++ b/frontend/src/views/Admin.jsx
@@ -16,6 +16,7 @@ import {
OutlinedInput,
Checkbox,
Card,
+ Chip,
Tooltip,
FormControlLabel,
Typography,
@@ -133,7 +134,7 @@ const FileCategoryInput = (props) => {
const Admin = (props) => {
- const { globalUrl, userdata, serverside, checkLogin } = props;
+ const { globalUrl, userdata, serverside, checkLogin, notifications, setNotifications, } = props;
var to_be_copied = "";
const classes = useStyles();
@@ -485,6 +486,14 @@ If you're interested, please let me know a time that works for you, or set up a
return `mailto:${admins}?bcc=frikky@shuffler.io,binu@shuffler.io&subject=${subject}&body=${body}`
}
+
+ const changeDistribution = (data) => {
+ //changeDistributed(data, !isDistributed)
+ console.log("Should change distribution to be shared among suborgs")
+
+ editAuthenticationConfig(data.id, "suborg_distribute")
+ }
+
const deleteAuthentication = (data) => {
toast("Deleting auth " + data.label);
@@ -800,10 +809,10 @@ If you're interested, please let me know a time that works for you, or set up a
});
};
- const editAuthenticationConfig = (id) => {
+ const editAuthenticationConfig = (id, parentAction) => {
const data = {
id: id,
- action: "assign_everywhere",
+ action: parentAction !== undefined && parentAction !== null ? parentAction : "assign_everywhere",
};
const url = globalUrl + "/api/v1/apps/authentication/" + id + "/config";
@@ -821,9 +830,9 @@ If you're interested, please let me know a time that works for you, or set up a
.then((response) =>
response.json().then((responseJson) => {
if (responseJson["success"] === false) {
- toast("Failed overwriting appauth in workflows");
+ toast("Failed overwriting appauth");
} else {
- toast("Successfully updated auth everywhere!");
+ toast("Successfully updated auth!");
setSelectedUserModalOpen(false);
setTimeout(() => {
getAppAuthentication();
@@ -1732,7 +1741,7 @@ If you're interested, please let me know a time that works for you, or set up a
const userId = user.id;
const data = { user_id: userId };
- console.log(user, userdata)
+ toast("Generating new API key")
var fetchdata = {
method: "POST",
@@ -2827,6 +2836,8 @@ If you're interested, please let me know a time that works for you, or set up a
checkLogin={checkLogin}
setAdminTab={setAdminTab}
setCurTab={setCurTab}
+ notifications={notifications}
+ setNotifications={setNotifications}
/>
: adminTab === 3 ?
+
{schedules === undefined || schedules === null
? null
@@ -3658,10 +3670,13 @@ If you're interested, please let me know a time that works for you, or set up a
style={{ minWidth: 125, maxWidth: 125, overflow: "hidden" }}
/>
-
+
+
{authentication === undefined || authentication === null
? null
@@ -3693,6 +3708,8 @@ If you're interested, please let me know a time that works for you, or set up a
];
}
+ const isDistributed = data.suborg_distributed === true ? true : false;
+
return (
{
updateAppAuthentication(data);
}}
+ disabled={data.org_id !== selectedOrganization.id ? true : false}
>
-
+
{data.defined ? (
{
editAuthenticationConfig(data.id);
}}
>
@@ -3803,23 +3821,54 @@ If you're interested, please let me know a time that works for you, or set up a
placement="top"
>
{}}
+ disabled={data.org_id !== selectedOrganization.id ? true : false}
>
)}
{
deleteAuthentication(data);
}}
>
-
+
+
+ {selectedOrganization.id !== undefined && data.org_id !== selectedOrganization.id ?
+
+
+
+ :
+
+ {
+ changeDistribution(data, !isDistributed)
+ }}
+ />
+
+ }
+
);
})}
@@ -3936,22 +3985,22 @@ If you're interested, please let me know a time that works for you, or set up a
primary="Type"
style={{ minWidth: 125, maxWidth: 125 }}
/>
+
-
{environments === undefined || environments === null
? null
@@ -3969,18 +4018,20 @@ If you're interested, please let me know a time that works for you, or set up a
bgColor = "#1f2023";
}
- // Check if there's a notification for it in userdata.priorities
- var showCPUAlert = false
- var foundIndex = -1
- if (userdata !== undefined && userdata !== null && userdata.priorities !== undefined && userdata.priorities !== null && userdata.priorities.length > 0) {
- foundIndex = userdata.priorities.findIndex(prio => prio.name.includes("CPU") && prio.active === true)
+ // Check if there's a notification for it in userdata.priorities
+ var showCPUAlert = false
+ var foundIndex = -1
+ if (userdata !== undefined && userdata !== null && userdata.priorities !== undefined && userdata.priorities !== null && userdata.priorities.length > 0) {
+ foundIndex = userdata.priorities.findIndex(prio => prio.name.includes("CPU") && prio.active === true)
+
+ if (foundIndex >= 0 && userdata.priorities[foundIndex].name.endsWith(environment.Name)) {
+ showCPUAlert = true
+ }
+ }
- if (foundIndex >= 0 && userdata.priorities[foundIndex].name.endsWith(environment.Name)) {
- showCPUAlert = true
- }
- }
+ //console.log("Show CPU alert: ", showCPUAlert)
- console.log("Show CPU alert: ", showCPUAlert)
+ const queueSize = environment.queue !== undefined && environment.queue !== null ? environment.queue < 0 ? 0 : environment.queue > 99 ? ">99" : environment.queue : 0
return (
@@ -4016,49 +4067,56 @@ If you're interested, please let me know a time that works for you, or set up a
- {
- if (environment.Type === "cloud") {
- toast("No Orborus necessary for environment cloud. Create and use a different environment to run executions on-premises.")
- return
- }
-
- const elementName = "copy_element_shuffle";
- const auth = environment.auth === "" ? 'cb5st3d3Z!3X3zaJ*Pc' : environment.auth
- const commandData = `docker run --volume "/var/run/docker.sock:/var/run/docker.sock" -e ENVIRONMENT_NAME="${environment.Name}" -e 'AUTH=${auth}' -e ORG="${props.userdata.active_org.id}" -e DOCKER_API_VERSION=1.40 -e BASE_URL="${globalUrl}" --name="shuffle-orborus" -d ghcr.io/shuffle/shuffle-orborus:latest`
- var copyText = document.getElementById(elementName);
- if (copyText !== null && copyText !== undefined) {
- const clipboard = navigator.clipboard;
- if (clipboard === undefined) {
- toast("Can only copy over HTTPS (port 3443)");
- return;
- }
-
- navigator.clipboard.writeText(commandData);
- copyText.select();
- copyText.setSelectionRange(
- 0,
- 99999
- ); /* For mobile devices */
-
- /* Copy the text inside the text field */
- document.execCommand("copy");
-
- toast("Orborus command copied to clipboard");
- }
- }}
- >
-
-
-
- }
+
+ {
+ if (environment.Type === "cloud") {
+ toast("No Orborus necessary for environment cloud. Create and use a different environment to run executions on-premises.")
+ return
+ }
+
+ if (props.userdata.active_org === undefined || props.userdata.active_org === null) {
+ toast("No active organization yet. Are you logged in?")
+ return
+ }
+
+ const elementName = "copy_element_shuffle";
+ const auth = environment.auth === "" ? 'cb5st3d3Z!3X3zaJ*Pc' : environment.auth
+ const newUrl = globalUrl === "https://shuffler.io" ? "https://shuffle-backend-stbuwivzoq-nw.a.run.app" : globalUrl
+
+ const commandData = `docker run --volume "/var/run/docker.sock:/var/run/docker.sock" -e ENVIRONMENT_NAME="${environment.Name}" -e 'AUTH=${auth}' -e ORG="${props.userdata.active_org.id}" -e DOCKER_API_VERSION=1.40 -e BASE_URL="${newUrl}" --name="shuffle-orborus" -d ghcr.io/shuffle/shuffle-orborus:latest`
+ var copyText = document.getElementById(elementName);
+ if (copyText !== null && copyText !== undefined) {
+ const clipboard = navigator.clipboard;
+ if (clipboard === undefined) {
+ toast("Can only copy over HTTPS (port 3443)");
+ return;
+ }
+
+ navigator.clipboard.writeText(commandData);
+ copyText.select();
+ copyText.setSelectionRange(
+ 0,
+ 99999
+ ); /* For mobile devices */
+
+ /* Copy the text inside the text field */
+ document.execCommand("copy");
+
+ toast("Orborus command copied to clipboard");
+ }
+ }}
+ >
+
+
+
+ }
/>
+ setDefaultEnvironment(environment)}
color="primary"
>
- Make default
+ Set Default
)}
+ >
+
+
+ deleteEnvironment(environment)}
+ color="primary"
+ >
+ {environment.archived ? "Activate" : "Disable"}
+
+ {
+ console.log("Should clear executions for: ", environment);
+
+ if (isCloud && environment.Name.toLowerCase() === "cloud") {
+ rerunCloudWorkflows(environment);
+ } else {
+ abortEnvironmentWorkflows(environment);
+ }
+ }}
+ color="primary"
+ >
+ {isCloud && environment.Name.toLowerCase() === "cloud" ? "Rerun" : "Clear"}
+
+
+
+
+
-
-
-
- deleteEnvironment(environment)}
- color="primary"
- >
- {environment.archived ? "Activate" : "Disable"}
-
- {
- console.log("Should clear executions for: ", environment);
-
- if (isCloud && environment.Name.toLowerCase() === "cloud") {
- rerunCloudWorkflows(environment);
- } else {
- abortEnvironmentWorkflows(environment);
- }
- }}
- color="primary"
- >
- {isCloud && environment.Name.toLowerCase() === "cloud" ? "Rerun" : "Clear"}
-
-
-
-
- {showCPUAlert === false ? null :
+ {showCPUAlert === false ? null :
-
-
-
- 90% CPU the server(s) hosting the Shuffle App Runner (Orborus) was found.
-
-
- Need help with High Availability and Scale? Read documentation and Get in touch .
-
-
-
- {
- // dismiss -> get envs
- changeRecommendation(userdata.priorities[foundIndex], "dismiss")
- }}>
- Dismiss
-
-
-
-
- }
-
+
+
+
+ 90% CPU the server(s) hosting the Shuffle App Runner (Orborus) was found.
+
+
+ Need help with High Availability and Scale? Read documentation and Get in touch .
+
+
+
+ {
+ // dismiss -> get envs
+ changeRecommendation(userdata.priorities[foundIndex], "dismiss")
+ }}>
+ Dismiss
+
+
+
+
+ }
+
);
})}
diff --git a/frontend/src/views/AngularWorkflow.jsx b/frontend/src/views/AngularWorkflow.jsx
index 8133d485..53a43232 100755
--- a/frontend/src/views/AngularWorkflow.jsx
+++ b/frontend/src/views/AngularWorkflow.jsx
@@ -11,7 +11,7 @@ import { useNavigate, Link, useParams } from "react-router-dom";
import { useBeforeunload } from "react-beforeunload";
import ReactJson from "react-json-view";
import { NestedMenuItem } from 'mui-nested-menu';
-import ReactMarkdown from "react-markdown";
+import Markdown from "react-markdown";
//import { useAlert
import { ToastContainer, toast } from "react-toastify"
import { isMobile } from "react-device-detect"
@@ -71,6 +71,7 @@ import {
import {
Folder as FolderIcon,
+ Insights as InsightsIcon,
LibraryBooks as LibraryBooksIcon,
OpenInNew as OpenInNewIcon,
Undo as UndoIcon,
@@ -112,6 +113,7 @@ import {
AutoFixHigh as AutoFixHighIcon,
Polyline as PolylineIcon,
QueryStats as QueryStatsIcon,
+ AutoAwesome as AutoAwesomeIcon,
} from "@mui/icons-material";
@@ -395,9 +397,10 @@ const AngularWorkflow = (defaultprops) => {
var to_be_copied = "";
const [firstrequest, setFirstrequest] = React.useState(true);
const [cystyle] = useState(cytoscapestyle);
+
const [cy, setCy] = React.useState();
- const [toolsApp, setToolsApp] = React.useState({});
+ const [toolsApp, setToolsApp] = React.useState({});
const [currentView, setCurrentView] = React.useState(0);
const [triggerAuthentication, setTriggerAuthentication] = React.useState({});
const [triggerFolders, setTriggerFolders] = React.useState([]);
@@ -437,6 +440,7 @@ const AngularWorkflow = (defaultprops) => {
const [appAuthentication, setAppAuthentication] = React.useState(undefined);
const [variablesModalOpen, setVariablesModalOpen] = React.useState(false);
+ const [aiQueryModalOpen, setAiQueryModalOpen] = React.useState(false)
const [executionVariablesModalOpen, setExecutionVariablesModalOpen] =
React.useState(false);
const [authenticationModalOpen, setAuthenticationModalOpen] = React.useState(false);
@@ -500,6 +504,7 @@ const AngularWorkflow = (defaultprops) => {
const [selectedAction, setSelectedAction] = React.useState({});
const [selectedActionEnvironment, setSelectedActionEnvironment] = React.useState({});
+ const [streamDisabled, setStreamDisabled] = React.useState(false);
const [executionRequest, setExecutionRequest] = React.useState({});
const [executionRunning, setExecutionRunning] = React.useState(false);
@@ -550,10 +555,18 @@ const AngularWorkflow = (defaultprops) => {
props.userdata.active_org !== undefined
? props.userdata.active_org.cloud_sync === true
: false;
- const isCloud =
- window.location.host === "localhost:3002" ||
- window.location.host === "shuffler.io";
+ const isCloud = window.location.host === "localhost:3002" || window.location.host === "shuffler.io";
+ useEffect(() => {
+ return () => {
+ console.log("UNMOUNTING USER!")
+ sendStreamRequest({
+ "item": "workflow",
+ "type": "leave",
+ "id": workflow.id,
+ })
+ }
+ }, [])
/*
useEffect(() => {
console.log("In useeffect for workflow: ", workflow)
@@ -582,17 +595,22 @@ const AngularWorkflow = (defaultprops) => {
const [elements, setElements] = useState([]);
const [loopRunning, setLoopRunning] = useState(false)
+ var loopRunning2 = loopRunning
const stop = () => {
setLoopRunning(false)
+ loopRunning2 = false
}
const start = () => {
setLoopRunning(true)
+ loopRunning2 = true
}
useEffect(() => {
- //console.log("In useeffect for loopRunning: ", loopRunning)
- if (loopRunning) {
+ // Current variable + future state controlled
+ // This is so that the loop can stop itself as well
+ console.log("In useeffect for loopRunning: ", loopRunning, loopRunning2)
+ if (loopRunning && loopRunning2) {
const intervalId = setInterval(() => {
if (!loopRunning) {
clearInterval(intervalId);
@@ -634,7 +652,11 @@ const AngularWorkflow = (defaultprops) => {
if (responseJson.success === true) {
if (responseJson.reason !== undefined && responseJson.reason !== undefined && responseJson.reason.length > 0) {
if (!responseJson.reason.includes("404: Not Found") && responseJson.reason.length > 25) {
- selectedApp.documentation = responseJson.reason
+ // Translate
into markdown ![]()
+ const imgRegex = /
{
});
};
- const setNewAppAuth = (appAuthData) => {
+ const setNewAppAuth = (appAuthData, refresh) => {
fetch(globalUrl + "/api/v1/apps/authentication", {
method: "PUT",
headers: {
@@ -932,9 +954,14 @@ const AngularWorkflow = (defaultprops) => {
})
.then((responseJson) => {
if (!responseJson.success) {
- toast("Failed to set app auth: " + responseJson.reason);
+ toast("Error: " + responseJson.reason);
} else {
- getAppAuthentication(true, false);
+ if (refresh === true) {
+ getAppAuthentication(true, true, true);
+ } else {
+ getAppAuthentication(true, false);
+ }
+
setAuthenticationModalOpen(false);
// Needs a refresh with the new authentication..
@@ -964,20 +991,20 @@ const AngularWorkflow = (defaultprops) => {
return response.json();
})
.then((responseJson) => {
- if (responseJson !== undefined && responseJson !== null && responseJson.executions !== undefined && responseJson.executions !== null && responseJson.executions.length > 0) {
+ console.log("GOT A RESPONSE??")
+ if (responseJson !== undefined && responseJson !== null && responseJson.executions !== undefined && responseJson.executions !== null) {
// - means it's opposite
const newkeys = sortByKey(responseJson.executions, "-started_at");
setWorkflowExecutions(newkeys);
const cursearch = typeof window === "undefined" || window.location === undefined ? "" : window.location.search;
-
var tmpView = new URLSearchParams(cursearch).get("execution_id");
if (execution_id !== undefined && execution_id !== null && execution_id.length > 0 && (tmpView === undefined || tmpView === null || tmpView.length === 0)) {
tmpView = execution_id;
}
- console.log("TMPVIEW: ", tmpView);
+ console.log("EXECUTION ID: ", tmpView)
// Compare with currently selected item
if (tmpView !== undefined && tmpView !== null && tmpView.length > 0) {
@@ -1000,16 +1027,15 @@ const AngularWorkflow = (defaultprops) => {
}
setExecutionModalView(1);
- start();
-
setExecutionRequest({
execution_id: execution.execution_id,
authorization: execution.authorization,
});
- const newitem = removeParam("execution_id", cursearch);
- navigate(curpath + newitem)
- //props.history.push(curpath + newitem);
+ start();
+
+ //const newitem = removeParam("execution_id", cursearch);
+ //navigate(curpath + newitem)
} else {
console.log("Couldn't find execution for execution ID. Retrying as user to get ", tmpView)
@@ -1018,19 +1044,33 @@ const AngularWorkflow = (defaultprops) => {
execution_id: tmpView,
//authorization: data.authorization,
}
+ setExecutionRunning(true);
setExecutionModalView(1);
setExecutionRequest(cur_execution);
start();
- const newitem = removeParam("execution_id", cursearch);
- navigate(curpath + newitem)
-
- setTimeout(() => {
- stop()
- }, 5000);
+ //const newitem = removeParam("execution_id", cursearch);
+ //navigate(curpath + newitem)
+ //setTimeout(() => {
+ // stop()
+ //}, 5000);
}
}
- }
+ } else {
+ const cursearch = typeof window === "undefined" || window.location === undefined ? "" : window.location.search;
+ var tmpView = new URLSearchParams(cursearch).get("execution_id");
+ console.log("Alertnative execution id check: ", tmpView)
+
+ if (tmpView === undefined || tmpView === null || tmpView.length === 0) {
+ const execution_id = tmpView;
+ setExecutionModalView(1);
+ setExecutionRequest({
+ execution_id: execution_id,
+ });
+
+ start()
+ }
+ }
})
.catch((error) => {
//toast(error.toString());
@@ -1051,8 +1091,14 @@ const AngularWorkflow = (defaultprops) => {
})
.then((response) => {
if (response.status !== 200) {
- console.log("Status not 200 for stream results :O!");
stop();
+ setExecutionModalView(0);
+ toast("Failed loading the workflow run")
+ console.log("Status not 200 for stream results :O!");
+
+ //const cursearch = typeof window === "undefined" || window.location === undefined ? "" : window.location.search;
+ //const newitem = removeParam("execution_id", cursearch);
+ //navigate(curpath + newitem)
}
return response.json();
@@ -1231,10 +1277,11 @@ const AngularWorkflow = (defaultprops) => {
// Controls the colors and direction of execution results.
// Style is in defaultCytoscapeStyle.js
const handleUpdateResults = (responseJson, executionRequest) => {
- if (responseJson === undefined || responseJson === null || responseJson.success === false) {
- return
- }
- //console.log(responseJson)
+ if (responseJson === undefined || responseJson === null || responseJson.success === false) {
+ stop()
+ return
+ }
+//console.log(responseJson)
// Loop nodes and find results
// Update on every interval? idk
@@ -1247,7 +1294,8 @@ const AngularWorkflow = (defaultprops) => {
//console.log("Updating data!")
setExecutionData(responseJson)
} else {
- if (responseJson.status === "ABORTED" || responseJson.status === "STOPPED" || responseJson.status === "FAILURE" || responseJson.status === "WAITING") {
+ if (responseJson.status === "ABORTED" || responseJson.status === "STOPPED" || responseJson.status === "FAILURE" || responseJson.status === "WAITING" || responseJson.status === "FINISHED") {
+ console.log("DONE!")
stop()
}
@@ -1305,14 +1353,31 @@ const AngularWorkflow = (defaultprops) => {
})
};
+ var streamDisabled2 = false
const sendStreamRequest = (body) => {
//console.log("Stream not activated yet.")
- return
+ if (!isCloud) {
+ console.log("Stream not activated yet for onprem")
+ return
+ }
+
+ if (streamDisabled) {
+ console.log("Stream disabled")
+ return
+ }
+
// Session may be important here huh
body.user_id = userdata.id
- fetch(`${globalUrl}/api/v1/workflows/${props.match.params.key}/stream`, {
+ //const url = ${globalUrl}/api/v1/workflows/${props.match.params.key}/stream
+ //const streamUrl = "http://localhost:5002"
+
+ console.log("Stream request: ", body)
+ const streamUrl = "https://stream.shuffler.io"
+ const url = `${streamUrl}/api/v1/workflows/${props.match.params.key}/stream`
+
+ fetch(url, {
method: "POST",
headers: {
"Content-Type": "application/json",
@@ -1324,6 +1389,9 @@ const AngularWorkflow = (defaultprops) => {
.then((response) => {
setSavingState(0);
if (response.status !== 200) {
+
+ setStreamDisabled(true)
+ streamDisabled2 = true
//console.log("Status not 200 for stream :O!");
}
@@ -1334,7 +1402,8 @@ const AngularWorkflow = (defaultprops) => {
})
.catch((error) => {
console.log("Stream send error: ", error.toString())
- //toast(error.toString());
+ setStreamDisabled(true)
+ streamDisabled2 = true
})
}
@@ -1668,7 +1737,7 @@ const AngularWorkflow = (defaultprops) => {
if (hasSaved === false) {
setExecutionRequestStarted(true);
saveWorkflow(workflow, executionArgument, startNode);
- console.log("FIXME: Might have forgotten to save before executing.");
+ //console.log("FIXME: Might have forgotten to save before executing.");
return;
}
@@ -1869,6 +1938,7 @@ const AngularWorkflow = (defaultprops) => {
setSelectedAction(selectedAction);
setWorkflow(workflow);
saveWorkflow(workflow);
+
toast("Added and updated authentication!");
shouldClose = true
} else {
@@ -2081,7 +2151,9 @@ const AngularWorkflow = (defaultprops) => {
}
const onChunkedResponseError = (err) => {
- console.error(err)
+ if (streamDisabled) {
+ return
+ }
}
@@ -2502,6 +2574,17 @@ const AngularWorkflow = (defaultprops) => {
try {
var chunkJson = JSON.parse(chunk)
+ if (chunkJson.success === false) {
+ console.log("Chunk failed: ", chunkJson)
+
+ if (!streamDisabled) {
+ setStreamDisabled(true)
+ streamDisabled2 = true
+ }
+ return
+ }
+
+
if (chunkJson.item !== undefined && chunkJson.item !== null && chunkJson.item !== "") {
if (chunkJson.item === "node") {
if (chunkJson.type === "move") {
@@ -2526,6 +2609,13 @@ const AngularWorkflow = (defaultprops) => {
}
} catch (e) {
console.log("Chunk JSON error: ", e)
+
+ if (!streamDisabled) {
+ setStreamDisabled(true)
+ streamDisabled2 = true
+ }
+
+ return
}
//data.push(chunk)
@@ -2567,14 +2657,30 @@ const AngularWorkflow = (defaultprops) => {
}
const startWorkflowStream = async (workflowId) => {
- const timeout = 60000
+ if (!isCloud) {
+ console.log("Not cloud, not starting workflow stream")
+ return
+ }
- return
-
+ if (streamDisabled) {
+ console.log("Stream disabled")
+ return
+ }
+
+ const timeout = 60000
+ //const url = `${globalUrl}/api/v1/workflows/${workflowId}/stream`
+ //const streamUrl = "https://shuffle-streaming-backend-stbuwivzoq-ew.a.run.app"
+ //
+ const streamUrl = "https://stream.shuffler.io"
+ const url = `${streamUrl}/api/v1/workflows/${workflowId}/stream`
while (true) {
+ if (streamDisabled === true || streamDisabled2 === true) {
+ break
+ }
+
// Wait 1 second before next request just in case of timeouts
await new Promise(r => setTimeout(r, 1000));
- await fetchWithTimeout(`${globalUrl}/api/v1/workflows/${workflowId}/stream`, {
+ await fetchWithTimeout(url, {
method: "GET",
headers: {
"Content-Type": "application/json",
@@ -2970,7 +3076,7 @@ const AngularWorkflow = (defaultprops) => {
sendStreamRequest({
"item": "node",
"type": "unselect",
- "userid": userdata.id,
+ "id": workflow.id,
})
//}, 150)
};
@@ -3782,6 +3888,8 @@ const AngularWorkflow = (defaultprops) => {
//}
curaction.app_id = curapp.id
+ console.log("CURAPP: ", curapp.authentication)
+
setAuthenticationType(
curapp.authentication.type === "oauth2" && curapp.authentication.redirect_uri !== undefined && curapp.authentication.redirect_uri !== null ? {
type: "oauth2",
@@ -3791,6 +3899,7 @@ const AngularWorkflow = (defaultprops) => {
scope: curapp.authentication.scope,
client_id: curapp.authentication.client_id,
client_secret: curapp.authentication.client_secret,
+ grant_type: curapp.authentication.grant_type,
} : {
type: "",
}
@@ -4017,7 +4126,6 @@ const AngularWorkflow = (defaultprops) => {
"item": "node",
"type": "select",
"id": data.id,
- "userid": userdata.id,
"location": {
"x": event.target.position("x"),
"y": event.target.position("y"),
@@ -5931,10 +6039,10 @@ const AngularWorkflow = (defaultprops) => {
const xParsed = destinationnodePosition.x - sourcenodePosition.x
const yParsed = destinationnodePosition.y - sourcenodePosition.y
- const z = Math.sqrt(xParsed * xParsed + yParsed * yParsed);
- const costheta = xParsed / z;
- const alpha = 0.25;
- var controlPointDistance = [-alpha * yParsed * costheta, alpha * yParsed * costheta];
+ const z = Math.sqrt(xParsed * xParsed + yParsed * yParsed)
+ const costheta = xParsed / z
+ const alpha = 0.3
+ var controlPointDistance = [-alpha * yParsed * costheta, alpha * yParsed * costheta]
var controlPointWeight = [alpha, 1 - alpha]
//'control-point-weight': ['0.33', '0.66'],
@@ -6097,8 +6205,18 @@ const AngularWorkflow = (defaultprops) => {
const foundtriggers = inputworkflow.triggers.map((trigger) => {
const node = {};
node.position = trigger.position;
- node.data = trigger;
+ if (trigger.large_image === undefined || trigger.large_image === null || trigger.large_image.length === 0) {
+
+ // Search triggers array for it where the name is matching and set image
+ var foundTrigger = triggers.find((t) => t.name === trigger.name)
+ if (foundTrigger !== undefined && foundTrigger !== null) {
+ console.log("Autofilled missing trigger image")
+ trigger.large_image = foundTrigger.large_image
+ }
+ }
+
+ node.data = trigger;
node.data._id = trigger["id"];
node.data.id = trigger["id"];
node.data.type = "TRIGGER";
@@ -6255,7 +6373,6 @@ const AngularWorkflow = (defaultprops) => {
}
insertedNodes = insertedNodes.concat(newedges);
-
setWorkflow(inputworkflow);
// Reset view for cytoscape
@@ -6265,6 +6382,8 @@ const AngularWorkflow = (defaultprops) => {
} else {
setElements(insertedNodes);
}
+
+ console.log("Setupgraph done 2!")
};
const removeNode = (nodeId) => {
@@ -6339,11 +6458,14 @@ const AngularWorkflow = (defaultprops) => {
sendStreamRequest({
"item": "workflow",
"type": "enter",
- "userid": userdata.id,
+ "id": workflow.id,
})
}
const fetchRecommendations = (inputWorkflow) => {
+ console.log("Disabled recommendations as they were too inaccurate")
+ return
+
const parsedWorkflow = JSON.parse(JSON.stringify(inputWorkflow))
fetch(globalUrl + "/api/v1/workflows/recommend", {
@@ -6442,6 +6564,11 @@ const AngularWorkflow = (defaultprops) => {
return response.json();
})
.then((responseJson) => {
+ if (responseJson === null) {
+ console.log("No revisions found")
+ return
+ }
+
if (responseJson.success === false) {
console.log("Error getting workflow revisions: ", responseJson)
return
@@ -6495,13 +6622,16 @@ const AngularWorkflow = (defaultprops) => {
}
// App length necessary cus of cy initialization
- if (elements.length === 0 && workflow.actions !== undefined && !graphSetup && Object.getOwnPropertyNames(workflow).length > 0 && workflowRecommendations !== undefined) {
+ // Not using recommendations, so skipping this for now
+ //if (elements.length === 0 && workflow.actions !== undefined && !graphSetup && Object.getOwnPropertyNames(workflow).length > 0 && workflowRecommendations !== undefined) {
+ if (elements.length === 0 && workflow.actions !== undefined && !graphSetup && Object.getOwnPropertyNames(workflow).length > 0) {
setGraphSetup(true);
setupGraph(workflow);
console.log("In graph setup")
// 2nd load - configures cytoscape
- } else if (!established && cy !== undefined && ((apps !== null && apps !== undefined && apps.length > 0) || workflow.public === true) && Object.getOwnPropertyNames(workflow).length > 0 && appAuthentication !== undefined && workflowRecommendations !== undefined) {
+ //} else if (!established && cy !== undefined && ((apps !== null && apps !== undefined && apps.length > 0) || workflow.public === true) && Object.getOwnPropertyNames(workflow).length > 0 && appAuthentication !== undefined && workflowRecommendations !== undefined) {
+ } else if (!established && cy !== undefined && ((apps !== null && apps !== undefined && apps.length > 0) || workflow.public === true) && Object.getOwnPropertyNames(workflow).length > 0 && appAuthentication !== undefined) {
console.log("In POST graph setup!")
@@ -6567,6 +6697,7 @@ const AngularWorkflow = (defaultprops) => {
}
// preview: true,
+ console.log("In POST graph setup 2")
cy.fit(null, 200);
cy.on("boxselect", "node", (e) => {
@@ -6615,6 +6746,7 @@ const AngularWorkflow = (defaultprops) => {
document.title = "Workflow - " + workflow.name;
+ console.log("In POST graph setup 3")
startWorkflowStream(props.match.params.key);
registerKeys();
@@ -7507,14 +7639,30 @@ const AngularWorkflow = (defaultprops) => {
description = app.actions[actionIndex].description
}
- const parsedEnvironments =
+ var parsedEnvironments =
environments === null || environments === []
? "cloud"
: environments[defaultEnvironmentIndex] === undefined
? "cloud"
: environments[defaultEnvironmentIndex].Name;
- // activated: app.generated === true ? app.activated === false ? false : true : true,
+ // List other nodes in the workflow and see if they have an environment set. If they do, use that as the default
+ if (cy !== undefined && cy !== null) {
+ const foundnodes = cy.nodes().jsons()
+ if (foundnodes !== undefined && foundnodes !== null && foundnodes.length > 0) {
+ // As they should all be the same, this is just an override
+ for (let nodekey in foundnodes) {
+ const curnode = foundnodes[nodekey]
+ if (curnode.data.environment !== undefined && curnode.data.environment !== null && curnode.data.environment.length > 0) {
+ console.log("Found environment: ", curnode.data.environment)
+ parsedEnvironments = curnode.data.environment
+ break
+ }
+ }
+ }
+ }
+
+ console.log("Discovered environment: ", parsedEnvironments)
const newAppData = {
name: app.actions[actionIndex].name,
label: actionLabel,
@@ -8130,6 +8278,25 @@ const AngularWorkflow = (defaultprops) => {
)
})}
+ {visibleApps.length <= 4 ? (
+
{
+ }}
+ >
+
+ Click one of the relevant public apps below to Activate it for your organization.
+
+ {
+ console.log("CLICKED")
+ }}>
+
+
+
+
+
+
+ ) : null}
) : apps.length > 0 ? (
{
}}
>
- Couldn't find the app you're looking for? Searching unactivated apps. Click one of the below apps to Activate it for your organization.
+ Couldn't find the apps you were looking for? Searching unactivated apps. Click one of the below apps to Activate it for your organization.
{
console.log("CLICKED")
@@ -8355,7 +8522,6 @@ const AngularWorkflow = (defaultprops) => {
}
}
- console.log("NEW ACTION: ", newSelectedAction);
setSelectedAction(newSelectedAction);
setUpdate(Math.random());
@@ -8992,6 +9158,33 @@ const AngularWorkflow = (defaultprops) => {
backgroundColor: theme.palette.inputColor,
};
+ const aiQueryModal =
+ {
+ }}
+ >
+
+ Condition
+
+
+
+
const conditionsModal = (
{
style={{ marginTop: 10 }}
label={Wait for results
}
/>
-
-
+
{
}}
onChange={(event, newValue) => {
// Workaround with event lol
- console.log(event, newValue)
+ console.log("CHANGE: ", event, newValue)
if (newValue !== undefined && newValue !== null) {
var parsedvalue = JSON.parse(JSON.stringify(newValue))
parsedvalue.actions = []
@@ -11678,6 +11863,7 @@ const AngularWorkflow = (defaultprops) => {
>
{
+ console.log("CLICK: ", app)
const newValue = app
if (newValue !== undefined && newValue !== null) {
@@ -11881,7 +12067,7 @@ const AngularWorkflow = (defaultprops) => {
workflow.triggers[selectedTriggerIndex].parameters[0].value
}
color="primary"
- placeholder="defaultValue"
+ placeholder="10"
onBlur={(e) => {
setTriggerCronWrapper(e.target.value);
}}
@@ -12349,7 +12535,7 @@ const AngularWorkflow = (defaultprops) => {
// email,sms,app ...
workflow.triggers[selectedTriggerIndex].parameters[2] = {
name: "type",
- value: "email",
+ value: "subflow",
};
workflow.triggers[selectedTriggerIndex].parameters[3] = {
@@ -12430,16 +12616,7 @@ const AngularWorkflow = (defaultprops) => {
/>
*/}
-
-
-
Parameters
+
{
display: "flex",
}}
>
-
Information
+
+ The information you want to show the user. Supports variables.
+
{
display: "flex",
}}
>
-
- Contact options
+ Input options
+
+ Use subflows to connect to any app you want, or use the default email and sms options
+
{
{
+ setTriggerOptionsWrapper("subflow");
+ }}
+ color="primary"
+ value="subflow"
+ />
+ }
+ label={Subflow
}
+ />
+ {
0 && workflow.triggers[selectedTriggerIndex].parameters[2] !== undefined && workflow.triggers[selectedTriggerIndex].parameters[2].value !== undefined ? workflow.triggers[selectedTriggerIndex].parameters[2].value.includes("sms") : false}
onChange={() => {
setTriggerOptionsWrapper("sms");
@@ -12534,21 +12714,8 @@ const AngularWorkflow = (defaultprops) => {
}
label={SMS
}
/>
- {
- setTriggerOptionsWrapper("subflow");
- }}
- color="primary"
- value="subflow"
- />
- }
- label={Subflow
}
- />
- {workflow.triggers[selectedTriggerIndex].parameters[2] !== undefined && workflow.triggers[selectedTriggerIndex].parameters[2].value.includes("subflow") ? (
+ {workflow.triggers[selectedTriggerIndex].parameters[2] !== undefined && workflow.triggers[selectedTriggerIndex].parameters[2].value.includes("subflow") ? (
{workflows === undefined ||
workflows === null ||
@@ -12654,7 +12821,7 @@ const AngularWorkflow = (defaultprops) => {
},
}}
fullWidth
- label="Email"
+ label="Email"
color="primary"
required
placeholder={"mail1@company.com,mail2@company.com"}
@@ -13250,10 +13417,6 @@ const AngularWorkflow = (defaultprops) => {
"user": "Anonymous",
"user_id": "user_id",
"color": "blue",
- }, {
- "user": "frikky",
- "user_id": "user_id",
- "color": "red",
}]
@@ -13309,18 +13472,18 @@ const AngularWorkflow = (defaultprops) => {
const showErrors = !isMobile && !workflow.public && workflow.errors !== undefined && workflow.errors !== null && workflow.errors.length > 0 ?
-
- {workflow.errors.length} Potential Workflow Issue{workflow.errors.length > 1 ? "s" : ""}
+
+ {workflow.errors.length} Workflow Issue{workflow.errors.length > 1 ? "s" : ""}
{
expansionModalOpen={codeEditorModalOpen}
setExpansionModalOpen={setCodeEditorModalOpen}
setEditorData={setEditorData}
+ setAiQueryModalOpen={setAiQueryModalOpen}
/>
} else if (Object.getOwnPropertyNames(selectedComment).length > 0) {
@@ -14313,6 +14477,13 @@ const AngularWorkflow = (defaultprops) => {
style={{ width: size, height: size }}
/>
);
+ } else if (execution.execution_source === "ShuffleGPT") {
+ return (
+
+ );
}
if (
@@ -14917,7 +15088,10 @@ const AngularWorkflow = (defaultprops) => {
marginTop: "auto",
marginBottom: "auto",
}}
- onClick={() => { }}
+ onClick={() => {
+ setExecutionRunning(false);
+ stop()
+ }}
>
@@ -14927,6 +15101,8 @@ const AngularWorkflow = (defaultprops) => {
const cursearch = typeof window === "undefined" || window.location === undefined ? "" : window.location.search;
const newitem = removeParam("execution_id", cursearch);
navigate(curpath + newitem)
+ setExecutionRunning(false);
+ stop()
}}
>
See more runs
@@ -14991,7 +15167,40 @@ const AngularWorkflow = (defaultprops) => {
) : null}
+ {isCloud ? (
+
+
+ {
+ window.open(`/api/v1/workflows/search/${executionData.execution_id}`, "_blank")
+ }}
+ >
+
+
+
+
+ ) : null}
+ {executionData.workflow !== undefined && executionData.workflow !== null && executionData.workflow.actions !== undefined && executionData.workflow.actions !== null && executionData.workflow.actions.length > 0 && executionData.workflow.actions[0].environment !== "Cloud" ?
+
+
+ Env
+
+ {
+ window.open("/admin?tab=environments", "_blank")
+ }}>
+ {executionData.workflow.actions[0].environment}
+
+
+ : null}
{executionData.status !== undefined &&
executionData.status.length > 0 ? (
@@ -15382,19 +15591,23 @@ const AngularWorkflow = (defaultprops) => {
width: 30,
}}
onClick={() => {
- const oldstartnode = cy.getElementById(data.action.id);
- //console.log("FOUND NODe: ", oldstartnode)
- if (oldstartnode !== undefined && oldstartnode !== null) {
- const foundname = oldstartnode.data("label")
- if (foundname !== undefined && foundname !== null) {
- data.action.label = foundname
- }
- }
+ if (cy !== undefined) {
+ const oldstartnode = cy.getElementById(data.action.id);
+ //console.log("FOUND NODe: ", oldstartnode)
+ if (oldstartnode !== undefined && oldstartnode !== null) {
+ const foundname = oldstartnode.data("label")
+ if (foundname !== undefined && foundname !== null) {
+ data.action.label = foundname
+ }
+ }
- //console.log("Click data: ", data)
- //data.action.label = ""
- setSelectedResult(data);
- setCodeModalOpen(true);
+ //console.log("Click data: ", data)
+ //data.action.label = ""
+ setSelectedResult(data);
+ setCodeModalOpen(true);
+ } else {
+ toast("Please wait until the workflow is loaded and try again")
+ }
}}
>
{
marginBottom: "auto",
}}
>
- {data.action.label}
+ {data.action.label === undefined || data.action.label === null || data.action.label === "" ? data.action.label : data.action.label.replaceAll("_", " ")}
+
@@ -15973,8 +16187,9 @@ const AngularWorkflow = (defaultprops) => {
cy={(incy) => {
// FIXME: There's something specific loading when
// you do the first hover of a node. Why is this different?
- //console.log("CY: ", incy)
- setCy(incy);
+
+
+ setCy(incy);
}}
/>
@@ -16938,7 +17153,7 @@ const AngularWorkflow = (defaultprops) => {
)}
) : (
- {
}}
>
{selectedApp.documentation}
-
+
)}
@@ -17694,6 +17909,7 @@ const AngularWorkflow = (defaultprops) => {
{newView}
+ {aiQueryModal}
{conditionsModal}
{authenticationModal}
{codePopoutModal}
diff --git a/frontend/src/views/AppCreator.jsx b/frontend/src/views/AppCreator.jsx
index c2656a95..15c2c094 100755
--- a/frontend/src/views/AppCreator.jsx
+++ b/frontend/src/views/AppCreator.jsx
@@ -398,7 +398,6 @@ const AppCreator = (defaultprops) => {
apikeySelection.length > 0 ? apikeySelection[0] : ""
);
const [refreshUrl, setRefreshUrl] = useState("");
- const [oauth2Scopes, setOauth2Scopes] = useState([]);
const [projectCategories, setProjectCategories] = useState([]);
const [selectedCategory, setSelectedCategory] = useState("");
@@ -411,7 +410,12 @@ const AppCreator = (defaultprops) => {
const [appBuilding, setAppBuilding] = useState(false);
const [fileDownloadEnabled, setFileDownloadEnabled] = useState(false);
const [actionAmount, setActionAmount] = useState(increaseAmount);
- const [oauth2Type, setOauth2Type] = useState("application");
+
+ const [oauth2Scopes, setOauth2Scopes] = useState([]);
+ const [oauth2Type, setOauth2Type] = useState("delegated");
+
+ //client_credentials
+ const [oauth2GrantType, setOauth2GrantType] = useState("");
const defaultAuth = {
name: "",
type: "header",
@@ -1748,17 +1752,25 @@ const AppCreator = (defaultprops) => {
//console.log("FLOW-1: ", value)
const flowkey = value.flow === undefined ? "flows" : "flow";
//console.log("FLOW: ", value[flowkey])
- const basekey = value[flowkey].authorizationCode !== undefined
- ? "authorizationCode"
- : "implicit";
+
+
+ // Doesn't seem to be used for now
+ const basekey = value[flowkey].authorizationCode !== undefined ? "authorizationCode" : "implicit";
+
+ // Kind of fucked up, but it works for now?
+ if (value["x-grant-type"] !== undefined && value["x-grant-type"] !== null && value["x-grant-type"].length !== 0) {
+ setOauth2GrantType(value["x-grant-type"])
+ }
//console.log("FLOW2: ", value[flowkey][basekey])
if (value[flowkey] !== undefined && value[flowkey][basekey] !== undefined
) {
- if (value[flowkey][basekey].authorizationUrl !== undefined && parameterName.length === 0) {
+ var newparamname = parameterName
+ if (value[flowkey][basekey].authorizationUrl !== undefined && value[flowkey][basekey].authorizationUrl !== null && value[flowkey][basekey].authorizationUrl.length !== 0 && parameterName.length === 0) {
setParameterName(value[flowkey][basekey].authorizationUrl);
- // } else {
- // setOauth2Type("application")
+ } else {
+ setOauth2Type("application")
+
}
var tokenUrl = "";
@@ -2499,10 +2511,16 @@ const AppCreator = (defaultprops) => {
scheme: "basic",
};
} else if (authenticationOption === "Oauth2") {
- console.log("oauth2: ", parameterName)
+ console.log("oauth2: ", parameterName)
var newparamName = parameterName.replaceAll('"', "");
newparamName = newparamName.replaceAll("'", "");
+ // FIXME - this is a hack to get around the fact that the oauth2
+ // flow is not properly defined
+ if (oauth2Type === "application") {
+ newparamName = ""
+ }
+
//parameterName, parameterValue, revocationUrl
data.components.securitySchemes["Oauth2"] = {
type: "oauth2",
@@ -2519,6 +2537,14 @@ const AppCreator = (defaultprops) => {
},
},
};
+
+
+ //if (value[flowkey][basekey]["x-grant-type"] !== undefined && value[flowkey][basekey]["x-grant-type"] !== null && value[flowkey][basekey]["x-grant-type"].length !== 0) {
+ if (oauth2GrantType.length > 0) {
+ data.components.securitySchemes["Oauth2"]["x-grant-type"] = oauth2GrantType;
+ }
+
+ console.log("SECURITYSCHEMES: ", data.components);
}
if (setExtraAuth.length > 0) {
@@ -2925,7 +2951,7 @@ const AppCreator = (defaultprops) => {
color="textSecondary"
style={{ marginTop: 10 }}
>
- Base Authorization URL for Oauth2
+ Authorization URL for Oauth2
{
},
}}
style={{ maxHeight: 80, overflowX: "hidden", overflowY: "auto" }}
- placeholder="Available Oauth2 Scopes (enter to add)"
+ placeholder="Available Oauth2 Scopes"
color="primary"
fullWidth
value={oauth2Scopes}
@@ -5957,36 +5983,71 @@ const AppCreator = (defaultprops) => {
{authenticationOption === "Oauth2" ?
-
+
{/*
- Delegated: The user will get a popup for access their personal data.
- Application: Permissions are set by the app creator in the 3rd party platform.
*/}
- {
- setOauth2Type(e.target.value);
- }}
- value={oauth2Type}
- style={{
- backgroundColor: inputColor,
- color: "white",
- height: "50px",
- }}
- >
- {["delegated", "application"].map((data, index) => (
-
+ Oauth2 type
+ {
+ setOauth2Type(e.target.value);
+
+ if (e.target.value === "application" && oauth2GrantType === "") {
+ setOauth2GrantType("client_credentials")
+ }
+ }}
+ value={oauth2Type}
+ style={{
+ backgroundColor: inputColor,
+ color: "white",
+ height: "50px",
+ }}
+ >
+ {["delegated", "application"].map((data, index) => (
+
+ {data}
+
+ ))}
+
+
+
+ {oauth2Type === "application" ?
+
+ Grant Type
+ {
+ setOauth2GrantType(e.target.value);
+ }}
+ value={oauth2GrantType}
+ style={{
+ backgroundColor: inputColor,
+ color: "white",
+ height: "50px",
+ }}
>
- {data}
-
- ))}
-
+ {["client_credentials", "password"].map((data, index) => (
+
+ {data}
+
+ ))}
+
+
+ : null}
: null}
diff --git a/frontend/src/views/Apps.jsx b/frontend/src/views/Apps.jsx
index e6d6956e..29a52b83 100755
--- a/frontend/src/views/Apps.jsx
+++ b/frontend/src/views/Apps.jsx
@@ -403,7 +403,7 @@ const Apps = (props) => {
return response.json();
})
.then((responseJson) => {
- //console.log("Apps: ", responseJson)
+ console.log("Apps: ", responseJson)
//responseJson = sortByKey(responseJson, "large_image")
//responseJson = sortByKey(responseJson, "is_valid")
//setFilteredApps(responseJson.filter(app => !internalIds.includes(app.name) && !(!app.activated && app.generated)))
diff --git a/frontend/src/views/Dashboard.jsx b/frontend/src/views/Dashboard.jsx
index f961076b..21c2d513 100755
--- a/frontend/src/views/Dashboard.jsx
+++ b/frontend/src/views/Dashboard.jsx
@@ -102,6 +102,8 @@ const UsecaseListComponent = (props) => {
const [expandedItem, setExpandedItem] = useState(-1);
const [inputUsecase, setInputUsecase] = useState({});
+ const [prevSubcase, setPrevSubcase] = useState({})
+
const [editing, setEditing] = useState(false);
const [description, setDescription] = useState("");
const [video, setVideo] = useState("");
@@ -117,6 +119,42 @@ const UsecaseListComponent = (props) => {
const [mitreTags, setMitreTags] = useState([]);
+ const parseUsecase = (subcase) => {
+ const srcdata = findSpecificApp(frameworkData, subcase.type)
+ const dstdata = findSpecificApp(frameworkData, subcase.last)
+
+ if (srcdata !== undefined && srcdata !== null) {
+ subcase.srcimg = srcdata.large_image
+ subcase.srcapp = srcdata.name
+ }
+
+ if (dstdata !== undefined && dstdata !== null) {
+ subcase.dstimg = dstdata.large_image
+ subcase.dstapp = dstdata.name
+ }
+
+ return subcase
+ }
+
+ useEffect(() => {
+ console.log("In frameworkData useEffect: frameworkData: ", frameworkData)
+ if (frameworkData === undefined || prevSubcase === undefined) {
+ return
+ }
+
+ console.log("PAST!")
+
+ var parsedUsecase = inputUsecase
+ const subcase = parseUsecase(prevSubcase)
+
+ parsedUsecase.srcimg = subcase.srcimg
+ parsedUsecase.srcapp = subcase.srcapp
+ parsedUsecase.dstimg = subcase.dstimg
+ parsedUsecase.dstapp = subcase.dstapp
+
+ setInputUsecase(parsedUsecase)
+ }, [frameworkData])
+
const loadApps = () => {
fetch(`${globalUrl}/api/v1/apps`, {
method: "GET",
@@ -163,35 +201,14 @@ const UsecaseListComponent = (props) => {
if (keys === undefined || keys === null || keys.length === 0) {
return null
- }
-
-
- const parseUsecase = (subcase) => {
- //console.log("parseUsecase: ", subcase)
- const srcdata = findSpecificApp(frameworkData, subcase.type)
- const dstdata = findSpecificApp(frameworkData, subcase.last)
-
- if (srcdata !== undefined && srcdata !== null) {
- subcase.srcimg = srcdata.large_image
- subcase.srcapp = srcdata.name
- }
-
- if (dstdata !== undefined && dstdata !== null) {
- subcase.dstimg = dstdata.large_image
- subcase.dstapp = dstdata.name
- }
-
- return subcase
- }
+ }
+
+ // Timeout 50ms to delay it slightly
const getUsecase = (subcase, index, subindex) => {
subcase = parseUsecase(subcase)
-
- // Timeout 50ms to delay it slightly
- //setTimeout(() => {
- // setInputUsecase(subcase)
- //}, 50)
+ setPrevSubcase(subcase)
fetch(`${globalUrl}/api/v1/workflows/usecases/${escape(subcase.name.replaceAll(" ", "_"))}`, {
method: "GET",
@@ -214,8 +231,6 @@ const UsecaseListComponent = (props) => {
if (responseJson.success === false) {
parsedUsecase = subcase
} else {
- console.log("FOUND: ", JSON.parse(JSON.stringify(responseJson)))
-
parsedUsecase = responseJson
parsedUsecase.srcimg = subcase.srcimg
@@ -314,7 +329,7 @@ const UsecaseListComponent = (props) => {
})
.catch((error) => {
//toast(error.toString());
- //setFrameworkLoaded(true)
+ //setFrameworkLoaded(true)
})
}
@@ -416,15 +431,16 @@ const UsecaseListComponent = (props) => {
return (
{
+ if (fixedName === "increase authentication") {
+ getUsecase(subcase, index, subindex)
+ return
+ }
//setSelectedWorkflows([])
if (selectedItem) {
} else {
getUsecase(subcase, index, subindex)
navigate(`/usecases?selected_object=${fixedName}`)
-
- //const newitem = removeParam("selected_object", cursearch);
- //navigate(curpath + newitem)
}
}}>
{
@@ -594,11 +610,12 @@ const UsecaseListComponent = (props) => {
>
{
- setExpandedItem(-1)
- setExpandedIndex(-1)
- setEditing(false)
- setInputUsecase({})
+ setExpandedItem(-1)
+ setExpandedIndex(-1)
+ setEditing(false)
+ setInputUsecase({})
}}
>
@@ -1172,13 +1189,28 @@ const Dashboard = (props) => {
if (foundQuery !== null && foundQuery !== undefined) {
setSelectedUsecaseCategory(foundQuery)
- const newitem = removeParam("selected", cursearch);
+ const newitem = removeParam("selected", cursearch);
navigate(curpath + newitem)
}
+ const baseItem = document.getElementById("increase authentication")
+ if (baseItem !== undefined && baseItem !== null) {
+ baseItem.click()
+
+ // Find close window button -> go to top
+ const foundButton = document.getElementById("close_selection")
+ if (foundButton !== undefined && foundButton !== null) {
+ foundButton.click()
+ }
+
+ // Scroll back to top
+ window.scrollTo(0, 0)
+ }
+
const foundQuery2 = params["selected_object"]
if (foundQuery2 !== null && foundQuery2 !== undefined) {
- //console.log("Got selected_object: ", foundQuery2)
+ // Take a random object, quickly click it, then go to this one
+ // Something is weird with loading apps without it
const queryName = foundQuery2.toLowerCase().replaceAll("_", " ")
// Waiting a bit for it to render
@@ -1198,7 +1230,7 @@ const Dashboard = (props) => {
} else {
//console.log("Couldn't find item with name ", queryName)
}
- }, 100);
+ }, 1000);
}
}
@@ -1251,6 +1283,7 @@ const Dashboard = (props) => {
})
}
+
const getAvailableWorkflows = () => {
fetch(globalUrl + "/api/v1/workflows", {
method: "GET",
@@ -1385,8 +1418,7 @@ const Dashboard = (props) => {
useEffect(() => {
getAvailableWorkflows()
- getFramework()
- //fetchUsecases()
+ getFramework()
}, []);
const fetchdata = (stats_id) => {
diff --git a/frontend/src/views/Docs.jsx b/frontend/src/views/Docs.jsx
index ba56b835..ef7f3bad 100755
--- a/frontend/src/views/Docs.jsx
+++ b/frontend/src/views/Docs.jsx
@@ -1,13 +1,12 @@
import React, { useEffect, useState } from "react";
-import ReactMarkdown from "react-markdown";
+import { toast } from 'react-toastify';
+import Markdown from 'react-markdown'
+
import { BrowserView, MobileView } from "react-device-detect";
import { useParams, useNavigate, Link } from "react-router-dom";
import { isMobile } from "react-device-detect";
import theme from '../theme.jsx';
-import remarkGfm from 'remark-gfm'
-import KeyboardArrowRightIcon from '@mui/icons-material/KeyboardArrowRight';
-import ExpandMoreIcon from '@mui/icons-material/ExpandMore';
import {
Grid,
@@ -29,6 +28,8 @@ import {
import {
Link as LinkIcon,
Edit as EditIcon,
+ KeyboardArrowRight as KeyboardArrowRightIcon,
+ ExpandMore as ExpandMoreIcon,
} from "@mui/icons-material";
const Body = {
@@ -135,7 +136,7 @@ const Docs = (defaultprops) => {
};
const fetchDocList = () => {
- fetch(globalUrl + "/api/v1/docs", {
+ fetch(`${globalUrl}/api/v1/docs`, {
method: "GET",
headers: {
"Content-Type": "application/json",
@@ -147,9 +148,8 @@ const Docs = (defaultprops) => {
if (responseJson.success) {
setList(responseJson.list);
} else {
- setList([
- "# Error loading documentation. Please contact us if this persists.",
- ]);
+ setList(["# Error loading documentation. Please contact us if this persists.",]);
+ toast("Failed loading documentation. Please reload the window")
}
setListLoaded(true);
})
@@ -157,7 +157,7 @@ const Docs = (defaultprops) => {
};
const fetchDocs = (docId) => {
- fetch(globalUrl + "/api/v1/docs/" + docId, {
+ fetch(`${globalUrl}/api/v1/docs/${docId}`, {
method: "GET",
headers: {
"Content-Type": "application/json",
@@ -166,8 +166,15 @@ const Docs = (defaultprops) => {
})
.then((response) => response.json())
.then((responseJson) => {
- if (responseJson.success) {
- setData(responseJson.reason);
+ if (responseJson.success === false) {
+ //toast("Failed loading documentation. Please reload the UI")
+ }
+
+ if (responseJson.success && responseJson.reason !== undefined) {
+ // Find tags and translate them into ![]() format
+ const imgRegex = / {
}
const markdownStyle = {
- color: "rgba(255, 255, 255, 0.65)",
+ color: "rgba(255, 255, 255, 0.90)",
overflow: "hidden",
paddingBottom: 100,
margin: "auto",
maxWidth: "100%",
minWidth: "100%",
overflow: "hidden",
- fontSize: isMobile ? "1.3rem" : "1.0rem",
+ fontSize: isMobile ? "1.3rem" : "1.1rem",
};
function OuterLink(props) {
- console.log("Link: ", props.href)
if (props.href.includes("http") || props.href.includes("mailto")) {
return (
{
}
function CodeHandler(props) {
- console.log("PROPS: ", props)
+ //console.log("Codehandler PROPS: ", props)
const propvalue = props.value !== undefined && props.value !== null ? props.value : props.children !== undefined && props.children !== null && props.children.length > 0 ? props.children[0] : ""
@@ -603,7 +609,6 @@ const Docs = (defaultprops) => {
const [hover, setHover] = useState(false);
- console.log("Link: ", link)
if (link === undefined || link === null) {
return null
}
@@ -681,60 +686,20 @@ const Docs = (defaultprops) => {
Organize. Whether an organization of 1000 or 1, management tools are necessary. In Shuffle we offer full user management, MFA and single-signon options, multi-tenancy and a lot more - for free!
-
- {/*
-
- {list.map((data, index) => {
- const item = data.name;
- if (item === undefined) {
- return null;
- }
-
- const path = "/docs/" + item;
- const newname =
- item.charAt(0).toUpperCase() +
- item.substring(1).split("_").join(" ").split("-").join(" ");
-
- const itemMatching = props.match.params.key === undefined ? false :
- props.match.params.key.toLowerCase() === item.toLowerCase();
-
- return (
-
-
-
- )
- })}
-
- */}
-
- {/*
-
{
- console.log("Change: ", event.target.value)
- }}
- />
- */}
+ const markdownComponents = {
+ img: Img,
+ code: CodeHandler,
+ h1: Heading,
+ h2: Heading,
+ h3: Heading,
+ h4: Heading,
+ h5: Heading,
+ h6: Heading,
+ a: OuterLink,
+ }
+
// PostDataBrowser Section
const postDataBrowser =
list === undefined || list === null ? null : (
@@ -812,32 +777,22 @@ const Docs = (defaultprops) => {
mainpageInfo
:
-
{data}
-
+
}
);
- // remarkPlugins={[remarkGfm]}
const mobileStyle = {
color: "white",
@@ -849,6 +804,7 @@ const Docs = (defaultprops) => {
flexDirection: "column",
};
+
const postDataMobile =
list === undefined || list === null ? null : (
@@ -899,18 +855,8 @@ const Docs = (defaultprops) => {
mainpageInfo
:
- {
}}
>
{data}
-
+
}
{
);
- //const imageModal =
- //
- // {imageModal}
-
// Padding and zIndex etc set because of footer in cloud.
const loadedCheck = (
-
+
{postDataBrowser}
{postDataMobile}
diff --git a/frontend/src/views/Workflows.jsx b/frontend/src/views/Workflows.jsx
index 2bcd3b38..73f02cc2 100755
--- a/frontend/src/views/Workflows.jsx
+++ b/frontend/src/views/Workflows.jsx
@@ -1344,7 +1344,7 @@ const Workflows = (props) => {
}, i * 200);
}
- toast(`exporting and keeping original for all ${allWorkflows.length} workflows`);
+ toast(`Exporting and keeping original for all ${allWorkflows.length} workflows`);
};
const deduplicateIds = (data, skip_sanitize) => {
@@ -1982,16 +1982,16 @@ const Workflows = (props) => {
- {data.image !== undefined && data.image !== null && data.image.length > 0 ?
-
- : null}
-
- Edit {data.name}
-
-
- } placement="bottom">
-
+ {data.image !== undefined && data.image !== null && data.image.length > 0 ?
+
+ : null}
+
+ Edit {data.name}
+
+
+ } placement="left">
+
0 {
+ serviceSpec.TaskTemplate.ContainerSpec.Env = append(serviceSpec.TaskTemplate.ContainerSpec.Env, fmt.Sprintf("SHUFFLE_VOLUME_BINDS=%s", os.Getenv("SHUFFLE_VOLUME_BINDS")))
+ }
+
+ overrideHttpProxy := os.Getenv("SHUFFLE_INTERNAL_HTTP_PROXY")
+ overrideHttpsProxy := os.Getenv("SHUFFLE_INTERNAL_HTTPS_PROXY")
+ if len(overrideHttpProxy) > 0 {
+ log.Printf("[DEBUG] Added internal proxy: %s", overrideHttpProxy)
+ serviceSpec.TaskTemplate.ContainerSpec.Env = append(serviceSpec.TaskTemplate.ContainerSpec.Env, fmt.Sprintf("SHUFFLE_INTERNAL_HTTP_PROXY=%s", overrideHttpProxy))
+ }
+
+ if len(overrideHttpsProxy) > 0 {
+ log.Printf("[DEBUG] Added internal proxy: %s", overrideHttpsProxy)
+ serviceSpec.TaskTemplate.ContainerSpec.Env = append(serviceSpec.TaskTemplate.ContainerSpec.Env, fmt.Sprintf("SHUFFLE_INTERNAL_HTTPS_PROXY=%s", overrideHttpsProxy))
+ }
+
serviceOptions := types.ServiceCreateOptions{}
_, err = dockercli.ServiceCreate(
ctx,
@@ -788,7 +805,7 @@ func deployWorker(image string, identifier string, env []string, executionReques
log.Printf("[ERROR] Failed to start worker container in environment %s: %s", environment, err)
return err
} else {
- log.Printf("[INFO] Worker Container %s was created under environment %s for execution %s: docker logs %s", cont.ID, environment, executionRequest.ExecutionId, cont.ID)
+ log.Printf("[INFO][%s] Worker Container created. Environment %s: docker logs %s", executionRequest.ExecutionId, environment, cont.ID)
}
//stats, err := cli.ContainerInspect(context.Background(), containerName)
@@ -813,7 +830,7 @@ func deployWorker(image string, identifier string, env []string, executionReques
// }
//}
} else {
- log.Printf("[INFO] Worker Container %s was created under environment %s: docker logs %s", cont.ID, environment, cont.ID)
+ log.Printf("[INFO][%s] New Worker created. Environment %s: docker logs %s", executionRequest.ExecutionId, environment, cont.ID)
}
return nil
@@ -1298,7 +1315,6 @@ func main() {
ctx := context.Background()
// Run by default from now
//commenting for now as its stoppoing minikube
- // zombiecheck(ctx, workerTimeout)
log.Printf("[INFO] Running towards %s (BASE_URL) with environment name %s", baseUrl, environment)
@@ -1333,6 +1349,8 @@ func main() {
//deployServiceWorkers(workerImage)
}
+ zombiecheck(ctx, workerTimeout)
+
client := shuffle.GetExternalClient(baseUrl)
fullUrl := fmt.Sprintf("%s/api/v1/workflows/queue", baseUrl)
log.Printf("[INFO] Finished configuring docker environment. Connecting to %s", fullUrl)
@@ -1389,7 +1407,7 @@ func main() {
// Should find data to send (memory etc.)
// Create timeout of max 4 seconds just in case
- ctx, cancel := context.WithTimeout(context.Background(), 4*time.Second)
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
// Marshal and set body
@@ -1524,9 +1542,12 @@ func main() {
log.Printf("[INFO] Execution already handled (rerun of old executions?): %s", execution.ExecutionId)
toBeRemoved.Data = append(toBeRemoved.Data, execution)
+ // Should check when last this was ran, and if it's more than 10 minutes ago and it's not finished, we should run it again?
+ /*
if swarmConfig != "run" && swarmConfig != "swarm" {
continue
}
+ */
}
// Now, how do I execute this one?
@@ -1574,6 +1595,30 @@ func main() {
env = append(env, fmt.Sprintf("SHUFFLE_DEBUG_MEMORY=%s", os.Getenv("SHUFFLE_DEBUG_MEMORY")))
}
+ // Look for volume binds
+ if len(os.Getenv("SHUFFLE_VOLUME_BINDS")) > 0 {
+ log.Printf("[DEBUG] Added volume binds: %s", os.Getenv("SHUFFLE_VOLUME_BINDS"))
+ env = append(env, fmt.Sprintf("SHUFFLE_VOLUME_BINDS=%s", os.Getenv("SHUFFLE_VOLUME_BINDS")))
+ }
+
+
+ if len(os.Getenv("SHUFFLE_APP_SDK_TIMEOUT")) > 0 {
+ env = append(env, fmt.Sprintf("SHUFFLE_APP_SDK_TIMEOUT=%s", os.Getenv("SHUFFLE_APP_SDK_TIMEOUT")))
+ }
+
+ // Setting up internal proxy config for Shuffle -> shuffle comms
+ overrideHttpProxy := os.Getenv("SHUFFLE_INTERNAL_HTTP_PROXY")
+ overrideHttpsProxy := os.Getenv("SHUFFLE_INTERNAL_HTTPS_PROXY")
+ if len(overrideHttpProxy) > 0 {
+ log.Printf("[DEBUG] Added internal proxy: %s", overrideHttpProxy)
+ env = append(env, fmt.Sprintf("HTTP_PROXY=%s", overrideHttpProxy))
+ }
+
+ if len(overrideHttpsProxy) > 0 {
+ log.Printf("[DEBUG] Added internal proxy: %s", overrideHttpsProxy)
+ env = append(env, fmt.Sprintf("HTTPS_PROXY=%s", overrideHttpsProxy))
+ }
+
err = deployWorker(workerImage, containerName, env, execution)
zombiecounter += 1
if err == nil {
@@ -1754,7 +1799,7 @@ func zombiecheck(ctx context.Context, workerTimeout int) error {
return nil
}
- log.Println("[INFO] Looking for old containers (zombies)")
+ log.Println("[INFO] Looking for old containers to remove")
containers, err := dockercli.ContainerList(ctx, types.ContainerListOptions{
All: true,
})
@@ -1771,10 +1816,11 @@ func zombiecheck(ctx context.Context, workerTimeout int) error {
stopContainers := []string{}
removeContainers := []string{}
log.Printf("[INFO] Baseimage: %s, Workertimeout: %d", baseimagename, int64(workerTimeout))
- baseString := `/bin/sh -c 'python app.py --log-level DEBUG'`
+ //baseString := `/bin/sh -c 'python app.py --log-level DEBUG'`
+ baseString := `python app.py`
for _, container := range containers {
// Skip random containers. Only handle things related to Shuffle.
- if !strings.Contains(container.Image, baseimagename) && container.Command != baseString && container.Command != "./worker" {
+ if !strings.Contains(container.Image, baseimagename) && !strings.Contains(container.Command, baseString) && !strings.Contains(container.Command, "walkoff") && container.Command != "./worker" {
shuffleFound := false
for _, item := range container.Labels {
if item == "shuffle" {
@@ -1785,7 +1831,7 @@ func zombiecheck(ctx context.Context, workerTimeout int) error {
// Check image name
if !shuffleFound {
- //log.Printf("[WARNING] Zombie container skip: %#v, %s", container.Labels, container.Image)
+ log.Printf("[WARNING] Zombie container skip: %#v, %s", container.Labels, container.Image)
continue
}
//} else {
@@ -1820,7 +1866,7 @@ func zombiecheck(ctx context.Context, workerTimeout int) error {
}
// FIXME - add killing of apps with same execution ID too
- log.Printf("[INFO] Should STOP %d containers.", len(stopContainers))
+ log.Printf("[INFO] Should STOP and remove %d containers.", len(stopContainers))
var options container.StopOptions
for _, containername := range stopContainers {
log.Printf("[INFO] Stopping and removing container %s", containerNames[containername])
@@ -1878,7 +1924,7 @@ func sendWorkerRequest(workflowExecution shuffle.ExecutionRequest) error {
streamUrl = fmt.Sprintf("%s:33333/api/v1/execute", workerServerUrl)
}
- if strings.Contains(streamUrl, "localhost") || strings.Contains(streamUrl, "shuffle-backend") {
+ if strings.Contains(streamUrl, "shuffler.io") || strings.Contains(streamUrl, "localhost") || strings.Contains(streamUrl, "shuffle-backend") {
log.Printf("[INFO] Using default worker server url as previous is invalid: %s", streamUrl)
streamUrl = fmt.Sprintf("http://shuffle-workers:33333/api/v1/execute")
}
diff --git a/functions/onprem/worker/go.mod b/functions/onprem/worker/go.mod
index e4cf2262..9e0f8e1e 100644
--- a/functions/onprem/worker/go.mod
+++ b/functions/onprem/worker/go.mod
@@ -11,7 +11,7 @@ require (
github.com/gorilla/mux v1.8.0
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/satori/go.uuid v1.2.0
- github.com/shuffle/shuffle-shared v0.4.57
+ github.com/shuffle/shuffle-shared v0.5.29
k8s.io/api v0.28.3
k8s.io/apimachinery v0.28.3
k8s.io/client-go v0.28.3
diff --git a/functions/onprem/worker/go.sum b/functions/onprem/worker/go.sum
index f26caf60..5afdce76 100644
--- a/functions/onprem/worker/go.sum
+++ b/functions/onprem/worker/go.sum
@@ -284,6 +284,8 @@ github.com/shuffle/shuffle-shared v0.4.50 h1:fJLfhWIJ5mYap4JwHnD/B5aaLyIULwylFSl
github.com/shuffle/shuffle-shared v0.4.50/go.mod h1:X613gbo0dT3fnYvXDRwjQZyLC+T49T2nSQOrCV5QMlI=
github.com/shuffle/shuffle-shared v0.4.57 h1:o+mMPRY4ourkE3R0qdi80jg6RlCtvAJ/VVrPk4y75Hk=
github.com/shuffle/shuffle-shared v0.4.57/go.mod h1:X613gbo0dT3fnYvXDRwjQZyLC+T49T2nSQOrCV5QMlI=
+github.com/shuffle/shuffle-shared v0.5.29 h1:n4vThl7v3mFVXbrIW71XREFdmZZo7mOBAWxnsdiNjDk=
+github.com/shuffle/shuffle-shared v0.5.29/go.mod h1:X613gbo0dT3fnYvXDRwjQZyLC+T49T2nSQOrCV5QMlI=
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0=
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
diff --git a/functions/onprem/worker/worker.go b/functions/onprem/worker/worker.go
index fb913d16..0c8fd3a5 100755
--- a/functions/onprem/worker/worker.go
+++ b/functions/onprem/worker/worker.go
@@ -3,7 +3,7 @@ package main
import (
"github.com/shuffle/shuffle-shared"
- //"bufio"
+
"bytes"
"context"
"encoding/json"
@@ -14,29 +14,23 @@ import (
"log"
"net"
"net/http"
+ "net/http/pprof"
"net/url"
"os"
+ "strconv"
"strings"
"time"
"github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/container"
- //"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/mount"
dockerclient "github.com/docker/docker/client"
- //"github.com/go-git/go-billy/v5/memfs"
-
- //newdockerclient "github.com/fsouza/go-dockerclient"
- //"github.com/satori/go.uuid"
+ // This is for automatic removal of certain code :)
"github.com/gorilla/mux"
- "github.com/patrickmn/go-cache"
"github.com/satori/go.uuid"
- // No necessary outside shared
- "cloud.google.com/go/datastore"
- "cloud.google.com/go/storage"
-
//k8s deps
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -45,24 +39,24 @@ import (
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/homedir"
"path/filepath"
- // "k8s.io/client-go/util/retry"
)
// This is getting out of hand :)
-var environment = os.Getenv("ENVIRONMENT_NAME")
+var timezone = os.Getenv("TZ")
var baseUrl = os.Getenv("BASE_URL")
var appCallbackUrl = os.Getenv("BASE_URL")
+var isKubernetes = os.Getenv("IS_KUBERNETES")
+var environment = os.Getenv("ENVIRONMENT_NAME")
+var logsDisabled = os.Getenv("SHUFFLE_LOGS_DISABLED")
var cleanupEnv = strings.ToLower(os.Getenv("CLEANUP"))
-var dockerApiVersion = strings.ToLower(os.Getenv("DOCKER_API_VERSION"))
var swarmNetworkName = os.Getenv("SHUFFLE_SWARM_NETWORK_NAME")
-var timezone = os.Getenv("TZ")
+var dockerApiVersion = strings.ToLower(os.Getenv("DOCKER_API_VERSION"))
var baseimagename = "frikky/shuffle"
// var baseimagename = "registry.hub.docker.com/frikky/shuffle"
var registryName = "registry.hub.docker.com"
var sleepTime = 2
-var requestCache *cache.Cache
var topClient *http.Client
var data string
var requestsSent = 0
@@ -84,11 +78,21 @@ var startAction string
//var allLogs map[string]string
//var containerIds []string
var downloadedImages []string
+type ImageDownloadBody struct {
+ Image string `json:"image"`
+}
+
+type ImageRequest struct {
+ Image string `json:"image"`
+}
+
+var finishedExecutions []string
+
// Images to be autodeployed in the latest version of Shuffle.
var autoDeploy = map[string]string{
- "http:1.3.0": "frikky/shuffle:http_1.3.0",
"http:1.4.0": "frikky/shuffle:http_1.4.0",
+ "http:1.3.0": "frikky/shuffle:http_1.3.0",
"shuffle-tools:1.2.0": "frikky/shuffle:shuffle-tools_1.2.0",
"shuffle-subflow:1.0.0": "frikky/shuffle:shuffle-subflow_1.0.0",
"shuffle-subflow:1.1.0": "frikky/shuffle:shuffle-subflow_1.1.0",
@@ -108,6 +112,166 @@ type UserInputSubflow struct {
CancelUrl string `json:"cancel_url"`
}
+// Not using shuffle.SetWorkflowExecution as we only want to use cache in reality
+func setWorkflowExecution(ctx context.Context, workflowExecution shuffle.WorkflowExecution, dbSave bool) error {
+ if len(workflowExecution.ExecutionId) == 0 {
+ log.Printf("[DEBUG] Workflowexecution executionId can't be empty.")
+ return errors.New("ExecutionId can't be empty.")
+ }
+
+ //log.Printf("[DEBUG][%s] Setting with %d results (pre)", workflowExecution.ExecutionId, len(workflowExecution.Results))
+ workflowExecution = shuffle.Fixexecution(ctx, workflowExecution)
+ cacheKey := fmt.Sprintf("workflowexecution_%s", workflowExecution.ExecutionId)
+
+ execData, err := json.Marshal(workflowExecution)
+ if err != nil {
+ log.Printf("[ERROR] Failed marshalling execution during set: %s", err)
+ return err
+ }
+
+ err = shuffle.SetCache(ctx, cacheKey, execData, 30)
+ if err != nil {
+		log.Printf("[ERROR][%s] Failed adding to cache during setexecution: %s", workflowExecution.ExecutionId, err)
+ return err
+ }
+
+
+ handleExecutionResult(workflowExecution)
+ validated := shuffle.ValidateFinished(ctx, -1, workflowExecution)
+ if validated {
+ shutdownData, err := json.Marshal(workflowExecution)
+ if err != nil {
+ log.Printf("[ERROR] Failed marshalling shutdowndata during set: %s", err)
+ }
+
+ log.Printf("[DEBUG][%s] Sending result (set)", workflowExecution.ExecutionId)
+ sendResult(workflowExecution, shutdownData)
+ return nil
+ }
+
+ // FIXME: Should this shutdown OR send the result?
+ // The worker may not be running the backend hmm
+ if dbSave {
+ if workflowExecution.ExecutionSource == "default" {
+ log.Printf("[DEBUG][%s] Shutting down (25)", workflowExecution.ExecutionId)
+ shutdown(workflowExecution, "", "", true)
+ //return
+ } else {
+ log.Printf("[DEBUG][%s] NOT shutting down with dbSave (%s). Instead sending result to backend and start polling until subflow is updated", workflowExecution.ExecutionId, workflowExecution.ExecutionSource)
+
+ shutdownData, err := json.Marshal(workflowExecution)
+ if err != nil {
+ log.Printf("[ERROR] Failed marshalling shutdowndata during dbSave handler: %s", err)
+ }
+
+ sendResult(workflowExecution, shutdownData)
+
+ // Poll for 1 minute max if there is a "wait for results" subflow
+ subflowId := ""
+ for _, result := range workflowExecution.Results {
+ if result.Status == "WAITING" {
+ //log.Printf("[DEBUG][%s] Found waiting result", workflowExecution.ExecutionId)
+ subflowId = result.Action.ID
+ }
+ }
+
+ if len(subflowId) == 0 {
+ log.Printf("[DEBUG][%s] No waiting result found. Not polling", workflowExecution.ExecutionId)
+
+ for _, action := range workflowExecution.Workflow.Actions {
+ if action.AppName == "User Input" || action.AppName == "Shuffle Workflow" || action.AppName == "shuffle-subflow" {
+ workflowExecution.Workflow.Triggers = append(workflowExecution.Workflow.Triggers, shuffle.Trigger{
+ AppName: action.AppName,
+ Parameters: action.Parameters,
+ ID: action.ID,
+ })
+ }
+ }
+
+
+ for _, trigger := range workflowExecution.Workflow.Triggers {
+ //log.Printf("[DEBUG] Found trigger %s", trigger.AppName)
+ if trigger.AppName != "User Input" && trigger.AppName != "Shuffle Workflow" && trigger.AppName != "shuffle-subflow" {
+ continue
+ }
+
+
+ // check if it has wait for results in params
+ wait := false
+ for _, param := range trigger.Parameters {
+ //log.Printf("[DEBUG] Found param %s with value %s", param.Name, param.Value)
+ if param.Name == "check_result" && strings.ToLower(param.Value) == "true" {
+ //log.Printf("[DEBUG][%s] Found check result param!", workflowExecution.ExecutionId)
+ wait = true
+ break
+ }
+ }
+
+ if wait {
+ // Check if it has a result or not
+ found := false
+ for _, result := range workflowExecution.Results {
+ //log.Printf("[DEBUG][%s] Found result %s", workflowExecution.ExecutionId, result.Action.ID)
+ if result.Action.ID == trigger.ID && result.Status != "SUCCESS" && result.Status != "FAILURE" {
+ //log.Printf("[DEBUG][%s] Found subflow result that is not handled. Waiting for results", workflowExecution.ExecutionId)
+
+ subflowId = result.Action.ID
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ log.Printf("[DEBUG][%s] No result found for subflow. Setting subflowId to %s", workflowExecution.ExecutionId, trigger.ID)
+ subflowId = trigger.ID
+ }
+ }
+
+ if len(subflowId) > 0 {
+ break
+ }
+ }
+ }
+
+ if len(subflowId) > 0 {
+ // Under rerun period timeout
+ timeComparison := 120
+ log.Printf("[DEBUG][%s] Starting polling for %d seconds to see if new subflow updates are found on the backend that are not handled. Subflow ID: %s", workflowExecution.ExecutionId, timeComparison, subflowId)
+ timestart := time.Now()
+ streamResultUrl := fmt.Sprintf("%s/api/v1/streams/results", baseUrl)
+ for {
+ err = handleSubflowPoller(ctx, workflowExecution, streamResultUrl, subflowId)
+ if err == nil {
+ log.Printf("[DEBUG] Subflow is finished and we are breaking the thingy")
+
+ if os.Getenv("SHUFFLE_SWARM_CONFIG") != "run" && os.Getenv("SHUFFLE_SWARM_CONFIG") != "swarm" && workflowExecution.ExecutionSource != "default" {
+ log.Printf("[DEBUG] Force shutdown of worker due to optimized run with webserver. Expecting reruns to take care of this")
+ os.Exit(0)
+ }
+
+
+ break
+ }
+
+ timepassed := time.Since(timestart)
+ if timepassed.Seconds() > float64(timeComparison) {
+ log.Printf("[DEBUG][%s] Max poll time reached to look for updates. Stopping poll. This poll is here to send personal results back to itself to be handled, then to stop this thread.", workflowExecution.ExecutionId)
+ break
+ }
+
+ // Sleep for 1 second
+ time.Sleep(1 * time.Second)
+ }
+ } else {
+ log.Printf("[DEBUG][%s] No need to poll for results. Not polling", workflowExecution.ExecutionId)
+ }
+ }
+ }
+
+ return nil
+}
+
+
// removes every container except itself (worker)
func shutdown(workflowExecution shuffle.WorkflowExecution, nodeId string, reason string, handleResultSend bool) {
log.Printf("[DEBUG][%s] Shutdown (%s) started with reason %#v. Result amount: %d. ResultsSent: %d, Send result: %#v, Parent: %#v", workflowExecution.ExecutionId, workflowExecution.Status, reason, len(workflowExecution.Results), requestsSent, handleResultSend, workflowExecution.ExecutionParent)
@@ -126,6 +290,30 @@ func shutdown(workflowExecution shuffle.WorkflowExecution, nodeId string, reason
time.Sleep(time.Duration(sleepDuration) * time.Second)
}
+ // Might not be necessary because of cleanupEnv hostconfig autoremoval
+ if cleanupEnv == "true" && (os.Getenv("SHUFFLE_SWARM_CONFIG") != "run" && os.Getenv("SHUFFLE_SWARM_CONFIG") != "swarm") {
+ /*
+ ctx := context.Background()
+ dockercli, err := dockerclient.NewEnvClient()
+ if err == nil {
+ log.Printf("[INFO] Cleaning up %d containers", len(containerIds))
+ removeOptions := types.ContainerRemoveOptions{
+ RemoveVolumes: true,
+ Force: true,
+ }
+
+ for _, containername := range containerIds {
+				log.Printf("[INFO] Should stop and remove container %s (deprecated)", containername)
+ //dockercli.ContainerStop(ctx, containername, nil)
+ //dockercli.ContainerRemove(ctx, containername, removeOptions)
+ //removeContainers = append(removeContainers, containername)
+ }
+ }
+ */
+ } else {
+
+ }
+
if len(reason) > 0 && len(nodeId) > 0 {
//log.Printf("[INFO] Running abort of workflow because it should be finished")
@@ -138,7 +326,6 @@ func shutdown(workflowExecution shuffle.WorkflowExecution, nodeId string, reason
path += fmt.Sprintf("&env=%s", url.QueryEscape(environment))
}
- //fmt.Printf(url.QueryEscape(query))
abortUrl += path
log.Printf("[DEBUG][%s] Abort URL: %s", workflowExecution.ExecutionId, abortUrl)
@@ -152,19 +339,22 @@ func shutdown(workflowExecution shuffle.WorkflowExecution, nodeId string, reason
log.Printf("[WARNING][%s] Failed building request: %s", workflowExecution.ExecutionId, err)
}
- authorization := os.Getenv("AUTHORIZATION")
- if len(authorization) > 0 {
- req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", authorization))
+ // FIXME: Add an API call to the backend
+ if os.Getenv("SHUFFLE_SWARM_CONFIG") != "run" && os.Getenv("SHUFFLE_SWARM_CONFIG") != "swarm" {
+ authorization := os.Getenv("AUTHORIZATION")
+ if len(authorization) > 0 {
+ req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", authorization))
+ } else {
+ log.Printf("[ERROR][%s] No authorization specified for abort", workflowExecution.ExecutionId)
+ }
} else {
- log.Printf("[ERROR][%s] No authorization specified for abort", workflowExecution.ExecutionId)
+ req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", workflowExecution.Authorization))
}
req.Header.Add("Content-Type", "application/json")
- client := shuffle.GetExternalClient(baseUrl)
-
//log.Printf("[DEBUG][%s] All App Logs: %#v", workflowExecution.ExecutionId, allLogs)
- newresp, err := client.Do(req)
+ newresp, err := topClient.Do(req)
if err != nil {
log.Printf("[WARNING][%s] Failed abort request: %s", workflowExecution.ExecutionId, err)
} else {
@@ -178,63 +368,17 @@ func shutdown(workflowExecution shuffle.WorkflowExecution, nodeId string, reason
//Finished shutdown (after %d seconds). ", sleepDuration)
// Allows everything to finish in subprocesses (apps)
- time.Sleep(time.Duration(sleepDuration) * time.Second)
- os.Exit(3)
-}
-
-// }
-
-func isRunningInCluster() bool {
- _, existsHost := os.LookupEnv("KUBERNETES_SERVICE_HOST")
- _, existsPort := os.LookupEnv("KUBERNETES_SERVICE_PORT")
- return existsHost && existsPort
-}
-
-func buildEnvVars(envMap map[string]string) []corev1.EnvVar {
- var envVars []corev1.EnvVar
- for key, value := range envMap {
- envVars = append(envVars, corev1.EnvVar{Name: key, Value: value})
- }
- return envVars
-}
-
-func getKubernetesClient() (*kubernetes.Clientset, error) {
- if isRunningInCluster() {
- config, err := rest.InClusterConfig()
- if err != nil {
- return nil, err
- }
- clientset, err := kubernetes.NewForConfig(config)
- if err != nil {
- return nil, err
- }
- return clientset, nil
+ if os.Getenv("SHUFFLE_SWARM_CONFIG") != "run" && os.Getenv("SHUFFLE_SWARM_CONFIG") != "swarm" {
+ time.Sleep(time.Duration(sleepDuration) * time.Second)
+ os.Exit(3)
} else {
- home := homedir.HomeDir()
- kubeconfigPath := filepath.Join(home, ".kube", "config")
- config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
- if err != nil {
- return nil, err
- }
- clientset, err := kubernetes.NewForConfig(config)
- if err != nil {
- return nil, err
- }
- return clientset, nil
+ log.Printf("[DEBUG][%s] Sending result and resetting values (K8s & Swarm).", workflowExecution.ExecutionId)
}
}
// Deploys the internal worker whenever something happens
func deployApp(cli *dockerclient.Client, image string, identifier string, env []string, workflowExecution shuffle.WorkflowExecution, action shuffle.Action) error {
- // log.Printf("################################### new call to deployApp ###################################")
- // log.Printf("image: %s", image)
- // log.Printf("identifier: %s", identifier)
- // log.Printf("execution: %+v", workflowExecution)
- log.Printf("[DEBUG] Adding SHUFFLE_APP_SDK_TIMEOUT=%s", os.Getenv("SHUFFLE_APP_SDK_TIMEOUT"))
- env = append(env, fmt.Sprintf("SHUFFLE_APP_SDK_TIMEOUT=%s", os.Getenv("SHUFFLE_APP_SDK_TIMEOUT")))
-
- if os.Getenv("IS_KUBERNETES") == "true" {
-
+ if isKubernetes == "true" {
namespace := "shuffle"
localRegistry := os.Getenv("REGISTRY_URL")
@@ -248,8 +392,9 @@ func deployApp(cli *dockerclient.Client, image string, identifier string, env []
clientset, err := getKubernetesClient()
if err != nil {
- fmt.Println("[ERROR]Error getting kubernetes client:", err)
- // os.Exit(1)
+ log.Printf("[ERROR] Failed getting kubernetes: %s [INFO] Setting kubernetes to false to enable running Shuffle with Docker for the next iterations.", err)
+ isKubernetes = "false"
+ return err
}
log.Printf("[DEBUG] Got kubernetes client")
@@ -264,8 +409,6 @@ func deployApp(cli *dockerclient.Client, image string, identifier string, env []
appName := strings.Join(appDetailsSplit[:len(appDetailsSplit)-1], "_")
appVersion := appDetailsSplit[len(appDetailsSplit)-1]
- // log.Printf("APP VERSION IS: %s", appVersion)
-
for _, app := range workflowExecution.Workflow.Actions {
// log.Printf("[DEBUG] App: %s, Version: %s", appName, appVersion)
// log.Printf("[DEBUG] Checking app %s with version %s", app.AppName, app.AppVersion)
@@ -308,115 +451,140 @@ func deployApp(cli *dockerclient.Client, image string, identifier string, env []
createdPod, err := clientset.CoreV1().Pods(namespace).Create(context.Background(), pod, metav1.CreateOptions{})
if err != nil {
- fmt.Fprintf(os.Stderr, "Error creating pod: %v\n", err)
+		fmt.Fprintf(os.Stderr, "Error creating pod: %v\n", err)
// os.Exit(1)
}
- fmt.Printf("[DEBUG] Created pod %q in namespace %q\n", createdPod.Name, createdPod.Namespace)
- } else {
- // form basic hostConfig
- ctx := context.Background()
+ log.Printf("[DEBUG] Created pod %q in namespace %q", createdPod.Name, createdPod.Namespace)
+ return nil
+ }
- if action.AppName == "shuffle-subflow" {
- // Automatic replacement of URL
- for paramIndex, param := range action.Parameters {
- if param.Name != "backend_url" {
- continue
- }
+ // form basic hostConfig
+ ctx := context.Background()
- if strings.Contains(param.Value, "shuffle-backend") {
- // Automatic replacement as this is default
+ // Check action if subflow
+ // Check if url is default (shuffle-backend)
+ // If it doesn't exist, add it
+ if action.AppName == "shuffle-subflow" {
+ // Automatic replacement of URL
+ for paramIndex, param := range action.Parameters {
+ if param.Name != "backend_url" {
+ continue
+ }
+
+ if strings.Contains(param.Value, "shuffle-backend") {
+ // Automatic replacement as this is default
+ if len(os.Getenv("BASE_URL")) > 0 {
action.Parameters[paramIndex].Value = os.Getenv("BASE_URL")
- log.Printf("[DEBUG][%s] Replaced backend_url with %s", workflowExecution.ExecutionId, os.Getenv("BASE_URL"))
+ log.Printf("[DEBUG][%s] Replaced backend_url with base_url %s", workflowExecution.ExecutionId, os.Getenv("BASE_URL"))
+ }
+
+ if len(os.Getenv("SHUFFLE_CLOUDRUN_URL")) > 0 {
+ action.Parameters[paramIndex].Value = os.Getenv("SHUFFLE_CLOUDRUN_URL")
+ log.Printf("[DEBUG][%s] Replaced backend_url with cloudrun %s", workflowExecution.ExecutionId, os.Getenv("SHUFFLE_CLOUDRUN_URL"))
}
}
}
+ }
- // Max 10% CPU every second
- //CPUShares: 128,
- //CPUQuota: 10000,
- //CPUPeriod: 100000,
- hostConfig := &container.HostConfig{
- LogConfig: container.LogConfig{
- Type: "json-file",
- Config: map[string]string{
- "max-size": "10m",
- },
+
+ // Max 10% CPU every second
+ //CPUShares: 128,
+ //CPUQuota: 10000,
+ //CPUPeriod: 100000,
+ hostConfig := &container.HostConfig{
+ LogConfig: container.LogConfig{
+ Type: "json-file",
+ Config: map[string]string{
+ "max-size": "10m",
},
- Resources: container.Resources{},
- }
+ },
+ Resources: container.Resources{},
+ }
+ if os.Getenv("SHUFFLE_SWARM_CONFIG") != "run" && os.Getenv("SHUFFLE_SWARM_CONFIG") != "swarm" {
hostConfig.NetworkMode = container.NetworkMode(fmt.Sprintf("container:worker-%s", workflowExecution.ExecutionId))
+ //log.Printf("Environments: %#v", env)
+ }
- // Removing because log extraction should happen first
- if cleanupEnv == "true" {
- hostConfig.AutoRemove = true
- }
-
- // FIXME: Add proper foldermounts here
- //log.Printf("\n\nPRE FOLDERMOUNT\n\n")
- //volumeBinds := []string{"/tmp/shuffle-mount:/rules"}
- //volumeBinds := []string{"/tmp/shuffle-mount:/rules"}
- volumeBinds := []string{}
- if len(volumeBinds) > 0 {
- log.Printf("[DEBUG] Setting up binds for container!")
- hostConfig.Binds = volumeBinds
- hostConfig.Mounts = []mount.Mount{}
- for _, bind := range volumeBinds {
- if !strings.Contains(bind, ":") || strings.Contains(bind, "..") || strings.HasPrefix(bind, "~") {
- log.Printf("[WARNING] Bind %s is invalid.", bind)
- continue
- }
+ // Removing because log extraction should happen first
+ if cleanupEnv == "true" {
+ hostConfig.AutoRemove = true
+ }
- log.Printf("[DEBUG] Appending bind %s", bind)
- bindSplit := strings.Split(bind, ":")
- sourceFolder := bindSplit[0]
- destinationFolder := bindSplit[0]
- hostConfig.Mounts = append(hostConfig.Mounts, mount.Mount{
- Type: mount.TypeBind,
- Source: sourceFolder,
- Target: destinationFolder,
- })
+ // Get environment for certificates
+ volumeBinds := []string{}
+ volumeBindString:= os.Getenv("SHUFFLE_VOLUME_BINDS")
+ if len(volumeBindString) > 0 {
+ volumeBindSplit := strings.Split(volumeBindString, ",")
+ for _, volumeBind := range volumeBindSplit {
+ if strings.Contains(volumeBind, ":") {
+ volumeBinds = append(volumeBinds, volumeBind)
+ } else {
+ log.Printf("[ERROR] Volume bind '%s' is invalid.", volumeBind)
}
- } else {
- //log.Printf("[WARNING] Not mounting folders")
}
+ }
- config := &container.Config{
- Image: image,
- Env: env,
- }
+ // Add more volume binds if possible
+ if len(volumeBinds) > 0 {
+ log.Printf("[DEBUG] Setting up binds for container. Got %d volume binds.", len(volumeBinds))
- // Checking as late as possible, just in case.
- newExecId := fmt.Sprintf("%s_%s", workflowExecution.ExecutionId, action.ID)
- _, err := shuffle.GetCache(ctx, newExecId)
- if err == nil {
- log.Printf("\n\n[DEBUG] Result for %s already found - returning\n\n", newExecId)
- return nil
- }
+ hostConfig.Binds = volumeBinds
+ hostConfig.Mounts = []mount.Mount{}
+ for _, bind := range volumeBinds {
+ if !strings.Contains(bind, ":") || strings.Contains(bind, "..") || strings.HasPrefix(bind, "~") {
+ log.Printf("[ERROR] Volume bind '%s' is invalid. Use absolute paths.", bind)
+ continue
+ }
- cacheData := []byte("1")
- err = shuffle.SetCache(ctx, newExecId, cacheData, 30)
- if err != nil {
- log.Printf("[WARNING] Failed setting cache for action %s: %s", newExecId, err)
- } else {
- log.Printf("[DEBUG] Adding %s to cache. Name: %s", newExecId, action.Name)
+ log.Printf("[DEBUG] Appending bind %s to app container", bind)
+ bindSplit := strings.Split(bind, ":")
+ sourceFolder := bindSplit[0]
+ destinationFolder := bindSplit[1]
+ hostConfig.Mounts = append(hostConfig.Mounts, mount.Mount{
+ Type: mount.TypeBind,
+ Source: sourceFolder,
+ Target: destinationFolder,
+ })
}
+ }
+
+ config := &container.Config{
+ Image: image,
+ Env: env,
+ }
- if action.ExecutionDelay > 0 {
- log.Printf("[DEBUG] Running app %s in docker with delay of %d", action.Name, action.ExecutionDelay)
- waitTime := time.Duration(action.ExecutionDelay) * time.Second
- time.AfterFunc(waitTime, func() {
- DeployContainer(ctx, cli, config, hostConfig, identifier, workflowExecution, newExecId)
- })
- } else {
- log.Printf("[DEBUG] Running app %s in docker NORMALLY as there is no delay set with identifier %s", action.Name, identifier)
- returnvalue := DeployContainer(ctx, cli, config, hostConfig, identifier, workflowExecution, newExecId)
- log.Printf("[DEBUG] Normal deploy ret: %s", returnvalue)
- return returnvalue
- }
+ // Checking as late as possible, just in case.
+ newExecId := fmt.Sprintf("%s_%s", workflowExecution.ExecutionId, action.ID)
+ _, err := shuffle.GetCache(ctx, newExecId)
+ if err == nil {
+ log.Printf("[DEBUG][%s] Result for action %s already found - returning", newExecId, action.ID)
return nil
}
+
+ cacheData := []byte("1")
+ err = shuffle.SetCache(ctx, newExecId, cacheData, 30)
+ if err != nil {
+ //log.Printf("[WARNING][%s] Failed setting cache for action: %s", newExecId, err)
+ } else {
+ //log.Printf("[DEBUG][%s] Adding to cache. Name: %s", workflowExecution.ExecutionId, action.Name)
+ }
+
+ if action.ExecutionDelay > 0 {
+ log.Printf("[DEBUG][%s] Running app '%s' with label '%s' in docker with delay of %d", workflowExecution.ExecutionId, action.AppName, action.Label, action.ExecutionDelay)
+ waitTime := time.Duration(action.ExecutionDelay) * time.Second
+
+ time.AfterFunc(waitTime, func() {
+ DeployContainer(ctx, cli, config, hostConfig, identifier, workflowExecution, newExecId)
+ })
+ } else {
+ log.Printf("[DEBUG][%s] Running app %s in docker NORMALLY as there is no delay set with identifier %s", workflowExecution.ExecutionId, action.Name, identifier)
+ returnvalue := DeployContainer(ctx, cli, config, hostConfig, identifier, workflowExecution, newExecId)
+ //log.Printf("[DEBUG][%s] Normal deploy ret: %s", workflowExecution.ExecutionId, returnvalue)
+ return returnvalue
+ }
+
return nil
}
@@ -429,7 +597,7 @@ func cleanupExecution(clientset *kubernetes.Clientset, workflowExecution shuffle
LabelSelector: labelSelector,
})
if err != nil {
- return fmt.Errorf("[ERROR]failed to list apps with label selector %s: %v", labelSelector, err)
+		return fmt.Errorf("[ERROR] Failed to list apps with label selector %s: %#v", labelSelector, err)
}
for _, pod := range podList.Items {
@@ -437,14 +605,14 @@ func cleanupExecution(clientset *kubernetes.Clientset, workflowExecution shuffle
if err != nil {
return fmt.Errorf("failed to delete app %s: %v", pod.Name, err)
}
- fmt.Printf("App %s in namespace %s deleted.\n", pod.Name, namespace)
+ log.Printf("App %s in namespace %s deleted.", pod.Name, namespace)
}
podErr := clientset.CoreV1().Pods(namespace).Delete(context.TODO(), workerName, metav1.DeleteOptions{})
if podErr != nil {
return fmt.Errorf("[ERROR] failed to delete the worker %s in namespace %s: %v", workerName, namespace, podErr)
}
- fmt.Printf("[DEBUG] %s in namespace %s deleted.\n", workerName, namespace)
+ log.Printf("[DEBUG] %s in namespace %s deleted.", workerName, namespace)
return nil
}
@@ -458,6 +626,8 @@ func DeployContainer(ctx context.Context, cli *dockerclient.Client, config *cont
identifier,
)
+ //log.Printf("[DEBUG] config set: %#v", config)
+
if err != nil {
//log.Printf("[ERROR] Failed creating container: %s", err)
if !strings.Contains(err.Error(), "Conflict. The container name") {
@@ -465,7 +635,7 @@ func DeployContainer(ctx context.Context, cli *dockerclient.Client, config *cont
cacheErr := shuffle.DeleteCache(ctx, newExecId)
if cacheErr != nil {
- log.Printf("[ERROR] FAILED Deleting cache for %s: %s", newExecId, cacheErr)
+ log.Printf("[ERROR] FAILURE Deleting cache for %s: %s", newExecId, cacheErr)
}
return err
@@ -489,7 +659,7 @@ func DeployContainer(ctx context.Context, cli *dockerclient.Client, config *cont
cacheErr := shuffle.DeleteCache(ctx, newExecId)
if cacheErr != nil {
- log.Printf("[ERROR] FAILED Deleting cache for %s: %s", newExecId, cacheErr)
+ log.Printf("[ERROR] FAILURE Deleting cache for %s: %s", newExecId, cacheErr)
}
return err
@@ -528,7 +698,7 @@ func DeployContainer(ctx context.Context, cli *dockerclient.Client, config *cont
cacheErr := shuffle.DeleteCache(ctx, newExecId)
if cacheErr != nil {
- log.Printf("[ERROR] FAILED Deleting cache for %s: %s", newExecId, cacheErr)
+ log.Printf("[ERROR] FAILURE Deleting cache for %s: %s", newExecId, cacheErr)
}
return err
@@ -543,7 +713,7 @@ func DeployContainer(ctx context.Context, cli *dockerclient.Client, config *cont
cacheErr := shuffle.DeleteCache(ctx, newExecId)
if cacheErr != nil {
- log.Printf("[ERROR] FAILED Deleting cache for %s: %s", newExecId, cacheErr)
+ log.Printf("[ERROR] FAILURE Deleting cache for %s: %s", newExecId, cacheErr)
}
//shutdown(workflowExecution, workflowExecution.Workflow.ID, true)
@@ -551,16 +721,16 @@ func DeployContainer(ctx context.Context, cli *dockerclient.Client, config *cont
}
}
- log.Printf("[DEBUG] Container %s was created for %s", cont.ID, identifier)
+ log.Printf("[DEBUG][%s] Container %s was created for %s", workflowExecution.ExecutionId, cont.ID, identifier)
// Waiting to see if it exits.. Stupid, but stable(r)
if workflowExecution.ExecutionSource != "default" {
- log.Printf("[INFO] Handling NON-default execution source %s - NOT waiting or validating!", workflowExecution.ExecutionSource)
+ log.Printf("[INFO][%s] Handling NON-default execution source %s - NOT waiting or validating!", workflowExecution.ExecutionId, workflowExecution.ExecutionSource)
} else if workflowExecution.ExecutionSource == "default" {
- log.Printf("[INFO] Handling DEFAULT execution source %s - SKIPPING wait anyway due to exited issues!", workflowExecution.ExecutionSource)
+ log.Printf("[INFO][%s] Handling DEFAULT execution source %s - SKIPPING wait anyway due to exited issues!", workflowExecution.ExecutionId, workflowExecution.ExecutionSource)
}
- log.Printf("[DEBUG] Deployed container ID %s", cont.ID)
+ //log.Printf("[DEBUG] Deployed container ID %s", cont.ID)
//containerIds = append(containerIds, cont.ID)
return nil
@@ -620,37 +790,126 @@ func removeIndex(s []string, i int) []string {
return s[:len(s)-1]
}
-func handleExecutionResult(workflowExecution shuffle.WorkflowExecution) {
- ctx := context.Background()
+func getWorkerURLs() ([]string, error) {
+ workerUrls := []string{}
- //log.Printf("[DEBUG][%s] Pre DecideExecution", workflowExecution.ExecutionId)
- workflowExecution, relevantActions := shuffle.DecideExecution(ctx, workflowExecution, environment)
- startAction, extra, children, parents, visited, executed, nextActions, environments := shuffle.GetExecutionVariables(ctx, workflowExecution.ExecutionId)
+ // Create a new Docker client
+ cli, err := dockerclient.NewEnvClient()
+ if err != nil {
+ log.Println("[ERROR] Failed to create Docker client:", err)
+ return workerUrls, err
+ }
+
+ // Specify the name of the service for which you want to list tasks
+ serviceName := "shuffle-workers"
+
+ // Get the list of tasks for the service
+ tasks, err := cli.TaskList(context.Background(), types.TaskListOptions{
+ Filters: filters.NewArgs(filters.Arg("service", serviceName)),
+ })
- dockercli, err := dockerclient.NewEnvClient()
if err != nil {
- log.Printf("[ERROR] Unable to create docker client (3): %s", err)
+ log.Println("[ERROR] Failed to list tasks for service:", err)
+ return workerUrls, err
+ }
+
+ // Print task information
+ for _, task := range tasks {
+ url := fmt.Sprintf("http://%s.%d.%s:33333", serviceName, task.Slot, task.ID)
+ workerUrls = append(workerUrls, url)
+ }
+
+ return workerUrls, nil
+}
+
+func askOtherWorkersToDownloadImage(image string) {
+ if os.Getenv("SHUFFLE_SWARM_CONFIG") != "run" && os.Getenv("SHUFFLE_SWARM_CONFIG") != "swarm" {
return
}
- // log.Printf("\n\n[DEBUG] Got %d relevant action(s) to run!\n\n", len(relevantActions))
- for _, action := range relevantActions {
- appname := action.AppName
- appversion := action.AppVersion
- appname = strings.Replace(appname, ".", "-", -1)
- appversion = strings.Replace(appversion, ".", "-", -1)
+ urls, err := getWorkerURLs()
+ if err != nil {
+ log.Printf("[ERROR] Error in listing worker urls: %s", err)
+ return
+ }
- parsedAppname := strings.Replace(strings.ToLower(action.AppName), " ", "-", -1)
- image := fmt.Sprintf("%s:%s_%s", baseimagename, parsedAppname, action.AppVersion)
- if strings.Contains(image, " ") {
- image = strings.ReplaceAll(image, " ", "-")
+ for _, url := range urls {
+ log.Printf("[DEBUG] Trying to speak to: %s", url)
+ imagesRequest := ImageRequest{
+ Image: image,
}
- // Added UUID to identifier just in case
- //identifier := fmt.Sprintf("%s_%s_%s_%s_%s", appname, appversion, action.ID, workflowExecution.ExecutionId, uuid.NewV4())
- identifier := fmt.Sprintf("%s_%s_%s_%s", appname, appversion, action.ID, workflowExecution.ExecutionId)
- if strings.Contains(identifier, " ") {
- identifier = strings.ReplaceAll(identifier, " ", "-")
+ url = fmt.Sprintf("%s/api/v1/download", url)
+
+ imageJSON, err := json.Marshal(imagesRequest)
+
+ log.Printf("[INFO] Making a request to %s to download images", url)
+ req, err := http.NewRequest(
+ "POST",
+ url,
+ bytes.NewBuffer(imageJSON),
+ )
+
+ if err != nil {
+ log.Printf("[ERROR] Error in making request to %s : %s", url, err)
+ continue
+ }
+
+ httpClient := &http.Client{}
+ resp, err := httpClient.Do(req)
+ if err != nil {
+ log.Printf("[ERROR] Error in making request to %s : %s", url, err)
+ continue
+ }
+
+ defer resp.Body.Close()
+ respBody, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ log.Printf("[ERROR] Error in reading response body : %s", err)
+ continue
+ }
+
+ log.Printf("[INFO] Response body when tried sending images for nodes to download: %s", respBody)
+ }
+}
+
+func handleExecutionResult(workflowExecution shuffle.WorkflowExecution) {
+ ctx := context.Background()
+
+ workflowExecution, relevantActions := shuffle.DecideExecution(ctx, workflowExecution, environment)
+ if workflowExecution.Status == "FINISHED" || workflowExecution.Status == "FAILURE" || workflowExecution.Status == "ABORTED" {
+ log.Printf("[DEBUG][%s] Shutting down because status is %s", workflowExecution.ExecutionId, workflowExecution.Status)
+ shutdown(workflowExecution, "", "Workflow run is already finished", true)
+ return
+ }
+
+
+ startAction, extra, children, parents, visited, executed, nextActions, environments := shuffle.GetExecutionVariables(ctx, workflowExecution.ExecutionId)
+
+ dockercli, err := dockerclient.NewEnvClient()
+ if err != nil {
+ log.Printf("[ERROR] Unable to create docker client (3): %s", err)
+ return
+ }
+
+ for _, action := range relevantActions {
+ appname := action.AppName
+ appversion := action.AppVersion
+ appname = strings.Replace(appname, ".", "-", -1)
+ appversion = strings.Replace(appversion, ".", "-", -1)
+
+ parsedAppname := strings.Replace(strings.ToLower(action.AppName), " ", "-", -1)
+ image := fmt.Sprintf("%s:%s_%s", baseimagename, parsedAppname, action.AppVersion)
+ if strings.Contains(image, " ") {
+ image = strings.ReplaceAll(image, " ", "-")
+ }
+ askOtherWorkersToDownloadImage(image)
+
+ // Added UUID to identifier just in case
+ //identifier := fmt.Sprintf("%s_%s_%s_%s_%s", appname, appversion, action.ID, workflowExecution.ExecutionId, uuid.NewV4())
+ identifier := fmt.Sprintf("%s_%s_%s_%s", appname, appversion, action.ID, workflowExecution.ExecutionId)
+ if strings.Contains(identifier, " ") {
+ identifier = strings.ReplaceAll(identifier, " ", "-")
}
//if arrayContains(executed, action.ID) || arrayContains(visited, action.ID) {
@@ -688,7 +947,9 @@ func handleExecutionResult(workflowExecution shuffle.WorkflowExecution) {
}
// marshal action and put it in there rofl
- log.Printf("[INFO][%s] Time to execute %s (%s) with app %s:%s, function %s, env %s with %d parameters.", workflowExecution.ExecutionId, action.ID, action.Label, action.AppName, action.AppVersion, action.Name, action.Environment, len(action.Parameters))
+ //log.Printf("[INFO][%s] Time to execute %s (%s) with app %s:%s, function %s, env %s with %d parameters.", workflowExecution.ExecutionId, action.ID, action.Label, action.AppName, action.AppVersion, action.Name, action.Environment, len(action.Parameters))
+
+		log.Printf("[DEBUG][%s] Action: Send, Label: '%s', Action: '%s', Run status: %s", workflowExecution.ExecutionId, action.Label, action.AppName, workflowExecution.Status)
actionData, err := json.Marshal(action)
if err != nil {
@@ -697,7 +958,7 @@ func handleExecutionResult(workflowExecution shuffle.WorkflowExecution) {
}
if action.AppID == "0ca8887e-b4af-4e3e-887c-87e9d3bc3d3e" {
- log.Printf("[DEBUG] Should run filter: %#v\n\n", action)
+ log.Printf("[DEBUG] Should run filter: %#v", action)
runFilter(workflowExecution, action)
continue
}
@@ -718,7 +979,7 @@ func handleExecutionResult(workflowExecution shuffle.WorkflowExecution) {
fmt.Sprintf("CALLBACK_URL=%s", baseUrl),
fmt.Sprintf("BASE_URL=%s", appCallbackUrl),
fmt.Sprintf("TZ=%s", timezone),
- fmt.Sprintf("SHUFFLE_LOGS_DISABLED=%s", os.Getenv("SHUFFLE_LOGS_DISABLED")),
+ fmt.Sprintf("SHUFFLE_LOGS_DISABLED=%s", logsDisabled),
}
if len(actionData) >= 100000 {
@@ -762,6 +1023,21 @@ func handleExecutionResult(workflowExecution shuffle.WorkflowExecution) {
env = append(env, fmt.Sprintf("NO_PROXY=%s", os.Getenv("NO_PROXY")))
}
+ overrideHttpProxy := os.Getenv("SHUFFLE_INTERNAL_HTTP_PROXY")
+ overrideHttpsProxy := os.Getenv("SHUFFLE_INTERNAL_HTTPS_PROXY")
+ if overrideHttpProxy != "" {
+ env = append(env, fmt.Sprintf("SHUFFLE_INTERNAL_HTTP_PROXY=%s", overrideHttpProxy))
+ }
+
+ if overrideHttpsProxy != "" {
+ env = append(env, fmt.Sprintf("SHUFFLE_INTERNAL_HTTPS_PROXY=%s", overrideHttpsProxy))
+ }
+
+ if len(os.Getenv("SHUFFLE_APP_SDK_TIMEOUT")) > 0 {
+ env = append(env, fmt.Sprintf("SHUFFLE_APP_SDK_TIMEOUT=%s", os.Getenv("SHUFFLE_APP_SDK_TIMEOUT")))
+ }
+
+
// Fixes issue:
// standard_go init_linux.go:185: exec user process caused "argument list too long"
// https://devblogs.microsoft.com/oldnewthing/20100203-00/?p=15083
@@ -785,10 +1061,12 @@ func handleExecutionResult(workflowExecution shuffle.WorkflowExecution) {
// 3. Add remote repo location
images := []string{
image,
- fmt.Sprintf("%s:%s_%s", baseimagename, parsedAppname, action.AppVersion),
fmt.Sprintf("%s/%s:%s_%s", registryName, baseimagename, parsedAppname, action.AppVersion),
+ fmt.Sprintf("%s:%s_%s", baseimagename, parsedAppname, action.AppVersion),
}
+
+
// If cleanup is set, it should run for efficiency
pullOptions := types.ImagePullOptions{}
if cleanupEnv == "true" {
@@ -859,7 +1137,7 @@ func handleExecutionResult(workflowExecution shuffle.WorkflowExecution) {
return
} else {
if strings.Contains(buildBuf.String(), "errorDetail") {
- log.Printf("[ERROR] Docker build:\n%s\nERROR ABOVE: Trying to pull tags from: %s", buildBuf.String(), image)
+ log.Printf("[ERROR] Docker build:%sERROR ABOVE: Trying to pull tags from: %s", buildBuf.String(), image)
log.Printf("[DEBUG] Shutting down (6)")
shutdown(workflowExecution, action.ID, fmt.Sprintf("%s", err.Error()), true)
return
@@ -969,7 +1247,7 @@ func handleExecutionResult(workflowExecution shuffle.WorkflowExecution) {
return
} else {
if strings.Contains(buildBuf.String(), "errorDetail") {
- log.Printf("[ERROR] Docker build:\n%s\nERROR ABOVE: Trying to pull tags from: %s", buildBuf.String(), image)
+ log.Printf("[ERROR] Docker build:%sERROR ABOVE: Trying to pull tags from: %s", buildBuf.String(), image)
log.Printf("[DEBUG] Shutting down (14)")
shutdown(workflowExecution, action.ID, fmt.Sprintf("Error deploying container: %s", buildBuf.String()), true)
return
@@ -1018,7 +1296,7 @@ func handleExecutionResult(workflowExecution shuffle.WorkflowExecution) {
// FIXME - clean up stopped (remove) containers with this execution id
err = shuffle.UpdateExecutionVariables(ctx, workflowExecution.ExecutionId, startAction, children, parents, visited, executed, nextActions, environments, extra)
if err != nil {
- log.Printf("\n\n[ERROR] Failed to update exec variables for execution %s: %s (2)\n\n", workflowExecution.ExecutionId, err)
+ log.Printf("[ERROR] Failed to update exec variables for execution %s: %s (2)", workflowExecution.ExecutionId, err)
}
if len(workflowExecution.Results) == len(workflowExecution.Workflow.Actions)+extra {
@@ -1036,13 +1314,22 @@ func handleExecutionResult(workflowExecution shuffle.WorkflowExecution) {
if shutdownCheck {
log.Printf("[INFO][%s] BREAKING BECAUSE RESULTS IS SAME LENGTH AS ACTIONS. SHOULD CHECK ALL RESULTS FOR WHETHER THEY'RE DONE", workflowExecution.ExecutionId)
- validateFinished(workflowExecution)
+ validated := shuffle.ValidateFinished(ctx, -1, workflowExecution)
+ if validated {
+ shutdownData, err := json.Marshal(workflowExecution)
+ if err != nil {
+ log.Printf("[ERROR] Failed marshalling shutdowndata during set: %s", err)
+ }
+
+ sendResult(workflowExecution, shutdownData)
+ }
+
log.Printf("[DEBUG][%s] Shutting down (17)", workflowExecution.ExecutionId)
- if os.Getenv("IS_KUBERNETES") == "true" {
+ if isKubernetes == "true" {
// log.Printf("workflow execution: %#v", workflowExecution)
clientset, err := getKubernetesClient()
if err != nil {
- fmt.Println("[ERROR]Error getting kubernetes client:", err)
+ log.Println("[ERROR] Error getting kubernetes client (1):", err)
os.Exit(1)
}
cleanupExecution(clientset, workflowExecution, "shuffle")
@@ -1094,7 +1381,7 @@ func executionInit(workflowExecution shuffle.WorkflowExecution) error {
}
for _, trigger := range workflowExecution.Workflow.Triggers {
- //log.Printf("Appname trigger (0): %s", trigger.AppName)
+ //log.Printf("Appname trigger (0): %s (%s)", trigger.AppName, trigger.ID)
if trigger.AppName == "User Input" || trigger.AppName == "Shuffle Workflow" {
if trigger.ID == branch.SourceID {
sourceFound = true
@@ -1107,22 +1394,16 @@ func executionInit(workflowExecution shuffle.WorkflowExecution) error {
if sourceFound {
parents[branch.DestinationID] = append(parents[branch.DestinationID], branch.SourceID)
} else {
- log.Printf("[DEBUG] ID %s was not found in actions! Skipping parent. (TRIGGER?)", branch.SourceID)
+ log.Printf("[DEBUG] Parent ID %s was not found in actions! Skipping parent. (TRIGGER?)", branch.SourceID)
}
if destinationFound {
children[branch.SourceID] = append(children[branch.SourceID], branch.DestinationID)
} else {
- log.Printf("[DEBUG] ID %s was not found in actions! Skipping child. (TRIGGER?)", branch.SourceID)
+ log.Printf("[DEBUG] Child ID %s was not found in actions! Skipping child. (TRIGGER?)", branch.SourceID)
}
}
- /*
- log.Printf("\n\n\n[INFO] CHILDREN FOUND: %#v", children)
- log.Printf("[INFO] PARENTS FOUND: %#v", parents)
- log.Printf("[INFO] NEXT ACTIONS: %#v\n\n", nextActions)
- */
-
log.Printf("[INFO][%s] shuffle.Actions: %d + Special shuffle.Triggers: %d", workflowExecution.ExecutionId, len(workflowExecution.Workflow.Actions), extra)
onpremApps := []string{}
toExecuteOnprem := []string{}
@@ -1190,114 +1471,209 @@ func executionInit(workflowExecution shuffle.WorkflowExecution) error {
environments = append(environments, action.Environment)
}
}
- //var visited []string
- //var executed []string
+
err := shuffle.UpdateExecutionVariables(ctx, workflowExecution.ExecutionId, startAction, children, parents, visited, executed, nextActions, environments, extra)
if err != nil {
- log.Printf("\n\n[ERROR] Failed to update exec variables for execution %s: %s\n\n", workflowExecution.ExecutionId, err)
+ log.Printf("[ERROR] Failed to update exec variables for execution %s: %s", workflowExecution.ExecutionId, err)
}
return nil
}
-func handleDefaultExecution(client *http.Client, req *http.Request, workflowExecution shuffle.WorkflowExecution) error {
- // if no onprem runs (shouldn't happen, but extra check), exit
- // if there are some, load the images ASAP for the app
- ctx := context.Background()
- //startAction, extra, children, parents, visited, executed, nextActions, environments := shuffle.GetExecutionVariables(ctx, workflowExecution.ExecutionId)
- startAction, extra, _, _, _, _, _, _ := shuffle.GetExecutionVariables(ctx, workflowExecution.ExecutionId)
+func handleSubflowPoller(ctx context.Context, workflowExecution shuffle.WorkflowExecution, streamResultUrl, subflowId string) error {
+ extra := 0
+ for _, trigger := range workflowExecution.Workflow.Triggers {
+ if trigger.AppName == "User Input" || trigger.AppName == "Shuffle Workflow" {
+ extra += 1
+ }
+ }
- err := executionInit(workflowExecution)
+ req, err := http.NewRequest(
+ "POST",
+ streamResultUrl,
+ bytes.NewBuffer([]byte(data)),
+ )
+
+ newresp, err := topClient.Do(req)
if err != nil {
- log.Printf("[INFO] Workflow setup failed for %s: %s", workflowExecution.ExecutionId, err)
- log.Printf("[DEBUG] Shutting down (18)")
- shutdown(workflowExecution, "", "", true)
+ log.Printf("[ERROR] Failed making request (1): %s", err)
+ time.Sleep(time.Duration(sleepTime) * time.Second)
+ return err
}
- log.Printf("[DEBUG] DEFAULT EXECUTION Startaction: %s", startAction)
+ defer newresp.Body.Close()
+ body, err := ioutil.ReadAll(newresp.Body)
+ if err != nil {
+ log.Printf("[ERROR] Failed reading body (1): %s", err)
+ time.Sleep(time.Duration(sleepTime) * time.Second)
+ return err
+ }
- setWorkflowExecution(ctx, workflowExecution, false)
+ if newresp.StatusCode != 200 {
+ log.Printf("[ERROR] Bad statuscode: %d, %s", newresp.StatusCode, string(body))
- streamResultUrl := fmt.Sprintf("%s/api/v1/streams/results", baseUrl)
- for {
- //fullUrl := fmt.Sprintf("%s/api/v1/workflows/%s/executions/%s/abort", baseUrl, workflowExecution.Workflow.ID, workflowExecution.ExecutionId)
- //log.Printf("[INFO] URL: %s", fullUrl)
- req, err := http.NewRequest(
- "POST",
- streamResultUrl,
- bytes.NewBuffer([]byte(data)),
- )
+ if strings.Contains(string(body), "Workflowexecution is already finished") {
+ log.Printf("[DEBUG] Shutting down (19)")
+ shutdown(workflowExecution, "", "", true)
+ }
- newresp, err := topClient.Do(req)
- if err != nil {
- log.Printf("[ERROR] Failed making request (1): %s", err)
- time.Sleep(time.Duration(sleepTime) * time.Second)
- continue
+ time.Sleep(time.Duration(sleepTime) * time.Second)
+		return fmt.Errorf("Bad statuscode: %d", newresp.StatusCode)
+ }
+
+ err = json.Unmarshal(body, &workflowExecution)
+ if err != nil {
+ log.Printf("[ERROR] Failed workflowExecution unmarshal: %s", err)
+ time.Sleep(time.Duration(sleepTime) * time.Second)
+ return err
+ }
+
+ if workflowExecution.Status == "FINISHED" || workflowExecution.Status == "SUCCESS" {
+ log.Printf("[INFO][%s] Workflow execution is finished. Exiting worker.", workflowExecution.ExecutionId)
+ log.Printf("[DEBUG] Shutting down (20)")
+ if isKubernetes == "true" {
+ // log.Printf("workflow execution: %#v", workflowExecution)
+ clientset, err := getKubernetesClient()
+ if err != nil {
+ log.Println("[ERROR] Error getting kubernetes client (2):", err)
+ os.Exit(1)
+ }
+
+ cleanupExecution(clientset, workflowExecution, "shuffle")
+ } else {
+ shutdown(workflowExecution, "", "", true)
}
+ }
- defer newresp.Body.Close()
- body, err := ioutil.ReadAll(newresp.Body)
- if err != nil {
- log.Printf("[ERROR] Failed reading body (1): %s", err)
- time.Sleep(time.Duration(sleepTime) * time.Second)
+ for _, result := range workflowExecution.Results {
+ if result.Action.ID != subflowId {
continue
}
- if newresp.StatusCode != 200 {
- log.Printf("[ERROR] Bad statuscode: %d, %s", newresp.StatusCode, string(body))
+ log.Printf("[DEBUG][%s] Found subflow to handle: %s (%s)", workflowExecution.ExecutionId, result.Action.Label, result.Status)
+ if result.Status == "SUCCESS" || result.Status == "FINISHED" || result.Status == "FAILURE" || result.Status == "ABORTED" {
+ // Check for results
- if strings.Contains(string(body), "Workflowexecution is already finished") {
- log.Printf("[DEBUG] Shutting down (19)")
- shutdown(workflowExecution, "", "", true)
- }
+ setWorkflowExecution(ctx, workflowExecution, false)
+ return nil
+ }
+ }
- time.Sleep(time.Duration(sleepTime) * time.Second)
- continue
+ log.Printf("[INFO][%s] Status: %s, Results: %d, actions: %d", workflowExecution.ExecutionId, workflowExecution.Status, len(workflowExecution.Results), len(workflowExecution.Workflow.Actions)+extra)
+ return errors.New("Subflow status not found yet")
+}
+
+func handleDefaultExecutionWrapper(ctx context.Context, workflowExecution shuffle.WorkflowExecution, streamResultUrl string, extra int) error {
+ if extra == -1 {
+ extra = 0
+ for _, trigger := range workflowExecution.Workflow.Triggers {
+ if trigger.AppName == "User Input" || trigger.AppName == "Shuffle Workflow" {
+ extra += 1
+ }
}
+ }
- err = json.Unmarshal(body, &workflowExecution)
- if err != nil {
- log.Printf("[ERROR] Failed workflowExecution unmarshal: %s", err)
- time.Sleep(time.Duration(sleepTime) * time.Second)
- continue
+ req, err := http.NewRequest(
+ "POST",
+ streamResultUrl,
+ bytes.NewBuffer([]byte(data)),
+ )
+
+ newresp, err := topClient.Do(req)
+ if err != nil {
+ log.Printf("[ERROR] Failed making request (1): %s", err)
+ time.Sleep(time.Duration(sleepTime) * time.Second)
+ return err
+ }
+
+ defer newresp.Body.Close()
+ body, err := ioutil.ReadAll(newresp.Body)
+ if err != nil {
+ log.Printf("[ERROR] Failed reading body (1): %s", err)
+ time.Sleep(time.Duration(sleepTime) * time.Second)
+ return err
+ }
+
+ if newresp.StatusCode != 200 {
+ log.Printf("[ERROR] Bad statuscode: %d, %s", newresp.StatusCode, string(body))
+
+ if strings.Contains(string(body), "Workflowexecution is already finished") {
+ log.Printf("[DEBUG] Shutting down (19)")
+ shutdown(workflowExecution, "", "", true)
}
- if workflowExecution.Status == "FINISHED" || workflowExecution.Status == "SUCCESS" {
- log.Printf("[INFO][%s] Workflow execution is finished. Exiting worker.", workflowExecution.ExecutionId)
- log.Printf("[DEBUG] Shutting down (20)")
- //handle workerssssssssss
- if os.Getenv("IS_KUBERNETES") == "true" {
- // log.Printf("workflow execution: %#v", workflowExecution)
- clientset, err := getKubernetesClient()
- if err != nil {
- fmt.Println("[ERROR]Error getting kubernetes client:", err)
- os.Exit(1)
- }
- cleanupExecution(clientset, workflowExecution, "shuffle")
- } else {
- shutdown(workflowExecution, "", "", true)
+ time.Sleep(time.Duration(sleepTime) * time.Second)
+		return fmt.Errorf("Bad statuscode: %d", newresp.StatusCode)
+ }
+
+ err = json.Unmarshal(body, &workflowExecution)
+ if err != nil {
+ log.Printf("[ERROR] Failed workflowExecution unmarshal: %s", err)
+ time.Sleep(time.Duration(sleepTime) * time.Second)
+ return err
+ }
+
+ if workflowExecution.Status == "FINISHED" || workflowExecution.Status == "SUCCESS" {
+ log.Printf("[INFO][%s] Workflow execution is finished. Exiting worker.", workflowExecution.ExecutionId)
+ log.Printf("[DEBUG] Shutting down (20)")
+ if isKubernetes == "true" {
+ // log.Printf("workflow execution: %#v", workflowExecution)
+ clientset, err := getKubernetesClient()
+ if err != nil {
+ log.Println("[ERROR] Error getting kubernetes client (2):", err)
+ os.Exit(1)
}
+ cleanupExecution(clientset, workflowExecution, "shuffle")
+ } else {
+ shutdown(workflowExecution, "", "", true)
}
+ }
- log.Printf("[INFO][%s] Status: %s, Results: %d, actions: %d", workflowExecution.ExecutionId, workflowExecution.Status, len(workflowExecution.Results), len(workflowExecution.Workflow.Actions)+extra)
- if workflowExecution.Status != "EXECUTING" {
- log.Printf("[WARNING][%s] Exiting as worker execution has status %s!", workflowExecution.ExecutionId, workflowExecution.Status)
- log.Printf("[DEBUG] Shutting down (21)")
- if os.Getenv("IS_KUBERNETES") == "true" {
- // log.Printf("workflow execution: %#v", workflowExecution)
- clientset, err := getKubernetesClient()
- if err != nil {
- fmt.Println("[ERROR]Error getting kubernetes client:", err)
- os.Exit(1)
- }
- cleanupExecution(clientset, workflowExecution, "shuffle")
- } else {
- shutdown(workflowExecution, "", "", true)
+ log.Printf("[INFO][%s] Status: %s, Results: %d, actions: %d", workflowExecution.ExecutionId, workflowExecution.Status, len(workflowExecution.Results), len(workflowExecution.Workflow.Actions)+extra)
+ if workflowExecution.Status != "EXECUTING" {
+ log.Printf("[WARNING][%s] Exiting as worker execution has status %s!", workflowExecution.ExecutionId, workflowExecution.Status)
+ log.Printf("[DEBUG] Shutting down (21)")
+ if isKubernetes == "true" {
+ // log.Printf("workflow execution: %#v", workflowExecution)
+ clientset, err := getKubernetesClient()
+ if err != nil {
+ log.Println("[ERROR] Error getting kubernetes client (3):", err)
+ os.Exit(1)
}
+ cleanupExecution(clientset, workflowExecution, "shuffle")
+ } else {
+ shutdown(workflowExecution, "", "", true)
}
+ }
+
+ setWorkflowExecution(ctx, workflowExecution, false)
+ return nil
+}
- setWorkflowExecution(ctx, workflowExecution, false)
- //handleExecutionResult(workflowExecution)
+func handleDefaultExecution(client *http.Client, req *http.Request, workflowExecution shuffle.WorkflowExecution) error {
+ // if no onprem runs (shouldn't happen, but extra check), exit
+ // if there are some, load the images ASAP for the app
+ ctx := context.Background()
+ //startAction, extra, children, parents, visited, executed, nextActions, environments := shuffle.GetExecutionVariables(ctx, workflowExecution.ExecutionId)
+ startAction, extra, _, _, _, _, _, _ := shuffle.GetExecutionVariables(ctx, workflowExecution.ExecutionId)
+
+ err := executionInit(workflowExecution)
+ if err != nil {
+ log.Printf("[INFO] Workflow setup failed for %s: %s", workflowExecution.ExecutionId, err)
+ log.Printf("[DEBUG] Shutting down (18)")
+ shutdown(workflowExecution, "", "", true)
+ }
+
+ log.Printf("[DEBUG] DEFAULT EXECUTION Startaction: %s", startAction)
+
+ setWorkflowExecution(ctx, workflowExecution, false)
+
+ streamResultUrl := fmt.Sprintf("%s/api/v1/streams/results", baseUrl)
+ for {
+ err = handleDefaultExecutionWrapper(ctx, workflowExecution, streamResultUrl, extra)
+ if err != nil {
+ log.Printf("[ERROR] Failed handling default execution: %s", err)
+ }
}
return nil
@@ -1377,7 +1753,7 @@ func runSkipAction(client *http.Client, action shuffle.Action, workflowId, workf
return err
}
- newresp, err := client.Do(req)
+ newresp, err := topClient.Do(req)
if err != nil {
log.Printf("[WARNING] Error running skip request (0): %s", err)
return err
@@ -1394,167 +1770,6 @@ func runSkipAction(client *http.Client, action shuffle.Action, workflowId, workf
return nil
}
-// Sends request back to backend to handle the node
-func runUserInput(client *http.Client, action shuffle.Action, workflowId string, workflowExecution shuffle.WorkflowExecution, authorization string, configuration string, dockercli *dockerclient.Client) error {
- timeNow := time.Now().Unix()
- result := shuffle.ActionResult{
- Action: action,
- ExecutionId: workflowExecution.ExecutionId,
- Authorization: authorization,
- Result: configuration,
- StartedAt: timeNow,
- CompletedAt: 0,
- Status: "WAITING",
- }
-
- // Checking for userinput to deploy subflow for it
- subflow := false
- subflowId := ""
- argument := ""
- continueUrl := "testing continue"
- cancelUrl := "testing cancel"
- for _, item := range action.Parameters {
- if item.Name == "subflow" {
- subflow = true
- subflowId = item.Value
- } else if item.Name == "alertinfo" {
- argument = item.Value
- }
- }
-
- if subflow {
- log.Printf("[DEBUG] Should run action with subflow app with argument %#v", argument)
- newAction := shuffle.Action{
- AppName: "shuffle-subflow",
- Name: "run_subflow",
- AppVersion: "1.0.0",
- Label: "User Input Subflow Execution",
- }
-
- identifier := fmt.Sprintf("%s_%s_%s_%s", newAction.AppName, newAction.AppVersion, action.ID, workflowExecution.ExecutionId)
- if strings.Contains(identifier, " ") {
- identifier = strings.ReplaceAll(identifier, " ", "-")
- }
-
- inputValue := UserInputSubflow{
- Argument: argument,
- ContinueUrl: continueUrl,
- CancelUrl: cancelUrl,
- }
-
- parsedArgument, err := json.Marshal(inputValue)
- if err != nil {
- log.Printf("[ERROR] Failed to parse arguments: %s", err)
- parsedArgument = []byte(argument)
- }
-
- newAction.Parameters = []shuffle.WorkflowAppActionParameter{
- shuffle.WorkflowAppActionParameter{
- Name: "user_apikey",
- Value: workflowExecution.Authorization,
- },
- shuffle.WorkflowAppActionParameter{
- Name: "workflow",
- Value: subflowId,
- },
- shuffle.WorkflowAppActionParameter{
- Name: "argument",
- Value: string(parsedArgument),
- },
- }
-
- newAction.Parameters = append(newAction.Parameters, shuffle.WorkflowAppActionParameter{
- Name: "source_workflow",
- Value: workflowExecution.Workflow.ID,
- })
-
- newAction.Parameters = append(newAction.Parameters, shuffle.WorkflowAppActionParameter{
- Name: "source_execution",
- Value: workflowExecution.ExecutionId,
- })
-
- newAction.Parameters = append(newAction.Parameters, shuffle.WorkflowAppActionParameter{
- Name: "source_node",
- Value: action.ID,
- })
-
- newAction.Parameters = append(newAction.Parameters, shuffle.WorkflowAppActionParameter{
- Name: "source_auth",
- Value: workflowExecution.Authorization,
- })
-
- newAction.Parameters = append(newAction.Parameters, shuffle.WorkflowAppActionParameter{
- Name: "startnode",
- Value: "",
- })
-
- // If cleanup is set, it should run for efficiency
- //appName := strings.Replace(identifier, fmt.Sprintf("_%s", action.ID), "", -1)
- //appName = strings.Replace(appName, fmt.Sprintf("_%s", workflowExecution.ExecutionId), "", -1)
- actionData, err := json.Marshal(newAction)
- if err != nil {
- return err
- }
-
- env := []string{
- fmt.Sprintf("ACTION=%s", string(actionData)),
- fmt.Sprintf("EXECUTIONID=%s", workflowExecution.ExecutionId),
- fmt.Sprintf("AUTHORIZATION=%s", workflowExecution.Authorization),
- fmt.Sprintf("CALLBACK_URL=%s", baseUrl),
- fmt.Sprintf("BASE_URL=%s", appCallbackUrl),
- fmt.Sprintf("TZ=%s", timezone),
- fmt.Sprintf("SHUFFLE_LOGS_DISABLED=%s", os.Getenv("SHUFFLE_LOGS_DISABLED")),
- }
-
- if strings.ToLower(os.Getenv("SHUFFLE_PASS_APP_PROXY")) == "true" {
- //log.Printf("APPENDING PROXY TO THE APP!")
- env = append(env, fmt.Sprintf("HTTP_PROXY=%s", os.Getenv("HTTP_PROXY")))
- env = append(env, fmt.Sprintf("HTTPS_PROXY=%s", os.Getenv("HTTPS_PROXY")))
- env = append(env, fmt.Sprintf("NO_PROXY=%s", os.Getenv("NO_PROXY")))
- }
-
- err = deployApp(dockercli, "frikky/shuffle:shuffle-subflow_1.0.0", identifier, env, workflowExecution, newAction)
- if err != nil {
- log.Printf("[ERROR] Failed to deploy subflow for user input trigger %s: %s", action.ID, err)
- }
- } else {
- log.Printf("[DEBUG] Running user input WITHOUT subflow")
- }
-
- resultData, err := json.Marshal(result)
- if err != nil {
- return err
- }
-
- streamUrl := fmt.Sprintf("%s/api/v1/streams", baseUrl)
- req, err := http.NewRequest(
- "POST",
- streamUrl,
- bytes.NewBuffer([]byte(resultData)),
- )
-
- if err != nil {
- log.Printf("[WARNING] Error building test request (2): %s", err)
- return err
- }
-
- newresp, err := client.Do(req)
- if err != nil {
- log.Printf("[WARNING] Error running test request (2): %s", err)
- return err
- }
-
- defer newresp.Body.Close()
- body, err := ioutil.ReadAll(newresp.Body)
- if err != nil {
- log.Printf("Failed reading body when waiting: %s", err)
- return err
- }
-
- log.Printf("[INFO] User Input Body: %s", string(body))
- return nil
-}
-
func runTestExecution(client *http.Client, workflowId, apikey string) (string, string) {
executeUrl := fmt.Sprintf("%s/api/v1/workflows/%s/execute", baseUrl, workflowId)
req, err := http.NewRequest(
@@ -1569,7 +1784,7 @@ func runTestExecution(client *http.Client, workflowId, apikey string) (string, s
}
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", apikey))
- newresp, err := client.Do(req)
+ newresp, err := topClient.Do(req)
if err != nil {
log.Printf("[WARNING] Error running test request (3): %s", err)
return "", ""
@@ -1593,12 +1808,53 @@ func runTestExecution(client *http.Client, workflowId, apikey string) (string, s
return workflowExecution.Authorization, workflowExecution.ExecutionId
}
+func isRunningInCluster() bool {
+ _, existsHost := os.LookupEnv("KUBERNETES_SERVICE_HOST")
+ _, existsPort := os.LookupEnv("KUBERNETES_SERVICE_PORT")
+ return existsHost && existsPort
+}
+
+func buildEnvVars(envMap map[string]string) []corev1.EnvVar {
+ var envVars []corev1.EnvVar
+ for key, value := range envMap {
+ envVars = append(envVars, corev1.EnvVar{Name: key, Value: value})
+ }
+ return envVars
+}
+
+func getKubernetesClient() (*kubernetes.Clientset, error) {
+ if isRunningInCluster() {
+ config, err := rest.InClusterConfig()
+ if err != nil {
+ return nil, err
+ }
+ clientset, err := kubernetes.NewForConfig(config)
+ if err != nil {
+ return nil, err
+ }
+ return clientset, nil
+ } else {
+ home := homedir.HomeDir()
+ kubeconfigPath := filepath.Join(home, ".kube", "config")
+ config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
+ if err != nil {
+ return nil, err
+ }
+ clientset, err := kubernetes.NewForConfig(config)
+ if err != nil {
+ return nil, err
+ }
+ return clientset, nil
+ }
+}
+
func handleWorkflowQueue(resp http.ResponseWriter, request *http.Request) {
if request.Body == nil {
resp.WriteHeader(http.StatusBadRequest)
return
}
+ defer request.Body.Close()
body, err := ioutil.ReadAll(request.Body)
if err != nil {
log.Printf("[WARNING] (3) Failed reading body for workflowqueue")
@@ -1607,8 +1863,6 @@ func handleWorkflowQueue(resp http.ResponseWriter, request *http.Request) {
return
}
- defer request.Body.Close()
-
var actionResult shuffle.ActionResult
err = json.Unmarshal(body, &actionResult)
if err != nil {
@@ -1619,7 +1873,7 @@ func handleWorkflowQueue(resp http.ResponseWriter, request *http.Request) {
}
if len(actionResult.ExecutionId) == 0 {
- log.Printf("[WARNING] No workflow execution id in action result. Data: %s", string(body))
+ log.Printf("[ERROR] No workflow execution id in action result. Data: %s", string(body))
resp.WriteHeader(400)
resp.Write([]byte(fmt.Sprintf(`{"success": false, "reason": "No workflow execution id in action result"}`)))
return
@@ -1635,44 +1889,47 @@ func handleWorkflowQueue(resp http.ResponseWriter, request *http.Request) {
workflowExecution, err := shuffle.GetWorkflowExecution(ctx, actionResult.ExecutionId)
if err != nil {
log.Printf("[ERROR][%s] Failed getting execution (workflowqueue) %s: %s", actionResult.ExecutionId, actionResult.ExecutionId, err)
- resp.WriteHeader(401)
+ resp.WriteHeader(500)
resp.Write([]byte(fmt.Sprintf(`{"success": false, "reason": "Failed getting execution ID %s because it doesn't exist locally."}`, actionResult.ExecutionId)))
return
}
if workflowExecution.Authorization != actionResult.Authorization {
- log.Printf("[INFO] Bad authorization key when updating node (workflowQueue) %s. Want: %s, Have: %s", actionResult.ExecutionId, workflowExecution.Authorization, actionResult.Authorization)
- resp.WriteHeader(401)
+ log.Printf("[ERROR][%s] Bad authorization key when updating node (workflowQueue). Want: %s, Have: %s", actionResult.ExecutionId, workflowExecution.Authorization, actionResult.Authorization)
+ resp.WriteHeader(403)
resp.Write([]byte(fmt.Sprintf(`{"success": false, "reason": "Bad authorization key"}`)))
return
}
if workflowExecution.Status == "FINISHED" {
- log.Printf("[DEBUG] Workflowexecution is already FINISHED. No further action can be taken")
- resp.WriteHeader(401)
+ log.Printf("[DEBUG][%s] Workflowexecution is already FINISHED. No further action can be taken", workflowExecution.ExecutionId)
+ resp.WriteHeader(200)
resp.Write([]byte(fmt.Sprintf(`{"success": false, "reason": "Workflowexecution is already finished because it has status %s. Lastnode: %s"}`, workflowExecution.Status, workflowExecution.LastNode)))
return
}
if workflowExecution.Status == "ABORTED" || workflowExecution.Status == "FAILURE" {
+ log.Printf("[WARNING][%s] Workflowexecution already has status %s. No further action can be taken", workflowExecution.ExecutionId, workflowExecution.Status)
+ resp.WriteHeader(200)
+ resp.Write([]byte(fmt.Sprintf(`{"success": false, "reason": "Workflowexecution is aborted because of %s with result %s and status %s"}`, workflowExecution.LastNode, workflowExecution.Result, workflowExecution.Status)))
+ return
+ }
- if workflowExecution.Workflow.Configuration.ExitOnError {
- log.Printf("[WARNING] Workflowexecution already has status %s. No further action can be taken", workflowExecution.Status)
- resp.WriteHeader(401)
- resp.Write([]byte(fmt.Sprintf(`{"success": false, "reason": "Workflowexecution is aborted because of %s with result %s and status %s"}`, workflowExecution.LastNode, workflowExecution.Result, workflowExecution.Status)))
- return
- } else {
- log.Printf("Continuing even though it's aborted.")
+ retries := 0
+ retry, retriesok := request.URL.Query()["retries"]
+ if retriesok && len(retry) > 0 {
+ val, err := strconv.Atoi(retry[0])
+ if err == nil {
+ retries = val
}
}
- log.Printf("[INFO][%s] Got result '%s' from '%s' with app '%s':'%s'", actionResult.ExecutionId, actionResult.Status, actionResult.Action.Label, actionResult.Action.AppName, actionResult.Action.AppVersion)
+ log.Printf("[DEBUG][%s] Action: Received, Label: '%s', Action: '%s', Status: %s, Run status: %s, Extra=Retry:%d", workflowExecution.ExecutionId, actionResult.Action.Label, actionResult.Action.AppName, actionResult.Status, workflowExecution.Status, retries)
//results = append(results, actionResult)
//log.Printf("[INFO][%s] Time to execute %s (%s) with app %s:%s, function %s, env %s with %d parameters.", workflowExecution.ExecutionId, action.ID, action.Label, action.AppName, action.AppVersion, action.Name, action.Environment, len(action.Parameters))
//log.Printf("[DEBUG][%s] In workflowQueue with transaction", workflowExecution.ExecutionId)
runWorkflowExecutionTransaction(ctx, 0, workflowExecution.ExecutionId, actionResult, resp)
-
}
// Will make sure transactions are always ran for an execution. This is recursive if it fails. Allowed to fail up to 5 times
@@ -1690,12 +1947,38 @@ func runWorkflowExecutionTransaction(ctx context.Context, attempts int64, workfl
setExecution := true
workflowExecution, dbSave, err := shuffle.ParsedExecutionResult(ctx, *workflowExecution, actionResult, true, 0)
- if err != nil {
+ if err == nil {
+ if workflowExecution.Status != "EXECUTING" && workflowExecution.Status != "WAITING" {
+ log.Printf("[WARNING][%s] Execution is not executing, but %s. Stopping Transaction update.", workflowExecution.ExecutionId, workflowExecution.Status)
+ if resp != nil {
+ resp.WriteHeader(200)
+ resp.Write([]byte(fmt.Sprintf(`{"success": true, "reason": "Execution is not executing, but %s"}`, workflowExecution.Status)))
+ }
+
+
+ log.Printf("[DEBUG][%s] Shutting down (35)", workflowExecution.ExecutionId)
+
+ // Force sending result
+ shutdownData, err := json.Marshal(workflowExecution)
+ if err != nil {
+ log.Printf("[ERROR][%s] Failed marshalling execution (35): %s", workflowExecution.ExecutionId, err)
+ }
+
+ sendResult(*workflowExecution, shutdownData)
+ shutdown(*workflowExecution, "", "", false)
+ return
+ }
+ } else {
+ if strings.Contains(strings.ToLower(fmt.Sprintf("%s", err)), "already been ran") || strings.Contains(strings.ToLower(fmt.Sprintf("%s", err)), "already finished") {
+			log.Printf("[ERROR][%s] Skipping rerun of action result as it's already been ran: %s", workflowExecution.ExecutionId, err)
+ return
+ }
+
log.Printf("[DEBUG] Rerunning transaction? %s", err)
if strings.Contains(fmt.Sprintf("%s", err), "Rerun this transaction") {
workflowExecution, err := shuffle.GetWorkflowExecution(ctx, workflowExecutionId)
if err != nil {
- log.Printf("[ERROR] Failed getting execution cache (2): %s", err)
+ log.Printf("[ERROR][%s] Failed getting execution cache (2): %s", workflowExecution.ExecutionId, err)
resp.WriteHeader(401)
resp.Write([]byte(fmt.Sprintf(`{"success": false, "reason": "Failed getting execution (2)"}`)))
return
@@ -1706,15 +1989,15 @@ func runWorkflowExecutionTransaction(ctx context.Context, attempts int64, workfl
workflowExecution, dbSave, err = shuffle.ParsedExecutionResult(ctx, *workflowExecution, actionResult, false, 0)
if err != nil {
- log.Printf("[ERROR] Failed execution of parsedexecution (2): %s", err)
+ log.Printf("[ERROR][%s] Failed execution of parsedexecution (2): %s", workflowExecution.ExecutionId, err)
resp.WriteHeader(401)
resp.Write([]byte(fmt.Sprintf(`{"success": false, "reason": "Failed getting execution (2)"}`)))
return
} else {
- log.Printf("[DEBUG] Successfully got ParsedExecution with %d results!", len(workflowExecution.Results))
+ log.Printf("[DEBUG][%s] Successfully got ParsedExecution with %d results!", workflowExecution.ExecutionId, len(workflowExecution.Results))
}
} else {
- log.Printf("[ERROR] Failed execution of parsedexecution: %s", err)
+ log.Printf("[ERROR][%s] Failed execution of parsedexecution: %s", workflowExecution.ExecutionId, err)
resp.WriteHeader(401)
resp.Write([]byte(fmt.Sprintf(`{"success": false, "reason": "Failed getting execution"}`)))
return
@@ -1739,43 +2022,28 @@ func runWorkflowExecutionTransaction(ctx context.Context, attempts int64, workfl
cacheData := []byte(cache.([]uint8))
err = json.Unmarshal(cacheData, &workflowExecution)
if err != nil {
- log.Printf("[ERROR] Failed unmarshalling workflowexecution: %s", err)
+ log.Printf("[ERROR][%s] Failed unmarshalling workflowexecution: %s", workflowExecution.ExecutionId, err)
}
if len(parsedValue.Results) > 0 && len(parsedValue.Results) != resultLength {
setExecution = false
if attempts > 5 {
- //log.Printf("\n\nSkipping execution input - %d vs %d. Attempts: (%d)\n\n", len(parsedValue.Results), resultLength, attempts)
}
attempts += 1
+ log.Printf("[DEBUG][%s] Rerunning transaction as results has changed. %d vs %d", workflowExecution.ExecutionId, len(parsedValue.Results), resultLength)
+ /*
if len(workflowExecution.Results) <= len(workflowExecution.Workflow.Actions) {
+ log.Printf("[DEBUG][%s] Rerunning transaction as results has changed. %d vs %d", workflowExecution.ExecutionId, len(workflowExecution.Results), len(workflowExecution.Workflow.Actions))
runWorkflowExecutionTransaction(ctx, attempts, workflowExecutionId, actionResult, resp)
return
}
+ */
}
}
- /*
- if value, found := requestCache.Get(cacheKey); found {
- parsedValue := value.(*shuffle.WorkflowExecution)
- if len(parsedValue.Results) > 0 && len(parsedValue.Results) != resultLength {
- setExecution = false
- if attempts > 5 {
- //log.Printf("\n\nSkipping execution input - %d vs %d. Attempts: (%d)\n\n", len(parsedValue.Results), resultLength, attempts)
- }
-
- attempts += 1
- if len(workflowExecution.Results) <= len(workflowExecution.Workflow.Actions) {
- runWorkflowExecutionTransaction(ctx, attempts, workflowExecutionId, actionResult, resp)
- return
- }
- }
- }
- */
-
if setExecution || workflowExecution.Status == "FINISHED" || workflowExecution.Status == "ABORTED" || workflowExecution.Status == "FAILURE" {
- log.Printf("[DEBUG][%s] Running setexec with status %s and %d results", workflowExecution.ExecutionId, workflowExecution.Status, len(workflowExecution.Results))
+ log.Printf("[DEBUG][%s] Running setexec with status %s and %d result(s)", workflowExecution.ExecutionId, workflowExecution.Status, len(workflowExecution.Results))
err = setWorkflowExecution(ctx, *workflowExecution, dbSave)
if err != nil {
resp.WriteHeader(401)
@@ -1789,7 +2057,6 @@ func runWorkflowExecutionTransaction(ctx context.Context, attempts int64, workfl
// Just in case. Should MAYBE validate finishing another time as well.
// This fixes issues with e.g. shuffle.Action -> shuffle.Trigger -> shuffle.Action.
handleExecutionResult(*workflowExecution)
- //validateFinished(workflowExecution)
}
//if newExecutions && len(nextActions) > 0 {
@@ -1802,8 +2069,7 @@ func runWorkflowExecutionTransaction(ctx context.Context, attempts int64, workfl
}
func sendSelfRequest(actionResult shuffle.ActionResult) {
- log.Printf("[INFO][%s] Not sending backend info since source is default (not swarm)", actionResult.ExecutionId)
- return
+
data, err := json.Marshal(actionResult)
if err != nil {
@@ -1837,7 +2103,7 @@ func sendSelfRequest(actionResult shuffle.ActionResult) {
newresp, err := topClient.Do(req)
if err != nil {
- log.Printf("[ERROR][%s] Error running self request (2): %s", actionResult.ExecutionId, err)
+ log.Printf("[ERROR][%s] Error running finishing request (2): %s", actionResult.ExecutionId, err)
return
}
@@ -1846,9 +2112,9 @@ func sendSelfRequest(actionResult shuffle.ActionResult) {
body, err := ioutil.ReadAll(newresp.Body)
//log.Printf("[INFO] BACKEND STATUS: %d", newresp.StatusCode)
if err != nil {
- log.Printf("[ERROR][%s] Failed reading self request body: %s", actionResult.ExecutionId, err)
+ log.Printf("[ERROR][%s] Failed reading body: %s", actionResult.ExecutionId, err)
} else {
- log.Printf("[DEBUG][%s] NEWRESP (from self - 1): %s", actionResult.ExecutionId, string(body))
+ log.Printf("[DEBUG][%s] NEWRESP (from backend): %s", actionResult.ExecutionId, string(body))
}
}
}
@@ -1857,8 +2123,27 @@ func sendResult(workflowExecution shuffle.WorkflowExecution, data []byte) {
if workflowExecution.ExecutionSource == "default" && os.Getenv("SHUFFLE_SWARM_CONFIG") != "run" && os.Getenv("SHUFFLE_SWARM_CONFIG") != "swarm" {
//log.Printf("[INFO][%s] Not sending backend info since source is default (not swarm)", workflowExecution.ExecutionId)
//return
+ } else {
+ }
+
+ // Basically to reduce backend strain
+ /*
+ if shuffle.ArrayContains(finishedExecutions, workflowExecution.ExecutionId) {
+ log.Printf("[INFO][%s] NOT sending backend info since it's already been sent before.", workflowExecution.ExecutionId)
+ return
+ }
+ */
+
+ // Take it down again
+ /*
+ if len(finishedExecutions) > 100 {
+ log.Printf("[DEBUG][%s] Removing old execution from finishedExecutions: %s", workflowExecution.ExecutionId, finishedExecutions[0])
+ finishedExecutions = finishedExecutions[99:]
}
+ finishedExecutions = append(finishedExecutions, workflowExecution.ExecutionId)
+ */
+
streamUrl := fmt.Sprintf("%s/api/v1/streams", baseUrl)
req, err := http.NewRequest(
"POST",
@@ -1907,10 +2192,9 @@ func validateFinished(workflowExecution shuffle.WorkflowExecution) bool {
workflowExecution = shuffle.Fixexecution(ctx, workflowExecution)
_, extra, _, _, _, _, _, environments := shuffle.GetExecutionVariables(ctx, workflowExecution.ExecutionId)
- log.Printf("[INFO][%s] VALIDATION. Status: %s, shuffle.Actions: %d, Extra: %d, Results: %d. Parent: %#v\n", workflowExecution.ExecutionId, workflowExecution.Status, len(workflowExecution.Workflow.Actions), extra, len(workflowExecution.Results), workflowExecution.ExecutionParent)
+ log.Printf("[INFO][%s] VALIDATION. Status: %s, shuffle.Actions: %d, Extra: %d, Results: %d. Parent: %#v", workflowExecution.ExecutionId, workflowExecution.Status, len(workflowExecution.Workflow.Actions), extra, len(workflowExecution.Results), workflowExecution.ExecutionParent)
- //if len(workflowExecution.Results) == len(workflowExecution.Workflow.Actions)+extra {
- if (len(environments) == 1 && requestsSent == 0 && len(workflowExecution.Results) >= 1 && os.Getenv("SHUFFLE_SWARM_CONFIG") != "run" && os.Getenv("SHUFFLE_SWARM_CONFIG") != "swarm") || (len(workflowExecution.Results) >= len(workflowExecution.Workflow.Actions)+extra && len(workflowExecution.Workflow.Actions) > 0) {
+ if workflowExecution.Status == "FINISHED" || workflowExecution.Status == "ABORTED" || (len(environments) == 1 && requestsSent == 0 && len(workflowExecution.Results) >= 1 && os.Getenv("SHUFFLE_SWARM_CONFIG") != "run" && os.Getenv("SHUFFLE_SWARM_CONFIG") != "swarm") || (len(workflowExecution.Results) >= len(workflowExecution.Workflow.Actions)+extra && len(workflowExecution.Workflow.Actions) > 0) {
if workflowExecution.Status == "FINISHED" {
for _, result := range workflowExecution.Results {
@@ -1921,7 +2205,6 @@ func validateFinished(workflowExecution shuffle.WorkflowExecution) bool {
}
}
- requestsSent += 1
log.Printf("[DEBUG][%s] Should send full result to %s", workflowExecution.ExecutionId, baseUrl)
@@ -1933,9 +2216,11 @@ func validateFinished(workflowExecution shuffle.WorkflowExecution) bool {
}
cacheKey := fmt.Sprintf("workflowexecution_%s", workflowExecution.ExecutionId)
- err = shuffle.SetCache(ctx, cacheKey, shutdownData, 30)
- if err != nil {
- log.Printf("[ERROR][%s] Failed adding to cache during validateFinished", workflowExecution)
+ if len(workflowExecution.Authorization) > 0 {
+ err = shuffle.SetCache(ctx, cacheKey, shutdownData, 31)
+ if err != nil {
+ log.Printf("[ERROR][%s] Failed adding to cache during ValidateFinished", workflowExecution)
+ }
}
shuffle.RunCacheCleanup(ctx, workflowExecution)
@@ -1947,6 +2232,7 @@ func validateFinished(workflowExecution shuffle.WorkflowExecution) bool {
}
func handleGetStreamResults(resp http.ResponseWriter, request *http.Request) {
+ defer request.Body.Close()
body, err := ioutil.ReadAll(request.Body)
if err != nil {
log.Printf("[WARNING] Failed reading body for stream result queue")
@@ -1955,9 +2241,6 @@ func handleGetStreamResults(resp http.ResponseWriter, request *http.Request) {
return
}
- defer request.Body.Close()
- //log.Printf("[DEBUG] In get stream results with body length %d: %s", len(body), string(body))
-
var actionResult shuffle.ActionResult
err = json.Unmarshal(body, &actionResult)
if err != nil {
@@ -1985,7 +2268,7 @@ func handleGetStreamResults(resp http.ResponseWriter, request *http.Request) {
// Authorization is done here
if workflowExecution.Authorization != actionResult.Authorization {
- log.Printf("Bad authorization key when getting stream results %s.", actionResult.ExecutionId)
+ log.Printf("[ERROR] Bad authorization key when getting stream results from cache %s.", actionResult.ExecutionId)
resp.WriteHeader(401)
resp.Write([]byte(fmt.Sprintf(`{"success": false, "reason": "Bad authorization key or execution_id might not exist."}`)))
return
@@ -2003,52 +2286,10 @@ func handleGetStreamResults(resp http.ResponseWriter, request *http.Request) {
}
-func setWorkflowExecution(ctx context.Context, workflowExecution shuffle.WorkflowExecution, dbSave bool) error {
- if len(workflowExecution.ExecutionId) == 0 {
- log.Printf("[DEBUG] Workflowexecution executionId can't be empty.")
- return errors.New("ExecutionId can't be empty.")
- }
-
- //log.Printf("[DEBUG][%s] Setting with %d results (pre)", workflowExecution.ExecutionId, len(workflowExecution.Results))
- workflowExecution = shuffle.Fixexecution(ctx, workflowExecution)
- //log.Printf("[DEBUG][%s] Setting with %d results (post)", workflowExecution.ExecutionId, len(workflowExecution.Results))
-
- cacheKey := fmt.Sprintf("workflowexecution_%s", workflowExecution.ExecutionId)
-
- execData, err := json.Marshal(workflowExecution)
- if err != nil {
- log.Printf("[ERROR] Failed marshalling execution during set: %s", err)
- return err
- }
-
- err = shuffle.SetCache(ctx, cacheKey, execData, 30)
- if err != nil {
- log.Printf("[ERROR][%s] Failed adding to cache during setexecution", workflowExecution)
- return err
- }
- //requestCache.Set(cacheKey, &workflowExecution, cache.DefaultExpiration)
-
- handleExecutionResult(workflowExecution)
- validateFinished(workflowExecution)
-
- // FIXME: Should this shutdown OR send the result?
- // The worker may not be running the backend hmm
- if dbSave {
- if workflowExecution.ExecutionSource == "default" {
- log.Printf("[DEBUG][%s] Shutting down (25)", workflowExecution.ExecutionId)
- shutdown(workflowExecution, "", "", true)
- //return
- } else {
- log.Printf("[DEBUG] NOT shutting down with dbSave (%s)", workflowExecution.ExecutionSource)
- }
- }
-
- return nil
-}
-
// GetLocalIP returns the non loopback local IP of the host
func getLocalIP() string {
+
addrs, err := net.InterfaceAddrs()
if err != nil {
return ""
@@ -2082,7 +2323,6 @@ func getAvailablePort() (net.Listener, error) {
func webserverSetup(workflowExecution shuffle.WorkflowExecution) net.Listener {
hostname = getLocalIP()
-
os.Setenv("WORKER_HOSTNAME", hostname)
// FIXME: This MAY not work because of speed between first
@@ -2094,9 +2334,11 @@ func webserverSetup(workflowExecution shuffle.WorkflowExecution) net.Listener {
}
log.Printf("[DEBUG] OLD HOSTNAME: %s", appCallbackUrl)
+
+
port := listener.Addr().(*net.TCPAddr).Port
- log.Printf("\n\n[DEBUG] Starting webserver (2) on port %d with hostname: %s\n\n", port, hostname)
+ log.Printf("[DEBUG] Starting webserver (2) on port %d with hostname: %s", port, hostname)
appCallbackUrl = fmt.Sprintf("http://%s:%d", hostname, port)
log.Printf("[INFO] NEW WORKER HOSTNAME: %s", appCallbackUrl)
@@ -2130,7 +2372,7 @@ func downloadDockerImageBackend(client *http.Client, imageName string) error {
//return
}
- newresp, err := client.Do(req)
+ newresp, err := topClient.Do(req)
if err != nil {
log.Printf("[ERROR] Failed download request for %s: %s", imageName, err)
return err
@@ -2202,215 +2444,467 @@ func downloadDockerImageBackend(client *http.Client, imageName string) error {
return nil
}
-// Initial loop etc
-func main() {
- // Elasticsearch necessary to ensure we'ren ot running with Datastore configurations for minimal/maximal data sizes
- _, err := shuffle.RunInit(datastore.Client{}, storage.Client{}, "", "worker", true, "elasticsearch")
+func findActiveSwarmNodes(dockercli *dockerclient.Client) (int64, error) {
+ ctx := context.Background()
+ nodes, err := dockercli.NodeList(ctx, types.NodeListOptions{})
if err != nil {
- log.Printf("[ERROR] Failed to run worker init: %s", err)
- } else {
- log.Printf("[DEBUG] Ran init for worker to set up cache system. Docker version: %s", dockerApiVersion)
+ return 0, err
}
- log.Printf("[INFO] Setting up worker environment")
- sleepTime := 5
- client := shuffle.GetExternalClient(baseUrl)
-
- if timezone == "" {
- timezone = "Europe/Amsterdam"
+ nodeCount := int64(0)
+ for _, node := range nodes {
+ //log.Printf("ID: %s - %#v", node.ID, node.Status.State)
+ if node.Status.State == "ready" {
+ nodeCount += 1
+ }
}
- log.Printf("[INFO] Running with timezone %s and swarm config %#v", timezone, os.Getenv("SHUFFLE_SWARM_CONFIG"))
+ return nodeCount, nil
- authorization := ""
- executionId := ""
+ /*
+ containers, err := dockercli.ContainerList(ctx, types.ContainerListOptions{
+ All: true,
+ })
+ */
+}
- // INFO: Allows you to run a test execution
- testing := os.Getenv("WORKER_TESTING_WORKFLOW")
- shuffle_apikey := os.Getenv("WORKER_TESTING_APIKEY")
- if len(testing) > 0 && len(shuffle_apikey) > 0 {
- // Execute a workflow and use that info
- log.Printf("[WARNING] Running test environment for worker by executing workflow %s. PS: This may NOT reach the worker in real time, but rather be deployed as a docker container (bad). Instead use AUTHORIZATION and EXECUTIONID for direct testing", testing)
- authorization, executionId = runTestExecution(client, testing, shuffle_apikey)
- } else {
- authorization = os.Getenv("AUTHORIZATION")
- executionId = os.Getenv("EXECUTIONID")
- log.Printf("[INFO] Running normal execution with auth %s and ID %s", authorization, executionId)
+// Sends an execution request for a single action directly to a running app container
+
+func sendAppRequest(ctx context.Context, incomingUrl, appName string, port int, action *shuffle.Action, workflowExecution *shuffle.WorkflowExecution) error {
+ parsedRequest := shuffle.OrborusExecutionRequest{
+ Cleanup: cleanupEnv,
+ ExecutionId: workflowExecution.ExecutionId,
+ Authorization: workflowExecution.Authorization,
+ EnvironmentName: os.Getenv("ENVIRONMENT_NAME"),
+ Timezone: os.Getenv("TZ"),
+ HTTPProxy: os.Getenv("HTTP_PROXY"),
+ HTTPSProxy: os.Getenv("HTTPS_PROXY"),
+ ShufflePassProxyToApp: os.Getenv("SHUFFLE_PASS_APP_PROXY"),
+ Url: baseUrl,
+ BaseUrl: baseUrl,
+ Action: *action,
+ FullExecution: *workflowExecution,
}
+ // Sometimes makes it have the wrong data due to timing
- workflowExecution := shuffle.WorkflowExecution{
- ExecutionId: executionId,
+ // Specific for subflow to ensure worker matches the backend correctly
+
+ parsedBaseurl := incomingUrl
+ if strings.Count(baseUrl, ":") >= 2 {
+ baseUrlSplit := strings.Split(baseUrl, ":")
+ if len(baseUrlSplit) >= 3 {
+ parsedBaseurl = strings.Join(baseUrlSplit[0:2], ":")
+ //parsedRequest.BaseUrl = fmt.Sprintf("%s:33333", parsedBaseurl)
+ }
}
- if len(authorization) == 0 {
- log.Printf("[INFO] No AUTHORIZATION key set in env")
- log.Printf("[DEBUG] Shutting down (27)")
- shutdown(workflowExecution, "", "", false)
+
+ if len(parsedRequest.Url) == 0 {
+ // Fixed callback url to the worker itself
+ if strings.Count(parsedBaseurl, ":") >= 2 {
+ parsedRequest.Url = parsedBaseurl
+ } else {
+ // Callback to worker
+ parsedRequest.Url = fmt.Sprintf("%s:%d", parsedBaseurl, baseport)
+
+ //parsedRequest.Url
+ }
+
+ //log.Printf("[DEBUG][%s] Should add a baseurl for the app to get back to: %s", workflowExecution.ExecutionId, parsedRequest.Url)
}
- if len(executionId) == 0 {
- log.Printf("[INFO] No EXECUTIONID key set in env")
- log.Printf("[DEBUG] Shutting down (28)")
- shutdown(workflowExecution, "", "", false)
+ // Swapping because this was confusing during dev
+ // No real reason, just variable names
+ tmp := parsedRequest.Url
+ parsedRequest.Url = parsedRequest.BaseUrl
+ parsedRequest.BaseUrl = tmp
+
+ // Run with proper hostname, but set to shuffle-worker to avoid specific host target.
+ // This means running with VIP instead.
+ if len(hostname) > 0 {
+ parsedRequest.BaseUrl = fmt.Sprintf("http://%s:%d", hostname, baseport)
+ //parsedRequest.BaseUrl = fmt.Sprintf("http://shuffle-workers:%d", baseport)
+ //log.Printf("[DEBUG][%s] Changing hostname to local hostname in Docker network for WORKER URL: %s", workflowExecution.ExecutionId, parsedRequest.BaseUrl)
+
+ if parsedRequest.Action.AppName == "shuffle-subflow" || parsedRequest.Action.AppName == "shuffle-subflow-v2" || parsedRequest.Action.AppName == "User Input" {
+ parsedRequest.BaseUrl = fmt.Sprintf("http://%s:%d", hostname, baseport)
+ //parsedRequest.Url = parsedRequest.BaseUrl
+ }
}
- data = fmt.Sprintf(`{"execution_id": "%s", "authorization": "%s"}`, executionId, authorization)
- streamResultUrl := fmt.Sprintf("%s/api/v1/streams/results", baseUrl)
+ // Making sure to get the LATEST execution data
+ // This is due to cache timing issues
+ exec, err := shuffle.GetWorkflowExecution(ctx, workflowExecution.ExecutionId)
+ if err == nil && len(exec.ExecutionId) > 0 {
+ parsedRequest.FullExecution = *exec
+ }
+
+ data, err := json.Marshal(parsedRequest)
+ if err != nil {
+ log.Printf("[ERROR] Failed marshalling worker request: %s", err)
+ return err
+ }
+
+ streamUrl := fmt.Sprintf("http://%s:%d/api/v1/run", appName, port)
+ log.Printf("[DEBUG][%s] Worker URL: %s, Backend URL: %s, Target App: %s", workflowExecution.ExecutionId, parsedRequest.BaseUrl, parsedRequest.Url, streamUrl)
req, err := http.NewRequest(
"POST",
- streamResultUrl,
+ streamUrl,
bytes.NewBuffer([]byte(data)),
)
+	// Checking as LATE as possible, ensuring we don't rerun what has already run
+ //ctx = context.Background()
+ newExecId := fmt.Sprintf("%s_%s", workflowExecution.ExecutionId, action.ID)
+ _, err = shuffle.GetCache(ctx, newExecId)
+ if err == nil {
+ log.Printf("[DEBUG] Result for %s already found (PRE REQUEST) - returning", newExecId)
+ return nil
+ }
+
+ cacheData := []byte("1")
+ err = shuffle.SetCache(ctx, newExecId, cacheData, 30)
if err != nil {
- log.Printf("[ERROR] Failed making request builder for backend")
- log.Printf("[DEBUG] Shutting down (29)")
- shutdown(workflowExecution, "", "", true)
+ log.Printf("[WARNING] Failed setting cache for action %s: %s", newExecId, err)
+ } else {
+ log.Printf("[DEBUG][%s] Adding %s to cache (%#v)", workflowExecution.ExecutionId, newExecId, action.Name)
}
- topClient = client
+ // FIXME: Add 5 tries
- firstRequest := true
- environments := []string{}
- for {
- // Because of this, it always has updated data.
- // Removed request requirement from app_sdk
- newresp, err := client.Do(req)
- if err != nil {
- log.Printf("[ERROR] Failed request: %s", err)
- time.Sleep(time.Duration(sleepTime) * time.Second)
- continue
+ newresp, err := topClient.Do(req)
+ if err != nil {
+ // Another timeout issue here somewhere
+ // context deadline
+ if strings.Contains(fmt.Sprintf("%s", err), "context deadline exceeded") || strings.Contains(fmt.Sprintf("%s", err), "Client.Timeout exceeded") {
+ return nil
}
- defer newresp.Body.Close()
- body, err := ioutil.ReadAll(newresp.Body)
- if err != nil {
- log.Printf("[ERROR] Failed reading body: %s", err)
- time.Sleep(time.Duration(sleepTime) * time.Second)
- continue
+ if strings.Contains(fmt.Sprintf("%s", err), "timeout awaiting response") {
+ return nil
}
- if newresp.StatusCode != 200 {
- log.Printf("[ERROR] %s\nStatusCode (1): %d", string(body), newresp.StatusCode)
- time.Sleep(time.Duration(sleepTime) * time.Second)
- continue
+ newerr := fmt.Sprintf("%s", err)
+ if strings.Contains(newerr, "connection refused") || strings.Contains(newerr, "no such host") {
+ newerr = fmt.Sprintf("Failed connecting to app %s. Is the Docker image available?", appName)
+ } else {
+ // escape quotes and newlines
+ newerr = strings.ReplaceAll(strings.ReplaceAll(newerr, "\"", "\\\""), "\n", "\\n")
}
- err = json.Unmarshal(body, &workflowExecution)
- if err != nil {
- log.Printf("[ERROR] Failed workflowExecution unmarshal: %s", err)
- time.Sleep(time.Duration(sleepTime) * time.Second)
- continue
+ log.Printf("[ERROR][%s] Error running app run request: %s", workflowExecution.ExecutionId, err)
+ actionResult := shuffle.ActionResult{
+ Action: *action,
+ ExecutionId: workflowExecution.ExecutionId,
+ Authorization: workflowExecution.Authorization,
+ Result: fmt.Sprintf(`{"success": false, "reason": "Failed to connect to app %s in swarm. Restart Orborus if this is recurring, or contact support@shuffler.io.", "details": "%s"}`, streamUrl, newerr),
+ StartedAt: int64(time.Now().Unix()),
+ CompletedAt: int64(time.Now().Unix()),
+ Status: "FAILURE",
}
- if firstRequest {
- firstRequest = false
- //workflowExecution.StartedAt = int64(time.Now().Unix())
+ // If this happens - send failure signal to stop the workflow?
+ sendSelfRequest(actionResult)
+ return err
+ }
- ctx := context.Background()
- cacheKey := fmt.Sprintf("workflowexecution_%s", workflowExecution.ExecutionId)
- execData, err := json.Marshal(workflowExecution)
- if err != nil {
- log.Printf("[ERROR][%s] Failed marshalling execution during set (3): %s", workflowExecution.ExecutionId, err)
- } else {
- err = shuffle.SetCache(ctx, cacheKey, execData, 30)
- if err != nil {
- log.Printf("[ERROR][%s] Failed adding to cache during setexecution (3): %s", workflowExecution.ExecutionId, err)
- }
- }
+ defer newresp.Body.Close()
+ body, err := ioutil.ReadAll(newresp.Body)
+ if err != nil {
+ log.Printf("[ERROR] Failed reading app request body body: %s", err)
+ return err
+ } else {
+ log.Printf("[DEBUG][%s] NEWRESP (from app): %s", workflowExecution.ExecutionId, string(body))
+ }
- //requestCache = cache.New(60*time.Minute, 120*time.Minute)
- //requestCache.Set(cacheKey, &workflowExecution, cache.DefaultExpiration)
+ return nil
+}
- for _, action := range workflowExecution.Workflow.Actions {
- found := false
- for _, environment := range environments {
- if action.Environment == environment {
- found = true
- break
- }
- }
+// Function to auto-deploy certain apps if "run" is set
+// Has some issues with loading when running multiple workers and such.
+func baseDeploy() {
- if !found {
- environments = append(environments, action.Environment)
+ cli, err := dockerclient.NewEnvClient()
+ if err != nil {
+ log.Printf("[ERROR] Unable to create docker client (3): %s", err)
+ return
+ }
+
+ for key, value := range autoDeploy {
+ newNameSplit := strings.Split(key, ":")
+
+ action := shuffle.Action{
+ AppName: newNameSplit[0],
+ AppVersion: newNameSplit[1],
+ ID: "TBD",
+ }
+
+ workflowExecution := shuffle.WorkflowExecution{
+ ExecutionId: "TBD",
+ }
+
+ appname := action.AppName
+ appversion := action.AppVersion
+ appname = strings.Replace(appname, ".", "-", -1)
+ appversion = strings.Replace(appversion, ".", "-", -1)
+
+ env := []string{
+ fmt.Sprintf("EXECUTIONID=%s", workflowExecution.ExecutionId),
+ fmt.Sprintf("AUTHORIZATION=%s", workflowExecution.Authorization),
+ fmt.Sprintf("CALLBACK_URL=%s", baseUrl),
+ fmt.Sprintf("BASE_URL=%s", appCallbackUrl),
+ fmt.Sprintf("TZ=%s", timezone),
+ fmt.Sprintf("SHUFFLE_LOGS_DISABLED=%s", logsDisabled),
+ }
+
+ if strings.ToLower(os.Getenv("SHUFFLE_PASS_APP_PROXY")) == "true" {
+ //log.Printf("APPENDING PROXY TO THE APP!")
+ env = append(env, fmt.Sprintf("HTTP_PROXY=%s", os.Getenv("HTTP_PROXY")))
+ env = append(env, fmt.Sprintf("HTTPS_PROXY=%s", os.Getenv("HTTPS_PROXY")))
+ env = append(env, fmt.Sprintf("NO_PROXY=%s", os.Getenv("NO_PROXY")))
+ }
+
+ if len(os.Getenv("SHUFFLE_APP_SDK_TIMEOUT")) > 0 {
+ log.Printf("[DEBUG] Setting SHUFFLE_APP_SDK_TIMEOUT to %s", os.Getenv("SHUFFLE_APP_SDK_TIMEOUT"))
+ env = append(env, fmt.Sprintf("SHUFFLE_APP_SDK_TIMEOUT=%s", os.Getenv("SHUFFLE_APP_SDK_TIMEOUT")))
+ }
+
+ identifier := fmt.Sprintf("%s_%s_%s_%s", appname, appversion, action.ID, workflowExecution.ExecutionId)
+ if strings.Contains(identifier, " ") {
+ identifier = strings.ReplaceAll(identifier, " ", "-")
+ }
+
+ //deployApp(cli, value, identifier, env, workflowExecution, action)
+ log.Printf("[DEBUG] Deploying app with identifier %s to ensure basic apps are available from the get-go", identifier)
+ err = deployApp(cli, value, identifier, env, workflowExecution, action)
+ _ = err
+ //err := deployApp(cli, value, identifier, env, workflowExecution, action)
+ //if err != nil {
+ // log.Printf("[DEBUG] Failed deploying app %s: %s", value, err)
+ //}
+ }
+
+ appsInitialized = true
+}
+
+func getStreamResultsWrapper(client *http.Client, req *http.Request, workflowExecution shuffle.WorkflowExecution, firstRequest bool, environments []string) ([]string, error) {
+ // Because of this, it always has updated data.
+ // Removed request requirement from app_sdk
+ newresp, err := topClient.Do(req)
+ if err != nil {
+ log.Printf("[ERROR] Failed request: %s", err)
+ time.Sleep(time.Duration(sleepTime) * time.Second)
+ return environments, err
+ }
+
+ defer newresp.Body.Close()
+ body, err := ioutil.ReadAll(newresp.Body)
+ if err != nil {
+ log.Printf("[ERROR] Failed reading body: %s", err)
+ time.Sleep(time.Duration(sleepTime) * time.Second)
+ return environments, err
+ }
+
+ if newresp.StatusCode != 200 {
+ log.Printf("[ERROR] %sStatusCode (1): %d", string(body), newresp.StatusCode)
+ time.Sleep(time.Duration(sleepTime) * time.Second)
+ return environments, errors.New(fmt.Sprintf("Bad status code: %d", newresp.StatusCode) )
+ }
+
+ err = json.Unmarshal(body, &workflowExecution)
+ if err != nil {
+ log.Printf("[ERROR] Failed workflowExecution unmarshal: %s", err)
+ time.Sleep(time.Duration(sleepTime) * time.Second)
+ return environments, err
+ }
+
+ if firstRequest {
+ firstRequest = false
+
+ ctx := context.Background()
+ cacheKey := fmt.Sprintf("workflowexecution_%s", workflowExecution.ExecutionId)
+ execData, err := json.Marshal(workflowExecution)
+ if err != nil {
+ log.Printf("[ERROR][%s] Failed marshalling execution during set (3): %s", workflowExecution.ExecutionId, err)
+ } else {
+ err = shuffle.SetCache(ctx, cacheKey, execData, 30)
+ if err != nil {
+ log.Printf("[ERROR][%s] Failed adding to cache during setexecution (3): %s", workflowExecution.ExecutionId, err)
+ }
+ }
+
+ for _, action := range workflowExecution.Workflow.Actions {
+ found := false
+ for _, environment := range environments {
+ if action.Environment == environment {
+ found = true
+ break
}
}
- // Checks if a subflow is child of the startnode, as sub-subflows aren't working properly yet
- childNodes := shuffle.FindChildNodes(workflowExecution, workflowExecution.Start, []string{}, []string{})
- log.Printf("[DEBUG] Looking for subflow in %#v to check execution pattern as child of %s", childNodes, workflowExecution.Start)
- subflowFound := false
- for _, childNode := range childNodes {
- for _, trigger := range workflowExecution.Workflow.Triggers {
- if trigger.ID != childNode {
- continue
- }
+ if !found {
+ environments = append(environments, action.Environment)
+ }
+ }
- if trigger.AppName == "Shuffle Workflow" {
- subflowFound = true
- break
- }
+ // Checks if a subflow is child of the startnode, as sub-subflows aren't working properly yet
+ childNodes := shuffle.FindChildNodes(workflowExecution, workflowExecution.Start, []string{}, []string{})
+ log.Printf("[DEBUG] Looking for subflow in %#v to check execution pattern as child of %s", childNodes, workflowExecution.Start)
+ subflowFound := false
+ for _, childNode := range childNodes {
+ for _, trigger := range workflowExecution.Workflow.Triggers {
+ if trigger.ID != childNode {
+ continue
}
- if subflowFound {
+ if trigger.AppName == "Shuffle Workflow" {
+ subflowFound = true
break
}
}
- log.Printf("\n\nEnvironments: %s. Source: %s. 1 env = webserver, 0 or >1 = default. Subflow exists: %#v\n\n", environments, workflowExecution.ExecutionSource, subflowFound)
- if len(environments) == 1 && workflowExecution.ExecutionSource != "default" && !subflowFound {
- log.Printf("\n\n[DEBUG] Running OPTIMIZED execution (not manual)\n\n")
- listener := webserverSetup(workflowExecution)
- err := executionInit(workflowExecution)
- if err != nil {
- log.Printf("[DEBUG] Workflow setup failed: %s", workflowExecution.ExecutionId, err)
- log.Printf("[DEBUG] Shutting down (30)")
- shutdown(workflowExecution, "", "", true)
- }
+ if subflowFound {
+ break
+ }
+ }
- go func() {
- time.Sleep(time.Duration(1))
- handleExecutionResult(workflowExecution)
- }()
+ log.Printf("[DEBUG] Environments: %s. Source: %s. 1 env = webserver, 0 or >1 = default. Subflow exists: %#v", environments, workflowExecution.ExecutionSource, subflowFound)
+ if len(environments) == 1 && workflowExecution.ExecutionSource != "default" && !subflowFound {
+ log.Printf("[DEBUG] Running OPTIMIZED execution (not manual)")
+ listener := webserverSetup(workflowExecution)
+ err := executionInit(workflowExecution)
+ if err != nil {
+ log.Printf("[DEBUG] Workflow setup failed: %s", workflowExecution.ExecutionId, err)
+ log.Printf("[DEBUG] Shutting down (30)")
+ shutdown(workflowExecution, "", "", true)
+ }
- runWebserver(listener)
- //log.Printf("Before wait")
- //wg := sync.WaitGroup{}
- //wg.Add(1)
- //wg.Wait()
- } else {
- log.Printf("\n\n[DEBUG] Running NON-OPTIMIZED execution for type %s with %d environment(s). This only happens when ran manually OR when running with subflows. Status: %s\n\n", workflowExecution.ExecutionSource, len(environments), workflowExecution.Status)
- err := executionInit(workflowExecution)
- if err != nil {
- log.Printf("[DEBUG] Workflow setup failed: %s", workflowExecution.ExecutionId, err)
- shutdown(workflowExecution, "", "", true)
- }
+ go func() {
+ time.Sleep(time.Duration(1))
+ handleExecutionResult(workflowExecution)
+ }()
- // Trying to make worker into microservice~ :)
+ runWebserver(listener)
+ //log.Printf("Before wait")
+ //wg := sync.WaitGroup{}
+ //wg.Add(1)
+ //wg.Wait()
+ } else {
+ log.Printf("[DEBUG] Running NON-OPTIMIZED execution for type %s with %d environment(s). This only happens when ran manually OR when running with subflows. Status: %s", workflowExecution.ExecutionSource, len(environments), workflowExecution.Status)
+ err := executionInit(workflowExecution)
+ if err != nil {
+ log.Printf("[DEBUG] Workflow setup failed: %s", workflowExecution.ExecutionId, err)
+ shutdown(workflowExecution, "", "", true)
}
+
+ // Trying to make worker into microservice~ :)
}
+ }
+
+ if workflowExecution.Status == "FINISHED" || workflowExecution.Status == "SUCCESS" {
+ log.Printf("[DEBUG] Workflow %s is finished. Exiting worker.", workflowExecution.ExecutionId)
+ log.Printf("[DEBUG] Shutting down (31)")
+ shutdown(workflowExecution, "", "", true)
+ }
- if workflowExecution.Status == "FINISHED" || workflowExecution.Status == "SUCCESS" {
- log.Printf("[DEBUG] Workflow %s is finished. Exiting worker.", workflowExecution.ExecutionId)
- log.Printf("[DEBUG] Shutting down (31)")
+ if workflowExecution.Status == "EXECUTING" || workflowExecution.Status == "RUNNING" {
+ //log.Printf("Status: %s", workflowExecution.Status)
+ err = handleDefaultExecution(client, req, workflowExecution)
+ if err != nil {
+ log.Printf("[DEBUG] Workflow %s is finished: %s", workflowExecution.ExecutionId, err)
+ log.Printf("[DEBUG] Shutting down (32)")
shutdown(workflowExecution, "", "", true)
}
+ } else {
+ log.Printf("[DEBUG] Workflow %s has status %s. Exiting worker (if WAITING, rerun will happen).", workflowExecution.ExecutionId, workflowExecution.Status)
+ log.Printf("[DEBUG] Shutting down (33)")
+ shutdown(workflowExecution, workflowExecution.Workflow.ID, "", true)
+ }
- if workflowExecution.Status == "EXECUTING" || workflowExecution.Status == "RUNNING" {
- //log.Printf("Status: %s", workflowExecution.Status)
- err = handleDefaultExecution(client, req, workflowExecution)
- if err != nil {
- log.Printf("[DEBUG] Workflow %s is finished: %s", workflowExecution.ExecutionId, err)
- log.Printf("[DEBUG] Shutting down (32)")
- shutdown(workflowExecution, "", "", true)
- }
- } else {
- log.Printf("[DEBUG] Workflow %s has status %s. Exiting worker.", workflowExecution.ExecutionId, workflowExecution.Status)
- log.Printf("[DEBUG] Shutting down (33)")
- shutdown(workflowExecution, workflowExecution.Workflow.ID, "", true)
+ time.Sleep(time.Duration(sleepTime) * time.Second)
+ return environments, nil
+}
+
+// Initial loop etc
+func main() {
+	// Elasticsearch necessary to ensure we're not running with Datastore configurations for minimal/maximal data sizes
+ // Recursive import kind of :)
+ _, err := shuffle.RunInit(*shuffle.GetDatastore(), *shuffle.GetStorage(), "", "worker", true, "elasticsearch")
+ if err != nil {
+ if !strings.Contains(fmt.Sprintf("%s", err), "no such host") {
+ log.Printf("[ERROR] Failed to run worker init: %s", err)
}
+ } else {
+ log.Printf("[DEBUG] Ran init for worker to set up cache system. Docker version: %s", dockerApiVersion)
+ }
- time.Sleep(time.Duration(sleepTime) * time.Second)
+ log.Printf("[INFO] Setting up worker environment")
+ sleepTime = 5
+ client := shuffle.GetExternalClient(baseUrl)
+
+ if timezone == "" {
+ timezone = "Europe/Amsterdam"
+ }
+
+ topClient = client
+ swarmConfig := os.Getenv("SHUFFLE_SWARM_CONFIG")
+ log.Printf("[INFO] Running with timezone %s and swarm config %#v", timezone, swarmConfig)
+
+
+ authorization := ""
+ executionId := ""
+
+ // INFO: Allows you to run a test execution
+ testing := os.Getenv("WORKER_TESTING_WORKFLOW")
+ shuffle_apikey := os.Getenv("WORKER_TESTING_APIKEY")
+ if len(testing) > 0 && len(shuffle_apikey) > 0 {
+ // Execute a workflow and use that info
+ log.Printf("[WARNING] Running test environment for worker by executing workflow %s. PS: This may NOT reach the worker in real time, but rather be deployed as a docker container (bad). Instead use AUTHORIZATION and EXECUTIONID for direct testing", testing)
+ authorization, executionId = runTestExecution(client, testing, shuffle_apikey)
+
+ } else {
+ authorization = os.Getenv("AUTHORIZATION")
+ executionId = os.Getenv("EXECUTIONID")
+ log.Printf("[INFO] Running normal execution with auth %s and ID %s", authorization, executionId)
+ }
+
+ workflowExecution := shuffle.WorkflowExecution{
+ ExecutionId: executionId,
+ }
+ if len(authorization) == 0 {
+ log.Printf("[INFO] No AUTHORIZATION key set in env")
+ log.Printf("[DEBUG] Shutting down (27)")
+ shutdown(workflowExecution, "", "", false)
+ }
+
+ if len(executionId) == 0 {
+ log.Printf("[INFO] No EXECUTIONID key set in env")
+ log.Printf("[DEBUG] Shutting down (28)")
+ shutdown(workflowExecution, "", "", false)
+ }
+
+ data = fmt.Sprintf(`{"execution_id": "%s", "authorization": "%s"}`, executionId, authorization)
+ streamResultUrl := fmt.Sprintf("%s/api/v1/streams/results", baseUrl)
+ req, err := http.NewRequest(
+ "POST",
+ streamResultUrl,
+ bytes.NewBuffer([]byte(data)),
+ )
+
+ if err != nil {
+ log.Printf("[ERROR] Failed making request builder for backend")
+ log.Printf("[DEBUG] Shutting down (29)")
+ shutdown(workflowExecution, "", "", true)
+ }
+
+ topClient = client
+ firstRequest := true
+ environments := []string{}
+ for {
+ environments, err = getStreamResultsWrapper(client, req, workflowExecution, firstRequest, environments)
+ if err != nil {
+ log.Printf("[ERROR] Failed getting stream results: %s", err)
+ }
}
}
@@ -2447,6 +2941,7 @@ func checkUnfinished(resp http.ResponseWriter, request *http.Request, execReques
}
func handleRunExecution(resp http.ResponseWriter, request *http.Request) {
+ defer request.Body.Close()
body, err := ioutil.ReadAll(request.Body)
if err != nil {
log.Printf("[WARNING] Failed reading body for stream result queue")
@@ -2455,8 +2950,6 @@ func handleRunExecution(resp http.ResponseWriter, request *http.Request) {
return
}
- defer request.Body.Close()
-
//log.Printf("[DEBUG] In run execution with body length %d", len(body))
var execRequest shuffle.OrborusExecutionRequest
err = json.Unmarshal(body, &execRequest)
@@ -2511,10 +3004,10 @@ func handleRunExecution(resp http.ResponseWriter, request *http.Request) {
os.Setenv("AUTHORIZATION", execRequest.Authorization)
}
- topClient = &http.Client{}
var workflowExecution shuffle.WorkflowExecution
data = fmt.Sprintf(`{"execution_id": "%s", "authorization": "%s"}`, execRequest.ExecutionId, execRequest.Authorization)
streamResultUrl := fmt.Sprintf("%s/api/v1/streams/results", baseUrl)
+ topClient = shuffle.GetExternalClient(streamResultUrl)
req, err := http.NewRequest(
"POST",
@@ -2530,6 +3023,7 @@ func handleRunExecution(resp http.ResponseWriter, request *http.Request) {
return
}
+ defer newresp.Body.Close()
body, err = ioutil.ReadAll(newresp.Body)
if err != nil {
log.Printf("[ERROR] Failed reading body (2): %s", err)
@@ -2560,6 +3054,7 @@ func handleRunExecution(resp http.ResponseWriter, request *http.Request) {
}
ctx := context.Background()
+ //err = shuffle.SetWorkflowExecution(ctx, workflowExecution, true)
err = setWorkflowExecution(ctx, workflowExecution, true)
if err != nil {
log.Printf("[ERROR] Failed initializing execution saving for %s: %s", workflowExecution.ExecutionId, err)
@@ -2601,14 +3096,12 @@ func handleRunExecution(resp http.ResponseWriter, request *http.Request) {
if err != nil {
log.Printf("[ERROR][%s] Failed marshalling execution during set (3): %s", workflowExecution.ExecutionId, err)
} else {
- err = shuffle.SetCache(ctx, cacheKey, execData, 30)
+ err = shuffle.SetCache(ctx, cacheKey, execData, 31)
if err != nil {
log.Printf("[ERROR][%s] Failed adding to cache during setexecution (3): %s", workflowExecution.ExecutionId, err)
}
}
- //requestCache.Set(cacheKey, &workflowExecution, cache.DefaultExpiration)
-
err = executionInit(workflowExecution)
if err != nil {
log.Printf("[DEBUG][%s] Shutting down (30) - Workflow setup failed: %s", workflowExecution.ExecutionId, workflowExecution.ExecutionId, err)
@@ -2618,16 +3111,95 @@ func handleRunExecution(resp http.ResponseWriter, request *http.Request) {
//shutdown(workflowExecution, "", "", true)
}
- //go handleExecutionResult(workflowExecution)
handleExecutionResult(workflowExecution)
resp.WriteHeader(200)
resp.Write([]byte(fmt.Sprintf(`{"success": true}`)))
}
+// handleDownloadImage pulls a docker image requested via POST body
+// ({"image": "..."}), unless an equivalent frikky/shuffle image with the
+// same tag is already present locally. Responds 200 immediately; the
+// actual pull runs via downloadDockerImageBackend.
+func handleDownloadImage(resp http.ResponseWriter, request *http.Request) {
+	// Read the request body
+	defer request.Body.Close()
+	bodyBytes, err := ioutil.ReadAll(request.Body)
+	if err != nil {
+		log.Printf("[ERROR] Failed reading body for stream result queue. Error: %s", err)
+		// 400: the client sent an unreadable body (401 was wrong - nothing auth-related here)
+		resp.WriteHeader(400)
+		resp.Write([]byte(fmt.Sprintf(`{"success": false, "reason": "%s"}`, err)))
+		return
+	}
+
+	// get images from request
+	image := &ImageDownloadBody{}
+	err = json.Unmarshal(bodyBytes, image)
+	if err != nil {
+		log.Printf("[ERROR] Error in unmarshalling body: %s", err)
+		resp.WriteHeader(400)
+		resp.Write([]byte(fmt.Sprintf(`{"success": false, "reason": "%s"}`, err)))
+		return
+	}
+
+	client, err := dockerclient.NewEnvClient()
+	if err != nil {
+		log.Printf("[ERROR] Unable to create docker client (4): %s", err)
+		// 500: failure is on our side, not the caller's
+		resp.WriteHeader(500)
+		resp.Write([]byte(fmt.Sprintf(`{"success": false, "reason": "%s"}`, err)))
+		return
+	}
+	// Close the client so its transport/connections don't leak per request
+	defer client.Close()
+
+	// check if images are already downloaded
+	// Retrieve a list of Docker images
+	images, err := client.ImageList(context.Background(), types.ImageListOptions{})
+	if err != nil {
+		log.Printf("[ERROR] listing images: %s", err)
+		resp.WriteHeader(500)
+		resp.Write([]byte(fmt.Sprintf(`{"success": false, "reason": "%s"}`, err)))
+		return
+	}
+
+	for _, img := range images {
+		for _, tag := range img.RepoTags {
+			// Take the LAST colon-separated element: repo names may themselves
+			// contain a colon (e.g. localhost:5000/img:1.0), so splitTag[1]
+			// would pick the port/path instead of the actual tag.
+			splitTag := strings.Split(tag, ":")
+			baseTag := tag
+			if len(splitTag) > 1 {
+				baseTag = splitTag[len(splitTag)-1]
+			}
+
+			// Any of these local names means the requested image is already here
+			var possibleNames []string
+			possibleNames = append(possibleNames, fmt.Sprintf("frikky/shuffle:%s", baseTag))
+			possibleNames = append(possibleNames, fmt.Sprintf("registry.hub.docker.com/frikky/shuffle:%s", baseTag))
+
+			if arrayContains(possibleNames, image.Image) {
+				log.Printf("[DEBUG] Image %s already downloaded that has been requested to download", image.Image)
+				// NOTE(review): body keeps "success": false for backward compat -
+				// existing callers key off the "image already present" reason
+				resp.WriteHeader(200)
+				resp.Write([]byte(fmt.Sprintf(`{"success": false, "reason": "image already present"}`)))
+				return
+			}
+		}
+	}
+
+	log.Printf("[INFO] Downloading image %s", image.Image)
+	downloadDockerImageBackend(&http.Client{Timeout: 60 * time.Second}, image.Image)
+
+	// return success
+	resp.WriteHeader(200)
+	resp.Write([]byte(fmt.Sprintf(`{"success": true, "status": "starting download"}`)))
+}
+
func runWebserver(listener net.Listener) {
r := mux.NewRouter()
r.HandleFunc("/api/v1/streams", handleWorkflowQueue).Methods("POST", "OPTIONS")
r.HandleFunc("/api/v1/streams/results", handleGetStreamResults).Methods("POST", "OPTIONS")
+ r.HandleFunc("/api/v1/execute", handleRunExecution).Methods("POST", "OPTIONS")
+ r.HandleFunc("/api/v1/run", handleRunExecution).Methods("POST", "OPTIONS")
+ r.HandleFunc("/api/v1/download", handleDownloadImage).Methods("POST", "OPTIONS")
+
+
+ if strings.ToLower(os.Getenv("SHUFFLE_DEBUG_MEMORY")) == "true" {
+ r.HandleFunc("/debug/pprof/", pprof.Index)
+ r.HandleFunc("/debug/pprof/heap", pprof.Handler("heap").ServeHTTP)
+ r.HandleFunc("/debug/pprof/profile", pprof.Profile)
+ r.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
+ r.HandleFunc("/debug/pprof/trace", pprof.Trace)
+ }
//log.Fatal(http.ListenAndServe(port, nil))
//srv := http.Server{
@@ -2638,7 +3210,7 @@ func runWebserver(listener net.Listener) {
//log.Fatal(http.Serve(listener, nil))
- log.Printf("\n\n[DEBUG] NEW webserver setup\n\n")
+ log.Printf("[DEBUG] NEW webserver setup")
http.Handle("/", r)
srv := http.Server{
@@ -2651,7 +3223,7 @@ func runWebserver(listener net.Listener) {
err := srv.Serve(listener)
if err != nil {
- log.Printf("serveIssue: %#v", err)
+ log.Printf("[ERROR] Serve issue in worker: %#v", err)
}
log.Printf("[DEBUG] Do we see this?")
}