From 694401a34d45cfefa88c02c028fcd39a1d23f0da Mon Sep 17 00:00:00 2001 From: Rino Jose Date: Mon, 27 May 2024 13:42:29 -0700 Subject: [PATCH 01/18] Copied v3 into forthic-py --- forthic-py/__init__.py | 0 forthic-py/src/README.md | 9 + forthic-py/src/__init__.py | 1 + forthic-py/src/forthic/modules/__init__.py | 0 .../src/forthic/modules/airtable_module.py | 155 + .../src/forthic/modules/alation_module.py | 201 ++ .../src/forthic/modules/cache_module.py | 76 + .../src/forthic/modules/confluence_module.py | 444 +++ .../src/forthic/modules/datasets_module.py | 184 ++ .../src/forthic/modules/excel_module.py | 337 ++ forthic-py/src/forthic/modules/gdoc_module.py | 695 +++++ .../src/forthic/modules/gsheet_module.py | 844 +++++ forthic-py/src/forthic/modules/html_module.py | 756 +++++ .../src/forthic/modules/intake_module.py | 267 ++ .../src/forthic/modules/isoweek_module.py | 150 + .../src/forthic/modules/jinja_module.py | 25 + forthic-py/src/forthic/modules/jira_module.py | 1111 +++++++ forthic-py/src/forthic/modules/org_module.py | 365 +++ .../src/forthic/modules/stats_module.py | 28 + forthic-py/src/forthic/modules/svg_module.py | 109 + .../src/forthic/modules/trino_module.py | 107 + forthic-py/src/forthic/modules/ui_module.py | 54 + .../src/forthic/modules/wiki_status_module.py | 162 + forthic-py/src/global_module.py | 2715 +++++++++++++++++ forthic-py/src/interfaces.py | 110 + forthic-py/src/interpreter.py | 399 +++ forthic-py/src/module.py | 257 ++ forthic-py/src/profile.py | 105 + forthic-py/src/tokenizer.py | 201 ++ forthic-py/src/tokens.py | 52 + forthic-py/tests/__init__.py | 0 .../tests/modules/datasets_data/.gitignore | 1 + .../tests/modules/datasets_data/README.md | 3 + forthic-py/tests/modules/jira_context.py | 317 ++ .../tests/modules/test_v3_datasets_module.py | 107 + .../tests/modules/test_v3_isoweek_module.py | 68 + .../tests/modules/test_v3_jira_module.py | 229 ++ .../tests/modules/test_v3_org_module.py | 152 + 
.../tests/modules/test_v3_trino_module.py | 32 + forthic-py/tests/modules/trino_context.py | 68 + forthic-py/tests/sample_date_module.py | 15 + forthic-py/tests/test_v3_global_module.py | 1866 +++++++++++ forthic-py/tests/test_v3_interpreter.py | 201 ++ forthic-py/tests/test_v3_tokenizer.py | 114 + 44 files changed, 13092 insertions(+) create mode 100644 forthic-py/__init__.py create mode 100644 forthic-py/src/README.md create mode 100644 forthic-py/src/__init__.py create mode 100644 forthic-py/src/forthic/modules/__init__.py create mode 100644 forthic-py/src/forthic/modules/airtable_module.py create mode 100644 forthic-py/src/forthic/modules/alation_module.py create mode 100644 forthic-py/src/forthic/modules/cache_module.py create mode 100644 forthic-py/src/forthic/modules/confluence_module.py create mode 100644 forthic-py/src/forthic/modules/datasets_module.py create mode 100644 forthic-py/src/forthic/modules/excel_module.py create mode 100644 forthic-py/src/forthic/modules/gdoc_module.py create mode 100644 forthic-py/src/forthic/modules/gsheet_module.py create mode 100644 forthic-py/src/forthic/modules/html_module.py create mode 100644 forthic-py/src/forthic/modules/intake_module.py create mode 100644 forthic-py/src/forthic/modules/isoweek_module.py create mode 100644 forthic-py/src/forthic/modules/jinja_module.py create mode 100644 forthic-py/src/forthic/modules/jira_module.py create mode 100644 forthic-py/src/forthic/modules/org_module.py create mode 100644 forthic-py/src/forthic/modules/stats_module.py create mode 100644 forthic-py/src/forthic/modules/svg_module.py create mode 100644 forthic-py/src/forthic/modules/trino_module.py create mode 100644 forthic-py/src/forthic/modules/ui_module.py create mode 100644 forthic-py/src/forthic/modules/wiki_status_module.py create mode 100644 forthic-py/src/global_module.py create mode 100644 forthic-py/src/interfaces.py create mode 100644 forthic-py/src/interpreter.py create mode 100644 forthic-py/src/module.py create 
mode 100644 forthic-py/src/profile.py create mode 100644 forthic-py/src/tokenizer.py create mode 100644 forthic-py/src/tokens.py create mode 100644 forthic-py/tests/__init__.py create mode 100644 forthic-py/tests/modules/datasets_data/.gitignore create mode 100644 forthic-py/tests/modules/datasets_data/README.md create mode 100644 forthic-py/tests/modules/jira_context.py create mode 100644 forthic-py/tests/modules/test_v3_datasets_module.py create mode 100644 forthic-py/tests/modules/test_v3_isoweek_module.py create mode 100644 forthic-py/tests/modules/test_v3_jira_module.py create mode 100644 forthic-py/tests/modules/test_v3_org_module.py create mode 100644 forthic-py/tests/modules/test_v3_trino_module.py create mode 100644 forthic-py/tests/modules/trino_context.py create mode 100644 forthic-py/tests/sample_date_module.py create mode 100644 forthic-py/tests/test_v3_global_module.py create mode 100644 forthic-py/tests/test_v3_interpreter.py create mode 100644 forthic-py/tests/test_v3_tokenizer.py diff --git a/forthic-py/__init__.py b/forthic-py/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/forthic-py/src/README.md b/forthic-py/src/README.md new file mode 100644 index 0000000..e5fa31c --- /dev/null +++ b/forthic-py/src/README.md @@ -0,0 +1,9 @@ +# README.md + +The `v3` version of Forthic is meant to tighten up the language, the modules, and the conventions based on the learnings of a year of actively developing and managing roughly 300 Forthic apps at LinkedIn. + +Many of the improvements are motivated by the success of applying ideas from Category Theory to Forthic development. This applies to both the language conventions and the Forthic modules. + +Some of the improvements in the modules built on 3rd party APIs is to improve the fidelity of the module so that if the API allows something, so will the module. But at the same time, if an API does something inconsistent, we will try to correct it where possible in the Forthic module. 
+ +The overall aim is to simplify the Forthic language by making it more consistent and "categorical". \ No newline at end of file diff --git a/forthic-py/src/__init__.py b/forthic-py/src/__init__.py new file mode 100644 index 0000000..de40ea7 --- /dev/null +++ b/forthic-py/src/__init__.py @@ -0,0 +1 @@ +__import__('pkg_resources').declare_namespace(__name__) diff --git a/forthic-py/src/forthic/modules/__init__.py b/forthic-py/src/forthic/modules/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/forthic-py/src/forthic/modules/airtable_module.py b/forthic-py/src/forthic/modules/airtable_module.py new file mode 100644 index 0000000..3bb8d2b --- /dev/null +++ b/forthic-py/src/forthic/modules/airtable_module.py @@ -0,0 +1,155 @@ +import requests +import urllib +from ..module import Module +from ..interfaces import IInterpreter +from ...utils.errors import ( + AirtableError, + AirtableUnauthorized +) +from typing import List + + +MAX_ITERATIONS = 100 + + +class AirtableModule(Module): + """Adds support for working with Airtable + + This adds basic support for working with Airtable: + """ + def __init__(self, interp: IInterpreter): + super().__init__('airtable', interp, FORTHIC) + self.context_stack: List['AirtableCredsContext'] = [] + + self.add_module_word('PUSH-CONTEXT!', self.word_PUSH_CONTEXT_bang) + self.add_module_word('POP-CONTEXT!', self.word_POP_CONTEXT_bang) + self.add_module_word('RECORDS', self.word_RECORDS) + + # ( creds_context -- ) + def word_PUSH_CONTEXT_bang(self, interp: IInterpreter): + creds_context = interp.stack_pop() + self.context_stack.append(creds_context) + + # ( -- ) + def word_POP_CONTEXT_bang(self, interp: IInterpreter): + self.context_stack.pop() + + # ( base_id table config -- ) + def word_RECORDS(self, interp: IInterpreter): + config = interp.stack_pop() + table = interp.stack_pop() + base_id = interp.stack_pop() + + def urlencode(string): + return urllib.parse.quote_plus(string) + + def make_fields_param(value): + 
pieces = [] + for v in value: + pieces.append(f"fields%5B%5D={urlencode(v)}") + return "&".join(pieces) + + def make_sort_param(records): + # We're converting records to things like + # sort[0][field]=TaskID&sort[0][direction]=asc + pieces = [] + for index, r in enumerate(records): + pieces.append(f"sort%5B{index}%5D%5Bfield%5D={r['field']}") + if r.get('direction'): + pieces.append(f"sort%5B{index}%5D%5Bdirection%5D={r['direction']}") + return "&".join(pieces) + + def make_query_param(field, value): + res = "" + if field == "fields": + res = make_fields_param(value) + elif field == "sort": + res = make_sort_param(value) + else: + res = f"{field}={urlencode(value)}" + return res + + def construct_query_param_string(config, offset): + if not config: + config = {} + + if offset: + config["offset"] = offset + + if len(config) == 0: + return "" + + pieces = [] + for field, value in config.items(): + pieces.append(make_query_param(field, value)) + + res = f"?{'&'.join(pieces)}" + return res + + context = self.get_context() + + # We may need to iterate to get all of the records + def get_records(records=[], offset=None, iterations=1): + qstring = construct_query_param_string(config, offset) + api_url = f'/v0/{base_id}/{table}{qstring}' + response = context.requests_get(api_url) + if not response.ok: + raise RuntimeError(f"airtable.RECORDS: Error getting records: {response.reason}") + data = response.json() + + records.extend(data["records"]) + if iterations > MAX_ITERATIONS: + raise RuntimeError(f"airtable.RECORDS exceeded {MAX_ITERATIONS} iterations") + + if data.get("offset"): + get_records(records, data["offset"], iterations + 1) + return records + + result = get_records() + interp.stack_push(result) + + # ================================= + # Helpers + + def get_context(self): + if not self.context_stack: + raise AirtableError( + 'Need to push an AirtableCredsContext with PUSH-CONTEXT!' 
+ ) + result = self.context_stack[-1] + return result + + +class AirtableCredsContext: + """Clients of the alation module must extend CredsContext and use PUSH-CONTEXT! + in order to set the current creds context""" + def __init__(self, field): + self.field = field + + def get_host(self): + return None + + def get_api_token(self): + return None + + def get_cert_verify(self): + return False + + def requests_get(self, api_url): + """Makes HTTP GET call to pull data""" + api_url_w_host = self.get_host() + api_url + headers = { + "Authorization": f"Bearer {self.get_api_token()}" + } + result = requests.get( + api_url_w_host, + headers=headers, + verify=self.get_cert_verify(), + ) + if result.status_code == 401: + raise AirtableUnauthorized() + return result + + +FORTHIC = ''' +''' diff --git a/forthic-py/src/forthic/modules/alation_module.py b/forthic-py/src/forthic/modules/alation_module.py new file mode 100644 index 0000000..d9288a3 --- /dev/null +++ b/forthic-py/src/forthic/modules/alation_module.py @@ -0,0 +1,201 @@ +import requests +import csv + +from ..module import Module +from ..interfaces import IInterpreter +from typing import List + + +class InvalidAlationCreds(RuntimeError): + def __init__(self, field, host): + super().__init__(f'Invalid field: {field}') + self.field = field + self.host = host + + +class AlationError(RuntimeError): + pass + + +class AlationModule(Module): + """Adds support for working with Alation + + This adds basic support for working with Alation: + + * Updating/clearing refresh tokens + * Accessing SQL queries + * Accessing query results + """ + def __init__(self, interp: IInterpreter): + super().__init__('alation', interp, ALATION_FORTHIC) + self.context_stack: List['AlationCredsContext'] = [] + + self.add_module_word('PUSH-CONTEXT!', self.word_PUSH_CONTEXT_bang) + self.add_module_word('POP-CONTEXT!', self.word_POP_CONTEXT_bang) + + self.add_module_word('QUERY-SQL', self.word_QUERY_SQL) + self.add_module_word('QUERY-RESULT-INFO', 
self.word_QUERY_RESULT_INFO) + self.add_module_word('QUERY-RESULT', self.word_QUERY_RESULT) + self.add_module_word('UPDATE-REFRESH-TOKEN', self.word_UPDATE_REFRESH_TOKEN) + self.add_module_word('DELETE-CREDS', self.word_DELETE_CREDS) + + # ( creds_context -- ) + def word_PUSH_CONTEXT_bang(self, interp: IInterpreter): + creds_context = interp.stack_pop() + self.context_stack.append(creds_context) + + # ( -- ) + def word_POP_CONTEXT_bang(self, interp: IInterpreter): + self.context_stack.pop() + + # ( query_id -- sql ) + def word_QUERY_SQL(self, interp: IInterpreter): + query_id = interp.stack_pop() + context = self.get_context() + access_token = self.get_access_token() + headers = {'Token': access_token} + url = f'https://{context.get_host()}/integration/v1/query/{query_id}/sql/' + response = requests.get( + url, headers=headers, verify=context.get_cert_verify() + ) + + if not response.ok: + raise AlationError(f'QUERY-SQL failed: {response.text}') + + interp.stack_push(response.text) + + # ( query_id -- result_info ) + def word_QUERY_RESULT_INFO(self, interp: IInterpreter): + """Returns the last query result ID""" + query_id = interp.stack_pop() + context = self.get_context() + access_token = self.get_access_token() + headers = {'Token': access_token} + url = f'https://{context.get_host()}/integration/v1/query/{query_id}/result/latest' + response = requests.get( + url, headers=headers, verify=context.get_cert_verify() + ) + + if not response.ok: + raise AlationError(f'QUERY-RESULT-ID failed: {response.text}') + + result = response.json() + interp.stack_push(result) + + # ( result_id -- records ) + def word_QUERY_RESULT(self, interp: IInterpreter): + """Returns result for the given result_id""" + result_id = interp.stack_pop() + context = self.get_context() + access_token = self.get_access_token() + headers = {'Token': access_token} + url = f'https://{context.get_host()}/integration/v1/result/{result_id}/csv' + response = requests.get( + url, headers=headers, 
verify=context.get_cert_verify() + ) + + if not response.ok: + raise AlationError(f'QUERY-RESULT-ID failed: {response.text}') + + decoded_content = response.content.decode('utf-8') + csv_reader = csv.DictReader( + decoded_content.splitlines(), delimiter=',' + ) + result = list(csv_reader) + interp.stack_push(result) + + # ( -- ) + def word_UPDATE_REFRESH_TOKEN(self, interp: IInterpreter): + """Regenerates Alation refresh token for current user, updating current Alation context and database + + NOTE: After calling this, the previous token will become invalid! + """ + context = self.get_context() + + data = { + 'refresh_token': context.get_refresh_token(), + 'user_id': context.get_user_id(), + } + + response = requests.post( + f'https://{context.get_host()}/integration/v1/regenRefreshToken/', + data=data, + verify=context.get_cert_verify(), + ) + + if not response.ok: + raise AlationError(f'REGEN-REFRESH-TOKEN failed: {response.text}') + + # Update current context + context.update_token_info(response.json()) + + # ( -- ) + def word_DELETE_CREDS(self, interp: IInterpreter): + """Deletes Alation creds + """ + context = self.get_context() + context.delete_creds() + + # ================================= + # Helpers + + def get_context(self): + if not self.context_stack: + raise AlationError( + 'Need to push an AlationCredsContext with PUSH-CONTEXT!' 
+ ) + result = self.context_stack[-1] + return result + + def get_access_token(self): + context = self.get_context() + data = { + 'refresh_token': context.get_refresh_token(), + 'user_id': context.get_user_id(), + } + + url = f'https://{context.get_host()}/integration/v1/createAPIAccessToken/' + response = requests.post( + url, data=data, verify=context.get_cert_verify() + ) + + if not response.ok: + raise InvalidAlationCreds(context.get_field(), context.get_host()) + + result = response.json()['api_access_token'] + return result + + +class AlationCredsContext: + """Clients of the alation module must extend CredsContext and use PUSH-CONTEXT! + in order to set the current creds context""" + + def update_token_info(self, token_info): + self.token_info = token_info + + def delete_creds(self): + """Use this to clear out Alation creds""" + pass + + def get_host(self): + return None + + def get_field(self): + return None + + def get_proxies(self): + """Returns a dict object containing proxies for fields 'http' and 'https'""" + return None + + def get_user_id(self): + return None + + def get_refresh_token(self): + return None + + def get_cert_verify(self): + return False + + +ALATION_FORTHIC = ''' +''' diff --git a/forthic-py/src/forthic/modules/cache_module.py b/forthic-py/src/forthic/modules/cache_module.py new file mode 100644 index 0000000..126ff0b --- /dev/null +++ b/forthic-py/src/forthic/modules/cache_module.py @@ -0,0 +1,76 @@ +import os +import json +from ..module import Module +from ..interfaces import IInterpreter +from ..global_module import default_json_serialize + + +class CacheModule(Module): + """This implements a simple file-based cache for Forthic data + + `CACHE!` stores data in JSON format + `CACHE@` loads data from cache as a Python dict + + See `docs/modules/cache_module.md` for detailed descriptions of each word. 
+ """ + def __init__(self, interp: IInterpreter): + super().__init__('cache', interp, CACHE_FORTHIC) + self.add_module_word('CWD!', self.word_CWD_bang) + self.add_module_word('CACHE!', self.word_CACHE_bang) + self.add_module_word('CACHE@', self.word_CACHE_at) + + self.working_directory = '.' + self.cache_file = '.cache' + + # ( path -- ) + def word_CWD_bang(self, interp: IInterpreter): + path = interp.stack_pop() + self.working_directory = path + + # ( value key -- ) + def word_CACHE_bang(self, interp: IInterpreter): + key = interp.stack_pop() + value = interp.stack_pop() + cache = self.load_cache() + + cache[key] = value + + self.store_cache(cache) + + # ( key -- value ) + def word_CACHE_at(self, interp: IInterpreter): + key = interp.stack_pop() + cache = self.load_cache() + result = cache.get(key) + interp.stack_push(result) + + # ---------------------------------------- + # Helpers + def get_cache_filename(self): + result = f'{self.working_directory}/{self.cache_file}' + return result + + def ensure_cache_file(self): + filename = self.get_cache_filename() + if not os.path.isfile(filename): + with open(filename, 'w') as f: + f.write(json.dumps({})) + + def load_cache(self): + self.ensure_cache_file() + filename = self.get_cache_filename() + with open(filename, 'r') as f: + content = f.read().strip() + if content: + result = json.loads(content) + else: + result = {} + return result + + def store_cache(self, cache): + filename = self.get_cache_filename() + with open(filename, 'w') as f: + f.write(json.dumps(cache, indent=4, separators=(',', ': '), default=default_json_serialize)) + + +CACHE_FORTHIC = '' diff --git a/forthic-py/src/forthic/modules/confluence_module.py b/forthic-py/src/forthic/modules/confluence_module.py new file mode 100644 index 0000000..489893f --- /dev/null +++ b/forthic-py/src/forthic/modules/confluence_module.py @@ -0,0 +1,444 @@ +import re +import urllib +import requests +from ..module import Module +from ..interfaces import IInterpreter 
+from ...utils.errors import ConfluenceError +from typing import List, Optional + +# Unit separator +US = chr(31) + + +class ConfluenceModule(Module): + """This implements basic support to upsert wiki pages to Confluence + + See `docs/modules/confluence_module.md` for detailed descriptions of each word. + """ + def __init__(self, interp: IInterpreter): + super().__init__('confluence', interp, CONFLUENCE_FORTHIC) + self.context_stack: List['ConfluenceContext'] = [] + + self.add_module_word('PUSH-CONTEXT!', self.word_PUSH_CONTEXT_bang) + self.add_module_word('POP-CONTEXT!', self.word_POP_CONTEXT_bang) + self.add_module_word('HOST', self.word_HOST) + + self.add_module_word('PAGE-INFO', self.word_PAGE_INFO) + + self.add_module_word('NBSP', self.word_NBSP) + self.add_module_word('SPACES-WIDE', self.word_SPACES_WIDE) + + self.add_module_word('|ESCAPE-TABLE-CONTENT', self.word_pipe_ESCAPE_TABLE_CONTENT) + self.add_module_word('|ESCAPE-NEWLINES', self.word_pipe_ESCAPE_NEWLINES) + self.add_module_word('COLOR-BOX', self.word_COLOR_BOX) + self.add_module_word('TABLE', self.word_TABLE) + self.add_module_word('RENDER', self.word_RENDER) + + self.add_module_word('UPSERT-PAGE', self.word_UPSERT_PAGE) + self.add_module_word('ADD-BLOG-POST', self.word_ADD_BLOG_POST) + + # ( context -- ) + def word_PUSH_CONTEXT_bang(self, interp: IInterpreter): + context = interp.stack_pop() + self.context_stack.append(context) + + # ( -- ) + def word_POP_CONTEXT_bang(self, interp: IInterpreter): + self.context_stack.pop() + + # ( -- host ) + def word_HOST(self, interp: IInterpreter): + context = self.current_context() + interp.stack_push(context.get_host()) + + # ( space title -- page_info ) + def word_PAGE_INFO(self, interp: IInterpreter): + context = self.current_context() + title = interp.stack_pop() + space = interp.stack_pop() + + encoded_title = urllib.parse.quote_plus(title) + api_url = f'/wiki/cf/rest/api/content?title={encoded_title}&spaceKey={space}&expand=version' + response = 
context.requests_get(api_url) + + if response.status_code != 200: + raise ConfluenceError( + f"Can't find '{title}' in space '{space}: {response.text}'" + ) + data = response.json() + + if not data['results']: + raise ConfluenceError(f"Can't find '{title}' in space '{space}'") + + result = data['results'][0] + interp.stack_push(result) + + # ( -- nbsp_char ) + def word_NBSP(self, interp: IInterpreter): + interp.stack_push(' ') + + # ( str num_spaces -- str ) + def word_SPACES_WIDE(self, interp: IInterpreter): + """This forces a string to be num_spaces wide using  """ + num_spaces = interp.stack_pop() + string = interp.stack_pop() + + # Count   as one space + num_nbsps = len(re.findall(' ', string)) + chars_to_subtract = 5 * num_nbsps + string_len = len(string) - chars_to_subtract + + if string_len >= num_spaces: + result = string + else: + spaces_to_add = num_spaces - string_len + result = string + spaces_to_add * ' ' + + interp.stack_push(result) + + # ( str -- str ) + def word_pipe_ESCAPE_TABLE_CONTENT(self, interp: IInterpreter): + """This escapes content that should be rendered into a wiki table cell. + + In particular, we convert newlines into "\\", *except* for bulleted lists and numbered lists. 
+ We also remove the '|' character except in the case where it's used to specify a link + """ + content = interp.stack_pop() + result = escape_table_content(content) + interp.stack_push(result) + + # ( str -- str ) + def word_pipe_ESCAPE_NEWLINES(self, interp: IInterpreter): + content = interp.stack_pop() + if not content: + interp.stack_push(content) + return + content = content.strip() + content = content.replace('\r', '') + pieces = content.split('\n') + result = r" \\ ".join(pieces) + interp.stack_push(result) + pass + + # ( color -- ColorBox ) + def word_COLOR_BOX(self, interp: IInterpreter): + color = interp.stack_pop() + result = ColorBox(color) + interp.stack_push(result) + + # ( headers recs -- wiki_markup ) + def word_TABLE(self, interp: IInterpreter): + recs = interp.stack_pop() + headers = interp.stack_pop() + + def table_heading(): + interp.run("[ ''") + for h in headers: + interp.stack_push(h) + interp.run("'' ] '||' JOIN") + + def table_row(rec): + interp.run("[ ''") + for h in headers: + value = rec.get(h) + if not value: + value = '' + interp.stack_push(value) + interp.run("'' ] '|' JOIN") + + # Assemble table + interp.run('[') + table_heading() + for r in recs: + table_row(r) + interp.run(']') + interp.run('/N JOIN') + + # ( object -- html/wiki ) + def word_RENDER(self, interp: IInterpreter): + obj = interp.stack_pop() + if isinstance(obj, str): + result = obj + else: + result = obj.render() + interp.stack_push(result) + + # ( space parent_title title content -- ) + def word_UPSERT_PAGE(self, interp: IInterpreter): + context = self.current_context() + + content = interp.stack_pop() + title = interp.stack_pop() + parent_title = interp.stack_pop() + space = interp.stack_pop() + encoded_title = urllib.parse.quote_plus(title) + + def does_page_exist(): + api_url = f'/wiki/cf/rest/api/content?title={encoded_title}&spaceKey={space}&expand=ancestors' + response = context.requests_get(api_url) + data = response.json() + if data['size'] == 0: + return 
False + + page_info = data['results'][0] + current_parent = page_info['ancestors'][-1]['title'] + if current_parent != parent_title: + raise ConfluenceError( + f"'{title}' exists, but its current parent '{current_parent}' does not match the specified parent '{parent_title}'" + ) + return True + + def get_page_info(page_title): + interp.stack_push(space) + interp.stack_push(page_title) + interp.run('PAGE-INFO') + res = interp.stack_pop() + return res + + def create_page(): + parent_info = get_page_info(parent_title) + parent_id = parent_info['id'] + request_data = { + 'type': 'page', + 'title': title, + 'ancestors': [{'id': parent_id}], + 'space': {'key': space}, + 'body': { + 'storage': {'value': content, 'representation': 'wiki'} + }, + } + api_url = '/wiki/cf/rest/api/content' + response = context.requests_post(api_url, json=request_data) + if response.status_code != 200: + raise ConfluenceError( + f"Could not create page '{title}': {response.text}" + ) + + def get_version(page_info): + version_info = page_info.get('version') + if version_info: + res = int(version_info['number']) + else: + res = 1 + return res + + def update_page(): + page_info = get_page_info(title) + page_id = page_info['id'] + version = get_version(page_info) + + request_data = { + 'id': page_id, + 'type': 'page', + 'title': title, + 'space': {'key': space}, + 'body': { + 'storage': {'value': content, 'representation': 'wiki'} + }, + 'version': {'number': version + 1}, + } + + api_url = f'/wiki/cf/rest/api/content/{page_id}' + response = context.requests_put(api_url, json=request_data) + + if response.status_code != 200: + raise ConfluenceError( + f"Could not update page '{title}': {response.text}" + ) + + # Do the upsert + if does_page_exist(): + update_page() + else: + create_page() + + # NOTE: This has not been officially released yet and is subject to change + # ( space title content labels -- ) + def word_ADD_BLOG_POST(self, interp: IInterpreter): + context = self.current_context() + + 
labels = interp.stack_pop() + content = interp.stack_pop() + title = interp.stack_pop() + space = interp.stack_pop() + + def make_record_label(label): + return { + "prefix": "global", + "name": label + } + + if labels: + label_records = [make_record_label(label) for label in labels] + else: + label_records = None + + def create_post(): + request_data = { + 'type': 'blogpost', + 'title': title, + 'space': {'key': space}, + 'body': { + 'storage': {'value': content, 'representation': 'wiki'} + } + } + api_url = '/wiki/cf/rest/api/content' + response = context.requests_post(api_url, json=request_data) + if response.status_code != 200: + raise ConfluenceError( + f"Could not create post '{title}': {response.text}" + ) + + # Add labels + if label_records: + page_id = response.json()["id"] + label_api_url = f'/wiki/cf/rest/api/content/{page_id}/label' + response = context.requests_post(label_api_url, json=label_records) + if response.status_code != 200: + raise ConfluenceError( + f"Could not add labels to blog post '{title}': {response.text}" + ) + return + + create_post() + return + + def current_context(self): + if not self.context_stack: + raise ConfluenceError( + 'Use confluence.PUSH-CONTEXT! to provide a Confluence context' + ) + + result = self.context_stack[-1] + return result + + +def escape_table_content(content): + """This escapes content that should be rendered into a wiki table cell. 
+ + In particular, we remove blank lines and we also remove the '|' character except in the case where it's + used to specify a link + """ + if not content: + return '' + + def remove_blank_lines(s): + s = s.strip() + s = s.replace('\r', '') + pieces = s.split('\n') + non_blank_pieces = [p for p in pieces if p] + res = "\n".join(non_blank_pieces) + + # If content is empty, return a space so the table cell doesn't collapse + if not res: + res = ' ' + return res + + def remove_pipes_if_needed(s): + res = re.sub( + r'\[(.*?)\|(.*?)\]', r'[\1%s\2]' % US, s + ) # Replace pipes in links with US character + res = re.sub( + r'\|', '', res + ) # Remove all other pipes + res = re.sub( + US, '|', res + ) # Replace US chars with pipes again + return res + + result = remove_blank_lines(content) + result = remove_pipes_if_needed(result) + return result + + +def raise_status_error_if_needed(response): + if response.status_code < 400: + return + + if response.status_code == 401: + raise ConfluenceError("Unauthorized request. Please check your Confluence credentials.") + else: + raise ConfluenceError(response.text) + + +class ConfluenceContext: + """Override this and pass to PUSH-CONTEXT! 
in order to make Confluence calls""" + + def requests_get(self, api_url: str): + """Makes HTTP GET call to pull data""" + api_url_w_host = self.get_host() + api_url + result = requests.get( + api_url_w_host, + auth=(self.get_username(), self.get_password()), + verify=self.get_cert_verify(), + ) + raise_status_error_if_needed(result) + return result + + def requests_post(self, api_url: str, json: Optional[str] = None): + api_url_w_host = self.get_host() + api_url + result = requests.post( + api_url_w_host, + auth=(self.get_username(), self.get_password()), + json=json, + verify=self.get_cert_verify(), + ) + raise_status_error_if_needed(result) + return result + + def requests_put(self, api_url: str, json: Optional[str] = None): + api_url_w_host = self.get_host() + api_url + result = requests.put( + api_url_w_host, + auth=(self.get_username(), self.get_password()), + json=json, + verify=self.get_cert_verify(), + ) + raise_status_error_if_needed(result) + return result + + def get_host(self): + return None + + # Override this to supply the path to the cert file to use. Use False to skip verification + def get_cert_verify(self): + return False + + def get_username(self): + return None + + def get_password(self): + return None + + +CONFLUENCE_FORTHIC = ''' +''' + + +class ColorBox(): + def __init__(self, color): + self.color = color + self.options = { + "hover_text": '' + } + return + + def __getitem__(self, key: str) -> Optional[bool]: + result = self.options.get(key) + return result + + def __setitem__(self, key: str, value: Optional[bool]): + if key not in self.options: + raise RuntimeError(f"Unknown ColorBox option: '{key}'. Must be one of {self.options.keys()}") + self.options[key] = value + + def render(self): + result = '{html}' + result += ' ' + result += ' ' + result += f'''
''' + result += '
{html}' + + return result diff --git a/forthic-py/src/forthic/modules/datasets_module.py b/forthic-py/src/forthic/modules/datasets_module.py new file mode 100644 index 0000000..9fa483e --- /dev/null +++ b/forthic-py/src/forthic/modules/datasets_module.py @@ -0,0 +1,184 @@ +import os +import json +import threading +from ..module import Module +from ..interfaces import IInterpreter +from typing import Any + + +# From: https://www.oreilly.com/library/view/python-cookbook/0596001673/ch06s04.html +class ReadWriteLock: + """A lock object that allows many simultaneous "read locks", but + only one "write lock." """ + + def __init__(self): + self._read_ready = threading.Condition(threading.Lock()) + self._readers = 0 + + def acquire_read(self): + """Acquire a read lock. Blocks only if a thread has + acquired the write lock.""" + self._read_ready.acquire() + try: + self._readers += 1 + finally: + self._read_ready.release() + + def release_read(self): + """ Release a read lock. """ + self._read_ready.acquire() + try: + self._readers -= 1 + if not self._readers: + self._read_ready.notifyAll() + finally: + self._read_ready.release() + + def acquire_write(self): + """Acquire a write lock. Blocks until there are no + acquired read or write locks.""" + self._read_ready.acquire() + while self._readers > 0: + self._read_ready.wait() + + def release_write(self): + """ Release a write lock. """ + self._read_ready.release() + + +DATASETS_LOCK = ReadWriteLock() + + +class DatasetsModule(Module): + """This implements a simple file-based storage of datasets + + This reads/writes/upserts arrays of records as coherent datasets. + + See `docs/modules/datasets_module.md` for detailed descriptions of each word. 
+ """ + def __init__(self, interp: IInterpreter): + super().__init__('datasets', interp, DATASETS_FORTHIC) + self.working_directory = None + self.flags = { + "overwrite": None, + "drop_nulls": None, + } + + self.add_module_word('CWD!', self.word_CWD_bang) + + self.add_module_word('DATASET!', self.word_DATASET_bang) + self.add_module_word('DATASET', self.word_DATASET) + self.add_module_word('RECORDS', self.word_RECORDS) + + # Flag words + self.add_module_word('!OVERWRITE', self.word_bang_OVERWRITE) + self.add_module_word('!DROP-NULLS', self.word_bang_DROP_NULLS) + + # ( path -- ) + def word_CWD_bang(self, interp: IInterpreter): + path = interp.stack_pop() + self.working_directory = path + + # ( record dataset_label -- ) + def word_DATASET_bang(self, interp: IInterpreter): + """Updates a dataset + + If !OVERWRITE is set, then this overwrites the dataset. Otherwise, the data is merged. + """ + dataset_label = interp.stack_pop() + record = interp.stack_pop() + flags = self.get_flags() + + filepath = self.dataset_filepath(dataset_label) + + if flags.get("overwrite"): + self.write_dataset(filepath, record) + else: + dataset = self.load_dataset(filepath) + for k, v in record.items(): + dataset[k] = v + self.write_dataset(filepath, dataset) + + # ( dataset_label -- dataset ) + def word_DATASET(self, interp: IInterpreter): + """Loads a dataset + """ + dataset_label = interp.stack_pop() + + filepath = self.dataset_filepath(dataset_label) + result = self.load_dataset(filepath) + interp.stack_push(result) + + # ( dataset_label keys -- records ) + def word_RECORDS(self, interp: IInterpreter): + """Loads records from a dataset + """ + keys = interp.stack_pop() + dataset_label = interp.stack_pop() + flags = self.get_flags() + + filepath = self.dataset_filepath(dataset_label) + dataset = self.load_dataset(filepath) + result = [] + for k in keys: + value = dataset.get(k) + if flags.get('drop_nulls') and value is None: + pass + else: + result.append(value) + 
interp.stack_push(result) + + # ( -- ) + def word_bang_OVERWRITE(self, interp: IInterpreter): + self.flags["overwrite"] = True + + # ( -- ) + def word_bang_DROP_NULLS(self, interp: IInterpreter): + self.flags["drop_nulls"] = True + + # ---------------------------------------- + # Helpers + + def get_flags(self): + flags = self.flags.copy() + self.flags = {} + return flags + + def dataset_filepath(self, dataset_label: str) -> str: + result = f'{self.working_directory}/datasets/{dataset_label}.dataset' + return result + + def load_dataset(self, filepath: str) -> Any: + result = {} + DATASETS_LOCK.acquire_read() + try: + self.ensure_dirpath(filepath) + if not os.path.isfile(filepath): + return {} + + with open(filepath, 'r') as f: + content = f.read().strip() + if content: + result = json.loads(content) + else: + result = {} + finally: + DATASETS_LOCK.release_read() + return result + + def write_dataset(self, filepath: str, dataset: Any) -> None: + DATASETS_LOCK.acquire_write() + try: + self.ensure_dirpath(filepath) + with open(filepath, 'w') as f: + f.write(json.dumps(dataset, indent=4, separators=(',', ': '))) + finally: + DATASETS_LOCK.release_write() + + def ensure_dirpath(self, filepath: str) -> None: + if not os.path.exists(os.path.dirname(filepath)): + os.makedirs(os.path.dirname(filepath)) + + +DATASETS_FORTHIC = ''' +''' diff --git a/forthic-py/src/forthic/modules/excel_module.py b/forthic-py/src/forthic/modules/excel_module.py new file mode 100644 index 0000000..91338bc --- /dev/null +++ b/forthic-py/src/forthic/modules/excel_module.py @@ -0,0 +1,337 @@ +import base64 +import json +import oauthlib.oauth2.rfc6749.errors +from requests_oauthlib import OAuth2Session # type: ignore +from ..module import Module +from ..interfaces import IInterpreter +from ...utils.errors import ( + ExpiredMSGraphOAuthToken, + ExcelError +) +from typing import List + + +def raises_ExpiredMSGraphOAuthToken(fn): + """Decorator that catches expiration errors and raises 
ExpiredMSGraphOAuthToken instead""" + def wrapper(*args, **kwargs): + res = None + try: + res = fn(*args, **kwargs) + except (oauthlib.oauth2.rfc6749.errors.TokenExpiredError, oauthlib.oauth2.rfc6749.errors.InvalidGrantError): + raise ExpiredMSGraphOAuthToken() + return res + return wrapper + + +class ExcelModule(Module): + """This implements basic access to Excel via MS Graph + + See `docs/modules/excel_module.md` for detailed descriptions of each word. + """ + def __init__(self, interp: IInterpreter): + super().__init__('excel', interp, EXCEL_FORTHIC) + self.context_stack: List['CredsContext'] = [] + + self.add_module_word('PUSH-CONTEXT!', self.word_PUSH_CONTEXT_bang) + self.add_module_word('POP-CONTEXT!', self.word_POP_CONTEXT_bang) + + self.add_module_word('WORKBOOK-INFO', self.word_WORKBOOK_INFO) + self.add_module_word('SHEET-NAMES', self.word_SHEET_NAMES) + self.add_module_word('TABLE-NAMES', self.word_TABLE_NAMES) + + self.add_module_word('TABLE-RECORDS', self.word_TABLE_RECORDS) + self.add_module_word('ADD-TABLE-ROWS', self.word_ADD_TABLE_ROWS) + self.add_module_word('UPDATE-RANGE', self.word_UPDATE_RANGE) + self.add_module_word("USED-RANGE", self.word_USED_RANGE) + + # ( creds_context -- ) + def word_PUSH_CONTEXT_bang(self, interp: IInterpreter): + creds_context = interp.stack_pop() + self.context_stack.append(creds_context) + + # ( -- ) + def word_POP_CONTEXT_bang(self, interp: IInterpreter): + self.context_stack.pop() + + # (shared_url -- doc_info) + @raises_ExpiredMSGraphOAuthToken + def word_WORKBOOK_INFO(self, interp: IInterpreter): + shared_url = interp.stack_pop() + msgraph_session = self.get_msgraph_session() + + # See https://docs.microsoft.com/en-us/graph/api/shares-get?view=graph-rest-1.0&tabs=http + def get_encoded_url() -> str: + encoded_url = base64.b64encode(shared_url.encode()).decode('utf-8') + res = 'u!' 
+ encoded_url.strip('=').replace('/', '_').replace( + '+', '-' + ) + return res + + context = self.get_context() + api_url = ( + f'https://graph.microsoft.com/v1.0/shares/{get_encoded_url()}/root' + ) + response = msgraph_session.get(api_url, proxies=context.get_proxies()) + data = response.json() + result = { + 'drive_id': data['parentReference']['driveId'], + 'item_id': data['id'], + } + interp.stack_push(result) + + # (workbook_info -- names) + @raises_ExpiredMSGraphOAuthToken + def word_SHEET_NAMES(self, interp: IInterpreter): + workbook_info = interp.stack_pop() + drive_id = workbook_info['drive_id'] + item_id = workbook_info['item_id'] + + msgraph_session = self.get_msgraph_session() + workbook_session_id = self.get_workbook_session_id( + drive_id, item_id, msgraph_session + ) + + api_url = f'https://graph.microsoft.com/v1.0/drives/{drive_id}/items/{item_id}/workbook/worksheets' + headers = {'workbook-session-id': workbook_session_id} + context = self.get_context() + response = msgraph_session.get( + api_url, headers=headers, proxies=context.get_proxies() + ) + if response.status_code != 200: + raise ExcelError( + f'Unable to get sheet names for {item_id}: {response.text}' + ) + + data = response.json() + result = [item['name'] for item in data['value']] + interp.stack_push(result) + + # (workbook_info sheet_name -- names) + @raises_ExpiredMSGraphOAuthToken + def word_TABLE_NAMES(self, interp: IInterpreter): + sheet_name = interp.stack_pop() + workbook_info = interp.stack_pop() + drive_id = workbook_info['drive_id'] + item_id = workbook_info['item_id'] + + msgraph_session = self.get_msgraph_session() + workbook_session_id = self.get_workbook_session_id( + drive_id, item_id, msgraph_session + ) + + api_url = f'https://graph.microsoft.com/v1.0/drives/{drive_id}/items/{item_id}/workbook/worksheets/{sheet_name}/tables' + headers = {'workbook-session-id': workbook_session_id} + context = self.get_context() + response = msgraph_session.get( + api_url, 
headers=headers, proxies=context.get_proxies() + ) + if response.status_code != 200: + raise ExcelError( + f'Unable to get table names for {item_id}/{sheet_name}: {response.text}' + ) + + data = response.json() + result = [item['name'] for item in data['value']] + interp.stack_push(result) + + # (workbook_info sheet_name table_name -- records) + @raises_ExpiredMSGraphOAuthToken + def word_TABLE_RECORDS(self, interp: IInterpreter): + table_name = interp.stack_pop() + sheet_name = interp.stack_pop() + workbook_info = interp.stack_pop() + drive_id = workbook_info['drive_id'] + item_id = workbook_info['item_id'] + + msgraph_session = self.get_msgraph_session() + workbook_session_id = self.get_workbook_session_id( + drive_id, item_id, msgraph_session + ) + + def get_table_columns(): + api_url = f'https://graph.microsoft.com/v1.0/drives/{drive_id}/items/{item_id}/workbook/worksheets/{sheet_name}/tables/{table_name}/columns' + headers = {'workbook-session-id': workbook_session_id} + context = self.get_context() + response = msgraph_session.get( + api_url, headers=headers, proxies=context.get_proxies() + ) + data = response.json() + res = [] + for item in data['value']: + col_vals = [] + for v in item['values']: + col_vals.append(v[0]) + res.append(col_vals) + return res + + def columns_to_records(columns): + if len(columns) == 0: + return [] + + # Set up result + res = [] + num_records = ( + len(columns[0]) - 1 + ) # Don't count heading as a record + for _ in range(num_records): + res.append({}) + + # Store values + for col in columns: + field = col[0] + values = col[1:] + for i in range(len(values)): + res[i][field] = values[i] + return res + + # Pull the data and convert it into records + table_columns = get_table_columns() + result = columns_to_records(table_columns) + interp.stack_push(result) + + # (workbook_info sheet_name table_name rows -- ) + @raises_ExpiredMSGraphOAuthToken + def word_ADD_TABLE_ROWS(self, interp: IInterpreter): + rows = interp.stack_pop() + 
table_name = interp.stack_pop() + sheet_name = interp.stack_pop() + workbook_info = interp.stack_pop() + drive_id = workbook_info['drive_id'] + item_id = workbook_info['item_id'] + + msgraph_session = self.get_msgraph_session() + workbook_session_id = self.get_workbook_session_id( + drive_id, item_id, msgraph_session + ) + + api_url = f'https://graph.microsoft.com/v1.0/drives/{drive_id}/items/{item_id}/workbook/worksheets/{sheet_name}/tables/{table_name}/rows' + headers = {'workbook-session-id': workbook_session_id} + data = {'values': rows} + context = self.get_context() + response = msgraph_session.post( + api_url, json=data, headers=headers, proxies=context.get_proxies() + ) + if response.status_code != 201: + raise RuntimeError( + f'Unable to add table rows to {item_id}/{sheet_name}/{table_name}: {response.text}' + ) + + # (workbook_info sheet_name range rows -- ) + @raises_ExpiredMSGraphOAuthToken + def word_UPDATE_RANGE(self, interp: IInterpreter): + rows = interp.stack_pop() + a1_range = interp.stack_pop() + sheet_name = interp.stack_pop() + workbook_info = interp.stack_pop() + drive_id = workbook_info['drive_id'] + item_id = workbook_info['item_id'] + + msgraph_session = self.get_msgraph_session() + workbook_session_id = self.get_workbook_session_id( + drive_id, item_id, msgraph_session + ) + + api_url = f"https://graph.microsoft.com/v1.0/drives/{drive_id}/items/{item_id}/workbook/worksheets/{sheet_name}/range(address='{a1_range}')" + headers = {'workbook-session-id': workbook_session_id} + data = {'values': rows} + context = self.get_context() + response = msgraph_session.patch( + api_url, json=data, headers=headers, proxies=context.get_proxies() + ) + if response.status_code != 200: + raise ExcelError( + f'Unable to update range {item_id}/{sheet_name}/{a1_range}: {response.text}' + ) + + # (workbook_info sheet_name -- rows) + @raises_ExpiredMSGraphOAuthToken + def word_USED_RANGE(self, interp: IInterpreter): + sheet_name = interp.stack_pop() + 
workbook_info = interp.stack_pop() + drive_id = workbook_info['drive_id'] + item_id = workbook_info['item_id'] + + msgraph_session = self.get_msgraph_session() + workbook_session_id = self.get_workbook_session_id( + drive_id, item_id, msgraph_session + ) + api_url = f"https://graph.microsoft.com/v1.0/drives/{drive_id}/items/{item_id}/workbook/worksheets/{sheet_name}/usedRange" + headers = { + "workbook-session-id": workbook_session_id + } + response = msgraph_session.get(api_url, headers=headers) + if response.status_code != 200: + raise RuntimeError(f"Unable to get used range {item_id}/{sheet_name}: {response}") + data = response.json() + result = data.get('values') + interp.stack_push(result) + + # ================================= + # Helpers + + def get_msgraph_session(self) -> OAuth2Session: + context = self.get_context() + app_creds = context.get_app_creds() + token = context.get_auth_token() + + def token_updater(token): + pass + + refresh_url = ( + 'https://login.microsoftonline.com/common/oauth2/v2.0/token' + ) + result = OAuth2Session( + app_creds['client_id'], + token=token, + auto_refresh_kwargs=app_creds, + auto_refresh_url=refresh_url, + token_updater=token_updater, + ) + return result + + def get_context(self) -> 'CredsContext': + if not self.context_stack: + raise ExcelError( + 'Need to push an MS Graph context with PUSH-CONTEXT!' 
+ ) + result = self.context_stack[-1] + return result + + def get_workbook_session_id(self, drive_id: str, item_id: str, msgraph_session: OAuth2Session) -> str: + api_url = f'https://graph.microsoft.com/v1.0/drives/{drive_id}/items/{item_id}/workbook/createSession' + request_body = {'persistChanges': True} + context = self.get_context() + response = msgraph_session.post( + api_url, + data=json.dumps(request_body), + proxies=context.get_proxies(), + ) + if response.status_code != 201: + raise ExcelError( + f'Unable to get workbook session id for {item_id}: {response.text}' + ) + result = response.json()['id'] + return result + + +class CredsContext: + """Clients of the excel module must provide extend CredsContext and use PUSH-CONTEXT! + in order to set the current creds context""" + + def get_app_creds(self): + """Returns an object with the following fields: client_id, client_secret""" + return None + + def get_proxies(self): + """Returns a dict object containing proxies for fields 'http' and 'https'""" + return None + + def get_auth_token(self): + return None + + +EXCEL_FORTHIC = ''' +: WORKBOOK-ID WORKBOOK-INFO 'item_id' REC@; # (shared_url -- workbook_id) + +["WORKBOOK-ID"] EXPORT +''' diff --git a/forthic-py/src/forthic/modules/gdoc_module.py b/forthic-py/src/forthic/modules/gdoc_module.py new file mode 100644 index 0000000..79fc54f --- /dev/null +++ b/forthic-py/src/forthic/modules/gdoc_module.py @@ -0,0 +1,695 @@ +import json +from requests_oauthlib import OAuth2Session # type: ignore +import oauthlib.oauth2.rfc6749.errors +from ..module import Module +from ..interfaces import IInterpreter +from ...utils.errors import ( + GdocError, + ExpiredGdocOAuthToken +) +from typing import List, Any, Dict + + +def raises_ExpiredGdocOAuthToken(fn): + """Decorator that catches expiration errors and raises ExpiredGdocOAuthToken instead""" + def wrapper(*args, **kwargs): + res = None + try: + res = fn(*args, **kwargs) + except 
(oauthlib.oauth2.rfc6749.errors.TokenExpiredError, oauthlib.oauth2.rfc6749.errors.InvalidGrantError): + raise ExpiredGdocOAuthToken() + return res + return wrapper + + +FORTHIC = ''' +''' + + +# TODO: Need to rework this so it matches the gsheet module +class GdocModule(Module): + """This implements basic access to Gdocs via Google's [gdoc API](https://developers.google.com/docs/api) + + See `docs/modules/gdoc_module.md` for detailed descriptions of each word. + """ + def __init__(self, interp: IInterpreter): + super().__init__('gdoc', interp, FORTHIC) + self.context_stack: List['CredsContext'] = [] + + self.add_module_word('PUSH-CONTEXT!', self.word_PUSH_CONTEXT_bang) + self.add_module_word('POP-CONTEXT!', self.word_POP_CONTEXT_bang) + + self.add_module_word('DOC', self.word_DOC) + self.add_module_word('NEW-DOC', self.word_NEW_DOC) + self.add_module_word('BATCH-UPDATE', self.word_BATCH_UPDATE) + self.add_module_word('INSERT', self.word_INSERT) + + self.add_module_word('PT', self.word_PT) + self.add_module_word('COLOR', self.word_COLOR) + + # ----- Content + self.add_module_word('TABLE', self.word_TABLE) + self.add_module_word('TEXT', self.word_TEXT) + self.add_module_word('PAGE-BREAK', self.word_PAGE_BREAK) + + # ----- Content manipulation + self.add_module_word('TEXT-CONCAT', self.word_TEXT_CONCAT) + self.add_module_word(' 'CredsContext': + if not self.context_stack: + raise GdocError( + 'Use gdoc.PUSH-CONTEXT! 
to provide a Google context' + ) + result = self.context_stack[-1] + return result + + def get_gdoc_session(self) -> OAuth2Session: + context = self.get_context() + app_creds = context.get_app_creds() + token = context.get_auth_token() + + def token_updater(token): + pass + + refresh_url = 'https://oauth2.googleapis.com/token' + result = OAuth2Session( + app_creds['client_id'], + token=token, + auto_refresh_kwargs=app_creds, + auto_refresh_url=refresh_url, + token_updater=token_updater, + ) + return result + + +class Content: + """This is the base class for all gdoc content objects + + Some functions like `get_start_index` and `get_end_index` are used by all `Content` subclasses, + but some (like `get_insert_content_requests`) are only relevant to specific classes. The `Content` + interface is a union of all possible gdoc content methods and provides sensible defaults so all content + objects can be used in all rendering situations. + """ + def __init__(self): + self.start_index = 0 + self.end_index = 0 + + def get_text(self) -> str: + """Returns the raw text content""" + return "" + + def update_start_index(self, index: int): + """Updates the start index of content when it needs to move within a document""" + return + + def get_start_index(self) -> int: + return self.start_index + + def get_end_index(self) -> int: + return self.end_index + + def get_insert_request(self) -> Dict[str, Any]: + """Returns gdoc API batch request to insert the content into a document + + Some content (like `Table`) must be inserted first before their subcontent is added. 
+ """ + raise RuntimeError("Content is meant to be subclassed") + + def get_style_requests(self) -> List[Dict[str, Any]]: + """Returns an array of style requests for the `Content` object""" + return [] + + def get_insert_content_requests(self) -> List[Dict[str, Any]]: + """For container objects like `Table`, this returns an array of insertion requests for their subcontent""" + return [] + + def get_merges(self) -> List[Dict[str, Any]]: + """For `Table`, this returns an array of cell merge requests""" + return [] + + def get_table_styles(self) -> List[Dict[str, Any]]: + """For `Table`, returns an array of gdoc Table styles""" + return [] + + +class PageBreak(Content): + def __init__(self): + super().__init__() + + def update_start_index(self, index: int): + self.start_index = index + self.end_index = index + 2 + + def get_insert_request(self) -> Dict[str, Any]: + result = { + "insertPageBreak": { + "location": {"segmentId": "", "index": self.start_index} + } + } + return result + + +class Text(Content): + """This represents text that's being accumulated in a content array for a batch render + """ + def __init__(self, text): + super().__init__() + self.text = text + self.style_requests = [] + self.update_start_index(0) + + def get_text(self) -> str: + return self.text + + def update_start_index(self, index: int): + """Updates the start/end indexes of the content and style + """ + self.start_index = index + self.end_index = index + len(self.text) + 1 # Add implicit newline + cur_index = index + + # Update style requests + def update_style(update_type: str, style: Dict[str, Any]): + num_chars = style[update_type]["range"]["endIndex"] - style[update_type]["range"]["startIndex"] + style[update_type]["range"]["startIndex"] = cur_index + style[update_type]["range"]["endIndex"] = cur_index + num_chars + return + + for r in self.style_requests: + if "updateTextStyle" in r: + update_style("updateTextStyle", r) + elif "updateParagraphStyle" in r: + 
update_style("updateParagraphStyle", r) + else: + raise RuntimeError(f"Unknown style request: {r}") + + def get_insert_request(self) -> Dict[str, Any]: + result = { + "insertText": { + "text": self.text, + "location": {"segmentId": "", "index": self.start_index} + } + } + return result + + def add_text_style(self, style: Dict[str, Any]): + style_request = { + "updateTextStyle": { + "textStyle": style, + "fields": ",".join(style.keys()), + "range": { + "segmentId": "", + "startIndex": self.start_index, + "endIndex": self.end_index + } + } + } + self.style_requests.append(style_request) + + def add_paragraph_style(self, style: Dict[str, Any]): + style_request = { + "updateParagraphStyle": { + "paragraphStyle": style, + "fields": ",".join(style.keys()), + "range": { + "segmentId": "", + "startIndex": self.start_index, + "endIndex": self.end_index + } + } + } + self.style_requests.append(style_request) + + def get_style_requests(self) -> List[Dict[str, Any]]: + return self.style_requests + + +class ConcatText(Content): + """This represents an array of Text that's being concatenated + """ + def __init__(self, text_items: List[Text]): + super().__init__() + self.text_items = text_items + self.update_start_index(0) + + def get_text(self) -> str: + result = "" + for t in self.text_items: + result += t.get_text() + return result + + def update_start_index(self, index: int): + """Updates the start/end indexes of the content and style + """ + text = self.get_text() + self.start_index = index + self.end_index = index + len(text) + 1 # Add implicit newline + cur_index = index + + for t in self.text_items: + t.update_start_index(cur_index) + cur_index += len(t.get_text()) + + def get_insert_request(self) -> Dict[str, Any]: + result = { + "insertText": { + "text": self.get_text(), + "location": {"segmentId": "", "index": self.start_index} + } + } + return result + + def get_style_requests(self) -> List[Dict[str, Any]]: + result = [] + for t in self.text_items: + result += 
t.get_style_requests() + return result + + +class Table(Content): + """This represents a table to render + """ + def __init__(self, table_rows: List[List[Content]]): + super().__init__() + self.table_rows = self.normalize_rows(table_rows) + self.table_rows_w_indexes: List[List[Any]] = [] + self.table_styles: List[Dict[str, Any]] = [] + self.merges: List[Dict[str, Any]] = [] + self.update_start_index(0) + + def normalize_rows(self, rows: List[List[Any]]) -> List[List[Any]]: + blank = None + if not rows: + return [] + + def max_row_length() -> int: + res = 0 + for r in rows: + if len(r) > res: + res = len(r) + return res + + row_length = max_row_length() + for r in rows: + if len(r) < row_length: + r += (row_length - len(r)) * [blank] + return rows + + def update_start_index(self, index: int): + self.start_index = index + + # Tables advance the index by 1 at the start and 1 at the end + # Every row advances the index by 1 + # Every cell advances the index by 2 + num_rows = len(self.table_rows) + num_cols = len(self.table_rows[0]) + self.end_index = index + 2 + num_rows + 2 * num_rows * num_cols + + # Update merge cells requests + for m in self.merges: + m["mergeTableCells"]["tableRange"]["tableCellLocation"]["tableStartLocation"]["index"] = self.start_index + + # Add indexes to table content + self.table_rows_w_indexes = [] + cur_index = index + 1 # Advance index for rows container + for r in self.table_rows: + cur_index += 1 # Advance index for row + row_w_index = [] + for c in r: + cur_index += 1 # Advance index for start cell + cell_w_index = [c, cur_index] + row_w_index.append(cell_w_index) + cur_index += 1 # Advance index for start paragraph + self.table_rows_w_indexes.append(row_w_index) + return + + def add_table_style(self, style: Dict[str, Any], row: int, col: int, row_span: int, col_span: int): + request = { + "updateTableCellStyle": { + "tableCellStyle": style, + "fields": ",".join(style.keys()), + "tableRange": { + "tableCellLocation": { + 
"tableStartLocation": {"segmentId": "", "index": self.start_index}, + "rowIndex": row, + "columnIndex": col + }, + "rowSpan": row_span, + "columnSpan": col_span + } + } + } + self.table_styles.append(request) + return + + def add_full_table_style(self, style: Dict[str, Any]): + request = { + "updateTableCellStyle": { + "tableCellStyle": style, + "fields": ",".join(style.keys()), + "tableStartLocation": { + "segmentId": "", + "index": self.start_index + } + } + } + self.table_styles.append(request) + return + + def add_column_properties(self, column_properties: Dict[str, Any], column_indices: List[int]): + request = { + "updateTableColumnProperties": { + "tableStartLocation": { + "segmentId": "", + "index": self.start_index + }, + "columnIndices": column_indices, + "tableColumnProperties": column_properties, + "fields": ",".join(column_properties.keys()) + } + } + self.table_styles.append(request) + return + + def add_merge_cells(self, row: int, col: int, row_span: int, col_span: int): + request = { + "mergeTableCells": { + "tableRange": { + "tableCellLocation": { + "tableStartLocation": {"segmentId": "", "index": self.start_index}, + "rowIndex": row, + "columnIndex": col + }, + "rowSpan": row_span, + "columnSpan": col_span + } + } + } + self.merges.append(request) + + def get_merges(self) -> List[Dict[str, Any]]: + return self.merges + + def get_table_styles(self) -> List[Dict[str, Any]]: + def get_style_update(style) -> Dict[str, Any]: + types = ["updateTableCellStyle", "updateTableColumnProperties"] + for t in types: + if t in style: + return style[t] + raise RuntimeError(f"Couldn't find style update in {style}") + + # Update the start_index of each table style + for style in self.table_styles: + style_update = get_style_update(style) + if "tableRange" in style_update: + style_update["tableRange"]["tableCellLocation"]["tableStartLocation"]["index"] = self.start_index + else: + style_update["tableStartLocation"]["index"] = self.start_index + return 
self.table_styles + + def get_insert_request(self) -> Dict[str, Any]: + result = { + "insertTable": { + "rows": len(self.table_rows), + "columns": len(self.table_rows[0]), # We've normalized table rows, so there will be a valid row + "location": {"segmentId": "", "index": self.start_index - 1} # Bring within paragraph + } + } + + return result + + def get_insert_content_requests(self) -> List[Dict[str, Any]]: + result = [] + for r in reversed(self.table_rows_w_indexes): + for cell in reversed(r): + index = cell[1] + cell_content = cell[0] + if cell_content: + cell_content.update_start_index(index) + result.append(cell_content.get_insert_request()) + result += cell_content.get_style_requests() + return result + + +def normalize_content_array(char_index: int, content_array: List[Content]) -> List[Content]: + cur_index = char_index + result: List[Content] = [] + last_content = None + for content in content_array: + # Add implied paragraph if necessary + if isinstance(content, Table) and not isinstance(last_content, Text): + implied_paragraph = Text(" ") + implied_paragraph.update_start_index(cur_index) + result.append(implied_paragraph) + cur_index = implied_paragraph.get_end_index() + + # Remove implicit newline between two TextContents in a row + if isinstance(content, Text) and isinstance(last_content, Text): + cur_index -= 1 + + content.update_start_index(cur_index) + cur_index = content.get_end_index() + result.append(content) + last_content = content + return result + + +class CredsContext: + """Clients of the gsheet module must provide extend CredsContext and use PUSH-CONTEXT! + in order to set the current creds context""" + + def get_app_creds(self): + """Returns an object with the following fields: client_id, client_secret""" + return None + + def get_proxies(self): + """Returns a dict object containing proxies for fields 'http' and 'https'""" + return None + + def get_auth_token(self): + """Returns an object with token information returned from google. 
+ + This will have fields like access_token, refresh_token, scope, etc. + """ + return None diff --git a/forthic-py/src/forthic/modules/gsheet_module.py b/forthic-py/src/forthic/modules/gsheet_module.py new file mode 100644 index 0000000..4da2be3 --- /dev/null +++ b/forthic-py/src/forthic/modules/gsheet_module.py @@ -0,0 +1,844 @@ +import re +import json +import urllib.parse +from requests_oauthlib import OAuth2Session # type: ignore +import oauthlib.oauth2.rfc6749.errors +from ..module import Module +from ..interfaces import IInterpreter +from ...utils.errors import ( + GsheetError, + ExpiredGsheetOAuthToken +) +from typing import List, Any, Dict, Tuple + + +def raises_ExpiredGsheetOAuthToken(fn): + """Decorator that catches expiration errors and raises ExpiredGsheetOAuthToken instead""" + def wrapper(*args, **kwargs): + res = None + try: + res = fn(*args, **kwargs) + except (oauthlib.oauth2.rfc6749.errors.TokenExpiredError, oauthlib.oauth2.rfc6749.errors.InvalidGrantError): + raise ExpiredGsheetOAuthToken() + return res + return wrapper + + +FORTHIC = "" + + +class GsheetModule(Module): + """This implements access to gsheets via Google's [Sheets API](https://developers.google.com/sheets/api) + """ + + def __init__(self, interp: IInterpreter): + super().__init__('gsheet', interp, FORTHIC) + self.context_stack: List['CredsContext'] = [] + + # These are set by "flag words" to change the behavior of the words in this module + self.flags = { + "range": None, + "transpose": False, + "cell_format": False, + "null_on_error": False, + } + + self.add_module_word('PUSH-CONTEXT!', self.word_PUSH_CONTEXT_bang) + self.add_module_word('POP-CONTEXT!', self.word_POP_CONTEXT_bang) + + self.add_module_word('SPREADSHEET', self.word_SPREADSHEET) + self.add_module_word('TAB', self.word_TAB) + self.add_module_word('TAB@', self.word_TAB_at) + self.add_module_word('ENSURE-TAB!', self.word_ENSURE_TAB_bang) + + self.add_module_word('ROWS', self.word_ROWS) + self.add_module_word('ROWS!', 
self.word_ROWS_bang) + + self.add_module_word('CLEAR!', self.word_CLEAR_bang) + + self.add_module_word('RECORDS', self.word_RECORDS) + self.add_module_word('RECORDS!', self.word_RECORDS_bang) + self.add_module_word('BATCH-UPDATE-TAB!', self.word_BATCH_UPDATE_TAB_bang) + + # Flag words + self.add_module_word('!RANGE', self.word_bang_RANGE) + self.add_module_word('!TRANSPOSE', self.word_bang_TRANSPOSE) + self.add_module_word('!CELL-FORMAT', self.word_bang_CELL_FORMAT) + self.add_module_word('!NULL-ON-ERROR', self.word_bang_NULL_ON_ERROR) + + # Utils + self.add_module_word('INDEX>COL-NAME', self.word_INDEX_to_COL_NAME) + self.add_module_word('COL-NAME>INDEX', self.word_COL_NAME_to_INDEX) + + # ( creds_context -- ) + def word_PUSH_CONTEXT_bang(self, interp: IInterpreter): + """Sets the credentials context used to make calls against the API + """ + creds_context = interp.stack_pop() + self.context_stack.append(creds_context) + + # ( -- ) + def word_POP_CONTEXT_bang(self, interp: IInterpreter): + self.context_stack.pop() + + # ( url -- Spreadsheet ) + # ( Tab -- Spreadsheet ) + @raises_ExpiredGsheetOAuthToken + def word_SPREADSHEET(self, interp: IInterpreter): + """Creates a `Spreadsheet` object from a url or extracts the parent spreadsheet from a `Tab` object + """ + arg = interp.stack_pop() + + context = self.get_context() + if isinstance(arg, str): + url = arg + result = Spreadsheet(context, url) + elif isinstance(arg, Tab): + tab = arg + result = tab.get_spreadsheet() + else: + result = None + interp.stack_push(result) + + # ( url -- Tab ) + @raises_ExpiredGsheetOAuthToken + def word_TAB(self, interp: IInterpreter): + """Creates a `Tab` object from a url + """ + url = interp.stack_pop() + + try: + context = self.get_context() + _, tab_id = get_gsheet_id_and_tab_id(url) + spreadsheet = Spreadsheet(context, url) + result = spreadsheet.get_tab(tab_id) + interp.stack_push(result) + except RuntimeError: + flags = self.get_flags() + if flags.get('null_on_error'): + 
interp.stack_push(None) + else: + raise + + + + # ( Spreadsheet id -- Tab ) + # ( Spreadsheet name -- Tab ) + @raises_ExpiredGsheetOAuthToken + def word_TAB_at(self, interp: IInterpreter): + """Retrieves a `Tab` from a `Spreadsheet` using its id or name + """ + id_or_name = interp.stack_pop() + spreadsheet = interp.stack_pop() + + try: + result = spreadsheet.get_tab(id_or_name) + interp.stack_push(result) + except RuntimeError: + flags = self.get_flags() + if flags.get('null_on_error'): + interp.stack_push(None) + else: + raise + + + # ( Tab -- rows ) + @raises_ExpiredGsheetOAuthToken + def word_ROWS(self, interp: IInterpreter): + """Retrieves all the rows from a `Tab` + + Flag words: + * !RANGE: This specifies the range to read (See https://developers.google.com/sheets/api/guides/concepts#cell) + * !TRANSPOSE: If set, data is returned by column rather than by row + """ + tab = interp.stack_pop() + + flags = self.get_flags() + + if flags.get('range'): + tab_range = f"{tab.get_name()}!{flags.get('range')}" + else: + tab_range = tab.get_name() + + try: + result = get_rows(tab.get_context(), tab.get_spreadsheet_id(), tab_range, flags.get('transpose')) + interp.stack_push(result) + except RuntimeError: + if flags.get('null_on_error'): + interp.stack_push(None) + else: + raise + + + # ( Tab rows -- ) + @raises_ExpiredGsheetOAuthToken + def word_ROWS_bang(self, interp: IInterpreter): + """Writes an array of rows to a `Tab` + + Flag words: + * !RANGE: This specifies the start range to write to (See https://developers.google.com/sheets/api/guides/concepts#cell) + * !TRANSPOSE: By default, data will be written as rows. If this flag word is set, data will be written as columns + * !CELL-FORMAT: By default, data is assumed to be strings. If `!CELL-FORMAT` is set, the data will be treated + as being in a "cell" format. This is a record with a `content` string field and an `updateRequest` + field that contains a record with the structure of a gsheet API update request. 
+ See https://developers.google.com/sheets/api/samples/formatting + """ + rows = interp.stack_pop() + tab = interp.stack_pop() + + flags = self.get_flags() + + if flags.get('range'): + tab_range = f"{tab.get_name()}!{flags.get('range')}" + else: + tab_range = tab.get_name() + + if flags.get('cell_format'): + write_cells(tab, tab_range, rows, flags.get('transpose')) + else: + write_rows(tab, tab_range, rows, flags.get('transpose')) + + # ( Tab header -- Records ) + @raises_ExpiredGsheetOAuthToken + def word_RECORDS(self, interp: IInterpreter): + """Reads data from a `Tab` as an array of records + + The specified `header` is an array of column names that will be searched for in the rows of the gsheet. + If a header is found, the rows below it will be used to create an array of records where the header + columns are used as record fields. + """ + header = interp.stack_pop() + tab = interp.stack_pop() + + if not tab: + interp.stack_push(None) + return + + try: + # Check flags + flags = self.get_flags() + if flags.get('range'): + tab_range = f"{tab.get_name()}!{flags.get('range')}" + else: + tab_range = tab.get_name() + + rows = get_rows(tab.get_context(), tab.get_spreadsheet_id(), tab_range) + + def to_ascii(value: str) -> str: + res = ''.join([c for c in value if ord(c) < 128]).strip() + return res + + def get_header_to_column(values: List[str]) -> Dict[str, int]: + res = {} + ascii_values = [to_ascii(v) for v in values] + for h in header: + for i in range(len(ascii_values)): + if ascii_values[i] == h: + res[h] = i + return res + + def find_header() -> Any: + res = None + for i in range(len(rows)): + header_to_column = get_header_to_column(rows[i]) + found_all = True + for h in header: + if h not in header_to_column: + found_all = False + break + if found_all: + res = { + 'header_row': i, + 'header_to_column': header_to_column, + } + break + return res + + header_info = find_header() + if not header_info: + raise GsheetError( + f"Can't find header ({header}) in gsheet 
{tab.get_spreadsheet_id()} {tab.get_name()}" + ) + + def row_to_rec(row: List[str]) -> Dict[str, Any]: + res = {} + for h in header: + col = header_info['header_to_column'][h] + res[h] = row[col] + return res + + result = [] + for r in rows[header_info['header_row'] + 1:]: + result.append(row_to_rec(r)) + + interp.stack_push(result) + except RuntimeError: + if flags.get('null_on_error'): + interp.stack_push(None) + else: + raise + + # ( Tab header records -- ) + @raises_ExpiredGsheetOAuthToken + def word_RECORDS_bang(self, interp: IInterpreter): + """Writes an array of records to a `Tab` + + The specified header determines the order of the columns. + NOTE: This uses the same flag words as `ROWS!` + """ + records = interp.stack_pop() + header = interp.stack_pop() + tab = interp.stack_pop() + + # Peek at cell_format flag, but don't clear them since ROWS! will use them + use_cell_format = self.flags.get('cell_format') + + header_values = header + default_value = "" + + # The cell format requires values to be dicts with a "content" field + if use_cell_format: + header_values = [{"content": h} for h in header] + default_value = {"content": ""} + + rows = [header_values] + for rec in records: + row = [] + for h in header: + row.append(rec.get(h) or default_value) + rows.append(row) + + interp.stack_push(tab) + interp.stack_push(rows) + interp.run("ROWS!") + + # ( Tab update_requests -- ) + @raises_ExpiredGsheetOAuthToken + def word_BATCH_UPDATE_TAB_bang(self, interp: IInterpreter): + """Makes a batch update against a tab + + This is essentially a low-level way to access the gsheets API directly. 
+ See https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/batchUpdate + """ + update_requests = interp.stack_pop() + tab = interp.stack_pop() + batch_update_tab(tab, update_requests) + + # ( Tab -- ) + @raises_ExpiredGsheetOAuthToken + def word_CLEAR_bang(self, interp: IInterpreter): + """Clears the contents of a `Tab` + """ + tab = interp.stack_pop() + clear_tab(tab) + + # ( Spreadsheet tab_name -- Tab) + @raises_ExpiredGsheetOAuthToken + def word_ENSURE_TAB_bang(self, interp: IInterpreter): + """Ensures that the specified `Tab` exists in the gsheet and then returns it + """ + tab_name = interp.stack_pop() + spreadsheet = interp.stack_pop() + result = ensure_tab(spreadsheet, tab_name) + interp.stack_push(result) + + # ( index -- col_name ) + def word_INDEX_to_COL_NAME(self, interp: IInterpreter): + """Converts an integer index to a character column name + """ + index = interp.stack_pop() + result = index_to_col_name(index) + interp.stack_push(result) + + # ( col_name -- index ) + def word_COL_NAME_to_INDEX(self, interp: IInterpreter): + """Converts a character column name to an index + """ + col_name = interp.stack_pop() + result = col_name_to_index(col_name) + interp.stack_push(result) + + # ( range -- ) + def word_bang_RANGE(self, interp: IInterpreter): + """Sets a spreadsheet `range` flag + """ + tab_range = interp.stack_pop() + self.flags["range"] = tab_range + + # ( -- ) + def word_bang_TRANSPOSE(self, interp: IInterpreter): + """Sets a `transpose` flag to treat data as columns instead of rows + """ + self.flags["transpose"] = True + + # ( -- ) + def word_bang_CELL_FORMAT(self, interp: IInterpreter): + """Sets a `cell_format` flag to indicate that data is provided in "cell" format rather than as strings + """ + self.flags["cell_format"] = True + + # ( -- ) + def word_bang_NULL_ON_ERROR(self, interp: IInterpreter): + """When TRUE, if a word were to return a result and an error occurs, return NULL instead + """ + self.flags["null_on_error"] 
= True + + # ================================= + # Helpers + def get_flags(self): + flags = self.flags.copy() + self.flags = {} + return flags + + def get_context(self) -> 'CredsContext': + if not self.context_stack: + raise GsheetError( + 'Use gsheet.PUSH-CONTEXT! to provide a Google context' + ) + result = self.context_stack[-1] + return result + + +# ------------------------------------------------ +# Helper functions and classes +def get_gsheets_session(context) -> OAuth2Session: + app_creds = context.get_app_creds() + token = context.get_auth_token() + + def token_updater(token): + pass + + refresh_url = 'https://oauth2.googleapis.com/token' + result = OAuth2Session( + app_creds['client_id'], + token=token, + auto_refresh_kwargs=app_creds, + auto_refresh_url=refresh_url, + token_updater=token_updater, + ) + return result + + +def get_gsheet_id_and_tab_id(url: str) -> Tuple[str, str]: + """Parses a spreadsheet ID and tab ID from a gsheet URL + """ + match = re.match(r'.*docs\.google\.com.*\/d\/([^\/]+).*gid=(\d+)', url) + if not match: + raise GsheetError( + f'Unable to find gsheet_id and tab key from: {url}' + ) + gsheet_id = match.group(1) + tab_id = int(match.group(2)) + return gsheet_id, tab_id + + +def get_sheet_info(context, gsheet_id: str) -> Any: + gsheets_session = get_gsheets_session(context) + response = gsheets_session.get( + f'https://sheets.googleapis.com/v4/spreadsheets/{gsheet_id}', + proxies=context.get_proxies(), + ) + if not response.ok: + raise GsheetError(response.text) + result = response.json() + return result + + +def get_rows(context, spreadsheet_id: str, spreadsheet_range: str, transpose: bool = False) -> List[List[str]]: + spreadsheet_range_url_encoded = urllib.parse.quote_plus(spreadsheet_range) + gsheets_session = get_gsheets_session(context) + + if transpose: + majorDimension = 'COLUMNS' + else: + majorDimension = 'ROWS' + + base = 'https://sheets.googleapis.com/v4/spreadsheets' + api_url = 
f"{base}/{spreadsheet_id}/values/{spreadsheet_range_url_encoded}?majorDimension={majorDimension}" + response = gsheets_session.get(api_url, proxies=context.get_proxies()) + if not response.ok: + raise GsheetError(response.text) + + data = response.json() + if "values" not in data: + rows = [] + else: + rows = data['values'] + + # We add empty cells where needed to make all rows the same length + def pad_rows(rows: List[List[str]]) -> List[List[str]]: + if not rows: + return rows + + row_lengths = [len(r) for r in rows] + max_length = max(row_lengths) + res = [] + for r in rows: + padded_row = r + if len(r) < max_length: + for _ in range(max_length - len(r)): + padded_row.append('') + res.append(padded_row) + return res + + result = pad_rows(rows) + return result + + +def write_rows(tab: "Tab", spreadsheet_range: str, rows: List[List[str]], transpose: bool = False): + context = tab.get_context() + spreadsheet_id = tab.get_spreadsheet_id() + + spreadsheet_range_url_encoded = urllib.parse.quote_plus(spreadsheet_range) + + if not rows: + return + + if transpose: + majorDimension = 'COLUMNS' + else: + majorDimension = 'ROWS' + + gsheets_session = get_gsheets_session(context) + update_data = { + 'range': spreadsheet_range, + 'majorDimension': majorDimension, + 'values': rows, + } + input_option = 'USER_ENTERED' + api_url = f'https://sheets.googleapis.com/v4/spreadsheets/{spreadsheet_id}/values/{spreadsheet_range_url_encoded}?valueInputOption={input_option}' + status = gsheets_session.put( + api_url, + data=json.dumps(update_data), + proxies=context.get_proxies(), + ) + if not status.ok: + raise GsheetError(f'Problem writing to gsheet {spreadsheet_id} {spreadsheet_range}: {status.text}') + + +def write_cells(tab: "Tab", spreadsheet_range: str, rows: List[List[Any]], transpose: bool = False): + spreadsheet_id = tab.get_spreadsheet_id() + + content_rows = [] + for r in rows: + content_row = [] + for cell in r: + content_row.append(cell.get('content')) + 
content_rows.append(content_row) + + # Write content + write_rows(tab, spreadsheet_range, content_rows, transpose) + + # Gather formatting + # See: https://developers.google.com/sheets/api/samples/formatting + + def get_start_row_col(): + pieces = spreadsheet_range.split("!") + + if len(pieces) < 2: + startRowIndex = 0 + startColumnIndex = 0 + else: + range_pieces = pieces[1].split(":") + range_start = range_pieces[0] + match = re.match(r'([A-Z]+)(\d+)', range_start) + + column_name = match.group(1) + row = int(match.group(2)) + + startColumnIndex = col_name_to_index(column_name) + startRowIndex = row - 1 + + return startRowIndex, startColumnIndex + + startRowIndex, startColumnIndex = get_start_row_col() + + # Figure out update requests + def get_update_request_row(row): + values = [] + for cell in row: + values.append(cell.get("updateRequest") or {}) + result = { + "values": values + } + return result + + def transpose_rows(rows): + num_rows = len(rows) + if num_rows == 0: + return [] + + result = [] + num_cols = len(rows[0]) + for i in range(num_cols): + col = [] + for j in range(num_rows): + col.append(rows[j][i]) + result.append(col) + return result + + if transpose: + rows = transpose_rows(rows) + + update_request_rows = [] + for r in rows: + update_request_rows.append(get_update_request_row(r)) + + def get_fields(): + result = set() + for row in rows: + for cell in row: + update_request = cell.get("updateRequest") or {} + for k in update_request.keys(): + if k == "userEnteredFormat": + for sub_k in update_request[k].keys(): + result.add(f"{k}.{sub_k}") + else: + result.add(k) + return list(result) + + fields = get_fields() + + # If there are no fields to update, we're done + if not fields: + return + + update_requests = [{ + "updateCells": { + "range": { + "sheetId": spreadsheet_id, + "startRowIndex": startRowIndex, + "startColumnIndex": startColumnIndex, + }, + "rows": update_request_rows, + "fields": ",".join(fields) + } + }] + + batch_update_tab(tab, 
update_requests) + + +def clear_tab(tab: "Tab"): + context = tab.get_context() + spreadsheet_id = tab.get_spreadsheet_id() + tab_id = tab.get_id() + + gsheets_session = get_gsheets_session(context) + update_data = { + 'requests': [ + { + 'updateCells': { + 'range': {'sheetId': tab_id}, + 'fields': 'userEnteredValue', + } + }, + ] + } + api_url = f'https://sheets.googleapis.com/v4/spreadsheets/{spreadsheet_id}:batchUpdate' + status = gsheets_session.post( + api_url, + data=json.dumps(update_data), + proxies=context.get_proxies(), + ) + if not status.ok: + raise GsheetError(f'Problem clearing gsheet {spreadsheet_id} {tab.get_name()}: {status.text}') + + +def ensure_tab(spreadsheet: "Spreadsheet", tab_name: str) -> "Tab": + if spreadsheet.has_tab(tab_name): + return spreadsheet.get_tab(tab_name) + + # Otherwise, add tab, update spreadsheet state, and return tab + context = spreadsheet.get_context() + gsheets_session = get_gsheets_session(context) + update_data = { + 'requests': [ + { + 'addSheet': { + 'properties': { + 'title': tab_name + } + } + }, + ] + } + spreadsheet_id = spreadsheet.get_spreadsheet_id() + api_url = f'https://sheets.googleapis.com/v4/spreadsheets/{spreadsheet_id}:batchUpdate' + status = gsheets_session.post( + api_url, + data=json.dumps(update_data), + proxies=context.get_proxies(), + ) + if not status.ok: + raise GsheetError(f'Problem adding sheet to gsheet {spreadsheet_id}: {status.text}') + + # Update spreadsheet + updated_spreadsheet = Spreadsheet(context, spreadsheet.get_url()) + spreadsheet.update(updated_spreadsheet) + return spreadsheet.get_tab(tab_name) + + +def batch_update_tab(tab: "Tab", update_requests): + context = tab.get_context() + spreadsheet_id = tab.get_spreadsheet_id() + tab_id = tab.get_id() + + gsheets_session = get_gsheets_session(context) + + def add_sheet_id(update_requests): + for r in update_requests: + for v in r.values(): + if 'range' in v: + v['range']['sheetId'] = tab_id + return + + add_sheet_id(update_requests) + 
data = { + 'requests': update_requests + } + + api_url = f'https://sheets.googleapis.com/v4/spreadsheets/{spreadsheet_id}:batchUpdate' + status = gsheets_session.post( + api_url, + data=json.dumps(data), + proxies=context.get_proxies(), + ) + if not status.ok: + raise GsheetError(f'Problem running batch_update_tab {spreadsheet_id} {tab.get_name()}: {status.text}') + + +def index_to_col_name(zero_based_index: int) -> str: + if zero_based_index < 0: + raise GsheetError(f'Index ({zero_based_index}) must be >= 0') + + one_based_index = zero_based_index + 1 + + def rightmost_digit(num): + modulo = num % 26 + if modulo == 0: + res = 'Z' + else: + offset = modulo - 1 + res = chr(ord('A') + offset) + return res + + def downshift(num): + res = int((num - 1) / 26) + return res + + digits = [] + while one_based_index: + digits.append(rightmost_digit(one_based_index)) + one_based_index = downshift(one_based_index) + digits.reverse() + result = ''.join(digits) + return result + + +def col_name_to_index(col_name: str) -> int: + col_name = col_name.upper().strip() + if not re.match('^[A-Z]+$', col_name): + raise GsheetError(f'Column name ({col_name}) must be all letters') + + def char_to_val(c): + res = ord(c) - ord('A') + 1 + return res + + reversed_col_name = col_name[::-1] + result = 0 + for i in range(len(reversed_col_name)): + char = reversed_col_name[i] + result += char_to_val(char) * (26 ** i) + + result = result - 1 # Convert to 0-based index + return result + + +class CredsContext: + """Clients of the gsheet module must provide extend CredsContext and use PUSH-CONTEXT! + in order to set the current creds context""" + + def get_app_creds(self): + """Returns an object with the following fields: client_id, client_secret""" + return None + + def get_proxies(self): + """Returns a dict object containing proxies for fields 'http' and 'https'""" + return None + + def get_auth_token(self): + """Returns an object with token information returned from google. 
+ + This will have fields like access_token, refresh_token, scope, etc. + """ + return None + + +class Spreadsheet: + def __init__(self, context, url): + self.context = context + self.url = url + + self.spreadsheet_id, _ = get_gsheet_id_and_tab_id(url) + self.sheet_info = get_sheet_info(context, self.spreadsheet_id) + + def update(self, spreadsheet): + self.context = spreadsheet.context + self.url = spreadsheet.url + self.spreadsheet_id = spreadsheet.spreadsheet_id + self.sheet_info = spreadsheet.sheet_info + + def get_context(self): + return self.context + + def get_url(self): + return self.url + + def get_spreadsheet_id(self): + return self.spreadsheet_id + + def has_tab(self, id_or_name): + sheets = self.sheet_info['sheets'] + for s in sheets: + properties = s['properties'] + if properties['sheetId'] == id_or_name or properties['title'] == id_or_name: + return True + return False + + def get_tab(self, id_or_name): + sheets = self.sheet_info['sheets'] + + tab_properties = None + for s in sheets: + properties = s['properties'] + if properties['sheetId'] == id_or_name or properties['title'] == id_or_name: + tab_properties = properties + break + + if tab_properties is None: + return None + + result = Tab(self.context, self, tab_properties) + return result + + +class Tab: + def __init__(self, context, spreadsheet, tab_properties): + self.context = context + self.spreadsheet = spreadsheet + self.tab_properties = tab_properties + + def get_context(self): + return self.context + + def get_spreadsheet(self): + return self.spreadsheet + + def get_spreadsheet_id(self): + return self.spreadsheet.spreadsheet_id + + def get_id(self): + return self.tab_properties['sheetId'] + + def get_name(self): + return self.tab_properties['title'] diff --git a/forthic-py/src/forthic/modules/html_module.py b/forthic-py/src/forthic/modules/html_module.py new file mode 100644 index 0000000..4876f3a --- /dev/null +++ b/forthic-py/src/forthic/modules/html_module.py @@ -0,0 +1,756 @@ +import 
json +import html +import markdown +from ..module import Module +import random +from ..interfaces import IInterpreter +from ...utils.errors import ( + HtmlModuleError, + InvalidForthicWordError +) +from typing import List, Dict, Optional + + +ASYNC_BUTTON_KEY = '_async_forthic_button_state' + + +class HtmlModule(Module): + """This implements basic rendering of HTML via Forthic + + NOTE: For more sophisticated template-based rendering, see the `jinja_module`. + + See `docs/modules/html_module.md` for detailed descriptions of each word. + """ + def __init__(self, interp: IInterpreter): + super().__init__('html', interp, HTML_FORTHIC) + self.add_module_word('ELEMENT', self.word_ELEMENT) + self.add_module_word('RAW-HTML', self.word_RAW_HTML) + self.add_module_word('HTML', self.word_MARKDOWN_to_HTML) + self.add_module_word('RENDER', self.word_RENDER) + self.add_module_word('JS-PATH!', self.word_JS_PATH_bang) + self.add_module_word('RUN-FORTHIC.JS', self.word_RUN_FORTHIC_JS) + self.add_module_word('FORTHIC-BUTTON', self.word_FORTHIC_BUTTON) + + self.add_module_word('ASYNC-FORTHIC-BUTTON', self.word_ASYNC_FORTHIC_BUTTON) + self.add_module_word('RUN-ASYNC-BUTTON', self.word_RUN_ASYNC_BUTTON) + + self.js_path = '/static/js/forthic/v2/' + + # ( type -- element ) + def word_ELEMENT(self, interp: IInterpreter): + elem_type = interp.stack_pop() + result = Element(elem_type) + interp.stack_push(result) + + # ( string -- raw_html ) + def word_RAW_HTML(self, interp: IInterpreter): + string = interp.stack_pop() + result = RawHtml(string) + interp.stack_push(result) + + # ( parent child -- parent ) + # ( parent child_items -- parent ) + def word_l_APPEND(self, interp: IInterpreter): + child = interp.stack_pop() + parent = interp.stack_pop() + + if isinstance(child, list): + child_items = child + else: + child_items = [child] + + for item in child_items: + parent.appendChild(item) + interp.stack_push(parent) + + # ( element -- children ) + def word_CHILD_NODES(self, interp: 
IInterpreter): + element = interp.stack_pop() + result = element.getChildNodes() + interp.stack_push(result) + + # ( element string -- element ) + def word_l_INNER_HTML_bang(self, interp: IInterpreter): + string = interp.stack_pop() + element = interp.stack_pop() + element.setInnerHTML(string) + interp.stack_push(element) + + # ( element string -- element ) + def word_l_INNER_TEXT_bang(self, interp: IInterpreter): + string = interp.stack_pop() + element = interp.stack_pop() + element.setInnerText(string) + interp.stack_push(element) + + # ( element -- string ) + def word_INNER_HTML(self, interp: IInterpreter): + element = interp.stack_pop() + result = element.getInnerHTML() + interp.stack_push(result) + + # ( element string position -- element ) + # Position is one of: 'beforebegin', 'afterbegin', 'beforeend', 'afterend' + def word_l_INSERT_ADJ_HTML(self, interp: IInterpreter): + position = interp.stack_pop() + string = interp.stack_pop() + element = interp.stack_pop() + element.insertAdjacentHTML(position, string) + interp.stack_push(element) + + # ( element key val -- element ) + # ( element pairs -- element ) + def word_l_ATTR_bang(self, interp: IInterpreter): + val = interp.stack_pop() + if isinstance(val, list): + pairs = val + else: + key = interp.stack_pop() + pairs = [[key, val]] + + element = interp.stack_pop() + + for pair in pairs: + element.setAttribute(pair[0], pair[1]) + interp.stack_push(element) + + # ( element attr -- val ) + def word_ATTR(self, interp: IInterpreter): + key = interp.stack_pop() + element = interp.stack_pop() + result = element.getAttribute(key) + interp.stack_push(result) + + # ( element -- val ) + def word_VALUE(self, interp: IInterpreter): + element = interp.stack_pop() + result = element.value + interp.stack_push(result) + + # ( element class -- element ) + # ( element classes -- element ) + def word_l_ADD_CLASS(self, interp: IInterpreter): + css_class = interp.stack_pop() + element = interp.stack_pop() + + if 
isinstance(css_class, list): + classes = css_class + else: + classes = [css_class] + + element.addClasses(classes) + interp.stack_push(element) + + # ( element -- classes ) + def word_CLASSES(self, interp: IInterpreter): + element = interp.stack_pop() + result = element.getClasses() + interp.stack_push(result) + + # ( element class -- element ) + # ( element classes -- element ) + def word_l_REMOVE_CLASS(self, interp: IInterpreter): + css_class = interp.stack_pop() + element = interp.stack_pop() + + if isinstance(css_class, list): + classes = css_class + else: + classes = [css_class] + element.removeClasses(classes) + + interp.stack_push(element) + + # ( markdown -- html) + def word_MARKDOWN_to_HTML(self, interp: IInterpreter): + markdown_content = interp.stack_pop() + result = markdown.markdown(markdown_content) + interp.stack_push(result) + + # ( element -- html ) + # ( elements -- html ) + def word_RENDER(self, interp: IInterpreter): + element = interp.stack_pop() + if isinstance(element, list): + elements = element + else: + elements = [element] + + result = '' + for e in elements: + result += e.render() + interp.stack_push(result) + + # ( path -- ) + def word_JS_PATH_bang(self, interp: IInterpreter): + """Sets the URL path where the Forthic JS interpreter is""" + path = interp.stack_pop() + self.js_path = path + + # ( forthic -- script_element ) + def word_RUN_FORTHIC_JS(self, interp: IInterpreter): + """Creates a script element that sets up a Forthic interpreter on the browser + and runs a forthic string + """ + forthic = interp.stack_pop() + result = Element('script') + result.setAttribute('type', 'module') + random_str = random.uniform(0, 1) + result.setInnerHTML( + f''' + import {{ Interpreter }} from "{self.js_path}/interpreter.mjs?version={random_str}"; + let interp = new Interpreter(); + interp.run(`{forthic}`) + .then(() => {{ + window.FORTHIC_INTERP = interp + }})''' + ) + interp.stack_push(result) + + # ( id label forthic -- ForthicButton ) + def 
word_FORTHIC_BUTTON(self, interp: IInterpreter): + forthic = interp.stack_pop() + label = interp.stack_pop() + html_id = interp.stack_pop() + + result = ForthicButton(interp, html_id, label, forthic) + interp.stack_push(result) + + # ( id label forthic_word -- ForthicButton ) + def word_ASYNC_FORTHIC_BUTTON(self, interp: IInterpreter): + forthic_word = interp.stack_pop() + label = interp.stack_pop() + html_id = interp.stack_pop() + result = AsyncForthicButton(interp, html_id, label, forthic_word) + interp.stack_push(result) + + # ( forthic button_id -- ) + def word_RUN_ASYNC_BUTTON(self, interp: IInterpreter): + button_id = interp.stack_pop() + forthic = interp.stack_pop() + + def get_button_states(): + interp.run(f"'{ASYNC_BUTTON_KEY}' cache.CACHE@ [] REC DEFAULT") + res = interp.stack_pop() + return res + + def store_button_states(button_id, state_info): + """state_info is a dict with the following fields: state, Optional[message]""" + button_states = get_button_states() + button_states[button_id] = state_info + interp.stack_push(button_states) + interp.run(f"'{ASYNC_BUTTON_KEY}' cache.CACHE!") + + def is_running(): + button_states = get_button_states() + state_info = button_states.get(button_id) + if not state_info: + state_info = {} + state = state_info.get('state') + res = state == 'RUNNING' + return res + + if is_running(): + return + + try: + store_button_states(button_id, {'state': 'RUNNING'}) + interp.run(forthic) + store_button_states(button_id, {'state': ''}) + except Exception as e: + store_button_states(button_id, {'state': 'ERROR', 'message': str(e)}) + + +HTML_FORTHIC = ''' +: COMMON-TYPES ["H1" "H2" "H3" "H4" "H5" "H6" + "P" "UL" "OL" "LI" + "A" "SPAN" + "TABLE" "TR" "TH" "TD" + "DIV" "SECTION" + "STYLE" "IMG" "CANVAS" + "SCRIPT" + ] ; + +[ "type" ] VARIABLES +: FDEFINE-ELEMENT (type !) 
[": " type @ " '" type @ "' ELEMENT ;"] CONCAT ; +COMMON-TYPES "FDEFINE-ELEMENT INTERPRET" FOREACH + +: SVG "svg" ELEMENT [["xmlns" "http://www.w3.org/2000/svg"] ["version" "1.1"]] List['Element']: + return self.childNodes + + def setInnerHTML(self, string: str): + self.childNodes = [] + self.innerHTML = string + + def setInnerText(self, string: str): + self.setInnerHTML(html.escape(string)) + + def getInnerHTML(self) -> str: + if self.innerHTML is not None: + return self.innerHTML + + result = '' + for child in self.childNodes: + result += child.render() + return result + + def insertAdjacentHTML(self, position: str, string: str): + if position == 'beforebegin': + self.beforeBegin += string + elif position == 'afterbegin': + raw_items: List[Element] = [RawHtml(string)] + self.childNodes = raw_items + self.childNodes + elif position == 'beforeend': + self.childNodes.append(RawHtml(string)) + elif position == 'afterend': + self.afterEnd += string + else: + raise HtmlModuleError(f'Unhandled position: {position}') + + def getAttribute(self, key: str) -> str: + result = self.attributes.get(key) + if result is None: + result = '' + return result + + def setAttribute(self, key, val: Optional[str] = None): + if val is None: + del self.attributes[key] + return + self.attributes[key] = val + + def addClasses(self, classes: List[str]): + element_classes = self.getClasses() + for item in classes: + if item not in element_classes: + element_classes.append(item) + self.setClasses(element_classes) + + def getClasses(self) -> List[str]: + class_string = self.attributes.get('class') + if not class_string: + return [] + result = class_string.strip().split(' ') + return result + + def setClasses(self, classes: List[str]): + class_string = ' '.join(classes) + self.attributes['class'] = class_string + + def removeClasses(self, classes: List[str]): + element_classes = self.getClasses() + remaining_classes = [] + for item in element_classes: + if item not in classes: + 
remaining_classes.append(item) + self.setClasses(remaining_classes) + + def render(self): + def get_attr_string() -> str: + keys = sorted(self.attributes.keys()) + fragments = [] + for key in keys: + fragment = f'{key}="{self.attributes[key]}"' + if self.attributes[key] is None: + fragment = key + fragments.append(fragment) + res = ' '.join(fragments) + if res != '': + res = ' ' + res + return res + + tag = self.tagName.lower() + attributes = get_attr_string() + + if tag in VOID_ELEMENTS: + result = f'<{tag}{attributes}>' + else: + result = self.beforeBegin + result += f'<{tag}{attributes}>' + result += self.getInnerHTML() + result += f'' + result += self.afterEnd + return result + + +class RawHtml(Element): + def __init__(self, string: str): + self.html = string + + def render(self) -> str: + return self.html + + +class ForthicButton: + def __init__(self, interp: IInterpreter, html_id: str, label: str, forthic: str): + self.html_id = html_id + self.label = label + self.forthic = forthic + + self.options = { + 'reload_page': False, + 'post_data_ids': None, + 'confirmable': False, + } + + def __getitem__(self, key: str) -> Optional[bool]: + result = self.options.get(key) + return result + + def __setitem__(self, key: str, value: Optional[bool]): + if key not in self.options: + raise RuntimeError(f"Unknown ForthicButton option: '{key}'") + self.options[key] = value + + def render(self) -> str: + def get_done_code() -> str: + if self.options['reload_page']: + res = ''' + window.location.reload(true); + ''' + else: + res = ''' + $('#{html_id}').prop("disabled", false); + alert("Done!"); + '''.format( + html_id=self.html_id + ) + return res + + def get_confirm_code() -> str: + res = 'true' + if self.options['confirmable']: + res = 'confirm("Are you sure?")' + return res + + def make_func_gather_data() -> str: + res = 'function gather_data() {\n' + res += ' var fields = %s\n;' % json.dumps( + self.options['post_data_ids'] + ) + res += ' var res = {};\n' + res += " 
fields.forEach(f => res[f] = $('#' + f).val());\n" + res += ' return res;\n' + res += '}\n' + return res + + def make_func_prepend_data() -> str: + res = 'function prepend_data(forthic) {\n' + if self.options['post_data_ids']: + res += make_func_gather_data() + res += 'var data = gather_data();\n' + res += "var res = `'${JSON.stringify(data)}' ${forthic}`;\n" + else: + res += 'var res = forthic;\n' + res += ' return res;\n' + res += '}\n' + return res + + result = ''' + + + '''.format( + html_id=self.html_id, + label=self.label, + forthic=self.forthic, + done_code=get_done_code(), + confirm_code=get_confirm_code(), + func_prepend_data=make_func_prepend_data(), + ) + return result + + +class AsyncForthicButton: + def __init__(self, interp: IInterpreter, html_id: str, label: str, forthic: str): + self.html_id = html.escape(html_id) + self.label = label + self.forthic = forthic + self.interp = interp + + # Ensure that `forthic` is just a Forthic word + if ' ' in forthic or "'" in forthic or '"' in forthic: + raise InvalidForthicWordError(forthic) + + self.options = { + 'reload_page': False, + 'post_data_ids': None, + 'confirmable': False, + } + + def __getitem__(self, key: str) -> Optional[bool]: + result = self.options.get(key) + return result + + def __setitem__(self, key: str, value: Optional[bool]): + if key not in self.options: + raise RuntimeError(f"Unknown AsyncForthicButton option: '{key}'") + self.options[key] = value + + def get_async_state(self) -> Dict[str, str]: + self.interp.run(f"'{ASYNC_BUTTON_KEY}' cache.CACHE@") + button_states = self.interp.stack_pop() + if button_states is None: + button_states = {} + result = button_states.get(self.html_id) + if not result: + result = {} + return result + + def render(self) -> str: + def get_done_code() -> str: + if self.options['reload_page']: + res = ''' + window.location.reload(true); + ''' + else: + res = ''' + $('#{html_id}').prop("disabled", false); + alert("Done!"); + '''.format( + html_id=self.html_id + ) 
+ return res + + def get_confirm_code() -> str: + res = 'true' + if self.options['confirmable']: + res = 'confirm("Are you sure?")' + return res + + def make_func_gather_data() -> str: + res = 'function gather_data() {\n' + res += ' var fields = %s\n;' % json.dumps( + self.options['post_data_ids'] + ) + res += ' var res = {};\n' + res += " fields.forEach(f => res[f] = $('#' + f).val());\n" + res += ' return res;\n' + res += '}\n' + return res + + def make_func_prepend_data() -> str: + res = 'function prepend_data(forthic) {\n' + if self.options['post_data_ids']: + res += make_func_gather_data() + res += 'var data = gather_data();\n' + res += "var res = `'${JSON.stringify(data)}' ${forthic}`;\n" + else: + res += 'var res = forthic;\n' + res += ' return res;\n' + res += '}\n' + return res + + async_state = self.get_async_state() + + result = f''' + + + + + + ''' + return result diff --git a/forthic-py/src/forthic/modules/intake_module.py b/forthic-py/src/forthic/modules/intake_module.py new file mode 100644 index 0000000..3869b62 --- /dev/null +++ b/forthic-py/src/forthic/modules/intake_module.py @@ -0,0 +1,267 @@ +from ..module import Module +from ..interfaces import IInterpreter + +# NOTE: This requires the gsheet module to be used in the app module +FORTHIC = """ +# ----- gsheet setup ----------------------------------------------------------------------------------------- +["gsheet" "cache"] USE-MODULES + +["url" "configs" "config" "tab" "admins" "content" "type" "google_context" "info"] VARIABLES + +: url-GSHEET url @ gsheet.SPREADSHEET; +: tab-TAB url-GSHEET tab @ gsheet.TAB@; + +# NOTE: These are the fields used by the ConfigurableForm field records +: HEADER-INFO [ + ["Field ID" "A unique identifier for the given field"] + ["Jira Field" "The exact name of a Jira field as it appears in your Jira instance UI (or something like customfield_1234)"] + ["Field Label" "The of the field shown to users of the form"] + ["Field Description" "Extra information that shows 
up beneat the field label in your form"] + ["Is Required?" "All required fields must be filled out in order to submit the form"] + ["Field Type" "The type of field control: Dropdown, TextInput, Textarea, RadioCheckbox, MultiCheckbox, DateInput, Attachment"] + ["Field Content" "Default content for a TextInput/Textarea. For Dropdowns and checkboxes, this specifies the options, one per line"] + ["Max Input Length" "The maximum number of characters for a TextInput or Textarea"] + ["Condition" "A Forthic predicate that indicates of a field should be hidden or shown"] +] REC; + +: HEADERS HEADER-INFO KEYS; +: HEADER-NOTE-CONTENTS HEADER-INFO VALUES; + +# ===== Adding headers and header notes +: A1 [["startRowIndex" 0] ["startColumnIndex" 0]] REC; +: content-CELL-DATA [["note" content @]] REC; +: HEADER-NOTES-ROW-DATA [["values" HEADER-NOTE-CONTENTS "(content !) content-CELL-DATA" MAP]] REC; + +: HEADER-NOTES-CHANGE [ + ["range" A1] + ["rows" [HEADER-NOTES-ROW-DATA]] + ["fields" "note"] +] REC; + +: HEADER-NOTES-BATCH-UPDATE [ ["updateCells" HEADER-NOTES-CHANGE] ] REC; +: tab-ADD-HEADER tab-TAB [HEADERS] gsheet.ROWS!; + +# ===== Adding field type data validation +: FIELD-TYPES ["Dropdown" "TextInput" "Textarea" "RadioCheckbox" "MultiCheckbox" "DateInput" "Attachment" "Markdown" "Html"]; +: type-CONDITION-VALUE [["userEnteredValue" type @]] REC; +: FIELD-TYPE-COLUMN HEADERS "Field Type" KEY-OF; + +: FIELD-TYPE-RANGE [ + ["startRowIndex" 1] + ["startColumnIndex" FIELD-TYPE-COLUMN] + ["endColumnIndex" FIELD-TYPE-COLUMN 1 +] +] REC; + +: FIELD-TYPE-CONDITION [ + ["type" "ONE_OF_LIST"] + ["values" FIELD-TYPES "(type !) type-CONDITION-VALUE" MAP] +] REC; + +: FIELD-TYPE-RULE [ + ["condition" FIELD-TYPE-CONDITION] + ["showCustomUi" TRUE] +] REC; + +: FIELD-TYPE-DATA-VALIDATION-CHANGE [ + ["range" FIELD-TYPE-RANGE] + ["rule" FIELD-TYPE-RULE] +] REC; + +: FIELD-TYPE-BATCH-UPDATE [ ["setDataValidation" FIELD-TYPE-DATA-VALIDATION-CHANGE] ] REC; + +# ----- Is Required? 
validation +: IS-REQUIRED-COLUMN HEADERS "Is Required?" KEY-OF; + +: IS-REQUIRED-RANGE [ + ["startRowIndex" 1] + ["startColumnIndex" IS-REQUIRED-COLUMN] + ["endColumnIndex" IS-REQUIRED-COLUMN 1 +] +] REC; + +: IS-REQUIRED-CONDITION [ + ["type" "ONE_OF_LIST"] + ["values" ["Yes" "No" ""] "(type !) type-CONDITION-VALUE" MAP] +] REC; + +: IS-REQUIRED-RULE [["condition" IS-REQUIRED-CONDITION] ["showCustomUi" TRUE]] REC; + +: IS-REQUIRED-DATA-VALIDATION-CHANGE [ + ["range" IS-REQUIRED-RANGE] + ["rule" IS-REQUIRED-RULE] +] REC; + +: IS-REQUIRED-BATCH-UPDATE [["setDataValidation" IS-REQUIRED-DATA-VALIDATION-CHANGE]] REC; + + +# ===== Style header +: HEADER-ROW-RANGE [["startRowIndex" 0] ["endRowIndex" 1]] REC; +: HEADER-BG-COLOR [["red" 0.24] ["green" 0.52] ["blue" 0.38]] REC; +: HEADER-FG-COLOR [["red" 1.0] ["green" 1.0] ["blue" 1.0]] REC; + +: HEADER-TEXT-FORMAT [ + ["foregroundColor" HEADER-FG-COLOR] + ["bold" TRUE] +] REC; + +: HEADER-CELL-STYLE [ + ["userEnteredFormat" [ + ["backgroundColor" HEADER-BG-COLOR] + ["horizontalAlignment" "CENTER"] + ["textFormat" HEADER-TEXT-FORMAT] + ] REC] + +] REC; + +: HEADER-STYLE-CHANGE [ + ["range" HEADER-ROW-RANGE] + ["cell" HEADER-CELL-STYLE] + ["fields" "userEnteredFormat(backgroundColor,textFormat,horizontalAlignment)"] +] REC; + +: HEADER-STYLE-BATCH-UPDATE [["repeatCell" HEADER-STYLE-CHANGE]] REC; +: COLUMN-RANGE [["dimension" "COLUMNS"]] REC; + +: COLUMN-WIDTH-CHANGE [ + ["range" COLUMN-RANGE] + ["properties" [["pixelSize" 150]] REC] + ["fields" "pixelSize"] +] REC; + +: COLUMN-WIDTH-BATCH-UPDATE [ ["updateDimensionProperties" COLUMN-WIDTH-CHANGE] ] REC; + +: tab-STYLE-TAB tab-TAB [ + HEADER-NOTES-BATCH-UPDATE + FIELD-TYPE-BATCH-UPDATE + IS-REQUIRED-BATCH-UPDATE + HEADER-STYLE-BATCH-UPDATE + COLUMN-WIDTH-BATCH-UPDATE +] gsheet.BATCH-UPDATE-TAB!; + +: tab-CREATE-IF-NEEDED [[FALSE "url-GSHEET tab @ gsheet.ENSURE-TAB! 
POP"]] REC tab-TAB >BOOL REC@ INTERPRET; +: tab-ADD-TEMPLATE-IF-NEEDED [[FALSE "tab-ADD-HEADER tab-STYLE-TAB"]] REC tab-TAB gsheet.ROWS >BOOL REC@ INTERPRET; +: tab-ENSURE-TAB tab-CREATE-IF-NEEDED tab-ADD-TEMPLATE-IF-NEEDED; +: tab-FIELD-RECORDS tab-TAB HEADERS gsheet.!NULL-ON-ERROR gsheet.RECORDS [] DEFAULT; + +: config-TAB config @ 'tab' REC@; +: config-STEP-TABS config @ 'step_tabs' REC@; +: config-TABS [config-TAB config-STEP-TABS] FLATTEN ">BOOL" SELECT; + +: " SELECT "dup_fields" TICKET-VALUE (fields !) [ + [0 "NULL"] + [1 "fields @ 0 NTH 'value' REC@"] + [2 "fields-MULTI-VALUE"] +] REC fields @ LENGTH [0 1 2] RANGE-INDEX REC@ INTERPRET; + +["key" "value"] VARIABLES +: key/value-ARRAYIFY [ + [TRUE [value @] FLATTEN] + [FALSE value @] +] REC key @ ["Labels" "Component/s"] IN REC@; + +: |ARRAYIFY-IF-NEEDED "(value ! key !) key/value-ARRAYIFY" !WITH-KEY MAP; + +: info-PROJECT info @ ['formConfig' 'Project'] REC@; +: info-ISSUE-TYPE info @ ['formConfig' 'Issue Type'] REC@; +: INFO>TICKET-RECORD (info !) + info-FIELDS-BY-JIRA-FIELD "FIELDS>TICKET-VALUE" MAP |ARRAYIFY-IF-NEEDED + info-PROJECT "Project" ATTACHMENTS (info !) + info-ATTACHMENT-FIELDS "'Field ID' REC@" MAP "(key !) info @ ['valuesById' key @] REC@" MAP + [] REC "UNION" REDUCE +; + +["jira_field"] VARIABLES +: FORM-CONFIG-VALUE info @ ['formConfig' jira_field @] REC@; +: APPEND-FORM-CONFIG-FIELD (jira_field !) [ + [TRUE "DUP jira_field @ REC@ FORM-CONFIG-VALUE APPEND FLATTEN |NON-NULL jira_field @ TICKET-RECORD" + + # ( info -- attachments_record ) + # This constructs a record mapping filename to attachment. 
class IntakeModule(Module):
    """Forthic words that support building intake forms.

    All of the behavior lives in the module's FORTHIC source; this class just
    registers it under the name 'intake'.
    """

    def __init__(self, interp: IInterpreter):
        super().__init__('intake', interp, FORTHIC)


class ISOWeekModule(Module):
    """Implements words to manipulate ISO Week information

    See https://en.wikipedia.org/wiki/ISO_week_date for more info
    """

    def __init__(self, interp: IInterpreter):
        super().__init__('isoweek', interp, ISOWEEK_FORTHIC)
        self.add_module_word('WEEK-NUM', self.word_WEEK_NUM)
        self.add_module_word('QUARTER-START', self.word_QUARTER_START)
        self.add_module_word('QUARTER-END', self.word_QUARTER_END)
        self.add_module_word('QUARTER/YEAR', self.word_QUARTER_slash_YEAR)
        self.add_module_word('QUARTER', self.word_QUARTER)
        self.add_module_word('YEAR', self.word_YEAR)

    # ( date -- num )
    def word_WEEK_NUM(self, interp: IInterpreter):
        """Push the ISO week number (1-53) of the date on the stack."""
        date = interp.stack_pop()
        interp.stack_push(self.date_to_week_num(date))

    # ( date -- date )
    def word_QUARTER_START(self, interp: IInterpreter):
        """Push the Monday that starts the ISO quarter containing `date`."""
        date = interp.stack_pop()
        week_num = self.date_to_week_num(date)

        # Weeks 1-13 -> Q1, 14-26 -> Q2, 27-39 -> Q3, 40+ -> Q4.
        # BUG FIX: week 53 (long years) made the original formula produce
        # quarter 5 and a KeyError below; clamp it into Q4.
        quarter_num = min(4, (week_num - 1) // 13 + 1)
        quarter_to_week_num = {
            1: 1,
            2: 14,
            3: 27,
            4: 40
        }
        day_of_week = self.get_day_of_week(date)
        start_week = quarter_to_week_num[quarter_num]
        delta_days = 7 * (week_num - start_week) + (day_of_week - 1)

        interp.stack_push(date - datetime.timedelta(delta_days))

    # ( date -- date )
    def word_QUARTER_END(self, interp: IInterpreter):
        """Push the Sunday that ends the ISO quarter containing `date`."""
        date = interp.stack_pop()
        week_num = self.date_to_week_num(date)

        # BUG FIX: clamp week 53 into Q4 (see word_QUARTER_START)
        quarter_num = min(4, (week_num - 1) // 13 + 1)
        quarter_to_week_num = {
            1: 13,
            2: 26,
            3: 39,
            4: 52
        }
        day_of_week = self.get_day_of_week(date)
        end_week = quarter_to_week_num[quarter_num]
        if quarter_num == 4 and self.is_long_year(date.timetuple().tm_year):
            end_week = 53

        delta_days = 7 * (end_week - week_num) - (day_of_week - 1) + 6  # End of ISO Week is Sunday

        interp.stack_push(date + datetime.timedelta(delta_days))

    # ( date qtr_offset -- [qtr year] )
    def word_QUARTER_slash_YEAR(self, interp: IInterpreter):
        """Push a [quarter, year] pair for `date` shifted by `qtr_offset` quarters."""
        qtr_offset = interp.stack_pop()
        date = interp.stack_pop()
        interp.stack_push([self.get_quarter(date, qtr_offset), self.get_year(date, qtr_offset)])

    # ( date qtr_offset -- qtr )
    def word_QUARTER(self, interp: IInterpreter):
        qtr_offset = interp.stack_pop()
        date = interp.stack_pop()
        interp.stack_push(self.get_quarter(date, qtr_offset))

    # ( date qtr_offset -- year )
    def word_YEAR(self, interp: IInterpreter):
        qtr_offset = interp.stack_pop()
        date = interp.stack_pop()
        interp.stack_push(self.get_year(date, qtr_offset))

    # ----------------------------------------
    # Helpers
    def get_quarter(self, date, qtr_offset=0):
        """Returns the quarter number for the current date

        If `qtr_offset` is specified, applies that offset to the quarter number
        """
        week_num = self.date_to_week_num(date)
        if 1 <= week_num <= 13:
            quarter = 1
        elif week_num <= 26:
            quarter = 2
        elif week_num <= 39:
            quarter = 3
        else:
            quarter = 4

        return ((quarter - 1) + qtr_offset) % 4 + 1

    def get_year(self, date, qtr_offset=0):
        """Returns the year for the current date

        If `qtr_offset` is specified, applies that offset (13 weeks/quarter) to the year
        """
        res_date = date + datetime.timedelta(qtr_offset * 13 * 7)
        return res_date.timetuple().tm_year

    def get_day_of_week(self, date):
        """ISO day of week: Monday == 1 ... Sunday == 7."""
        return date.timetuple().tm_wday + 1

    def is_long_year(self, year):
        """A year has 53 ISO weeks iff Jan 1 or Dec 31 falls on a Thursday.

        BUG FIX: `tm_wday` counts Monday as 0, so Thursday is 3. The original
        compared against 4 (Friday) and therefore misclassified every long
        year (e.g. 2015, 2020), corrupting week-53 and week-0 handling in
        date_to_week_num and QUARTER-END.
        """
        jan_1 = datetime.date(year, 1, 1)
        dec_31 = datetime.date(year, 12, 31)
        return jan_1.timetuple().tm_wday == 3 or dec_31.timetuple().tm_wday == 3

    # See Algorithms section of https://en.wikipedia.org/wiki/ISO_week_date
    def date_to_week_num(self, date):
        """Return the ISO week number (1-53) for `date`."""
        year = date.timetuple().tm_year
        day_of_week = self.get_day_of_week(date)

        day_of_year = date.timetuple().tm_yday
        week_number = (day_of_year - day_of_week + 10) // 7

        # If week number is 53 and this isn't a long year, the date is in the first week of the next year
        if week_number == 53 and not self.is_long_year(year):
            week_number = 1

        # If week number is 0, the date is in the last week of the previous year
        if week_number == 0:
            week_number = 53 if self.is_long_year(year - 1) else 52
        return week_number


ISOWEEK_FORTHIC = ''
+ """ + def __init__(self, interp: IInterpreter): + super().__init__('jinja', interp, JINJA_FORTHIC) + self.add_module_word('RENDER', self.word_RENDER) + + # ( template_contents kw_args -- string ) + def word_RENDER(self, interp: IInterpreter): + kw_args = interp.stack_pop() + template_contents = interp.stack_pop() + + template = jinja2.Template(template_contents) + result = template.render(kw_args) + interp.stack_push(result) + + +JINJA_FORTHIC = '' diff --git a/forthic-py/src/forthic/modules/jira_module.py b/forthic-py/src/forthic/modules/jira_module.py new file mode 100644 index 0000000..d2337bc --- /dev/null +++ b/forthic-py/src/forthic/modules/jira_module.py @@ -0,0 +1,1111 @@ +import re +import requests +import datetime +import numbers +import pytz +import base64 +from dateutil import parser +from ..module import Module +from ..global_module import drill_for_value +from collections import defaultdict +from ...utils.errors import ( + UnauthorizedError, + JiraError +) +from ..interfaces import IInterpreter +from typing import List, Any, Dict, Optional + + +UNIX_EPOCH = datetime.datetime.utcfromtimestamp(0).replace(tzinfo=None) +DEFAULT_MAX_TICKETS = 1000 + + +class JiraModule(Module): + """This implements support for common use cases when interacting with Jira. + + See `docs/modules/jira_module.md` for detailed descriptions of each word. 
+ """ + def __init__(self, interp: IInterpreter): + super().__init__('jira', interp, JIRA_FORTHIC) + self.context_stack: List['JiraContext'] = [] + + self.flags = {} + self.get_flags() + + self.add_module_word('PUSH-CONTEXT!', self.word_PUSH_CONTEXT_bang) + self.add_module_word('POP-CONTEXT!', self.word_POP_CONTEXT_bang) + + self.add_module_word('HOST', self.word_HOST) + self.add_module_word('NUM-TICKETS', self.word_NUM_TICKETS) + self.add_module_word('SEARCH', self.word_SEARCH) + self.add_module_word('DEFAULT-SEARCH', self.word_DEFAULT_SEARCH) + self.add_module_word('RENDERED-SEARCH', self.word_RENDERED_SEARCH) + + self.add_module_word('CREATE', self.word_CREATE) + self.add_module_word('UPDATE', self.word_UPDATE) + self.add_module_word('ADD-WATCHER', self.word_ADD_WATCHER) + self.add_module_word('LINK-ISSUES', self.word_LINK_ISSUES) + self.add_module_word('VOTES', self.word_VOTES) + self.add_module_word('ADD-ATTACHMENTS', self.word_ADD_ATTACHMENTS) + self.add_module_word('COMMENTS', self.word_COMMENTS) + self.add_module_word('ADD-COMMENT', self.word_ADD_COMMENT) + self.add_module_word('TRANSITIONS', self.word_TRANSITIONS) + self.add_module_word('TRANSITION!', self.word_TRANSITION_bang) + + self.add_module_word('CHANGELOG', self.word_CHANGELOG) + self.add_module_word('FIELD-AS-OF', self.word_FIELD_AS_OF) + self.add_module_word('FIELD-AS-OF-SINCE', self.word_FIELD_AS_OF_SINCE) + self.add_module_word('FIELD-CHANGE-AS-OF', self.word_FIELD_CHANGE_AS_OF) + self.add_module_word('TIME-IN-STATE', self.word_TIME_IN_STATE) + + self.add_module_word('FIELD-TAG', self.word_FIELD_TAG) + self.add_module_word('REMOVE-FIELD-TAGS', self.word_REMOVE_FIELD_TAGS) + self.add_module_word(' 2: + raise RuntimeError("Invalid base64 string") + + decoded_content = base64.b64decode(bare_content) + files = { + "file": (name, decoded_content, "application-type") + } + res = context.requests_post(api_url, headers=headers, files=files) + if not res.ok: + raise JiraError(f"Unable to add attachment: 
# ( ticket_key -- comments )
def word_COMMENTS(self, interp: IInterpreter):
    """Push the list of comment records on the given ticket."""
    ticket_key = interp.stack_pop()
    context = self.current_context()

    response = context.requests_get(f'/rest/api/2/issue/{ticket_key}/comment')
    if not response.ok:
        raise JiraError(f"Unable to get comments for {ticket_key}: {response.reason}")
    interp.stack_push(response.json()['comments'])

# ( ticket_key comment -- )
def word_ADD_COMMENT(self, interp: IInterpreter):
    """Post `comment` to the given ticket; raises JiraError on failure."""
    comment = interp.stack_pop()
    ticket_key = interp.stack_pop()
    context = self.current_context()

    response = context.requests_post(
        f'/rest/api/2/issue/{ticket_key}/comment',
        json={'body': comment},
    )
    if not response.ok:
        raise JiraError(f"Unable to post comment for {ticket_key}: {response.reason}")

# ( ticket_key -- transitions )
def word_TRANSITIONS(self, interp: IInterpreter):
    """Push the workflow transitions currently available for the ticket."""
    ticket_key = interp.stack_pop()
    context = self.current_context()

    response = context.requests_get(f'/rest/api/2/issue/{ticket_key}/transitions')
    if not response.ok:
        raise JiraError(f"Unable to get transitions for {ticket_key}: {response.reason}")
    interp.stack_push(response.json()['transitions'])

# ( ticket_key transition_id -- )
def word_TRANSITION_bang(self, interp: IInterpreter):
    """Apply the workflow transition `transition_id` to the ticket."""
    transition_id = interp.stack_pop()
    ticket_key = interp.stack_pop()
    context = self.current_context()

    response = context.requests_post(
        f'/rest/api/2/issue/{ticket_key}/transitions',
        json={'transition': {"id": transition_id}},
    )
    if not response.ok:
        raise JiraError(f"Unable to transition {ticket_key}: {response.reason}")
# ( date changes field -- value )
def word_FIELD_AS_OF(self, interp: IInterpreter):
    """Push the value `field` had as of `date`, given changelog `changes`."""
    field = interp.stack_pop()
    changes = interp.stack_pop()
    date = interp.stack_pop()

    field_changes = select_field_changes(field, changes)
    change = change_containing_date(field_changes, date)
    result = change['to'] if change else None
    interp.stack_push(result)

# ( as_of_date changes field since_date -- value )
def word_FIELD_AS_OF_SINCE(self, interp: IInterpreter):
    """Returns change as of a date since a date

    If `since_date` > `as_of_date`, this will have the same behavior as `FIELD-AS-OF`
    """
    since_date = interp.stack_pop()
    field = interp.stack_pop()
    changes = interp.stack_pop()
    date = interp.stack_pop()

    field_changes = select_field_changes(field, changes)
    change = change_containing_date(field_changes, date)
    result = None
    # NOTE: assumes change['date'] is a datetime and since_date a date -- confirm with callers
    if change and change['date'].date() >= since_date:
        result = change['to']
    interp.stack_push(result)

# ( date changes field -- change )
def word_FIELD_CHANGE_AS_OF(self, interp: IInterpreter):
    """Push the change record (not just the value) in effect at `date`."""
    field = interp.stack_pop()
    changes = interp.stack_pop()
    date = interp.stack_pop()

    field_changes = select_field_changes(field, changes)
    interp.stack_push(change_containing_date(field_changes, date))

# ( resolution changes field -- record )
def word_TIME_IN_STATE(self, interp: IInterpreter):
    """Push a record mapping each `field` state to hours spent in that state.

    Each change should have one of the following forms:
      {'date': datetime.datetime(2021, 1, 15, 8, 31, 15, tzinfo=tzutc()), 'field': 'status', 'from': 'Open', 'to': 'In Progress', 'from_': '1', 'to_': '3'}
      {'date': 1626830097, 'field': 'status', 'from': 'Open', 'to': 'In Progress', 'from_': '1', 'to_': '3'}

    If `resolution` is falsy the ticket is considered still open, so the final
    state's clock runs until "now".
    """
    field = interp.stack_pop()
    changes = interp.stack_pop()
    resolution = interp.stack_pop()

    result: Dict[str, float] = {}

    def select_changes_for_field(changes, field):
        return [c for c in changes if c['field'] == field]

    # BUG FIX: the original guarded `len(changes) <= 1` *before* filtering to
    # `field`, so a changelog with >=2 total changes but <2 changes for this
    # particular field crashed (IndexError in the consistency check / NameError
    # in duration computation). Filter first, then guard.
    field_changes = select_changes_for_field(changes, field)
    if len(field_changes) <= 1:
        interp.stack_push(result)
        return

    def check_consistency(changes):
        """Check that each change's 'from' matches the previous change's 'to'."""
        cur_value = changes[0]['to']
        for change in changes[1:]:
            if change['from'] != cur_value:
                raise JiraError(f"TIME-IN-STATE expected next value to be '{cur_value}' not '{change['from']}'")
            cur_value = change['to']

    def make_duration_rec(state, duration_h):
        return {
            "state": state,
            "duration_h": duration_h
        }

    def ensure_seconds(obj):
        """Convert a datetime or numeric timestamp to epoch seconds."""
        if isinstance(obj, datetime.datetime):
            return (obj.replace(tzinfo=None) - UNIX_EPOCH).total_seconds()
        if isinstance(obj, numbers.Number):
            return obj
        raise JiraError(f"Invalid date type: {obj}")

    def compute_duration_h(cur_time, prev_time):
        return (ensure_seconds(cur_time) - ensure_seconds(prev_time)) / 3600

    def compute_duration_recs(changes):
        res = []
        cur_timestamp = changes[0]['date']
        for change in changes[1:]:
            res.append(make_duration_rec(change['from'], compute_duration_h(change['date'], cur_timestamp)))
            cur_timestamp = change['date']

        # If the ticket is resolved, the last state's duration is zero.
        # Otherwise, the clock is still running
        if resolution:
            res.append(make_duration_rec(change['to'], 0))
        else:
            now = datetime.datetime.now().replace(tzinfo=pytz.UTC)
            res.append(make_duration_rec(change['to'], compute_duration_h(now, cur_timestamp)))
        return res

    def consolidate_changes(duration_recs):
        res = defaultdict(float)
        for rec in duration_recs:
            res[rec['state']] += rec['duration_h']
        # Return a plain dict so callers don't inherit defaultdict's
        # auto-inserting lookup behavior
        return dict(res)

    # Compute result
    check_consistency(field_changes)
    duration_recs = compute_duration_recs(field_changes)
    interp.stack_push(consolidate_changes(duration_recs))

# ( ticket field key -- value )
def word_FIELD_TAG(self, interp: IInterpreter):
    """Push the value of field tag `key` (e.g. "[rank: 1]") found in ticket[field]."""
    key = interp.stack_pop()
    field = interp.stack_pop()
    ticket = interp.stack_pop()

    interp.stack_push(self.get_field_tag_value(ticket.get(field), key))
# ( ticket_rec field key value -- ticket )
def word_l_FIELD_TAG_bang(self, interp: IInterpreter):
    """Set field tag `[key: value]` inside ticket[field], replacing an existing tag.

    If the tag is absent it is appended after a blank line.
    """
    value = interp.stack_pop()
    key = interp.stack_pop()
    field = interp.stack_pop()
    ticket = interp.stack_pop()

    # Get field value from ticket
    field_value = ticket[field]
    if not field_value or field_value == 'None':
        field_value = ''

    # BUG FIX: the original pattern was r'\[%s:.*\]' % key with re.DOTALL:
    #  - `key` was interpolated unescaped, so regex metacharacters in a key
    #    could break or hijack the match;
    #  - greedy `.*` under DOTALL matched through to the LAST ']' in the text,
    #    clobbering everything between this tag and any later tag.
    # Escape the key and match lazily within a single tag instead.
    pattern = re.compile(r'\[%s:.*?\]' % re.escape(key), re.DOTALL)
    new_field_tag = f'[{key}: {value}]'
    (field_value, num) = pattern.subn(new_field_tag, field_value)
    if num == 0:
        field_value += '\n\n' + new_field_tag

    # Return result
    ticket[field] = field_value
    interp.stack_push(ticket)

# ( rapidViewId -- data )
def word_SPRINTQUERY(self, interp: IInterpreter):
    """Push the Greenhopper sprintquery payload for a rapid view."""
    rapid_view_id = interp.stack_pop()
    context = self.current_context()
    res = context.requests_get(f"/rest/greenhopper/1.0/sprintquery/{rapid_view_id}")
    if not res.ok:
        raise JiraError(
            f"Can't get sprintquery for rapid_view_id: {rapid_view_id}: {res.text}"
        )
    interp.stack_push(res.json())

# ( rapidViewId sprintId -- data )
def word_RAPID_CHARTS_SPRINTREPORT(self, interp: IInterpreter):
    """Push the Greenhopper sprint report for a rapid view + sprint."""
    sprint_id = interp.stack_pop()
    rapid_view_id = interp.stack_pop()

    context = self.current_context()
    api_url = f"/rest/greenhopper/1.0/rapid/charts/sprintreport?rapidViewId={rapid_view_id}&sprintId={sprint_id}"
    res = context.requests_get(api_url)
    if not res.ok:
        raise JiraError(
            f"Can't get sprint report for rapid_view_id: {rapid_view_id}, sprint_id: {sprint_id}: {res.text}"
        )
    interp.stack_push(res.json())
# ( rapidViewId sprintId -- data )
def word_RAPID_CHARTS_SCOPECHANGEBURNDOWNCHART(self, interp: IInterpreter):
    """Push the Greenhopper scope-change burndown chart data."""
    sprint_id = interp.stack_pop()
    rapid_view_id = interp.stack_pop()

    context = self.current_context()
    api_url = f"/rest/greenhopper/1.0/rapid/charts/scopechangeburndownchart.json?rapidViewId={rapid_view_id}&sprintId={sprint_id}"
    res = context.requests_get(api_url)
    if not res.ok:
        raise JiraError(
            f"Can't get scope burndown chart for rapid_view_id: {rapid_view_id}, sprint_id: {sprint_id}: {res.text}"
        )
    interp.stack_push(res.json())

# ( num_tickets -- )
def word_bang_MAX_TICKETS(self, interp: IInterpreter):
    """Override the per-search ticket cap used by the SEARCH words."""
    self.flags['max_tickets'] = interp.stack_pop()

# =================================
# Helpers

def current_context(self):
    """Return the innermost pushed Jira context, failing loudly if none."""
    if not self.context_stack:
        raise JiraError('Use jira.PUSH-CONTEXT! to provide a Jira context')
    return self.context_stack[-1]

def get_field_tag_value(self, field_text, key, ignore_case=True):
    """Extract the value of field tag `key` from `field_text`.

    Field tags look like "[rank: 1]". Returns a single space ' ' when the tag
    is missing or empty -- callers rely on this sentinel, so it is preserved.
    """
    text = field_text or ''

    matches = re.finditer(
        r'\[([^\]]+):\s*([^\]]*)\]', text, re.MULTILINE | re.DOTALL
    )
    tags = {m.group(1): m.group(2) for m in matches}

    if ignore_case:
        tags = {k.lower(): v for k, v in tags.items()}
        value = tags.get(key.lower())
    else:
        value = tags.get(key)

    return value if value else ' '

def get_normalized_data(self, fields: List[str]) -> Dict[str, Any]:
    """Map each user-facing field name to its Jira field keys and flatten them."""
    field_names_to_keys = {name: self.get_field_keys(name) for name in fields}
    normalized_fields = [key for keys in field_names_to_keys.values() for key in keys]
    return {
        "field_names_to_keys": field_names_to_keys,
        "normalized_fields": normalized_fields
    }
def simplify_value(self, user_field: str, val: Any):
    """
    This extracts simple values from Jira value records for a user-facing
    field name. See normalize_value for more info.
    """
    field = self.normalize_field(user_field)
    return self.simplify_field_key_value(field, val)

def simplify_field_key_value(self, field_key: str, val: Any):
    """
    This extracts simple values from Jira value records for a raw field key.
    See normalize_value for more info.
    """
    context = self.current_context()

    schema = context.field_to_schema.get(field_key)
    if not schema:
        schema = {'type': '?'}

    def simplify_schema_value(schema_type: str, value: Any) -> Any:
        # NOTE: all falsy values (None, '', 0, []) collapse to None -- this is
        # preserved from the original implementation; callers may rely on it
        if not value:
            return None
        if schema_type == 'array':
            return [simplify_schema_value(schema['items'], v) for v in value]

        # BUG FIX: the original tested membership with `in ('timetracking')`,
        # `in ('option')`, etc. Without a trailing comma those parentheses are
        # NOT tuples, so the checks were substring tests against a string
        # (e.g. any schema_type that is a substring of 'timetracking' matched).
        # Use real tuple membership / equality instead.
        if schema_type in ('date', 'datetime', 'string', 'number', 'timetracking', 'option-with-child'):
            return value
        if schema_type == 'option':
            return value['value']
        if schema_type == 'project':
            return value['key']
        if isinstance(value, dict) and 'name' in value:
            return value['name']
        if isinstance(value, dict) and 'displayName' in value:
            return value['displayName']
        return value

    return simplify_schema_value(schema['type'], val)

def normalize_value(self, field: str, val: Any) -> Any:
    """
    Most field values will be single items. Some will be arrays of items:

    * option-with-child: ["parent-value" "child-value"]
    * timetracking: ["original-estimate" "remaining-estimate"]

    We assume that any value that can take an ID or name will take a name.

    The value of the record fields must be normalized according to the schema
    of the Jira field. Here are examples of normalized values for given types:

    * array: An array of values like [{"name": "jsmith"}, {"name": "bjones"}]
    * date: "2011-10-03"
    * datetime: "2011-10-19T10:29:29.908+1100"
    * group: {"name": "jira-devs"}
    * issuetype: {"name": "bug"}
    * number: 42.07
    * option: {"value": "green"}
    * option-with-child: {"value": "green", "child": {"value":"blue"} }
    * priority: {"name": "Critical"}
    * parent: {"key": "PROJ-1234"}
    * project: {"key": "JIRA"}
    * resolution: {"name": "Fixed"}
    * securitylevel: {"name": "?"}
    * string: "Howdy"
    * timetracking: {"originalEstimate": "1d 2h", "remainingEstimate": "3h 25m"}
    * user: {"name": "jsmith"}
    * version: {"name": "5.0"}
    """
    context = self.current_context()
    schema = context.field_to_schema.get(field)

    # Handle "parent" field as a special case
    if field == 'parent':
        schema = {'type': 'parent'}

    if not schema:
        raise JiraError(f'Could not find schema for field {field}')

    def schematize_value(schema_type: str, value: Any) -> Any:
        if schema_type == 'array':
            # Unlike simplify_field_key_value, an empty array normalizes to []
            if not value:
                return []
            return [schematize_value(schema['items'], v) for v in value]
        if schema_type in ('date', 'datetime', 'string', 'number'):
            return value
        # BUG FIX (same as simplify_field_key_value): `in ('timetracking')`
        # etc. were substring checks, not tuple membership; use equality.
        if schema_type == 'timetracking':
            return {
                'originalEstimate': value[0],
                'remainingEstimate': value[1],
            }
        if schema_type == 'option':
            return {'value': value}
        if schema_type == 'option-with-child':
            return {'value': value[0], 'child': {'value': value[1]}}
        if schema_type in ('project', 'parent'):
            return {'key': value}
        return {'name': value}

    return schematize_value(schema['type'], val)
self.normalize_value( + normalized_field, value + ) + return result + + def num_tickets(self, jql: str): + """Uses jql to search Jira, returning records with the specified fields""" + if jql.strip() == '': + raise JiraError('JQL must not be blank') + + context = self.current_context() + + req_data = { + 'jql': jql, + 'maxResults': 1, + 'fields': ['key'] + } + + with requests.Session() as session: + api_url = '/rest/api/2/search' + response = context.requests_post(api_url, json=req_data, session=session) + + if not response.ok: + raise JiraError( + f"Problem doing Jira search '{jql}': {response.text}" + ) + res_data = response.json() + result = res_data.get('total') + return result + + def search(self, jql: str, fields_: List[str], expand: List[str] = [], max_tickets: int = DEFAULT_MAX_TICKETS): + """Uses jql to search Jira, returning records with the specified fields""" + if jql.strip() == '': + raise JiraError('JQL must not be blank') + + fields = fields_.copy() + context = self.current_context() + + # id and key always comes back in the results. 
Specifying it will cause the value to be nulled out + if 'key' in fields: + fields.remove('key') + if 'id' in fields: + fields.remove('id') + + normalized_data = self.get_normalized_data(fields) + normalized_fields = normalized_data['normalized_fields'] + field_names_to_keys = normalized_data['field_names_to_keys'] + + batch_size = 200 + + def run_batch(start_at, session): + req_data = { + 'jql': jql, + 'startAt': start_at, + 'maxResults': batch_size, + 'fields': normalized_fields, + } + + if len(expand) > 0: + req_data['expand'] = expand + + api_url = '/rest/api/2/search' + response = context.requests_post( + api_url, json=req_data, session=session + ) + + if not response.ok: + raise JiraError(f"Problem doing Jira search '{jql}': {response.text}") + res_data = response.json() + res = res_data['issues'] + if res_data['total'] > max_tickets: + raise JiraError(f"Number of tickets {res_data['total']} exceeds max num tickets {max_tickets}. Use !MAX-TICKETS to override.") + return res + + def run(session: requests.Session): + res = [] + start_at = 0 + while True: + batch = run_batch(start_at, session) + res += batch + if len(batch) < batch_size: + break + start_at += batch_size + return res + + with requests.Session() as session: + issues = run(session) + + def issue_data_to_record(issue_data: Dict[str, Any]) -> Dict[str, Any]: + # Prefill result with id and key + res = { + 'id': issue_data['id'], + 'key': issue_data['key'] + } + + def get_value(field_key): + raw_value = drill_for_value(issue_data, ['fields', field_key]) + res = self.simplify_field_key_value(field_key, raw_value) + return res + + # Map field back to what the user provided + for field in fields: + field_keys = field_names_to_keys[field] + field_values = [get_value(k) for k in field_keys] + non_null_field_values = list(filter(lambda x: x is not None, field_values)) + + if len(non_null_field_values) > 0: + res[field] = non_null_field_values[0] + else: + res[field] = None + return res + + result = 
[issue_data_to_record(d) for d in issues] + return result + + def get_field_keys(self, field: str) -> str: + """Returns all of the field keys corresponding to this field name""" + context = self.current_context() + if field in context.field_map: + return field + ids = context.field_name_to_id.get(field) + + if ids is None: + return [field] + else: + return ids + + # NOTE: This is needed to map fields into field keys for creating/updating tickets + def normalize_field(self, field: str) -> str: + """If field doesn't correspond to a field ID, search for name of field in field map""" + context = self.current_context() + if field in context.field_map: + return field + ids = context.field_name_to_id.get(field) + + if ids is None: + return field + + if len(ids) > 1: + raise JiraError( + f"Jira field '{field}' corresponds to multiple field ids: {ids}" + ) + + result = ids[0] + return result + + def get_flags(self): + flags = self.flags.copy() + self.flags = { + "max_tickets": DEFAULT_MAX_TICKETS, + } + return flags + + +# TODO: The JiraContext needs to store info about the current ticket limit. It should also allow you to +# override this. +# The JiraContext should store information about the number of tickets in the last query (even if it +# didn't return all of them) +# There should also be an option to raise Exception on limit violation or return truncated results. 
I'm +# thinking that raising an Exception is the correct behavior +# Should define the __get__ and __set__ methods so we can use List[Dict[str, Any]]: + """Given a list of changes and a `field`, return only changes for `field` + """ + if not changes: + changes = [] + result = [] + for c in changes: + if c['field'] == field: + result.append(c) + return result + + +def change_containing_date(field_changes: List[Dict[str, Any]], date: datetime.date) -> Optional[Dict[str, Any]]: + """Given a list of field changes and a `date`, returns the change containing `date` + """ + res = None + for c in field_changes: + change_date = c['date'].date() + if change_date > date: + break + else: + res = c + return res diff --git a/forthic-py/src/forthic/modules/org_module.py b/forthic-py/src/forthic/modules/org_module.py new file mode 100644 index 0000000..3200298 --- /dev/null +++ b/forthic-py/src/forthic/modules/org_module.py @@ -0,0 +1,365 @@ +import collections +from ..module import Module +from ..interfaces import IInterpreter +from typing import List, Callable, Dict, Optional, Any + + +class OrgModule(Module): + def __init__(self, interp: IInterpreter): + super().__init__('org', interp) + + self.org_contexts: List['OrgContext'] = [] + + self.flags = { + "with_lead": None, + } + + self.add_module_word('PUSH-CONTEXT!', self.word_PUSH_CONTEXT_bang) + self.add_module_word('POP-CONTEXT!', self.word_POP_CONTEXT_bang) + self.add_module_word('ROOT-MANAGERS', self.word_ROOT_MANAGERS) + self.add_module_word('FULL-ORG', self.word_FULL_ORG) + self.add_module_word('ORG-MANAGERS', self.word_ORG_MANAGERS) + self.add_module_word('DIRECTS', self.word_DIRECTS) + self.add_module_word('DIRECT-MANAGERS', self.word_DIRECT_MANAGERS) + self.add_module_word('GROUP-BY-LEADS', self.word_GROUP_BY_LEADS) + self.add_module_word('ITEM>LEAD', self.word_ITEM_to_LEAD) + self.add_module_word('MANAGER', self.word_MANAGER) + self.add_module_word('CHAIN', self.word_CHAIN) + self.add_module_word('CHAIN-KEY-FUNC', 
self.word_CHAIN_KEY_FUNC) + self.add_module_word('USERS-MANAGERS', self.word_USERS_MANAGERS) + + self.add_module_word('!WITH-LEAD', self.word_bang_WITH_LEAD) + + # ( org_context -- ) + def word_PUSH_CONTEXT_bang(self, interp: IInterpreter): + """Sets context for org computations""" + org_context = interp.stack_pop() + self.org_contexts.append(org_context) + + # ( -- ) + def word_POP_CONTEXT_bang(self, interp: IInterpreter): + """Restores previous context for org computations""" + self.org_contexts.pop() + + # ( -- username) + def word_ROOT_MANAGERS(self, interp: IInterpreter): + """Returns root manager of org context""" + org_context = self.current_context() + result = org_context.root_managers() + interp.stack_push(result) + + # (manager -- usernames) + def word_FULL_ORG(self, interp: IInterpreter): + """Returns all usernames reporting up to manager""" + manager = interp.stack_pop() + org_context = self.current_context() + flags = self.get_flags() + + result = org_context.full_org(manager) + if flags.get('with_lead'): + result = [manager] + result + interp.stack_push(result) + + # (manager -- usernames) + def word_ORG_MANAGERS(self, interp: IInterpreter): + """Returns all manager usernames reporting up to manager""" + manager = interp.stack_pop() + org_context = self.current_context() + flags = self.get_flags() + + result = org_context.org_managers(manager) + + if not flags.get('with_lead'): + result = result[1:] + + interp.stack_push(result) + + # (manager -- usernames) + def word_DIRECTS(self, interp: IInterpreter): + """Returns usernames of direct reports of a manager + """ + manager = interp.stack_pop() + org_context = self.current_context() + flags = self.get_flags() + + result = org_context.get_directs(manager) + if flags.get('with_lead'): + result = [manager] + result + interp.stack_push(result) + + # (manager -- usernames) + def word_DIRECT_MANAGERS(self, interp: IInterpreter): + """Returns usernames of direct reports of a manager who are also managers + + 
NOTE: This also returns the manager at the end of the list + """ + manager = interp.stack_pop() + org_context = self.current_context() + flags = self.get_flags() + + result = org_context.get_direct_managers(manager) + if flags.get('with_lead'): + result = [manager] + result + interp.stack_push(result) + + # ( items field leads default_lead -- record ) + def word_GROUP_BY_LEADS(self, interp: IInterpreter): + default_lead = interp.stack_pop() + leads = interp.stack_pop() + field = interp.stack_pop() + items = interp.stack_pop() + + org_context = self.current_context() + result = org_context.group_by_leads(items, field, leads, default_lead) + interp.stack_push(result) + + # ( item field leads default_lead -- lead ) + def word_ITEM_to_LEAD(self, interp: IInterpreter): + default_lead = interp.stack_pop() + leads = interp.stack_pop() + field = interp.stack_pop() + item = interp.stack_pop() + + org_context = self.current_context() + result = org_context.item_to_lead(item, field, leads, default_lead) + interp.stack_push(result) + + # ( username -- manager ) + def word_MANAGER(self, interp: IInterpreter): + username = interp.stack_pop() + + org_context = self.current_context() + result = org_context.get_manager(username) + interp.stack_push(result) + + # ( username root_username -- usernames ) + def word_CHAIN(self, interp: IInterpreter): + root_username = interp.stack_pop() + username = interp.stack_pop() + org_context = self.current_context() + result = org_context.get_chain(username, root_username) + interp.stack_push(result) + + # ( root_username -- key_func ) + def word_CHAIN_KEY_FUNC(self, interp: IInterpreter): + """Returns a function that can be used as a key function in SORT + + The comparator returns an integer giving the distance from a user to the root. 
+ """ + root_username = interp.stack_pop() + org_context = self.current_context() + + def result(username: str) -> int: + chain = org_context.get_chain(username, root_username) + if username == root_username: + res = 0 + else: + res = len(chain) + return res + + interp.stack_push(result) + + # ( -- user_mgr_pairs ) + def word_USERS_MANAGERS(self, interp: IInterpreter): + """Returns an array of user/manager pairs + """ + org_context = self.current_context() + result = org_context.get_users_managers() + interp.stack_push(result) + + # ( -- ) + def word_bang_WITH_LEAD(self, interp: IInterpreter): + self.flags["with_lead"] = True + + # ================================= + # Helpers + def get_flags(self): + flags = self.flags.copy() + self.flags = {} + return flags + + def current_context(self): + if not self.org_contexts: + raise RuntimeError( + 'Use org.PUSH-CONTEXT! to provide an Org context' + ) + + result = self.org_contexts[-1] + return result + + +class OrgContext: + def __init__(self, get_users_managers: Callable[[], List[List[str]]]): + """The `get_users_managers` function returns a list of pairs [username, manager_username]. + This information is used to construct a hierarchy. 
+ """ + self.get_users_managers = get_users_managers + self.user_managers = self.get_users_managers() + + def make_user_to_manager() -> Dict[str, str]: + res: Dict[str, str] = {} + for p in self.user_managers: + res[p[0]] = p[1] + return res + + def make_manager_to_users() -> Dict[str, List[str]]: + res: Dict[str, List[str]] = collections.defaultdict(list) + for p in self.user_managers: + res[p[1]].append(p[0]) + return res + + self.user_to_manager = make_user_to_manager() + self.managers = list(set(self.user_to_manager.values())) + self.manager_to_users = make_manager_to_users() + + def gather_direct_managers() -> Dict[Optional[str], List[str]]: + res = collections.defaultdict(list) + for m in self.managers: + res[self.user_to_manager.get(m)].append(m) + return res + + self.direct_managers = gather_direct_managers() + + def root_managers(self): + managers = list(set(self.user_to_manager.values())) + result = [m for m in managers if self.user_to_manager.get(m) is None] + return result + + def get_manager(self, username: str) -> Optional[str]: + result = self.user_to_manager.get(username) + return result + + def org_managers(self, root_manager: str) -> List[str]: + """Returns all managers that are part of a root_manager's org, including the root_manager""" + if root_manager not in self.direct_managers: + return [root_manager] + + def add_directs(manager, res): + if manager not in self.direct_managers: + return + directs = self.direct_managers[manager] + for m in directs: + if m != manager: + res.append(m) + add_directs(m, res) + + result = [] + result.append(root_manager) + add_directs(root_manager, result) + return result + + def full_org(self, manager: str) -> List[str]: + """Returns a list of people rolling up to a manager""" + org_managers = self.org_managers(manager) + + def get_lead(username): + manager = self.user_to_manager[username] + if manager in org_managers: + return manager + return None + + result = [] + for username in self.user_to_manager: + lead = 
get_lead(username) + if lead: + result.append(username) + return result + + def get_directs(self, username: str) -> List[str]: + """Returns direct reports of a user""" + result = self.manager_to_users.get(username) + if result is None: + result = [] + return result + + def get_direct_managers(self, username: str) -> List[str]: + """Returns direct reports of a user who are managers""" + result = self.direct_managers.get(username) + if not result: + result = [] + result = result[:] + result.sort() + return result + + def group_by_leads(self, items: List[Dict[str, Any]], field: str, leads: List[str], default_lead: str) -> Dict[str, List[Any]]: + manager_to_lead: Dict[str, str] = {} + lead: Optional[str] = None + + if not items: + items = [] + + if not leads: + leads = [] + + for lead in leads: + managers = self.org_managers(lead) + for m in managers: + manager_to_lead[m] = lead + + # Group items by lead + result: Dict[str, List[Any]] = collections.defaultdict(list) + for lead in leads: + result[lead] = [] + + for item in items: + if field is None: + username = item + else: + username = item[field] + + lead = manager_to_lead.get(username) + + # If user is not a manger, get their manager and map to lead + if not lead: + manager = self.user_to_manager.get(username) + if manager: + lead = manager_to_lead.get(manager) + + if not lead: + lead = default_lead + + result[lead].append(item) + return result + + def item_to_lead(self, item: Dict[str, Any], field: str, leads: List[str], default_lead: str): + if field is None: + username = item + else: + username = item[field] + + if not leads: + leads = [] + + # Recursively climb org tree until we find a lead in `leads` + def get_lead(username: Optional[str]) -> str: + if not username: + return default_lead + + if username in leads: + return username + + username = self.user_to_manager.get(username) + if not username: + return default_lead + return get_lead(username) + + result = get_lead(username) + return result + + def 
get_chain(self, username: str, root_username: str) -> List[str]: + result: List[str] = [] + cur_user = username + + while True: + result.append(cur_user) + cur_user = self.user_to_manager.get(cur_user) # type: ignore + + if cur_user == root_username: + result.append(root_username) + break + + if not cur_user: + break + + result.reverse() + return result diff --git a/forthic-py/src/forthic/modules/stats_module.py b/forthic-py/src/forthic/modules/stats_module.py new file mode 100644 index 0000000..09ae658 --- /dev/null +++ b/forthic-py/src/forthic/modules/stats_module.py @@ -0,0 +1,28 @@ +"""Implements module to compute statistics +""" +import statistics +from ..module import Module +from ..interfaces import IInterpreter + + +class StatsModule(Module): + def __init__(self, interp: IInterpreter): + super().__init__("stats", interp, FORTHIC) + self.add_module_word("MEAN", self.word_MEAN) + self.add_module_word("MEDIAN", self.word_MEDIAN) + return + + # ( numbers -- mean ) + def word_MEAN(self, interp: IInterpreter): + numbers = interp.stack_pop() + result = statistics.mean(numbers) + interp.stack_push(result) + + # ( numbers -- median ) + def word_MEDIAN(self, interp: IInterpreter): + numbers = interp.stack_pop() + result = statistics.median(numbers) + interp.stack_push(result) + + +FORTHIC = "" diff --git a/forthic-py/src/forthic/modules/svg_module.py b/forthic-py/src/forthic/modules/svg_module.py new file mode 100644 index 0000000..4ca69bc --- /dev/null +++ b/forthic-py/src/forthic/modules/svg_module.py @@ -0,0 +1,109 @@ +from ..module import Module +from ..interfaces import IInterpreter + + +FORTHIC = "" + + +class SvgError(RuntimeError): + pass + + +class SvgModule(Module): + """This implements construction of SVG elements + """ + + def __init__(self, interp: IInterpreter): + super().__init__('svg', interp, FORTHIC) + + # These are set by "flag words" to change the behavior of the words in this module + self.flags = { + } + + self.add_module_word('SVG>IMG-SRC', 
self.word_SVG_to_IMG_SRC) + + # Chart support + self.add_module_word('AXIS', self.word_AXIS) + self.add_module_word('VAL>PIX', self.word_VAL_to_PIX) + self.add_module_word('TICK-VALUES', self.word_TICK_VALUES) + + # ( svg -- img_src ) + def word_SVG_to_IMG_SRC(self, interp: IInterpreter): + svg = interp.stack_pop() + result = f"data:image/svg+xml;utf8,{escape_svg(svg)}" + interp.stack_push(result) + + # ( pix_y_0 pix_y_max y_values num_ticks -- Axis ) + def word_AXIS(self, interp: IInterpreter): + num_ticks = interp.stack_pop() + y_values = interp.stack_pop() + pix_y_max = interp.stack_pop() + pix_y_0 = interp.stack_pop() + result = Axis(pix_y_0, pix_y_max, y_values, num_ticks) + interp.stack_push(result) + + # ( Axis val -- pix ) + def word_VAL_to_PIX(self, interp: IInterpreter): + val = interp.stack_pop() + axis = interp.stack_pop() + result = axis.val_to_pix(val) + interp.stack_push(result) + + # ( Axis -- tick_values ) + def word_TICK_VALUES(self, interp: IInterpreter): + axis = interp.stack_pop() + result = axis.tick_values() + interp.stack_push(result) + + +# ----- Helpers ---------------------------------------------------------------------------------------------- +def escape_svg(string): + result = string.replace('<', '%3C').replace('>', '%3E').replace('{', '%7B').replace('}', '%7D').replace('#', '%23').replace('"', '"').replace("'", ''') + return result + + +class Axis: + """This is a support class that figures out how to map data values to chart pixel values + """ + def __init__(self, pix_0, pix_max, values, num_ticks): + self.pix_0 = pix_0 + self.pix_max = pix_max + self.values = values + + if num_ticks < 2: + self.num_ticks = 2 + else: + self.num_ticks = num_ticks + + if not values: + raise SvgError("svg Axis: values not specified") + + self.val_0 = min(self.values) + self.val_max = max(self.values) * 1.05 + + self.tick_step = (self.val_max - self.val_0) / (self.num_ticks - 1) + + # Condition tick step when it's close to 5 or 10 + # TODO: Write this in 
a generic way + if self.tick_step > 3 and self.tick_step < 5: + self.tick_step = 5 + elif self.tick_step > 5 and self.tick_step < 10: + self.tick_step = 10 + + # Adjust val max based on tick step + self.val_max = self.val_0 + self.tick_step * (self.num_ticks - 1) + + if self.val_max != self.val_0: + self.pix_per_val = (self.pix_max - self.pix_0) / (self.val_max - self.val_0) + else: + self.pix_per_val = 1 + + def val_to_pix(self, val): + result = self.pix_0 + (val - self.val_0) * self.pix_per_val + return result + + def tick_values(self): + result = [] + for i in range(self.num_ticks): + result.append(self.val_0 + self.tick_step * i) + return result diff --git a/forthic-py/src/forthic/modules/trino_module.py b/forthic-py/src/forthic/modules/trino_module.py new file mode 100644 index 0000000..e119417 --- /dev/null +++ b/forthic-py/src/forthic/modules/trino_module.py @@ -0,0 +1,107 @@ +from ..module import Module +from ..interfaces import IInterpreter +from typing import List +import trino +import pandas as pd +import json + + +class TrinoModule(Module): + """This implements a trino client + """ + def __init__(self, interp: IInterpreter): + super().__init__('trino', interp, "") + self.context_stack: List['TrinoContext'] = [] + + self.flags = {} + self.get_flags() + + self.add_module_word('PUSH-CONTEXT!', self.word_PUSH_CONTEXT_bang) + self.add_module_word('POP-CONTEXT!', self.word_POP_CONTEXT_bang) + self.add_module_word('QUERY', self.word_QUERY) + + # ( context -- ) + def word_PUSH_CONTEXT_bang(self, interp: IInterpreter): + context = interp.stack_pop() + self.context_stack.append(context) + + # ( -- ) + def word_POP_CONTEXT_bang(self, interp: IInterpreter): + self.context_stack.pop() + + # ( query -- result ) + def word_QUERY(self, interp: IInterpreter): + query = interp.stack_pop() + context = self.current_context() + + context.connect() + df = context.query(query) + result = json.loads(df.to_json()) + context.close() + interp.stack_push(result) + + # 
================================= + # Helpers + def get_flags(self): + flags = self.flags.copy() + self.flags = {} + return flags + + def current_context(self): + if not self.context_stack: + raise RuntimeError('Use trino.PUSH-CONTEXT! to provide a TrinoContext') + + result = self.context_stack[-1] + return result + + +class TrinoContext: + """NOTE: Override this to use""" + def __init__(self): + self.conn = None + self.cursor = None + + def connect(self): + self.conn = trino.dbapi.connect( + host=self.get_host(), + port=self.get_port(), + user=self.get_username(), + verify=False, + catalog=self.get_catalog(), + http_scheme='https', + auth=trino.auth.BasicAuthentication(self.get_username(), self.get_password()),) + self.cursor = self.conn.cursor() + + def close(self): + self.cursor.close() + + def query(self, q): + self.cursor.execute(q) + rows = self.cursor.fetchall() + result = pd.DataFrame(list(rows)) + result.columns = [desc[0] for desc in self.cursor.description] + return result + + # ============================= + # Connection settings to override + def get_field(self): + return None + + def get_host(self): + return None + + def get_port(self): + return None + + def get_catalog(self): + return None + + def get_username(self): + return None + + def get_password(self): + return None + + # Supply the path to the cert file to use. 
Use False to skip verification + def get_cert_verify(self): + return False diff --git a/forthic-py/src/forthic/modules/ui_module.py b/forthic-py/src/forthic/modules/ui_module.py new file mode 100644 index 0000000..f94c60c --- /dev/null +++ b/forthic-py/src/forthic/modules/ui_module.py @@ -0,0 +1,54 @@ +"""Implements module specifying UI frameworks +""" +from ..module import Module +from ..interfaces import IInterpreter + + +class UIModule(Module): + def __init__(self, interp: IInterpreter): + super().__init__("ui", interp, FORTHIC) + self.add_module_word("FORTHIC-REACT-v1", self.word_FORTHIC_REACT_v1) + self.add_module_word("DATE NONE !=" SELECT; +: |IN-PAST "TODAY <" SELECT ; + +["color"] VARIABLES +: COLOR-VALUES [ + [ "red" 1 ] + [ "yellow" 2 ] + [ "green" 3 ] +] REC; + +# Returns the color value for a given color +: COLOR>VALUE COLOR-VALUES SWAP |LOWER REC@ 100 DEFAULT; + +: COLOR-TITLE [ color @ COLOR>VALUE " - " color @ ] CONCAT; +: COLOR-LOZENGE ( color ! ) [ [ "None" "--" ] ] REC color @ REC@ + [ "{status:colour=" color @ "|title=" COLOR-TITLE "}" ] CONCAT DEFAULT; + +: STATUS>COLOR [ + [ "Blocked" "Red" ] + [ "Resolved" "Blue" ] + [ "Closed" "Blue" ] +] REC SWAP REC@ "Gray" DEFAULT ; + +: WIKI-LI "# " SWAP CONCAT ; # ( str -- str ) + +: GREEN "#00875A" ; +: YELLOW "#FFAB00" ; +: RED "#DE350B" ; +: BLUE "#B3D4FF" ; +: GRAY "gray" ; +: WHITE "white" ; + +: COLOR-BOX {confluence COLOR-BOX}; +: COLOR-RISK-BOXES [ + [ "Red" "RED COLOR-BOX" ] + [ "Blocked" "RED COLOR-BOX" ] + + [ "Yellow" "YELLOW COLOR-BOX" ] + [ "At-Risk" "YELLOW COLOR-BOX" ] + [ "Not on track" "YELLOW COLOR-BOX" ] + + [ "Green" "GREEN COLOR-BOX" ] + [ "On Track" "GREEN COLOR-BOX" ] + + [ "Blue" "BLUE COLOR-BOX" ] + [ "Completed" "BLUE COLOR-BOX" ] + [ "Canceled" "BLUE COLOR-BOX" ] + + [ "Gray" "GRAY COLOR-BOX" ] + [ "Light Gray" "GRAY COLOR-BOX" ] +] REC; + +: COLOR-RISK-BOX COLOR-RISK-BOXES SWAP REC@ "WHITE COLOR-BOX" DEFAULT INTERPRET ; # ( color -- color_box ) + +[ "color_update" ] 
VARIABLES +: HOVER-COLOR color_update @ 0 NTH ; +: HOVER-UPDATE color_update @ 1 NTH ; +: HOVER-COLOR-BAR ( color_update ! ) HOVER-COLOR COLOR-RISK-BOX HOVER-UPDATE "hover_text" CHILDREN CHILD-JQL child_fields @ jira.SEARCH; +: TICKETS-BY-PARENT (child_fields ! fchild_jql ! parent_tickets !) PARENT-KEYS DUP "PARENT-KEY>CHILDREN" MAP ZIP REC; + + +# -- Details Table +: WIKI-LIST "WIKI-LI" MAP /N JOIN " " CONCAT; # (items -- wiki_list) + +["as_of" "as_of_field" "as_of_ticket_key" "as_of_fields"] VARIABLES +: AS-OF-FIELDS! as_of_fields !; # (fields --) +: TICKET-CHANGELOG as_of_ticket_key @ as_of_fields @ jira.CHANGELOG; +: AS-OF-IN-FUTURE? as_of @ TODAY >=; + +: FIELD-CACHE-KEY [ as_of_ticket_key @ as_of_field @ as_of @ DATE>STR ] "_" JOIN ; +: CACHE/GET-FIELD as_of @ TICKET-CHANGELOG as_of_field @ jira.FIELD-AS-OF DUP FIELD-CACHE-KEY cache.CACHE!; +: FIELD-AS-OF-FORTHIC + [ [ TRUE "project @ as_of_field @ REC@" ] + [ FALSE "FIELD-CACHE-KEY cache.CACHE@ 'CACHE/GET-FIELD' *DEFAULT" ] + ] REC AS-OF-IN-FUTURE? REC@ ; +: FIELD-AS-OF (as_of_ticket_key ! as_of !) FIELD-AS-OF-FORTHIC INTERPRET ; +: AS-OF-FIELD! as_of_field !; # (field -- ) + +[ + "PAST-DATES" + "|w/STATUS" + "|w/RISK-FACTOR" + "|w/LABEL" + "|w/out-RISK-FACTOR" + "|w/DUE-DATE" + "|IN-PAST" + "TICKETS-BY-PARENT" + "STATUS>COLOR" + "AS-OF-FIELD!" + "AS-OF-FIELDS!" 
+ "FIELD-AS-OF" + "WIKI-LIST" + "COLOR-LOZENGE" + "HOVER-COLOR-BAR" +] EXPORT + +""" diff --git a/forthic-py/src/global_module.py b/forthic-py/src/global_module.py new file mode 100644 index 0000000..98fffb1 --- /dev/null +++ b/forthic-py/src/global_module.py @@ -0,0 +1,2715 @@ +import re +import getpass +import random +import math +import pytz +import pdb +import datetime +from dateutil import parser +import urllib +import json +import io +import markdown +import csv +from collections import defaultdict +import statistics + +from .module import Module, PushValueWord +from .profile import ProfileAnalyzer +from .interfaces import IInterpreter + +from typing import Optional, Union, Any, List + +DLE = chr(16) # ASCII DLE char + + +class StackDump(RuntimeError): + pass + + +class GlobalModuleError(RuntimeError): + pass + + +class InvalidTimeError(GlobalModuleError): + pass + + +# TODO: Ensure that None flows through all words properly + +class GlobalModule(Module): + """This implements the standard `global` module words + + The `GlobalModule` is a special module because it always the last one searched for Forthic words. Because + of this, it is also responsible for handling "literal words" that push themselves onto the stack. These + are words like "1", "2.5", "06-05-2021", etc. + + The `GlobalModule` also implements base words that might usually be built into the language, like + `VARIABLES`, `!`, `@`, etc. + + See `docs/modules/global_module.md` for detailed descriptions of each word. + """ + def __init__(self, interp, timezone): + super().__init__('', interp) + self.timezone = timezone + + # "Screens" of Forthic code can be loaded from disk/memory. Since screens can load other screens, + # we need to be careful not to get into a loop. The `active_screens` keeps track of this. + self.active_screens = set() + + # `literal_handlers` convert tokens into values when no other words can be found. 
+ # A Forthic interpreter can be customized here to recoginize domain-specific literals. + self.literal_handlers = [ + self.to_bool, + self.to_int, + self.to_float, + self.to_date, + self.to_time, + ] + + # Module Flags: These are all None but are settable for one-time use to change the behavior + # of module words + self.flags = { + "with_key": None, + "push_error": None, + "comparator": None, + "push_rest": None, + "depth": None, + } + + # ---------------- + # Base words + self.add_module_word('VARIABLES', self.word_VARIABLES) + self.add_module_word('!', self.word_bang) + self.add_module_word('@', self.word_at) + self.add_module_word('!@', self.word_bang_at) + self.add_module_word('STR', self.word_to_STR) + self.add_module_word('URL-ENCODE', self.word_URL_ENCODE) + self.add_module_word('URL-DECODE', self.word_URL_DECODE) + + # ---------------- + # Tree words + self.add_module_word('TRAVERSE-DEPTH-FIRST', self.word_TRAVERSE_DEPTH_FIRST) + self.add_module_word('SUBTREES', self.word_SUBTREES) + + # ---------------- + # Misc words + self.add_module_word('NULL', self.word_NULL) + self.add_module_word('QUOTE-CHAR', self.word_QUOTE_CHAR) + self.add_module_word('QUOTED', self.word_QUOTED) + self.add_module_word('DEFAULT', self.word_DEFAULT) + self.add_module_word('*DEFAULT', self.word_star_DEFAULT) + self.add_module_word('FIXED', self.word_to_FIXED) + + # TODO: Add support for serializing dates and datetimes + self.add_module_word('>JSON', self.word_to_JSON) + self.add_module_word('JSON>', self.word_JSON_to) + + self.add_module_word('>TSV', self.word_to_TSV) + self.add_module_word('TSV>', self.word_TSV_to) + self.add_module_word('RECS>TSV', self.word_RECS_to_TSV) + self.add_module_word('TSV>RECS', self.word_TSV_to_RECS) + self.add_module_word('.s', self.word_dot_s) + + # ---------------- + # Date/time words + self.add_module_word('AM', self.word_AM) + self.add_module_word('PM', self.word_PM) + self.add_module_word('NOW', self.word_NOW) + self.add_module_word('>TIME', 
self.word_to_TIME) + self.add_module_word('STR', self.word_TIME_to_STR) + self.add_module_word('>DATE', self.word_to_DATE) + self.add_module_word('TODAY', self.word_TODAY) + self.add_module_word('MONDAY', self.word_MONDAY) + self.add_module_word('TUESDAY', self.word_TUESDAY) + self.add_module_word('WEDNESDAY', self.word_WEDNESDAY) + self.add_module_word('THURSDAY', self.word_THURSDAY) + self.add_module_word('FRIDAY', self.word_FRIDAY) + self.add_module_word('SATURDAY', self.word_SATURDAY) + self.add_module_word('SUNDAY', self.word_SUNDAY) + self.add_module_word('NEXT', self.word_NEXT) + + self.add_module_word('ADD-DAYS', self.word_ADD_DAYS) + self.add_module_word('SUBTRACT-DATES', self.word_SUBTRACT_DATES) + self.add_module_word('SUBTRACT-TIMES', self.word_SUBTRACT_TIMES) + self.add_module_word('DATE>STR', self.word_DATE_to_STR) + self.add_module_word('DATE-TIME>DATETIME', self.word_DATE_TIME_to_DATETIME) + self.add_module_word('DATETIME>TIMESTAMP', self.word_DATETIME_to_TIMESTAMP) + self.add_module_word('TIMESTAMP>DATETIME', self.word_TIMESTAMP_to_DATETIME) + self.add_module_word('STR>DATETIME', self.word_STR_to_DATETIME) + self.add_module_word('STR>TIMESTAMP', self.word_STR_to_TIMESTAMP) + + # ---------------- + # Math words + self.add_module_word('+', self.word_plus) + self.add_module_word('-', self.word_minus) + self.add_module_word('*', self.word_times) + self.add_module_word('/', self.word_divide_by) + self.add_module_word('MOD', self.word_MOD) + self.add_module_word('MEAN', self.word_MEAN) + self.add_module_word('ROUND', self.word_ROUND) + self.add_module_word('MAX', self.word_MAX) + self.add_module_word('MIN', self.word_MIN) + self.add_module_word('==', self.word_equal_equal) + self.add_module_word('!=', self.word_not_equal) + self.add_module_word('>', self.word_greater_than) + self.add_module_word('>=', self.word_greater_than_or_equal) + self.add_module_word('<', self.word_less_than) + self.add_module_word('<=', self.word_less_than_or_equal) + 
self.add_module_word('OR', self.word_OR)
self.add_module_word('AND', self.word_AND)
self.add_module_word('NOT', self.word_NOT)
self.add_module_word('IN', self.word_IN)
self.add_module_word('ANY', self.word_ANY)
self.add_module_word('ALL', self.word_ALL)
self.add_module_word('>BOOL', self.word_to_BOOL)
self.add_module_word('>INT', self.word_to_INT)
self.add_module_word('>FLOAT', self.word_to_FLOAT)
self.add_module_word('UNIFORM-RANDOM', self.word_UNIFORM_RANDOM)
self.add_module_word('RANGE-INDEX', self.word_RANGE_INDEX)

# ----------------
# Flag words (set one-shot behavior flags consumed by the next flag-aware word)
self.add_module_word('!PUSH-ERROR', self.word_bang_PUSH_ERROR)
self.add_module_word('!WITH-KEY', self.word_bang_WITH_KEY)
self.add_module_word('!COMPARATOR', self.word_bang_COMPARATOR)
self.add_module_word('!PUSH-REST', self.word_bang_PUSH_REST)
self.add_module_word('!DEPTH', self.word_bang_DEPTH)

# ----------------
# Profiling words
self.add_module_word('PROFILE-START', self.word_PROFILE_START)
self.add_module_word('PROFILE-TIMESTAMP', self.word_PROFILE_TIMESTAMP)
self.add_module_word('PROFILE-END', self.word_PROFILE_END)
self.add_module_word('PROFILE-DATA', self.word_PROFILE_DATA)
self.add_module_word('PROFILE-REPORT', self.word_PROFILE_REPORT)

# ----------------
# Python-only words
self.add_module_word('CURRENT-USER', self.word_CURRENT_USER)
self.add_module_word('MARKDOWN>HTML', self.word_MARKDOWN_to_HTML)

def find_word(self, name: str):
    """Searches the global module for a word, trying literals if no word can be found"""
    result = super().find_word(name)
    if result is None:
        result = self.find_literal_word(name)
    return result

def find_literal_word(self, string: str):
    """Converts a string into a literal using one of the registered converters.

    Handlers are tried in `self.literal_handlers` order; the first non-None
    conversion wins, so handler ordering is significant.
    """
    for handler in self.literal_handlers:
        value = handler(string)
        if value is not None:
            return PushValueWord(string, value)
    return None
# --------------------------------------------------------------------------
# Literal handlers

def to_bool(self, str_val: str) -> Optional[bool]:
    """If str_val can be converted to bool, return value; otherwise None"""
    result = None
    if str_val == 'TRUE':
        result = True
    elif str_val == 'FALSE':
        result = False
    return result

def to_int(self, str_val: str) -> Optional[int]:
    """If str_val can be converted to int, return value; otherwise None"""
    try:
        result = int(str_val)
    except ValueError:
        return None
    return result

def to_float(self, str_val: str) -> Optional[float]:
    """If str_val can be converted to float, return value; otherwise None"""
    try:
        result = float(str_val)
    except ValueError:
        return None
    return result

def to_date(self, str_val: str) -> Optional[datetime.date]:
    """If str_val can be converted to date, return value; otherwise None"""
    match = re.match(r'(\d{4})-(\d{2})-(\d{2})', str_val)
    if not match:
        return None

    year = int(match.group(1))
    month = int(match.group(2))
    day = int(match.group(3))
    try:
        # FIX: regex-valid but impossible dates (e.g. 2024-02-30) used to raise
        # ValueError out of the literal scanner; a literal handler must return None
        result = datetime.date(year, month, day)
    except ValueError:
        return None
    return result

def to_time(self, str_val: str) -> Optional[datetime.time]:
    """If str_val can be converted to time, return value; otherwise None"""
    match = re.match(r'(\d{1,2}):(\d{2})', str_val)
    if not match:
        return None

    hour = int(match.group(1))
    minute = int(match.group(2))
    # FIX: was `minute > 60`, which let minute == 60 through and made
    # datetime.time raise ValueError instead of returning None
    if hour > 23 or minute > 59:
        return None
    result = datetime.time(hour, minute, tzinfo=self.timezone)
    return result

# --------------------------------------------------------------------------
# Word handlers

# ( varnames -- )
def word_VARIABLES(self, interp: "IInterpreter"):
    """Creates a new variable in the current module"""
    varnames = interp.stack_pop()
    module = interp.cur_module()
    for v in varnames:
        module.add_variable(v)

# ( value variable -- )
def word_bang(self, interp: "IInterpreter"):
    """Sets the value of a variable"""
    variable = interp.stack_pop()
    value = interp.stack_pop()
    variable.value = value
# ( variable -- value )
def word_at(self, interp: IInterpreter):
    """Pushes variable's value onto the stack"""
    variable = interp.stack_pop()
    interp.stack_push(variable.value)

# ( value variable -- value )
def word_bang_at(self, interp: IInterpreter):
    """Set the value of a variable and then pushes variable's value onto the stack"""
    variable = interp.stack_pop()
    value = interp.stack_pop()
    variable.value = value
    interp.stack_push(variable.value)

# ( value variable -- variable )
def word_l_bang(self, interp: IInterpreter):
    """Set the value of a variable and then pushes variable onto the stack"""
    variable = interp.stack_pop()
    value = interp.stack_pop()
    variable.value = value
    interp.stack_push(variable)

# ( object -- ? )
def word_INTERPRET(self, interp: IInterpreter):
    """Pops a string/Lambda and interprets it; a falsy object is a silent no-op"""
    obj = interp.stack_pop()

    if not obj:
        return

    execute(interp, obj)

# ( names -- )
def word_EXPORT(self, interp: IInterpreter):
    """Marks the given word names as exportable from the current module"""
    names = interp.stack_pop()
    interp.cur_module().add_exportable(names)

# ( names -- )
def word_USE_MODULES(self, interp: IInterpreter):
    """Imports modules into the app module.

    Each name is either a module name (imported under its own name as prefix)
    or a [module_name, prefix] pair.
    """
    names = interp.stack_pop()

    for name in names:
        module_name = name
        prefix = name

        if isinstance(name, list):
            module_name = name[0]
            prefix = name[1]

        module = interp.find_module(module_name)
        interp.app_module.import_module(prefix, module, interp)

# ( key_vals -- rec )
# key_vals is an array of [key val] pairs
def word_REC(self, interp: IInterpreter):
    """Builds a record from [key, val] pairs; short pairs default key/val to None"""
    key_vals = interp.stack_pop()

    if not key_vals:
        key_vals = []

    result = {}
    for pair in key_vals:
        key = None
        val = None
        if pair:
            if len(pair) >= 1:
                key = pair[0]
            if len(pair) >= 2:
                val = pair[1]
        result[key] = val
    interp.stack_push(result)

# ( rec field -- value )
# ( rec fields -- value )
def word_REC_at(self, interp: IInterpreter):
    """Drills into a record by one field or a list of fields, pushing the value (None if missing)"""
    field = interp.stack_pop()
    rec = interp.stack_pop()

    if not rec:
        interp.stack_push(None)
        return

    if isinstance(field, list):
        fields = field
    else:
        fields = [field]

    result = drill_for_value(rec, fields)
    interp.stack_push(result)
# ( rec value field -- rec )
def word_l_REC_bang(self, interp: IInterpreter):
    """Sets a (possibly nested) field in a record, pushing the record back.

    `field` may be a list of fields; intermediate records are created as needed.
    """
    field = interp.stack_pop()
    value = interp.stack_pop()
    rec = interp.stack_pop()

    if not rec:
        rec = {}

    if isinstance(field, list):
        fields = field
    else:
        fields = [field]

    def ensure_field(rec, field):
        # NOTE(review): a falsy existing value (0, '', None, {}) is replaced by a
        # fresh dict here, and a truthy non-dict value would break the next
        # drill-down step — confirm this is the intended contract
        res = rec.get(field)
        if not res:
            res = {}
            rec[field] = res
        return res

    cur_rec = rec
    for f in fields[:-1]:  # Drill down up until the last value
        cur_rec = ensure_field(cur_rec, f)

    # Set the value at the right depth within rec
    cur_rec[fields[-1]] = value

    interp.stack_push(rec)

# ( content name -- )
def word_SCREEN_bang(self, interp: IInterpreter):
    """Stores a screen in the application module"""
    name = interp.stack_pop()
    content = interp.stack_pop()
    interp.app_module.set_screen(name, content)

# ( name -- content )
def word_SCREEN(self, interp: IInterpreter):
    """Returns screen stored in application module"""
    name = interp.stack_pop()
    result = interp.app_module.get_screen(name)
    interp.stack_push(result)
# ( name -- ? )
def word_LOAD_SCREEN(self, interp: "IInterpreter"):
    """Runs a stored screen, guarding against recursive screen loads"""
    name = interp.stack_pop()
    if name in self.active_screens:
        raise GlobalModuleError(
            f"Can't load screen '{name}' because it is currently being loaded"
        )

    screen = interp.app_module.get_screen(name)

    self.active_screens.add(name)
    try:
        interp.run_in_module(interp.app_module, screen)
    finally:
        # FIX: always clear the in-progress marker; previously an exception in
        # the screen left `name` stuck in active_screens, blocking all reloads
        self.active_screens.remove(name)

# ( array item -- array )
# ( record key/val -- record )
def word_APPEND(self, interp: "IInterpreter"):
    """Appends an item to an array, or a [key, val] pair to a record (in place)"""
    item = interp.stack_pop()
    result = interp.stack_pop()

    if not result:
        result = []

    if isinstance(result, list):
        result.append(item)
    else:  # If not a list, treat as record
        result[item[0]] = item[1]

    interp.stack_push(result)

# ( array -- array )
# ( record -- record )
def word_REVERSE(self, interp: "IInterpreter"):
    """Reverses an array, or the key insertion order of a record"""
    container = interp.stack_pop()

    if not container:
        interp.stack_push(container)
        return

    def reverse_record(rec):
        res = {}
        for pair in reversed(rec.items()):
            res[pair[0]] = pair[1]
        return res

    if isinstance(container, list):
        result = list(reversed(container))
    else:  # If not a list, treat as record
        result = reverse_record(container)

    interp.stack_push(result)

# ( array -- array )
# ( record -- record )
# NOTE: If record, assuming its values are hashable
def word_UNIQUE(self, interp: "IInterpreter"):
    """Removes duplicate values; array order is not preserved (set-based)"""
    container = interp.stack_pop()

    if not container:
        interp.stack_push(container)
        return

    def invert_record(record):
        res = {}
        for k, v in record.items():
            res[v] = k
        return res

    if isinstance(container, list):
        result = list(set(container))
    else:  # If not a list, treat as record
        # Double inversion keeps one key per distinct value
        result = invert_record(invert_record(container))

    interp.stack_push(result)

# ( array index -- array )
# ( record key -- record )
def word_L_DEL(self, interp: "IInterpreter"):
    """Deletes an element in place by index/key; missing entries are ignored"""
    key = interp.stack_pop()
    container = interp.stack_pop()

    if not container:
        interp.stack_push(container)
        return

    if isinstance(container, list):
        # FIX: out-of-range indices are now ignored, consistent with how
        # missing record keys are ignored (previously raised IndexError)
        if -len(container) <= key < len(container):
            del container[key]
    else:
        if key in container:
            del container[key]
    interp.stack_push(container)
# ( array old_keys new_keys -- array )
# ( record old_keys new_keys -- record )
def word_RELABEL(self, interp: IInterpreter):
    """Relabels elements: old_keys[i] is renamed/moved to new_keys[i].

    For arrays, the result is ordered by sorted new keys; for records, missing
    old keys yield None values.
    """
    new_keys = interp.stack_pop()
    old_keys = interp.stack_pop()
    container = interp.stack_pop()

    if not container:
        interp.stack_push(container)
        return

    if len(old_keys) != len(new_keys):
        raise GlobalModuleError(
            'RELABEL: old_keys and new_keys must be same length'
        )

    new_to_old = {}
    for i in range(len(old_keys)):
        new_to_old[new_keys[i]] = old_keys[i]

    if isinstance(container, list):
        result: Any = []
        for key in sorted(new_to_old):
            result.append(container[new_to_old[key]])
    else:
        result = {}
        for key in new_to_old:
            result[key] = container.get(new_to_old[key])

    interp.stack_push(result)

# ( array field -- field_to_item )
# ( record field -- field_to_item )
def word_BY_FIELD(self, interp: IInterpreter):
    """Indexes records by a field value; later records win ties, None records are skipped"""
    field = interp.stack_pop()
    container = interp.stack_pop()

    if not container:
        container = []

    if isinstance(container, list):
        values = container
    else:
        values = container.values()

    result = {}
    for v in values:
        if v is not None:
            result[v.get(field)] = v

    interp.stack_push(result)

# ( array field -- field_to_items )
# ( record field -- field_to_items )
def word_GROUP_BY_FIELD(self, interp: IInterpreter):
    """Groups records by a field value.

    A None field value groups under ""; a list-valued field adds the record
    to every listed group.
    """
    field = interp.stack_pop()
    container = interp.stack_pop()

    if not container:
        container = []

    if isinstance(container, list):
        values = container
    else:
        values = container.values()

    result = defaultdict(list)
    for v in values:
        field_value = None
        if v is not None:
            field_value = v.get(field)

        if field_value is None:
            field_value = ""

        if isinstance(field_value, list):
            for fv in field_value:
                result[fv].append(v)
        else:
            result[field_value].append(v)

    interp.stack_push(result)
# ( array forthic -- group_to_items )
# ( record forthic -- group_to_items )
#
# Flagged behavior:
#    with_key: Pushes container key in addition to container value before executing Forthic
def word_GROUP_BY(self, interp: IInterpreter):
    """Groups container values by the group key the Forthic string computes for each"""
    forthic = interp.stack_pop()
    container = interp.stack_pop()

    flags = self.get_flags()

    if not container:
        container = []

    if isinstance(container, list):
        keys: Any = range(len(container))
        values = container
    else:
        keys = list(container.keys())
        values = list(container.values())

    result = defaultdict(list)
    for i in range(len(values)):
        key = keys[i]
        v = values[i]
        if flags.get('with_key'):
            interp.stack_push(key)
        interp.stack_push(v)
        execute(interp, forthic)
        group = interp.stack_pop()
        result[group].append(v)

    interp.stack_push(result)

# ( array n -- arrays )
# ( record n -- records )
def word_GROUPS_OF(self, interp: IInterpreter):
    """Chunks a container into groups of at most `size` elements"""
    size = interp.stack_pop()
    container = interp.stack_pop()
    if size <= 0:
        raise GlobalModuleError('GROUPS-OF requires group size > 0')

    if not container:
        container = []

    def group_items(items, group_size):
        num_groups = math.ceil(len(items) / group_size)
        res = []
        remaining = items[:]
        for _ in range(num_groups):
            res.append(remaining[0:group_size])
            remaining = remaining[group_size:]
        return res

    def extract_rec(record, keys):
        res = {}
        for k in keys:
            res[k] = record[k]
        return res

    if isinstance(container, list):
        result = group_items(container, size)
    else:
        keys = list(container.keys())
        key_groups = group_items(keys, size)
        result = [extract_rec(container, ks) for ks in key_groups]

    interp.stack_push(result)

# ( array forthic -- record )
def word_INDEX(self, interp: IInterpreter):
    """Indexes items under each (lowercased) key the Forthic string returns for them"""
    forthic = interp.stack_pop()  # Returns a list of string keys
    items = interp.stack_pop()

    if not items:
        interp.stack_push(items)
        return

    result = defaultdict(list)
    for item in items:
        interp.stack_push(item)
        execute(interp, forthic)
        keys = interp.stack_pop()
        for k in keys:
            # NOTE(review): assumes the Forthic string returns string keys
            result[k.lower()].append(item)

    interp.stack_push(result)
# ( array forthic -- array )
# ( record forthic -- record )
#
# Flagged behavior:
#    * with_key: Pushes key in addition to value
#    * push_error: If an error occurs while mapping over an element, push None onto the stack and gather the error.
#                  At the end of the mapping, push the errors onto the stack
def word_MAP(self, interp: IInterpreter):
    """Maps a Forthic string over a container, optionally descending `depth` nested levels"""
    # Get the args
    forthic = interp.stack_pop()
    items = interp.stack_pop()

    # Get flags
    flags = self.get_flags()

    depth = flags.get('depth')
    if not depth:
        depth = 0

    # Early exit if no items
    if not items:
        interp.stack_push(items)
        return

    # This maps the forthic over an item, storing errors if needed
    def map_value(key, value, errors):
        if flags.get('with_key'):
            interp.stack_push(key)
        interp.stack_push(value)

        if flags.get('push_error'):
            error = None
            try:
                execute(interp, forthic)
            except Exception as e:
                # Keep the stack balanced by pushing a placeholder result
                interp.stack_push(None)
                error = e
            errors.append(error)
        else:
            execute(interp, forthic)

        return interp.stack_pop()

    # This recursively descends a record structure
    def descend_record(record, depth, accum, errors):
        for k, item in record.items():
            if depth > 0:
                if isinstance(item, list):
                    accum[k] = []
                    descend_list(item, depth - 1, accum[k], errors)
                else:
                    accum[k] = {}
                    descend_record(item, depth - 1, accum[k], errors)
            else:
                accum[k] = map_value(k, item, errors)
        return accum

    # This recursively descends a list
    def descend_list(items, depth, accum, errors):
        for i in range(len(items)):
            item = items[i]
            if depth > 0:
                if isinstance(item, list):
                    accum.append([])
                    descend_list(item, depth - 1, accum[-1], errors)
                else:
                    accum.append({})
                    descend_record(item, depth - 1, accum[-1], errors)
            else:
                accum.append(map_value(i, item, errors))
        return accum

    errors: Any = []
    result: Any = []
    if isinstance(items, list):
        result = descend_list(items, depth, [], errors)
    else:
        result = descend_record(items, depth, {}, errors)

    # Return results
    interp.stack_push(result)
    if flags.get('push_error'):
        interp.stack_push(errors)
# ( items forthic -- ? )
# ( record forthic -- ? )
#
# Flagged behavior
#    * with_key: Pushes key in addition to value when executing Forthic
#    * push_error: After execution, push an array of errors onto stack corresponding to each element
#                  in the specified container
def word_FOREACH(self, interp: IInterpreter):
    """Runs a Forthic string against each container element (delegates to `foreach`)"""
    flags = self.get_flags()
    foreach(interp, flags)

# ( record -- record )
# Swaps the order of nested keys in a record
def word_INVERT_KEYS(self, interp: IInterpreter):
    """Turns rec[a][b] into rec[b][a]"""
    record = interp.stack_pop()
    result: Any = defaultdict(dict)
    for first_key, sub_record in record.items():
        for second_key, value in sub_record.items():
            result[second_key][first_key] = value
    interp.stack_push(result)

# ( array1 array2 -- array )
# ( record1 record2 -- record )
def word_ZIP(self, interp: IInterpreter):
    """Pairs corresponding elements; missing right-hand elements become None.

    NOTE: the result shape follows container2's type.
    """
    container2 = interp.stack_pop()
    container1 = interp.stack_pop()

    if not container1:
        container1 = []

    if not container2:
        container2 = []

    if isinstance(container2, list):
        result: Any = []
        for i in range(len(container1)):
            value2 = container2[i] if i < len(container2) else None
            result.append([container1[i], value2])
    else:
        result = {}
        for k, v in container1.items():
            result[k] = [v, container2.get(k)]

    interp.stack_push(result)

# ( array1 array2 forthic -- array )
# ( record1 record2 forthic -- record )
def word_ZIP_WITH(self, interp: IInterpreter):
    """Combines corresponding elements with a Forthic string (pushed as v1 v2, pops result)"""
    forthic = interp.stack_pop()
    container2 = interp.stack_pop()
    container1 = interp.stack_pop()

    if not container1:
        container1 = []

    if not container2:
        container2 = []

    if isinstance(container2, list):
        result: Any = []
        for i in range(len(container1)):
            value1 = container1[i]
            value2 = container2[i] if i < len(container2) else None
            interp.stack_push(value1)
            interp.stack_push(value2)
            execute(interp, forthic)
            res = interp.stack_pop()
            result.append(res)
    else:
        result = {}
        for k, v in container1.items():
            interp.stack_push(v)
            interp.stack_push(container2.get(k))
            execute(interp, forthic)
            res = interp.stack_pop()
            result[k] = res

    interp.stack_push(result)
# ( array -- array )
# ( record -- array )
def word_KEYS(self, interp: "IInterpreter"):
    """Pushes a record's keys, or an array's indices."""
    container = interp.stack_pop()
    if not container:
        container = []

    if isinstance(container, list):
        keys = list(range(len(container)))
    else:
        keys = list(container.keys())
    interp.stack_push(keys)

# ( array -- array )
# ( record -- array )
def word_VALUES(self, interp: "IInterpreter"):
    """Pushes a record's values; an array is pushed back as-is."""
    container = interp.stack_pop()
    if not container:
        container = []

    # Arrays are returned unchanged (same object); records yield their values
    values = container if isinstance(container, list) else list(container.values())
    interp.stack_push(values)

# ( array -- length )
# ( record -- length )
def word_LENGTH(self, interp: "IInterpreter"):
    """Pushes the number of elements in a container (0 for a falsy container)."""
    container = interp.stack_pop()
    interp.stack_push(len(container) if container else 0)

# ( array fstart fend -- indices )
# Returns start and end indices of a range bounded where fstart and fend are true
def word_RANGE(self, interp: "IInterpreter"):
    """Scans the array for the first item matching fstart, then (from that same
    item onward) the first item matching fend, pushing [start_index, end_index].
    Unfound bounds are None.
    """
    fend = interp.stack_pop()
    fstart = interp.stack_pop()
    array = interp.stack_pop()
    if not array:
        array = []

    def predicate(forthic, item):
        interp.stack_push(item)
        execute(interp, forthic)
        return interp.stack_pop()

    start_index = None
    end_index = None
    started = False
    for idx, element in enumerate(array):
        if not started:
            started = predicate(fstart, element)
            if started:
                start_index = idx
        # The end predicate is also checked on the item that started the range
        if started and predicate(fend, element):
            end_index = idx
            break

    interp.stack_push([start_index, end_index])
# ( array start end -- array )
# ( record start end -- record )
# Negative indices count from the end. Out-of-range positions yield None for
# arrays and are skipped for records. If start > end, the slice runs in reverse.
def word_SLICE(self, interp: "IInterpreter"):
    """Returns the inclusive slice of a container between start and end"""
    end = int(interp.stack_pop())
    start = int(interp.stack_pop())
    container = interp.stack_pop()

    if not container:
        container = []

    # FIX: length is computed after normalizing the container; previously a
    # None container hit len(None) and raised TypeError
    length = len(container)

    def normalize_index(index):
        res = index
        if index < 0:
            res = index + length
        return res

    start = normalize_index(start)
    end = normalize_index(end)

    step = 1
    if start > end:
        step = -1

    indexes: List[Any] = [start]
    if start < 0 or start >= length:
        indexes = []

    while start != end:
        start = start + step
        if start < 0 or start >= length:
            indexes.append(None)
        else:
            indexes.append(start)

    if isinstance(container, list):
        result: Any = []
        for i in indexes:
            if i is None:
                result.append(None)
            else:
                result.append(container[i])
    else:
        keys = list(container.keys())
        result = {}
        for i in indexes:
            if i is not None:
                k = keys[i]
                result[k] = container.get(k)

    interp.stack_push(result)

# ( larray rarray -- array )
# ( lrecord rrecord -- record )
def word_DIFFERENCE(self, interp: "IInterpreter"):
    """Pushes elements of the left container not present in the right one.

    NOTE: the branch is chosen by the right container's type.
    """
    rcontainer = interp.stack_pop()
    lcontainer = interp.stack_pop()

    if lcontainer is None:
        lcontainer = []

    if rcontainer is None:
        rcontainer = []

    def difference(left, right):
        res = []
        for item in left:
            if item not in right:
                res.append(item)
        return res

    if isinstance(rcontainer, list):
        result = difference(lcontainer, rcontainer)
    else:
        lkeys = lcontainer.keys()
        rkeys = rcontainer.keys()
        diff = difference(lkeys, rkeys)
        result = {}
        for k in diff:
            result[k] = lcontainer[k]

    interp.stack_push(result)

# ( larray rarray -- array )
# ( lrecord rrecord -- record )
def word_INTERSECTION(self, interp: "IInterpreter"):
    """Pushes elements/keys common to both containers (set-based; order not preserved)"""
    rcontainer = interp.stack_pop()
    lcontainer = interp.stack_pop()

    if lcontainer is None:
        lcontainer = []

    if rcontainer is None:
        rcontainer = []

    if isinstance(rcontainer, list):
        lset = set(lcontainer)
        rset = set(rcontainer)
        result: Any = list(lset.intersection(rset))
    else:
        lkeys = set(lcontainer.keys())
        rkeys = set(rcontainer.keys())
        intersection = lkeys.intersection(rkeys)
        result = {}
        for k in intersection:
            result[k] = lcontainer[k]

    interp.stack_push(result)
# ( larray rarray -- array )
# ( lrecord rrecord -- record )
def word_UNION(self, interp: IInterpreter):
    """Pushes the union of two containers (set-based for arrays; key union for records)"""
    rcontainer = interp.stack_pop()
    lcontainer = interp.stack_pop()

    if lcontainer is None:
        lcontainer = []

    if rcontainer is None:
        rcontainer = []

    if isinstance(rcontainer, list):
        lset = set(lcontainer)
        rset = set(rcontainer)
        result: Any = list(lset.union(rset))
    else:
        lkeys = set(lcontainer.keys())
        rkeys = set(rcontainer.keys())
        union = lkeys.union(rkeys)
        result = {}
        for k in union:
            item = lcontainer.get(k)
            # NOTE(review): a falsy left value (0, '', None) is replaced by the
            # right container's value here — confirm this is intended
            if not item:
                item = rcontainer.get(k)
            result[k] = item

    interp.stack_push(result)

# ( larray forthic -- array )
# ( lrecord forthic -- record )
#
# Flagged behavior:
#    with_key: Pushes key and value onto stack for evaluation
def word_SELECT(self, interp: IInterpreter):
    """Filters a container, keeping elements for which the Forthic string returns truthy"""
    forthic = interp.stack_pop()
    container = interp.stack_pop()

    flags = self.get_flags()

    if not container:
        interp.stack_push(container)
        return

    if isinstance(container, list):
        result: Any = []
        for i in range(len(container)):
            item = container[i]
            if flags.get('with_key'):
                interp.stack_push(i)
            interp.stack_push(item)
            execute(interp, forthic)
            should_select = interp.stack_pop()
            if should_select:
                result.append(item)
    else:
        result = {}
        for k, v in container.items():
            if flags.get('with_key'):
                interp.stack_push(k)
            interp.stack_push(v)
            execute(interp, forthic)
            should_select = interp.stack_pop()
            if should_select:
                result[k] = v

    interp.stack_push(result)
# ( array n -- array )
# ( record n -- record )
#
# Flagged behavior:
#    * push_rest: This pushes the rest of the take container onto the stack
def word_TAKE(self, interp: IInterpreter):
    """Takes the first n elements.

    NOTE: for records, elements are ordered by sorted key and returned as a
    values list (keys are dropped).
    """
    n = interp.stack_pop()
    container = interp.stack_pop()

    flags = self.get_flags()

    if not container:
        container = []

    if isinstance(container, list):
        taken = container[:n]
        rest = container[n:]
    else:
        keys = sorted(list(container.keys()))
        taken_keys = keys[:n]
        rest_keys = keys[n:]
        taken = [container[k] for k in taken_keys]
        rest = [container[k] for k in rest_keys]

    interp.stack_push(taken)
    if flags.get('push_rest'):
        interp.stack_push(rest)

# ( array n -- rest )
# ( record n -- rest )
def word_DROP(self, interp: IInterpreter):
    """Drops the first n elements (records: by sorted key, returned as a values list)"""
    n = interp.stack_pop()
    container = interp.stack_pop()

    if not container:
        container = []

    if isinstance(container, list):
        rest = container[n:]
    else:
        keys = sorted(list(container.keys()))
        rest_keys = keys[n:]
        rest = [container[k] for k in rest_keys]

    interp.stack_push(rest)

# ( array -- array )
# ( record -- record )
def word_ROTATE(self, interp: IInterpreter):
    """Moves the last element (or last key) to the front"""
    container = interp.stack_pop()

    if not container:
        result = container
    elif isinstance(container, list):
        result = container[:]
        last = result.pop()
        result.insert(0, last)
    else:
        result = {}
        keys = list(container.keys())
        last = keys.pop()
        keys.insert(0, last)
        for k in keys:
            result[k] = container[k]

    interp.stack_push(result)

# ( value -- bool )
def word_ARRAY_q(self, interp: IInterpreter):
    """Pushes True if the value is a list"""
    value = interp.stack_pop()
    result = isinstance(value, list)
    interp.stack_push(result)

# ( array -- array )
# ( record -- record )
def word_SHUFFLE(self, interp: IInterpreter):
    """Shuffles an array copy randomly; records are returned unchanged"""
    container = interp.stack_pop()

    if not container:
        container = []

    if isinstance(container, list):
        result = container[:]
        random.shuffle(result)
    else:
        result = container

    interp.stack_push(result)
# ( array -- array )
# ( record -- record )
#
# Flagged behavior:
#    comparator: a Forthic string (maps value -> sort key), or a Python key
#                function; otherwise natural ordering is used
def word_SORT(self, interp: IInterpreter):
    """Sorts a container; records are sorted by value, preserving key association"""
    container = interp.stack_pop()

    flags = self.get_flags()
    comparator = flags.get('comparator')

    if not container:
        container = []

    # Sort using default item comparision
    def sort_without_comparator():
        def sort_record(record):
            sorted_items = sorted(record.items(), key=lambda x: x[1])
            res = {}
            for pair in sorted_items:
                res[pair[0]] = pair[1]
            return res

        if isinstance(container, list):
            # None items are moved to the end rather than breaking sorted()
            non_nones = [item for item in container if item is not None]
            nones = [item for item in container if item is None]
            result = sorted(non_nones) + nones
        else:
            result = sort_record(container)
        return result

    # Sort using a forthic string
    def sort_with_forthic(forthic):
        def forthic_func(val):
            interp.stack_push(val)
            execute(interp, forthic)
            res = interp.stack_pop()
            return res

        def sort_record(record):
            sorted_items = sorted(record.items(), key=lambda x: forthic_func(x[1]))
            res = {}
            for pair in sorted_items:
                res[pair[0]] = pair[1]
            return res

        if isinstance(container, list):
            result = sorted(container[:], key=forthic_func)
        else:
            result = sort_record(container)
        return result

    # Sort using a key func
    def sort_with_key_func(key_func):
        # NOTE(review): records are returned unsorted in this branch — confirm intended
        if isinstance(container, list):
            result = sorted(container[:], key=key_func)
        else:
            result = container
        return result

    if isinstance(comparator, str):
        result = sort_with_forthic(comparator)
    elif callable(comparator):
        result = sort_with_key_func(comparator)
    else:
        result = sort_without_comparator()
    interp.stack_push(result)

# ( field -- key_func )
def word_FIELD_KEY_FUNC(self, interp: IInterpreter):
    """Pushes a key function that extracts `field` from a record (for use with SORT)"""
    field = interp.stack_pop()

    def result(record):
        return record[field]

    interp.stack_push(result)

# ( array n -- item )
# ( record n -- value )
def word_NTH(self, interp: IInterpreter):
    """Pushes the nth element (records: nth value in key order); None if out of range"""
    n = interp.stack_pop()
    container = interp.stack_pop()

    if n is None or not container:
        interp.stack_push(None)
        return

    if n < 0 or n >= len(container):
        interp.stack_push(None)
        return

    if isinstance(container, list):
        result = container[n]
    else:
        keys = list(container.keys())
        key = keys[n]
        result = container[key]

    interp.stack_push(result)
# ( array -- item )
# ( record -- value )
def word_LAST(self, interp: "IInterpreter"):
    """Pushes the last array element, or the value of the last sorted key (None if empty)"""
    container = interp.stack_pop()

    if not container:
        interp.stack_push(None)
        return

    if isinstance(container, list):
        result = container[-1]
    else:
        keys = sorted(list(container.keys()))
        key = keys[-1]
        result = container[key]

    interp.stack_push(result)

# ( array -- a1 a2 .. an )
def word_UNPACK(self, interp: "IInterpreter"):
    """Pushes every element of a container onto the stack (records: in sorted-key order)"""
    container = interp.stack_pop()

    if not container:
        container = []

    if isinstance(container, list):
        for item in container:
            interp.stack_push(item)
    else:
        keys = sorted(list(container.keys()))
        for k in keys:
            interp.stack_push(container[k])

# ( nested_arrays -- array )
# ( nested_records -- record )
#
# Flagged behavior:
#    depth: flatten only `depth` levels; if unset, flatten fully
def word_FLATTEN(self, interp: "IInterpreter"):
    """Flattens nested arrays into one array, or nested records into dotted-key records"""
    nested = interp.stack_pop()
    flags = self.get_flags()

    if not nested:
        nested = []

    depth = flags.get('depth')

    def fully_flatten_array(items, accum):
        for item in items:
            if isinstance(item, list):
                fully_flatten_array(item, accum)
            else:
                accum.append(item)
        return accum

    # FIX: accumulators default to None instead of mutable defaults ([]/{}),
    # avoiding the shared-mutable-default pitfall
    def flatten_array(items, depth, accum=None):
        if accum is None:
            accum = []
        if depth is None:
            return fully_flatten_array(items, accum)

        for item in items:
            if depth > 0 and isinstance(item, list):
                flatten_array(item, depth - 1, accum)
            else:
                accum.append(item)
        return accum

    def add_to_record_result(item, keys, key, result):
        # Nested keys are joined with '.' (e.g. {'a': {'b': 1}} -> {'a.b': 1})
        new_key = '.'.join(keys + [key])
        result[new_key] = item

    def fully_flatten_record(record, res, keys):
        for k, item in record.items():
            if isinstance(item, dict):
                fully_flatten_record(item, res, keys + [k])
            else:
                add_to_record_result(item, keys, k, res)
        return res

    def flatten_record(record, depth, res=None, keys=None):
        if res is None:
            res = {}
        if keys is None:
            keys = []
        if depth is None:
            return fully_flatten_record(record, res, keys)

        for k, item in record.items():
            if depth > 0 and isinstance(item, dict):
                flatten_record(item, depth - 1, res, keys + [k])
            else:
                add_to_record_result(item, keys, k, res)
        return res

    if isinstance(nested, list):
        result = flatten_array(nested, depth)
    else:
        result = flatten_record(nested, depth)

    interp.stack_push(result)
# ( list item -- index )
# ( record item -- key )
def word_KEY_OF(self, interp: IInterpreter):
    """Pushes the index/key of the first element equal to item (None if absent)"""
    item = interp.stack_pop()
    container = interp.stack_pop()

    if not container:
        container = []

    if isinstance(container, list):
        if item not in container:
            result = None
        else:
            result = container.index(item)
    else:
        result = None
        for k, v in container.items():
            if v == item:
                result = k
                break

    interp.stack_push(result)

# ( list initial forthic -- value )
# ( record initial forthic -- value )
def word_REDUCE(self, interp: IInterpreter):
    """Folds a container: the Forthic string repeatedly pops (accum, item) and pushes accum"""
    forthic = interp.stack_pop()
    initial = interp.stack_pop()
    container = interp.stack_pop()

    if not container:
        container = []

    if isinstance(container, list):
        interp.stack_push(initial)
        for item in container:
            interp.stack_push(item)
            execute(interp, forthic)
        result = interp.stack_pop()
    else:
        interp.stack_push(initial)
        for _, v in container.items():
            interp.stack_push(v)
            execute(interp, forthic)
        result = interp.stack_pop()

    interp.stack_push(result)

# ( records field breakpoints -- cumulative_distribution )
def word_CUMULATIVE_DIST(self, interp: IInterpreter):
    """Computes cumulative counts/percentages of record `field` values against breakpoints"""
    breakpoints = interp.stack_pop()
    field = interp.stack_pop()
    records = interp.stack_pop()

    sorted_breakpoints = sorted(breakpoints)

    def get_breakpoint_index(breakpoints, value):
        # Index of the first breakpoint >= value; None or too-large values get a sentinel
        out_of_range_index = len(breakpoints) + 1000  # Adding 1000 so it doesn't look like an "off by one" error :-)
        if value is None:
            return out_of_range_index

        res = None
        for i, breakpoint_value in enumerate(breakpoints):
            if value <= breakpoint_value:
                res = i
                break

        if res is None:
            res = out_of_range_index
        return res

    # Compute breakpoint indexes
    record_breakpoint_indexes = []
    for r in records:
        record_breakpoint_indexes.append(get_breakpoint_index(sorted_breakpoints, r.get(field)))

    # Compute breakpoint counts (cumulative: a record counts toward its own breakpoint and all later ones)
    breakpoint_counts = [0] * len(sorted_breakpoints)
    for breakpoint_index in record_breakpoint_indexes:
        for i in range(len(breakpoint_counts)):
            if breakpoint_index <= i:
                breakpoint_counts[i] += 1

    # Compute breakpoint pcts
    breakpoint_pcts = [0.0] * len(sorted_breakpoints)
    num_records = len(records)
    if num_records > 0:
        for i, count in enumerate(breakpoint_counts):
            breakpoint_pcts[i] = count / num_records * 100.0

    result = {
        "records": records,
        "field": field,
        "breakpoints": breakpoints,
        "record_breakpoint_indexes": record_breakpoint_indexes,
        "breakpoint_counts": breakpoint_counts,
        "breakpoint_pcts": breakpoint_pcts,
    }
    interp.stack_push(result)
    return
# ( item -- )
def word_POP(self, interp: "IInterpreter"):
    """Discards the top of the stack."""
    interp.stack_pop()

# ( a -- a a )
def word_DUP(self, interp: "IInterpreter"):
    """Duplicates the top of the stack."""
    top = interp.stack_pop()
    interp.stack_push(top)
    interp.stack_push(top)

# ( a b -- b a )
def word_SWAP(self, interp: "IInterpreter"):
    """Swaps the top two stack items in place."""
    interp.stack[-1], interp.stack[-2] = interp.stack[-2], interp.stack[-1]

# ( str1 str2 -- str )
# ( array_of_str -- str )
def word_CONCAT(self, interp: "IInterpreter"):
    """Concatenates two strings, or an array of items coerced to strings."""
    top = interp.stack_pop()
    if isinstance(top, list):
        parts = top
    else:
        parts = [interp.stack_pop(), top]
    interp.stack_push(''.join(str(part) for part in parts))

# ( string sep -- items )
def word_SPLIT(self, interp: "IInterpreter"):
    """Splits a string on a separator; a falsy string is treated as ''."""
    sep = interp.stack_pop()
    text = interp.stack_pop()
    if not text:
        text = ''
    interp.stack_push(text.split(sep))
= string.split(sep) + interp.stack_push(result) + + # ( array sep -- string ) + def word_JOIN(self, interp: IInterpreter): + sep = interp.stack_pop() + array = interp.stack_pop() + + if not array: + array = [] + + string_array = [str(item) for item in array] + result = sep.join(string_array) + interp.stack_push(result) + + # ( -- char ) + def word_slash_N(self, interp: IInterpreter): + interp.stack_push('\n') + + # ( -- char ) + def word_slash_R(self, interp: IInterpreter): + interp.stack_push('\r') + + # ( -- char ) + def word_slash_T(self, interp: IInterpreter): + interp.stack_push('\t') + + # ( string -- string ) + def word_LOWERCASE(self, interp: IInterpreter): + string = interp.stack_pop() + + if not string: + string = '' + + result = string.lower() + interp.stack_push(result) + + # ( string -- string ) + def word_UPPERCASE(self, interp: IInterpreter): + string = interp.stack_pop() + + if not string: + string = '' + + result = string.upper() + interp.stack_push(result) + + # ( string -- string ) + def word_ASCII(self, interp: IInterpreter): + string = interp.stack_pop() + + if not string: + string = '' + + result = '' + for c in string: + if ord(c) < 256: + result += c + interp.stack_push(result) + + # ( string -- string ) + def word_STRIP(self, interp: IInterpreter): + string = interp.stack_pop() + + if not string: + string = '' + + interp.stack_push(string.strip()) + + # ( string s r -- string ) + def word_REPLACE(self, interp: IInterpreter): + replacement = interp.stack_pop() + search_string = interp.stack_pop() + string = interp.stack_pop() + + if not string: + string = '' + if not replacement: + replacement = '' + + result = string.replace(search_string, replacement) + interp.stack_push(result) + + # ( string regex replace -- string ) + def word_RE_REPLACE(self, interp: IInterpreter): + replace = interp.stack_pop() + regex = interp.stack_pop() + string = interp.stack_pop() + + if not string: + string = '' + + result = re.sub(regex, replace, string, 
flags=re.MULTILINE | re.DOTALL) + interp.stack_push(result) + + # ( string regex -- match ) + def word_RE_MATCH(self, interp: IInterpreter): + regex = interp.stack_pop() + string = interp.stack_pop() + + if not string: + string = '' + + result = re.match(regex, string, re.MULTILINE | re.DOTALL) + interp.stack_push(result) + + # ( string regex -- matches ) + def word_RE_MATCH_ALL(self, interp: IInterpreter): + regex = interp.stack_pop() + string = interp.stack_pop() + + if not string: + string = '' + + result = re.findall(regex, string, re.MULTILINE | re.DOTALL) + interp.stack_push(result) + + # ( match num -- string ) + def word_RE_MATCH_GROUP(self, interp: IInterpreter): + num = interp.stack_pop() + match = interp.stack_pop() + result = None + if match: + result = match.group(num) + interp.stack_push(result) + + # ( object -- string ) + def word_to_STR(self, interp: IInterpreter): + obj = interp.stack_pop() + result = str(obj) + interp.stack_push(result) + + # ( str -- url_encoded_str ) + def word_URL_ENCODE(self, interp: IInterpreter): + string = interp.stack_pop() + + if not string: + string = '' + + result = urllib.parse.quote_plus(string) + interp.stack_push(result) + + # ( url_encoded -- str ) + def word_URL_DECODE(self, interp: IInterpreter): + encoded = interp.stack_pop() + + if not encoded: + encoded = '' + + result = urllib.parse.unquote(encoded) + interp.stack_push(result) + + # ( root child_items_forthic -- node_items ) + # This starts from a root and applies `child_items_forthic` to get a set of child items. + # This repeats, depth first, until all elements have been traversed. + # If an item has already been traversed, it is not further traversed. 
+ # + # Each node_item is a record with the following fields: + # * depth: Depth in tree (0 for root) + # * value: The child item + def word_TRAVERSE_DEPTH_FIRST(self, interp: IInterpreter): + child_items_forthic = interp.stack_pop() + root = interp.stack_pop() + + result = [] + + def traverse(item, depth): + if item in result: + return + node_item = { + 'depth': depth, + 'value': item, + } + result.append(node_item) + interp.stack_push(item) + execute(interp, child_items_forthic) + children = interp.stack_pop() + for c in children: + traverse(c, depth + 1) + + traverse(root, 0) + interp.stack_push(result) + + # ( tree subroots -- subtrees ) + # `tree` is an array of `node_items` (see word_TRAVERSE_DEPTH_FIRST) + # `subroots` is an array of `node_items` in the tree + # `subtrees` is an array of trees rooted at the subroots + # + # If a subroot is not in the tree, then the value of its subtree is [] + def word_SUBTREES(self, interp: IInterpreter): + subroots = interp.stack_pop() + tree = interp.stack_pop() + + def get_subtree(subroot): + try: + index = tree.index(subroot) + except ValueError: + index = None + + # If subroot is not in tree + if index is None: + return [] + + # Return node items from subroot to next node item at the subroot depth or higher + res = [subroot] + subroot_depth = subroot['depth'] + for node_item in tree[index + 1:]: + if node_item['depth'] <= subroot_depth: + break + res.append(node_item) + return res + + result = [get_subtree(s) for s in subroots] + interp.stack_push(result) + + # ( -- None ) + def word_NULL(self, interp: IInterpreter): + interp.stack_push(None) + + # ( -- quote_char ) + def word_QUOTE_CHAR(self, interp: IInterpreter): + result = DLE + interp.stack_push(result) + + # ( string -- quoted_string ) + def word_QUOTED(self, interp: IInterpreter): + string = interp.stack_pop() + + if not string: + string = '' + + chars = [] + for c in string: + if c == DLE: + c = ' ' + chars.append(c) + clean_string = ''.join(chars) + result = 
f'{DLE}{clean_string}{DLE}' + interp.stack_push(result) + + # ( value default_value -- val ) + def word_DEFAULT(self, interp: IInterpreter): + default_value = interp.stack_pop() + value = interp.stack_pop() + if value is None or value == '': + value = default_value + interp.stack_push(value) + + # ( value default_forthic -- val ) + def word_star_DEFAULT(self, interp: IInterpreter): + default_forthic = interp.stack_pop() + value = interp.stack_pop() + if value is None or value == '': + execute(interp, default_forthic) + value = interp.stack_pop() + interp.stack_push(value) + + # ( item forthic num-times -- ? ) + def word_l_REPEAT(self, interp: IInterpreter): + num_times = interp.stack_pop() + forthic = interp.stack_pop() + for _ in range(num_times): + # Store item so we can push it back later + item = interp.stack_pop() + interp.stack_push(item) + + execute(interp, forthic) + res = interp.stack_pop() + + # Push original item and result + interp.stack_push(item) + interp.stack_push(res) + + # ( a -- a ) + def word_IDENTITY(self, interp: IInterpreter): + pass + + # ( num digits -- str ) + def word_to_FIXED(self, interp: IInterpreter): + digits = interp.stack_pop() + num = interp.stack_pop() + + if num is None: + interp.stack_push(None) + return + + result = f'%.{digits}f' % num + interp.stack_push(result) + + # ( item -- json ) + def word_to_JSON(self, interp: IInterpreter): + item = interp.stack_pop() + result = json.dumps(item, default=default_json_serialize) + interp.stack_push(result) + + # ( json -- item ) + def word_JSON_to(self, interp: IInterpreter): + string = interp.stack_pop() + result = json.loads(string) + interp.stack_push(result) + + # ( items -- tsv ) + def word_to_TSV(self, interp: IInterpreter): + items = interp.stack_pop() + + if not items: + items = [] + + buf = io.StringIO() + writer = csv.writer(buf, delimiter='\t') + writer.writerows(items) + result = buf.getvalue() + interp.stack_push(result) + + # ( tsv -- items ) + def word_TSV_to(self, 
interp: IInterpreter): + tsv = interp.stack_pop() + + buf = io.StringIO(tsv) + reader = csv.reader(buf, delimiter='\t') + result = [row for row in reader] + interp.stack_push(result) + + # ( records header -- tsv ) + def word_RECS_to_TSV(self, interp: IInterpreter): + header = interp.stack_pop() + records = interp.stack_pop() + + if not records: + records = [] + + vals_array = [] + for rec in records: + vals_array.append([rec[h] for h in header]) + + buf = io.StringIO() + writer = csv.writer(buf, delimiter='\t') + + writer.writerow(header) + writer.writerows(vals_array) + result = buf.getvalue() + interp.stack_push(result) + + # ( tsv -- records ) + def word_TSV_to_RECS(self, interp: IInterpreter): + tsv = interp.stack_pop() + + buf = io.StringIO(tsv) + reader = csv.reader(buf, delimiter='\t') + rows = [row for row in reader] + header = rows[0] + result = [] + for row in rows[1:]: + rec = {} + for i in range(len(header)): + rec[header[i]] = row[i] + result.append(rec) + interp.stack_push(result) + + # ( -- ) + def word_dot_s(self, interp: IInterpreter): + top_of_stack = None + if len(interp.stack) > 0: + top_of_stack = interp.stack[-1] + + if interp.dev_mode: + print(top_of_stack) + pdb.set_trace() + else: + # Raising an exception to show stack to user + items = ['Forthic Stack:'] + indices = reversed(range(len(interp.stack))) + for i in indices: + items.append( + f'[{i}]: {str(interp.stack[i])}' + ) + stack_string = '\n'.join(items) + raise StackDump(stack_string) + + # ( time -- time ) + def word_AM(self, interp: IInterpreter): + a_time = interp.stack_pop() + if not isinstance(a_time, datetime.time): + raise InvalidTimeError(f'AM expecting a time, not {a_time}') + + result = a_time + if a_time.hour >= 12: + result = datetime.time(a_time.hour - 12, a_time.minute) + + interp.stack_push(result) + + # ( time -- time ) + def word_PM(self, interp: IInterpreter): + a_time = interp.stack_pop() + if not isinstance(a_time, datetime.time): + raise InvalidTimeError(f'PM 
expecting a time, not {a_time}') + + result = a_time + if a_time.hour < 12: + result = datetime.time(a_time.hour + 12, a_time.minute) + + interp.stack_push(result) + + # ( -- time ) + def word_NOW(self, interp: IInterpreter): + result = datetime.datetime.now(tz=self.timezone) + interp.stack_push(result) + + # ( str -- time ) + # ( time -- time ) + def word_to_TIME(self, interp: IInterpreter): + item = interp.stack_pop() + result: Union[datetime.time, datetime.datetime, None] = None + if isinstance(item, datetime.datetime): + result = item + else: + t = parser.parse(item) + tz = self.timezone + if t.tzinfo: + tz = t.tzinfo + result = datetime.time(t.hour, t.minute, tzinfo=tz) + + interp.stack_push(result) + + # ( time tzstr -- time ) + def word_l_TZ_bang(self, interp: IInterpreter): + tzstr = interp.stack_pop() + t = interp.stack_pop() + tz = pytz.timezone(tzstr) + if isinstance(t, datetime.datetime): + result: Union[datetime.datetime, datetime.time] = t.replace(tzinfo=tz) + else: + result = datetime.time(t.hour, t.minute, tzinfo=tz) + + interp.stack_push(result) + + # ( time -- string ) + def word_TIME_to_STR(self, interp: IInterpreter): + t = interp.stack_pop() + dt = t.tzinfo.localize(datetime.datetime(2000, 1, 1, t.hour, t.minute)) + interp_dt = dt.astimezone(self.timezone) + result = interp_dt.strftime('%H:%M') + interp.stack_push(result) + + # ( item -- date ) + def word_to_DATE(self, interp: IInterpreter): + item = interp.stack_pop() + + result = None + if not item: + result = None + elif isinstance(item, datetime.datetime): + result = item.date() + elif isinstance(item, datetime.date): + result = item + else: + result = parser.parse(item).date() + interp.stack_push(result) + + # ( -- date ) + def word_TODAY(self, interp: IInterpreter): + result = datetime.date.today() + interp.stack_push(result) + + # ( -- date ) + def word_MONDAY(self, interp: IInterpreter): + interp.stack_push(self.day_this_week(0)) + + # ( -- date ) + def word_TUESDAY(self, interp: 
IInterpreter): + interp.stack_push(self.day_this_week(1)) + + # ( -- date ) + def word_WEDNESDAY(self, interp: IInterpreter): + interp.stack_push(self.day_this_week(2)) + + # ( -- date ) + def word_THURSDAY(self, interp: IInterpreter): + interp.stack_push(self.day_this_week(3)) + + # ( -- date ) + def word_FRIDAY(self, interp: IInterpreter): + interp.stack_push(self.day_this_week(4)) + + # ( -- date ) + def word_SATURDAY(self, interp: IInterpreter): + interp.stack_push(self.day_this_week(5)) + + # ( -- date ) + def word_SUNDAY(self, interp: IInterpreter): + interp.stack_push(self.day_this_week(6)) + + # ( date -- date ) + def word_NEXT(self, interp: IInterpreter): + # If date is in the past, return date + 7 days + a_date = interp.stack_pop() + today = datetime.date.today() + result = a_date + if a_date < today: + result = a_date + datetime.timedelta(7) + interp.stack_push(result) + + def day_this_week(self, day_of_week): + # NOTE: Monday is start of week + today = datetime.date.today() + delta_days = (day_of_week - today.weekday()) % 7 + if day_of_week < today.weekday(): + delta_days -= 7 + result = today + datetime.timedelta(delta_days) + return result + + # ( date num_days -- date ) + def word_ADD_DAYS(self, interp: IInterpreter): + num_days = interp.stack_pop() + date = interp.stack_pop() + result = date + datetime.timedelta(num_days) + interp.stack_push(result) + + # ( ldate rdate -- num_days ) + def word_SUBTRACT_DATES(self, interp: IInterpreter): + rdate = interp.stack_pop() + ldate = interp.stack_pop() + delta = ldate - rdate + result = round(delta.total_seconds() / 60 / 60 / 24) + interp.stack_push(result) + + # ( ldate rdate -- num_secs ) + def word_SUBTRACT_TIMES(self, interp: IInterpreter): + rdate = interp.stack_pop() + ldate = interp.stack_pop() + delta = ldate - rdate + result = delta.total_seconds() + interp.stack_push(result) + + # ( date -- str ) + def word_DATE_to_STR(self, interp: IInterpreter): + date = interp.stack_pop() + if not date: + 
interp.stack_push('') + return + + result = f'{date.year}-{date.month:02d}-{date.day:02d}' + interp.stack_push(result) + + # ( date time -- datetime ) + def word_DATE_TIME_to_DATETIME(self, interp: IInterpreter): + a_time = interp.stack_pop() + a_date = interp.stack_pop() + result = datetime.datetime( + a_date.year, a_date.month, a_date.day, a_time.hour, a_time.minute + ) + tz = self.timezone + if a_time.tzinfo: + tz = a_time.tzinfo + result = tz.localize(result) + interp.stack_push(result) + + # ( datetime -- timestamp ) + def word_DATETIME_to_TIMESTAMP(self, interp: IInterpreter): + dt = interp.stack_pop() + result = int(datetime.datetime.timestamp(dt)) + interp.stack_push(result) + + # ( timestamp -- datetime ) + def word_TIMESTAMP_to_DATETIME(self, interp: IInterpreter): + ts = interp.stack_pop() + result = datetime.datetime.fromtimestamp(int(ts)) + interp.stack_push(result) + + # ( str -- datetime ) + def word_STR_to_DATETIME(self, interp: IInterpreter): + string = interp.stack_pop() + + if string is None: + interp.stack_push(None) + return + + result = parser.parse(string) + interp.stack_push(result) + + # ( str -- timestamp ) + def word_STR_to_TIMESTAMP(self, interp: IInterpreter): + string = interp.stack_pop() + + if string is None: + interp.stack_push(None) + return + + datetime_val = parser.parse(string) + result = int(datetime.datetime.timestamp(datetime_val)) + interp.stack_push(result) + + # ( a b -- a+b ) + # ( [a1 a2...] 
-- sum ) + def word_plus(self, interp: IInterpreter): + """Adds two numbers or an array of numbers""" + b = interp.stack_pop() + result = 0 + if isinstance(b, list): + for num in b: + if num is not None: + result += num + else: + a = interp.stack_pop() + if a is None: + a = 0 + if b is None: + b = 0 + result = a + b + interp.stack_push(result) + + # ( a b -- a-b ) + def word_minus(self, interp: IInterpreter): + b = interp.stack_pop() + a = interp.stack_pop() + + if a is None or b is None: + interp.stack_push(None) + return + + # Return seconds for datetime + if isinstance(a, datetime.datetime): + delta = a - b + result = delta.total_seconds() + # Return days for date + elif isinstance(a, datetime.date): + delta = a - b + result = delta.total_seconds() / 60 / 60 / 24 + else: + result = a - b + + interp.stack_push(result) + + # ( a b -- a*b ) + # ( [a1 a2...] -- product ) + def word_times(self, interp: IInterpreter): + b = interp.stack_pop() + result = 1 + numbers = [] + if isinstance(b, list): + numbers = b + else: + a = interp.stack_pop() + numbers = [a, b] + + for num in numbers: + if num is None: + interp.stack_push(None) + return + result *= num + interp.stack_push(result) + + # ( a b -- a/b ) + def word_divide_by(self, interp: IInterpreter): + b = interp.stack_pop() + a = interp.stack_pop() + + if a is None or b is None: + interp.stack_push(None) + return + + if b == 0: + result = None + else: + result = a / b + interp.stack_push(result) + + # ( m n -- m%n ) + def word_MOD(self, interp: IInterpreter): + n = interp.stack_pop() + m = interp.stack_pop() + + if m is None or n is None: + interp.stack_push(None) + return + + interp.stack_push(m % n) + + # ( numbers -- mean ) + def word_MEAN(self, interp: IInterpreter): + numbers = interp.stack_pop() + + if not numbers: + interp.stack_push(0) + return + + if isinstance(numbers, list) and len(numbers) == 1: + interp.stack_push(numbers[0]) + return + + result = statistics.mean(numbers) + interp.stack_push(result) + + # 
( num -- int ) + def word_ROUND(self, interp: IInterpreter): + num = interp.stack_pop() + + if num is None: + interp.stack_push(None) + return + + interp.stack_push(round(num)) + + # ( items -- item ) + def word_MAX(self, interp: IInterpreter): + items = interp.stack_pop() + if not items: + interp.stack_push(None) + return + interp.stack_push(max(items)) + + # ( items -- item ) + def word_MIN(self, interp: IInterpreter): + items = interp.stack_pop() + if not items: + interp.stack_push(None) + return + interp.stack_push(min(items)) + + # ( m n -- bool ) + def word_equal_equal(self, interp: IInterpreter): + n = interp.stack_pop() + m = interp.stack_pop() + interp.stack_push(m == n) + + # ( m n -- bool ) + def word_not_equal(self, interp: IInterpreter): + n = interp.stack_pop() + m = interp.stack_pop() + interp.stack_push(m != n) + + # ( m n -- bool ) + def word_greater_than(self, interp: IInterpreter): + n = interp.stack_pop() + m = interp.stack_pop() + + if m is None or n is None: + interp.stack_push(None) + return + + interp.stack_push(m > n) + + # ( m n -- bool ) + def word_greater_than_or_equal(self, interp: IInterpreter): + n = interp.stack_pop() + m = interp.stack_pop() + + if m is None or n is None: + interp.stack_push(None) + return + + interp.stack_push(m >= n) + + # ( m n -- bool ) + def word_less_than(self, interp: IInterpreter): + n = interp.stack_pop() + m = interp.stack_pop() + + if m is None or n is None: + interp.stack_push(None) + return + + interp.stack_push(m < n) + + # ( m n -- bool ) + def word_less_than_or_equal(self, interp: IInterpreter): + n = interp.stack_pop() + m = interp.stack_pop() + + if m is None or n is None: + interp.stack_push(None) + return + + interp.stack_push(m <= n) + + # ( a b -- bool ) + # ( [a1 a2...] 
-- bool ) + def word_OR(self, interp: IInterpreter): + b = interp.stack_pop() + if isinstance(b, list): + result = any(b) + else: + a = interp.stack_pop() + result = a or b + interp.stack_push(result) + + # ( a b -- bool ) + # ( [a1 a2...] -- bool ) + def word_AND(self, interp: IInterpreter): + b = interp.stack_pop() + if isinstance(b, list): + result = all(b) + else: + a = interp.stack_pop() + result = a and b + interp.stack_push(result) + + # ( a -- bool ) + def word_NOT(self, interp: IInterpreter): + a = interp.stack_pop() + interp.stack_push(not a) + + # ( item items -- bool ) + def word_IN(self, interp: IInterpreter): + items = interp.stack_pop() + item = interp.stack_pop() + if not items: + items = [] + result = item in items + interp.stack_push(result) + + # ( vals required_vals -- bool ) + def word_ANY(self, interp: IInterpreter): + required_vals = interp.stack_pop() + vals = interp.stack_pop() + + if not vals: + vals = [] + if not required_vals: + required_vals = [] + + result = False + + for rv in required_vals: + if rv in vals: + result = True + break + + # If nothing is required, then all values are true + if len(required_vals) == 0: + result = True + interp.stack_push(result) + + # ( vals required_vals -- bool ) + def word_ALL(self, interp: IInterpreter): + required_vals = interp.stack_pop() + vals = interp.stack_pop() + + if not vals: + vals = [] + if not required_vals: + required_vals = [] + + required_set = set(required_vals) + vals_set = set(vals) + intersection_set = required_set.intersection(vals_set) + + result = intersection_set == required_set + interp.stack_push(result) + + # ( item -- bool ) + def word_to_BOOL(self, interp: IInterpreter): + item = interp.stack_pop() + result = False + if item: + result = True + interp.stack_push(result) + + # ( a -- a_int ) + def word_to_INT(self, interp: IInterpreter): + a = interp.stack_pop() + + if a is None: + interp.stack_push(0) + return + + if isinstance(a, list) or isinstance(a, dict): + 
interp.stack_push(len(a)) + return + + result: Union[int, None] + try: + result = int(float(a)) + except ValueError: + result = None + + interp.stack_push(result) + + # ( a -- a_int ) + def word_to_FLOAT(self, interp: IInterpreter): + a = interp.stack_pop() + + if a is None: + interp.stack_push(0.0) + return + + result: Union[float, None] + try: + result = float(a) + except ValueError: + result = None + + interp.stack_push(result) + + # ( low high -- int ) + def word_UNIFORM_RANDOM(self, interp: IInterpreter): + high = interp.stack_pop() + low = interp.stack_pop() + result = random.uniform(low, high) + interp.stack_push(result) + + # ( val start_ranges -- index ) + def word_RANGE_INDEX(self, interp: IInterpreter): + """Returns index of range that value falls into""" + start_ranges = interp.stack_pop() + val = interp.stack_pop() + + # Cap off the value ranges with infinity + start_ranges.append(math.inf) + + if val is None or not start_ranges: + interp.stack_push(None) + return + + if val < start_ranges[0]: + interp.stack_push(None) + return + + result = None + for i in range(len(start_ranges) - 1): + if val >= start_ranges[i] and val < start_ranges[i + 1]: + result = i + break + + interp.stack_push(result) + + # ( -- ) + def word_bang_PUSH_ERROR(self, interp: IInterpreter): + self.flags["push_error"] = True + + # ( -- ) + def word_bang_WITH_KEY(self, interp: IInterpreter): + self.flags["with_key"] = True + + # (comparator -- ) + # + # `comparator` may be a Forthic string or a Python key function + def word_bang_COMPARATOR(self, interp: IInterpreter): + comparator = interp.stack_pop() + self.flags["comparator"] = comparator + + # ( -- ) + def word_bang_PUSH_REST(self, interp: IInterpreter): + self.flags["push_rest"] = True + + # (depth -- ) + # + # `depth` of 0 is the same as a regular MAP + def word_bang_DEPTH(self, interp: IInterpreter): + depth = interp.stack_pop() + self.flags["depth"] = depth + + # ( -- ) + def word_PROFILE_START(self, interp: IInterpreter): + 
interp.start_profiling() + + # ( label -- ) + def word_PROFILE_TIMESTAMP(self, interp: IInterpreter): + label = interp.stack_pop() + interp.add_timestamp(label) + + # ( -- ProfileAnalyzer ) + def word_PROFILE_END(self, interp: IInterpreter): + interp.stop_profiling() + result = None + if interp.cur_word_profile: + interp.cur_word_profile = interp.cur_word_profile.get_parent() + result = ProfileAnalyzer(interp.cur_word_profile) + interp.stack_push(result) + + # ( -- data ) + def word_PROFILE_DATA(self, interp: IInterpreter): + histogram = interp.word_histogram() + timestamps = interp.profile_timestamps() + + result = defaultdict(list) + for val in histogram: + rec = {'word': val['word'], 'count': val['count']} + result['word_counts'].append(rec) + + prev_time = 0.0 + for t in timestamps: + rec = { + 'label': t['label'], + 'time': t['time'], + 'delta': t['time'] - prev_time, + } + prev_time = t['time'] + result['timestamps'].append(rec) + + interp.stack_push(result) + + # ( -- profile_report ) + def word_PROFILE_REPORT(self, interp: IInterpreter): + histogram = interp.word_histogram() + result = '\nWord counts:\n' + result += '\n'.join( + [ + '%30s: %d' % (val['word'], val['count']) + for val in histogram + if val['count'] > 1 + ] + ) + + result += '\n\nTimestamps (sec):\n' + timestamps = interp.profile_timestamps() + + def timestamp_strings(timestamps): + res = [] + prev_time = 0.0 + for t in timestamps: + string = '%30s: %.3f (%.3f)' % ( + t['label'], + t['time'], + t['time'] - prev_time, + ) + prev_time = t['time'] + res.append(string) + return res + + result += '\n'.join(timestamp_strings(timestamps)) + result += '\n' + + interp.stack_push(result) + + # ( -- username ) + def word_CURRENT_USER(self, interp: IInterpreter): + result = getpass.getuser() + interp.stack_push(result) + + # ( markdown -- html) + def word_MARKDOWN_to_HTML(self, interp: IInterpreter): + markdown_content = interp.stack_pop() + result = markdown.markdown(markdown_content, 
extensions=['tables']) + interp.stack_push(result) + + def get_flags(self): + flags = self.flags.copy() + self.flags = {} + return flags + + +def drill_for_value(record, fields): + """Descends into record using an array of fields, returning final value or None""" + result = record + try: + for f in fields: + if result is None: + return result + if isinstance(result, list): + result = result[f] + else: + result = result.get(f) + except Exception: + result = None + return result + + +def run_returning_error(interp, forthic): + result = None + try: + execute(interp, forthic) + except Exception as e: + result = e + return result + + +def foreach(interp, flags): + forthic = interp.stack_pop() + container = interp.stack_pop() + + errors = [] + if not container: + container = [] + + if isinstance(container, list): + items = container + for i in range(len(items)): + item = items[i] + if flags.get('with_key'): + interp.stack_push(i) + interp.stack_push(item) + if flags.get('push_error'): + errors.append(run_returning_error(interp, forthic)) + else: + execute(interp, forthic) + + else: + for k, item in container.items(): + if flags.get('with_key'): + interp.stack_push(k) + interp.stack_push(item) + if flags.get('push_error'): + errors.append(run_returning_error(interp, forthic)) + else: + execute(interp, forthic) + + if flags.get('push_error'): + interp.stack_push(errors) + return + + +def default_json_serialize(obj): + # Python can't serialize datetimes, so we'll default to converting them to timestamps + if isinstance(obj, datetime.datetime): + return int(datetime.datetime.timestamp(obj)) + raise TypeError(f"{obj} not serializable") + + +def execute(interp: IInterpreter, object: str): + if isinstance(object, str): + interp.run(object) + else: + object.execute(interp) + return + + +def execute_returning_error(interp: IInterpreter, object: str) -> Optional[Exception]: + result = None + try: + execute(interp, object) + except Exception as e: + result = e + return result diff 
--git a/forthic-py/src/interfaces.py b/forthic-py/src/interfaces.py new file mode 100644 index 0000000..a11a764 --- /dev/null +++ b/forthic-py/src/interfaces.py @@ -0,0 +1,110 @@ +from typing import Any, Optional + + +class IWord: + """Forthic words can be executed by the interpreter or compiled into Forthic definitions""" + def __init__(self, name: str): + self.name = name + + def execute(self, _interp: 'IInterpreter') -> None: + """Called when a Forthic word is executed by the interpreter + + Words take parameters from the interpreter `stack` and return values to it. + """ + pass + + +class IModule: + """Modules store Forthic words and variables""" + def __init__(self): + self.name = None + + def find_word(self, name: str) -> Optional[IWord]: + """Searches module for a word with the specified `name`""" + pass + + def add_word(self, word: IWord) -> None: + """Adds a `word` to a module""" + pass + + def add_memo_words(self, word: IWord) -> None: + """Adds memo words based on `word` to a module""" + pass + + +class IInterpreter: + """A Forthic interpreter runs Forthic strings + + The interpreter maintains the following: + + * A `stack` for passing parameters between words + * An `app_module` in which the current Forthic application runs + * A `global_module` containing words common to all Forthic applications + + The interpreter has a `dev_mode` property which can change the behavior of certain words + (e.g., `.s` will drop into the debugger if in dev mode). + + The interpreter also maintains some data structures for profiling Forthic code. 
+ """ + def __init__(self): + self.app_module = None + self.cur_module = None + self.stack = None + + # Profiling support + self.cur_word_profile = None + self.profile_timestamps = None + self.word_histogram = None + self.dev_mode = None + + def run(self, string: str): + """Runs a Forthic string in the context of the current module""" + pass + + def run_in_module(self, module: IModule, string: str): + """Runs a Forthic string in the context of the specified `module`""" + pass + + def stack_push(self, value: Any): + """Pushes a value onto the parameter `stack`""" + pass + + def stack_pop(self) -> Any: + """Pops a variable from the parameter `stack`""" + pass + + def module_stack_push(self, module: IModule): + """Pushes a module onto the module stack, making it the current module""" + pass + + def module_stack_pop(self): + """Popping a module from the module stack""" + pass + + def find_module(self, name: str) -> IModule: + """Searches interpreter for a module registered under `name`""" + return IModule() + + def start_profiling(self) -> None: + """Initializes interpreter profiling data to start a profiling run""" + pass + + def add_timestamp(self, label: str) -> None: + """Adds a labeled timestamp during a profiling run""" + pass + + def stop_profiling(self) -> None: + """Stops a profiling run and returns interpreter to normal mode""" + pass + + def count_word(self, w: IWord) -> None: + """Increments count of a word's execution during a profiling run""" + pass + + def start_profile_word(self, word: IWord): + """Notes the start of a word execution during a profiling run""" + pass + + def end_profile_word(self): + """Notes the end of a word execution during a profiling run""" + pass diff --git a/forthic-py/src/interpreter.py b/forthic-py/src/interpreter.py new file mode 100644 index 0000000..9923d6c --- /dev/null +++ b/forthic-py/src/interpreter.py @@ -0,0 +1,399 @@ +import time +import operator +import pytz +import collections +from .tokens import ( + StringToken, + 
CommentToken, + StartArrayToken, + EndArrayToken, + StartModuleToken, + EndModuleToken, + StartDefinitionToken, + EndDefinitionToken, + StartMemoToken, + WordToken, + EOSToken, + Token, +) +from .tokenizer import Tokenizer + +from .module import Module, Word, PushValueWord, DefinitionWord +from .global_module import GlobalModule +from .profile import WordProfile +from .interfaces import IInterpreter, IModule, IWord +from typing import List, Any, Dict, Optional + + +# ----- Errors ----------------------------------------------------------------------------------------------- +class InterpreterError(RuntimeError): + pass + + +class UnknownModuleError(InterpreterError): + def __init__(self, name: str): + super().__init__(f"Can't find module: '{name}'") + + +class UnknownTokenError(InterpreterError): + def __init__(self, token: Token): + super().__init__(f'Unknown token: {token}') + + +class NestedDefinitionError(InterpreterError): + def __init__(self): + super().__init__("Can't have nested definitions") + + +class UnmatchedEndDefinitionError(InterpreterError): + def __init__(self): + super().__init__('Unmatched end definition') + + +class UnknownWordError(InterpreterError): + def __init__(self, word_name: str): + super().__init__(f"Unknown word: '{word_name}'") + + +# ----- Word Types ------------------------------------------------------------------------------------------- +class EndArrayWord(Word): + """This represents the end of an array""" + def __init__(self): + super().__init__(']') + + def execute(self, interp: IInterpreter) -> None: + items: List[Any] = [] + item = interp.stack_pop() + while not isinstance(item, StartArrayToken): + items.append(item) + item = interp.stack_pop() + items.reverse() + interp.stack_push(items) + + +class StartModuleWord(Word): + """This indicates the start of a module + + See `docs/ARCHITECTURE.md` for more details on modules. 
class AppModule(Module):
    """The AppModule contains the words and variables of a Forthic application

    The app module is a special module. This is the first module on the module stack. All applications start
    here. It is the only module where `USE-MODULE` can be called. It is the only nameless module.
    """
    def __init__(self, interp: 'Interpreter'):
        super().__init__('', interp)
        # Screens map names to chunks of Forthic code; defaultdict(str) means an
        # unknown screen name reads back as '' rather than raising KeyError
        self.screens: Dict[str, str] = collections.defaultdict(str)

    def set_screen(self, name: str, content: str) -> None:
        """Stores the Forthic code `content` under the screen `name`"""
        self.screens[name] = content

    def get_screen(self, name: str) -> str:
        """Returns the Forthic code stored under `name` ('' if never set)"""
        return self.screens[name]
+ """ + def __init__(self, timezone=None): + if not timezone: + timezone = pytz.timezone('US/Pacific') + self.timezone = timezone + self.stack: List[Any] = [] + self.global_module = GlobalModule(self, self.timezone) + self.app_module = AppModule(self) + self.module_stack: List[IModule] = [self.app_module] + self.registered_modules: Dict[str, Module] = {} + self.is_compiling: bool = False + self.is_memo_definition: bool = False + self.cur_definition: Optional[DefinitionWord] = None + self._dev_mode: bool = False + + # Profiling support + self.word_counts: Dict[IWord, int] = collections.defaultdict(int) + self.is_profiling: bool = False + self.start_profile_time: Optional[float] = None + self.timestamps: List[Any] = [] + self.cur_word_profile: WordProfile = None + + @property + def dev_mode(self) -> bool: + """This is used to indicate that things like debugging are ok""" + return self._dev_mode + + @dev_mode.setter + def dev_mode(self, dev_mode: bool): + self._dev_mode = dev_mode + + def run(self, string: str) -> None: + """Interprets a Forthic string, executing words one-at-a-time until the end of the string""" + tokenizer = Tokenizer(string) + token = tokenizer.next_token() + while not isinstance(token, EOSToken): + self.handle_token(token) + token = tokenizer.next_token() + + def run_in_module(self, module: IModule, string: str) -> None: + """Runs a Forthic string in the context of a given module""" + self.module_stack_push(module) + self.run(string) + self.module_stack.pop() + + def cur_module(self) -> IModule: + """The top of the module stack is the currently active module""" + result = self.module_stack[-1] + return result + + def find_module(self, name: str) -> Module: + """Returns the module registered under the specified `name`""" + if name not in self.registered_modules: + raise UnknownModuleError(name) + result = self.registered_modules[name] + return result + + def stack_push(self, val: Any) -> None: + """Pushes a value onto the Forth stack""" + 
self.stack.append(val) + + def stack_pop(self) -> Any: + """Pops a value from the Forth stack and returns it""" + result = self.stack.pop() + return result + + def module_stack_push(self, module: IModule) -> None: + """Makes the specified `module` the active module""" + self.module_stack.append(module) + + def module_stack_pop(self) -> IModule: + """Removes the current module from the stack and makes the next module the current module""" + return self.module_stack.pop() + + def register_module(self, module_class): + """Registers an instance of Module with the interpreter + + Modules are typically registered at code time. This is where new capabilities can be made available + to Forthic programs. + """ + module = module_class(self) + self.registered_modules[module.name] = module + + def find_word(self, name: str) -> Optional[IWord]: + """Searches the interpreter for a word + + The module stack is searched top down. If the words cannot be found, the global module is searched. + Note that the bottom of the module stack is always the application module. 
+ """ + modules = reversed(self.module_stack) + result = None + for m in modules: + result = m.find_word(name) + if result: + break + + if not result: + result = self.global_module.find_word(name) + return result + + # -------------------------------------------------------------------------- + # Profiling support + + def start_profiling(self) -> None: + """Clears word counts and starts profiling word executions""" + self.is_profiling = True + self.timestamps = [] + self.start_profile_time = time.perf_counter() + self.add_timestamp('START') + self.word_counts = collections.defaultdict(int) + + def add_timestamp(self, label: str) -> None: + """Adds a timestamped label to a profiling run""" + if not self.is_profiling or not self.start_profile_time: + return + self.timestamps.append( + { + 'label': label, + 'time': time.perf_counter() - self.start_profile_time, + } + ) + + def count_word(self, w: IWord) -> None: + """If profiling, count word""" + if self.is_profiling: + self.word_counts[w] += 1 + + def start_profile_word(self, word: IWord) -> None: + """Used to mark the start of a word execution during a profiling run""" + if not self.is_profiling: + return + + word_profile = WordProfile( + self.cur_word_profile, self.cur_module(), word + ) + self.cur_word_profile = word_profile + + def end_profile_word(self) -> None: + """Used to mark the end of a word execution during a profiling run""" + if not self.cur_word_profile: + return + + self.cur_word_profile.end_profile() + parent = self.cur_word_profile.get_parent() + if parent: + self.cur_word_profile = parent + + def stop_profiling(self) -> None: + """Stops profiling""" + self.add_timestamp('END') + self.is_profiling = False + + def word_histogram(self) -> List[Any]: + """Returns a list of counts in descending order""" + items = [ + {'word': w.name, 'count': c} for w, c in self.word_counts.items() + ] + result = sorted(items, key=operator.itemgetter('count'), reverse=True) + return result + + def 
profile_timestamps(self) -> List[Any]: + return self.timestamps + + # -------------------------------------------------------------------------- + # Handle tokens + + def handle_token(self, token: Token) -> None: + """Called to handle each token from the Tokenizer""" + if isinstance(token, StringToken): + self.handle_string_token(token) + elif isinstance(token, CommentToken): + self.handle_comment_token(token) + elif isinstance(token, StartArrayToken): + self.handle_start_array_token(token) + elif isinstance(token, EndArrayToken): + self.handle_end_array_token(token) + elif isinstance(token, StartModuleToken): + self.handle_start_module_token(token) + elif isinstance(token, EndModuleToken): + self.handle_end_module_token(token) + elif isinstance(token, StartDefinitionToken): + self.handle_start_definition_token(token) + elif isinstance(token, StartMemoToken): + self.handle_start_memo_token(token) + elif isinstance(token, EndDefinitionToken): + self.handle_end_definition_token(token) + elif isinstance(token, WordToken): + self.handle_word_token(token) + else: + raise UnknownTokenError(token) + + def handle_string_token(self, token: StringToken) -> None: + self.handle_word(PushValueWord('', token.string)) + + def handle_start_module_token(self, token: StartModuleToken) -> None: + """Start/end module tokens are treated as IMMEDIATE words *and* are compiled""" + word = StartModuleWord(token.name) + if self.is_compiling: + if not self.cur_definition: + raise InterpreterError("Interpreter is compiling, but there is no current definition") + self.cur_definition.add_word(word) + + # NOTE: We execute the word within a definition so we can do lookups during compile + self.count_word(word) + word.execute(self) + + def handle_end_module_token(self, token: EndModuleToken) -> None: + word = EndModuleWord() + if self.is_compiling: + if not self.cur_definition: + raise InterpreterError("Interpreter is compiling, but there is no current definition") + 
self.cur_definition.add_word(word) + + # NOTE: We execute the word within a definition so we can do lookups during compile + self.count_word(word) + word.execute(self) + + def handle_start_array_token(self, token: StartArrayToken) -> None: + self.handle_word(PushValueWord('', token)) + + def handle_end_array_token(self, token: EndArrayToken) -> None: + self.handle_word(EndArrayWord()) + + def handle_comment_token(self, token: CommentToken) -> None: + pass + + def handle_start_definition_token(self, token: StartDefinitionToken) -> None: + if self.is_compiling: + raise NestedDefinitionError() + self.cur_definition = DefinitionWord(token.name) + self.is_compiling = True + self.is_memo_definition = False + + def handle_start_memo_token(self, token: StartMemoToken) -> None: + if self.is_compiling: + raise NestedDefinitionError() + self.cur_definition = DefinitionWord(token.name) + self.is_compiling = True + self.is_memo_definition = True + + def handle_end_definition_token(self, token: EndDefinitionToken) -> None: + if not self.is_compiling: + raise UnmatchedEndDefinitionError() + if not self.cur_definition: + raise InterpreterError("Cannot finish definition because no 'cur_definition'") + + if self.is_memo_definition: + self.cur_module().add_memo_words(self.cur_definition) + else: + self.cur_module().add_word(self.cur_definition) + self.is_compiling = False + + def handle_word_token(self, token: WordToken) -> None: + word = self.find_word(token.name) + if word is None: + raise UnknownWordError(token.name) + + self.handle_word(word) + + def handle_word(self, word: IWord) -> None: + if self.is_compiling: + if not self.cur_definition: + raise InterpreterError("Interpreter is compiling, but there is no current definition") + self.cur_definition.add_word(word) + else: + self.count_word(word) + word.execute(self) diff --git a/forthic-py/src/module.py b/forthic-py/src/module.py new file mode 100644 index 0000000..5b0e915 --- /dev/null +++ b/forthic-py/src/module.py @@ -0,0 
class DefinitionWord(Word):
    """This represents a word that is defined from other words

    A definition looks like this:
    ```
    : WORD-NAME WORD1 WORD2 WORD3;
    ```
    The name of the defined word is `WORD-NAME`. When it is executed, `WORD1`, `WORD2`, and `WORD3` are
    executed in that order.
    """
    def __init__(self, name: str):
        super().__init__(name)
        self.words: List[IWord] = []

    def add_word(self, word: IWord):
        """Adds a new word to the definition"""
        self.words.append(word)

    def execute(self, interp: IInterpreter) -> None:
        """Executes each word of the definition, in order, bracketing each with profiling calls"""
        for w in self.words:
            interp.start_profile_word(w)
            try:
                w.execute(interp)
            finally:
                # Close the profiling bracket even when the word raises; otherwise a
                # failed word leaves the profiler pointing at a stale WordProfile
                interp.end_profile_word()
class ImportedWord(Word):
    """This represents words imported from other modules

    Words imported from other modules usually have their module name as a prefix (e.g., jira.SEARCH), but
    it's also possible to use a different prefix, or none at all.
    """
    def __init__(self, module_word: IWord, prefix: str, module: 'Module'):
        # An empty prefix leaves the word name unqualified; otherwise 'prefix.name'
        qualified = module_word.name if prefix == '' else f'{prefix}.{module_word.name}'
        super().__init__(qualified)
        self.module_word = module_word
        self.imported_module = module

    def execute(self, interp: IInterpreter) -> None:
        """Runs the underlying word with its home module as the current module"""
        interp.module_stack_push(self.imported_module)
        self.module_word.execute(interp)
        interp.module_stack_pop()
    def add_memo_words(self, word: IWord) -> None:
        """Adds memo words to a module based on a core definition word

        For a word named "MY-MEMO", this adds the following words:
        * MY-MEMO (memoizes the execution of the provided definition word)
        * MY-MEMO! (re-runs MY-MEMO to update its memoized value)
        * MY-MEMO!@ (runs MY-MEMO! and then returns the new memo value)
        """
        memo_word = ModuleMemoWord(word)
        self.words.append(memo_word)
        self.words.append(ModuleMemoBangWord(memo_word))
        self.words.append(ModuleMemoBangAtWord(memo_word))
+ def add_variable(self, name: str, value: Any = None) -> None: + """Adds variable to module, noop if variable exists""" + if name not in self.variables: + self.variables[name] = Variable(value) + + def initialize(self, interp: IInterpreter) -> None: + """When a module is imported, its `forthic_code` must be executed in order to fully define its words""" + interp.run_in_module(self, self.forthic_code) + + def register_module(self, module_name: str, module: 'Module') -> None: + """Registers a module by name""" + self.modules[module_name] = module + + def import_module(self, module_name: str, module: 'Module', interp: IInterpreter) -> None: + """This is used to import a module for use by another module via Python + + Typically, modules are independent. But in some cases, a module may depend on other modules. When this + is the case, `import_module` is used to import the modules at code time. + """ + # If module has already been registered, use it + if module_name in self.modules: + new_module = self.modules[module_name] + else: + new_module = module + new_module.initialize(interp) + + words = new_module.exportable_words() + for word in words: + self.add_word(ImportedWord(word, module_name, new_module)) + + self.register_module(module_name, new_module) + + def find_word(self, name: str) -> Optional[IWord]: + """Searches module for a word""" + result = self.find_dictionary_word(name) + if result is None: + result = self.find_variable(name) + return result + + def find_dictionary_word(self, word_name: str) -> Optional[IWord]: + """Looks up word in module, returning None if not found""" + indexes = list(reversed(range(len(self.words)))) + for i in indexes: + w = self.words[i] + if w.name == word_name: + return w + return None + + def find_variable(self, varname: str) -> Optional[PushValueWord]: + """Returns variable""" + variable = self.variables.get(varname) + result = None + if variable: + result = PushValueWord(varname, variable) + return result diff --git 
class WordProfile:
    """Stores information about a word's execution time

    This also stores a list of WordProfiles for words that are called by the word in question
    """

    def __init__(self, parent: Optional['WordProfile'], module: IModule, word: IWord):
        # `parent` is None for the root of a profiling call tree
        self.parent = parent
        self.module = module
        self.word = word
        self.start_time = time.perf_counter()
        self.end_time: Optional[float] = None  # Set by end_profile()
        self.word_profiles: List['WordProfile'] = []  # Profiles of words called by this word
        self.index: int = -1  # Display index; assigned by ProfileAnalyzer.print()

        # Register this profile as a callee of its parent's profile
        if self.parent:
            self.parent.add_word_profile(self)

    def add_word_profile(self, word_profile: 'WordProfile') -> None:
        """Records the profile of a word called during this word's execution"""
        self.word_profiles.append(word_profile)

    def get_key(self) -> str:
        """Returns a 'module:word' label identifying this profile"""
        result = f'{self.module.name}:{self.word.name}'
        return result

    def get_parent(self) -> Optional['WordProfile']:
        """Returns the caller's WordProfile (None for the root)"""
        return self.parent

    def end_profile(self) -> None:
        """Marks the end of the word's execution"""
        self.end_time = time.perf_counter()

    def get_duration_s(self) -> Optional[float]:
        """Returns the execution time in seconds, or None if the profile is still open"""
        if self.end_time is None:
            return None

        result = self.end_time - self.start_time
        return result
+ * up() This drills up to the current word's parent and calls print() + * down(index) This drills down to a word at the specified index (see print()) and calls print() + """ + + def __init__(self, word_profile: WordProfile): + self.word_profile: WordProfile = word_profile + self.cur_profile: WordProfile = word_profile + self.num_called: int = 10 # Limits number of called words to display + + def down(self, index: int) -> None: + self.cur_profile = self.cur_profile.word_profiles[index] + self.print() + + def up(self) -> None: + self.cur_profile = self.cur_profile.get_parent() + self.print() + + def print(self) -> None: + duration = self.cur_profile.get_duration_s() + if not duration: + print("Nothing to report") + return + + print( + '%s: %.3f s' + % (self.cur_profile.get_key(), duration) + ) + for i, p in enumerate(self.cur_profile.word_profiles): + p.index = i + + def get_duration(profile): + res = profile.get_duration_s() + if not res: + res = 0 + return res + + def get_max_key_len(profiles): + res = 0 + for p in profiles: + key = p.get_key() + if len(key) > res: + res = len(key) + return res + + sorted_profiles = sorted( + self.cur_profile.word_profiles, key=get_duration + ) + sorted_profiles.reverse() + format_string = ( + f' [%d] %{get_max_key_len(sorted_profiles) + 1}s: %.3f s' + ) + for p in sorted_profiles[: self.num_called]: + print(format_string % (p.index, p.get_key(), get_duration(p))) diff --git a/forthic-py/src/tokenizer.py b/forthic-py/src/tokenizer.py new file mode 100644 index 0000000..3a2dd6d --- /dev/null +++ b/forthic-py/src/tokenizer.py @@ -0,0 +1,201 @@ +from .tokens import StartArrayToken, EndArrayToken, StartDefinitionToken, EndDefinitionToken,\ + StartMemoToken, CommentToken, StartModuleToken, EndModuleToken, StringToken, WordToken, EOSToken, Token +from typing import List + + +# 'Data Link Escape' +DLE = chr(16) + + +class TokenizerError(RuntimeError): + pass + + +class InvalidDefinitionError(TokenizerError): + def __init__(self, msg: 
str): + super().__init__(msg) + + +class UnterminatedStringError(TokenizerError): + def __init__(self, msg: str): + super().__init__(msg) + + +class Tokenizer: + """A Tokenizer is constructed with an input string and returns the next available + token on request. + """ + def __init__(self, string: str): + self.input_string: str = string + self.position: int = 0 + self.whitespace: List[str] = [' ', '\t', '\n', '\r', '(', ')'] + self.quote_chars: List[str] = ['"', "'", '^', DLE] + self.token_string: str = '' # Token string currently gathered from the input string + + def next_token(self): + self.clear_token_string() + return self.transition_from_START() + + # ======= + # Internal functions + + def clear_token_string(self): + self.token_string = '' + + def is_whitespace(self, char: str) -> bool: + return char in self.whitespace + + def is_quote(self, char: str) -> bool: + return char in self.quote_chars + + def is_triple_quote(self, index: int, char: str) -> bool: + if not self.is_quote(char): + return False + if index + 2 >= len(self.input_string): + return False + return self.input_string[index + 1] == char and self.input_string[index + 2] == char + + def is_start_memo(self, index: int) -> bool: + if index + 1 >= len(self.input_string): + return False + result = self.input_string[index] == "@" and self.input_string[index + 1] == ":" + return result + + def transition_from_START(self) -> Token: + """Tokenization is implemented as a state machine. This is the entry point. 
+ """ + while self.position < len(self.input_string): + char = self.input_string[self.position] + self.position += 1 + if self.is_whitespace(char): + pass + elif char == '#': + return self.transition_from_COMMENT() + elif char == ':': + return self.transition_from_START_DEFINITION() + elif self.is_start_memo(self.position - 1): + self.position += 1 # Skip over ":" in "@:" + return self.transition_from_START_MEMO() + elif char == ';': + return EndDefinitionToken() + elif char == '[': + return StartArrayToken() + elif char == ']': + return EndArrayToken() + elif char == '{': + return self.transition_from_GATHER_MODULE() + elif char == '}': + return EndModuleToken() + elif self.is_triple_quote(self.position - 1, char): + self.position += 2 # Skip over 2nd and 3rd quote chars + return self.transition_from_GATHER_TRIPLE_QUOTE_STRING(char) + elif self.is_quote(char): + return self.transition_from_GATHER_STRING(char) + else: + self.position -= 1 # Back up to beginning of word + return self.transition_from_GATHER_WORD() + return EOSToken() + + def transition_from_COMMENT(self) -> CommentToken: + while self.position < len(self.input_string): + char = self.input_string[self.position] + self.token_string += char + self.position += 1 + if char == '\n': + break + return CommentToken(self.token_string) + + def transition_from_START_DEFINITION(self) -> StartDefinitionToken: + while self.position < len(self.input_string): + char = self.input_string[self.position] + self.position += 1 + + if self.is_whitespace(char): + continue + else: + self.position -= 1 + return self.transition_from_GATHER_DEFINITION_NAME() + + raise InvalidDefinitionError("Got EOS in START_DEFINITION") + + def transition_from_START_MEMO(self) -> StartMemoToken: + while self.position < len(self.input_string): + char = self.input_string[self.position] + self.position += 1 + + if self.is_whitespace(char): + continue + else: + self.position -= 1 + return self.transition_from_GATHER_MEMO_NAME() + + raise 
InvalidDefinitionError("Got EOS in START_MEMO") + + def gather_definition_name(self) -> None: + while self.position < len(self.input_string): + char = self.input_string[self.position] + self.position += 1 + if self.is_whitespace(char): + break + elif self.is_quote(char): + raise InvalidDefinitionError("Definitions can't have quotes in them") + elif char in ['[', ']', '{', '}']: + raise InvalidDefinitionError(f"Definitions can't have '{char}' in them") + else: + self.token_string += char + return + + def transition_from_GATHER_DEFINITION_NAME(self) -> StartDefinitionToken: + self.gather_definition_name() + return StartDefinitionToken(self.token_string) + + def transition_from_GATHER_MEMO_NAME(self) -> StartMemoToken: + self.gather_definition_name() + return StartMemoToken(self.token_string) + + def transition_from_GATHER_MODULE(self) -> StartModuleToken: + while self.position < len(self.input_string): + char = self.input_string[self.position] + self.position += 1 + if self.is_whitespace(char): + break + elif char == '}': + self.position -= 1 + break + else: + self.token_string += char + return StartModuleToken(self.token_string) + + def transition_from_GATHER_TRIPLE_QUOTE_STRING(self, string_delimiter: str) -> StringToken: + while self.position < len(self.input_string): + char = self.input_string[self.position] + if char == string_delimiter and self.is_triple_quote(self.position, char): + self.position += 3 + return StringToken(self.token_string) + else: + self.position += 1 + self.token_string += char + raise UnterminatedStringError(f"Unterminated triple quoted string ({string_delimiter*3})") + + def transition_from_GATHER_STRING(self, string_delimiter: str) -> StringToken: + while self.position < len(self.input_string): + char = self.input_string[self.position] + self.position += 1 + if char == string_delimiter: + return StringToken(self.token_string) + else: + self.token_string += char + raise UnterminatedStringError(f"Unterminated string ({string_delimiter}), 
{self.token_string}") + + def transition_from_GATHER_WORD(self) -> WordToken: + while self.position < len(self.input_string): + char = self.input_string[self.position] + self.position += 1 + if self.is_whitespace(char): + break + if char in [';', '[', ']', '}']: + self.position -= 1 + break + else: + self.token_string += char + return WordToken(self.token_string) diff --git a/forthic-py/src/tokens.py b/forthic-py/src/tokens.py new file mode 100644 index 0000000..1410d3b --- /dev/null +++ b/forthic-py/src/tokens.py @@ -0,0 +1,52 @@ +class Token: + pass + + +class StringToken(Token): + def __init__(self, string: str): + self.string: str = string + + +class CommentToken(Token): + def __init__(self, string: str): + self.string: str = string + + +class StartArrayToken(Token): + pass + + +class EndArrayToken(Token): + pass + + +class StartModuleToken(Token): + def __init__(self, name: str): + self.name: str = name + + +class EndModuleToken(Token): + pass + + +class StartDefinitionToken(Token): + def __init__(self, name: str): + self.name: str = name + + +class EndDefinitionToken(Token): + pass + + +class StartMemoToken(Token): + def __init__(self, name: str): + self.name: str = name + + +class WordToken(Token): + def __init__(self, name: str): + self.name: str = name + + +class EOSToken(Token): + pass diff --git a/forthic-py/tests/__init__.py b/forthic-py/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/forthic-py/tests/modules/datasets_data/.gitignore b/forthic-py/tests/modules/datasets_data/.gitignore new file mode 100644 index 0000000..c0190e1 --- /dev/null +++ b/forthic-py/tests/modules/datasets_data/.gitignore @@ -0,0 +1 @@ +datasets \ No newline at end of file diff --git a/forthic-py/tests/modules/datasets_data/README.md b/forthic-py/tests/modules/datasets_data/README.md new file mode 100644 index 0000000..f2ce076 --- /dev/null +++ b/forthic-py/tests/modules/datasets_data/README.md @@ -0,0 +1,3 @@ +# README.md + +This is a place to store 
class ServerResponse:
    """Minimal stand-in for a `requests` response wrapping a canned JSON string"""
    def __init__(self, string, status_code=200):
        self.json_string = string
        self.status_code = status_code
        self.text = ""
        # Mirrors requests.Response.ok: truthy for any non-error (< 300) status
        self.ok = status_code < 300

    def json(self):
        """Parses and returns the canned JSON payload"""
        return json.loads(self.json_string)
ServerResponse("null", 204) + else: + raise Exception(f"Unknown route: {api_url}") + return result + + +REST_API_2_FIELD_RESPONSE = ''' +[{"id":"issuekey","name":"Key","custom":false,"orderable":false,"navigable":true,"searchable":false, +"clauseNames":["id","issue","issuekey","key"]}, +{"id":"assignee","name":"Assignee","custom":false,"orderable":true,"navigable":true,"searchable":true, +"clauseNames":["assignee"],"schema":{"type":"user","system":"assignee"}}, +{"id":"summary","name":"Summary","custom":false,"orderable":true,"navigable":true,"searchable":true, +"clauseNames":["summary"],"schema":{"type":"string","system":"summary"}}, +{"id":"project","name":"Project","custom":false,"orderable":false,"navigable":true,"searchable":true, +"clauseNames":["project"],"schema":{"type":"project","system":"project"}}, +{"id":"reporter","name":"Reporter","custom":false,"orderable":true,"navigable":true,"searchable":true, +"clauseNames":["reporter"],"schema":{"type":"user","system":"reporter"}}, +{"id":"issuetype","name":"Issue Type","custom":false,"orderable":true,"navigable":true,"searchable":true, +"clauseNames":["issuetype","type"],"schema":{"type":"issuetype","system":"issuetype"}}, +{"id":"customfield_10460","name":"Risk_Factor","custom":true,"orderable":true,"navigable":true,"searchable":true, +"clauseNames":["cf[10460]","Risk_Factor"],"schema":{"type":"option","custom":"com.atlassian.jira.plugin.system.customfieldtypes:select","customId":10460}}, +{"id":"timespent","name":"Time Spent","custom":false,"orderable":false,"navigable":true,"searchable":false, +"clauseNames":["timespent"],"schema":{"type":"number","system":"timespent"}}] +''' + +SEARCH_RESPONSE1 = ''' +{ + "expand": "schema,names", + "startAt": 0, + "maxResults": 200, + "total": 2, + "issues": [ + { + "expand": "operations,versionedRepresentations,editmeta,changelog,renderedFields", + "id": "15791174", + "self": "https://testcontext/rest/api/2/issue/15791174", + "key": "SAMPLE-1234", + "fields": { + 
"summary": "Forthic ask", + "assignee": { + "self": "https://testcontext/rest/api/2/user?username=testuser", + "name": "testuser", + "key": "testuser", + "emailAddress": "testuser@testcontext", + "avatarUrls": { + "48x48": "https://testcontext/secure/useravatar?avatarId=10172", + "24x24": "https://testcontext/secure/useravatar?size=small&avatarId=10172", + "16x16": "https://testcontext/secure/useravatar?size=xsmall&avatarId=10172", + "32x32": "https://testcontext/secure/useravatar?size=medium&avatarId=10172" + }, + "displayName": "Test User", + "active": true, + "timeZone": "America/Los_Angeles" + } + } + }, + { + "expand": "operations,versionedRepresentations,editmeta,changelog,renderedFields", + "id": "15752784", + "self": "https://testcontext/rest/api/2/issue/15752784", + "key": "SAMPLE-1235", + "fields": { + "summary": "Forthic report", + "assignee": { + "self": "https://testcontext/rest/api/2/user?username=testuser", + "name": "testuser", + "key": "testuser", + "emailAddress": "testuser@testcontext", + "avatarUrls": { + "48x48": "https://testcontext/secure/useravatar?avatarId=10172", + "24x24": "https://testcontext/secure/useravatar?size=small&avatarId=10172", + "16x16": "https://testcontext/secure/useravatar?size=xsmall&avatarId=10172", + "32x32": "https://testcontext/secure/useravatar?size=medium&avatarId=10172" + }, + "displayName": "Test User", + "active": true, + "timeZone": "America/Los_Angeles" + } + } + }] +} +''' + +VOTE_RESPONSE = ''' +{ + "self": "https://testcontext/rest/api/2/issue/SAMPLE-101/votes", + "votes": 2, + "hasVoted": false, + "voters": [ + { + "self": "https://testcontext/rest/api/2/user?username=user1", + "key": "user1", + "name": "user1", + "avatarUrls": { + "48x48": "https://testcontext/secure/useravatar?ownerId=user1&avatarId=14334", + "24x24": "https://testcontext/secure/useravatar?size=small&ownerId=user1&avatarId=14334", + "16x16": "https://testcontext/secure/useravatar?size=xsmall&ownerId=user1&avatarId=14334", + "32x32": 
"https://testcontext/secure/useravatar?size=medium&ownerId=user1&avatarId=14334" + }, + "displayName": "User User1", + "active": false + }, + { + "self": "https://testcontext/rest/api/2/user?username=user2", + "key": "user2", + "name": "user2", + "avatarUrls": { + "48x48": "https://testcontext/secure/useravatar?ownerId=user2&avatarId=13788", + "24x24": "https://testcontext/secure/useravatar?size=small&ownerId=user2&avatarId=13788", + "16x16": "https://testcontext/secure/useravatar?size=xsmall&ownerId=user2&avatarId=13788", + "32x32": "https://testcontext/secure/useravatar?size=medium&ownerId=user2&avatarId=13788" + }, + "displayName": "User User2", + "active": false + } + ] +}''' + +CHANGELOG_RESPONSE = ''' +{ + "expand": "renderedFields,names,schema,operations,editmeta,changelog,versionedRepresentations", + "id": "15117861", + "self": "https://testcontext/rest/api/2/issue/15117861", + "key": "SAMPLE-10112", + "fields": { + "customfield_10460": { + "self": "https://testcontext/rest/api/2/customFieldOption/32077", + "value": "Red", + "id": "32077" + }, + "created": "2020-07-25T01:36:24.000+0000" + }, + "changelog": { + "startAt": 0, + "maxResults": 3, + "total": 3, + "histories": [ + { + "id": "82031758", + "author": { + "self": "https://testcontext/rest/api/2/user?username=user2", + "name": "user2", + "key": "user2", + "emailAddress": "user2@linkedin.com", + "avatarUrls": { + "48x48": "https://testcontext/secure/useravatar?avatarId=10172", + "24x24": "https://testcontext/secure/useravatar?size=small&avatarId=10172", + "16x16": "https://testcontext/secure/useravatar?size=xsmall&avatarId=10172", + "32x32": "https://testcontext/secure/useravatar?size=medium&avatarId=10172" + }, + "displayName": "User User2", + "active": true, + "timeZone": "America/Los_Angeles" + }, + "created": "2020-07-25T01:36:25.000+0000", + "items": [ + { + "field": "Link", + "fieldtype": "jira", + "from": null, + "fromString": null, + "to": "SAMPLE-9465", + "toString": "This issue cloned from 
SAMPLE-9465" + } + ] + }, + { + "id": "82031773", + "author": { + "self": "https://testcontext/rest/api/2/user?username=user2", + "name": "user2", + "key": "user2", + "emailAddress": "user2@linkedin.com", + "avatarUrls": { + "48x48": "https://testcontext/secure/useravatar?avatarId=10172", + "24x24": "https://testcontext/secure/useravatar?size=small&avatarId=10172", + "16x16": "https://testcontext/secure/useravatar?size=xsmall&avatarId=10172", + "32x32": "https://testcontext/secure/useravatar?size=medium&avatarId=10172" + }, + "displayName": "User User2", + "active": true, + "timeZone": "America/Los_Angeles" + }, + "created": "2020-07-25T01:38:46.000+0000", + "items": [ + { + "field": "Risk_Factor", + "fieldtype": "custom", + "from": "32078", + "fromString": "Blue", + "to": "32075", + "toString": "Green" + }, + { + "field": "Target start", + "fieldtype": "custom", + "from": "2020-04-01", + "fromString": "1/Apr/20", + "to": "2020-06-22", + "toString": "22/Jun/20" + }, + { + "field": "assignee", + "fieldtype": "jira", + "from": "user3", + "fromString": "User User3", + "to": "user4", + "toString": "User User4" + }, + { + "field": "duedate", + "fieldtype": "jira", + "from": "2020-05-21", + "fromString": "2020-05-21 00:00:00.0", + "to": "2020-08-28", + "toString": "2020-08-28 00:00:00.0" + }, + { + "field": "labels", + "fieldtype": "jira", + "from": null, + "fromString": "fy20q4", + "to": null, + "toString": "fy20q4 fy21q1" + } + ] + }, + { + "id": "82031777", + "author": { + "self": "https://testcontext/rest/api/2/user?username=user2", + "name": "user2", + "key": "user2", + "emailAddress": "user2@linkedin.com", + "avatarUrls": { + "48x48": "https://testcontext/secure/useravatar?avatarId=10172", + "24x24": "https://testcontext/secure/useravatar?size=small&avatarId=10172", + "16x16": "https://testcontext/secure/useravatar?size=xsmall&avatarId=10172", + "32x32": "https://testcontext/secure/useravatar?size=medium&avatarId=10172" + }, + "displayName": "User User2", + 
def get_data_dir():
    """Return the directory holding test data for the datasets module.

    NOTE(review): this path still points at the old `tests/tests_py/v3`
    layout even though this commit moves the file under
    `forthic-py/tests/modules` — confirm against the test runner's CWD.
    """
    return f"{os.getcwd()}/tests/tests_py/v3/modules/datasets_data"


def get_dataset_file(dataset_name):
    """Return the path of the `.dataset` file for `dataset_name`."""
    return f"{get_data_dir()}/datasets/{dataset_name}.dataset"


def load_dataset(dataset_name):
    """Load and return the JSON contents of the named dataset file."""
    with open(get_dataset_file(dataset_name)) as f:
        # json.load streams from the file handle (no intermediate string)
        return json.load(f)


def clear_dataset(dataset_name):
    """Delete the named dataset file; silently do nothing if it is absent."""
    # EAFP: avoids the isfile/remove race of the check-then-delete form
    try:
        os.remove(get_dataset_file(dataset_name))
    except FileNotFoundError:
        pass


def get_interp():
    """Return an Interpreter with the datasets module loaded and its CWD set."""
    result = Interpreter()
    result.register_module(DatasetsModule)
    result.run(f"['datasets'] USE-MODULES '{get_data_dir()}' datasets.CWD!")
    return result


class TestDatasetsModule(unittest.TestCase):
    """Exercises DATASET!, DATASET, and RECORDS against files on disk."""

    def setUp(self):
        # Start each test from a clean slate
        clear_dataset("greek")
        self.interp = get_interp()

    def test_DATASET_bang(self):
        # Test: Store data
        dataset = {
            "alpha": [1, 2, 3],
            "beta": [4, 5, 6]
        }
        self.interp.stack_push(dataset)
        self.interp.run("'greek' datasets.DATASET!")

        loaded_data = load_dataset("greek")
        self.assertDictEqual(dataset, loaded_data)

        # Test: Add data to existing dataset
        dataset = {
            "gamma": [7, 8, 9]
        }
        self.interp.stack_push(dataset)
        self.interp.run("'greek' datasets.DATASET!")
        loaded_data = load_dataset("greek")
        modified_dataset = {
            "alpha": [1, 2, 3],
            "beta": [4, 5, 6],
            "gamma": [7, 8, 9]
        }
        self.assertDictEqual(modified_dataset, loaded_data)

        # Test: Overwrite existing dataset
        dataset = {
            "delta": [10, 11, 12]
        }
        self.interp.stack_push(dataset)
        self.interp.run("'greek' datasets.!OVERWRITE datasets.DATASET!")
        loaded_data = load_dataset("greek")
        new_dataset = {
            "delta": [10, 11, 12]
        }
        self.assertDictEqual(new_dataset, loaded_data)

    def test_DATASET(self):
        # Store dataset
        dataset = {
            "alpha": [1, 2, 3],
            "beta": [4, 5, 6]
        }
        self.interp.stack_push(dataset)
        self.interp.run("'greek' datasets.DATASET!")

        # Get dataset
        self.interp.run("'greek' datasets.DATASET")
        self.assertDictEqual(dataset, self.interp.stack[0])

    def test_RECORDS(self):
        # Store dataset
        dataset = {
            "alpha": [1, 2, 3],
            "beta": [4, 5, 6]
        }
        self.interp.stack_push(dataset)
        self.interp.run("'greek' datasets.DATASET!")

        # Get records
        self.interp.run("'greek' ['beta' 'alpha'] datasets.RECORDS")
        self.assertEqual([[4, 5, 6], [1, 2, 3]], self.interp.stack[-1])

        # Get records with NULLs for missing keys
        self.interp.run("'greek' ['beta' 'MISSING' 'alpha'] datasets.RECORDS")
        self.assertEqual([[4, 5, 6], None, [1, 2, 3]], self.interp.stack[-1])

        # Get records dropping NULLs for missing keys
        self.interp.run("'greek' ['beta' 'MISSING' 'alpha'] datasets.!DROP-NULLS datasets.RECORDS")
        self.assertEqual([[4, 5, 6], [1, 2, 3]], self.interp.stack[-1])


if __name__ == '__main__':
    unittest.main()
def get_interp():
    """Return an Interpreter with the isoweek module registered and in use."""
    interp = Interpreter()
    interp.register_module(ISOWeekModule)
    interp.run('["isoweek"] USE-MODULES')
    return interp


class TestISOWeekModule(unittest.TestCase):
    """Exercises ISO-week date words (week numbers, fiscal quarters/years)."""

    def setUp(self):
        self.interp = get_interp()

    def test_DATE_to_WEEK_NUM(self):
        """WEEK-NUM returns the ISO week number of a date."""
        self.interp.run("""
        2022-08-09 isoweek.WEEK-NUM
        """)
        self.assertEqual(32, self.interp.stack[0])

    def test_QUARTER_START(self):
        """QUARTER-START maps any date in a quarter to that quarter's first day."""
        self.interp.run("""
        2022-08-09 isoweek.QUARTER-START
        2022-07-04 isoweek.QUARTER-START
        """)
        expected = datetime.date(2022, 7, 4)
        self.assertEqual(expected, self.interp.stack[0])
        self.assertEqual(expected, self.interp.stack[1])

    def test_QUARTER_END(self):
        """QUARTER-END maps any date in a quarter to that quarter's last day."""
        self.interp.run("""
        2022-08-09 isoweek.QUARTER-END
        2022-10-02 isoweek.QUARTER-END
        """)
        expected = datetime.date(2022, 10, 2)
        self.assertEqual(expected, self.interp.stack[0])
        self.assertEqual(expected, self.interp.stack[1])

    def test_QUARTER_slash_YEAR(self):
        """QUARTER/YEAR returns [fiscal quarter, fiscal year] given a FY offset."""
        self.interp.run("""
        # Computes fiscal quarter for a company with a FY offset by 2 quarters
        2022-08-09 2 isoweek.QUARTER/YEAR
        2022-06-19 2 isoweek.QUARTER/YEAR
        """)
        self.assertEqual([1, 2023], self.interp.stack[0])
        self.assertEqual([4, 2022], self.interp.stack[1])

    def test_QUARTER(self):
        """QUARTER returns just the fiscal quarter number."""
        self.interp.run("""
        # Computes fiscal quarter for a company with a FY offset by 2 quarters
        2022-08-09 2 isoweek.QUARTER
        2022-06-19 2 isoweek.QUARTER
        """)
        self.assertEqual(1, self.interp.stack[0])
        self.assertEqual(4, self.interp.stack[1])

    def test_YEAR(self):
        """YEAR returns just the fiscal year."""
        self.interp.run("""
        # Computes fiscal quarter for a company with a FY offset by 2 quarters
        2022-08-09 2 isoweek.YEAR
        2022-06-19 2 isoweek.YEAR
        """)
        self.assertEqual(2023, self.interp.stack[0])
        self.assertEqual(2022, self.interp.stack[1])


if __name__ == '__main__':
    unittest.main()
def get_interp():
    """Return an Interpreter with the jira module wired to JiraTestContext."""
    result = Interpreter()
    result.register_module(JiraModule)

    # Set up Jira staging context
    result.run("['jira'] USE-MODULES")
    result.stack_push(JiraTestContext())
    result.run("jira.PUSH-CONTEXT!")
    return result


class TestJiraModule(unittest.TestCase):
    """Exercises jira module words against canned JiraTestContext responses."""

    def setUp(self):
        self.interp = get_interp()

    def test_HOST(self):
        """jira.HOST pushes the context's host URL."""
        self.interp.run("jira.HOST")
        self.assertEqual(self.interp.stack[0], "http://testcontext")

    def test_SEARCH(self):
        """jira.SEARCH resolves field names and returns matching issues."""
        self.interp.run("""
        : JQL ["assignee=testuser and resolution is null"] CONCAT;
        : FIELDS ['Summary' 'Assignee'];
        JQL FIELDS jira.SEARCH
        """)
        issues = self.interp.stack[0]
        self.assertEqual(2, len(issues))
        first, second = issues
        self.assertEqual("SAMPLE-1234", first['key'])
        self.assertEqual("testuser", first['Assignee'])
        self.assertEqual("SAMPLE-1235", second['key'])

    def test_DEFAULT_SEARCH(self):
        """jira.DEFAULT-SEARCH behaves like SEARCH for a plain query."""
        self.interp.run("""
        : JQL ["assignee=testuser and resolution is null"] CONCAT;
        : FIELDS ['Summary' 'Assignee'];
        JQL FIELDS jira.DEFAULT-SEARCH
        """)
        issues = self.interp.stack[0]
        self.assertEqual(2, len(issues))
        first = issues[0]
        self.assertEqual("SAMPLE-1234", first['key'])
        self.assertEqual("testuser", first['Assignee'])

    def test_CREATE(self):
        """jira.CREATE posts a new issue record and returns its key."""
        self.interp.run("""
        [
           ["Project"  "SAMPLE"]
           ["Summary"  "A sample ticket"]
           ["Reporter"  "testuser"]
           ["Issue Type"  "Task"]
        ] REC jira.CREATE
        """)
        self.assertEqual("SAMPLE-12345", self.interp.stack[0])

    def test_UPDATE(self):
        """jira.UPDATE issues a PUT against the ticket's REST endpoint."""
        self.interp.run("""
        "SAMPLE-1234" [["Assignee" "testuser2"]] REC jira.UPDATE
        """)
test_ADD_WATCHER(self): + self.interp.run(""" + "SAMPLE-1234" "manager1" jira.ADD-WATCHER + """) + + def test_LINK_ISSUES(self): + self.interp.run(""" + "SAMPLE-101" "SAMPLE-202" jira.DEPENDENCY jira.LINK-ISSUES + "SAMPLE-101" "SAMPLE-202" jira.ACTION-ITEM jira.LINK-ISSUES + "SAMPLE-101" "SAMPLE-202" jira.CLONERS jira.LINK-ISSUES + "SAMPLE-101" "SAMPLE-202" jira.DUPLICATE jira.LINK-ISSUES + "SAMPLE-101" "SAMPLE-202" jira.ISSUE-SPLIT jira.LINK-ISSUES + "SAMPLE-101" "SAMPLE-202" jira.RELATED jira.LINK-ISSUES + "SAMPLE-101" "SAMPLE-202" jira.REQUIRE jira.LINK-ISSUES + """) + + def test_VOTES(self): + self.interp.run(""" + "SAMPLE-101" jira.VOTES + """) + self.assertEqual(['user1', 'user2'], self.interp.stack[0]) + + def test_CHANGELOG(self): + self.interp.run(""" + "SAMPLE-101" ["Risk_Factor"] jira.CHANGELOG + """) + changes = self.interp.stack[0] + self.assertEqual(3, len(changes)) + + self.assertEqual('', changes[0]['from']) + self.assertEqual('Blue', changes[0]['to']) + + self.assertEqual('Blue', changes[1]['from']) + self.assertEqual('Green', changes[1]['to']) + + self.assertEqual('Green', changes[2]['from']) + self.assertEqual('Yellow', changes[2]['to']) + + def test_FIELD_AS_OF(self): + self.interp.run(""" + ["changes"] VARIABLES + "SAMPLE-101" ["Risk_Factor"] jira.CHANGELOG changes ! + 2020-07-25 changes @ "Risk_Factor" jira.FIELD-AS-OF + 2020-10-01 changes @ "Risk_Factor" jira.FIELD-AS-OF + """) + self.assertEqual("Green", self.interp.stack[0]) + self.assertEqual("Yellow", self.interp.stack[1]) + + def test_FIELD_CHANGE_AS_OF(self): + self.interp.run(""" + ["changes"] VARIABLES + "SAMPLE-101" ["Risk_Factor"] jira.CHANGELOG changes ! 
+ 2020-07-25 changes @ "Risk_Factor" jira.FIELD-CHANGE-AS-OF 'date' REC@ DATE>STR + 2020-10-01 changes @ "Risk_Factor" jira.FIELD-CHANGE-AS-OF 'date' REC@ DATE>STR + """) + self.assertEqual("2020-07-25", self.interp.stack[0]) + self.assertEqual("2020-08-15", self.interp.stack[1]) + + def test_FIELD_AS_OF_SINCE(self): + self.interp.run(""" + ["changes"] VARIABLES + "SAMPLE-101" ["Risk_Factor"] jira.CHANGELOG changes ! + + # NOTE: Here is the changelog + # [{'date': datetime.datetime(2020, 7, 25, 1, 36, 24, tzinfo=tzutc()), 'field': 'Risk_Factor', 'from': '', 'to': 'Blue'}, + # {'date': datetime.datetime(2020, 7, 25, 1, 38, 46, tzinfo=tzutc()), 'field': 'Risk_Factor', + # 'from': 'Blue', 'to': 'Green', 'from_': '32078', 'to_': '32075'}, + # {'date': datetime.datetime(2020, 8, 15, 1, 39, 5, tzinfo=tzutc()), 'field': 'Risk_Factor', + # 'from': 'Green', 'to': 'Yellow', 'from_': '32078', 'to_': '32075'}] + + 2020-07-25 changes @ "Risk_Factor" 2020-07-01 jira.FIELD-AS-OF-SINCE + 2020-10-01 changes @ "Risk_Factor" 2020-07-01 jira.FIELD-AS-OF-SINCE + 2020-08-17 changes @ "Risk_Factor" 2020-08-01 jira.FIELD-AS-OF-SINCE + 2020-08-17 changes @ "Risk_Factor" 2020-08-16 jira.FIELD-AS-OF-SINCE + 2020-10-01 changes @ "Risk_Factor" 2020-09-01 jira.FIELD-AS-OF-SINCE + """) + self.assertEqual("Green", self.interp.stack[0]) + self.assertEqual("Yellow", self.interp.stack[1]) + self.assertEqual("Yellow", self.interp.stack[2]) + self.assertIsNone(self.interp.stack[3]) + self.assertIsNone(self.interp.stack[4]) + + def test_TIME_IN_STATE(self): + field = "status" + resolution = "Fixed" + + # NOTE: The following data would come from something like `'PROJ-1234' ['status'] jira.CHANGELOG` + changes = [ + {"date": datetime.datetime(2021, 7, 21, 1, 14, 57), "field": "status", "from": "", "to": "Open"}, + {"date": datetime.datetime(2021, 8, 23, 2, 56, 7), "field": "status", "from": "Open", "to": "Scoping", "from_": "1", "to_": "10128"}, + {"date": datetime.datetime(2021, 9, 27, 19, 53, 39), 
"field": "status", "from": "Scoping", "to": "In Development", "from_": "10128", "to_": "10194"}, + {"date": datetime.datetime(2021, 11, 4, 8, 36, 5), "field": "status", "from": "In Development", "to": "Closed", "from_": "10194", "to_": "6"} + ] + + # Make the call + self.interp.stack_push(resolution) + self.interp.stack_push(changes) + self.interp.stack_push(field) + self.interp.run("jira.TIME-IN-STATE") + + # Check the results + result = self.interp.stack_pop() + self.assertAlmostEqual(793, int(result['Open'])) + self.assertAlmostEqual(856, int(result['Scoping'])) + self.assertAlmostEqual(900, int(result['In Development'])) + self.assertAlmostEqual(0, int(result['Closed'])) + + def test_TIME_IN_STATE_timestamps(self): + field = "status" + resolution = "Fixed" + + # NOTE: The following data would come from something like `'PROJ-1234' ['status'] jira.CHANGELOG` + changes = [ + {"date": 1626830097, "field": "status", "from": "", "to": "Open"}, + {"date": 1629687367, "field": "status", "from": "Open", "to": "Scoping", "from_": "1", "to_": "10128"}, + {"date": 1632772419, "field": "status", "from": "Scoping", "to": "In Development", "from_": "10128", "to_": "10194"}, + {"date": 1636014965, "field": "status", "from": "In Development", "to": "Closed", "from_": "10194", "to_": "6"} + ] + + # Make the call + self.interp.stack_push(resolution) + self.interp.stack_push(changes) + self.interp.stack_push(field) + self.interp.run("jira.TIME-IN-STATE") + + # Check the results + result = self.interp.stack_pop() + self.assertAlmostEqual(793, int(result['Open'])) + self.assertAlmostEqual(856, int(result['Scoping'])) + self.assertAlmostEqual(900, int(result['In Development'])) + self.assertAlmostEqual(0, int(result['Closed'])) + + def test_FIELD_TAG(self): + self.interp.run(""" + ["ticket"] VARIABLES + [ + ["Description" "This is a sample description [objective: To make things awesome]"] + ] REC ticket ! 
+ + ticket @ "Description" "objective" jira.FIELD-TAG + """) + self.assertEqual("To make things awesome", self.interp.stack[0]) + + def test_REMOVE_FIELD_TAGS(self): + self.interp.run(""" + "This is a sample description. [objective: To make things awesome] alpha [tag2: Something else] beta" jira.REMOVE-FIELD-TAGS + """) + self.assertEqual("This is a sample description. alpha beta", self.interp.stack[0]) + + def test_l_FIELD_TAG_bang(self): + self.interp.run(""" + ["ticket"] VARIABLES + [ + ["Description" "This is a sample description."] + ] REC ticket ! + + ticket @ "Description" "risk" "There isn't any risk!" jira.LEAD") + self.assertEqual('mgr1', self.interp.stack[0]) + + def test_MANAGER(self): + self.interp.run("'mgr1' org.MANAGER") + self.assertEqual('director1', self.interp.stack[0]) + + def test_CHAIN(self): + self.interp.run("'user201' 'vp1' org.CHAIN") + self.assertEqual(['vp1', 'director1', 'mgr2', 'user201'], self.interp.stack[0]) + + self.interp.run("'unknown' 'vp1' org.CHAIN") + self.assertEqual(['unknown'], self.interp.stack[1]) + + def test_CHAIN_KEY_FUNC(self): + self.interp.run("['user101' 'mgr1' 'user203' 'director1'] 'vp1' org.CHAIN-KEY-FUNC !COMPARATOR SORT") + self.assertEqual(['director1', 'mgr1', 'user101', 'user203'], self.interp.stack[0]) + + +def get_context(): + def get_users_managers(): + res = [ + ["user101", "mgr1"], + ["user102", "mgr1"], + ["user103", "mgr1"], + ["user201", "mgr2"], + ["user202", "mgr2"], + ["user203", "mgr2"], + ["mgr1", "director1"], + ["mgr2", "director1"], + ["director1", "vp1"] + ] + return res + + result = OrgContext(get_users_managers) + return result + + +if __name__ == '__main__': + unittest.main() \ No newline at end of file diff --git a/forthic-py/tests/modules/test_v3_trino_module.py b/forthic-py/tests/modules/test_v3_trino_module.py new file mode 100644 index 0000000..8966a8f --- /dev/null +++ b/forthic-py/tests/modules/test_v3_trino_module.py @@ -0,0 +1,32 @@ +import unittest +from 
class TestDataFrame:
    """Tiny stand-in for a pandas DataFrame in the Trino test context.

    Wraps a JSON string and hands it back from `to_json()`, which is the
    only DataFrame method the trino module consumes.
    """

    def __init__(self, json_data):
        # Pre-serialized JSON payload returned verbatim by to_json()
        self.json_data = json_data

    def to_json(self):
        """Return the wrapped JSON string unchanged."""
        return self.json_data
class SampleDateModule(Module):
    """Minimal example Forthic module exposing a single word, TODAY."""

    def __init__(self, interp):
        super().__init__("date", interp)
        self.add_module_word("TODAY", self.word_TODAY)

    # ( -- today )
    def word_TODAY(self, interp):
        """Pushes today's date onto the Forthic stack."""
        interp.stack_push(datetime.date.today())
self.assertEqual(x_var.get_value(), 24) + + interp.run("x @") + self.assertEqual(interp.stack[-1], 24) + + def test_bang_at(self): + interp = Interpreter() + interp.run("['x'] VARIABLES") + interp.run("24 x !@") + x_var = interp.app_module.variables['x'] + + self.assertEqual(x_var.get_value(), 24) + self.assertEqual(interp.stack[-1], 24) + + def test_interpret(self): + interp = Interpreter() + interp.run("'24' INTERPRET") + + self.assertEqual(interp.stack[-1], 24) + + interp.run("""'{module-A : MESSAGE "Hi" ;}' INTERPRET""") + interp.run("{module-A MESSAGE}") + self.assertEqual(interp.stack[-1], 'Hi') + + def test_memo(self): + interp = Interpreter() + interp.run(""" + ['count'] VARIABLES + 0 count ! + @: COUNT count @ 1 + count ! count @; + """) + + interp.run("COUNT") + self.assertEqual(interp.stack[-1], 1) + + interp.run("COUNT") + self.assertEqual(interp.stack[-1], 1) + + interp.run("COUNT! COUNT") + self.assertEqual(interp.stack[-1], 2) + self.assertEqual(len(interp.stack), 3) + + interp.run("COUNT!@") + self.assertEqual(interp.stack[-1], 3) + + def test_rec(self): + interp = Interpreter() + interp.run(""" + [ ["alpha" 2] ["beta" 3] ["gamma" 4] ] REC + """) + + self.assertEqual(len(interp.stack), 1) + + rec = interp.stack[-1] + self.assertEqual(rec["alpha"], 2) + self.assertEqual(rec["gamma"], 4) + + def test_rec_at(self): + interp = Interpreter() + interp.run(""" + [ ["alpha" 2] ["beta" 3] ["gamma" 4] ] REC + 'beta' REC@ + """) + self.assertEqual(len(interp.stack), 1) + self.assertEqual(interp.stack[0], 3) + + interp.run(""" + [10 20 30 40 50] 3 REC@ + """) + self.assertEqual(interp.stack[-1], 40) + + def test_nested_rec_at(self): + interp = Interpreter() + interp.run(""" + [ ["alpha" [["alpha1" 20]] REC] + ["beta" [["beta1" 30]] REC] + ] REC + ["beta" "beta1"] REC@ + """) + self.assertEqual(interp.stack[-1], 30) + + interp.run(""" + [ [] [] [[3]] ] + [2 0 0] REC@ + """) + self.assertEqual(interp.stack[-1], 3) + + interp.run(""" + [ ["alpha" [["alpha1" 20]] 
REC] + ["beta" [["beta1" [10 20 30]]] REC] + ] REC + ["beta" "beta1" 1] REC@ + """) + self.assertEqual(interp.stack[-1], 20) + + def test_l_rec_bang(self): + # Case: Set value on a record + interp = Interpreter() + interp.run(""" + [ ["alpha" 2] ["beta" 3] ["gamma" 4] ] REC + 700 'beta' JSON + # [ [ [4 6] ] [ [6 8] ] ] + ''') + array_json = interp.stack[-1] + self.assertEqual(array_json, '[[[{"m": 4}, {"m": 6}]], [[{"m": 6}, {"m": 8}]]]') + + def test_map_depth_over_array_of_maps(self): + interp = Interpreter() + interp.run(''' + : DEEP-LIST [ [ [2 3] ] [ [3 4] ] ]; + + DEEP-LIST "2 *" 2 !DEPTH MAP + ''') + array = interp.stack[-1] + self.assertEqual(array, [ [ [4, 6] ], [ [6, 8] ] ]) + + def test_map_depth_w_error(self): + interp = Interpreter() + interp.run(''' + : k1-REC [ + ["l1" [["m" 2]] REC] + ["l2" [["m" 3]] REC] + ] REC; + + : k2-REC [ + ["l1" [["m" 'GARBAGE']] REC] + ["l2" [["m" 4]] REC] + ] REC; + + : DEEP-RECORD [ + ["k1" k1-REC] + ["k2" k2-REC] + ] REC; + + DEEP-RECORD ">STR INTERPRET" 2 !DEPTH !PUSH-ERROR MAP + # {'k1': {'l1': {'m': 2}, 'l2': {'m': 3}}, 'k2': {'l1': {'m': None}, 'l2': {'m': 4}}} + ''') + errors = interp.stack[-1] + record = interp.stack[-2] + self.assertDictEqual(record, {'k1': {'l1': {'m': 2}, 'l2': {'m': 3}}, 'k2': {'l1': {'m': None}, 'l2': {'m': 4}}}) + self.assertEqual([str(e) for e in errors], ['None', 'None', "Unknown word: 'GARBAGE'", 'None']) + + def test_map_w_key(self): + interp = Interpreter() + interp.run(""" + [1 2 3 4 5] '+ 2 *' !WITH-KEY MAP + """) + array = interp.stack[0] + self.assertEqual(array, [2, 6, 10, 14, 18]) + + # Test mapping over a record + interp = Interpreter() + + # First, set up the record + records = self.make_records() + by_key = {} + for rec in records: + by_key[rec["key"]] = rec + interp.stack_push(by_key) + + interp.run(""" + ["k" "v"] VARIABLES + "v ! k ! 
k @ >STR v @ 'status' REC@ CONCAT" !WITH-KEY MAP + """) + record = interp.stack[0] + self.assertEqual(record[100], "100OPEN") + self.assertEqual(record[102], "102IN PROGRESS") + self.assertEqual(record[106], "106CLOSED") + + def test_foreach(self): + interp = Interpreter() + interp.run(""" + 0 [1 2 3 4 5] '+' FOREACH + """) + sum = interp.stack[0] + self.assertEqual(sum, 15) + + # Test grouping a record + interp = Interpreter() + + # First, set up the record + records = self.make_records() + by_key = {} + for rec in records: + by_key[rec["key"]] = rec + interp.stack_push(by_key) + + interp.run(""" + "" SWAP "'status' REC@ CONCAT" FOREACH + """) + string = interp.stack[0] + self.assertEqual(string, "OPENOPENIN PROGRESSCLOSEDIN PROGRESSOPENCLOSED") + + def test_foreach_w_key(self): + interp = Interpreter() + interp.run(""" + 0 [1 2 3 4 5] '+ +' !WITH-KEY FOREACH + """) + sum = interp.stack[0] + self.assertEqual(sum, 25) + + # Test grouping a record + interp = Interpreter() + + # First, set up the record + records = self.make_records() + by_key = {} + for rec in records: + by_key[rec["key"]] = rec + interp.stack_push(by_key) + + interp.run(""" + "" SWAP "'status' REC@ CONCAT CONCAT" !WITH-KEY FOREACH + """) + string = interp.stack[0] + self.assertEqual(string, "100OPEN101OPEN102IN PROGRESS103CLOSED104IN PROGRESS105OPEN106CLOSED") + + def test_foreach_to_errors(self): + interp = Interpreter() + interp.run(""" + ['2' '3' 'GARBAGE' '+'] 'INTERPRET' !PUSH-ERROR FOREACH + """) + errors = interp.stack[-1] + self.assertIsNone(errors[0]) + self.assertIsNone(errors[1]) + self.assertIsNotNone(errors[2]) + self.assertIsNone(errors[3]) + res = interp.stack[-2] + self.assertEqual(res, 5) + + def test_invert_keys(self): + interp = Interpreter() + status_to_manager_to_ids = self.make_status_to_manager_to_ids() + interp.stack_push(status_to_manager_to_ids) + interp.run("INVERT-KEYS") + res = interp.stack_pop() + expected = { + "manager1": { + "open": [101, 102], + "closed": [10, 11] 
+ }, + "manager2": { + "open": [103], + "closed": [12, 13] + }, + "manager3": { + "blocked": [104] + } + } + + self.assertEqual(res, expected) + + def test_zip(self): + interp = Interpreter() + interp.run(""" + ['a' 'b'] [1 2] ZIP + """) + array = interp.stack[0] + self.assertEqual(array[0], ['a', 1]) + self.assertEqual(array[1], ['b', 2]) + + # Zip a record + interp = Interpreter() + + # First, set up the record + interp.run(""" + [['a' 100] ['b' 200] ['z' 300]] REC [['a' 'Hi'] ['b' 'Bye'] ['c' '?']] REC ZIP + """) + record = interp.stack[0] + self.assertEqual(sorted(record.keys()), ['a', 'b', 'z']) + self.assertEqual(record['a'], [100, 'Hi']) + self.assertEqual(record['b'], [200, 'Bye']) + self.assertEqual(record['z'], [300, None]) + + def test_zip_with(self): + interp = Interpreter() + interp.run(""" + [10 20] [1 2] "+" ZIP-WITH + """) + array = interp.stack[0] + self.assertEqual(array[0], 11) + self.assertEqual(array[1], 22) + + # Zip a record + interp = Interpreter() + + # First, set up the record + interp.run(""" + [['a' 1] ['b' 2]] REC [['a' 10] ['b' 20]] REC "+" ZIP-WITH + """) + record = interp.stack[0] + self.assertEqual(sorted(record.keys()), ['a', 'b']) + self.assertEqual(record['a'], 11) + self.assertEqual(record['b'], 22) + + def test_keys(self): + interp = Interpreter() + interp.run(""" + ['a' 'b' 'c'] KEYS + """) + array = interp.stack[0] + self.assertEqual(array, [0, 1, 2]) + + # Test record + interp = Interpreter() + + # First, set up the record + interp.run(""" + [['a' 1] ['b' 2]] REC KEYS + """) + array = interp.stack[0] + self.assertEqual(sorted(array), ['a', 'b']) + + def test_values(self): + interp = Interpreter() + interp.run(""" + ['a' 'b' 'c'] VALUES + """) + array = interp.stack[0] + self.assertEqual(array, ['a', 'b', 'c']) + + # Test record + interp = Interpreter() + + # First, set up the record + interp.run(""" + [['a' 1] ['b' 2]] REC VALUES + """) + array = interp.stack[0] + self.assertEqual(sorted(array), [1, 2]) + + def 
test_length(self): + interp = Interpreter() + interp.run(""" + ['a' 'b' 'c'] LENGTH + "Howdy" LENGTH + """) + self.assertEqual(interp.stack[0], 3) + self.assertEqual(interp.stack[1], 5) + + # Test record + interp = Interpreter() + + interp.run(""" + [['a' 1] ['b' 2]] REC LENGTH + """) + length = interp.stack[0] + self.assertEqual(length, 2) + + def test_RANGE(self): + interp = Interpreter() + interp.run(""" + : EVEN? 2 MOD 0 ==; + : ODD? 2 MOD 1 ==; + [1 2 3 4 5] "EVEN?" "ODD?" RANGE + """) + self.assertEqual(interp.stack[0], [1, 2]) + + # Test record + interp = Interpreter() + + interp.run(""" + [['a' 1] ['b' 2]] REC LENGTH + """) + length = interp.stack[0] + self.assertEqual(length, 2) + + def test_slice(self): + interp = Interpreter() + interp.run(""" + ['x'] VARIABLES + ['a' 'b' 'c' 'd' 'e' 'f' 'g'] x ! + x @ 0 2 SLICE + x @ 1 3 SLICE + x @ 5 3 SLICE + x @ -1 -2 SLICE + x @ 4 -2 SLICE + x @ 5 8 SLICE + """) + stack = interp.stack + self.assertEqual(stack[0], ['a', 'b', 'c']) + self.assertEqual(stack[1], ['b', 'c', 'd']) + self.assertEqual(stack[2], ['f', 'e', 'd']) + self.assertEqual(stack[3], ['g', 'f']) + self.assertEqual(stack[4], ['e', 'f']) + self.assertEqual(stack[5], ['f', 'g', None, None]) + + # Slice records + interp = Interpreter() + interp.run(""" + ['x'] VARIABLES + [['a' 1] ['b' 2] ['c' 3]] REC x ! + x @ 0 1 SLICE + x @ -1 -2 SLICE + x @ 5 7 SLICE + """) + stack = interp.stack + self.assertEqual(sorted(list(stack[0].keys())), ['a', 'b']) + self.assertEqual(sorted(list(stack[1].keys())), ['b', 'c']) + self.assertEqual(stack[2], {}) + + def test_difference(self): + interp = Interpreter() + interp.run(""" + ['x' 'y'] VARIABLES + ['a' 'b' 'c'] x ! + ['a' 'c' 'd'] y ! + x @ y @ DIFFERENCE + y @ x @ DIFFERENCE + """) + stack = interp.stack + self.assertEqual(stack[0], ['b']) + self.assertEqual(stack[1], ['d']) + + # Records + interp = Interpreter() + interp.run(""" + ['x' 'y'] VARIABLES + [['a' 1] ['b' 2] ['c' 3]] REC x ! 
+ [['a' 20] ['c' 40] ['d' 10]] REC y ! + x @ y @ DIFFERENCE + y @ x @ DIFFERENCE + """) + stack = interp.stack + self.assertEqual(list(stack[0].keys()), ['b']) + self.assertEqual(list(stack[0].values()), [2]) + self.assertEqual(list(stack[1].keys()), ['d']) + self.assertEqual(list(stack[1].values()), [10]) + + def test_intersection(self): + interp = Interpreter() + interp.run(""" + ['x' 'y'] VARIABLES + ['a' 'b' 'c'] x ! + ['a' 'c' 'd'] y ! + x @ y @ INTERSECTION + """) + stack = interp.stack + self.assertEqual(sorted(stack[0]), ['a', 'c']) + + # Records + interp = Interpreter() + interp.run(""" + ['x' 'y'] VARIABLES + [['a' 1] ['b' 2] ['f' 3]] REC x ! + [['a' 20] ['c' 40] ['d' 10]] REC y ! + x @ y @ INTERSECTION + """) + stack = interp.stack + self.assertEqual(list(stack[0].keys()), ['a']) + self.assertEqual(list(stack[0].values()), [1]) + + def test_UNION(self): + interp = Interpreter() + interp.run(""" + ['x' 'y'] VARIABLES + ['a' 'b' 'c'] x ! + ['a' 'c' 'd'] y ! + x @ y @ UNION + """) + stack = interp.stack + self.assertEqual(sorted(stack[0]), ['a', 'b', 'c', 'd']) + + # Records + interp = Interpreter() + interp.run(""" + ['x' 'y'] VARIABLES + [['a' 1] ['b' 2] ['f' 3]] REC x ! + [['a' 20] ['c' 40] ['d' 10]] REC y ! 
+ x @ y @ UNION + """) + stack = interp.stack + self.assertEqual(sorted(list(stack[0].keys())), ['a', 'b', 'c', 'd', 'f']) + self.assertEqual(sorted(list(stack[0].values())), [1, 2, 3, 10, 40]) + + def test_select(self): + interp = Interpreter() + interp.run(""" + [0 1 2 3 4 5 6] "2 MOD 1 ==" SELECT + """) + stack = interp.stack + self.assertEqual(stack[0], [1, 3, 5]) + + # Slice records + interp = Interpreter() + interp.run(""" + [['a' 1] ['b' 2] ['c' 3]] REC "2 MOD 0 ==" SELECT + """) + stack = interp.stack + self.assertEqual(list(stack[0].keys()), ['b']) + self.assertEqual(list(stack[0].values()), [2]) + + def test_select_w_key(self): + interp = Interpreter() + interp.run(""" + [0 1 2 3 4 5 6] "+ 3 MOD 1 ==" !WITH-KEY SELECT + """) + stack = interp.stack + self.assertEqual(stack[0], [2, 5]) + + # Slice records + interp = Interpreter() + interp.run(""" + [['a' 1] ['b' 2] ['c' 3]] REC "CONCAT 'c3' ==" !WITH-KEY SELECT + """) + stack = interp.stack + self.assertEqual(list(stack[0].keys()), ['c']) + self.assertEqual(list(stack[0].values()), [3]) + + def test_take(self): + interp = Interpreter() + interp.run(""" + [0 1 2 3 4 5 6] 3 TAKE + """) + stack = interp.stack + self.assertEqual(stack[0], [0, 1, 2]) + + # Take records + interp = Interpreter() + interp.run(""" + [['a' 1] ['b' 2] ['c' 3]] REC 2 TAKE + """) + stack = interp.stack + self.assertEqual(len(stack[0]), 2) + + def test_take_with_rest(self): + interp = Interpreter() + interp.run(""" + [0 1 2 3 4 5 6] 3 !PUSH-REST TAKE + """) + stack = interp.stack + self.assertEqual(stack[0], [0, 1, 2]) + self.assertEqual(stack[1], [3, 4, 5, 6]) + + # Take records + interp = Interpreter() + interp.run(""" + [['a' 1] ['b' 2] ['c' 3]] REC 2 !PUSH-REST TAKE + """) + stack = interp.stack + self.assertEqual(len(stack[0]), 2) + self.assertEqual(len(stack[1]), 1) + + + def test_drop(self): + interp = Interpreter() + interp.run(""" + [0 1 2 3 4 5 6] 4 DROP + """) + stack = interp.stack + self.assertEqual(stack[0], [4, 5, 6]) + + 
# Drop records + interp = Interpreter() + interp.run(""" + [['a' 1] ['b' 2] ['c' 3]] REC 2 DROP + """) + stack = interp.stack + self.assertEqual(len(stack[0]), 1) + + def test_rotate(self): + interp = Interpreter() + interp.run(""" + ['a' 'b' 'c' 'd'] ROTATE + ['b'] ROTATE + [] ROTATE + """) + stack = interp.stack + self.assertEqual(stack[0], ['d', 'a', 'b', 'c']) + self.assertEqual(stack[1], ['b']) + self.assertEqual(stack[2], []) + + def test_array_q(self): + interp = Interpreter() + interp.run(""" + ['a' 'b' 'c' 'd'] ARRAY? + 'b' ARRAY? + 0 ARRAY? + """) + stack = interp.stack + self.assertEqual(stack[0], True) + self.assertEqual(stack[1], False) + self.assertEqual(stack[2], False) + + def test_shuffle(self): + interp = Interpreter() + interp.run(""" + [0 1 2 3 4 5 6] SHUFFLE + """) + stack = interp.stack + self.assertEqual(len(stack[0]), 7) + + # Shuffle record (no-op) + interp = Interpreter() + interp.run(""" + [['a' 1] ['b' 2] ['c' 3]] REC SHUFFLE + """) + stack = interp.stack + self.assertEqual(len(stack[0]), 3) + + def test_sort(self): + interp = Interpreter() + interp.run(""" + [2 8 1 4 7 3] SORT + """) + stack = interp.stack + self.assertEqual(stack[0], [1, 2, 3, 4, 7, 8]) + + # Sort record + interp = Interpreter() + interp.run(""" + [['a' 3] ['b' 1] ['c' 2]] REC SORT + """) + stack = interp.stack + self.assertEqual(len(stack[0]), 3) + self.assertEqual(list(stack[0].keys()), ['b', 'c', 'a']) + + def test_sort_with_null(self): + interp = Interpreter() + interp.run(""" + [2 8 1 NULL 4 7 NULL 3] SORT + """) + stack = interp.stack + self.assertEqual(stack[0], [1, 2, 3, 4, 7, 8, None, None]) + + def test_sort_w_forthic(self): + interp = Interpreter() + interp.run(""" + [2 8 1 4 7 3] "-1 *" !COMPARATOR SORT + """) + stack = interp.stack + self.assertEqual(stack[0], [8, 7, 4, 3, 2, 1]) + + # Sort record (no-op) + interp = Interpreter() + interp.run(""" + [['a' 1] ['b' 2] ['c' 3]] REC "-1 *" !COMPARATOR SORT + """) + stack = interp.stack + 
self.assertEqual(len(stack[0]), 3) + self.assertEqual(list(stack[0].keys()), ['c', 'b', 'a']) + + def test_sort_w_key_func(self): + interp = Interpreter() + interp.stack_push(self.make_records()) + interp.run(""" + 'status' FIELD-KEY-FUNC !COMPARATOR SORT + """) + stack = interp.stack + self.assertEqual(stack[0][0]["status"], "CLOSED") + self.assertEqual(stack[0][1]["status"], "CLOSED") + self.assertEqual(stack[0][2]["status"], "IN PROGRESS") + self.assertEqual(stack[0][3]["status"], "IN PROGRESS") + self.assertEqual(stack[0][4]["status"], "OPEN") + self.assertEqual(stack[0][5]["status"], "OPEN") + self.assertEqual(stack[0][6]["status"], "OPEN") + + # Sort record (no-op) + interp = Interpreter() + interp.run(""" + [['a' 1] ['b' 2] ['c' 3]] NULL !COMPARATOR SORT + """) + stack = interp.stack + self.assertEqual(len(stack[0]), 3) + + def test_nth(self): + interp = Interpreter() + interp.run(""" + ["x"] VARIABLES + [0 1 2 3 4 5 6] x ! + x @ 0 NTH + x @ 5 NTH + x @ 55 NTH + """) + stack = interp.stack + self.assertEqual(stack[0], 0) + self.assertEqual(stack[1], 5) + self.assertIsNone(stack[2]) + + # For record + interp = Interpreter() + interp.run(""" + ["x"] VARIABLES + [['a' 1] ['b' 2] ['c' 3]] REC x ! 
+ x @ 0 NTH + x @ 2 NTH + x @ 55 NTH + """) + stack = interp.stack + self.assertEqual(stack[0], 1) + self.assertEqual(stack[1], 3) + self.assertIsNone(stack[2]) + + def test_last(self): + interp = Interpreter() + interp.run(""" + [0 1 2 3 4 5 6] LAST + """) + stack = interp.stack + self.assertEqual(stack[0], 6) + + # For record + interp = Interpreter() + interp.run(""" + [['a' 1] ['b' 2] ['c' 3]] REC LAST + """) + stack = interp.stack + self.assertEqual(stack[0], 3) + + def test_unpack(self): + interp = Interpreter() + interp.run(""" + [0 1 2] UNPACK + """) + stack = interp.stack + self.assertEqual(stack[0], 0) + self.assertEqual(stack[1], 1) + self.assertEqual(stack[2], 2) + + # For record + interp = Interpreter() + interp.run(""" + [['a' 1] ['b' 2] ['c' 3]] REC UNPACK + """) + stack = interp.stack + self.assertEqual(stack[0], 1) + self.assertEqual(stack[1], 2) + self.assertEqual(stack[2], 3) + + def test_FLATTEN(self): + interp = Interpreter() + interp.run(""" + [0 [1 2 [3 [4]] ]] FLATTEN + """) + stack = interp.stack + self.assertEqual(stack[0], [0, 1, 2, 3, 4]) + + # For record + interp = Interpreter() + interp.run(""" + ['uno' 'alpha'] VARIABLES + [['uno' 4] ['duo' 8]] REC uno ! + [['alpha' uno @]] REC alpha ! 
+ [['a' 1] ['b' alpha @] ['c' 3]] REC FLATTEN + """) + stack = interp.stack + record = stack[0] + self.assertEqual(sorted(list(record.keys())), ['a', 'b.alpha.duo', 'b.alpha.uno', 'c']) + + def test_FLATTEN_depth(self): + interp = Interpreter() + interp.run(""" + [ [ [0 1] [2 3] ] + [ [4 5] ] ] 1 !DEPTH FLATTEN + """) + array = interp.stack[-1] + self.assertEqual(array, [[0, 1], [2, 3], [4, 5]]) + + interp.run(""" + [ [ [0 1] [2 3] ] + [ [4 5] ] ] 0 !DEPTH FLATTEN + """) + array = interp.stack[-1] + self.assertEqual(array, [[[0, 1] , [2, 3]], [[4, 5]]]) + + interp.run(""" + [ [ [0 1] [2 3] ] + [ [4 5] ] ] 2 !DEPTH FLATTEN + """) + array = interp.stack[-1] + self.assertEqual(array, [0, 1, 2, 3, 4, 5]) + return + + def test_FLATTEN_one_level_record(self): + interp = Interpreter() + interp.run(""" + ['uno' 'alpha'] VARIABLES + [['uno' 4] ['duo' 8]] REC uno ! + [['alpha' uno @]] REC alpha ! + [['a' 1] ['b' alpha @] ['c' 3]] REC 1 !DEPTH FLATTEN + """) + record = interp.stack[0] + self.assertEqual(sorted(record.keys()), ['a', 'b.alpha', 'c']) + return + + + def test_key_of(self): + interp = Interpreter() + interp.run(""" + ['x'] VARIABLES + ['a' 'b' 'c' 'd'] x ! 
+ x @ 'c' KEY-OF + x @ 'z' KEY-OF + """) + stack = interp.stack + self.assertEqual(stack[0], 2) + self.assertIsNone(stack[1]) + + # For record + interp = Interpreter() + interp.run(""" + [['a' 1] ['b' 2] ['c' 3]] REC 2 KEY-OF + """) + stack = interp.stack + self.assertEqual(stack[0], 'b') + + def test_reduce(self): + interp = Interpreter() + interp.run(""" + [1 2 3 4 5] 10 "+" REDUCE + """) + stack = interp.stack + self.assertEqual(stack[0], 25) + + # For record + interp = Interpreter() + interp.run(""" + [['a' 1] ['b' 2] ['c' 3]] REC 20 "+" REDUCE + """) + stack = interp.stack + self.assertEqual(stack[0], 26) + + def test_cumulative_dist(self): + def get_sample_records(): + res = [] + for i in range(20): + res.append({"x": i}) + + # Add records with no "x" field + res.append({}) + res.append({}) + return res + + # Inputs + sample_records = get_sample_records() + field = "x" + breakpoints = [5, 10, 20] + + # --------------------------------------- + # Normal case + interp = Interpreter() + interp.stack_push(sample_records) + interp.stack_push(field) + interp.stack_push(breakpoints) + interp.run("CUMULATIVE-DIST") + result = interp.stack_pop() + + # Should get inputs back + self.assertEqual(result.get("records"), sample_records) + self.assertEqual(result.get("field"), field) + self.assertEqual(result.get("breakpoints"), breakpoints) + + # Record breakpoint indexes should be correct + record_breakpoint_indexes = result.get("record_breakpoint_indexes") + self.assertEqual(0, record_breakpoint_indexes[0]) + self.assertEqual(0, record_breakpoint_indexes[5]) + self.assertEqual(1, record_breakpoint_indexes[6]) + self.assertEqual(1, record_breakpoint_indexes[10]) + self.assertEqual(2, record_breakpoint_indexes[11]) + self.assertEqual(2, record_breakpoint_indexes[19]) + self.assertEqual(1003, record_breakpoint_indexes[20]) # Have x being NULL + self.assertEqual(1003, record_breakpoint_indexes[21]) # Have x being NULL + + # Breakpoint counts should be correct + 
breakpoint_counts = result.get("breakpoint_counts") + self.assertEqual(6, breakpoint_counts[0]) + self.assertEqual(11, breakpoint_counts[1]) + self.assertEqual(20, breakpoint_counts[2]) + + # --------------------------------------- + # Empty records + interp = Interpreter() + interp.stack_push([]) + interp.stack_push(field) + interp.stack_push(breakpoints) + interp.run("CUMULATIVE-DIST") + result = interp.stack_pop() + self.assertEqual([], result.get("record_breakpoint_indexes")) + self.assertEqual([0, 0, 0], result.get("breakpoint_counts")) + + # --------------------------------------- + # Incorrect field + interp = Interpreter() + interp.stack_push(sample_records) + interp.stack_push("bad_field") + interp.stack_push(breakpoints) + interp.run("CUMULATIVE-DIST") + result = interp.stack_pop() + self.assertEqual([1003] * 22, result.get("record_breakpoint_indexes")) + self.assertEqual([0, 0, 0], result.get("breakpoint_counts")) + + return + + def test_pop(self): + interp = Interpreter() + interp.run(""" + 1 2 3 4 5 POP + """) + stack = interp.stack + self.assertEqual(len(stack), 4) + self.assertEqual(stack[-1], 4) + + def test_dup(self): + interp = Interpreter() + interp.run(""" + 5 DUP + """) + stack = interp.stack + self.assertEqual(len(stack), 2) + self.assertEqual(stack[0], 5) + self.assertEqual(stack[1], 5) + + def test_swap(self): + interp = Interpreter() + interp.run(""" + 6 8 SWAP + """) + stack = interp.stack + self.assertEqual(len(stack), 2) + self.assertEqual(stack[0], 8) + self.assertEqual(stack[1], 6) + + def test_split(self): + interp = Interpreter() + interp.run(""" + 'Now is the time' ' ' SPLIT + """) + stack = interp.stack + self.assertEqual(len(stack), 1) + self.assertEqual(stack[0], ["Now", "is", "the", "time"]) + + def test_join(self): + interp = Interpreter() + interp.run(""" + ["Now" "is" "the" "time"] "--" JOIN + """) + stack = interp.stack + self.assertEqual(len(stack), 1) + self.assertEqual(stack[0], "Now--is--the--time") + + def 
test_special_chars(self): + interp = Interpreter() + interp.run(""" + /R /N /T + """) + stack = interp.stack + self.assertEqual(stack[0], "\r") + self.assertEqual(stack[1], "\n") + self.assertEqual(stack[2], "\t") + + def test_LOWERCASE(self): + interp = Interpreter() + interp.run(""" + "HOWDY, Everyone!" LOWERCASE + """) + stack = interp.stack + self.assertEqual(stack[0], "howdy, everyone!") + + def test_ascii(self): + interp = Interpreter() + interp.run(""" + "“HOWDY, Everyone!”" ASCII + """) + stack = interp.stack + self.assertEqual(stack[0], "HOWDY, Everyone!") + + def test_strip(self): + interp = Interpreter() + interp.run(""" + " howdy " STRIP + """) + stack = interp.stack + self.assertEqual(stack[0], "howdy") + + def test_replace(self): + interp = Interpreter() + interp.run(""" + "1-40 2-20" "-" "." REPLACE + """) + stack = interp.stack + self.assertEqual(stack[0], "1.40 2.20") + + def test_re_replace(self): + interp = Interpreter() + interp.run(r""" + "Howdy https://www.linkedin.com" "(https?://\S+)" "=HYPERLINK('\1', '\1')" RE-REPLACE + """) + stack = interp.stack + self.assertEqual(stack[0], "Howdy =HYPERLINK('https://www.linkedin.com', 'https://www.linkedin.com')") + + def test_match(self): + interp = Interpreter() + interp.run(r""" + "123message456" "\d{3}.*\d{3}" RE-MATCH + """) + stack = interp.stack + self.assertTrue(stack[0]) + + def test_match_group(self): + interp = Interpreter() + interp.run(r""" + "123message456" "\d{3}(.*)\d{3}" RE-MATCH 1 RE-MATCH-GROUP + """) + stack = interp.stack + self.assertEqual(stack[0], "message") + + def test_match_all(self): + interp = Interpreter() + interp.run(""" + "mr-android ios my-android web test-web" ".*?(android|ios|web|seo)" RE-MATCH-ALL + """) + stack = interp.stack + self.assertEqual(stack[0], ['android', 'ios', 'android', 'web', 'web']) + + def test_URL_ENCODE(self): + interp = Interpreter() + interp.run(""" + "now/is the time" URL-ENCODE + """) + stack = interp.stack + self.assertEqual(stack[0], 
"now%2Fis+the+time") + + def test_URL_DECODE(self): + interp = Interpreter() + interp.run(""" + "now%2Fis%20the%20time" URL-DECODE + """) + stack = interp.stack + self.assertEqual(stack[0], "now/is the time") + + def test_default(self): + interp = Interpreter() + interp.run(""" + NULL 22.4 DEFAULT + 0 22.4 DEFAULT + "" "Howdy" DEFAULT + """) + stack = interp.stack + self.assertEqual(stack[0], 22.4) + self.assertEqual(stack[1], 0) + self.assertEqual(stack[2], "Howdy") + + def test_star_DEFAULT(self): + interp = Interpreter() + interp.run(""" + NULL "3.1 5 +" *DEFAULT + 0 "22.4" *DEFAULT + "" "['Howdy, ' 'Everyone!'] CONCAT" *DEFAULT + """) + stack = interp.stack + self.assertAlmostEqual(stack[0], 8.1) + self.assertEqual(stack[1], 0) + self.assertEqual(stack[2], "Howdy, Everyone!") + + def test_l_repeat(self): + interp = Interpreter() + interp.run(""" + [0 "1 +" 6 FIXED + """) + stack = interp.stack + self.assertEqual(stack[0], "3.14") + + def test_to_json(self): + interp = Interpreter() + interp.run(""" + [["a" 1] ["b" 2]] REC >JSON + """) + stack = interp.stack + self.assertEqual(stack[0], '{"a": 1, "b": 2}') + + def test_json_to(self): + interp = Interpreter() + interp.run(""" + '{"a": 1, "b": 2}' JSON> + """) + stack = interp.stack + self.assertEqual(sorted(stack[0].keys()), ['a', 'b']) + self.assertEqual(stack[0]['a'], 1) + self.assertEqual(stack[0]['b'], 2) + + def test_to_tsv(self): + interp = Interpreter() + interp.run(""" + [['alpha' 'beta' 'gamma'] [1 2 3]] >TSV + [['a\t1' 'b\t2' 'c\n3'] [4 5 6]] >TSV + """) + stack = interp.stack + self.assertEqual(stack[0], "alpha\tbeta\tgamma\r\n1\t2\t3\r\n") + self.assertEqual(stack[1], '"a\t1"\t"b\t2"\t"c\n3"\r\n4\t5\t6\r\n') + + def test_tsv_to(self): + interp = Interpreter() + interp.run(""" + "alpha\tbeta\tgamma\r\n1\t2\t3\r\n" TSV> + """) + stack = interp.stack + self.assertEqual(stack[0], [['alpha', 'beta', 'gamma'], ['1', '2', '3']]) + + def test_recs_to_tsv(self): + interp = Interpreter() + interp.run(""" + [ + 
['alpha' 'beta' 'gamma'] [1 2 3] ZIP REC + ['alpha' 'beta' 'gamma'] [2 4 6] ZIP REC + ] ['alpha' 'gamma'] RECS>TSV + """) + stack = interp.stack + self.assertEqual(stack[0], "alpha\tgamma\r\n1\t3\r\n2\t6\r\n") + + def test_tsv_to_recs(self): + interp = Interpreter() + interp.run(""" + "alpha\tgamma\r\n1\t3\r\n2\t6\r\n" TSV>RECS + """) + stack = interp.stack + self.assertEqual(sorted(stack[0][0].keys()), ['alpha', 'gamma']) + self.assertEqual(sorted(stack[0][1].keys()), ['alpha', 'gamma']) + self.assertEqual(stack[0][0]['alpha'], '1') + self.assertEqual(stack[0][0]['gamma'], '3') + self.assertEqual(stack[0][1]['alpha'], '2') + self.assertEqual(stack[0][1]['gamma'], '6') + + def test_now(self): + now = datetime.datetime.now() + interp = Interpreter() + interp.run("NOW") + stack = interp.stack + self.assertEqual(stack[0].hour, now.hour) + self.assertEqual(stack[0].minute, now.minute) + + def test_to_time(self): + interp = Interpreter() + interp.run("'10:52 PM' >TIME") + stack = interp.stack + self.assertEqual(stack[0].hour, 22) + self.assertEqual(stack[0].minute, 52) + + def test_l_tz_bang(self): + interp = Interpreter() + interp.run("'10:52 PM' >TIME 'US/Eastern' TIME 'US/Eastern' STR + """) + stack = interp.stack + self.assertEqual(stack[0], "07:52") + + def test_to_date(self): + interp = Interpreter() + interp.run(""" + "Oct 21, 2020" >DATE + """) + stack = interp.stack + self.assertEqual(stack[0].year, 2020) + self.assertEqual(stack[0].month, 10) + self.assertEqual(stack[0].day, 21) + + def test_today(self): + interp = Interpreter() + interp.run(""" + TODAY + """) + today = datetime.date.today() + stack = interp.stack + self.assertEqual(stack[0].year, today.year) + self.assertEqual(stack[0].month, today.month) + self.assertEqual(stack[0].day, today.day) + + def test_days_of_week(self): + today = datetime.date.today() + interp = Interpreter() + interp.run(""" + MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY SUNDAY + """) + stack = interp.stack + 
self.assertTrue(stack[0] <= today) + self.assertTrue(stack[6] >= today) + + def test_add_days(self): + interp = Interpreter() + interp.run(""" + 2020-10-21 12 ADD-DAYS + """) + stack = interp.stack + self.assertEqual(stack[0].year, 2020) + self.assertEqual(stack[0].month, 11) + self.assertEqual(stack[0].day, 2) + + def test_subtract_dates(self): + interp = Interpreter() + interp.run(""" + 2020-10-21 2020-11-02 SUBTRACT-DATES + """) + stack = interp.stack + self.assertEqual(stack[0], -12) + + def test_date_to_str(self): + interp = Interpreter() + interp.run(""" + 2020-11-02 DATE>STR + """) + stack = interp.stack + self.assertEqual(stack[0], "2020-11-02") + + def test_date_time_to_datetime(self): + interp = Interpreter() + interp.run(""" + 2020-11-02 10:25 PM DATE-TIME>DATETIME + 2020-11-02 10:25 PM DATE-TIME>DATETIME >DATE + 2020-11-02 10:25 PM DATE-TIME>DATETIME >TIME + """) + stack = interp.stack + self.assertEqual(stack[0].year, 2020) + self.assertEqual(stack[0].month, 11) + self.assertEqual(stack[0].day, 2) + self.assertEqual(stack[0].hour, 22) + self.assertEqual(stack[0].minute, 25) + self.assertEqual(stack[1].year, 2020) + self.assertEqual(stack[1].month, 11) + self.assertEqual(stack[1].day, 2) + self.assertEqual(stack[2].hour, 22) + self.assertEqual(stack[2].minute, 25) + + def test_datetime_to_timestamp(self): + interp = Interpreter() + interp.run(""" + 2020-07-01 15:20 DATE-TIME>DATETIME DATETIME>TIMESTAMP + """) + stack = interp.stack + self.assertEqual(stack[0], 1593642000) + + def test_timestamp_to_datetime(self): + interp = Interpreter() + interp.run(""" + 1593895532 TIMESTAMP>DATETIME + """) + stack = interp.stack + self.assertEqual(stack[0].year, 2020) + self.assertEqual(stack[0].month, 7) + self.assertEqual(stack[0].day, 4) + self.assertEqual(stack[0].hour, 13) + self.assertEqual(stack[0].minute, 45) + + def test_arithmetic(self): + interp = Interpreter() + interp.run(""" + 2 4 + + 2 4 - + 2 4 * + 2 4 / + 5 3 MOD + 2.51 ROUND + [1 2 3] + + [2 3 4] * 
+ """) + stack = interp.stack + self.assertEqual(stack[0], 6) + self.assertEqual(stack[1], -2) + self.assertEqual(stack[2], 8) + self.assertEqual(stack[3], 0.5) + self.assertEqual(stack[4], 2) + self.assertEqual(stack[5], 3) + self.assertEqual(stack[6], 6) + self.assertEqual(stack[7], 24) + + def test_mean(self): + interp = Interpreter() + stack = interp.stack + interp.run("[1 2 3 4 5] MEAN") + self.assertEqual(stack[-1], 3) + + interp.run("[4] MEAN") + self.assertEqual(stack[-1], 4) + + interp.run("[] MEAN") + self.assertEqual(stack[-1], 0) + + interp.run("NULL MEAN") + self.assertEqual(stack[-1], 0) + + def test_comparison(self): + interp = Interpreter() + interp.run(""" + 2 4 == + 2 4 != + 2 4 < + 2 4 <= + 2 4 > + 2 4 >= + """) + stack = interp.stack + self.assertFalse(stack[0]) + self.assertTrue(stack[1]) + self.assertTrue(stack[2]) + self.assertTrue(stack[3]) + self.assertFalse(stack[4]) + self.assertFalse(stack[5]) + + def test_logic(self): + interp = Interpreter() + interp.run(""" + FALSE FALSE OR + [FALSE FALSE TRUE FALSE] OR + FALSE TRUE AND + [FALSE FALSE TRUE FALSE] AND + FALSE NOT + """) + stack = interp.stack + self.assertFalse(stack[0]) + self.assertTrue(stack[1]) + self.assertFalse(stack[2]) + self.assertFalse(stack[3]) + self.assertTrue(stack[4]) + + def test_in(self): + interp = Interpreter() + interp.run(""" + "alpha" ["beta" "gamma"] IN + "alpha" ["beta" "gamma" "alpha"] IN + """) + stack = interp.stack + self.assertFalse(stack[0]) + self.assertTrue(stack[1]) + + def test_any(self): + interp = Interpreter() + interp.run(""" + ["alpha" "beta"] ["beta" "gamma"] ANY + ["delta" "beta"] ["gamma" "alpha"] ANY + ["alpha" "beta"] [] ANY + """) + stack = interp.stack + self.assertTrue(stack[0]) + self.assertFalse(stack[1]) + self.assertTrue(stack[2]) + + def test_all(self): + interp = Interpreter() + interp.run(""" + ["alpha" "beta"] ["beta" "gamma"] ALL + ["delta" "beta"] ["beta"] ALL + ["alpha" "beta"] [] ALL + """) + stack = interp.stack + 
self.assertFalse(stack[0]) + self.assertTrue(stack[1]) + self.assertTrue(stack[2]) + + def test_quoted(self): + interp = Interpreter() + interp.run(f""" + "howdy" QUOTED + "sinister{DLE}INJECT-BADNESS" QUOTED + """) + stack = interp.stack + self.assertEqual(f"{DLE}howdy{DLE}", stack[0]) + self.assertEqual(f"{DLE}sinister INJECT-BADNESS{DLE}", stack[1]) + + def test_rangeindex(self): + interp = Interpreter() + interp.run(""" + 0 [0 1 2] RANGE-INDEX + 1 [0 1 2] RANGE-INDEX + 2 [0 1 2] RANGE-INDEX + 3 [0 1 2] RANGE-INDEX + 100 [0 1 2] RANGE-INDEX + -1 [0 1 2] RANGE-INDEX + """) + stack = interp.stack + self.assertEqual(0, stack[0]) + self.assertEqual(1, stack[1]) + self.assertEqual(2, stack[2]) + self.assertEqual(2, stack[3]) + self.assertEqual(2, stack[4]) + self.assertEqual(None, stack[5]) + + def test_math_converters(self): + interp = Interpreter() + interp.run(""" + NULL >BOOL + 0 >BOOL + 1 >BOOL + "" >BOOL + "Hi" >BOOL + "3" >INT + 4 >INT + 4.6 >INT + "1.2" >FLOAT + 2 >FLOAT + """) + stack = interp.stack + self.assertFalse(stack[0]) + self.assertFalse(stack[1]) + self.assertTrue(stack[2]) + self.assertFalse(stack[3]) + self.assertTrue(stack[4]) + self.assertEqual(stack[5], 3) + self.assertEqual(stack[6], 4) + self.assertEqual(stack[7], 4) + self.assertEqual(stack[8], 1.2) + self.assertEqual(stack[9], 2.0) + + def test_profiling(self): + interp = Interpreter() + interp.run(""" + PROFILE-START + [0 "1 +" 6 Date: Mon, 27 May 2024 14:08:37 -0700 Subject: [PATCH 02/18] Can build a forthic package --- forthic-py/LICENSE | 28 +++ forthic-py/README.md | 57 +++++ forthic-py/pyproject.toml | 24 ++ forthic-py/src/__init__.py | 1 - forthic-py/src/forthic/__init__.py | 0 forthic-py/src/{ => forthic}/global_module.py | 0 forthic-py/src/{ => forthic}/interfaces.py | 0 forthic-py/src/{ => forthic}/interpreter.py | 0 forthic-py/src/{ => forthic}/module.py | 0 forthic-py/src/{ => forthic}/profile.py | 0 forthic-py/src/{ => forthic}/tokenizer.py | 0 forthic-py/src/{ => 
forthic}/tokens.py | 0 forthic-py/tests/modules/__init__.py | 0 ...sets_module.py => test_datasets_module.py} | 49 ++--- ...oweek_module.py => test_isoweek_module.py} | 0 ..._v3_jira_module.py => test_jira_module.py} | 205 ++++++++++++------ ...st_v3_org_module.py => test_org_module.py} | 0 ...3_trino_module.py => test_trino_module.py} | 0 ...global_module.py => test_global_module.py} | 0 ..._v3_interpreter.py => test_interpreter.py} | 0 ...test_v3_tokenizer.py => test_tokenizer.py} | 0 21 files changed, 272 insertions(+), 92 deletions(-) create mode 100644 forthic-py/LICENSE create mode 100644 forthic-py/README.md create mode 100644 forthic-py/pyproject.toml create mode 100644 forthic-py/src/forthic/__init__.py rename forthic-py/src/{ => forthic}/global_module.py (100%) rename forthic-py/src/{ => forthic}/interfaces.py (100%) rename forthic-py/src/{ => forthic}/interpreter.py (100%) rename forthic-py/src/{ => forthic}/module.py (100%) rename forthic-py/src/{ => forthic}/profile.py (100%) rename forthic-py/src/{ => forthic}/tokenizer.py (100%) rename forthic-py/src/{ => forthic}/tokens.py (100%) create mode 100644 forthic-py/tests/modules/__init__.py rename forthic-py/tests/modules/{test_v3_datasets_module.py => test_datasets_module.py} (77%) rename forthic-py/tests/modules/{test_v3_isoweek_module.py => test_isoweek_module.py} (100%) rename forthic-py/tests/modules/{test_v3_jira_module.py => test_jira_module.py} (60%) rename forthic-py/tests/modules/{test_v3_org_module.py => test_org_module.py} (100%) rename forthic-py/tests/modules/{test_v3_trino_module.py => test_trino_module.py} (100%) rename forthic-py/tests/{test_v3_global_module.py => test_global_module.py} (100%) rename forthic-py/tests/{test_v3_interpreter.py => test_interpreter.py} (100%) rename forthic-py/tests/{test_v3_tokenizer.py => test_tokenizer.py} (100%) diff --git a/forthic-py/LICENSE b/forthic-py/LICENSE new file mode 100644 index 0000000..1e2c101 --- /dev/null +++ b/forthic-py/LICENSE @@ -0,0 
+1,28 @@ +BSD 2-CLAUSE LICENSE + +Copyright 2020 LinkedIn Corporation. +All Rights Reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the + distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/forthic-py/README.md b/forthic-py/README.md new file mode 100644 index 0000000..b356b89 --- /dev/null +++ b/forthic-py/README.md @@ -0,0 +1,57 @@ +# Forthic + +Forthic is a stack-based language for writing tweakable applications by coordinating code written in a host language. + +## Documentation +For a brief overview of Forthic, see [OVERVIEW.md](docs/OVERVIEW.md). The [ARCHITECTURE.md](docs/ARCHITECTURE.md) provides more technical detail about Forthic, including a brief overview of some of the standard global Forthic words. 
The [IDIOMS.md](docs/IDIOMS.md) file gives pointers on how to use Forthic the way it was designed to be used. Also see the [THREAT_MODEL.md](docs/THREAT_MODEL.md) file for guidance on running Forthic securely. + +## Getting started +``` +# On Mac and Linux +make + +# On Windows +.\make-install.ps1 +.\make-server.ps1 +``` + +This will create a Python virtual environment, install Forthic into it, and run a +web server on port 8000 that can run some sample applications. + +## Examples +The Forthic examples run as web apps. To see a list of the examples run the server using `make` and then go here: [http://localhost:8000](http://localhost:8000) + +See [EXAMPLES.md](docs/EXAMPLES.md) for more info. + + +## Tests +``` +# Tests the Python Forthic interpreter +make test + +# Tests the JavaScript Forthic interpreter +make test-js + +# Tests both +make test-all +``` + +## Deleting secrets +All credentials are stored encrypted in a JSON object in the `server/.secrets` file. To delete a particular secret, just remove it from the JSON record +and save the file. 
To delete all secrets along with the encryption key, delete `server/.secrets` and `server/.key` or + +``` +# On Mac and Linux +make delete-secrets + +# On Windows +make-delete-secrets.ps1 +``` + +## YouTube +- [Coding Forthic with Rino](https://www.youtube.com/@codingforthic) + +## Articles +- [Categorical Coding](https://forthix.com/category/categorical-coding/) +- [Forthic How To](https://forthix.com/category/how-to/) +- LinkedIn Article on how to use the Jira module https://www.linkedin.com/pulse/hello-forthic-abdul-sheik diff --git a/forthic-py/pyproject.toml b/forthic-py/pyproject.toml new file mode 100644 index 0000000..c412dec --- /dev/null +++ b/forthic-py/pyproject.toml @@ -0,0 +1,24 @@ +[tool.poetry] +name = "forthic" +version = "4.0.0" +description = "A stack-based language for concisely building tweakable apps" +authors = ["Rino Jose "] +license = {file = "LICENSE"} +readme = "README.md" + +[tool.poetry.dependencies] +python = "^3.9" +urllib3 = "1.26.10" +pytz = "^2024.1" +cryptography = "^42.0.7" +python-dateutil = "^2.9.0.post0" +requests-oauthlib = "^2.0.0" +Jinja2 = "^3.1.4" +Markdown = "^3.6" + +[tool.poetry.group.dev.dependencies] +Flask = "^3.0.3" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/forthic-py/src/__init__.py b/forthic-py/src/__init__.py index de40ea7..e69de29 100644 --- a/forthic-py/src/__init__.py +++ b/forthic-py/src/__init__.py @@ -1 +0,0 @@ -__import__('pkg_resources').declare_namespace(__name__) diff --git a/forthic-py/src/forthic/__init__.py b/forthic-py/src/forthic/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/forthic-py/src/global_module.py b/forthic-py/src/forthic/global_module.py similarity index 100% rename from forthic-py/src/global_module.py rename to forthic-py/src/forthic/global_module.py diff --git a/forthic-py/src/interfaces.py b/forthic-py/src/forthic/interfaces.py similarity index 100% rename from forthic-py/src/interfaces.py rename to 
forthic-py/src/forthic/interfaces.py diff --git a/forthic-py/src/interpreter.py b/forthic-py/src/forthic/interpreter.py similarity index 100% rename from forthic-py/src/interpreter.py rename to forthic-py/src/forthic/interpreter.py diff --git a/forthic-py/src/module.py b/forthic-py/src/forthic/module.py similarity index 100% rename from forthic-py/src/module.py rename to forthic-py/src/forthic/module.py diff --git a/forthic-py/src/profile.py b/forthic-py/src/forthic/profile.py similarity index 100% rename from forthic-py/src/profile.py rename to forthic-py/src/forthic/profile.py diff --git a/forthic-py/src/tokenizer.py b/forthic-py/src/forthic/tokenizer.py similarity index 100% rename from forthic-py/src/tokenizer.py rename to forthic-py/src/forthic/tokenizer.py diff --git a/forthic-py/src/tokens.py b/forthic-py/src/forthic/tokens.py similarity index 100% rename from forthic-py/src/tokens.py rename to forthic-py/src/forthic/tokens.py diff --git a/forthic-py/tests/modules/__init__.py b/forthic-py/tests/modules/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/forthic-py/tests/modules/test_v3_datasets_module.py b/forthic-py/tests/modules/test_datasets_module.py similarity index 77% rename from forthic-py/tests/modules/test_v3_datasets_module.py rename to forthic-py/tests/modules/test_datasets_module.py index 315df03..6ad7be5 100644 --- a/forthic-py/tests/modules/test_v3_datasets_module.py +++ b/forthic-py/tests/modules/test_datasets_module.py @@ -1,25 +1,31 @@ import os import json import unittest + from forthic.v3.interpreter import Interpreter from forthic.v3.modules.datasets_module import DatasetsModule + def get_data_dir(): return f"{os.getcwd()}/tests/tests_py/v3/modules/datasets_data" + def get_dataset_file(dataset_name): return f"{get_data_dir()}/datasets/{dataset_name}.dataset" + def load_dataset(dataset_name): with open(get_dataset_file(dataset_name)) as f: result = json.loads(f.read()) return result + def clear_dataset(dataset_name): 
dataset_file = get_dataset_file(dataset_name) if os.path.isfile(dataset_file): os.remove(dataset_file) + def get_interp(): result = Interpreter() result.register_module(DatasetsModule) @@ -34,10 +40,7 @@ def setUp(self): def test_DATASET_bang(self): # Test: Store data - dataset = { - "alpha": [1, 2, 3], - "beta": [4, 5, 6] - } + dataset = {"alpha": [1, 2, 3], "beta": [4, 5, 6]} self.interp.stack_push(dataset) self.interp.run("'greek' datasets.DATASET!") @@ -45,37 +48,24 @@ def test_DATASET_bang(self): self.assertDictEqual(dataset, loaded_data) # Test: Add data to existing dataset - dataset = { - "gamma": [7, 8, 9] - } + dataset = {"gamma": [7, 8, 9]} self.interp.stack_push(dataset) self.interp.run("'greek' datasets.DATASET!") loaded_data = load_dataset("greek") - modified_dataset = { - "alpha": [1, 2, 3], - "beta": [4, 5, 6], - "gamma": [7, 8, 9] - } + modified_dataset = {"alpha": [1, 2, 3], "beta": [4, 5, 6], "gamma": [7, 8, 9]} self.assertDictEqual(modified_dataset, loaded_data) # Test: Ovewrite existing dataset - dataset = { - "delta": [10, 11, 12] - } + dataset = {"delta": [10, 11, 12]} self.interp.stack_push(dataset) self.interp.run("'greek' datasets.!OVERWRITE datasets.DATASET!") loaded_data = load_dataset("greek") - new_dataset = { - "delta": [10, 11, 12] - } + new_dataset = {"delta": [10, 11, 12]} self.assertDictEqual(new_dataset, loaded_data) def test_DATASET(self): # Store dataset - dataset = { - "alpha": [1, 2, 3], - "beta": [4, 5, 6] - } + dataset = {"alpha": [1, 2, 3], "beta": [4, 5, 6]} self.interp.stack_push(dataset) self.interp.run("'greek' datasets.DATASET!") @@ -85,10 +75,7 @@ def test_DATASET(self): def test_RECORDS(self): # Store dataset - dataset = { - "alpha": [1, 2, 3], - "beta": [4, 5, 6] - } + dataset = {"alpha": [1, 2, 3], "beta": [4, 5, 6]} self.interp.stack_push(dataset) self.interp.run("'greek' datasets.DATASET!") @@ -101,7 +88,11 @@ def test_RECORDS(self): self.assertEqual([[4, 5, 6], None, [1, 2, 3]], self.interp.stack[-1]) # Get 
records dropping NULLs for missing keys - self.interp.run("'greek' ['beta' 'MISSING' 'alpha'] datasets.!DROP-NULLS datasets.RECORDS") + self.interp.run( + "'greek' ['beta' 'MISSING' 'alpha'] datasets.!DROP-NULLS datasets.RECORDS" + ) self.assertEqual([[4, 5, 6], [1, 2, 3]], self.interp.stack[-1]) -if __name__ == '__main__': - unittest.main() \ No newline at end of file + + +if __name__ == "__main__": + unittest.main() diff --git a/forthic-py/tests/modules/test_v3_isoweek_module.py b/forthic-py/tests/modules/test_isoweek_module.py similarity index 100% rename from forthic-py/tests/modules/test_v3_isoweek_module.py rename to forthic-py/tests/modules/test_isoweek_module.py diff --git a/forthic-py/tests/modules/test_v3_jira_module.py b/forthic-py/tests/modules/test_jira_module.py similarity index 60% rename from forthic-py/tests/modules/test_v3_jira_module.py rename to forthic-py/tests/modules/test_jira_module.py index 6a54f95..393ec6a 100644 --- a/forthic-py/tests/modules/test_v3_jira_module.py +++ b/forthic-py/tests/modules/test_jira_module.py @@ -1,8 +1,9 @@ import unittest import datetime -from forthic.v3.interpreter import Interpreter -from forthic.v3.modules.jira_module import JiraModule -from tests.tests_py.v3.modules.jira_context import JiraTestContext + +from ...src.forthic.interpreter import Interpreter +from ...src.forthic.modules.jira_module import JiraModule +from .jira_context import JiraTestContext def get_interp(): @@ -25,51 +26,62 @@ def test_HOST(self): self.assertEqual(self.interp.stack[0], "http://testcontext") def test_SEARCH(self): - self.interp.run(""" + self.interp.run( + """ : JQL ["assignee=testuser and resolution is null"] CONCAT; : FIELDS ['Summary' 'Assignee']; JQL FIELDS jira.SEARCH - """) + """ + ) issues = self.interp.stack[0] self.assertEqual(2, len(issues)) - self.assertEqual("SAMPLE-1234", issues[0]['key']) - self.assertEqual("testuser", issues[0]['Assignee']) - self.assertEqual("SAMPLE-1235", issues[1]['key']) + 
self.assertEqual("SAMPLE-1234", issues[0]["key"]) + self.assertEqual("testuser", issues[0]["Assignee"]) + self.assertEqual("SAMPLE-1235", issues[1]["key"]) def test_DEFAULT_SEARCH(self): - self.interp.run(""" + self.interp.run( + """ : JQL ["assignee=testuser and resolution is null"] CONCAT; : FIELDS ['Summary' 'Assignee']; JQL FIELDS jira.DEFAULT-SEARCH - """) + """ + ) issues = self.interp.stack[0] self.assertEqual(2, len(issues)) - self.assertEqual("SAMPLE-1234", issues[0]['key']) - self.assertEqual("testuser", issues[0]['Assignee']) + self.assertEqual("SAMPLE-1234", issues[0]["key"]) + self.assertEqual("testuser", issues[0]["Assignee"]) def test_CREATE(self): - self.interp.run(""" + self.interp.run( + """ [ ["Project" "SAMPLE"] ["Summary" "A sample ticket"] ["Reporter" "testuser"] ["Issue Type" "Task"] ] REC jira.CREATE - """) + """ + ) self.assertEqual("SAMPLE-12345", self.interp.stack[0]) def test_UPDATE(self): - self.interp.run(""" + self.interp.run( + """ "SAMPLE-1234" [["Assignee" "testuser2"]] REC jira.UPDATE - """) + """ + ) def test_ADD_WATCHER(self): - self.interp.run(""" + self.interp.run( + """ "SAMPLE-1234" "manager1" jira.ADD-WATCHER - """) + """ + ) def test_LINK_ISSUES(self): - self.interp.run(""" + self.interp.run( + """ "SAMPLE-101" "SAMPLE-202" jira.DEPENDENCY jira.LINK-ISSUES "SAMPLE-101" "SAMPLE-202" jira.ACTION-ITEM jira.LINK-ISSUES "SAMPLE-101" "SAMPLE-202" jira.CLONERS jira.LINK-ISSUES @@ -77,52 +89,62 @@ def test_LINK_ISSUES(self): "SAMPLE-101" "SAMPLE-202" jira.ISSUE-SPLIT jira.LINK-ISSUES "SAMPLE-101" "SAMPLE-202" jira.RELATED jira.LINK-ISSUES "SAMPLE-101" "SAMPLE-202" jira.REQUIRE jira.LINK-ISSUES - """) + """ + ) def test_VOTES(self): - self.interp.run(""" + self.interp.run( + """ "SAMPLE-101" jira.VOTES - """) - self.assertEqual(['user1', 'user2'], self.interp.stack[0]) + """ + ) + self.assertEqual(["user1", "user2"], self.interp.stack[0]) def test_CHANGELOG(self): - self.interp.run(""" + self.interp.run( + """ "SAMPLE-101" 
["Risk_Factor"] jira.CHANGELOG - """) + """ + ) changes = self.interp.stack[0] self.assertEqual(3, len(changes)) - self.assertEqual('', changes[0]['from']) - self.assertEqual('Blue', changes[0]['to']) + self.assertEqual("", changes[0]["from"]) + self.assertEqual("Blue", changes[0]["to"]) - self.assertEqual('Blue', changes[1]['from']) - self.assertEqual('Green', changes[1]['to']) + self.assertEqual("Blue", changes[1]["from"]) + self.assertEqual("Green", changes[1]["to"]) - self.assertEqual('Green', changes[2]['from']) - self.assertEqual('Yellow', changes[2]['to']) + self.assertEqual("Green", changes[2]["from"]) + self.assertEqual("Yellow", changes[2]["to"]) def test_FIELD_AS_OF(self): - self.interp.run(""" + self.interp.run( + """ ["changes"] VARIABLES "SAMPLE-101" ["Risk_Factor"] jira.CHANGELOG changes ! 2020-07-25 changes @ "Risk_Factor" jira.FIELD-AS-OF 2020-10-01 changes @ "Risk_Factor" jira.FIELD-AS-OF - """) + """ + ) self.assertEqual("Green", self.interp.stack[0]) self.assertEqual("Yellow", self.interp.stack[1]) def test_FIELD_CHANGE_AS_OF(self): - self.interp.run(""" + self.interp.run( + """ ["changes"] VARIABLES "SAMPLE-101" ["Risk_Factor"] jira.CHANGELOG changes ! 2020-07-25 changes @ "Risk_Factor" jira.FIELD-CHANGE-AS-OF 'date' REC@ DATE>STR 2020-10-01 changes @ "Risk_Factor" jira.FIELD-CHANGE-AS-OF 'date' REC@ DATE>STR - """) + """ + ) self.assertEqual("2020-07-25", self.interp.stack[0]) self.assertEqual("2020-08-15", self.interp.stack[1]) def test_FIELD_AS_OF_SINCE(self): - self.interp.run(""" + self.interp.run( + """ ["changes"] VARIABLES "SAMPLE-101" ["Risk_Factor"] jira.CHANGELOG changes ! 
@@ -138,7 +160,8 @@ def test_FIELD_AS_OF_SINCE(self): 2020-08-17 changes @ "Risk_Factor" 2020-08-01 jira.FIELD-AS-OF-SINCE 2020-08-17 changes @ "Risk_Factor" 2020-08-16 jira.FIELD-AS-OF-SINCE 2020-10-01 changes @ "Risk_Factor" 2020-09-01 jira.FIELD-AS-OF-SINCE - """) + """ + ) self.assertEqual("Green", self.interp.stack[0]) self.assertEqual("Yellow", self.interp.stack[1]) self.assertEqual("Yellow", self.interp.stack[2]) @@ -151,10 +174,36 @@ def test_TIME_IN_STATE(self): # NOTE: The following data would come from something like `'PROJ-1234' ['status'] jira.CHANGELOG` changes = [ - {"date": datetime.datetime(2021, 7, 21, 1, 14, 57), "field": "status", "from": "", "to": "Open"}, - {"date": datetime.datetime(2021, 8, 23, 2, 56, 7), "field": "status", "from": "Open", "to": "Scoping", "from_": "1", "to_": "10128"}, - {"date": datetime.datetime(2021, 9, 27, 19, 53, 39), "field": "status", "from": "Scoping", "to": "In Development", "from_": "10128", "to_": "10194"}, - {"date": datetime.datetime(2021, 11, 4, 8, 36, 5), "field": "status", "from": "In Development", "to": "Closed", "from_": "10194", "to_": "6"} + { + "date": datetime.datetime(2021, 7, 21, 1, 14, 57), + "field": "status", + "from": "", + "to": "Open", + }, + { + "date": datetime.datetime(2021, 8, 23, 2, 56, 7), + "field": "status", + "from": "Open", + "to": "Scoping", + "from_": "1", + "to_": "10128", + }, + { + "date": datetime.datetime(2021, 9, 27, 19, 53, 39), + "field": "status", + "from": "Scoping", + "to": "In Development", + "from_": "10128", + "to_": "10194", + }, + { + "date": datetime.datetime(2021, 11, 4, 8, 36, 5), + "field": "status", + "from": "In Development", + "to": "Closed", + "from_": "10194", + "to_": "6", + }, ] # Make the call @@ -165,10 +214,10 @@ def test_TIME_IN_STATE(self): # Check the results result = self.interp.stack_pop() - self.assertAlmostEqual(793, int(result['Open'])) - self.assertAlmostEqual(856, int(result['Scoping'])) - self.assertAlmostEqual(900, int(result['In 
Development'])) - self.assertAlmostEqual(0, int(result['Closed'])) + self.assertAlmostEqual(793, int(result["Open"])) + self.assertAlmostEqual(856, int(result["Scoping"])) + self.assertAlmostEqual(900, int(result["In Development"])) + self.assertAlmostEqual(0, int(result["Closed"])) def test_TIME_IN_STATE_timestamps(self): field = "status" @@ -177,9 +226,30 @@ def test_TIME_IN_STATE_timestamps(self): # NOTE: The following data would come from something like `'PROJ-1234' ['status'] jira.CHANGELOG` changes = [ {"date": 1626830097, "field": "status", "from": "", "to": "Open"}, - {"date": 1629687367, "field": "status", "from": "Open", "to": "Scoping", "from_": "1", "to_": "10128"}, - {"date": 1632772419, "field": "status", "from": "Scoping", "to": "In Development", "from_": "10128", "to_": "10194"}, - {"date": 1636014965, "field": "status", "from": "In Development", "to": "Closed", "from_": "10194", "to_": "6"} + { + "date": 1629687367, + "field": "status", + "from": "Open", + "to": "Scoping", + "from_": "1", + "to_": "10128", + }, + { + "date": 1632772419, + "field": "status", + "from": "Scoping", + "to": "In Development", + "from_": "10128", + "to_": "10194", + }, + { + "date": 1636014965, + "field": "status", + "from": "In Development", + "to": "Closed", + "from_": "10194", + "to_": "6", + }, ] # Make the call @@ -190,40 +260,51 @@ def test_TIME_IN_STATE_timestamps(self): # Check the results result = self.interp.stack_pop() - self.assertAlmostEqual(793, int(result['Open'])) - self.assertAlmostEqual(856, int(result['Scoping'])) - self.assertAlmostEqual(900, int(result['In Development'])) - self.assertAlmostEqual(0, int(result['Closed'])) + self.assertAlmostEqual(793, int(result["Open"])) + self.assertAlmostEqual(856, int(result["Scoping"])) + self.assertAlmostEqual(900, int(result["In Development"])) + self.assertAlmostEqual(0, int(result["Closed"])) def test_FIELD_TAG(self): - self.interp.run(""" + self.interp.run( + """ ["ticket"] VARIABLES [ ["Description" "This 
is a sample description [objective: To make things awesome]"] ] REC ticket ! ticket @ "Description" "objective" jira.FIELD-TAG - """) + """ + ) self.assertEqual("To make things awesome", self.interp.stack[0]) def test_REMOVE_FIELD_TAGS(self): - self.interp.run(""" + self.interp.run( + """ "This is a sample description. [objective: To make things awesome] alpha [tag2: Something else] beta" jira.REMOVE-FIELD-TAGS - """) - self.assertEqual("This is a sample description. alpha beta", self.interp.stack[0]) + """ + ) + self.assertEqual( + "This is a sample description. alpha beta", self.interp.stack[0] + ) def test_l_FIELD_TAG_bang(self): - self.interp.run(""" + self.interp.run( + """ ["ticket"] VARIABLES [ ["Description" "This is a sample description."] ] REC ticket ! ticket @ "Description" "risk" "There isn't any risk!" jira. Date: Mon, 27 May 2024 14:34:02 -0700 Subject: [PATCH 03/18] Got tests running again - Had to use absolute imports in tests --- forthic-py/pyproject.toml | 2 +- .../src/forthic/modules/airtable_module.py | 39 +- .../src/forthic/modules/confluence_module.py | 172 ++++--- .../src/forthic/modules/excel_module.py | 153 +++--- forthic-py/src/forthic/modules/gdoc_module.py | 208 ++++---- .../src/forthic/modules/gsheet_module.py | 311 ++++++------ forthic-py/src/forthic/modules/html_module.py | 250 +++++----- forthic-py/src/forthic/modules/jira_module.py | 454 +++++++++--------- .../src/forthic/modules/trino_module.py | 107 ----- forthic-py/src/forthic/utils/__init__.py | 0 forthic-py/src/forthic/utils/creds.py | 212 ++++++++ forthic-py/src/forthic/utils/errors.py | 70 +++ forthic-py/tests/modules/jira_context.py | 41 +- .../tests/modules/test_datasets_module.py | 4 +- .../tests/modules/test_isoweek_module.py | 43 +- forthic-py/tests/modules/test_jira_module.py | 5 +- forthic-py/tests/modules/test_org_module.py | 118 +++-- forthic-py/tests/modules/test_trino_module.py | 32 -- forthic-py/tests/modules/trino_context.py | 68 --- 
forthic-py/tests/sample_date_module.py | 5 +- forthic-py/tests/test_global_module.py | 6 +- forthic-py/tests/test_interpreter.py | 30 +- forthic-py/tests/test_tokenizer.py | 42 +- .../datasets_data/datasets/greek.dataset | 12 + 24 files changed, 1289 insertions(+), 1095 deletions(-) delete mode 100644 forthic-py/src/forthic/modules/trino_module.py create mode 100644 forthic-py/src/forthic/utils/__init__.py create mode 100644 forthic-py/src/forthic/utils/creds.py create mode 100644 forthic-py/src/forthic/utils/errors.py delete mode 100644 forthic-py/tests/modules/test_trino_module.py delete mode 100644 forthic-py/tests/modules/trino_context.py create mode 100644 forthic-py/tests/tests_py/v3/modules/datasets_data/datasets/greek.dataset diff --git a/forthic-py/pyproject.toml b/forthic-py/pyproject.toml index c412dec..d17333c 100644 --- a/forthic-py/pyproject.toml +++ b/forthic-py/pyproject.toml @@ -3,7 +3,7 @@ name = "forthic" version = "4.0.0" description = "A stack-based language for concisely building tweakable apps" authors = ["Rino Jose "] -license = {file = "LICENSE"} +license = "BSD 2-CLAUSE LICENSE" readme = "README.md" [tool.poetry.dependencies] diff --git a/forthic-py/src/forthic/modules/airtable_module.py b/forthic-py/src/forthic/modules/airtable_module.py index 3bb8d2b..a4e721f 100644 --- a/forthic-py/src/forthic/modules/airtable_module.py +++ b/forthic-py/src/forthic/modules/airtable_module.py @@ -2,10 +2,7 @@ import urllib from ..module import Module from ..interfaces import IInterpreter -from ...utils.errors import ( - AirtableError, - AirtableUnauthorized -) +from ..utils.errors import AirtableError, AirtableUnauthorized from typing import List @@ -17,13 +14,14 @@ class AirtableModule(Module): This adds basic support for working with Airtable: """ + def __init__(self, interp: IInterpreter): - super().__init__('airtable', interp, FORTHIC) - self.context_stack: List['AirtableCredsContext'] = [] + super().__init__("airtable", interp, FORTHIC) + 
self.context_stack: List["AirtableCredsContext"] = [] - self.add_module_word('PUSH-CONTEXT!', self.word_PUSH_CONTEXT_bang) - self.add_module_word('POP-CONTEXT!', self.word_POP_CONTEXT_bang) - self.add_module_word('RECORDS', self.word_RECORDS) + self.add_module_word("PUSH-CONTEXT!", self.word_PUSH_CONTEXT_bang) + self.add_module_word("POP-CONTEXT!", self.word_POP_CONTEXT_bang) + self.add_module_word("RECORDS", self.word_RECORDS) # ( creds_context -- ) def word_PUSH_CONTEXT_bang(self, interp: IInterpreter): @@ -55,7 +53,7 @@ def make_sort_param(records): pieces = [] for index, r in enumerate(records): pieces.append(f"sort%5B{index}%5D%5Bfield%5D={r['field']}") - if r.get('direction'): + if r.get("direction"): pieces.append(f"sort%5B{index}%5D%5Bdirection%5D={r['direction']}") return "&".join(pieces) @@ -91,15 +89,19 @@ def construct_query_param_string(config, offset): # We may need to iterate to get all of the records def get_records(records=[], offset=None, iterations=1): qstring = construct_query_param_string(config, offset) - api_url = f'/v0/{base_id}/{table}{qstring}' + api_url = f"/v0/{base_id}/{table}{qstring}" response = context.requests_get(api_url) if not response.ok: - raise RuntimeError(f"airtable.RECORDS: Error getting records: {response.reason}") + raise RuntimeError( + f"airtable.RECORDS: Error getting records: {response.reason}" + ) data = response.json() records.extend(data["records"]) if iterations > MAX_ITERATIONS: - raise RuntimeError(f"airtable.RECORDS exceeded {MAX_ITERATIONS} iterations") + raise RuntimeError( + f"airtable.RECORDS exceeded {MAX_ITERATIONS} iterations" + ) if data.get("offset"): get_records(records, data["offset"], iterations + 1) @@ -114,7 +116,7 @@ def get_records(records=[], offset=None, iterations=1): def get_context(self): if not self.context_stack: raise AirtableError( - 'Need to push an AirtableCredsContext with PUSH-CONTEXT!' + "Need to push an AirtableCredsContext with PUSH-CONTEXT!" 
) result = self.context_stack[-1] return result @@ -123,6 +125,7 @@ def get_context(self): class AirtableCredsContext: """Clients of the alation module must extend CredsContext and use PUSH-CONTEXT! in order to set the current creds context""" + def __init__(self, field): self.field = field @@ -138,9 +141,7 @@ def get_cert_verify(self): def requests_get(self, api_url): """Makes HTTP GET call to pull data""" api_url_w_host = self.get_host() + api_url - headers = { - "Authorization": f"Bearer {self.get_api_token()}" - } + headers = {"Authorization": f"Bearer {self.get_api_token()}"} result = requests.get( api_url_w_host, headers=headers, @@ -151,5 +152,5 @@ def requests_get(self, api_url): return result -FORTHIC = ''' -''' +FORTHIC = """ +""" diff --git a/forthic-py/src/forthic/modules/confluence_module.py b/forthic-py/src/forthic/modules/confluence_module.py index 489893f..7bbe78e 100644 --- a/forthic-py/src/forthic/modules/confluence_module.py +++ b/forthic-py/src/forthic/modules/confluence_module.py @@ -3,7 +3,7 @@ import requests from ..module import Module from ..interfaces import IInterpreter -from ...utils.errors import ConfluenceError +from ..utils.errors import ConfluenceError from typing import List, Optional # Unit separator @@ -15,27 +15,30 @@ class ConfluenceModule(Module): See `docs/modules/confluence_module.md` for detailed descriptions of each word. 
""" + def __init__(self, interp: IInterpreter): - super().__init__('confluence', interp, CONFLUENCE_FORTHIC) - self.context_stack: List['ConfluenceContext'] = [] + super().__init__("confluence", interp, CONFLUENCE_FORTHIC) + self.context_stack: List["ConfluenceContext"] = [] - self.add_module_word('PUSH-CONTEXT!', self.word_PUSH_CONTEXT_bang) - self.add_module_word('POP-CONTEXT!', self.word_POP_CONTEXT_bang) - self.add_module_word('HOST', self.word_HOST) + self.add_module_word("PUSH-CONTEXT!", self.word_PUSH_CONTEXT_bang) + self.add_module_word("POP-CONTEXT!", self.word_POP_CONTEXT_bang) + self.add_module_word("HOST", self.word_HOST) - self.add_module_word('PAGE-INFO', self.word_PAGE_INFO) + self.add_module_word("PAGE-INFO", self.word_PAGE_INFO) - self.add_module_word('NBSP', self.word_NBSP) - self.add_module_word('SPACES-WIDE', self.word_SPACES_WIDE) + self.add_module_word("NBSP", self.word_NBSP) + self.add_module_word("SPACES-WIDE", self.word_SPACES_WIDE) - self.add_module_word('|ESCAPE-TABLE-CONTENT', self.word_pipe_ESCAPE_TABLE_CONTENT) - self.add_module_word('|ESCAPE-NEWLINES', self.word_pipe_ESCAPE_NEWLINES) - self.add_module_word('COLOR-BOX', self.word_COLOR_BOX) - self.add_module_word('TABLE', self.word_TABLE) - self.add_module_word('RENDER', self.word_RENDER) + self.add_module_word( + "|ESCAPE-TABLE-CONTENT", self.word_pipe_ESCAPE_TABLE_CONTENT + ) + self.add_module_word("|ESCAPE-NEWLINES", self.word_pipe_ESCAPE_NEWLINES) + self.add_module_word("COLOR-BOX", self.word_COLOR_BOX) + self.add_module_word("TABLE", self.word_TABLE) + self.add_module_word("RENDER", self.word_RENDER) - self.add_module_word('UPSERT-PAGE', self.word_UPSERT_PAGE) - self.add_module_word('ADD-BLOG-POST', self.word_ADD_BLOG_POST) + self.add_module_word("UPSERT-PAGE", self.word_UPSERT_PAGE) + self.add_module_word("ADD-BLOG-POST", self.word_ADD_BLOG_POST) # ( context -- ) def word_PUSH_CONTEXT_bang(self, interp: IInterpreter): @@ -58,7 +61,7 @@ def word_PAGE_INFO(self, interp: 
IInterpreter): space = interp.stack_pop() encoded_title = urllib.parse.quote_plus(title) - api_url = f'/wiki/cf/rest/api/content?title={encoded_title}&spaceKey={space}&expand=version' + api_url = f"/wiki/cf/rest/api/content?title={encoded_title}&spaceKey={space}&expand=version" response = context.requests_get(api_url) if response.status_code != 200: @@ -67,15 +70,15 @@ def word_PAGE_INFO(self, interp: IInterpreter): ) data = response.json() - if not data['results']: + if not data["results"]: raise ConfluenceError(f"Can't find '{title}' in space '{space}'") - result = data['results'][0] + result = data["results"][0] interp.stack_push(result) # ( -- nbsp_char ) def word_NBSP(self, interp: IInterpreter): - interp.stack_push(' ') + interp.stack_push(" ") # ( str num_spaces -- str ) def word_SPACES_WIDE(self, interp: IInterpreter): @@ -84,7 +87,7 @@ def word_SPACES_WIDE(self, interp: IInterpreter): string = interp.stack_pop() # Count   as one space - num_nbsps = len(re.findall(' ', string)) + num_nbsps = len(re.findall(" ", string)) chars_to_subtract = 5 * num_nbsps string_len = len(string) - chars_to_subtract @@ -92,7 +95,7 @@ def word_SPACES_WIDE(self, interp: IInterpreter): result = string else: spaces_to_add = num_spaces - string_len - result = string + spaces_to_add * ' ' + result = string + spaces_to_add * " " interp.stack_push(result) @@ -114,8 +117,8 @@ def word_pipe_ESCAPE_NEWLINES(self, interp: IInterpreter): interp.stack_push(content) return content = content.strip() - content = content.replace('\r', '') - pieces = content.split('\n') + content = content.replace("\r", "") + pieces = content.split("\n") result = r" \\ ".join(pieces) interp.stack_push(result) pass @@ -142,17 +145,17 @@ def table_row(rec): for h in headers: value = rec.get(h) if not value: - value = '' + value = "" interp.stack_push(value) interp.run("'' ] '|' JOIN") # Assemble table - interp.run('[') + interp.run("[") table_heading() for r in recs: table_row(r) - interp.run(']') - 
interp.run('/N JOIN') + interp.run("]") + interp.run("/N JOIN") # ( object -- html/wiki ) def word_RENDER(self, interp: IInterpreter): @@ -174,14 +177,14 @@ def word_UPSERT_PAGE(self, interp: IInterpreter): encoded_title = urllib.parse.quote_plus(title) def does_page_exist(): - api_url = f'/wiki/cf/rest/api/content?title={encoded_title}&spaceKey={space}&expand=ancestors' + api_url = f"/wiki/cf/rest/api/content?title={encoded_title}&spaceKey={space}&expand=ancestors" response = context.requests_get(api_url) data = response.json() - if data['size'] == 0: + if data["size"] == 0: return False - page_info = data['results'][0] - current_parent = page_info['ancestors'][-1]['title'] + page_info = data["results"][0] + current_parent = page_info["ancestors"][-1]["title"] if current_parent != parent_title: raise ConfluenceError( f"'{title}' exists, but its current parent '{current_parent}' does not match the specified parent '{parent_title}'" @@ -191,23 +194,21 @@ def does_page_exist(): def get_page_info(page_title): interp.stack_push(space) interp.stack_push(page_title) - interp.run('PAGE-INFO') + interp.run("PAGE-INFO") res = interp.stack_pop() return res def create_page(): parent_info = get_page_info(parent_title) - parent_id = parent_info['id'] + parent_id = parent_info["id"] request_data = { - 'type': 'page', - 'title': title, - 'ancestors': [{'id': parent_id}], - 'space': {'key': space}, - 'body': { - 'storage': {'value': content, 'representation': 'wiki'} - }, + "type": "page", + "title": title, + "ancestors": [{"id": parent_id}], + "space": {"key": space}, + "body": {"storage": {"value": content, "representation": "wiki"}}, } - api_url = '/wiki/cf/rest/api/content' + api_url = "/wiki/cf/rest/api/content" response = context.requests_post(api_url, json=request_data) if response.status_code != 200: raise ConfluenceError( @@ -215,30 +216,28 @@ def create_page(): ) def get_version(page_info): - version_info = page_info.get('version') + version_info = 
page_info.get("version") if version_info: - res = int(version_info['number']) + res = int(version_info["number"]) else: res = 1 return res def update_page(): page_info = get_page_info(title) - page_id = page_info['id'] + page_id = page_info["id"] version = get_version(page_info) request_data = { - 'id': page_id, - 'type': 'page', - 'title': title, - 'space': {'key': space}, - 'body': { - 'storage': {'value': content, 'representation': 'wiki'} - }, - 'version': {'number': version + 1}, + "id": page_id, + "type": "page", + "title": title, + "space": {"key": space}, + "body": {"storage": {"value": content, "representation": "wiki"}}, + "version": {"number": version + 1}, } - api_url = f'/wiki/cf/rest/api/content/{page_id}' + api_url = f"/wiki/cf/rest/api/content/{page_id}" response = context.requests_put(api_url, json=request_data) if response.status_code != 200: @@ -263,10 +262,7 @@ def word_ADD_BLOG_POST(self, interp: IInterpreter): space = interp.stack_pop() def make_record_label(label): - return { - "prefix": "global", - "name": label - } + return {"prefix": "global", "name": label} if labels: label_records = [make_record_label(label) for label in labels] @@ -275,14 +271,12 @@ def make_record_label(label): def create_post(): request_data = { - 'type': 'blogpost', - 'title': title, - 'space': {'key': space}, - 'body': { - 'storage': {'value': content, 'representation': 'wiki'} - } + "type": "blogpost", + "title": title, + "space": {"key": space}, + "body": {"storage": {"value": content, "representation": "wiki"}}, } - api_url = '/wiki/cf/rest/api/content' + api_url = "/wiki/cf/rest/api/content" response = context.requests_post(api_url, json=request_data) if response.status_code != 200: raise ConfluenceError( @@ -292,7 +286,7 @@ def create_post(): # Add labels if label_records: page_id = response.json()["id"] - label_api_url = f'/wiki/cf/rest/api/content/{page_id}/label' + label_api_url = f"/wiki/cf/rest/api/content/{page_id}/label" response = 
context.requests_post(label_api_url, json=label_records) if response.status_code != 200: raise ConfluenceError( @@ -306,7 +300,7 @@ def create_post(): def current_context(self): if not self.context_stack: raise ConfluenceError( - 'Use confluence.PUSH-CONTEXT! to provide a Confluence context' + "Use confluence.PUSH-CONTEXT! to provide a Confluence context" ) result = self.context_stack[-1] @@ -320,30 +314,26 @@ def escape_table_content(content): used to specify a link """ if not content: - return '' + return "" def remove_blank_lines(s): s = s.strip() - s = s.replace('\r', '') - pieces = s.split('\n') + s = s.replace("\r", "") + pieces = s.split("\n") non_blank_pieces = [p for p in pieces if p] res = "\n".join(non_blank_pieces) # If content is empty, return a space so the table cell doesn't collapse if not res: - res = ' ' + res = " " return res def remove_pipes_if_needed(s): res = re.sub( - r'\[(.*?)\|(.*?)\]', r'[\1%s\2]' % US, s - ) # Replace pipes in links with US character - res = re.sub( - r'\|', '', res - ) # Remove all other pipes - res = re.sub( - US, '|', res - ) # Replace US chars with pipes again + r"\[(.*?)\|(.*?)\]", r"[\1%s\2]" % US, s + ) # Replace pipes in links with US character + res = re.sub(r"\|", "", res) # Remove all other pipes + res = re.sub(US, "|", res) # Replace US chars with pipes again return res result = remove_blank_lines(content) @@ -356,7 +346,9 @@ def raise_status_error_if_needed(response): return if response.status_code == 401: - raise ConfluenceError("Unauthorized request. Please check your Confluence credentials.") + raise ConfluenceError( + "Unauthorized request. Please check your Confluence credentials." 
+ ) else: raise ConfluenceError(response.text) @@ -411,16 +403,14 @@ def get_password(self): return None -CONFLUENCE_FORTHIC = ''' -''' +CONFLUENCE_FORTHIC = """ +""" -class ColorBox(): +class ColorBox: def __init__(self, color): self.color = color - self.options = { - "hover_text": '' - } + self.options = {"hover_text": ""} return def __getitem__(self, key: str) -> Optional[bool]: @@ -429,16 +419,18 @@ def __getitem__(self, key: str) -> Optional[bool]: def __setitem__(self, key: str, value: Optional[bool]): if key not in self.options: - raise RuntimeError(f"Unknown ColorBox option: '{key}'. Must be one of {self.options.keys()}") + raise RuntimeError( + f"Unknown ColorBox option: '{key}'. Must be one of {self.options.keys()}" + ) self.options[key] = value def render(self): result = '{html}' result += ' ' result += ' ' - result += f'''
''' - result += '
{html}' + background-color:{self.color}">""" + result += " {html}" return result diff --git a/forthic-py/src/forthic/modules/excel_module.py b/forthic-py/src/forthic/modules/excel_module.py index 91338bc..4b07b54 100644 --- a/forthic-py/src/forthic/modules/excel_module.py +++ b/forthic-py/src/forthic/modules/excel_module.py @@ -1,25 +1,27 @@ import base64 import json import oauthlib.oauth2.rfc6749.errors -from requests_oauthlib import OAuth2Session # type: ignore +from requests_oauthlib import OAuth2Session # type: ignore from ..module import Module from ..interfaces import IInterpreter -from ...utils.errors import ( - ExpiredMSGraphOAuthToken, - ExcelError -) +from ..utils.errors import ExpiredMSGraphOAuthToken, ExcelError from typing import List def raises_ExpiredMSGraphOAuthToken(fn): """Decorator that catches expiration errors and raises ExpiredMSGraphOAuthToken instead""" + def wrapper(*args, **kwargs): res = None try: res = fn(*args, **kwargs) - except (oauthlib.oauth2.rfc6749.errors.TokenExpiredError, oauthlib.oauth2.rfc6749.errors.InvalidGrantError): + except ( + oauthlib.oauth2.rfc6749.errors.TokenExpiredError, + oauthlib.oauth2.rfc6749.errors.InvalidGrantError, + ): raise ExpiredMSGraphOAuthToken() return res + return wrapper @@ -28,20 +30,21 @@ class ExcelModule(Module): See `docs/modules/excel_module.md` for detailed descriptions of each word. 
""" + def __init__(self, interp: IInterpreter): - super().__init__('excel', interp, EXCEL_FORTHIC) - self.context_stack: List['CredsContext'] = [] + super().__init__("excel", interp, EXCEL_FORTHIC) + self.context_stack: List["CredsContext"] = [] - self.add_module_word('PUSH-CONTEXT!', self.word_PUSH_CONTEXT_bang) - self.add_module_word('POP-CONTEXT!', self.word_POP_CONTEXT_bang) + self.add_module_word("PUSH-CONTEXT!", self.word_PUSH_CONTEXT_bang) + self.add_module_word("POP-CONTEXT!", self.word_POP_CONTEXT_bang) - self.add_module_word('WORKBOOK-INFO', self.word_WORKBOOK_INFO) - self.add_module_word('SHEET-NAMES', self.word_SHEET_NAMES) - self.add_module_word('TABLE-NAMES', self.word_TABLE_NAMES) + self.add_module_word("WORKBOOK-INFO", self.word_WORKBOOK_INFO) + self.add_module_word("SHEET-NAMES", self.word_SHEET_NAMES) + self.add_module_word("TABLE-NAMES", self.word_TABLE_NAMES) - self.add_module_word('TABLE-RECORDS', self.word_TABLE_RECORDS) - self.add_module_word('ADD-TABLE-ROWS', self.word_ADD_TABLE_ROWS) - self.add_module_word('UPDATE-RANGE', self.word_UPDATE_RANGE) + self.add_module_word("TABLE-RECORDS", self.word_TABLE_RECORDS) + self.add_module_word("ADD-TABLE-ROWS", self.word_ADD_TABLE_ROWS) + self.add_module_word("UPDATE-RANGE", self.word_UPDATE_RANGE) self.add_module_word("USED-RANGE", self.word_USED_RANGE) # ( creds_context -- ) @@ -61,21 +64,17 @@ def word_WORKBOOK_INFO(self, interp: IInterpreter): # See https://docs.microsoft.com/en-us/graph/api/shares-get?view=graph-rest-1.0&tabs=http def get_encoded_url() -> str: - encoded_url = base64.b64encode(shared_url.encode()).decode('utf-8') - res = 'u!' + encoded_url.strip('=').replace('/', '_').replace( - '+', '-' - ) + encoded_url = base64.b64encode(shared_url.encode()).decode("utf-8") + res = "u!" 
+ encoded_url.strip("=").replace("/", "_").replace("+", "-") return res context = self.get_context() - api_url = ( - f'https://graph.microsoft.com/v1.0/shares/{get_encoded_url()}/root' - ) + api_url = f"https://graph.microsoft.com/v1.0/shares/{get_encoded_url()}/root" response = msgraph_session.get(api_url, proxies=context.get_proxies()) data = response.json() result = { - 'drive_id': data['parentReference']['driveId'], - 'item_id': data['id'], + "drive_id": data["parentReference"]["driveId"], + "item_id": data["id"], } interp.stack_push(result) @@ -83,27 +82,27 @@ def get_encoded_url() -> str: @raises_ExpiredMSGraphOAuthToken def word_SHEET_NAMES(self, interp: IInterpreter): workbook_info = interp.stack_pop() - drive_id = workbook_info['drive_id'] - item_id = workbook_info['item_id'] + drive_id = workbook_info["drive_id"] + item_id = workbook_info["item_id"] msgraph_session = self.get_msgraph_session() workbook_session_id = self.get_workbook_session_id( drive_id, item_id, msgraph_session ) - api_url = f'https://graph.microsoft.com/v1.0/drives/{drive_id}/items/{item_id}/workbook/worksheets' - headers = {'workbook-session-id': workbook_session_id} + api_url = f"https://graph.microsoft.com/v1.0/drives/{drive_id}/items/{item_id}/workbook/worksheets" + headers = {"workbook-session-id": workbook_session_id} context = self.get_context() response = msgraph_session.get( api_url, headers=headers, proxies=context.get_proxies() ) if response.status_code != 200: raise ExcelError( - f'Unable to get sheet names for {item_id}: {response.text}' + f"Unable to get sheet names for {item_id}: {response.text}" ) data = response.json() - result = [item['name'] for item in data['value']] + result = [item["name"] for item in data["value"]] interp.stack_push(result) # (workbook_info sheet_name -- names) @@ -111,27 +110,27 @@ def word_SHEET_NAMES(self, interp: IInterpreter): def word_TABLE_NAMES(self, interp: IInterpreter): sheet_name = interp.stack_pop() workbook_info = interp.stack_pop() 
- drive_id = workbook_info['drive_id'] - item_id = workbook_info['item_id'] + drive_id = workbook_info["drive_id"] + item_id = workbook_info["item_id"] msgraph_session = self.get_msgraph_session() workbook_session_id = self.get_workbook_session_id( drive_id, item_id, msgraph_session ) - api_url = f'https://graph.microsoft.com/v1.0/drives/{drive_id}/items/{item_id}/workbook/worksheets/{sheet_name}/tables' - headers = {'workbook-session-id': workbook_session_id} + api_url = f"https://graph.microsoft.com/v1.0/drives/{drive_id}/items/{item_id}/workbook/worksheets/{sheet_name}/tables" + headers = {"workbook-session-id": workbook_session_id} context = self.get_context() response = msgraph_session.get( api_url, headers=headers, proxies=context.get_proxies() ) if response.status_code != 200: raise ExcelError( - f'Unable to get table names for {item_id}/{sheet_name}: {response.text}' + f"Unable to get table names for {item_id}/{sheet_name}: {response.text}" ) data = response.json() - result = [item['name'] for item in data['value']] + result = [item["name"] for item in data["value"]] interp.stack_push(result) # (workbook_info sheet_name table_name -- records) @@ -140,8 +139,8 @@ def word_TABLE_RECORDS(self, interp: IInterpreter): table_name = interp.stack_pop() sheet_name = interp.stack_pop() workbook_info = interp.stack_pop() - drive_id = workbook_info['drive_id'] - item_id = workbook_info['item_id'] + drive_id = workbook_info["drive_id"] + item_id = workbook_info["item_id"] msgraph_session = self.get_msgraph_session() workbook_session_id = self.get_workbook_session_id( @@ -149,17 +148,17 @@ def word_TABLE_RECORDS(self, interp: IInterpreter): ) def get_table_columns(): - api_url = f'https://graph.microsoft.com/v1.0/drives/{drive_id}/items/{item_id}/workbook/worksheets/{sheet_name}/tables/{table_name}/columns' - headers = {'workbook-session-id': workbook_session_id} + api_url = 
f"https://graph.microsoft.com/v1.0/drives/{drive_id}/items/{item_id}/workbook/worksheets/{sheet_name}/tables/{table_name}/columns" + headers = {"workbook-session-id": workbook_session_id} context = self.get_context() response = msgraph_session.get( api_url, headers=headers, proxies=context.get_proxies() ) data = response.json() res = [] - for item in data['value']: + for item in data["value"]: col_vals = [] - for v in item['values']: + for v in item["values"]: col_vals.append(v[0]) res.append(col_vals) return res @@ -170,9 +169,7 @@ def columns_to_records(columns): # Set up result res = [] - num_records = ( - len(columns[0]) - 1 - ) # Don't count heading as a record + num_records = len(columns[0]) - 1 # Don't count heading as a record for _ in range(num_records): res.append({}) @@ -196,24 +193,24 @@ def word_ADD_TABLE_ROWS(self, interp: IInterpreter): table_name = interp.stack_pop() sheet_name = interp.stack_pop() workbook_info = interp.stack_pop() - drive_id = workbook_info['drive_id'] - item_id = workbook_info['item_id'] + drive_id = workbook_info["drive_id"] + item_id = workbook_info["item_id"] msgraph_session = self.get_msgraph_session() workbook_session_id = self.get_workbook_session_id( drive_id, item_id, msgraph_session ) - api_url = f'https://graph.microsoft.com/v1.0/drives/{drive_id}/items/{item_id}/workbook/worksheets/{sheet_name}/tables/{table_name}/rows' - headers = {'workbook-session-id': workbook_session_id} - data = {'values': rows} + api_url = f"https://graph.microsoft.com/v1.0/drives/{drive_id}/items/{item_id}/workbook/worksheets/{sheet_name}/tables/{table_name}/rows" + headers = {"workbook-session-id": workbook_session_id} + data = {"values": rows} context = self.get_context() response = msgraph_session.post( api_url, json=data, headers=headers, proxies=context.get_proxies() ) if response.status_code != 201: raise RuntimeError( - f'Unable to add table rows to {item_id}/{sheet_name}/{table_name}: {response.text}' + f"Unable to add table rows to 
{item_id}/{sheet_name}/{table_name}: {response.text}" ) # (workbook_info sheet_name range rows -- ) @@ -223,8 +220,8 @@ def word_UPDATE_RANGE(self, interp: IInterpreter): a1_range = interp.stack_pop() sheet_name = interp.stack_pop() workbook_info = interp.stack_pop() - drive_id = workbook_info['drive_id'] - item_id = workbook_info['item_id'] + drive_id = workbook_info["drive_id"] + item_id = workbook_info["item_id"] msgraph_session = self.get_msgraph_session() workbook_session_id = self.get_workbook_session_id( @@ -232,15 +229,15 @@ def word_UPDATE_RANGE(self, interp: IInterpreter): ) api_url = f"https://graph.microsoft.com/v1.0/drives/{drive_id}/items/{item_id}/workbook/worksheets/{sheet_name}/range(address='{a1_range}')" - headers = {'workbook-session-id': workbook_session_id} - data = {'values': rows} + headers = {"workbook-session-id": workbook_session_id} + data = {"values": rows} context = self.get_context() response = msgraph_session.patch( api_url, json=data, headers=headers, proxies=context.get_proxies() ) if response.status_code != 200: raise ExcelError( - f'Unable to update range {item_id}/{sheet_name}/{a1_range}: {response.text}' + f"Unable to update range {item_id}/{sheet_name}/{a1_range}: {response.text}" ) # (workbook_info sheet_name -- rows) @@ -248,22 +245,22 @@ def word_UPDATE_RANGE(self, interp: IInterpreter): def word_USED_RANGE(self, interp: IInterpreter): sheet_name = interp.stack_pop() workbook_info = interp.stack_pop() - drive_id = workbook_info['drive_id'] - item_id = workbook_info['item_id'] + drive_id = workbook_info["drive_id"] + item_id = workbook_info["item_id"] msgraph_session = self.get_msgraph_session() workbook_session_id = self.get_workbook_session_id( drive_id, item_id, msgraph_session ) api_url = f"https://graph.microsoft.com/v1.0/drives/{drive_id}/items/{item_id}/workbook/worksheets/{sheet_name}/usedRange" - headers = { - "workbook-session-id": workbook_session_id - } + headers = {"workbook-session-id": workbook_session_id} 
response = msgraph_session.get(api_url, headers=headers) if response.status_code != 200: - raise RuntimeError(f"Unable to get used range {item_id}/{sheet_name}: {response}") + raise RuntimeError( + f"Unable to get used range {item_id}/{sheet_name}: {response}" + ) data = response.json() - result = data.get('values') + result = data.get("values") interp.stack_push(result) # ================================= @@ -277,11 +274,9 @@ def get_msgraph_session(self) -> OAuth2Session: def token_updater(token): pass - refresh_url = ( - 'https://login.microsoftonline.com/common/oauth2/v2.0/token' - ) + refresh_url = "https://login.microsoftonline.com/common/oauth2/v2.0/token" result = OAuth2Session( - app_creds['client_id'], + app_creds["client_id"], token=token, auto_refresh_kwargs=app_creds, auto_refresh_url=refresh_url, @@ -289,17 +284,17 @@ def token_updater(token): ) return result - def get_context(self) -> 'CredsContext': + def get_context(self) -> "CredsContext": if not self.context_stack: - raise ExcelError( - 'Need to push an MS Graph context with PUSH-CONTEXT!' 
- ) + raise ExcelError("Need to push an MS Graph context with PUSH-CONTEXT!") result = self.context_stack[-1] return result - def get_workbook_session_id(self, drive_id: str, item_id: str, msgraph_session: OAuth2Session) -> str: - api_url = f'https://graph.microsoft.com/v1.0/drives/{drive_id}/items/{item_id}/workbook/createSession' - request_body = {'persistChanges': True} + def get_workbook_session_id( + self, drive_id: str, item_id: str, msgraph_session: OAuth2Session + ) -> str: + api_url = f"https://graph.microsoft.com/v1.0/drives/{drive_id}/items/{item_id}/workbook/createSession" + request_body = {"persistChanges": True} context = self.get_context() response = msgraph_session.post( api_url, @@ -308,9 +303,9 @@ def get_workbook_session_id(self, drive_id: str, item_id: str, msgraph_session: ) if response.status_code != 201: raise ExcelError( - f'Unable to get workbook session id for {item_id}: {response.text}' + f"Unable to get workbook session id for {item_id}: {response.text}" ) - result = response.json()['id'] + result = response.json()["id"] return result @@ -330,8 +325,8 @@ def get_auth_token(self): return None -EXCEL_FORTHIC = ''' +EXCEL_FORTHIC = """ : WORKBOOK-ID WORKBOOK-INFO 'item_id' REC@; # (shared_url -- workbook_id) ["WORKBOOK-ID"] EXPORT -''' +""" diff --git a/forthic-py/src/forthic/modules/gdoc_module.py b/forthic-py/src/forthic/modules/gdoc_module.py index 79fc54f..a3caa1d 100644 --- a/forthic-py/src/forthic/modules/gdoc_module.py +++ b/forthic-py/src/forthic/modules/gdoc_module.py @@ -1,29 +1,31 @@ import json -from requests_oauthlib import OAuth2Session # type: ignore +from requests_oauthlib import OAuth2Session # type: ignore import oauthlib.oauth2.rfc6749.errors from ..module import Module from ..interfaces import IInterpreter -from ...utils.errors import ( - GdocError, - ExpiredGdocOAuthToken -) +from ..utils.errors import GdocError, ExpiredGdocOAuthToken from typing import List, Any, Dict def raises_ExpiredGdocOAuthToken(fn): """Decorator 
that catches expiration errors and raises ExpiredGdocOAuthToken instead""" + def wrapper(*args, **kwargs): res = None try: res = fn(*args, **kwargs) - except (oauthlib.oauth2.rfc6749.errors.TokenExpiredError, oauthlib.oauth2.rfc6749.errors.InvalidGrantError): + except ( + oauthlib.oauth2.rfc6749.errors.TokenExpiredError, + oauthlib.oauth2.rfc6749.errors.InvalidGrantError, + ): raise ExpiredGdocOAuthToken() return res + return wrapper -FORTHIC = ''' -''' +FORTHIC = """ +""" # TODO: Need to rework this so it matches the gsheet module @@ -32,34 +34,37 @@ class GdocModule(Module): See `docs/modules/gdoc_module.md` for detailed descriptions of each word. """ + def __init__(self, interp: IInterpreter): - super().__init__('gdoc', interp, FORTHIC) - self.context_stack: List['CredsContext'] = [] + super().__init__("gdoc", interp, FORTHIC) + self.context_stack: List["CredsContext"] = [] - self.add_module_word('PUSH-CONTEXT!', self.word_PUSH_CONTEXT_bang) - self.add_module_word('POP-CONTEXT!', self.word_POP_CONTEXT_bang) + self.add_module_word("PUSH-CONTEXT!", self.word_PUSH_CONTEXT_bang) + self.add_module_word("POP-CONTEXT!", self.word_POP_CONTEXT_bang) - self.add_module_word('DOC', self.word_DOC) - self.add_module_word('NEW-DOC', self.word_NEW_DOC) - self.add_module_word('BATCH-UPDATE', self.word_BATCH_UPDATE) - self.add_module_word('INSERT', self.word_INSERT) + self.add_module_word("DOC", self.word_DOC) + self.add_module_word("NEW-DOC", self.word_NEW_DOC) + self.add_module_word("BATCH-UPDATE", self.word_BATCH_UPDATE) + self.add_module_word("INSERT", self.word_INSERT) - self.add_module_word('PT', self.word_PT) - self.add_module_word('COLOR', self.word_COLOR) + self.add_module_word("PT", self.word_PT) + self.add_module_word("COLOR", self.word_COLOR) # ----- Content - self.add_module_word('TABLE', self.word_TABLE) - self.add_module_word('TEXT', self.word_TEXT) - self.add_module_word('PAGE-BREAK', self.word_PAGE_BREAK) + self.add_module_word("TABLE", self.word_TABLE) + 
self.add_module_word("TEXT", self.word_TEXT) + self.add_module_word("PAGE-BREAK", self.word_PAGE_BREAK) # ----- Content manipulation - self.add_module_word('TEXT-CONCAT', self.word_TEXT_CONCAT) - self.add_module_word(' 'CredsContext': + def get_context(self) -> "CredsContext": if not self.context_stack: - raise GdocError( - 'Use gdoc.PUSH-CONTEXT! to provide a Google context' - ) + raise GdocError("Use gdoc.PUSH-CONTEXT! to provide a Google context") result = self.context_stack[-1] return result @@ -292,9 +301,9 @@ def get_gdoc_session(self) -> OAuth2Session: def token_updater(token): pass - refresh_url = 'https://oauth2.googleapis.com/token' + refresh_url = "https://oauth2.googleapis.com/token" result = OAuth2Session( - app_creds['client_id'], + app_creds["client_id"], token=token, auto_refresh_kwargs=app_creds, auto_refresh_url=refresh_url, @@ -311,6 +320,7 @@ class Content: interface is a union of all possible gdoc content methods and provides sensible defaults so all content objects can be used in all rendering situations. 
""" + def __init__(self): self.start_index = 0 self.end_index = 0 @@ -371,8 +381,8 @@ def get_insert_request(self) -> Dict[str, Any]: class Text(Content): - """This represents text that's being accumulated in a content array for a batch render - """ + """This represents text that's being accumulated in a content array for a batch render""" + def __init__(self, text): super().__init__() self.text = text @@ -383,15 +393,17 @@ def get_text(self) -> str: return self.text def update_start_index(self, index: int): - """Updates the start/end indexes of the content and style - """ + """Updates the start/end indexes of the content and style""" self.start_index = index self.end_index = index + len(self.text) + 1 # Add implicit newline cur_index = index # Update style requests def update_style(update_type: str, style: Dict[str, Any]): - num_chars = style[update_type]["range"]["endIndex"] - style[update_type]["range"]["startIndex"] + num_chars = ( + style[update_type]["range"]["endIndex"] + - style[update_type]["range"]["startIndex"] + ) style[update_type]["range"]["startIndex"] = cur_index style[update_type]["range"]["endIndex"] = cur_index + num_chars return @@ -408,7 +420,7 @@ def get_insert_request(self) -> Dict[str, Any]: result = { "insertText": { "text": self.text, - "location": {"segmentId": "", "index": self.start_index} + "location": {"segmentId": "", "index": self.start_index}, } } return result @@ -421,8 +433,8 @@ def add_text_style(self, style: Dict[str, Any]): "range": { "segmentId": "", "startIndex": self.start_index, - "endIndex": self.end_index - } + "endIndex": self.end_index, + }, } } self.style_requests.append(style_request) @@ -435,8 +447,8 @@ def add_paragraph_style(self, style: Dict[str, Any]): "range": { "segmentId": "", "startIndex": self.start_index, - "endIndex": self.end_index - } + "endIndex": self.end_index, + }, } } self.style_requests.append(style_request) @@ -446,8 +458,8 @@ def get_style_requests(self) -> List[Dict[str, Any]]: class 
ConcatText(Content): - """This represents an array of Text that's being concatenated - """ + """This represents an array of Text that's being concatenated""" + def __init__(self, text_items: List[Text]): super().__init__() self.text_items = text_items @@ -460,8 +472,7 @@ def get_text(self) -> str: return result def update_start_index(self, index: int): - """Updates the start/end indexes of the content and style - """ + """Updates the start/end indexes of the content and style""" text = self.get_text() self.start_index = index self.end_index = index + len(text) + 1 # Add implicit newline @@ -475,7 +486,7 @@ def get_insert_request(self) -> Dict[str, Any]: result = { "insertText": { "text": self.get_text(), - "location": {"segmentId": "", "index": self.start_index} + "location": {"segmentId": "", "index": self.start_index}, } } return result @@ -488,8 +499,8 @@ def get_style_requests(self) -> List[Dict[str, Any]]: class Table(Content): - """This represents a table to render - """ + """This represents a table to render""" + def __init__(self, table_rows: List[List[Content]]): super().__init__() self.table_rows = self.normalize_rows(table_rows) @@ -528,13 +539,15 @@ def update_start_index(self, index: int): # Update merge cells requests for m in self.merges: - m["mergeTableCells"]["tableRange"]["tableCellLocation"]["tableStartLocation"]["index"] = self.start_index + m["mergeTableCells"]["tableRange"]["tableCellLocation"][ + "tableStartLocation" + ]["index"] = self.start_index # Add indexes to table content self.table_rows_w_indexes = [] - cur_index = index + 1 # Advance index for rows container + cur_index = index + 1 # Advance index for rows container for r in self.table_rows: - cur_index += 1 # Advance index for row + cur_index += 1 # Advance index for row row_w_index = [] for c in r: cur_index += 1 # Advance index for start cell @@ -544,20 +557,25 @@ def update_start_index(self, index: int): self.table_rows_w_indexes.append(row_w_index) return - def 
add_table_style(self, style: Dict[str, Any], row: int, col: int, row_span: int, col_span: int): + def add_table_style( + self, style: Dict[str, Any], row: int, col: int, row_span: int, col_span: int + ): request = { "updateTableCellStyle": { "tableCellStyle": style, "fields": ",".join(style.keys()), "tableRange": { "tableCellLocation": { - "tableStartLocation": {"segmentId": "", "index": self.start_index}, + "tableStartLocation": { + "segmentId": "", + "index": self.start_index, + }, "rowIndex": row, - "columnIndex": col + "columnIndex": col, }, "rowSpan": row_span, - "columnSpan": col_span - } + "columnSpan": col_span, + }, } } self.table_styles.append(request) @@ -568,25 +586,21 @@ def add_full_table_style(self, style: Dict[str, Any]): "updateTableCellStyle": { "tableCellStyle": style, "fields": ",".join(style.keys()), - "tableStartLocation": { - "segmentId": "", - "index": self.start_index - } + "tableStartLocation": {"segmentId": "", "index": self.start_index}, } } self.table_styles.append(request) return - def add_column_properties(self, column_properties: Dict[str, Any], column_indices: List[int]): + def add_column_properties( + self, column_properties: Dict[str, Any], column_indices: List[int] + ): request = { "updateTableColumnProperties": { - "tableStartLocation": { - "segmentId": "", - "index": self.start_index - }, + "tableStartLocation": {"segmentId": "", "index": self.start_index}, "columnIndices": column_indices, "tableColumnProperties": column_properties, - "fields": ",".join(column_properties.keys()) + "fields": ",".join(column_properties.keys()), } } self.table_styles.append(request) @@ -597,12 +611,15 @@ def add_merge_cells(self, row: int, col: int, row_span: int, col_span: int): "mergeTableCells": { "tableRange": { "tableCellLocation": { - "tableStartLocation": {"segmentId": "", "index": self.start_index}, + "tableStartLocation": { + "segmentId": "", + "index": self.start_index, + }, "rowIndex": row, - "columnIndex": col + "columnIndex": col, }, 
"rowSpan": row_span, - "columnSpan": col_span + "columnSpan": col_span, } } } @@ -623,7 +640,9 @@ def get_style_update(style) -> Dict[str, Any]: for style in self.table_styles: style_update = get_style_update(style) if "tableRange" in style_update: - style_update["tableRange"]["tableCellLocation"]["tableStartLocation"]["index"] = self.start_index + style_update["tableRange"]["tableCellLocation"]["tableStartLocation"][ + "index" + ] = self.start_index else: style_update["tableStartLocation"]["index"] = self.start_index return self.table_styles @@ -632,8 +651,13 @@ def get_insert_request(self) -> Dict[str, Any]: result = { "insertTable": { "rows": len(self.table_rows), - "columns": len(self.table_rows[0]), # We've normalized table rows, so there will be a valid row - "location": {"segmentId": "", "index": self.start_index - 1} # Bring within paragraph + "columns": len( + self.table_rows[0] + ), # We've normalized table rows, so there will be a valid row + "location": { + "segmentId": "", + "index": self.start_index - 1, + }, # Bring within paragraph } } @@ -652,7 +676,9 @@ def get_insert_content_requests(self) -> List[Dict[str, Any]]: return result -def normalize_content_array(char_index: int, content_array: List[Content]) -> List[Content]: +def normalize_content_array( + char_index: int, content_array: List[Content] +) -> List[Content]: cur_index = char_index result: List[Content] = [] last_content = None diff --git a/forthic-py/src/forthic/modules/gsheet_module.py b/forthic-py/src/forthic/modules/gsheet_module.py index 4da2be3..25835a3 100644 --- a/forthic-py/src/forthic/modules/gsheet_module.py +++ b/forthic-py/src/forthic/modules/gsheet_module.py @@ -1,26 +1,28 @@ import re import json import urllib.parse -from requests_oauthlib import OAuth2Session # type: ignore +from requests_oauthlib import OAuth2Session # type: ignore import oauthlib.oauth2.rfc6749.errors from ..module import Module from ..interfaces import IInterpreter -from ...utils.errors import ( - 
GsheetError, - ExpiredGsheetOAuthToken -) +from ..utils.errors import GsheetError, ExpiredGsheetOAuthToken from typing import List, Any, Dict, Tuple def raises_ExpiredGsheetOAuthToken(fn): """Decorator that catches expiration errors and raises ExpiredGsheetOAuthToken instead""" + def wrapper(*args, **kwargs): res = None try: res = fn(*args, **kwargs) - except (oauthlib.oauth2.rfc6749.errors.TokenExpiredError, oauthlib.oauth2.rfc6749.errors.InvalidGrantError): + except ( + oauthlib.oauth2.rfc6749.errors.TokenExpiredError, + oauthlib.oauth2.rfc6749.errors.InvalidGrantError, + ): raise ExpiredGsheetOAuthToken() return res + return wrapper @@ -28,12 +30,11 @@ def wrapper(*args, **kwargs): class GsheetModule(Module): - """This implements access to gsheets via Google's [Sheets API](https://developers.google.com/sheets/api) - """ + """This implements access to gsheets via Google's [Sheets API](https://developers.google.com/sheets/api)""" def __init__(self, interp: IInterpreter): - super().__init__('gsheet', interp, FORTHIC) - self.context_stack: List['CredsContext'] = [] + super().__init__("gsheet", interp, FORTHIC) + self.context_stack: List["CredsContext"] = [] # These are set by "flag words" to change the behavior of the words in this module self.flags = { @@ -43,37 +44,36 @@ def __init__(self, interp: IInterpreter): "null_on_error": False, } - self.add_module_word('PUSH-CONTEXT!', self.word_PUSH_CONTEXT_bang) - self.add_module_word('POP-CONTEXT!', self.word_POP_CONTEXT_bang) + self.add_module_word("PUSH-CONTEXT!", self.word_PUSH_CONTEXT_bang) + self.add_module_word("POP-CONTEXT!", self.word_POP_CONTEXT_bang) - self.add_module_word('SPREADSHEET', self.word_SPREADSHEET) - self.add_module_word('TAB', self.word_TAB) - self.add_module_word('TAB@', self.word_TAB_at) - self.add_module_word('ENSURE-TAB!', self.word_ENSURE_TAB_bang) + self.add_module_word("SPREADSHEET", self.word_SPREADSHEET) + self.add_module_word("TAB", self.word_TAB) + self.add_module_word("TAB@", 
self.word_TAB_at) + self.add_module_word("ENSURE-TAB!", self.word_ENSURE_TAB_bang) - self.add_module_word('ROWS', self.word_ROWS) - self.add_module_word('ROWS!', self.word_ROWS_bang) + self.add_module_word("ROWS", self.word_ROWS) + self.add_module_word("ROWS!", self.word_ROWS_bang) - self.add_module_word('CLEAR!', self.word_CLEAR_bang) + self.add_module_word("CLEAR!", self.word_CLEAR_bang) - self.add_module_word('RECORDS', self.word_RECORDS) - self.add_module_word('RECORDS!', self.word_RECORDS_bang) - self.add_module_word('BATCH-UPDATE-TAB!', self.word_BATCH_UPDATE_TAB_bang) + self.add_module_word("RECORDS", self.word_RECORDS) + self.add_module_word("RECORDS!", self.word_RECORDS_bang) + self.add_module_word("BATCH-UPDATE-TAB!", self.word_BATCH_UPDATE_TAB_bang) # Flag words - self.add_module_word('!RANGE', self.word_bang_RANGE) - self.add_module_word('!TRANSPOSE', self.word_bang_TRANSPOSE) - self.add_module_word('!CELL-FORMAT', self.word_bang_CELL_FORMAT) - self.add_module_word('!NULL-ON-ERROR', self.word_bang_NULL_ON_ERROR) + self.add_module_word("!RANGE", self.word_bang_RANGE) + self.add_module_word("!TRANSPOSE", self.word_bang_TRANSPOSE) + self.add_module_word("!CELL-FORMAT", self.word_bang_CELL_FORMAT) + self.add_module_word("!NULL-ON-ERROR", self.word_bang_NULL_ON_ERROR) # Utils - self.add_module_word('INDEX>COL-NAME', self.word_INDEX_to_COL_NAME) - self.add_module_word('COL-NAME>INDEX', self.word_COL_NAME_to_INDEX) + self.add_module_word("INDEX>COL-NAME", self.word_INDEX_to_COL_NAME) + self.add_module_word("COL-NAME>INDEX", self.word_COL_NAME_to_INDEX) # ( creds_context -- ) def word_PUSH_CONTEXT_bang(self, interp: IInterpreter): - """Sets the credentials context used to make calls against the API - """ + """Sets the credentials context used to make calls against the API""" creds_context = interp.stack_pop() self.context_stack.append(creds_context) @@ -85,8 +85,7 @@ def word_POP_CONTEXT_bang(self, interp: IInterpreter): # ( Tab -- Spreadsheet ) 
@raises_ExpiredGsheetOAuthToken def word_SPREADSHEET(self, interp: IInterpreter): - """Creates a `Spreadsheet` object from a url or extracts the parent spreadsheet from a `Tab` object - """ + """Creates a `Spreadsheet` object from a url or extracts the parent spreadsheet from a `Tab` object""" arg = interp.stack_pop() context = self.get_context() @@ -103,8 +102,7 @@ def word_SPREADSHEET(self, interp: IInterpreter): # ( url -- Tab ) @raises_ExpiredGsheetOAuthToken def word_TAB(self, interp: IInterpreter): - """Creates a `Tab` object from a url - """ + """Creates a `Tab` object from a url""" url = interp.stack_pop() try: @@ -115,19 +113,16 @@ def word_TAB(self, interp: IInterpreter): interp.stack_push(result) except RuntimeError: flags = self.get_flags() - if flags.get('null_on_error'): + if flags.get("null_on_error"): interp.stack_push(None) else: raise - - # ( Spreadsheet id -- Tab ) # ( Spreadsheet name -- Tab ) @raises_ExpiredGsheetOAuthToken def word_TAB_at(self, interp: IInterpreter): - """Retrieves a `Tab` from a `Spreadsheet` using its id or name - """ + """Retrieves a `Tab` from a `Spreadsheet` using its id or name""" id_or_name = interp.stack_pop() spreadsheet = interp.stack_pop() @@ -136,12 +131,11 @@ def word_TAB_at(self, interp: IInterpreter): interp.stack_push(result) except RuntimeError: flags = self.get_flags() - if flags.get('null_on_error'): + if flags.get("null_on_error"): interp.stack_push(None) else: raise - # ( Tab -- rows ) @raises_ExpiredGsheetOAuthToken def word_ROWS(self, interp: IInterpreter): @@ -155,21 +149,25 @@ def word_ROWS(self, interp: IInterpreter): flags = self.get_flags() - if flags.get('range'): + if flags.get("range"): tab_range = f"{tab.get_name()}!{flags.get('range')}" else: tab_range = tab.get_name() try: - result = get_rows(tab.get_context(), tab.get_spreadsheet_id(), tab_range, flags.get('transpose')) + result = get_rows( + tab.get_context(), + tab.get_spreadsheet_id(), + tab_range, + flags.get("transpose"), + ) 
interp.stack_push(result) except RuntimeError: - if flags.get('null_on_error'): + if flags.get("null_on_error"): interp.stack_push(None) else: raise - # ( Tab rows -- ) @raises_ExpiredGsheetOAuthToken def word_ROWS_bang(self, interp: IInterpreter): @@ -188,15 +186,15 @@ def word_ROWS_bang(self, interp: IInterpreter): flags = self.get_flags() - if flags.get('range'): + if flags.get("range"): tab_range = f"{tab.get_name()}!{flags.get('range')}" else: tab_range = tab.get_name() - if flags.get('cell_format'): - write_cells(tab, tab_range, rows, flags.get('transpose')) + if flags.get("cell_format"): + write_cells(tab, tab_range, rows, flags.get("transpose")) else: - write_rows(tab, tab_range, rows, flags.get('transpose')) + write_rows(tab, tab_range, rows, flags.get("transpose")) # ( Tab header -- Records ) @raises_ExpiredGsheetOAuthToken @@ -217,7 +215,7 @@ def word_RECORDS(self, interp: IInterpreter): try: # Check flags flags = self.get_flags() - if flags.get('range'): + if flags.get("range"): tab_range = f"{tab.get_name()}!{flags.get('range')}" else: tab_range = tab.get_name() @@ -225,7 +223,7 @@ def word_RECORDS(self, interp: IInterpreter): rows = get_rows(tab.get_context(), tab.get_spreadsheet_id(), tab_range) def to_ascii(value: str) -> str: - res = ''.join([c for c in value if ord(c) < 128]).strip() + res = "".join([c for c in value if ord(c) < 128]).strip() return res def get_header_to_column(values: List[str]) -> Dict[str, int]: @@ -248,8 +246,8 @@ def find_header() -> Any: break if found_all: res = { - 'header_row': i, - 'header_to_column': header_to_column, + "header_row": i, + "header_to_column": header_to_column, } break return res @@ -263,17 +261,17 @@ def find_header() -> Any: def row_to_rec(row: List[str]) -> Dict[str, Any]: res = {} for h in header: - col = header_info['header_to_column'][h] + col = header_info["header_to_column"][h] res[h] = row[col] return res result = [] - for r in rows[header_info['header_row'] + 1:]: + for r in 
rows[header_info["header_row"] + 1 :]: result.append(row_to_rec(r)) interp.stack_push(result) except RuntimeError: - if flags.get('null_on_error'): + if flags.get("null_on_error"): interp.stack_push(None) else: raise @@ -291,7 +289,7 @@ def word_RECORDS_bang(self, interp: IInterpreter): tab = interp.stack_pop() # Peek at cell_format flag, but don't clear them since ROWS! will use them - use_cell_format = self.flags.get('cell_format') + use_cell_format = self.flags.get("cell_format") header_values = header default_value = "" @@ -327,16 +325,14 @@ def word_BATCH_UPDATE_TAB_bang(self, interp: IInterpreter): # ( Tab -- ) @raises_ExpiredGsheetOAuthToken def word_CLEAR_bang(self, interp: IInterpreter): - """Clears the contents of a `Tab` - """ + """Clears the contents of a `Tab`""" tab = interp.stack_pop() clear_tab(tab) # ( Spreadsheet tab_name -- Tab) @raises_ExpiredGsheetOAuthToken def word_ENSURE_TAB_bang(self, interp: IInterpreter): - """Ensures that the specified `Tab` exists in the gsheet and then returns it - """ + """Ensures that the specified `Tab` exists in the gsheet and then returns it""" tab_name = interp.stack_pop() spreadsheet = interp.stack_pop() result = ensure_tab(spreadsheet, tab_name) @@ -344,43 +340,37 @@ def word_ENSURE_TAB_bang(self, interp: IInterpreter): # ( index -- col_name ) def word_INDEX_to_COL_NAME(self, interp: IInterpreter): - """Converts an integer index to a character column name - """ + """Converts an integer index to a character column name""" index = interp.stack_pop() result = index_to_col_name(index) interp.stack_push(result) # ( col_name -- index ) def word_COL_NAME_to_INDEX(self, interp: IInterpreter): - """Converts a character column name to an index - """ + """Converts a character column name to an index""" col_name = interp.stack_pop() result = col_name_to_index(col_name) interp.stack_push(result) # ( range -- ) def word_bang_RANGE(self, interp: IInterpreter): - """Sets a spreadsheet `range` flag - """ + """Sets a spreadsheet 
`range` flag""" tab_range = interp.stack_pop() self.flags["range"] = tab_range # ( -- ) def word_bang_TRANSPOSE(self, interp: IInterpreter): - """Sets a `transpose` flag to treat data as columns instead of rows - """ + """Sets a `transpose` flag to treat data as columns instead of rows""" self.flags["transpose"] = True # ( -- ) def word_bang_CELL_FORMAT(self, interp: IInterpreter): - """Sets a `cell_format` flag to indicate that data is provided in "cell" format rather than as strings - """ + """Sets a `cell_format` flag to indicate that data is provided in "cell" format rather than as strings""" self.flags["cell_format"] = True # ( -- ) def word_bang_NULL_ON_ERROR(self, interp: IInterpreter): - """When TRUE, if a word were to return a result and an error occurs, return NULL instead - """ + """When TRUE, if a word were to return a result and an error occurs, return NULL instead""" self.flags["null_on_error"] = True # ================================= @@ -390,11 +380,9 @@ def get_flags(self): self.flags = {} return flags - def get_context(self) -> 'CredsContext': + def get_context(self) -> "CredsContext": if not self.context_stack: - raise GsheetError( - 'Use gsheet.PUSH-CONTEXT! to provide a Google context' - ) + raise GsheetError("Use gsheet.PUSH-CONTEXT! 
to provide a Google context") result = self.context_stack[-1] return result @@ -408,9 +396,9 @@ def get_gsheets_session(context) -> OAuth2Session: def token_updater(token): pass - refresh_url = 'https://oauth2.googleapis.com/token' + refresh_url = "https://oauth2.googleapis.com/token" result = OAuth2Session( - app_creds['client_id'], + app_creds["client_id"], token=token, auto_refresh_kwargs=app_creds, auto_refresh_url=refresh_url, @@ -420,13 +408,10 @@ def token_updater(token): def get_gsheet_id_and_tab_id(url: str) -> Tuple[str, str]: - """Parses a spreadsheet ID and tab ID from a gsheet URL - """ - match = re.match(r'.*docs\.google\.com.*\/d\/([^\/]+).*gid=(\d+)', url) + """Parses a spreadsheet ID and tab ID from a gsheet URL""" + match = re.match(r".*docs\.google\.com.*\/d\/([^\/]+).*gid=(\d+)", url) if not match: - raise GsheetError( - f'Unable to find gsheet_id and tab key from: {url}' - ) + raise GsheetError(f"Unable to find gsheet_id and tab key from: {url}") gsheet_id = match.group(1) tab_id = int(match.group(2)) return gsheet_id, tab_id @@ -435,7 +420,7 @@ def get_gsheet_id_and_tab_id(url: str) -> Tuple[str, str]: def get_sheet_info(context, gsheet_id: str) -> Any: gsheets_session = get_gsheets_session(context) response = gsheets_session.get( - f'https://sheets.googleapis.com/v4/spreadsheets/{gsheet_id}', + f"https://sheets.googleapis.com/v4/spreadsheets/{gsheet_id}", proxies=context.get_proxies(), ) if not response.ok: @@ -444,16 +429,18 @@ def get_sheet_info(context, gsheet_id: str) -> Any: return result -def get_rows(context, spreadsheet_id: str, spreadsheet_range: str, transpose: bool = False) -> List[List[str]]: +def get_rows( + context, spreadsheet_id: str, spreadsheet_range: str, transpose: bool = False +) -> List[List[str]]: spreadsheet_range_url_encoded = urllib.parse.quote_plus(spreadsheet_range) gsheets_session = get_gsheets_session(context) if transpose: - majorDimension = 'COLUMNS' + majorDimension = "COLUMNS" else: - majorDimension = 'ROWS' 
+ majorDimension = "ROWS" - base = 'https://sheets.googleapis.com/v4/spreadsheets' + base = "https://sheets.googleapis.com/v4/spreadsheets" api_url = f"{base}/{spreadsheet_id}/values/{spreadsheet_range_url_encoded}?majorDimension={majorDimension}" response = gsheets_session.get(api_url, proxies=context.get_proxies()) if not response.ok: @@ -463,7 +450,7 @@ def get_rows(context, spreadsheet_id: str, spreadsheet_range: str, transpose: bo if "values" not in data: rows = [] else: - rows = data['values'] + rows = data["values"] # We add empty cells where needed to make all rows the same length def pad_rows(rows: List[List[str]]) -> List[List[str]]: @@ -477,7 +464,7 @@ def pad_rows(rows: List[List[str]]) -> List[List[str]]: padded_row = r if len(r) < max_length: for _ in range(max_length - len(r)): - padded_row.append('') + padded_row.append("") res.append(padded_row) return res @@ -485,7 +472,9 @@ def pad_rows(rows: List[List[str]]) -> List[List[str]]: return result -def write_rows(tab: "Tab", spreadsheet_range: str, rows: List[List[str]], transpose: bool = False): +def write_rows( + tab: "Tab", spreadsheet_range: str, rows: List[List[str]], transpose: bool = False +): context = tab.get_context() spreadsheet_id = tab.get_spreadsheet_id() @@ -495,35 +484,39 @@ def write_rows(tab: "Tab", spreadsheet_range: str, rows: List[List[str]], transp return if transpose: - majorDimension = 'COLUMNS' + majorDimension = "COLUMNS" else: - majorDimension = 'ROWS' + majorDimension = "ROWS" gsheets_session = get_gsheets_session(context) update_data = { - 'range': spreadsheet_range, - 'majorDimension': majorDimension, - 'values': rows, + "range": spreadsheet_range, + "majorDimension": majorDimension, + "values": rows, } - input_option = 'USER_ENTERED' - api_url = f'https://sheets.googleapis.com/v4/spreadsheets/{spreadsheet_id}/values/{spreadsheet_range_url_encoded}?valueInputOption={input_option}' + input_option = "USER_ENTERED" + api_url = 
f"https://sheets.googleapis.com/v4/spreadsheets/{spreadsheet_id}/values/{spreadsheet_range_url_encoded}?valueInputOption={input_option}" status = gsheets_session.put( api_url, data=json.dumps(update_data), proxies=context.get_proxies(), ) if not status.ok: - raise GsheetError(f'Problem writing to gsheet {spreadsheet_id} {spreadsheet_range}: {status.text}') + raise GsheetError( + f"Problem writing to gsheet {spreadsheet_id} {spreadsheet_range}: {status.text}" + ) -def write_cells(tab: "Tab", spreadsheet_range: str, rows: List[List[Any]], transpose: bool = False): +def write_cells( + tab: "Tab", spreadsheet_range: str, rows: List[List[Any]], transpose: bool = False +): spreadsheet_id = tab.get_spreadsheet_id() content_rows = [] for r in rows: content_row = [] for cell in r: - content_row.append(cell.get('content')) + content_row.append(cell.get("content")) content_rows.append(content_row) # Write content @@ -541,7 +534,7 @@ def get_start_row_col(): else: range_pieces = pieces[1].split(":") range_start = range_pieces[0] - match = re.match(r'([A-Z]+)(\d+)', range_start) + match = re.match(r"([A-Z]+)(\d+)", range_start) column_name = match.group(1) row = int(match.group(2)) @@ -558,9 +551,7 @@ def get_update_request_row(row): values = [] for cell in row: values.append(cell.get("updateRequest") or {}) - result = { - "values": values - } + result = {"values": values} return result def transpose_rows(rows): @@ -603,17 +594,19 @@ def get_fields(): if not fields: return - update_requests = [{ - "updateCells": { - "range": { - "sheetId": spreadsheet_id, - "startRowIndex": startRowIndex, - "startColumnIndex": startColumnIndex, - }, - "rows": update_request_rows, - "fields": ",".join(fields) + update_requests = [ + { + "updateCells": { + "range": { + "sheetId": spreadsheet_id, + "startRowIndex": startRowIndex, + "startColumnIndex": startColumnIndex, + }, + "rows": update_request_rows, + "fields": ",".join(fields), + } } - }] + ] batch_update_tab(tab, update_requests) @@ -625,23 
+618,27 @@ def clear_tab(tab: "Tab"): gsheets_session = get_gsheets_session(context) update_data = { - 'requests': [ + "requests": [ { - 'updateCells': { - 'range': {'sheetId': tab_id}, - 'fields': 'userEnteredValue', + "updateCells": { + "range": {"sheetId": tab_id}, + "fields": "userEnteredValue", } }, ] } - api_url = f'https://sheets.googleapis.com/v4/spreadsheets/{spreadsheet_id}:batchUpdate' + api_url = ( + f"https://sheets.googleapis.com/v4/spreadsheets/{spreadsheet_id}:batchUpdate" + ) status = gsheets_session.post( api_url, data=json.dumps(update_data), proxies=context.get_proxies(), ) if not status.ok: - raise GsheetError(f'Problem clearing gsheet {spreadsheet_id} {tab.get_name()}: {status.text}') + raise GsheetError( + f"Problem clearing gsheet {spreadsheet_id} {tab.get_name()}: {status.text}" + ) def ensure_tab(spreadsheet: "Spreadsheet", tab_name: str) -> "Tab": @@ -652,25 +649,23 @@ def ensure_tab(spreadsheet: "Spreadsheet", tab_name: str) -> "Tab": context = spreadsheet.get_context() gsheets_session = get_gsheets_session(context) update_data = { - 'requests': [ - { - 'addSheet': { - 'properties': { - 'title': tab_name - } - } - }, + "requests": [ + {"addSheet": {"properties": {"title": tab_name}}}, ] } spreadsheet_id = spreadsheet.get_spreadsheet_id() - api_url = f'https://sheets.googleapis.com/v4/spreadsheets/{spreadsheet_id}:batchUpdate' + api_url = ( + f"https://sheets.googleapis.com/v4/spreadsheets/{spreadsheet_id}:batchUpdate" + ) status = gsheets_session.post( api_url, data=json.dumps(update_data), proxies=context.get_proxies(), ) if not status.ok: - raise GsheetError(f'Problem adding sheet to gsheet {spreadsheet_id}: {status.text}') + raise GsheetError( + f"Problem adding sheet to gsheet {spreadsheet_id}: {status.text}" + ) # Update spreadsheet updated_spreadsheet = Spreadsheet(context, spreadsheet.get_url()) @@ -688,38 +683,40 @@ def batch_update_tab(tab: "Tab", update_requests): def add_sheet_id(update_requests): for r in update_requests: for 
v in r.values(): - if 'range' in v: - v['range']['sheetId'] = tab_id + if "range" in v: + v["range"]["sheetId"] = tab_id return add_sheet_id(update_requests) - data = { - 'requests': update_requests - } + data = {"requests": update_requests} - api_url = f'https://sheets.googleapis.com/v4/spreadsheets/{spreadsheet_id}:batchUpdate' + api_url = ( + f"https://sheets.googleapis.com/v4/spreadsheets/{spreadsheet_id}:batchUpdate" + ) status = gsheets_session.post( api_url, data=json.dumps(data), proxies=context.get_proxies(), ) if not status.ok: - raise GsheetError(f'Problem running batch_update_tab {spreadsheet_id} {tab.get_name()}: {status.text}') + raise GsheetError( + f"Problem running batch_update_tab {spreadsheet_id} {tab.get_name()}: {status.text}" + ) def index_to_col_name(zero_based_index: int) -> str: if zero_based_index < 0: - raise GsheetError(f'Index ({zero_based_index}) must be >= 0') + raise GsheetError(f"Index ({zero_based_index}) must be >= 0") one_based_index = zero_based_index + 1 def rightmost_digit(num): modulo = num % 26 if modulo == 0: - res = 'Z' + res = "Z" else: offset = modulo - 1 - res = chr(ord('A') + offset) + res = chr(ord("A") + offset) return res def downshift(num): @@ -731,26 +728,26 @@ def downshift(num): digits.append(rightmost_digit(one_based_index)) one_based_index = downshift(one_based_index) digits.reverse() - result = ''.join(digits) + result = "".join(digits) return result def col_name_to_index(col_name: str) -> int: col_name = col_name.upper().strip() - if not re.match('^[A-Z]+$', col_name): - raise GsheetError(f'Column name ({col_name}) must be all letters') + if not re.match("^[A-Z]+$", col_name): + raise GsheetError(f"Column name ({col_name}) must be all letters") def char_to_val(c): - res = ord(c) - ord('A') + 1 + res = ord(c) - ord("A") + 1 return res reversed_col_name = col_name[::-1] result = 0 for i in range(len(reversed_col_name)): char = reversed_col_name[i] - result += char_to_val(char) * (26 ** i) + result += 
char_to_val(char) * (26**i) - result = result - 1 # Convert to 0-based index + result = result - 1 # Convert to 0-based index return result @@ -798,20 +795,20 @@ def get_spreadsheet_id(self): return self.spreadsheet_id def has_tab(self, id_or_name): - sheets = self.sheet_info['sheets'] + sheets = self.sheet_info["sheets"] for s in sheets: - properties = s['properties'] - if properties['sheetId'] == id_or_name or properties['title'] == id_or_name: + properties = s["properties"] + if properties["sheetId"] == id_or_name or properties["title"] == id_or_name: return True return False def get_tab(self, id_or_name): - sheets = self.sheet_info['sheets'] + sheets = self.sheet_info["sheets"] tab_properties = None for s in sheets: - properties = s['properties'] - if properties['sheetId'] == id_or_name or properties['title'] == id_or_name: + properties = s["properties"] + if properties["sheetId"] == id_or_name or properties["title"] == id_or_name: tab_properties = properties break @@ -838,7 +835,7 @@ def get_spreadsheet_id(self): return self.spreadsheet.spreadsheet_id def get_id(self): - return self.tab_properties['sheetId'] + return self.tab_properties["sheetId"] def get_name(self): - return self.tab_properties['title'] + return self.tab_properties["title"] diff --git a/forthic-py/src/forthic/modules/html_module.py b/forthic-py/src/forthic/modules/html_module.py index 4876f3a..18e5859 100644 --- a/forthic-py/src/forthic/modules/html_module.py +++ b/forthic-py/src/forthic/modules/html_module.py @@ -4,14 +4,11 @@ from ..module import Module import random from ..interfaces import IInterpreter -from ...utils.errors import ( - HtmlModuleError, - InvalidForthicWordError -) +from ..utils.errors import HtmlModuleError, InvalidForthicWordError from typing import List, Dict, Optional -ASYNC_BUTTON_KEY = '_async_forthic_button_state' +ASYNC_BUTTON_KEY = "_async_forthic_button_state" class HtmlModule(Module): @@ -21,35 +18,36 @@ class HtmlModule(Module): See `docs/modules/html_module.md` 
for detailed descriptions of each word. """ + def __init__(self, interp: IInterpreter): - super().__init__('html', interp, HTML_FORTHIC) - self.add_module_word('ELEMENT', self.word_ELEMENT) - self.add_module_word('RAW-HTML', self.word_RAW_HTML) - self.add_module_word('HTML', self.word_MARKDOWN_to_HTML) - self.add_module_word('RENDER', self.word_RENDER) - self.add_module_word('JS-PATH!', self.word_JS_PATH_bang) - self.add_module_word('RUN-FORTHIC.JS', self.word_RUN_FORTHIC_JS) - self.add_module_word('FORTHIC-BUTTON', self.word_FORTHIC_BUTTON) + self.add_module_word("MARKDOWN>HTML", self.word_MARKDOWN_to_HTML) + self.add_module_word("RENDER", self.word_RENDER) + self.add_module_word("JS-PATH!", self.word_JS_PATH_bang) + self.add_module_word("RUN-FORTHIC.JS", self.word_RUN_FORTHIC_JS) + self.add_module_word("FORTHIC-BUTTON", self.word_FORTHIC_BUTTON) - self.add_module_word('ASYNC-FORTHIC-BUTTON', self.word_ASYNC_FORTHIC_BUTTON) - self.add_module_word('RUN-ASYNC-BUTTON', self.word_RUN_ASYNC_BUTTON) + self.add_module_word("ASYNC-FORTHIC-BUTTON", self.word_ASYNC_FORTHIC_BUTTON) + self.add_module_word("RUN-ASYNC-BUTTON", self.word_RUN_ASYNC_BUTTON) - self.js_path = '/static/js/forthic/v2/' + self.js_path = "/static/js/forthic/v2/" # ( type -- element ) def word_ELEMENT(self, interp: IInterpreter): @@ -191,7 +189,7 @@ def word_RENDER(self, interp: IInterpreter): else: elements = [element] - result = '' + result = "" for e in elements: result += e.render() interp.stack_push(result) @@ -208,17 +206,17 @@ def word_RUN_FORTHIC_JS(self, interp: IInterpreter): and runs a forthic string """ forthic = interp.stack_pop() - result = Element('script') - result.setAttribute('type', 'module') + result = Element("script") + result.setAttribute("type", "module") random_str = random.uniform(0, 1) result.setInnerHTML( - f''' + f""" import {{ Interpreter }} from "{self.js_path}/interpreter.mjs?version={random_str}"; let interp = new Interpreter(); interp.run(`{forthic}`) .then(() => {{ 
window.FORTHIC_INTERP = interp - }})''' + }})""" ) interp.stack_push(result) @@ -261,22 +259,22 @@ def is_running(): state_info = button_states.get(button_id) if not state_info: state_info = {} - state = state_info.get('state') - res = state == 'RUNNING' + state = state_info.get("state") + res = state == "RUNNING" return res if is_running(): return try: - store_button_states(button_id, {'state': 'RUNNING'}) + store_button_states(button_id, {"state": "RUNNING"}) interp.run(forthic) - store_button_states(button_id, {'state': ''}) + store_button_states(button_id, {"state": ""}) except Exception as e: - store_button_states(button_id, {'state': 'ERROR', 'message': str(e)}) + store_button_states(button_id, {"state": "ERROR", "message": str(e)}) -HTML_FORTHIC = ''' +HTML_FORTHIC = """ : COMMON-TYPES ["H1" "H2" "H3" "H4" "H5" "H6" "P" "UL" "OL" "LI" "A" "SPAN" @@ -294,27 +292,27 @@ def is_running(): COMMON-TYPES EXPORT ["SVG"] EXPORT -''' +""" VOID_ELEMENTS = [ - 'area', - 'base', - 'br', - 'col', - 'embed', - 'hr', - 'img', - 'input', - 'link', - 'meta', - 'param', - 'source', - 'track', - 'wbr', + "area", + "base", + "br", + "col", + "embed", + "hr", + "img", + "input", + "link", + "meta", + "param", + "source", + "track", + "wbr", ] -VALID_POSITIONS = ['beforebegin', 'afterbegin', 'beforeend', 'afterend'] +VALID_POSITIONS = ["beforebegin", "afterbegin", "beforeend", "afterend"] class Element: @@ -322,14 +320,14 @@ def __init__(self, elem_type: str): self.tagName = elem_type.upper() self.childNodes: List[Element] = [] self.attributes: Dict[str, str] = {} - self.beforeBegin: str = '' - self.afterEnd: str = '' + self.beforeBegin: str = "" + self.afterEnd: str = "" self.innerHTML: Optional[str] = None - def appendChild(self, item: 'Element'): + def appendChild(self, item: "Element"): self.childNodes.append(item) - def getChildNodes(self) -> List['Element']: + def getChildNodes(self) -> List["Element"]: return self.childNodes def setInnerHTML(self, string: str): @@ -343,28 
+341,28 @@ def getInnerHTML(self) -> str: if self.innerHTML is not None: return self.innerHTML - result = '' + result = "" for child in self.childNodes: result += child.render() return result def insertAdjacentHTML(self, position: str, string: str): - if position == 'beforebegin': + if position == "beforebegin": self.beforeBegin += string - elif position == 'afterbegin': + elif position == "afterbegin": raw_items: List[Element] = [RawHtml(string)] self.childNodes = raw_items + self.childNodes - elif position == 'beforeend': + elif position == "beforeend": self.childNodes.append(RawHtml(string)) - elif position == 'afterend': + elif position == "afterend": self.afterEnd += string else: - raise HtmlModuleError(f'Unhandled position: {position}') + raise HtmlModuleError(f"Unhandled position: {position}") def getAttribute(self, key: str) -> str: result = self.attributes.get(key) if result is None: - result = '' + result = "" return result def setAttribute(self, key, val: Optional[str] = None): @@ -381,15 +379,15 @@ def addClasses(self, classes: List[str]): self.setClasses(element_classes) def getClasses(self) -> List[str]: - class_string = self.attributes.get('class') + class_string = self.attributes.get("class") if not class_string: return [] - result = class_string.strip().split(' ') + result = class_string.strip().split(" ") return result def setClasses(self, classes: List[str]): - class_string = ' '.join(classes) - self.attributes['class'] = class_string + class_string = " ".join(classes) + self.attributes["class"] = class_string def removeClasses(self, classes: List[str]): element_classes = self.getClasses() @@ -408,21 +406,21 @@ def get_attr_string() -> str: if self.attributes[key] is None: fragment = key fragments.append(fragment) - res = ' '.join(fragments) - if res != '': - res = ' ' + res + res = " ".join(fragments) + if res != "": + res = " " + res return res tag = self.tagName.lower() attributes = get_attr_string() if tag in VOID_ELEMENTS: - result = 
f'<{tag}{attributes}>' + result = f"<{tag}{attributes}>" else: result = self.beforeBegin - result += f'<{tag}{attributes}>' + result += f"<{tag}{attributes}>" result += self.getInnerHTML() - result += f'' + result += f"" result += self.afterEnd return result @@ -442,9 +440,9 @@ def __init__(self, interp: IInterpreter, html_id: str, label: str, forthic: str) self.forthic = forthic self.options = { - 'reload_page': False, - 'post_data_ids': None, - 'confirmable': False, + "reload_page": False, + "post_data_ids": None, + "confirmable": False, } def __getitem__(self, key: str) -> Optional[bool]: @@ -458,49 +456,47 @@ def __setitem__(self, key: str, value: Optional[bool]): def render(self) -> str: def get_done_code() -> str: - if self.options['reload_page']: - res = ''' + if self.options["reload_page"]: + res = """ window.location.reload(true); - ''' + """ else: - res = ''' + res = """ $('#{html_id}').prop("disabled", false); alert("Done!"); - '''.format( + """.format( html_id=self.html_id ) return res def get_confirm_code() -> str: - res = 'true' - if self.options['confirmable']: + res = "true" + if self.options["confirmable"]: res = 'confirm("Are you sure?")' return res def make_func_gather_data() -> str: - res = 'function gather_data() {\n' - res += ' var fields = %s\n;' % json.dumps( - self.options['post_data_ids'] - ) - res += ' var res = {};\n' + res = "function gather_data() {\n" + res += " var fields = %s\n;" % json.dumps(self.options["post_data_ids"]) + res += " var res = {};\n" res += " fields.forEach(f => res[f] = $('#' + f).val());\n" - res += ' return res;\n' - res += '}\n' + res += " return res;\n" + res += "}\n" return res def make_func_prepend_data() -> str: - res = 'function prepend_data(forthic) {\n' - if self.options['post_data_ids']: + res = "function prepend_data(forthic) {\n" + if self.options["post_data_ids"]: res += make_func_gather_data() - res += 'var data = gather_data();\n' + res += "var data = gather_data();\n" res += "var res = 
`'${JSON.stringify(data)}' ${forthic}`;\n" else: - res += 'var res = forthic;\n' - res += ' return res;\n' - res += '}\n' + res += "var res = forthic;\n" + res += " return res;\n" + res += "}\n" return res - result = ''' + result = """ - '''.format( + """.format( html_id=self.html_id, label=self.label, forthic=self.forthic, @@ -551,13 +547,13 @@ def __init__(self, interp: IInterpreter, html_id: str, label: str, forthic: str) self.interp = interp # Ensure that `forthic` is just a Forthic word - if ' ' in forthic or "'" in forthic or '"' in forthic: + if " " in forthic or "'" in forthic or '"' in forthic: raise InvalidForthicWordError(forthic) self.options = { - 'reload_page': False, - 'post_data_ids': None, - 'confirmable': False, + "reload_page": False, + "post_data_ids": None, + "confirmable": False, } def __getitem__(self, key: str) -> Optional[bool]: @@ -581,51 +577,49 @@ def get_async_state(self) -> Dict[str, str]: def render(self) -> str: def get_done_code() -> str: - if self.options['reload_page']: - res = ''' + if self.options["reload_page"]: + res = """ window.location.reload(true); - ''' + """ else: - res = ''' + res = """ $('#{html_id}').prop("disabled", false); alert("Done!"); - '''.format( + """.format( html_id=self.html_id ) return res def get_confirm_code() -> str: - res = 'true' - if self.options['confirmable']: + res = "true" + if self.options["confirmable"]: res = 'confirm("Are you sure?")' return res def make_func_gather_data() -> str: - res = 'function gather_data() {\n' - res += ' var fields = %s\n;' % json.dumps( - self.options['post_data_ids'] - ) - res += ' var res = {};\n' + res = "function gather_data() {\n" + res += " var fields = %s\n;" % json.dumps(self.options["post_data_ids"]) + res += " var res = {};\n" res += " fields.forEach(f => res[f] = $('#' + f).val());\n" - res += ' return res;\n' - res += '}\n' + res += " return res;\n" + res += "}\n" return res def make_func_prepend_data() -> str: - res = 'function prepend_data(forthic) {\n' 
- if self.options['post_data_ids']: + res = "function prepend_data(forthic) {\n" + if self.options["post_data_ids"]: res += make_func_gather_data() - res += 'var data = gather_data();\n' + res += "var data = gather_data();\n" res += "var res = `'${JSON.stringify(data)}' ${forthic}`;\n" else: - res += 'var res = forthic;\n' - res += ' return res;\n' - res += '}\n' + res += "var res = forthic;\n" + res += " return res;\n" + res += "}\n" return res async_state = self.get_async_state() - result = f''' + result = f"""