diff --git a/.gitignore b/.gitignore
index 56ac861..63e00fa 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,7 +2,7 @@ __pycache__/
 .idea/
 *.DS_Store
 *.json
-schedule.html
+db/html/
 frontend/static/README.md
 
 # pytest
diff --git a/Pipfile b/Pipfile
index 4977ced..1e5b2c2 100644
--- a/Pipfile
+++ b/Pipfile
@@ -16,6 +16,8 @@
 maya = "*"
 pytest = "*"
 flask = "*"
 pylint = "*"
+selenium = "*"
+colorama = "*"
 
 [dev-packages]
diff --git a/Pipfile.lock b/Pipfile.lock
index ae649ba..97f0feb 100644
--- a/Pipfile.lock
+++ b/Pipfile.lock
@@ -1,7 +1,7 @@
 {
     "_meta": {
         "hash": {
-            "sha256": "aebd83c7173a3d0dcc9013e1c610a33d4ffc785fe275cabb5825e246f33fbb93"
+            "sha256": "4811f7bec1a856e27bc2c2fff11d68d045e2c5eaa2d2a0c04e1e3bfc7125ea58"
         },
         "host-environment-markers": {
             "implementation_name": "cpython",
@@ -31,10 +31,10 @@
     "default": {
         "astroid": {
             "hashes": [
-                "sha256:032f6e09161e96f417ea7fad46d3fac7a9019c775f202182c22df0e4f714cb1c",
-                "sha256:dea42ae6e0b789b543f728ddae7ddb6740ba33a49fb52c4a4d9cb7bb4aa6ec09"
+                "sha256:0ef2bf9f07c3150929b25e8e61b5198c27b0dca195e156f0e4d5bdd89185ca1a",
+                "sha256:fc9b582dba0366e63540982c3944a9230cbc6f303641c51483fa547dcc22393a"
             ],
-            "version": "==1.6.4"
+            "version": "==1.6.5"
         },
         "atomicwrites": {
             "hashes": [
@@ -79,6 +79,13 @@
             ],
             "version": "==6.7"
         },
+        "colorama": {
+            "hashes": [
+                "sha256:463f8483208e921368c9f306094eb6f725c6ca42b0f97e313cb5d5512459feda",
+                "sha256:48eb22f4f8461b1df5734a074b57042430fb06e1d61bd1e11b078c0fe6d7a1f1"
+            ],
+            "version": "==0.3.9"
+        },
         "dateparser": {
             "hashes": [
                 "sha256:b452ef8b36cd78ae86a50721794bc674aa3994e19b570f7ba92810f4e0a2ae03",
@@ -235,17 +242,17 @@
         },
         "pylint": {
             "hashes": [
-                "sha256:aa519865f8890a5905fa34924fed0f3bfc7d84fc9f9142c16dac52ffecd25a39",
-                "sha256:c353d8225195b37cc3aef18248b8f3fe94c5a6a95affaf885ae21a24ca31d8eb"
+                "sha256:a48070545c12430cfc4e865bf62f5ad367784765681b3db442d8230f0960aa3c",
+                "sha256:fff220bcb996b4f7e2b0f6812fd81507b72ca4d8c4d05daf2655c333800cb9b3"
             ],
-            "version": "==1.9.1"
+            "version": "==1.9.2"
         },
         "pytest": {
             "hashes": [
-                "sha256:c76e93f3145a44812955e8d46cdd302d8a45fbfc7bf22be24fe231f9d8d8853a",
-                "sha256:39555d023af3200d004d09e51b4dd9fdd828baa863cded3fd6ba2f29f757ae2d"
+                "sha256:26838b2bc58620e01675485491504c3aa7ee0faf335c37fcd5f8731ca4319591",
+                "sha256:32c49a69566aa7c333188149ad48b58ac11a426d5352ea3d8f6ce843f88199cb"
             ],
-            "version": "==3.6.0"
+            "version": "==3.6.1"
         },
         "python-dateutil": {
             "hashes": [
@@ -270,23 +277,23 @@
         },
         "regex": {
             "hashes": [
-                "sha256:333687d9a44738c486735955993f83bd22061a416c48f5a5f9e765e90cf1b0c9",
-                "sha256:361a1fd703a35580a4714ec28d85e29780081a4c399a99bbfb2aee695d72aedb",
-                "sha256:f69d1201a4750f763971ea8364ed95ee888fc128968b39d38883a72a4d005895",
-                "sha256:a50532f61b23d4ab9d216a6214f359dd05c911c1a1ad20986b6738a782926c1a",
-                "sha256:1b428a296531ea1642a7da48562746309c5c06471a97bd0c02dd6a82e9cecee8",
-                "sha256:5b9c0ddd5b4afa08c9074170a2ea9b34ea296e32aeea522faaaaeeeb2fe0af2e",
-                "sha256:27d72bb42dffb32516c28d218bb054ce128afd3e18464f30837166346758af67",
-                "sha256:32cf4743debee9ea12d3626ee21eae83052763740e04086304e7a74778bf58c9",
-                "sha256:35eeccf17af3b017a54d754e160af597036435c58eceae60f1dd1364ae1250c7",
-                "sha256:be42a601aaaeb7a317f818490a39d153952a97c40c6e9beeb2a1103616405348",
-                "sha256:eee4d94b1a626490fc8170ffd788883f8c641b576e11ba9b4a29c9f6623371e0",
-                "sha256:32f6408dbca35040bc65f9f4ae1444d5546411fde989cb71443a182dd643305e",
-                "sha256:a9243d7b359b72c681a2c32eaa7ace8d346b7e8ce09d172a683acf6853161d9c",
-                "sha256:494bed6396a20d3aa6376bdf2d3fbb1005b8f4339558d8ac7b53256755f80303",
"sha256:b44624a38d07d3c954c84ad302c29f7930f4bf01443beef5589e9157b14e2a29" - ], - "version": "==2018.2.21" + "sha256:60ff6be94b168ee7f6f5a8b334503f3d3eda21b2aa9cf3909736bc600ed9455d", + "sha256:92ef64d4afe7e709b57b1ca38a41ef0df54f03b4418f1c0e1b2edb52f671eec8", + "sha256:9ee46c7cb5306c96ae9dad07f608f5a474f47c9505fe046d32df6bcb5e6c18ba", + "sha256:3cc8106f31467d9b7a7ea6d0db95cb7a4097e3683e686c89cc14d3a81f66e637", + "sha256:8ad161a52107e6e4bd56f1ee299b1dc492873b8abbfcf639fea4765d96853e32", + "sha256:3326619c3716dbbfe5b2a3e4a109b0bbb6476a35398612539788b15663e0f0d3", + "sha256:f87f51647eeff0f7a1e787b2a8b56d059cfa3ea28f2d825b50a66a172574c6f0", + "sha256:712922a779b153290e3007f4bbdb0af459c36c70f00c6690acd0a86f2f3f52b0", + "sha256:7638a3babd94f947e639c45c0b13cee62caea31ad6fedce392bd3edacf412c5f", + "sha256:3b95120ffcbeb44eb3362456ec887c72190726a2a3e270f1c7343266941826d4", + "sha256:45fa86b2e6bf8b1f5b60820d2d9520d42f32497d4bf8903ed0b86285b29d3aa9", + "sha256:bfb99e3bdf1ff372c8876f217b00fe44dd08f3f53ab590df6fa93b3b72d9dfb6", + "sha256:99665c4ca23f9b09618b38afd3c11d0dd6424d0e2d4374afd4c3fc319236552b", + "sha256:ab174253361da55a8425f60bbe319fb32083b295507bace5513834bc3723fcd1", + "sha256:afeb71482e4f7c18ad94802c6c8fbabf2585d3804ca45a8c9db046c120a44a51" + ], + "version": "==2018.6.6" }, "requests": { "hashes": [ @@ -295,6 +302,13 @@ ], "version": "==2.18.4" }, + "selenium": { + "hashes": [ + "sha256:1372101ad23798462038481f92ba1c7fab8385c788b05da6b44318f10ea52422", + "sha256:b8a2630fd858636c894960726ca3c94d8277e516ea3a9d81614fb819a5844764" + ], + "version": "==3.12.0" + }, "six": { "hashes": [ "sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb", diff --git a/scrape_advanced.py b/scrape_advanced.py new file mode 100644 index 0000000..5b93eea --- /dev/null +++ b/scrape_advanced.py @@ -0,0 +1,307 @@ +import sys +from os import makedirs, rename, remove +from os.path import join, exists +from collections import defaultdict +from itertools import product +import re + +# 3rd party +import requests +from bs4 import BeautifulSoup +from tinydb import TinyDB +from colorama import init, Fore, Style +from selenium_login import scrape_cookies, kill_driver +from selenium.common.exceptions import TimeoutException + +from settings import DB_DIR + +CAMPUS_RANGE = (1, 2) +YEAR_RANGE = (1, 8) +QUARTER_RANGE = (1, 4) + +DEBUG = True + +PREFIXES = ('old', 'debug') + +ADVANCED_FORM_DATA = [ + [ + [('sel_subj', 'dummy'), ('sel_day', 'dummy'), ('sel_schd', 'dummy'), + ('sel_insm', 'dummy'), ('sel_camp', 'dummy'), ('sel_levl', 'dummy'), + ('sel_sess', 'dummy'), ('sel_instr', 'dummy'), ('sel_ptrm', 'dummy'), + ('sel_attr', 'dummy')], + [('sel_crse', ''), ('sel_title', ''), ('sel_from_cred', ''), + ('sel_to_cred', ''), ('sel_camp', '%'), ('sel_sess', '%'), + ('sel_instr', '%'), ('sel_ptrm', '%'), ('begin_hh', '0'), + ('begin_mi', '0'), ('begin_ap', 'a'), ('end_hh', '0'), ('end_mi', '0'), + ('end_ap', 'a'), ('SUB_BTN', 'Section Search'), ('path', '1')] + ], + [ + [('sel_subj', 'dummy'), ('sel_day', 'dummy'), ('sel_schd', 'dummy'), + ('sel_insm', 'dummy'), ('sel_camp', 'dummy'), ('sel_levl', 'dummy'), + ('sel_sess', 'dummy'), ('sel_instr', 'dummy'), ('sel_ptrm', 'dummy'), + ('sel_attr', 'dummy')], + [('sel_crse', ''), ('sel_title', ''), ('sel_schd', '%'), + ('sel_from_cred', ''), ('sel_to_cred', ''), ('sel_camp', '%'), + ('sel_instr', '%'), ('sel_sess', '%'), ('sel_ptrm', '%'), + ('sel_attr', '%'), ('begin_hh', '0'), ('begin_mi', '0'), + ('begin_ap', 'a'), ('end_hh', '0'), ('end_mi', '0'), + ('end_ap', 'a'), 
+    ],
+    [
+        [('sel_subj', 'dummy'), ('sel_day', 'dummy'), ('sel_schd', 'dummy'),
+         ('sel_insm', 'dummy'), ('sel_camp', 'dummy'), ('sel_levl', 'dummy'),
+         ('sel_sess', 'dummy'), ('sel_instr', 'dummy'), ('sel_ptrm', 'dummy'),
+         ('sel_attr', 'dummy')],
+        [('sel_crse', ''), ('sel_title', ''), ('sel_schd', '%'),
+         ('sel_from_cred', ''), ('sel_to_cred', ''), ('sel_camp', '%'),
+         ('sel_levl', '%'), ('sel_ptrm', '%'), ('sel_instr', '%'),
+         ('sel_sess', '%'), ('sel_attr', '%'), ('begin_hh', '0'),
+         ('begin_mi', '0'), ('begin_ap', 'a'), ('end_hh', '0'), ('end_mi', '0'),
+         ('end_ap', 'a'), ('SUB_BTN', 'Section Search'), ('path', '1')]
+    ]
+]
+
+
+def main():
+    if not exists(DB_DIR):
+        makedirs(DB_DIR, exist_ok=True)
+
+    if not exists(join(DB_DIR, 'html')):
+        makedirs(join(DB_DIR, 'html'), exist_ok=True)
+
+    codes = generate_term_codes()
+    print_c(f'Loaded {color(Fore.CYAN, len(codes))} term codes\n')
+
+    prefix = PREFIXES[0] if not DEBUG else PREFIXES[1]
+    if DEBUG:
+        codes = codes[:5]
+
+    print_c(f'Scraping session cookie…\r')
+
+    cookies = scrape_cookies()
+    print_c(f"Scraped session cookie {color(Fore.YELLOW, cookies['CPSESSID'])}\n{'-'*79}\n")
+
+    temp_path = join(DB_DIR, 'temp.json')
+
+    try:
+        for term in codes:
+            print_c(f" [{term}] [{color(Fore.YELLOW, 'MINING…')}] Scraping…\r")
+
+            temp = TinyDB(temp_path)
+
+            dept_data = mine_dept_data(term, write=False)
+            print_c(f" [{term}] [{color(Fore.YELLOW, 'MINING…')}] " +
+                    f"Parsing {len(dept_data)} departments…\r")
+
+            failed = False
+            for idx, variant in enumerate(ADVANCED_FORM_DATA):
+                content = mine_table_data(term, variant, dept_data, cookies, write=False)
+                if advanced_parse(content, db=temp, term=term):
+                    break
+                elif idx == len(ADVANCED_FORM_DATA) - 1:
+                    failed = True
+
+            # os.rename() returns None, so it cannot be used as a condition;
+            # the rename itself already moves temp.json into its final place.
+            rename(temp_path, join(DB_DIR, f'{prefix}_{term}_database.json'))
+
+            db = TinyDB(join(DB_DIR, f'{prefix}_{term}_database.json'))
+
+            num_courses = sum([len(db.table(t).all()) for t in db.tables()])
+
+            if failed:
+                print_c(f" [{term}] [{color(Fore.RED, 'ERROR!!')}] Payload failed…\n")
+            else:
+                print_c(f" [{term}] [{color(Fore.GREEN, 'SUCCESS')}] Mined {num_courses} courses\n")
+
+    except (KeyboardInterrupt, TimeoutException) as e:
+        print_c(f"{color(Fore.GREEN, e)}\n")
+        kill_driver()
+        remove(temp_path)
+    finally:
+        kill_driver()
+
+
+def mine_dept_data(term, write=False):
+    '''
+    Mine dept data will grab the department IDs for a given quarter.
+    :param term: (str) the term to mine
+    :param write: (bool) write to file?
+    :return data: (list(tuple)) ('sel_subj', <department code>) pairs
+    '''
+    data = [('p_calling_proc', 'bwckschd.p_disp_dyn_sched'), ('p_term', f'{term}')]
+
+    res = requests.post('https://banssb.fhda.edu/PROD/bwckgens.p_proc_term_date', data=data)
+    res.raise_for_status()
+
+    if write:
+        write_to_file(res, term)
+
+    soup = BeautifulSoup(res.content, "html5lib")
+    select = soup.find('select', {'id': 'subj_id'})
+    options = select.find_all('option')
+
+    data = [('sel_subj', o['value']) for o in options]
+    return data
+
+
+def mine_table_data(term, payload, dept_data, cookies, write=False):
+    '''
+    Mine will hit the database for foothill's class listings
+    :param term: (str) the term to mine
+    :param payload: (list) data payload for the request
+    :param dept_data: (list) department ('sel_subj', <code>) tuples
+    :param cookies: (dict) cookies to send with POST
+    :param write: (bool) write to file?
+    :return res.content: (bytes) the html body
+    '''
+    data = [('rsts', 'dummy'), ('crn', 'dummy'), ('term_in', f'{term}')]
+
+    data.extend(payload[0])
+
+    if DEBUG:
+        dept_data = dept_data[:1]
+
+    data.extend(dept_data)
+
+    data.extend(payload[1])
+
+    res = requests.post('https://banssb.fhda.edu/PROD/bwskfcls.P_GetCrse_Advanced',
+                        cookies=cookies, data=data)
+    res.raise_for_status()
+
+    if write:
+        write_to_file(res, term)
+
+    return res.content
+
+
+def advanced_parse(content, db, term=''):
+    '''
+    Advanced parse takes the content from the request and then populates the database with the data
+    :param content: (html) The html containing the courses
+    :param db: (TinyDB) the current database
+    :param term: (str) the term being parsed
+    :return: (bool) True if a results table was found and parsed, else False
+    '''
+    soup = BeautifulSoup(content, 'html5lib')
+    table_rows = None
+
+    try:
+        table = soup.find('table', {'class': 'datadisplaytable'})
+        table_rows = table.find_all('tr')
+    except AttributeError:
+        return False
+
+    table_headers = list()
+    start_idx = 0
+    for i, tr in enumerate(table_rows):
+        header_cols = tr.find_all('th', {'class': 'ddheader'})
+        for th in header_cols:
+            table_headers.append(get_parsed_text(th))
+        if table_headers:
+            start_idx = i
+            break
+
+    for tr in table_rows[start_idx:]:
+        parse_row(tr, table_headers, db)
+    return True
+
+
+def parse_row(tr, th, db):
+    try:
+        cols = tr.find_all('td', {'class': 'dddefault'})
+
+        if cols:
+            s = defaultdict(lambda: defaultdict(list))
+
+            num_blank = 0
+            for i, c in enumerate(cols):
+                a = c.find('a')
+                cols[i] = get_parsed_text(a) if a else get_parsed_text(cols[i])
+                if cols[i].isspace():
+                    num_blank += 1
+
+            if num_blank > len(cols) - num_blank:
+                raise BlankRow
+
+            data = dict(zip(th, cols))
+
+            subject = data['Subj']
+            key = data['Crse']
+            crn = data['CRN']
+
+            s[key][crn].append(data)
+            j = dict(s)
+
+            db.table(f'{subject}').insert(j)
+    except BlankRow:
+        return
+
+
+def generate_term_codes():
+    """
+    This helper generates a list of term codes based on the ranges set by:
+    YEAR_RANGE, QUARTER_RANGE, CAMPUS_RANGE
+    :return: (list(str)) list of term codes
+    """
+    i = range(YEAR_RANGE[0], YEAR_RANGE[1] + 1)
+    j = range(QUARTER_RANGE[0], QUARTER_RANGE[1] + 1)
+    k = range(CAMPUS_RANGE[0], CAMPUS_RANGE[1] + 1)
+    codes = [f'201{x[0]}{x[1]}{x[2]}' for x in product(i, j, k)]
+    return codes
+
+
+class BlankRow(Exception):
+    pass
+
+
+def get_parsed_text(tag):
+    """
+    Extracts a tag's text and strips any leftover html markup
+    :param tag: (bs4.Tag) tag holding the text to isolate
+    :return: (str) isolated text
+    """
+    text = tag.get_text()
+    p = re.compile(r'<.*?>')
+    return p.sub('', text)
+
+
+def print_c(message):
+    """
+    Clears last carriage returned line and writes a new one
+    :param message: (str)
+    :return: None
+    """
+    sys.stdout.write('\x1b[2K')
+    sys.stdout.write(message)
+    sys.stdout.flush()
+
+
+def color(c, word):
+    """
+    Format template that inserts a color for a given word
+    :param c: (Color) Color to format to
+    :param word: (str) Word to format
+    :return: (str) Formatted String
+    """
+    return f'{c}{word}{Style.RESET_ALL}'
+
+
+def write_to_file(res, term):
+    """
+    Writes a bytestream to a nested file directory
+    :param res: response object
+    :param term: term code
+    :return: None
+    """
+    with open(join(DB_DIR, 'html', term + '.html'), "wb") as file:
+        for chunk in res.iter_content(chunk_size=512):
+            if not chunk:
+                break
+
+            file.write(chunk)
+            file.flush()
+
+
+if __name__ == '__main__':
+    init()  # colorama
+    main()
diff --git a/data_scraper.py b/scrape_term.py
similarity index 72%
rename from data_scraper.py
rename to scrape_term.py
index 2420f18..27aab7d 100644
--- a/data_scraper.py
+++ b/scrape_term.py
@@ -1,27 +1,23 @@
-from collections import defaultdict
 from os import makedirs, rename, remove
 from os.path import join, exists
 from re import match
+from collections import defaultdict
 
 # 3rd party
 import requests
 from bs4 import BeautifulSoup
 from tinydb import TinyDB
 
-from settings import DB_DIR
+from settings import DB_DIR, COURSE_PATTERN, HEADERS, SCHEDULE
 
-SCHEDULE = 'schedule.html'
-TERM_CODES = {'fh': '201911', 'da': '201912'}
-HEADERS = ('course', 'CRN', 'desc', 'status', 'days', 'time', 'start', 'end',
-           'room', 'campus', 'units', 'instructor', 'seats', 'wait_seats', 'wait_cap')
+CURRENT_TERM_CODES = {'fh': '201911', 'da': '201912'}
 
-COURSE_PATTERN = r'[FD]0*(\d*\w?)\.?\d*([YWZH])?'
 
 def main():
     if not exists(DB_DIR):
         makedirs(DB_DIR, exist_ok=True)
 
-    for term in TERM_CODES.values():
+    for term in CURRENT_TERM_CODES.values():
         temp_path = join(DB_DIR, 'temp.json')
 
         temp = TinyDB(temp_path)
@@ -37,27 +33,14 @@ def main():
 
 def mine(term, write=False):
     '''
-    Mine will hit the database for foothill's class listings and write it to a file.
+    Mine will hit the database for foothill's class listings
     :param term: (str) the term to mine
     :param write: (bool) write to file?
     :return res.content: (json) the html body
     '''
-    headers = {
-        'Origin': 'https://banssb.fhda.edu',
-        'Accept-Encoding': 'gzip, deflate, br',
-        'Accept-Language': 'en-US,en;q=0.9',
-        'User-Agent': 'FoothillAPI',
-        'Content-Type': 'application/x-www-form-urlencoded',
-        'Accept': 'text/html, */*; q=0.01',
-        'Referer': 'https://banssb.fhda.edu/PROD/fhda_opencourses.P_Application',
-        'X-Requested-With': 'XMLHttpRequest',
-        'Connection': 'keep-alive',
-    }
-
-    data = [('termcode', f'{term}'), ]
-
-    res = requests.post('https://banssb.fhda.edu/PROD/fhda_opencourses.P_GetCourseList',
-                        headers=headers, data=data)
+    data = [('termcode', f'{term}')]
+
+    res = requests.post('https://banssb.fhda.edu/PROD/fhda_opencourses.P_GetCourseList', data=data)
     res.raise_for_status()
 
     if write:
@@ -84,8 +67,8 @@ def parse(content, db):
     rows = t.find_all('tr', {'class': 'CourseRow'})
     s = defaultdict(lambda: defaultdict(list))
 
-    for r in rows:
-        cols = r.find_all(lambda tag: tag.name == 'td' and not tag.get_text().isspace())
+    for tr in rows:
+        cols = tr.find_all(lambda tag: tag.name == 'td' and not tag.get_text().isspace())
 
         if cols:
             for i, c in enumerate(cols):
diff --git a/selenium_login.py b/selenium_login.py
new file mode 100644
index 0000000..ea35661
--- /dev/null
+++ b/selenium_login.py
@@ -0,0 +1,50 @@
+import os
+
+from selenium import webdriver
+from selenium.webdriver.support.ui import WebDriverWait
+from selenium.webdriver.support import expected_conditions as EC
+
+from selenium.webdriver.chrome.options import Options
+
+chrome_options = Options()
+chrome_options.add_argument("--window-size=300,400")
+
+driver = webdriver.Chrome(chrome_options=chrome_options)
+driver.get("https://myportal.fhda.edu/cp/home/displaylogin")
+
+
+def scrape_cookies():
+    driver.execute_script(f"document.getElementById('user').value='{os.environ['MP_USER']}'")
+    driver.execute_script(f"document.getElementById('pass').value='{os.environ['MP_PASS']}'")
+
+    try:
+        driver.execute_script("doLogin()")
+        WebDriverWait(driver, 3).until(
+            EC.title_is("MyPortal / Foothill-De Anza College District")
+        )
+
+        driver.get(
"https://myportal.fhda.edu/render.UserLayoutRootNode.uP?uP_tparam=utf&utf=%2fcp%2fip%2flogin%3fsys%3dsctssb%26url%3dhttps%3A%2F%2Fbanssb.fhda.edu%2FPROD%2Fbwskfcls.p_sel_crse_search") + + WebDriverWait(driver, 3).until( + EC.title_is("MyPortal / Foothill-De Anza College District") + ) + finally: + cookies_list = driver.get_cookies() + + return get_cookies(cookies_list) + + +def get_cookies(cookies_list): + cookies_dict = {} + for cookie in cookies_list: + cookies_dict[cookie['name']] = cookie['value'] + return cookies_dict + + +def kill_driver(): + driver.quit() + + +if __name__ == '__main__': + scrape_cookies() diff --git a/server.py b/server.py index 517ffff..23504c1 100644 --- a/server.py +++ b/server.py @@ -10,7 +10,9 @@ from tinydb import TinyDB from maya import when, MayaInterval -# Quart config +from settings import COURSE_PATTERN, DAYS_PATTERN, CAMPUS_LIST + +# Flask config def add_cors_headers(response): response.headers['Access-Control-Allow-Origin'] = '*' return response @@ -21,11 +23,6 @@ def add_cors_headers(response): DB_ROOT = 'db/' -CAMPUS_LIST = {'fh':'201911', 'da':'201912', 'test':'test'} - -COURSE_PATTERN = r'[FD]0*(\d*\w?)\.?\d*([YWZH])?' -DAYS_PATTERN = f"^{'(M|T|W|Th|F|S|U)?'*7}$" - FH_TYPE_ALIAS = {'standard': None, 'online': 'W', 'hybrid': 'Y'} DA_TYPE_ALIAS = {'standard': None, 'online': 'Z', 'hybrid': 'Y'} diff --git a/settings.py b/settings.py index df60686..7f370ff 100644 --- a/settings.py +++ b/settings.py @@ -1,7 +1,15 @@ import os ROOT_DIR = os.path.abspath(os.path.dirname(__file__)) -API_DIR = os.path.join(ROOT_DIR, 'owlapi') DB_DIR = os.path.join(ROOT_DIR, 'db') TEST_DIR = os.path.join(ROOT_DIR, 'tests') TEST_DB_DIR = os.path.join(TEST_DIR, 'test_db') + +COURSE_PATTERN = r'[FD]0*(\d*\w?)\.?\d*([YWZH])?' +DAYS_PATTERN = f"^{'(M|T|W|Th|F|S|U)?'*7}$" + + +SCHEDULE = 'schedule.html' +HEADERS = ('course', 'CRN', 'desc', 'status', 'days', 'time', 'start', 'end', 'room', 'campus', 'units', 'instructor', 'seats', 'wait_seats', 'wait_cap') + +CAMPUS_LIST = {'fh': '201911', 'da': '201912', 'test': 'test'}