diff --git a/.coveragerc b/.coveragerc index 228ef0f8..6a64b4d1 100755 --- a/.coveragerc +++ b/.coveragerc @@ -2,6 +2,7 @@ # Exclude lines that match patterns from coverage report. exclude_lines = if __name__ == .__main__.: + \\$ # Only show one number after decimal point in report. precision = 1 diff --git a/CHANGELOG b/CHANGELOG index 9eb5e61a..dcd61df3 100755 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,5 +1,13 @@ # Changelog +## v0.8.9 + +### Improvements + +- Added Ctrl+C handler +- Added configurable buffer size for HTTP requests +- Added anova to supported pre-deployed models in tabpy + ## v0.8.7 ### Improvements diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 0a94bdfd..63518658 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -177,3 +177,15 @@ TabPy package: python setup.py sdist bdist_wheel python -m twine upload dist/* ``` + +To publish test version of the package use the following command: + +```sh +python -m twine upload --repository-url https://test.pypi.org/legacy/ dist/* +``` + +To install package from TestPyPi use the command: + +```sh +pip install -i https://test.pypi.org/simple/ tabpy +``` diff --git a/docs/server-config.md b/docs/server-config.md index 39c1ca39..e6e6508e 100755 --- a/docs/server-config.md +++ b/docs/server-config.md @@ -85,6 +85,9 @@ at [`logging.config` documentation page](https://docs.python.org/3.6/library/log not set. - `TABPY_LOG_DETAILS` - when set to `true` additional call information (caller IP, URL, client info, etc.) is logged. Default value - `false`. +- `TABPY_MAX_REQUEST_SIZE_MB` - maximal request size supported by TabPy server + in Megabytes. All requests of exceeding size are rejected. Default value is + 100 Mb. - `TABPY_EVALUATE_TIMEOUT` - script evaluation timeout in seconds. Default value - `30`. @@ -116,10 +119,15 @@ settings._ # end user info if provided. # TABPY_LOG_DETAILS = true +# Limit request size (in Mb) - any request which size exceeds +# specified amount will be rejected by TabPy. 
+# Default value is 100 Mb. +# TABPY_MAX_REQUEST_SIZE_MB = 100 + # Configure how long a custom script provided to the /evaluate method # will run before throwing a TimeoutError. # The value should be a float representing the timeout time in seconds. -#TABPY_EVALUATE_TIMEOUT = 30 +# TABPY_EVALUATE_TIMEOUT = 30 [loggers] keys=root diff --git a/docs/tabpy-tools.md b/docs/tabpy-tools.md index 07ca73c4..e80f7828 100755 --- a/docs/tabpy-tools.md +++ b/docs/tabpy-tools.md @@ -14,6 +14,7 @@ on TabPy server. * [Principal Component Analysis (PCA)](#principal-component-analysis-pca) * [Sentiment Analysis](#sentiment-analysis) * [T-Test](#t-test) + * [ANOVA](#anova) - [Providing Schema Metadata](#providing-schema-metadata) - [Querying an Endpoint](#querying-an-endpoint) - [Evaluating Arbitrary Python Scripts](#evaluating-arbitrary-python-scripts) @@ -318,6 +319,22 @@ The function returns a two-tailed [p-value](https://en.wikipedia.org/wiki/P-valu you may reject or fail to reject the null hypothesis. +### ANOVA + +[Analysis of variance](https://en.wikipedia.org/wiki/Analysis_of_variance) +helps inform if two or more group means within a sample differ. By measuring +the variation between and among groups and computing the resulting F-statistic +we are able to obtain a p-value. While a statistically significant p-value +will inform you that at least 2 of your groups’ means are different from each +other, it will not tell you which of the two groups differ. 
+ +You can call ANOVA from Tableau in the following way, + +```python + +tabpy.query('anova', _arg1, _arg2, _arg3)['response'] +``` + ## Providing Schema Metadata As soon as you share your deployed functions, you also need to share metadata diff --git a/tabpy/VERSION b/tabpy/VERSION index 35864a97..021abec7 100755 --- a/tabpy/VERSION +++ b/tabpy/VERSION @@ -1 +1 @@ -0.8.7 \ No newline at end of file +0.8.9 \ No newline at end of file diff --git a/tabpy/models/deploy_models.py b/tabpy/models/deploy_models.py index 30dda263..a2d6b63c 100644 --- a/tabpy/models/deploy_models.py +++ b/tabpy/models/deploy_models.py @@ -2,7 +2,6 @@ import os import sys import platform -import runpy import subprocess from pathlib import Path from tabpy.models.utils import setup_utils diff --git a/tabpy/models/scripts/ANOVA.py b/tabpy/models/scripts/ANOVA.py new file mode 100644 index 00000000..b151b086 --- /dev/null +++ b/tabpy/models/scripts/ANOVA.py @@ -0,0 +1,25 @@ +import scipy.stats as stats +from tabpy.models.utils import setup_utils + + +def anova(_arg1, _arg2, *_argN): + ''' + ANOVA is a statistical hypothesis test that is used to compare + two or more group means for equality. For more information on + the function and how to use it please refer to tabpy-tools.md + ''' + + cols = [_arg1, _arg2] + list(_argN) + for col in cols: + if not isinstance(col[0], (int, float)): + print("values must be numeric") + raise ValueError + _, p_value = stats.f_oneway(_arg1, _arg2, *_argN) + return p_value + + +if __name__ == '__main__': + setup_utils.deploy_model( + 'anova', + anova, + 'Returns the p-value from an ANOVA test') diff --git a/tabpy/models/scripts/PCA.py b/tabpy/models/scripts/PCA.py index 2a2d6b20..f9f5f492 100644 --- a/tabpy/models/scripts/PCA.py +++ b/tabpy/models/scripts/PCA.py @@ -4,8 +4,6 @@ from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder -import sys -from pathlib import Path from 
tabpy.models.utils import setup_utils diff --git a/tabpy/models/scripts/SentimentAnalysis.py b/tabpy/models/scripts/SentimentAnalysis.py index d3b97e7a..4a978b9f 100644 --- a/tabpy/models/scripts/SentimentAnalysis.py +++ b/tabpy/models/scripts/SentimentAnalysis.py @@ -1,8 +1,6 @@ from textblob import TextBlob import nltk from nltk.sentiment.vader import SentimentIntensityAnalyzer -import sys -from pathlib import Path from tabpy.models.utils import setup_utils diff --git a/tabpy/models/scripts/tTest.py b/tabpy/models/scripts/tTest.py index 9bbc0823..8fffee37 100644 --- a/tabpy/models/scripts/tTest.py +++ b/tabpy/models/scripts/tTest.py @@ -1,6 +1,4 @@ from scipy import stats -import sys -from pathlib import Path from tabpy.models.utils import setup_utils diff --git a/tabpy/tabpy_server/app/ConfigParameters.py b/tabpy/tabpy_server/app/ConfigParameters.py index 9ff1a4ad..14abbca2 100644 --- a/tabpy/tabpy_server/app/ConfigParameters.py +++ b/tabpy/tabpy_server/app/ConfigParameters.py @@ -12,4 +12,5 @@ class ConfigParameters: TABPY_PWD_FILE = 'TABPY_PWD_FILE' TABPY_LOG_DETAILS = 'TABPY_LOG_DETAILS' TABPY_STATIC_PATH = 'TABPY_STATIC_PATH' + TABPY_MAX_REQUEST_SIZE_MB = 'TABPY_MAX_REQUEST_SIZE_MB' TABPY_EVALUATE_TIMEOUT = 'TABPY_EVALUATE_TIMEOUT' diff --git a/tabpy/tabpy_server/app/SettingsParameters.py b/tabpy/tabpy_server/app/SettingsParameters.py index 562b6de4..a455fdaa 100755 --- a/tabpy/tabpy_server/app/SettingsParameters.py +++ b/tabpy/tabpy_server/app/SettingsParameters.py @@ -12,4 +12,5 @@ class SettingsParameters: ApiVersions = 'versions' LogRequestContext = 'log_request_context' StaticPath = 'static_path' + MaxRequestSizeInMb = 'max_request_size_in_mb' EvaluateTimeout = 'evaluate_timeout' diff --git a/tabpy/tabpy_server/app/app.py b/tabpy/tabpy_server/app/app.py index 7297bc9e..2df69620 100644 --- a/tabpy/tabpy_server/app/app.py +++ b/tabpy/tabpy_server/app/app.py @@ -6,6 +6,7 @@ import multiprocessing import os import shutil +import signal import 
tabpy.tabpy_server from tabpy.tabpy import __version__ from tabpy.tabpy_server.app.ConfigParameters import ConfigParameters @@ -60,6 +61,10 @@ def __init__(self, config_file=None): def run(self): application = self._create_tornado_web_app() + max_request_size =\ + int(self.settings[SettingsParameters.MaxRequestSizeInMb]) *\ + 1024 * 1024 + logger.info(f'Setting max request size to {max_request_size} bytes') init_model_evaluator( self.settings, @@ -67,25 +72,41 @@ def run(self): self.python_service) protocol = self.settings[SettingsParameters.TransferProtocol] - if protocol == 'http': - application.listen(self.settings[SettingsParameters.Port]) - elif protocol == 'https': - application.listen(self.settings[SettingsParameters.Port], - ssl_options={ + ssl_options = None + if protocol == 'https': + ssl_options = { 'certfile': self.settings[SettingsParameters.CertificateFile], 'keyfile': self.settings[SettingsParameters.KeyFile] - }) - else: + } + elif protocol != 'http': msg = f'Unsupported transfer protocol {protocol}.' 
logger.critical(msg) raise RuntimeError(msg) + application.listen( + self.settings[SettingsParameters.Port], + ssl_options=ssl_options, + max_buffer_size=max_request_size, + max_body_size=max_request_size) + logger.info( 'Web service listening on port ' f'{str(self.settings[SettingsParameters.Port])}') tornado.ioloop.IOLoop.instance().start() def _create_tornado_web_app(self): + class TabPyTornadoApp(tornado.web.Application): + is_closing = False + + def signal_handler(self, signal): + logger.critical(f'Exiting on signal {signal}...') + self.is_closing = True + + def try_exit(self): + if self.is_closing: + tornado.ioloop.IOLoop.instance().stop() + logger.info('Shutting down TabPy...') + logger.info('Initializing TabPy...') tornado.ioloop.IOLoop.instance().run_sync( lambda: init_ps_server(self.settings, self.tabpy_state)) @@ -95,7 +116,7 @@ def _create_tornado_web_app(self): max_workers=multiprocessing.cpu_count()) # initialize Tornado application - application = tornado.web.Application([ + application = TabPyTornadoApp([ # skip MainHandler to use StaticFileHandler .* page requests and # default to index.html # (r"/", MainHandler), @@ -121,10 +142,12 @@ def _create_tornado_web_app(self): default_filename="index.html")), ], debug=False, **self.settings) + signal.signal(signal.SIGINT, application.signal_handler) + tornado.ioloop.PeriodicCallback(application.try_exit, 500).start() + return application - @staticmethod - def _parse_cli_arguments(): + def _parse_cli_arguments(self): ''' Parse command line arguments. Expected arguments: * --config: string @@ -303,6 +326,10 @@ def set_parameter(settings_key, else 'disabled' logger.info(f'Call context logging is {call_context_state}') + set_parameter(SettingsParameters.MaxRequestSizeInMb, + ConfigParameters.TABPY_MAX_REQUEST_SIZE_MB, + default_val=100) + def _validate_transfer_protocol_settings(self): if SettingsParameters.TransferProtocol not in self.settings: msg = 'Missing transfer protocol information.' 
diff --git a/tabpy/tabpy_server/common/default.conf b/tabpy/tabpy_server/common/default.conf index 52786491..ee02453d 100755 --- a/tabpy/tabpy_server/common/default.conf +++ b/tabpy/tabpy_server/common/default.conf @@ -20,10 +20,15 @@ # end user info if provided. # TABPY_LOG_DETAILS = true +# Limit request size (in Mb) - any request which size exceeds +# specified amount will be rejected by TabPy. +# Default value is 100 Mb. +# TABPY_MAX_REQUEST_SIZE_MB = 100 + # Configure how long a custom script provided to the /evaluate method # will run before throwing a TimeoutError. # The value should be a float representing the timeout time in seconds. -#TABPY_EVALUATE_TIMEOUT = 30 +# TABPY_EVALUATE_TIMEOUT = 30 [loggers] keys=root diff --git a/tabpy/tabpy_server/handlers/management_handler.py b/tabpy/tabpy_server/handlers/management_handler.py index 805d3e51..e7a0b3c0 100644 --- a/tabpy/tabpy_server/handlers/management_handler.py +++ b/tabpy/tabpy_server/handlers/management_handler.py @@ -94,7 +94,7 @@ def _add_or_update_endpoint(self, action, name, version, request_data): self.settings[SettingsParameters.StateFilePath], name, version) self.logger.log(logging.DEBUG, f'Checking source path {src_path}...') - _path_checker = _compile(r'^[\\\:a-zA-Z0-9-_~\s/\.]+$') + _path_checker = _compile(r'^[\\\:a-zA-Z0-9-_~\s/\.\(\)]+$') # copy from staging if src_path: if not isinstance(request_data['src_path'], str): diff --git a/tabpy/tabpy_server/handlers/query_plane_handler.py b/tabpy/tabpy_server/handlers/query_plane_handler.py index 70774626..61657a39 100644 --- a/tabpy/tabpy_server/handlers/query_plane_handler.py +++ b/tabpy/tabpy_server/handlers/query_plane_handler.py @@ -135,6 +135,7 @@ def _process_query(self, endpoint_name, start): # Sanitize input data data = self._sanitize_request_data(json.loads(request_json)) except Exception as e: + self.logger.log(logging.ERROR, str(e)) err_msg = format_exception(e, "Invalid Input Data") self.error_out(400, err_msg) return @@ -177,6 
+178,7 @@ def _process_query(self, endpoint_name, start): return except Exception as e: + self.logger.log(logging.ERROR, str(e)) err_msg = format_exception(e, 'process query') self.error_out(500, 'Error processing query', info=err_msg) return diff --git a/tabpy/tabpy_server/management/util.py b/tabpy/tabpy_server/management/util.py index 13d1eae0..7bc21244 100644 --- a/tabpy/tabpy_server/management/util.py +++ b/tabpy/tabpy_server/management/util.py @@ -46,35 +46,3 @@ def _get_state_from_file(state_path, logger=logging.getLogger(__name__)): return config - -_ZERO = timedelta(0) - - -class _UTC(tzinfo): - """ - A UTC datetime.tzinfo class modeled after the pytz library. It includes a - __reduce__ method for pickling, - """ - - def fromutc(self, dt): - if dt.tzinfo is None: - return self.localize(dt) - return super(_UTC, self).fromutc(dt) - - def utcoffset(self, dt): - return _ZERO - - def tzname(self, dt): - return "UTC" - - def dst(self, dt): - return _ZERO - - def __reduce__(self): - return _UTC, () - - def __repr__(self): - return "" - - def __str__(self): - return "UTC" diff --git a/tabpy/tabpy_server/psws/python_service.py b/tabpy/tabpy_server/psws/python_service.py index 3fd9aa96..768b113e 100644 --- a/tabpy/tabpy_server/psws/python_service.py +++ b/tabpy/tabpy_server/psws/python_service.py @@ -42,6 +42,7 @@ def manage_request(self, msg): logger.debug(f'Returning response {response}') return response except Exception as e: + logger.exception(e) msg = e if hasattr(e, 'message'): msg = e.message @@ -90,6 +91,7 @@ def _load_object(self, object_uri, object_url, object_version, is_update, 'status': 'LoadSuccessful', 'last_error': None} except Exception as e: + logger.exception(e) logger.error(f'Unable to load QueryObject: path={object_url}, ' f'error={str(e)}') @@ -132,6 +134,7 @@ def load_object(self, object_uri, object_url, object_version, is_update, object_uri, object_url, object_version, is_update, object_type) except Exception as e: + logger.exception(e) 
logger.error(f'Unable to load QueryObject: path={object_url}, ' f'error={str(e)}') @@ -226,6 +229,7 @@ def query(self, object_uri, params, uid): else: return UnknownURI(object_uri) except Exception as e: + logger.exception(e) err_msg = format_exception(e, '/query') logger.error(err_msg) return QueryFailed(uri=object_uri, error=err_msg) diff --git a/tabpy/tabpy_tools/client.py b/tabpy/tabpy_tools/client.py index d6a5a175..758397cb 100755 --- a/tabpy/tabpy_tools/client.py +++ b/tabpy/tabpy_tools/client.py @@ -97,21 +97,6 @@ def __repr__(self): ' object at ' + hex(id(self)) + ' connected to ' + repr(self._endpoint) + ">") - def get_info(self): - """Returns a dict containing information about the service. - - Returns - ------- - dict - Keys are: - * name: The name of the service - * creation_time: The creation time in seconds since 1970-01-01 - * description: Description of the service - * server_version: The version of the service used - * state_path: Where the state file is stored. - """ - return self._service.get_info() - def get_status(self): ''' Gets the status of the deployed endpoints. @@ -210,57 +195,6 @@ def _get_endpoint_upload_destination(self): """Returns the endpoint upload destination.""" return self._service.get_endpoint_upload_destination()['path'] - def alias(self, alias, existing_endpoint_name, description=None): - ''' - Create a new endpoint to redirect to an existing endpoint, or update an - existing alias to point to a different existing endpoint. - - Parameters - ---------- - alias : str - The new endpoint name or an existing alias endpoint name. - - existing_endpoint_name : str - A name of an existing endpoint to redirect the alias to. - - description : str, optional - A description for the alias. 
- ''' - # check for invalid PO names - _check_endpoint_name(alias) - - if not description: - description = f'Alias for {existing_endpoint_name}' - - if existing_endpoint_name not in self.get_endpoints(): - raise ValueError( - f'Endpoint "{existing_endpoint_name}" does not exist.') - - # Can only overwrite existing alias - existing_endpoint = self.get_endpoints().get(alias) - endpoint = AliasEndpoint( - name=alias, - type='alias', - description=description, - target=existing_endpoint_name, - cache_state='disabled', - version=1, - ) - - if existing_endpoint: - if existing_endpoint.type != 'alias': - raise RuntimeError( - f'Name "{alias}" is already in use by another ' - 'endpoint.') - - endpoint.version = existing_endpoint.version + 1 - - self._service.set_endpoint(endpoint) - else: - self._service.add_endpoint(endpoint) - - self._wait_for_endpoint_deployment(alias, endpoint.version) - def deploy(self, name, obj, description='', schema=None, override=False): @@ -447,72 +381,6 @@ def _wait_for_endpoint_deployment(self, logger.info(f'Sleeping {interval}...') time.sleep(interval) - def remove(self, name): - ''' - Remove the endpoint that has the specified name. - - Parameters - ---------- - name : str - The name of the endpoint to be removed. - - Notes - ----- - This could fail if the endpoint does not exist, or if the endpoint is - in use by an alias. To check all endpoints - that are depending on this endpoint, use `get_endpoint_dependencies`. - - See Also - -------- - deploy, get_endpoint_dependencies - ''' - self._service.remove_endpoint(name) - - # Wait for the endpoint to be removed - while name in self.get_endpoints(): - time.sleep(1.0) - - def get_endpoint_dependencies(self, endpoint_name=None): - ''' - Get all endpoints that depend on the given endpoint. The only - dependency that is recorded is aliases on the endpoint they refer to. 
- This will not return internal dependencies, as when you have an - endpoint that calls another endpoint from within its code body. - - Parameters - ---------- - endpoint_name : str, optional - The name of the endpoint to find dependent endpoints. If not given, - find all dependent endpoints for all endpoints. - - Returns - ------- - dependent endpoints : dict - If endpoint_name is given, returns a list of endpoint names that - depend on the given endpoint. - - If endpoint_name is not given, returns a dictionary where key is - the endpoint name and value is a set of endpoints that depend on - the endpoint specified by the key. - ''' - endpoints = self.get_endpoints() - - def get_dependencies(endpoint): - result = set() - for d in endpoints[endpoint].dependencies: - result.update([d]) - result.update(get_dependencies(d)) - return result - - if endpoint_name: - return get_dependencies(endpoint_name) - - else: - return { - endpoint: get_dependencies(endpoint) - for endpoint in endpoints - } - def set_credentials(self, username, password): ''' Set credentials for all the TabPy client-server communication diff --git a/tabpy/tabpy_tools/rest.py b/tabpy/tabpy_tools/rest.py index 7189ff49..9446708c 100755 --- a/tabpy/tabpy_tools/rest.py +++ b/tabpy/tabpy_tools/rest.py @@ -1,12 +1,11 @@ import abc +from collections.abc import MutableMapping import logging import requests from requests.auth import HTTPBasicAuth from re import compile import json as json -from collections import MutableMapping as _MutableMapping - logger = logging.getLogger(__name__) @@ -246,7 +245,7 @@ def __init__(self, type, from_json=lambda x: x, to_json=lambda x: x, self.from_json = from_json self.to_json = to_json - def __get__(self, instance, owner): + def __get__(self, instance, _): if instance: try: return getattr(instance, self.name) @@ -290,7 +289,7 @@ def __init__(self, name, bases, dict): self.__rest__.add(k) -class RESTObject(_MutableMapping, metaclass=_RESTMetaclass): +class 
RESTObject(MutableMapping, metaclass=_RESTMetaclass): """A base class that has methods generally useful for interacting with REST objects. The attributes are accessible either as dict keys or as attributes. The object also behaves like a dict, even replicating the diff --git a/tabpy/tabpy_tools/schema.py b/tabpy/tabpy_tools/schema.py index 080d3529..6fc32556 100755 --- a/tabpy/tabpy_tools/schema.py +++ b/tabpy/tabpy_tools/schema.py @@ -1,5 +1,5 @@ import logging -import genson as _genson +import genson import jsonschema @@ -12,9 +12,9 @@ def _generate_schema_from_example_and_description(input, description): to the example in json-schema.org. The description given by the users is then added to the schema. ''' - s = _genson.Schema() + s = genson.SchemaBuilder(None) s.add_object(input) - input_schema = s.to_dict() + input_schema = s.to_schema() if description is not None: if 'properties' in input_schema: diff --git a/tests/integration/integ_test_base.py b/tests/integration/integ_test_base.py index 13305642..5ed8aa60 100755 --- a/tests/integration/integ_test_base.py +++ b/tests/integration/integ_test_base.py @@ -227,20 +227,19 @@ def setUp(self): with open(self.tmp_dir + '/output.txt', 'w') as outfile: cmd = ['tabpy', '--config=' + self.config_file_name] - coverage.process_startup() + preexec_fn = None if platform.system() == 'Windows': self.py = 'python' - self.process = subprocess.Popen( - cmd, - stdout=outfile, - stderr=outfile) else: self.py = 'python3' - self.process = subprocess.Popen( - cmd, - preexec_fn=os.setsid, - stdout=outfile, - stderr=outfile) + preexec_fn = os.setsid + + coverage.process_startup() + self.process = subprocess.Popen( + cmd, + preexec_fn=preexec_fn, + stdout=outfile, + stderr=outfile) # give the app some time to start up... 
time.sleep(5) @@ -299,3 +298,6 @@ def deploy_models(self, username: str, password: str): input=input_string.encode('utf-8'), stdout=outfile, stderr=outfile) + + def _get_process(self): + return self.process diff --git a/tests/integration/test_deploy_and_evaluate_model.py b/tests/integration/test_deploy_and_evaluate_model.py index 3a2e80f5..30274ebe 100644 --- a/tests/integration/test_deploy_and_evaluate_model.py +++ b/tests/integration/test_deploy_and_evaluate_model.py @@ -14,7 +14,7 @@ def test_deploy_and_evaluate_model(self): # Uncomment the following line to preserve # test case output and other files (config, state, ect.) # in system temp folder. - self.set_delete_temp_folder(False) + # self.set_delete_temp_folder(False) self.deploy_models(self._get_username(), self._get_password()) diff --git a/tests/integration/test_deploy_and_evaluate_model_ssl.py b/tests/integration/test_deploy_and_evaluate_model_ssl.py index 09a68feb..bb928a75 100755 --- a/tests/integration/test_deploy_and_evaluate_model_ssl.py +++ b/tests/integration/test_deploy_and_evaluate_model_ssl.py @@ -18,6 +18,11 @@ def _get_key_file_name(self) -> str: return './tests/integration/resources/2019_04_24_to_3018_08_25.key' def test_deploy_and_evaluate_model_ssl(self): + # Uncomment the following line to preserve + # test case output and other files (config, state, ect.) + # in system temp folder. 
+ # self.set_delete_temp_folder(False) + self.deploy_models(self._get_username(), self._get_password()) payload = ( diff --git a/tests/integration/test_deploy_model_ssl_off_auth_off.py b/tests/integration/test_deploy_model_ssl_off_auth_off.py index f5b0749d..1aa0d097 100644 --- a/tests/integration/test_deploy_model_ssl_off_auth_off.py +++ b/tests/integration/test_deploy_model_ssl_off_auth_off.py @@ -5,11 +5,16 @@ class TestDeployModelSSLOffAuthOff(integ_test_base.IntegTestBase): def test_deploy_ssl_off_auth_off(self): + # Uncomment the following line to preserve + # test case output and other files (config, state, ect.) + # in system temp folder. + # self.set_delete_temp_folder(False) + self.deploy_models(self._get_username(), self._get_password()) conn = self._get_connection() - models = ['PCA', 'Sentiment%20Analysis', "ttest"] + models = ['PCA', 'Sentiment%20Analysis', "ttest", "anova"] for m in models: conn.request("GET", f'/endpoints/{m}') m_request = conn.getresponse() diff --git a/tests/integration/test_deploy_model_ssl_off_auth_on.py b/tests/integration/test_deploy_model_ssl_off_auth_on.py index 0f09bdc6..7092b4d8 100644 --- a/tests/integration/test_deploy_model_ssl_off_auth_on.py +++ b/tests/integration/test_deploy_model_ssl_off_auth_on.py @@ -22,7 +22,7 @@ def test_deploy_ssl_off_auth_on(self): conn = self._get_connection() - models = ['PCA', 'Sentiment%20Analysis', "ttest"] + models = ['PCA', 'Sentiment%20Analysis', "ttest", "anova"] for m in models: conn.request("GET", f'/endpoints/{m}', headers=headers) m_request = conn.getresponse() diff --git a/tests/integration/test_deploy_model_ssl_on_auth_off.py b/tests/integration/test_deploy_model_ssl_on_auth_off.py index 8c549b47..584ce648 100644 --- a/tests/integration/test_deploy_model_ssl_on_auth_off.py +++ b/tests/integration/test_deploy_model_ssl_on_auth_off.py @@ -23,7 +23,7 @@ def test_deploy_ssl_on_auth_off(self): # Do not warn about insecure request requests.packages.urllib3.disable_warnings() - models 
= ['PCA', 'Sentiment%20Analysis', "ttest"] + models = ['PCA', 'Sentiment%20Analysis', "ttest", "anova"] for m in models: m_response = session.get(url=f'{self._get_transfer_protocol()}://' f'localhost:9004/endpoints/{m}') diff --git a/tests/integration/test_deploy_model_ssl_on_auth_on.py b/tests/integration/test_deploy_model_ssl_on_auth_on.py index 142d6cde..36739252 100644 --- a/tests/integration/test_deploy_model_ssl_on_auth_on.py +++ b/tests/integration/test_deploy_model_ssl_on_auth_on.py @@ -38,7 +38,7 @@ def test_deploy_ssl_on_auth_on(self): # Do not warn about insecure request requests.packages.urllib3.disable_warnings() - models = ['PCA', 'Sentiment%20Analysis', "ttest"] + models = ['PCA', 'Sentiment%20Analysis', "ttest", "anova"] for m in models: m_response = session.get(url=f'{self._get_transfer_protocol()}://' f'localhost:9004/endpoints/{m}', diff --git a/tests/unit/server_tests/test_config.py b/tests/unit/server_tests/test_config.py index 2ecffb7e..d665657b 100644 --- a/tests/unit/server_tests/test_config.py +++ b/tests/unit/server_tests/test_config.py @@ -10,6 +10,18 @@ class TestConfigEnvironmentCalls(unittest.TestCase): + def test_config_file_does_not_exist(self): + app = TabPyApp('/folder_does_not_exit/file_does_not_exist.conf') + + self.assertEqual(app.settings['port'], 9004) + self.assertEqual(app.settings['server_version'], + open('tabpy/VERSION').read().strip()) + self.assertEqual(app.settings['transfer_protocol'], 'http') + self.assertTrue('certificate_file' not in app.settings) + self.assertTrue('key_file' not in app.settings) + self.assertEqual(app.settings['log_request_context'], False) + self.assertEqual(app.settings['evaluate_timeout'], 30) + @patch('tabpy.tabpy_server.app.app.TabPyApp._parse_cli_arguments', return_value=Namespace(config=None)) @patch('tabpy.tabpy_server.app.app.TabPyState') @@ -166,6 +178,14 @@ def tearDown(self): os.remove(self.fp.name) self.fp = None + def test_invalid_protocol(self): + self.fp.write("[TabPy]\n" + 
"TABPY_TRANSFER_PROTOCOL = gopher") + self.fp.close() + + self.assertTabPyAppRaisesRuntimeError( + 'Unsupported transfer protocol gopher.') + def test_http(self): self.fp.write("[TabPy]\n" "TABPY_TRANSFER_PROTOCOL = http") diff --git a/tests/unit/tools_tests/test_client.py b/tests/unit/tools_tests/test_client.py index 670069d0..62ffccb0 100644 --- a/tests/unit/tools_tests/test_client.py +++ b/tests/unit/tools_tests/test_client.py @@ -2,6 +2,7 @@ from unittest.mock import Mock from tabpy.tabpy_tools.client import Client +from tabpy.tabpy_tools.client import _check_endpoint_name class TestClient(unittest.TestCase): @@ -82,3 +83,12 @@ def test_set_credentials(self): self.client._service.set_credentials.assert_called_once_with( username, password) + + def test_check_invalid_endpoint_name(self): + endpoint_name = 'Invalid:model:@name' + with self.assertRaises(ValueError) as err: + _check_endpoint_name(endpoint_name) + + self.assertEqual(err.exception.args[0], + f'endpoint name {endpoint_name} can only contain: ' + 'a-z, A-Z, 0-9, underscore, hyphens and spaces.')