Skip to content
This repository has been archived by the owner on May 13, 2022. It is now read-only.

Commit

Permalink
fix repeated logging, #78
Browse files Browse the repository at this point in the history
  • Loading branch information
luciano-renzi committed Jan 29, 2018
1 parent 912f381 commit 3e36f39
Show file tree
Hide file tree
Showing 6 changed files with 35 additions and 19 deletions.
2 changes: 1 addition & 1 deletion docs/source/settings.md
Expand Up @@ -86,7 +86,7 @@ Default is 'INFO'

- *log_all_events*

Default is false
Default is true

<br>

Expand Down
12 changes: 9 additions & 3 deletions golem/core/execution_logger.py
Expand Up @@ -27,17 +27,17 @@ def _get_log_level(log_level_string):


def get_logger(log_directory=None, console_log_level='INFO',
log_all_events=False):
log_all_events=True):
"""instantiate the logger for the execution.
log_directory: where the file logs will be stored
console_log_level: the log level used for the console output
log_all_events: log all the events or only golem's events
log_all_events: log all the events or only golem events
3 log levels are defined:
1. console output (by default INFO)
2. file output (DEBUG)
3. file output (INFO)
"""
"""
logger = None
if log_all_events:
logger = logging.getLogger()
Expand Down Expand Up @@ -71,3 +71,9 @@ def get_logger(log_directory=None, console_log_level='INFO',
logger.addHandler(file_handler)
return logger


# def reset_logger(logger):
# logging.shutdown()
# import importlib
# importlib.reload(logging)

6 changes: 3 additions & 3 deletions golem/core/settings_manager.py
Expand Up @@ -46,8 +46,8 @@
// Default option is INFO
"console_log_level": "INFO",
// Log all events, instead of just Golem events. Default is false
"log_all_events": "false"
// Log all events, instead of just Golem events. Default is true
"log_all_events": true
}
""")

Expand Down Expand Up @@ -96,7 +96,7 @@ def assign_settings_default_values(settings):
('remote_url', None),
('remote_browsers', {}),
('console_log_level', 'INFO'),
('log_all_events', False)
('log_all_events', True)
]
for default in defaults:
if not default[0] in settings:
Expand Down
3 changes: 3 additions & 0 deletions golem/execution.py
Expand Up @@ -20,6 +20,9 @@ def __init__(self):
self.report_directory = None
self.logger = None

def reset(self):
    """Return this execution object to its just-constructed state.

    Re-runs the class initializer so every attribute is restored to
    its initial value, letting the module-level Execution instance be
    reused for a fresh test run. Delegating to ``__init__`` keeps the
    attribute list in a single place.
    """
    type(self).__init__(self)


# An instance of Execution is added to sys.modules
# to simplify the public API.
Expand Down
26 changes: 16 additions & 10 deletions golem/test_runner/start_execution.py
Expand Up @@ -286,16 +286,22 @@ def run_test_or_suite(workspace, project, test=None, suite=None, directory=None)
if test_execution.interactive and execution['workers'] != 1:
print('WARNING: to run in debug mode, threads must equal one')

if execution['workers'] == 1:
# run tests serially
for test in execution_list:
run_test(workspace, project,
test['test_name'], test['data_set'],
test['driver'], test_execution.settings,
test['report_directory'])
else:
# run tests using multiprocessing
multiprocess_executor(execution_list, execution['workers'])
# if execution['workers'] == 1:
# # run tests serially
# # Note: when running tests serially I can't seem
# # to be able to reset the logger without stopping
# # third party loggers like selenium's
# # so, running everything through multiprocessing
# for test in execution_list:
# run_test(workspace, project,
# test['test_name'], test['data_set'],
# test['driver'], test_execution.settings,
# test['report_directory'])
# else:
# # run tests using multiprocessing
# multiprocess_executor(execution_list, execution['workers'])

multiprocess_executor(execution_list, execution['workers'])

# run suite `after` function
if execution['suite_after']:
Expand Down
5 changes: 3 additions & 2 deletions golem/test_runner/test_runner.py
Expand Up @@ -179,6 +179,7 @@ def __getattr__(*args):
#sys.modules['golem.execution'] = None

report.generate_report(report_directory, test_name, execution.data, result)
# del execution
sys.modules['golem.execution'] = None

# execution.reset()
# execution_logger.reset_logger(logger)
return

0 comments on commit 3e36f39

Please sign in to comment.