diff --git a/CHANGELOG b/CHANGELOG index e33125e9..7d09bbca 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,7 +1,14 @@ Changelog for apel ================== + * Mon Apr 16 2018 Adrian Coveney - 1.6.2-1 + - [parsers] Added remaining job statuses for SLURM that indicate the job has + stopped and that resources have been used. + - [server] Fix CpuCount being NULL in cloud accounting records and leading to + warnings when summarising. + - [docs] Remove references to specific LSF versions as all now allowed. + * Thu Dec 14 2017 Adrian Coveney - 1.6.1-1 - - [Parsers] Removed version restriction from LSF parser so that it can + - [parsers] Removed version restriction from LSF parser so that it can additionally work with version 10 onwards. - Added more columns to cloud summaries primary key to prevent mis-grouping. - Added Python setup script to enable installation on non-RHEL-based systems. diff --git a/README.md b/README.md index 18d4bd42..87e414b6 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,7 @@ Python and uses MySQL. It has the following components: ### apel-parsers These extract data from the following batch systems: -* LSF 5.x to 9.x +* LSF * PBS * SGE/OGE * SLURM diff --git a/apel.spec b/apel.spec index 4c9cd2e6..f16a40db 100644 --- a/apel.spec +++ b/apel.spec @@ -4,7 +4,7 @@ %endif Name: apel -Version: 1.6.1 +Version: 1.6.2 %define releasenumber 1 Release: %{releasenumber}%{?dist} Summary: APEL packages @@ -34,7 +34,7 @@ apel-lib provides required libraries for the rest of APEL system. %package parsers Summary: Parsers for APEL system Group: Development/Languages -Requires: apel-lib >= 1.6.1 +Requires: apel-lib >= 1.6.2 Requires(pre): shadow-utils %description parsers @@ -44,7 +44,7 @@ supported by the APEL system: Torque, SGE and LSF. %package client Summary: APEL client package Group: Development/Languages -Requires: apel-lib >= 1.6.1, apel-ssm +Requires: apel-lib >= 1.6.2, apel-ssm Requires(pre): shadow-utils %description client @@ -55,7 +55,7 @@ SSM. 
%package server Summary: APEL server package Group: Development/Languages -Requires: apel-lib >= 1.6.1, apel-ssm +Requires: apel-lib >= 1.6.2, apel-ssm Requires(pre): shadow-utils %description server @@ -109,8 +109,8 @@ cp schemas/server-extra.sql %{buildroot}%_datadir/apel/ cp schemas/cloud.sql %{buildroot}%_datadir/apel/ cp schemas/storage.sql %{buildroot}%_datadir/apel/ -cp scripts/update-1.5.1-1.6.0.sql %{buildroot}%_datadir/apel/ -cp scripts/update-1.6.0-1.6.1.sql %{buildroot}%_datadir/apel/ +# All update scripts matched by wildcard +cp scripts/update-*.sql %{buildroot}%_datadir/apel/ # accounting scripts cp scripts/slurm_acc.sh %{buildroot}%_datadir/apel/ @@ -174,8 +174,8 @@ exit 0 %_datadir/apel/server-extra.sql %_datadir/apel/cloud.sql %_datadir/apel/storage.sql -%_datadir/apel/update-1.5.1-1.6.0.sql -%_datadir/apel/update-1.6.0-1.6.1.sql +# Include all update scripts by wildcard matching +%_datadir/apel/update-*.sql %attr(755,root,root) %_datadir/apel/msg_status.py %exclude %_datadir/apel/msg_status.pyc @@ -199,8 +199,15 @@ exit 0 # ============================================================================== %changelog + * Mon Apr 16 2018 Adrian Coveney - 1.6.2-1 + - [parsers] Added remaining job statuses for SLURM that indicate the job has + stopped and that resources have been used. + - [server] Fix CpuCount being NULL in cloud accounting records and leading to + warnings when summarising. + - [docs] Remove references to specific LSF versions as all now allowed. + * Thu Dec 14 2017 Adrian Coveney - 1.6.1-1 - - [Parsers] Removed version restriction from LSF parser so that it can + - [parsers] Removed version restriction from LSF parser so that it can additionally work with version 10 onwards. - Added more columns to cloud summaries primary key to prevent mis-grouping. - Added Python setup script to enable installation on non-RHEL-based systems. 
diff --git a/apel/__init__.py b/apel/__init__.py index 675aa075..b53aabc5 100644 --- a/apel/__init__.py +++ b/apel/__init__.py @@ -15,4 +15,4 @@ @author Konrad Jopek, Will Rogers ''' -__version__ = (1, 6, 1) +__version__ = (1, 6, 2) diff --git a/apel/db/records/cloud.py b/apel/db/records/cloud.py index ed153d69..d25f49f8 100644 --- a/apel/db/records/cloud.py +++ b/apel/db/records/cloud.py @@ -1,123 +1,132 @@ -''' - Copyright (C) 2011 STFC - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -@author Will Rogers -''' - -from apel.db.records import Record, InvalidRecordException -from apel.common import parse_fqan -from datetime import datetime, timedelta - - -class CloudRecord(Record): - ''' - Class to represent one cloud record. - - It knows about the structure of the MySQL table and the message format. - It stores its information in a dictionary self._record_content. The keys - are in the same format as in the messages, and are case-sensitive. - ''' - def __init__(self): - '''Provide the necessary lists containing message information.''' - - Record.__init__(self) - - # Fields which are required by the message format. - self._mandatory_fields = ["VMUUID", "SiteName"] - - # This list allows us to specify the order of lines when we construct records. 
- self._msg_fields = ["VMUUID", "SiteName", "CloudComputeService", "MachineName", - "LocalUserId", "LocalGroupId", "GlobalUserName", "FQAN", - "Status", "StartTime", "EndTime", "SuspendDuration", - "WallDuration", "CpuDuration", "CpuCount", - "NetworkType", "NetworkInbound", "NetworkOutbound", "PublicIPCount", - "Memory", "Disk", "BenchmarkType", "Benchmark", - "StorageRecordId", "ImageId", "CloudType"] - - # This list specifies the information that goes in the database. - self._db_fields = self._msg_fields[:8] + ['VO', 'VOGroup', 'VORole'] + self._msg_fields[8:] - self._all_fields = self._db_fields - - self._ignored_fields = ["UpdateTime"] - - # Fields which will have an integer stored in them - self._int_fields = [ "SuspendDuration", "WallDuration", "CpuDuration", "CpuCount", - "NetworkInbound", "NetworkOutbound", "PublicIPCount", "Memory", "Disk"] - - self._float_fields = ['Benchmark'] - self._datetime_fields = ["StartTime", "EndTime"] - - def _check_fields(self): - ''' - Add extra checks to those made in every record. - ''' - # First, call the parent's version. - Record._check_fields(self) - - # Extract the relevant information from the user fqan. - # Keep the fqan itself as other methods in the class use it. - role, group, vo = parse_fqan(self._record_content['FQAN']) - # We can't / don't put NULL in the database, so we use 'None' - if role is None: - role = 'None' - if group is None: - group = 'None' - if vo is None: - vo = 'None' - - if self._record_content['Benchmark'] is None: - # If Benchmark is not present in the original record the - # parent Record class level type checking will set it to - # None. We can't pass None as a Benchmark as the field is - # NOT NULL in the database, so we set it to something - # meaningful. In this case the float 0.0. 
- self._record_content['Benchmark'] = 0.0 - - - self._record_content['VORole'] = role - self._record_content['VOGroup'] = group - self._record_content['VO'] = vo - - # Check the values of StartTime and EndTime - # self._check_start_end_times() - - - def _check_start_end_times(self): - '''Checks the values of StartTime and EndTime in _record_content. - StartTime should be less than or equal to EndTime. - Neither StartTime or EndTime should be zero. - EndTime should not be in the future. - - This is merely factored out for simplicity. - ''' - try: - start = int(self._record_content['StartTime']) - end = int(self._record_content['EndTime']) - if end < start: - raise InvalidRecordException("EndTime is before StartTime.") - - if start == 0 or end == 0: - raise InvalidRecordException("Epoch times StartTime and EndTime mustn't be 0.") - - now = datetime.now() - # add two days to prevent timezone problems - tomorrow = now + timedelta(2) - if datetime.fromtimestamp(end) > tomorrow: - raise InvalidRecordException("Epoch time " + str(end) + " is in the future.") - - except ValueError: - raise InvalidRecordException("Cannot parse an integer from StartTime or EndTime.") - - +''' + Copyright (C) 2011 STFC + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +@author Will Rogers +''' + +from apel.db.records import Record, InvalidRecordException +from apel.common import parse_fqan +from datetime import datetime, timedelta + + +class CloudRecord(Record): + ''' + Class to represent one cloud record. 
+ + It knows about the structure of the MySQL table and the message format. + It stores its information in a dictionary self._record_content. The keys + are in the same format as in the messages, and are case-sensitive. + ''' + def __init__(self): + '''Provide the necessary lists containing message information.''' + + Record.__init__(self) + + # Fields which are required by the message format. + self._mandatory_fields = ["VMUUID", "SiteName"] + + # This list allows us to specify the order of lines when we construct records. + self._msg_fields = ["VMUUID", "SiteName", "CloudComputeService", "MachineName", + "LocalUserId", "LocalGroupId", "GlobalUserName", "FQAN", + "Status", "StartTime", "EndTime", "SuspendDuration", + "WallDuration", "CpuDuration", "CpuCount", + "NetworkType", "NetworkInbound", "NetworkOutbound", "PublicIPCount", + "Memory", "Disk", "BenchmarkType", "Benchmark", + "StorageRecordId", "ImageId", "CloudType"] + + # This list specifies the information that goes in the database. + self._db_fields = self._msg_fields[:8] + ['VO', 'VOGroup', 'VORole'] + self._msg_fields[8:] + self._all_fields = self._db_fields + + self._ignored_fields = ["UpdateTime"] + + # Fields which will have an integer stored in them + self._int_fields = [ "SuspendDuration", "WallDuration", "CpuDuration", "CpuCount", + "NetworkInbound", "NetworkOutbound", "PublicIPCount", "Memory", "Disk"] + + self._float_fields = ['Benchmark'] + self._datetime_fields = ["StartTime", "EndTime"] + + def _check_fields(self): + ''' + Add extra checks to those made in every record. + ''' + # First, call the parent's version. + Record._check_fields(self) + + # Extract the relevant information from the user fqan. + # Keep the fqan itself as other methods in the class use it. 
+ role, group, vo = parse_fqan(self._record_content['FQAN']) + # We can't / don't put NULL in the database, so we use 'None' + if role is None: + role = 'None' + if group is None: + group = 'None' + if vo is None: + vo = 'None' + + if self._record_content['Benchmark'] is None: + # If Benchmark is not present in the original record the + # parent Record class level type checking will set it to + # None. We can't pass None as a Benchmark as the field is + # NOT NULL in the database, so we set it to something + # meaningful. In this case the float 0.0. + self._record_content['Benchmark'] = 0.0 + + + self._record_content['VORole'] = role + self._record_content['VOGroup'] = group + self._record_content['VO'] = vo + + # If the message was missing a CpuCount, assume it used + # zero Cpus, to prevent a NULL being written into the column + # in the CloudRecords tables. + # Doing so would be a problem despite the CloudRecords + # table allowing it because the CloudSummaries table + # doesn't allow it, creating a problem at summariser time. + if self._record_content['CpuCount'] is None: + self._record_content['CpuCount'] = 0 + + # Check the values of StartTime and EndTime + # self._check_start_end_times() + + + def _check_start_end_times(self): + '''Checks the values of StartTime and EndTime in _record_content. + StartTime should be less than or equal to EndTime. + Neither StartTime or EndTime should be zero. + EndTime should not be in the future. + + This is merely factored out for simplicity. 
+ ''' + try: + start = int(self._record_content['StartTime']) + end = int(self._record_content['EndTime']) + if end < start: + raise InvalidRecordException("EndTime is before StartTime.") + + if start == 0 or end == 0: + raise InvalidRecordException("Epoch times StartTime and EndTime mustn't be 0.") + + now = datetime.now() + # add two days to prevent timezone problems + tomorrow = now + timedelta(2) + if datetime.fromtimestamp(end) > tomorrow: + raise InvalidRecordException("Epoch time " + str(end) + " is in the future.") + + except ValueError: + raise InvalidRecordException("Cannot parse an integer from StartTime or EndTime.") + + diff --git a/apel/db/records/cloud_summary.py b/apel/db/records/cloud_summary.py index 132bc28f..83875bfd 100644 --- a/apel/db/records/cloud_summary.py +++ b/apel/db/records/cloud_summary.py @@ -1,60 +1,60 @@ -''' - Copyright (C) 2013 STFC - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -@author Will Rogers -''' - -from apel.db.records import Record - - -class CloudSummaryRecord(Record): - ''' - Class to represent one cloud summary record. - - It knows about the structure of the MySQL table and the message format. - It stores its information in a dictionary self._record_content. The keys - are in the same format as in the messages, and are case-sensitive. - ''' - def __init__(self): - '''Provide the necessary lists containing message information.''' - - Record.__init__(self) - - # Fields which are required by the message format. 
- self._mandatory_fields = ['SiteName', 'Month', 'Year', 'NumberOfVMs'] - - # This list allows us to specify the order of lines when we construct records. - self._msg_fields = ['SiteName', 'CloudComputeService', 'Month', 'Year', - 'GlobalUserName', 'VO', 'VOGroup', 'VORole', - 'Status', 'CloudType', 'ImageId', - 'EarliestStartTime', 'LatestStartTime', - 'WallDuration', 'CpuDuration', 'CpuCount', - 'NetworkInbound', 'NetworkOutbound', - 'Memory', 'Disk', - 'BenchmarkType', 'Benchmark', 'NumberOfVMs'] - - # This list specifies the information that goes in the database. - self._db_fields = self._msg_fields - self._all_fields = self._db_fields - - self._ignored_fields = ['UpdateTime'] - - # Fields which will have an integer stored in them - self._int_fields = ['Month', 'Year', 'WallDuration', 'CpuDuration', - 'CpuCount', 'NetworkInbound', 'NetworkOutbound', - 'Memory', 'Disk', 'NumberOfVMs'] - - self._float_fields = ['Benchmark'] - self._datetime_fields = ['EarliestStartTime', 'LatestStartTime'] +''' + Copyright (C) 2013 STFC + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +@author Will Rogers +''' + +from apel.db.records import Record + + +class CloudSummaryRecord(Record): + ''' + Class to represent one cloud summary record. + + It knows about the structure of the MySQL table and the message format. + It stores its information in a dictionary self._record_content. The keys + are in the same format as in the messages, and are case-sensitive. 
+ ''' + def __init__(self): + '''Provide the necessary lists containing message information.''' + + Record.__init__(self) + + # Fields which are required by the message format. + self._mandatory_fields = ['SiteName', 'Month', 'Year', 'NumberOfVMs'] + + # This list allows us to specify the order of lines when we construct records. + self._msg_fields = ['SiteName', 'CloudComputeService', 'Month', 'Year', + 'GlobalUserName', 'VO', 'VOGroup', 'VORole', + 'Status', 'CloudType', 'ImageId', + 'EarliestStartTime', 'LatestStartTime', + 'WallDuration', 'CpuDuration', 'CpuCount', + 'NetworkInbound', 'NetworkOutbound', + 'Memory', 'Disk', + 'BenchmarkType', 'Benchmark', 'NumberOfVMs'] + + # This list specifies the information that goes in the database. + self._db_fields = self._msg_fields + self._all_fields = self._db_fields + + self._ignored_fields = ['UpdateTime'] + + # Fields which will have an integer stored in them + self._int_fields = ['Month', 'Year', 'WallDuration', 'CpuDuration', + 'CpuCount', 'NetworkInbound', 'NetworkOutbound', + 'Memory', 'Disk', 'NumberOfVMs'] + + self._float_fields = ['Benchmark'] + self._datetime_fields = ['EarliestStartTime', 'LatestStartTime'] diff --git a/apel/parsers/lsf.py b/apel/parsers/lsf.py index acbe0b36..13bfa1da 100644 --- a/apel/parsers/lsf.py +++ b/apel/parsers/lsf.py @@ -29,12 +29,7 @@ class LSFParser(Parser): ''' - LSFParser parses LSF accounting logs from versions: - - 5.x - - 6.x - - 7.x - - 8.x - - 9.x + LSFParser parses LSF accounting logs from all LSF versions. The expression below describes elements which we are looking for. Here is some explanation: diff --git a/apel/parsers/slurm.py b/apel/parsers/slurm.py index 5d18410c..92b6ba2c 100644 --- a/apel/parsers/slurm.py +++ b/apel/parsers/slurm.py @@ -58,7 +58,9 @@ def parse(self, line): # log.info('line: %s' % (line)); values = line.strip().split('|') - if values[14] != 'COMPLETED': + # These statuses indicate the job has stopped and resources were used. 
+ if values[14] not in ('CANCELLED', 'COMPLETED', 'FAILED', + 'NODE_FAIL', 'PREEMPTED', 'TIMEOUT'): return None rmem = self._normalise_memory(values[12]) @@ -96,9 +98,7 @@ def parse(self, line): # Input checking if rc['CpuDuration'] < 0: raise ValueError('Negative CpuDuration value') - - if rc['WallDuration'] < 0: - raise ValueError('Negative WallDuration value') + # No negative WallDuration test as parse_time prevents that. if rc['StopTime'] < rc['StartTime']: raise ValueError('StopTime less than StartTime') diff --git a/bin/__init__.py b/bin/__init__.py index b0a8dfdd..3fb06e7f 100644 --- a/bin/__init__.py +++ b/bin/__init__.py @@ -1 +1 @@ -# This file exists so that the tests can import modules from the bin directory +# This file exists so that the tests can import modules from the bin directory diff --git a/bin/parser.py b/bin/parser.py index 2c328ef3..4b3ddef2 100644 --- a/bin/parser.py +++ b/bin/parser.py @@ -18,7 +18,7 @@ # - BLAH # - PBS # - SGE -# - LSF (5.x to 9.x) +# - LSF ''' @author: Konrad Jopek, Will Rogers diff --git a/conf/logging.cfg b/conf/logging.cfg index 773b5aa4..d9830569 100644 --- a/conf/logging.cfg +++ b/conf/logging.cfg @@ -1,48 +1,48 @@ -[loggers] -keys=root,loader,SSM,apeldb,apelldap - -[handlers] -keys=consoleHandler,fileHandler - -[formatters] -keys=simpleFormatter - -[logger_root] -level=INFO -handlers=consoleHandler,fileHandler - -[logger_loader] -qualname=loader -level=INFO -handlers=consoleHandler,fileHandler - -[logger_apelldap] -qualname=apelldap -level=INFO -handlers=consoleHandler,fileHandler - -[logger_SSM] -qualname=SSM -level=INFO -handlers=consoleHandler,fileHandler - -[logger_apeldb] -qualname=apeldb -level=INFO -handlers=consoleHandler,fileHandler - -[handler_fileHandler] -class=FileHandler -level=INFO -formatter=simpleFormatter -args=('/var/log/apel/apel.log', 'a') - -[handler_consoleHandler] -class=StreamHandler -level=INFO -formatter=simpleFormatter -args=(sys.stdout,) - -[formatter_simpleFormatter] 
-format=%(asctime)s - %(name)s - %(levelname)s - %(message)s -datefmt= +[loggers] +keys=root,loader,SSM,apeldb,apelldap + +[handlers] +keys=consoleHandler,fileHandler + +[formatters] +keys=simpleFormatter + +[logger_root] +level=INFO +handlers=consoleHandler,fileHandler + +[logger_loader] +qualname=loader +level=INFO +handlers=consoleHandler,fileHandler + +[logger_apelldap] +qualname=apelldap +level=INFO +handlers=consoleHandler,fileHandler + +[logger_SSM] +qualname=SSM +level=INFO +handlers=consoleHandler,fileHandler + +[logger_apeldb] +qualname=apeldb +level=INFO +handlers=consoleHandler,fileHandler + +[handler_fileHandler] +class=FileHandler +level=INFO +formatter=simpleFormatter +args=('/var/log/apel/apel.log', 'a') + +[handler_consoleHandler] +class=StreamHandler +level=INFO +formatter=simpleFormatter +args=(sys.stdout,) + +[formatter_simpleFormatter] +format=%(asctime)s - %(name)s - %(levelname)s - %(message)s +datefmt= diff --git a/scripts/apel-build-rpm b/scripts/apel-build-rpm index 8706f232..f31d93c7 100755 --- a/scripts/apel-build-rpm +++ b/scripts/apel-build-rpm @@ -10,7 +10,7 @@ rpmdev-setuptree RPMDIR=/home/rpmb/rpmbuild -VERSION=1.6.1-1 +VERSION=1.6.2-1 APELDIR=apel-$VERSION # Remove old sources and RPMS diff --git a/scripts/update-1.6.1-1.6.2.sql b/scripts/update-1.6.1-1.6.2.sql new file mode 100644 index 00000000..36f8572c --- /dev/null +++ b/scripts/update-1.6.1-1.6.2.sql @@ -0,0 +1,17 @@ +-- This script contains multiple comment blocks that can update +-- APEL version 1.6.1 databases of the following types to 1.6.2: +-- - Cloud Accounting Database + +-- UPDATE SCRIPT FOR CLOUD SCHEMA + +-- If you have a Cloud Accounting Database and wish to +-- upgrade to APEL Version next, remove the block comment +-- symbols around this section and run this script + +-- This section will: +-- - Change records with a NULL CpuCount so that they have a CpuCount of 0, +-- to prevent problems at summarising time. 
+ +UPDATE CloudRecords SET + CpuCount=0 + WHERE CpuCount is NULL; diff --git a/test/test_cloud_record.py b/test/test_cloud_record.py index 33863e17..11e7f9d0 100644 --- a/test/test_cloud_record.py +++ b/test/test_cloud_record.py @@ -168,10 +168,50 @@ def setUp(self): 'Benchmark': 0.0, 'ImageId': '\'scilin6\'', 'CloudType': 'OpenNebula'} + + self._msg4 = ''' +BenchmarkType: HEPSPEC06 +Status: completed +SiteName: Test Site +MachineName: Test Machine +ImageId: Test Image ID +LocalUserId: Test Local User ID +FQAN: NULL +LocalGroupId: Test Local Group ID +VMUUID: Test VM ID +CloudType: caso/0.3.4 (OpenStack) +GlobalUserName: Test User +CloudComputeService: Test Service''' + + self._values4 = {'VMUUID': 'Test VM ID', + 'SiteName': 'Test Site', + 'CloudComputeService': 'Test Service', + 'MachineName': 'Test Machine', + 'LocalUserId': 'Test Local User ID', + 'LocalGroupId': 'Test Local Group ID', + 'GlobalUserName': 'Test User', + 'FQAN': 'None', + 'Status': 'completed', + 'StartTime': None, + 'EndTime': None, + 'SuspendDuration': None, + 'WallDuration': None, + 'CpuDuration': None, + 'CpuCount': 0, + 'NetworkType': 'None', + 'NetworkInbound': None, + 'NetworkOutbound': None, + 'Memory': None, + 'Disk': None, + 'StorageRecordId': 'None', + 'ImageId': 'Test Image ID', + 'CloudType': 'caso/0.3.4 (OpenStack)'} + + self.cases = {} self.cases[self._msg1] = self._values1 self.cases[self._msg2] = self._values2 self.cases[self._msg3] = self._values3 + self.cases[self._msg4] = self._values4 def test_load_from_msg_value_check(self): """Check for correct values in CloudRecords generated from messages.""" diff --git a/test/test_mysql.py b/test/test_mysql.py index 37743415..8de0eb02 100644 --- a/test/test_mysql.py +++ b/test/test_mysql.py @@ -109,11 +109,16 @@ def test_load_and_get_cloud(self): cloud4_nb = apel.db.records.cloud.CloudRecord() cloud4_nb.load_from_msg(CLOUD4_NULL_BENCHMARKS) + # Test a Cloud V0.4 Record with missing fields + cloud4_mf = apel.db.records.cloud.CloudRecord() + 
cloud4_mf.load_from_msg(CLOUD4_MISSING_FIELDS) + items_in = cloud2._record_content.items() items_in += cloud4._record_content.items() items_in += cloud4_nb._record_content.items() + items_in += cloud4_mf._record_content.items() - record_list = [cloud2, cloud4, cloud4_nb] + record_list = [cloud2, cloud4, cloud4_nb, cloud4_mf] # load_records changes the 'cloud' cloud record as it calls _check_fields # which adds placeholders to empty fields @@ -290,5 +295,24 @@ def test_last_update(self): CloudType: Cloud Technology 2 ''' +# A Cloud V0.4 Record, but missing a lot of fields. +# We need to check we can load this record as we receive +# messages with records like this and currently +# load them. +CLOUD4_MISSING_FIELDS = ''' +BenchmarkType: HEPSPEC06 +Status: completed +SiteName: Test Site +MachineName: Test Machine +ImageId: Test Image ID +LocalUserId: Test Local User ID +FQAN: NULL +LocalGroupId: Test Local Group ID +VMUUID: Test VM ID +CloudType: caso/0.3.4 (OpenStack) +GlobalUserName: Test User +CloudComputeService: Test Service +''' + if __name__ == '__main__': unittest.main() diff --git a/test/test_slurm.py b/test/test_slurm.py index 8c2c0644..a17ace3c 100644 --- a/test/test_slurm.py +++ b/test/test_slurm.py @@ -36,7 +36,6 @@ def test_parse_line(self): '324554.batch|batch|||2013-10-28T04:28:30|2013-10-28T04:28:33|00:00:03|3||1|1|wn65|0|0|COMPLETED', # Zero memory '324554.batch|batch|||2013-10-28T04:28:30|2013-10-28T04:28:33|00:00:03|3||1|1|wn65|20H|54J|COMPLETED', # Invalid unit prefix '324554.batch|batch|||2013-10-28T04:28:33|2013-10-28T04:28:30|00:00:03|3||1|1|wn65|28K|80K|COMPLETED', # StopTime < StartTime - '297720.batch|batch|||2013-10-25T12:11:20|2013-10-25T12:11:36|-00:00:16|16||1|1|wn37|3228K|23820K|COMPLETED', # -ve WallDuration '297720.batch|batch|||2013-10-25T12:11:20|2013-10-25T12:11:36|00:00:16|-16||1|1|wn37|3228K|23820K|COMPLETED',) # -ve CpuDuration # Examples for correct lines @@ -45,7 +44,8 @@ def test_parse_line(self): 
('278952.batch|batch|||2013-10-23T21:37:24|2013-10-25T00:01:37|1-02:24:13|95053||1|1|wn36|438.50M|1567524K|COMPLETED'), ('297720.batch|batch|||2013-10-25T12:11:20|2013-10-25T12:11:36|00:00:16|16||1|1|wn37|3228K|23820K|COMPLETED'), ('321439.batch|batch|||2013-10-27T17:09:35|2013-10-28T04:47:20|11:37:45|41865||1|1|wn16|770728K|1.40G|COMPLETED'), - ('320816.batch|batch|||2013-10-27T14:56:03|2013-10-28T05:03:50|14:07:47|50867||1|1|wn33|1325232K|2.22G|COMPLETED'),) + ('320816.batch|batch|||2013-10-27T14:56:03|2013-10-28T05:03:50|14:07:47|50867||1|1|wn33|1325232K|2.22G|COMPLETED'), + ) values = ( ('1000', 'dteam005', 'dteam', 2, 2, @@ -67,7 +67,8 @@ def test_parse_line(self): ('320816.batch', None, None, 50867, 50867, datetime.utcfromtimestamp(mktime((2013, 10, 27, 14, 56, 3, 0, 1, -1))), datetime.utcfromtimestamp(mktime((2013, 10, 28, 5, 3, 50, 0, 1, -1))), - None, 1325232, int(2.22*1024*1024), 1, 1)) + None, 1325232, int(2.22*1024*1024), 1, 1), + ) cases = {} for line, value in zip(lines, values): @@ -94,5 +95,34 @@ def test_parse_line(self): for line in value_fails: self.assertRaises(ValueError, self.parser.parse, line) + def test_job_status(self): + """Check that the right statuses are accepted.""" + accepted = ( # These job statuses are all described as "terminated". + '123|batch|||2013-10-25T12:11:20|2013-10-25T12:11:36|00:00:16|16||1|1|wn|28K|20K|CANCELLED', + '123|batch|||2013-10-25T12:11:20|2013-10-25T12:11:36|00:00:16|16||1|1|wn|28K|20K|COMPLETED', + '123|batch|||2013-10-25T12:11:20|2013-10-25T12:11:36|00:00:16|16||1|1|wn|28K|20K|FAILED', + '123|batch|||2013-10-25T12:11:20|2013-10-25T12:11:36|00:00:16|16||1|1|wn|28K|20K|NODE_FAIL', + '123|batch|||2013-10-25T12:11:20|2013-10-25T12:11:36|00:00:16|16||1|1|wn|28K|20K|PREEMPTED', + '123|batch|||2013-10-25T12:11:20|2013-10-25T12:11:36|00:00:16|16||1|1|wn|28K|20K|TIMEOUT', + ) + + rejected = ( # These jobs would be unstarted or still running. 
+ '123|batch|||2013-10-25T12:11:20|2013-10-25T12:11:36|00:00:16|16||1|1|wn|28K|20K|BOOT_FAIL', + '123|batch|||2013-10-25T12:11:20|2013-10-25T12:11:36|00:00:16|16||1|1|wn|28K|20K|CONFIGURING', + '123|batch|||2013-10-25T12:11:20|2013-10-25T12:11:36|00:00:16|16||1|1|wn|28K|20K|COMPLETING', + '123|batch|||2013-10-25T12:11:20|2013-10-25T12:11:36|00:00:16|16||1|1|wn|28K|20K|DEADLINE', + '123|batch|||2013-10-25T12:11:20|2013-10-25T12:11:36|00:00:16|16||1|1|wn|28K|20K|PENDING', + '123|batch|||2013-10-25T12:11:20|2013-10-25T12:11:36|00:00:16|16||1|1|wn|28K|20K|RUNNING', + '123|batch|||2013-10-25T12:11:20|2013-10-25T12:11:36|00:00:16|16||1|1|wn|28K|20K|RESIZING', + '123|batch|||2013-10-25T12:11:20|2013-10-25T12:11:36|00:00:16|16||1|1|wn|28K|20K|SUSPENDED', + ) + for line in accepted: + self.assertNotEqual(self.parser.parse(line), None, + "Line incorrectly rejected: %s" % line) + + for line in rejected: + self.assertEqual(self.parser.parse(line), None, + "Line incorrectly accepted: %s" % line) + if __name__ == '__main__': unittest.main()