test_utils.py
# Copyright 2018 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing base class for implementing charm tests."""
import contextlib
import logging
import subprocess
import sys
import tenacity
import unittest
import yaml
import novaclient
import zaza.model as model
import zaza.charm_lifecycle.utils as lifecycle_utils
import zaza.openstack.configure.guest as configure_guest
import zaza.openstack.utilities.openstack as openstack_utils
import zaza.openstack.utilities.generic as generic_utils
import zaza.openstack.charm_tests.glance.setup as glance_setup
import zaza.utilities.juju
import zaza.utilities.machine_os
def skipIfNotHA(service_name):
"""Run decorator to skip tests if application not in HA configuration."""
def _skipIfNotHA_inner_1(f):
def _skipIfNotHA_inner_2(*args, **kwargs):
ips = model.get_app_ips(
service_name)
if len(ips) > 1:
return f(*args, **kwargs)
else:
logging.warning("Skipping HA test for non-ha service {}".format(
service_name))
return _skipIfNotHA_inner_2
return _skipIfNotHA_inner_1
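# Illustrative usage sketch (the application name and test method below are
# hypothetical, not part of this module): the decorator wraps a test method
# so that it only runs when the named application has more than one unit.
#
#     @skipIfNotHA('keystone')
#     def test_leader_failover(self):
#         ...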
def skipUntilVersion(service, package, release):
"""Run decorator to skip this test if application version is too low."""
def _skipUntilVersion_inner_1(f):
def _skipUntilVersion_inner_2(*args, **kwargs):
package_version = generic_utils.get_pkg_version(service, package)
try:
subprocess.check_call(['dpkg', '--compare-versions',
package_version, 'ge', release],
stderr=subprocess.STDOUT,
universal_newlines=True)
return f(*args, **kwargs)
except subprocess.CalledProcessError:
logging.warning("Skipping test for older ({}) "
"service {}, requested {}".format(
package_version, service, release))
return _skipUntilVersion_inner_2
return _skipUntilVersion_inner_1
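# Illustrative usage sketch (hypothetical service, package and version): the
# wrapped test only runs when the installed package version compares greater
# than or equal to the requested release, as determined by dpkg.
#
#     @skipUntilVersion('cinder', 'cinder-common', '2:17.0.0')
#     def test_new_feature(self):
#         ...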
def audit_assertions(action,
expected_passes,
expected_failures=None,
expected_to_pass=True):
"""Check expected assertion failures in security-checklist actions.
:param action: Action object from running the security-checklist action
:type action: juju.action.Action
:param expected_passes: List of test names that are expected to pass
:type expected_passes: List[str]
:param expected_failures: List of test names that are expected to fail
:type expected_failures: List[str]
:param expected_to_pass: Whether the security-checklist action itself is
expected to complete successfully.
:type expected_to_pass: bool
:raises: AssertionError if the assertion fails.
"""
if expected_failures is None:
expected_failures = []
if expected_to_pass:
assert action.data["status"] == "completed", \
"Security check is expected to pass by default"
else:
assert action.data["status"] == "failed", \
"Security check is not expected to pass by default"
results = action.data['results']
for key, value in results.items():
if key in expected_failures:
assert "FAIL" in value, "Unexpected test pass: {}".format(key)
if key in expected_passes:
assert value == "PASS", "Unexpected failure: {}".format(key)
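# Illustrative usage sketch (the unit and check names are hypothetical; the
# action name follows the security-checklist convention described above):
# run the charm's security-checklist action and assert on per-check results.
#
#     action = model.run_action(unit, 'security-checklist')
#     audit_assertions(
#         action,
#         expected_passes=['validate-uses-tls-for-keystone'],
#         expected_failures=['validate-uses-tls-for-glance'],
#         expected_to_pass=False)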
class BaseCharmTest(unittest.TestCase):
"""Generic helpers for testing charms."""
run_resource_cleanup = False
def resource_cleanup(self):
"""Cleanup any resources created during the test run.
Override this method with a method which removes any resources
which were created during the test run. If the test sets
"self.run_resource_cleanup = True" then cleanup will be
performed.
"""
pass
# Note: this must be an instance method, otherwise descendants will not be
# able to influence whether cleanup should be run.
def tearDown(self):
"""Run teardown for test class."""
if self.run_resource_cleanup:
logging.info('Running resource cleanup')
self.resource_cleanup()
@classmethod
def setUpClass(cls, application_name=None, model_alias=None):
"""Run setup for test class to create common resources.
Note: the derived class may not use the application_name; if it's set
to None then this setUpClass() method will attempt to extract the
application name from the charm_config (essentially the test.yaml)
using the key 'charm_name' in the test_config. If that isn't present,
then there will be no application_name set, and this is considered a
generic scenario of a whole model rather than a particular charm under
test.
:param application_name: the name of the applications that the derived
class is testing. If None, then it's a generic test not connected
to any single charm.
:type application_name: Optional[str]
:param model_alias: the alias to use if needed.
:type model_alias: Optional[str]
"""
cls.model_aliases = model.get_juju_model_aliases()
if model_alias:
cls.model_name = cls.model_aliases[model_alias]
else:
cls.model_name = model.get_juju_model()
cls.test_config = lifecycle_utils.get_charm_config(fatal=False)
if application_name:
cls.application_name = application_name
else:
try:
charm_under_test_name = cls.test_config['charm_name']
except KeyError:
logging.warning("No application_name and no charm config so "
"not setting the application_name. Likely a "
"scenario test.")
return
deployed_app_names = model.sync_deployed(model_name=cls.model_name)
if charm_under_test_name in deployed_app_names:
# There is an application named like the charm under test.
# Let's consider it the application under test:
cls.application_name = charm_under_test_name
else:
# Let's search for any application whose name starts with the
# name of the charm under test and assume it's the application
# under test:
for app_name in deployed_app_names:
if app_name.startswith(charm_under_test_name):
cls.application_name = app_name
break
else:
logging.warning('Could not find application under test')
return
cls.lead_unit = model.get_lead_unit_name(
cls.application_name,
model_name=cls.model_name)
logging.debug('Leader unit is {}'.format(cls.lead_unit))
def config_current_separate_non_string_type_keys(
self, non_string_type_keys, config_keys=None,
application_name=None):
"""Obtain current config and the non-string type config separately.
If a charm config option is not a string type, "config_change()" cannot
revert it when its current value is None. Therefore, obtain the current
config and separate those keys out, so they can be used in a separate
invocation of "config_change()" with reset_to_charm_default set to True.
:param config_keys: iterable of strs to index into the current config.
If None, return all keys from the config
:type config_keys: Optional[Iterable[str]]
:param non_string_type_keys: list of non-string type keys to be
separated out only if their current value
is None
:type non_string_type_keys: list
:param application_name: String application name for use when called
by a charm under test other than the object's
application.
:type application_name: Optional[str]
:return: Dictionary of current charm configs without the
non-string type keys provided, and dictionary of the
non-string keys found in the supplied config_keys list.
:rtype: Dict[str, Any], Dict[str, None]
"""
current_config = self.config_current(application_name, config_keys)
non_string_type_config = {}
if config_keys is None:
config_keys = list(current_config.keys())
for key in config_keys:
# We only care if the current value is None, otherwise it will
# not face issues being reverted by "config_change()"
if key in non_string_type_keys and current_config[key] is None:
non_string_type_config[key] = None
current_config.pop(key)
return current_config, non_string_type_config
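# Illustrative usage sketch (the option names are hypothetical): non-string
# options whose current value is None are reverted in a separate
# config_change() invocation with reset_to_charm_default=True, while the
# remaining options are restored to their captured values.
#
#     current, non_string = self.config_current_separate_non_string_type_keys(
#         ['haproxy-queue-timeout'])
#     with self.config_change(current, alternate_config):
#         with self.config_change(non_string, non_string,
#                                 reset_to_charm_default=True):
#             do_something()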
def config_current(self, application_name=None, keys=None):
"""Get current config of an application normalized into key-value pairs.
:param application_name: String application name for use when called
by a charm under test other than the object's
application.
:type application_name: Optional[str]
:param keys: iterable of strs to index into the current config. If
None, return all keys from the config
:type keys: Optional[Iterable[str]]
:return: Dictionary of requested config from application
:rtype: Dict[str, Any]
"""
if not application_name:
application_name = self.application_name
_app_config = model.get_application_config(application_name)
keys = keys or _app_config.keys()
return {
k: _app_config.get(k, {}).get('value')
for k in keys
}
@staticmethod
def _stringed_value_config(config):
"""Stringify values in a dict.
Workaround:
libjuju refuses to accept data with types other than strings
through the zaza.model.set_application_config
:param config: Config dictionary with any typed values
:type config: Dict[str,Any]
:return: Config Dictionary with string-ly typed values
:rtype: Dict[str,str]
"""
# if v is None, stringify to ''
# otherwise use a strict cast with str(...)
return {
k: '' if v is None else str(v)
for k, v in config.items()
}
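# Small illustrative example of the stringification above (values are made
# up): {'debug': True, 'timeout': None} becomes {'debug': 'True', 'timeout': ''}.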
@contextlib.contextmanager
def config_change(self, default_config, alternate_config,
application_name=None, reset_to_charm_default=False):
"""Run change config tests.
Change config to `alternate_config`, wait for idle workload status,
yield, return config to `default_config` and wait for idle workload
status before return from function.
Example usage:
with self.config_change({'preferred-api-version': '2'},
{'preferred-api-version': '3'}):
do_something()
:param default_config: Dict of charm settings to set on completion
:type default_config: dict
:param alternate_config: Dict of charm settings to change to
:type alternate_config: dict
:param application_name: String application name for use when called
by a charm under test other than the object's
application.
:type application_name: str
:param reset_to_charm_default: When True we will ask Juju to reset each
configuration option mentioned in the
`alternate_config` dictionary back to
the charm default and ignore the
`default_config` dictionary.
:type reset_to_charm_default: bool
"""
if not application_name:
application_name = self.application_name
# we need to compare config values to what is already applied before
# attempting to set them. otherwise the model will behave differently
# than we would expect while waiting for completion of the change
app_config = self.config_current(
application_name, keys=alternate_config.keys()
)
if all(item in app_config.items()
for item in alternate_config.items()):
logging.debug('alternate_config equals what is already applied '
'config')
yield
if default_config == alternate_config:
logging.debug('default_config also equals what is already '
'applied config')
return
logging.debug('alternate_config already set, and default_config '
'needs to be applied before return')
else:
logging.debug('Changing charm setting to {}'
.format(alternate_config))
model.set_application_config(
application_name,
self._stringed_value_config(alternate_config),
model_name=self.model_name)
logging.debug(
'Waiting for units to execute config-changed hook')
model.wait_for_agent_status(model_name=self.model_name)
logging.debug(
'Waiting for units to reach target states')
model.wait_for_application_states(
model_name=self.model_name,
states=self.test_config.get('target_deploy_status', {}))
# TODO: Optimize with a block on a specific application until idle.
model.block_until_all_units_idle()
yield
if reset_to_charm_default:
logging.debug('Resetting these charm configuration options to the '
'charm default: "{}"'
.format(alternate_config.keys()))
model.reset_application_config(application_name,
list(alternate_config.keys()),
model_name=self.model_name)
elif default_config == alternate_config:
logging.debug('default_config == alternate_config, not attempting '
' to restore configuration')
return
else:
logging.debug('Restoring charm setting to {}'
.format(default_config))
model.set_application_config(
application_name,
self._stringed_value_config(default_config),
model_name=self.model_name)
logging.debug(
'Waiting for units to execute config-changed hook')
model.wait_for_agent_status(model_name=self.model_name)
logging.debug(
'Waiting for units to reach target states')
model.wait_for_application_states(
model_name=self.model_name,
states=self.test_config.get('target_deploy_status', {}))
# TODO: Optimize with a block on a specific application until idle.
model.block_until_all_units_idle()
def restart_on_changed_debug_oslo_config_file(self, config_file, services,
config_section='DEFAULT'):
"""Check restart happens on config change by flipping debug mode.
Change debug mode and assert that change propagates to the correct
file and that services are restarted as a result. config_file must be
an oslo config file and debug option must be set in the
`config_section` section.
:param config_file: OSLO Config file to check for settings
:type config_file: str
:param services: Services expected to be restarted when config_file is
changed.
:type services: list
:param config_section: Section of the config file where the debug option
is expected to be set.
:type config_section: str
"""
# Expected default and alternate values
current_value = model.get_application_config(
self.application_name)['debug']['value']
new_value = str(not bool(current_value)).title()
current_value = str(current_value).title()
set_default = {'debug': current_value}
set_alternate = {'debug': new_value}
default_entry = {config_section: {'debug': [current_value]}}
alternate_entry = {config_section: {'debug': [new_value]}}
# Make config change, check for service restarts
logging.info(
'Changing settings on {} to {}'.format(
self.application_name, set_alternate))
self.restart_on_changed(
config_file,
set_default,
set_alternate,
default_entry,
alternate_entry,
services)
def restart_on_changed(self, config_file, default_config, alternate_config,
default_entry, alternate_entry, services,
pgrep_full=False):
"""Run restart on change tests.
Test that changing config results in config file being updated and
services restarted. Return config to default_config afterwards
:param config_file: Config file to check for settings
:type config_file: str
:param default_config: Dict of charm settings to set on completion
:type default_config: dict
:param alternate_config: Dict of charm settings to change to
:type alternate_config: dict
:param default_entry: Config file entries that correspond to
default_config
:type default_entry: dict
:param alternate_entry: Config file entries that correspond to
alternate_config
:type alternate_entry: dict
:param services: Services expected to be restarted when config_file is
changed.
:type services: list
:param pgrep_full: Should pgrep be used rather than pidof to identify
a service.
:type pgrep_full: bool
"""
# lead_unit is only used to grab a timestamp, the assumption being
# that all the units' clocks are in sync.
mtime = model.get_unit_time(
self.lead_unit,
model_name=self.model_name)
logging.debug('Remote unit timestamp {}'.format(mtime))
with self.config_change(default_config, alternate_config):
# If this is not an oslo config file the caller should pass alternate_entry={}
if alternate_entry:
logging.debug(
'Waiting for updates to propagate to {}'
.format(config_file))
model.block_until_oslo_config_entries_match(
self.application_name,
config_file,
alternate_entry,
model_name=self.model_name)
else:
model.block_until_all_units_idle(model_name=self.model_name)
# Config update has occurred and hooks are idle. Any services should
# have been restarted by now:
logging.debug(
'Waiting for services ({}) to be restarted'.format(services))
model.block_until_services_restarted(
self.application_name,
mtime,
services,
model_name=self.model_name,
pgrep_full=pgrep_full)
# If this is not an oslo config file the caller should pass default_entry={}
if default_entry:
logging.debug(
'Waiting for updates to propagate to {}'.format(config_file))
model.block_until_oslo_config_entries_match(
self.application_name,
config_file,
default_entry,
model_name=self.model_name)
else:
model.block_until_all_units_idle(model_name=self.model_name)
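# Illustrative usage sketch (the config file, option and service names are
# hypothetical): assert that flipping an option rewrites the config file and
# restarts the listed services, then restores the default afterwards.
#
#     self.restart_on_changed(
#         '/etc/cinder/cinder.conf',
#         default_config={'debug': 'False'},
#         alternate_config={'debug': 'True'},
#         default_entry={'DEFAULT': {'debug': ['False']}},
#         alternate_entry={'DEFAULT': {'debug': ['True']}},
#         services=['cinder-volume'])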
@contextlib.contextmanager
def pause_resume(self, services, pgrep_full=False):
"""Run Pause and resume tests.
Pause and then resume a unit checking that services are in the
required state after each action
:param services: Services expected to be restarted when the unit is
paused/resumed.
:type services: list
:param pgrep_full: Should pgrep be used rather than pidof to identify
a service.
:type pgrep_full: bool
"""
model.block_until_service_status(
self.lead_unit,
services,
'running',
model_name=self.model_name,
pgrep_full=pgrep_full)
model.block_until_unit_wl_status(
self.lead_unit,
'active',
model_name=self.model_name)
generic_utils.assertActionRanOK(model.run_action(
self.lead_unit,
'pause',
model_name=self.model_name))
model.block_until_unit_wl_status(
self.lead_unit,
'maintenance',
model_name=self.model_name)
model.block_until_all_units_idle(model_name=self.model_name)
model.block_until_service_status(
self.lead_unit,
services,
'stopped',
model_name=self.model_name,
pgrep_full=pgrep_full)
yield
generic_utils.assertActionRanOK(model.run_action(
self.lead_unit,
'resume',
model_name=self.model_name))
model.block_until_unit_wl_status(
self.lead_unit,
'active',
model_name=self.model_name)
model.block_until_all_units_idle(model_name=self.model_name)
model.block_until_service_status(
self.lead_unit,
services,
'running',
model_name=self.model_name,
pgrep_full=pgrep_full)
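# Illustrative usage sketch (the service name is hypothetical): services are
# verified stopped while the lead unit is paused, and running again after the
# resume action completes.
#
#     with self.pause_resume(['apache2']):
#         # the lead unit is paused and the listed services are stopped here
#         pass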
def get_my_tests_options(self, key, default=None):
"""Retrieve tests_options for specific test.
Prefix for key is built from dot-notated absolute path to calling
method or function.
Example:
# In tests.yaml:
tests_options:
zaza.charm_tests.noop.tests.NoopTest.test_foo.key: true
# called from zaza.charm_tests.noop.tests.NoopTest.test_foo()
>>> get_my_tests_options('key')
True
:param key: Suffix for tests_options key.
:type key: str
:param default: Default value to return if key is not found.
:type default: any
:returns: Value associated with key in tests_options.
:rtype: any
"""
# note that we need to do this in-line, otherwise we would get the path
# to ourselves. We could create a common method that goes two frames
# back, but that would be of little use to anything other than this
# method.
caller_path = []
# get path to module
caller_path.append(sys.modules[
sys._getframe().f_back.f_globals['__name__']].__name__)
# attempt to get class name
try:
caller_path.append(
sys._getframe().f_back.f_locals['self'].__class__.__name__)
except KeyError:
pass
# get method or function name
caller_path.append(sys._getframe().f_back.f_code.co_name)
return self.test_config.get('tests_options', {}).get(
'.'.join(caller_path + [key]), default)
def get_applications_with_substring_in_name(self, substring):
"""Get applications with substring in name.
:param substring: String to search for in application names
:type substring: str
:returns: List of matching applications
:rtype: List
"""
status = model.get_status().applications
applications = []
for application in status.keys():
if substring in application:
applications.append(application)
return applications
def run_update_status_hooks(self, units):
"""Run update status hooks on units.
:param units: List of unit names or unit.entity_id
:type units: List[str]
:returns: None
:rtype: None
"""
for unit in units:
model.run_on_unit(unit, "hooks/update-status")
def assert_unit_cpu_topology(self, unit, nr_1g_hugepages):
r"""Assert unit under test CPU topology.
When using OpenStack as CI substrate:
By default, when instance NUMA placement is not specified,
a topology of N sockets, each with one core and one thread,
is used for an instance, where N corresponds to the number of
instance vCPUs requested.
In this context a socket is a physical socket on the motherboard
where a CPU is connected.
The DPDK Environment Abstraction Layer (EAL) allocates memory per
CPU socket, so we want the CPU topology inside the instance to
mimic something we would be likely to find in the real world and
at the same time not make the test too heavy.
The charm default is to have Open vSwitch allocate 1GB RAM per
CPU socket.
The following command would set the appropriate CPU topology for a
4 VCPU, 8 GB RAM flavor:
openstack flavor set onesocketm1.large \
--property hw:cpu_sockets=1 \
--property hw:cpu_cores=2 \
--property hw:cpu_threads=2
For validation of operation with multiple sockets, the following
command would set the appropriate CPU topology for an
8 VCPU, 16 GB RAM flavor:
openstack flavor set twosocketm1.xlarge \
--property hw:cpu_sockets=2 \
--property hw:cpu_cores=2 \
--property hw:cpu_threads=2 \
--property hw:numa_nodes=2
:param unit: Unit under test to check.
:type unit: juju.unit.Unit
:param nr_1g_hugepages: Number of 1G hugepages that will be requested.
:type nr_1g_hugepages: int
"""
# Get number of sockets
cmd = 'lscpu -p|grep -v ^#|cut -f3 -d,|sort|uniq|wc -l'
sockets = int(zaza.utilities.juju.remote_run(
unit.name, cmd, model_name=self.model_name, fatal=True).rstrip())
# Get total memory
cmd = 'cat /proc/meminfo |grep ^MemTotal'
_, meminfo_value, _ = zaza.utilities.juju.remote_run(
unit.name,
cmd,
model_name=self.model_name,
fatal=True).rstrip().split()
mbtotal = int(meminfo_value) * 1024 / 1000 / 1000
mbtotalhugepages = nr_1g_hugepages * 1024
# headroom for operating system and daemons in instance
mbsystemheadroom = 2048
# memory to be consumed by the nested instance
mbinstance = 1024
# the amount of hugepage memory OVS / DPDK EAL will allocate
mbovshugepages = sockets * 1024
# the amount of hugepage memory available for nested instance
mbfreehugepages = mbtotalhugepages - mbovshugepages
assert (mbtotal - mbtotalhugepages >= mbsystemheadroom and
mbfreehugepages >= mbinstance), (
'Unit {} is not suitable for test, please adjust instance '
'type CPU topology or provide suitable physical machine. '
'CPU Sockets: {} '
'Available memory: {} MB '
'Details:\n{}'
.format(unit.name,
sockets,
mbtotal,
self.assert_unit_cpu_topology.__doc__))
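# Worked example of the headroom check above (illustrative numbers, assuming
# the single-socket 4 VCPU / 8 GB flavor from the docstring and
# nr_1g_hugepages=2): mbtotalhugepages = 2 * 1024 = 2048,
# mbovshugepages = 1 * 1024 = 1024, mbfreehugepages = 2048 - 1024 = 1024,
# and mbtotal comes out a little above 8000 MB for an 8 GB guest. The
# assertion holds because mbtotal - 2048 >= 2048 and 1024 >= 1024.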
def enable_hugepages_vfio_on_hvs_in_vms(self, nr_1g_hugepages):
"""Enable hugepages and unsafe VFIO NOIOMMU on virtual hypervisors."""
for unit in model.get_units(
zaza.utilities.machine_os.get_hv_application(),
model_name=self.model_name):
if not zaza.utilities.machine_os.is_vm(unit.name,
model_name=self.model_name):
logging.info('Unit {} is a physical machine, assuming '
'hugepages and IOMMU configuration already '
'performed through kernel command line.'
.format(unit.name))
continue
logging.info('Checking CPU topology on {}'.format(unit.name))
self.assert_unit_cpu_topology(unit, nr_1g_hugepages)
logging.info('Enabling hugepages on {}'.format(unit.name))
zaza.utilities.machine_os.enable_hugepages(
unit, nr_1g_hugepages, model_name=self.model_name)
logging.info('Enabling unsafe VFIO NOIOMMU mode on {}'
.format(unit.name))
zaza.utilities.machine_os.enable_vfio_unsafe_noiommu_mode(
unit, model_name=self.model_name)
def disable_hugepages_vfio_on_hvs_in_vms(self):
"""Disable hugepages and unsafe VFIO NOIOMMU on virtual hypervisors."""
for unit in model.get_units(
zaza.utilities.machine_os.get_hv_application(),
model_name=self.model_name):
if not zaza.utilities.machine_os.is_vm(unit.name,
model_name=self.model_name):
logging.info('Unit {} is a physical machine, assuming '
'hugepages and IOMMU configuration already '
'performed through kernel command line.'
.format(unit.name))
continue
logging.info('Disabling hugepages on {}'.format(unit.name))
zaza.utilities.machine_os.disable_hugepages(
unit, model_name=self.model_name)
logging.info('Disabling unsafe VFIO NOIOMMU mode on {}'
.format(unit.name))
zaza.utilities.machine_os.disable_vfio_unsafe_noiommu_mode(
unit, model_name=self.model_name)
class OpenStackBaseTest(BaseCharmTest):
"""Generic helpers for testing OpenStack API charms."""
@classmethod
def setUpClass(cls, application_name=None, model_alias=None):
"""Run setup for test class to create common resources."""
super(OpenStackBaseTest, cls).setUpClass(application_name, model_alias)
cls.keystone_session = openstack_utils.get_overcloud_keystone_session(
model_name=cls.model_name)
cls.cacert = openstack_utils.get_cacert()
cls.nova_client = (
openstack_utils.get_nova_session_client(cls.keystone_session))
def resource_cleanup(self):
"""Remove test resources."""
try:
logging.info('Removing instances launched by test ({}*)'
.format(self.RESOURCE_PREFIX))
for server in self.nova_client.servers.list():
if server.name.startswith(self.RESOURCE_PREFIX):
openstack_utils.delete_resource(
self.nova_client.servers,
server.id,
msg="server")
except AssertionError as e:
# Resource failed to be removed within the expected time frame,
# log this fact and carry on.
logging.warning('Gave up waiting for resource cleanup: "{}"'
.format(str(e)))
except AttributeError:
# Test did not define self.RESOURCE_PREFIX, ignore.
pass
def launch_guest(self, guest_name, userdata=None, use_boot_volume=False,
instance_key=None, flavor_name=None,
attach_to_external_network=False):
"""Launch one guest to use in tests.
Note that it is up to the caller to have set the RESOURCE_PREFIX class
variable prior to calling this method.
Also note that this method will remove any already existing instance
with same name as what is requested.
:param guest_name: Name of instance
:type guest_name: str
:param userdata: Userdata to attach to instance
:type userdata: Optional[str]
:param use_boot_volume: Whether to boot guest from a shared volume.
:type use_boot_volume: boolean
:param instance_key: Key to collect associated config data with.
:type instance_key: Optional[str]
:param flavor_name: Name of flavor to use for the instance.
:type flavor_name: Optional[str]
:param attach_to_external_network: Attach instance directly to external
network.
:type attach_to_external_network: bool
:returns: Nova instance object
:rtype: Server
"""
instance_key = instance_key or glance_setup.LTS_IMAGE_NAME
instance_name = '{}-{}'.format(self.RESOURCE_PREFIX, guest_name)
for attempt in tenacity.Retrying(
stop=tenacity.stop_after_attempt(3),
wait=tenacity.wait_exponential(
multiplier=1, min=2, max=10)):
with attempt:
old_instance_with_same_name = self.retrieve_guest(
instance_name)
if old_instance_with_same_name:
logging.info(
'Removing already existing instance ({}) with '
'requested name ({})'
.format(old_instance_with_same_name.id, instance_name))
openstack_utils.delete_resource(
self.nova_client.servers,
old_instance_with_same_name.id,
msg="server")
return configure_guest.launch_instance(
instance_key,
vm_name=instance_name,
use_boot_volume=use_boot_volume,
userdata=userdata,
flavor_name=flavor_name,
attach_to_external_network=attach_to_external_network)
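# Illustrative usage sketch (the prefix, class and guest name are
# hypothetical): any pre-existing instance with the same derived name is
# removed before a fresh guest is launched.
#
#     class MyCharmTest(OpenStackBaseTest):
#         RESOURCE_PREFIX = 'zaza-mycharm'
#
#         def test_guest(self):
#             instance = self.launch_guest('smoke', use_boot_volume=True)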
def launch_guests(self, userdata=None, attach_to_external_network=False,
flavor_name=None):
"""Launch two guests to use in tests.
Note that it is up to the caller to have set the RESOURCE_PREFIX class
variable prior to calling this method.
:param userdata: Userdata to attach to instance
:type userdata: Optional[str]
:param attach_to_external_network: Attach instance directly to external
network.
:type attach_to_external_network: bool
:param flavor_name: Name of flavor to use for the instances.
:type flavor_name: Optional[str]
:returns: List of launched Nova instance objects
:rtype: List[Server]
"""
launched_instances = []
for guest_number in range(1, 2+1):
launched_instances.append(
self.launch_guest(
guest_name='ins-{}'.format(guest_number),
userdata=userdata,
attach_to_external_network=attach_to_external_network,
flavor_name=flavor_name))
return launched_instances
def retrieve_guest(self, guest_name):
"""Return guest matching name.
:param guest_name: Name of guest to retrieve.
:type guest_name: str
:returns: the matching guest
:rtype: Union[novaclient.Server, None]
"""
try:
return self.nova_client.servers.find(name=guest_name)
except novaclient.exceptions.NotFound:
return None
def retrieve_guests(self):
"""Return test guests.
Note that it is up to the caller to have set the RESOURCE_PREFIX class
variable prior to calling this method.
:returns: the test guests, or None for any guest not found
:rtype: Tuple[Union[novaclient.Server, None], Union[novaclient.Server, None]]
"""
instance_1 = self.retrieve_guest(
'{}-ins-1'.format(self.RESOURCE_PREFIX))
instance_2 = self.retrieve_guest(
'{}-ins-2'.format(self.RESOURCE_PREFIX))
return instance_1, instance_2
class BaseDeferredRestartTest(BaseCharmTest):
"""Check deferred restarts.
Example of adding a deferred restart test::
class NeutronOVSDeferredRestartTest(
test_utils.BaseDeferredRestartTest):
@classmethod
def setUpClass(cls):
super().setUpClass(application_name='neutron-openvswitch')
def run_tests(self):
# Trigger a config change which triggers a deferred hook.
self.run_charm_change_hook_test('config-changed')
# Trigger a package change which requires a restart
self.run_package_change_test(
'openvswitch-switch',
'openvswitch-switch')
NOTE: The test has been broken into various class methods which may require
specialisation if the charm being tested is not a standard OpenStack
charm, e.g. `trigger_deferred_hook_via_charm` if the charm does not
use an oslo config file or does not have a debug option.
"""
@classmethod
def setUpClass(cls, application_name):
"""Run test setup.
:param application_name: Name of application to run tests against.
:type application_name: str
"""
cls.application_name = application_name
super().setUpClass(application_name=cls.application_name)
def check_status_message_is_clear(self):
"""Check each unit's status message shows no deferred events."""
# Check workload status no longer shows deferred restarts.
for unit in model.get_units(self.application_name):
model.block_until_unit_wl_message_match(
unit.entity_id,
'Unit is ready')
model.block_until_all_units_idle()
def check_clear_restarts(self):
"""Clear any deferred restarts and check status.
Clear any deferred restarts and then check the workload status message
for each unit.
"""
# Use action to run any deferred restarts
for unit in model.get_units(self.application_name):
logging.info("Running restart-services on {}".format(
unit.entity_id))
model.run_action(
unit.entity_id,
'restart-services',
action_params={'deferred-only': True},
raise_on_failure=True)
# Check workload status no longer shows deferred restarts.
self.check_status_message_is_clear()
def clear_hooks(self):
"""Clear any deferred hooks.
Run any deferred hooks.
"""
# Use action to run any deferred restarts
for unit in model.get_units(self.application_name):
logging.info("Running run-deferred-hooks on {}".format(
unit.entity_id))
model.run_action(
unit.entity_id,
'run-deferred-hooks',
raise_on_failure=True)
def check_clear_hooks(self):
"""Clear deferred hooks and check status.
Clear deferred hooks and then check the workload status message
for each unit.
"""
self.clear_hooks()
# Check workload status no longer shows deferred restarts.
self.check_status_message_is_clear()
def run_show_deferred_events_action(self):
"""Run show-deferred-events and return results.
:returns: Data from action run
:rtype: Dict
"""
unit = model.get_units(self.application_name)[0]
action = model.run_action(
unit.entity_id,
'show-deferred-events',
raise_on_failure=True)
return yaml.safe_load(action.data['results']['output'])
def check_show_deferred_events_action_restart(self, test_service,
restart_reason):
"""Check the output from the action to list deferred restarts.
Run the action to list any deferred restarts and check it has an entry
for the given service and reason.
:param test_service: Service that should need a restart
:type test_service: str
:param restart_reason: The reason the action should list for the
service needing to be restarted. This can be a
substring.
:type restart_reason: str
"""
# Ensure that the deferred restart and cause are listed via action
logging.info(
("Checking {} is marked as needing restart in "
"show-deferred-events action").format(
test_service))
for event in self.run_show_deferred_events_action()['restarts']:
logging.info("{} in {} and {} in {}".format(
test_service,
event,
restart_reason,
event))
if test_service in event and restart_reason in event:
break
else:
msg = 'No entry for restart of {} for reason {} found'.format(
test_service,
restart_reason)
raise Exception(msg)
def check_show_deferred_events_action_hook(self, hook):
"""Check the output from the action to list deferred events.