# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fixtures for Nova tests."""
import collections
import contextlib
from contextlib import contextmanager
import functools
from importlib.abc import MetaPathFinder
import logging as std_logging
import os
import sys
import time
from unittest import mock
import warnings
import eventlet
import fixtures
import futurist
from openstack import service_description
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import test_fixtures as db_fixtures
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_messaging import conffixture as messaging_conffixture
from oslo_privsep import daemon as privsep_daemon
from oslo_utils.fixture import uuidsentinel
from oslo_utils import strutils
from requests import adapters
from sqlalchemy import exc as sqla_exc
from wsgi_intercept import interceptor
from nova.api.openstack import wsgi_app
from nova.api import wsgi
from nova.compute import multi_cell_list
from nova.compute import rpcapi as compute_rpcapi
from nova import context
from nova.db.api import api as api_db_api
from nova.db.main import api as main_db_api
from nova.db import migration
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova.objects import service as service_obj
import nova.privsep
from nova import quota as nova_quota
from nova import rpc
from nova.scheduler import weights
from nova import service
from nova.tests.functional.api import client
from nova import utils
from nova.virt import node
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
DB_SCHEMA = collections.defaultdict(str)
PROJECT_ID = '6f70656e737461636b20342065766572'
class ServiceFixture(fixtures.Fixture):
"""Run a service as a test fixture."""
def __init__(self, name, host=None, cell=None, **kwargs):
# If not otherwise specified, the host will default to the
# name of the service. Some things like aggregates care that
# this is stable.
host = host or name
kwargs.setdefault('host', host)
kwargs.setdefault('binary', 'nova-%s' % name)
self.cell = cell
self.kwargs = kwargs
def setUp(self):
super(ServiceFixture, self).setUp()
self.ctxt = context.get_admin_context()
if self.cell:
context.set_target_cell(self.ctxt, self.cell)
with mock.patch('nova.context.get_admin_context',
return_value=self.ctxt):
self.service = service.Service.create(**self.kwargs)
self.service.start()
self.addCleanup(self.service.kill)
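
# Example usage (a sketch; 'compute' and 'host1' are arbitrary illustrative
# values, not part of this module):
#
#     fix = self.useFixture(ServiceFixture('compute', host='host1'))
#     compute_service = fix.service
#
# The fixture starts the service during setUp and registers a cleanup that
# kills it, so a test only needs to keep a reference to fix.service.
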
class NullHandler(std_logging.Handler):
"""custom default NullHandler to attempt to format the record.
Used in conjunction with
log_fixture.get_logging_handle_error_fixture to detect formatting errors in
debug level logs without saving the logs.
"""
def handle(self, record):
self.format(record)
def emit(self, record):
pass
def createLock(self):
self.lock = None
class StandardLogging(fixtures.Fixture):
"""Setup Logging redirection for tests.
There are a number of things we want to handle with logging in tests:
* Redirect the logging to somewhere that we can test or dump it later.
* Ensure that as many DEBUG messages as possible are actually
executed, to ensure they are actually syntactically valid (they
often have not been).
* Ensure that we create useful output for tests that doesn't
overwhelm the testing system (which means we can't capture the
100 MB of debug logging on every run).
To do this we create a logger fixture at the root level, which
defaults to INFO and create a Null Logger at DEBUG which lets
us execute log messages at DEBUG but not keep the output.
To support local debugging OS_DEBUG=True can be set in the
environment, which will print out the full debug logging.
There are also a set of overrides for particularly verbose
modules to be even less than INFO.
"""
def setUp(self):
super(StandardLogging, self).setUp()
# set root logger to debug
root = std_logging.getLogger()
root.setLevel(std_logging.DEBUG)
# supports collecting debug level for local runs
if os.environ.get('OS_DEBUG') in ('True', 'true', '1', 'yes'):
level = std_logging.DEBUG
else:
level = std_logging.INFO
# Collect logs
fs = '%(asctime)s %(levelname)s [%(name)s] %(message)s'
self.logger = self.useFixture(
fixtures.FakeLogger(format=fs, level=None))
# TODO(sdague): why can't we send level through the fake
# logger? Tests prove that it breaks, but it's worth getting
# to the bottom of.
root.handlers[0].setLevel(level)
if level > std_logging.DEBUG:
# Just attempt to format debug level logs, but don't save them
handler = NullHandler()
self.useFixture(fixtures.LogHandler(handler, nuke_handlers=False))
handler.setLevel(std_logging.DEBUG)
# Don't log every single DB migration step
std_logging.getLogger(
'migrate.versioning.api').setLevel(std_logging.WARNING)
# Or alembic for model comparisons.
std_logging.getLogger('alembic').setLevel(std_logging.WARNING)
# Or oslo_db provisioning steps
std_logging.getLogger('oslo_db.sqlalchemy').setLevel(
std_logging.WARNING)
# At times we end up calling back into main() functions in
# testing. This has the possibility of calling logging.setup
# again, which completely unwinds the logging capture we've
        # created here. Once we've set up the logging the way we want,
# disable the ability for the test to change this.
def fake_logging_setup(*args):
pass
self.useFixture(
fixtures.MonkeyPatch('oslo_log.log.setup', fake_logging_setup))
def delete_stored_logs(self):
# NOTE(gibi): this depends on the internals of the fixtures.FakeLogger.
# This could be enhanced once the PR
# https://github.com/testing-cabal/fixtures/pull/42 merges
self.logger._output.truncate(0)
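
# Example usage (a sketch): the fixture is typically installed once per test
# and the captured output read back through the wrapped FakeLogger:
#
#     self.stdlog = self.useFixture(StandardLogging())
#     ...
#     self.assertIn('expected message', self.stdlog.logger.output)
#
# Setting OS_DEBUG=True in the environment keeps full debug output instead.
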
class DatabasePoisonFixture(fixtures.Fixture):
def setUp(self):
super(DatabasePoisonFixture, self).setUp()
self.useFixture(fixtures.MonkeyPatch(
'oslo_db.sqlalchemy.enginefacade._TransactionFactory.'
'_create_session',
self._poison_configure))
        # NOTE(gibi): it is not just _create_session that indicates
        # manipulation of the DB; any operation that initializes (starts) a
        # transaction factory does too. If a test does this without using
        # the Database fixture then that test is i) actually a database test
        # and should declare itself as such, and ii) manipulating global
        # state without proper cleanup and test isolation. This can cause
        # later tests to fail with the error:
        # oslo_db.sqlalchemy.enginefacade.AlreadyStartedError: this
        # TransactionFactory is already started
self.useFixture(fixtures.MonkeyPatch(
'oslo_db.sqlalchemy.enginefacade._TransactionFactory._start',
self._poison_configure))
def _poison_configure(self, *a, **k):
# If you encounter this error, you might be tempted to just not
# inherit from NoDBTestCase. Bug #1568414 fixed a few hundred of these
# errors, and not once was that the correct solution. Instead,
# consider some of the following tips (when applicable):
#
# - mock at the object layer rather than the db layer, for example:
# nova.objects.instance.Instance.get
# vs.
# nova.db.instance_get
#
# - mock at the api layer rather than the object layer, for example:
# nova.api.openstack.common.get_instance
# vs.
# nova.objects.instance.Instance.get
#
# - mock code that requires the database but is otherwise tangential
# to the code you're testing (for example: EventReporterStub)
#
# - peruse some of the other database poison warning fixes here:
# https://review.opendev.org/#/q/topic:bug/1568414
raise Exception('This test uses methods that set internal oslo_db '
'state, but it does not claim to use the database. '
'This will conflict with the setup of tests that '
'do use the database and cause failures later.')
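
# A sketch of the first tip above (the test method shown is hypothetical):
# stub the object-layer call the code under test actually uses, instead of
# letting it reach the database layer:
#
#     @mock.patch('nova.objects.Instance.get_by_uuid')
#     def test_something(self, mock_get):
#         mock_get.return_value = objects.Instance(uuid=uuidsentinel.inst)
#         ...
#
# This keeps the test a unit test and avoids touching global oslo_db state.
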
class SingleCellSimple(fixtures.Fixture):
"""Setup the simplest cells environment possible
This should be used when you do not care about multiple cells,
or having a "real" environment for tests that should not care.
This will give you a single cell, and map any and all accesses
to that cell (even things that would go to cell0).
If you need to distinguish between cell0 and cellN, then you
should use the CellDatabases fixture.
If instances should appear to still be in scheduling state, pass
instances_created=False to init.
"""
def __init__(
self, instances_created=True, project_id=PROJECT_ID,
):
self.instances_created = instances_created
self.project_id = project_id
def setUp(self):
super(SingleCellSimple, self).setUp()
self.useFixture(fixtures.MonkeyPatch(
'nova.objects.CellMappingList._get_all_from_db',
self._fake_cell_list))
self.useFixture(fixtures.MonkeyPatch(
'nova.objects.CellMappingList._get_by_project_id_from_db',
self._fake_cell_list))
self.useFixture(fixtures.MonkeyPatch(
'nova.objects.CellMapping._get_by_uuid_from_db',
self._fake_cell_get))
self.useFixture(fixtures.MonkeyPatch(
'nova.objects.HostMapping._get_by_host_from_db',
self._fake_hostmapping_get))
self.useFixture(fixtures.MonkeyPatch(
'nova.objects.InstanceMapping._get_by_instance_uuid_from_db',
self._fake_instancemapping_get))
self.useFixture(fixtures.MonkeyPatch(
'nova.objects.InstanceMappingList._get_by_instance_uuids_from_db',
self._fake_instancemapping_get_uuids))
self.useFixture(fixtures.MonkeyPatch(
'nova.objects.InstanceMapping._save_in_db',
self._fake_instancemapping_get_save))
self.useFixture(fixtures.MonkeyPatch(
'nova.context.target_cell',
self._fake_target_cell))
self.useFixture(fixtures.MonkeyPatch(
'nova.context.set_target_cell',
self._fake_set_target_cell))
def _fake_hostmapping_get(self, *args):
return {'id': 1,
'updated_at': None,
'created_at': None,
'host': 'host1',
'cell_mapping': self._fake_cell_list()[0]}
def _fake_instancemapping_get_common(self, instance_uuid):
return {
'id': 1,
'updated_at': None,
'created_at': None,
'instance_uuid': instance_uuid,
'cell_id': (self.instances_created and 1 or None),
'project_id': self.project_id,
'cell_mapping': (
self.instances_created and self._fake_cell_get() or None),
}
def _fake_instancemapping_get_save(self, *args):
return self._fake_instancemapping_get_common(args[-2])
def _fake_instancemapping_get(self, *args):
return self._fake_instancemapping_get_common(args[-1])
def _fake_instancemapping_get_uuids(self, *args):
return [self._fake_instancemapping_get(uuid)
for uuid in args[-1]]
def _fake_cell_get(self, *args):
return self._fake_cell_list()[0]
def _fake_cell_list(self, *args):
return [{'id': 1,
'updated_at': None,
'created_at': None,
'uuid': uuidsentinel.cell1,
'name': 'onlycell',
'transport_url': 'fake://nowhere/',
'database_connection': 'sqlite:///',
'disabled': False}]
@contextmanager
def _fake_target_cell(self, context, target_cell):
# Just do something simple and set/unset the cell_uuid on the context.
if target_cell:
context.cell_uuid = getattr(target_cell, 'uuid',
uuidsentinel.cell1)
else:
context.cell_uuid = None
yield context
def _fake_set_target_cell(self, context, cell_mapping):
# Just do something simple and set/unset the cell_uuid on the context.
if cell_mapping:
context.cell_uuid = getattr(cell_mapping, 'uuid',
uuidsentinel.cell1)
else:
context.cell_uuid = None
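
# Example usage (a sketch):
#
#     self.useFixture(SingleCellSimple())
#
# After this, cell mapping lookups return the single fake cell, and
# target_cell()/set_target_cell() merely toggle context.cell_uuid rather
# than switching database connections.
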
class CheatingSerializer(rpc.RequestContextSerializer):
"""A messaging.RequestContextSerializer that helps with cells.
Our normal serializer does not pass in the context like db_connection
and mq_connection, for good reason. We don't really want/need to
force a remote RPC server to use our values for this. However,
during unit and functional tests, since we're all in the same
process, we want cell-targeted RPC calls to preserve these values.
Unless we had per-service config and database layer state for
the fake services we start, this is a reasonable cheat.
"""
def serialize_context(self, context):
"""Serialize context with the db_connection inside."""
values = super(CheatingSerializer, self).serialize_context(context)
values['db_connection'] = context.db_connection
values['mq_connection'] = context.mq_connection
return values
def deserialize_context(self, values):
"""Deserialize context and honor db_connection if present."""
ctxt = super(CheatingSerializer, self).deserialize_context(values)
ctxt.db_connection = values.pop('db_connection', None)
ctxt.mq_connection = values.pop('mq_connection', None)
return ctxt
class CellDatabases(fixtures.Fixture):
"""Create per-cell databases for testing.
How to use::
fix = CellDatabases()
fix.add_cell_database('connection1')
fix.add_cell_database('connection2', default=True)
self.useFixture(fix)
Passing default=True tells the fixture which database should
be given to code that doesn't target a specific cell.
"""
def __init__(self):
self._ctxt_mgrs = {}
self._last_ctxt_mgr = None
self._default_ctxt_mgr = None
# NOTE(danms): Use a ReaderWriterLock to synchronize our
# global database muckery here. If we change global db state
# to point to a cell, we need to take an exclusive lock to
# prevent any other calls to get_context_manager() until we
# reset to the default.
self._cell_lock = ReaderWriterLock()
def _cache_schema(self, connection_str):
# NOTE(melwitt): See the regular Database fixture for why
# we do this.
global DB_SCHEMA
if not DB_SCHEMA[('main', None)]:
ctxt_mgr = self._ctxt_mgrs[connection_str]
engine = ctxt_mgr.writer.get_engine()
conn = engine.connect()
migration.db_sync(database='main')
DB_SCHEMA[('main', None)] = "".join(line for line
in conn.connection.iterdump())
engine.dispose()
@contextmanager
def _wrap_target_cell(self, context, cell_mapping):
# NOTE(danms): This method is responsible for switching global
# database state in a safe way such that code that doesn't
# know anything about cell targeting (i.e. compute node code)
# can continue to operate when called from something that has
# targeted a specific cell. In order to make this safe from a
# dining-philosopher-style deadlock, we need to be able to
# support multiple threads talking to the same cell at the
# same time and potentially recursion within the same thread
# from code that would otherwise be running on separate nodes
# in real life, but where we're actually recursing in the
# tests.
#
# The basic logic here is:
# 1. Grab a reader lock to see if the state is already pointing at
# the cell we want. If it is, we can yield and return without
# altering the global state further. The read lock ensures that
# global state won't change underneath us, and multiple threads
# can be working at the same time, as long as they are looking
# for the same cell.
# 2. If we do need to change the global state, grab a writer lock
# to make that change, which assumes that nothing else is looking
# at a cell right now. We do only non-schedulable things while
# holding that lock to avoid the deadlock mentioned above.
# 3. We then re-lock with a reader lock just as step #1 above and
# yield to do the actual work. We can do schedulable things
# here and not exclude other threads from making progress.
# If an exception is raised, we capture that and save it.
# Note that it is possible that another thread has changed the
# global state (step #2) after we released the writer lock but
# before we acquired the reader lock. If this happens, we will
# detect the global state change and retry step #2 a limited number
# of times. If we happen to race repeatedly with another thread and
# exceed our retry limit, we will give up and raise a RuntimeError,
# which will fail the test.
# 4. If we changed state in #2, we need to change it back. So we grab
# a writer lock again and do that.
# 5. Finally, if an exception was raised in #3 while state was
# changed, we raise it to the caller.
if cell_mapping:
desired = self._ctxt_mgrs[cell_mapping.database_connection]
else:
desired = self._default_ctxt_mgr
with self._cell_lock.read_lock():
if self._last_ctxt_mgr == desired:
with self._real_target_cell(context, cell_mapping) as c:
yield c
return
raised_exc = None
def set_last_ctxt_mgr():
with self._cell_lock.write_lock():
if cell_mapping is not None:
# This assumes the next local DB access is the same cell
# that was targeted last time.
self._last_ctxt_mgr = desired
# Set last context manager to the desired cell's context manager.
set_last_ctxt_mgr()
# Retry setting the last context manager if we detect that a writer
# changed global DB state before we take the read lock.
for retry_time in range(0, 3):
try:
with self._cell_lock.read_lock():
if self._last_ctxt_mgr != desired:
# NOTE(danms): This is unlikely to happen, but it's
# possible another waiting writer changed the state
# between us letting it go and re-acquiring as a
# reader. If lockutils supported upgrading and
# downgrading locks, this wouldn't be a problem.
# Regardless, assert that it is still as we left it
# here so we don't hit the wrong cell. If this becomes
# a problem, we just need to retry the write section
# above until we land here with the cell we want.
raise RuntimeError(
'Global DB state changed underneath us')
try:
with self._real_target_cell(
context, cell_mapping
) as ccontext:
yield ccontext
except Exception as exc:
raised_exc = exc
# Leave the retry loop after calling target_cell
break
except RuntimeError:
# Give other threads a chance to make progress, increasing the
# wait time between attempts.
time.sleep(retry_time)
set_last_ctxt_mgr()
with self._cell_lock.write_lock():
# Once we have returned from the context, we need
# to restore the default context manager for any
# subsequent calls
self._last_ctxt_mgr = self._default_ctxt_mgr
if raised_exc:
raise raised_exc
def _wrap_create_context_manager(self, connection=None):
ctxt_mgr = self._ctxt_mgrs[connection]
return ctxt_mgr
def _wrap_get_context_manager(self, context):
try:
# If already targeted, we can proceed without a lock
if context.db_connection:
return context.db_connection
except AttributeError:
# Unit tests with None, FakeContext, etc
pass
# NOTE(melwitt): This is a hack to try to deal with
# local accesses i.e. non target_cell accesses.
with self._cell_lock.read_lock():
# FIXME(mriedem): This is actually misleading and means we don't
# catch things like bug 1717000 where a context should be targeted
# to a cell but it's not, and the fixture here just returns the
# last targeted context that was used.
return self._last_ctxt_mgr
def _wrap_get_server(self, target, endpoints, serializer=None):
"""Mirror rpc.get_server() but with our special sauce."""
serializer = CheatingSerializer(serializer)
return messaging.get_rpc_server(rpc.TRANSPORT,
target,
endpoints,
executor='eventlet',
serializer=serializer)
def _wrap_get_client(self, target, version_cap=None, serializer=None,
call_monitor_timeout=None):
"""Mirror rpc.get_client() but with our special sauce."""
serializer = CheatingSerializer(serializer)
return messaging.get_rpc_client(rpc.TRANSPORT, target,
version_cap=version_cap,
serializer=serializer,
call_monitor_timeout=call_monitor_timeout)
def add_cell_database(self, connection_str, default=False):
"""Add a cell database to the fixture.
:param connection_str: An identifier used to represent the connection
string for this database. It should match the database_connection field
in the corresponding CellMapping.
"""
# NOTE(danms): Create a new context manager for the cell, which
# will house the sqlite:// connection for this cell's in-memory
# database. Store/index it by the connection string, which is
# how we identify cells in CellMapping.
ctxt_mgr = main_db_api.create_context_manager()
self._ctxt_mgrs[connection_str] = ctxt_mgr
# NOTE(melwitt): The first DB access through service start is
# local so this initializes _last_ctxt_mgr for that and needs
# to be a compute cell.
self._last_ctxt_mgr = ctxt_mgr
# NOTE(danms): Record which context manager should be the default
# so we can restore it when we return from target-cell contexts.
# If none has been provided yet, store the current one in case
# no default is ever specified.
if self._default_ctxt_mgr is None or default:
self._default_ctxt_mgr = ctxt_mgr
def get_context_manager(context):
return ctxt_mgr
# NOTE(danms): This is a temporary MonkeyPatch just to get
# a new database created with the schema we need and the
# context manager for it stashed.
with fixtures.MonkeyPatch(
'nova.db.main.api.get_context_manager',
get_context_manager,
):
engine = ctxt_mgr.writer.get_engine()
engine.dispose()
self._cache_schema(connection_str)
conn = engine.connect()
conn.connection.executescript(DB_SCHEMA[('main', None)])
def setUp(self):
super(CellDatabases, self).setUp()
self.addCleanup(self.cleanup)
self._real_target_cell = context.target_cell
# NOTE(danms): These context managers are in place for the
# duration of the test (unlike the temporary ones above) and
# provide the actual "runtime" switching of connections for us.
self.useFixture(fixtures.MonkeyPatch(
'nova.db.main.api.create_context_manager',
self._wrap_create_context_manager))
self.useFixture(fixtures.MonkeyPatch(
'nova.db.main.api.get_context_manager',
self._wrap_get_context_manager))
self.useFixture(fixtures.MonkeyPatch(
'nova.context.target_cell',
self._wrap_target_cell))
self.useFixture(fixtures.MonkeyPatch(
'nova.rpc.get_server',
self._wrap_get_server))
self.useFixture(fixtures.MonkeyPatch(
'nova.rpc.get_client',
self._wrap_get_client))
def cleanup(self):
for ctxt_mgr in self._ctxt_mgrs.values():
engine = ctxt_mgr.writer.get_engine()
engine.dispose()
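
# A sketch of how the connection strings tie back to CellMapping records
# (the values and the 'ctxt' variable are illustrative):
#
#     fix = CellDatabases()
#     fix.add_cell_database('cell1', default=True)
#     self.useFixture(fix)
#     mapping = objects.CellMapping(
#         context=ctxt, uuid=uuidsentinel.cell1, name='cell1',
#         transport_url='fake://nowhere/', database_connection='cell1')
#
# Targeting a context at this mapping then resolves 'cell1' to the
# in-memory database the fixture created for that connection string.
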
class Database(fixtures.Fixture):
# TODO(stephenfin): The 'version' argument is unused and can be removed
def __init__(self, database='main', version=None, connection=None):
"""Create a database fixture.
:param database: The type of database, 'main', or 'api'
:param connection: The connection string to use
"""
super().__init__()
assert database in {'main', 'api'}, f'Unrecognized database {database}'
if database == 'api':
assert connection is None, 'Not supported for the API database'
self.database = database
self.version = version
self.connection = connection
def setUp(self):
super().setUp()
if self.database == 'main':
if self.connection is not None:
ctxt_mgr = main_db_api.create_context_manager(
connection=self.connection)
self.get_engine = ctxt_mgr.writer.get_engine
else:
                # NOTE(gibi): this injects a new factory for each test and
                # cleans it up at the end of the test case. This way we can
                # let each test configure the factory so we can avoid having
                # a global flag guarding against factory re-configuration
new_engine = enginefacade.transaction_context()
self.useFixture(
db_fixtures.ReplaceEngineFacadeFixture(
main_db_api.context_manager, new_engine))
main_db_api.configure(CONF)
self.get_engine = main_db_api.get_engine
elif self.database == 'api':
# NOTE(gibi): similar note applies here as for the main_db_api
# above
new_engine = enginefacade.transaction_context()
self.useFixture(
db_fixtures.ReplaceEngineFacadeFixture(
api_db_api.context_manager, new_engine))
api_db_api.configure(CONF)
self.get_engine = api_db_api.get_engine
self._apply_schema()
self.addCleanup(self.cleanup)
def _apply_schema(self):
global DB_SCHEMA
if not DB_SCHEMA[(self.database, self.version)]:
# apply and cache schema
engine = self.get_engine()
conn = engine.connect()
migration.db_sync(database=self.database, version=self.version)
DB_SCHEMA[(self.database, self.version)] = "".join(
line for line in conn.connection.iterdump())
else:
# apply the cached schema
engine = self.get_engine()
conn = engine.connect()
conn.connection.executescript(
DB_SCHEMA[(self.database, self.version)])
def cleanup(self):
engine = self.get_engine()
engine.dispose()
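
# Example usage (a sketch): a test needing real database access installs one
# fixture per database it touches:
#
#     self.useFixture(Database(database='main'))
#     self.useFixture(Database(database='api'))
#
# The first use per (database, version) pair runs the migrations and caches
# the resulting schema in DB_SCHEMA; later uses replay the cached script,
# which is much faster than re-running the migrations.
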
class DefaultFlavorsFixture(fixtures.Fixture):
def setUp(self):
super(DefaultFlavorsFixture, self).setUp()
ctxt = context.get_admin_context()
defaults = {'rxtx_factor': 1.0, 'disabled': False, 'is_public': True,
'ephemeral_gb': 0, 'swap': 0}
extra_specs = {
"hw:numa_nodes": "1"
}
default_flavors = [
objects.Flavor(context=ctxt, memory_mb=512, vcpus=1,
root_gb=1, flavorid='1', name='m1.tiny',
**defaults),
objects.Flavor(context=ctxt, memory_mb=2048, vcpus=1,
root_gb=20, flavorid='2', name='m1.small',
**defaults),
objects.Flavor(context=ctxt, memory_mb=4096, vcpus=2,
root_gb=40, flavorid='3', name='m1.medium',
**defaults),
objects.Flavor(context=ctxt, memory_mb=8192, vcpus=4,
root_gb=80, flavorid='4', name='m1.large',
**defaults),
objects.Flavor(context=ctxt, memory_mb=16384, vcpus=8,
root_gb=160, flavorid='5', name='m1.xlarge',
**defaults),
objects.Flavor(context=ctxt, memory_mb=512, vcpus=1,
root_gb=1, flavorid='6', name='m1.tiny.specs',
extra_specs=extra_specs, **defaults),
]
for flavor in default_flavors:
flavor.create()
class RPCFixture(fixtures.Fixture):
def __init__(self, *exmods):
super(RPCFixture, self).__init__()
self.exmods = []
self.exmods.extend(exmods)
self._buses = {}
def _fake_create_transport(self, url):
# FIXME(danms): Right now, collapse all connections
# to a single bus. This is how our tests expect things
# to work. When the tests are fixed, this fixture can
# support simulating multiple independent buses, and this
# hack should be removed.
url = None
# NOTE(danms): This will be called with a non-None url by
# cells-aware code that is requesting to contact something on
        # one of the many transports we're multiplexing here.
if url not in self._buses:
exmods = rpc.get_allowed_exmods()
self._buses[url] = messaging.get_rpc_transport(
CONF,
url=url,
allowed_remote_exmods=exmods)
return self._buses[url]
def setUp(self):
super(RPCFixture, self).setUp()
self.addCleanup(rpc.cleanup)
rpc.add_extra_exmods(*self.exmods)
self.addCleanup(rpc.clear_extra_exmods)
self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
self.messaging_conf.transport_url = 'fake:/'
self.useFixture(self.messaging_conf)
self.useFixture(fixtures.MonkeyPatch(
'nova.rpc.create_transport', self._fake_create_transport))
# NOTE(danms): Execute the init with get_transport_url() as None,
# instead of the parsed TransportURL(None) so that we can cache
# it as it will be called later if the default is requested by
# one of our mq-switching methods.
with mock.patch('nova.rpc.get_transport_url') as mock_gtu:
mock_gtu.return_value = None
rpc.init(CONF)
def cleanup_in_flight_rpc_messages():
messaging._drivers.impl_fake.FakeExchangeManager._exchanges = {}
self.addCleanup(cleanup_in_flight_rpc_messages)
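
# Example usage (a sketch): the positional arguments are extra exception
# modules whose exceptions may be re-raised across the (fake) RPC transport:
#
#     self.useFixture(RPCFixture('nova.exception'))
#
# Note that all transport URLs currently collapse onto a single in-memory
# bus; see the FIXME in _fake_create_transport above.
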
class WarningsFixture(fixtures.Fixture):
"""Filters out warnings during test runs."""
def setUp(self):
super().setUp()
self._original_warning_filters = warnings.filters[:]
# NOTE(sdague): Make deprecation warnings only happen once. Otherwise
# this gets kind of crazy given the way that upstream python libs use
# this.
warnings.simplefilter("once", DeprecationWarning)
        # NOTE(sdague): this remains an unresolved item around the way
        # forward on is_admin; the deprecation is definitely premature.
warnings.filterwarnings(
'ignore',
message=(
'Policy enforcement is depending on the value of is_admin. '
'This key is deprecated. Please update your policy '
'file to use the standard policy values.'
),
)
# NOTE(mriedem): Ignore scope check UserWarnings from oslo.policy.
warnings.filterwarnings(
'ignore',
message="Policy .* failed scope check",
category=UserWarning,
)
# NOTE(gibi): The UUIDFields emits a warning if the value is not a
# valid UUID. Let's escalate that to an exception in the test to
# prevent adding violations.
warnings.filterwarnings('error', message=".*invalid UUID.*")
# NOTE(mriedem): Avoid adding anything which tries to convert an
# object to a primitive which jsonutils.to_primitive() does not know
# how to handle (or isn't given a fallback callback).
warnings.filterwarnings(
'error',
message=(
'Cannot convert <oslo_db.sqlalchemy.enginefacade._Default '
'object at '
),
category=UserWarning,
)
# Enable deprecation warnings for nova itself to capture upcoming
# SQLAlchemy changes
warnings.filterwarnings(
'ignore',
category=sqla_exc.SADeprecationWarning,
)
warnings.filterwarnings(
'error',
module='nova',
category=sqla_exc.SADeprecationWarning,
)
# Enable general SQLAlchemy warnings also to ensure we're not doing
# silly stuff. It's possible that we'll need to filter things out here
# with future SQLAlchemy versions, but that's a good thing
warnings.filterwarnings(
'error',
module='nova',
category=sqla_exc.SAWarning,
)
self.addCleanup(self._reset_warning_filters)
def _reset_warning_filters(self):
warnings.filters[:] = self._original_warning_filters
class ConfPatcher(fixtures.Fixture):
"""Fixture to patch and restore global CONF.
This also resets overrides for everything that is patched during
it's teardown.
"""
def __init__(self, **kwargs):
"""Constructor
:params group: if specified all config options apply to that group.
:params **kwargs: the rest of the kwargs are processed as a
set of key/value pairs to be set as configuration override.
"""
super(ConfPatcher, self).__init__()
self.group = kwargs.pop('group', None)
self.args = kwargs
def setUp(self):
super(ConfPatcher, self).setUp()
for k, v in self.args.items():
self.addCleanup(CONF.clear_override, k, self.group)
CONF.set_override(k, v, self.group)
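
# Example usage (a sketch; the option names are illustrative):
#
#     self.useFixture(ConfPatcher(host='fake-host'))
#     self.useFixture(ConfPatcher(group='api', max_limit=500))
#
# Every override set here is cleared again when the fixture is torn down.
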
class OSAPIFixture(fixtures.Fixture):
"""Create an OS API server as a fixture.
This spawns an OS API server as a fixture in a new greenthread in
the current test. The fixture has a .api parameter with is a
simple rest client that can communicate with it.
This fixture is extremely useful for testing REST responses
through the WSGI stack easily in functional tests.
Usage:
api = self.useFixture(fixtures.OSAPIFixture()).api
resp = api.api_request('/someurl')
self.assertEqual(200, resp.status_code)
resp = api.api_request('/otherurl', method='POST', body='{foo}')
The resp is a requests library response. Common attributes that
you'll want to use are:
- resp.status_code - integer HTTP status code returned by the request
- resp.content - the body of the response
- resp.headers - dictionary of HTTP headers returned
This fixture also has the following clients with various differences:
self.admin_api - Project user with is_admin=True and the "admin" role
self.reader_api - Project user with only the "reader" role
self.other_api - Project user with only the "other" role
"""
def __init__(
self, api_version='v2', project_id=PROJECT_ID,
use_project_id_in_urls=False, stub_keystone=True,
):
"""Constructor
:param api_version: the API version that we're interested in
using. Currently this expects 'v2' or 'v2.1' as possible
options.
:param project_id: the project id to use on the API.
:param use_project_id_in_urls: If True, act like the "endpoint" in the
"service catalog" has the legacy format including the project_id.
:param stub_keystone: If True, stub keystonemiddleware and
NovaKeystoneContext to simulate (but not perform) real auth.
"""
super(OSAPIFixture, self).__init__()
self.api_version = api_version
self.project_id = project_id
self.use_project_id_in_urls = use_project_id_in_urls
self.stub_keystone = stub_keystone
def setUp(self):
super(OSAPIFixture, self).setUp()
# A unique hostname for the wsgi-intercept.
hostname = uuidsentinel.osapi_host
port = 80
service_name = 'osapi_compute'
endpoint = 'http://%s:%s/' % (hostname, port)
conf_overrides = {
'osapi_compute_listen': hostname,
'osapi_compute_listen_port': port,
'debug': True,
}
self.useFixture(ConfPatcher(**conf_overrides))
if self.stub_keystone:
self._stub_keystone()
# Turn off manipulation of socket_options in TCPKeepAliveAdapter
# to keep wsgi-intercept happy. Replace it with the method
# from its superclass.
self.useFixture(fixtures.MonkeyPatch(
'keystoneauth1.session.TCPKeepAliveAdapter.init_poolmanager',
adapters.HTTPAdapter.init_poolmanager))
loader = wsgi.Loader().load_app(service_name)
app = lambda: loader
# reuse service setup code from wsgi_app to register
# service, which is looked for in some tests
wsgi_app._setup_service(CONF.host, service_name)
intercept = interceptor.RequestsInterceptor(app, url=endpoint)
intercept.install_intercept()
self.addCleanup(intercept.uninstall_intercept)
base_url = 'http://%(host)s:%(port)s/%(api_version)s' % ({