sqlalchemy.py
# -*- encoding: utf-8 -*-
#
# Copyright © 2014-2015 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import datetime
import itertools
import operator
import os.path
import threading
import uuid
from alembic import migration
from alembic import operations
import daiquiri
import oslo_db.api
from oslo_db import exception
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import utils as oslo_db_utils
try:
import psycopg2
except ImportError:
psycopg2 = None
try:
import pymysql.constants.ER
import pymysql.err
except ImportError:
pymysql = None
import six
from six.moves.urllib import parse as urlparse
import sqlalchemy
from sqlalchemy.engine import url as sqlalchemy_url
import sqlalchemy.exc
from sqlalchemy import types as sa_types
import sqlalchemy_utils
from gnocchi import exceptions
from gnocchi import indexer
from gnocchi.indexer import sqlalchemy_base as base
from gnocchi.indexer import sqlalchemy_types as types
from gnocchi import resource_type
from gnocchi import utils
Base = base.Base
Metric = base.Metric
ArchivePolicy = base.ArchivePolicy
ArchivePolicyRule = base.ArchivePolicyRule
Resource = base.Resource
ResourceHistory = base.ResourceHistory
ResourceType = base.ResourceType
_marker = indexer._marker
LOG = daiquiri.getLogger(__name__)
def _retry_on_exceptions(exc):
if not isinstance(exc, exception.DBError):
return False
inn_e = exc.inner_exception
if not isinstance(inn_e, sqlalchemy.exc.InternalError):
return False
return ((
pymysql and
isinstance(inn_e.orig, pymysql.err.InternalError) and
(inn_e.orig.args[0] == pymysql.constants.ER.TABLE_DEF_CHANGED)
) or (
# HACK(jd) Sometimes, PostgreSQL raises an error such as "current
# transaction is aborted, commands ignored until end of transaction
# block" on its own catalog, so we need to retry, but this is not
# caught by oslo.db as a deadlock. This is likely because when we use
# Base.metadata.create_all(), sqlalchemy itself gets an error it does
# not catch or something. That is why this function exists: to
# paper over it, I guess.
psycopg2
and isinstance(inn_e.orig, psycopg2.InternalError)
# current transaction is aborted
and inn_e.orig.pgcode == '25P02'
))
def retry_on_deadlock(f):
return oslo_db.api.wrap_db_retry(retry_on_deadlock=True,
max_retries=20,
retry_interval=0.1,
max_retry_interval=2,
exception_checker=_retry_on_exceptions)(f)
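
# Illustrative usage sketch (not part of the original module): any method
# doing schema work can be wrapped so transient TABLE_DEF_CHANGED (MySQL)
# or 25P02 (PostgreSQL) errors are retried with oslo.db's backoff, starting
# at 0.1s, capped at 2s, for up to 20 attempts:
#
#     @retry_on_deadlock
#     def _create_tables(self):
#         ...  # DDL that may race with concurrent schema changes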
class PerInstanceFacade(object):
def __init__(self, conf):
self.trans = enginefacade.transaction_context()
self.trans.configure(
**dict(conf.database.items())
)
self._context = threading.local()
def independent_writer(self):
return self.trans.independent.writer.using(self._context)
def independent_reader(self):
return self.trans.independent.reader.using(self._context)
def writer_connection(self):
return self.trans.connection.writer.using(self._context)
def reader_connection(self):
return self.trans.connection.reader.using(self._context)
def writer(self):
return self.trans.writer.using(self._context)
def reader(self):
return self.trans.reader.using(self._context)
def get_engine(self):
# TODO(mbayer): add get_engine() to enginefacade
if not self.trans._factory._started:
self.trans._factory._start()
return self.trans._factory._writer_engine
def dispose(self):
# TODO(mbayer): add dispose() to enginefacade
if self.trans._factory._started:
self.trans._factory._writer_engine.dispose()
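
# Hedged usage sketch of the facade above; "conf" is whatever oslo.config
# object the indexer was built with, and "MyModel" is a hypothetical mapped
# class used purely for illustration:
#
#     facade = PerInstanceFacade(conf)
#     with facade.writer() as session:        # commits on clean exit
#         session.add(MyModel(id=1))
#     with facade.independent_reader() as session:
#         session.query(MyModel).all()        # runs in its own transaction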
class ResourceClassMapper(object):
def __init__(self):
# FIXME(sileht): 3 attributes, perhaps we need a better structure.
self._cache = {'generic': {'resource': base.Resource,
'history': base.ResourceHistory,
'updated_at': utils.utcnow()}}
@staticmethod
def _build_class_mappers(resource_type, baseclass=None):
tablename = resource_type.tablename
tables_args = {"extend_existing": True}
tables_args.update(base.COMMON_TABLES_ARGS)
# TODO(sileht): Add columns
if not baseclass:
baseclass = resource_type.to_baseclass()
resource_ext = type(
str("%s_resource" % tablename),
(baseclass, base.ResourceExtMixin, base.Resource),
{"__tablename__": tablename, "__table_args__": tables_args})
resource_history_ext = type(
str("%s_history" % tablename),
(baseclass, base.ResourceHistoryExtMixin, base.ResourceHistory),
{"__tablename__": ("%s_history" % tablename),
"__table_args__": tables_args})
return {'resource': resource_ext,
'history': resource_history_ext,
'updated_at': resource_type.updated_at}
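
# For illustration: for a resource type whose tablename is "rt_<hex>", the
# type() calls above dynamically build two mapped classes, roughly
# equivalent to (sketch, attribute columns elided):
#
#     class rt_<hex>_resource(baseclass, base.ResourceExtMixin, base.Resource):
#         __tablename__ = "rt_<hex>"
#
#     class rt_<hex>_history(baseclass, base.ResourceHistoryExtMixin,
#                            base.ResourceHistory):
#         __tablename__ = "rt_<hex>_history"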
def get_classes(self, resource_type):
# NOTE(sileht): We don't care about concurrency here because we allow
# sqlalchemy to override its global object with extend_existing=True.
# This is safe because the class name and table name are UUID-based.
try:
mappers = self._cache[resource_type.tablename]
# Cache is outdated
if (resource_type.name != "generic"
and resource_type.updated_at > mappers['updated_at']):
for table_purpose in ['resource', 'history']:
Base.metadata.remove(Base.metadata.tables[
mappers[table_purpose].__tablename__])
del self._cache[resource_type.tablename]
raise KeyError
return mappers
except KeyError:
mapper = self._build_class_mappers(resource_type)
self._cache[resource_type.tablename] = mapper
return mapper
@retry_on_deadlock
def map_and_create_tables(self, resource_type, facade):
if resource_type.state != "creating":
raise RuntimeError("map_and_create_tables must be called in state "
"creating")
mappers = self.get_classes(resource_type)
tables = [Base.metadata.tables[mappers["resource"].__tablename__],
Base.metadata.tables[mappers["history"].__tablename__]]
with facade.writer_connection() as connection:
Base.metadata.create_all(connection, tables=tables)
# NOTE(sileht): no need to protect the _cache with a lock;
# get_classes cannot be called while the type is in state "creating"
self._cache[resource_type.tablename] = mappers
@retry_on_deadlock
def unmap_and_delete_tables(self, resource_type, facade):
if resource_type.state != "deleting":
raise RuntimeError("unmap_and_delete_tables must be called in "
"state deleting")
mappers = self.get_classes(resource_type)
del self._cache[resource_type.tablename]
tables = [Base.metadata.tables[mappers['resource'].__tablename__],
Base.metadata.tables[mappers['history'].__tablename__]]
# NOTE(sileht): Base.metadata.drop_all doesn't issue CASCADE
# statements correctly, at least on PostgreSQL. We drop the foreign
# keys manually so we don't lock the destination table for too
# long during DROP TABLE. It's safe not to use a transaction,
# since the resource_type table is already cleaned and committed,
# so this code cannot be triggered anymore for this
# resource_type.
with facade.writer_connection() as connection:
for table in tables:
for fk in table.foreign_key_constraints:
try:
self._safe_execute(
connection,
sqlalchemy.schema.DropConstraint(fk))
except exception.DBNonExistentConstraint:
pass
for table in tables:
try:
self._safe_execute(connection,
sqlalchemy.schema.DropTable(table))
except exception.DBNonExistentTable:
pass
# NOTE(sileht): If something goes wrong here, there is no way to
# recover automatically; that's why we expose the state to the
# superuser. We still allow a resource type in an error state to
# be deleted, in case the superuser cleans up the mess manually
# and wants gnocchi to take over and finish the cleanup.
# TODO(sileht): Remove this resource type from the cache on other
# workers, perhaps by using cache expiration?
for table in tables:
Base.metadata.remove(table)
@retry_on_deadlock
def _safe_execute(self, connection, works):
# NOTE(sileht): we create an explicit transaction to ensure MySQL
# takes locks that block other transactions.
trans = connection.begin()
connection.execute(works)
trans.commit()
class SQLAlchemyIndexer(indexer.IndexerDriver):
_RESOURCE_TYPE_MANAGER = ResourceClassMapper()
@staticmethod
def _set_url_database(url, database):
if hasattr(url, "set"):
return url.set(database=database)
else:
url.database = database
return url
@staticmethod
def _set_url_drivername(url, drivername):
if hasattr(url, "set"):
return url.set(drivername=drivername)
else:
url.drivername = drivername
return url
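
    # NOTE: the hasattr(url, "set") checks in the two helpers above exist
    # because sqlalchemy.engine.url.URL became immutable in SQLAlchemy 1.4:
    # URL.set() returns a new URL object, while older releases mutate the
    # attribute in place, so both code paths must be kept.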
@classmethod
def _create_new_database(cls, url):
"""Used by testing to create a new database."""
purl = sqlalchemy_url.make_url(
cls.dress_url(
url))
new_database = purl.database + str(uuid.uuid4()).replace('-', '')
purl = cls._set_url_database(purl, new_database)
new_url = str(purl)
sqlalchemy_utils.create_database(new_url)
return new_url
@classmethod
def dress_url(cls, url):
# If no explicit driver has been set, default to pymysql for MySQL
# and psycopg2 for PostgreSQL.
if url.startswith("mysql://"):
url = sqlalchemy_url.make_url(url)
new_drivername = "mysql+pymysql"
url = cls._set_url_drivername(url, new_drivername)
return str(url)
if url.startswith("postgresql://"):
url = sqlalchemy_url.make_url(url)
new_drivername = "postgresql+psycopg2"
url = cls._set_url_drivername(url, new_drivername)
return str(url)
return url
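
# Example transformations (illustration only, derived from the branches
# above; any other URL is returned unchanged):
#
#     dress_url("mysql://u:p@host/db")       -> "mysql+pymysql://u:p@host/db"
#     dress_url("postgresql://u:p@host/db")  -> "postgresql+psycopg2://u:p@host/db"
#     dress_url("sqlite:///gnocchi.db")      -> "sqlite:///gnocchi.db"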
def __init__(self, conf):
conf.set_override("connection",
self.dress_url(conf.indexer.url),
"database")
self.conf = conf
self.facade = PerInstanceFacade(conf)
def __str__(self):
parsed = urlparse.urlparse(self.conf.indexer.url)
url = urlparse.urlunparse((
parsed.scheme,
"***:***@%s%s" % (parsed.hostname,
":%s" % parsed.port if parsed.port else ""),
parsed.path,
parsed.params,
parsed.query,
parsed.fragment))
return "%s: %s" % (self.__class__.__name__, url)
def disconnect(self):
self.facade.dispose()
def _get_alembic_config(self):
from alembic import config
cfg = config.Config(
"%s/alembic/alembic.ini" % os.path.dirname(__file__))
cfg.set_main_option('sqlalchemy.url',
self.conf.database.connection.replace('%', '%%'))
return cfg
def get_engine(self):
return self.facade.get_engine()
def upgrade(self, nocreate=False):
from alembic import command
from alembic import migration
cfg = self._get_alembic_config()
cfg.conf = self.conf
if nocreate:
command.upgrade(cfg, "head")
else:
with self.facade.writer_connection() as connection:
ctxt = migration.MigrationContext.configure(connection)
current_version = ctxt.get_current_revision()
if current_version is None:
Base.metadata.create_all(connection)
command.stamp(cfg, "head")
else:
command.upgrade(cfg, "head")
try:
with self.facade.writer() as session:
session.add(
ResourceType(
name="generic",
tablename="generic",
state="active",
attributes=resource_type.ResourceTypeAttributes()))
except exception.DBDuplicateEntry:
pass
# NOTE(jd) We can have deadlock errors either here or later in
# map_and_create_tables(). We can't decorate create_resource_type()
# directly or each part might retry later on its own and cause a
# duplicate. And it seems there's no way to use the same session for
# both adding the resource_type in our table and calling
# map_and_create_tables() :-(
@retry_on_deadlock
def _add_resource_type(self, resource_type):
try:
with self.facade.writer() as session:
session.add(resource_type)
except exception.DBDuplicateEntry:
raise indexer.ResourceTypeAlreadyExists(resource_type.name)
def create_resource_type(self, resource_type):
# NOTE(sileht): MySQL has a small length limitation (64 chars) on
# foreign key and index names, so we can't use the resource type name
# as the table name. The longest name we generate is
# fk_<tablename>_h_revision_rh_revision, which adds 26 chars, leaving
# 64 - 26 = 38 for the table name; the "rt_" prefix takes 3, leaving
# 35, and a hex UUID is 32 chars, so it fits.
tablename = "rt_%s" % uuid.uuid4().hex
resource_type = ResourceType(name=resource_type.name,
tablename=tablename,
attributes=resource_type.attributes,
state="creating")
# NOTE(sileht): ensure the driver is able to store the requested
# resource_type
resource_type.to_baseclass()
self._add_resource_type(resource_type)
try:
self._RESOURCE_TYPE_MANAGER.map_and_create_tables(resource_type,
self.facade)
except Exception:
# NOTE(sileht): We fail the DDL, we have no way to automatically
# recover, just set a particular state
self._set_resource_type_state(resource_type.name, "creation_error")
raise
self._set_resource_type_state(resource_type.name, "active")
resource_type.state = "active"
return resource_type
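
# Resource-type lifecycle implied by the methods in this class (sketch):
#
#     creating -> active            (DDL succeeded)
#     creating -> creation_error    (DDL failed; operator may delete)
#     active   -> updating -> active
#     active   -> deleting -> <row deleted>
#     deleting -> deletion_error    (DDL failed; operator may retry delete)
#
# Error states stay visible so an operator can clean up manually and then
# ask gnocchi to finish the job.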
def update_resource_type(self, name, add_attributes=None,
del_attributes=None, update_attributes=None):
if not add_attributes and not del_attributes and not update_attributes:
return
add_attributes = add_attributes or []
del_attributes = del_attributes or []
update_attributes = update_attributes or []
with self.facade.independent_writer() as session:
engine = session.connection()
rt = self._get_resource_type(session, name)
with self.facade.writer_connection() as connection:
ctx = migration.MigrationContext.configure(connection)
op = operations.Operations(ctx)
self.fill_null_attribute_values(engine, name, rt, session,
update_attributes)
self._set_resource_type_state(name, "updating", "active")
for table in [rt.tablename, '%s_history' % rt.tablename]:
with op.batch_alter_table(table) as batch_op:
for attr in del_attributes:
LOG.debug("Dropping column [%s] from resource [%s]"
" and database table [%s]",
attr, name, table)
batch_op.drop_column(attr)
for attr in add_attributes:
LOG.debug("Adding new column [%s] with type [%s], "
"nullable [%s] and default value [%s] "
"in resource [%s] and database "
"table [%s]", attr.name, attr.satype,
not attr.required,
getattr(attr, 'fill', None), name, table)
server_default = attr.for_filling(
engine.dialect)
batch_op.add_column(sqlalchemy.Column(
attr.name, attr.satype,
nullable=not attr.required,
server_default=server_default))
# We have all rows filled now, so we can remove
# the server_default
if server_default is not None:
LOG.debug("Removing default value [%s] from "
"column [%s] of resource [%s] and "
"database table [%s]",
getattr(attr, 'fill', None),
attr.name, name, table)
batch_op.alter_column(
column_name=attr.name,
existing_type=attr.satype,
existing_server_default=server_default,
existing_nullable=not attr.required,
server_default=None)
for attr in update_attributes:
LOG.debug("Updating column [%s] from old values "
"type [%s], nullable [%s], to new values"
" type [%s], nullable [%s] of resource "
"[%s] and database_table [%s]",
attr[1].name, attr[1].satype,
not attr[1].required, attr[0].satype,
not attr[0].required, name, table)
batch_op.alter_column(
column_name=attr[1].name,
existing_type=attr[1].satype,
existing_nullable=not attr[1].required,
type_=attr[0].satype,
nullable=not attr[0].required)
rt.state = "active"
rt.updated_at = utils.utcnow()
rt.attributes.extend(add_attributes)
update_attributes = list(map(lambda a: a[0],
update_attributes))
update_attributes_names = list(map(lambda a: a.name,
update_attributes))
for attr in list(rt.attributes):
if (attr.name in del_attributes or
attr.name in update_attributes_names):
rt.attributes.remove(attr)
rt.attributes.extend(update_attributes)
# FIXME(sileht): yeah, that's weird, but attributes is a custom
# JSON column and 'extend' doesn't trigger a SQL UPDATE; this
# forces the update. I wonder if sqlalchemy provides something
# on the column description side.
LOG.debug("Updating resource [%s] setting attributes as [%s]",
name, list(rt.attributes))
sqlalchemy.orm.attributes.flag_modified(rt, 'attributes')
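# NOTE: sqlalchemy does provide something for the FIXME above: the
# sqlalchemy.ext.mutable extension (e.g. MutableDict/MutableList) makes
# in-place changes to a JSON column mark the attribute dirty
# automatically; flag_modified() is the manual equivalent used here.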
return rt
def fill_null_attribute_values(self, engine, name, rt, session,
update_attributes):
for table in [rt.tablename, '%s_history' % rt.tablename]:
for attr in update_attributes:
if (hasattr(attr[0], 'fill') and
attr[0].fill is not None):
mappers = self._resource_type_to_mappers(
session, name)
if table == rt.tablename:
resource_cls = mappers["resource"]
else:
resource_cls = mappers["history"]
cls_attr = attr[0].name
f = QueryTransformer.build_filter(
engine.dialect.name, resource_cls,
{'=': {cls_attr: None}})
q = session.query(resource_cls).filter(
f).with_for_update()
resources = q.all()
if resources:
LOG.debug("Null resources [%s] to be filled with [%s] "
"for resource-type [%s]", resources,
attr[0].fill, name)
for resource in resources:
if hasattr(resource, attr[0].name):
setattr(resource, attr[0].name,
attr[0].fill)
def get_resource_type(self, name):
with self.facade.independent_reader() as session:
return self._get_resource_type(session, name)
def _get_resource_type(self, session, name):
resource_type = session.query(ResourceType).get(name)
if not resource_type:
raise indexer.NoSuchResourceType(name)
return resource_type
@retry_on_deadlock
def _set_resource_type_state(self, name, state,
expected_previous_state=None):
with self.facade.writer() as session:
q = session.query(ResourceType)
q = q.filter(ResourceType.name == name)
if expected_previous_state is not None:
q = q.filter(ResourceType.state == expected_previous_state)
update = q.update({'state': state})
if update == 0:
if expected_previous_state is not None:
rt = session.query(ResourceType).get(name)
if rt:
raise indexer.UnexpectedResourceTypeState(
name, expected_previous_state, rt.state)
raise indexer.IndexerException(
"Fail to set resource type state of %s to %s" %
(name, state))
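
# The UPDATE above acts as an optimistic compare-and-swap: when
# expected_previous_state is given, the WHERE clause only matches a row
# still in that state, so of two workers racing on the same transition
# exactly one sees a rowcount of 1 and the other falls into the
# UnexpectedResourceTypeState / IndexerException path.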
@staticmethod
def get_resource_type_schema():
return base.RESOURCE_TYPE_SCHEMA_MANAGER
@staticmethod
def get_resource_attributes_schemas():
return [ext.plugin.schema() for ext in ResourceType.RESOURCE_SCHEMAS]
def list_resource_types(self):
with self.facade.independent_reader() as session:
return list(session.query(ResourceType).order_by(
ResourceType.name.asc()).all())
# NOTE(jd) We can have deadlock errors either here or later in
# unmap_and_delete_tables(). We can't decorate delete_resource_type()
# directly or each part might retry later on its own and cause a
# duplicate. And it seems there's no way to use the same session for
# both deleting the resource_type from our table and calling
# unmap_and_delete_tables() :-(
@retry_on_deadlock
def _mark_as_deleting_resource_type(self, name):
try:
with self.facade.writer() as session:
rt = self._get_resource_type(session, name)
if rt.state not in ["active", "deletion_error",
"creation_error", "updating_error"]:
raise indexer.UnexpectedResourceTypeState(
name,
"active/deletion_error/creation_error/updating_error",
rt.state)
session.delete(rt)
# FIXME(sileht): Why do I need to flush here?!
# I want the remove/add in the same transaction!
session.flush()
# NOTE(sileht): delete and recreate to:
# * raise duplicate constraints
# * ensure we do not create a new resource type
# with the same name while we destroy the tables next
rt = ResourceType(name=rt.name,
tablename=rt.tablename,
state="deleting",
attributes=rt.attributes)
session.add(rt)
except exception.DBReferenceError as e:
if (e.constraint in [
'fk_resource_resource_type_name',
'fk_resource_history_resource_type_name',
'fk_rh_resource_type_name']):
raise indexer.ResourceTypeInUse(name)
raise
return rt
@retry_on_deadlock
def _delete_resource_type(self, name):
# Really delete the resource type; no resource can be linked to it,
# because we cannot add a resource to a resource_type that is not in
# the 'active' state.
with self.facade.writer() as session:
resource_type = self._get_resource_type(session, name)
session.delete(resource_type)
def delete_resource_type(self, name):
if name == "generic":
raise indexer.ResourceTypeInUse(name)
rt = self._mark_as_deleting_resource_type(name)
try:
self._RESOURCE_TYPE_MANAGER.unmap_and_delete_tables(
rt, self.facade)
except Exception:
# NOTE(sileht): We fail the DDL, we have no way to automatically
# recover, just set a particular state
self._set_resource_type_state(rt.name, "deletion_error")
raise
self._delete_resource_type(name)
def _resource_type_to_mappers(self, session, name):
resource_type = self._get_resource_type(session, name)
if resource_type.state != "active":
raise indexer.UnexpectedResourceTypeState(
name, "active", resource_type.state)
return self._RESOURCE_TYPE_MANAGER.get_classes(resource_type)
def list_archive_policies(self):
with self.facade.independent_reader() as session:
return list(session.query(ArchivePolicy).all())
def get_archive_policy(self, name):
with self.facade.independent_reader() as session:
return session.query(ArchivePolicy).get(name)
def update_archive_policy(self, name, ap_items, **kwargs):
with self.facade.independent_writer() as session:
ap = session.query(ArchivePolicy).get(name)
if not ap:
raise indexer.NoSuchArchivePolicy(name)
current = sorted(ap.definition,
key=operator.attrgetter('granularity'))
new = sorted(ap_items, key=operator.attrgetter('granularity'))
if len(current) != len(new):
raise indexer.UnsupportedArchivePolicyChange(
name, 'Cannot add or drop granularities')
for c, n in zip(current, new):
if c.granularity != n.granularity:
raise indexer.UnsupportedArchivePolicyChange(
name, '%s granularity interval was changed'
% utils.timespan_total_seconds(c.granularity))
# NOTE(gordc): the ORM doesn't update a JSON column unless a new
# value is assigned
ap.definition = ap_items
if kwargs.get("back_window") is not None:
ap.back_window = kwargs.get("back_window")
return ap
def delete_archive_policy(self, name):
constraints = [
"fk_metric_ap_name_ap_name",
"fk_apr_ap_name_ap_name"]
with self.facade.writer() as session:
try:
if session.query(ArchivePolicy).filter(
ArchivePolicy.name == name).delete() == 0:
raise indexer.NoSuchArchivePolicy(name)
except exception.DBReferenceError as e:
if e.constraint in constraints:
raise indexer.ArchivePolicyInUse(name)
raise
def create_archive_policy(self, archive_policy):
ap = ArchivePolicy(
name=archive_policy.name,
back_window=archive_policy.back_window,
definition=archive_policy.definition,
aggregation_methods=list(archive_policy.aggregation_methods),
)
try:
with self.facade.writer() as session:
session.add(ap)
except exception.DBDuplicateEntry:
raise indexer.ArchivePolicyAlreadyExists(archive_policy.name)
return ap
def list_archive_policy_rules(self):
with self.facade.independent_reader() as session:
return session.query(ArchivePolicyRule).order_by(
ArchivePolicyRule.metric_pattern.desc(),
ArchivePolicyRule.name.asc()
).all()
def get_archive_policy_rule(self, name):
with self.facade.independent_reader() as session:
return session.query(ArchivePolicyRule).get(name)
def delete_archive_policy_rule(self, name):
with self.facade.writer() as session:
if session.query(ArchivePolicyRule).filter(
ArchivePolicyRule.name == name).delete() == 0:
raise indexer.NoSuchArchivePolicyRule(name)
def create_archive_policy_rule(self, name, metric_pattern,
archive_policy_name):
apr = ArchivePolicyRule(
name=name,
archive_policy_name=archive_policy_name,
metric_pattern=metric_pattern
)
try:
with self.facade.writer() as session:
session.add(apr)
except exception.DBReferenceError as e:
if e.constraint == 'fk_apr_ap_name_ap_name':
raise indexer.NoSuchArchivePolicy(archive_policy_name)
raise
except exception.DBDuplicateEntry:
raise indexer.ArchivePolicyRuleAlreadyExists(name)
return apr
def update_archive_policy_rule(self, name, new_name):
apr = self.get_archive_policy_rule(name)
if not apr:
raise indexer.NoSuchArchivePolicyRule(name)
apr.name = new_name
try:
with self.facade.writer() as session:
session.add(apr)
except exception.DBDuplicateEntry:
raise indexer.UnsupportedArchivePolicyRuleChange(
name,
'Archive policy rule %s already exists.'
% new_name)
return apr
@retry_on_deadlock
def create_metric(self, id, creator, archive_policy_name,
name=None, unit=None, resource_id=None):
m = Metric(id=id,
creator=creator,
archive_policy_name=archive_policy_name,
name=name,
unit=unit,
resource_id=resource_id)
try:
with self.facade.writer() as session:
session.add(m)
except exception.DBDuplicateEntry:
raise indexer.NamedMetricAlreadyExists(name)
except exception.DBReferenceError as e:
if (e.constraint ==
'fk_metric_ap_name_ap_name'):
raise indexer.NoSuchArchivePolicy(archive_policy_name)
if e.constraint == 'fk_metric_resource_id_resource_id':
raise indexer.NoSuchResource(resource_id)
raise
return m
@retry_on_deadlock
def list_metrics(self, details=False, status='active',
limit=None, marker=None, sorts=None,
policy_filter=None, resource_policy_filter=None,
attribute_filter=None):
sorts = sorts or []
with self.facade.independent_reader() as session:
q = session.query(Metric).filter(
Metric.status == status)
if details:
q = q.options(sqlalchemy.orm.joinedload('resource'))
if policy_filter or resource_policy_filter or attribute_filter:
engine = session.connection()
if attribute_filter:
# We don't catch the indexer.QueryAttributeError here since
# this filter does not come from direct user input. If the
# caller screws it up, that's the caller's problem: no need to
# convert the exception to another type.
attribute_f = QueryTransformer.build_filter(
engine.dialect.name,
Metric, attribute_filter)
q = q.filter(attribute_f)
if policy_filter:
# Same as above: we don't catch indexer.QueryAttributeError
# here since this filter does not come from direct user
# input; a bad filter is the caller's problem, so there is no
# need to convert the exception to another type.
policy_f = QueryTransformer.build_filter(
engine.dialect.name,
Metric, policy_filter)
else:
policy_f = None
if resource_policy_filter:
q = q.join(Metric.resource)
try:
resource_policy_f = QueryTransformer.build_filter(
engine.dialect.name,
Resource,
resource_policy_filter)
except indexer.QueryAttributeError as e:
# NOTE(jd) The QueryAttributeError does not know about
# resource_type, so convert it
raise indexer.ResourceAttributeError("generic",
e.attribute)
else:
resource_policy_f = None
if policy_filter or resource_policy_filter:
q = q.filter(sqlalchemy.or_(policy_f, resource_policy_f))
sort_keys, sort_dirs = self._build_sort_keys(sorts, ['id'])
if marker:
metric_marker = self.list_metrics(
attribute_filter={"in": {"id": [marker]}})
if metric_marker:
metric_marker = metric_marker[0]
else:
raise indexer.InvalidPagination(
"Invalid marker: `%s'" % marker)
else:
metric_marker = None
try:
q = oslo_db_utils.paginate_query(q, Metric, limit=limit,
sort_keys=sort_keys,
marker=metric_marker,
sort_dirs=sort_dirs)
except ValueError as e:
raise indexer.InvalidPagination(e)
except exception.InvalidSortKey as e:
raise indexer.InvalidPagination(e)
return list(q.all())
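
# Filter dictionaries accepted by list_metrics() follow the
# QueryTransformer grammar used throughout this module, e.g.
# (illustration only; "B" and some_uuid are hypothetical values):
#
#     list_metrics(attribute_filter={"=": {"unit": "B"}})
#     list_metrics(attribute_filter={"in": {"id": [some_uuid]}})
#
# The {"in": {"id": [...]}} form is the one the marker lookup above
# relies on.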
@retry_on_deadlock
def create_resource(self, resource_type, id,
creator, user_id=None, project_id=None,
started_at=None, ended_at=None, metrics=None,
original_resource_id=None,
**kwargs):
if (started_at is not None
and ended_at is not None
and started_at > ended_at):
raise ValueError(
"Start timestamp cannot be after end timestamp")
if original_resource_id is None:
original_resource_id = str(id)
with self.facade.writer() as session:
resource_cls = self._resource_type_to_mappers(
session, resource_type)['resource']
r = resource_cls(
id=id,
original_resource_id=original_resource_id,
type=resource_type,
creator=creator,
user_id=user_id,
project_id=project_id,
started_at=started_at,
ended_at=ended_at,
**kwargs)
session.add(r)
try:
session.flush()
except exception.DBDuplicateEntry:
raise indexer.ResourceAlreadyExists(id)
except exception.DBReferenceError as ex:
raise indexer.ResourceValueError(r.type,
ex.key,
getattr(r, ex.key))
if metrics is not None:
self._set_metrics_for_resource(session, r, metrics)
# NOTE(jd) Force load of metrics :)
r.metrics
return r
@retry_on_deadlock
def update_resource(self, resource_type, resource_id,
ended_at=_marker, metrics=_marker,
append_metrics=False, create_revision=True,
**kwargs):
with self.facade.writer() as session:
data_to_update = kwargs.copy()
data_to_update['ended_at'] = ended_at
data_to_update['metrics'] = metrics
if create_revision:
resource = self.get_resource(
resource_type, resource_id, with_metrics=True)
if not utils.is_resource_revision_needed(
resource, data_to_update):
LOG.info("We thought that a revision for resource "
"[%s] was needed. However, after locking the "
"table and checking it again, we found that it "
"is not needed anymore. This is due to a "
"concurrency issue that might happen. Therefore, "
"no revision is going to be generated at this "
"time.", data_to_update)
create_revision = False
mappers = self._resource_type_to_mappers(session, resource_type)
resource_cls = mappers["resource"]
resource_history_cls = mappers["history"]
try:
# NOTE(sileht): We use FOR UPDATE, which is not Galera-friendly,
# but there is no other way to cleanly patch a resource and
# store its history safely when two concurrent calls are
# made.
q = session.query(resource_cls).filter(
resource_cls.id == resource_id).with_for_update()
r = q.first()
if r is None:
raise indexer.NoSuchResource(resource_id)
if create_revision:
# Build history
rh = resource_history_cls()
for col in sqlalchemy.inspect(resource_cls).columns:
setattr(rh, col.name, getattr(r, col.name))
now = utils.utcnow()
rh.revision_end = now
session.add(rh)
r.revision_start = now
# Update the resource
if ended_at is not _marker:
# NOTE(jd) MySQL does not honor checks. I hate it.
engine = session.connection()
if engine.dialect.name == "mysql":
if r.started_at is not None and ended_at is not None:
if r.started_at > ended_at:
raise indexer.ResourceValueError(
resource_type, "ended_at", ended_at)
r.ended_at = ended_at
if kwargs:
for attribute, value in six.iteritems(kwargs):
if hasattr(r, attribute):
setattr(r, attribute, value)
else:
raise indexer.ResourceAttributeError(
r.type, attribute)
if metrics is not _marker:
if not append_metrics:
session.query(Metric).filter(
Metric.resource_id == resource_id,
Metric.status == 'active').update(
{"resource_id": None})
self._set_metrics_for_resource(session, r, metrics)
session.flush()
except exception.DBConstraintError as e:
if e.check_name in (
"ck_resource_started_before_ended",
"ck_resource_history_started_before_ended"):
raise indexer.ResourceValueError(
resource_type, "ended_at", ended_at)
raise
# NOTE(jd) Force load of metrics now, while we still have a session;
# a lazy load after the object is detached would fail.
r.metrics
return r
@staticmethod
def _set_metrics_for_resource(session, r, metrics):
for name, value in six.iteritems(metrics):
if isinstance(value, uuid.UUID):
try:
update = session.query(Metric).filter(
Metric.id == value,
Metric.status == 'active',
Metric.creator == r.creator,