/
models.py
1958 lines (1608 loc) · 68.7 KB
/
models.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# coding: utf-8
# Copyright (C) 2010 Oregon State University et al.
# Copyright (C) 2010 Greek Research and Technology Network
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
import binascii
import cPickle
from datetime import datetime, timedelta
from hashlib import sha1
import random
import re
import string
import sys
import time
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.contrib.contenttypes.generic import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites import models as sites_app
from django.contrib.sites.management import create_default_site
from django.core.validators import RegexValidator, MinValueValidator
from django.db import models
from django.db.models import BooleanField, Q, Sum
from django.db.models.query import QuerySet
from django.db.models.signals import post_save, post_syncdb
from django.db.utils import DatabaseError
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext_lazy as _
from django_fields.fields import PickleField
from ganeti_web.logs import register_log_actions
from object_log.models import LogItem
log_action = LogItem.objects.log_action
from object_permissions.registration import register
from muddle_users import signals as muddle_user_signals
from ganeti_web import constants, management, permissions
from ganeti_web.fields import (PatchedEncryptedCharField,
PreciseDateTimeField, SumIf)
from ganeti_web.util import client
from ganeti_web.util.client import GanetiApiError, REPLACE_DISK_AUTO
from south.signals import post_migrate
if settings.VNC_PROXY:
from ganeti_web.util.vncdaemon.vapclient import (request_forwarding,
request_ssh)
class QuerySetManager(models.Manager):
    """
    Manager that proxies attribute access to a model-defined QuerySet.

    Define a ``QuerySet`` class inside your model and put chainable helper
    methods directly on it; this manager exposes them transparently so the
    helpers can be chained straight off ``objects``:

        class Foo(models.Model):
            enabled = fields.BooleanField()
            dirty = fields.BooleanField()

            class QuerySet:
                def active(self):
                    return self.filter(enabled=True)

                def clean(self):
                    return self.filter(dirty=False)

        Foo.objects.active().clean()
    """

    def __getattr__(self, name, *args):
        # Refuse to proxy under/dunder attributes: Django probes several of
        # them on managers, and delegating those lookups triggers unwanted
        # recursion.
        if name.startswith('_'):
            raise AttributeError
        queryset = self.get_query_set()
        return getattr(queryset, name, *args)

    def get_query_set(self):
        # Instantiate the custom queryset declared on the model itself.
        return self.model.QuerySet(self.model)
def generate_random_password(length=12):
    """
    Generate a random alphanumeric password of the given length.

    Uses the operating system's CSPRNG (``random.SystemRandom``) instead of
    the default Mersenne Twister, and draws characters *with* replacement —
    the previous ``random.sample`` approach never repeated a character,
    which slightly reduced entropy. ``string.ascii_letters`` replaces the
    Python-2-only ``string.letters`` name.

    @param length - number of characters to generate (default 12)
    @return string of random ASCII letters and digits
    """
    alphabet = string.ascii_letters + string.digits
    rng = random.SystemRandom()
    return "".join(rng.choice(alphabet) for _ in range(length))
# Job statuses after which a job will never change again.
FINISHED_JOBS = 'success', 'unknown', 'error'

# Process-local cache of RAPI clients, keyed by credential hash.
RAPI_CACHE = {}
# Maps cluster id -> hash currently stored in RAPI_CACHE, so a stale client
# can be evicted when a cluster's credentials change.
RAPI_CACHE_HASHES = {}
def get_rapi(hash, cluster):
    """
    Retrieves the cached Ganeti RAPI client for a given hash. The Hash is
    derived from the connection credentials required for a cluster. If the
    client is not yet cached, it will be created and added.

    If a hash does not correspond to any cluster then Cluster.DoesNotExist will
    be raised.

    @param hash - hash of the cluster's connection credentials (may be stale;
    the fresh hash is re-read from the database below).
    @param cluster - either a cluster object, or ID of object. This is used
    for resolving the cluster if the client is not already found. The id is
    used rather than the hash, because the hash is mutable.

    @return a Ganeti RAPI client.
    """
    # fast path: fresh hash already cached
    if hash in RAPI_CACHE:
        return RAPI_CACHE[hash]
    # always look up the instance, even if we were given a Cluster instance
    # it ensures we are retrieving the latest credentials. This helps avoid
    # stale credentials. Retrieve only the values because we don't actually
    # need another Cluster instance here.
    if isinstance(cluster, (Cluster,)):
        cluster = cluster.id
    # single-row values_list; raises if the id matches no cluster
    (credentials,) = Cluster.objects.filter(id=cluster) \
        .values_list('hash', 'hostname', 'port', 'username', 'password')
    # NOTE: rebinds the ``hash`` parameter to the *fresh* hash from the db
    hash, host, port, user, password = credentials
    user = user or None
    # decrypt password
    # XXX django-fields only stores str, convert to None if needed
    password = Cluster.decrypt_password(password) if password else None
    password = None if password in ('None', '') else password
    # now that we know hash is fresh, check cache again. The original hash
    # could have been stale. This avoids constructing a new RAPI that already
    # exists.
    if hash in RAPI_CACHE:
        return RAPI_CACHE[hash]
    # delete any old version of the client that was cached.
    if cluster in RAPI_CACHE_HASHES:
        del RAPI_CACHE[RAPI_CACHE_HASHES[cluster]]
    # Set connect timeout in settings.py so that you do not learn patience.
    rapi = client.GanetiRapiClient(host, port, user, password,
                                   timeout=settings.RAPI_CONNECT_TIMEOUT)
    RAPI_CACHE[hash] = rapi
    RAPI_CACHE_HASHES[cluster] = hash
    return rapi
def clear_rapi_cache():
    """Drop every cached RAPI client and its recorded credential hash."""
    for cache in (RAPI_CACHE, RAPI_CACHE_HASHES):
        cache.clear()
# Matches one OpenSSH public key line: algorithm, base64 blob, then comment.
ssh_public_key_re = re.compile(
    r'^ssh-(rsa|dsa|dss) [A-Z0-9+/=]+ .+$', re.IGNORECASE)
ssh_public_key_error = _("Enter a valid RSA or DSA SSH key.")
# Form/model field validator; raises with code "invalid" on mismatch.
validate_sshkey = RegexValidator(ssh_public_key_re, ssh_public_key_error,
                                 "invalid")
class CachedClusterObject(models.Model):
    """
    Parent class for objects which belong to Ganeti but have cached data in
    GWM.

    The main point of this class is to permit saving lots of data from Ganeti
    so that we don't have to look things up constantly. The Ganeti RAPI is
    slow, so avoiding it as much as possible is a good idea.

    This class provides transparent caching for all of the data that it
    serializes; no explicit cache accesses are required.

    This model is abstract and may not be instantiated on its own.
    """
    # pickled copy of the most recent RAPI payload for this object
    serialized_info = models.TextField(default="", editable=False)
    # modification time reported by Ganeti for the underlying object
    mtime = PreciseDateTimeField(null=True, editable=False)
    # local timestamp of the last successful cache refresh
    cached = PreciseDateTimeField(null=True, editable=False)
    # when True, the next load_info() bypasses the lazy cache entirely
    ignore_cache = models.BooleanField(default=False)

    # transient class-level defaults (not persisted as model fields)
    last_job_id = None
    __info = None
    error = None
    ctime = None
    deleted = False

    class Meta:
        abstract = True

    def save(self, *args, **kwargs):
        """
        overridden to ensure info is serialized prior to save
        """
        if not self.serialized_info:
            self.serialized_info = cPickle.dumps(self.__info)
        super(CachedClusterObject, self).save(*args, **kwargs)

    def __init__(self, *args, **kwargs):
        super(CachedClusterObject, self).__init__(*args, **kwargs)
        # kick off the lazy refresh machinery on every instantiation
        self.load_info()

    @property
    def info(self):
        """
        A dictionary of metadata for this object.

        This is a proxy for the ``serialized_info`` field. Reads from this
        property lazily access the field, and writes to this property will be
        lazily saved.

        Writes to this property do *not* force serialization.
        """
        if self.__info is None:
            if self.serialized_info:
                self.__info = cPickle.loads(str(self.serialized_info))
        return self.__info

    def _set_info(self, value):
        # Setter half of ``info``. Clearing serialized_info defers
        # re-serialization to the next save() (see save() above).
        self.__info = value
        if value is not None:
            self.parse_info()
            self.serialized_info = ""
    info = info.setter(_set_info)

    def load_info(self):
        """
        Load cached info retrieved from the ganeti cluster. This function
        includes a lazy cache mechanism that uses a timer to decide whether or
        not to refresh the cached information with new information from the
        ganeti cluster.

        This will ignore the cache when self.ignore_cache is True
        """
        # LAZY_CACHE_REFRESH is passed as timedelta's 4th positional arg
        # (milliseconds).
        epsilon = timedelta(0, 0, 0, settings.LAZY_CACHE_REFRESH)
        if self.id:
            if (self.ignore_cache
                    or self.cached is None
                    or datetime.now() > self.cached + epsilon):
                self.refresh()
            elif self.info:
                self.parse_transient_info()
            else:
                self.error = 'No Cached Info'

    def parse_info(self):
        """
        Parse all of the attached metadata, and attach it to this object.
        """
        self.parse_transient_info()
        data = self.parse_persistent_info(self.info)
        for k in data:
            setattr(self, k, data[k])

    def refresh(self):
        """
        Retrieve and parse info from the ganeti cluster. If successfully
        retrieved and parsed, this method will also call save().

        If communication with Ganeti fails, an error will be stored in
        ``error``.
        """
        job_data = self.check_job_status()
        for k, v in job_data.items():
            setattr(self, k, v)
        # XXX this try/except is far too big; see if we can pare it down.
        try:
            info_ = self._refresh()
            if info_:
                if info_['mtime']:
                    mtime = datetime.fromtimestamp(info_['mtime'])
                else:
                    mtime = None
                self.cached = datetime.now()
            else:
                # no info retrieved, use current mtime
                mtime = self.mtime
            if self.id and (self.mtime is None or mtime > self.mtime):
                # there was an update. Set info and save the object
                self.info = info_
                self.save()
            else:
                # There was no change on the server. Only update the cache
                # time. This bypasses the info serialization mechanism and
                # uses a smaller query.
                if job_data:
                    self.__class__.objects.filter(pk=self.id) \
                        .update(cached=self.cached, **job_data)
                elif self.id is not None:
                    self.__class__.objects.filter(pk=self.id) \
                        .update(cached=self.cached)
        except GanetiApiError, e:
            # Use regular expressions to match the quoted message
            # given by GanetiApiError. '\\1' is a group substitution
            # which places the first group '('|\")' in it's place.
            comp = re.compile("('|\")(?P<msg>.*)\\1")
            err = comp.search(str(e))
            # Any search that has 0 results will just return None.
            # That is why we must check for err before proceeding.
            if err:
                msg = err.groupdict()['msg']
                self.error = msg
            else:
                msg = str(e)
                self.error = str(e)
            GanetiError.store_error(msg, obj=self, code=e.code)
        else:
            # successful round-trip: clear any previously recorded error
            if self.error:
                self.error = None
                GanetiError.objects.clear_errors(obj=self)

    def _refresh(self):
        """
        Fetch raw data from the Ganeti cluster.

        This must be implemented by children of this class.
        """
        raise NotImplementedError

    def check_job_status(self):
        # Poll every pending Job attached to this object; returns a dict of
        # field updates to apply (possibly empty).
        if not self.last_job_id:
            return {}
        ct = ContentType.objects.get_for_model(self)
        qs = Job.objects.filter(content_type=ct, object_id=self.pk)
        jobs = qs.order_by("job_id")
        updates = {}
        for job in jobs:
            status = 'unknown'
            op = None
            try:
                data = self.rapi.GetJobStatus(job.job_id)
                status = data['status']
                op = data['ops'][-1]['OP_ID']
            except GanetiApiError:
                # RAPI unreachable or job unknown; leave status = 'unknown'
                pass
            if status in ('success', 'error'):
                for k, v in Job.parse_persistent_info(data).items():
                    setattr(job, k, v)
            if status == 'unknown':
                job.status = "unknown"
                job.ignore_cache = False
            if status in ('success', 'error', 'unknown'):
                _updates = self._complete_job(self.cluster_id,
                                              self.hostname, op, status)
                # XXX if the delete flag is set in updates then delete this
                # model this happens here because _complete_job cannot delete
                # this model
                if _updates:
                    if 'deleted' in _updates:
                        # Delete ourselves. Also delete the job that caused us
                        # to delete ourselves; see #8439 for "fun" details.
                        # Order matters; the job's deletion cascades over us.
                        # Revisit that when we finally nuke all this caching
                        # bullshit.
                        self.delete()
                        job.delete()
                    else:
                        updates.update(_updates)
        # we only care about the very last job for resetting the cache flags
        # NOTE(review): if ``jobs`` is empty, ``status`` is unbound here and
        # this raises NameError before ``not jobs`` is evaluated -- reachable
        # when last_job_id is set but no Job rows exist; confirm and fix.
        if status in ('success', 'error', 'unknown') or not jobs:
            updates['ignore_cache'] = False
            updates['last_job'] = None
        return updates

    @classmethod
    def _complete_job(cls, cluster_id, hostname, op, status):
        """
        Process a completed job. This method will make any updates to related
        classes (like deleting an instance template) and return any data that
        should be updated. This is a class method so that this processing can
        be done without a full instance.

        @returns dict of updated values
        """
        # no-op by default; subclasses override (see VirtualMachine)
        pass

    def parse_transient_info(self):
        """
        Parse properties from cached info that is stored on the class but not
        in the database.

        These properties will be loaded every time the object is instantiated.
        Properties stored on the class cannot be search efficiently via the
        django query api.

        This method is specific to the child object.
        """
        info_ = self.info
        # XXX ganeti 2.1 ctime is always None
        # XXX this means that we could nuke the conditionals!
        if info_['ctime'] is not None:
            self.ctime = datetime.fromtimestamp(info_['ctime'])

    @classmethod
    def parse_persistent_info(cls, info):
        """
        Parse properties from cached info that are stored in the database.

        These properties will be searchable by the django query api.

        This method is specific to the child object.
        """
        # mtime is sometimes None if object has never been modified
        if info['mtime'] is None:
            return {'mtime': None}
        return {'mtime': datetime.fromtimestamp(info['mtime'])}
class JobManager(models.Manager):
    """
    Custom manager for the Ganeti Job model.
    """

    def create(self, **kwargs):
        """Create and persist a Job with its lazy cache disabled."""
        new_job = Job(ignore_cache=True, **kwargs)
        new_job.save(force_insert=True)
        return new_job
class Job(CachedClusterObject):
"""
model representing a job being run on a ganeti Cluster. This includes
operations such as creating or delting a virtual machine.
Jobs are a special type of CachedClusterObject. Job's run once then become
immutable. The lazy cache is modified to become permanent once a complete
status (success/error) has been detected. The cache can be disabled by
settning ignore_cache=True.
"""
job_id = models.IntegerField()
content_type = models.ForeignKey(ContentType, related_name="+")
object_id = models.IntegerField()
obj = GenericForeignKey('content_type', 'object_id')
cluster = models.ForeignKey('Cluster', related_name='jobs', editable=False)
cluster_hash = models.CharField(max_length=40, editable=False)
finished = models.DateTimeField(null=True, blank=True)
status = models.CharField(max_length=10)
op = models.CharField(max_length=50)
objects = JobManager()
def save(self, *args, **kwargs):
"""
sets the cluster_hash for newly saved instances
"""
if self.id is None or self.cluster_hash == '':
self.cluster_hash = self.cluster.hash
super(Job, self).save(*args, **kwargs)
@models.permalink
def get_absolute_url(self):
job = '%s/job/(?P<job_id>\d+)' % self.cluster
return 'ganeti_web.views.jobs.detail', (), {'job': job}
@property
def rapi(self):
return get_rapi(self.cluster_hash, self.cluster_id)
def _refresh(self):
return self.rapi.GetJobStatus(self.job_id)
def load_info(self):
"""
Load info for class. This will load from ganeti if ignore_cache==True,
otherwise this will always load from the cache.
"""
if self.id and (self.ignore_cache or self.info is None):
try:
self.refresh()
except GanetiApiError, e:
# if the Job has been archived then we don't know whether it
# was successful or not. Mark it as unknown.
if e.code == 404:
self.status = 'unknown'
self.save()
else:
# its possible the cluster or crednetials are bad. fail
# silently
pass
def refresh(self):
self.info = self._refresh()
self.save()
@classmethod
def parse_persistent_info(cls, info):
"""
Parse status and turn off cache bypass flag if job has finished
"""
data = {'status': info['status'],
'op': info['ops'][-1]['OP_ID']}
if data['status'] in ('error', 'success'):
data['ignore_cache'] = False
if info['end_ts']:
data['finished'] = cls.parse_end_timestamp(info)
return data
@staticmethod
def parse_end_timestamp(info):
sec, micro = info['end_ts']
return datetime.fromtimestamp(sec + (micro / 1000000.0))
def parse_transient_info(self):
pass
@property
def current_operation(self):
"""
Jobs may consist of multiple commands/operations. This helper
method will return the operation that is currently running or errored
out, or the last operation if all operations have completed
@returns raw name of the current operation
"""
info = self.info
index = 0
for i in range(len(info['opstatus'])):
if info['opstatus'][i] != 'success':
index = i
break
return info['ops'][index]['OP_ID']
@property
def operation(self):
"""
Returns the last operation, which is generally the primary operation.
"""
return self.info['ops'][-1]['OP_ID']
def __repr__(self):
return "<Job %d (%d), status %r>" % (self.id, self.job_id,
self.status)
__unicode__ = __repr__
class VirtualMachine(CachedClusterObject):
    """
    The VirtualMachine (VM) model represents VMs within a Ganeti cluster.

    The majority of properties are a cache for data stored in the cluster.
    All data retrieved via the RAPI is stored in VirtualMachine.info, and
    serialized automatically into VirtualMachine.serialized_info.

    Attributes that need to be searchable should be stored as model fields.
    All other attributes will be stored within VirtualMachine.info.

    This object uses a lazy update mechanism on instantiation. If the cached
    info from the Ganeti cluster has expired, it will trigger an update. This
    allows the cache to function in the absence of a periodic update mechanism
    such as Cron, Celery, or Threads.

    XXX Serialized_info can possibly be changed to a CharField if an upper
    limit can be determined. (Later Date, if it will optimize db)
    """
    cluster = models.ForeignKey('Cluster', related_name='virtual_machines',
                                editable=False, default=0)
    hostname = models.CharField(max_length=128, db_index=True)
    owner = models.ForeignKey('ClusterUser', related_name='virtual_machines',
                              null=True, blank=True,
                              on_delete=models.SET_NULL)
    virtual_cpus = models.IntegerField(default=-1)
    disk_size = models.IntegerField(default=-1)
    ram = models.IntegerField(default=-1)
    minram = models.IntegerField(default=-1)
    cluster_hash = models.CharField(max_length=40, editable=False)
    operating_system = models.CharField(max_length=128)
    status = models.CharField(max_length=14)

    # node relations
    primary_node = models.ForeignKey('Node', related_name='primary_vms',
                                     null=True, blank=True)
    secondary_node = models.ForeignKey('Node', related_name='secondary_vms',
                                       null=True, blank=True)

    # The last job reference indicates that there is at least one pending job
    # for this virtual machine. There may be more than one job, and that can
    # never be prevented. This just indicates that job(s) are pending and the
    # job related code should be run (status, cleanup, etc).
    last_job = models.ForeignKey('Job', related_name="+", null=True,
                                 blank=True)

    # deleted flag indicates a VM is being deleted, but the job has not
    # completed yet. VMs that have pending_delete are still displayed in lists
    # and counted in quotas, but only so status can be checked.
    pending_delete = models.BooleanField(default=False)
    deleted = False

    # Template temporarily stores parameters used to create this virtual
    # machine. This template is used to recreate the values entered into the
    # form.
    template = models.ForeignKey("VirtualMachineTemplate",
                                 related_name="instances", null=True,
                                 blank=True)

    class Meta:
        ordering = ["hostname"]
        unique_together = (("cluster", "hostname"),)

    def __unicode__(self):
        return self.hostname

    def save(self, *args, **kwargs):
        """
        sets the cluster_hash for newly saved instances and synchronizes the
        owner tag on the cluster with the owner recorded in webmgr
        """
        if self.id is None:
            self.cluster_hash = self.cluster.hash
        info_ = self.info
        if info_:
            found = False
            remove = []
            if self.cluster.username:
                for tag in info_['tags']:
                    # Update owner Tag. Make sure the tag is set to the owner
                    # that is set in webmgr.
                    if tag.startswith(constants.OWNER_TAG):
                        id = int(tag[len(constants.OWNER_TAG):])
                        # Since there is no 'update tag' delete old tag and
                        # replace with tag containing correct owner id.
                        if id == self.owner_id:
                            found = True
                        else:
                            remove.append(tag)
                if remove:
                    self.rapi.DeleteInstanceTags(self.hostname, remove)
                    for tag in remove:
                        info_['tags'].remove(tag)
                if self.owner_id and not found:
                    tag = '%s%s' % (constants.OWNER_TAG, self.owner_id)
                    self.rapi.AddInstanceTags(self.hostname, [tag])
                    self.info['tags'].append(tag)
        super(VirtualMachine, self).save(*args, **kwargs)

    @models.permalink
    def get_absolute_url(self):
        """
        Return absolute url for this instance.
        """
        return 'instance-detail', (), {'cluster_slug': self.cluster.slug,
                                       'instance': self.hostname}

    @property
    def rapi(self):
        # RAPI client for the owning cluster (cached per credential hash)
        return get_rapi(self.cluster_hash, self.cluster_id)

    @property
    def is_running(self):
        return self.status == 'running'

    @classmethod
    def parse_persistent_info(cls, info):
        """
        Loads all values from cached info, included persistent properties that
        are stored in the database
        """
        data = super(VirtualMachine, cls).parse_persistent_info(info)
        # Parse resource properties
        data['ram'] = info['beparams']['memory']
        data['virtual_cpus'] = info['beparams']['vcpus']
        # Sum up the size of each disk used by the VM
        data['disk_size'] = sum(info['disk.sizes'])
        data['operating_system'] = info['os']
        data['status'] = info['status']
        primary = info['pnode']
        if primary:
            try:
                data['primary_node'] = Node.objects.get(hostname=primary)
            except Node.DoesNotExist:
                # node is not created yet. fail silently
                data['primary_node'] = None
        else:
            data['primary_node'] = None
        secondary = info['snodes']
        if len(secondary):
            secondary = secondary[0]
            try:
                data['secondary_node'] = Node.objects.get(hostname=secondary)
            except Node.DoesNotExist:
                # node is not created yet. fail silently
                data['secondary_node'] = None
        else:
            data['secondary_node'] = None
        return data

    @classmethod
    def _complete_job(cls, cluster_id, hostname, op, status):
        """
        if the cache bypass is enabled then check the status of the last job
        when the job is complete we can reenable the cache.

        @returns - dictionary of values that were updates
        """
        if status == 'unknown':
            # unknown status, the job was archived before it's final status
            # was polled. Impossible to tell what happened. Clear the job
            # so it is no longer polled.
            #
            # XXX This VM might be added by the CLI and be in an invalid
            # pending_delete state. clearing pending_delete prevents this
            # but will result in "missing" vms in some cases.
            return dict(pending_delete=False)
        base = VirtualMachine.objects.filter(cluster=cluster_id,
                                             hostname=hostname)
        if op == 'OP_INSTANCE_REMOVE':
            if status == 'success':
                # XXX can't actually delete here since it would cause a
                # recursive loop
                return dict(deleted=True)
        elif op == 'OP_INSTANCE_CREATE' and status == 'success':
            # XXX must update before deleting the template to maintain
            # referential integrity. as a consequence return no other
            # updates.
            base.update(template=None)
            VirtualMachineTemplate.objects \
                .filter(instances__hostname=hostname,
                        instances__cluster=cluster_id) \
                .delete()
            return dict(template=None)
        return

    def _refresh(self):
        # XXX if delete is pending then no need to refresh this object.
        if self.pending_delete or self.template_id:
            return None
        return self.rapi.GetInstance(self.hostname)

    def _register_job(self, job_id):
        """
        Record a freshly submitted RAPI job for this VM: create the Job row,
        point last_job at it, and flip ignore_cache so the next refresh polls
        the job instead of trusting stale cached data.

        Shared bookkeeping previously duplicated across shutdown/startup/
        reboot/migrate/replace_disks.

        @param job_id - numeric job id returned by the RAPI call
        @returns the new Job instance
        """
        job = Job.objects.create(job_id=job_id, obj=self,
                                 cluster_id=self.cluster_id)
        self.last_job = job
        VirtualMachine.objects.filter(pk=self.id) \
            .update(last_job=job, ignore_cache=True)
        return job

    def shutdown(self, timeout=None):
        """Submit a shutdown job for this VM and return the Job."""
        if timeout is None:
            id = self.rapi.ShutdownInstance(self.hostname)
        else:
            id = self.rapi.ShutdownInstance(self.hostname, timeout=timeout)
        return self._register_job(id)

    def startup(self):
        """Submit a startup job for this VM and return the Job."""
        return self._register_job(self.rapi.StartupInstance(self.hostname))

    def reboot(self):
        """Submit a reboot job for this VM and return the Job."""
        return self._register_job(self.rapi.RebootInstance(self.hostname))

    def migrate(self, mode='live', cleanup=False):
        """
        Migrates this VirtualMachine to another node.

        Only works if the disk type is DRDB.

        @param mode: live or non-live
        @param cleanup: clean up a previous migration, default is False
        """
        id = self.rapi.MigrateInstance(self.hostname, mode, cleanup)
        return self._register_job(id)

    def replace_disks(self, mode=REPLACE_DISK_AUTO, disks=None, node=None,
                      iallocator=None):
        """Submit a replace-disks job for this VM and return the Job."""
        id = self.rapi.ReplaceInstanceDisks(self.hostname, disks, mode, node,
                                            iallocator)
        return self._register_job(id)

    def setup_ssh_forwarding(self, sport=0):
        """
        Poke a proxy to start SSH forwarding.

        Returns None if no proxy is configured, or if there was an error
        contacting the proxy.
        """
        command = self.rapi.GetInstanceConsole(self.hostname)["command"]
        if settings.VNC_PROXY:
            proxy_server = settings.VNC_PROXY.split(":")
            password = generate_random_password()
            sport = request_ssh(proxy_server, sport, self.info["pnode"],
                                self.info["network_port"], password, command)
            if sport:
                return proxy_server[0], sport, password

    def setup_vnc_forwarding(self, sport=0, tls=False):
        """
        Obtain VNC forwarding information, optionally configuring a proxy.

        Returns None if a proxy is configured and there was an error
        contacting the proxy.
        """
        password = ''
        info_ = self.info
        port = info_['network_port']
        node = info_['pnode']
        # use proxy for VNC connection
        if settings.VNC_PROXY:
            proxy_server = settings.VNC_PROXY.split(":")
            password = generate_random_password()
            result = request_forwarding(proxy_server, node, port, password,
                                        sport=sport, tls=tls)
            if result:
                return proxy_server[0], int(result), password
        else:
            return node, port, password

    def __repr__(self):
        return "<VirtualMachine: '%s'>" % self.hostname
class Node(CachedClusterObject):
"""
The Node model represents nodes within a Ganeti cluster.
The majority of properties are a cache for data stored in the cluster.
All data retrieved via the RAPI is stored in VirtualMachine.info, and
serialized automatically into VirtualMachine.serialized_info.
Attributes that need to be searchable should be stored as model fields.
All other attributes will be stored within VirtualMachine.info.
"""
ROLE_CHOICES = ((k, v) for k, v in constants.NODE_ROLE_MAP.items())
cluster = models.ForeignKey('Cluster', related_name='nodes')
hostname = models.CharField(max_length=128, unique=True)
cluster_hash = models.CharField(max_length=40, editable=False)
offline = models.BooleanField()
role = models.CharField(max_length=1, choices=ROLE_CHOICES)
ram_total = models.IntegerField(default=-1)
ram_free = models.IntegerField(default=-1)
disk_total = models.IntegerField(default=-1)
disk_free = models.IntegerField(default=-1)
cpus = models.IntegerField(null=True, blank=True)
# The last job reference indicates that there is at least one pending job
# for this virtual machine. There may be more than one job, and that can
# never be prevented. This just indicates that job(s) are pending and the
# job related code should be run (status, cleanup, etc).
last_job = models.ForeignKey('Job', related_name="+", null=True,
blank=True)
def __unicode__(self):
return self.hostname
def save(self, *args, **kwargs):
"""
sets the cluster_hash for newly saved instances
"""
if self.id is None:
self.cluster_hash = self.cluster.hash
super(Node, self).save(*args, **kwargs)
@models.permalink
def get_absolute_url(self):
"""
Return absolute url for this node.
"""
return 'node-detail', (), {'cluster_slug': self.cluster.slug,
'host': self.hostname}
def _refresh(self):
""" returns node info from the ganeti server """
return self.rapi.GetNode(self.hostname)
@property
def rapi(self):
return get_rapi(self.cluster_hash, self.cluster_id)
@classmethod
def parse_persistent_info(cls, info):
"""
Loads all values from cached info, included persistent properties that
are stored in the database
"""
data = super(Node, cls).parse_persistent_info(info)
# Parse resource properties
data['ram_total'] = info.get("mtotal") or 0
data['ram_free'] = info.get("mfree") or 0
data['disk_total'] = info.get("dtotal") or 0
data['disk_free'] = info.get("dfree") or 0
data['cpus'] = info.get("csockets")
data['offline'] = info['offline']
data['role'] = info['role']
return data
@property
def ram(self):
""" returns dict of free and total ram """
values = VirtualMachine.objects \
.filter(Q(primary_node=self) | Q(secondary_node=self)) \
.filter(status='running') \
.exclude(ram=-1).order_by() \
.aggregate(used=Sum('ram'))
total = self.ram_total
used = total - self.ram_free
allocated = values.get("used") or 0
free = total - allocated if allocated >= 0 and total >= 0 else -1
return {
'total': total,
'free': free,
'allocated': allocated,
'used': used,
}
@property
def disk(self):
""" returns dict of free and total disk space """
values = VirtualMachine.objects \
.filter(Q(primary_node=self) | Q(secondary_node=self)) \
.exclude(disk_size=-1).order_by() \
.aggregate(used=Sum('disk_size'))
total = self.disk_total
used = total - self.disk_free
allocated = values.get("used") or 0
free = total - allocated if allocated >= 0 and total >= 0 else -1