-
Notifications
You must be signed in to change notification settings - Fork 478
/
vm_lifecycle.py
174 lines (141 loc) · 6.05 KB
/
vm_lifecycle.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
from middlewared.schema import accepts, Bool, Dict, Int, returns
from middlewared.service import CallError, item_method, job, private, Service
from .vm_supervisor import VMSupervisorMixin
class VMService(Service, VMSupervisorMixin):

    @private
    async def lifecycle_action_check(self):
        """Raise `CallError` if this system is not licensed to run VMs."""
        if not await self.middleware.call('vm.license_active'):
            raise CallError('Requested action cannot be performed as system is not licensed to use VMs')

    @item_method
    @accepts(Int('id'), Dict('options', Bool('overcommit', default=False)))
    @returns()
    async def start(self, id, options):
        """
        Start a VM.

        options.overcommit defaults to false, meaning VMs are not allowed to
        start if there is not enough available memory to hold all configured VMs.
        If true, VM starts even if there is not enough memory for all configured VMs.

        Error codes:

            ENOMEM(12): not enough free memory to run the VM without overcommit
        """
        await self.lifecycle_action_check()
        # Supervisor/libvirt connection checks are blocking; run off the event loop.
        await self.middleware.run_in_thread(self._check_setup_connection)

        vm = await self.middleware.call('vm.get_instance', id)
        vm_state = vm['status']['state']
        if vm_state == 'RUNNING':
            raise CallError(f'{vm["name"]!r} is already running')
        if vm_state == 'SUSPENDED':
            raise CallError(f'{vm["name"]!r} VM is suspended and can only be resumed/powered off')

        if vm['bootloader'] not in await self.middleware.call('vm.bootloader_options'):
            raise CallError(f'"{vm["bootloader"]}" is not supported on this platform.')

        # Perhaps we should have a default config option for VMs?
        await self.middleware.call('vm.init_guest_vmemory', vm, options['overcommit'])

        try:
            await self.middleware.run_in_thread(self._start, vm['name'])
        except Exception:
            # Release the memory reserved above unless the VM actually came up
            # despite the error; then re-raise the original failure.
            if (await self.middleware.call('vm.get_instance', id))['status']['state'] != 'RUNNING':
                await self.middleware.call('vm.teardown_guest_vmemory', id)
            raise

        await self.middleware.call('service.reload', 'haproxy')

    @item_method
    @accepts(
        Int('id'),
        Dict(
            'options',
            Bool('force', default=False),
            Bool('force_after_timeout', default=False),
        ),
    )
    @returns()
    @job(lock=lambda args: f'stop_vm_{args[0]}')
    def stop(self, job, id, options):
        """
        Stops a VM.

        For unresponsive guests who have exceeded the `shutdown_timeout` defined by the user and have become
        unresponsive, they required to be powered down using `vm.poweroff`. `vm.stop` is only going to send a
        shutdown signal to the guest and wait the desired `shutdown_timeout` value before tearing down guest vmemory.

        `force_after_timeout` when supplied, it will initiate poweroff for the VM forcing it to exit if it has
        not already stopped within the specified `shutdown_timeout`.
        """
        self._check_setup_connection()
        vm_data = self.middleware.call_sync('vm.get_instance', id)
        if options['force']:
            self._poweroff(vm_data['name'])
        else:
            # Graceful path: signal shutdown and wait up to `shutdown_timeout`,
            # then optionally force-poweroff anything still RUNNING.
            self._stop(vm_data['name'], vm_data['shutdown_timeout'])
            if options['force_after_timeout'] and self.middleware.call_sync('vm.status', id)['state'] == 'RUNNING':
                self._poweroff(vm_data['name'])

    @item_method
    @accepts(Int('id'))
    @returns()
    def poweroff(self, id):
        """
        Poweroff a VM.
        """
        self._check_setup_connection()
        vm_data = self.middleware.call_sync('vm.get_instance', id)
        self._poweroff(vm_data['name'])

    @item_method
    @accepts(Int('id'))
    @returns()
    @job(lock=lambda args: f'restart_vm_{args[0]}')
    def restart(self, job, id):
        """
        Restart a VM.
        """
        self._check_setup_connection()
        vm = self.middleware.call_sync('vm.get_instance', id)

        # Stop is itself a job; wait for it and surface its failure before starting.
        stop_job = self.middleware.call_sync('vm.stop', id, {'force_after_timeout': True})
        stop_job.wait_sync()
        if stop_job.error:
            raise CallError(f'Failed to stop {vm["name"]!r} vm: {stop_job.error}')

        self.middleware.call_sync('vm.start', id, {'overcommit': True})

    @item_method
    @accepts(Int('id'))
    @returns()
    def suspend(self, id):
        """
        Suspend `id` VM.
        """
        self._check_setup_connection()
        vm = self.middleware.call_sync('vm.get_instance', id)
        self._suspend(vm['name'])

    @item_method
    @accepts(Int('id'))
    @returns()
    def resume(self, id):
        """
        Resume suspended `id` VM.
        """
        self._check_setup_connection()
        vm = self.middleware.call_sync('vm.get_instance', id)
        self._resume(vm['name'])

    @private
    def suspend_vms(self, vm_ids):
        """Suspend each RUNNING VM in `vm_ids`; per-VM failures are logged, not raised."""
        vms = {vm['id']: vm for vm in self.middleware.call_sync('vm.query')}
        for vm_id in filter(
            # `vms.get(vm_id, {})` (not `vms.get(vm_id)`) so an unknown/stale id
            # is skipped instead of raising AttributeError on None.
            lambda vm_id: vms.get(vm_id, {}).get('status', {}).get('state') == 'RUNNING',
            map(int, vm_ids)
        ):
            try:
                self.suspend(vm_id)
            except Exception:
                self.logger.error('Failed to suspend %r vm', vms[vm_id]['name'], exc_info=True)

    @private
    def resume_suspended_vms(self, vm_ids):
        """Resume each SUSPENDED VM in `vm_ids`; per-VM failures are logged, not raised."""
        vms = {vm['id']: vm for vm in self.middleware.call_sync('vm.query')}
        for vm_id in filter(
            # Same defensive default as `suspend_vms` for ids missing from `vm.query`.
            lambda vm_id: vms.get(vm_id, {}).get('status', {}).get('state') == 'SUSPENDED',
            map(int, vm_ids)
        ):
            try:
                self.resume(vm_id)
            except Exception:
                self.logger.error('Failed to resume %r vm', vms[vm_id]['name'], exc_info=True)
async def _event_vms(middleware, event_type, args):
vm = await middleware.call('vm.query', [['id', '=', args['id']]])
if not vm or vm[0]['status']['state'] != 'STOPPED' or args.get('state') != 'SHUTOFF':
return
middleware.create_task(middleware.call('vm.teardown_guest_vmemory', args['id']))
async def setup(middleware):
    """Plugin entry point: subscribe the shutdown handler to VM events."""
    middleware.event_subscribe('vm.query', _event_vms)