Skip to content

Commit

Permalink
Merge pull request #267 from clarkperkins/feature/support-nvme
Browse files — browse the repository at this point in the history
NVMe support for m5 / c5 instances
  • Loading branch information
clarkperkins committed Dec 5, 2017
2 parents 1023e2a + 146b2b0 commit ce54802
Show file tree
Hide file tree
Showing 9 changed files with 163 additions and 48 deletions.
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@
'celery[redis]~=3.1.0',
'Django~=1.9.0',
'djangorestframework~=3.4.0',
'salt~=2016.3.0,!=2016.3.3',
'salt~=2016.3.0,!=2016.3.3,!=2016.3.6,!=2016.3.7,!=2016.3.8',

# Used, but still hold an upper bound on the version
'boto~=2.32',
Expand Down
12 changes: 8 additions & 4 deletions stackdio/api/cloud/apps.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,9 @@
# limitations under the License.
#

from __future__ import print_function, unicode_literals
from __future__ import unicode_literals

import io
import json
import logging
import os
Expand All @@ -32,12 +33,13 @@

def load_initial_data():
    """Load the initial cloud-object fixture data shipped with the app.

    Reads ``fixtures/cloud_objects.json`` (located relative to this
    module) and returns the parsed JSON structure.

    :returns: the deserialized fixture data — per the caller's usage,
        presumably a list of dicts each carrying a ``model`` key
        (TODO confirm against the fixture file), or ``None`` if the
        file literally contains ``null``.
    """
    cloud_dir = os.path.dirname(os.path.abspath(__file__))
    fixtures_json = os.path.join(cloud_dir, 'fixtures', 'cloud_objects.json')

    # io.open in text mode gives consistent unicode reading on both
    # Python 2 and Python 3 (this module imports unicode_literals).
    with io.open(fixtures_json, 'rt') as f:
        initial_data = json.load(f)

    if initial_data is None:
        # Log rather than print so the message lands in the app logs.
        logger.info('Unable to load cloud objects')

    return initial_data

Expand All @@ -51,15 +53,17 @@ def load_cloud_objects(app_config, verbosity=2, interactive=True,
initial_data = load_initial_data()

for model in initial_data:
logger.info("Attempting to load data for {}...".format(model['model']))
# Grab the model class, but don't fail if we can't find it
try:
model_cls = apps.get_model('cloud', model['model'])
except LookupError:
print('Failed to load model class: {}'.format(model['model']))
logger.warning('Failed to load model class: {}'.format(model['model']))
continue

# If we can't migrate this model, don't do anything & go on to the next
if not router.allow_migrate_model(using, model_cls):
logger.info("Skipping data load for {}".format(model['model']))
continue

to_create = []
Expand Down
105 changes: 105 additions & 0 deletions stackdio/api/cloud/fixtures/cloud_objects.json
Original file line number Diff line number Diff line change
Expand Up @@ -882,6 +882,111 @@
"title": "d2.8xlarge",
"description": "Dense Storage D2 Eight Extra Large Instance",
"provider_id": 1
},
{
"instance_id": "m5.large",
"slug": "m5.large",
"title": "m5.large",
"description": "General Purpose M5 Large Instance",
"provider_id": 1
},
{
"instance_id": "m5.xlarge",
"slug": "m5.xlarge",
"title": "m5.xlarge",
"description": "General Purpose M5 Extra Large Instance",
"provider_id": 1
},
{
"instance_id": "m5.2xlarge",
"slug": "m5.2xlarge",
"title": "m5.2xlarge",
"description": "General Purpose M5 Double Extra Large Instance",
"provider_id": 1
},
{
"instance_id": "m5.4xlarge",
"slug": "m5.4xlarge",
"title": "m5.4xlarge",
"description": "General Purpose M5 Quadruple Extra Large Instance",
"provider_id": 1
},
{
"instance_id": "m5.12xlarge",
"slug": "m5.12xlarge",
"title": "m5.12xlarge",
"description": "General Purpose M5 Twelve Extra Large Instance",
"provider_id": 1
},
{
"instance_id": "m5.24xlarge",
"slug": "m5.24xlarge",
"title": "m5.24xlarge",
"description": "General Purpose M5 Twenty-Four Extra Large Instance",
"provider_id": 1
},
{
"instance_id": "c5.large",
"slug": "c5.large",
"title": "c5.large",
"description": "Compute Optimized C5 Large Instance",
"provider_id": 1
},
{
"instance_id": "c5.xlarge",
"slug": "c5.xlarge",
"title": "c5.xlarge",
"description": "Compute Optimized C5 Extra Large Instance",
"provider_id": 1
},
{
"instance_id": "c5.2xlarge",
"slug": "c5.2xlarge",
"title": "c5.2xlarge",
"description": "Compute Optimized C5 Double Extra Large Instance",
"provider_id": 1
},
{
"instance_id": "c5.4xlarge",
"slug": "c5.4xlarge",
"title": "c5.4xlarge",
"description": "Compute Optimized C5 Quadruple Extra Large Instance",
"provider_id": 1
},
{
"instance_id": "c5.9xlarge",
"slug": "c5.9xlarge",
"title": "c5.9xlarge",
"description": "Compute Optimized C5 Nine Extra Large Instance",
"provider_id": 1
},
{
"instance_id": "c5.18xlarge",
"slug": "c5.18xlarge",
"title": "c5.18xlarge",
"description": "Compute Optimized C5 Eighteen Extra Large Instance",
"provider_id": 1
},
{
"instance_id": "p3.2xlarge",
"slug": "p3.2xlarge",
"title": "p3.2xlarge",
"description": "Accelerated Computing P3 Double Extra Large Instance",
"provider_id": 1
},
{
"instance_id": "p3.8xlarge",
"slug": "p3.8xlarge",
"title": "p3.8xlarge",
"description": "Accelerated Computing P3 Eight Extra Large Instance",
"provider_id": 1
},
{
"instance_id": "p3.16xlarge",
"slug": "p3.16xlarge",
"title": "p3.16xlarge",
"description": "Accelerated Computing P3 Sixteen Extra Large Instance",
"provider_id": 1
}
]
}
Expand Down
2 changes: 1 addition & 1 deletion stackdio/api/stacks/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -173,7 +173,7 @@ def terminate_hosts(stack, cloud_map, hostnames):

map_data = mapper.map_data()

missing_hosts = map_data.get('create', [])
missing_hosts = set(map_data.get('create', []))

terminate_list = set(hostnames)

Expand Down
16 changes: 13 additions & 3 deletions stackdio/salt/core_states/_modules/mount_ext.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,12 +54,22 @@ def find_ebs_device(device):
if device.startswith('/dev/sd'):
if os.path.exists('/dev/xvda'):
new_device_letter = device[7]
else:
# Some systems start with /dev/xvde instead of /dev/xvda, so we need to add 4
device_pattern = '/dev/xvd{}{}'
elif os.path.exists('/dev/xvde'):
# Some systems start with /dev/xvde instead of /dev/xvda,
# so we need to add 4 to the current letter
new_device_letter = chr(ord(device[7]) + 4)
device_pattern = '/dev/xvd{}{}'
elif os.path.exists('/dev/nvme0n1'):
# Some devices use the NVMe naming scheme, so we need to convert the letter to a number
new_device_letter = ord(device[7]) - ord('a')
device_pattern = '/dev/nvme{}n1{}'
else:
return None

device_partition = device[8:]

new_device = '/dev/xvd{0}{1}'.format(new_device_letter, device_partition)
new_device = device_pattern.format(new_device_letter, device_partition)
if os.path.exists(new_device):
return new_device

Expand Down
17 changes: 0 additions & 17 deletions stackdio/salt/core_states/core/hostname.sls
Original file line number Diff line number Diff line change
@@ -1,11 +1,3 @@
{% set hostname_service = salt['grains.filter_by']({
'Debian': salt['grains.filter_by']({
'12': 'hostname',
'14': 'hostname',
'16': 'networking',
}, 'osmajorrelease'),
'RedHat': 'network',
}) %}

# Edit the appropriate hostname file
hostname_file:
Expand Down Expand Up @@ -52,12 +44,3 @@ set_hostname:
- unless: "hostname | grep {{ grains['fqdn'] }}"
- require:
- file: hostname_file

# Restart the appropriate service for this change to take effect
hostname-svc:
service.running:
- name: {{ hostname_service }}
- watch:
- cmd: set_hostname
- file: stack_hostnames
- file: hostname_file
52 changes: 31 additions & 21 deletions stackdio/salt/utils/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@

from __future__ import absolute_import, print_function, unicode_literals

import io
import logging
import os
import re
Expand Down Expand Up @@ -147,8 +148,8 @@ def process_sls_result(sls_result, err_file):
continue

# Write to the error log
with open(err_file, 'a') as f:
f.write(yaml.safe_dump(stage_result))
with io.open(err_file, 'at') as f:
yaml.safe_dump(stage_result, f)

return ret

Expand Down Expand Up @@ -191,43 +192,52 @@ def process_times(sls_result):
# Now we want the sum since these are NOT running in parallel.
time_map[info_dict['module']] = current + max_time

for module, time in sorted(time_map.items()):
logger.info('Module {0} took {1} total seconds to run'.format(module, time / 1000))
for smodule, time in sorted(time_map.items()):
logger.info('Module {0} took {1} total seconds to run'.format(smodule, time / 1000))


def process_orchestrate_result(result, err_file):
ret = {
'failed': False,
'succeeded_sls': {},
'failed_sls': {},
'cancelled_sls': {},
}

if 'data' not in result:
with io.open(err_file, 'at') as f:
f.write('Orchestration result is missing information:\n\n')
f.write(six.text_type(result))
ret['failed'] = True
return ret

# The actual info we want is nested in the 'data' key
result = result['data']

opts = salt.config.client_config(settings.STACKDIO_CONFIG.salt_master_config)

if not isinstance(result, dict):
with open(err_file, 'a') as f:
with io.open(err_file, 'at') as f:
f.write('Orchestration failed. See below.\n\n')
f.write(six.text_type(result))
return True, set()
ret['failed'] = True
return ret

if opts['id'] not in result:
with open(err_file, 'a') as f:
with io.open(err_file, 'at') as f:
f.write('Orchestration result is missing information:\n\n')
f.write(six.text_type(result))
return True, set()
ret['failed'] = True
return ret

result = result[opts['id']]

if not isinstance(result, dict):
with open(err_file, 'a') as f:
with io.open(err_file, 'at') as f:
f.write(six.text_type(result))

raise StackdioSaltClientException(result)

ret = {
'failed': False,
'succeeded_sls': {},
'failed_sls': {},
'cancelled_sls': {},
}

for sls, sls_result in sorted(result.items(), key=lambda x: x[1]['__run_num__']):
sls_dict = state_to_dict(sls)

Expand All @@ -250,7 +260,7 @@ def process_orchestrate_result(result, err_file):
sls_ret_dict['succeeded_hosts'] = set(sls_result['changes'].get('ret', {}).keys())

# Write a message to the error log
with open(err_file, 'a') as f:
with io.open(err_file, 'at') as f:
if sls_ret_dict['succeeded_hosts']:
f.write(
'Stage {} succeeded and returned {} host info object{}\n\n'.format(
Expand All @@ -268,7 +278,7 @@ def process_orchestrate_result(result, err_file):
ret['succeeded_sls'][sls_dict['name']] = sls_ret_dict
else:
# We failed - print a message to the log.
with open(err_file, 'a') as f:
with io.open(err_file, 'at') as f:
if 'changes' in sls_result and 'ret' in sls_result['changes']:
f.write(
'Stage {} failed and returned {} host info object{}\n\n'.format(
Expand Down Expand Up @@ -347,7 +357,7 @@ def _set_up_logging(self):

# "touch" the log file and symlink it to the latest
for l in (self.log_file, self.err_file):
with open(l, 'w') as _:
with io.open(l, 'w') as _:
pass
self._symlink(self.log_file, log_symlink)
self._symlink(self.err_file, err_symlink)
Expand Down Expand Up @@ -406,9 +416,9 @@ def run(self, target, function, **kwargs):
# We failed.
ret['failed'] = True
ret['failed_hosts'].add(host)
with open(self.err_file, 'a') as f:
with io.open(self.err_file, 'at') as f:
f.write('Errors on host {}:\n'.format(host))
f.write(yaml.safe_dump(host_errors))
yaml.safe_dump(host_errors, f)
f.write('\n')
else:
# We succeeded!
Expand Down
2 changes: 1 addition & 1 deletion stackdio/server/settings/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -265,7 +265,7 @@
},
'formatters': {
'default': {
'format': '%(asctime)s %(levelname)s %(name)s - %(message)s',
'format': '%(asctime)s %(levelname)s [%(threadName)s] %(name)s - %(message)s',
}
},
'handlers': {
Expand Down
3 changes: 3 additions & 0 deletions stackdio/server/settings/development.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,9 @@

DATABASES['default']['CONN_MAX_AGE'] = 0

# Add in the secure proxy header
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'ssl')

##
# Add in additional applications
##
Expand Down

0 comments on commit ce54802

Please sign in to comment.