Commit

Merge branch '2018.3' into 'fluorine'
Conflicts:
  - tests/integration/modules/test_archive.py
rallytime committed Sep 4, 2018
2 parents 52279f2 + 93df5c4 commit 0c8b412
Showing 31 changed files with 452 additions and 63 deletions.
73 changes: 73 additions & 0 deletions .ci/kitchen-windows2016-py2
@@ -0,0 +1,73 @@
pipeline {
agent { label 'kitchen-slave' }
options {
timestamps()
ansiColor('xterm')
}
environment {
SALT_KITCHEN_PLATFORMS = "/var/jenkins/workspace/platforms.yml"
SALT_KITCHEN_DRIVER = "/var/jenkins/workspace/driver.yml"
PATH = "/usr/local/rbenv/shims/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:/root/bin"
RBENV_VERSION = "2.4.2"
TEST_SUITE = "py2"
TEST_PLATFORM = "windows-2016"
PY_COLORS = 1
}
stages {
stage('github-pending') {
steps {
githubNotify credentialsId: 'test-jenkins-credentials',
description: "running ${TEST_SUITE}-${TEST_PLATFORM}...",
status: 'PENDING',
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
}
}
stage('setup') {
steps {
sh 'bundle install --with ec2 windows --without opennebula docker'
}
}
stage('run kitchen') {
steps {
script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
sshagent(credentials: ['jenkins-testing-ssh-key']) {
sh 'ssh-add ~/.ssh/jenkins-testing.pem'
sh 'bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM || bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM'
sh 'bundle exec kitchen verify $TEST_SUITE-$TEST_PLATFORM'
}
}}
}
post {
always {
script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
sshagent(credentials: ['jenkins-testing-ssh-key']) {
sh 'ssh-add ~/.ssh/jenkins-testing.pem'
sh 'bundle exec kitchen destroy $TEST_SUITE-$TEST_PLATFORM'
}
}}
archiveArtifacts artifacts: 'artifacts/xml-unittests-output/*.xml'
archiveArtifacts artifacts: 'artifacts/logs/minion'
archiveArtifacts artifacts: 'artifacts/logs/salt-runtests.log'
}
}
}
}
post {
always {
junit 'artifacts/xml-unittests-output/*.xml'
cleanWs()
}
success {
githubNotify credentialsId: 'test-jenkins-credentials',
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has passed",
status: 'SUCCESS',
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
}
failure {
githubNotify credentialsId: 'test-jenkins-credentials',
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has failed",
status: 'FAILURE',
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
}
}
}
73 changes: 73 additions & 0 deletions .ci/kitchen-windows2016-py3
@@ -0,0 +1,73 @@
pipeline {
agent { label 'kitchen-slave' }
options {
timestamps()
ansiColor('xterm')
}
environment {
SALT_KITCHEN_PLATFORMS = "/var/jenkins/workspace/platforms.yml"
SALT_KITCHEN_DRIVER = "/var/jenkins/workspace/driver.yml"
PATH = "/usr/local/rbenv/shims/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:/root/bin"
RBENV_VERSION = "2.4.2"
TEST_SUITE = "py3"
TEST_PLATFORM = "windows-2016"
PY_COLORS = 1
}
stages {
stage('github-pending') {
steps {
githubNotify credentialsId: 'test-jenkins-credentials',
description: "running ${TEST_SUITE}-${TEST_PLATFORM}...",
status: 'PENDING',
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
}
}
stage('setup') {
steps {
sh 'bundle install --with ec2 windows --without opennebula docker'
}
}
stage('run kitchen') {
steps {
script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
sshagent(credentials: ['jenkins-testing-ssh-key']) {
sh 'ssh-add ~/.ssh/jenkins-testing.pem'
sh 'bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM || bundle exec kitchen converge $TEST_SUITE-$TEST_PLATFORM'
sh 'bundle exec kitchen verify $TEST_SUITE-$TEST_PLATFORM'
}
}}
}
post {
always {
script { withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'AWS_ACCESS_KEY_ID', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
sshagent(credentials: ['jenkins-testing-ssh-key']) {
sh 'ssh-add ~/.ssh/jenkins-testing.pem'
sh 'bundle exec kitchen destroy $TEST_SUITE-$TEST_PLATFORM'
}
}}
archiveArtifacts artifacts: 'artifacts/xml-unittests-output/*.xml'
archiveArtifacts artifacts: 'artifacts/logs/minion'
archiveArtifacts artifacts: 'artifacts/logs/salt-runtests.log'
}
}
}
}
post {
always {
junit 'artifacts/xml-unittests-output/*.xml'
cleanWs()
}
success {
githubNotify credentialsId: 'test-jenkins-credentials',
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has passed",
status: 'SUCCESS',
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
}
failure {
githubNotify credentialsId: 'test-jenkins-credentials',
description: "The ${TEST_SUITE}-${TEST_PLATFORM} job has failed",
status: 'FAILURE',
context: "jenkins/pr/${TEST_SUITE}-${TEST_PLATFORM}"
}
}
}
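The two new Windows 2016 kitchen pipelines above are identical apart from TEST_SUITE (py2 vs py3). In both, the 'run kitchen' stage chains kitchen converge twice with ||, so converge gets exactly one retry before the stage is allowed to fail (presumably to absorb transient provisioning errors on fresh instances), after which kitchen verify runs; kitchen destroy always runs in the post block. A minimal Python sketch of that retry-once idiom — the command names come from the pipeline, the helper itself is illustrative:

import subprocess


def kitchen(action, suite, platform, retries=0):
    """Run `bundle exec kitchen <action> <suite>-<platform>`, retrying on a
    non-zero exit up to `retries` extra times (mirrors `converge || converge`)."""
    cmd = ['bundle', 'exec', 'kitchen', action, '{0}-{1}'.format(suite, platform)]
    for _ in range(retries + 1):
        if subprocess.call(cmd) == 0:
            return True
    return False


if __name__ == '__main__':
    # Converge gets one retry, verify does not, matching the pipeline above.
    if kitchen('converge', 'py2', 'windows-2016', retries=1):
        kitchen('verify', 'py2', 'windows-2016')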
34 changes: 30 additions & 4 deletions .ci/lint
@@ -20,22 +20,48 @@ pipeline {
}
stage('setup') {
steps {
sh 'eval "$(pyenv init -)"; pyenv install 2.7.14 || echo "We already have this python."; pyenv local 2.7.14; pyenv shell 2.7.14'
sh 'eval "$(pyenv init -)"; pip install tox'
sh '''
eval "$(pyenv init -)"
pyenv --version
pyenv install --skip-existing 2.7.14
pyenv local 2.7.14
pyenv shell 2.7.14
python --version
pip install tox
'''
}
}
stage('linting') {
failFast false
parallel {
stage('salt linting') {
steps {
sh 'eval "$(pyenv init - --no-rehash)"; tox -e pylint-salt $(find salt/ -name "*.py" -exec git diff --name-only "origin/$CHANGE_TARGET" "origin/$BRANCH_NAME" setup.py {} +) | tee pylint-report.xml'
sh '''
eval "$(pyenv init - --no-rehash)"
_FILES="$(find salt/ -name "*.py" -exec git diff --name-only "origin/$CHANGE_TARGET" "origin/$BRANCH_NAME" {} +)"
_FILES="$_FILES $(git diff --name-only "origin/$CHANGE_TARGET" "origin/$BRANCH_NAME" setup.py)"
if [[ -z ${_FILES} ]]; then
echo "No pylint run, no changes found in the files"
echo "empty" pylint-reports.xml
else
tox -e pylint-salt ${_FILES} | tee pylint-report.xml
fi
'''
archiveArtifacts artifacts: 'pylint-report.xml'
}
}
stage('test linting') {
steps {
sh 'eval "$(pyenv init - --no-rehash)"; tox -e pylint-tests $(find tests/ -name "*.py" -exec git diff --name-only "origin/$CHANGE_TARGET" "origin/$BRANCH_NAME" {} +) | tee pylint-report-tests.xml'
sh '''
eval "$(pyenv init - --no-rehash)"
_FILES="$(find tests/ -name "*.py" -exec git diff --name-only "origin/$CHANGE_TARGET" "origin/$BRANCH_NAME" setup.py {} +)"
if [[ -z ${_FILES} ]]; then
echo "No pylint run, no changes found in the files"
touch pylint-report-tests.xml
else
tox -e pylint-tests ${_FILES} | tee pylint-report-tests.xml
fi
'''
archiveArtifacts artifacts: 'pylint-report-tests.xml'
}
}
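The reworked lint stages collect the Python files that actually changed relative to the PR's target branch (git diff --name-only against origin/$CHANGE_TARGET) and skip pylint entirely when that list is empty; otherwise the matching tox environment runs and its output is teed into the report that gets archived. A Python sketch of the same lint-only-what-changed technique — the branch handling and tox env name follow the pipeline, the helper functions themselves are illustrative:

import subprocess


def changed_python_files(target_branch, source_branch, root='salt/'):
    # Same idea as the `find ... -exec git diff --name-only ... {} +` step:
    # ask git which files under `root` differ between the two branches.
    out = subprocess.check_output(
        ['git', 'diff', '--name-only',
         'origin/{0}'.format(target_branch),
         'origin/{0}'.format(source_branch),
         '--', root],
        universal_newlines=True)
    return [path for path in out.splitlines() if path.endswith('.py')]


def lint_changed(target_branch, source_branch):
    files = changed_python_files(target_branch, source_branch)
    if not files:
        print('No pylint run, no changes found in the files')
        return 0
    # The pipeline additionally tees this output into pylint-report.xml.
    return subprocess.call(['tox', '-e', 'pylint-salt'] + files)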
2 changes: 1 addition & 1 deletion conf/master
@@ -125,7 +125,7 @@
# The master can take a while to start up when lspci and/or dmidecode is used
# to populate the grains for the master. Enable if you want to see GPU hardware
# data for your master.
# enable_gpu_grains: True
# enable_gpu_grains: False

# The master maintains a job cache. While this is a great addition, it can be
# a burden on the master for larger deployments (over 5000 minions).
5 changes: 5 additions & 0 deletions conf/minion
@@ -152,6 +152,11 @@
# Set the directory used to hold unix sockets.
#sock_dir: /var/run/salt/minion

# The minion can take a while to start up when lspci and/or dmidecode is used
# to populate the grains for the minion. Set this to False if you do not need
# GPU hardware grains for your minion.
# enable_gpu_grains: True

# Set the default outputter used by the salt-call command. The default is
# "nested".
#output: nested
2 changes: 1 addition & 1 deletion conf/suse/master
@@ -127,7 +127,7 @@ syndic_user: salt
# The master can take a while to start up when lspci and/or dmidecode is used
# to populate the grains for the master. Enable if you want to see GPU hardware
# data for your master.
# enable_gpu_grains: True
# enable_gpu_grains: False

# The master maintains a job cache. While this is a great addition, it can be
# a burden on the master for larger deployments (over 5000 minions).
4 changes: 2 additions & 2 deletions doc/man/salt.7
@@ -5256,7 +5256,7 @@ sock_dir: /var/run/salt/master
.UNINDENT
.SS \fBenable_gpu_grains\fP
.sp
Default: \fBFalse\fP
Default: \fBTrue\fP
.sp
Enable GPU hardware data for your master. Be aware that the master can
take a while to start up when lspci and/or dmidecode is used to populate the
@@ -15225,7 +15225,7 @@ and \fBmine_functions\fP\&.
# The master can take a while to start up when lspci and/or dmidecode is used
# to populate the grains for the master. Enable if you want to see GPU hardware
# data for your master.
# enable_gpu_grains: True
# enable_gpu_grains: False

# The master maintains a job cache. While this is a great addition, it can be
# a burden on the master for larger deployments (over 5000 minions).
4 changes: 4 additions & 0 deletions doc/ref/configuration/master.rst
@@ -478,6 +478,10 @@ Enable GPU hardware data for your master. Be aware that the master can
take a while to start up when lspci and/or dmidecode is used to populate the
grains for the master.

.. code-block:: yaml

    enable_gpu_grains: True

.. conf_master:: job_cache

``job_cache``
16 changes: 16 additions & 0 deletions doc/ref/configuration/minion.rst
@@ -936,6 +936,22 @@ The directory where Unix sockets will be kept.
    sock_dir: /var/run/salt/minion

.. conf_minion:: enable_gpu_grains

``enable_gpu_grains``
---------------------

Default: ``True``

Enable GPU hardware data for your minion. Be aware that the minion can
take a while to start up when lspci and/or dmidecode is used to populate the
grains for the minion, so this can be set to ``False`` if you do not need these
grains.

.. code-block:: yaml

    enable_gpu_grains: False

.. conf_minion:: outputter_dirs

``outputter_dirs``
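The enable_gpu_grains minion option documented above defaults to True and exists so that minions which do not need GPU grains can skip the slow lspci/dmidecode probing at startup. A minimal sketch — not Salt's actual grains code — of how a grains function can honor an option like this:

def gpu_grains(opts, probe_hardware):
    # Illustrative only: bail out before any hardware probing when the
    # minion config disables GPU grains; the default remains True.
    if not opts.get('enable_gpu_grains', True):
        return {}
    gpus = probe_hardware()   # e.g. parse `lspci` / `dmidecode` output (can be slow)
    return {'num_gpus': len(gpus), 'gpus': gpus}


# With the option set to False the probe callable is never invoked:
print(gpu_grains({'enable_gpu_grains': False}, probe_hardware=lambda: []))  # -> {}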
21 changes: 11 additions & 10 deletions salt/cache/etcd_cache.py
@@ -50,6 +50,7 @@
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
import base64
try:
import etcd
HAS_ETCD = True
@@ -112,7 +113,7 @@ def _init_client():
log.info("etcd: Setting up client with params: %r", etcd_kwargs)
client = etcd.Client(**etcd_kwargs)
try:
client.get(path_prefix)
client.read(path_prefix)
except etcd.EtcdKeyNotFound:
log.info("etcd: Creating dir %r", path_prefix)
client.write(path_prefix, None, dir=True)
@@ -126,7 +127,7 @@ def store(bank, key, data):
etcd_key = '{0}/{1}/{2}'.format(path_prefix, bank, key)
try:
value = __context__['serial'].dumps(data)
client.set(etcd_key, value)
client.write(etcd_key, base64.b64encode(value))
except Exception as exc:
raise SaltCacheError(
'There was an error writing the key, {0}: {1}'.format(etcd_key, exc)
@@ -140,10 +141,10 @@ def fetch(bank, key):
_init_client()
etcd_key = '{0}/{1}/{2}'.format(path_prefix, bank, key)
try:
value = client.get(etcd_key).value
if value is None:
return {}
return __context__['serial'].loads(value)
value = client.read(etcd_key).value
return __context__['serial'].loads(base64.b64decode(value))
except etcd.EtcdKeyNotFound:
return {}
except Exception as exc:
raise SaltCacheError(
'There was an error reading the key, {0}: {1}'.format(
@@ -162,7 +163,7 @@ def flush(bank, key=None):
else:
etcd_key = '{0}/{1}/{2}'.format(path_prefix, bank, key)
try:
client.get(etcd_key)
client.read(etcd_key)
except etcd.EtcdKeyNotFound:
return # nothing to flush
try:
@@ -184,7 +185,7 @@ def _walk(r):
return [r.key.split('/', 3)[3]]

keys = []
for c in client.get(r.key).children:
for c in client.read(r.key).children:
keys.extend(_walk(c))
return keys

@@ -197,7 +198,7 @@ def ls(bank):
_init_client()
path = '{0}/{1}'.format(path_prefix, bank)
try:
return _walk(client.get(path))
return _walk(client.read(path))
except Exception as exc:
raise SaltCacheError(
'There was an error getting the key "{0}": {1}'.format(
@@ -213,7 +214,7 @@ def contains(bank, key):
_init_client()
etcd_key = '{0}/{1}/{2}'.format(path_prefix, bank, key)
try:
r = client.get(etcd_key)
r = client.read(etcd_key)
# return True for keys, not dirs
return r.dir is False
except etcd.EtcdKeyNotFound:
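In the etcd cache driver every client.get call becomes client.read, and serialized payloads are now base64-encoded before being written (and decoded again in fetch). The encoding is presumably there because the serializer emits arbitrary bytes while etcd v2 stores values as text, so a text-safe round trip avoids corrupting cached data. A small sketch of that round trip, using msgpack directly as a stand-in for Salt's __context__['serial'] serializer:

import base64

import msgpack  # stand-in for Salt's __context__['serial'] serializer

data = {'grains': {'os': 'Gentoo'}, 'num_cpus': 4}

packed = msgpack.dumps(data)                    # raw bytes, not safe as an etcd string value
stored = base64.b64encode(packed)               # what store() now writes to etcd
restored = msgpack.loads(base64.b64decode(stored), raw=False)

assert restored == data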
4 changes: 4 additions & 0 deletions salt/config/__init__.py
@@ -446,6 +446,9 @@ def _gather_buffer_space():
# Tell the loader to attempt to import *.pyx cython files if cython is available
'cython_enable': bool,

# Whether or not to load grains for the GPU
'enable_gpu_grains': bool,

# Tell the loader to attempt to import *.zip archives
'enable_zip_modules': bool,

@@ -1395,6 +1398,7 @@ def _gather_buffer_space():
'test': False,
'ext_job_cache': '',
'cython_enable': False,
'enable_gpu_grains': True,
'enable_zip_modules': False,
'state_verbose': True,
'state_output': 'full',
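With 'enable_gpu_grains': True added to the defaults, GPU grains keep loading unless a config file explicitly turns them off. A hedged sketch of checking the effective value on a minion — it assumes a Salt checkout is importable, and the config path is illustrative:

import salt.config

# minion_config() layers the shipped defaults under whatever the file sets, so
# a minion that never mentions enable_gpu_grains sees True here.
opts = salt.config.minion_config('/etc/salt/minion')
print(opts.get('enable_gpu_grains'))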
9 changes: 9 additions & 0 deletions salt/fileserver/__init__.py
@@ -514,6 +514,15 @@ def envs(self, back=None, sources=False):
return ret
return list(ret)

def file_envs(self, load=None):
'''
Return environments for all backends for requests from fileclient
'''
if load is None:
load = {}
load.pop('cmd', None)
return self.envs(**load)

def init(self, back=None):
'''
Initialize the backend, only do so if the fs supports an init function
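The new file_envs method is a thin wrapper for fileclient-style requests: it tolerates a missing payload, strips the 'cmd' routing key, and forwards whatever is left to envs() as keyword arguments. A standalone sketch of that dispatch pattern — the payload keys shown are illustrative, not taken from this diff:

def forward_to_envs(envs_func, load=None):
    # Mirror Fileserver.file_envs: drop the routing key so the remaining
    # payload can be expanded into keyword arguments for envs().
    if load is None:
        load = {}
    load.pop('cmd', None)          # envs() has no 'cmd' parameter
    return envs_func(**load)


# A fileclient-style payload; 'back' mirrors the envs() signature shown above.
print(forward_to_envs(lambda back=None, sources=False: ['base'],
                      {'cmd': '_file_envs', 'back': None}))   # -> ['base']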
