Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix publisher leak #34683

Merged
merged 11 commits into from
Jul 20, 2016
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions conf/master
Original file line number Diff line number Diff line change
Expand Up @@ -243,6 +243,14 @@
# ZMQ high-water-mark for EventPublisher pub socket
#event_publisher_pub_hwm: 10000

# The master may allocate memory per-event and not
# reclaim it. Use ipc_write_buffer to set a
# high-water mark for the amount of memory used to
# buffer messages.
# Value: in bytes. Set to 'dynamic' to have Salt
# select a value for you. Default is disabled.
# ipc_write_buffer: 'dynamic'


##### Security settings #####
Expand Down
48 changes: 47 additions & 1 deletion salt/config/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,13 @@
import salt.utils.sdb
from salt.utils.locales import sdecode

try:
import psutil
HAS_PSUTIL = True
except ImportError:
HAS_PSUTIL = False
import salt.grains.core

log = logging.getLogger(__name__)

_DFLT_LOG_DATEFMT = '%H:%M:%S'
Expand All @@ -60,6 +67,31 @@
_DFLT_IPC_MODE = 'ipc'
_MASTER_TRIES = 1


def _gather_buffer_space():
    '''
    Gather the total system memory and calculate the IPC
    buffer space to reserve.

    :return: Buffer space in bytes: the larger of 5% of total
             system memory and 10 MiB.
    '''
    if HAS_PSUTIL:
        # Oh good, we have psutil. This will be quick.
        total_mem = psutil.virtual_memory().total
    else:
        # We need to load up some grains. This will be slow.
        # NOTE(review): grains report 'mem_total' in MiB, not bytes —
        # verify this matches the units of the psutil path above.
        os_data = salt.grains.core.os_data()
        grains = salt.grains.core._memdata(os_data)
        total_mem = grains['mem_total']
    # Return the higher number between 5% of the system memory and 10MiB
    return max(total_mem * 0.05, 10 << 20)

# For the time being this will be a fixed calculation
# TODO: Allow user configuration
# Compute once: the non-psutil fallback loads grains, which is slow.
_buffer_space = _gather_buffer_space()
_DFLT_IPC_WBUFFER = _buffer_space * .5
# TODO: Reserved for future use
_DFLT_IPC_RBUFFER = _buffer_space * .5

FLO_DIR = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'daemons', 'flo')
Expand Down Expand Up @@ -444,6 +476,10 @@
# ZMQ HWM for EventPublisher pub socket
'event_publisher_pub_hwm': int,

# IPC buffer size
# Refs https://github.com/saltstack/salt/issues/34215
'ipc_write_buffer': int,

# The number of MWorker processes for a master to startup. This number needs to scale up as
# the number of connected minions increases.
'worker_threads': int,
Expand Down Expand Up @@ -933,6 +969,7 @@
'mine_return_job': False,
'mine_interval': 60,
'ipc_mode': _DFLT_IPC_MODE,
'ipc_write_buffer': _DFLT_IPC_WBUFFER,
'ipv6': False,
'file_buffer_size': 262144,
'tcp_pub_port': 4510,
Expand Down Expand Up @@ -1176,6 +1213,7 @@
'minion_data_cache': True,
'enforce_mine_cache': False,
'ipc_mode': _DFLT_IPC_MODE,
'ipc_write_buffer': _DFLT_IPC_WBUFFER,
'ipv6': False,
'tcp_master_pub_port': 4512,
'tcp_master_pull_port': 4513,
Expand Down Expand Up @@ -2942,6 +2980,11 @@ def apply_minion_config(overrides=None,
if 'beacons' not in opts:
opts['beacons'] = {}

if overrides.get('ipc_write_buffer', '') == 'dynamic':
opts['ipc_write_buffer'] = _DFLT_IPC_WBUFFER
if 'ipc_write_buffer' not in overrides:
opts['ipc_write_buffer'] = 0

# if there is no schedule option yet, add an empty scheduler
if 'schedule' not in opts:
opts['schedule'] = {}
Expand Down Expand Up @@ -3016,7 +3059,10 @@ def apply_master_config(overrides=None, defaults=None):
)
opts['token_dir'] = os.path.join(opts['cachedir'], 'tokens')
opts['syndic_dir'] = os.path.join(opts['cachedir'], 'syndics')

if overrides.get('ipc_write_buffer', '') == 'dynamic':
opts['ipc_write_buffer'] = _DFLT_IPC_WBUFFER
if 'ipc_write_buffer' not in overrides:
opts['ipc_write_buffer'] = 0
using_ip_for_id = False
append_master = False
if not opts.get('id'):
Expand Down
20 changes: 15 additions & 5 deletions salt/transport/ipc.py
Original file line number Diff line number Diff line change
Expand Up @@ -431,9 +431,10 @@ class IPCMessagePublisher(object):
A Tornado IPC Publisher similar to Tornado's TCPServer class
but using either UNIX domain sockets or TCP sockets
'''
def __init__(self, socket_path, io_loop=None):
def __init__(self, opts, socket_path, io_loop=None):
'''
Create a new Tornado IPC server
:param dict opts: Salt options
:param str/int socket_path: Path on the filesystem for the
socket to bind to. This socket does
not need to exist prior to calling
Expand All @@ -444,6 +445,7 @@ def __init__(self, socket_path, io_loop=None):
for a tcp localhost connection.
:param IOLoop io_loop: A Tornado ioloop to handle scheduling
'''
self.opts = opts
self.socket_path = socket_path
self._started = False

Expand Down Expand Up @@ -506,10 +508,18 @@ def publish(self, msg):
def handle_connection(self, connection, address):
    '''
    Wrap a newly accepted connection in an IOStream and track it
    so publish() can write to it.

    :param connection: The accepted socket object
    :param address: The peer address (or port for a tcp connection)
    '''
    log.trace('IPCServer: Handling connection to address: {0}'.format(address))
    try:
        # Build the IOStream kwargs once instead of duplicating the
        # constructor call in both branches. A configured
        # ipc_write_buffer > 0 caps the per-connection write buffer;
        # 0 (the default) leaves it unbounded, Tornado's default.
        stream_kwargs = {'io_loop': self.io_loop}
        write_buffer = self.opts['ipc_write_buffer']
        if write_buffer > 0:
            log.trace('Setting IPC connection write buffer: {0}'.format(write_buffer))
            stream_kwargs['max_write_buffer_size'] = write_buffer
        stream = IOStream(connection, **stream_kwargs)
        self.streams.add(stream)
    except Exception as exc:
        log.error('IPC streaming error: {0}'.format(exc))
Expand Down
2 changes: 2 additions & 0 deletions salt/utils/event.py
Original file line number Diff line number Diff line change
Expand Up @@ -865,6 +865,7 @@ def __init__(self, opts, publish_handler, io_loop=None):
raise

self.publisher = salt.transport.ipc.IPCMessagePublisher(
self.opts,
epub_uri,
io_loop=self.io_loop
)
Expand Down Expand Up @@ -953,6 +954,7 @@ def run(self):
)

self.publisher = salt.transport.ipc.IPCMessagePublisher(
self.opts,
epub_uri,
io_loop=self.io_loop
)
Expand Down