From c314f5166bbdc928b3caa0409b349d3d41dad2df Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Thu, 28 Feb 2019 13:18:53 -0800 Subject: [PATCH 001/340] Correctly handle the situation when with the secret key or public key values are empty. --- salt/utils/nacl.py | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/salt/utils/nacl.py b/salt/utils/nacl.py index ce2b591343c3..6f2ee668a7b3 100644 --- a/salt/utils/nacl.py +++ b/salt/utils/nacl.py @@ -69,13 +69,16 @@ def _get_sk(**kwargs): Return sk ''' config = _get_config(**kwargs) - key = salt.utils.stringutils.to_str(config['sk']) + key = None + if config['sk']: + key = salt.utils.stringutils.to_str(config['sk']) sk_file = config['sk_file'] if not key and sk_file: - with salt.utils.files.fopen(sk_file, 'rb') as keyf: - key = salt.utils.stringutils.to_unicode(keyf.read()).rstrip('\n') - if key is None: - raise Exception('no key or sk_file found') + try: + with salt.utils.files.fopen(sk_file, 'rb') as keyf: + key = salt.utils.stringutils.to_unicode(keyf.read()).rstrip('\n') + except (IOError, OSError): + raise Exception('no key or sk_file found') return base64.b64decode(key) @@ -84,13 +87,16 @@ def _get_pk(**kwargs): Return pk ''' config = _get_config(**kwargs) - pubkey = salt.utils.stringutils.to_str(config['pk']) + pubkey = None + if config['pk']: + pubkey = salt.utils.stringutils.to_str(config['pk']) pk_file = config['pk_file'] if not pubkey and pk_file: - with salt.utils.files.fopen(pk_file, 'rb') as keyf: - pubkey = salt.utils.stringutils.to_unicode(keyf.read()).rstrip('\n') - if pubkey is None: - raise Exception('no pubkey or pk_file found') + try: + with salt.utils.files.fopen(pk_file, 'rb') as keyf: + pubkey = salt.utils.stringutils.to_unicode(keyf.read()).rstrip('\n') + except (IOError, OSError): + raise Exception('no pubkey or pk_file found') pubkey = six.text_type(pubkey) return base64.b64decode(pubkey) From 5527dff2ea9c6c8258516f98cacd2bd61797126c Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Fri, 1 Mar 2019 13:48:28 -0800 Subject: [PATCH 002/340] Ensure the comment, changes, and result are valid arguments for various test state functions as they are valid arguments for mod_watch. --- salt/states/test.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/states/test.py b/salt/states/test.py index a5659b577fbe..de4c4ac5ddfa 100644 --- a/salt/states/test.py +++ b/salt/states/test.py @@ -67,7 +67,7 @@ def nop(name, **kwargs): return succeed_without_changes(name) -def succeed_without_changes(name): +def succeed_without_changes(name, **kwargs): ''' Returns successful. @@ -85,7 +85,7 @@ def succeed_without_changes(name): return ret -def fail_without_changes(name): +def fail_without_changes(name, **kwargs): ''' Returns failure. @@ -108,7 +108,7 @@ def fail_without_changes(name): return ret -def succeed_with_changes(name): +def succeed_with_changes(name, **kwargs): ''' Returns successful and changes is not empty @@ -141,7 +141,7 @@ def succeed_with_changes(name): return ret -def fail_with_changes(name): +def fail_with_changes(name, **kwargs): ''' Returns failure and changes is not empty. 
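For readers skimming the diff above, here is a minimal, self-contained sketch (plain Python, not Salt code) of why the **kwargs additions matter: per the commit message, mod_watch may invoke these test state functions with extra keyword arguments such as comment, changes, and result, and a name-only signature rejects them with a TypeError.

def succeed_without_changes(name):
    # Old-style signature: any unexpected keyword argument raises TypeError.
    return {'name': name, 'changes': {}, 'result': True, 'comment': 'Success!'}


def succeed_without_changes_fixed(name, **kwargs):
    # Patched-style signature: extra keywords passed by a watcher are accepted.
    return {'name': name, 'changes': {}, 'result': True, 'comment': 'Success!'}


try:
    succeed_without_changes('demo', comment='passed through by mod_watch')
except TypeError as exc:
    print('name-only signature fails: %s' % exc)

print(succeed_without_changes_fixed('demo', comment='passed through by mod_watch'))
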
From 710ab50624b16012d54485beeff151ff5940846a Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Mon, 4 Mar 2019 22:38:20 +0300 Subject: [PATCH 003/340] Support parallel work of multiple IPCMEssageSubscribers in one process --- salt/transport/ipc.py | 275 ++++++++++++++++++++++-------------------- 1 file changed, 147 insertions(+), 128 deletions(-) diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py index f1f844bca02b..ee3c5d7c43c3 100644 --- a/salt/transport/ipc.py +++ b/salt/transport/ipc.py @@ -18,7 +18,8 @@ import tornado.gen import tornado.netutil import tornado.concurrent -from tornado.locks import Semaphore +import tornado.queues +from tornado.locks import Lock from tornado.ioloop import IOLoop, TimeoutError as TornadoTimeoutError from tornado.iostream import IOStream # Import Salt libs @@ -582,11 +583,116 @@ def __del__(self): self.close() -class IPCMessageSubscriber(IPCClient): +class IPCMessageSubscriberService(IPCClient): + ''' + IPC message subscriber service that is a standalone singleton class starting once for a number + of IPCMessageSubscriber instances feeding all of them with data. It closes automatically when + there are no more subscribers. + + To use this rever to IPCMessageSubscriber documentation. + ''' + def __singleton_init__(self, socket_path, io_loop=None): + super(IPCMessageSubscriberService, self).__singleton_init__( + socket_path, io_loop=io_loop) + self.saved_data = [] + self._read_in_progress = Lock() + self.handlers = weakref.WeakSet() + + def _subscribe(self, handler): + self.handlers.add(handler) + + def unsubscribe(self, handler): + self.handlers.discard(handler) + + def _has_subscribers(self): + return bool(self.handlers) + + def _feed_subscribers(self, data): + for subscriber in self.handlers: + subscriber._feed(data) + + @tornado.gen.coroutine + def _read(self, timeout, callback=None): + try: + yield self._read_in_progress.acquire(timeout=0) + except tornado.gen.TimeoutError: + raise tornado.gen.Return(None) + + log.debug('IPC Subscriber Service is starting reading') + # If timeout is not specified we need to set some here to make the service able to check + # is there any handler waiting for data. + if timeout is None: + timeout = 5 + + read_stream_future = None + while self._has_subscribers(): + if read_stream_future is None: + read_stream_future = self.stream.read_bytes(4096, partial=True) + + try: + wire_bytes = yield FutureWithTimeout(self.io_loop, + read_stream_future, + timeout) + read_stream_future = None + + self.unpacker.feed(wire_bytes) + msgs = [msg['body'] for msg in self.unpacker] + self._feed_subscribers(msgs) + except TornadoTimeoutError: + # Continue checking are there alive waiting handlers + # Keep 'read_stream_future' alive to wait it more in the next loop + continue + except tornado.iostream.StreamClosedError as exc: + log.trace('Subscriber disconnected from IPC %s', self.socket_path) + self._feed_subscribers([None]) + break + except Exception as exc: + log.error('Exception occurred in Subscriber while handling stream: %s', exc) + self._feed_subscribers([exc]) + break + + log.debug('IPC Subscriber Service is stopping due to a lack of subscribers') + self._read_in_progress.release() + raise tornado.gen.Return(None) + + @tornado.gen.coroutine + def read(self, handler, timeout=None): + ''' + Asynchronously read messages and invoke a callback when they are ready. 
+ + :param callback: A callback with the received data + ''' + self._subscribe(handler) + while not self.connected(): + try: + yield self.connect(timeout=5) + except tornado.iostream.StreamClosedError: + log.trace('Subscriber closed stream on IPC %s before connect', self.socket_path) + yield tornado.gen.sleep(1) + except Exception as exc: + log.error('Exception occurred while Subscriber connecting: %s', exc) + yield tornado.gen.sleep(1) + self._read(timeout) + + def close(self): + ''' + Routines to handle any cleanup before the instance shuts down. + Sockets and filehandles should be closed explicitly, to prevent + leaks. + ''' + if not self._closing: + super(IPCMessageSubscriberService, self).close() + + def __del__(self): + if IPCMessageSubscriberService in globals(): + self.close() + + +class IPCMessageSubscriber(object): ''' Salt IPC message subscriber - Create an IPC client to receive messages from IPC publisher + Create or reuse an IPC client to receive messages from IPC publisher An example of a very simple IPCMessageSubscriber connecting to an IPCMessagePublisher. This example assumes an already running IPCMessagePublisher. @@ -615,147 +721,60 @@ class IPCMessageSubscriber(IPCClient): # Wait for some data package = ipc_subscriber.read_sync() ''' - def __singleton_init__(self, socket_path, io_loop=None): - super(IPCMessageSubscriber, self).__singleton_init__( - socket_path, io_loop=io_loop) - self._read_sync_future = None - self._read_stream_future = None - self._sync_ioloop_running = False - self.saved_data = [] - self._sync_read_in_progress = Semaphore() - - @tornado.gen.coroutine - def _read_sync(self, timeout): - yield self._sync_read_in_progress.acquire() - exc_to_raise = None - ret = None - - try: - while True: - if self._read_stream_future is None: - self._read_stream_future = self.stream.read_bytes(4096, partial=True) - - if timeout is None: - wire_bytes = yield self._read_stream_future - else: - future_with_timeout = FutureWithTimeout( - self.io_loop, self._read_stream_future, timeout) - wire_bytes = yield future_with_timeout + def __init__(self, socket_path, io_loop=None): + self.service = IPCMessageSubscriberService(socket_path, io_loop) + self.queue = tornado.queues.Queue() - self._read_stream_future = None + def connected(self): + return self.service.connected() - # Remove the timeout once we get some data or an exception - # occurs. We will assume that the rest of the data is already - # there or is coming soon if an exception doesn't occur. - timeout = None + def connect(self, callback=None, timeout=None): + return self.service.connect(callback=callback, timeout=timeout) - self.unpacker.feed(wire_bytes) - first = True - for framed_msg in self.unpacker: - if first: - ret = framed_msg['body'] - first = False - else: - self.saved_data.append(framed_msg['body']) - if not first: - # We read at least one piece of data - break - except TornadoTimeoutError: - # In the timeout case, just return None. - # Keep 'self._read_stream_future' alive. 
- ret = None - except tornado.iostream.StreamClosedError as exc: - log.trace('Subscriber disconnected from IPC %s', self.socket_path) - self._read_stream_future = None - exc_to_raise = exc - except Exception as exc: - log.error('Exception occurred in Subscriber while handling stream: %s', exc) - self._read_stream_future = None - exc_to_raise = exc + @tornado.gen.coroutine + def _feed(self, msgs): + for msg in msgs: + yield self.queue.put(msg) - if self._sync_ioloop_running: - # Stop the IO Loop so that self.io_loop.start() will return in - # read_sync(). - self.io_loop.spawn_callback(self.io_loop.stop) + @tornado.gen.coroutine + def read_async(self, callback, timeout=None): + ''' + Asynchronously read messages and invoke a callback when they are ready. - if exc_to_raise is not None: - raise exc_to_raise # pylint: disable=E0702 - self._sync_read_in_progress.release() - raise tornado.gen.Return(ret) + :param callback: A callback with the received data + ''' + self.service.read(self) + while True: + try: + if timeout is not None: + deadline = time.time() + timeout + else: + deadline = None + data = yield self.queue.get(timeout=deadline) + except tornado.gen.TimeoutError: + raise tornado.gen.Return(None) + if data is None: + break + elif isinstance(data, Exception): + raise data + elif callback: + self.service.io_loop.spawn_callback(callback, data) + else: + raise tornado.gen.Return(data) def read_sync(self, timeout=None): ''' Read a message from an IPC socket - The socket must already be connected. The associated IO Loop must NOT be running. :param int timeout: Timeout when receiving message :return: message data if successful. None if timed out. Will raise an exception for all other error conditions. ''' - if self.saved_data: - return self.saved_data.pop(0) - - self._sync_ioloop_running = True - self._read_sync_future = self._read_sync(timeout) - self.io_loop.start() - self._sync_ioloop_running = False - - ret_future = self._read_sync_future - self._read_sync_future = None - return ret_future.result() - - @tornado.gen.coroutine - def _read_async(self, callback): - while not self.stream.closed(): - try: - self._read_stream_future = self.stream.read_bytes(4096, partial=True) - wire_bytes = yield self._read_stream_future - self._read_stream_future = None - self.unpacker.feed(wire_bytes) - for framed_msg in self.unpacker: - body = framed_msg['body'] - self.io_loop.spawn_callback(callback, body) - except tornado.iostream.StreamClosedError: - log.trace('Subscriber disconnected from IPC %s', self.socket_path) - break - except Exception as exc: - log.error('Exception occurred while Subscriber handling stream: %s', exc) - - @tornado.gen.coroutine - def read_async(self, callback): - ''' - Asynchronously read messages and invoke a callback when they are ready. - - :param callback: A callback with the received data - ''' - while not self.connected(): - try: - yield self.connect(timeout=5) - except tornado.iostream.StreamClosedError: - log.trace('Subscriber closed stream on IPC %s before connect', self.socket_path) - yield tornado.gen.sleep(1) - except Exception as exc: - log.error('Exception occurred while Subscriber connecting: %s', exc) - yield tornado.gen.sleep(1) - yield self._read_async(callback) + return self.service.io_loop.run_sync(lambda: self.read_async(None, timeout)) def close(self): - ''' - Routines to handle any cleanup before the instance shuts down. - Sockets and filehandles should be closed explicitly, to prevent - leaks. 
- ''' - if not self._closing: - IPCClient.close(self) - # This will prevent this message from showing up: - # '[ERROR ] Future exception was never retrieved: - # StreamClosedError' - if self._read_sync_future is not None and self._read_sync_future.done(): - self._read_sync_future.exception() - if self._read_stream_future is not None and self._read_stream_future.done(): - self._read_stream_future.exception() + self.service.unsubscribe(self) def __del__(self): - if IPCMessageSubscriber in globals(): - self.close() + self.close() From 6c0dcf222caea580b4561b22306faa29b56abc10 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Tue, 5 Mar 2019 17:01:13 -0800 Subject: [PATCH 004/340] Swapping pchanges for changes in file state. --- salt/states/file.py | 14 +++++++------- tests/integration/states/test_file.py | 15 +++++++++++++++ 2 files changed, 22 insertions(+), 7 deletions(-) diff --git a/salt/states/file.py b/salt/states/file.py index 15bb93e5ec61..ecf3429f2bbc 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -2719,7 +2719,7 @@ def managed(name, try: if __opts__['test']: if 'file.check_managed_changes' in __salt__: - ret['pchanges'] = __salt__['file.check_managed_changes']( + ret['changes'] = __salt__['file.check_managed_changes']( name, source, source_hash, @@ -2750,15 +2750,15 @@ def managed(name, reset=win_perms_reset) except CommandExecutionError as exc: if exc.strerror.startswith('Path not found'): - ret['pchanges'] = '{0} will be created'.format(name) + ret['changes'] = '{0} will be created'.format(name) - if isinstance(ret['pchanges'], tuple): - ret['result'], ret['comment'] = ret['pchanges'] - elif ret['pchanges']: + if isinstance(ret['changes'], tuple): + ret['result'], ret['comment'] = ret['changes'] + elif ret['changes']: ret['result'] = None ret['comment'] = 'The file {0} is set to be changed'.format(name) - if 'diff' in ret['pchanges'] and not show_changes: - ret['pchanges']['diff'] = '' + if 'diff' in ret['changes'] and not show_changes: + ret['changes']['diff'] = '' else: ret['result'] = True ret['comment'] = 'The file {0} is in the correct state'.format(name) diff --git a/tests/integration/states/test_file.py b/tests/integration/states/test_file.py index 69bed94c5ca9..fa75d14b6c0b 100644 --- a/tests/integration/states/test_file.py +++ b/tests/integration/states/test_file.py @@ -459,6 +459,21 @@ def test_managed_show_changes_false(self): changes = next(six.itervalues(ret))['changes'] self.assertEqual('', changes['diff']) + def test_managed_show_changes_true(self): + ''' + file.managed test interface + ''' + name = os.path.join(TMP, 'grail_not_scene33') + with salt.utils.files.fopen(name, 'wb') as fp_: + fp_.write(b'test_managed_show_changes_false\n') + + ret = self.run_state( + 'file.managed', name=name, source='salt://grail/scene33', + ) + + changes = next(six.itervalues(ret))['changes'] + self.assertIn('diff', changes) + @skipIf(IS_WINDOWS, 'Don\'t know how to fix for Windows') def test_managed_escaped_file_path(self): ''' From a65e621abd0882559ca661ed8232ecd91bbfc85c Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Tue, 5 Mar 2019 17:06:51 -0800 Subject: [PATCH 005/340] Adding an additional note warning that the file will not be changed because test=True, but could be changed by other states. 
--- salt/states/file.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/states/file.py b/salt/states/file.py index ecf3429f2bbc..ecc66dc305e9 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -2757,6 +2757,8 @@ def managed(name, elif ret['changes']: ret['result'] = None ret['comment'] = 'The file {0} is set to be changed'.format(name) + ret['comment'] += ('\nNote: No changes made, actual changes may\n' + 'be different due to other states.') if 'diff' in ret['changes'] and not show_changes: ret['changes']['diff'] = '' else: From 76618608249409139d1fbea5fa854873a23e62c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Yavercovski?= Date: Thu, 7 Mar 2019 16:48:34 -0500 Subject: [PATCH 006/340] fix missing client_args in influxdb module --- salt/states/influxdb_retention_policy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/states/influxdb_retention_policy.py b/salt/states/influxdb_retention_policy.py index fc8a6fdac875..6b07f475308d 100644 --- a/salt/states/influxdb_retention_policy.py +++ b/salt/states/influxdb_retention_policy.py @@ -89,7 +89,7 @@ def present(name, database, duration="7d", return ret else: - current_policy = __salt__['influxdb.get_retention_policy'](database=database, name=name) + current_policy = __salt__['influxdb.get_retention_policy'](database=database, name=name, **client_args) update_policy = False if current_policy['duration'] != convert_duration(duration): update_policy = True From 59b1d4f7679239f8d3c920eff4e17e1799c35be7 Mon Sep 17 00:00:00 2001 From: twangboy Date: Thu, 7 Mar 2019 16:16:13 -0700 Subject: [PATCH 007/340] Revert back to using reg to get timezone Reverts PR 51095 Checks for null characters --- salt/modules/win_timezone.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/salt/modules/win_timezone.py b/salt/modules/win_timezone.py index 135d908f114c..b3eda53c3f29 100644 --- a/salt/modules/win_timezone.py +++ b/salt/modules/win_timezone.py @@ -205,22 +205,23 @@ def get_zone(): Returns: str: Timezone in unix format - Raises: - CommandExecutionError: If timezone could not be gathered - CLI Example: .. code-block:: bash salt '*' timezone.get_zone ''' - cmd = ['tzutil', '/g'] - res = __salt__['cmd.run_all'](cmd, python_shell=False) - if res['retcode'] or not res['stdout']: - raise CommandExecutionError('tzutil encountered an error getting ' - 'timezone', - info=res) - return mapper.get_unix(res['stdout'].lower(), 'Unknown') + win_zone = __utils__['reg.read_value']( + hive='HKLM', + key='SYSTEM\\CurrentControlSet\\Control\\TimeZoneInformation', + vname='TimeZoneKeyName')['vdata'] + # Some data may have null characters. We only need the first portion up to + # the first null character. 
See the following: + # https://github.com/saltstack/salt/issues/51940 + # https://stackoverflow.com/questions/27716746/hklm-system-currentcontrolset-control-timezoneinformation-timezonekeyname-corrup + if '\0' in win_zone: + win_zone = win_zone.split('\0')[0] + return mapper.get_unix(win_zone.lower(), 'Unknown') def get_offset(): From 9a8afa64331e09109b5a205c27b06cd8e17fb47f Mon Sep 17 00:00:00 2001 From: twangboy Date: Thu, 7 Mar 2019 16:28:31 -0700 Subject: [PATCH 008/340] Fix tests, add null byte test --- tests/unit/modules/test_win_timezone.py | 77 +++++++++---------------- 1 file changed, 28 insertions(+), 49 deletions(-) diff --git a/tests/unit/modules/test_win_timezone.py b/tests/unit/modules/test_win_timezone.py index 6916f08de595..dc589e7c00ef 100644 --- a/tests/unit/modules/test_win_timezone.py +++ b/tests/unit/modules/test_win_timezone.py @@ -7,7 +7,7 @@ # Import Salt Libs import salt.modules.win_timezone as win_timezone -from salt.exceptions import CommandExecutionError + # Import Salt Testing Libs from tests.support.mixins import LoaderModuleMockMixin from tests.support.mock import MagicMock, patch @@ -26,31 +26,26 @@ def setup_loader_modules(self): def test_get_zone(self): ''' - Test if it get current timezone (i.e. Asia/Calcutta) + Test if it gets current timezone (i.e. Asia/Calcutta) ''' - mock_read_ok = MagicMock(return_value={'pid': 78, - 'retcode': 0, - 'stderr': '', - 'stdout': 'India Standard Time'}) + mock_read = MagicMock(side_effect=[{'vdata': 'India Standard Time'}, + {'vdata': 'Indian Standard Time'}]) - with patch.dict(win_timezone.__salt__, {'cmd.run_all': mock_read_ok}): + with patch.dict(win_timezone.__utils__, {'reg.read_value': mock_read}): self.assertEqual(win_timezone.get_zone(), 'Asia/Calcutta') - - mock_read_error = MagicMock(return_value={'pid': 78, - 'retcode': 0, - 'stderr': '', - 'stdout': 'Indian Standard Time'}) - - with patch.dict(win_timezone.__salt__, {'cmd.run_all': mock_read_error}): self.assertEqual(win_timezone.get_zone(), 'Unknown') - mock_read_fatal = MagicMock(return_value={'pid': 78, - 'retcode': 1, - 'stderr': '', - 'stdout': ''}) + def test_get_zone_null_terminated(self): + ''' + Test if it handles instances where the registry contains null values + ''' + mock_read = MagicMock(side_effect=[ + {'vdata': 'India Standard Time\0\0\0\0'}, + {'vdata': 'Indian Standard Time\0\0some more junk data\0\0'}]) - with patch.dict(win_timezone.__salt__, {'cmd.run_all': mock_read_fatal}): - self.assertRaises(CommandExecutionError, win_timezone.get_zone) + with patch.dict(win_timezone.__utils__, {'reg.read_value': mock_read}): + self.assertEqual(win_timezone.get_zone(), 'Asia/Calcutta') + self.assertEqual(win_timezone.get_zone(), 'Unknown') # 'get_offset' function tests: 1 @@ -58,16 +53,9 @@ def test_get_offset(self): ''' Test if it get current numeric timezone offset from UCT (i.e. 
+0530) ''' - # time = ('(UTC+05:30) Chennai, Kolkata, Mumbai, \ - # New Delhi\nIndia Standard Time') - # mock_cmd = MagicMock(side_effect=['India Standard Time', time]) - # with patch.dict(win_timezone.__salt__, {'cmd.run': mock_cmd}): - mock_read = MagicMock(return_value={'pid': 78, - 'retcode': 0, - 'stderr': '', - 'stdout': 'India Standard Time'}) + mock_read = MagicMock(return_value={'vdata': 'India Standard Time'}) - with patch.dict(win_timezone.__salt__, {'cmd.run_all': mock_read}): + with patch.dict(win_timezone.__utils__, {'reg.read_value': mock_read}): self.assertEqual(win_timezone.get_offset(), '+0530') # 'get_zonecode' function tests: 1 @@ -76,12 +64,9 @@ def test_get_zonecode(self): ''' Test if it get current timezone (i.e. PST, MDT, etc) ''' - mock_read = MagicMock(return_value={'pid': 78, - 'retcode': 0, - 'stderr': '', - 'stdout': 'India Standard Time'}) + mock_read = MagicMock(return_value={'vdata': 'India Standard Time'}) - with patch.dict(win_timezone.__salt__, {'cmd.run_all': mock_read}): + with patch.dict(win_timezone.__utils__, {'reg.read_value': mock_read}): self.assertEqual(win_timezone.get_zonecode(), 'IST') # 'set_zone' function tests: 1 @@ -90,17 +75,14 @@ def test_set_zone(self): ''' Test if it unlinks, then symlinks /etc/localtime to the set timezone. ''' - mock_write = MagicMock(return_value={'pid': 78, - 'retcode': 0, - 'stderr': '', - 'stdout': ''}) - mock_read = MagicMock(return_value={'pid': 78, - 'retcode': 0, - 'stderr': '', - 'stdout': 'India Standard Time'}) + mock_cmd = MagicMock(return_value={'pid': 78, + 'retcode': 0, + 'stderr': '', + 'stdout': ''}) + mock_read = MagicMock(return_value={'vdata': 'India Standard Time'}) - with patch.dict(win_timezone.__salt__, {'cmd.run_all': mock_write}), \ - patch.dict(win_timezone.__salt__, {'cmd.run_all': mock_read}): + with patch.dict(win_timezone.__salt__, {'cmd.run_all': mock_cmd}), \ + patch.dict(win_timezone.__utils__, {'reg.read_value': mock_read}): self.assertTrue(win_timezone.set_zone('Asia/Calcutta')) @@ -112,12 +94,9 @@ def test_zone_compare(self): the one set in /etc/localtime. Returns True if they match, and False if not. Mostly useful for running state checks. 
''' - mock_read = MagicMock(return_value={'pid': 78, - 'retcode': 0, - 'stderr': '', - 'stdout': 'India Standard Time'}) + mock_read = MagicMock(return_value={'vdata': 'India Standard Time'}) - with patch.dict(win_timezone.__salt__, {'cmd.run_all': mock_read}): + with patch.dict(win_timezone.__utils__, {'reg.read_value': mock_read}): self.assertTrue(win_timezone.zone_compare('Asia/Calcutta')) # 'get_hwclock' function tests: 1 From 7656331fd2dddf84669a0d54137c794151770962 Mon Sep 17 00:00:00 2001 From: twangboy Date: Fri, 8 Mar 2019 11:02:02 -0700 Subject: [PATCH 009/340] Change default language from 1033 to en_US The integer was causing the .replace('_', '-') function to fail --- salt/modules/win_lgpo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/win_lgpo.py b/salt/modules/win_lgpo.py index 42a40ff637bf..fadab5f40758 100644 --- a/salt/modules/win_lgpo.py +++ b/salt/modules/win_lgpo.py @@ -133,7 +133,7 @@ # Default to `en-US` (1033) windll = ctypes.windll.kernel32 INSTALL_LANGUAGE = locale.windows_locale.get( - windll.GetSystemDefaultUILanguage(), 1033).replace('_', '-') + windll.GetSystemDefaultUILanguage(), 'en_US').replace('_', '-') except ImportError: HAS_WINDOWS_MODULES = False From 83bed46a60ee9cf41f60f5b7fdc26d1c429df69a Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 11 Mar 2019 15:52:55 -0600 Subject: [PATCH 010/340] Add missing symlink test --- tests/unit/modules/test_win_file.py | 39 +++++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/tests/unit/modules/test_win_file.py b/tests/unit/modules/test_win_file.py index 6413dd37ae4e..dfa0b956ed72 100644 --- a/tests/unit/modules/test_win_file.py +++ b/tests/unit/modules/test_win_file.py @@ -7,18 +7,21 @@ import os # Import Salt Testing Libs -from tests.support.unit import TestCase, skipIf +from tests.support.helpers import destructiveTest +from tests.support.mixins import LoaderModuleMockMixin from tests.support.mock import patch, NO_MOCK, NO_MOCK_REASON +from tests.support.unit import TestCase, skipIf # Import Salt Libs import salt.modules.win_file as win_file +import salt.modules.temp as temp from salt.exceptions import CommandExecutionError import salt.utils.platform import salt.utils.win_dacl @skipIf(NO_MOCK, NO_MOCK_REASON) -class WinFileTestCase(TestCase): +class WinFileTestCase(TestCase, LoaderModuleMockMixin): ''' Test cases for salt.modules.win_file ''' @@ -28,6 +31,13 @@ class WinFileTestCase(TestCase): else: FAKE_PATH = os.sep.join(['path', 'does', 'not', 'exist']) + def setup_loader_modules(self): + return {win_file: { + '__utils__': { + 'dacl.set_perms': salt.utils.win_dacl.set_perms + } + }} + def test_issue_43328_stats(self): ''' Make sure that a CommandExecutionError is raised if the file does NOT @@ -46,3 +56,28 @@ def test_issue_43328_check_perms_no_ret(self): with patch('os.path.exists', return_value=False): self.assertRaises( CommandExecutionError, win_file.check_perms, self.FAKE_PATH) + + @destructiveTest + @skipIf(not salt.utils.platform.is_windows(), 'Skip on Non-Windows systems') + def test_issue_52002_check_file_remove_symlink(self): + ''' + Make sure that directories including symlinks or symlinks can be removed + ''' + base = temp.dir(prefix='base-') + target = os.path.join(base, 'child 1', 'target\\') + symlink = os.path.join(base, 'child 2', 'link') + try: + # Create environment + self.assertFalse(win_file.directory_exists(target)) + self.assertFalse(win_file.directory_exists(symlink)) + self.assertTrue(win_file.makedirs_(target)) 
+ self.assertTrue(win_file.makedirs_(symlink)) + self.assertTrue(win_file.symlink(target, symlink)) + self.assertTrue(win_file.directory_exists(symlink)) + self.assertTrue(win_file.is_link(symlink)) + # Test removal of directory containing symlink + self.assertTrue(win_file.remove(base)) + self.assertFalse(win_file.directory_exists(base)) + finally: + if os.path.exists(base): + win_file.remove(base) From 3d3110267dd0d0d930e3825abcd95393d8f92cce Mon Sep 17 00:00:00 2001 From: Simon Flood Date: Tue, 12 Mar 2019 11:49:29 +0000 Subject: [PATCH 011/340] Fix typo in actual 2019.2.0 release notes See #51861 --- doc/topics/releases/2019.2.0.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/topics/releases/2019.2.0.rst b/doc/topics/releases/2019.2.0.rst index c3659eef4cdf..e44845db2a9e 100644 --- a/doc/topics/releases/2019.2.0.rst +++ b/doc/topics/releases/2019.2.0.rst @@ -271,7 +271,7 @@ a BGP policy referenced in many places, you can do so by running: .. code-block:: bash - salt '*' net.replae_pattern OLD-POLICY-CONFIG new-policy-config + salt '*' net.replace_pattern OLD-POLICY-CONFIG new-policy-config Similarly, you can also replace entire configuration blocks using the :mod:`net.blockreplace ` function. From e339de1349eb8f032d00a2b22c241494e9ced4bf Mon Sep 17 00:00:00 2001 From: Alexander Werner Date: Wed, 13 Mar 2019 23:24:49 +0100 Subject: [PATCH 012/340] use proxyfile based on salt.syspaths.CONFIG_DIR also fix generation of proxyfile as valid yaml closes #52169 --- salt/modules/salt_proxy.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/salt/modules/salt_proxy.py b/salt/modules/salt_proxy.py index 9815e513dab4..13bcd2615505 100644 --- a/salt/modules/salt_proxy.py +++ b/salt/modules/salt_proxy.py @@ -14,6 +14,7 @@ # Import Salt libs import salt.utils.files +import salt.syspaths # Import 3rd-party libs import salt.ext.six.moves @@ -33,7 +34,7 @@ def _write_proxy_conf(proxyfile): if proxyfile: log.debug('Writing proxy conf file') with salt.utils.files.fopen(proxyfile, 'w') as proxy_conf: - proxy_conf.write(salt.utils.stringutils.to_str('master = {0}' + proxy_conf.write(salt.utils.stringutils.to_str('master: {0}' .format(__grains__['master']))) msg = 'Wrote proxy file {0}'.format(proxyfile) log.debug(msg) @@ -132,7 +133,7 @@ def configure_proxy(proxyname, start=True): test = __opts__['test'] # write the proxy file if necessary - proxyfile = '/etc/salt/proxy' + proxyfile = os.path.join(salt.syspaths.CONFIG_DIR, 'proxy') status_file, msg_new, msg_old = _proxy_conf_file(proxyfile, test) changes_new.extend(msg_new) changes_old.extend(msg_old) From 4b99afa8192f103ca28a6e16cafe3db3bceecf03 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Wed, 13 Mar 2019 17:27:39 -0700 Subject: [PATCH 013/340] Fixing the output when there are changes for the ACL state so the permissions are shown and not the octal number. 
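Background for the change below: rendering an octal permission digit as the familiar rwx triplet is a small bit-masking exercise. The sketch assumes the conventional read=4, write=2, execute=1 weighting and is a generic illustration, not the exact lookup table the patch introduces.

def octal_to_symbolic(digit):
    # Conventional weighting: read = 4, write = 2, execute = 1.
    return '{0}{1}{2}'.format('r' if digit & 4 else '-',
                              'w' if digit & 2 else '-',
                              'x' if digit & 1 else '-')

print(octal_to_symbolic(5))  # r-x
print(octal_to_symbolic(7))  # rwx
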
--- salt/states/linux_acl.py | 13 +++++++++++-- tests/unit/states/test_linux_acl.py | 18 +++++++++--------- 2 files changed, 20 insertions(+), 11 deletions(-) diff --git a/salt/states/linux_acl.py b/salt/states/linux_acl.py index 38ff1c083016..7f68f3ebc5b5 100644 --- a/salt/states/linux_acl.py +++ b/salt/states/linux_acl.py @@ -29,6 +29,7 @@ # Import Python libs from __future__ import absolute_import, print_function, unicode_literals +import logging import os # Import salt libs @@ -36,6 +37,8 @@ from salt.exceptions import CommandExecutionError import salt.utils.path +log = logging.getLogger(__name__) + __virtualname__ = 'acl' @@ -60,6 +63,11 @@ def present(name, acl_type, acl_name='', perms='', recurse=False): 'comment': ''} _octal = {'r': 4, 'w': 2, 'x': 1, '-': 0} + _octal_lookup = {'1': 'x', + '2': 'w', + '4': 'r', + '5': 'rx', + '7': 'rwx'} if not os.path.exists(name): ret['comment'] = '{0} does not exist'.format(name) @@ -111,18 +119,19 @@ def present(name, acl_type, acl_name='', perms='', recurse=False): if not need_refresh: ret['comment'] = 'Permissions are in the desired state' else: + new_perms = _octal_lookup[six.text_type(user[_search_name]['octal'])] changes = {'new': {'acl_name': acl_name, 'acl_type': acl_type, 'perms': perms}, 'old': {'acl_name': acl_name, 'acl_type': acl_type, - 'perms': six.text_type(user[_search_name]['octal'])}} + 'perms': new_perms}} if __opts__['test']: ret.update({'comment': 'Updated permissions will be applied for ' '{0}: {1} -> {2}'.format( acl_name, - six.text_type(user[_search_name]['octal']), + new_perms, perms), 'result': None, 'pchanges': changes}) return ret diff --git a/tests/unit/states/test_linux_acl.py b/tests/unit/states/test_linux_acl.py index 168ce4b76251..cf4c51005936 100644 --- a/tests/unit/states/test_linux_acl.py +++ b/tests/unit/states/test_linux_acl.py @@ -43,17 +43,17 @@ def test_present(self): perms = 'rwx' mock = MagicMock(side_effect=[{name: {acl_type: [{acl_name: - {'octal': 'A'}}]}}, + {'octal': 5}}]}}, {name: {acl_type: [{acl_name: - {'octal': 'A'}}]}}, + {'octal': 5}}]}}, {name: {acl_type: [{acl_name: - {'octal': 'A'}}]}}, + {'octal': 5}}]}}, {name: {acl_type: [{}]}}, {name: {acl_type: [{}]}}, {name: {acl_type: [{}]}}, { name: {acl_type: [{acl_name: {'octal': 7}}]}, - name+"/foo": {acl_type: [{acl_name: {'octal': 'A'}}]} + name+"/foo": {acl_type: [{acl_name: {'octal': 5}}]} }, { name: {acl_type: [{acl_name: {'octal': 7}}]}, @@ -65,7 +65,7 @@ def test_present(self): with patch.dict(linux_acl.__salt__, {'acl.getfacl': mock}): # Update - test=True with patch.dict(linux_acl.__opts__, {'test': True}): - comt = ('Updated permissions will be applied for {0}: A -> {1}' + comt = ('Updated permissions will be applied for {0}: rx -> {1}' ''.format(acl_name, perms)) ret = {'name': name, 'comment': comt, @@ -75,7 +75,7 @@ def test_present(self): 'perms': perms}, 'old': {'acl_name': acl_name, 'acl_type': acl_type, - 'perms': 'A'}}, + 'perms': 'rx'}}, 'result': None} self.assertDictEqual(linux_acl.present(name, acl_type, acl_name, @@ -91,7 +91,7 @@ def test_present(self): 'perms': perms}, 'old': {'acl_name': acl_name, 'acl_type': acl_type, - 'perms': 'A'}}, + 'perms': 'rx'}}, 'pchanges': {}, 'result': True} self.assertDictEqual(linux_acl.present(name, acl_type, @@ -159,7 +159,7 @@ def test_present(self): with patch.dict(linux_acl.__salt__, {'acl.getfacl': mock}): # Update - test=True with patch.dict(linux_acl.__opts__, {'test': True}): - comt = ('Updated permissions will be applied for {0}: 7 -> {1}' + comt = ('Updated permissions will 
be applied for {0}: rwx -> {1}' ''.format(acl_name, perms)) ret = {'name': name, 'comment': comt, @@ -169,7 +169,7 @@ def test_present(self): 'perms': perms}, 'old': {'acl_name': acl_name, 'acl_type': acl_type, - 'perms': '7'}}, + 'perms': 'rwx'}}, 'result': None} self.assertDictEqual(linux_acl.present(name, acl_type, acl_name, From 24c907be017e01da6047eb8fdef94b6b2f860456 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Wed, 13 Mar 2019 17:48:31 -0700 Subject: [PATCH 014/340] Adding additional permissions to the lookup. --- salt/states/linux_acl.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/salt/states/linux_acl.py b/salt/states/linux_acl.py index 7f68f3ebc5b5..76b1c86c8bed 100644 --- a/salt/states/linux_acl.py +++ b/salt/states/linux_acl.py @@ -63,10 +63,13 @@ def present(name, acl_type, acl_name='', perms='', recurse=False): 'comment': ''} _octal = {'r': 4, 'w': 2, 'x': 1, '-': 0} - _octal_lookup = {'1': 'x', + _octal_lookup = {'0': '-', + '1': 'x', '2': 'w', + '3': 'wx', '4': 'r', '5': 'rx', + '6': 'rw', '7': 'rwx'} if not os.path.exists(name): From bfdb6691ffb02e71f8c35969c0ede69f278424d3 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Thu, 14 Mar 2019 10:21:42 -0700 Subject: [PATCH 015/340] Updating the reverse octal lookup dictionary. Updating tests. --- salt/states/linux_acl.py | 14 +++++--------- tests/unit/states/test_linux_acl.py | 6 +++--- 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/salt/states/linux_acl.py b/salt/states/linux_acl.py index 76b1c86c8bed..72fa1266d766 100644 --- a/salt/states/linux_acl.py +++ b/salt/states/linux_acl.py @@ -63,14 +63,7 @@ def present(name, acl_type, acl_name='', perms='', recurse=False): 'comment': ''} _octal = {'r': 4, 'w': 2, 'x': 1, '-': 0} - _octal_lookup = {'0': '-', - '1': 'x', - '2': 'w', - '3': 'wx', - '4': 'r', - '5': 'rx', - '6': 'rw', - '7': 'rwx'} + _octal_lookup = {0: '-', 1: 'r', 2: 'w', 4: 'x'} if not os.path.exists(name): ret['comment'] = '{0} does not exist'.format(name) @@ -122,7 +115,10 @@ def present(name, acl_type, acl_name='', perms='', recurse=False): if not need_refresh: ret['comment'] = 'Permissions are in the desired state' else: - new_perms = _octal_lookup[six.text_type(user[_search_name]['octal'])] + _num = user[_search_name]['octal'] + new_perms = '{}{}{}'.format(_octal_lookup[_num&1], + _octal_lookup[_num&2], + _octal_lookup[_num&4]) changes = {'new': {'acl_name': acl_name, 'acl_type': acl_type, 'perms': perms}, diff --git a/tests/unit/states/test_linux_acl.py b/tests/unit/states/test_linux_acl.py index cf4c51005936..8d60f9c80660 100644 --- a/tests/unit/states/test_linux_acl.py +++ b/tests/unit/states/test_linux_acl.py @@ -65,7 +65,7 @@ def test_present(self): with patch.dict(linux_acl.__salt__, {'acl.getfacl': mock}): # Update - test=True with patch.dict(linux_acl.__opts__, {'test': True}): - comt = ('Updated permissions will be applied for {0}: rx -> {1}' + comt = ('Updated permissions will be applied for {0}: r-x -> {1}' ''.format(acl_name, perms)) ret = {'name': name, 'comment': comt, @@ -75,7 +75,7 @@ def test_present(self): 'perms': perms}, 'old': {'acl_name': acl_name, 'acl_type': acl_type, - 'perms': 'rx'}}, + 'perms': 'r-x'}}, 'result': None} self.assertDictEqual(linux_acl.present(name, acl_type, acl_name, @@ -91,7 +91,7 @@ def test_present(self): 'perms': perms}, 'old': {'acl_name': acl_name, 'acl_type': acl_type, - 'perms': 'rx'}}, + 'perms': 'r-x'}}, 'pchanges': {}, 'result': True} self.assertDictEqual(linux_acl.present(name, acl_type, From 
889660f9840ad5c679bd84f221571312e4e47dd6 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Thu, 14 Mar 2019 16:21:47 -0700 Subject: [PATCH 016/340] Fixing lint. --- salt/states/linux_acl.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/states/linux_acl.py b/salt/states/linux_acl.py index 72fa1266d766..f7408514b75d 100644 --- a/salt/states/linux_acl.py +++ b/salt/states/linux_acl.py @@ -116,9 +116,9 @@ def present(name, acl_type, acl_name='', perms='', recurse=False): ret['comment'] = 'Permissions are in the desired state' else: _num = user[_search_name]['octal'] - new_perms = '{}{}{}'.format(_octal_lookup[_num&1], - _octal_lookup[_num&2], - _octal_lookup[_num&4]) + new_perms = '{}{}{}'.format(_octal_lookup[_num & 1], + _octal_lookup[_num & 2], + _octal_lookup[_num & 4]) changes = {'new': {'acl_name': acl_name, 'acl_type': acl_type, 'perms': perms}, From a901ec4a91df0c49e8de83d6a020130d9027f42b Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Fri, 15 Mar 2019 17:13:21 -0700 Subject: [PATCH 017/340] Don't save beacons when test=True --- salt/states/beacon.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/salt/states/beacon.py b/salt/states/beacon.py index 362a2c365a4e..8af353f2934f 100644 --- a/salt/states/beacon.py +++ b/salt/states/beacon.py @@ -100,8 +100,11 @@ def present(name, ret['comment'].append('Adding {0} to beacons'.format(name)) if save: - result = __salt__['beacons.save']() - ret['comment'].append('Beacon {0} saved'.format(name)) + if 'test' in __opts__ and __opts__['test']: + ret['comment'].append('Beacon {0} would be saved'.format(name)) + else: + result = __salt__['beacons.save']() + ret['comment'].append('Beacon {0} saved'.format(name)) ret['comment'] = '\n'.join(ret['comment']) return ret @@ -146,8 +149,11 @@ def absent(name, ret['comment'].append('{0} not configured in beacons'.format(name)) if save: - result = __salt__['beacons.save']() - ret['comment'].append('Beacon {0} saved'.format(name)) + if 'test' in __opts__ and __opts__['test']: + ret['comment'].append('Beacon {0} would be saved'.format(name)) + else: + result = __salt__['beacons.save']() + ret['comment'].append('Beacon {0} saved'.format(name)) ret['comment'] = '\n'.join(ret['comment']) return ret From dce4ffab3501af897571f8a58f044cfc6ec2c667 Mon Sep 17 00:00:00 2001 From: Pedro Algarvio Date: Mon, 18 Mar 2019 15:30:18 +0000 Subject: [PATCH 018/340] Avoid a traceback on tornado.testing test classes https://gist.github.com/s0undt3ch/9298a69a3492404d89a832de9efb1e68 This only happens when XML reporting is enabled. Why only now, I have no clue. 
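The underlying problem is that inspect cannot locate source lines for a wrapper object, so the reporter has to introspect the wrapped original instead. Below is a rough, self-contained sketch of that pattern, using a stand-in wrapper class rather than tornado's real test-method wrapper.

import inspect


class MethodWrapper(object):
    # Stand-in for a wrapper object that stores the original callable.
    def __init__(self, orig_method):
        self.orig_method = orig_method

    def __call__(self, *args, **kwargs):
        return self.orig_method(*args, **kwargs)


def sample_test():
    return True


wrapped = MethodWrapper(sample_test)
# Unwrap before asking inspect for source information, otherwise
# inspect.getsourcelines() raises TypeError on the wrapper instance.
target = getattr(wrapped, 'orig_method', wrapped)
_, lineno = inspect.getsourcelines(target)
print('test defined at line %d' % lineno)
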
--- tests/support/xmlunit.py | 48 +++++++++++++++++++++++++++++++++++++--- 1 file changed, 45 insertions(+), 3 deletions(-) diff --git a/tests/support/xmlunit.py b/tests/support/xmlunit.py index 61294b9e5b59..82300e61ca89 100644 --- a/tests/support/xmlunit.py +++ b/tests/support/xmlunit.py @@ -16,6 +16,8 @@ from __future__ import absolute_import import io import sys +import time +import inspect import logging # Import 3rd-party libs @@ -27,6 +29,7 @@ try: import xmlrunner.runner import xmlrunner.result + import xmlrunner.unittest HAS_XMLRUNNER = True class _DelegateIO(object): @@ -56,9 +59,48 @@ def __getattr__(self, attr): class _XMLTestResult(xmlrunner.result._XMLTestResult): def startTest(self, test): - log.debug('>>>>> START >>>>> {0}'.format(test.id())) + log.debug('>>>>> START >>>>> %s', test.id()) # xmlrunner classes are NOT new-style classes - xmlrunner.result._XMLTestResult.startTest(self, test) + # xmlrunner.result._XMLTestResult.startTest(self, test) + + # ----- Re-Implement startTest --------------------------------------------------------------------------> + # The reason being that _XMLTestResult does not like tornado testing wrapping it's test class + # https://gist.github.com/s0undt3ch/9298a69a3492404d89a832de9efb1e68 + self.start_time = time.time() + xmlrunner.unittest.TestResult.startTest(self, test) + + try: + if getattr(test, '_dt_test', None) is not None: + # doctest.DocTestCase + self.filename = test._dt_test.filename + self.lineno = test._dt_test.lineno + else: + # regular unittest.TestCase? + test_method = getattr(test, test._testMethodName) + test_class = type(test) + # Note: inspect can get confused with decorators, so use class. + self.filename = inspect.getsourcefile(test_class) + # Handle partial and partialmethod objects. + test_method = getattr(test_method, 'func', test_method) + + # ----- Code which avoids the inspect tracebacks ------------------------------------------------> + try: + from tornado.testing import _TestMethodWrapper + if isinstance(test_method, _TestMethodWrapper): + test_method = test_method.orig_method + except (ImportError, AttributeError): + pass + # <---- Code which avoids the inspect tracebacks ------------------------------------------------- + _, self.lineno = inspect.getsourcelines(test_method) + finally: + pass + + if self.showAll: + self.stream.write(' ' + self.getDescription(test)) + self.stream.write(" ... ") + self.stream.flush() + # <---- Re-Implement startTest --------------------------------------------------------------------------- + if self.buffer: # Let's override the values of self._stdXXX_buffer # We want a similar sys.stdXXX file like behaviour @@ -68,7 +110,7 @@ def startTest(self, test): sys.stdout = self._stdout_buffer def stopTest(self, test): - log.debug('<<<<< END <<<<<<< {0}'.format(test.id())) + log.debug('<<<<< END <<<<<<< %s', test.id()) # xmlrunner classes are NOT new-style classes return xmlrunner.result._XMLTestResult.stopTest(self, test) From 8c0f46baba9cb319e5f0cb85309d726c136408b4 Mon Sep 17 00:00:00 2001 From: "Daniel A. 
Wozniak" Date: Mon, 18 Mar 2019 10:05:32 -0700 Subject: [PATCH 019/340] Use windows state to bootstrap windows builds This is also removing the use of dev_*.ps1 scripts in the windows builds --- .kitchen.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.kitchen.yml b/.kitchen.yml index 0f1a3c968e42..4a97283470e8 100644 --- a/.kitchen.yml +++ b/.kitchen.yml @@ -54,7 +54,7 @@ provisioner: base: "os:Windows": - match: grain - - prep_windows + - windows "*": - <%= ENV['KITCHEN_STATE'] || 'git.salt' %> pillars: From 7263956d8cd17e555451dec1c7b65dfda4d7448f Mon Sep 17 00:00:00 2001 From: Pedro Algarvio Date: Mon, 18 Mar 2019 17:47:40 +0000 Subject: [PATCH 020/340] Lock coverage and xml-unittest-reporting versions --- noxfile.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/noxfile.py b/noxfile.py index ab0f2a659f2c..9d5d8e7fa1a4 100644 --- a/noxfile.py +++ b/noxfile.py @@ -127,7 +127,7 @@ def _install_requirements(session, *extra_requirements): def _run_with_coverage(session, *test_cmd): - session.install('coverage') + session.install('coverage==4.5.3') session.run('coverage', 'erase') python_path_env_var = os.environ.get('PYTHONPATH') or None if python_path_env_var is None: @@ -149,7 +149,7 @@ def _run_with_coverage(session, *test_cmd): @nox.parametrize('coverage', [False, True]) def runtests(session, coverage): # Install requirements - _install_requirements(session, 'unittest-xml-reporting') + _install_requirements(session, 'unittest-xml-reporting<2.4.0') # Create required artifacts directories _create_ci_directories() From aa2c626cdf2201c5f140663fe20fb01f19337286 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Mon, 18 Mar 2019 18:13:54 -0700 Subject: [PATCH 021/340] With the change/addition of the matcher subsystem in 2019.2, the match.search_by when used in pillar broke when targetting the minion that was also the salt master. This was caused by the id in __opts__ being used in all cases. This change updates the glob_match function to use the preserved minion_id of the master if it is available so that targeting works as expected. --- salt/matchers/compound_match.py | 8 ++++++-- salt/matchers/glob_match.py | 6 +++++- salt/pillar/__init__.py | 1 + 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/salt/matchers/compound_match.py b/salt/matchers/compound_match.py index e00d33c4ea54..60f01b3a5cab 100644 --- a/salt/matchers/compound_match.py +++ b/salt/matchers/compound_match.py @@ -25,11 +25,15 @@ def match(tgt): ''' nodegroups = __opts__.get('nodegroups', {}) matchers = salt.loader.matchers(__opts__) + if 'minion_id' in __opts__: + minion_id = __opts__['minion_id'] + else: + minion_id = __opts__['id'] if not isinstance(tgt, six.string_types) and not isinstance(tgt, (list, tuple)): log.error('Compound target received that is neither string, list nor tuple') return False - log.debug('compound_match: %s ? %s', __opts__['id'], tgt) + log.debug('compound_match: %s ? %s', minion_id, tgt) ref = {'G': 'grain', 'P': 'grain_pcre', 'I': 'pillar', @@ -102,7 +106,7 @@ def match(tgt): results.append(six.text_type(matchers['glob_match.match'](word))) results = ' '.join(results) - log.debug('compound_match %s ? "%s" => "%s"', __opts__['id'], tgt, results) + log.debug('compound_match %s ? 
"%s" => "%s"', minion_id, tgt, results) try: return eval(results) # pylint: disable=W0123 except Exception: diff --git a/salt/matchers/glob_match.py b/salt/matchers/glob_match.py index 5516a276d627..5217b75f47fc 100644 --- a/salt/matchers/glob_match.py +++ b/salt/matchers/glob_match.py @@ -12,7 +12,11 @@ def match(tgt): ''' Returns true if the passed glob matches the id ''' + if 'minion_id' in __opts__: + minion_id = __opts__['minion_id'] + else: + minion_id = __opts__['id'] if not isinstance(tgt, six.string_types): return False - return fnmatch.fnmatch(__opts__['id'], tgt) + return fnmatch.fnmatch(minion_id, tgt) diff --git a/salt/pillar/__init__.py b/salt/pillar/__init__.py index 94dd9695ca1d..f20f4ec5cf68 100644 --- a/salt/pillar/__init__.py +++ b/salt/pillar/__init__.py @@ -386,6 +386,7 @@ def __init__(self, opts, grains, minion_id, saltenv, ext=None, functions=None, else: self.functions = functions + self.opts['minion_id'] = minion_id self.matchers = salt.loader.matchers(self.opts) self.rend = salt.loader.render(self.opts, self.functions) ext_pillar_opts = copy.deepcopy(self.opts) From 6a5b5b28e2333f622dcabe05fb36036642a4f89f Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Tue, 19 Mar 2019 09:19:48 -0700 Subject: [PATCH 022/340] swapping out if...else approach for __opts__.get approach. --- salt/matchers/compound_match.py | 5 +---- salt/matchers/glob_match.py | 5 +---- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/salt/matchers/compound_match.py b/salt/matchers/compound_match.py index 60f01b3a5cab..f6918acecc9b 100644 --- a/salt/matchers/compound_match.py +++ b/salt/matchers/compound_match.py @@ -25,10 +25,7 @@ def match(tgt): ''' nodegroups = __opts__.get('nodegroups', {}) matchers = salt.loader.matchers(__opts__) - if 'minion_id' in __opts__: - minion_id = __opts__['minion_id'] - else: - minion_id = __opts__['id'] + minion_id = __opts__.get(['minion_id'], __opts__['id']) if not isinstance(tgt, six.string_types) and not isinstance(tgt, (list, tuple)): log.error('Compound target received that is neither string, list nor tuple') diff --git a/salt/matchers/glob_match.py b/salt/matchers/glob_match.py index 5217b75f47fc..7b01b855fdaa 100644 --- a/salt/matchers/glob_match.py +++ b/salt/matchers/glob_match.py @@ -12,10 +12,7 @@ def match(tgt): ''' Returns true if the passed glob matches the id ''' - if 'minion_id' in __opts__: - minion_id = __opts__['minion_id'] - else: - minion_id = __opts__['id'] + minion_id = __opts__.get('minion_id', __opts__['id']) if not isinstance(tgt, six.string_types): return False From 7f83b4baee6dd3bb36c3bd065d338099d5dd0511 Mon Sep 17 00:00:00 2001 From: "Gareth J. 
Greenaway" Date: Tue, 19 Mar 2019 09:49:23 -0700 Subject: [PATCH 023/340] Swapping out if state looking for test in opts for a __opts__.get --- salt/states/beacon.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/salt/states/beacon.py b/salt/states/beacon.py index 8af353f2934f..a151ceeab215 100644 --- a/salt/states/beacon.py +++ b/salt/states/beacon.py @@ -67,7 +67,7 @@ def present(name, if beacon_data == current_beacons[name]: ret['comment'].append('Job {0} in correct state'.format(name)) else: - if 'test' in __opts__ and __opts__['test']: + if __opts__.get('test'): kwargs['test'] = True result = __salt__['beacons.modify'](name, beacon_data) ret['comment'].append(result['comment']) @@ -86,7 +86,7 @@ def present(name, ret['comment'].append(result['comment']) else: - if 'test' in __opts__ and __opts__['test']: + if __opts__.get('test'): kwargs['test'] = True result = __salt__['beacons.add'](name, beacon_data, **kwargs) ret['comment'].append(result['comment']) @@ -100,7 +100,7 @@ def present(name, ret['comment'].append('Adding {0} to beacons'.format(name)) if save: - if 'test' in __opts__ and __opts__['test']: + if __opts__.get('test'): ret['comment'].append('Beacon {0} would be saved'.format(name)) else: result = __salt__['beacons.save']() @@ -133,7 +133,7 @@ def absent(name, current_beacons = __salt__['beacons.list'](return_yaml=False) if name in current_beacons: - if 'test' in __opts__ and __opts__['test']: + if __opts__.get('test'): kwargs['test'] = True result = __salt__['beacons.delete'](name, **kwargs) ret['comment'].append(result['comment']) @@ -149,7 +149,7 @@ def absent(name, ret['comment'].append('{0} not configured in beacons'.format(name)) if save: - if 'test' in __opts__ and __opts__['test']: + if __opts__.get('test'): ret['comment'].append('Beacon {0} would be saved'.format(name)) else: result = __salt__['beacons.save']() @@ -178,7 +178,7 @@ def enabled(name, **kwargs): current_beacons = __salt__['beacons.list'](return_yaml=False) if name in current_beacons: - if 'test' in __opts__ and __opts__['test']: + if __opts__.get('test'): kwargs['test'] = True result = __salt__['beacons.enable_beacon'](name, **kwargs) ret['comment'].append(result['comment']) @@ -216,7 +216,7 @@ def disabled(name, **kwargs): current_beacons = __salt__['beacons.list'](return_yaml=False) if name in current_beacons: - if 'test' in __opts__ and __opts__['test']: + if __opts__.get('test'): kwargs['test'] = True result = __salt__['beacons.disable_beacon'](name, **kwargs) ret['comment'].append(result['comment']) From a2173d7dcdb979e4c3d1651613eba628f4e7c5ea Mon Sep 17 00:00:00 2001 From: "Gareth J. 
Greenaway" Date: Tue, 19 Mar 2019 10:34:59 -0700 Subject: [PATCH 024/340] Fixing a typo where attempting to get a list from __opts__ instead of an individual item --- salt/matchers/compound_match.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/matchers/compound_match.py b/salt/matchers/compound_match.py index f6918acecc9b..0d60d8cca36a 100644 --- a/salt/matchers/compound_match.py +++ b/salt/matchers/compound_match.py @@ -25,7 +25,7 @@ def match(tgt): ''' nodegroups = __opts__.get('nodegroups', {}) matchers = salt.loader.matchers(__opts__) - minion_id = __opts__.get(['minion_id'], __opts__['id']) + minion_id = __opts__.get('minion_id', __opts__['id']) if not isinstance(tgt, six.string_types) and not isinstance(tgt, (list, tuple)): log.error('Compound target received that is neither string, list nor tuple') From 173d1e73c90280be43d2bac3ad5d509d6bc882e8 Mon Sep 17 00:00:00 2001 From: twangboy Date: Tue, 19 Mar 2019 15:12:41 -0600 Subject: [PATCH 025/340] Fix the domain grain on Windows Make the domain grain match the windowsdomain grain on Windows --- salt/grains/core.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/grains/core.py b/salt/grains/core.py index 9758275f2c53..2aea5d7ca053 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py @@ -1247,6 +1247,8 @@ def _windows_platform_data(): elif 'OpenStack' in systeminfo.Model: grains['virtual'] = 'OpenStack' + grains['domain'] = grains['windowsdomain'] + return grains From 65eb461071eadac6492f5a449dbb77d6be6cc4e1 Mon Sep 17 00:00:00 2001 From: twangboy Date: Tue, 19 Mar 2019 15:40:56 -0600 Subject: [PATCH 026/340] Add a test for windows grains --- tests/unit/grains/test_core.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py index 3874b0001c28..85254a60726d 100644 --- a/tests/unit/grains/test_core.py +++ b/tests/unit/grains/test_core.py @@ -133,6 +133,30 @@ def test_missing_os_release(self): os_release = core._parse_os_release('/etc/os-release', '/usr/lib/os-release') self.assertEqual(os_release, {}) + @skipIf(not salt.utils.platform.is_windows(), 'System is not Windows') + def test__windows_platform_data(self): + grains = core._windows_platform_data() + keys = ['biosversion', + 'osrelease', + 'domain', + 'kernelrelease', + 'motherboard', + 'serialnumber', + 'timezone', + 'manufacturer', + 'kernelversion', + 'osservicepack', + 'virtual', + 'productname', + 'osfullname', + 'osmanufacturer', + 'osversion', + 'windowsdomain'] + for key in keys: + self.assertIn(key, grains) + + self.assertEqual(grains['domain'], grains['windowsdomain']) + @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux') def test_gnu_slash_linux_in_os_name(self): ''' From b039cec21adacdbc71eeb3b90162389b80677cde Mon Sep 17 00:00:00 2001 From: Pedro Algarvio Date: Wed, 20 Mar 2019 14:02:51 +0000 Subject: [PATCH 027/340] Windows now has a static requirements file and no longer needs hacks --- noxfile.py | 11 ++-- requirements/static/windows.in | 37 +++++++++++ requirements/static/windows.txt | 90 +++++++++++++++++++++++++ tests/support/nox-windows-setup.py | 102 ----------------------------- 4 files changed, 132 insertions(+), 108 deletions(-) create mode 100644 requirements/static/windows.in create mode 100644 requirements/static/windows.txt delete mode 100644 tests/support/nox-windows-setup.py diff --git a/noxfile.py b/noxfile.py index 9d5d8e7fa1a4..46b2d39e8bba 100644 --- a/noxfile.py +++ b/noxfile.py @@ -56,7 +56,11 @@ def 
_install_requirements(session, *extra_requirements): # Install requirements distro_requirements = None - if not IS_WINDOWS: + if IS_WINDOWS: + _distro_requirements = os.path.join(REPO_ROOT, 'requirements', 'static', 'windows.txt') + if os.path.exists(_distro_requirements): + distro_requirements = _distro_requirements + else: # The distro package doesn't output anything for Windows session.install('distro') output = session.run('distro', '-j', silent=True) @@ -120,11 +124,6 @@ def _install_requirements(session, *extra_requirements): if extra_requirements: session.install(*extra_requirements) - if IS_WINDOWS: - # Windows hacks :/ - nox_windows_setup = os.path.join(REPO_ROOT, 'tests', 'support', 'nox-windows-setup.py') - session.run('python', nox_windows_setup) - def _run_with_coverage(session, *test_cmd): session.install('coverage==4.5.3') diff --git a/requirements/static/windows.in b/requirements/static/windows.in new file mode 100644 index 000000000000..4673bbe3006a --- /dev/null +++ b/requirements/static/windows.in @@ -0,0 +1,37 @@ +apache-libcloud==1.0.0 +boto3 +boto>=2.46.0 +dmidecode +dnspython +docker==2.7.0 +ioflo +jsonschema<=2.6.0 +keyring==5.7.1 +kubernetes<4.0 +mock<1.1.0 +more-itertools==5.0.0 +msgpack-python >= 0.4.2, != 0.5.5 +patch +psutil +pycryptodomex +pyopenssl +python-etcd==0.4.2 +python-gnupg +pyvmomi +rfc3987 +salttesting==2017.6.1 +sed +setproctitle +strict_rfc3339 +supervisor; python_version < '3' +timelib +tornado<5.0 +virtualenv + +# If running under windows, please uncomment the following 2 requirements before running +# pip-compile -o requirements/static/windows.txt requirements/zeromq.txt requirements/raet.txt requirements/pytest.txt requirements/static/windows.in +# +# On non windows, please copy the following 2 requirements to the generated windows.txt, un-commented + +#pywin32==223 +#wmi==1.4.9 diff --git a/requirements/static/windows.txt b/requirements/static/windows.txt new file mode 100644 index 000000000000..566bf3537f83 --- /dev/null +++ b/requirements/static/windows.txt @@ -0,0 +1,90 @@ +# +# This file is autogenerated by pip-compile +# To update, run: +# +# pip-compile -o requirements/static/windows.txt requirements/zeromq.txt requirements/raet.txt requirements/pytest.txt requirements/static/windows.in +# +apache-libcloud==1.0.0 +asn1crypto==0.24.0 # via cryptography +atomicwrites==1.3.0 # via pytest +attrs==19.1.0 # via pytest +backports-abc==0.5 # via tornado +backports.ssl-match-hostname==3.7.0.1 # via docker, websocket-client +boto3==1.9.117 +boto==2.49.0 +botocore==1.12.117 # via boto3, s3transfer +cachetools==3.1.0 # via google-auth +certifi==2019.3.9 # via kubernetes, requests, tornado +cffi==1.12.2 # via cryptography +chardet==3.0.4 # via requests +coverage==4.5.3 # via pytest-cov +cryptography==2.6.1 # via pyopenssl +dmidecode==0.9.0 +dnspython==1.16.0 +docker-pycreds==0.4.0 # via docker +docker==2.7.0 +docutils==0.14 # via botocore +enum34==1.1.6 # via cryptography, raet +funcsigs==1.0.2 # via pytest +functools32==3.2.3.post2 # via jsonschema +futures==3.2.0 ; python_version < "3.0" +google-auth==1.6.3 # via kubernetes +idna==2.8 # via requests +ioflo==1.7.5 +ipaddress==1.0.22 # via cryptography, docker, kubernetes +jinja2==2.10 +jmespath==0.9.4 # via boto3, botocore +jsonschema==2.6.0 +keyring==5.7.1 +kubernetes==3.0.0 +libnacl==1.6.1 +markupsafe==1.1.1 +meld3==1.0.2 # via supervisor +mock==1.0.1 +more-itertools==5.0.0 +msgpack-python==0.5.6 +msgpack==0.6.1 +patch==1.16 +pathlib2==2.3.3 # via pytest +pluggy==0.9.0 # via pytest +psutil==5.6.1 
+py==1.8.0 # via pytest +pyasn1-modules==0.2.4 # via google-auth +pyasn1==0.4.5 # via pyasn1-modules, rsa +pycparser==2.19 # via cffi +pycrypto==2.6.1 +pycryptodomex==3.7.3 +pyopenssl==19.0.0 +pytest-cov==2.6.1 +pytest-helpers-namespace==2019.1.8 +pytest-salt-runtests-bridge==2019.1.30 +pytest-salt==2018.12.8 +pytest-tempdir==2018.8.11 +pytest-timeout==1.3.3 +pytest==4.3.1 +python-dateutil==2.8.0 # via botocore, kubernetes +python-etcd==0.4.2 +python-gnupg==0.4.4 +pyvmomi==6.7.1.2018.12 +pyyaml==3.13 +pyzmq==18.0.1 ; python_version != "3.4" +raet==0.6.8 +requests==2.21.0 +rfc3987==1.3.8 +rsa==4.0 # via google-auth +s3transfer==0.2.0 # via boto3 +salttesting==2017.6.1 +scandir==1.10.0 # via pathlib2 +sed==0.3.1 +setproctitle==1.1.10 +singledispatch==3.4.0.3 # via tornado +six==1.12.0 # via cryptography, docker, docker-pycreds, google-auth, kubernetes, more-itertools, pathlib2, pyopenssl, pytest, python-dateutil, pyvmomi, raet, salttesting, singledispatch, websocket-client +strict-rfc3339==0.7 +supervisor==3.3.5 ; python_version < "3" +timelib==0.2.4 +tornado==4.5.3 ; python_version < "3" +urllib3==1.24.1 # via botocore, kubernetes, python-etcd, requests +virtualenv==16.4.3 +websocket-client==0.40.0 # via docker, kubernetes +pywin32==223 +wmi==1.4.9 diff --git a/tests/support/nox-windows-setup.py b/tests/support/nox-windows-setup.py deleted file mode 100644 index 9409884ef3ea..000000000000 --- a/tests/support/nox-windows-setup.py +++ /dev/null @@ -1,102 +0,0 @@ -# -*- coding: utf-8 -*- -''' - tests.support.nox-windows-setup - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - This script is meant to run under the nox virtualenv to take care of required - windows procedures -''' -# pylint: disable=resource-leakage - -from __future__ import absolute_import, print_function, unicode_literals -import os -import re -import sys -import site -import shutil - -try: - import site - SITE_PACKAGES = site.getsitepackages() - PYTHON_EXECUTABLE_DIRECTORY = os.path.dirname(sys.executable) - PYTHON_SCRIPTS_DIR = os.path.join(PYTHON_EXECUTABLE_DIRECTORY, 'Scripts') -except AttributeError: - # The site module does not have the getsitepackages function when running within a virtualenv - # But the site-packages directory WILL be on sys.path - SITE_PACKAGES = None - for entry in sys.path: - if 'site-packages' in entry: - SITE_PACKAGES = entry - break - # Under a virtualenv, the python "binary" is under Scripts already. - # Well, not the binary, but the Python DLLs - PYTHON_EXECUTABLE_DIRECTORY = PYTHON_SCRIPTS_DIR = os.path.dirname(sys.executable) - -# Requests is a Salt dependency, it's safe to import, but... 
-try: - import requests - HAS_REQUESTS = True -except ImportError: - HAS_REQUESTS = False - -IS_64_BITS = sys.maxsize > 2**32 -SALT_REPO_URL = 'https://repo.saltstack.com/windows/dependencies/{}'.format(IS_64_BITS and 64 or 32) -DLLS = ("libeay32.dll", "ssleay32.dll", "OpenSSL_License.txt", "msvcr120.dll", "libsodium.dll") - -for dll in DLLS: - outfile = os.path.join(PYTHON_EXECUTABLE_DIRECTORY, dll) - if os.path.exists(outfile): - continue - src_url = '{}/{}'.format(SALT_REPO_URL, dll) - if HAS_REQUESTS: - print('Downloading {} to {}'.format(src_url, outfile)) - request = requests.get(src_url, allow_redirects=True) - with open(outfile, 'wb') as wfh: - wfh.write(request.content) - else: - print('ATTENTION: The python requests package is not installed, can\'t download {}'.format(src_url)) - -PYWIN32_SYSTEM32_DIR = os.path.join(SITE_PACKAGES, 'pywin32_system32') -if os.path.exists(PYWIN32_SYSTEM32_DIR): - for fname in os.listdir(PYWIN32_SYSTEM32_DIR): - if not fname.endswith('.dll'): - continue - spath = os.path.join(PYWIN32_SYSTEM32_DIR, fname) - dpath = spath.replace('pywin32_system32', 'win32') - print('Moving {} to {}'.format(spath, dpath)) - shutil.move(spath, dpath) - - print('Deleting {}'.format(PYWIN32_SYSTEM32_DIR)) - shutil.rmtree(PYWIN32_SYSTEM32_DIR, ignore_errors=True) - - -if os.path.exists(PYTHON_SCRIPTS_DIR): - print('Searching for pywin32 scripts to delete') - for fname in os.listdir(PYTHON_SCRIPTS_DIR): - if not fname.startswith('pywin32_'): - continue - fpath = os.path.join(PYTHON_SCRIPTS_DIR, fname) - print('Deleting {}'.format(fpath)) - os.unlink(fpath) - - -PYTHONWIN_DIR = os.path.join(SITE_PACKAGES, 'pythonwin') -if os.path.exists(PYTHONWIN_DIR): - print('Deleting {}'.format(PYTHONWIN_DIR)) - shutil.rmtree(PYTHONWIN_DIR, ignore_errors=True) - -PYCRPTO_NT_FILE = os.path.join(SITE_PACKAGES, 'Crypto', 'Random', 'OSRNG', 'nt.py') -if os.path.exists(PYCRPTO_NT_FILE): - with open(PYCRPTO_NT_FILE, 'r') as rfh: - contents = rfh.read() - new_contents = re.sub( - r'^import winrandom$', - 'from Crypto.Random.OSRNG import winrandom', - contents, - count=1, - flags=re.MULTILINE - ) - if contents != new_contents: - print('Patching {}'.format(PYCRPTO_NT_FILE)) - with open(PYCRPTO_NT_FILE, 'w') as wfh: - wfh.write(new_contents) From 981335a177d8d459779c4c86fcf21275d5f98b95 Mon Sep 17 00:00:00 2001 From: Pedro Algarvio Date: Wed, 20 Mar 2019 14:09:45 +0000 Subject: [PATCH 028/340] Revert "Avoid a traceback on tornado.testing test classes" This reverts commit dce4ffab3501af897571f8a58f044cfc6ec2c667. 
--- tests/support/xmlunit.py | 48 +++------------------------------------- 1 file changed, 3 insertions(+), 45 deletions(-) diff --git a/tests/support/xmlunit.py b/tests/support/xmlunit.py index 82300e61ca89..61294b9e5b59 100644 --- a/tests/support/xmlunit.py +++ b/tests/support/xmlunit.py @@ -16,8 +16,6 @@ from __future__ import absolute_import import io import sys -import time -import inspect import logging # Import 3rd-party libs @@ -29,7 +27,6 @@ try: import xmlrunner.runner import xmlrunner.result - import xmlrunner.unittest HAS_XMLRUNNER = True class _DelegateIO(object): @@ -59,48 +56,9 @@ def __getattr__(self, attr): class _XMLTestResult(xmlrunner.result._XMLTestResult): def startTest(self, test): - log.debug('>>>>> START >>>>> %s', test.id()) + log.debug('>>>>> START >>>>> {0}'.format(test.id())) # xmlrunner classes are NOT new-style classes - # xmlrunner.result._XMLTestResult.startTest(self, test) - - # ----- Re-Implement startTest --------------------------------------------------------------------------> - # The reason being that _XMLTestResult does not like tornado testing wrapping it's test class - # https://gist.github.com/s0undt3ch/9298a69a3492404d89a832de9efb1e68 - self.start_time = time.time() - xmlrunner.unittest.TestResult.startTest(self, test) - - try: - if getattr(test, '_dt_test', None) is not None: - # doctest.DocTestCase - self.filename = test._dt_test.filename - self.lineno = test._dt_test.lineno - else: - # regular unittest.TestCase? - test_method = getattr(test, test._testMethodName) - test_class = type(test) - # Note: inspect can get confused with decorators, so use class. - self.filename = inspect.getsourcefile(test_class) - # Handle partial and partialmethod objects. - test_method = getattr(test_method, 'func', test_method) - - # ----- Code which avoids the inspect tracebacks ------------------------------------------------> - try: - from tornado.testing import _TestMethodWrapper - if isinstance(test_method, _TestMethodWrapper): - test_method = test_method.orig_method - except (ImportError, AttributeError): - pass - # <---- Code which avoids the inspect tracebacks ------------------------------------------------- - _, self.lineno = inspect.getsourcelines(test_method) - finally: - pass - - if self.showAll: - self.stream.write(' ' + self.getDescription(test)) - self.stream.write(" ... 
") - self.stream.flush() - # <---- Re-Implement startTest --------------------------------------------------------------------------- - + xmlrunner.result._XMLTestResult.startTest(self, test) if self.buffer: # Let's override the values of self._stdXXX_buffer # We want a similar sys.stdXXX file like behaviour @@ -110,7 +68,7 @@ def startTest(self, test): sys.stdout = self._stdout_buffer def stopTest(self, test): - log.debug('<<<<< END <<<<<<< %s', test.id()) + log.debug('<<<<< END <<<<<<< {0}'.format(test.id())) # xmlrunner classes are NOT new-style classes return xmlrunner.result._XMLTestResult.stopTest(self, test) From c874831ef494bc2534455ec12a7ec511c6e56797 Mon Sep 17 00:00:00 2001 From: twangboy Date: Wed, 20 Mar 2019 08:52:21 -0600 Subject: [PATCH 029/340] Don't set the domain grain to windowsdomain --- salt/grains/core.py | 2 -- tests/unit/grains/test_core.py | 2 -- 2 files changed, 4 deletions(-) diff --git a/salt/grains/core.py b/salt/grains/core.py index 2aea5d7ca053..9758275f2c53 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py @@ -1247,8 +1247,6 @@ def _windows_platform_data(): elif 'OpenStack' in systeminfo.Model: grains['virtual'] = 'OpenStack' - grains['domain'] = grains['windowsdomain'] - return grains diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py index 85254a60726d..e08b95943f52 100644 --- a/tests/unit/grains/test_core.py +++ b/tests/unit/grains/test_core.py @@ -155,8 +155,6 @@ def test__windows_platform_data(self): for key in keys: self.assertIn(key, grains) - self.assertEqual(grains['domain'], grains['windowsdomain']) - @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux') def test_gnu_slash_linux_in_os_name(self): ''' From e103561858ebde46a6af01efcc18c5b1eaa5442b Mon Sep 17 00:00:00 2001 From: twangboy Date: Wed, 20 Mar 2019 08:55:40 -0600 Subject: [PATCH 030/340] Add some documentation about the domain grain on Windows --- salt/grains/core.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/salt/grains/core.py b/salt/grains/core.py index 9758275f2c53..6f66f3a56cd2 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py @@ -2028,6 +2028,11 @@ def locale_info(): def hostname(): ''' Return fqdn, hostname, domainname + + .. note:: + On Windows the ``domain`` grain may refer to the dns entry for the host + instead of the Windows domain to which the host is joined. It may also + be empty if not a part of any domain ''' # This is going to need some work # Provides: From b4053918e4f38585b3de05153d2614293ce98299 Mon Sep 17 00:00:00 2001 From: twangboy Date: Wed, 20 Mar 2019 08:58:07 -0600 Subject: [PATCH 031/340] Refer to the windowsdomain grain --- salt/grains/core.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/grains/core.py b/salt/grains/core.py index 6f66f3a56cd2..e799c56f042f 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py @@ -2032,7 +2032,8 @@ def hostname(): .. note:: On Windows the ``domain`` grain may refer to the dns entry for the host instead of the Windows domain to which the host is joined. It may also - be empty if not a part of any domain + be empty if not a part of any domain. 
Refer to the ``windowsdomain`` + grain instead ''' # This is going to need some work # Provides: From 684bf584f68bef5d1965e81494dfbd00f5c46542 Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Wed, 20 Mar 2019 21:04:49 +0300 Subject: [PATCH 032/340] Update doc conf with the new import `tornado.queues` --- doc/conf.py | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/conf.py b/doc/conf.py index 4814536bf60a..7b2e3b444646 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -131,6 +131,7 @@ def inner(fn, *iargs, **ikwargs): # pylint: disable=unused-argument 'tornado.ioloop', 'tornado.iostream', 'tornado.netutil', + 'tornado.queues', 'tornado.simple_httpclient', 'tornado.stack_context', 'tornado.web', From bfdc47bfa39816bc0ce578f4362bfcfb5f48e01b Mon Sep 17 00:00:00 2001 From: twangboy Date: Wed, 20 Mar 2019 18:20:08 -0600 Subject: [PATCH 033/340] Handle new enhanced retcode 2 --- salt/modules/chocolatey.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/salt/modules/chocolatey.py b/salt/modules/chocolatey.py index 3547e8d68bd8..29b9e53c6218 100644 --- a/salt/modules/chocolatey.py +++ b/salt/modules/chocolatey.py @@ -291,7 +291,12 @@ def list_(narrow=None, result = __salt__['cmd.run_all'](cmd, python_shell=False) - if result['retcode'] != 0: + # Chocolatey introduced Enhanced Exit Codes starting with version 0.10.12 + # Exit Code 2 means there were no results, but is not a failure + # This may start to effect other functions in the future as Chocolatey + # moves more functions to this new paradigm + # https://github.com/chocolatey/choco/issues/1758 + if result['retcode'] not in [0, 2]: raise CommandExecutionError( 'Running chocolatey failed: {0}'.format(result['stdout']) ) From bf9c55e6bea04e31226a7303c50ff8004c91bc0e Mon Sep 17 00:00:00 2001 From: Pedro Algarvio Date: Thu, 21 Mar 2019 08:29:50 +0000 Subject: [PATCH 034/340] Previously have setuptools-git installed if ioflo is to be installed --- noxfile.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/noxfile.py b/noxfile.py index 46b2d39e8bba..5536b0c02e75 100644 --- a/noxfile.py +++ b/noxfile.py @@ -59,6 +59,12 @@ def _install_requirements(session, *extra_requirements): if IS_WINDOWS: _distro_requirements = os.path.join(REPO_ROOT, 'requirements', 'static', 'windows.txt') if os.path.exists(_distro_requirements): + with open(_distro_requirements) as rfh: + if 'ioflo' in rfh.read(): + # Because we still install ioflo, which requires setuptools-git, which fails with a + # weird SSL certificate issue(weird because the requirements file requirements install + # fine), let's previously have setuptools-git installed + session.install('setuptools-git') distro_requirements = _distro_requirements else: # The distro package doesn't output anything for Windows From cc6fb4662ab33e8e15b264d58c00a443ffaac35e Mon Sep 17 00:00:00 2001 From: Pedro Algarvio Date: Thu, 21 Mar 2019 11:32:39 +0000 Subject: [PATCH 035/340] Additionally ignore files in nox virtualenvs and CI artifacts directories --- tests/unit/test_doc.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tests/unit/test_doc.py b/tests/unit/test_doc.py index c7b53e03ba39..f619c397673a 100644 --- a/tests/unit/test_doc.py +++ b/tests/unit/test_doc.py @@ -58,12 +58,13 @@ def test_check_for_doc_inline_markup(self): regex = re.compile(r':(?!\\)') key, val = regex.split(line, 1) - # Don't test man pages, this file, - # the tox virtualenv files, the page - # that documents to not use ":doc:", - # or the doc/conf.py file + # Don't test man pages, 
this file, the tox or nox virtualenv files, + # the page that documents to not use ":doc:", the doc/conf.py file + # or the artifacts directory on nox CI test runs if 'man' in key \ or '.tox/' in key \ + or '.nox/' in key \ + or 'artifacts/' in key \ or key.endswith('test_doc.py') \ or key.endswith(os.sep.join(['doc', 'conf.py'])) \ or key.endswith(os.sep.join(['conventions', 'documentation.rst'])) \ From 57348cad9d0c0fd14aa28248632c5d2fc7da4c8b Mon Sep 17 00:00:00 2001 From: Pedro Algarvio Date: Thu, 21 Mar 2019 11:42:49 +0000 Subject: [PATCH 036/340] Add static requirements for Arch linux --- noxfile.py | 1 + requirements/static/arch.in | 34 +++++++++++ requirements/static/arch.txt | 111 +++++++++++++++++++++++++++++++++++ 3 files changed, 146 insertions(+) create mode 100644 requirements/static/arch.in create mode 100644 requirements/static/arch.txt diff --git a/noxfile.py b/noxfile.py index 46b2d39e8bba..ce9806756293 100644 --- a/noxfile.py +++ b/noxfile.py @@ -67,6 +67,7 @@ def _install_requirements(session, *extra_requirements): distro = json.loads(output.strip()) session.log('Distro information:\n%s', pprint.pformat(distro)) distro_keys = [ + '{id}'.format(**distro), '{id}-{version}'.format(**distro), '{id}-{version_parts[major]}'.format(**distro) ] diff --git a/requirements/static/arch.in b/requirements/static/arch.in new file mode 100644 index 000000000000..7a44d1a70758 --- /dev/null +++ b/requirements/static/arch.in @@ -0,0 +1,34 @@ +# This is a compilation of requirements installed on salt-jenkins git.salt state run +apache-libcloud==1.0.0 +boto3 +boto>=2.46.0 +cffi +cherrypy==17.3.0 +dnspython +docker +futures>=2.0; python_version < '3.0' +GitPython +ioflo +jsonschema<=2.6.0 +keyring==5.7.1 +kubernetes<4.0 +mock<1.1.0 +more-itertools==5.0.0 +moto +msgpack-python >= 0.4.2, != 0.5.5 +psutil +pycrypto>=2.6.1 +pyinotify +pyopenssl +python-etcd==0.4.2 +python-gnupg +pyvmomi +pyzmq +requests +rfc3987 +salttesting==2017.6.1 +setproctitle +strict_rfc3339 +timelib +tornado<5.0 +virtualenv diff --git a/requirements/static/arch.txt b/requirements/static/arch.txt new file mode 100644 index 000000000000..192cd18e66cf --- /dev/null +++ b/requirements/static/arch.txt @@ -0,0 +1,111 @@ +# +# This file is autogenerated by pip-compile +# To update, run: +# +# pip-compile -o requirements/static/arch.txt requirements/zeromq.txt requirements/raet.txt requirements/pytest.txt requirements/static/arch.in +# +apache-libcloud==1.0.0 +asn1crypto==0.24.0 # via cryptography +atomicwrites==1.3.0 # via pytest +attrs==19.1.0 # via pytest +aws-xray-sdk==0.95 # via moto +backports-abc==0.5 # via tornado +backports.functools-lru-cache==1.5 # via cheroot, jaraco.functools +backports.ssl-match-hostname==3.7.0.1 # via docker, websocket-client +backports.tempfile==1.0 # via moto +backports.weakref==1.0.post1 # via backports.tempfile +boto3==1.9.118 +boto==2.49.0 +botocore==1.12.118 # via boto3, moto, s3transfer +cachetools==3.1.0 # via google-auth +certifi==2019.3.9 # via kubernetes, requests, tornado +cffi==1.12.2 +chardet==3.0.4 # via requests +cheroot==6.5.4 # via cherrypy +cherrypy==17.3.0 +contextlib2==0.5.5 # via cherrypy +cookies==2.2.1 # via responses +coverage==4.5.3 # via pytest-cov +cryptography==2.6.1 # via moto, pyopenssl +dnspython==1.16.0 +docker-pycreds==0.4.0 # via docker +docker==3.7.1 +docutils==0.14 # via botocore +ecdsa==0.13 # via python-jose +enum34==1.1.6 # via cryptography, raet +funcsigs==1.0.2 # via pytest +functools32==3.2.3.post2 # via jsonschema +future==0.17.1 # via python-jose 
+futures==3.2.0 ; python_version < "3.0" +gitdb2==2.0.5 # via gitpython +gitpython==2.1.11 +google-auth==1.6.3 # via kubernetes +idna==2.8 # via requests +ioflo==1.7.5 +ipaddress==1.0.22 # via cryptography, docker, kubernetes +jaraco.functools==2.0 # via tempora +jinja2==2.10 +jmespath==0.9.4 # via boto3, botocore +jsondiff==1.1.1 # via moto +jsonpickle==1.1 # via aws-xray-sdk +jsonschema==2.6.0 +keyring==5.7.1 +kubernetes==3.0.0 +libnacl==1.6.1 +markupsafe==1.1.1 +mock==1.0.1 +more-itertools==5.0.0 +moto==1.3.7 +msgpack-python==0.5.6 +msgpack==0.6.1 +pathlib2==2.3.3 # via pytest +pluggy==0.9.0 # via pytest +portend==2.3 # via cherrypy +psutil==5.6.1 +py==1.8.0 # via pytest +pyaml==18.11.0 # via moto +pyasn1-modules==0.2.4 # via google-auth +pyasn1==0.4.5 # via pyasn1-modules, rsa +pycparser==2.19 # via cffi +pycrypto==2.6.1 +pycryptodome==3.7.3 # via python-jose +pyinotify==0.9.6 +pyopenssl==19.0.0 +pytest-cov==2.6.1 +pytest-helpers-namespace==2019.1.8 +pytest-salt-runtests-bridge==2019.1.30 +pytest-salt==2018.12.8 +pytest-tempdir==2018.8.11 +pytest-timeout==1.3.3 +pytest==4.3.1 +python-dateutil==2.8.0 # via botocore, kubernetes, moto +python-etcd==0.4.2 +python-gnupg==0.4.4 +python-jose==2.0.2 # via moto +pytz==2018.9 # via moto, tempora +pyvmomi==6.7.1.2018.12 +pyyaml==3.13 +pyzmq==18.0.1 ; python_version != "3.4" +raet==0.6.8 +requests==2.21.0 +responses==0.10.6 # via moto +rfc3987==1.3.8 +rsa==4.0 # via google-auth +s3transfer==0.2.0 # via boto3 +salttesting==2017.6.1 +scandir==1.10.0 # via pathlib2 +setproctitle==1.1.10 +singledispatch==3.4.0.3 # via tornado +six==1.12.0 # via cheroot, cherrypy, cryptography, docker, docker-pycreds, google-auth, kubernetes, more-itertools, moto, pathlib2, pyopenssl, pytest, python-dateutil, python-jose, pyvmomi, raet, responses, salttesting, singledispatch, tempora, websocket-client +smmap2==2.0.5 # via gitdb2 +strict-rfc3339==0.7 +tempora==1.14 # via portend +timelib==0.2.4 +tornado==4.5.3 ; python_version < "3" +urllib3==1.24.1 # via botocore, kubernetes, python-etcd, requests +virtualenv==16.4.3 +websocket-client==0.40.0 # via docker, kubernetes +werkzeug==0.15.0 # via moto +wrapt==1.11.1 # via aws-xray-sdk +xmltodict==0.12.0 # via moto +zc.lockfile==1.4 # via cherrypy From 36ed50d771ce6b9c54ba94e4a162bb62e0f5d698 Mon Sep 17 00:00:00 2001 From: Pedro Algarvio Date: Thu, 21 Mar 2019 11:51:48 +0000 Subject: [PATCH 037/340] Lock to unittest-xml-reporting 2.2.1 After this version, the library started taking into account expected failures which Jenkins treats as regular failures. 
--- noxfile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/noxfile.py b/noxfile.py index ce9806756293..c09d57924a71 100644 --- a/noxfile.py +++ b/noxfile.py @@ -149,7 +149,7 @@ def _run_with_coverage(session, *test_cmd): @nox.parametrize('coverage', [False, True]) def runtests(session, coverage): # Install requirements - _install_requirements(session, 'unittest-xml-reporting<2.4.0') + _install_requirements(session, 'unittest-xml-reporting==2.2.1') # Create required artifacts directories _create_ci_directories() From 1f99e2855033789dd3c96810f4fafe6a465466ee Mon Sep 17 00:00:00 2001 From: Pedro Algarvio Date: Thu, 21 Mar 2019 19:26:48 +0000 Subject: [PATCH 038/340] Let's not include raet by default --- requirements/static/arch.txt | 8 +++----- requirements/static/centos-6.txt | 8 +++----- requirements/static/centos-7.txt | 8 +++----- requirements/static/debian-8.txt | 8 +++----- requirements/static/debian-9.txt | 8 +++----- requirements/static/fedora-28.txt | 8 +++----- requirements/static/fedora-29.txt | 8 +++----- requirements/static/opensuse-42.txt | 8 +++----- requirements/static/opensuse-leap-15.txt | 8 +++----- requirements/static/ubuntu-14.04.txt | 8 +++----- requirements/static/ubuntu-16.04.txt | 8 +++----- requirements/static/ubuntu-18.04.txt | 8 +++----- requirements/static/windows.txt | 8 +++----- 13 files changed, 39 insertions(+), 65 deletions(-) diff --git a/requirements/static/arch.txt b/requirements/static/arch.txt index 192cd18e66cf..a0ad0126e167 100644 --- a/requirements/static/arch.txt +++ b/requirements/static/arch.txt @@ -2,7 +2,7 @@ # This file is autogenerated by pip-compile # To update, run: # -# pip-compile -o requirements/static/arch.txt requirements/zeromq.txt requirements/raet.txt requirements/pytest.txt requirements/static/arch.in +# pip-compile -o requirements/static/arch.txt requirements/zeromq.txt requirements/pytest.txt requirements/static/arch.in # apache-libcloud==1.0.0 asn1crypto==0.24.0 # via cryptography @@ -32,7 +32,7 @@ docker-pycreds==0.4.0 # via docker docker==3.7.1 docutils==0.14 # via botocore ecdsa==0.13 # via python-jose -enum34==1.1.6 # via cryptography, raet +enum34==1.1.6 # via cryptography funcsigs==1.0.2 # via pytest functools32==3.2.3.post2 # via jsonschema future==0.17.1 # via python-jose @@ -51,7 +51,6 @@ jsonpickle==1.1 # via aws-xray-sdk jsonschema==2.6.0 keyring==5.7.1 kubernetes==3.0.0 -libnacl==1.6.1 markupsafe==1.1.1 mock==1.0.1 more-itertools==5.0.0 @@ -86,7 +85,6 @@ pytz==2018.9 # via moto, tempora pyvmomi==6.7.1.2018.12 pyyaml==3.13 pyzmq==18.0.1 ; python_version != "3.4" -raet==0.6.8 requests==2.21.0 responses==0.10.6 # via moto rfc3987==1.3.8 @@ -96,7 +94,7 @@ salttesting==2017.6.1 scandir==1.10.0 # via pathlib2 setproctitle==1.1.10 singledispatch==3.4.0.3 # via tornado -six==1.12.0 # via cheroot, cherrypy, cryptography, docker, docker-pycreds, google-auth, kubernetes, more-itertools, moto, pathlib2, pyopenssl, pytest, python-dateutil, python-jose, pyvmomi, raet, responses, salttesting, singledispatch, tempora, websocket-client +six==1.12.0 # via cheroot, cherrypy, cryptography, docker, docker-pycreds, google-auth, kubernetes, more-itertools, moto, pathlib2, pyopenssl, pytest, python-dateutil, python-jose, pyvmomi, responses, salttesting, singledispatch, tempora, websocket-client smmap2==2.0.5 # via gitdb2 strict-rfc3339==0.7 tempora==1.14 # via portend diff --git a/requirements/static/centos-6.txt b/requirements/static/centos-6.txt index 1d88a5ab47b9..a21a3ac8fd5a 100644 --- 
a/requirements/static/centos-6.txt +++ b/requirements/static/centos-6.txt @@ -2,7 +2,7 @@ # This file is autogenerated by pip-compile # To update, run: # -# pip-compile -o requirements/static/centos-6.txt requirements/zeromq.txt requirements/raet.txt requirements/pytest.txt requirements/static/centos-6.in +# pip-compile -o requirements/static/centos-6.txt requirements/zeromq.txt requirements/pytest.txt requirements/static/centos-6.in # apache-libcloud==1.0.0 asn1crypto==0.24.0 # via cryptography @@ -33,7 +33,7 @@ docker-pycreds==0.4.0 # via docker docker==3.7.0 docutils==0.14 # via botocore ecdsa==0.13 # via python-jose -enum34==1.1.6 # via cryptography, raet +enum34==1.1.6 # via cryptography funcsigs==1.0.2 # via pytest functools32==3.2.3.post2 # via jsonschema future==0.17.1 # via python-jose @@ -54,7 +54,6 @@ junos-eznc==2.2.0 jxmlease==1.0.1 keyring==5.7.1 kubernetes==3.0.0 -libnacl==1.6.1 lxml==4.3.2 # via junos-eznc, ncclient markupsafe==1.1.1 meld3==1.0.2 # via supervisor @@ -96,7 +95,6 @@ pytz==2018.9 # via moto, tempora pyvmomi==6.7.1.2018.12 pyyaml==3.13 pyzmq==18.0.1 ; python_version != "3.4" -raet==0.6.8 requests==2.21.0 responses==0.10.5 # via moto rfc3987==1.3.8 @@ -108,7 +106,7 @@ scp==0.13.1 # via junos-eznc selectors2==2.0.1 # via ncclient setproctitle==1.1.10 singledispatch==3.4.0.3 # via tornado -six==1.12.0 # via bcrypt, cheroot, cherrypy, cryptography, docker, docker-pycreds, google-auth, junos-eznc, kubernetes, more-itertools, moto, ncclient, pathlib2, pynacl, pyopenssl, pytest, python-dateutil, python-jose, pyvmomi, raet, responses, salttesting, singledispatch, tempora, websocket-client +six==1.12.0 # via bcrypt, cheroot, cherrypy, cryptography, docker, docker-pycreds, google-auth, junos-eznc, kubernetes, more-itertools, moto, ncclient, pathlib2, pynacl, pyopenssl, pytest, python-dateutil, python-jose, pyvmomi, responses, salttesting, singledispatch, tempora, websocket-client smmap==0.9.0 # via gitdb strict-rfc3339==0.7 supervisor==3.3.5 ; python_version < "3" diff --git a/requirements/static/centos-7.txt b/requirements/static/centos-7.txt index 7534e24aa952..cde8d51ee818 100644 --- a/requirements/static/centos-7.txt +++ b/requirements/static/centos-7.txt @@ -2,7 +2,7 @@ # This file is autogenerated by pip-compile # To update, run: # -# pip-compile -o requirements/static/centos-7.txt requirements/zeromq.txt requirements/raet.txt requirements/pytest.txt requirements/static/centos-7.in +# pip-compile -o requirements/static/centos-7.txt requirements/zeromq.txt requirements/pytest.txt requirements/static/centos-7.in # apache-libcloud==1.0.0 asn1crypto==0.24.0 # via cryptography @@ -33,7 +33,7 @@ docker-pycreds==0.4.0 # via docker docker==3.7.0 docutils==0.14 # via botocore ecdsa==0.13 # via python-jose -enum34==1.1.6 # via cryptography, raet +enum34==1.1.6 # via cryptography funcsigs==1.0.2 # via pytest functools32==3.2.3.post2 # via jsonschema future==0.17.1 # via python-jose @@ -54,7 +54,6 @@ junos-eznc==2.2.0 jxmlease==1.0.1 keyring==5.7.1 kubernetes==3.0.0 -libnacl==1.6.1 lxml==4.3.2 # via junos-eznc, ncclient markupsafe==1.1.1 meld3==1.0.2 # via supervisor @@ -96,7 +95,6 @@ pytz==2018.9 # via moto, tempora pyvmomi==6.7.1.2018.12 pyyaml==3.13 pyzmq==18.0.1 ; python_version != "3.4" -raet==0.6.8 requests==2.21.0 responses==0.10.5 # via moto rfc3987==1.3.8 @@ -108,7 +106,7 @@ scp==0.13.1 # via junos-eznc selectors2==2.0.1 # via ncclient setproctitle==1.1.10 singledispatch==3.4.0.3 # via tornado -six==1.12.0 # via bcrypt, cheroot, cherrypy, cryptography, docker, 
docker-pycreds, google-auth, junos-eznc, kubernetes, more-itertools, moto, ncclient, pathlib2, pynacl, pyopenssl, pytest, python-dateutil, python-jose, pyvmomi, raet, responses, salttesting, singledispatch, tempora, websocket-client +six==1.12.0 # via bcrypt, cheroot, cherrypy, cryptography, docker, docker-pycreds, google-auth, junos-eznc, kubernetes, more-itertools, moto, ncclient, pathlib2, pynacl, pyopenssl, pytest, python-dateutil, python-jose, pyvmomi, responses, salttesting, singledispatch, tempora, websocket-client smmap2==2.0.5 # via gitdb2 strict-rfc3339==0.7 supervisor==3.3.5 ; python_version < "3" diff --git a/requirements/static/debian-8.txt b/requirements/static/debian-8.txt index 035d9715bfdf..573d062eec22 100644 --- a/requirements/static/debian-8.txt +++ b/requirements/static/debian-8.txt @@ -2,7 +2,7 @@ # This file is autogenerated by pip-compile # To update, run: # -# pip-compile -o requirements/static/debian-8.txt requirements/zeromq.txt requirements/raet.txt requirements/pytest.txt requirements/static/debian-8.in +# pip-compile -o requirements/static/debian-8.txt requirements/zeromq.txt requirements/pytest.txt requirements/static/debian-8.in # apache-libcloud==1.0.0 asn1crypto==0.24.0 # via cryptography @@ -32,7 +32,7 @@ docker-pycreds==0.4.0 # via docker docker==3.7.0 docutils==0.14 # via botocore ecdsa==0.13 # via python-jose -enum34==1.1.6 # via cryptography, raet +enum34==1.1.6 # via cryptography funcsigs==1.0.2 # via pytest functools32==3.2.3.post2 # via jsonschema future==0.17.1 # via python-jose @@ -53,7 +53,6 @@ junos-eznc==2.2.0 jxmlease==1.0.1 keyring==5.7.1 kubernetes==3.0.0 -libnacl==1.6.1 lxml==4.3.2 # via junos-eznc, ncclient markupsafe==1.1.1 mock==1.0.1 @@ -93,7 +92,6 @@ pytz==2018.9 # via moto, tempora pyvmomi==6.7.1.2018.12 pyyaml==3.13 pyzmq==18.0.1 ; python_version != "3.4" -raet==0.6.8 requests==2.21.0 responses==0.10.5 # via moto rfc3987==1.3.8 @@ -105,7 +103,7 @@ scp==0.13.1 # via junos-eznc selectors2==2.0.1 # via ncclient setproctitle==1.1.10 singledispatch==3.4.0.3 # via tornado -six==1.12.0 # via cheroot, cherrypy, cryptography, docker, docker-pycreds, google-auth, junos-eznc, kubernetes, more-itertools, moto, ncclient, pathlib2, pyopenssl, pytest, python-dateutil, python-jose, pyvmomi, raet, responses, salttesting, singledispatch, tempora, websocket-client +six==1.12.0 # via cheroot, cherrypy, cryptography, docker, docker-pycreds, google-auth, junos-eznc, kubernetes, more-itertools, moto, ncclient, pathlib2, pyopenssl, pytest, python-dateutil, python-jose, pyvmomi, responses, salttesting, singledispatch, tempora, websocket-client smmap2==2.0.5 # via gitdb2 strict-rfc3339==0.7 tempora==1.14 # via portend diff --git a/requirements/static/debian-9.txt b/requirements/static/debian-9.txt index 640f2b662de9..55435505fc6e 100644 --- a/requirements/static/debian-9.txt +++ b/requirements/static/debian-9.txt @@ -2,7 +2,7 @@ # This file is autogenerated by pip-compile # To update, run: # -# pip-compile -o requirements/static/debian-9.txt requirements/zeromq.txt requirements/raet.txt requirements/pytest.txt requirements/static/debian-9.in +# pip-compile -o requirements/static/debian-9.txt requirements/zeromq.txt requirements/pytest.txt requirements/static/debian-9.in # apache-libcloud==1.0.0 asn1crypto==0.24.0 # via cryptography @@ -32,7 +32,7 @@ docker-pycreds==0.4.0 # via docker docker==3.7.0 docutils==0.14 # via botocore ecdsa==0.13 # via python-jose -enum34==1.1.6 # via cryptography, raet +enum34==1.1.6 # via cryptography funcsigs==1.0.2 # via pytest 
functools32==3.2.3.post2 # via jsonschema future==0.17.1 # via python-jose @@ -53,7 +53,6 @@ junos-eznc==2.2.0 jxmlease==1.0.1 keyring==5.7.1 kubernetes==3.0.0 -libnacl==1.6.1 lxml==4.3.2 # via junos-eznc, ncclient markupsafe==1.1.1 mock==1.0.1 @@ -93,7 +92,6 @@ pytz==2018.9 # via moto, tempora pyvmomi==6.7.1.2018.12 pyyaml==3.13 pyzmq==18.0.1 ; python_version != "3.4" -raet==0.6.8 requests==2.21.0 responses==0.10.5 # via moto rfc3987==1.3.8 @@ -105,7 +103,7 @@ scp==0.13.1 # via junos-eznc selectors2==2.0.1 # via ncclient setproctitle==1.1.10 singledispatch==3.4.0.3 # via tornado -six==1.12.0 # via cheroot, cherrypy, cryptography, docker, docker-pycreds, google-auth, junos-eznc, kubernetes, more-itertools, moto, ncclient, pathlib2, pyopenssl, pytest, python-dateutil, python-jose, pyvmomi, raet, responses, salttesting, singledispatch, tempora, websocket-client +six==1.12.0 # via cheroot, cherrypy, cryptography, docker, docker-pycreds, google-auth, junos-eznc, kubernetes, more-itertools, moto, ncclient, pathlib2, pyopenssl, pytest, python-dateutil, python-jose, pyvmomi, responses, salttesting, singledispatch, tempora, websocket-client smmap2==2.0.5 # via gitdb2 strict-rfc3339==0.7 tempora==1.14 # via portend diff --git a/requirements/static/fedora-28.txt b/requirements/static/fedora-28.txt index 1ff78a38d632..71f53a9b5b76 100644 --- a/requirements/static/fedora-28.txt +++ b/requirements/static/fedora-28.txt @@ -2,7 +2,7 @@ # This file is autogenerated by pip-compile # To update, run: # -# pip-compile -o requirements/static/fedora-28.txt requirements/zeromq.txt requirements/raet.txt requirements/pytest.txt requirements/static/fedora-28.in +# pip-compile -o requirements/static/fedora-28.txt requirements/zeromq.txt requirements/pytest.txt requirements/static/fedora-28.in # apache-libcloud==1.0.0 asn1crypto==0.24.0 # via cryptography @@ -33,7 +33,7 @@ docker-pycreds==0.4.0 # via docker docker==3.7.0 docutils==0.14 # via botocore ecdsa==0.13 # via python-jose -enum34==1.1.6 # via cryptography, raet +enum34==1.1.6 # via cryptography funcsigs==1.0.2 # via pytest functools32==3.2.3.post2 # via jsonschema future==0.17.1 # via python-jose @@ -54,7 +54,6 @@ junos-eznc==2.2.0 jxmlease==1.0.1 keyring==5.7.1 kubernetes==3.0.0 -libnacl==1.6.1 lxml==4.3.2 # via junos-eznc, ncclient markupsafe==1.1.1 mock==1.0.1 @@ -95,7 +94,6 @@ pytz==2018.9 # via moto, tempora pyvmomi==6.7.1.2018.12 pyyaml==3.13 pyzmq==18.0.1 ; python_version != "3.4" -raet==0.6.8 requests==2.21.0 responses==0.10.5 # via moto rfc3987==1.3.8 @@ -107,7 +105,7 @@ scp==0.13.1 # via junos-eznc selectors2==2.0.1 # via ncclient setproctitle==1.1.10 singledispatch==3.4.0.3 # via tornado -six==1.12.0 # via bcrypt, cheroot, cherrypy, cryptography, docker, docker-pycreds, google-auth, junos-eznc, kubernetes, more-itertools, moto, ncclient, pathlib2, pynacl, pyopenssl, pytest, python-dateutil, python-jose, pyvmomi, raet, responses, salttesting, singledispatch, tempora, websocket-client +six==1.12.0 # via bcrypt, cheroot, cherrypy, cryptography, docker, docker-pycreds, google-auth, junos-eznc, kubernetes, more-itertools, moto, ncclient, pathlib2, pynacl, pyopenssl, pytest, python-dateutil, python-jose, pyvmomi, responses, salttesting, singledispatch, tempora, websocket-client smmap2==2.0.5 # via gitdb2 strict-rfc3339==0.7 tempora==1.14 # via portend diff --git a/requirements/static/fedora-29.txt b/requirements/static/fedora-29.txt index 7049c8db4333..57753ce405de 100644 --- a/requirements/static/fedora-29.txt +++ b/requirements/static/fedora-29.txt @@ 
-2,7 +2,7 @@ # This file is autogenerated by pip-compile # To update, run: # -# pip-compile -o requirements/static/fedora-29.txt requirements/zeromq.txt requirements/raet.txt requirements/pytest.txt requirements/static/fedora-29.in +# pip-compile -o requirements/static/fedora-29.txt requirements/zeromq.txt requirements/pytest.txt requirements/static/fedora-29.in # apache-libcloud==1.0.0 asn1crypto==0.24.0 # via cryptography @@ -33,7 +33,7 @@ docker-pycreds==0.4.0 # via docker docker==3.7.0 docutils==0.14 # via botocore ecdsa==0.13 # via python-jose -enum34==1.1.6 # via cryptography, raet +enum34==1.1.6 # via cryptography funcsigs==1.0.2 # via pytest functools32==3.2.3.post2 # via jsonschema future==0.17.1 # via python-jose @@ -54,7 +54,6 @@ junos-eznc==2.2.0 jxmlease==1.0.1 keyring==5.7.1 kubernetes==3.0.0 -libnacl==1.6.1 lxml==4.3.2 # via junos-eznc, ncclient markupsafe==1.1.1 mock==1.0.1 @@ -95,7 +94,6 @@ pytz==2018.9 # via moto, tempora pyvmomi==6.7.1.2018.12 pyyaml==3.13 pyzmq==18.0.1 ; python_version != "3.4" -raet==0.6.8 requests==2.21.0 responses==0.10.5 # via moto rfc3987==1.3.8 @@ -107,7 +105,7 @@ scp==0.13.1 # via junos-eznc selectors2==2.0.1 # via ncclient setproctitle==1.1.10 singledispatch==3.4.0.3 # via tornado -six==1.12.0 # via bcrypt, cheroot, cherrypy, cryptography, docker, docker-pycreds, google-auth, junos-eznc, kubernetes, more-itertools, moto, ncclient, pathlib2, pynacl, pyopenssl, pytest, python-dateutil, python-jose, pyvmomi, raet, responses, salttesting, singledispatch, tempora, websocket-client +six==1.12.0 # via bcrypt, cheroot, cherrypy, cryptography, docker, docker-pycreds, google-auth, junos-eznc, kubernetes, more-itertools, moto, ncclient, pathlib2, pynacl, pyopenssl, pytest, python-dateutil, python-jose, pyvmomi, responses, salttesting, singledispatch, tempora, websocket-client smmap2==2.0.5 # via gitdb2 strict-rfc3339==0.7 tempora==1.14 # via portend diff --git a/requirements/static/opensuse-42.txt b/requirements/static/opensuse-42.txt index 53df3ccdd8d6..a8ea73e414cc 100644 --- a/requirements/static/opensuse-42.txt +++ b/requirements/static/opensuse-42.txt @@ -2,7 +2,7 @@ # This file is autogenerated by pip-compile # To update, run: # -# pip-compile -o requirements/static/opensuse-42.txt requirements/zeromq.txt requirements/raet.txt requirements/pytest.txt requirements/static/opensuse-42.in +# pip-compile -o requirements/static/opensuse-42.txt requirements/zeromq.txt requirements/pytest.txt requirements/static/opensuse-42.in # apache-libcloud==1.0.0 asn1crypto==0.24.0 # via cryptography @@ -32,7 +32,7 @@ docker-pycreds==0.4.0 # via docker docker==3.7.0 docutils==0.14 # via botocore ecdsa==0.13 # via python-jose -enum34==1.1.6 # via cryptography, raet +enum34==1.1.6 # via cryptography funcsigs==1.0.2 # via pytest functools32==3.2.3.post2 # via jsonschema future==0.17.1 # via python-jose @@ -52,7 +52,6 @@ jsonpickle==1.1 # via aws-xray-sdk jsonschema==2.6.0 keyring==5.7.1 kubernetes==3.0.0 -libnacl==1.6.1 markupsafe==1.1.1 meld3==1.0.2 # via supervisor mock==1.0.1 @@ -88,7 +87,6 @@ pytz==2018.9 # via moto, tempora pyvmomi==6.7.1.2018.12 pyyaml==3.13 pyzmq==18.0.1 ; python_version != "3.4" -raet==0.6.8 requests==2.21.0 responses==0.10.5 # via moto rfc3987==1.3.8 @@ -99,7 +97,7 @@ scandir==1.10.0 # via pathlib2 setproctitle==1.1.10 setuptools-scm==3.2.0 singledispatch==3.4.0.3 # via tornado -six==1.12.0 # via cheroot, cherrypy, cryptography, docker, docker-pycreds, google-auth, kubernetes, more-itertools, moto, pathlib2, pyopenssl, pytest, python-dateutil, 
python-jose, pyvmomi, raet, responses, salttesting, singledispatch, tempora, websocket-client +six==1.12.0 # via cheroot, cherrypy, cryptography, docker, docker-pycreds, google-auth, kubernetes, more-itertools, moto, pathlib2, pyopenssl, pytest, python-dateutil, python-jose, pyvmomi, responses, salttesting, singledispatch, tempora, websocket-client smmap2==2.0.5 # via gitdb2 strict-rfc3339==0.7 supervisor==3.3.5 ; python_version < "3" diff --git a/requirements/static/opensuse-leap-15.txt b/requirements/static/opensuse-leap-15.txt index 26f78a6fafed..2d56a9f92875 100644 --- a/requirements/static/opensuse-leap-15.txt +++ b/requirements/static/opensuse-leap-15.txt @@ -2,7 +2,7 @@ # This file is autogenerated by pip-compile # To update, run: # -# pip-compile -o requirements/static/opensuse-leap-15.txt requirements/zeromq.txt requirements/raet.txt requirements/pytest.txt requirements/static/opensuse-leap-15.in +# pip-compile -o requirements/static/opensuse-leap-15.txt requirements/zeromq.txt requirements/pytest.txt requirements/static/opensuse-leap-15.in # apache-libcloud==1.0.0 asn1crypto==0.24.0 # via cryptography @@ -32,7 +32,7 @@ docker-pycreds==0.4.0 # via docker docker==3.7.0 docutils==0.14 # via botocore ecdsa==0.13 # via python-jose -enum34==1.1.6 # via cryptography, raet +enum34==1.1.6 # via cryptography funcsigs==1.0.2 # via pytest functools32==3.2.3.post2 # via jsonschema future==0.17.1 # via python-jose @@ -52,7 +52,6 @@ jsonpickle==1.1 # via aws-xray-sdk jsonschema==2.6.0 keyring==5.7.1 kubernetes==3.0.0 -libnacl==1.6.1 markupsafe==1.1.1 mock==1.0.1 more-itertools==5.0.0 @@ -87,7 +86,6 @@ pytz==2018.9 # via moto, tempora pyvmomi==6.7.1.2018.12 pyyaml==3.13 pyzmq==18.0.1 ; python_version != "3.4" -raet==0.6.8 requests==2.21.0 responses==0.10.5 # via moto rfc3987==1.3.8 @@ -98,7 +96,7 @@ scandir==1.10.0 # via pathlib2 setproctitle==1.1.10 setuptools-scm==3.2.0 singledispatch==3.4.0.3 # via tornado -six==1.12.0 # via cheroot, cherrypy, cryptography, docker, docker-pycreds, google-auth, kubernetes, more-itertools, moto, pathlib2, pyopenssl, pytest, python-dateutil, python-jose, pyvmomi, raet, responses, salttesting, singledispatch, tempora, websocket-client +six==1.12.0 # via cheroot, cherrypy, cryptography, docker, docker-pycreds, google-auth, kubernetes, more-itertools, moto, pathlib2, pyopenssl, pytest, python-dateutil, python-jose, pyvmomi, responses, salttesting, singledispatch, tempora, websocket-client smmap2==2.0.5 # via gitdb2 strict-rfc3339==0.7 tempora==1.14 # via portend diff --git a/requirements/static/ubuntu-14.04.txt b/requirements/static/ubuntu-14.04.txt index d59e4176d971..8efda726a2f5 100644 --- a/requirements/static/ubuntu-14.04.txt +++ b/requirements/static/ubuntu-14.04.txt @@ -2,7 +2,7 @@ # This file is autogenerated by pip-compile # To update, run: # -# pip-compile -o requirements/static/ubuntu-14.04.txt requirements/zeromq.txt requirements/raet.txt requirements/pytest.txt requirements/static/ubuntu-14.04.in +# pip-compile -o requirements/static/ubuntu-14.04.txt requirements/zeromq.txt requirements/pytest.txt requirements/static/ubuntu-14.04.in # apache-libcloud==1.0.0 asn1crypto==0.24.0 # via cryptography @@ -32,7 +32,7 @@ docker-pycreds==0.4.0 # via docker docker==3.7.0 docutils==0.14 # via botocore ecdsa==0.13 # via python-jose -enum34==1.1.6 # via cryptography, raet +enum34==1.1.6 # via cryptography funcsigs==1.0.2 # via pytest functools32==3.2.3.post2 # via jsonschema future==0.17.1 # via python-jose @@ -53,7 +53,6 @@ junos-eznc==2.2.0 jxmlease==1.0.1 
keyring==5.7.1 kubernetes==3.0.0 -libnacl==1.6.1 lxml==4.3.2 # via junos-eznc, ncclient markupsafe==1.1.1 mock==1.0.1 @@ -93,7 +92,6 @@ pytz==2018.9 # via moto, tempora pyvmomi==6.7.1.2018.12 pyyaml==3.13 pyzmq==18.0.1 ; python_version != "3.4" -raet==0.6.8 requests==2.21.0 responses==0.10.5 # via moto rfc3987==1.3.8 @@ -105,7 +103,7 @@ scp==0.13.1 # via junos-eznc selectors2==2.0.1 # via ncclient setproctitle==1.1.10 singledispatch==3.4.0.3 # via tornado -six==1.12.0 # via cheroot, cherrypy, cryptography, docker, docker-pycreds, google-auth, junos-eznc, kubernetes, more-itertools, moto, ncclient, pathlib2, pyopenssl, pytest, python-dateutil, python-jose, pyvmomi, raet, responses, salttesting, singledispatch, tempora, websocket-client +six==1.12.0 # via cheroot, cherrypy, cryptography, docker, docker-pycreds, google-auth, junos-eznc, kubernetes, more-itertools, moto, ncclient, pathlib2, pyopenssl, pytest, python-dateutil, python-jose, pyvmomi, responses, salttesting, singledispatch, tempora, websocket-client smmap2==2.0.5 # via gitdb2 strict-rfc3339==0.7 tempora==1.14 # via portend diff --git a/requirements/static/ubuntu-16.04.txt b/requirements/static/ubuntu-16.04.txt index 1647182c16e1..8e7a08f6ef01 100644 --- a/requirements/static/ubuntu-16.04.txt +++ b/requirements/static/ubuntu-16.04.txt @@ -2,7 +2,7 @@ # This file is autogenerated by pip-compile # To update, run: # -# pip-compile -o requirements/static/ubuntu-16.04.txt requirements/zeromq.txt requirements/raet.txt requirements/pytest.txt requirements/static/ubuntu-16.04.in +# pip-compile -o requirements/static/ubuntu-16.04.txt requirements/zeromq.txt requirements/pytest.txt requirements/static/ubuntu-16.04.in # apache-libcloud==1.0.0 asn1crypto==0.24.0 # via cryptography @@ -32,7 +32,7 @@ docker-pycreds==0.4.0 # via docker docker==3.7.0 docutils==0.14 # via botocore ecdsa==0.13 # via python-jose -enum34==1.1.6 # via cryptography, raet +enum34==1.1.6 # via cryptography funcsigs==1.0.2 # via pytest functools32==3.2.3.post2 # via jsonschema future==0.17.1 # via python-jose @@ -53,7 +53,6 @@ junos-eznc==2.2.0 jxmlease==1.0.1 keyring==5.7.1 kubernetes==3.0.0 -libnacl==1.6.1 lxml==4.3.2 # via junos-eznc, ncclient markupsafe==1.1.1 mock==1.0.1 @@ -93,7 +92,6 @@ pytz==2018.9 # via moto, tempora pyvmomi==6.7.1.2018.12 pyyaml==3.13 pyzmq==18.0.1 ; python_version != "3.4" -raet==0.6.8 requests==2.21.0 responses==0.10.5 # via moto rfc3987==1.3.8 @@ -105,7 +103,7 @@ scp==0.13.1 # via junos-eznc selectors2==2.0.1 # via ncclient setproctitle==1.1.10 singledispatch==3.4.0.3 # via tornado -six==1.12.0 # via cheroot, cherrypy, cryptography, docker, docker-pycreds, google-auth, junos-eznc, kubernetes, more-itertools, moto, ncclient, pathlib2, pyopenssl, pytest, python-dateutil, python-jose, pyvmomi, raet, responses, salttesting, singledispatch, tempora, websocket-client +six==1.12.0 # via cheroot, cherrypy, cryptography, docker, docker-pycreds, google-auth, junos-eznc, kubernetes, more-itertools, moto, ncclient, pathlib2, pyopenssl, pytest, python-dateutil, python-jose, pyvmomi, responses, salttesting, singledispatch, tempora, websocket-client smmap2==2.0.5 # via gitdb2 strict-rfc3339==0.7 tempora==1.14 # via portend diff --git a/requirements/static/ubuntu-18.04.txt b/requirements/static/ubuntu-18.04.txt index 49837a33505d..9d3547a56fad 100644 --- a/requirements/static/ubuntu-18.04.txt +++ b/requirements/static/ubuntu-18.04.txt @@ -2,7 +2,7 @@ # This file is autogenerated by pip-compile # To update, run: # -# pip-compile -o 
requirements/static/ubuntu-18.04.txt requirements/zeromq.txt requirements/raet.txt requirements/pytest.txt requirements/static/ubuntu-18.04.in +# pip-compile -o requirements/static/ubuntu-18.04.txt requirements/zeromq.txt requirements/pytest.txt requirements/static/ubuntu-18.04.in # apache-libcloud==1.0.0 asn1crypto==0.24.0 # via cryptography @@ -32,7 +32,7 @@ docker-pycreds==0.4.0 # via docker docker==3.7.0 docutils==0.14 # via botocore ecdsa==0.13 # via python-jose -enum34==1.1.6 # via cryptography, raet +enum34==1.1.6 # via cryptography funcsigs==1.0.2 # via pytest functools32==3.2.3.post2 # via jsonschema future==0.17.1 # via python-jose @@ -53,7 +53,6 @@ junos-eznc==2.2.0 jxmlease==1.0.1 keyring==5.7.1 kubernetes==3.0.0 -libnacl==1.6.1 lxml==4.3.2 # via junos-eznc, ncclient markupsafe==1.1.1 mock==1.0.1 @@ -93,7 +92,6 @@ pytz==2018.9 # via moto, tempora pyvmomi==6.7.1.2018.12 pyyaml==3.13 pyzmq==18.0.1 ; python_version != "3.4" -raet==0.6.8 requests==2.21.0 responses==0.10.5 # via moto rfc3987==1.3.8 @@ -105,7 +103,7 @@ scp==0.13.1 # via junos-eznc selectors2==2.0.1 # via ncclient setproctitle==1.1.10 singledispatch==3.4.0.3 # via tornado -six==1.12.0 # via cheroot, cherrypy, cryptography, docker, docker-pycreds, google-auth, junos-eznc, kubernetes, more-itertools, moto, ncclient, pathlib2, pyopenssl, pytest, python-dateutil, python-jose, pyvmomi, raet, responses, salttesting, singledispatch, tempora, websocket-client +six==1.12.0 # via cheroot, cherrypy, cryptography, docker, docker-pycreds, google-auth, junos-eznc, kubernetes, more-itertools, moto, ncclient, pathlib2, pyopenssl, pytest, python-dateutil, python-jose, pyvmomi, responses, salttesting, singledispatch, tempora, websocket-client smmap2==2.0.5 # via gitdb2 strict-rfc3339==0.7 tempora==1.14 # via portend diff --git a/requirements/static/windows.txt b/requirements/static/windows.txt index 566bf3537f83..01d70c39103a 100644 --- a/requirements/static/windows.txt +++ b/requirements/static/windows.txt @@ -2,7 +2,7 @@ # This file is autogenerated by pip-compile # To update, run: # -# pip-compile -o requirements/static/windows.txt requirements/zeromq.txt requirements/raet.txt requirements/pytest.txt requirements/static/windows.in +# pip-compile -o requirements/static/windows.txt requirements/zeromq.txt requirements/pytest.txt requirements/static/windows.in # apache-libcloud==1.0.0 asn1crypto==0.24.0 # via cryptography @@ -24,7 +24,7 @@ dnspython==1.16.0 docker-pycreds==0.4.0 # via docker docker==2.7.0 docutils==0.14 # via botocore -enum34==1.1.6 # via cryptography, raet +enum34==1.1.6 # via cryptography funcsigs==1.0.2 # via pytest functools32==3.2.3.post2 # via jsonschema futures==3.2.0 ; python_version < "3.0" @@ -37,7 +37,6 @@ jmespath==0.9.4 # via boto3, botocore jsonschema==2.6.0 keyring==5.7.1 kubernetes==3.0.0 -libnacl==1.6.1 markupsafe==1.1.1 meld3==1.0.2 # via supervisor mock==1.0.1 @@ -68,7 +67,6 @@ python-gnupg==0.4.4 pyvmomi==6.7.1.2018.12 pyyaml==3.13 pyzmq==18.0.1 ; python_version != "3.4" -raet==0.6.8 requests==2.21.0 rfc3987==1.3.8 rsa==4.0 # via google-auth @@ -78,7 +76,7 @@ scandir==1.10.0 # via pathlib2 sed==0.3.1 setproctitle==1.1.10 singledispatch==3.4.0.3 # via tornado -six==1.12.0 # via cryptography, docker, docker-pycreds, google-auth, kubernetes, more-itertools, pathlib2, pyopenssl, pytest, python-dateutil, pyvmomi, raet, salttesting, singledispatch, websocket-client +six==1.12.0 # via cryptography, docker, docker-pycreds, google-auth, kubernetes, more-itertools, pathlib2, pyopenssl, pytest, 
python-dateutil, pyvmomi, salttesting, singledispatch, websocket-client strict-rfc3339==0.7 supervisor==3.3.5 ; python_version < "3" timelib==0.2.4 From cbe5f7d26777e7d830a01d62be52c1111d4e278b Mon Sep 17 00:00:00 2001 From: David Murphy < dmurphy@saltstack.com> Date: Thu, 21 Mar 2019 17:26:38 -0600 Subject: [PATCH 039/340] Update to allow for malformed CPE_NAME from some OS's --- salt/grains/core.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/salt/grains/core.py b/salt/grains/core.py index 9758275f2c53..d3711edf3360 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py @@ -1495,6 +1495,10 @@ def _parse_cpe_name(cpe): Info: https://csrc.nist.gov/projects/security-content-automation-protocol/scap-specifications/cpe + Note: cpe:2.3:part:vendor:product:version:update:edition:lang:sw_edition:target_sw:target_hw:other + however some OS's do not have the full 13 elements, for example: + CPE_NAME="cpe:2.3:o:amazon:amazon_linux:2" + :param cpe: :return: ''' @@ -1510,7 +1514,11 @@ def _parse_cpe_name(cpe): ret['vendor'], ret['product'], ret['version'] = cpe[2:5] ret['phase'] = cpe[5] if len(cpe) > 5 else None ret['part'] = part.get(cpe[1][1:]) - elif len(cpe) == 13 and cpe[1] == '2.3': # WFN to a string + elif len(cpe) == 6 and cpe[1] == '2.3': # WFN to a string + ret['vendor'], ret['product'], ret['version'] = [x if x != '*' else None for x in cpe[3:6]] + ret['phase'] = None + ret['part'] = part.get(cpe[2]) + elif len(cpe) > 7 and len(cpe) <= 13 and cpe[1] == '2.3': # WFN to a string ret['vendor'], ret['product'], ret['version'], ret['phase'] = [x if x != '*' else None for x in cpe[3:7]] ret['part'] = part.get(cpe[2]) From 0b1eb00dc44b939d6d9d8c03d163e458e1e2b928 Mon Sep 17 00:00:00 2001 From: Alan Cugler Date: Mon, 25 Mar 2019 12:09:48 -0500 Subject: [PATCH 040/340] Replaced `test.ping` with `test.version` Result of issue #52277 Any where `test.ping` still exists in the docs was left because of the context in those docs. 
--- doc/ref/cli/salt-unity.rst | 4 ++-- doc/ref/cli/salt.rst | 4 ++-- doc/ref/configuration/index.rst | 16 ++++++------- doc/ref/executors/index.rst | 4 ++-- doc/ref/peer.rst | 6 ++--- doc/ref/returners/index.rst | 24 +++++++++---------- doc/ref/runners/index.rst | 2 +- doc/topics/blackout/index.rst | 4 ++-- doc/topics/cloud/aws.rst | 2 +- doc/topics/cloud/azure.rst | 2 +- doc/topics/cloud/azurearm.rst | 2 +- doc/topics/cloud/deploy.rst | 2 +- doc/topics/cloud/gce.rst | 2 +- doc/topics/cloud/libvirt.rst | 2 +- doc/topics/cloud/linode.rst | 2 +- doc/topics/cloud/opennebula.rst | 2 +- doc/topics/cloud/parallels.rst | 2 +- doc/topics/cloud/proxmox.rst | 2 +- doc/topics/cloud/saltify.rst | 4 ++-- doc/topics/cloud/softlayer.rst | 6 ++--- doc/topics/cloud/vagrant.rst | 2 +- doc/topics/cloud/vexxhost.rst | 2 +- doc/topics/cloud/xen.rst | 2 +- doc/topics/development/architecture.rst | 2 +- doc/topics/development/hacking.rst | 2 +- doc/topics/eauth/access_control.rst | 4 ++-- doc/topics/eauth/index.rst | 4 ++-- doc/topics/event/master_events.rst | 4 ++-- doc/topics/installation/index.rst | 4 ++-- doc/topics/installation/windows.rst | 2 +- doc/topics/proxyminion/demo.rst | 2 +- doc/topics/ssh/index.rst | 6 ++--- doc/topics/targeting/batch.rst | 4 ++-- doc/topics/targeting/compound.rst | 10 ++++---- doc/topics/targeting/globbing.rst | 18 +++++++------- doc/topics/targeting/grains.rst | 2 +- doc/topics/targeting/index.rst | 4 ++-- doc/topics/targeting/ipcidr.rst | 6 ++--- doc/topics/targeting/nodegroups.rst | 2 +- doc/topics/targeting/pillar.rst | 4 ++-- doc/topics/targeting/range.rst | 4 ++-- doc/topics/thorium/index.rst | 4 ++-- doc/topics/topology/syndic.rst | 10 ++++---- doc/topics/tutorials/cloud_controller.rst | 2 +- doc/topics/tutorials/docker_sls.rst | 2 +- doc/topics/tutorials/esxi_proxy_minion.rst | 11 +++++---- doc/topics/tutorials/firewall.rst | 2 +- doc/topics/tutorials/modules.rst | 16 ++++++------- doc/topics/tutorials/multimaster_pki.rst | 2 +- doc/topics/tutorials/walkthrough.rst | 26 ++++++++++----------- doc/topics/tutorials/walkthrough_macosx.rst | 4 ++-- 51 files changed, 131 insertions(+), 132 deletions(-) diff --git a/doc/ref/cli/salt-unity.rst b/doc/ref/cli/salt-unity.rst index 2144acb07ffc..db746038c647 100644 --- a/doc/ref/cli/salt-unity.rst +++ b/doc/ref/cli/salt-unity.rst @@ -9,7 +9,7 @@ Synopsis .. code-block:: bash - salt-unity salt '*' test.ping + salt-unity salt '*' test.version Description =========== @@ -35,4 +35,4 @@ See also :manpage:`salt-minion(1)` :manpage:`salt-run(1)` :manpage:`salt-ssh(1)` -:manpage:`salt-syndic(1)` \ No newline at end of file +:manpage:`salt-syndic(1)` diff --git a/doc/ref/cli/salt.rst b/doc/ref/cli/salt.rst index 1cbad32ba6f7..da6de812b0bd 100644 --- a/doc/ref/cli/salt.rst +++ b/doc/ref/cli/salt.rst @@ -11,9 +11,9 @@ Synopsis salt -E '.*' [ options ] sys.doc cmd - salt -G 'os:Arch.*' [ options ] test.ping + salt -G 'os:Arch.*' [ options ] test.version - salt -C 'G@os:Arch.* and webserv* or G@kernel:FreeBSD' [ options ] test.ping + salt -C 'G@os:Arch.* and webserv* or G@kernel:FreeBSD' [ options ] test.version Description =========== diff --git a/doc/ref/configuration/index.rst b/doc/ref/configuration/index.rst index b57f25ccd6d1..b54d7053d53f 100644 --- a/doc/ref/configuration/index.rst +++ b/doc/ref/configuration/index.rst @@ -199,28 +199,28 @@ Sending Commands ================ Communication between the Master and a Minion may be verified by running -the ``test.ping`` command: +the ``test.version`` command: .. 
code-block:: bash - [root@master ~]# salt alpha test.ping + [root@master ~]# salt alpha test.version alpha: - True + 2018.3.4 Communication between the Master and all Minions may be tested in a similar way: .. code-block:: bash - [root@master ~]# salt '*' test.ping + [root@master ~]# salt '*' test.version alpha: - True + 2018.3.4 bravo: - True + 2018.3.4 charlie: - True + 2018.3.4 delta: - True + 2018.3.4 Each of the Minions should send a ``True`` response as shown above. diff --git a/doc/ref/executors/index.rst b/doc/ref/executors/index.rst index f7a0e33a8412..6aaf8755fb2e 100644 --- a/doc/ref/executors/index.rst +++ b/doc/ref/executors/index.rst @@ -30,7 +30,7 @@ The same could be done by command line: .. code-block:: bash - salt -t 40 --module-executors='[splay, direct_call]' --executor-opts='{splaytime: 30}' '*' test.ping + salt -t 40 --module-executors='[splay, direct_call]' --executor-opts='{splaytime: 30}' '*' test.version And the same command called via netapi will look like this: @@ -43,7 +43,7 @@ And the same command called via netapi will look like this: -d '[{ "client": "local", "tgt": "*", - "fun": "test.ping", + "fun": "test.version", "module_executors": ["splay", "direct_call"], "executor_opts": {"splaytime": 10} }]' diff --git a/doc/ref/peer.rst b/doc/ref/peer.rst index 83118d8b7b30..3e4068d93bbc 100644 --- a/doc/ref/peer.rst +++ b/doc/ref/peer.rst @@ -107,11 +107,11 @@ comes with a number of functions to execute peer communication in different ways. Currently there are three functions in the publish module. These examples will show how to test the peer system via the salt-call command. -To execute test.ping on all minions: +To execute test.version on all minions: .. code-block:: bash - # salt-call publish.publish \* test.ping + # salt-call publish.publish \* test.version To execute the manage.up runner: @@ -123,7 +123,7 @@ To match minions using other matchers, use ``tgt_type``: .. code-block:: bash - # salt-call publish.publish 'webserv* and not G@os:Ubuntu' test.ping tgt_type='compound' + # salt-call publish.publish 'webserv* and not G@os:Ubuntu' test.version tgt_type='compound' .. note:: In pre-2017.7.0 releases, use ``expr_form`` instead of ``tgt_type``. diff --git a/doc/ref/returners/index.rst b/doc/ref/returners/index.rst index 36176f42aecb..a5601a66f99a 100644 --- a/doc/ref/returners/index.rst +++ b/doc/ref/returners/index.rst @@ -31,7 +31,7 @@ Specifying what returners to use is done when the command is invoked: .. code-block:: bash - salt '*' test.ping --return redis_return + salt '*' test.version --return redis_return This command will ensure that the redis_return returner is used. @@ -39,10 +39,10 @@ It is also possible to specify multiple returners: .. code-block:: bash - salt '*' test.ping --return mongo_return,redis_return,cassandra_return + salt '*' test.version --return mongo_return,redis_return,cassandra_return In this scenario all three returners will be called and the data from the -test.ping command will be sent out to the three named returners. +test.version command will be sent out to the three named returners. Writing a Returner ================== @@ -61,13 +61,13 @@ Other optional functions can be included to add support for ``returner`` The ``returner`` function must accept a single argument. The argument contains return data from the called minion function. If the minion - function ``test.ping`` is called, the value of the argument will be a + function ``test.version`` is called, the value of the argument will be a dictionary. 
Run the following command from a Salt master to get a sample of the dictionary: .. code-block:: bash - salt-call --local --metadata test.ping --out=pprint + salt-call --local --metadata test.version --out=pprint .. code-block:: python @@ -246,12 +246,12 @@ Sample: "master_minion": { "fun_args": [], "jid": "20150330121011408195", - "return": true, + "return": "2018.3.4", "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2015-03-30T12:10:12.708663", - "fun": "test.ping", + "fun": "test.version", "id": "master_minion" } } @@ -267,9 +267,9 @@ Sample: { "local": { - "minion1": "test.ping", - "minion3": "test.ping", - "minion2": "test.ping" + "minion1": "test.version", + "minion3": "test.version", + "minion2": "test.version" } } @@ -344,7 +344,7 @@ Testing the Returner The ``returner``, ``prep_jid``, ``save_load``, ``get_load``, and ``event_return`` functions can be tested by configuring the :conf_master:`master_job_cache` and `Event Returners`_ in the master config -file and submitting a job to ``test.ping`` each minion from the master. +file and submitting a job to ``test.version`` each minion from the master. Once you have successfully exercised the Master Job Cache functions, test the External Job Cache functions using the ``ret`` execution module. @@ -352,7 +352,7 @@ External Job Cache functions using the ``ret`` execution module. .. code-block:: bash salt-call ret.get_jids cassandra_cql --output=json - salt-call ret.get_fun cassandra_cql test.ping --output=json + salt-call ret.get_fun cassandra_cql test.version --output=json salt-call ret.get_minions cassandra_cql --output=json salt-call ret.get_jid cassandra_cql 20150330121011408195 --output=json diff --git a/doc/ref/runners/index.rst b/doc/ref/runners/index.rst index 4d38a3ef2a03..92f07c26737d 100644 --- a/doc/ref/runners/index.rst +++ b/doc/ref/runners/index.rst @@ -122,6 +122,6 @@ responding to Salt calls could look like this: Print a list of all of the minions that are up ''' client = salt.client.LocalClient(__opts__['conf_file']) - minions = client.cmd('*', 'test.ping', timeout=1) + minions = client.cmd('*', 'test.version', timeout=1) for minion in sorted(minions): print minion diff --git a/doc/topics/blackout/index.rst b/doc/topics/blackout/index.rst index 2ddf9e82baf8..c1b41b334b36 100644 --- a/doc/topics/blackout/index.rst +++ b/doc/topics/blackout/index.rst @@ -22,5 +22,5 @@ allowed during blackout. This is configured with the special pillar key .. code-block:: yaml minion_blackout_whitelist: - - test.ping - - pillar.get + - test.version + - pillar.get diff --git a/doc/topics/cloud/aws.rst b/doc/topics/cloud/aws.rst index 376edc6115a8..ba7ea8a166b3 100644 --- a/doc/topics/cloud/aws.rst +++ b/doc/topics/cloud/aws.rst @@ -324,7 +324,7 @@ it can be verified with Salt: .. code-block:: bash - # salt 'ami.example.com' test.ping + # salt 'ami.example.com' test.version Required Settings diff --git a/doc/topics/cloud/azure.rst b/doc/topics/cloud/azure.rst index 3066c39d6ee3..0426668f3cec 100644 --- a/doc/topics/cloud/azure.rst +++ b/doc/topics/cloud/azure.rst @@ -120,7 +120,7 @@ it can be verified with Salt: .. code-block:: bash - salt newinstance test.ping + salt newinstance test.version Profile Options diff --git a/doc/topics/cloud/azurearm.rst b/doc/topics/cloud/azurearm.rst index f77895475a24..481e77edf7c3 100644 --- a/doc/topics/cloud/azurearm.rst +++ b/doc/topics/cloud/azurearm.rst @@ -121,7 +121,7 @@ it can be verified with Salt: .. 
code-block:: bash - salt newinstance test.ping + salt newinstance test.version Profile Options diff --git a/doc/topics/cloud/deploy.rst b/doc/topics/cloud/deploy.rst index 4a75d36c72aa..c84e2a449d0d 100644 --- a/doc/topics/cloud/deploy.rst +++ b/doc/topics/cloud/deploy.rst @@ -123,7 +123,7 @@ Post-Deploy Commands Once a minion has been deployed, it has the option to run a salt command. Normally, this would be the :py:func:`state.apply `, which would finish provisioning the VM. Another common option (for testing) is -to use :py:func:`test.ping `. This is configured in the +to use :py:func:`test.version `. This is configured in the main cloud config file: .. code-block:: yaml diff --git a/doc/topics/cloud/gce.rst b/doc/topics/cloud/gce.rst index 58efe8196082..2d52ef1ee7a8 100644 --- a/doc/topics/cloud/gce.rst +++ b/doc/topics/cloud/gce.rst @@ -170,7 +170,7 @@ it can be verified with Salt: .. code-block:: bash - salt gce-instance test.ping + salt gce-instance test.version GCE Specific Settings diff --git a/doc/topics/cloud/libvirt.rst b/doc/topics/cloud/libvirt.rst index 79e88831c28f..33099b0b334a 100644 --- a/doc/topics/cloud/libvirt.rst +++ b/doc/topics/cloud/libvirt.rst @@ -84,7 +84,7 @@ it can be verified with Salt: .. code-block:: bash - # salt my-centos7-clone test.ping + # salt my-centos7-clone test.version Required Settings diff --git a/doc/topics/cloud/linode.rst b/doc/topics/cloud/linode.rst index c0ce064a0788..093100c28336 100644 --- a/doc/topics/cloud/linode.rst +++ b/doc/topics/cloud/linode.rst @@ -62,7 +62,7 @@ it can be verified with Salt: .. code-block:: bash - salt linode-instance test.ping + salt linode-instance test.version Listing Sizes diff --git a/doc/topics/cloud/opennebula.rst b/doc/topics/cloud/opennebula.rst index a453c475f2e8..cd081244fe5a 100644 --- a/doc/topics/cloud/opennebula.rst +++ b/doc/topics/cloud/opennebula.rst @@ -96,7 +96,7 @@ Once the instance has been created with salt-minion installed, connectivity to i .. code-block:: bash - salt my-new-vm test.ping + salt my-new-vm test.version OpenNebula uses an image --> template --> virtual machine paradigm where the template draws on the image, or disk, and virtual machines are created from templates. Because of this, there is no need to define a ``size`` in the cloud diff --git a/doc/topics/cloud/parallels.rst b/doc/topics/cloud/parallels.rst index 9031d4b3a65b..33a2b3a4ec47 100644 --- a/doc/topics/cloud/parallels.rst +++ b/doc/topics/cloud/parallels.rst @@ -91,7 +91,7 @@ it can be verified with Salt: .. code-block:: bash - # salt myubuntu test.ping + # salt myubuntu test.version Required Settings diff --git a/doc/topics/cloud/proxmox.rst b/doc/topics/cloud/proxmox.rst index 643b6a7fbecd..c790e462d6d7 100644 --- a/doc/topics/cloud/proxmox.rst +++ b/doc/topics/cloud/proxmox.rst @@ -91,7 +91,7 @@ it can be verified with Salt: .. code-block:: bash - # salt myubuntu test.ping + # salt myubuntu test.version Required Settings diff --git a/doc/topics/cloud/saltify.rst b/doc/topics/cloud/saltify.rst index aed77baec8fe..eb04cc5bae2e 100644 --- a/doc/topics/cloud/saltify.rst +++ b/doc/topics/cloud/saltify.rst @@ -195,7 +195,7 @@ Connectivity to the new "Salted" instances can now be verified with Salt: .. 
code-block:: bash - salt 'my-instance-*' test.ping + salt 'my-instance-*' test.version Credential Verification ======================= @@ -203,7 +203,7 @@ Credential Verification Because the Saltify driver does not actually create VM's, unlike other salt-cloud drivers, it has special behaviour when the ``deploy`` option is set to ``False``. When the cloud configuration specifies ``deploy: False``, the -Saltify driver will attept to authenticate to the target node(s) and return +Saltify driver will attempt to authenticate to the target node(s) and return ``True`` for each one that succeeds. This can be useful to verify ports, protocols, services and credentials are correctly configured before a live deployment. diff --git a/doc/topics/cloud/softlayer.rst b/doc/topics/cloud/softlayer.rst index d073f39b9823..8283789b8e32 100644 --- a/doc/topics/cloud/softlayer.rst +++ b/doc/topics/cloud/softlayer.rst @@ -221,9 +221,9 @@ with its short hostname, ``my-vm``): Rejected Keys: # # - # salt my-vm.example.com test.ping + # salt my-vm.example.com test.version my-vm.example.com: - True + 2018.3.4 # # # salt-cloud -d my-vm.example.com @@ -334,7 +334,7 @@ it can be verified with Salt: .. code-block:: bash - # salt 'myserver.example.com' test.ping + # salt 'myserver.example.com' test.version Dedicated Host ~~~~~~~~~~~~~~ diff --git a/doc/topics/cloud/vagrant.rst b/doc/topics/cloud/vagrant.rst index e1905396288e..790c7140ae5f 100644 --- a/doc/topics/cloud/vagrant.rst +++ b/doc/topics/cloud/vagrant.rst @@ -119,7 +119,7 @@ to it can be verified with Salt: .. code-block:: bash - salt my-id test.ping + salt my-id test.version .. _host provisioning example: diff --git a/doc/topics/cloud/vexxhost.rst b/doc/topics/cloud/vexxhost.rst index 98054e10641e..f672ead61a6e 100644 --- a/doc/topics/cloud/vexxhost.rst +++ b/doc/topics/cloud/vexxhost.rst @@ -111,7 +111,7 @@ the following command: .. code-block:: bash - # salt vh_instance1 test.ping + # salt vh_instance1 test.version You can now continue to provision new instances and they will all automatically be set up as minions of the master you've defined in the configuration file. diff --git a/doc/topics/cloud/xen.rst b/doc/topics/cloud/xen.rst index 85f464b95e26..dd87e19169ff 100644 --- a/doc/topics/cloud/xen.rst +++ b/doc/topics/cloud/xen.rst @@ -160,7 +160,7 @@ it can be verified with Salt: .. code-block:: bash - salt xenvm02 test.ping + salt xenvm02 test.version Listing Sizes diff --git a/doc/topics/development/architecture.rst b/doc/topics/development/architecture.rst index 85f7695e22b2..1c717092f830 100644 --- a/doc/topics/development/architecture.rst +++ b/doc/topics/development/architecture.rst @@ -109,7 +109,7 @@ against the command target. The typical lifecycle of a salt job from the perspective of the master might be as follows: -1) A command is issued on the CLI. For example, 'salt my_minion test.ping'. +1) A command is issued on the CLI. For example, 'salt my_minion test.version'. 2) The 'salt' command uses LocalClient to generate a request to the salt master by connecting to the ReqServer on TCP:4506 and issuing the job. 
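As an aside to the job lifecycle described above, here is a minimal sketch of what the ``salt`` CLI does through ``LocalClient`` when it publishes a job to the master's ReqServer; the config path and timeout below are illustrative assumptions, not values taken from these patches:

.. code-block:: python

    import salt.client

    # LocalClient is the same API the `salt` CLI wraps: it publishes the job to
    # the master's ReqServer and gathers the minion returns.
    local = salt.client.LocalClient('/etc/salt/master')  # assumed default config path
    returns = local.cmd('*', 'test.version', timeout=5)
    for minion_id in sorted(returns):
        print('{0}: {1}'.format(minion_id, returns[minion_id]))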
diff --git a/doc/topics/development/hacking.rst b/doc/topics/development/hacking.rst index 3631134c9c54..0037cffe878a 100644 --- a/doc/topics/development/hacking.rst +++ b/doc/topics/development/hacking.rst @@ -160,7 +160,7 @@ installation is working: salt-minion -c ./etc/salt -d salt-key -c ./etc/salt -L salt-key -c ./etc/salt -A - salt -c ./etc/salt '*' test.ping + salt -c ./etc/salt '*' test.version Running the master and minion in debug mode can be helpful when developing. To do this, add ``-l debug`` to the calls to ``salt-master`` and ``salt-minion``. diff --git a/doc/topics/eauth/access_control.rst b/doc/topics/eauth/access_control.rst index 6c0373b707c8..76f8149a2630 100644 --- a/doc/topics/eauth/access_control.rst +++ b/doc/topics/eauth/access_control.rst @@ -67,7 +67,7 @@ other minions based on standard targets (all matchers are supported except the c external_auth: pam: dave: - - test.ping + - test.version - mongo\*: - network.* - log\*: @@ -78,7 +78,7 @@ other minions based on standard targets (all matchers are supported except the c steve: - .* -The above allows for all minions to be hit by test.ping by dave, and adds a +The above allows for all minions to be hit by test.version by dave, and adds a few functions that dave can execute on other minions. It also allows steve unrestricted access to salt commands. diff --git a/doc/topics/eauth/index.rst b/doc/topics/eauth/index.rst index 39249ba7b353..623656f4f70e 100644 --- a/doc/topics/eauth/index.rst +++ b/doc/topics/eauth/index.rst @@ -180,7 +180,7 @@ any user on the same system as the master with the ``-a`` option: .. code-block:: bash - $ salt -a pam web\* test.ping + $ salt -a pam web\* test.version The system will ask the user for the credentials required by the authentication system and then publish the command. @@ -198,7 +198,7 @@ adding a ``-T`` option when authenticating: .. code-block:: bash - $ salt -T -a pam web\* test.ping + $ salt -T -a pam web\* test.version Now a token will be created that has an expiration of 12 hours (by default). This token is stored in a file named ``salt_token`` in the active user's home diff --git a/doc/topics/event/master_events.rst b/doc/topics/event/master_events.rst index 1c11928224e9..420021faa486 100644 --- a/doc/topics/event/master_events.rst +++ b/doc/topics/event/master_events.rst @@ -69,7 +69,7 @@ Job events ``G@os_family:RedHat``, etc. :var tgt_type: The type of targeting used: ``glob``, ``grain``, ``compound``, etc. - :var fun: The function to run on minions: ``test.ping``, + :var fun: The function to run on minions: ``test.version``, ``network.interfaces``, etc. :var arg: A list of arguments to pass to the function that will be called. @@ -85,7 +85,7 @@ Job events :var id: The minion ID. :var jid: The job ID. :var retcode: The return code for the job. - :var fun: The function the minion ran. E.g., ``test.ping``. + :var fun: The function the minion ran. E.g., ``test.version``. :var return: The data returned from the execution module. .. salt:event:: salt/job//prog// diff --git a/doc/topics/installation/index.rst b/doc/topics/installation/index.rst index cbcbb5b93433..3261c184f790 100644 --- a/doc/topics/installation/index.rst +++ b/doc/topics/installation/index.rst @@ -25,12 +25,12 @@ The general installation process is as follows: 4. Accept the Salt :ref:`minion keys ` after the Salt minion connects. 
-After this, you should be able to run a simple command and receive returns from +After this, you should be able to run a simple command and receive salt version returns from all connected Salt minions. .. code-block:: bash - salt '*' test.ping + salt '*' test.version Quick Install ------------- diff --git a/doc/topics/installation/windows.rst b/doc/topics/installation/windows.rst index f100fdff9d38..fef6d7240d89 100644 --- a/doc/topics/installation/windows.rst +++ b/doc/topics/installation/windows.rst @@ -516,7 +516,7 @@ Testing the Salt minion .. code-block:: bash - sudo salt '*' test.ping + sudo salt '*' test.version You should get the following response: ``{'your minion hostname': True}`` diff --git a/doc/topics/proxyminion/demo.rst b/doc/topics/proxyminion/demo.rst index e72cc28e5603..87c585e484b2 100644 --- a/doc/topics/proxyminion/demo.rst +++ b/doc/topics/proxyminion/demo.rst @@ -98,7 +98,7 @@ the 'url' key above should say ``url: http://127.0.0.1:8000`` .. code-block:: bash - salt p8000 test.ping + salt p8000 test.version 8. The REST service implements a degenerately simple pkg and service provider as well as a small set of grains. To "install" a package, use a standard diff --git a/doc/topics/ssh/index.rst b/doc/topics/ssh/index.rst index 95a2bdecdbfa..f931f4e149db 100644 --- a/doc/topics/ssh/index.rst +++ b/doc/topics/ssh/index.rst @@ -127,7 +127,7 @@ command: .. code-block:: bash - salt-ssh '*' test.ping + salt-ssh '*' test.version Commands with ``salt-ssh`` follow the same syntax as the ``salt`` command. @@ -217,8 +217,8 @@ YAML contents: ssh_wipe: True Instead of having to call -``salt-ssh --config-dir=path/to/config/dir --max-procs=30 --wipe \* test.ping`` you -can call ``salt-ssh \* test.ping``. +``salt-ssh --config-dir=path/to/config/dir --max-procs=30 --wipe \* test.version`` you +can call ``salt-ssh \* test.version``. Boolean-style options should be specified in their YAML representation. diff --git a/doc/topics/targeting/batch.rst b/doc/topics/targeting/batch.rst index f8e0aae7936d..41a6f2e26a1e 100644 --- a/doc/topics/targeting/batch.rst +++ b/doc/topics/targeting/batch.rst @@ -9,11 +9,11 @@ supported. .. code-block:: bash - salt '*' -b 10 test.ping + salt '*' -b 10 test.version salt -G 'os:RedHat' --batch-size 25% apache.signal restart -This will only run test.ping on 10 of the targeted minions at a time and then +This will only run test.version on 10 of the targeted minions at a time and then restart apache on 25% of the minions matching ``os:RedHat`` at a time and work through them all until the task is complete. This makes jobs like rolling web server restarts behind a load balancer or doing maintenance on BSD firewalls diff --git a/doc/topics/targeting/compound.rst b/doc/topics/targeting/compound.rst index 59fa619b4c14..cf91b578bdf8 100644 --- a/doc/topics/targeting/compound.rst +++ b/doc/topics/targeting/compound.rst @@ -31,7 +31,7 @@ matches the :mod:`regular expression ` ``web-dc1-srv.*``: .. code-block:: bash - salt -C 'webserv* and G@os:Debian or E@web-dc1-srv.*' test.ping + salt -C 'webserv* and G@os:Debian or E@web-dc1-srv.*' test.version That same example expressed in a :term:`top file` looks like the following: @@ -48,20 +48,20 @@ Excluding a minion based on its ID is also possible: .. code-block:: bash - salt -C 'not web-dc1-srv' test.ping + salt -C 'not web-dc1-srv' test.version Versions prior to 2015.8.0 a leading ``not`` was not supported in compound matches. Instead, something like the following was required: .. 
code-block:: bash - salt -C '* and not G@kernel:Darwin' test.ping + salt -C '* and not G@kernel:Darwin' test.version Excluding a minion based on its ID was also possible: .. code-block:: bash - salt -C '* and not web-dc1-srv' test.ping + salt -C '* and not web-dc1-srv' test.version Precedence Matching ------------------- @@ -70,7 +70,7 @@ Matchers can be grouped together with parentheses to explicitly declare preceden .. code-block:: bash - salt -C '( ms-1 or G@id:ms-3 ) and G@id:ms-3' test.ping + salt -C '( ms-1 or G@id:ms-3 ) and G@id:ms-3' test.version .. note:: diff --git a/doc/topics/targeting/globbing.rst b/doc/topics/targeting/globbing.rst index 5ced1962f965..fd59f0bfcf97 100644 --- a/doc/topics/targeting/globbing.rst +++ b/doc/topics/targeting/globbing.rst @@ -31,39 +31,39 @@ Match all minions: .. code-block:: bash - salt '*' test.ping + salt '*' test.version Match all minions in the example.net domain or any of the example domains: .. code-block:: bash - salt '*.example.net' test.ping - salt '*.example.*' test.ping + salt '*.example.net' test.version + salt '*.example.*' test.version Match all the ``webN`` minions in the example.net domain (``web1.example.net``, ``web2.example.net`` … ``webN.example.net``): .. code-block:: bash - salt 'web?.example.net' test.ping + salt 'web?.example.net' test.version Match the ``web1`` through ``web5`` minions: .. code-block:: bash - salt 'web[1-5]' test.ping + salt 'web[1-5]' test.version Match the ``web1`` and ``web3`` minions: .. code-block:: bash - salt 'web[1,3]' test.ping + salt 'web[1,3]' test.version Match the ``web-x``, ``web-y``, and ``web-z`` minions: .. code-block:: bash - salt 'web-[x-z]' test.ping + salt 'web-[x-z]' test.version .. note:: @@ -81,7 +81,7 @@ Match both ``web1-prod`` and ``web1-devel`` minions: .. code-block:: bash - salt -E 'web1-(prod|devel)' test.ping + salt -E 'web1-(prod|devel)' test.version When using regular expressions in a State's :term:`top file`, you must specify the matcher as the first option. The following example executes the contents of @@ -102,4 +102,4 @@ At the most basic level, you can specify a flat list of minion IDs: .. code-block:: bash - salt -L 'web1,web2,web3' test.ping \ No newline at end of file + salt -L 'web1,web2,web3' test.version diff --git a/doc/topics/targeting/grains.rst b/doc/topics/targeting/grains.rst index 6f081f9c4835..c38adabb3a13 100644 --- a/doc/topics/targeting/grains.rst +++ b/doc/topics/targeting/grains.rst @@ -10,7 +10,7 @@ For example, the following matches all CentOS minions: .. code-block:: bash - salt -G 'os:CentOS' test.ping + salt -G 'os:CentOS' test.version Match all minions with 64-bit CPUs, and return number of CPU cores for each matching minion: diff --git a/doc/topics/targeting/index.rst b/doc/topics/targeting/index.rst index 4c3ff964a38f..ffdf47b0bcce 100644 --- a/doc/topics/targeting/index.rst +++ b/doc/topics/targeting/index.rst @@ -37,7 +37,7 @@ the target is the grain key followed by a glob expression: "os:Arch*". .. code-block:: bash - salt -G 'os:Fedora' test.ping + salt -G 'os:Fedora' test.version Will return True from all of the minions running Fedora. @@ -62,7 +62,7 @@ This is well defined with an example: .. code-block:: bash - salt -C 'G@os:Debian and webser* or E@db.*' test.ping + salt -C 'G@os:Debian and webser* or E@db.*' test.version In this example any minion who's id starts with ``webser`` and is running Debian, or any minion who's id starts with db will be matched. 
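The same compound expression can also be used from the Python API rather than the CLI. A small sketch follows; note that ``tgt_type='compound'`` assumes a 2017.7.0 or newer release (earlier releases used ``expr_form``):

.. code-block:: python

    import salt.client

    # Equivalent to: salt -C 'G@os:Debian and webser* or E@db.*' test.version
    local = salt.client.LocalClient()
    matched = local.cmd('G@os:Debian and webser* or E@db.*',
                        'test.version',
                        tgt_type='compound',
                        timeout=5)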
diff --git a/doc/topics/targeting/ipcidr.rst b/doc/topics/targeting/ipcidr.rst index 40247bc246d6..2adf0f6f773d 100644 --- a/doc/topics/targeting/ipcidr.rst +++ b/doc/topics/targeting/ipcidr.rst @@ -9,14 +9,14 @@ notation). .. code-block:: bash - salt -S 192.168.40.20 test.ping - salt -S 2001:db8::/64 test.ping + salt -S 192.168.40.20 test.version + salt -S 2001:db8::/64 test.version Ipcidr matching can also be used in compound matches .. code-block:: bash - salt -C 'S@10.0.0.0/24 and G@os:Debian' test.ping + salt -C 'S@10.0.0.0/24 and G@os:Debian' test.version It is also possible to use in both pillar and state-matching diff --git a/doc/topics/targeting/nodegroups.rst b/doc/topics/targeting/nodegroups.rst index 6043d959af19..a4cfacca55e2 100644 --- a/doc/topics/targeting/nodegroups.rst +++ b/doc/topics/targeting/nodegroups.rst @@ -57,7 +57,7 @@ To match a nodegroup on the CLI, use the ``-N`` command-line option: .. code-block:: bash - salt -N group1 test.ping + salt -N group1 test.version .. note:: diff --git a/doc/topics/targeting/pillar.rst b/doc/topics/targeting/pillar.rst index a86b0bd8f2e4..ab79cf535a08 100644 --- a/doc/topics/targeting/pillar.rst +++ b/doc/topics/targeting/pillar.rst @@ -21,7 +21,7 @@ Example: .. code-block:: bash - salt -I 'somekey:specialvalue' test.ping + salt -I 'somekey:specialvalue' test.version Like with :ref:`Grains `, it is possible to use globbing as well as match nested values in Pillar, by adding colons for each level that @@ -31,4 +31,4 @@ is being traversed. The below example would match minions with a pillar named .. code-block:: bash - salt -I 'foo:bar:baz*' test.ping + salt -I 'foo:bar:baz*' test.version diff --git a/doc/topics/targeting/range.rst b/doc/topics/targeting/range.rst index e5bd3f9dab91..b3780c60546d 100644 --- a/doc/topics/targeting/range.rst +++ b/doc/topics/targeting/range.rst @@ -72,11 +72,11 @@ One might target host1 through host100 in the test.com domain with Salt as follo .. code-block:: bash - salt --range %test:CLUSTER test.ping + salt --range %test:CLUSTER test.version The following salt command would target three hosts: ``frontend``, ``backend``, and ``mysql``: .. code-block:: bash - salt --range %test:APPS test.ping + salt --range %test:APPS test.version diff --git a/doc/topics/thorium/index.rst b/doc/topics/thorium/index.rst index f37d3edbf865..0eb602d7a09b 100644 --- a/doc/topics/thorium/index.rst +++ b/doc/topics/thorium/index.rst @@ -285,12 +285,12 @@ event bus, and returns ``True`` if that event's tag matches. For example: run_remote_ex: local.cmd: - tgt: '*' - - func: test.ping + - func: test.version - require: - check: salt/foo/*/bar This formula will look for an event whose tag is ``salt/foo//bar`` and -if it comes in, issue a ``test.ping`` to all minions. +if it comes in, issue a ``test.version`` to all minions. 
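To exercise a Thorium check like the one above, an event with a matching tag can be fired from a minion. This is only a sketch; the concrete tag ``salt/foo/test/bar`` and the payload are illustrative values:

.. code-block:: python

    import salt.client

    # Caller invokes execution modules locally on a minion; event.send pushes a
    # custom event onto the master event bus, where the Thorium check can match it.
    caller = salt.client.Caller()
    caller.cmd('event.send', 'salt/foo/test/bar', {'trigger': True})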
Register Persistence diff --git a/doc/topics/topology/syndic.rst b/doc/topics/topology/syndic.rst index da598343f11c..f51cdf1fe61e 100644 --- a/doc/topics/topology/syndic.rst +++ b/doc/topics/topology/syndic.rst @@ -131,15 +131,15 @@ On the Master node: Unaccepted Keys: Rejected Keys: - # salt '*' test.ping + # salt '*' test.version minion_1: - True + 2018.3.4 minion_2: - True + 2018.3.4 minion_4: - True + 2018.3.4 minion_3: - True + 2018.3.4 Topology ======== diff --git a/doc/topics/tutorials/cloud_controller.rst b/doc/topics/tutorials/cloud_controller.rst index 74dfe813195b..6bbdd81764bd 100644 --- a/doc/topics/tutorials/cloud_controller.rst +++ b/doc/topics/tutorials/cloud_controller.rst @@ -277,7 +277,7 @@ This command will return data about all of the hypervisors and respective virtual machines. Now that the new VM is booted it should have contacted the Salt Master, a -``test.ping`` will reveal if the new VM is running. +``test.version`` will reveal if the new VM is running. QEMU copy on write support diff --git a/doc/topics/tutorials/docker_sls.rst b/doc/topics/tutorials/docker_sls.rst index c7e3762f10bc..0e075cb8240a 100644 --- a/doc/topics/tutorials/docker_sls.rst +++ b/doc/topics/tutorials/docker_sls.rst @@ -81,7 +81,7 @@ simple `salt-call` command: .. code-block:: bash - salt-call --local dockerng.call test test.ping + salt-call --local dockerng.call test test.version salt-call --local dockerng.call test network.interfaces salt-call --local dockerng.call test disk.usage salt-call --local dockerng.call test pkg.list_pkgs diff --git a/doc/topics/tutorials/esxi_proxy_minion.rst b/doc/topics/tutorials/esxi_proxy_minion.rst index bce78b5f41db..3cbe2d847e8d 100644 --- a/doc/topics/tutorials/esxi_proxy_minion.rst +++ b/doc/topics/tutorials/esxi_proxy_minion.rst @@ -227,9 +227,10 @@ This allows you to use any number of potential fallback passwords. This scenario is especially true, and even slower, when the proxy minion first starts. If the correct password is not the first password - on the list, it may take up to a minute for ``test.ping`` to respond - with a ``True`` result. Once the initial authorization is complete, the - responses for commands will be a little faster. + on the list, it may take up to a minute for ``test.version`` to respond + with salt's version installed (Example: ``2018.3.4``. Once the initial + authorization is complete, the responses for commands will be a little + faster. To avoid these longer waiting periods, SaltStack recommends moving the correct password to the top of the list and restarting the proxy minion @@ -366,7 +367,7 @@ proxy processes! .. code-block:: bash - # salt 'esxi-*' test.ping + # salt 'esxi-*' test.version esxi-1: True esxi-3: @@ -377,7 +378,7 @@ Executing Commands ================== Now that you've configured your Proxy Minions and have them responding successfully -to a ``test.ping``, we can start executing commands against the ESXi hosts via Salt. +to a ``test.version``, we can start executing commands against the ESXi hosts via Salt. 
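Once the proxies answer ``test.version``, any other execution module call can be targeted at them in the same way. As a read-only sketch using the Python API (``grains.items`` is just one safe, illustrative choice; the tutorial's own ESXi-specific examples follow below):

.. code-block:: python

    import salt.client

    # Query the ESXi proxy minions for their grains over the normal salt transport.
    local = salt.client.LocalClient()
    esxi_grains = local.cmd('esxi-*', 'grains.items', timeout=60)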
It's important to understand how this particular proxy works, and there are a couple of important pieces to be aware of in order to start running remote execution and diff --git a/doc/topics/tutorials/firewall.rst b/doc/topics/tutorials/firewall.rst index 31fb3c84189b..eaa476c51ce5 100644 --- a/doc/topics/tutorials/firewall.rst +++ b/doc/topics/tutorials/firewall.rst @@ -231,5 +231,5 @@ be set on the Master: needs to communicate with the listening network socket of ``salt-master`` on the *loopback* interface. Without this you will see no outgoing Salt traffic from the master, even for a simple - ``salt '*' test.ping``, because the ``salt`` client never reached + ``salt '*' test.version``, because the ``salt`` client never reached the ``salt-master`` to tell it to carry out the execution. diff --git a/doc/topics/tutorials/modules.rst b/doc/topics/tutorials/modules.rst index e1dbde7d764f..4d6e03b3c978 100644 --- a/doc/topics/tutorials/modules.rst +++ b/doc/topics/tutorials/modules.rst @@ -27,14 +27,14 @@ following function. The default filter is a glob on the minion id. For example: .. code-block:: bash - salt '*' test.ping - salt '*.example.org' test.ping + salt '*' test.version + salt '*.example.org' test.version Targets can be based on minion system information using the Grains system: .. code-block:: bash - salt -G 'os:Ubuntu' test.ping + salt -G 'os:Ubuntu' test.version .. seealso:: :ref:`Grains system ` @@ -42,19 +42,19 @@ Targets can be filtered by regular expression: .. code-block:: bash - salt -E 'virtmach[0-9]' test.ping + salt -E 'virtmach[0-9]' test.version Targets can be explicitly specified in a list: .. code-block:: bash - salt -L 'foo,bar,baz,quo' test.ping + salt -L 'foo,bar,baz,quo' test.version Or Multiple target types can be combined in one command: .. code-block:: bash - salt -C 'G@os:Ubuntu and webser* or E@database.*' test.ping + salt -C 'G@os:Ubuntu and webser* or E@database.*' test.version function @@ -74,7 +74,7 @@ Show all currently available minions: .. code-block:: bash - salt '*' test.ping + salt '*' test.version Run an arbitrary shell command: @@ -99,4 +99,4 @@ Optional, keyword arguments are also supported: salt '*' pip.install salt timeout=5 upgrade=True -They are always in the form of ``kwarg=argument``. \ No newline at end of file +They are always in the form of ``kwarg=argument``. diff --git a/doc/topics/tutorials/multimaster_pki.rst b/doc/topics/tutorials/multimaster_pki.rst index 0dc9dbb5b94f..b647c88789b2 100644 --- a/doc/topics/tutorials/multimaster_pki.rst +++ b/doc/topics/tutorials/multimaster_pki.rst @@ -238,7 +238,7 @@ The minion will connect to the first master from its master list [DEBUG ] Decrypting the current master AES key -A test.ping on the master the minion is currently connected to should be run to +A test.version on the master the minion is currently connected to should be run to test connectivity. If successful, that master should be turned off. A firewall-rule denying the diff --git a/doc/topics/tutorials/walkthrough.rst b/doc/topics/tutorials/walkthrough.rst index 9f691c5cc08b..62be2f04f071 100644 --- a/doc/topics/tutorials/walkthrough.rst +++ b/doc/topics/tutorials/walkthrough.rst @@ -238,16 +238,16 @@ start with looks like this: .. code-block:: bash - salt '*' test.ping + salt '*' test.version The ``*`` is the target, which specifies all minions. -``test.ping`` tells the minion to run the :py:func:`test.ping -` function. +``test.version`` tells the minion to run the :py:func:`test.version +` function. 
-In the case of ``test.ping``, ``test`` refers to a :ref:`execution module -`. ``ping`` refers to the :py:func:`ping -` function contained in the aforementioned ``test`` +In the case of ``test.version``, ``test`` refers to a :ref:`execution module +`. ``version`` refers to the :py:func:`version +` function contained in the aforementioned ``test`` module. .. note:: @@ -257,12 +257,10 @@ module. services. The result of running this command will be the master instructing all of the -minions to execute :py:func:`test.ping ` in parallel -and return the result. - -This is not an actual ICMP ping, but rather a simple function which returns ``True``. -Using :py:func:`test.ping ` is a good way of confirming that a minion is -connected. +minions to execute :py:func:`test.version ` in parallel +and return the result. Using :py:func:`test.version ` +is a good way of confirming that a minion is connected, and reaffirm to the user +the salt version(s) they have installed on the minions. .. note:: @@ -271,7 +269,7 @@ connected. well by using the :conf_minion:`id` parameter. Of course, there are hundreds of other modules that can be called just as -``test.ping`` can. For example, the following would return disk usage on all +``test.version`` can. For example, the following would return disk usage on all targeted minions: .. code-block:: bash @@ -591,7 +589,7 @@ This formula can be referenced via the following command: .. note:: :py:func:`state.apply ` is just another remote - execution function, just like :py:func:`test.ping ` + execution function, just like :py:func:`test.version ` or :py:func:`disk.usage `. It simply takes the name of an SLS file as an argument. diff --git a/doc/topics/tutorials/walkthrough_macosx.rst b/doc/topics/tutorials/walkthrough_macosx.rst index 00d6c31febda..f1e5de026a61 100644 --- a/doc/topics/tutorials/walkthrough_macosx.rst +++ b/doc/topics/tutorials/walkthrough_macosx.rst @@ -412,9 +412,9 @@ following: .. code-block:: bash - sudo salt '*' test.ping + sudo salt '*' test.version -You should see your minion answering the ping. It's now time to do some +You should see your minion answering with its salt version. It's now time to do some configuration. From e8c8dba161ffee6508571b55a994dcff73fbbece Mon Sep 17 00:00:00 2001 From: Alan Cugler Date: Mon, 25 Mar 2019 16:10:29 -0500 Subject: [PATCH 041/340] Added in an explaination of the --zone flag approved in #52251 --- doc/topics/tutorials/firewall.rst | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/doc/topics/tutorials/firewall.rst b/doc/topics/tutorials/firewall.rst index 31fb3c84189b..b704964488f5 100644 --- a/doc/topics/tutorials/firewall.rst +++ b/doc/topics/tutorials/firewall.rst @@ -28,8 +28,11 @@ FirewallD use the command line client ``firewall-cmd``. firewall-cmd --permanent --zone= --add-port=4505-4506/tcp -Please choose the desired zone according to your setup. Don't forget to reload -after you made your changes. +A network zone defines the security level of trust for the the network. +The user should choose an appropriate zone value for their setup. +Possible values include: drop, block, public, external, dmz, work, home, internal, trusted. + +Don't forget to reload after you made your changes. .. code-block:: bash From c04ba6e35efaf3b4f9646e24b2716aacc2f1fe17 Mon Sep 17 00:00:00 2001 From: Alan Cugler Date: Tue, 26 Mar 2019 09:58:26 -0500 Subject: [PATCH 042/340] Changed a text artifact from the ping function to supporting text for the version function. 
--- doc/ref/configuration/index.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/ref/configuration/index.rst b/doc/ref/configuration/index.rst index b54d7053d53f..b5098a4978ba 100644 --- a/doc/ref/configuration/index.rst +++ b/doc/ref/configuration/index.rst @@ -222,7 +222,8 @@ similar way: delta: 2018.3.4 -Each of the Minions should send a ``True`` response as shown above. +Each of the Minions should send a ``2018.3.4`` response as shown above, +or any other salt version installed. What's Next? ============ From 32aafab79a2fe6bbf15d8830d480f3a8546900eb Mon Sep 17 00:00:00 2001 From: Wayne Werner Date: Fri, 22 Mar 2019 14:19:26 -0500 Subject: [PATCH 043/340] Ignore vscode and fix swap ignore --- .gitignore | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index b3d444d2fbaf..644e6fc26175 100644 --- a/.gitignore +++ b/.gitignore @@ -2,7 +2,7 @@ /src *.py[co] pkg/arch/*.tar.xz -*.sw[pon] +*.sw[a-p] doc/_build dist MANIFEST @@ -84,6 +84,9 @@ tests/unit/templates/roots # Pycharm .idea +# VS Code +.vscode + # Ignore the log directory created by tests /logs tests/integration/cloud/providers/logs From 604b671ef92de2c8a134ad3933e23cef1f113e49 Mon Sep 17 00:00:00 2001 From: Wayne Werner Date: Fri, 22 Mar 2019 14:52:19 -0500 Subject: [PATCH 044/340] Return binary data from gpg renderer If we receive binary data, we should respond with binary data. --- salt/renderers/gpg.py | 36 ++++++++++++++------------ tests/unit/renderers/test_gpg.py | 44 ++++++++++++++++++++++++++++++++ 2 files changed, 64 insertions(+), 16 deletions(-) diff --git a/salt/renderers/gpg.py b/salt/renderers/gpg.py index ff32ab7f2b4e..8ba5cfed1e86 100644 --- a/salt/renderers/gpg.py +++ b/salt/renderers/gpg.py @@ -228,7 +228,11 @@ log = logging.getLogger(__name__) GPG_CIPHERTEXT = re.compile( - r'-----BEGIN PGP MESSAGE-----.*?-----END PGP MESSAGE-----', re.DOTALL) + salt.utils.stringutils.to_bytes( + r'-----BEGIN PGP MESSAGE-----.*?-----END PGP MESSAGE-----' + ), + re.DOTALL, +) def _get_gpg_exec(): @@ -281,37 +285,37 @@ def _decrypt_ciphertext(cipher): proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=False) decrypted_data, decrypt_error = proc.communicate(input=cipher) if not decrypted_data: - try: - cipher = salt.utils.stringutils.to_unicode(cipher) - except UnicodeDecodeError: - # decrypted data contains undecodable binary data - pass log.warning( - 'Could not decrypt cipher %s, received: %s', + 'Could not decrypt cipher %r, received: %r', cipher, decrypt_error ) return cipher else: - try: - decrypted_data = salt.utils.stringutils.to_unicode(decrypted_data) - except UnicodeDecodeError: - # decrypted data contains undecodable binary data - pass return decrypted_data def _decrypt_ciphertexts(cipher, translate_newlines=False): + cipher = salt.utils.stringutils.to_bytes(cipher) if translate_newlines: - cipher = cipher.replace(r'\n', '\n') - ret, num = GPG_CIPHERTEXT.subn(lambda m: _decrypt_ciphertext(m.group()), cipher) + cipher = cipher.replace(rb'\n', b'\n') + def replace(match): + result = salt.utils.stringutils.to_bytes(_decrypt_ciphertext(match.group())) + return result + ret, num = GPG_CIPHERTEXT.subn(replace, salt.utils.stringutils.to_bytes(cipher)) if num > 0: # Remove trailing newlines. Without if crypted value initially specified as a YAML multiline # it will conain unexpected trailing newline. 
- return ret.rstrip('\n') + ret = ret.rstrip(b'\n') else: - return cipher + ret = cipher + try: + ret = salt.utils.stringutils.to_unicode(ret) + except UnicodeDecodeError: + # decrypted data contains some sort of binary data - not our problem + pass + return ret def _decrypt_object(obj, translate_newlines=False): ''' diff --git a/tests/unit/renderers/test_gpg.py b/tests/unit/renderers/test_gpg.py index 981568660ccd..51eaeb527d4e 100644 --- a/tests/unit/renderers/test_gpg.py +++ b/tests/unit/renderers/test_gpg.py @@ -3,6 +3,8 @@ # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals +from textwrap import dedent + # Import Salt Testing libs from tests.support.mixins import LoaderModuleMockMixin from tests.support.unit import skipIf, TestCase @@ -100,3 +102,45 @@ def test_render(self): with patch('salt.renderers.gpg._get_key_dir', MagicMock(return_value=key_dir)): with patch('salt.renderers.gpg._decrypt_object', MagicMock(return_value=secret)): self.assertEqual(gpg.render(crypted), secret) + + def test_multi_render(self): + key_dir = '/etc/salt/gpgkeys' + secret = 'Use more salt.' + expected = '\n'.join([secret]*3) + crypted = dedent('''\ + -----BEGIN PGP MESSAGE----- + !@#$%^&*()_+ + -----END PGP MESSAGE----- + -----BEGIN PGP MESSAGE----- + !@#$%^&*()_+ + -----END PGP MESSAGE----- + -----BEGIN PGP MESSAGE----- + !@#$%^&*()_+ + -----END PGP MESSAGE----- + ''') + + with patch('salt.renderers.gpg._get_gpg_exec', MagicMock(return_value=True)): + with patch('salt.renderers.gpg._get_key_dir', MagicMock(return_value=key_dir)): + with patch('salt.renderers.gpg._decrypt_ciphertext', MagicMock(return_value=secret)): + self.assertEqual(gpg.render(crypted), expected) + + def test_render_with_binary_data_should_return_binary_data(self): + key_dir = '/etc/salt/gpgkeys' + secret = b'Use\x8b more\x8b salt.' + expected = b'\n'.join([secret]*3) + crypted = dedent('''\ + -----BEGIN PGP MESSAGE----- + !@#$%^&*()_+ + -----END PGP MESSAGE----- + -----BEGIN PGP MESSAGE----- + !@#$%^&*()_+ + -----END PGP MESSAGE----- + -----BEGIN PGP MESSAGE----- + !@#$%^&*()_+ + -----END PGP MESSAGE----- + ''') + + with patch('salt.renderers.gpg._get_gpg_exec', MagicMock(return_value=True)): + with patch('salt.renderers.gpg._get_key_dir', MagicMock(return_value=key_dir)): + with patch('salt.renderers.gpg._decrypt_ciphertext', MagicMock(return_value=secret)): + self.assertEqual(gpg.render(crypted), expected) \ No newline at end of file From c4b385b92a4f248c992a064100ac86882d13bfaf Mon Sep 17 00:00:00 2001 From: Wayne Werner Date: Fri, 22 Mar 2019 15:31:01 -0500 Subject: [PATCH 045/340] Allow binary pillar data There's no real reason that pillars can't/shouldn't be able to contain binary data. This gives us the ability to say that it's OK. --- salt/utils/data.py | 10 +++++++--- tests/unit/utils/test_data.py | 5 +++++ 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/salt/utils/data.py b/salt/utils/data.py index 4ec7b3126246..eb6a817bc8d6 100644 --- a/salt/utils/data.py +++ b/salt/utils/data.py @@ -214,10 +214,14 @@ def decode(data, encoding=None, errors='strict', keep=False, def decode_dict(data, encoding=None, errors='strict', keep=False, normalize=False, preserve_dict_class=False, - preserve_tuples=False, to_str=False): + preserve_tuples=False, to_str=False, keep_pillar=False): ''' Decode all string values to Unicode. Optionally use to_str=True to ensure strings are str types and not unicode on Python 2. 
+ + keep_pillar + If ``True``, pillar data that cannot be converted to unicode should be + kept as binary data (bytes on Python 3, str on Python 2). ''' _decode_func = salt.utils.stringutils.to_unicode \ if not to_str \ @@ -240,7 +244,7 @@ def decode_dict(data, encoding=None, errors='strict', keep=False, # means we are going to leave the value as-is. pass except UnicodeDecodeError: - if not keep: + if (key == 'pillar' and not keep_pillar) or not keep: raise if isinstance(value, list): @@ -264,7 +268,7 @@ def decode_dict(data, encoding=None, errors='strict', keep=False, # means we are going to leave the value as-is. pass except UnicodeDecodeError: - if not keep: + if (key == 'pillar' and not keep_pillar) and not keep: raise rv[key] = value diff --git a/tests/unit/utils/test_data.py b/tests/unit/utils/test_data.py index 030f22b202cc..1b3b39c5d5b2 100644 --- a/tests/unit/utils/test_data.py +++ b/tests/unit/utils/test_data.py @@ -598,3 +598,8 @@ def test_stringify(self): salt.utils.data.stringify(['one', 'two', str('three'), 4, 5]), # future lint: disable=blacklisted-function ['one', 'two', 'three', '4', '5'] ) + + def test_decode_dict_with_keep_pillar_and_binary_data_should_keep_binary_data(self): + data = {'pillar': b'\x8b'} + decoded_data = salt.utils.data.decode_dict(data, keep_pillar=True) + self.assertEqual(decoded_data, data) \ No newline at end of file From 2b8c78289190ab0f2081788c1c691667ea45e6b8 Mon Sep 17 00:00:00 2001 From: Wayne Werner Date: Tue, 26 Mar 2019 19:07:09 -0500 Subject: [PATCH 046/340] Test gpg render with replace newlines --- salt/renderers/gpg.py | 9 +++++---- tests/unit/renderers/test_gpg.py | 26 +++++++++++++++++++++++++- 2 files changed, 30 insertions(+), 5 deletions(-) diff --git a/salt/renderers/gpg.py b/salt/renderers/gpg.py index 8ba5cfed1e86..e80c6bb0eb01 100644 --- a/salt/renderers/gpg.py +++ b/salt/renderers/gpg.py @@ -296,13 +296,14 @@ def _decrypt_ciphertext(cipher): def _decrypt_ciphertexts(cipher, translate_newlines=False): - cipher = salt.utils.stringutils.to_bytes(cipher) + to_bytes = salt.utils.stringutils.to_bytes + cipher = to_bytes(cipher) if translate_newlines: - cipher = cipher.replace(rb'\n', b'\n') + cipher = cipher.replace(to_bytes(r'\n'), to_bytes('\n')) def replace(match): - result = salt.utils.stringutils.to_bytes(_decrypt_ciphertext(match.group())) + result = to_bytes(_decrypt_ciphertext(match.group())) return result - ret, num = GPG_CIPHERTEXT.subn(replace, salt.utils.stringutils.to_bytes(cipher)) + ret, num = GPG_CIPHERTEXT.subn(replace, to_bytes(cipher)) if num > 0: # Remove trailing newlines. Without if crypted value initially specified as a YAML multiline # it will conain unexpected trailing newline. diff --git a/tests/unit/renderers/test_gpg.py b/tests/unit/renderers/test_gpg.py index 51eaeb527d4e..2ab9604a6d2b 100644 --- a/tests/unit/renderers/test_gpg.py +++ b/tests/unit/renderers/test_gpg.py @@ -143,4 +143,28 @@ def test_render_with_binary_data_should_return_binary_data(self): with patch('salt.renderers.gpg._get_gpg_exec', MagicMock(return_value=True)): with patch('salt.renderers.gpg._get_key_dir', MagicMock(return_value=key_dir)): with patch('salt.renderers.gpg._decrypt_ciphertext', MagicMock(return_value=secret)): - self.assertEqual(gpg.render(crypted), expected) \ No newline at end of file + self.assertEqual(gpg.render(crypted), expected) + + def test_render_with_translate_newlines_should_translate_newlines(self): + key_dir = '/etc/salt/gpgkeys' + secret = b'Use\x8b more\x8b salt.' 
+ expected = b'\n\n'.join([secret]*3) + crypted = dedent('''\ + -----BEGIN PGP MESSAGE----- + !@#$%^&*()_+ + -----END PGP MESSAGE-----\\n + -----BEGIN PGP MESSAGE----- + !@#$%^&*()_+ + -----END PGP MESSAGE-----\\n + -----BEGIN PGP MESSAGE----- + !@#$%^&*()_+ + -----END PGP MESSAGE----- + ''') + + with patch('salt.renderers.gpg._get_gpg_exec', MagicMock(return_value=True)): + with patch('salt.renderers.gpg._get_key_dir', MagicMock(return_value=key_dir)): + with patch('salt.renderers.gpg._decrypt_ciphertext', MagicMock(return_value=secret)): + self.assertEqual( + gpg.render(crypted, translate_newlines=True), + expected, + ) \ No newline at end of file From f2aebf9cb5be13aa23afcac3fe10e285928207de Mon Sep 17 00:00:00 2001 From: Wayne Werner Date: Tue, 26 Mar 2019 19:10:53 -0500 Subject: [PATCH 047/340] Assume file contents are binary Try to encode as unicode, but if not, just fall back to binary. That's probably what the data was in the first place. --- salt/states/file.py | 18 +++++++++++++----- tests/unit/states/test_file.py | 20 ++++++++++++++++++++ 2 files changed, 33 insertions(+), 5 deletions(-) diff --git a/salt/states/file.py b/salt/states/file.py index b7a202b7a130..c8d949770a11 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -2615,11 +2615,8 @@ def managed(name, 'to True to allow the managed file to be empty.' .format(contents_id) ) - if isinstance(use_contents, six.binary_type) and b'\0' in use_contents: - contents = use_contents - elif isinstance(use_contents, six.text_type) and str('\0') in use_contents: - contents = use_contents - else: + + try: validated_contents = _validate_str_list(use_contents) if not validated_contents: return _error( @@ -2634,6 +2631,17 @@ def managed(name, contents += line.rstrip('\n').rstrip('\r') + os.linesep if contents_newline and not contents.endswith(os.linesep): contents += os.linesep + except UnicodeDecodeError: + # Either something terrible happened, or we have binary data. + if template: + return _error( + ret, + 'Contents specified by contents/contents_pillar/' + 'contents_grains appears to be binary data, and' + ' as will not be able to be treated as a Jinja' + ' template.' 
+ ) + contents = use_contents if template: contents = __salt__['file.apply_template_on_contents']( contents, diff --git a/tests/unit/states/test_file.py b/tests/unit/states/test_file.py index 1e6ab2efa666..76d0581b042a 100644 --- a/tests/unit/states/test_file.py +++ b/tests/unit/states/test_file.py @@ -604,6 +604,26 @@ def test_missing(self): # 'managed' function tests: 1 + def test_file_managed_should_fall_back_to_binary(self): + expected_contents = b'\x8b' + filename = '/tmp/blarg' + mock_manage = MagicMock(return_value={'fnord': 'fnords'}) + with patch('salt.states.file._load_accumulators', + MagicMock(return_value=([], []))): + with patch.dict(filestate.__salt__, + { + 'file.get_managed': MagicMock(return_value=['', '', '']), + 'file.source_list': MagicMock(return_value=['', '']), + 'file.manage_file': mock_manage, + 'pillar.get': MagicMock(return_value=expected_contents), + }): + ret = filestate.managed( + filename, + contents_pillar='fnord', + ) + actual_contents = mock_manage.call_args[0][14] + self.assertEqual(actual_contents, expected_contents) + def test_managed(self): ''' Test to manage a given file, this function allows for a file to be From fb010c0a6eee1ba63986236f3b800223e45cb183 Mon Sep 17 00:00:00 2001 From: Wayne Werner Date: Tue, 26 Mar 2019 19:58:29 -0500 Subject: [PATCH 048/340] Add binary pillar to the docs --- doc/topics/pillar/index.rst | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/doc/topics/pillar/index.rst b/doc/topics/pillar/index.rst index a071b15ddc86..db0dabd593cf 100644 --- a/doc/topics/pillar/index.rst +++ b/doc/topics/pillar/index.rst @@ -747,6 +747,34 @@ done: .. _`minion config file`: https://github.com/saltstack/salt/tree/develop/doc/ref/configuration/minion.rst .. _`master config template`: https://github.com/saltstack/salt/tree/develop/conf/master +Binary Data in the Pillar +========================= + +Salt has partial support for binary pillar data. + +.. note:: + + There are some situations (such as salt-ssh) where only text (ASCII or + Unicode) is allowed. + +The simplest way to embed binary data in your pillar is to make use of YAML's +built-in binary data type, which requires base64 encoded data. + +.. code-block:: yaml + + salt_pic: !!binary + iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAMAAAC67D+PAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAA + +Then you can use it as a ``contents_pillar`` in a state: + +.. code-block:: yaml + + /tmp/salt.png: + file.managed: + - contents_pillar: salt_pic + +It is also possible to add ASCII-armored encrypted data to pillars, as +mentioned in the Pillar Encryption section. Master Config in Pillar ======================= From 28c29459c8d93863d480f38d0d56c0904e7b77af Mon Sep 17 00:00:00 2001 From: Wayne Werner Date: Tue, 26 Mar 2019 20:09:22 -0500 Subject: [PATCH 049/340] keep_pillar not needed --- salt/utils/data.py | 10 +++------- tests/unit/utils/test_data.py | 5 ----- 2 files changed, 3 insertions(+), 12 deletions(-) diff --git a/salt/utils/data.py b/salt/utils/data.py index eb6a817bc8d6..4ec7b3126246 100644 --- a/salt/utils/data.py +++ b/salt/utils/data.py @@ -214,14 +214,10 @@ def decode(data, encoding=None, errors='strict', keep=False, def decode_dict(data, encoding=None, errors='strict', keep=False, normalize=False, preserve_dict_class=False, - preserve_tuples=False, to_str=False, keep_pillar=False): + preserve_tuples=False, to_str=False): ''' Decode all string values to Unicode. Optionally use to_str=True to ensure strings are str types and not unicode on Python 2. 
- - keep_pillar - If ``True``, pillar data that cannot be converted to unicode should be - kept as binary data (bytes on Python 3, str on Python 2). ''' _decode_func = salt.utils.stringutils.to_unicode \ if not to_str \ @@ -244,7 +240,7 @@ def decode_dict(data, encoding=None, errors='strict', keep=False, # means we are going to leave the value as-is. pass except UnicodeDecodeError: - if (key == 'pillar' and not keep_pillar) or not keep: + if not keep: raise if isinstance(value, list): @@ -268,7 +264,7 @@ def decode_dict(data, encoding=None, errors='strict', keep=False, # means we are going to leave the value as-is. pass except UnicodeDecodeError: - if (key == 'pillar' and not keep_pillar) and not keep: + if not keep: raise rv[key] = value diff --git a/tests/unit/utils/test_data.py b/tests/unit/utils/test_data.py index 1b3b39c5d5b2..030f22b202cc 100644 --- a/tests/unit/utils/test_data.py +++ b/tests/unit/utils/test_data.py @@ -598,8 +598,3 @@ def test_stringify(self): salt.utils.data.stringify(['one', 'two', str('three'), 4, 5]), # future lint: disable=blacklisted-function ['one', 'two', 'three', '4', '5'] ) - - def test_decode_dict_with_keep_pillar_and_binary_data_should_keep_binary_data(self): - data = {'pillar': b'\x8b'} - decoded_data = salt.utils.data.decode_dict(data, keep_pillar=True) - self.assertEqual(decoded_data, data) \ No newline at end of file From 8255901b33f0c782459223f36c81552a594ed8f4 Mon Sep 17 00:00:00 2001 From: Megan Wilhite Date: Wed, 27 Mar 2019 12:14:07 -0400 Subject: [PATCH 050/340] document `regex_replace` Jinja filter (#52326) Fixes #48952. --- doc/topics/jinja/index.rst | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/doc/topics/jinja/index.rst b/doc/topics/jinja/index.rst index 5f2b46d56a44..011cbaeac4eb 100644 --- a/doc/topics/jinja/index.rst +++ b/doc/topics/jinja/index.rst @@ -405,6 +405,29 @@ Returns: None +.. jinja_ref:: regex_replace + +``regex_replace`` +----------------- + +.. versionadded:: 2017.7.0 + +Searches for a pattern and replaces with a sequence of characters. + +Example: + +.. code-block:: jinja + + {% set my_text = 'yes, this is a TEST' %} + {{ my_text | regex_replace(' ([a-z])', '__\\1', ignorecase=True) }} + +Returns: + +.. code-block:: text + + yes,__this__is__a__TEST + + .. jinja_ref:: uuid ``uuid`` From 364ef065e8d4dd89942e764558378ee4243861dd Mon Sep 17 00:00:00 2001 From: Alan Cugler Date: Thu, 21 Feb 2019 13:52:28 -0600 Subject: [PATCH 051/340] Added mention of map file alternative for minion configuration options. Fixes: #51127 --- doc/topics/cloud/config.rst | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/doc/topics/cloud/config.rst b/doc/topics/cloud/config.rst index 1d124fb95e8f..f50593e8c88f 100644 --- a/doc/topics/cloud/config.rst +++ b/doc/topics/cloud/config.rst @@ -27,10 +27,12 @@ cloud is operating on. Minion Configuration ==================== -The default minion configuration is set up in this file. Minions created by -salt-cloud derive their configuration from this file. Almost all parameters -found in :ref:`Configuring the Salt Minion ` can -be used here. +The default minion configuration is set up in this file or alternatively used in +any ``.conf`` file listed in the ``/etc/salt/cloud.maps.d/`` +:ref:`map file ` directory. Minions created by salt-cloud +traditionally derive their configuration from this file. Almost all parameters +found in :ref:`Configuring the Salt Minion ` can be +used here. .. 
code-block:: yaml From 9ca0f72308a431c4d8120dfa6128413aed0d9f04 Mon Sep 17 00:00:00 2001 From: Alan Cugler Date: Wed, 27 Mar 2019 12:26:52 -0500 Subject: [PATCH 052/340] Changed where to mention the map files compatibility for minion configuration options. This is a result of @Ch3LL suggestion on the PR itself for this doc change. --- doc/topics/cloud/config.rst | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/doc/topics/cloud/config.rst b/doc/topics/cloud/config.rst index f50593e8c88f..ffe619049f7d 100644 --- a/doc/topics/cloud/config.rst +++ b/doc/topics/cloud/config.rst @@ -27,10 +27,8 @@ cloud is operating on. Minion Configuration ==================== -The default minion configuration is set up in this file or alternatively used in -any ``.conf`` file listed in the ``/etc/salt/cloud.maps.d/`` -:ref:`map file ` directory. Minions created by salt-cloud -traditionally derive their configuration from this file. Almost all parameters +The default minion configuration is set up in this file. Minions created by +salt-cloud derive their configuration from this file. Almost all parameters found in :ref:`Configuring the Salt Minion ` can be used here. @@ -46,7 +44,7 @@ and its listening port, if the port is not set to the default. Similar to most other settings, Minion configuration settings are inherited across configuration files. For example, the master setting might be contained in the main ``cloud`` configuration file as demonstrated above, but additional -settings can be placed in the provider or profile: +settings can be placed in the provider, profile or map configuration files: .. code-block:: yaml From 23733bde5ada770213fbb122d041fd8b02cdb28c Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Wed, 27 Mar 2019 23:33:10 +0300 Subject: [PATCH 053/340] Regression test for parallel IPCMessageSubscriber support --- tests/unit/transport/test_ipc.py | 85 ++++++++++++++++++++++++++++++++ 1 file changed, 85 insertions(+) diff --git a/tests/unit/transport/test_ipc.py b/tests/unit/transport/test_ipc.py index 942ac8fc6c65..3492be10649b 100644 --- a/tests/unit/transport/test_ipc.py +++ b/tests/unit/transport/test_ipc.py @@ -29,6 +29,9 @@ from tests.support.paths import TMP from tests.support.unit import skipIf +import pytest +import threading + log = logging.getLogger(__name__) @@ -154,3 +157,85 @@ def test_multistream_errors(self): self.channel.send({'stop': True}) self.wait() self.assertEqual(self.payloads[:-1], [None, None, 'foo', 'foo']) + + +@skipIf(salt.utils.platform.is_windows(), 'Windows does not support Posix IPC') +class IPCMessagePubSubCase(tornado.testing.AsyncTestCase): + ''' + Test all of the clear msg stuff + ''' + def setUp(self): + super(IPCMessagePubSubCase, self).setUp() + self.opts = {'ipc_write_buffer': 0} + self.socket_path = os.path.join(TMP, 'ipc_test.ipc') + self.pub_channel = self._get_pub_channel() + self.sub_channel = self._get_sub_channel() + + def _get_pub_channel(self): + pub_channel = salt.transport.ipc.IPCMessagePublisher( + self.opts, + self.socket_path, + ) + pub_channel.start() + return pub_channel + + def _get_sub_channel(self): + sub_channel = salt.transport.ipc.IPCMessageSubscriber( + socket_path=self.socket_path, + io_loop=self.io_loop, + ) + sub_channel.connect(callback=self.stop) + self.wait() + return sub_channel + + def tearDown(self): + super(IPCMessagePubSubCase, self).tearDown() + try: + self.pub_channel.close() + except socket.error as exc: + if exc.errno != errno.EBADF: + # If its not a bad file descriptor error, raise + 
raise + try: + self.sub_channel.close() + except socket.error as exc: + if exc.errno != errno.EBADF: + # If its not a bad file descriptor error, raise + raise + os.unlink(self.socket_path) + del self.pub_channel + del self.sub_channel + + def test_multi_client_reading(self): + # To be completely fair let's create 2 clients. + client1 = self.sub_channel + client2 = self._get_sub_channel() + call_cnt = [] + + # Create a watchdog to be safe from hanging in sync loops (what old code did) + evt = threading.Event() + + def close_server(): + if evt.wait(1): + return + client2.close() + self.stop() + + watchdog = threading.Thread(target=close_server) + watchdog.start() + + # Runs in ioloop thread so we're safe from race conditions here + def handler(raw): + call_cnt.append(raw) + if len(call_cnt) >= 2: + evt.set() + self.stop() + + # Now let both waiting data at once + client1.read_async(handler) + client2.read_async(handler) + self.pub_channel.publish('TEST') + self.wait() + self.assertEqual(len(call_cnt), 2) + self.assertEqual(call_cnt[0], 'TEST') + self.assertEqual(call_cnt[1], 'TEST') From 37aeba314330a5cefdf9ca1d5ce069bc790e692f Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Wed, 27 Mar 2019 23:39:00 +0300 Subject: [PATCH 054/340] Minor: Fix typo in docstring --- salt/transport/ipc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py index ee3c5d7c43c3..30e340c0b8b8 100644 --- a/salt/transport/ipc.py +++ b/salt/transport/ipc.py @@ -589,7 +589,7 @@ class IPCMessageSubscriberService(IPCClient): of IPCMessageSubscriber instances feeding all of them with data. It closes automatically when there are no more subscribers. - To use this rever to IPCMessageSubscriber documentation. + To use this refer to IPCMessageSubscriber documentation. 
''' def __singleton_init__(self, socket_path, io_loop=None): super(IPCMessageSubscriberService, self).__singleton_init__( From 87bb513d6361150c6501e834f01f2e61706a6b3e Mon Sep 17 00:00:00 2001 From: Shane Lee Date: Wed, 27 Mar 2019 14:58:57 -0600 Subject: [PATCH 055/340] Fix failing symlink test (#52145) --- tests/unit/modules/test_win_file.py | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/tests/unit/modules/test_win_file.py b/tests/unit/modules/test_win_file.py index 3da7b45a6f40..8c7938843c30 100644 --- a/tests/unit/modules/test_win_file.py +++ b/tests/unit/modules/test_win_file.py @@ -313,14 +313,21 @@ def test_issue_52002_check_file_remove_symlink(self): ''' Make sure that directories including symlinks or symlinks can be removed ''' - base = temp.dir(prefix='base') - target = os.path.join(base, 'child 1', 'target/') + base = temp.dir(prefix='base-') + target = os.path.join(base, 'child 1', 'target\\') symlink = os.path.join(base, 'child 2', 'link') - self.assertFalse(win_file.directory_exists(target)) - self.assertFalse(win_file.directory_exists(symlink)) - self.assertTrue(win_file.makedirs_(target)) - self.assertTrue(win_file.directory_exists(symlink)) - self.assertTrue(win_file.symlink(target, symlink)) - self.assertTrue(win_file.is_link(symlink)) - self.assertTrue(win_file.remove(base)) - self.assertFalse(win_file.directory_exists(base)) + try: + # Create environment + self.assertFalse(win_file.directory_exists(target)) + self.assertFalse(win_file.directory_exists(symlink)) + self.assertTrue(win_file.makedirs_(target)) + self.assertTrue(win_file.makedirs_(symlink)) + self.assertTrue(win_file.symlink(target, symlink)) + self.assertTrue(win_file.directory_exists(symlink)) + self.assertTrue(win_file.is_link(symlink)) + # Test removal of directory containing symlink + self.assertTrue(win_file.remove(base)) + self.assertFalse(win_file.directory_exists(base)) + finally: + if os.path.exists(base): + win_file.remove(base) From 74ab4d3792bd72f6810168d739a8ca7c43ff7471 Mon Sep 17 00:00:00 2001 From: twangboy Date: Wed, 27 Mar 2019 17:03:13 -0600 Subject: [PATCH 056/340] Use old way to get osrelease if new way fails --- salt/grains/core.py | 76 ++++++++++++++++------- tests/unit/grains/test_core.py | 109 +++++++++++++++++++++++++++++++++ 2 files changed, 162 insertions(+), 23 deletions(-) diff --git a/salt/grains/core.py b/salt/grains/core.py index 9758275f2c53..60d701c8ea11 100644 --- a/salt/grains/core.py +++ b/salt/grains/core.py @@ -1131,6 +1131,57 @@ def _clean_value(key, val): return val +def _windows_os_release_grain(caption, product_type): + ''' + helper function for getting the osrelease grain + :return: + ''' + # This creates the osrelease grain based on the Windows Operating + # System Product Name. 
As long as Microsoft maintains a similar format + # this should be future proof + version = 'Unknown' + release = '' + if 'Server' in caption: + for item in caption.split(' '): + # If it's all digits, then it's version + if re.match(r'\d+', item): + version = item + # If it starts with R and then numbers, it's the release + # ie: R2 + if re.match(r'^R\d+$', item): + release = item + os_release = '{0}Server{1}'.format(version, release) + else: + for item in caption.split(' '): + # If it's a number, decimal number, Thin or Vista, then it's the + # version + if re.match(r'^(\d+(\.\d+)?)|Thin|Vista|XP$', item): + version = item + os_release = version + + # If the version is still Unknown, revert back to the old way of getting + # the os_release + # https://github.com/saltstack/salt/issues/52339 + if os_release in ['Unknown']: + os_release = platform.release() + server = {'Vista': '2008Server', + '7': '2008ServerR2', + '8': '2012Server', + '8.1': '2012ServerR2', + '10': '2016Server'} + + # Starting with Python 2.7.12 and 3.5.2 the `platform.uname()` + # function started reporting the Desktop version instead of the + # Server version on # Server versions of Windows, so we need to look + # those up. So, if you find a Server Platform that's a key in the + # server dictionary, then lookup the actual Server Release. + # (Product Type 1 is Desktop, Everything else is Server) + if product_type > 1 and os_release in server: + os_release = server[os_release] + + return os_release + + def _windows_platform_data(): ''' Use the platform module for as much as we can. @@ -1177,7 +1228,6 @@ def _windows_platform_data(): except IndexError: log.debug('Motherboard info not available on this system') - os_release = platform.release() kernel_version = platform.version() info = salt.utils.win_osinfo.get_os_version_info() @@ -1185,28 +1235,8 @@ def _windows_platform_data(): if info['ServicePackMajor'] > 0: service_pack = ''.join(['SP', six.text_type(info['ServicePackMajor'])]) - # This creates the osrelease grain based on the Windows Operating - # System Product Name. 
As long as Microsoft maintains a similar format - # this should be future proof - version = 'Unknown' - release = '' - if 'Server' in osinfo.Caption: - for item in osinfo.Caption.split(' '): - # If it's all digits, then it's version - if re.match(r'\d+', item): - version = item - # If it starts with R and then numbers, it's the release - # ie: R2 - if re.match(r'^R\d+$', item): - release = item - os_release = '{0}Server{1}'.format(version, release) - else: - for item in osinfo.Caption.split(' '): - # If it's a number, decimal number, Thin or Vista, then it's the - # version - if re.match(r'^(\d+(\.\d+)?)|Thin|Vista$', item): - version = item - os_release = version + os_release = _windows_os_release_grain(caption=osinfo.Caption, + product_type=osinfo.ProductType) grains = { 'kernelrelease': _clean_value('kernelrelease', osinfo.Version), diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py index 3874b0001c28..aab6b52452b6 100644 --- a/tests/unit/grains/test_core.py +++ b/tests/unit/grains/test_core.py @@ -580,6 +580,115 @@ def test_ubuntu_artful_os_grains(self): } self._run_os_grains_tests("ubuntu-17.10", _os_release_map, expectation) + def test__windows_os_release_grain(self): + versions = { + 'Windows 10 Home': '10', + 'Windows 10 Pro': '10', + 'Windows 10 Pro for Workstations': '10', + 'Windows 10 Pro Education': '10', + 'Windows 10 Enterprise': '10', + 'Windows 10 Enterprise LTSB': '10', + 'Windows 10 Education': '10', + 'Windows 10 IoT Core': '10', + 'Windows 10 IoT Enterprise': '10', + 'Windows 10 S': '10', + 'Windows 8.1': '8.1', + 'Windows 8.1 Pro': '8.1', + 'Windows 8.1 Enterprise': '8.1', + 'Windows 8.1 OEM': '8.1', + 'Windows 8.1 with Bing': '8.1', + 'Windows 8': '8', + 'Windows 8 Pro': '8', + 'Windows 8 Enterprise': '8', + 'Windows 8 OEM': '8', + 'Windows 7 Starter': '7', + 'Windows 7 Home Basic': '7', + 'Windows 7 Home Premium': '7', + 'Windows 7 Professional': '7', + 'Windows 7 Enterprise': '7', + 'Windows 7 Ultimate': '7', + 'Windows Thin PC': 'Thin', + 'Windows Vista Starter': 'Vista', + 'Windows Vista Home Basic': 'Vista', + 'Windows Vista Home Premium': 'Vista', + 'Windows Vista Business': 'Vista', + 'Windows Vista Enterprise': 'Vista', + 'Windows Vista Ultimate': 'Vista', + 'Windows Server 2019 Essentials': '2019Server', + 'Windows Server 2019 Standard': '2019Server', + 'Windows Server 2019 Datacenter': '2019Server', + 'Windows Server 2016 Essentials': '2016Server', + 'Windows Server 2016 Standard': '2016Server', + 'Windows Server 2016 Datacenter': '2016Server', + 'Windows Server 2012 R2 Foundation': '2012ServerR2', + 'Windows Server 2012 R2 Essentials': '2012ServerR2', + 'Windows Server 2012 R2 Standard': '2012ServerR2', + 'Windows Server 2012 R2 Datacenter': '2012ServerR2', + 'Windows Server 2012 Foundation': '2012Server', + 'Windows Server 2012 Essentials': '2012Server', + 'Windows Server 2012 Standard': '2012Server', + 'Windows Server 2012 Datacenter': '2012Server', + 'Windows MultiPoint Server 2012': '2012Server', + 'Windows Small Business Server 2011': '2011Server', + 'Windows MultiPoint Server 2011': '2011Server', + 'Windows Home Server 2011': '2011Server', + 'Windows MultiPoint Server 2010': '2010Server', + 'Windows Server 2008 R2 Foundation': '2008ServerR2', + 'Windows Server 2008 R2 Standard': '2008ServerR2', + 'Windows Server 2008 R2 Enterprise': '2008ServerR2', + 'Windows Server 2008 R2 Datacenter': '2008ServerR2', + 'Windows Server 2008 R2 for Itanium-based Systems': '2008ServerR2', + 'Windows Web Server 2008 R2': '2008ServerR2', + 
'Windows Storage Server 2008 R2': '2008ServerR2', + 'Windows HPC Server 2008 R2': '2008ServerR2', + 'Windows Server 2008 Standard': '2008Server', + 'Windows Server 2008 Enterprise': '2008Server', + 'Windows Server 2008 Datacenter': '2008Server', + 'Windows Server 2008 for Itanium-based Systems': '2008Server', + 'Windows Server Foundation 2008': '2008Server', + 'Windows Essential Business Server 2008': '2008Server', + 'Windows HPC Server 2008': '2008Server', + 'Windows Small Business Server 2008': '2008Server', + 'Windows Storage Server 2008': '2008Server', + 'Windows Web Server 2008': '2008Server' + } + for caption in versions: + version = core._windows_os_release_grain(caption, 1) + self.assertEqual( + version, + versions[caption], + 'version: {0}\n' + 'found: {1}\n' + 'caption: {2}'.format(version, versions[caption], caption) + ) + + embedded_versions = { + 'Windows Embedded 8.1 Industry Pro': '8.1', + 'Windows Embedded 8 Industry Pro': '8', + 'Windows POSReady 7': '7', + 'Windows Embedded Standard 7': '7', + 'Windows Embedded POSReady 2009': '2009', + 'Windows Embedded Standard 2009': '2009', + 'Windows XP Embedded': 'XP', + } + for caption in embedded_versions: + version = core._windows_os_release_grain(caption, 1) + self.assertEqual( + version, + embedded_versions[caption], + '{0} != {1}\n' + 'version: {0}\n' + 'found: {1}\n' + 'caption: {2}'.format(version, embedded_versions[caption], caption) + ) + + # Special Cases + # Windows Embedded Standard is Windows 7 + caption = 'Windows Embedded Standard' + with patch('platform.release', MagicMock(return_value='7')): + version = core._windows_os_release_grain(caption, 1) + self.assertEqual(version, '7') + @skipIf(not salt.utils.platform.is_linux(), 'System is not Linux') def test_linux_memdata(self): ''' From 846160843843ce2a8ee2299f6876d089b415db2c Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Wed, 27 Mar 2019 16:53:28 -0700 Subject: [PATCH 057/340] Updating the incron module, state module and tests to remove use of comments. 
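For reference, a minimal sketch of the comment-free entry format this change assumes (illustration only, not the salt.modules.incron code; the helper name is made up):

```
# Illustration only: an incrontab entry is now just "<path> <mask> <cmd ...>",
# with no trailing "# Line managed by Salt" tag to detect or strip.
def parse_incron_line(line):
    comps = line.split()
    if len(comps) < 3:
        return None  # blank/other lines are preserved verbatim elsewhere
    return {
        'path': comps[0],
        'mask': comps[1],
        'cmd': ' '.join(comps[2:]),
    }

# parse_incron_line('/home/cybage IN_MODIFY,IN_DELETE echo "SALT"')
# => {'path': '/home/cybage', 'mask': 'IN_MODIFY,IN_DELETE', 'cmd': 'echo "SALT"'}
```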
--- salt/modules/incron.py | 36 +++++++++++++------------------ salt/states/incron.py | 5 +++++ tests/unit/modules/test_incron.py | 4 ++-- 3 files changed, 22 insertions(+), 23 deletions(-) diff --git a/salt/modules/incron.py b/salt/modules/incron.py index b5a2dfeb6db6..0a6fe2b9d87a 100644 --- a/salt/modules/incron.py +++ b/salt/modules/incron.py @@ -53,11 +53,10 @@ def _render_tab(lst): for pre in lst['pre']: ret.append('{0}\n'.format(pre)) for cron in lst['crons']: - ret.append('{0} {1} {2} {3}\n'.format(cron['path'], - cron['mask'], - cron['cmd'], - TAG - ) + ret.append('{0} {1} {2}\n'.format(cron['path'], + cron['mask'], + cron['cmd'], + ) ) return ret @@ -191,23 +190,18 @@ def list_tab(user): 'pre': [] } flag = False - comment = None - tag = '# Line managed by Salt, do not edit' for line in data.splitlines(): - if line.endswith(tag): - if len(line.split()) > 3: - # Appears to be a standard incron line - comps = line.split() - path = comps[0] - mask = comps[1] - (cmd, comment) = ' '.join(comps[2:]).split(' # ') - - dat = {'path': path, - 'mask': mask, - 'cmd': cmd, - 'comment': comment} - ret['crons'].append(dat) - comment = None + if len(line.split()) > 3: + # Appears to be a standard incron line + comps = line.split() + path = comps[0] + mask = comps[1] + cmd = ' '.join(comps[2:]) + + dat = {'path': path, + 'mask': mask, + 'cmd': cmd} + ret['crons'].append(dat) else: ret['pre'].append(line) return ret diff --git a/salt/states/incron.py b/salt/states/incron.py index c98145a60380..97479a811a79 100644 --- a/salt/states/incron.py +++ b/salt/states/incron.py @@ -44,6 +44,9 @@ # Import Python libs from __future__ import absolute_import, print_function, unicode_literals +import logging +log = logging.getLogger(__name__) + def _check_cron(user, path, @@ -56,6 +59,8 @@ def _check_cron(user, arg_mask.sort() lst = __salt__['incron.list_tab'](user) + if cmd.endswith('\n'): + cmd = cmd[:-1] for cron in lst['crons']: if path == cron['path'] and cron['cmd'] == cmd: cron_mask = cron['mask'].split(',') diff --git a/tests/unit/modules/test_incron.py b/tests/unit/modules/test_incron.py index 6babf032f5fc..09890c6c025d 100644 --- a/tests/unit/modules/test_incron.py +++ b/tests/unit/modules/test_incron.py @@ -102,7 +102,7 @@ def test_set_job(self): val = {'pre': [], 'crons': [{'path': '/home/cybage', 'mask': 'IN_MODIFY', - 'cmd': 'echo "SALT"', 'comment': ''}]} + 'cmd': 'echo "SALT"'}]} with patch.object(incron, 'list_tab', MagicMock(return_value=val)): self.assertEqual(incron.set_job('cybage', '/home/cybage', @@ -135,7 +135,7 @@ def test_set_job(self): val = {'pre': [], 'crons': [{'path': '/home/cybage', 'mask': 'IN_MODIFY,IN_DELETE', - 'cmd': 'echo "SALT"', 'comment': ''}]} + 'cmd': 'echo "SALT"'}]} with patch.object(incron, 'list_tab', MagicMock(return_value=val)): mock = MagicMock(return_value='incrontab') From 2aa971d2e521722d7dee9e8a82dec9ff372b4bad Mon Sep 17 00:00:00 2001 From: Wayne Werner Date: Thu, 28 Mar 2019 12:41:16 -0500 Subject: [PATCH 058/340] lint cleanup --- salt/renderers/gpg.py | 3 +++ tests/unit/renderers/test_gpg.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/salt/renderers/gpg.py b/salt/renderers/gpg.py index e80c6bb0eb01..c006f42dab5e 100644 --- a/salt/renderers/gpg.py +++ b/salt/renderers/gpg.py @@ -300,9 +300,11 @@ def _decrypt_ciphertexts(cipher, translate_newlines=False): cipher = to_bytes(cipher) if translate_newlines: cipher = cipher.replace(to_bytes(r'\n'), to_bytes('\n')) + def replace(match): result = to_bytes(_decrypt_ciphertext(match.group())) 
return result + ret, num = GPG_CIPHERTEXT.subn(replace, to_bytes(cipher)) if num > 0: # Remove trailing newlines. Without if crypted value initially specified as a YAML multiline @@ -318,6 +320,7 @@ def replace(match): pass return ret + def _decrypt_object(obj, translate_newlines=False): ''' Recursively try to decrypt any object. If the object is a six.string_types diff --git a/tests/unit/renderers/test_gpg.py b/tests/unit/renderers/test_gpg.py index 2ab9604a6d2b..c02a1ba9588e 100644 --- a/tests/unit/renderers/test_gpg.py +++ b/tests/unit/renderers/test_gpg.py @@ -167,4 +167,4 @@ def test_render_with_translate_newlines_should_translate_newlines(self): self.assertEqual( gpg.render(crypted, translate_newlines=True), expected, - ) \ No newline at end of file + ) From 937259c4445ab4c9dde04452f56222fdb0b0d4bc Mon Sep 17 00:00:00 2001 From: Wayne Werner Date: Mon, 1 Apr 2019 17:51:08 -0500 Subject: [PATCH 059/340] Refactor test_cron test It's possible that the dict could contain some more helpful information when/if this test fails again. This assertion improves the potential for getting useful information in the test failure. --- tests/integration/states/test_cron.py | 39 ++++++++++++++++++++------- 1 file changed, 30 insertions(+), 9 deletions(-) diff --git a/tests/integration/states/test_cron.py b/tests/integration/states/test_cron.py index 6aec52536d79..4c2d8d0958d7 100644 --- a/tests/integration/states/test_cron.py +++ b/tests/integration/states/test_cron.py @@ -40,16 +40,37 @@ def tearDown(self): # Delete user self.run_state('user.absent', name='test_cron_user') - def test_managed(self): - ''' - file.managed - ''' + def test_46881(self): + user_id = 'test_cron_user' + _expected = { + 'changes': { + 'diff': '--- \n+++ \n@@ -1 +1,2 @@\n-\n+# Lines below here are managed by Salt, do not edit\n+@hourly touch /tmp/test-file\n', + 'group': user_id, + 'user': user_id, + }, + } ret = self.run_state( 'cron.file', name='salt://issue-46881/cron', - user='test_cron_user' + user=user_id, + ) + # There are several keys that do not really matter to this test. + # We could just delete them, but then we lose their contents to + # aid in debugging (see https://github.com/saltstack/salt/issues/52079) + ignored_keys = ( + '__id__', + '__sls__', + '__run_num__', + 'comment', + 'duration', + 'name', + 'start_time', + 'result', + ) + id_ = 'cron_|-salt://issue-46881/cron_|-salt://issue-46881/cron_|-file' + for key in ignored_keys: + _expected[key] = ret[id_].get(key) + self.assertDictEqual( + _expected, + ret[id_], ) - _expected = '--- \n+++ \n@@ -1 +1,2 @@\n-\n+# Lines below here are managed by Salt, do not edit\n+@hourly touch /tmp/test-file\n' - self.assertIn('changes', ret['cron_|-salt://issue-46881/cron_|-salt://issue-46881/cron_|-file']) - self.assertIn('diff', ret['cron_|-salt://issue-46881/cron_|-salt://issue-46881/cron_|-file']['changes']) - self.assertEqual(_expected, ret['cron_|-salt://issue-46881/cron_|-salt://issue-46881/cron_|-file']['changes']['diff']) From 0bd08261bedbcf4618e19e34b64987273c5581ad Mon Sep 17 00:00:00 2001 From: "Daniel A. 
Wozniak" Date: Tue, 2 Apr 2019 02:24:14 +0000 Subject: [PATCH 060/340] Fix issue #51869 and add cert signing test --- salt/modules/x509.py | 5 +- .../files/conf/master.d/peers.conf | 3 + .../files/conf/minion.d/signing_policies.conf | 14 ++++ .../integration/files/file/base/test_cert.sls | 68 +++++++++++++++++++ tests/integration/states/test_x509.py | 11 ++- 5 files changed, 97 insertions(+), 4 deletions(-) create mode 100644 tests/integration/files/conf/master.d/peers.conf create mode 100644 tests/integration/files/conf/minion.d/signing_policies.conf create mode 100644 tests/integration/files/file/base/test_cert.sls diff --git a/salt/modules/x509.py b/salt/modules/x509.py index b24a4c23e2d6..fdaa41aba44b 100644 --- a/salt/modules/x509.py +++ b/salt/modules/x509.py @@ -26,6 +26,7 @@ import salt.utils.files import salt.utils.path import salt.utils.stringutils +import salt.utils.data import salt.utils.platform import salt.exceptions from salt.ext import six @@ -366,7 +367,6 @@ def _get_certificate_obj(cert): ''' if isinstance(cert, M2Crypto.X509.X509): return cert - text = _text_or_file(cert) text = get_pem_entry(text, pem_type='CERTIFICATE') return M2Crypto.X509.load_cert_string(text) @@ -1391,11 +1391,10 @@ def create_certificate( for ignore in list(_STATE_INTERNAL_KEYWORDS) + \ ['listen_in', 'preqrequired', '__prerequired__']: kwargs.pop(ignore, None) - certs = __salt__['publish.publish']( tgt=ca_server, fun='x509.sign_remote_certificate', - arg=six.text_type(kwargs)) + arg=salt.utils.data.decode_dict(kwargs, to_str=True)) if not any(certs): raise salt.exceptions.SaltInvocationError( diff --git a/tests/integration/files/conf/master.d/peers.conf b/tests/integration/files/conf/master.d/peers.conf new file mode 100644 index 000000000000..b28b03ddc0b4 --- /dev/null +++ b/tests/integration/files/conf/master.d/peers.conf @@ -0,0 +1,3 @@ +peer: + .*: + - x509.sign_remote_certificate diff --git a/tests/integration/files/conf/minion.d/signing_policies.conf b/tests/integration/files/conf/minion.d/signing_policies.conf new file mode 100644 index 000000000000..d3fc6b5c3030 --- /dev/null +++ b/tests/integration/files/conf/minion.d/signing_policies.conf @@ -0,0 +1,14 @@ +x509_signing_policies: + ca_policy: + - minions: '*' + - signing_private_key: /etc/pki/ca.key + - signing_cert: /etc/pki/ca.crt + - O: Test Company + - basicConstraints: "CA:false" + - keyUsage: "critical digitalSignature, keyEncipherment" + - extendedKeyUsage: "critical serverAuth, clientAuth" + - subjectKeyIdentifier: hash + - authorityKeyIdentifier: keyid + - days_valid: 730 + - copypath: /etc/pki + diff --git a/tests/integration/files/file/base/test_cert.sls b/tests/integration/files/file/base/test_cert.sls new file mode 100644 index 000000000000..62023d97ae9d --- /dev/null +++ b/tests/integration/files/file/base/test_cert.sls @@ -0,0 +1,68 @@ +{% set tmp_dir = pillar['tmp_dir'] %} +salt-minion: + service.running: + - enable: True + - listen: + - file: /tmp/salt-tests-tmpdir/config/minion.d/signing_policies.conf + +{{ tmp_dir }}/pki: + file.directory + +{{ tmp_dir }}/pki/issued_certs: + file.directory + +{{ tmp_dir }}/pki/ca.key: + x509.private_key_managed: + - bits: 4096 + - require: + - file: /etc/pki + +{{ tmp_dir }}/pki/ca.crt: + x509.certificate_managed: + - signing_private_key: {{ tmp_dir }}/pki/ca.key + - CN: ca.example.com + - C: US + - ST: Utah + - L: Salt Lake City + - basicConstraints: "critical CA:true" + - keyUsage: "critical cRLSign, keyCertSign" + - subjectKeyIdentifier: hash + - authorityKeyIdentifier: 
keyid,issuer:always + - days_valid: 3650 + - days_remaining: 0 + - backup: True + - managed_private_key: + name: {{ tmp_dir }}/pki/ca.key + bits: 4096 + backup: True + - require: + - file: {{ tmp_dir }}/pki + - salt-minion + - {{ tmp_dir }}/pki/ca.key + +mine.send: + module.run: + - func: x509.get_pem_entries + - kwargs: + glob_path: {{ tmp_dir }}/pki/ca.crt + - onchanges: + - x509: {{ tmp_dir }}/pki/ca.crt + +{{ tmp_dir }}/pki/test.key: + x509.private_key_managed: + - bits: 4096 + - backup: True + +test_crt: + x509.certificate_managed: + - name: {{ tmp_dir }}/pki/test.crt + - ca_server: minion + - signing_policy: ca_policy + - public_key: {{ tmp_dir }}/pki/test.key + - CN: minion + - days_remaining: 30 + - backup: True + - managed_private_key: + name: {{ tmp_dir }}/pki/test.key + bits: 4096 + backup: True diff --git a/tests/integration/states/test_x509.py b/tests/integration/states/test_x509.py index 763d806ee4d9..8f058a151075 100644 --- a/tests/integration/states/test_x509.py +++ b/tests/integration/states/test_x509.py @@ -1,13 +1,14 @@ # -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals import os +import pprint import logging import salt.utils.files from salt.ext import six from tests.support.helpers import with_tempfile -from tests.support.paths import BASE_FILES +from tests.support.paths import BASE_FILES, TMP from tests.support.case import ModuleCase from tests.support.unit import skipIf from tests.support.mixins import SaltReturnAssertsMixin @@ -61,3 +62,11 @@ def test_issue_49008(self, keyfile, crtfile): assert state_result['result'] is True, state_result assert os.path.exists(keyfile) assert os.path.exists(crtfile) + + def test_cert_signing(self): + ret = self.run_function('state.apply', ['test_cert'], pillar={'tmp_dir': TMP}) + key = 'x509_|-test_crt_|-{}/pki/test.crt_|-certificate_managed'.format(TMP) + assert key in ret + assert 'changes' in ret[key] + assert 'Certificate' in ret[key]['changes'] + assert 'New' in ret[key]['changes']['Certificate'] From 56304983d06789cb9292a710e688350e45f9e8ca Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Tue, 2 Apr 2019 17:22:22 +0000 Subject: [PATCH 061/340] Fix signing policies path --- tests/integration/files/file/base/test_cert.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/files/file/base/test_cert.sls b/tests/integration/files/file/base/test_cert.sls index 62023d97ae9d..a30d55f7e553 100644 --- a/tests/integration/files/file/base/test_cert.sls +++ b/tests/integration/files/file/base/test_cert.sls @@ -3,7 +3,7 @@ salt-minion: service.running: - enable: True - listen: - - file: /tmp/salt-tests-tmpdir/config/minion.d/signing_policies.conf + - file: {{ tmp_dir }}/config/minion.d/signing_policies.conf {{ tmp_dir }}/pki: file.directory From 7f5f7ddfd4d26c5bd9cd0a3075f05c467c7c29be Mon Sep 17 00:00:00 2001 From: "Daniel A. 
Wozniak" Date: Tue, 2 Apr 2019 17:43:23 +0000 Subject: [PATCH 062/340] Fix requisite path --- tests/integration/files/file/base/test_cert.sls | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/integration/files/file/base/test_cert.sls b/tests/integration/files/file/base/test_cert.sls index a30d55f7e553..e2ca8b120a59 100644 --- a/tests/integration/files/file/base/test_cert.sls +++ b/tests/integration/files/file/base/test_cert.sls @@ -1,9 +1,9 @@ {% set tmp_dir = pillar['tmp_dir'] %} -salt-minion: - service.running: - - enable: True - - listen: - - file: {{ tmp_dir }}/config/minion.d/signing_policies.conf +#salt-minion: +# service.running: +# - enable: True +# - listen: +# - file: {{ tmp_dir }}/config/minion.d/signing_policies.conf {{ tmp_dir }}/pki: file.directory @@ -15,7 +15,7 @@ salt-minion: x509.private_key_managed: - bits: 4096 - require: - - file: /etc/pki + - file: {{ tmp_dir }}/pki {{ tmp_dir }}/pki/ca.crt: x509.certificate_managed: From a0e245800d7a6d0b5d72a5bbfbc3f04280b02540 Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Tue, 2 Apr 2019 17:47:06 +0000 Subject: [PATCH 063/340] fix unused import --- tests/integration/states/test_x509.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/integration/states/test_x509.py b/tests/integration/states/test_x509.py index 8f058a151075..28be7579c0f8 100644 --- a/tests/integration/states/test_x509.py +++ b/tests/integration/states/test_x509.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals import os -import pprint import logging import salt.utils.files From e3f5be36bb64f93259d59eb58bd173c8efaf0f52 Mon Sep 17 00:00:00 2001 From: Wayne Werner Date: Tue, 2 Apr 2019 13:14:49 -0500 Subject: [PATCH 064/340] Use uncommon item in mine delete test Rather than using `grains.items`, `test.arg` is less common, and specifiying a value that isn't currently used anywhere else should eliminate any chance of race condition for this test. 
--- tests/integration/modules/test_mine.py | 53 ++++++++++++++++++++++---- 1 file changed, 45 insertions(+), 8 deletions(-) diff --git a/tests/integration/modules/test_mine.py b/tests/integration/modules/test_mine.py index ef03a6e2cde3..9c7a4ad798af 100644 --- a/tests/integration/modules/test_mine.py +++ b/tests/integration/modules/test_mine.py @@ -111,35 +111,72 @@ def test_mine_delete(self): ['grains.items'] ) ) + # Smoke testing that grains should now exist in the mine + ret_grains = self.run_function( + 'mine.get', + ['minion', 'grains.items'] + ) + self.assertEqual(ret_grains['minion']['id'], 'minion') self.assertTrue( self.run_function( 'mine.send', - ['test.echo', 'foo'] + ['test.arg', 'foo=bar', 'fnord=roscivs'], ) ) - ret_grains = self.run_function( + ret_args = self.run_function( 'mine.get', - ['minion', 'grains.items'] + ['minion', 'test.arg'] + ) + expected = { + 'minion': { + 'args': [], + 'kwargs': { + 'fnord': 'roscivs', + 'foo': 'bar', + }, + }, + } + # Smoke testing that test.arg exists in the mine + self.assertDictEqual(ret_args, expected) + self.assertTrue( + self.run_function( + 'mine.send', + ['test.echo', 'foo'] + ) ) - self.assertEqual(ret_grains['minion']['id'], 'minion') ret_echo = self.run_function( 'mine.get', ['minion', 'test.echo'] ) + # Smoke testing that we were also able to set test.echo in the mine self.assertEqual(ret_echo['minion'], 'foo') self.assertTrue( self.run_function( 'mine.delete', - ['grains.items'] + ['test.arg'] ) ) - ret_grains_deleted = self.run_function( + ret_arg_deleted = self.run_function( 'mine.get', - ['minion', 'grains.items'] + ['minion', 'test.arg'] + ) + # Now comes the real test - did we obliterate test.arg from the mine? + # We could assert this a different way, but there shouldn't be any + # other tests that are setting this mine value, so this should + # definitely avoid any race conditions. + self.assertFalse( + ret_arg_deleted.get('minion', {}) + .get('kwargs', {}) + .get('fnord', None) == 'roscivs', + '{} contained "fnord":"roscivs", which should be gone'.format( + ret_arg_deleted, + ) ) - self.assertEqual(ret_grains_deleted.get('minion', None), None) ret_echo_stays = self.run_function( 'mine.get', ['minion', 'test.echo'] ) + # Of course, one more health check - we want targeted removal. + # This isn't horseshoes or hand grenades - test.arg should go away + # but test.echo should still be available. self.assertEqual(ret_echo_stays['minion'], 'foo') From 8c1b1dbd92cd54268ccca5a10a67e614bc3e3970 Mon Sep 17 00:00:00 2001 From: "Daniel A. 
Wozniak" Date: Tue, 2 Apr 2019 19:48:52 +0000 Subject: [PATCH 065/340] Use tmp pillar for signing policies --- .../files/conf/minion.d/signing_policies.conf | 14 --------- .../integration/files/file/base/test_cert.sls | 9 ++---- tests/integration/states/test_x509.py | 30 ++++++++++++++++++- 3 files changed, 32 insertions(+), 21 deletions(-) delete mode 100644 tests/integration/files/conf/minion.d/signing_policies.conf diff --git a/tests/integration/files/conf/minion.d/signing_policies.conf b/tests/integration/files/conf/minion.d/signing_policies.conf deleted file mode 100644 index d3fc6b5c3030..000000000000 --- a/tests/integration/files/conf/minion.d/signing_policies.conf +++ /dev/null @@ -1,14 +0,0 @@ -x509_signing_policies: - ca_policy: - - minions: '*' - - signing_private_key: /etc/pki/ca.key - - signing_cert: /etc/pki/ca.crt - - O: Test Company - - basicConstraints: "CA:false" - - keyUsage: "critical digitalSignature, keyEncipherment" - - extendedKeyUsage: "critical serverAuth, clientAuth" - - subjectKeyIdentifier: hash - - authorityKeyIdentifier: keyid - - days_valid: 730 - - copypath: /etc/pki - diff --git a/tests/integration/files/file/base/test_cert.sls b/tests/integration/files/file/base/test_cert.sls index e2ca8b120a59..a04142a47aa9 100644 --- a/tests/integration/files/file/base/test_cert.sls +++ b/tests/integration/files/file/base/test_cert.sls @@ -1,9 +1,4 @@ {% set tmp_dir = pillar['tmp_dir'] %} -#salt-minion: -# service.running: -# - enable: True -# - listen: -# - file: {{ tmp_dir }}/config/minion.d/signing_policies.conf {{ tmp_dir }}/pki: file.directory @@ -37,7 +32,6 @@ backup: True - require: - file: {{ tmp_dir }}/pki - - salt-minion - {{ tmp_dir }}/pki/ca.key mine.send: @@ -66,3 +60,6 @@ test_crt: name: {{ tmp_dir }}/pki/test.key bits: 4096 backup: True + - require: + - {{ tmp_dir }}/pki/ca.crt + - {{ tmp_dir }}/pki/test.key diff --git a/tests/integration/states/test_x509.py b/tests/integration/states/test_x509.py index 28be7579c0f8..8e7c44bd42bd 100644 --- a/tests/integration/states/test_x509.py +++ b/tests/integration/states/test_x509.py @@ -5,9 +5,10 @@ import salt.utils.files from salt.ext import six +import textwrap from tests.support.helpers import with_tempfile -from tests.support.paths import BASE_FILES, TMP +from tests.support.paths import BASE_FILES, TMP, TMP_PILLAR_TREE, PILLAR_DIR from tests.support.case import ModuleCase from tests.support.unit import skipIf from tests.support.mixins import SaltReturnAssertsMixin @@ -27,10 +28,37 @@ class x509Test(ModuleCase, SaltReturnAssertsMixin): @classmethod def setUpClass(cls): + with salt.utils.files.fopen(os.path.join(TMP_PILLAR_TREE, 'signing_policies.sls'), 'w') as fp: + fp.write(textwrap.dedent('''\ + x509_signing_policies: + ca_policy: + - minions: '*' + - signing_private_key: {0}/pki/ca.key + - signing_cert: {0}/pki/ca.crt + - O: Test Company + - basicConstraints: "CA:false" + - keyUsage: "critical digitalSignature, keyEncipherment" + - extendedKeyUsage: "critical serverAuth, clientAuth" + - subjectKeyIdentifier: hash + - authorityKeyIdentifier: keyid + - days_valid: 730 + - copypath: {0}/pki + '''.format(TMP))) + with salt.utils.files.fopen(os.path.join(TMP_PILLAR_TREE, 'top.sls'), 'w') as fp: + fp.write(textwrap.dedent('''\ + base: + '*': + - signing_policies + ''')) cert_path = os.path.join(BASE_FILES, 'x509_test.crt') with salt.utils.files.fopen(cert_path) as fp: cls.x509_cert_text = fp.read() + @classmethod + def tearDownClass(cls): + os.remove(os.path.join(TMP_PILLAR_TREE, 'signing_policies.sls')) + 
os.remove(os.path.join(TMP_PILLAR_TREE, 'top.sls')) + def run_function(self, *args, **kwargs): ret = super(x509Test, self).run_function(*args, **kwargs) log.debug('ret = %s', ret) From 5d231f4c092c9d737f5989ccbf0f05a0eed62b77 Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Tue, 2 Apr 2019 19:50:55 +0000 Subject: [PATCH 066/340] remove unused import --- tests/integration/states/test_x509.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/states/test_x509.py b/tests/integration/states/test_x509.py index 8e7c44bd42bd..c39a0724fe86 100644 --- a/tests/integration/states/test_x509.py +++ b/tests/integration/states/test_x509.py @@ -8,7 +8,7 @@ import textwrap from tests.support.helpers import with_tempfile -from tests.support.paths import BASE_FILES, TMP, TMP_PILLAR_TREE, PILLAR_DIR +from tests.support.paths import BASE_FILES, TMP, TMP_PILLAR_TREE from tests.support.case import ModuleCase from tests.support.unit import skipIf from tests.support.mixins import SaltReturnAssertsMixin From 6d98577e37c359224bd006f995872c341afce4e3 Mon Sep 17 00:00:00 2001 From: David Murphy < dmurphy@saltstack.com> Date: Tue, 2 Apr 2019 14:02:14 -0600 Subject: [PATCH 067/340] Altered code to support salt-ssh on AIX --- salt/client/ssh/ssh_py_shim.py | 3 ++- salt/utils/rsax931.py | 8 ++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/salt/client/ssh/ssh_py_shim.py b/salt/client/ssh/ssh_py_shim.py index 55c4e7c854e8..60059d66d872 100644 --- a/salt/client/ssh/ssh_py_shim.py +++ b/salt/client/ssh/ssh_py_shim.py @@ -118,7 +118,8 @@ def need_deployment(): if dstat.st_uid != euid: # Attack detected, try again need_deployment() - if dstat.st_mode != 16832: + # AIX has non-POSIX bit 0o240700, isolate to 0o40700 + if dstat.st_mode & ~65536 != 16832: # Attack detected need_deployment() # If SUDOing then also give the super user group write permissions diff --git a/salt/utils/rsax931.py b/salt/utils/rsax931.py index 33adf9982ab0..b1576e88b5c9 100644 --- a/salt/utils/rsax931.py +++ b/salt/utils/rsax931.py @@ -45,6 +45,14 @@ def _load_libcrypto(): # two checks below lib = glob.glob('/opt/local/lib/libcrypto.so*') + glob.glob('/opt/tools/lib/libcrypto.so*') lib = lib[0] if len(lib) > 0 else None + if not lib and salt.utils.platform.is_aix(): + if os.ospath.isdir('/opt/salt/lib'): + # preference for Salt installed fileset + lib = glob.glob('/opt/salt/lib/libcrypto.so*') + lib = lib[0] if len(lib) > 0 else None + else: + lib = glob.glob('/opt/freeware/lib/libcrypto.so*') + lib = lib[0] if len(lib) > 0 else None if lib: return cdll.LoadLibrary(lib) raise OSError('Cannot locate OpenSSL libcrypto') From 95c3aba4263b613826af6fa4f0c356c4154e7806 Mon Sep 17 00:00:00 2001 From: "Daniel A. 
Wozniak" Date: Tue, 2 Apr 2019 20:24:55 +0000 Subject: [PATCH 068/340] Add config for listener --- .../files/file/base/signing_policies.conf | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 tests/integration/files/file/base/signing_policies.conf diff --git a/tests/integration/files/file/base/signing_policies.conf b/tests/integration/files/file/base/signing_policies.conf new file mode 100644 index 000000000000..d3fc6b5c3030 --- /dev/null +++ b/tests/integration/files/file/base/signing_policies.conf @@ -0,0 +1,14 @@ +x509_signing_policies: + ca_policy: + - minions: '*' + - signing_private_key: /etc/pki/ca.key + - signing_cert: /etc/pki/ca.crt + - O: Test Company + - basicConstraints: "CA:false" + - keyUsage: "critical digitalSignature, keyEncipherment" + - extendedKeyUsage: "critical serverAuth, clientAuth" + - subjectKeyIdentifier: hash + - authorityKeyIdentifier: keyid + - days_valid: 730 + - copypath: /etc/pki + From 8bfa3403fb317153b08050276bfc59c0367cab68 Mon Sep 17 00:00:00 2001 From: David Murphy < dmurphy@saltstack.com> Date: Tue, 2 Apr 2019 14:25:07 -0600 Subject: [PATCH 069/340] Correct typo --- salt/utils/rsax931.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/utils/rsax931.py b/salt/utils/rsax931.py index b1576e88b5c9..14c700a48f71 100644 --- a/salt/utils/rsax931.py +++ b/salt/utils/rsax931.py @@ -46,7 +46,7 @@ def _load_libcrypto(): lib = glob.glob('/opt/local/lib/libcrypto.so*') + glob.glob('/opt/tools/lib/libcrypto.so*') lib = lib[0] if len(lib) > 0 else None if not lib and salt.utils.platform.is_aix(): - if os.ospath.isdir('/opt/salt/lib'): + if os.path.isdir('/opt/salt/lib'): # preference for Salt installed fileset lib = glob.glob('/opt/salt/lib/libcrypto.so*') lib = lib[0] if len(lib) > 0 else None From 215bf931f21facd742f7d8c741f56c8cf93e70dc Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Tue, 2 Apr 2019 21:08:00 +0000 Subject: [PATCH 070/340] Refresh after modifying tmp pillars --- tests/integration/states/test_x509.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/integration/states/test_x509.py b/tests/integration/states/test_x509.py index c39a0724fe86..3c2c7fe92c62 100644 --- a/tests/integration/states/test_x509.py +++ b/tests/integration/states/test_x509.py @@ -50,6 +50,7 @@ def setUpClass(cls): '*': - signing_policies ''')) + self.run_function('saltutil.refresh_pillar') cert_path = os.path.join(BASE_FILES, 'x509_test.crt') with salt.utils.files.fopen(cert_path) as fp: cls.x509_cert_text = fp.read() @@ -58,6 +59,7 @@ def setUpClass(cls): def tearDownClass(cls): os.remove(os.path.join(TMP_PILLAR_TREE, 'signing_policies.sls')) os.remove(os.path.join(TMP_PILLAR_TREE, 'top.sls')) + self.run_function('saltutil.refresh_pillar') def run_function(self, *args, **kwargs): ret = super(x509Test, self).run_function(*args, **kwargs) From affd9b88b85804433e1636d7d4df8d4ba363979e Mon Sep 17 00:00:00 2001 From: "Daniel A. 
Wozniak" Date: Tue, 2 Apr 2019 21:12:11 +0000 Subject: [PATCH 071/340] Fix setup/teardown methods --- tests/integration/states/test_x509.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tests/integration/states/test_x509.py b/tests/integration/states/test_x509.py index 3c2c7fe92c62..99709aa434cf 100644 --- a/tests/integration/states/test_x509.py +++ b/tests/integration/states/test_x509.py @@ -28,6 +28,11 @@ class x509Test(ModuleCase, SaltReturnAssertsMixin): @classmethod def setUpClass(cls): + cert_path = os.path.join(BASE_FILES, 'x509_test.crt') + with salt.utils.files.fopen(cert_path) as fp: + cls.x509_cert_text = fp.read() + + def setUp(self): with salt.utils.files.fopen(os.path.join(TMP_PILLAR_TREE, 'signing_policies.sls'), 'w') as fp: fp.write(textwrap.dedent('''\ x509_signing_policies: @@ -51,12 +56,8 @@ def setUpClass(cls): - signing_policies ''')) self.run_function('saltutil.refresh_pillar') - cert_path = os.path.join(BASE_FILES, 'x509_test.crt') - with salt.utils.files.fopen(cert_path) as fp: - cls.x509_cert_text = fp.read() - @classmethod - def tearDownClass(cls): + def tearDown(self): os.remove(os.path.join(TMP_PILLAR_TREE, 'signing_policies.sls')) os.remove(os.path.join(TMP_PILLAR_TREE, 'top.sls')) self.run_function('saltutil.refresh_pillar') From 855f31a1aa2a5819a15b97907da1cd67d8c94f38 Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Tue, 2 Apr 2019 14:29:53 -0700 Subject: [PATCH 072/340] Remove un-used file --- .../files/file/base/signing_policies.conf | 14 -------------- 1 file changed, 14 deletions(-) delete mode 100644 tests/integration/files/file/base/signing_policies.conf diff --git a/tests/integration/files/file/base/signing_policies.conf b/tests/integration/files/file/base/signing_policies.conf deleted file mode 100644 index d3fc6b5c3030..000000000000 --- a/tests/integration/files/file/base/signing_policies.conf +++ /dev/null @@ -1,14 +0,0 @@ -x509_signing_policies: - ca_policy: - - minions: '*' - - signing_private_key: /etc/pki/ca.key - - signing_cert: /etc/pki/ca.crt - - O: Test Company - - basicConstraints: "CA:false" - - keyUsage: "critical digitalSignature, keyEncipherment" - - extendedKeyUsage: "critical serverAuth, clientAuth" - - subjectKeyIdentifier: hash - - authorityKeyIdentifier: keyid - - days_valid: 730 - - copypath: /etc/pki - From 9d1b2a021772bd806ac11dd66ef50e6c032cb2ce Mon Sep 17 00:00:00 2001 From: Wayne Werner Date: Tue, 2 Apr 2019 19:10:36 -0500 Subject: [PATCH 073/340] Only return None on host.present changes when test Previously, when using `test=True`, `host.present` would return `None` always. In Salt we return `True` when there are (or would be) changes, `False` when there are no changes, and `None` when there *would* have been changes. This brings `host.present` in-line with that convention. 
--- salt/states/host.py | 4 +- tests/unit/states/test_host.py | 85 ++++++++++++++++++++++++++++++++++ 2 files changed, 88 insertions(+), 1 deletion(-) diff --git a/salt/states/host.py b/salt/states/host.py index 8005ca71480c..e38646e4fc93 100644 --- a/salt/states/host.py +++ b/salt/states/host.py @@ -86,7 +86,7 @@ def present(name, ip, clean=False): # pylint: disable=C0103 ''' ret = {'name': name, 'changes': {}, - 'result': None if __opts__['test'] else True, + 'result': True, 'comment': ''} if not isinstance(ip, list): @@ -135,6 +135,7 @@ def present(name, ip, clean=False): # pylint: disable=C0103 for addr, name in to_add: if __opts__['test']: + ret['result'] = None comments.append( 'Host {0} ({1}) would be added'.format(name, addr) ) @@ -149,6 +150,7 @@ def present(name, ip, clean=False): # pylint: disable=C0103 for addr, name in to_remove: if __opts__['test']: + ret['result'] = None comments.append( 'Host {0} ({1}) would be removed'.format(name, addr) ) diff --git a/tests/unit/states/test_host.py b/tests/unit/states/test_host.py index 954b27490665..7f5b9c7ce996 100644 --- a/tests/unit/states/test_host.py +++ b/tests/unit/states/test_host.py @@ -25,6 +25,19 @@ class HostTestCase(TestCase, LoaderModuleMockMixin): ''' Validate the host state ''' + hostname = 'salt' + localhost_ip = '127.0.0.1' + ip_list = ['203.0.113.113', '203.0.113.14'] + default_hosts = { + ip_list[0]: [hostname], + ip_list[1]: [hostname], + } + + def setUp(self): + self.add_host_mock = MagicMock(return_value=True) + self.rm_host_mock = MagicMock(return_value=True) + self.list_hosts_mock = MagicMock(return_value=self.default_hosts) + def setup_loader_modules(self): return { host: { @@ -281,6 +294,78 @@ def test_present(self): assert add_host.mock_calls == [], add_host.mock_calls assert rm_host.mock_calls == [], rm_host.mock_calls + + def test_host_present_should_return_True_if_test_and_no_changes(self): + expected = { + 'comment': 'Host {} ({}) already present'.format( + self.hostname, + self.ip_list[0], + ), + 'changes': {}, + 'name': self.hostname, + 'result': True, + } + list_hosts = MagicMock( + return_value={self.ip_list[0]: [self.hostname]}, + ) + with patch.dict(host.__salt__, + {'hosts.list_hosts': list_hosts, + 'hosts.add_host': self.add_host_mock, + 'hosts.rm_host': self.rm_host_mock}): + with patch.dict(host.__opts__, {'test': True}): + ret = host.present(self.hostname, self.ip_list[:1]) + + self.assertDictEqual(ret, expected) + + def test_host_present_should_return_None_if_test_and_adding(self): + expected = { + 'comment': '\n'.join([ + 'Host {} ({}) already present', + 'Host {} ({}) would be added', + ]).format( + self.hostname, + self.ip_list[0], + self.hostname, + self.ip_list[1], + ), + 'changes': {'added': {self.ip_list[1]: [self.hostname]}}, + 'name': self.hostname, + 'result': None, + } + list_hosts = MagicMock( + return_value={self.ip_list[0]: [self.hostname]}, + ) + with patch.dict(host.__salt__, + {'hosts.list_hosts': list_hosts, + 'hosts.add_host': self.add_host_mock, + 'hosts.rm_host': self.rm_host_mock}): + with patch.dict(host.__opts__, {'test': True}): + ret = host.present(self.hostname, self.ip_list) + self.assertDictEqual(ret, expected) + + def test_host_present_should_return_None_if_test_and_removing(self): + expected = { + 'comment': '\n'.join([ + 'Host {} ({}) already present', + 'Host {} ({}) would be removed', + ]).format( + self.hostname, + self.ip_list[0], + self.hostname, + self.ip_list[1], + ), + 'changes': {'removed': {self.ip_list[1]: [self.hostname]}}, + 'name': self.hostname, + 
'result': None, + } + with patch.dict(host.__salt__, + {'hosts.list_hosts': self.list_hosts_mock, + 'hosts.add_host': self.add_host_mock, + 'hosts.rm_host': self.rm_host_mock}): + with patch.dict(host.__opts__, {'test': True}): + ret = host.present(self.hostname, self.ip_list[:1], clean=True) + self.assertDictEqual(ret, expected) + def test_absent(self): ''' Test to ensure that the named host is absent From e33f5c1fa42fbaf64965232e63cf8c39bf889db5 Mon Sep 17 00:00:00 2001 From: Justin Findlay Date: Fri, 29 Mar 2019 09:33:59 -0700 Subject: [PATCH 074/340] modules.network: standardize util check --- salt/modules/network.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/salt/modules/network.py b/salt/modules/network.py index 28bcff162263..e5fa4f05b193 100644 --- a/salt/modules/network.py +++ b/salt/modules/network.py @@ -823,6 +823,7 @@ def active_tcp(): return {} +@salt.utils.decorators.path.which('traceroute') def traceroute(host): ''' Performs a traceroute to a 3rd party host @@ -840,12 +841,7 @@ def traceroute(host): salt '*' network.traceroute archlinux.org ''' ret = [] - if not salt.utils.path.which('traceroute'): - log.info('This minion does not have traceroute installed') - return ret - cmd = 'traceroute {0}'.format(salt.utils.network.sanitize_host(host)) - out = __salt__['cmd.run'](cmd) # Parse version of traceroute From 5b64fc054c4aee8c055d29f3ac51b4286d65c50e Mon Sep 17 00:00:00 2001 From: Justin Findlay Date: Sun, 31 Mar 2019 19:43:12 -0700 Subject: [PATCH 075/340] modules.network: log and skip problematic traceroute lines --- salt/modules/network.py | 68 +++++++++++++++++++++++------------------ 1 file changed, 39 insertions(+), 29 deletions(-) diff --git a/salt/modules/network.py b/salt/modules/network.py index e5fa4f05b193..8b4dfcead469 100644 --- a/salt/modules/network.py +++ b/salt/modules/network.py @@ -848,17 +848,16 @@ def traceroute(host): if salt.utils.platform.is_sunos() or salt.utils.platform.is_aix(): traceroute_version = [0, 0, 0] else: - cmd2 = 'traceroute --version' - out2 = __salt__['cmd.run'](cmd2) + version_out = __salt__['cmd.run']('traceroute --version') try: # Linux traceroute version looks like: # Modern traceroute for Linux, version 2.0.19, Dec 10 2012 # Darwin and FreeBSD traceroute version looks like: Version 1.4a12+[FreeBSD|Darwin] - traceroute_version_raw = re.findall(r'.*[Vv]ersion (\d+)\.([\w\+]+)\.*(\w*)', out2)[0] - log.debug('traceroute_version_raw: %s', traceroute_version_raw) + version_raw = re.findall(r'.*[Vv]ersion (\d+)\.([\w\+]+)\.*(\w*)', version_out)[0] + log.debug('traceroute_version_raw: %s', version_raw) traceroute_version = [] - for t in traceroute_version_raw: + for t in version_raw: try: traceroute_version.append(int(t)) except ValueError: @@ -873,26 +872,28 @@ def traceroute(host): traceroute_version = [0, 0, 0] for line in out.splitlines(): + # Pre requirements for line parsing + skip_line = False if ' ' not in line: - continue + skip_line = True if line.startswith('traceroute'): - continue - + skip_line = True if salt.utils.platform.is_aix(): if line.startswith('trying to get source for'): - continue - + skip_line = True if line.startswith('source should be'): - continue - + skip_line = True if line.startswith('outgoing MTU'): - continue - + skip_line = True if line.startswith('fragmentation required'): - continue + skip_line = True + if skip_line: + log.debug('Skipping traceroute output line: %s', line) + continue + # Parse output from unix variants if 'Darwin' in six.text_type(traceroute_version[1]) or \ - 
'FreeBSD' in six.text_type(traceroute_version[1]) or \ + 'FreeBSD' in six.text_type(traceroute_version[1]) or \ __grains__['kernel'] in ('SunOS', 'AIX'): try: traceline = re.findall(r'\s*(\d*)\s+(.*)\s+\((.*)\)\s+(.*)$', line)[0] @@ -919,14 +920,15 @@ def traceroute(host): except IndexError: result = {} + # Parse output from specific version ranges elif (traceroute_version[0] >= 2 and traceroute_version[2] >= 14 or traceroute_version[0] >= 2 and traceroute_version[1] > 0): comps = line.split(' ') - if comps[1] == '* * *': + if len(comps) >= 2 and comps[1] == '* * *': result = { 'count': int(comps[0]), 'hostname': '*'} - else: + elif len(comps) >= 5: result = { 'count': int(comps[0]), 'hostname': comps[1].split()[0], @@ -934,21 +936,29 @@ def traceroute(host): 'ms1': float(comps[2].split()[0]), 'ms2': float(comps[3].split()[0]), 'ms3': float(comps[4].split()[0])} + else: + result = {} + + # Parse anything else else: comps = line.split() - result = { - 'count': comps[0], - 'hostname': comps[1], - 'ip': comps[2], - 'ms1': comps[4], - 'ms2': comps[6], - 'ms3': comps[8], - 'ping1': comps[3], - 'ping2': comps[5], - 'ping3': comps[7]} + if len(comps) >= 8: + result = { + 'count': comps[0], + 'hostname': comps[1], + 'ip': comps[2], + 'ms1': comps[4], + 'ms2': comps[6], + 'ms3': comps[8], + 'ping1': comps[3], + 'ping2': comps[5], + 'ping3': comps[7]} + else: + result = {} ret.append(result) - + if not result: + log.warn('Cannot parse traceroute output line: %s', line) return ret From 2f4923123331339a52ab10a0fd84df31c75abdf1 Mon Sep 17 00:00:00 2001 From: Justin Findlay Date: Tue, 2 Apr 2019 20:43:03 -0700 Subject: [PATCH 076/340] modules.network: update unit test --- tests/unit/modules/test_network.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tests/unit/modules/test_network.py b/tests/unit/modules/test_network.py index a72a018cf40b..0f9c28d9c64b 100644 --- a/tests/unit/modules/test_network.py +++ b/tests/unit/modules/test_network.py @@ -113,14 +113,15 @@ def test_traceroute(self): ''' Test for Performs a traceroute to a 3rd party host ''' - with patch.object(salt.utils.path, 'which', side_effect=[False, True]): - self.assertListEqual(network.traceroute('host'), []) + with patch('salt.utils.path.which', MagicMock(return_value='traceroute')): + with patch.dict(network.__salt__, {'cmd.run': MagicMock(return_value='')}): + self.assertListEqual(network.traceroute('gentoo.org'), []) with patch.object(salt.utils.network, 'sanitize_host', - return_value='A'): + return_value='gentoo.org'): with patch.dict(network.__salt__, {'cmd.run': - MagicMock(return_value="")}): - self.assertListEqual(network.traceroute('host'), []) + MagicMock(return_value='')}): + self.assertListEqual(network.traceroute('gentoo.org'), []) def test_dig(self): ''' From 0473683aceaba9a975fc28f72ff80e8ab12dea2d Mon Sep 17 00:00:00 2001 From: Benjamin Drung Date: Wed, 3 Apr 2019 14:50:12 +0200 Subject: [PATCH 077/340] Skip SampleConfTest if sample conf directories are missing The release tarball does not contain `conf/cloud.profiles.d`, `conf/cloud.providers.d`, and `conf/cloud.maps.d`. 
Therefore the test cases will fail: ``` ====================================================================== ERROR: test_conf_cloud_maps_d_files_are_commented (unit.test_config.SampleConfTest) [CPU:0.0%|MEM:53.9%] ---------------------------------------------------------------------- Traceback (most recent call last): File "tests/unit/test_config.py", line 236, in test_conf_cloud_maps_d_files_are_commented cloud_sample_files = os.listdir(SAMPLE_CONF_DIR + 'cloud.maps.d/') FileNotFoundError: [Errno 2] No such file or directory: 'conf/cloud.maps.d/' ====================================================================== ERROR: test_conf_cloud_profiles_d_files_are_commented (unit.test_config.SampleConfTest) [CPU:0.0%|MEM:53.9%] ---------------------------------------------------------------------- Traceback (most recent call last): File "tests/unit/test_config.py", line 200, in test_conf_cloud_profiles_d_files_are_commented cloud_sample_files = os.listdir(SAMPLE_CONF_DIR + 'cloud.profiles.d/') FileNotFoundError: [Errno 2] No such file or directory: 'conf/cloud.profiles.d/' ====================================================================== ERROR: test_conf_cloud_providers_d_files_are_commented (unit.test_config.SampleConfTest) [CPU:0.0%|MEM:53.9%] ---------------------------------------------------------------------- Traceback (most recent call last): File "tests/unit/test_config.py", line 218, in test_conf_cloud_providers_d_files_are_commented cloud_sample_files = os.listdir(SAMPLE_CONF_DIR + 'cloud.providers.d/') FileNotFoundError: [Errno 2] No such file or directory: 'conf/cloud.providers.d/' ``` Signed-off-by: Benjamin Drung --- tests/unit/test_config.py | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py index 740e086cf7fb..69b8aefb7276 100644 --- a/tests/unit/test_config.py +++ b/tests/unit/test_config.py @@ -197,9 +197,12 @@ def test_conf_cloud_profiles_d_files_are_commented(self): commented out. This test loops through all of the files in that directory to check for any lines that are not commented or blank. ''' - cloud_sample_files = os.listdir(SAMPLE_CONF_DIR + 'cloud.profiles.d/') + cloud_sample_dir = SAMPLE_CONF_DIR + 'cloud.profiles.d/' + if not os.path.exists(cloud_sample_dir): + self.skipTest("Sample config directory '{}' is missing.".format(cloud_sample_dir)) + cloud_sample_files = os.listdir(cloud_sample_dir) for conf_file in cloud_sample_files: - profile_conf = SAMPLE_CONF_DIR + 'cloud.profiles.d/' + conf_file + profile_conf = cloud_sample_dir + conf_file ret = salt.config._read_conf_file(profile_conf) self.assertEqual( ret, @@ -215,9 +218,12 @@ def test_conf_cloud_providers_d_files_are_commented(self): commented out. This test loops through all of the files in that directory to check for any lines that are not commented or blank. ''' - cloud_sample_files = os.listdir(SAMPLE_CONF_DIR + 'cloud.providers.d/') + cloud_sample_dir = SAMPLE_CONF_DIR + 'cloud.providers.d/' + if not os.path.exists(cloud_sample_dir): + self.skipTest("Sample config directory '{}' is missing.".format(cloud_sample_dir)) + cloud_sample_files = os.listdir(cloud_sample_dir) for conf_file in cloud_sample_files: - provider_conf = SAMPLE_CONF_DIR + 'cloud.providers.d/' + conf_file + provider_conf = cloud_sample_dir + conf_file ret = salt.config._read_conf_file(provider_conf) self.assertEqual( ret, @@ -233,9 +239,12 @@ def test_conf_cloud_maps_d_files_are_commented(self): commented out. 
This test loops through all of the files in that directory to check for any lines that are not commented or blank. ''' - cloud_sample_files = os.listdir(SAMPLE_CONF_DIR + 'cloud.maps.d/') + cloud_sample_dir = SAMPLE_CONF_DIR + 'cloud.maps.d/' + if not os.path.exists(cloud_sample_dir): + self.skipTest("Sample config directory '{}' is missing.".format(cloud_sample_dir)) + cloud_sample_files = os.listdir(cloud_sample_dir) for conf_file in cloud_sample_files: - map_conf = SAMPLE_CONF_DIR + 'cloud.maps.d/' + conf_file + map_conf = cloud_sample_dir + conf_file ret = salt.config._read_conf_file(map_conf) self.assertEqual( ret, From e74f78fca666e730f48f24082e4824b67af7eb9f Mon Sep 17 00:00:00 2001 From: Benjamin Drung Date: Wed, 3 Apr 2019 15:04:20 +0200 Subject: [PATCH 078/340] Skip ExtendTestCase if templates directory is missing The release tarball does not contain the `templates` directory. Therefore `ExtendTestCase` will fail: ``` ====================================================================== ERROR: test_run (unit.utils.test_extend.ExtendTestCase) [CPU:0.0%|MEM:53.9%] ---------------------------------------------------------------------- Traceback (most recent call last): File "tests/unit/utils/test_extend.py", line 40, in test_run out = salt.utils.extend.run('test', 'test', 'this description', integration.CODE_DIR, False) File "salt/utils/extend.py", line 242, in run MODULE_OPTIONS = _fetch_templates(os.path.join(salt_dir, 'templates')) File "salt/utils/extend.py", line 76, in _fetch_templates for item in os.listdir(src): FileNotFoundError: [Errno 2] No such file or directory: ' templates' ``` Signed-off-by: Benjamin Drung --- tests/unit/utils/test_extend.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/unit/utils/test_extend.py b/tests/unit/utils/test_extend.py index 2cf90fcaaf95..9cbb767ca503 100644 --- a/tests/unit/utils/test_extend.py +++ b/tests/unit/utils/test_extend.py @@ -14,7 +14,7 @@ from datetime import date # Import Salt Testing libs -from tests.support.unit import TestCase +from tests.support.unit import TestCase, skipIf from tests.support.mock import MagicMock, patch # Import salt libs @@ -35,6 +35,8 @@ def tearDown(self): shutil.rmtree(self.out, True) os.chdir(self.starting_dir) + @skipIf(not os.path.exists(os.path.join(integration.CODE_DIR, 'templates')), + "Test template directory 'templates/' missing.") def test_run(self): with patch('sys.exit', MagicMock): out = salt.utils.extend.run('test', 'test', 'this description', integration.CODE_DIR, False) From 40100e59fce017fc22c71dc1c42ed1a67128ba41 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Thu, 27 Sep 2018 23:12:58 -0500 Subject: [PATCH 079/340] Replace "pchanges" with "changes" to fix onchanges/prereq requisites Since "pchanges" was never supported in the state compiler, and "changes" is what these reqs always used, replacing "pchanges" with "changes" will allow those requisites to work in test mode. 
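Schematically (a generic sketch, not any particular state module), a test-mode return now carries its predicted diff under the key the requisite system actually inspects:

```
# Before: the prediction sat under 'pchanges', which the state compiler never
# read, so onchanges/prereq saw an empty 'changes' dict in test mode.
ret_old = {'name': 'example', 'result': None, 'comment': 'would update',
           'pchanges': {'old': 'current state', 'new': 'desired state'}}

# After: the same prediction is reported as 'changes', so onchanges and
# prereq requisites fire as expected when running with test=True.
ret_new = {'name': 'example', 'result': None, 'comment': 'would update',
           'changes': {'old': 'current state', 'new': 'desired state'}}
```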
--- doc/ref/states/writing.rst | 7 +- salt/states/archive.py | 28 ++-- salt/states/boto_cloudfront.py | 4 +- salt/states/boto_s3.py | 2 +- salt/states/boto_sqs.py | 6 +- salt/states/chocolatey.py | 31 ++--- salt/states/dvs.py | 48 ++++--- salt/states/esxdatacenter.py | 24 ++-- salt/states/esxi.py | 27 ++-- salt/states/file.py | 161 ++++++++++------------ salt/states/glance_image.py | 8 +- salt/states/kernelpkg.py | 3 +- salt/states/keystone_domain.py | 9 +- salt/states/keystone_endpoint.py | 5 +- salt/states/keystone_group.py | 7 +- salt/states/keystone_project.py | 7 +- salt/states/keystone_role.py | 4 +- salt/states/keystone_service.py | 5 +- salt/states/keystone_user.py | 7 +- salt/states/linux_acl.py | 7 +- salt/states/net_napalm_yang.py | 2 - salt/states/neutron_network.py | 7 +- salt/states/neutron_secgroup.py | 7 +- salt/states/neutron_secgroup_rule.py | 6 +- salt/states/neutron_subnet.py | 7 +- salt/states/pbm.py | 53 +++---- salt/states/snapper.py | 3 +- salt/states/solrcloud.py | 71 ++++------ salt/utils/napalm.py | 6 - salt/utils/state.py | 4 - tests/integration/modules/test_state.py | 6 +- tests/integration/states/test_file.py | 2 - tests/unit/states/test_boto_cloudfront.py | 4 +- tests/unit/states/test_boto_sqs.py | 6 +- tests/unit/states/test_esxdatacenter.py | 10 +- tests/unit/states/test_file.py | 90 +++++------- tests/unit/states/test_linux_acl.py | 33 ++--- tests/unit/utils/test_state.py | 50 ------- 38 files changed, 300 insertions(+), 467 deletions(-) diff --git a/doc/ref/states/writing.rst b/doc/ref/states/writing.rst index a2f4548844d3..ebc7e074c7fc 100644 --- a/doc/ref/states/writing.rst +++ b/doc/ref/states/writing.rst @@ -256,10 +256,6 @@ A State Module must return a dict containing the following keys/values: Prefer to keep line lengths short (use multiple lines as needed), and end with punctuation (e.g. a period) to delimit multiple comments. -The return data can also, include the **pchanges** key, this stands for -`predictive changes`. The **pchanges** key informs the State system what -changes are predicted to occur. - .. note:: States should not return data which cannot be serialized such as frozensets. @@ -445,7 +441,6 @@ Example state module 'changes': {}, 'result': False, 'comment': '', - 'pchanges': {}, } # Start with basic error-checking. Do all the passed parameters make sense @@ -466,7 +461,7 @@ Example state module # in ``test=true`` mode. 
if __opts__['test'] == True: ret['comment'] = 'The state of "{0}" will be changed.'.format(name) - ret['pchanges'] = { + ret['changes'] = { 'old': current_state, 'new': 'Description, diff, whatever of the new state', } diff --git a/salt/states/archive.py b/salt/states/archive.py index 4ca156cd0485..27c83fa27f0c 100644 --- a/salt/states/archive.py +++ b/salt/states/archive.py @@ -1431,25 +1431,19 @@ def extracted(name, dir_result = __states__['file.directory'](full_path, user=user, group=group, - recurse=recurse, - test=__opts__['test']) + recurse=recurse) log.debug('file.directory: %s', dir_result) - if __opts__['test']: - if dir_result.get('pchanges'): - ret['changes']['updated ownership'] = True - else: - try: - if dir_result['result']: - if dir_result['changes']: - ret['changes']['updated ownership'] = True - else: - enforce_failed.append(full_path) - except (KeyError, TypeError): - log.warning( - 'Bad state return %s for file.directory state on %s', - dir_result, dirname - ) + if dir_result.get('changes'): + ret['changes']['updated ownership'] = True + try: + if not dir_result['result']: + enforce_failed.append(full_path) + except (KeyError, TypeError): + log.warning( + 'Bad state return %s for file.directory state on %s', + dir_result, dirname + ) for filename in enforce_files + enforce_links: full_path = os.path.join(name, filename) diff --git a/salt/states/boto_cloudfront.py b/salt/states/boto_cloudfront.py index 27c6260e9d91..d29d3df23592 100644 --- a/salt/states/boto_cloudfront.py +++ b/salt/states/boto_cloudfront.py @@ -135,7 +135,7 @@ def present( if __opts__['test']: ret['result'] = None ret['comment'] = 'Distribution {0} set for creation.'.format(name) - ret['pchanges'] = {'old': None, 'new': name} + ret['changes'] = {'old': None, 'new': name} return ret res = __salt__['boto_cloudfront.create_distribution']( @@ -203,7 +203,7 @@ def _yaml_safe_dump(attrs): 'Distribution {0} set for new config:'.format(name), changes_diff, ]) - ret['pchanges'] = {'diff': changes_diff} + ret['changes'] = {'diff': changes_diff} return ret res = __salt__['boto_cloudfront.update_distribution']( diff --git a/salt/states/boto_s3.py b/salt/states/boto_s3.py index a75fe71afa1f..49e77510cf6d 100644 --- a/salt/states/boto_s3.py +++ b/salt/states/boto_s3.py @@ -282,7 +282,7 @@ def _yaml_safe_dump(attrs): ret['result'] = None ret['comment'] = 'S3 object {0} set to be {1}d.'.format(name, action) ret['comment'] += '\nChanges:\n{0}'.format(changes_diff) - ret['pchanges'] = {'diff': changes_diff} + ret['changes'] = {'diff': changes_diff} return ret r = __salt__['boto_s3.upload_file']( diff --git a/salt/states/boto_sqs.py b/salt/states/boto_sqs.py index 9f42dedf09ad..964c6e863ec8 100644 --- a/salt/states/boto_sqs.py +++ b/salt/states/boto_sqs.py @@ -136,7 +136,7 @@ def present( ret['comment'].append( 'SQS queue {0} is set to be created.'.format(name), ) - ret['pchanges'] = {'old': None, 'new': name} + ret['changes'] = {'old': None, 'new': name} return ret r = __salt__['boto_sqs.create']( @@ -225,7 +225,7 @@ def _yaml_safe_dump(attrs): attributes_diff, ) ) - ret['pchanges'] = {'attributes': {'diff': attributes_diff}} + ret['changes'] = {'attributes': {'diff': attributes_diff}} return ret r = __salt__['boto_sqs.set_attributes']( @@ -300,7 +300,7 @@ def absent( if __opts__['test']: ret['result'] = None ret['comment'] = 'SQS queue {0} is set to be removed.'.format(name) - ret['pchanges'] = {'old': name, 'new': None} + ret['changes'] = {'old': name, 'new': None} return ret r = __salt__['boto_sqs.delete']( diff --git 
a/salt/states/chocolatey.py b/salt/states/chocolatey.py index 5f2e6e9842be..021e9ac68b2d 100644 --- a/salt/states/chocolatey.py +++ b/salt/states/chocolatey.py @@ -336,7 +336,6 @@ def upgraded(name, ret = {'name': name, 'result': True, 'changes': {}, - 'pchanges': {}, 'comment': ''} # Get list of currently installed packages @@ -346,12 +345,10 @@ def upgraded(name, # Package not installed if name.lower() not in [package.lower() for package in pre_install.keys()]: if version: - ret['pchanges'] = { - name: 'Version {0} will be installed'.format(version) - } + ret['changes'][name] = 'Version {0} will be installed'.format(version) ret['comment'] = 'Install version {0}'.format(version) else: - ret['pchanges'] = {name: 'Latest version will be installed'} + ret['changes'][name] = 'Latest version will be installed' ret['comment'] = 'Install latest version' # Package installed @@ -378,8 +375,7 @@ def upgraded(name, oper="==", ver2=version): if force: - ret['pchanges'] = { - name: 'Version {0} will be reinstalled'.format(version)} + ret['changes'][name] = 'Version {0} will be reinstalled'.format(version) ret['comment'] = 'Reinstall {0} {1}'.format(full_name, version) else: ret['comment'] = '{0} {1} is already installed'.format( @@ -389,11 +385,9 @@ def upgraded(name, # If installed version is older than new version if salt.utils.versions.compare( ver1=installed_version, oper="<", ver2=version): - ret['pchanges'] = { - name: 'Version {0} will be upgraded to Version {1}'.format( - installed_version, version - ) - } + ret['changes'][name] = 'Version {0} will be upgraded to Version {1}'.format( + installed_version, version + ) ret['comment'] = 'Upgrade {0} {1} to {2}'.format( full_name, installed_version, version ) @@ -409,13 +403,13 @@ def upgraded(name, else: ret['comment'] = 'No version found to install' - # Return if `test=True` - if __opts__['test']: - ret['result'] = None + # Return if there are no changes to be made + if not ret['changes']: return ret - # Return if there are no changes to be made - if not ret['pchanges']: + # Return if running in test mode + if __opts__['test']: + ret['result'] = None return ret # Install the package @@ -439,6 +433,9 @@ def upgraded(name, # Get list of installed packages after 'chocolatey.install' post_install = __salt__['chocolatey.list'](local_only=True) + # Prior to this, ret['changes'] would have contained expected changes, + # replace them with the actual changes now that we have completed the + # installation. 
ret['changes'] = salt.utils.data.compare_dicts(pre_install, post_install) return ret diff --git a/salt/states/dvs.py b/salt/states/dvs.py index 421254a32753..1ff39cde00eb 100644 --- a/salt/states/dvs.py +++ b/salt/states/dvs.py @@ -401,13 +401,11 @@ def dvs_configured(name, dvs): ''.format(dvs_name, datacenter_name)), 'result': True}) else: - ret.update({'comment': '\n'.join(comments)}) - if __opts__['test']: - ret.update({'pchanges': changes, - 'result': None}) - else: - ret.update({'changes': changes, - 'result': True}) + ret.update({ + 'comment': '\n'.join(comments), + 'changes': changes, + 'result': None if __opts__['test'] else True, + }) return ret @@ -512,8 +510,10 @@ def portgroups_configured(name, dvs, portgroups): log.info('Running state {0} on DVS \'{1}\', datacenter ' '\'{2}\''.format(name, dvs, datacenter)) changes_required = False - ret = {'name': name, 'changes': {}, 'result': None, 'comment': None, - 'pchanges': {}} + ret = {'name': name, + 'changes': {}, + 'result': None, + 'comment': None} comments = [] changes = {} changes_required = False @@ -623,13 +623,11 @@ def portgroups_configured(name, dvs, portgroups): 'Nothing to be done.'.format(dvs, datacenter)), 'result': True}) else: - ret.update({'comment': '\n'.join(comments)}) - if __opts__['test']: - ret.update({'pchanges': changes, - 'result': None}) - else: - ret.update({'changes': changes, - 'result': True}) + ret.update({ + 'comment': '\n'.join(comments), + 'changes': changes, + 'result': None if __opts__['test'] else True, + }) return ret @@ -649,8 +647,10 @@ def uplink_portgroup_configured(name, dvs, uplink_portgroup): log.info('Running {0} on DVS \'{1}\', datacenter \'{2}\'' ''.format(name, dvs, datacenter)) changes_required = False - ret = {'name': name, 'changes': {}, 'result': None, 'comment': None, - 'pchanges': {}} + ret = {'name': name, + 'changes': {}, + 'result': None, + 'comment': None} comments = [] changes = {} changes_required = False @@ -708,11 +708,9 @@ def uplink_portgroup_configured(name, dvs, uplink_portgroup): 'Nothing to be done.'.format(dvs, datacenter)), 'result': True}) else: - ret.update({'comment': '\n'.join(comments)}) - if __opts__['test']: - ret.update({'pchanges': changes, - 'result': None}) - else: - ret.update({'changes': changes, - 'result': True}) + ret.update({ + 'comment': '\n'.join(comments), + 'changes': changes, + 'result': None if __opts__['test'] else True, + }) return ret diff --git a/salt/states/esxdatacenter.py b/salt/states/esxdatacenter.py index 09c69750ed6f..ae83b4d37174 100644 --- a/salt/states/esxdatacenter.py +++ b/salt/states/esxdatacenter.py @@ -89,11 +89,11 @@ def datacenter_configured(name): dc_name = name log.info('Running datacenter_configured for datacenter \'{0}\'' ''.format(dc_name)) - ret = {'name': name, 'changes': {}, 'pchanges': {}, - 'result': None, 'comment': 'Default'} + ret = {'name': name, + 'changes': {}, + 'result': None, + 'comment': 'Default'} comments = [] - changes = {} - pchanges = {} si = None try: si = __salt__['vsphere.get_service_instance_via_proxy']() @@ -103,27 +103,19 @@ def datacenter_configured(name): if __opts__['test']: comments.append('State will create ' 'datacenter \'{0}\'.'.format(dc_name)) - log.info(comments[-1]) - pchanges.update({'new': {'name': dc_name}}) else: log.debug('Creating datacenter \'{0}\'. 
'.format(dc_name)) __salt__['vsphere.create_datacenter'](dc_name, si) comments.append('Created datacenter \'{0}\'.'.format(dc_name)) - log.info(comments[-1]) - changes.update({'new': {'name': dc_name}}) + log.info(comments[-1]) + ret['changes'].update({'new': {'name': dc_name}}) else: comments.append('Datacenter \'{0}\' already exists. Nothing to be ' 'done.'.format(dc_name)) log.info(comments[-1]) __salt__['vsphere.disconnect'](si) - if __opts__['test'] and pchanges: - ret_status = None - else: - ret_status = True - ret.update({'result': ret_status, - 'comment': '\n'.join(comments), - 'changes': changes, - 'pchanges': pchanges}) + ret['comment'] = '\n'.join(comments) + ret['result'] = None if __opts__['test'] and ret['changes'] else True return ret except salt.exceptions.CommandExecutionError as exc: log.error('Error: {}'.format(exc)) diff --git a/salt/states/esxi.py b/salt/states/esxi.py index 486d9df53e79..8728224716dd 100644 --- a/salt/states/esxi.py +++ b/salt/states/esxi.py @@ -1070,8 +1070,10 @@ def diskgroups_configured(name, diskgroups, erase_disks=False): else proxy_details['esxi_host'] log.info('Running state {0} for host \'{1}\''.format(name, hostname)) # Variable used to return the result of the invocation - ret = {'name': name, 'result': None, 'changes': {}, - 'pchanges': {}, 'comments': None} + ret = {'name': name, + 'result': None, + 'changes': {}, + 'comments': None} # Signals if errors have been encountered errors = False # Signals if changes are required @@ -1294,12 +1296,8 @@ def diskgroups_configured(name, diskgroups, erase_disks=False): None if __opts__['test'] else # running in test mode False if errors else True) # found errors; defaults to True ret.update({'result': result, - 'comment': '\n'.join(comments)}) - if changes: - if __opts__['test']: - ret['pchanges'] = diskgroup_changes - elif changes: - ret['changes'] = diskgroup_changes + 'comment': '\n'.join(comments), + 'changes': diskgroup_changes}) return ret @@ -1387,8 +1385,10 @@ def host_cache_configured(name, enabled, datastore, swap_size='100%', else proxy_details['esxi_host'] log.trace('hostname = %s', hostname) log.info('Running host_cache_swap_configured for host \'%s\'', hostname) - ret = {'name': hostname, 'comment': 'Default comments', - 'result': None, 'changes': {}, 'pchanges': {}} + ret = {'name': hostname, + 'comment': 'Default comments', + 'result': None, + 'changes': {}} result = None if __opts__['test'] else True # We assume success needs_setting = False comments = [] @@ -1582,11 +1582,8 @@ def host_cache_configured(name, enabled, datastore, swap_size='100%', __salt__['vsphere.disconnect'](si) log.info(comments[-1]) ret.update({'comment': '\n'.join(comments), - 'result': result}) - if __opts__['test']: - ret['pchanges'] = changes - else: - ret['changes'] = changes + 'result': result, + 'changes': changes}) return ret except CommandExecutionError as err: log.error('Error: %s.', err) diff --git a/salt/states/file.py b/salt/states/file.py index f095ed8a2ee7..788edd576f6e 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -1024,36 +1024,36 @@ def _symlink_check(name, target, force, user, group, win_owner): ''' Check the symlink function ''' - pchanges = {} + changes = {} if not os.path.exists(name) and not __salt__['file.is_link'](name): - pchanges['new'] = name + changes['new'] = name return None, 'Symlink {0} to {1} is set for creation'.format( name, target - ), pchanges + ), changes if __salt__['file.is_link'](name): if __salt__['file.readlink'](name) != target: - pchanges['change'] = 
name + changes['change'] = name return None, 'Link {0} target is set to be changed to {1}'.format( name, target - ), pchanges + ), changes else: result = True msg = 'The symlink {0} is present'.format(name) if not _check_symlink_ownership(name, user, group, win_owner): result = None - pchanges['ownership'] = '{0}:{1}'.format(*_get_symlink_ownership(name)) + changes['ownership'] = '{0}:{1}'.format(*_get_symlink_ownership(name)) msg += ( ', but the ownership of the symlink would be changed ' 'from {2}:{3} to {0}:{1}' ).format(user, group, *_get_symlink_ownership(name)) - return result, msg, pchanges + return result, msg, changes else: if force: return None, ('The file or directory {0} is set for removal to ' 'make way for a new symlink targeting {1}' - .format(name, target)), pchanges + .format(name, target)), changes return False, ('File or directory exists where the symlink {0} ' - 'should be. Did you mean to use force?'.format(name)), pchanges + 'should be. Did you mean to use force?'.format(name)), changes def _test_owner(kwargs, user=None): @@ -1215,12 +1215,12 @@ def _shortcut_check(name, ''' Check the shortcut function ''' - pchanges = {} + changes = {} if not os.path.exists(name): - pchanges['new'] = name + changes['new'] = name return None, 'Shortcut "{0}" to "{1}" is set for creation'.format( name, target - ), pchanges + ), changes if os.path.isfile(name): with salt.utils.winapi.Com(): @@ -1241,28 +1241,28 @@ def _shortcut_check(name, ) if not all(state_checks): - pchanges['change'] = name + changes['change'] = name return None, 'Shortcut "{0}" target is set to be changed to "{1}"'.format( name, target - ), pchanges + ), changes else: result = True msg = 'The shortcut "{0}" is present'.format(name) if not _check_shortcut_ownership(name, user): result = None - pchanges['ownership'] = '{0}'.format(_get_shortcut_ownership(name)) + changes['ownership'] = '{0}'.format(_get_shortcut_ownership(name)) msg += ( ', but the ownership of the shortcut would be changed ' 'from {1} to {0}' ).format(user, _get_shortcut_ownership(name)) - return result, msg, pchanges + return result, msg, changes else: if force: return None, ('The link or directory "{0}" is set for removal to ' 'make way for a new shortcut targeting "{1}"' - .format(name, target)), pchanges + .format(name, target)), changes return False, ('Link or directory exists where the shortcut "{0}" ' - 'should be. Did you mean to use force?'.format(name)), pchanges + 'should be. Did you mean to use force?'.format(name)), changes def _makedirs(name, @@ -1489,12 +1489,12 @@ def symlink( msg += '.' 
return _error(ret, msg) - presult, pcomment, ret['pchanges'] = _symlink_check(name, - target, - force, - user, - group, - win_owner) + presult, pcomment, pchanges = _symlink_check(name, + target, + force, + user, + group, + win_owner) if not os.path.isdir(os.path.dirname(name)): if makedirs: @@ -1527,6 +1527,7 @@ def symlink( if __opts__['test']: ret['result'] = presult ret['comment'] = pcomment + ret['changes'] = pchanges return ret if __salt__['file.is_link'](name): @@ -1632,7 +1633,6 @@ def absent(name, ret = {'name': name, 'changes': {}, - 'pchanges': {}, 'result': True, 'comment': ''} if not name: @@ -1644,9 +1644,9 @@ def absent(name, if name == '/': return _error(ret, 'Refusing to make "/" absent') if os.path.isfile(name) or os.path.islink(name): - ret['pchanges']['removed'] = name if __opts__['test']: ret['result'] = None + ret['changes']['removed'] = name ret['comment'] = 'File {0} is set for removal'.format(name) return ret try: @@ -1661,9 +1661,9 @@ def absent(name, return _error(ret, '{0}'.format(exc)) elif os.path.isdir(name): - ret['pchanges']['removed'] = name if __opts__['test']: ret['result'] = None + ret['changes']['removed'] = name ret['comment'] = 'Directory {0} is set for removal'.format(name) return ret try: @@ -1699,7 +1699,6 @@ def exists(name, ret = {'name': name, 'changes': {}, - 'pchanges': {}, 'result': True, 'comment': ''} if not name: @@ -1724,7 +1723,6 @@ def missing(name, ret = {'name': name, 'changes': {}, - 'pchanges': {}, 'result': True, 'comment': ''} if not name: @@ -2316,7 +2314,6 @@ def managed(name, name = os.path.expanduser(name) ret = {'changes': {}, - 'pchanges': {}, 'comment': '', 'name': name, 'result': True} @@ -2563,7 +2560,7 @@ def managed(name, try: if __opts__['test']: if 'file.check_managed_changes' in __salt__: - ret['pchanges'] = __salt__['file.check_managed_changes']( + ret['changes'] = __salt__['file.check_managed_changes']( name, source, source_hash, @@ -2594,15 +2591,15 @@ def managed(name, reset=win_perms_reset) except CommandExecutionError as exc: if exc.strerror.startswith('Path not found'): - ret['pchanges'] = '{0} will be created'.format(name) + ret['changes'] = '{0} will be created'.format(name) - if isinstance(ret['pchanges'], tuple): - ret['result'], ret['comment'] = ret['pchanges'] - elif ret['pchanges']: + if isinstance(ret['changes'], tuple): + ret['result'], ret['comment'] = ret['changes'] + elif ret['changes']: ret['result'] = None ret['comment'] = 'The file {0} is set to be changed'.format(name) - if 'diff' in ret['pchanges'] and not show_changes: - ret['pchanges']['diff'] = '' + if 'diff' in ret['changes'] and not show_changes: + ret['changes']['diff'] = '' else: ret['result'] = True ret['comment'] = 'The file {0} is in the correct state'.format(name) @@ -3043,7 +3040,6 @@ def directory(name, name = os.path.expanduser(name) ret = {'name': name, 'changes': {}, - 'pchanges': {}, 'result': True, 'comment': ''} if not name: @@ -3117,19 +3113,19 @@ def directory(name, # Remove whatever is in the way if os.path.isfile(name): if __opts__['test']: - ret['pchanges']['forced'] = 'File was forcibly replaced' + ret['changes']['forced'] = 'File would be forcibly replaced' else: os.remove(name) ret['changes']['forced'] = 'File was forcibly replaced' elif __salt__['file.is_link'](name): if __opts__['test']: - ret['pchanges']['forced'] = 'Symlink was forcibly replaced' + ret['changes']['forced'] = 'Symlink would be forcibly replaced' else: __salt__['file.remove'](name) ret['changes']['forced'] = 'Symlink was forcibly replaced' else: 
if __opts__['test']: - ret['pchanges']['forced'] = 'Directory was forcibly replaced' + ret['changes']['forced'] = 'Directory would be forcibly replaced' else: __salt__['file.remove'](name) ret['changes']['forced'] = 'Directory was forcibly replaced' @@ -3158,11 +3154,11 @@ def directory(name, require, exclude_pat, max_depth, follow_symlinks) if pchanges: - ret['pchanges'].update(pchanges) + ret['changes'].update(pchanges) # Don't run through the reset of the function if there are no changes to be # made - if not ret['pchanges'] or __opts__['test']: + if __opts__['test'] or not ret['changes']: ret['result'] = presult ret['comment'] = pcomment return ret @@ -3581,7 +3577,6 @@ def recurse(name, ret = { 'name': name, 'changes': {}, - 'pchanges': {}, 'result': True, 'comment': {} # { path: [comment, ...] } } @@ -3880,7 +3875,6 @@ def retention_schedule(name, retain, strptime_format=None, timezone=None): name = os.path.expanduser(name) ret = {'name': name, 'changes': {'retained': [], 'deleted': [], 'ignored': []}, - 'pchanges': {'retained': [], 'deleted': [], 'ignored': []}, 'result': True, 'comment': ''} if not name: @@ -3990,7 +3984,7 @@ def get_first_n_at_depth(fwt, depth, n): 'deleted': deletable_files, 'ignored': sorted(list(ignored_files), reverse=True), } - ret['pchanges'] = changes + ret['changes'] = changes # TODO: track and report how much space was / would be reclaimed if __opts__['test']: @@ -4131,7 +4125,6 @@ def line(name, content=None, match=None, mode=None, location=None, name = os.path.expanduser(name) ret = {'name': name, 'changes': {}, - 'pchanges': {}, 'result': True, 'comment': ''} if not name: @@ -4165,14 +4158,13 @@ def line(name, content=None, match=None, mode=None, location=None, before=before, after=after, show_changes=show_changes, backup=backup, quiet=quiet, indent=indent) if changes: - ret['pchanges']['diff'] = changes + ret['changes']['diff'] = changes if __opts__['test']: ret['result'] = None - ret['comment'] = 'Changes would be made:\ndiff:\n{0}'.format(changes) + ret['comment'] = 'Changes would be made' else: ret['result'] = True ret['comment'] = 'Changes were made' - ret['changes'] = {'diff': changes} else: ret['result'] = True ret['comment'] = 'No changes needed to be made' @@ -4322,7 +4314,6 @@ def replace(name, ret = {'name': name, 'changes': {}, - 'pchanges': {}, 'result': True, 'comment': ''} if not name: @@ -4352,14 +4343,13 @@ def replace(name, backslash_literal=backslash_literal) if changes: - ret['pchanges']['diff'] = changes + ret['changes']['diff'] = changes if __opts__['test']: ret['result'] = None - ret['comment'] = 'Changes would have been made:\ndiff:\n{0}'.format(changes) + ret['comment'] = 'Changes would have been made' else: ret['result'] = True ret['comment'] = 'Changes were made' - ret['changes'] = {'diff': changes} else: ret['result'] = True ret['comment'] = 'No changes needed to be made' @@ -4581,7 +4571,6 @@ def blockreplace( ret = {'name': name, 'changes': {}, - 'pchanges': {}, 'result': False, 'comment': ''} if not name: @@ -4654,13 +4643,11 @@ def blockreplace( return ret if changes: - ret['pchanges'] = {'diff': changes} + ret['changes']['diff'] = changes if __opts__['test']: - ret['changes']['diff'] = ret['pchanges']['diff'] ret['result'] = None ret['comment'] = 'Changes would be made' else: - ret['changes']['diff'] = ret['pchanges']['diff'] ret['result'] = True ret['comment'] = 'Changes were made' else: @@ -4711,7 +4698,6 @@ def comment(name, regex, char='#', backup='.bak'): ret = {'name': name, 'changes': {}, - 'pchanges': {}, 
'result': False, 'comment': ''} if not name: @@ -4741,8 +4727,8 @@ def comment(name, regex, char='#', backup='.bak'): else: return _error(ret, '{0}: Pattern not found'.format(unanchor_regex)) - ret['pchanges'][name] = 'updated' if __opts__['test']: + ret['changes'][name] = 'updated' ret['comment'] = 'File {0} is set to be updated'.format(name) ret['result'] = None return ret @@ -4821,7 +4807,6 @@ def uncomment(name, regex, char='#', backup='.bak'): ret = {'name': name, 'changes': {}, - 'pchanges': {}, 'result': False, 'comment': ''} if not name: @@ -4848,26 +4833,20 @@ def uncomment(name, regex, char='#', backup='.bak'): else: return _error(ret, '{0}: Pattern not found'.format(regex)) - ret['pchanges'][name] = 'updated' if __opts__['test']: + ret['changes'][name] = 'updated' ret['comment'] = 'File {0} is set to be updated'.format(name) ret['result'] = None return ret with salt.utils.files.fopen(name, 'rb') as fp_: - slines = fp_.read() - if six.PY3: - slines = slines.decode(__salt_system_encoding__) - slines = slines.splitlines(True) + slines = salt.utils.data.decode(fp_.readlines()) # Perform the edit __salt__['file.comment_line'](name, regex, char, False, backup) with salt.utils.files.fopen(name, 'rb') as fp_: - nlines = fp_.read() - if six.PY3: - nlines = nlines.decode(__salt_system_encoding__) - nlines = nlines.splitlines(True) + nlines = salt.utils.data.decode(fp_.readlines()) # Check the result ret['result'] = __salt__['file.search']( @@ -5031,10 +5010,9 @@ def append(name, .. versionadded:: 0.9.5 ''' ret = {'name': name, - 'changes': {}, - 'pchanges': {}, - 'result': False, - 'comment': ''} + 'changes': {}, + 'result': False, + 'comment': ''} if not name: return _error(ret, 'Must provide name to file.append') @@ -5069,12 +5047,12 @@ def append(name, except CommandExecutionError as exc: return _error(ret, 'Drive {0} is not mapped'.format(exc.message)) - if salt.utils.platform.is_windows(): - check_res, check_msg, ret['pchanges'] = _check_directory_win(dirname) - else: - check_res, check_msg, ret['pchanges'] = _check_directory(dirname) + check_res, check_msg, check_changes = _check_directory_win(dirname) \ + if salt.utils.platform.is_windows() \ + else _check_directory(dirname) if not check_res: + ret['changes'] = check_changes return _error(ret, check_msg) check_res, check_msg = _check_file(name) @@ -5323,7 +5301,6 @@ def prepend(name, ret = {'name': name, 'changes': {}, - 'pchanges': {}, 'result': False, 'comment': ''} if not name: @@ -5353,11 +5330,12 @@ def prepend(name, except CommandExecutionError as exc: return _error(ret, 'Drive {0} is not mapped'.format(exc.message)) - if salt.utils.platform.is_windows(): - check_res, check_msg, ret['pchanges'] = _check_directory_win(dirname) - else: - check_res, check_msg, ret['pchanges'] = _check_directory(dirname) + check_res, check_msg, check_changes = _check_directory_win(dirname) \ + if salt.utils.platform.is_windows() \ + else _check_directory(dirname) + if not check_res: + ret['changes'] = check_changes return _error(ret, check_msg) check_res, check_msg = _check_file(name) @@ -6806,17 +6784,18 @@ def shortcut( msg += '.' 
return _error(ret, msg) - presult, pcomment, ret['pchanges'] = _shortcut_check(name, - target, - arguments, - working_dir, - description, - icon_location, - force, - user) + presult, pcomment, pchanges = _shortcut_check(name, + target, + arguments, + working_dir, + description, + icon_location, + force, + user) if __opts__['test']: ret['result'] = presult ret['comment'] = pcomment + ret['changes'] = pchanges return ret if not os.path.isdir(os.path.dirname(name)): diff --git a/salt/states/glance_image.py b/salt/states/glance_image.py index aff285a48d71..d9d9e971c3c2 100644 --- a/salt/states/glance_image.py +++ b/salt/states/glance_image.py @@ -52,15 +52,16 @@ def present(name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['glanceng.setup_clouds'](auth) image = __salt__['glanceng.image_get'](name=name) if not image: - if __opts__['test'] is True: + if __opts__['test']: ret['result'] = None ret['changes'] = kwargs - ret['pchanges'] = ret['changes'] ret['comment'] = 'Image {} will be created.'.format(name) return ret @@ -91,10 +92,9 @@ def absent(name, auth=None): image = __salt__['glanceng.image_get'](name=name) if image: - if __opts__['test'] is True: + if __opts__['test']: ret['result'] = None ret['changes'] = {'name': name} - ret['pchanges'] = ret['changes'] ret['comment'] = 'Image {} will be deleted.'.format(name) return ret diff --git a/salt/states/kernelpkg.py b/salt/states/kernelpkg.py index 6d4fd56357c2..7ed558cd388e 100644 --- a/salt/states/kernelpkg.py +++ b/salt/states/kernelpkg.py @@ -144,8 +144,7 @@ def latest_active(name, at_time=None, **kwargs): # pylint: disable=unused-argum if __opts__['test']: ret['result'] = None - ret['changes'] = {} - ret['pchanges'] = {'kernel': { + ret['changes'] = {'kernel': { 'old': active, 'new': latest }} diff --git a/salt/states/keystone_domain.py b/salt/states/keystone_domain.py index 27d98657e700..095a181cc037 100644 --- a/salt/states/keystone_domain.py +++ b/salt/states/keystone_domain.py @@ -56,15 +56,16 @@ def present(name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['keystoneng.setup_clouds'](auth) domain = __salt__['keystoneng.domain_get'](name=name) if not domain: - if __opts__['test'] is True: + if __opts__['test']: ret['result'] = None ret['changes'] = kwargs - ret['pchanges'] = ret['changes'] ret['comment'] = 'Domain {} will be created.'.format(name) return ret @@ -76,10 +77,9 @@ def present(name, auth=None, **kwargs): changes = __salt__['keystoneng.compare_changes'](domain, **kwargs) if changes: - if __opts__['test'] is True: + if __opts__['test']: ret['result'] = None ret['changes'] = changes - ret['pchanges'] = ret['changes'] ret['comment'] = 'Domain {} will be updated.'.format(name) return ret @@ -111,7 +111,6 @@ def absent(name, auth=None): if __opts__['test'] is True: ret['result'] = None ret['changes'] = {'name': name} - ret['pchanges'] = ret['changes'] ret['comment'] = 'Domain {} will be deleted.'.format(name) return ret diff --git a/salt/states/keystone_endpoint.py b/salt/states/keystone_endpoint.py index fb6151519d31..7b19913572a9 100644 --- a/salt/states/keystone_endpoint.py +++ b/salt/states/keystone_endpoint.py @@ -101,6 +101,8 @@ def present(name, service_name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['keystoneng.setup_clouds'](auth) success, val = _, endpoint = _common(ret, name, service_name, kwargs) @@ 
-111,7 +113,6 @@ def present(name, service_name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = kwargs - ret['pchanges'] = ret['changes'] ret['comment'] = 'Endpoint will be created.' return ret @@ -131,7 +132,6 @@ def present(name, service_name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = changes - ret['pchanges'] = ret['changes'] ret['comment'] = 'Endpoint will be updated.' return ret @@ -174,7 +174,6 @@ def absent(name, service_name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = {'id': endpoint.id} - ret['pchanges'] = ret['changes'] ret['comment'] = 'Endpoint will be deleted.' return ret diff --git a/salt/states/keystone_group.py b/salt/states/keystone_group.py index cf636e40d341..cfd4af02c0aa 100644 --- a/salt/states/keystone_group.py +++ b/salt/states/keystone_group.py @@ -73,6 +73,8 @@ def present(name, auth=None, **kwargs): __salt__['keystoneng.setup_cloud'](auth) + kwargs = __utils__['args.clean_kwargs'](**kwargs) + kwargs['name'] = name group = _common(kwargs) @@ -80,7 +82,6 @@ def present(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = kwargs - ret['pchanges'] = ret['changes'] ret['comment'] = 'Group will be created.' return ret @@ -94,7 +95,6 @@ def present(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = changes - ret['pchanges'] = ret['changes'] ret['comment'] = 'Group will be updated.' return ret @@ -120,6 +120,8 @@ def absent(name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['keystoneng.setup_cloud'](auth) kwargs['name'] = name @@ -129,7 +131,6 @@ def absent(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = {'id': group.id} - ret['pchanges'] = ret['changes'] ret['comment'] = 'Group will be deleted.' return ret diff --git a/salt/states/keystone_project.py b/salt/states/keystone_project.py index 94a6cc52acec..bb9327b5db0b 100644 --- a/salt/states/keystone_project.py +++ b/salt/states/keystone_project.py @@ -72,6 +72,8 @@ def present(name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['keystoneng.setup_clouds'](auth) kwargs['name'] = name @@ -81,7 +83,6 @@ def present(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = kwargs - ret['pchanges'] = ret['changes'] ret['comment'] = 'Project will be created.' return ret @@ -95,7 +96,6 @@ def present(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = changes - ret['pchanges'] = ret['changes'] ret['comment'] = 'Project will be updated.' return ret @@ -121,6 +121,8 @@ def absent(name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['keystoneng.setup_clouds'](auth) kwargs['name'] = name @@ -130,7 +132,6 @@ def absent(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = {'id': project.id} - ret['pchanges'] = ret['changes'] ret['comment'] = 'Project will be deleted.' 
return ret diff --git a/salt/states/keystone_role.py b/salt/states/keystone_role.py index 394a51cfb7e3..d90d45f0a2bc 100644 --- a/salt/states/keystone_role.py +++ b/salt/states/keystone_role.py @@ -52,6 +52,8 @@ def present(name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['keystoneng.setup_clouds'](auth) kwargs['name'] = name @@ -61,7 +63,6 @@ def present(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = kwargs - ret['pchanges'] = ret['changes'] ret['comment'] = 'Role will be created.' return ret @@ -95,7 +96,6 @@ def absent(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = {'id': role.id} - ret['pchanges'] = ret['changes'] ret['comment'] = 'Role will be deleted.' return ret diff --git a/salt/states/keystone_service.py b/salt/states/keystone_service.py index ac62b5958469..faca6d623573 100644 --- a/salt/states/keystone_service.py +++ b/salt/states/keystone_service.py @@ -61,6 +61,8 @@ def present(name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['keystoneng.setup_clouds'](auth) service = __salt__['keystoneng.service_get'](name=name) @@ -69,7 +71,6 @@ def present(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = kwargs - ret['pchanges'] = ret['changes'] ret['comment'] = 'Service will be created.' return ret @@ -84,7 +85,6 @@ def present(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = changes - ret['pchanges'] = ret['changes'] ret['comment'] = 'Service will be updated.' return ret @@ -117,7 +117,6 @@ def absent(name, auth=None): if __opts__['test'] is True: ret['result'] = None ret['changes'] = {'id': service.id} - ret['pchanges'] = ret['changes'] ret['comment'] = 'Service will be deleted.' return ret diff --git a/salt/states/keystone_user.py b/salt/states/keystone_user.py index 23f95fd260fa..a1bfd8d85ec1 100644 --- a/salt/states/keystone_user.py +++ b/salt/states/keystone_user.py @@ -83,6 +83,8 @@ def present(name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['keystoneng.setup_clouds'](auth) kwargs['name'] = name @@ -92,7 +94,6 @@ def present(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = kwargs - ret['pchanges'] = ret['changes'] ret['comment'] = 'User will be created.' return ret @@ -106,7 +107,6 @@ def present(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = changes - ret['pchanges'] = ret['changes'] ret['comment'] = 'User will be updated.' return ret @@ -133,6 +133,8 @@ def absent(name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['keystoneng.setup_clouds'](auth) kwargs['name'] = name @@ -142,7 +144,6 @@ def absent(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = {'id': user.id} - ret['pchanges'] = ret['changes'] ret['comment'] = 'User will be deleted.' 
return ret diff --git a/salt/states/linux_acl.py b/salt/states/linux_acl.py index f7408514b75d..b705a6b580e7 100644 --- a/salt/states/linux_acl.py +++ b/salt/states/linux_acl.py @@ -59,7 +59,6 @@ def present(name, acl_type, acl_name='', perms='', recurse=False): ret = {'name': name, 'result': True, 'changes': {}, - 'pchanges': {}, 'comment': ''} _octal = {'r': 4, 'w': 2, 'x': 1, '-': 0} @@ -132,7 +131,7 @@ def present(name, acl_type, acl_name='', perms='', recurse=False): acl_name, new_perms, perms), - 'result': None, 'pchanges': changes}) + 'result': None, 'changes': changes}) return ret try: __salt__['acl.modfacl'](acl_type, acl_name, perms, name, @@ -151,8 +150,8 @@ def present(name, acl_type, acl_name='', perms='', recurse=False): if __opts__['test']: ret.update({'comment': 'New permissions will be applied for ' - '{0}: {1}'.format(acl_name, perms), - 'result': None, 'pchanges': changes}) + '{0}: {1}'.format(acl_name, perms), + 'result': None, 'changes': changes}) ret['result'] = None return ret diff --git a/salt/states/net_napalm_yang.py b/salt/states/net_napalm_yang.py index fc7a0633ad17..8b9726786f53 100644 --- a/salt/states/net_napalm_yang.py +++ b/salt/states/net_napalm_yang.py @@ -94,8 +94,6 @@ def managed(name, compliance_report: ``False`` Return the compliance report in the comment. - The compliance report structured object can be found however - in the ``pchanges`` field of the output (not displayed on the CLI). .. versionadded:: 2017.7.3 diff --git a/salt/states/neutron_network.py b/salt/states/neutron_network.py index e9f2b8a0d053..191207e8260b 100644 --- a/salt/states/neutron_network.py +++ b/salt/states/neutron_network.py @@ -72,6 +72,8 @@ def present(name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['neutronng.setup_clouds'](auth) kwargs['name'] = name @@ -81,7 +83,6 @@ def present(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = kwargs - ret['pchanges'] = ret['changes'] ret['comment'] = 'Network will be created.' return ret @@ -115,7 +116,6 @@ def present(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = changes - ret['pchanges'] = ret['changes'] ret['comment'] = 'Project will be updated.' return ret @@ -140,6 +140,8 @@ def absent(name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['neutronng.setup_clouds'](auth) kwargs['name'] = name @@ -149,7 +151,6 @@ def absent(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = {'id': network.id} - ret['pchanges'] = ret['changes'] ret['comment'] = 'Network will be deleted.' return ret diff --git a/salt/states/neutron_secgroup.py b/salt/states/neutron_secgroup.py index 7859ac60df76..1a62ecd67112 100644 --- a/salt/states/neutron_secgroup.py +++ b/salt/states/neutron_secgroup.py @@ -74,6 +74,8 @@ def present(name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['neutronng.setup_clouds'](auth) if 'project_name' in kwargs: @@ -95,7 +97,6 @@ def present(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = kwargs - ret['pchanges'] = ret['changes'] ret['comment'] = 'Security Group will be created.' 
return ret @@ -109,7 +110,6 @@ def present(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = changes - ret['pchanges'] = ret['changes'] ret['comment'] = 'Security Group will be updated.' return ret @@ -133,6 +133,8 @@ def absent(name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['neutronng.setup_clouds'](auth) kwargs['project_id'] = __salt__['keystoneng.project_get']( @@ -147,7 +149,6 @@ def absent(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = {'id': secgroup.id} - ret['pchanges'] = ret['changes'] ret['comment'] = 'Security group will be deleted.' return ret diff --git a/salt/states/neutron_secgroup_rule.py b/salt/states/neutron_secgroup_rule.py index 888969e90d5d..ccc6f2f064ff 100644 --- a/salt/states/neutron_secgroup_rule.py +++ b/salt/states/neutron_secgroup_rule.py @@ -77,6 +77,8 @@ def present(name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['neutronng.setup_clouds'](auth) if 'project_name' in kwargs: @@ -112,7 +114,6 @@ def present(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = kwargs - ret['pchanges'] = ret['changes'] ret['comment'] = 'Security Group rule will be created.' return ret @@ -166,10 +167,9 @@ def absent(name, auth=None, **kwargs): rule_exists = True if rule_exists: - if __opts__['test'] is True: + if __opts__['test']: ret['result'] = None ret['changes'] = {'id': kwargs['rule_id']} - ret['pchanges'] = ret['changes'] ret['comment'] = 'Security group rule will be deleted.' return ret diff --git a/salt/states/neutron_subnet.py b/salt/states/neutron_subnet.py index 43e4ab3ccf80..58219019eea3 100644 --- a/salt/states/neutron_subnet.py +++ b/salt/states/neutron_subnet.py @@ -96,16 +96,17 @@ def present(name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['neutronng.setup_clouds'](auth) kwargs['subnet_name'] = name subnet = __salt__['neutronng.subnet_get'](name=name) if subnet is None: - if __opts__['test'] is True: + if __opts__['test']: ret['result'] = None ret['changes'] = kwargs - ret['pchanges'] = ret['changes'] ret['comment'] = 'Subnet will be created.' return ret @@ -119,7 +120,6 @@ def present(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = changes - ret['pchanges'] = ret['changes'] ret['comment'] = 'Project will be updated.' return ret @@ -160,7 +160,6 @@ def absent(name, auth=None): if __opts__['test'] is True: ret['result'] = None ret['changes'] = {'id': subnet.id} - ret['pchanges'] = ret['changes'] ret['comment'] = 'Project will be deleted.' 
return ret diff --git a/salt/states/pbm.py b/salt/states/pbm.py index 00945fc65cf6..836c95b807da 100644 --- a/salt/states/pbm.py +++ b/salt/states/pbm.py @@ -156,8 +156,10 @@ def default_vsan_policy_configured(name, policy): '\'{1}\''.format(name, vcenter)) log.trace('policy = {0}'.format(policy)) changes_required = False - ret = {'name': name, 'changes': {}, 'result': None, 'comment': None, - 'pchanges': {}} + ret = {'name': name, + 'changes': {}, + 'result': None, + 'comment': None} comments = [] changes = {} changes_required = False @@ -266,13 +268,11 @@ def default_vsan_policy_configured(name, policy): 'Nothing to be done.'.format(vcenter)), 'result': True}) else: - ret.update({'comment': '\n'.join(comments)}) - if __opts__['test']: - ret.update({'pchanges': changes, - 'result': None}) - else: - ret.update({'changes': changes, - 'result': True}) + ret.update({ + 'comment': '\n'.join(comments), + 'changes': changes, + 'result': None if __opts__['test'] else True, + }) return ret @@ -286,8 +286,10 @@ def storage_policies_configured(name, policies): comments = [] changes = [] changes_required = False - ret = {'name': name, 'changes': {}, 'result': None, 'comment': None, - 'pchanges': {}} + ret = {'name': name, + 'changes': {}, + 'result': None, + 'comment': None} log.trace('policies = {0}'.format(policies)) si = None try: @@ -430,13 +432,11 @@ def storage_policies_configured(name, policies): 'Nothing to be done.'.format(vcenter)), 'result': True}) else: - ret.update({'comment': '\n'.join(comments)}) - if __opts__['test']: - ret.update({'pchanges': {'storage_policies': changes}, - 'result': None}) - else: - ret.update({'changes': {'storage_policies': changes}, - 'result': True}) + ret.update({ + 'comment': '\n'.join(comments), + 'changes': {'storage_policies': changes}, + 'result': None if __opts__['test'] else True, + }) return ret @@ -454,8 +454,10 @@ def default_storage_policy_assigned(name, policy, datastore): ''.format(name, policy, datastore)) changes = {} changes_required = False - ret = {'name': name, 'changes': {}, 'result': None, 'comment': None, - 'pchanges': {}} + ret = {'name': name, + 'changes': {}, + 'result': None, + 'comment': None} si = None try: si = __salt__['vsphere.get_service_instance_via_proxy']() @@ -488,14 +490,13 @@ def default_storage_policy_assigned(name, policy, datastore): ret.update({'comment': exc.strerror, 'result': False if not __opts__['test'] else None}) return ret + ret['comment'] = comment if changes_required: - if __opts__['test']: - ret.update({'result': None, - 'pchanges': changes}) - else: - ret.update({'result': True, - 'changes': changes}) + ret.update({ + 'changes': changes, + 'result': None if __opts__['test'] else True, + }) else: ret['result'] = True return ret diff --git a/salt/states/snapper.py b/salt/states/snapper.py index 0b8eea53964f..c49b11416228 100644 --- a/salt/states/snapper.py +++ b/salt/states/snapper.py @@ -199,8 +199,7 @@ def baseline_snapshot(name, number=None, tag=None, include_diff=True, config='ro filename=file).get(file, {})) if __opts__['test'] and status: - ret['pchanges'] = status - ret['changes'] = ret['pchanges'] + ret['changes'] = status ret['comment'] = "{0} files changes are set to be undone".format(len(status.keys())) ret['result'] = None elif __opts__['test'] and not status: diff --git a/salt/states/solrcloud.py b/salt/states/solrcloud.py index 3a00b85715b5..4079be7a6a5e 100644 --- a/salt/states/solrcloud.py +++ b/salt/states/solrcloud.py @@ -34,10 +34,9 @@ def alias(name, collections, **kwargs): 'changes': {}, 
'result': False, 'comment': '', - 'pchanges': {}, } - if __salt__["solrcloud.alias_exists"](name, **kwargs): + if __salt__['solrcloud.alias_exists'](name, **kwargs): alias_content = __salt__['solrcloud.alias_get_collections'](name, **kwargs) diff = set(alias_content).difference(set(collections)) @@ -48,38 +47,31 @@ def alias(name, collections, **kwargs): if __opts__['test']: ret['comment'] = 'The alias "{0}" will be updated.'.format(name) - ret['pchanges'] = { - 'old': ",".join(alias_content), - 'new': ",".join(collections) - } ret['result'] = None else: - __salt__["solrcloud.alias_set_collections"](name, collections, **kwargs) + __salt__['solrcloud.alias_set_collections'](name, collections, **kwargs) ret['comment'] = 'The alias "{0}" has been updated.'.format(name) - ret['changes'] = { - 'old': ",".join(alias_content), - 'new': ",".join(collections) - } - ret['result'] = True + + ret['changes'] = { + 'old': ','.join(alias_content), + 'new': ','.join(collections), + } + else: if __opts__['test']: ret['comment'] = 'The alias "{0}" will be created.'.format(name) - ret['pchanges'] = { - 'old': None, - 'new': ",".join(collections) - } ret['result'] = None else: - __salt__["solrcloud.alias_set_collections"](name, collections, **kwargs) + __salt__['solrcloud.alias_set_collections'](name, collections, **kwargs) ret['comment'] = 'The alias "{0}" has been created.'.format(name) - ret['changes'] = { - 'old': None, - 'new': ",".join(collections) - } - ret['result'] = True + ret['changes'] = { + 'old': None, + 'new': ','.join(collections), + } + return ret @@ -101,7 +93,6 @@ def collection(name, options=None, **kwargs): 'changes': {}, 'result': False, 'comment': '', - 'pchanges': {}, } if options is None: @@ -137,42 +128,32 @@ def collection(name, options=None, **kwargs): if __opts__['test']: ret['comment'] = 'Collection options "{0}" will be changed.'.format(name) - ret['pchanges'] = { - 'old': salt.utils.json.dumps(current_options, sort_keys=True, indent=4, separators=(',', ': ')), - 'new': salt.utils.json.dumps(options, sort_keys=True, indent=4, separators=(',', ': ')) - } ret['result'] = None - - return ret else: - __salt__["solrcloud.collection_set_options"](name, diff, **kwargs) - + __salt__['solrcloud.collection_set_options'](name, diff, **kwargs) ret['comment'] = 'Parameters were updated for collection "{0}".'.format(name) ret['result'] = True - ret['changes'] = { - 'old': salt.utils.json.dumps(current_options, sort_keys=True, indent=4, separators=(',', ': ')), - 'new': salt.utils.json.dumps(options, sort_keys=True, indent=4, separators=(',', ': ')) - } - return ret + ret['changes'] = { + 'old': salt.utils.json.dumps(current_options, sort_keys=True, indent=4, separators=(',', ': ')), + 'new': salt.utils.json.dumps(options, sort_keys=True, indent=4, separators=(',', ': ')) + } + return ret else: + new_changes = salt.utils.json.dumps(options, sort_keys=True, indent=4, separators=(',', ': ')) if __opts__['test']: ret['comment'] = 'The collection "{0}" will be created.'.format(name) - ret['pchanges'] = { - 'old': None, - 'new': str('options=') + new_changes # future lint: disable=blacklisted-function - } ret['result'] = None else: __salt__["solrcloud.collection_create"](name, options, **kwargs) ret['comment'] = 'The collection "{0}" has been created.'.format(name) - ret['changes'] = { - 'old': None, - 'new': str('options=') + new_changes # future lint: disable=blacklisted-function - } - ret['result'] = True + ret['changes'] = { + 'old': None, + 'new': str('options=') + new_changes # future lint: 
disable=blacklisted-function + } + return ret diff --git a/salt/utils/napalm.py b/salt/utils/napalm.py index 43536fc9a3e5..748945bf6b1c 100644 --- a/salt/utils/napalm.py +++ b/salt/utils/napalm.py @@ -437,7 +437,6 @@ def default_ret(name): ''' ret = { 'name': name, - 'pchanges': {}, 'changes': {}, 'result': False, 'comment': '' @@ -455,19 +454,14 @@ def loaded_ret(ret, loaded, test, debug, compliance_report=False, opts=None): ''' # Always get the comment changes = {} - pchanges = {} ret['comment'] = loaded['comment'] if 'diff' in loaded: changes['diff'] = loaded['diff'] - pchanges['diff'] = loaded['diff'] if 'compliance_report' in loaded: if compliance_report: changes['compliance_report'] = loaded['compliance_report'] - pchanges['compliance_report'] = loaded['compliance_report'] if debug and 'loaded_config' in loaded: changes['loaded_config'] = loaded['loaded_config'] - pchanges['loaded_config'] = loaded['loaded_config'] - ret['pchanges'] = pchanges if changes.get('diff'): ret['comment'] = '{comment_base}\n\nConfiguration diff:\n\n{diff}'.format(comment_base=ret['comment'], diff=changes['diff']) diff --git a/salt/utils/state.py b/salt/utils/state.py index b90f36beaac4..371f393a4aca 100644 --- a/salt/utils/state.py +++ b/salt/utils/state.py @@ -212,10 +212,6 @@ def state_func(name, config, alarm=None): original_return.setdefault('changes', {}) original_return['changes'][subkey] = sub_return['changes'] - if sub_return.get('pchanges'): # pchanges may or may not exist - original_return.setdefault('pchanges', {}) - original_return['pchanges'][subkey] = sub_return['pchanges'] - return original_return diff --git a/tests/integration/modules/test_state.py b/tests/integration/modules/test_state.py index 730c33c8dbdb..bc5bf80907ea 100644 --- a/tests/integration/modules/test_state.py +++ b/tests/integration/modules/test_state.py @@ -83,9 +83,7 @@ def _reline(path, ending=DEFAULT_ENDING): fhw.write(line + ending) destpath = os.path.join(BASE_FILES, 'testappend', 'firstif') - _reline(destpath) destpath = os.path.join(BASE_FILES, 'testappend', 'secondif') - _reline(destpath) def test_show_highstate(self): ''' @@ -1904,7 +1902,7 @@ def test_state_sls_id_test(self): for key, val in ret.items(): self.assertEqual(val['comment'], comment) - self.assertEqual(val['changes'], {}) + self.assertEqual(val['changes'], {'newfile': testfile}) def test_state_sls_id_test_state_test_post_run(self): ''' @@ -1937,7 +1935,7 @@ def test_state_sls_id_test_true(self): self.assertEqual( val['comment'], 'The file {0} is set to be changed'.format(file_name)) - self.assertEqual(val['changes'], {}) + self.assertEqual(val['changes'], {'newfile': file_name}) def test_state_sls_id_test_true_post_run(self): ''' diff --git a/tests/integration/states/test_file.py b/tests/integration/states/test_file.py index 34df82cf9dbe..808598a4aea2 100644 --- a/tests/integration/states/test_file.py +++ b/tests/integration/states/test_file.py @@ -690,7 +690,6 @@ def test_managed_source_hash_indifferent_case(self): source_hash=uppercase_hash ) assert ret[state_name]['result'] is True - assert ret[state_name]['pchanges'] == {} assert ret[state_name]['changes'] == {} # Test uppercase source_hash using test=true @@ -703,7 +702,6 @@ def test_managed_source_hash_indifferent_case(self): test=True ) assert ret[state_name]['result'] is True - assert ret[state_name]['pchanges'] == {} assert ret[state_name]['changes'] == {} finally: diff --git a/tests/unit/states/test_boto_cloudfront.py b/tests/unit/states/test_boto_cloudfront.py index e6179e2de748..25f26d561136 
100644 --- a/tests/unit/states/test_boto_cloudfront.py +++ b/tests/unit/states/test_boto_cloudfront.py @@ -91,7 +91,7 @@ def test_present_from_scratch(self): self.base_ret_with({ 'result': None, 'comment': comment, - 'pchanges': {'old': None, 'new': self.name}, + 'changes': {'old': None, 'new': self.name}, }), ) @@ -191,7 +191,7 @@ def test_present_update_config_and_tags(self): self.base_ret_with({ 'result': None, 'comment': '\n'.join([header, diff]), - 'pchanges': {'diff': diff}, + 'changes': {'diff': diff}, }), ) diff --git a/tests/unit/states/test_boto_sqs.py b/tests/unit/states/test_boto_sqs.py index f0b29b044596..2b8e46ac88cb 100644 --- a/tests/unit/states/test_boto_sqs.py +++ b/tests/unit/states/test_boto_sqs.py @@ -74,7 +74,7 @@ def test_present(self): ret.update({ 'result': None, 'comment': comt, - 'pchanges': {'old': None, 'new': 'mysqs'}, + 'changes': {'old': None, 'new': 'mysqs'}, }) self.assertDictEqual(boto_sqs.present(name), ret) diff = textwrap.dedent('''\ @@ -101,7 +101,7 @@ def test_present(self): ] ret.update({ 'comment': comt, - 'pchanges': {'attributes': {'diff': diff}}, + 'changes': {'attributes': {'diff': diff}}, }) self.assertDictEqual(boto_sqs.present(name, attributes), ret) @@ -133,6 +133,6 @@ def test_absent(self): ret.update({ 'result': None, 'comment': comt, - 'pchanges': {'old': name, 'new': None}, + 'changes': {'old': name, 'new': None}, }) self.assertDictEqual(boto_sqs.absent(name), ret) diff --git a/tests/unit/states/test_esxdatacenter.py b/tests/unit/states/test_esxdatacenter.py index a55dd0308a43..38d6f9a86b6a 100644 --- a/tests/unit/states/test_esxdatacenter.py +++ b/tests/unit/states/test_esxdatacenter.py @@ -64,7 +64,6 @@ def test_dc_name_different_proxy(self): res = esxdatacenter.datacenter_configured('fake_dc') self.assertDictEqual(res, {'name': 'fake_dc', 'changes': {}, - 'pchanges': {}, 'result': True, 'comment': 'Datacenter \'fake_dc\' already ' 'exists. Nothing to be done.'}) @@ -78,7 +77,6 @@ def test_dc_name_esxdatacenter_proxy(self): res = esxdatacenter.datacenter_configured('fake_dc') self.assertDictEqual(res, {'name': 'fake_dc', 'changes': {}, - 'pchanges': {}, 'result': True, 'comment': 'Datacenter \'proxy_dc\' ' 'already exists. Nothing to be done.'}) @@ -112,7 +110,6 @@ def test_create_datacenter(self): self.assertDictEqual(res, {'name': 'fake_dc', 'changes': {'new': {'name': 'fake_dc'}}, - 'pchanges': {}, 'result': True, 'comment': 'Created datacenter \'fake_dc\'.'}) @@ -124,8 +121,7 @@ def test_create_datacenter_test_mode(self): res = esxdatacenter.datacenter_configured('fake_dc') self.assertDictEqual(res, {'name': 'fake_dc', - 'changes': {}, - 'pchanges': {'new': {'name': 'fake_dc'}}, + 'changes': {'new': {'name': 'fake_dc'}}, 'result': None, 'comment': 'State will create ' 'datacenter \'fake_dc\'.'}) @@ -138,7 +134,6 @@ def test_nothing_to_be_done_test_mode(self): res = esxdatacenter.datacenter_configured('fake_dc') self.assertDictEqual(res, {'name': 'fake_dc', 'changes': {}, - 'pchanges': {}, 'result': True, 'comment': 'Datacenter \'fake_dc\' already ' 'exists. 
Nothing to be done.'}) @@ -154,7 +149,6 @@ def test_state_get_service_instance_raise_command_execution_error(self): self.assertEqual(mock_disconnect.call_count, 0) self.assertDictEqual(res, {'name': 'fake_dc', 'changes': {}, - 'pchanges': {}, 'result': False, 'comment': 'Error'}) @@ -169,7 +163,6 @@ def test_state_raise_command_execution_error_after_si(self): mock_disconnect.assert_called_once_with(self.mock_si) self.assertDictEqual(res, {'name': 'fake_dc', 'changes': {}, - 'pchanges': {}, 'result': False, 'comment': 'Error'}) @@ -182,6 +175,5 @@ def test_state_raise_command_execution_error_test_mode(self): res = esxdatacenter.datacenter_configured('fake_dc') self.assertDictEqual(res, {'name': 'fake_dc', 'changes': {}, - 'pchanges': {}, 'result': None, 'comment': 'Error'}) diff --git a/tests/unit/states/test_file.py b/tests/unit/states/test_file.py index 87b90a8af3bb..4cd0d34874f8 100644 --- a/tests/unit/states/test_file.py +++ b/tests/unit/states/test_file.py @@ -215,7 +215,7 @@ def return_val(kwargs): ' is set for creation').format(name, target) ret = return_val({'comment': comt, 'result': None, - 'pchanges': {'new': name}}) + 'changes': {'new': name}}) self.assertDictEqual(filestate.symlink(name, target, user=user, group=group), ret) @@ -243,7 +243,7 @@ def return_val(kwargs): comt = ('Directory {0} for symlink is not present').format(test_dir) ret = return_val({'comment': comt, 'result': False, - 'pchanges': {'new': name}}) + 'changes': {}}) self.assertDictEqual(filestate.symlink(name, target, user=user, group=group), ret) @@ -267,7 +267,7 @@ def return_val(kwargs): '{1}:{2}'.format(name, user, group)) ret = return_val({'comment': comt, 'result': True, - 'pchanges': {}}) + 'changes': {}}) self.assertDictEqual(filestate.symlink(name, target, user=user, group=group), ret) @@ -287,7 +287,7 @@ def return_val(kwargs): ' should go') ret = return_val({'comment': comt, 'result': False, - 'pchanges': {'new': name}}) + 'changes': {'new': name}}) self.assertDictEqual(filestate.symlink (name, target, user=user, group=group, backupname='SALT'), @@ -308,7 +308,7 @@ def return_val(kwargs): comt = ('File exists where the symlink {0} should be' .format(name)) ret = return_val({'comment': comt, - 'pchanges': {'new': name}, + 'changes': {}, 'result': False}) self.assertDictEqual(filestate.symlink (name, target, user=user, @@ -328,7 +328,7 @@ def return_val(kwargs): with patch.object(os.path, 'exists', mock_f): with patch('salt.utils.win_functions.get_sid_from_name', return_value='test-sid'): comt = ('File exists where the symlink {0} should be'.format(name)) - ret = return_val({'comment': comt, 'result': False, 'pchanges': {'new': '/tmp/testfile.txt'}}) + ret = return_val({'comment': comt, 'result': False, 'changes': {}}) self.assertDictEqual(filestate.symlink (name, target, user=user, group=group), ret) @@ -347,7 +347,7 @@ def return_val(kwargs): with patch.object(os.path, 'exists', mock_f): with patch('salt.utils.win_functions.get_sid_from_name', return_value='test-sid'): comt = ('Directory exists where the symlink {0} should be'.format(name)) - ret = return_val({'comment': comt, 'result': False, 'pchanges': {'new': '/tmp/testfile.txt'}}) + ret = return_val({'comment': comt, 'result': False, 'changes': {}}) self.assertDictEqual(filestate.symlink (name, target, user=user, group=group), ret) @@ -366,7 +366,7 @@ def return_val(kwargs): with patch('salt.utils.win_functions.get_sid_from_name', return_value='test-sid'): comt = ('Unable to create new symlink {0} -> ' '{1}: '.format(name, target)) - ret = 
return_val({'comment': comt, 'result': False, 'pchanges': {'new': '/tmp/testfile.txt'}}) + ret = return_val({'comment': comt, 'result': False, 'changes': {}}) self.assertDictEqual(filestate.symlink (name, target, user=user, group=group), ret) @@ -388,7 +388,7 @@ def return_val(kwargs): with patch('salt.utils.win_functions.get_sid_from_name', return_value='test-sid'): comt = 'Created new symlink {0} -> {1}'.format(name, target) ret = return_val({'comment': comt, - 'result': True, 'pchanges': {'new': '/tmp/testfile.txt'}, + 'result': True, 'changes': {'new': '/tmp/testfile.txt'}, 'changes': {'new': name}}) self.assertDictEqual(filestate.symlink (name, target, user=user, @@ -415,7 +415,6 @@ def return_val(kwargs): '{2}:{3}'.format(name, target, user, group)) ret = return_val({'comment': comt, 'result': False, - 'pchanges': {'new': '/tmp/testfile.txt'}, 'changes': {'new': name}}) self.assertDictEqual(filestate.symlink (name, target, user=user, @@ -431,7 +430,6 @@ def test_absent(self): ret = {'name': name, 'result': False, 'comment': '', - 'pchanges': {}, 'changes': {}} mock_t = MagicMock(return_value=True) @@ -462,17 +460,15 @@ def test_absent(self): ret.update({'comment': comt, 'name': name, 'result': None, - 'pchanges': {'removed': '/fake/file.conf'}}) + 'changes': {'removed': '/fake/file.conf'}}) self.assertDictEqual(filestate.absent(name), ret) - ret.update({'pchanges': {}}) with patch.dict(filestate.__opts__, {'test': False}): with patch.dict(filestate.__salt__, {'file.remove': mock_file}): comt = ('Removed file {0}'.format(name)) ret.update({'comment': comt, 'result': True, - 'changes': {'removed': name}, - 'pchanges': {'removed': name}}) + 'changes': {'removed': name}}) self.assertDictEqual(filestate.absent(name), ret) comt = ('Removed file {0}'.format(name)) @@ -480,7 +476,6 @@ def test_absent(self): 'result': False, 'changes': {}}) self.assertDictEqual(filestate.absent(name), ret) - ret.update({'pchanges': {}}) with patch.object(os.path, 'isfile', mock_f): with patch.object(os.path, 'isdir', mock_t): @@ -488,7 +483,7 @@ def test_absent(self): comt = \ 'Directory {0} is set for removal'.format(name) ret.update({'comment': comt, - 'pchanges': {'removed': name}, + 'changes': {'removed': name}, 'result': None}) self.assertDictEqual(filestate.absent(name), ret) @@ -505,7 +500,6 @@ def test_absent(self): ret.update({'comment': comt, 'result': False, 'changes': {}}) self.assertDictEqual(filestate.absent(name), ret) - ret.update({'pchanges': {}}) with patch.object(os.path, 'isdir', mock_f): with patch.dict(filestate.__opts__, {'test': True}): @@ -524,8 +518,7 @@ def test_exists(self): ret = {'name': name, 'result': False, 'comment': '', - 'changes': {}, - 'pchanges': {}} + 'changes': {}} mock_t = MagicMock(return_value=True) mock_f = MagicMock(return_value=False) @@ -561,7 +554,7 @@ def test_missing(self): mock_f = MagicMock(return_value=False) comt = ('Must provide name to file.missing') - ret.update({'comment': comt, 'name': '', 'pchanges': {}}) + ret.update({'comment': comt, 'name': '', 'changes': {}}) self.assertDictEqual(filestate.missing(''), ret) with patch.object(os.path, 'exists', mock_t): @@ -632,7 +625,7 @@ def test_managed(self): 'file.manage_file': mock_ex, 'cmd.run_all': mock_cmd_fail}): comt = ('Destination file name is required') - ret.update({'comment': comt, 'name': '', 'pchanges': {}}) + ret.update({'comment': comt, 'name': '', 'changes': {}}) self.assertDictEqual(filestate.managed(''), ret) with patch.object(os.path, 'isfile', mock_f): @@ -737,13 +730,12 @@ def 
test_managed(self): comt = ('check_cmd execution failed') ret.update({'comment': comt, 'result': False, 'skip_watch': True}) - ret.pop('pchanges') self.assertDictEqual(filestate.managed (name, user=user, group=group, check_cmd='A'), ret) comt = ('check_cmd execution failed') - ret.update({'comment': True, 'pchanges': {}}) + ret.update({'comment': True, 'changes': {}}) ret.pop('skip_watch', None) self.assertDictEqual(filestate.managed (name, user=user, group=group), @@ -800,7 +792,7 @@ def test_directory(self): ret = {'name': name, 'result': False, 'comment': '', - 'pchanges': {}, + 'changes': {}, 'changes': {}} comt = ('Must provide name to file.directory') @@ -892,12 +884,10 @@ def test_directory(self): else: comt = ('The following files will be changed:\n{0}:' ' directory - new\n'.format(name)) - p_chg = {name: {'directory': 'new'}} ret.update({ 'comment': comt, 'result': None, - 'pchanges': p_chg, - 'changes': {} + 'changes': {name: {'directory': 'new'}} }) self.assertDictEqual(filestate.directory(name, user=user, @@ -908,7 +898,7 @@ def test_directory(self): with patch.object(os.path, 'isdir', mock_f): comt = ('No directory to create {0} in' .format(name)) - ret.update({'comment': comt, 'result': False, 'changes': {}}) + ret.update({'comment': comt, 'result': False}) self.assertDictEqual(filestate.directory (name, user=user, group=group), ret) @@ -927,7 +917,7 @@ def test_directory(self): 'options "ignore_files" and ' '"ignore_dirs" at the same ' 'time.', - 'pchanges': {}}) + 'changes': {}}) with patch.object(os.path, 'isdir', mock_t): self.assertDictEqual(filestate.directory (name, user=user, @@ -955,7 +945,6 @@ def test_recurse(self): ret = {'name': name, 'result': False, 'comment': '', - 'pchanges': {}, 'changes': {}} comt = ("'mode' is not allowed in 'file.recurse'." 
@@ -1044,7 +1033,7 @@ def test_replace(self): 'changes': {}} comt = ('Must provide name to file.replace') - ret.update({'comment': comt, 'name': '', 'pchanges': {}}) + ret.update({'comment': comt, 'name': '', 'changes': {}}) self.assertDictEqual(filestate.replace('', pattern, repl), ret) mock_t = MagicMock(return_value=True) @@ -1078,7 +1067,6 @@ def test_blockreplace(self): ret = {'name': name, 'result': False, 'comment': '', - 'pchanges': {}, 'changes': {}} comt = ('Must provide name to file.blockreplace') @@ -1098,8 +1086,7 @@ def test_blockreplace(self): with patch.dict(filestate.__opts__, {'test': True}): comt = ('Changes would be made') ret.update({'comment': comt, 'result': None, - 'changes': {'diff': True}, - 'pchanges': {'diff': True}}) + 'changes': {'diff': True}}) self.assertDictEqual(filestate.blockreplace(name), ret) # 'comment' function tests: 1 @@ -1115,7 +1102,6 @@ def test_comment(self): ret = {'name': name, 'result': False, 'comment': '', - 'pchanges': {}, 'changes': {}} comt = ('Must provide name to file.comment') @@ -1146,14 +1132,15 @@ def test_comment(self): 'file.comment_line': mock_t}): with patch.dict(filestate.__opts__, {'test': True}): comt = ('File {0} is set to be updated'.format(name)) - ret.update({'comment': comt, 'result': None, 'pchanges': {name: 'updated'}}) + ret.update({'comment': comt, 'result': None, 'changes': {name: 'updated'}}) self.assertDictEqual(filestate.comment(name, regex), ret) with patch.dict(filestate.__opts__, {'test': False}): with patch.object(salt.utils.files, 'fopen', MagicMock(mock_open())): comt = ('Commented lines successfully') - ret.update({'comment': comt, 'result': True}) + ret.update({'comment': comt, 'result': True, + 'changes': {}}) self.assertDictEqual(filestate.comment(name, regex), ret) @@ -1168,7 +1155,6 @@ def test_uncomment(self): regex = 'bind 127.0.0.1' ret = {'name': name, - 'pchanges': {}, 'result': False, 'comment': '', 'changes': {}} @@ -1201,14 +1187,16 @@ def test_uncomment(self): with patch.dict(filestate.__opts__, {'test': True}): comt = ('File {0} is set to be updated'.format(name)) - ret.update({'comment': comt, 'result': None, 'pchanges': {name: 'updated'}, }) + ret.update({'comment': comt, 'result': None, + 'changes': {name: 'updated'}}) self.assertDictEqual(filestate.uncomment(name, regex), ret) with patch.dict(filestate.__opts__, {'test': False}): with patch.object(salt.utils.files, 'fopen', MagicMock(mock_open())): comt = ('Uncommented lines successfully') - ret.update({'comment': comt, 'result': True}) + ret.update({'comment': comt, 'result': True, + 'changes': {}}) self.assertDictEqual(filestate.uncomment(name, regex), ret) # 'prepend' function tests: 1 @@ -1228,7 +1216,6 @@ def test_prepend(self): ret = {'name': name, 'result': False, 'comment': '', - 'pchanges': {}, 'changes': {}} comt = ('Must provide name to file.prepend') @@ -1251,24 +1238,23 @@ def test_prepend(self): 'file.prepend': mock_t}): comt = ('The following files will be changed:\n/tmp/etc:' ' directory - new\n') - pchanges = {'/tmp/etc': {'directory': 'new'}} + changes = {'/tmp/etc': {'directory': 'new'}} if salt.utils.platform.is_windows(): comt = 'The directory "c:\\tmp\\etc" will be changed' - pchanges = {'c:\\tmp\\etc': {'directory': 'new'}} - ret.update({'comment': comt, 'name': name, 'pchanges': pchanges}) + changes = {'c:\\tmp\\etc': {'directory': 'new'}} + ret.update({'comment': comt, 'name': name, 'changes': changes}) self.assertDictEqual(filestate.prepend(name, makedirs=True), ret) with patch.object(os.path, 'isabs', 
mock_f): comt = ('Specified file {0} is not an absolute path' .format(name)) - ret.update({'comment': comt, 'pchanges': {}}) + ret.update({'comment': comt, 'changes': {}}) self.assertDictEqual(filestate.prepend(name), ret) with patch.object(os.path, 'isabs', mock_t): with patch.object(os.path, 'exists', mock_t): comt = ("Failed to load template file {0}".format(source)) - ret.pop('pchanges') ret.update({'comment': comt, 'name': source, 'data': []}) self.assertDictEqual(filestate.prepend(name, source=source), ret) @@ -1282,8 +1268,9 @@ def test_prepend(self): change = {'diff': 'Replace binary file'} comt = ('File {0} is set to be updated' .format(name)) - ret.update({'comment': comt, 'result': None, - 'changes': change, 'pchanges': {}}) + ret.update({'comment': comt, + 'result': None, + 'changes': change}) self.assertDictEqual(filestate.prepend (name, text=text), ret) @@ -1871,7 +1858,6 @@ def run_checks(isdir=mock_t, strptime_format=None, test=False): expected_ret = { 'name': fake_name, 'changes': {'retained': [], 'deleted': [], 'ignored': []}, - 'pchanges': {'retained': [], 'deleted': [], 'ignored': []}, 'result': True, 'comment': 'Name provided to file.retention must be a directory', } @@ -1917,8 +1903,7 @@ def run_checks(isdir=mock_t, strptime_format=None, test=False): deleted_files = sorted(list(set(fake_file_list) - retained_files - set(ignored_files)), reverse=True) retained_files = sorted(list(retained_files), reverse=True) - changes = {'retained': retained_files, 'deleted': deleted_files, 'ignored': ignored_files} - expected_ret['pchanges'] = changes + expected_ret['changes'] = {'retained': retained_files, 'deleted': deleted_files, 'ignored': ignored_files} if test: expected_ret['result'] = None expected_ret['comment'] = ('{0} backups would have been removed from {1}.\n' @@ -1926,7 +1911,6 @@ def run_checks(isdir=mock_t, strptime_format=None, test=False): else: expected_ret['comment'] = ('{0} backups were removed from {1}.\n' ''.format(len(deleted_files), fake_name)) - expected_ret['changes'] = changes mock_remove.assert_has_calls( [call(os.path.join(fake_name, x)) for x in deleted_files], any_order=True diff --git a/tests/unit/states/test_linux_acl.py b/tests/unit/states/test_linux_acl.py index 8d60f9c80660..04f96b4d936c 100644 --- a/tests/unit/states/test_linux_acl.py +++ b/tests/unit/states/test_linux_acl.py @@ -69,13 +69,12 @@ def test_present(self): ''.format(acl_name, perms)) ret = {'name': name, 'comment': comt, - 'changes': {}, - 'pchanges': {'new': {'acl_name': acl_name, + 'changes': {'new': {'acl_name': acl_name, 'acl_type': acl_type, 'perms': perms}, - 'old': {'acl_name': acl_name, - 'acl_type': acl_type, - 'perms': 'r-x'}}, + 'old': {'acl_name': acl_name, + 'acl_type': acl_type, + 'perms': 'r-x'}}, 'result': None} self.assertDictEqual(linux_acl.present(name, acl_type, acl_name, @@ -92,7 +91,6 @@ def test_present(self): 'old': {'acl_name': acl_name, 'acl_type': acl_type, 'perms': 'r-x'}}, - 'pchanges': {}, 'result': True} self.assertDictEqual(linux_acl.present(name, acl_type, acl_name, perms), @@ -106,7 +104,6 @@ def test_present(self): ret = {'name': name, 'comment': comt, 'changes': {}, - 'pchanges': {}, 'result': False} self.assertDictEqual(linux_acl.present(name, acl_type, acl_name, perms), @@ -118,10 +115,9 @@ def test_present(self): 'for {0}: {1}'.format(acl_name, perms)) ret = {'name': name, 'comment': comt, - 'changes': {}, - 'pchanges': {'new': {'acl_name': acl_name, - 'acl_type': acl_type, - 'perms': perms}}, + 'changes': {'new': {'acl_name': acl_name, + 
'acl_type': acl_type, + 'perms': perms}}, 'result': None} self.assertDictEqual(linux_acl.present(name, acl_type, acl_name, perms), @@ -135,7 +131,6 @@ def test_present(self): 'changes': {'new': {'acl_name': acl_name, 'acl_type': acl_type, 'perms': perms}}, - 'pchanges': {}, 'result': True} self.assertDictEqual(linux_acl.present(name, acl_type, acl_name, perms), @@ -149,7 +144,6 @@ def test_present(self): ret = {'name': name, 'comment': comt, 'changes': {}, - 'pchanges': {}, 'result': False} self.assertDictEqual(linux_acl.present(name, acl_type, acl_name, perms), @@ -163,13 +157,12 @@ def test_present(self): ''.format(acl_name, perms)) ret = {'name': name, 'comment': comt, - 'changes': {}, - 'pchanges': {'new': {'acl_name': acl_name, + 'changes': {'new': {'acl_name': acl_name, 'acl_type': acl_type, 'perms': perms}, - 'old': {'acl_name': acl_name, - 'acl_type': acl_type, - 'perms': 'rwx'}}, + 'old': {'acl_name': acl_name, + 'acl_type': acl_type, + 'perms': 'rwx'}}, 'result': None} self.assertDictEqual(linux_acl.present(name, acl_type, acl_name, @@ -183,7 +176,6 @@ def test_present(self): ret = {'name': name, 'comment': comt, 'changes': {}, - 'pchanges': {}, 'result': True} self.assertDictEqual(linux_acl.present(name, acl_type, acl_name, @@ -191,8 +183,7 @@ def test_present(self): # No acl type comt = ('ACL Type does not exist') - ret = {'name': name, 'comment': comt, 'result': False, - 'changes': {}, 'pchanges': {}} + ret = {'name': name, 'comment': comt, 'result': False, 'changes': {}} self.assertDictEqual(linux_acl.present(name, acl_type, acl_name, perms), ret) diff --git a/tests/unit/utils/test_state.py b/tests/unit/utils/test_state.py index d076e7d00436..0f356c59e72a 100644 --- a/tests/unit/utils/test_state.py +++ b/tests/unit/utils/test_state.py @@ -527,56 +527,6 @@ def test_merge_changes(self): 'alarms': secondary_changes, }) - def test_merge_pchanges(self): - primary_pchanges = {'old': None, 'new': 'my_resource'} - secondary_pchanges = {'old': None, 'new': ['alarm-1', 'alarm-2']} - - # Neither main nor sub pchanges case - m = copy.deepcopy(self.main_ret) - s = copy.deepcopy(self.sub_ret) - res = salt.utils.state.merge_subreturn(m, s) - self.assertNotIn('pchanges', res) - - # No main pchanges, sub pchanges - m = copy.deepcopy(self.main_ret) - s = copy.deepcopy(self.sub_ret) - s['pchanges'] = copy.deepcopy(secondary_pchanges) - res = salt.utils.state.merge_subreturn(m, s) - self.assertDictEqual(res['pchanges'], { - 'secondary': secondary_pchanges - }) - - # Main pchanges, no sub pchanges - m = copy.deepcopy(self.main_ret) - m['pchanges'] = copy.deepcopy(primary_pchanges) - s = copy.deepcopy(self.sub_ret) - res = salt.utils.state.merge_subreturn(m, s) - self.assertDictEqual(res['pchanges'], primary_pchanges) - - # Both main and sub pchanges, new pchanges don't affect existing ones - m = copy.deepcopy(self.main_ret) - m['pchanges'] = copy.deepcopy(primary_pchanges) - s = copy.deepcopy(self.sub_ret) - s['pchanges'] = copy.deepcopy(secondary_pchanges) - res = salt.utils.state.merge_subreturn(m, s) - self.assertDictEqual(res['pchanges'], { - 'old': None, - 'new': 'my_resource', - 'secondary': secondary_pchanges, - }) - - # The subkey parameter is respected - m = copy.deepcopy(self.main_ret) - m['pchanges'] = copy.deepcopy(primary_pchanges) - s = copy.deepcopy(self.sub_ret) - s['pchanges'] = copy.deepcopy(secondary_pchanges) - res = salt.utils.state.merge_subreturn(m, s, subkey='alarms') - self.assertDictEqual(res['pchanges'], { - 'old': None, - 'new': 'my_resource', - 'alarms': 
secondary_pchanges, - }) - def test_merge_comments(self): main_comment_1 = 'First primary comment.' main_comment_2 = 'Second primary comment.' From 509e5aa3e1d8fc2bb70ccb2f4219702e9f59a656 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Fri, 28 Sep 2018 21:21:30 -0500 Subject: [PATCH 080/340] Add test mode changes to file.touch state --- salt/states/file.py | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/salt/states/file.py b/salt/states/file.py index 788edd576f6e..fe21b3f0dcad 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -968,16 +968,25 @@ def _check_touch(name, atime, mtime): ''' Check to see if a file needs to be updated or created ''' + ret = { + 'result': None, + 'comment': '', + 'changes': {'new': name}, + } if not os.path.exists(name): - return None, 'File {0} is set to be created'.format(name) - stats = __salt__['file.stats'](name, follow_symlinks=False) - if atime is not None: - if six.text_type(atime) != six.text_type(stats['atime']): - return None, 'Times set to be updated on file {0}'.format(name) - if mtime is not None: - if six.text_type(mtime) != six.text_type(stats['mtime']): - return None, 'Times set to be updated on file {0}'.format(name) - return True, 'File {0} exists and has the correct times'.format(name) + ret['comment'] = 'File {0} is set to be created'.format(name) + else: + stats = __salt__['file.stats'](name, follow_symlinks=False) + if ((atime is not None + and six.text_type(atime) != six.text_type(stats['atime'])) or + (mtime is not None + and six.text_type(mtime) != six.text_type(stats['mtime']))): + ret['comment'] = 'Times set to be updated on file {0}'.format(name) + ret['changes'] = {'touched': name} + else: + ret['result'] = True + ret['comment'] = 'File {0} exists and has the correct times'.format(name) + return ret def _get_symlink_ownership(path): @@ -5618,7 +5627,7 @@ def touch(name, atime=None, mtime=None, makedirs=False): ) if __opts__['test']: - ret['result'], ret['comment'] = _check_touch(name, atime, mtime) + ret.update(_check_touch(name, atime, mtime)) return ret if makedirs: From 742c3ed5253061c4ac44bad6f13cd3c9f5179d55 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Fri, 28 Sep 2018 21:23:13 -0500 Subject: [PATCH 081/340] Add exception logging in flaky decorator --- tests/support/helpers.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/support/helpers.py b/tests/support/helpers.py index 712899a2a2ee..ffcab4242967 100644 --- a/tests/support/helpers.py +++ b/tests/support/helpers.py @@ -211,7 +211,10 @@ def wrap(cls): if attempt >= 3: raise exc backoff_time = attempt ** 2 - log.info('Found Exception. Waiting %s seconds to retry.', backoff_time) + log.info( + 'Found Exception. Waiting %s seconds to retry.', + backoff_time + ) time.sleep(backoff_time) return cls return wrap From c527c4b90c9a35a3eddc175762deb775ecbe87c9 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Fri, 28 Sep 2018 21:23:51 -0500 Subject: [PATCH 082/340] Add repack_state_returns to TestCase This allows for a state return dict to be repacked so that the top level keys are the IDs, making it much easier to analyze and craft asserts against the results. 
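The repacking that this patch introduces is easiest to see with a small example. The following is a minimal sketch of the transformation; the state tags follow the format quoted in the docstring added below, and the return bodies are invented purely for illustration:

    # Hypothetical state return as produced by state.apply: the top-level
    # keys are the states' unique tags.
    state_ret = {
        'file_|-foo_|-/etc/foo.conf_|-managed': {'result': True, 'changes': {}},
        'pkg_|-bar_|-bar_|-installed': {'result': True, 'changes': {}},
    }

    # repack_state_returns keys the same bodies by ID declaration instead,
    # using the same split the patch below performs.
    repacked = {tag.split('_|-')[1]: body for tag, body in state_ret.items()}
    assert repacked['foo']['result'] is True
    assert repacked['bar']['changes'] == {}

This is what lets the integration tests later in this series assert directly against ret['manage_a_file'] and similar IDs instead of full state tags.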
--- tests/support/unit.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tests/support/unit.py b/tests/support/unit.py index 70cd8b789153..7e862e919f9e 100644 --- a/tests/support/unit.py +++ b/tests/support/unit.py @@ -268,6 +268,19 @@ def assertNotAlmostEquals(self, *args, **kwargs): ) # return _TestCase.assertNotAlmostEquals(self, *args, **kwargs) + def repack_state_returns(self, state_ret): + ''' + Accepts a state return dict and returns it back with the top level key + names rewritten such that the ID declaration is the key instead of the + State's unique tag. For example: 'foo' instead of + 'file_|-foo_|-/etc/foo.conf|-managed' + + This makes it easier to work with state returns when crafting asserts + after running states. + ''' + assert isinstance(state_ret, dict), state_ret + return {x.split('_|-')[1]: y for x, y in six.iteritems(state_ret)} + def failUnlessEqual(self, *args, **kwargs): raise DeprecationWarning( 'The {0}() function is deprecated. Please start using {1}() ' From 7e43cbb07c29adb365a3919ebfe27329e1c18bbf Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Sun, 30 Sep 2018 16:45:25 -0500 Subject: [PATCH 083/340] Update file.touch unit tests to reflect addition of changes in test mode --- tests/unit/states/test_file.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tests/unit/states/test_file.py b/tests/unit/states/test_file.py index 4cd0d34874f8..43b9f952e5f2 100644 --- a/tests/unit/states/test_file.py +++ b/tests/unit/states/test_file.py @@ -1381,14 +1381,18 @@ def test_touch(self): with patch.object(os.path, 'exists', mock_f): with patch.dict(filestate.__opts__, {'test': True}): comt = ('File {0} is set to be created'.format(name)) - ret.update({'comment': comt, 'result': None}) + ret.update({'comment': comt, + 'result': None, + 'changes': {'new': name}}) self.assertDictEqual(filestate.touch(name), ret) with patch.dict(filestate.__opts__, {'test': False}): with patch.object(os.path, 'isdir', mock_f): comt = ('Directory not present to touch file {0}' .format(name)) - ret.update({'comment': comt, 'result': False}) + ret.update({'comment': comt, + 'result': False, + 'changes': {}}) self.assertDictEqual(filestate.touch(name), ret) with patch.object(os.path, 'isdir', mock_t): From 20c9f0f121db421bbe501dee9e95303da8fc1334 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Fri, 28 Sep 2018 21:27:43 -0500 Subject: [PATCH 084/340] Make it possible to use prereq with test and saltmod state mods this requisite passes a __prerequired__ kwarg so these need to support **kwargs for that reason. This also changes the comment for salt.function in test mode because it's inaccurate and doesn't make sense. 
--- salt/states/saltmod.py | 22 +++++++++++----------- salt/states/test.py | 8 ++++---- tests/unit/states/test_saltmod.py | 6 ++---- 3 files changed, 17 insertions(+), 19 deletions(-) diff --git a/salt/states/saltmod.py b/salt/states/saltmod.py index 7386b0e529d5..ac6f1bc022f5 100644 --- a/salt/states/saltmod.py +++ b/salt/states/saltmod.py @@ -442,7 +442,8 @@ def function( kwarg=None, timeout=None, batch=None, - subset=None): + subset=None, + **kwargs): # pylint: disable=unused-argument ''' Execute a single module function on a remote minion via salt or salt-ssh @@ -497,15 +498,15 @@ def function( ''' func_ret = {'name': name, - 'changes': {}, - 'comment': '', - 'result': True} + 'changes': {}, + 'comment': '', + 'result': True} if kwarg is None: kwarg = {} if isinstance(arg, six.string_types): - func_ret['warnings'] = ['Please specify \'arg\' as a list, not a string. ' - 'Modifying in place, but please update SLS file ' - 'to remove this warning.'] + func_ret['warnings'] = [ + 'Please specify \'arg\' as a list of arguments.' + ] arg = arg.split() cmd_kw = {'arg': arg or [], 'kwarg': kwarg, 'ret': ret, 'timeout': timeout} @@ -539,9 +540,8 @@ def function( fun = name if __opts__['test'] is True: - func_ret['comment'] = ( - 'Function {0} will be executed on target {1} as test={2}' - ).format(fun, tgt, six.text_type(False)) + func_ret['comment'] = \ + 'Function {0} would be executed on target {1}'.format(fun, tgt) func_ret['result'] = None return func_ret try: @@ -783,7 +783,7 @@ def runner(name, **kwargs): return ret -def parallel_runners(name, runners): +def parallel_runners(name, runners, **kwargs): # pylint: disable=unused-argument ''' Executes multiple runner modules on the master in parallel. diff --git a/salt/states/test.py b/salt/states/test.py index a5659b577fbe..aea09a7a9201 100644 --- a/salt/states/test.py +++ b/salt/states/test.py @@ -67,7 +67,7 @@ def nop(name, **kwargs): return succeed_without_changes(name) -def succeed_without_changes(name): +def succeed_without_changes(name, **kwargs): # pylint: disable=unused-argument ''' Returns successful. @@ -85,7 +85,7 @@ def succeed_without_changes(name): return ret -def fail_without_changes(name): +def fail_without_changes(name, **kwargs): # pylint: disable=unused-argument ''' Returns failure. @@ -108,7 +108,7 @@ def fail_without_changes(name): return ret -def succeed_with_changes(name): +def succeed_with_changes(name, **kwargs): # pylint: disable=unused-argument ''' Returns successful and changes is not empty @@ -141,7 +141,7 @@ def succeed_with_changes(name): return ret -def fail_with_changes(name): +def fail_with_changes(name, **kwargs): # pylint: disable=unused-argument ''' Returns failure and changes is not empty. 
diff --git a/tests/unit/states/test_saltmod.py b/tests/unit/states/test_saltmod.py index d14edafed73d..2408ead9e136 100644 --- a/tests/unit/states/test_saltmod.py +++ b/tests/unit/states/test_saltmod.py @@ -175,13 +175,11 @@ def test_function(self): name = 'state' tgt = 'larry' - comt = ('Function state will be executed' - ' on target {0} as test=False'.format(tgt)) - ret = {'name': name, 'changes': {}, 'result': None, - 'comment': comt} + 'comment': 'Function state would be executed ' + 'on target {0}'.format(tgt)} with patch.dict(saltmod.__opts__, {'test': True}): self.assertDictEqual(saltmod.function(name, tgt), ret) From bcfad506bbf45e940ddab2db0c06f809c270a246 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Fri, 28 Sep 2018 21:30:06 -0500 Subject: [PATCH 085/340] Add integration tests for test mode onchanges/prereq --- .../files/file/base/onchanges_prereq.sls | 22 ++++ .../files/file/base/orch/req_test.sls | 3 + tests/integration/runners/test_state.py | 116 ++++++++++++++++++ tests/integration/states/test_file.py | 81 ++++++++++++ tests/unit/states/test_file.py | 5 +- 5 files changed, 224 insertions(+), 3 deletions(-) create mode 100644 tests/integration/files/file/base/onchanges_prereq.sls create mode 100644 tests/integration/files/file/base/orch/req_test.sls diff --git a/tests/integration/files/file/base/onchanges_prereq.sls b/tests/integration/files/file/base/onchanges_prereq.sls new file mode 100644 index 000000000000..9ab27b71e23e --- /dev/null +++ b/tests/integration/files/file/base/onchanges_prereq.sls @@ -0,0 +1,22 @@ +one: + file.managed: + - name: {{ pillar['file1'] }} + - source: {{ pillar['source'] }} + +# This should run because there were changes +two: + test.succeed_without_changes: + - {{ pillar['req'] }}: + - file: one + +# Run the same state as "one" again, this should not cause changes +three: + file.managed: + - name: {{ pillar['file2'] }} + - source: {{ pillar['source'] }} + +# This should not run because there should be no changes +four: + test.succeed_without_changes: + - {{ pillar['req'] }}: + - file: three diff --git a/tests/integration/files/file/base/orch/req_test.sls b/tests/integration/files/file/base/orch/req_test.sls new file mode 100644 index 000000000000..cb992de82998 --- /dev/null +++ b/tests/integration/files/file/base/orch/req_test.sls @@ -0,0 +1,3 @@ +{{ salt['runtests_helpers.get_salt_temp_dir_for_path']('orch.req_test') }}: + file.managed: + - contents: 'Hello world!' 
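For reference, a rough sketch of how an SLS like onchanges_prereq.sls above can be driven outside the test harness; the pillar keys mirror the Jinja variables the SLS expects, the file paths are hypothetical, and the local-client usage shown here is only one way to invoke it:

    import salt.client

    caller = salt.client.Caller()  # minion-side local client
    pillar = {
        'file1': '/tmp/file-one',   # hypothetical destination files
        'file2': '/tmp/file-two',
        'source': 'salt://testfile',
        'req': 'onchanges',         # or 'prereq', as in the tests below
    }
    # Apply in test mode, as the integration tests below do via run_function().
    ret = caller.cmd('state.apply', 'onchanges_prereq', pillar=pillar, test=True)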
diff --git a/tests/integration/runners/test_state.py b/tests/integration/runners/test_state.py index d41909c1806c..f674a40b7828 100644 --- a/tests/integration/runners/test_state.py +++ b/tests/integration/runners/test_state.py @@ -643,3 +643,119 @@ def test_orchestration_with_pillar_dot_items(self): self.assertTrue(received) del listener signal.alarm(0) + + def test_orchestration_onchanges_and_prereq(self): + ''' + Test to confirm that the parallel state requisite works in orch + we do this by running 10 test.sleep's of 10 seconds, and insure it only takes roughly 10s + ''' + self.write_conf({ + 'fileserver_backend': ['roots'], + 'file_roots': { + 'base': [self.base_env], + }, + }) + + orch_sls = os.path.join(self.base_env, 'orch.sls') + with salt.utils.files.fopen(orch_sls, 'w') as fp_: + fp_.write(textwrap.dedent(''' + manage_a_file: + salt.state: + - tgt: minion + - sls: + - orch.req_test + + do_onchanges: + salt.function: + - tgt: minion + - name: test.ping + - onchanges: + - salt: manage_a_file + + do_prereq: + salt.function: + - tgt: minion + - name: test.ping + - prereq: + - salt: manage_a_file + ''')) + + listener = salt.utils.event.get_event( + 'master', + sock_dir=self.master_opts['sock_dir'], + transport=self.master_opts['transport'], + opts=self.master_opts) + + try: + jid1 = self.run_run_plus( + 'state.orchestrate', + 'orch', + test=True, + __reload_config=True).get('jid') + + # Run for real to create the file + self.run_run_plus( + 'state.orchestrate', + 'orch', + __reload_config=True).get('jid') + + # Run again in test mode. Since there were no changes, the + # requisites should not fire. + jid2 = self.run_run_plus( + 'state.orchestrate', + 'orch', + test=True, + __reload_config=True).get('jid') + finally: + try: + os.remove(os.path.join(TMP, 'orch.req_test')) + except OSError: + pass + + assert jid1 is not None + assert jid2 is not None + + tags = {'salt/run/{0}/ret'.format(x): x for x in (jid1, jid2)} + ret = {} + + signal.signal(signal.SIGALRM, self.alarm_handler) + signal.alarm(self.timeout) + try: + while True: + event = listener.get_event(full=True) + if event is None: + continue + + if event['tag'] in tags: + ret[tags.pop(event['tag'])] = self.repack_state_returns( + event['data']['return']['data']['master'] + ) + if not tags: + # If tags is empty, we've grabbed all the returns we + # wanted, so let's stop listening to the event bus. + break + finally: + del listener + signal.alarm(0) + + for sls_id in ('manage_a_file', 'do_onchanges', 'do_prereq'): + # The first time through, all three states should have a None + # result, while the second time through, they should all have a + # True result. + assert ret[jid1][sls_id]['result'] is None, \ + 'result of {0} ({1}) is not None'.format( + sls_id, + ret[jid1][sls_id]['result']) + assert ret[jid2][sls_id]['result'] is True, \ + 'result of {0} ({1}) is not True'.format( + sls_id, + ret[jid2][sls_id]['result']) + + # The file.managed state should have shown changes in the test mode + # return data. + assert ret[jid1]['manage_a_file']['changes'] + + # After the file was created, running again in test mode should have + # shown no changes. 
+ assert not ret[jid2]['manage_a_file']['changes'], \ + ret[jid2]['manage_a_file']['changes'] diff --git a/tests/integration/states/test_file.py b/tests/integration/states/test_file.py index 808598a4aea2..7fcba492db77 100644 --- a/tests/integration/states/test_file.py +++ b/tests/integration/states/test_file.py @@ -755,6 +755,87 @@ def test_managed_keep_source_false_salt(self, name): result = self.run_function('cp.is_cached', [source, saltenv]) assert result == '', 'File is still cached at {0}'.format(result) + @with_tempfile(create=False) + @with_tempfile(create=False) + def test_file_managed_onchanges(self, file1, file2): + ''' + Test file.managed state with onchanges + ''' + pillar = {'file1': file1, + 'file2': file2, + 'source': 'salt://testfile', + 'req': 'onchanges'} + + # Lay down the file used in the below SLS to ensure that when it is + # run, there are no changes. + self.run_state( + 'file.managed', + name=pillar['file2'], + source=pillar['source']) + + ret = self.repack_state_returns( + self.run_function( + 'state.apply', + mods='onchanges_prereq', + pillar=pillar, + test=True, + ) + ) + # The file states should both exit with None + assert ret['one']['result'] is None, ret['one']['result'] + assert ret['three']['result'] is True, ret['three']['result'] + # The first file state should have changes, since a new file was + # created. The other one should not, since we already created that file + # before applying the SLS file. + assert ret['one']['changes'] + assert not ret['three']['changes'], ret['three']['changes'] + # The state watching 'one' should have been run due to changes + assert ret['two']['comment'] == 'Success!', ret['two']['comment'] + # The state watching 'three' should not have been run + assert ret['four']['comment'] == \ + 'State was not run because none of the onchanges reqs changed', \ + ret['four']['comment'] + + @with_tempfile(create=False) + @with_tempfile(create=False) + def test_file_managed_prereq(self, file1, file2): + ''' + Test file.managed state with prereq + ''' + pillar = {'file1': file1, + 'file2': file2, + 'source': 'salt://testfile', + 'req': 'prereq'} + + # Lay down the file used in the below SLS to ensure that when it is + # run, there are no changes. + self.run_state( + 'file.managed', + name=pillar['file2'], + source=pillar['source']) + + ret = self.repack_state_returns( + self.run_function( + 'state.apply', + mods='onchanges_prereq', + pillar=pillar, + test=True, + ) + ) + # The file states should both exit with None + assert ret['one']['result'] is None, ret['one']['result'] + assert ret['three']['result'] is True, ret['three']['result'] + # The first file state should have changes, since a new file was + # created. The other one should not, since we already created that file + # before applying the SLS file. 
+ assert ret['one']['changes'] + assert not ret['three']['changes'], ret['three']['changes'] + # The state watching 'one' should have been run due to changes + assert ret['two']['comment'] == 'Success!', ret['two']['comment'] + # The state watching 'three' should not have been run + assert ret['four']['comment'] == 'No changes detected', \ + ret['four']['comment'] + def test_directory(self): ''' file.directory diff --git a/tests/unit/states/test_file.py b/tests/unit/states/test_file.py index 43b9f952e5f2..fdd65eaf6740 100644 --- a/tests/unit/states/test_file.py +++ b/tests/unit/states/test_file.py @@ -388,8 +388,8 @@ def return_val(kwargs): with patch('salt.utils.win_functions.get_sid_from_name', return_value='test-sid'): comt = 'Created new symlink {0} -> {1}'.format(name, target) ret = return_val({'comment': comt, - 'result': True, 'changes': {'new': '/tmp/testfile.txt'}, - 'changes': {'new': name}}) + 'result': True, + 'changes': {'new': name}}) self.assertDictEqual(filestate.symlink (name, target, user=user, group=group), ret) @@ -792,7 +792,6 @@ def test_directory(self): ret = {'name': name, 'result': False, 'comment': '', - 'changes': {}, 'changes': {}} comt = ('Must provide name to file.directory') From bafd475bb19b493c755416c40a76b3506cc3919c Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Mon, 1 Oct 2018 12:57:59 -0500 Subject: [PATCH 086/340] Update kernelpkg test to reflect pchanges removal --- tests/unit/states/test_kernelpkg.py | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/tests/unit/states/test_kernelpkg.py b/tests/unit/states/test_kernelpkg.py index f2ba87ecee88..4a81aacbf47c 100644 --- a/tests/unit/states/test_kernelpkg.py +++ b/tests/unit/states/test_kernelpkg.py @@ -114,22 +114,28 @@ def test_latest_active_with_changes(self): Test - latest_active when a new kernel is available ''' reboot = MagicMock(return_value=True) - with patch.dict(kernelpkg.__salt__, {'kernelpkg.needs_reboot': reboot}): - with patch.dict(kernelpkg.__opts__, {'test': False}): - kernelpkg.__salt__['system.reboot'].reset_mock() - ret = kernelpkg.latest_active(name=STATE_NAME) - self.assertEqual(ret['name'], STATE_NAME) - self.assertTrue(ret['result']) - self.assertIsInstance(ret['changes'], dict) - self.assertIsInstance(ret['comment'], six.text_type) - self.assert_called_once(kernelpkg.__salt__['system.reboot']) + latest = MagicMock(return_value=1) + with patch.dict( + kernelpkg.__salt__, {'kernelpkg.needs_reboot': reboot, + 'kernelpkg.latest_installed': latest}), \ + patch.dict(kernelpkg.__opts__, {'test': False}): + kernelpkg.__salt__['system.reboot'].reset_mock() + ret = kernelpkg.latest_active(name=STATE_NAME) + self.assertEqual(ret['name'], STATE_NAME) + self.assertTrue(ret['result']) + self.assertIsInstance(ret['changes'], dict) + self.assertIsInstance(ret['comment'], six.text_type) + self.assert_called_once(kernelpkg.__salt__['system.reboot']) with patch.dict(kernelpkg.__opts__, {'test': True}): kernelpkg.__salt__['system.reboot'].reset_mock() ret = kernelpkg.latest_active(name=STATE_NAME) self.assertEqual(ret['name'], STATE_NAME) self.assertIsNone(ret['result']) - self.assertDictEqual(ret['changes'], {}) + self.assertDictEqual( + ret['changes'], + {'kernel': {'new': 1, 'old': 0}} + ) self.assertIsInstance(ret['comment'], six.text_type) kernelpkg.__salt__['system.reboot'].assert_not_called() From 49f26316c89332245cba65bd49f61ff34bfba5b4 Mon Sep 17 00:00:00 2001 From: Bruno Binet Date: Fri, 5 Apr 2019 15:29:40 +0200 Subject: [PATCH 087/340] Remove 
unused salt.crypt import This causes pepper to fail on windows because of missing libcrypto. See also #51655 for similar salt.crypt removals --- salt/fileclient.py | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/fileclient.py b/salt/fileclient.py index e6b7d406e767..718f957c2098 100644 --- a/salt/fileclient.py +++ b/salt/fileclient.py @@ -20,7 +20,6 @@ CommandExecutionError, MinionError ) import salt.client -import salt.crypt import salt.loader import salt.payload import salt.transport.client From 9607c380652631cb041c5ff6c6da134b1162dffa Mon Sep 17 00:00:00 2001 From: Bruno Binet Date: Fri, 5 Apr 2019 15:37:54 +0200 Subject: [PATCH 088/340] Also remove unused salt.crypt import in pillar/__init__.py --- salt/pillar/__init__.py | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/pillar/__init__.py b/salt/pillar/__init__.py index 94dd9695ca1d..f2a1a86e25a2 100644 --- a/salt/pillar/__init__.py +++ b/salt/pillar/__init__.py @@ -19,7 +19,6 @@ import salt.loader import salt.fileclient import salt.minion -import salt.crypt import salt.transport.client import salt.utils.args import salt.utils.cache From f848aa474d98f5373124ca987891f0ffde5c959b Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Fri, 5 Apr 2019 11:31:03 -0700 Subject: [PATCH 089/340] Adding support back in with, a gated configuration option, for the old YAML Loader. --- doc/topics/releases/2019.2.1.rst | 28 ++++++++++++++++++++++ salt/renderers/yaml.py | 18 +++++++++++--- tests/unit/renderers/test_yaml.py | 39 +++++++++++++++++++++++++++++++ 3 files changed, 82 insertions(+), 3 deletions(-) diff --git a/doc/topics/releases/2019.2.1.rst b/doc/topics/releases/2019.2.1.rst index ccc0645fd960..7b37582e59da 100644 --- a/doc/topics/releases/2019.2.1.rst +++ b/doc/topics/releases/2019.2.1.rst @@ -4,3 +4,31 @@ In Progress: Salt 2019.2.1 Release Notes Version 2019.2.1 is an **unreleased** bugfix release for :ref:`2019.2.0 `. This release is still in progress and has not been released yet. + +Change to YAML Renderer +======================= + +.. code-block:: jinja + + /etc/foo.conf: + file.managed: + - source: salt://foo.conf.jinja + - template: jinja + - context: + data: {{ data }} + +In 2019.2.0, the above SLS will result in an error message following changes to +the YAML renderer that now require the new Jinja filter `tojson`. + +.. code-block:: jinja + + /etc/foo.conf: + file.managed: + - source: salt://foo.conf.jinja + - template: jinja + - context: + data: {{ data|tojson }} + +In 2019.2.1, we introduce a new configuration option for both the Salt master and Salt minion +configurations to be able to support the older YAML renderer. Using the option +`use_yamlloader_old` will allow the YAML renderer to function as before. 
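For completeness, the option described in these release notes is read from __opts__ by the renderer, so it is a top-level setting in the master or minion configuration file. A minimal sketch, assuming default config file locations:

    # /etc/salt/master or /etc/salt/minion
    use_yamlloader_old: True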
diff --git a/salt/renderers/yaml.py b/salt/renderers/yaml.py index 93ae327e0f48..ea3e7ceb1a1d 100644 --- a/salt/renderers/yaml.py +++ b/salt/renderers/yaml.py @@ -16,7 +16,8 @@ # Import salt libs import salt.utils.url -from salt.utils.yamlloader import SaltYamlSafeLoader, load +import salt.utils.yamlloader as yamlloader_new +import salt.utils.yamlloader_old as yamlloader_old from salt.utils.odict import OrderedDict from salt.exceptions import SaltRenderError from salt.ext import six @@ -35,7 +36,11 @@ def get_yaml_loader(argline): Return the ordered dict yaml loader ''' def yaml_loader(*args): - return SaltYamlSafeLoader(*args, dictclass=OrderedDict) + if __opts__.get('use_yamlloader_old'): + yamlloader = yamlloader_old + else: + yamlloader = yamlloader_new + return yamlloader.SaltYamlSafeLoader(*args, dictclass=OrderedDict) return yaml_loader @@ -46,11 +51,18 @@ def render(yaml_data, saltenv='base', sls='', argline='', **kws): :rtype: A Python data structure ''' + if __opts__.get('use_yamlloader_old'): + log.warning('Using the old YAML Loader for rendering, ' + 'consider disabling this and using the tojson' + ' filter.') + yamlloader = yamlloader_old + else: + yamlloader = yamlloader_new if not isinstance(yaml_data, string_types): yaml_data = yaml_data.read() with warnings.catch_warnings(record=True) as warn_list: try: - data = load(yaml_data, Loader=get_yaml_loader(argline)) + data = yamlloader.load(yaml_data, Loader=get_yaml_loader(argline)) except ScannerError as exc: err_type = _ERROR_MAP.get(exc.problem, exc.problem) line_num = exc.problem_mark.line + 1 diff --git a/tests/unit/renderers/test_yaml.py b/tests/unit/renderers/test_yaml.py index 7eb82b89d021..b7b093c1c460 100644 --- a/tests/unit/renderers/test_yaml.py +++ b/tests/unit/renderers/test_yaml.py @@ -3,12 +3,19 @@ # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals +import collections +import textwrap + # Import Salt Testing libs from tests.support.mixins import LoaderModuleMockMixin from tests.support.unit import TestCase +from tests.support.mock import ( + patch +) # Import Salt libs import salt.renderers.yaml as yaml +from salt.ext import six class YAMLRendererTestCase(TestCase, LoaderModuleMockMixin): @@ -16,6 +23,27 @@ class YAMLRendererTestCase(TestCase, LoaderModuleMockMixin): def setup_loader_modules(self): return {yaml: {}} + def assert_unicode(self, value): + ''' + Make sure the entire data structure is unicode + ''' + if six.PY3: + return + if isinstance(value, six.string_types): + if not isinstance(value, six.text_type): + self.raise_error(value) + elif isinstance(value, collections.Mapping): + for k, v in six.iteritems(value): + self.assert_unicode(k) + self.assert_unicode(v) + elif isinstance(value, collections.Iterable): + for item in value: + self.assert_unicode(item) + + def assert_matches(self, ret, expected): + self.assertEqual(ret, expected) + self.assert_unicode(ret) + def test_yaml_render_string(self): data = 'string' result = yaml.render(data) @@ -27,3 +55,14 @@ def test_yaml_render_unicode(self): result = yaml.render(data) self.assertEqual(result, u'python unicode string') + + def test_yaml_render_old_unicode(self): + config = {'use_yamlloader_old': True} + with patch.dict(yaml.__opts__, config): # pylint: disable=no-member + self.assert_matches( + yaml.render(textwrap.dedent('''\ + foo: + a: Д + b: {'a': u'\\u0414'}''')), + {'foo': {'a': u'\u0414', 'b': {'a': u'\u0414'}}} + ) From 71cd303a8f9001946893a105a69738f2d8f71d78 Mon Sep 17 00:00:00 2001 From: "Gareth J. 
Greenaway" Date: Fri, 5 Apr 2019 12:06:28 -0700 Subject: [PATCH 090/340] Adding missing yamlloader_old.py --- salt/utils/yamlloader_old.py | 225 +++++++++++++++++++++++++++++++++++ 1 file changed, 225 insertions(+) create mode 100644 salt/utils/yamlloader_old.py diff --git a/salt/utils/yamlloader_old.py b/salt/utils/yamlloader_old.py new file mode 100644 index 000000000000..2ab301bfeb8e --- /dev/null +++ b/salt/utils/yamlloader_old.py @@ -0,0 +1,225 @@ +# -*- coding: utf-8 -*- +''' +Custom YAML loading in Salt +''' + +# Import python libs +from __future__ import absolute_import, print_function, unicode_literals +import re +import warnings + +import yaml # pylint: disable=blacklisted-import +from yaml.nodes import MappingNode, SequenceNode +from yaml.constructor import ConstructorError +try: + yaml.Loader = yaml.CLoader + yaml.Dumper = yaml.CDumper +except Exception: + pass + +import salt.utils.stringutils + +__all__ = ['SaltYamlSafeLoader', 'load', 'safe_load'] + + +class DuplicateKeyWarning(RuntimeWarning): + ''' + Warned when duplicate keys exist + ''' + + +warnings.simplefilter('always', category=DuplicateKeyWarning) + + +# with code integrated from https://gist.github.com/844388 +class SaltYamlSafeLoader(yaml.SafeLoader): + ''' + Create a custom YAML loader that uses the custom constructor. This allows + for the YAML loading defaults to be manipulated based on needs within salt + to make things like sls file more intuitive. + ''' + def __init__(self, stream, dictclass=dict): + super(SaltYamlSafeLoader, self).__init__(stream) + if dictclass is not dict: + # then assume ordered dict and use it for both !map and !omap + self.add_constructor( + 'tag:yaml.org,2002:map', + type(self).construct_yaml_map) + self.add_constructor( + 'tag:yaml.org,2002:omap', + type(self).construct_yaml_map) + self.add_constructor( + 'tag:yaml.org,2002:str', + type(self).construct_yaml_str) + self.add_constructor( + 'tag:yaml.org,2002:python/unicode', + type(self).construct_unicode) + self.add_constructor( + 'tag:yaml.org,2002:timestamp', + type(self).construct_scalar) + self.dictclass = dictclass + + def construct_yaml_map(self, node): + data = self.dictclass() + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_unicode(self, node): + return node.value + + def construct_mapping(self, node, deep=False): + ''' + Build the mapping for YAML + ''' + if not isinstance(node, MappingNode): + raise ConstructorError( + None, + None, + 'expected a mapping node, but found {0}'.format(node.id), + node.start_mark) + + self.flatten_mapping(node) + + context = 'while constructing a mapping' + mapping = self.dictclass() + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + try: + hash(key) + except TypeError: + raise ConstructorError( + context, + node.start_mark, + "found unacceptable key {0}".format(key_node.value), + key_node.start_mark) + value = self.construct_object(value_node, deep=deep) + if key in mapping: + raise ConstructorError( + context, + node.start_mark, + "found conflicting ID '{0}'".format(key), + key_node.start_mark) + mapping[key] = value + return mapping + + def construct_scalar(self, node): + ''' + Verify integers and pass them in correctly is they are declared + as octal + ''' + if node.tag == 'tag:yaml.org,2002:int': + if node.value == '0': + pass + elif node.value.startswith('0') and not node.value.startswith(('0b', '0x')): + node.value = node.value.lstrip('0') + # If value was all zeros, node.value would have been reduced to 
+ # an empty string. Change it to '0'. + if node.value == '': + node.value = '0' + elif node.tag == 'tag:yaml.org,2002:str': + # If any string comes in as a quoted unicode literal, eval it into + # the proper unicode string type. + if re.match(r'^u([\'"]).+\1$', node.value, flags=re.IGNORECASE): + node.value = eval(node.value, {}, {}) # pylint: disable=W0123 + return super(SaltYamlSafeLoader, self).construct_scalar(node) + + def construct_yaml_str(self, node): + value = self.construct_scalar(node) + return salt.utils.stringutils.to_unicode(value) + + def fetch_plain(self): + ''' + Handle unicode literal strings which appear inline in the YAML + ''' + orig_line = self.line + orig_column = self.column + orig_pointer = self.pointer + log.warning('=== Handling unicode literal string ===') + try: + return super(SaltYamlSafeLoader, self).fetch_plain() + except yaml.scanner.ScannerError as exc: + problem_line = self.line + problem_column = self.column + problem_pointer = self.pointer + if exc.problem == "found unexpected ':'": + # Reset to prior position + self.line = orig_line + self.column = orig_column + self.pointer = orig_pointer + if self.peek(0) == 'u': + # Might be a unicode literal string, check for 2nd char and + # call the appropriate fetch func if it's a quote + quote_char = self.peek(1) + if quote_char in ("'", '"'): + # Skip the "u" prefix by advancing the column and + # pointer by 1 + self.column += 1 + self.pointer += 1 + if quote_char == '\'': + return self.fetch_single() + else: + return self.fetch_double() + else: + # This wasn't a unicode literal string, so the caught + # exception was correct. Restore the old position and + # then raise the caught exception. + self.line = problem_line + self.column = problem_column + self.pointer = problem_pointer + # Raise the caught exception + raise exc + + def flatten_mapping(self, node): + merge = [] + index = 0 + while index < len(node.value): + key_node, value_node = node.value[index] + + if key_node.tag == 'tag:yaml.org,2002:merge': + del node.value[index] + if isinstance(value_node, MappingNode): + self.flatten_mapping(value_node) + merge.extend(value_node.value) + elif isinstance(value_node, SequenceNode): + submerge = [] + for subnode in value_node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing a mapping", + node.start_mark, + "expected a mapping for merging, but found {0}".format(subnode.id), + subnode.start_mark) + self.flatten_mapping(subnode) + submerge.append(subnode.value) + submerge.reverse() + for value in submerge: + merge.extend(value) + else: + raise ConstructorError("while constructing a mapping", + node.start_mark, + "expected a mapping or list of mappings for merging, but found {0}".format(value_node.id), + value_node.start_mark) + elif key_node.tag == 'tag:yaml.org,2002:value': + key_node.tag = 'tag:yaml.org,2002:str' + index += 1 + else: + index += 1 + if merge: + # Here we need to discard any duplicate entries based on key_node + existing_nodes = [name_node.value for name_node, value_node in node.value] + mergeable_items = [x for x in merge if x[0].value not in existing_nodes] + + node.value = mergeable_items + node.value + + +def load(stream, Loader=SaltYamlSafeLoader): + return yaml.load(stream, Loader=Loader) + + +def safe_load(stream, Loader=SaltYamlSafeLoader): + ''' + .. versionadded:: 2018.3.0 + + Helper function which automagically uses our custom loader. 
+ ''' + return yaml.load(stream, Loader=Loader) From 27679848998c8a8ca5c859771600652ffe9e4e5f Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Fri, 5 Apr 2019 12:10:15 -0700 Subject: [PATCH 091/340] Removing logging entry --- salt/utils/yamlloader_old.py | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/utils/yamlloader_old.py b/salt/utils/yamlloader_old.py index 2ab301bfeb8e..8282c04ddef5 100644 --- a/salt/utils/yamlloader_old.py +++ b/salt/utils/yamlloader_old.py @@ -135,7 +135,6 @@ def fetch_plain(self): orig_line = self.line orig_column = self.column orig_pointer = self.pointer - log.warning('=== Handling unicode literal string ===') try: return super(SaltYamlSafeLoader, self).fetch_plain() except yaml.scanner.ScannerError as exc: From cce6200facacf3f78c9b20840c28e5ddce51c1b8 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Fri, 5 Apr 2019 13:42:18 -0700 Subject: [PATCH 092/340] Fixing lint Fixing lint --- tests/unit/modules/test_win_file.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/modules/test_win_file.py b/tests/unit/modules/test_win_file.py index cb9fed352109..b11fd0cd4870 100644 --- a/tests/unit/modules/test_win_file.py +++ b/tests/unit/modules/test_win_file.py @@ -357,4 +357,4 @@ def test_issue_52002_check_file_remove_symlink(self): self.assertFalse(win_file.directory_exists(base)) finally: if os.path.exists(base): - win_file.remove(base) \ No newline at end of file + win_file.remove(base) From 2db7a988b2487a8c8dcc60278d8a93707d833015 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Wed, 6 Jun 2018 09:49:36 +0200 Subject: [PATCH 093/340] Let virt running state provide errors As mentioned in issue 47972, applying the virt.running state doesn't report any error if the libvirt create call actually failed. This commit introduces proper handling of the libvirt errors to let users see the libvirt error in case of failure. Also add test cases for virt.running to prevent regression. (cherry picked from commit 451e7da55bd232546c4d30ec36d432de2d5a14ec) --- tests/unit/states/test_virt.py | 41 +++++++++++++++++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) diff --git a/tests/unit/states/test_virt.py b/tests/unit/states/test_virt.py index 2e421319ad9a..704ae4728d42 100644 --- a/tests/unit/states/test_virt.py +++ b/tests/unit/states/test_virt.py @@ -23,13 +23,29 @@ import salt.utils.files +class LibvirtMock(MagicMock): # pylint: disable=too-many-ancestors + ''' + libvirt library mockup + ''' + + class libvirtError(Exception): # pylint: disable=invalid-name + ''' + libvirt error mockup + ''' + + @skipIf(NO_MOCK, NO_MOCK_REASON) class LibvirtTestCase(TestCase, LoaderModuleMockMixin): ''' Test cases for salt.states.libvirt ''' def setup_loader_modules(self): - return {virt: {}} + self.mock_libvirt = LibvirtMock() # pylint: disable=attribute-defined-outside-init + self.addCleanup(delattr, self, 'mock_libvirt') + loader_globals = { + 'libvirt': self.mock_libvirt + } + return {virt: loader_globals} @classmethod def setUpClass(cls): @@ -195,3 +211,26 @@ def test_keys_with_all_options(self): locality='Los_Angeles', organization='SaltStack', expiration_days=700), ret) + + def test_running(self): + ''' + running state test cases. 
+ ''' + ret = {'name': 'myvm', + 'changes': {}, + 'result': True, + 'comment': 'myvm is running'} + with patch.dict(virt.__salt__, { # pylint: disable=no-member + 'virt.vm_state': MagicMock(return_value='stopped'), + 'virt.start': MagicMock(return_value=0) + }): + ret.update({'changes': {'myvm': 'Domain started'}, + 'comment': 'Domain myvm started'}) + self.assertDictEqual(virt.running('myvm'), ret) + + with patch.dict(virt.__salt__, { # pylint: disable=no-member + 'virt.vm_state': MagicMock(return_value='stopped'), + 'virt.start': MagicMock(side_effect=[self.mock_libvirt.libvirtError('libvirt error msg')]) + }): + ret.update({'changes': {}, 'result': False, 'comment': 'libvirt error msg'}) + self.assertDictEqual(virt.running('myvm'), ret) From c6a444ba875122b29c01d8be9b3ab9e1c95e321f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Fri, 22 Jun 2018 11:52:13 +0200 Subject: [PATCH 094/340] Add test case for virt.running Test virt.running when the domain needs to be edited and started. This will prevent regressions when improving virt.running to handle the new virt.init parameters. (cherry picked from commit 495db345a570cb14cd9b0ae96e1bb0f3fad6aef0) --- tests/unit/states/test_virt.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tests/unit/states/test_virt.py b/tests/unit/states/test_virt.py index 704ae4728d42..6999ab943334 100644 --- a/tests/unit/states/test_virt.py +++ b/tests/unit/states/test_virt.py @@ -21,6 +21,7 @@ # Import Salt Libs import salt.states.virt as virt import salt.utils.files +from salt.exceptions import CommandExecutionError class LibvirtMock(MagicMock): # pylint: disable=too-many-ancestors @@ -228,6 +229,17 @@ def test_running(self): 'comment': 'Domain myvm started'}) self.assertDictEqual(virt.running('myvm'), ret) + init_mock = MagicMock(return_value=True) + with patch.dict(virt.__salt__, { # pylint: disable=no-member + 'virt.vm_state': MagicMock(side_effect=CommandExecutionError('not found')), + 'virt.init': init_mock, + 'virt.start': MagicMock(return_value=0) + }): + ret.update({'changes': {'myvm': 'Domain defined and started'}, + 'comment': 'Domain myvm defined and started'}) + self.assertDictEqual(virt.running('myvm', cpu=2, mem=2048, image='/path/to/img.qcow2'), ret) + init_mock.assert_called_with('myvm', cpu=2, mem=2048, image='/path/to/img.qcow2') + with patch.dict(virt.__salt__, { # pylint: disable=no-member 'virt.vm_state': MagicMock(return_value='stopped'), 'virt.start': MagicMock(side_effect=[self.mock_libvirt.libvirtError('libvirt error msg')]) From 0681d865f6216f4de481597d79f4c8bc9a312f66 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Mon, 25 Jun 2018 15:58:40 +0200 Subject: [PATCH 095/340] virt.running support for all virt.init options virt.running actually calls virt.init if the virtual machine doesn't exist. Let the user define all the virt.init possible options in his running states too. 
(cherry picked from commit cb00a5f9b4c9a2a863da3c1107ca6458a4092c3d) --- tests/unit/states/test_virt.py | 72 +++++++++++++++++++++++++++++++++- 1 file changed, 70 insertions(+), 2 deletions(-) diff --git a/tests/unit/states/test_virt.py b/tests/unit/states/test_virt.py index 6999ab943334..a496ba3ea479 100644 --- a/tests/unit/states/test_virt.py +++ b/tests/unit/states/test_virt.py @@ -237,8 +237,76 @@ def test_running(self): }): ret.update({'changes': {'myvm': 'Domain defined and started'}, 'comment': 'Domain myvm defined and started'}) - self.assertDictEqual(virt.running('myvm', cpu=2, mem=2048, image='/path/to/img.qcow2'), ret) - init_mock.assert_called_with('myvm', cpu=2, mem=2048, image='/path/to/img.qcow2') + self.assertDictEqual(virt.running('myvm', + cpu=2, + mem=2048, + image='/path/to/img.qcow2'), ret) + init_mock.assert_called_with('myvm', cpu=2, mem=2048, image='/path/to/img.qcow2', + disk=None, disks=None, nic=None, interfaces=None, + graphics=None, hypervisor=None, + seed=True, install=True, pub_key=None, priv_key=None) + + with patch.dict(virt.__salt__, { # pylint: disable=no-member + 'virt.vm_state': MagicMock(side_effect=CommandExecutionError('not found')), + 'virt.init': init_mock, + 'virt.start': MagicMock(return_value=0) + }): + ret.update({'changes': {'myvm': 'Domain defined and started'}, + 'comment': 'Domain myvm defined and started'}) + disks = [{ + 'name': 'system', + 'size': 8192, + 'overlay_image': True, + 'pool': 'default', + 'image': '/path/to/image.qcow2' + }, + { + 'name': 'data', + 'size': 16834 + }] + ifaces = [{ + 'name': 'eth0', + 'mac': '01:23:45:67:89:AB' + }, + { + 'name': 'eth1', + 'type': 'network', + 'source': 'admin' + }] + graphics = {'type': 'spice', 'listen': {'type': 'address', 'address': '192.168.0.1'}} + self.assertDictEqual(virt.running('myvm', + cpu=2, + mem=2048, + vm_type='qemu', + disk_profile='prod', + disks=disks, + nic_profile='prod', + interfaces=ifaces, + graphics=graphics, + seed=False, + install=False, + pub_key='/path/to/key.pub', + priv_key='/path/to/key', + connection='someconnection', + username='libvirtuser', + password='supersecret'), ret) + init_mock.assert_called_with('myvm', + cpu=2, + mem=2048, + image=None, + disk='prod', + disks=disks, + nic='prod', + interfaces=ifaces, + graphics=graphics, + hypervisor='qemu', + seed=False, + install=False, + pub_key='/path/to/key.pub', + priv_key='/path/to/key', + connection='someconnection', + username='libvirtuser', + password='supersecret') with patch.dict(virt.__salt__, { # pylint: disable=no-member 'virt.vm_state': MagicMock(return_value='stopped'), From 13d7819c7fe0f7c91c2d7f396d4ab855d2f61043 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Thu, 28 Jun 2018 09:42:07 +0200 Subject: [PATCH 096/340] Add missing virt states unit tests So far only the virt.running state is unit tested. Add tests for the other states. Deprecated virt states have been purposedly left untested. 
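The states covered below (stopped, powered_off, snapshot, rebooted, network_running, pool_running) share one shape, which the tests exercise case by case: look the object up, call the matching virt execution function, and turn a libvirtError into a failed result. A condensed sketch of that shape, written only to make the long test cases easier to follow; it is not a verbatim extract of salt/states/virt.py, and __salt__ plus the libvirt binding are provided by the Salt loader in the real module:

    def stopped_like_state(name, connection=None, username=None, password=None):
        # Generic outline of the domain-oriented states tested below.
        ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
        if name not in __salt__['virt.list_domains']():
            ret.update({'result': False, 'comment': 'No changes had happened'})
            return ret
        try:
            __salt__['virt.shutdown'](name, connection=connection,
                                      username=username, password=password)
            ret['changes'] = {'stopped': [{'domain': name, 'shutdown': True}]}
            ret['comment'] = 'Machine has been shut down'
        except libvirt.libvirtError as err:
            # A libvirt failure is reported, but the state does not blow up.
            ret.update({'changes': {'ignored': [{'domain': name,
                                                 'issue': err.get_error_message()}]},
                        'result': False,
                        'comment': 'No changes had happened'})
        return ret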
(cherry picked from commit fc75872fb63e254eecc782168ff8b37157d9e514) --- tests/unit/states/test_virt.py | 341 +++++++++++++++++++++++++++++++++ 1 file changed, 341 insertions(+) diff --git a/tests/unit/states/test_virt.py b/tests/unit/states/test_virt.py index a496ba3ea479..29735fb6dbf0 100644 --- a/tests/unit/states/test_virt.py +++ b/tests/unit/states/test_virt.py @@ -23,6 +23,9 @@ import salt.utils.files from salt.exceptions import CommandExecutionError +# Import 3rd-party libs +from salt.ext import six + class LibvirtMock(MagicMock): # pylint: disable=too-many-ancestors ''' @@ -34,6 +37,12 @@ class libvirtError(Exception): # pylint: disable=invalid-name libvirt error mockup ''' + def get_error_message(self): + ''' + Fake function return error message + ''' + return six.text_type(self) + @skipIf(NO_MOCK, NO_MOCK_REASON) class LibvirtTestCase(TestCase, LoaderModuleMockMixin): @@ -314,3 +323,335 @@ def test_running(self): }): ret.update({'changes': {}, 'result': False, 'comment': 'libvirt error msg'}) self.assertDictEqual(virt.running('myvm'), ret) + + def test_stopped(self): + ''' + stopped state test cases. + ''' + ret = {'name': 'myvm', + 'changes': {}, + 'result': True} + + shutdown_mock = MagicMock(return_value=True) + with patch.dict(virt.__salt__, { # pylint: disable=no-member + 'virt.list_domains': MagicMock(return_value=['myvm', 'vm1']), + 'virt.shutdown': shutdown_mock + }): + ret.update({'changes': { + 'stopped': [{'domain': 'myvm', 'shutdown': True}] + }, + 'comment': 'Machine has been shut down'}) + self.assertDictEqual(virt.stopped('myvm'), ret) + shutdown_mock.assert_called_with('myvm', connection=None, username=None, password=None) + + with patch.dict(virt.__salt__, { # pylint: disable=no-member + 'virt.list_domains': MagicMock(return_value=['myvm', 'vm1']), + 'virt.shutdown': shutdown_mock, + }): + self.assertDictEqual(virt.stopped('myvm', + connection='myconnection', + username='user', + password='secret'), ret) + shutdown_mock.assert_called_with('myvm', connection='myconnection', username='user', password='secret') + + with patch.dict(virt.__salt__, { # pylint: disable=no-member + 'virt.list_domains': MagicMock(return_value=['myvm', 'vm1']), + 'virt.shutdown': MagicMock(side_effect=self.mock_libvirt.libvirtError('Some error')) + }): + ret.update({'changes': {'ignored': [{'domain': 'myvm', 'issue': 'Some error'}]}, + 'result': False, + 'comment': 'No changes had happened'}) + self.assertDictEqual(virt.stopped('myvm'), ret) + + with patch.dict(virt.__salt__, {'virt.list_domains': MagicMock(return_value=[])}): # pylint: disable=no-member + ret.update({'changes': {}, 'result': False, 'comment': 'No changes had happened'}) + self.assertDictEqual(virt.stopped('myvm'), ret) + + def test_powered_off(self): + ''' + powered_off state test cases. 
+ ''' + ret = {'name': 'myvm', + 'changes': {}, + 'result': True} + + stop_mock = MagicMock(return_value=True) + with patch.dict(virt.__salt__, { # pylint: disable=no-member + 'virt.list_domains': MagicMock(return_value=['myvm', 'vm1']), + 'virt.stop': stop_mock + }): + ret.update({'changes': { + 'unpowered': [{'domain': 'myvm', 'stop': True}] + }, + 'comment': 'Machine has been powered off'}) + self.assertDictEqual(virt.powered_off('myvm'), ret) + stop_mock.assert_called_with('myvm', connection=None, username=None, password=None) + + with patch.dict(virt.__salt__, { # pylint: disable=no-member + 'virt.list_domains': MagicMock(return_value=['myvm', 'vm1']), + 'virt.stop': stop_mock, + }): + self.assertDictEqual(virt.powered_off('myvm', + connection='myconnection', + username='user', + password='secret'), ret) + stop_mock.assert_called_with('myvm', connection='myconnection', username='user', password='secret') + + with patch.dict(virt.__salt__, { # pylint: disable=no-member + 'virt.list_domains': MagicMock(return_value=['myvm', 'vm1']), + 'virt.stop': MagicMock(side_effect=self.mock_libvirt.libvirtError('Some error')) + }): + ret.update({'changes': {'ignored': [{'domain': 'myvm', 'issue': 'Some error'}]}, + 'result': False, + 'comment': 'No changes had happened'}) + self.assertDictEqual(virt.powered_off('myvm'), ret) + + with patch.dict(virt.__salt__, {'virt.list_domains': MagicMock(return_value=[])}): # pylint: disable=no-member + ret.update({'changes': {}, 'result': False, 'comment': 'No changes had happened'}) + self.assertDictEqual(virt.powered_off('myvm'), ret) + + def test_snapshot(self): + ''' + snapshot state test cases. + ''' + ret = {'name': 'myvm', + 'changes': {}, + 'result': True} + + snapshot_mock = MagicMock(return_value=True) + with patch.dict(virt.__salt__, { # pylint: disable=no-member + 'virt.list_domains': MagicMock(return_value=['myvm', 'vm1']), + 'virt.snapshot': snapshot_mock + }): + ret.update({'changes': { + 'saved': [{'domain': 'myvm', 'snapshot': True}] + }, + 'comment': 'Snapshot has been taken'}) + self.assertDictEqual(virt.snapshot('myvm'), ret) + snapshot_mock.assert_called_with('myvm', suffix=None, connection=None, username=None, password=None) + + with patch.dict(virt.__salt__, { # pylint: disable=no-member + 'virt.list_domains': MagicMock(return_value=['myvm', 'vm1']), + 'virt.snapshot': snapshot_mock, + }): + self.assertDictEqual(virt.snapshot('myvm', + suffix='snap', + connection='myconnection', + username='user', + password='secret'), ret) + snapshot_mock.assert_called_with('myvm', + suffix='snap', + connection='myconnection', + username='user', + password='secret') + + with patch.dict(virt.__salt__, { # pylint: disable=no-member + 'virt.list_domains': MagicMock(return_value=['myvm', 'vm1']), + 'virt.snapshot': MagicMock(side_effect=self.mock_libvirt.libvirtError('Some error')) + }): + ret.update({'changes': {'ignored': [{'domain': 'myvm', 'issue': 'Some error'}]}, + 'result': False, + 'comment': 'No changes had happened'}) + self.assertDictEqual(virt.snapshot('myvm'), ret) + + with patch.dict(virt.__salt__, {'virt.list_domains': MagicMock(return_value=[])}): # pylint: disable=no-member + ret.update({'changes': {}, 'result': False, 'comment': 'No changes had happened'}) + self.assertDictEqual(virt.snapshot('myvm'), ret) + + def test_rebooted(self): + ''' + rebooted state test cases. 
+ ''' + ret = {'name': 'myvm', + 'changes': {}, + 'result': True} + + reboot_mock = MagicMock(return_value=True) + with patch.dict(virt.__salt__, { # pylint: disable=no-member + 'virt.list_domains': MagicMock(return_value=['myvm', 'vm1']), + 'virt.reboot': reboot_mock + }): + ret.update({'changes': { + 'rebooted': [{'domain': 'myvm', 'reboot': True}] + }, + 'comment': 'Machine has been rebooted'}) + self.assertDictEqual(virt.rebooted('myvm'), ret) + reboot_mock.assert_called_with('myvm', connection=None, username=None, password=None) + + with patch.dict(virt.__salt__, { # pylint: disable=no-member + 'virt.list_domains': MagicMock(return_value=['myvm', 'vm1']), + 'virt.reboot': reboot_mock, + }): + self.assertDictEqual(virt.rebooted('myvm', + connection='myconnection', + username='user', + password='secret'), ret) + reboot_mock.assert_called_with('myvm', + connection='myconnection', + username='user', + password='secret') + + with patch.dict(virt.__salt__, { # pylint: disable=no-member + 'virt.list_domains': MagicMock(return_value=['myvm', 'vm1']), + 'virt.reboot': MagicMock(side_effect=self.mock_libvirt.libvirtError('Some error')) + }): + ret.update({'changes': {'ignored': [{'domain': 'myvm', 'issue': 'Some error'}]}, + 'result': False, + 'comment': 'No changes had happened'}) + self.assertDictEqual(virt.rebooted('myvm'), ret) + + with patch.dict(virt.__salt__, {'virt.list_domains': MagicMock(return_value=[])}): # pylint: disable=no-member + ret.update({'changes': {}, 'result': False, 'comment': 'No changes had happened'}) + self.assertDictEqual(virt.rebooted('myvm'), ret) + + def test_network_running(self): + ''' + network_running state test cases. + ''' + ret = {'name': 'mynet', 'changes': {}, 'result': True, 'comment': ''} + define_mock = MagicMock(return_value=True) + with patch.dict(virt.__salt__, { # pylint: disable=no-member + 'virt.network_info': MagicMock(return_value={}), + 'virt.network_define': define_mock + }): + ret.update({'changes': {'mynet': 'Network defined and started'}, + 'comment': 'Network mynet defined and started'}) + self.assertDictEqual(virt.network_running('mynet', + 'br2', + 'bridge', + vport='openvswitch', + tag=180, + autostart=False, + connection='myconnection', + username='user', + password='secret'), ret) + define_mock.assert_called_with('mynet', + 'br2', + 'bridge', + 'openvswitch', + tag=180, + autostart=False, + start=True, + connection='myconnection', + username='user', + password='secret') + + with patch.dict(virt.__salt__, { # pylint: disable=no-member + 'virt.network_info': MagicMock(return_value={'active': True}), + 'virt.network_define': define_mock, + }): + ret.update({'changes': {}, 'comment': 'Network mynet exists and is running'}) + self.assertDictEqual(virt.network_running('mynet', 'br2', 'bridge'), ret) + + start_mock = MagicMock(return_value=True) + with patch.dict(virt.__salt__, { # pylint: disable=no-member + 'virt.network_info': MagicMock(return_value={'active': False}), + 'virt.network_start': start_mock, + 'virt.network_define': define_mock, + }): + ret.update({'changes': {'mynet': 'Network started'}, 'comment': 'Network mynet started'}) + self.assertDictEqual(virt.network_running('mynet', + 'br2', + 'bridge', + connection='myconnection', + username='user', + password='secret'), ret) + start_mock.assert_called_with('mynet', connection='myconnection', username='user', password='secret') + + with patch.dict(virt.__salt__, { # pylint: disable=no-member + 'virt.network_info': MagicMock(return_value={}), + 'virt.network_define': 
MagicMock(side_effect=self.mock_libvirt.libvirtError('Some error')) + }): + ret.update({'changes': {}, 'comment': 'Some error', 'result': False}) + self.assertDictEqual(virt.network_running('mynet', 'br2', 'bridge'), ret) + + def test_pool_running(self): + ''' + pool_running state test cases. + ''' + ret = {'name': 'mypool', 'changes': {}, 'result': True, 'comment': ''} + mocks = {mock: MagicMock(return_value=True) for mock in ['define', 'autostart', 'build', 'start']} + with patch.dict(virt.__salt__, { # pylint: disable=no-member + 'virt.pool_info': MagicMock(return_value={}), + 'virt.pool_define': mocks['define'], + 'virt.pool_build': mocks['build'], + 'virt.pool_start': mocks['start'], + 'virt.pool_set_autostart': mocks['autostart'] + }): + ret.update({'changes': {'mypool': 'Pool defined and started'}, + 'comment': 'Pool mypool defined and started'}) + self.assertDictEqual(virt.pool_running('mypool', + ptype='logical', + target='/dev/base', + permissions={'mode': '0770', + 'owner': 1000, + 'group': 100, + 'label': 'seclabel'}, + source={'devices': [{'path': '/dev/sda'}]}, + transient=True, + autostart=True, + connection='myconnection', + username='user', + password='secret'), ret) + mocks['define'].assert_called_with('mypool', + ptype='logical', + target='/dev/base', + permissions={'mode': '0770', + 'owner': 1000, + 'group': 100, + 'label': 'seclabel'}, + source_devices=[{'path': '/dev/sda'}], + source_dir=None, + source_adapter=None, + source_hosts=None, + source_auth=None, + source_name=None, + source_format=None, + transient=True, + start=True, + connection='myconnection', + username='user', + password='secret') + mocks['autostart'].assert_called_with('mypool', + state='on', + connection='myconnection', + username='user', + password='secret') + mocks['build'].assert_called_with('mypool', + connection='myconnection', + username='user', + password='secret') + + with patch.dict(virt.__salt__, { # pylint: disable=no-member + 'virt.pool_info': MagicMock(return_value={'state': 'running'}), + }): + ret.update({'changes': {}, 'comment': 'Pool mypool exists and is running'}) + self.assertDictEqual(virt.pool_running('mypool', + ptype='logical', + target='/dev/base', + source={'devices': [{'path': '/dev/sda'}]}), ret) + + for mock in mocks: + mocks[mock].reset_mock() + with patch.dict(virt.__salt__, { # pylint: disable=no-member + 'virt.pool_info': MagicMock(return_value={'state': 'stopped'}), + 'virt.pool_build': mocks['build'], + 'virt.pool_start': mocks['start'] + }): + ret.update({'changes': {'mypool': 'Pool started'}, 'comment': 'Pool mypool started'}) + self.assertDictEqual(virt.pool_running('mypool', + ptype='logical', + target='/dev/base', + source={'devices': [{'path': '/dev/sda'}]}), ret) + mocks['start'].assert_called_with('mypool', connection=None, username=None, password=None) + mocks['build'].assert_not_called() + + with patch.dict(virt.__salt__, { # pylint: disable=no-member + 'virt.pool_info': MagicMock(return_value={}), + 'virt.pool_define': MagicMock(side_effect=self.mock_libvirt.libvirtError('Some error')) + }): + ret.update({'changes': {}, 'comment': 'Some error', 'result': False}) + self.assertDictEqual(virt.pool_running('mypool', + ptype='logical', + target='/dev/base', + source={'devices': [{'path': '/dev/sda'}]}), ret) From 1c65d25eb48023060df34435fd673d73d4f6a58f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Tue, 24 Jul 2018 12:08:51 +0200 Subject: [PATCH 097/340] Updating running domains in virt.running So far virt.running does nothing if the 
corresponding domain is already defined. Use the new virt.update function to change the domain configuration. (cherry picked from commit 2a5f6ae5d69be71daeab6c9cbe4dd642255ff3c6) --- tests/unit/states/test_virt.py | 43 +++++++++++++++++++++++++++++++++- 1 file changed, 42 insertions(+), 1 deletion(-) diff --git a/tests/unit/states/test_virt.py b/tests/unit/states/test_virt.py index 29735fb6dbf0..da11ca5f2743 100644 --- a/tests/unit/states/test_virt.py +++ b/tests/unit/states/test_virt.py @@ -232,7 +232,7 @@ def test_running(self): 'comment': 'myvm is running'} with patch.dict(virt.__salt__, { # pylint: disable=no-member 'virt.vm_state': MagicMock(return_value='stopped'), - 'virt.start': MagicMock(return_value=0) + 'virt.start': MagicMock(return_value=0), }): ret.update({'changes': {'myvm': 'Domain started'}, 'comment': 'Domain myvm started'}) @@ -324,6 +324,47 @@ def test_running(self): ret.update({'changes': {}, 'result': False, 'comment': 'libvirt error msg'}) self.assertDictEqual(virt.running('myvm'), ret) + # Working update case when running + with patch.dict(virt.__salt__, { # pylint: disable=no-member + 'virt.vm_state': MagicMock(return_value='running'), + 'virt.update': MagicMock(return_value={'definition': True, 'cpu': True}) + }): + ret.update({'changes': {'myvm': {'definition': True, 'cpu': True}}, + 'result': True, + 'comment': 'Domain myvm updated, restart to fully apply the changes'}) + self.assertDictEqual(virt.running('myvm', update=True, cpu=2), ret) + + # Working update case when stopped + with patch.dict(virt.__salt__, { # pylint: disable=no-member + 'virt.vm_state': MagicMock(return_value='stopped'), + 'virt.start': MagicMock(return_value=0), + 'virt.update': MagicMock(return_value={'definition': True}) + }): + ret.update({'changes': {'myvm': 'Domain updated and started'}, + 'result': True, + 'comment': 'Domain myvm updated and started'}) + self.assertDictEqual(virt.running('myvm', update=True, cpu=2), ret) + + # Failed live update case + with patch.dict(virt.__salt__, { # pylint: disable=no-member + 'virt.vm_state': MagicMock(return_value='running'), + 'virt.update': MagicMock(return_value={'definition': True, 'cpu': False, 'errors': ['some error']}) + }): + ret.update({'changes': {'myvm': {'definition': True, 'cpu': False, 'errors': ['some error']}}, + 'result': True, + 'comment': 'Domain myvm updated, but some live update(s) failed'}) + self.assertDictEqual(virt.running('myvm', update=True, cpu=2), ret) + + # Failed definition update case + with patch.dict(virt.__salt__, { # pylint: disable=no-member + 'virt.vm_state': MagicMock(return_value='running'), + 'virt.update': MagicMock(side_effect=[self.mock_libvirt.libvirtError('error message')]) + }): + ret.update({'changes': {}, + 'result': False, + 'comment': 'error message'}) + self.assertDictEqual(virt.running('myvm', update=True, cpu=2), ret) + def test_stopped(self): ''' stopped state test cases. From 0871c028c6a7d16414b72f60a87425f805d6269d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Wed, 5 Sep 2018 12:21:30 +0200 Subject: [PATCH 098/340] virt: allow defining the VM type and arch when creating it MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some hypervisors can handle several CPU architectures or have different virtualization types. This is reflected in libvirt by the OS type (badly named, indeed) and the arch value. Allow users to set them when creating a VM using either virt.init or virt.running. 
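As a small usage example, mirroring the values used in the updated test below (both virt.init and virt.running accept the same two new keyword arguments; the call is a sketch since the state function depends on the loader-injected __salt__):

    import salt.states.virt as virt

    # os_type and arch were previously not settable from the virt states;
    # they are forwarded to virt.init when the domain gets created.
    virt.running('myvm',
                 cpu=2,
                 mem=2048,
                 os_type='linux',
                 arch='i686',
                 vm_type='qemu')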
Signed-off-by: Cédric Bosdonnat (cherry picked from commit 2463ebe5a82b1a017004e8e0e390535485dc703e) --- tests/unit/states/test_virt.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/unit/states/test_virt.py b/tests/unit/states/test_virt.py index da11ca5f2743..e579b0654d77 100644 --- a/tests/unit/states/test_virt.py +++ b/tests/unit/states/test_virt.py @@ -251,6 +251,7 @@ def test_running(self): mem=2048, image='/path/to/img.qcow2'), ret) init_mock.assert_called_with('myvm', cpu=2, mem=2048, image='/path/to/img.qcow2', + os_type=None, arch=None, disk=None, disks=None, nic=None, interfaces=None, graphics=None, hypervisor=None, seed=True, install=True, pub_key=None, priv_key=None) @@ -286,6 +287,8 @@ def test_running(self): self.assertDictEqual(virt.running('myvm', cpu=2, mem=2048, + os_type='linux', + arch='i686', vm_type='qemu', disk_profile='prod', disks=disks, @@ -302,6 +305,8 @@ def test_running(self): init_mock.assert_called_with('myvm', cpu=2, mem=2048, + os_type='linux', + arch='i686', image=None, disk='prod', disks=disks, From 30981d25b9d884a68be4435502df6b8f8c2b2fcc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Tue, 3 Jul 2018 15:15:02 +0200 Subject: [PATCH 099/340] Remove unneeded kwargs in virt states Avoid using kwargs to get the states parameters, perfer documented named parameters with default value. (cherry picked from commit c7c5d6ee88fbc74d0ee0aeab41beb421d8625f05) --- tests/unit/states/test_virt.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/unit/states/test_virt.py b/tests/unit/states/test_virt.py index e579b0654d77..90f3344f77cc 100644 --- a/tests/unit/states/test_virt.py +++ b/tests/unit/states/test_virt.py @@ -254,7 +254,8 @@ def test_running(self): os_type=None, arch=None, disk=None, disks=None, nic=None, interfaces=None, graphics=None, hypervisor=None, - seed=True, install=True, pub_key=None, priv_key=None) + seed=True, install=True, pub_key=None, priv_key=None, + connection=None, username=None, password=None) with patch.dict(virt.__salt__, { # pylint: disable=no-member 'virt.vm_state': MagicMock(side_effect=CommandExecutionError('not found')), From 25b968151e08ef6102f59069c88e74d4715ba258 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Wed, 27 Mar 2019 17:08:41 +0100 Subject: [PATCH 100/340] virt.pool_running: fix pool start Building a libvirt pool starts it. When defining a new pool, we need to let build start it or we will get libvirt errors. 
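The corrected flow, reduced to its two calls. This is a sketch of what the state now does and what the adjusted test asserts, with __salt__ standing for the loader-injected mapping of execution module functions:

    def define_and_build_pool(name, conn_args):
        # conn_args: connection/username/password keyword arguments, as in the state.
        # Define the pool but do not start it: pool_build() both builds and starts
        # a freshly defined pool, and starting it a second time makes libvirt fail.
        __salt__['virt.pool_define'](name,
                                     ptype='logical',
                                     target='/dev/base',
                                     source_devices=[{'path': '/dev/sda'}],
                                     start=False,
                                     **conn_args)
        __salt__['virt.pool_build'](name, **conn_args)
        # No explicit virt.pool_start() call follows any more.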
--- salt/states/virt.py | 7 +------ tests/unit/states/test_virt.py | 5 ++--- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/salt/states/virt.py b/salt/states/virt.py index 90693880df2d..d411f864cd68 100644 --- a/salt/states/virt.py +++ b/salt/states/virt.py @@ -780,7 +780,7 @@ def pool_running(name, source_name=(source or {}).get('name', None), source_format=(source or {}).get('format', None), transient=transient, - start=True, + start=False, connection=connection, username=username, password=password) @@ -795,11 +795,6 @@ def pool_running(name, connection=connection, username=username, password=password) - - __salt__['virt.pool_start'](name, - connection=connection, - username=username, - password=password) ret['changes'][name] = 'Pool defined and started' ret['comment'] = 'Pool {0} defined and started'.format(name) except libvirt.libvirtError as err: diff --git a/tests/unit/states/test_virt.py b/tests/unit/states/test_virt.py index 90f3344f77cc..80229899375c 100644 --- a/tests/unit/states/test_virt.py +++ b/tests/unit/states/test_virt.py @@ -31,12 +31,10 @@ class LibvirtMock(MagicMock): # pylint: disable=too-many-ancestors ''' libvirt library mockup ''' - class libvirtError(Exception): # pylint: disable=invalid-name ''' libvirt error mockup ''' - def get_error_message(self): ''' Fake function return error message @@ -655,7 +653,7 @@ def test_pool_running(self): source_name=None, source_format=None, transient=True, - start=True, + start=False, connection='myconnection', username='user', password='secret') @@ -668,6 +666,7 @@ def test_pool_running(self): connection='myconnection', username='user', password='secret') + mocks['start'].assert_not_called() with patch.dict(virt.__salt__, { # pylint: disable=no-member 'virt.pool_info': MagicMock(return_value={'state': 'running'}), From b297e7db200143bf02151e475a8aa1399540d7ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Wed, 13 Mar 2019 09:43:51 +0100 Subject: [PATCH 101/340] virt: handle whitespaces in VM names The disk creation code is now ready to handle whitespaces in virtual machine name. 
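Concretely, the image paths handed to qemu-img are now quoted, so a path derived from a domain name such as 'test vm' stays a single argument. A minimal illustration, using the path value from the updated tests:

    disk_size = 10240
    img_dest = '/srv/salt-images/test vm_system.qcow2'

    # Without the quotes the embedded space split the path into two arguments;
    # with them the whole path reaches qemu-img intact.
    cmd = 'qemu-img create -f qcow2 "{0}" {1}M'.format(img_dest, disk_size)
    # -> qemu-img create -f qcow2 "/srv/salt-images/test vm_system.qcow2" 10240M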
--- salt/modules/virt.py | 8 +++--- tests/unit/modules/test_virt.py | 46 ++++++++++++++++----------------- 2 files changed, 27 insertions(+), 27 deletions(-) diff --git a/salt/modules/virt.py b/salt/modules/virt.py index b45c5f522d86..50df89dbd244 100644 --- a/salt/modules/virt.py +++ b/salt/modules/virt.py @@ -760,14 +760,14 @@ def _qemu_image_create(disk, create_overlay=False, saltenv='base'): qcow2 = False if salt.utils.path.which('qemu-img'): - res = __salt__['cmd.run']('qemu-img info {}'.format(sfn)) + res = __salt__['cmd.run']('qemu-img info "{}"'.format(sfn)) imageinfo = salt.utils.yaml.safe_load(res) qcow2 = imageinfo['file format'] == 'qcow2' try: if create_overlay and qcow2: log.info('Cloning qcow2 image %s using copy on write', sfn) __salt__['cmd.run']( - 'qemu-img create -f qcow2 -o backing_file={0} {1}' + 'qemu-img create -f qcow2 -o backing_file="{0}" "{1}"' .format(sfn, img_dest).split()) else: log.debug('Copying %s to %s', sfn, img_dest) @@ -778,7 +778,7 @@ def _qemu_image_create(disk, create_overlay=False, saltenv='base'): if disk_size and qcow2: log.debug('Resize qcow2 image to %sM', disk_size) __salt__['cmd.run']( - 'qemu-img resize {0} {1}M' + 'qemu-img resize "{0}" {1}M' .format(img_dest, disk_size) ) @@ -800,7 +800,7 @@ def _qemu_image_create(disk, create_overlay=False, saltenv='base'): if disk_size: log.debug('Create empty image with size %sM', disk_size) __salt__['cmd.run']( - 'qemu-img create -f {0} {1} {2}M' + 'qemu-img create -f {0} "{1}" {2}M' .format(disk.get('format', 'qcow2'), img_dest, disk_size) ) else: diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py index 3a69adece123..6546a0467c8b 100644 --- a/tests/unit/modules/test_virt.py +++ b/tests/unit/modules/test_virt.py @@ -1106,7 +1106,7 @@ def test_init(self): with patch.dict(virt.__salt__, {'cmd.run': mock_run}): # pylint: disable=no-member # Ensure the init() function allows creating VM without NIC and disk - virt.init('testvm', + virt.init('test vm', 2, 1234, nic=None, @@ -1120,7 +1120,7 @@ def test_init(self): # Test case creating disks defineMock.reset_mock() mock_run.reset_mock() - virt.init('testvm', + virt.init('test vm', 2, 1234, nic=None, @@ -1134,10 +1134,10 @@ def test_init(self): definition = ET.fromstring(defineMock.call_args_list[0][0][0]) disk_sources = [disk.find('source').get('file') if disk.find('source') is not None else None for disk in definition.findall('./devices/disk')] - expected_disk_path = os.path.join(root_dir, 'testvm_system.qcow2') + expected_disk_path = os.path.join(root_dir, 'test vm_system.qcow2') self.assertEqual(disk_sources, [expected_disk_path, None]) self.assertEqual(mock_run.call_args[0][0], - 'qemu-img create -f qcow2 {0} 10240M'.format(expected_disk_path)) + 'qemu-img create -f qcow2 "{0}" 10240M'.format(expected_disk_path)) self.assertEqual(mock_chmod.call_args[0][0], expected_disk_path) def test_update(self): @@ -1147,7 +1147,7 @@ def test_update(self): root_dir = os.path.join(salt.syspaths.ROOT_DIR, 'srv', 'salt-images') xml = ''' - myvm + my vm 1048576 1048576 1 @@ -1157,7 +1157,7 @@ def test_update(self): - + @@ -1165,7 +1165,7 @@ def test_update(self): - + @@ -1198,7 +1198,7 @@ def test_update(self): '''.format(root_dir, os.sep) - domain_mock = self.set_mock_vm('myvm', xml) + domain_mock = self.set_mock_vm('my vm', xml) domain_mock.OSType = MagicMock(return_value='hvm') define_mock = MagicMock(return_value=True) self.mock_conn.defineXML = define_mock @@ -1211,7 +1211,7 @@ def test_update(self): 'cpu': True, 'disk': {'attached': [], 
'detached': []}, 'interface': {'attached': [], 'detached': []} - }, virt.update('myvm', cpu=2)) + }, virt.update('my vm', cpu=2)) setxml = ET.fromstring(define_mock.call_args[0][0]) self.assertEqual(setxml.find('vcpu').text, '2') self.assertEqual(setvcpus_mock.call_args[0][0], 2) @@ -1225,7 +1225,7 @@ def test_update(self): 'mem': True, 'disk': {'attached': [], 'detached': []}, 'interface': {'attached': [], 'detached': []} - }, virt.update('myvm', mem=2048)) + }, virt.update('my vm', mem=2048)) setxml = ET.fromstring(define_mock.call_args[0][0]) self.assertEqual(setxml.find('memory').text, '2048') self.assertEqual(setxml.find('memory').get('unit'), 'MiB') @@ -1240,21 +1240,21 @@ def test_update(self): mock_run = MagicMock() with patch.dict(os.__dict__, {'chmod': mock_chmod, 'makedirs': MagicMock()}): # pylint: disable=no-member with patch.dict(virt.__salt__, {'cmd.run': mock_run}): # pylint: disable=no-member - ret = virt.update('myvm', disk_profile='default', disks=[ + ret = virt.update('my vm', disk_profile='default', disks=[ {'name': 'cddrive', 'device': 'cdrom', 'source_file': None, 'model': 'ide'}, {'name': 'added', 'size': 2048}]) added_disk_path = os.path.join( - virt.__salt__['config.get']('virt:images'), 'myvm_added.qcow2') # pylint: disable=no-member + virt.__salt__['config.get']('virt:images'), 'my vm_added.qcow2') # pylint: disable=no-member self.assertEqual(mock_run.call_args[0][0], - 'qemu-img create -f qcow2 {0} 2048M'.format(added_disk_path)) + 'qemu-img create -f qcow2 "{0}" 2048M'.format(added_disk_path)) self.assertEqual(mock_chmod.call_args[0][0], added_disk_path) self.assertListEqual( - [None, os.path.join(root_dir, 'myvm_added.qcow2')], + [None, os.path.join(root_dir, 'my vm_added.qcow2')], [ET.fromstring(disk).find('source').get('file') if str(disk).find(' -1 else None for disk in ret['disk']['attached']]) self.assertListEqual( - [os.path.join(root_dir, 'myvm_data.qcow2')], + [os.path.join(root_dir, 'my vm_data.qcow2')], [ET.fromstring(disk).find('source').get('file') for disk in ret['disk']['detached']]) self.assertEqual(devattach_mock.call_count, 2) devdetach_mock.assert_called_once() @@ -1271,7 +1271,7 @@ def test_update(self): devattach_mock.reset_mock() devdetach_mock.reset_mock() with patch.dict(salt.modules.config.__opts__, mock_config): # pylint: disable=no-member - ret = virt.update('myvm', nic_profile='myprofile', + ret = virt.update('my vm', nic_profile='myprofile', interfaces=[{'name': 'eth0', 'type': 'network', 'source': 'default', 'mac': '52:54:00:39:02:b1'}, {'name': 'eth1', 'type': 'network', 'source': 'newnet'}]) @@ -1285,7 +1285,7 @@ def test_update(self): # Remove nics case devattach_mock.reset_mock() devdetach_mock.reset_mock() - ret = virt.update('myvm', nic_profile=None, interfaces=[]) + ret = virt.update('my vm', nic_profile=None, interfaces=[]) self.assertEqual([], ret['interface']['attached']) self.assertEqual(2, len(ret['interface']['detached'])) devattach_mock.assert_not_called() @@ -1294,7 +1294,7 @@ def test_update(self): # Remove disks case (yeah, it surely is silly) devattach_mock.reset_mock() devdetach_mock.reset_mock() - ret = virt.update('myvm', disk_profile=None, disks=[]) + ret = virt.update('my vm', disk_profile=None, disks=[]) self.assertEqual([], ret['disk']['attached']) self.assertEqual(2, len(ret['disk']['detached'])) devattach_mock.assert_not_called() @@ -1305,7 +1305,7 @@ def test_update(self): 'definition': True, 'disk': {'attached': [], 'detached': []}, 'interface': {'attached': [], 'detached': []} - }, virt.update('myvm', 
graphics={'type': 'vnc'})) + }, virt.update('my vm', graphics={'type': 'vnc'})) setxml = ET.fromstring(define_mock.call_args[0][0]) self.assertEqual('vnc', setxml.find('devices/graphics').get('type')) @@ -1314,7 +1314,7 @@ def test_update(self): 'definition': False, 'disk': {'attached': [], 'detached': []}, 'interface': {'attached': [], 'detached': []} - }, virt.update('myvm', cpu=1, mem=1024, + }, virt.update('my vm', cpu=1, mem=1024, disk_profile='default', disks=[{'name': 'data', 'size': 2048}], nic_profile='myprofile', interfaces=[{'name': 'eth0', 'type': 'network', 'source': 'default', @@ -1328,7 +1328,7 @@ def test_update(self): self.mock_conn.defineXML.side_effect = self.mock_libvirt.libvirtError("Test error") setmem_mock.reset_mock() with self.assertRaises(self.mock_libvirt.libvirtError): - virt.update('myvm', mem=2048) + virt.update('my vm', mem=2048) # Failed single update failure case self.mock_conn.defineXML = MagicMock(return_value=True) @@ -1338,7 +1338,7 @@ def test_update(self): 'errors': ['Failed to live change memory'], 'disk': {'attached': [], 'detached': []}, 'interface': {'attached': [], 'detached': []} - }, virt.update('myvm', mem=2048)) + }, virt.update('my vm', mem=2048)) # Failed multiple updates failure case self.assertEqual({ @@ -1347,7 +1347,7 @@ def test_update(self): 'cpu': True, 'disk': {'attached': [], 'detached': []}, 'interface': {'attached': [], 'detached': []} - }, virt.update('myvm', cpu=4, mem=2048)) + }, virt.update('my vm', cpu=4, mem=2048)) def test_mixed_dict_and_list_as_profile_objects(self): ''' From 2c527125564e8dd74ea1a70163eaa7eff19f3917 Mon Sep 17 00:00:00 2001 From: Ch3LL Date: Mon, 8 Apr 2019 10:43:58 -0400 Subject: [PATCH 102/340] Fix tests for pchanges backport into 2018.3 --- tests/unit/states/test_file.py | 2 +- tests/unit/states/test_net_napalm_yang.py | 6 ++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/tests/unit/states/test_file.py b/tests/unit/states/test_file.py index fdd65eaf6740..1e1f7083621c 100644 --- a/tests/unit/states/test_file.py +++ b/tests/unit/states/test_file.py @@ -287,7 +287,7 @@ def return_val(kwargs): ' should go') ret = return_val({'comment': comt, 'result': False, - 'changes': {'new': name}}) + 'changes': {}}) self.assertDictEqual(filestate.symlink (name, target, user=user, group=group, backupname='SALT'), diff --git a/tests/unit/states/test_net_napalm_yang.py b/tests/unit/states/test_net_napalm_yang.py index ccb0fa7cf289..40318977bb95 100644 --- a/tests/unit/states/test_net_napalm_yang.py +++ b/tests/unit/states/test_net_napalm_yang.py @@ -30,8 +30,7 @@ def setup_loader_modules(self): def test_managed(self): ret = {'changes': {}, 'comment': 'Loaded.', - 'name': 'test', 'result': False, - 'pchanges': {'compliance_report': {'complies': False}}} + 'name': 'test', 'result': False} parse = MagicMock(return_value='abcdef') temp_file = MagicMock(return_value='') compliance_report = MagicMock(return_value={'complies': False}) @@ -55,8 +54,7 @@ def test_managed(self): def test_configured(self): ret = {'changes': {}, 'comment': 'Loaded.', - 'name': 'test', 'result': False, - 'pchanges': {}} + 'name': 'test', 'result': False} load_config = MagicMock(return_value={'comment': 'Loaded.'}) with patch('salt.utils.files.fopen'): From b374034f00c6990d3bcc703b106776957ff1750a Mon Sep 17 00:00:00 2001 From: "Daniel A. 
Wozniak" Date: Mon, 8 Apr 2019 16:01:34 +0000 Subject: [PATCH 103/340] Re-raise queued exceptions with traceback --- salt/transport/ipc.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py index 30e340c0b8b8..3feb0a90d3db 100644 --- a/salt/transport/ipc.py +++ b/salt/transport/ipc.py @@ -9,6 +9,7 @@ import socket import weakref import time +import sys # Import 3rd-party libs import msgpack @@ -83,6 +84,11 @@ def _done_callback(self, future): self.set_exception(exc) +class IPCExceptionProxy(object): + def __init__(self, orig_info): + self.orig_info = orig_info + + class IPCServer(object): ''' A Tornado IPC server very similar to Tornado's TCPServer class @@ -648,6 +654,7 @@ def _read(self, timeout, callback=None): break except Exception as exc: log.error('Exception occurred in Subscriber while handling stream: %s', exc) + exc = IPCExceptionProxy(sys.exc_info()) self._feed_subscribers([exc]) break @@ -755,13 +762,19 @@ def read_async(self, callback, timeout=None): raise tornado.gen.Return(None) if data is None: break - elif isinstance(data, Exception): - raise data + elif isinstance(data, IPCExceptionProxy): + self.reraise(data.orig_info) elif callback: self.service.io_loop.spawn_callback(callback, data) else: raise tornado.gen.Return(data) + def reraise(self, exc_info): + if six.PY2: + raise exc_info[0], exc_info[1], exc_info[2] + else: + raise exc_info[0].with_traceback(exc_info[1], exc_info[2]) + def read_sync(self, timeout=None): ''' Read a message from an IPC socket From 503cdd246e772542e1225809e54f9c6de8e9287b Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Mon, 8 Apr 2019 16:04:22 +0000 Subject: [PATCH 104/340] Remove IPCClient singleton --- salt/transport/ipc.py | 55 ++++++++++--------------------------------- 1 file changed, 13 insertions(+), 42 deletions(-) diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py index 3feb0a90d3db..4c154d62e0d0 100644 --- a/salt/transport/ipc.py +++ b/salt/transport/ipc.py @@ -243,31 +243,7 @@ class IPCClient(object): case it is used as the port for a tcp localhost connection. 
''' - - # Create singleton map between two sockets - instance_map = weakref.WeakKeyDictionary() - - def __new__(cls, socket_path, io_loop=None): - io_loop = io_loop or tornado.ioloop.IOLoop.current() - if io_loop not in IPCClient.instance_map: - IPCClient.instance_map[io_loop] = weakref.WeakValueDictionary() - loop_instance_map = IPCClient.instance_map[io_loop] - - # FIXME - key = six.text_type(socket_path) - - client = loop_instance_map.get(key) - if client is None: - log.debug('Initializing new IPCClient for path: %s', key) - client = object.__new__(cls) - # FIXME - client.__singleton_init__(io_loop=io_loop, socket_path=socket_path) - loop_instance_map[key] = client - else: - log.debug('Re-using IPCClient for %s', key) - return client - - def __singleton_init__(self, socket_path, io_loop=None): + def __init__(self, socket_path, io_loop=None): ''' Create a new IPC client @@ -286,10 +262,6 @@ def __singleton_init__(self, socket_path, io_loop=None): encoding = 'utf-8' self.unpacker = msgpack.Unpacker(encoding=encoding) - def __init__(self, socket_path, io_loop=None): - # Handled by singleton __new__ - pass - def connected(self): return self.stream is not None and not self.stream.closed() @@ -360,7 +332,16 @@ def _connect(self, timeout=None): yield tornado.gen.sleep(1) def __del__(self): - self.close() + try: + self.close() + except socket.error as exc: + if exc.errno != errno.EBADF: + # If its not a bad file descriptor error, raise + raise + except TypeError: + # This is raised when Python's GC has collected objects which + # would be needed when calling self.close() + pass def close(self): ''' @@ -374,16 +355,6 @@ def close(self): if self.stream is not None and not self.stream.closed(): self.stream.close() - # Remove the entry from the instance map so - # that a closed entry may not be reused. - # This forces this operation even if the reference - # count of the entry has not yet gone to zero. - if self.io_loop in IPCClient.instance_map: - loop_instance_map = IPCClient.instance_map[self.io_loop] - key = six.text_type(self.socket_path) - if key in loop_instance_map: - del loop_instance_map[key] - class IPCMessageClient(IPCClient): ''' @@ -597,8 +568,8 @@ class IPCMessageSubscriberService(IPCClient): To use this refer to IPCMessageSubscriber documentation. ''' - def __singleton_init__(self, socket_path, io_loop=None): - super(IPCMessageSubscriberService, self).__singleton_init__( + def __init__(self, socket_path, io_loop=None): + super(IPCMessageSubscriberService, self).__init__( socket_path, io_loop=io_loop) self.saved_data = [] self._read_in_progress = Lock() From 2b35437838acf5584b9da9e192fd1c46452cccf4 Mon Sep 17 00:00:00 2001 From: "Daniel A. 
Wozniak" Date: Mon, 8 Apr 2019 16:55:44 +0000 Subject: [PATCH 105/340] Fix linter issues --- salt/transport/ipc.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py index 4c154d62e0d0..042850043daa 100644 --- a/salt/transport/ipc.py +++ b/salt/transport/ipc.py @@ -5,6 +5,7 @@ # Import Python libs from __future__ import absolute_import, print_function, unicode_literals +import errno import logging import socket import weakref @@ -742,7 +743,7 @@ def read_async(self, callback, timeout=None): def reraise(self, exc_info): if six.PY2: - raise exc_info[0], exc_info[1], exc_info[2] + raise exc_info[0], exc_info[1], exc_info[2] # pylint: disable=W1699 else: raise exc_info[0].with_traceback(exc_info[1], exc_info[2]) From 47203f7f79e352a0e43040238a52c77549edc3bc Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Mon, 8 Apr 2019 17:17:20 +0000 Subject: [PATCH 106/340] Fix ipc unit tests --- tests/unit/transport/test_ipc.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/tests/unit/transport/test_ipc.py b/tests/unit/transport/test_ipc.py index 0ca8ebff8640..939c4958318d 100644 --- a/tests/unit/transport/test_ipc.py +++ b/tests/unit/transport/test_ipc.py @@ -86,13 +86,14 @@ class IPCMessageClient(BaseIPCReqCase): ''' def _get_channel(self): - channel = salt.transport.ipc.IPCMessageClient( - socket_path=self.socket_path, - io_loop=self.io_loop, - ) - channel.connect(callback=self.stop) - self.wait() - return channel + if not hasattr(self, 'channel') or self.channel is None: + self.channel = salt.transport.ipc.IPCMessageClient( + socket_path=self.socket_path, + io_loop=self.io_loop, + ) + self.channel.connect(callback=self.stop) + self.wait() + return self.channel def setUp(self): super(IPCMessageClient, self).setUp() @@ -106,6 +107,8 @@ def tearDown(self): if exc.errno != errno.EBADF: # If its not a bad file descriptor error, raise raise + finally: + self.channel = None def test_basic_send(self): msg = {'foo': 'bar', 'stop': True} From dd2306921f1b7ead0af1a40f3024b6d997f5d761 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Mon, 8 Apr 2019 14:40:06 -0700 Subject: [PATCH 107/340] Adding tests for NACL changes. 
--- salt/utils/nacl.py | 2 ++ tests/integration/runners/test_nacl.py | 38 ++++++++++++++++++++++++++ tests/unit/modules/test_nacl.py | 1 + 3 files changed, 41 insertions(+) diff --git a/salt/utils/nacl.py b/salt/utils/nacl.py index 6f2ee668a7b3..5ce9621785f1 100644 --- a/salt/utils/nacl.py +++ b/salt/utils/nacl.py @@ -6,6 +6,7 @@ # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import base64 +import logging import os # Import Salt libs @@ -18,6 +19,7 @@ import salt.utils.win_functions import salt.utils.win_dacl +log = logging.getLogger(__name__) REQ_ERROR = None try: diff --git a/tests/integration/runners/test_nacl.py b/tests/integration/runners/test_nacl.py index 9f6a802e51ef..f4d7ab5f0c44 100644 --- a/tests/integration/runners/test_nacl.py +++ b/tests/integration/runners/test_nacl.py @@ -5,6 +5,8 @@ # Import Python libs from __future__ import absolute_import, print_function, unicode_literals +import logging + # Import Salt Testing libs from tests.support.case import ShellCase from tests.support.unit import skipIf @@ -16,6 +18,8 @@ except ImportError: HAS_LIBNACL = False +log = logging.getLogger(__name__) + @skipIf(not HAS_LIBNACL, 'skipping test_nacl, libnacl is unavailable') class NaclTest(ShellCase): @@ -150,3 +154,37 @@ def test_secretbox_enc_dec(self): sk=sk, ) self.assertEqual(unencrypted_data, ret['return']) + + def test_enc_dec_no_pk_no_sk(self): + ''' + Store, list, fetch, then flush data + ''' + # Store the data + ret = self.run_run_plus( + 'nacl.keygen', + ) + self.assertIn('pk', ret['return']) + self.assertIn('sk', ret['return']) + pk = ret['return']['pk'] + sk = ret['return']['sk'] + + unencrypted_data = b'hello' + + # Encrypt with pk + ret = self.run_run_plus( + 'nacl.enc', + data=unencrypted_data, + pk=None, + ) + self.assertIn('Exception: no pubkey or pk_file found', ret['return']) + + self.assertIn('return', ret) + encrypted_data = ret['return'] + + # Decrypt with sk + ret = self.run_run_plus( + 'nacl.dec', + data=encrypted_data, + sk=None, + ) + self.assertIn('Exception: no key or sk_file found', ret['return']) diff --git a/tests/unit/modules/test_nacl.py b/tests/unit/modules/test_nacl.py index 6d7505d3c263..20a9b5fc8534 100644 --- a/tests/unit/modules/test_nacl.py +++ b/tests/unit/modules/test_nacl.py @@ -14,6 +14,7 @@ from tests.support.unit import TestCase from tests.support.unit import skipIf + try: import libnacl.secret # pylint: disable=unused-import import libnacl.sealed # pylint: disable=unused-import From 29999b04939b7ab42a7d5ea811e163ceb6aedcf2 Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Tue, 9 Apr 2019 17:49:10 +0000 Subject: [PATCH 108/340] Close message service on subscriber close --- salt/transport/ipc.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py index 042850043daa..f1b76772e072 100644 --- a/salt/transport/ipc.py +++ b/salt/transport/ipc.py @@ -311,9 +311,8 @@ def _connect(self, timeout=None): if self.stream is None: with salt.utils.asynchronous.current_ioloop(self.io_loop): self.stream = IOStream( - socket.socket(sock_type, socket.SOCK_STREAM), + socket.socket(sock_type, socket.SOCK_STREAM) ) - try: log.trace('IPCClient: Connecting to socket: %s', self.socket_path) yield self.stream.connect(sock_addr) @@ -659,8 +658,7 @@ def close(self): Sockets and filehandles should be closed explicitly, to prevent leaks. 
''' - if not self._closing: - super(IPCMessageSubscriberService, self).close() + super(IPCMessageSubscriberService, self).close() def __del__(self): if IPCMessageSubscriberService in globals(): @@ -760,6 +758,7 @@ def read_sync(self, timeout=None): def close(self): self.service.unsubscribe(self) + self.service.close() def __del__(self): self.close() From c7ad732854aad9d6b8f43219b496eeed02f01092 Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Tue, 9 Apr 2019 17:58:13 +0000 Subject: [PATCH 109/340] Use six.reraise for py3 compatability --- salt/transport/ipc.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py index f1b76772e072..bec070c64a94 100644 --- a/salt/transport/ipc.py +++ b/salt/transport/ipc.py @@ -733,18 +733,12 @@ def read_async(self, callback, timeout=None): if data is None: break elif isinstance(data, IPCExceptionProxy): - self.reraise(data.orig_info) + six.reraise(*data.orig_info) elif callback: self.service.io_loop.spawn_callback(callback, data) else: raise tornado.gen.Return(data) - def reraise(self, exc_info): - if six.PY2: - raise exc_info[0], exc_info[1], exc_info[2] # pylint: disable=W1699 - else: - raise exc_info[0].with_traceback(exc_info[1], exc_info[2]) - def read_sync(self, timeout=None): ''' Read a message from an IPC socket From 7c4abd6d6ed4c8641d97cc59107b8e967d01f36d Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Tue, 9 Apr 2019 11:05:34 -0700 Subject: [PATCH 110/340] changes needs to be a dictionary. --- salt/states/file.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/states/file.py b/salt/states/file.py index d41fb4f9ee41..8f5463703928 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -2767,7 +2767,7 @@ def managed(name, reset=win_perms_reset) except CommandExecutionError as exc: if exc.strerror.startswith('Path not found'): - ret['changes'] = '{0} will be created'.format(name) + ret['changes'] = {name: 'will be created'} if isinstance(ret['changes'], tuple): ret['result'], ret['comment'] = ret['changes'] From eb517e1a39e61306d9b26cd27595cc23b90c93ba Mon Sep 17 00:00:00 2001 From: Wayne Werner Date: Wed, 6 Mar 2019 14:00:03 -0600 Subject: [PATCH 111/340] Swap '/' for '.' when matching --- salt/pillar/__init__.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/salt/pillar/__init__.py b/salt/pillar/__init__.py index 94dd9695ca1d..6fe2b5f49e21 100644 --- a/salt/pillar/__init__.py +++ b/salt/pillar/__init__.py @@ -774,7 +774,10 @@ def render_pstate(self, sls, saltenv, mods, defaults=None): key = None try: - matched_pstates += fnmatch.filter(self.avail[saltenv], sub_sls) + matched_pstates.extend(fnmatch.filter( + self.avail[saltenv], + sub_sls.replace('/', '.'), + )) except KeyError: errors.extend( ['No matching pillar environment for environment ' From 29c676d24b3ab5c6e600bb08258b10a37b64925a Mon Sep 17 00:00:00 2001 From: Wayne Werner Date: Fri, 8 Mar 2019 12:56:25 -0600 Subject: [PATCH 112/340] Ensure _closing exists This was raising an ignored exception because _closing was set later in __init__. Not sure if it must be the last line in __init__. 
--- salt/pillar/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/pillar/__init__.py b/salt/pillar/__init__.py index 6fe2b5f49e21..4899ac31fb7a 100644 --- a/salt/pillar/__init__.py +++ b/salt/pillar/__init__.py @@ -257,7 +257,7 @@ def compile_pillar(self): return ret_pillar def destroy(self): - if self._closing: + if hasattr(self, '_closing') and self._closing: return self._closing = True From ee3115fa9434c99d0ad6ff4d545b1ad61d3eee29 Mon Sep 17 00:00:00 2001 From: Wayne Werner Date: Mon, 11 Mar 2019 15:11:55 -0500 Subject: [PATCH 113/340] Allow leading dots and / as pillar separators --- salt/pillar/__init__.py | 2 +- tests/unit/test_pillar.py | 33 ++++++++++++++++++++++++++++++--- 2 files changed, 31 insertions(+), 4 deletions(-) diff --git a/salt/pillar/__init__.py b/salt/pillar/__init__.py index 4899ac31fb7a..38225cac4e62 100644 --- a/salt/pillar/__init__.py +++ b/salt/pillar/__init__.py @@ -776,7 +776,7 @@ def render_pstate(self, sls, saltenv, mods, defaults=None): try: matched_pstates.extend(fnmatch.filter( self.avail[saltenv], - sub_sls.replace('/', '.'), + sub_sls.lstrip('.').replace('/', '.'), )) except KeyError: errors.extend( diff --git a/tests/unit/test_pillar.py b/tests/unit/test_pillar.py index 41dd00e89a9b..64c77b186ad7 100644 --- a/tests/unit/test_pillar.py +++ b/tests/unit/test_pillar.py @@ -659,14 +659,21 @@ def test_include(self, tempdir): 'oscodename': 'raring', 'osfullname': 'Ubuntu', 'osrelease': '13.04', - 'kernel': 'Linux' + 'kernel': 'Linux', } sls_files = self._setup_test_include_sls(tempdir) fc_mock = MockFileclient( cache_file=sls_files['top']['dest'], get_state=sls_files, - list_states=['top', 'test.init', 'test.sub1', - 'test.sub2', 'test.sub_wildcard_1'], + list_states=[ + 'top', + 'test.init', + 'test.sub1', + 'test.sub2', + 'test.sub_wildcard_1', + 'test.sub_with_init_dot', + 'test.sub.with.slashes', + ], ) with patch.object(salt.fileclient, 'get_file_client', MagicMock(return_value=fc_mock)): @@ -677,6 +684,8 @@ def test_include(self, tempdir): self.assertEqual(compiled_pillar['foo_wildcard'], 'bar_wildcard') self.assertEqual(compiled_pillar['foo1'], 'bar1') self.assertEqual(compiled_pillar['foo2'], 'bar2') + self.assertEqual(compiled_pillar['sub_with_slashes'], 'sub_slashes_worked') + self.assertEqual(compiled_pillar['sub_init_dot'], 'sub_with_init_dot_worked') def _setup_test_include_sls(self, tempdir): top_file = tempfile.NamedTemporaryFile(dir=tempdir, delete=False) @@ -695,6 +704,8 @@ def _setup_test_include_sls(self, tempdir): include: - test.sub1 - test.sub_wildcard* + - .test.sub_with_init_dot + - test/sub/with/slashes ''') init_sls.flush() sub1_sls = tempfile.NamedTemporaryFile(dir=tempdir, delete=False) @@ -717,12 +728,28 @@ def _setup_test_include_sls(self, tempdir): ''') sub_wildcard_1_sls.flush() + sub_with_init_dot_sls = tempfile.NamedTemporaryFile(dir=tempdir, delete=False) + sub_with_init_dot_sls.write(b''' +sub_init_dot: + sub_with_init_dot_worked +''') + sub_with_init_dot_sls.flush() + + sub_with_slashes_sls = tempfile.NamedTemporaryFile(dir=tempdir, delete=False) + sub_with_slashes_sls.write(b''' +sub_with_slashes: + sub_slashes_worked +''') + sub_with_slashes_sls.flush() + return { 'top': {'path': '', 'dest': top_file.name}, 'test': {'path': '', 'dest': init_sls.name}, 'test.sub1': {'path': '', 'dest': sub1_sls.name}, 'test.sub2': {'path': '', 'dest': sub2_sls.name}, 'test.sub_wildcard_1': {'path': '', 'dest': sub_wildcard_1_sls.name}, + 'test.sub_with_init_dot': {'path': '', 'dest': 
sub_with_init_dot_sls.name}, + 'test.sub.with.slashes': {'path': '', 'dest': sub_with_slashes_sls.name}, } From 1bdaf2931b9081a209ba96c300317e6aee46d8b9 Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Wed, 10 Apr 2019 03:32:20 +0000 Subject: [PATCH 114/340] Ensure exceptions in service future are handled --- salt/transport/ipc.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py index bec070c64a94..5899e08650a9 100644 --- a/salt/transport/ipc.py +++ b/salt/transport/ipc.py @@ -574,6 +574,7 @@ def __init__(self, socket_path, io_loop=None): self.saved_data = [] self._read_in_progress = Lock() self.handlers = weakref.WeakSet() + self.read_stream_future = None def _subscribe(self, handler): self.handlers.add(handler) @@ -601,16 +602,16 @@ def _read(self, timeout, callback=None): if timeout is None: timeout = 5 - read_stream_future = None + self.read_stream_future = None while self._has_subscribers(): - if read_stream_future is None: - read_stream_future = self.stream.read_bytes(4096, partial=True) + if self.read_stream_future is None: + self.read_stream_future = self.stream.read_bytes(4096, partial=True) try: wire_bytes = yield FutureWithTimeout(self.io_loop, - read_stream_future, + self.read_stream_future, timeout) - read_stream_future = None + self.read_stream_future = None self.unpacker.feed(wire_bytes) msgs = [msg['body'] for msg in self.unpacker] @@ -650,7 +651,7 @@ def read(self, handler, timeout=None): except Exception as exc: log.error('Exception occurred while Subscriber connecting: %s', exc) yield tornado.gen.sleep(1) - self._read(timeout) + yield self._read(timeout) def close(self): ''' @@ -659,6 +660,10 @@ def close(self): leaks. ''' super(IPCMessageSubscriberService, self).close() + if self.read_stream_future is not None and self.read_stream_future.done(): + exc = self.read_stream_future.exception() + if exc and not isinstance(exc, tornado.iostream.StreamClosedError): + log.error("Read future returned exception %r", exc) def __del__(self): if IPCMessageSubscriberService in globals(): From f66d47451b7423068b255d9f20b8fab585fd0315 Mon Sep 17 00:00:00 2001 From: Cristian Hotea Date: Thu, 30 Aug 2018 12:58:41 +0300 Subject: [PATCH 115/340] Add support to avoid calling refresh_db in opkg.del_repo Since refresh_db is a costly operation we should have the option of not calling that Signed-off-by: Cristian Hotea --- salt/modules/opkg.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/salt/modules/opkg.py b/salt/modules/opkg.py index 334cc6729bbc..8ba83c23f57e 100644 --- a/salt/modules/opkg.py +++ b/salt/modules/opkg.py @@ -1154,6 +1154,7 @@ def del_repo(alias, **kwargs): # pylint: disable=unused-argument salt '*' pkg.del_repo alias ''' + refresh = salt.utils.data.is_true(kwargs.get('refresh', True)) repos = list_repos() if repos: deleted_from = dict() @@ -1179,8 +1180,8 @@ def del_repo(alias, **kwargs): # pylint: disable=unused-argument except OSError: pass ret += msg.format(alias, repo_file) - # explicit refresh after a repo is deleted - refresh_db() + if refresh: + refresh_db() return ret return "Repo {0} doesn't exist in the opkg repo lists".format(alias) From c8d547ed6a46c9e9b330099368ae7cdddcfadd1a Mon Sep 17 00:00:00 2001 From: Cristian Hotea Date: Wed, 10 Oct 2018 16:27:56 +0300 Subject: [PATCH 116/340] Use prune option in Pygit2 provider when fetching Pygit2 version 0.26.2 added support for pruning when fetching. 
In this way Pygit2 provider will no longer need to leverage git commanand line utility for pruning the remote. Signed-off-by: Cristian Hotea --- salt/utils/gitfs.py | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/salt/utils/gitfs.py b/salt/utils/gitfs.py index 2138e76d31c1..d52d1218c6ef 100644 --- a/salt/utils/gitfs.py +++ b/salt/utils/gitfs.py @@ -1611,11 +1611,19 @@ def clean_stale_refs(self, local_refs=None): # pylint: disable=arguments-differ ''' Clean stale local refs so they don't appear as fileserver environments ''' + try: + if pygit2.GIT_FETCH_PRUNE: + # Don't need to clean anything, pygit2 can do it by itself + return [] + except AttributeError: + # However, only in 0.26.2 and newer + pass if self.credentials is not None: log.debug( - 'pygit2 does not support detecting stale refs for ' - 'authenticated remotes, saltenvs will not reflect ' - 'branches/tags removed from remote \'%s\'', self.id + 'The installed version of pygit2 (%s) does not support ' + 'detecting stale refs for authenticated remotes, saltenvs ' + 'will not reflect branches/tags removed from remote \'%s\'', + PYGIT2_VERSION, self.id ) return [] return super(Pygit2, self).clean_stale_refs() @@ -1721,6 +1729,11 @@ def _fetch(self): else: if self.credentials is not None: origin.credentials = self.credentials + try: + fetch_kwargs['prune'] = pygit2.GIT_FETCH_PRUNE + except AttributeError: + # pruning only available in pygit2 >= 0.26.2 + pass try: fetch_results = origin.fetch(**fetch_kwargs) except GitError as exc: @@ -2573,7 +2586,8 @@ def _recommend(): LIBGIT2_VERSION ) ) - if not salt.utils.path.which('git'): + if not getattr(pygit2, 'GIT_FETCH_PRUNE', False) \ + and not salt.utils.path.which('git'): errors.append( 'The git command line utility is required when using the ' '\'pygit2\' {0}_provider.'.format(self.role) From a891fd3a60e5322514b49ecb40badde90d158b08 Mon Sep 17 00:00:00 2001 From: Cristian Hotea Date: Thu, 30 Aug 2018 14:02:08 +0300 Subject: [PATCH 117/340] Make opkg.del_repo be compatible with pkrepo state module Pkgrepo state module is using a named argument when calling pkg.del_repo. Change the name of the argument from 'alias' to 'repo' and update all other functions to be consistent on naming. Signed-off-by: Cristian Hotea --- salt/modules/opkg.py | 66 ++++++++++++++++++++++---------------------- 1 file changed, 33 insertions(+), 33 deletions(-) diff --git a/salt/modules/opkg.py b/salt/modules/opkg.py index 334cc6729bbc..4c6eed6595e6 100644 --- a/salt/modules/opkg.py +++ b/salt/modules/opkg.py @@ -1067,7 +1067,7 @@ def list_repos(**kwargs): # pylint: disable=unused-argument return repos -def get_repo(alias, **kwargs): # pylint: disable=unused-argument +def get_repo(repo, **kwargs): # pylint: disable=unused-argument ''' Display a repo from the ``/etc/opkg/*.conf`` @@ -1075,19 +1075,19 @@ def get_repo(alias, **kwargs): # pylint: disable=unused-argument .. 
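A sketch of the approach described above; the backported hunk itself is not included in this excerpt, so the names below are illustrative rather than the actual cmdmod code:

    import os

    try:
        # The CPython interpreter on Windows exposes the native environment
        # mapping in the nt module; on other platforms the import fails and
        # os.environ is used instead.
        import nt
        environ_source = nt.environ
    except ImportError:
        environ_source = os.environ

    # Build the child process environment from the platform-appropriate mapping.
    run_env = dict(environ_source)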
code-block:: bash - salt '*' pkg.get_repo alias + salt '*' pkg.get_repo repo ''' repos = list_repos() if repos: for source in six.itervalues(repos): for sub in source: - if sub['name'] == alias: + if sub['name'] == repo: return sub return {} -def _del_repo_from_file(alias, filepath): +def _del_repo_from_file(repo, filepath): ''' Remove a repo from filepath ''' @@ -1100,30 +1100,30 @@ def _del_repo_from_file(alias, filepath): if line.startswith('#'): line = line[1:] cols = salt.utils.args.shlex_split(line.strip()) - if alias != cols[1]: + if repo != cols[1]: output.append(salt.utils.stringutils.to_str(line)) with salt.utils.files.fopen(filepath, 'w') as fhandle: fhandle.writelines(output) -def _add_new_repo(alias, uri, compressed, enabled=True): +def _add_new_repo(repo, uri, compressed, enabled=True): ''' Add a new repo entry ''' repostr = '# ' if not enabled else '' repostr += 'src/gz ' if compressed else 'src ' - if ' ' in alias: - repostr += '"' + alias + '" ' + if ' ' in repo: + repostr += '"' + repo + '" ' else: - repostr += alias + ' ' + repostr += repo + ' ' repostr += uri + '\n' - conffile = os.path.join(OPKG_CONFDIR, alias + '.conf') + conffile = os.path.join(OPKG_CONFDIR, repo + '.conf') with salt.utils.files.fopen(conffile, 'a') as fhandle: fhandle.write(salt.utils.stringutils.to_str(repostr)) -def _mod_repo_in_file(alias, repostr, filepath): +def _mod_repo_in_file(repo, repostr, filepath): ''' Replace a repo entry in filepath with repostr ''' @@ -1133,7 +1133,7 @@ def _mod_repo_in_file(alias, repostr, filepath): cols = salt.utils.args.shlex_split( salt.utils.stringutils.to_unicode(line).strip() ) - if alias not in cols: + if repo not in cols: output.append(line) else: output.append(salt.utils.stringutils.to_str(repostr + '\n')) @@ -1141,7 +1141,7 @@ def _mod_repo_in_file(alias, repostr, filepath): fhandle.writelines(output) -def del_repo(alias, **kwargs): # pylint: disable=unused-argument +def del_repo(repo, **kwargs): # pylint: disable=unused-argument ''' Delete a repo from ``/etc/opkg/*.conf`` @@ -1152,21 +1152,21 @@ def del_repo(alias, **kwargs): # pylint: disable=unused-argument .. code-block:: bash - salt '*' pkg.del_repo alias + salt '*' pkg.del_repo repo ''' repos = list_repos() if repos: deleted_from = dict() - for repo in repos: - source = repos[repo][0] - if source['name'] == alias: + for repository in repos: + source = repos[repository][0] + if source['name'] == repo: deleted_from[source['file']] = 0 - _del_repo_from_file(alias, source['file']) + _del_repo_from_file(repo, source['file']) if deleted_from: ret = '' - for repo in repos: - source = repos[repo][0] + for repository in repos: + source = repos[repository][0] if source['file'] in deleted_from: deleted_from[source['file']] += 1 for repo_file, count in six.iteritems(deleted_from): @@ -1178,22 +1178,22 @@ def del_repo(alias, **kwargs): # pylint: disable=unused-argument os.remove(repo_file) except OSError: pass - ret += msg.format(alias, repo_file) + ret += msg.format(repo, repo_file) # explicit refresh after a repo is deleted refresh_db() return ret - return "Repo {0} doesn't exist in the opkg repo lists".format(alias) + return "Repo {0} doesn't exist in the opkg repo lists".format(repo) -def mod_repo(alias, **kwargs): +def mod_repo(repo, **kwargs): ''' Modify one or more values for a repo. If the repo does not exist, it will be created, so long as uri is defined. The following options are available to modify a repo definition: - alias + repo alias by which opkg refers to the repo. uri the URI to the repo. 
@@ -1209,8 +1209,8 @@ def mod_repo(alias, **kwargs): .. code-block:: bash - salt '*' pkg.mod_repo alias uri=http://new/uri - salt '*' pkg.mod_repo alias enabled=False + salt '*' pkg.mod_repo repo uri=http://new/uri + salt '*' pkg.mod_repo repo enabled=False ''' repos = list_repos() found = False @@ -1218,9 +1218,9 @@ def mod_repo(alias, **kwargs): if 'uri' in kwargs: uri = kwargs['uri'] - for repo in repos: - source = repos[repo][0] - if source['name'] == alias: + for repository in repos: + source = repos[repository][0] + if source['name'] == repo: found = True repostr = '' if 'enabled' in kwargs and not kwargs['enabled']: @@ -1229,13 +1229,13 @@ def mod_repo(alias, **kwargs): repostr += 'src/gz ' if kwargs['compressed'] else 'src' else: repostr += 'src/gz' if source['compressed'] else 'src' - repo_alias = kwargs['alias'] if 'alias' in kwargs else alias + repo_alias = kwargs['alias'] if 'alias' in kwargs else repo if ' ' in repo_alias: repostr += ' "{0}"'.format(repo_alias) else: repostr += ' {0}'.format(repo_alias) repostr += ' {0}'.format(kwargs['uri'] if 'uri' in kwargs else source['uri']) - _mod_repo_in_file(alias, repostr, source['file']) + _mod_repo_in_file(repo, repostr, source['file']) elif uri and source['uri'] == uri: raise CommandExecutionError( 'Repository \'{0}\' already exists as \'{1}\'.'.format(uri, source['name'])) @@ -1244,12 +1244,12 @@ def mod_repo(alias, **kwargs): # Need to add a new repo if 'uri' not in kwargs: raise CommandExecutionError( - 'Repository \'{0}\' not found and no URI passed to create one.'.format(alias)) + 'Repository \'{0}\' not found and no URI passed to create one.'.format(repo)) # If compressed is not defined, assume True compressed = kwargs['compressed'] if 'compressed' in kwargs else True # If enabled is not defined, assume True enabled = kwargs['enabled'] if 'enabled' in kwargs else True - _add_new_repo(alias, kwargs['uri'], compressed, enabled) + _add_new_repo(repo, kwargs['uri'], compressed, enabled) if 'refresh' in kwargs: refresh_db() From 0e0c42e204dce003f7da1d4bda14a8331eecf231 Mon Sep 17 00:00:00 2001 From: Matt Phillips Date: Wed, 10 Apr 2019 09:41:25 -0400 Subject: [PATCH 118/340] 2018 backport: modules.cmdmod: handle windows environ better python exposes an nt.environ for case insensitive environment behavior that is native to windows; so it makes sense to use this instead of os.environ to avoid enexpected behavior and failure. 
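The heart of the change is simply choosing the right mapping before copying it into the environment handed to the child process. Pulled out as a standalone sketch (salt.utils.platform.is_windows is the helper the surrounding module already uses; treat this as an illustration, not the literal diff):

    import os

    import salt.utils.platform

    def base_run_env(extra=None):
        if salt.utils.platform.is_windows():
            # nt.environ exposes the case handling for variable names that is
            # native to Windows, while os.environ normalises the names
            # (see https://bugs.python.org/issue28824).
            import nt
            run_env = nt.environ.copy()
        else:
            run_env = os.environ.copy()
        run_env.update(extra or {})
        return run_env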
further detail: https://bugs.python.org/issue28824 --- salt/modules/cmdmod.py | 13 +++++++++++-- tests/integration/modules/test_cmdmod.py | 9 +++++++++ 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py index a3b2e406e43d..b48f558ddfad 100644 --- a/salt/modules/cmdmod.py +++ b/salt/modules/cmdmod.py @@ -539,7 +539,11 @@ def _get_stripped(cmd): run_env = env else: - run_env = os.environ.copy() + if salt.utils.platform.is_windows(): + import nt + run_env = nt.environ.copy() + else: + run_env = os.environ.copy() run_env.update(env) if prepend_path: @@ -3033,7 +3037,12 @@ def shell_info(shell, list_modules=False): # salt-call will general have home set, the salt-minion service may not # We need to assume ports of unix shells to windows will look after # themselves in setting HOME as they do it in many different ways - newenv = os.environ + if salt.utils.platform.is_windows(): + import nt + newenv = nt.environ + else: + newenv = os.environ + if ('HOME' not in newenv) and (not salt.utils.platform.is_windows()): newenv['HOME'] = os.path.expanduser('~') log.debug('HOME environment set to %s', newenv['HOME']) diff --git a/tests/integration/modules/test_cmdmod.py b/tests/integration/modules/test_cmdmod.py index 84908034195a..bb8c82b20472 100644 --- a/tests/integration/modules/test_cmdmod.py +++ b/tests/integration/modules/test_cmdmod.py @@ -380,3 +380,12 @@ def test_cmd_run_whoami(self): self.assertIn('administrator', cmd) else: self.assertEqual('root', cmd) + + @skipIf(not salt.utils.platform.is_windows(), 'minion is not windows') + def test_windows_env_handling(self): + ''' + Ensure that nt.environ is used properly with cmd.run* + ''' + out = self.run_function('cmd.run', ['set'], env={"abc": "123", "ABC": "456"}).splitlines() + self.assertIn('abc=123', out) + self.assertIn('ABC=456', out) From 736a43752094213afefdd00f5ef1ccc5b8e67d62 Mon Sep 17 00:00:00 2001 From: Matt Phillips Date: Wed, 10 Apr 2019 09:39:32 -0400 Subject: [PATCH 119/340] 2019.2 bacpkport: modules.cmdmod: handle windows environ better python exposes an nt.environ for case insensitive environment behavior that is native to windows; so it makes sense to use this instead of os.environ to avoid enexpected behavior and failure. 
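Both branches also gain an integration test that drives this through cmd.run; the same check can be done by hand on a Windows minion with a short script (hypothetical usage, assuming a configured minion and the standard salt.client.Caller interface):

    import salt.client

    # 'set' makes the child shell print the environment it actually received.
    caller = salt.client.Caller()
    out = caller.cmd('cmd.run', 'set', env={'abc': '123', 'ABC': '456'})
    lines = out.splitlines()
    # With nt.environ in place, both casings survive into the child process.
    assert 'abc=123' in lines
    assert 'ABC=456' in lines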
further detail: https://bugs.python.org/issue28824 --- salt/modules/cmdmod.py | 13 +++++++++++-- tests/integration/modules/test_cmdmod.py | 9 +++++++++ 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/salt/modules/cmdmod.py b/salt/modules/cmdmod.py index 81c4d3f81145..a733e396ce57 100644 --- a/salt/modules/cmdmod.py +++ b/salt/modules/cmdmod.py @@ -570,7 +570,11 @@ def _get_stripped(cmd): run_env = env else: - run_env = os.environ.copy() + if salt.utils.platform.is_windows(): + import nt + run_env = nt.environ.copy() + else: + run_env = os.environ.copy() run_env.update(env) if prepend_path: @@ -3284,7 +3288,12 @@ def shell_info(shell, list_modules=False): # salt-call will general have home set, the salt-minion service may not # We need to assume ports of unix shells to windows will look after # themselves in setting HOME as they do it in many different ways - newenv = os.environ + if salt.utils.platform.is_windows(): + import nt + newenv = nt.environ + else: + newenv = os.environ + if ('HOME' not in newenv) and (not salt.utils.platform.is_windows()): newenv['HOME'] = os.path.expanduser('~') log.debug('HOME environment set to %s', newenv['HOME']) diff --git a/tests/integration/modules/test_cmdmod.py b/tests/integration/modules/test_cmdmod.py index abbb73eb2aa1..ef96f5843064 100644 --- a/tests/integration/modules/test_cmdmod.py +++ b/tests/integration/modules/test_cmdmod.py @@ -403,3 +403,12 @@ def test_cmd_run_whoami(self): self.assertIn('administrator', cmd) else: self.assertEqual('root', cmd) + + @skipIf(not salt.utils.platform.is_windows(), 'minion is not windows') + def test_windows_env_handling(self): + ''' + Ensure that nt.environ is used properly with cmd.run* + ''' + out = self.run_function('cmd.run', ['set'], env={"abc": "123", "ABC": "456"}).splitlines() + self.assertIn('abc=123', out) + self.assertIn('ABC=456', out) From f1d0e025ec444a8311c6788dff90cf8f087e04a7 Mon Sep 17 00:00:00 2001 From: "Daniel A. 
Wozniak" Date: Mon, 8 Apr 2019 16:01:34 +0000 Subject: [PATCH 120/340] Re-raise queued exceptions with traceback --- salt/transport/ipc.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py index 69032ff352c6..5013feb75c3a 100644 --- a/salt/transport/ipc.py +++ b/salt/transport/ipc.py @@ -11,6 +11,7 @@ import weakref import time import threading +import sys # Import 3rd-party libs import msgpack @@ -85,6 +86,11 @@ def _done_callback(self, future): self.set_exception(exc) +class IPCExceptionProxy(object): + def __init__(self, orig_info): + self.orig_info = orig_info + + class IPCServer(object): ''' A Tornado IPC server very similar to Tornado's TCPServer class @@ -694,6 +700,7 @@ def _read(self, timeout, callback=None): break except Exception as exc: log.error('Exception occurred in Subscriber while handling stream: %s', exc) + exc = IPCExceptionProxy(sys.exc_info()) self._feed_subscribers([exc]) break @@ -801,13 +808,19 @@ def read_async(self, callback, timeout=None): raise tornado.gen.Return(None) if data is None: break - elif isinstance(data, Exception): - raise data + elif isinstance(data, IPCExceptionProxy): + self.reraise(data.orig_info) elif callback: self.service.io_loop.spawn_callback(callback, data) else: raise tornado.gen.Return(data) + def reraise(self, exc_info): + if six.PY2: + raise exc_info[0], exc_info[1], exc_info[2] + else: + raise exc_info[0].with_traceback(exc_info[1], exc_info[2]) + def read_sync(self, timeout=None): ''' Read a message from an IPC socket From 024b4b61ebcf17be0cab112c2c8a675a17260794 Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Mon, 8 Apr 2019 16:04:22 +0000 Subject: [PATCH 121/340] Remove IPCClient singleton --- salt/transport/ipc.py | 65 ++++++------------------------------------- 1 file changed, 8 insertions(+), 57 deletions(-) diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py index 5013feb75c3a..84f8959e7825 100644 --- a/salt/transport/ipc.py +++ b/salt/transport/ipc.py @@ -250,36 +250,7 @@ class IPCClient(object): case it is used as the port for a tcp localhost connection. 
''' - - # Create singleton map between two sockets - instance_map = weakref.WeakKeyDictionary() - - def __new__(cls, socket_path, io_loop=None): - io_loop = io_loop or tornado.ioloop.IOLoop.current() - if io_loop not in IPCClient.instance_map: - IPCClient.instance_map[io_loop] = weakref.WeakValueDictionary() - loop_instance_map = IPCClient.instance_map[io_loop] - - # FIXME - key = six.text_type(socket_path) - - client = loop_instance_map.get(key) - if client is None: - log.debug('Initializing new IPCClient for path: %s', key) - client = object.__new__(cls) - # FIXME - client.__singleton_init__(io_loop=io_loop, socket_path=socket_path) - client._instance_key = key - loop_instance_map[key] = client - client._refcount = 1 - client._refcount_lock = threading.RLock() - else: - log.debug('Re-using IPCClient for %s', key) - with client._refcount_lock: - client._refcount += 1 - return client - - def __singleton_init__(self, socket_path, io_loop=None): + def __init__(self, socket_path, io_loop=None): ''' Create a new IPC client @@ -298,10 +269,6 @@ def __singleton_init__(self, socket_path, io_loop=None): encoding = 'utf-8' self.unpacker = msgpack.Unpacker(encoding=encoding) - def __init__(self, socket_path, io_loop=None): - # Handled by singleton __new__ - pass - def connected(self): return self.stream is not None and not self.stream.closed() @@ -373,16 +340,11 @@ def _connect(self, timeout=None): def __del__(self): try: - with self._refcount_lock: - # Make sure we actually close no matter if something - # went wrong with our ref counting - self._refcount = 1 - try: - self.close() - except socket.error as exc: - if exc.errno != errno.EBADF: - # If its not a bad file descriptor error, raise - raise + self.close() + except socket.error as exc: + if exc.errno != errno.EBADF: + # If its not a bad file descriptor error, raise + raise except TypeError: # This is raised when Python's GC has collected objects which # would be needed when calling self.close() @@ -414,17 +376,6 @@ def close(self): if self.stream is not None and not self.stream.closed(): self.stream.close() - # Remove the entry from the instance map so - # that a closed entry may not be reused. - # This forces this operation even if the reference - # count of the entry has not yet gone to zero. - if self.io_loop in self.__class__.instance_map: - loop_instance_map = self.__class__.instance_map[self.io_loop] - if self._instance_key in loop_instance_map: - del loop_instance_map[self._instance_key] - if not loop_instance_map: - del self.__class__.instance_map[self.io_loop] - class IPCMessageClient(IPCClient): ''' @@ -643,8 +594,8 @@ class IPCMessageSubscriberService(IPCClient): To use this refer to IPCMessageSubscriber documentation. ''' - def __singleton_init__(self, socket_path, io_loop=None): - super(IPCMessageSubscriberService, self).__singleton_init__( + def __init__(self, socket_path, io_loop=None): + super(IPCMessageSubscriberService, self).__init__( socket_path, io_loop=io_loop) self.saved_data = [] self._read_in_progress = Lock() From c80da32234a23f45f7fd4f43afe1bc19db9ca39d Mon Sep 17 00:00:00 2001 From: "Daniel A. 
Wozniak" Date: Mon, 8 Apr 2019 16:55:44 +0000 Subject: [PATCH 122/340] Fix linter issues --- salt/transport/ipc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py index 84f8959e7825..7f94f45f157f 100644 --- a/salt/transport/ipc.py +++ b/salt/transport/ipc.py @@ -768,7 +768,7 @@ def read_async(self, callback, timeout=None): def reraise(self, exc_info): if six.PY2: - raise exc_info[0], exc_info[1], exc_info[2] + raise exc_info[0], exc_info[1], exc_info[2] # pylint: disable=W1699 else: raise exc_info[0].with_traceback(exc_info[1], exc_info[2]) From 6d80789cf1736f00fb9618fa0e639a4b7683e1eb Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Mon, 8 Apr 2019 17:17:20 +0000 Subject: [PATCH 123/340] Fix ipc unit tests --- tests/unit/transport/test_ipc.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/tests/unit/transport/test_ipc.py b/tests/unit/transport/test_ipc.py index 5cc5a70ee89e..9626ab38d39a 100644 --- a/tests/unit/transport/test_ipc.py +++ b/tests/unit/transport/test_ipc.py @@ -86,13 +86,14 @@ class IPCMessageClient(BaseIPCReqCase): ''' def _get_channel(self): - channel = salt.transport.ipc.IPCMessageClient( - socket_path=self.socket_path, - io_loop=self.io_loop, - ) - channel.connect(callback=self.stop) - self.wait() - return channel + if not hasattr(self, 'channel') or self.channel is None: + self.channel = salt.transport.ipc.IPCMessageClient( + socket_path=self.socket_path, + io_loop=self.io_loop, + ) + self.channel.connect(callback=self.stop) + self.wait() + return self.channel def setUp(self): super(IPCMessageClient, self).setUp() @@ -107,6 +108,8 @@ def tearDown(self): if exc.errno != errno.EBADF: # If its not a bad file descriptor error, raise raise + finally: + self.channel = None def test_singleton(self): channel = self._get_channel() From 25f5a90bd637d9691ea5f709aad9b5d7b8125928 Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Tue, 9 Apr 2019 17:58:13 +0000 Subject: [PATCH 124/340] Use six.reraise for py3 compatability --- salt/transport/ipc.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py index 7f94f45f157f..920951a7bf48 100644 --- a/salt/transport/ipc.py +++ b/salt/transport/ipc.py @@ -760,18 +760,12 @@ def read_async(self, callback, timeout=None): if data is None: break elif isinstance(data, IPCExceptionProxy): - self.reraise(data.orig_info) + six.reraise(*data.orig_info) elif callback: self.service.io_loop.spawn_callback(callback, data) else: raise tornado.gen.Return(data) - def reraise(self, exc_info): - if six.PY2: - raise exc_info[0], exc_info[1], exc_info[2] # pylint: disable=W1699 - else: - raise exc_info[0].with_traceback(exc_info[1], exc_info[2]) - def read_sync(self, timeout=None): ''' Read a message from an IPC socket From c9ec8b1f9721e70317335e411987202fe4a25e5e Mon Sep 17 00:00:00 2001 From: "Daniel A. 
Wozniak" Date: Wed, 10 Apr 2019 03:32:20 +0000 Subject: [PATCH 125/340] Ensure exceptions in service future are handled --- salt/transport/ipc.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py index 920951a7bf48..65c090e2f7af 100644 --- a/salt/transport/ipc.py +++ b/salt/transport/ipc.py @@ -600,6 +600,7 @@ def __init__(self, socket_path, io_loop=None): self.saved_data = [] self._read_in_progress = Lock() self.handlers = weakref.WeakSet() + self.read_stream_future = None def _subscribe(self, handler): self.handlers.add(handler) @@ -627,16 +628,16 @@ def _read(self, timeout, callback=None): if timeout is None: timeout = 5 - read_stream_future = None + self.read_stream_future = None while self._has_subscribers(): - if read_stream_future is None: - read_stream_future = self.stream.read_bytes(4096, partial=True) + if self.read_stream_future is None: + self.read_stream_future = self.stream.read_bytes(4096, partial=True) try: wire_bytes = yield FutureWithTimeout(self.io_loop, - read_stream_future, + self.read_stream_future, timeout) - read_stream_future = None + self.read_stream_future = None self.unpacker.feed(wire_bytes) msgs = [msg['body'] for msg in self.unpacker] @@ -676,7 +677,7 @@ def read(self, handler, timeout=None): except Exception as exc: log.error('Exception occurred while Subscriber connecting: %s', exc) yield tornado.gen.sleep(1) - self._read(timeout) + yield self._read(timeout) def close(self): ''' @@ -684,8 +685,11 @@ def close(self): Sockets and filehandles should be closed explicitly, to prevent leaks. ''' - if not self._closing: - super(IPCMessageSubscriberService, self).close() + super(IPCMessageSubscriberService, self).close() + if self.read_stream_future is not None and self.read_stream_future.done(): + exc = self.read_stream_future.exception() + if exc and not isinstance(exc, tornado.iostream.StreamClosedError): + log.error("Read future returned exception %r", exc) def __del__(self): if IPCMessageSubscriberService in globals(): From 82150f0241107a1a02a4fa54da74c4b202c6aeb3 Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Wed, 10 Apr 2019 18:01:52 +0000 Subject: [PATCH 126/340] Remove un-used import --- salt/transport/ipc.py | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py index 65c090e2f7af..e0937f7a5b0b 100644 --- a/salt/transport/ipc.py +++ b/salt/transport/ipc.py @@ -10,7 +10,6 @@ import socket import weakref import time -import threading import sys # Import 3rd-party libs From 0141b7f74484abcb3d6e418df56664a72c5957cb Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Wed, 10 Apr 2019 18:20:31 +0000 Subject: [PATCH 127/340] Fix merge wart --- salt/transport/ipc.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py index e0937f7a5b0b..c23af2956647 100644 --- a/salt/transport/ipc.py +++ b/salt/transport/ipc.py @@ -358,16 +358,6 @@ def close(self): if self._closing: return - if self._refcount > 1: - # Decrease refcount - with self._refcount_lock: - self._refcount -= 1 - log.debug( - 'This is not the last %s instance. Not closing yet.', - self.__class__.__name__ - ) - return - self._closing = True log.debug('Closing %s instance', self.__class__.__name__) From f45d29b222e0d9686b12a5fb55ebdaf4125a2a3f Mon Sep 17 00:00:00 2001 From: "Daniel A. 
Wozniak" Date: Wed, 10 Apr 2019 19:16:14 +0000 Subject: [PATCH 128/340] Remove un-needed test --- tests/unit/transport/test_ipc.py | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/tests/unit/transport/test_ipc.py b/tests/unit/transport/test_ipc.py index 9626ab38d39a..3f5ad99f8a70 100644 --- a/tests/unit/transport/test_ipc.py +++ b/tests/unit/transport/test_ipc.py @@ -123,23 +123,6 @@ def test_singleton(self): self.wait() self.assertEqual(self.payloads[0], msg) - def test_last_singleton_instance_closes(self): - channel = self._get_channel() - msg = {'foo': 'bar', 'stop': True} - log.debug('Sending msg1') - self.channel.send(msg) - self.wait() - self.assertEqual(self.payloads[0], msg) - channel.close() - # Since this is a singleton, and only the last singleton instance - # should actually close the connection, the next code should still - # work and not timeout - msg = {'bar': 'foo', 'stop': True} - log.debug('Sending msg2') - self.channel.send(msg) - self.wait() - self.assertEqual(self.payloads[1], msg) - def test_basic_send(self): msg = {'foo': 'bar', 'stop': True} self.channel.send(msg) From 7745242f294f1eb93775dc8c70c7534edc85d084 Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Wed, 10 Apr 2019 20:51:19 +0000 Subject: [PATCH 129/340] Revert "Fix #22063: pillar wildcard support include" This reverts commit fbab73a35c2a36e54ace814f2b45119e320b61e3. --- salt/pillar/__init__.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/salt/pillar/__init__.py b/salt/pillar/__init__.py index 94dd9695ca1d..3f0369330548 100644 --- a/salt/pillar/__init__.py +++ b/salt/pillar/__init__.py @@ -763,8 +763,6 @@ def render_pstate(self, sls, saltenv, mods, defaults=None): else: # render included state(s) include_states = [] - - matched_pstates = [] for sub_sls in state.pop('include'): if isinstance(sub_sls, dict): sub_sls, v = next(six.iteritems(sub_sls)) @@ -772,16 +770,6 @@ def render_pstate(self, sls, saltenv, mods, defaults=None): key = v.get('key', None) else: key = None - - try: - matched_pstates += fnmatch.filter(self.avail[saltenv], sub_sls) - except KeyError: - errors.extend( - ['No matching pillar environment for environment ' - '\'{0}\' found'.format(saltenv)] - ) - - for sub_sls in set(matched_pstates): if sub_sls not in mods: nstate, mods, err = self.render_pstate( sub_sls, From c79f496021fd4dbd327bfcacb8fc33a01c9ecb7f Mon Sep 17 00:00:00 2001 From: "Daniel A. 
Wozniak" Date: Wed, 10 Apr 2019 20:55:46 +0000 Subject: [PATCH 130/340] Add pillar include tests --- .../files/pillar/base/glob_include.sls | 2 ++ .../files/pillar/base/glob_include_a.sls | 2 ++ .../files/pillar/base/glob_include_b.sls | 2 ++ .../files/pillar/base/include-a.sls | 2 ++ .../files/pillar/base/include-b.sls | 2 ++ .../integration/files/pillar/base/include.sls | 5 ++++ .../integration/pillar/test_pillar_include.py | 25 +++++++++++++++++++ 7 files changed, 40 insertions(+) create mode 100644 tests/integration/files/pillar/base/glob_include.sls create mode 100644 tests/integration/files/pillar/base/glob_include_a.sls create mode 100644 tests/integration/files/pillar/base/glob_include_b.sls create mode 100644 tests/integration/files/pillar/base/include-a.sls create mode 100644 tests/integration/files/pillar/base/include-b.sls create mode 100644 tests/integration/files/pillar/base/include.sls create mode 100644 tests/integration/pillar/test_pillar_include.py diff --git a/tests/integration/files/pillar/base/glob_include.sls b/tests/integration/files/pillar/base/glob_include.sls new file mode 100644 index 000000000000..4950242df7be --- /dev/null +++ b/tests/integration/files/pillar/base/glob_include.sls @@ -0,0 +1,2 @@ +include: + - 'glob_include*' diff --git a/tests/integration/files/pillar/base/glob_include_a.sls b/tests/integration/files/pillar/base/glob_include_a.sls new file mode 100644 index 000000000000..226c294cf9e5 --- /dev/null +++ b/tests/integration/files/pillar/base/glob_include_a.sls @@ -0,0 +1,2 @@ +glob-a: + - 'Entry A' diff --git a/tests/integration/files/pillar/base/glob_include_b.sls b/tests/integration/files/pillar/base/glob_include_b.sls new file mode 100644 index 000000000000..257e261283ff --- /dev/null +++ b/tests/integration/files/pillar/base/glob_include_b.sls @@ -0,0 +1,2 @@ +glob-b: + - 'Entry B' diff --git a/tests/integration/files/pillar/base/include-a.sls b/tests/integration/files/pillar/base/include-a.sls new file mode 100644 index 000000000000..1df26dbad0da --- /dev/null +++ b/tests/integration/files/pillar/base/include-a.sls @@ -0,0 +1,2 @@ +a: + - 'Entry A' diff --git a/tests/integration/files/pillar/base/include-b.sls b/tests/integration/files/pillar/base/include-b.sls new file mode 100644 index 000000000000..de1612327a16 --- /dev/null +++ b/tests/integration/files/pillar/base/include-b.sls @@ -0,0 +1,2 @@ +b: + - 'Entry B' diff --git a/tests/integration/files/pillar/base/include.sls b/tests/integration/files/pillar/base/include.sls new file mode 100644 index 000000000000..cae2926c21ba --- /dev/null +++ b/tests/integration/files/pillar/base/include.sls @@ -0,0 +1,5 @@ +include: + - include-a: + key: element:a + - include-b: + key: element:b diff --git a/tests/integration/pillar/test_pillar_include.py b/tests/integration/pillar/test_pillar_include.py new file mode 100644 index 000000000000..2145df276f07 --- /dev/null +++ b/tests/integration/pillar/test_pillar_include.py @@ -0,0 +1,25 @@ +from __future__ import unicode_literals +from tests.support.case import ModuleCase + + +class PillarIncludeTest(ModuleCase): + + def test_pillar_include(self): + ''' + Test pillar include + ''' + ret = self.minion_run('pillar.items') + assert 'a' in ret['element'] + assert ret['element']['a'] == {'a': ['Entry A']} + assert 'b' in ret['element'] + assert ret['element']['b'] == {'b': ['Entry B']} + + def test_pillar_glob_include(self): + ''' + Test pillar include via glob pattern + ''' + ret = self.minion_run('pillar.items') + assert 'glob-a' in ret + assert 
ret['glob-a'] == 'Entry A' + assert 'glob-b' in ret + assert ret['glob-b'] == 'Entry B' From e84cd413a9eedfae7e08b289de6714e3a7bd79c3 Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Wed, 10 Apr 2019 20:56:07 +0000 Subject: [PATCH 131/340] Fix pillar include wart --- salt/pillar/__init__.py | 60 +++++++++++++++++++++++------------------ 1 file changed, 34 insertions(+), 26 deletions(-) diff --git a/salt/pillar/__init__.py b/salt/pillar/__init__.py index 3f0369330548..61e7ac433f06 100644 --- a/salt/pillar/__init__.py +++ b/salt/pillar/__init__.py @@ -770,32 +770,40 @@ def render_pstate(self, sls, saltenv, mods, defaults=None): key = v.get('key', None) else: key = None - if sub_sls not in mods: - nstate, mods, err = self.render_pstate( - sub_sls, - saltenv, - mods, - defaults - ) - if nstate: - if key: - # If key is x:y, convert it to {x: {y: nstate}} - for key_fragment in reversed(key.split(":")): - nstate = { - key_fragment: nstate - } - if not self.opts.get('pillar_includes_override_sls', False): - include_states.append(nstate) - else: - state = merge( - state, - nstate, - self.merge_strategy, - self.opts.get('renderer', 'yaml'), - self.opts.get('pillar_merge_lists', False)) - if err: - errors += err - + try: + matched_pstates = fnmatch.filter(self.avail[saltenv], sub_sls) + except KeyError: + errors.extend( + ['No matching pillar environment for environment ' + '\'{0}\' found'.format(saltenv)] + ) + matched_pstates = [sub_sls] + for m_sub_sls in matched_pstates: + if m_sub_sls not in mods: + nstate, mods, err = self.render_pstate( + m_sub_sls, + saltenv, + mods, + defaults + ) + if nstate: + if key: + # If key is x:y, convert it to {x: {y: nstate}} + for key_fragment in reversed(key.split(":")): + nstate = { + key_fragment: nstate + } + if not self.opts.get('pillar_includes_override_sls', False): + include_states.append(nstate) + else: + state = merge( + state, + nstate, + self.merge_strategy, + self.opts.get('renderer', 'yaml'), + self.opts.get('pillar_merge_lists', False)) + if err: + errors += err if not self.opts.get('pillar_includes_override_sls', False): # merge included state(s) with the current state # merged last to ensure that its values are From 5dc5de9dab24cf698dc26db24d1e1697472c2e05 Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Wed, 10 Apr 2019 21:03:43 +0000 Subject: [PATCH 132/340] Use file encoding and add docstring --- tests/integration/pillar/test_pillar_include.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/integration/pillar/test_pillar_include.py b/tests/integration/pillar/test_pillar_include.py index 2145df276f07..411c03f06d1e 100644 --- a/tests/integration/pillar/test_pillar_include.py +++ b/tests/integration/pillar/test_pillar_include.py @@ -1,4 +1,9 @@ +# -*- coding: utf-8 -*- +''' +Pillar include tests +''' from __future__ import unicode_literals + from tests.support.case import ModuleCase From 1a33bde52cc2dc6b009158374f3ddec2a94d5c2d Mon Sep 17 00:00:00 2001 From: "Daniel A. 
Wozniak" Date: Wed, 10 Apr 2019 21:20:22 +0000 Subject: [PATCH 133/340] import __future__.absolute_import --- tests/integration/pillar/test_pillar_include.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/pillar/test_pillar_include.py b/tests/integration/pillar/test_pillar_include.py index 411c03f06d1e..3875179264ea 100644 --- a/tests/integration/pillar/test_pillar_include.py +++ b/tests/integration/pillar/test_pillar_include.py @@ -2,7 +2,7 @@ ''' Pillar include tests ''' -from __future__ import unicode_literals +from __future__ import absolute_import, unicode_literals from tests.support.case import ModuleCase From 307cec64e11315d41d4b1b65b04d4fe5445ade2f Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Wed, 10 Apr 2019 21:51:25 +0000 Subject: [PATCH 134/340] Fix pillar tests --- tests/integration/files/pillar/base/top.sls | 2 ++ tests/integration/pillar/test_pillar_include.py | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/integration/files/pillar/base/top.sls b/tests/integration/files/pillar/base/top.sls index 0bcfdfb9bd7c..c7c2a8342c6a 100644 --- a/tests/integration/files/pillar/base/top.sls +++ b/tests/integration/files/pillar/base/top.sls @@ -3,6 +3,8 @@ base: - generic - blackout - sdb + - include + - glob_include 'sub_minion': - sdb - generic diff --git a/tests/integration/pillar/test_pillar_include.py b/tests/integration/pillar/test_pillar_include.py index 3875179264ea..1451aee13cd4 100644 --- a/tests/integration/pillar/test_pillar_include.py +++ b/tests/integration/pillar/test_pillar_include.py @@ -25,6 +25,6 @@ def test_pillar_glob_include(self): ''' ret = self.minion_run('pillar.items') assert 'glob-a' in ret - assert ret['glob-a'] == 'Entry A' + assert ret['glob-a'] == ['Entry A'] assert 'glob-b' in ret - assert ret['glob-b'] == 'Entry B' + assert ret['glob-b'] == ['Entry B'] From b0af11f3d3c677cae765388f2ea62a31a1253248 Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Wed, 10 Apr 2019 22:41:47 +0000 Subject: [PATCH 135/340] Run include tests when pillar source changes --- tests/filename_map.yml | 1 + tests/unit/test_module_names.py | 1 + 2 files changed, 2 insertions(+) diff --git a/tests/filename_map.yml b/tests/filename_map.yml index 02e622cee106..3f4dc01cb370 100644 --- a/tests/filename_map.yml +++ b/tests/filename_map.yml @@ -192,6 +192,7 @@ salt/output/*: salt/pillar/__init__.py: - integration.minion.test_pillar + - integration.pillar.test_pillar_include salt/(cli/run\.py|runner\.py): - integration.shell.test_runner diff --git a/tests/unit/test_module_names.py b/tests/unit/test_module_names.py index 1efcb5869e8f..87348d7885cd 100644 --- a/tests/unit/test_module_names.py +++ b/tests/unit/test_module_names.py @@ -142,6 +142,7 @@ def test_module_name_source_match(self): 'integration.netapi.rest_tornado.test_app', 'integration.netapi.rest_cherrypy.test_app_pam', 'integration.output.test_output', + 'integration.pillar.test_pillar_include', 'integration.proxy.test_shell', 'integration.proxy.test_simple', 'integration.reactor.test_reactor', From e3e1d2ed087b5c9cb50683e958345534de19c574 Mon Sep 17 00:00:00 2001 From: "Daniel A. 
Wozniak" Date: Tue, 19 Feb 2019 18:34:28 +0000 Subject: [PATCH 136/340] fix reactor ping test --- tests/support/mixins.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/support/mixins.py b/tests/support/mixins.py index fab04668e421..f45b3c62ca92 100644 --- a/tests/support/mixins.py +++ b/tests/support/mixins.py @@ -662,7 +662,7 @@ def _clean_queue(): except Exception as exc: # This is broad but we'll see all kinds of issues right now # if we drop the proc out from under the socket while we're reading - log.exception("Exception caught while getting events %r", exc) + log.exception("Exception caught while getting events %r", exc) q.put(events) From 75ddeb30740333803d41d3ff2790491595d8d840 Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Tue, 19 Feb 2019 11:44:44 -0700 Subject: [PATCH 137/340] fix linter --- tests/support/mixins.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/support/mixins.py b/tests/support/mixins.py index f45b3c62ca92..fab04668e421 100644 --- a/tests/support/mixins.py +++ b/tests/support/mixins.py @@ -662,7 +662,7 @@ def _clean_queue(): except Exception as exc: # This is broad but we'll see all kinds of issues right now # if we drop the proc out from under the socket while we're reading - log.exception("Exception caught while getting events %r", exc) + log.exception("Exception caught while getting events %r", exc) q.put(events) From 7836ac449fbca7920f825d49dea7b0013be31759 Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Thu, 11 Apr 2019 01:12:25 +0000 Subject: [PATCH 138/340] Make SaltEvent and NamespacedEvent context aware --- salt/utils/event.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/salt/utils/event.py b/salt/utils/event.py index 4dc336a5d9a1..a58f8e5f00f4 100644 --- a/salt/utils/event.py +++ b/salt/utils/event.py @@ -884,6 +884,12 @@ def __del__(self): except Exception: pass + def __enter__(self): + return self + + def __exit__(self): + self.destroy() + class MasterEvent(SaltEvent): ''' @@ -932,6 +938,15 @@ def fire_event(self, data, tag): if self.print_func is not None: self.print_func(tag, data) + def destroy(self): + self.event.destroy() + + def __enter__(self): + return self + + def __exit__(self): + self.destroy() + class MinionEvent(SaltEvent): ''' From 43d2d544f52eec440fc4bb8edb2ec2e6c9b3114f Mon Sep 17 00:00:00 2001 From: "Daniel A. 
Wozniak" Date: Thu, 11 Apr 2019 01:12:41 +0000 Subject: [PATCH 139/340] Handle event closures explicity in salt.utils.cloud --- salt/utils/cloud.py | 59 +++++++++++++++++++++------------------------ 1 file changed, 28 insertions(+), 31 deletions(-) diff --git a/salt/utils/cloud.py b/salt/utils/cloud.py index 691ccf1f56b3..941b9c09e43b 100644 --- a/salt/utils/cloud.py +++ b/salt/utils/cloud.py @@ -1772,25 +1772,22 @@ def fire_event(key, msg, tag, sock_dir, args=None, transport='zeromq'): ''' Fire deploy action ''' - event = salt.utils.event.get_event( - 'master', - sock_dir, - transport, - listen=False) - - try: - event.fire_event(msg, tag) - except ValueError: - # We're using at least a 0.17.x version of salt - if isinstance(args, dict): - args[key] = msg - else: - args = {key: msg} - event.fire_event(args, tag) + with salt.utils.event.get_event( 'master', sock_dir, transport, listen=False) as event: + try: + event.fire_event(msg, tag) + except ValueError: + # We're using at least a 0.17.x version of salt + if isinstance(args, dict): + args[key] = msg + else: + args = {key: msg} + event.fire_event(args, tag) + finally: + event.destroy() - # https://github.com/zeromq/pyzmq/issues/173#issuecomment-4037083 - # Assertion failed: get_load () == 0 (poller_base.cpp:32) - time.sleep(0.025) + # https://github.com/zeromq/pyzmq/issues/173#issuecomment-4037083 + # Assertion failed: get_load () == 0 (poller_base.cpp:32) + time.sleep(0.025) def _exec_ssh_cmd(cmd, error_msg=None, allow_failure=False, **kwargs): @@ -2257,19 +2254,19 @@ def check_auth(name, sock_dir=None, queue=None, timeout=300): This function is called from a multiprocess instance, to wait for a minion to become available to receive salt commands ''' - event = salt.utils.event.SaltEvent('master', sock_dir, listen=True) - starttime = time.mktime(time.localtime()) - newtimeout = timeout - log.debug('In check_auth, waiting for %s to become available', name) - while newtimeout > 0: - newtimeout = timeout - (time.mktime(time.localtime()) - starttime) - ret = event.get_event(full=True) - if ret is None: - continue - if ret['tag'] == 'minion_start' and ret['data']['id'] == name: - queue.put(name) - newtimeout = 0 - log.debug('Minion %s is ready to receive commands', name) + with salt.utils.event.SaltEvent('master', sock_dir, listen=True) as event: + starttime = time.mktime(time.localtime()) + newtimeout = timeout + log.debug('In check_auth, waiting for %s to become available', name) + while newtimeout > 0: + newtimeout = timeout - (time.mktime(time.localtime()) - starttime) + ret = event.get_event(full=True) + if ret is None: + continue + if ret['tag'] == 'minion_start' and ret['data']['id'] == name: + queue.put(name) + newtimeout = 0 + log.debug('Minion %s is ready to receive commands', name) def ip_to_int(ip): From 8dd5b98e562e1ab7f02009343cc915667acb51c7 Mon Sep 17 00:00:00 2001 From: "Daniel A. 
Wozniak" Date: Thu, 11 Apr 2019 01:13:08 +0000 Subject: [PATCH 140/340] Handle event closures explicity in salt.utils.reactor --- salt/utils/reactor.py | 62 +++++++++++++++++++++---------------------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/salt/utils/reactor.py b/salt/utils/reactor.py index 903283b1389c..69c7dffbec92 100644 --- a/salt/utils/reactor.py +++ b/salt/utils/reactor.py @@ -229,43 +229,43 @@ def run(self): salt.utils.process.appendproctitle(self.__class__.__name__) # instantiate some classes inside our new process - self.event = salt.utils.event.get_event( + with salt.utils.event.get_event( self.opts['__role'], self.opts['sock_dir'], self.opts['transport'], opts=self.opts, - listen=True) - self.wrap = ReactWrap(self.opts) + listen=True) as event: + self.wrap = ReactWrap(self.opts) - for data in self.event.iter_events(full=True): - # skip all events fired by ourselves - if data['data'].get('user') == self.wrap.event_user: - continue - if data['tag'].endswith('salt/reactors/manage/add'): - _data = data['data'] - res = self.add_reactor(_data['event'], _data['reactors']) - self.event.fire_event({'reactors': self.list_all(), - 'result': res}, - 'salt/reactors/manage/add-complete') - elif data['tag'].endswith('salt/reactors/manage/delete'): - _data = data['data'] - res = self.delete_reactor(_data['event']) - self.event.fire_event({'reactors': self.list_all(), - 'result': res}, - 'salt/reactors/manage/delete-complete') - elif data['tag'].endswith('salt/reactors/manage/list'): - self.event.fire_event({'reactors': self.list_all()}, - 'salt/reactors/manage/list-results') - else: - reactors = self.list_reactors(data['tag']) - if not reactors: + for data in event.iter_events(full=True): + # skip all events fired by ourselves + if data['data'].get('user') == self.wrap.event_user: continue - chunks = self.reactions(data['tag'], data['data'], reactors) - if chunks: - try: - self.call_reactions(chunks) - except SystemExit: - log.warning('Exit ignored by reactor') + if data['tag'].endswith('salt/reactors/manage/add'): + _data = data['data'] + res = self.add_reactor(_data['event'], _data['reactors']) + event.fire_event({'reactors': self.list_all(), + 'result': res}, + 'salt/reactors/manage/add-complete') + elif data['tag'].endswith('salt/reactors/manage/delete'): + _data = data['data'] + res = self.delete_reactor(_data['event']) + event.fire_event({'reactors': self.list_all(), + 'result': res}, + 'salt/reactors/manage/delete-complete') + elif data['tag'].endswith('salt/reactors/manage/list'): + event.fire_event({'reactors': self.list_all()}, + 'salt/reactors/manage/list-results') + else: + reactors = self.list_reactors(data['tag']) + if not reactors: + continue + chunks = self.reactions(data['tag'], data['data'], reactors) + if chunks: + try: + self.call_reactions(chunks) + except SystemExit: + log.warning('Exit ignored by reactor') class ReactWrap(object): From 709abcadef43aa038f75592ca0d8c7ff1b2fba4f Mon Sep 17 00:00:00 2001 From: "Daniel A. 
Wozniak" Date: Thu, 11 Apr 2019 01:13:35 +0000 Subject: [PATCH 141/340] Handle event closures explicity in salt.utils.schedule --- salt/utils/schedule.py | 104 +++++++++++++++++++++-------------------- 1 file changed, 53 insertions(+), 51 deletions(-) diff --git a/salt/utils/schedule.py b/salt/utils/schedule.py index 8d2947649f75..0c1a54344231 100644 --- a/salt/utils/schedule.py +++ b/salt/utils/schedule.py @@ -254,10 +254,10 @@ def delete_job(self, name, persist=True): log.warning("Cannot delete job %s, it's in the pillar!", name) # Fire the complete event back along with updated list of schedule - evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) - evt.fire_event({'complete': True, - 'schedule': self._get_schedule()}, - tag='/salt/minion/minion_schedule_delete_complete') + with salt.utils.event.get_event('minion', opts=self.opts, listen=False) as evt: + evt.fire_event({'complete': True, + 'schedule': self._get_schedule()}, + tag='/salt/minion/minion_schedule_delete_complete') # remove from self.intervals if name in self.intervals: @@ -287,10 +287,10 @@ def delete_job_prefix(self, name, persist=True): log.warning("Cannot delete job %s, it's in the pillar!", job) # Fire the complete event back along with updated list of schedule - evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) - evt.fire_event({'complete': True, - 'schedule': self._get_schedule()}, - tag='/salt/minion/minion_schedule_delete_complete') + with salt.utils.event.get_event('minion', opts=self.opts, listen=False) as evt: + evt.fire_event({'complete': True, + 'schedule': self._get_schedule()}, + tag='/salt/minion/minion_schedule_delete_complete') # remove from self.intervals for job in list(self.intervals.keys()): @@ -334,10 +334,10 @@ def add_job(self, data, persist=True): self.opts['schedule'].update(data) # Fire the complete event back along with updated list of schedule - evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) - evt.fire_event({'complete': True, - 'schedule': self._get_schedule()}, - tag='/salt/minion/minion_schedule_add_complete') + with salt.utils.event.get_event('minion', opts=self.opts, listen=False) as evt: + evt.fire_event({'complete': True, + 'schedule': self._get_schedule()}, + tag='/salt/minion/minion_schedule_add_complete') if persist: self.persist() @@ -354,10 +354,10 @@ def enable_job(self, name, persist=True): log.warning("Cannot modify job %s, it's in the pillar!", name) # Fire the complete event back along with updated list of schedule - evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) - evt.fire_event({'complete': True, - 'schedule': self._get_schedule()}, - tag='/salt/minion/minion_schedule_enabled_job_complete') + with salt.utils.event.get_event('minion', opts=self.opts, listen=False) as evt: + evt.fire_event({'complete': True, + 'schedule': self._get_schedule()}, + tag='/salt/minion/minion_schedule_enabled_job_complete') if persist: self.persist() @@ -373,11 +373,11 @@ def disable_job(self, name, persist=True): elif name in self._get_schedule(include_opts=False): log.warning("Cannot modify job %s, it's in the pillar!", name) - # Fire the complete event back along with updated list of schedule - evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) - evt.fire_event({'complete': True, - 'schedule': self._get_schedule()}, - tag='/salt/minion/minion_schedule_disabled_job_complete') + with salt.utils.event.get_event('minion', opts=self.opts, listen=False) as evt: + # Fire the complete 
event back along with updated list of schedule + evt.fire_event({'complete': True, + 'schedule': self._get_schedule()}, + tag='/salt/minion/minion_schedule_disabled_job_complete') if persist: self.persist() @@ -451,10 +451,10 @@ def enable_schedule(self): self.opts['schedule']['enabled'] = True # Fire the complete event back along with updated list of schedule - evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) - evt.fire_event({'complete': True, - 'schedule': self._get_schedule()}, - tag='/salt/minion/minion_schedule_enabled_complete') + with salt.utils.event.get_event('minion', opts=self.opts, listen=False) as evt: + evt.fire_event({'complete': True, + 'schedule': self._get_schedule()}, + tag='/salt/minion/minion_schedule_enabled_complete') def disable_schedule(self): ''' @@ -463,10 +463,10 @@ def disable_schedule(self): self.opts['schedule']['enabled'] = False # Fire the complete event back along with updated list of schedule - evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) - evt.fire_event({'complete': True, - 'schedule': self._get_schedule()}, - tag='/salt/minion/minion_schedule_disabled_complete') + with salt.utils.event.get_event('minion', opts=self.opts, listen=False) as evt: + evt.fire_event({'complete': True, + 'schedule': self._get_schedule()}, + tag='/salt/minion/minion_schedule_disabled_complete') def reload(self, schedule): ''' @@ -491,9 +491,9 @@ def list(self, where): schedule = self._get_schedule() # Fire the complete event back along with the list of schedule - evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) - evt.fire_event({'complete': True, 'schedule': schedule}, - tag='/salt/minion/minion_schedule_list_complete') + with salt.utils.event.get_event('minion', opts=self.opts, listen=False) as evt: + evt.fire_event({'complete': True, 'schedule': schedule}, + tag='/salt/minion/minion_schedule_list_complete') def save_schedule(self): ''' @@ -502,9 +502,9 @@ def save_schedule(self): self.persist() # Fire the complete event back along with the list of schedule - evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) - evt.fire_event({'complete': True}, - tag='/salt/minion/minion_schedule_saved') + with salt.utils.event.get_event('minion', opts=self.opts, listen=False) as evt: + evt.fire_event({'complete': True}, + tag='/salt/minion/minion_schedule_saved') def postpone_job(self, name, data): ''' @@ -531,10 +531,10 @@ def postpone_job(self, name, data): log.warning("Cannot modify job %s, it's in the pillar!", name) # Fire the complete event back along with updated list of schedule - evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) - evt.fire_event({'complete': True, - 'schedule': self._get_schedule()}, - tag='/salt/minion/minion_schedule_postpone_job_complete') + with salt.utils.event.get_event('minion', opts=self.opts, listen=False) as evt: + evt.fire_event({'complete': True, + 'schedule': self._get_schedule()}, + tag='/salt/minion/minion_schedule_postpone_job_complete') def skip_job(self, name, data): ''' @@ -555,10 +555,10 @@ def skip_job(self, name, data): log.warning("Cannot modify job %s, it's in the pillar!", name) # Fire the complete event back along with updated list of schedule - evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) - evt.fire_event({'complete': True, - 'schedule': self._get_schedule()}, - tag='/salt/minion/minion_schedule_skip_job_complete') + with salt.utils.event.get_event('minion', opts=self.opts, listen=False) as 
evt: + evt.fire_event({'complete': True, + 'schedule': self._get_schedule()}, + tag='/salt/minion/minion_schedule_skip_job_complete') def get_next_fire_time(self, name, fmt='%Y-%m-%dT%H:%M:%S'): ''' @@ -573,9 +573,9 @@ def get_next_fire_time(self, name, fmt='%Y-%m-%dT%H:%M:%S'): _next_fire_time = _next_fire_time.strftime(fmt) # Fire the complete event back along with updated list of schedule - evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) - evt.fire_event({'complete': True, 'next_fire_time': _next_fire_time}, - tag='/salt/minion/minion_schedule_next_fire_time_complete') + with salt.utils.event.get_event('minion', opts=self.opts, listen=False) as evt: + evt.fire_event({'complete': True, 'next_fire_time': _next_fire_time}, + tag='/salt/minion/minion_schedule_next_fire_time_complete') def job_status(self, name): ''' @@ -694,15 +694,14 @@ def handle_func(self, multiprocessing_enabled, func, data): jid = salt.utils.jid.gen_jid(self.opts) tag = salt.utils.event.tagify(jid, prefix='salt/scheduler/') - event = salt.utils.event.get_event( + namespaced_event = salt.utils.event.NamespacedEvent( + salt.utils.event.get_event( self.opts['__role'], self.opts['sock_dir'], self.opts['transport'], opts=self.opts, - listen=False) - - namespaced_event = salt.utils.event.NamespacedEvent( - event, + listen=False, + ), tag, print_func=None ) @@ -804,6 +803,9 @@ def handle_func(self, multiprocessing_enabled, func, data): event.fire_event(load, '__schedule_return') except Exception as exc: log.exception('Unhandled exception firing __schedule_return event') + finally: + event.destroy() + namespaced_event.destroy() if not self.standalone: log.debug('schedule.handle_func: Removing %s', proc_fn) From 85fec9e7285318f27525614b8395c68f3681fdbc Mon Sep 17 00:00:00 2001 From: "Daniel A. 
Wozniak" Date: Thu, 11 Apr 2019 01:26:49 +0000 Subject: [PATCH 142/340] Handle event closures explicity in salt.client.mixins --- salt/client/mixins.py | 236 +++++++++++++++++++++--------------------- 1 file changed, 117 insertions(+), 119 deletions(-) diff --git a/salt/client/mixins.py b/salt/client/mixins.py index a004d5e1125c..b336ce7f0592 100644 --- a/salt/client/mixins.py +++ b/salt/client/mixins.py @@ -159,19 +159,19 @@ def cmd_sync(self, low, timeout=None, full_return=False): 'eauth': 'pam', }) ''' - event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=True) - job = self.master_call(**low) - ret_tag = salt.utils.event.tagify('ret', base=job['tag']) + with salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=True) as event: + job = self.master_call(**low) + ret_tag = salt.utils.event.tagify('ret', base=job['tag']) - if timeout is None: - timeout = self.opts.get('rest_timeout', 300) - ret = event.get_event(tag=ret_tag, full=True, wait=timeout, auto_reconnect=True) - if ret is None: - raise salt.exceptions.SaltClientTimeout( - "RunnerClient job '{0}' timed out".format(job['jid']), - jid=job['jid']) + if timeout is None: + timeout = self.opts.get('rest_timeout', 300) + with event.get_event(tag=ret_tag, full=True, wait=timeout, auto_reconnect=True) as evt: + if ret is None: + raise salt.exceptions.SaltClientTimeout( + "RunnerClient job '{0}' timed out".format(job['jid']), + jid=job['jid']) - return ret if full_return else ret['data']['return'] + return ret if full_return else ret['data']['return'] def cmd(self, fun, arg=None, pub_data=None, kwarg=None, print_event=True, full_return=False): ''' @@ -307,12 +307,6 @@ def _low(self, fun, low, print_event=True, full_return=False): 'user': low.get('__user__', 'UNKNOWN'), } - event = salt.utils.event.get_event( - 'master', - self.opts['sock_dir'], - self.opts['transport'], - opts=self.opts, - listen=False) if print_event: print_func = self.print_async_event \ @@ -323,113 +317,117 @@ def _low(self, fun, low, print_event=True, full_return=False): # runner/wheel output during orchestration). print_func = None - namespaced_event = salt.utils.event.NamespacedEvent( - event, + with salt.utils.event.NamespacedEvent( + salt.utils.event.get_event( + 'master', + self.opts['sock_dir'], + self.opts['transport'], + opts=self.opts, + listen=False, + ), tag, print_func=print_func - ) + ) as namespaced_event + + # TODO: test that they exist + # TODO: Other things to inject?? + func_globals = {'__jid__': jid, + '__user__': data['user'], + '__tag__': tag, + # weak ref to avoid the Exception in interpreter + # teardown of event + '__jid_event__': weakref.proxy(namespaced_event), + } - # TODO: test that they exist - # TODO: Other things to inject?? - func_globals = {'__jid__': jid, - '__user__': data['user'], - '__tag__': tag, - # weak ref to avoid the Exception in interpreter - # teardown of event - '__jid_event__': weakref.proxy(namespaced_event), - } - - try: - self_functions = pycopy.copy(self.functions) - salt.utils.lazy.verify_fun(self_functions, fun) - - # Inject some useful globals to *all* the function's global - # namespace only once per module-- not per func - completed_funcs = [] - - for mod_name in six.iterkeys(self_functions): - if '.' 
not in mod_name: - continue - mod, _ = mod_name.split('.', 1) - if mod in completed_funcs: - continue - completed_funcs.append(mod) - for global_key, value in six.iteritems(func_globals): - self.functions[mod_name].__globals__[global_key] = value - - # There are some discrepancies of what a "low" structure is in the - # publisher world it is a dict including stuff such as jid, fun, - # arg (a list of args, with kwargs packed in). Historically this - # particular one has had no "arg" and just has had all the kwargs - # packed into the top level object. The plan is to move away from - # that since the caller knows what is an arg vs a kwarg, but while - # we make the transition we will load "kwargs" using format_call if - # there are no kwargs in the low object passed in. - - if 'arg' in low and 'kwarg' in low: - args = low['arg'] - kwargs = low['kwarg'] - else: - f_call = salt.utils.args.format_call( - self.functions[fun], - low, - expected_extra_kws=CLIENT_INTERNAL_KEYWORDS - ) - args = f_call.get('args', ()) - kwargs = f_call.get('kwargs', {}) - - # Update the event data with loaded args and kwargs - data['fun_args'] = list(args) + ([kwargs] if kwargs else []) - func_globals['__jid_event__'].fire_event(data, 'new') - - # Initialize a context for executing the method. - with tornado.stack_context.StackContext(self.functions.context_dict.clone): - data['return'] = self.functions[fun](*args, **kwargs) - try: - data['success'] = self.context.get('retcode', 0) == 0 - except AttributeError: - # Assume a True result if no context attribute - data['success'] = True - if isinstance(data['return'], dict) and 'data' in data['return']: - # some functions can return boolean values - data['success'] = salt.utils.state.check_result(data['return']['data']) - except (Exception, SystemExit) as ex: - if isinstance(ex, salt.exceptions.NotImplemented): - data['return'] = six.text_type(ex) - else: - data['return'] = 'Exception occurred in {0} {1}: {2}'.format( - self.client, - fun, - traceback.format_exc(), - ) - data['success'] = False - - if self.store_job: try: - salt.utils.job.store_job( - self.opts, - { - 'id': self.opts['id'], - 'tgt': self.opts['id'], - 'jid': data['jid'], - 'return': data, - }, - event=None, - mminion=self.mminion, + self_functions = pycopy.copy(self.functions) + salt.utils.lazy.verify_fun(self_functions, fun) + + # Inject some useful globals to *all* the function's global + # namespace only once per module-- not per func + completed_funcs = [] + + for mod_name in six.iterkeys(self_functions): + if '.' not in mod_name: + continue + mod, _ = mod_name.split('.', 1) + if mod in completed_funcs: + continue + completed_funcs.append(mod) + for global_key, value in six.iteritems(func_globals): + self.functions[mod_name].__globals__[global_key] = value + + # There are some discrepancies of what a "low" structure is in the + # publisher world it is a dict including stuff such as jid, fun, + # arg (a list of args, with kwargs packed in). Historically this + # particular one has had no "arg" and just has had all the kwargs + # packed into the top level object. The plan is to move away from + # that since the caller knows what is an arg vs a kwarg, but while + # we make the transition we will load "kwargs" using format_call if + # there are no kwargs in the low object passed in. 
+ + if 'arg' in low and 'kwarg' in low: + args = low['arg'] + kwargs = low['kwarg'] + else: + f_call = salt.utils.args.format_call( + self.functions[fun], + low, + expected_extra_kws=CLIENT_INTERNAL_KEYWORDS ) - except salt.exceptions.SaltCacheError: - log.error('Could not store job cache info. ' - 'Job details for this run may be unavailable.') - - # Outputters _can_ mutate data so write to the job cache first! - namespaced_event.fire_event(data, 'ret') - - # if we fired an event, make sure to delete the event object. - # This will ensure that we call destroy, which will do the 0MQ linger - log.info('Runner completed: %s', data['jid']) - del event - del namespaced_event - return data if full_return else data['return'] + args = f_call.get('args', ()) + kwargs = f_call.get('kwargs', {}) + + # Update the event data with loaded args and kwargs + data['fun_args'] = list(args) + ([kwargs] if kwargs else []) + func_globals['__jid_event__'].fire_event(data, 'new') + + # Initialize a context for executing the method. + with tornado.stack_context.StackContext(self.functions.context_dict.clone): + data['return'] = self.functions[fun](*args, **kwargs) + try: + data['success'] = self.context.get('retcode', 0) == 0 + except AttributeError: + # Assume a True result if no context attribute + data['success'] = True + if isinstance(data['return'], dict) and 'data' in data['return']: + # some functions can return boolean values + data['success'] = salt.utils.state.check_result(data['return']['data']) + except (Exception, SystemExit) as ex: + if isinstance(ex, salt.exceptions.NotImplemented): + data['return'] = six.text_type(ex) + else: + data['return'] = 'Exception occurred in {0} {1}: {2}'.format( + self.client, + fun, + traceback.format_exc(), + ) + data['success'] = False + + if self.store_job: + try: + salt.utils.job.store_job( + self.opts, + { + 'id': self.opts['id'], + 'tgt': self.opts['id'], + 'jid': data['jid'], + 'return': data, + }, + event=None, + mminion=self.mminion, + ) + except salt.exceptions.SaltCacheError: + log.error('Could not store job cache info. ' + 'Job details for this run may be unavailable.') + + # Outputters _can_ mutate data so write to the job cache first! + namespaced_event.fire_event(data, 'ret') + + # if we fired an event, make sure to delete the event object. + # This will ensure that we call destroy, which will do the 0MQ linger + log.info('Runner completed: %s', data['jid']) + return data if full_return else data['return'] def get_docs(self, arg=None): ''' From 12e98c4265205ae068656da25e4d090dcd0eddf3 Mon Sep 17 00:00:00 2001 From: "Daniel A. 
Wozniak" Date: Thu, 11 Apr 2019 01:32:05 +0000 Subject: [PATCH 143/340] Handle event closures explicity in salt.crypt --- salt/crypt.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/crypt.py b/salt/crypt.py index c2ecf7e032bd..d7ffcaf876ca 100644 --- a/salt/crypt.py +++ b/salt/crypt.py @@ -650,8 +650,8 @@ def _authenticate(self): self._authenticate_future.set_result(True) # mark the sign-in as complete # Notify the bus about creds change if self.opts.get('auth_events') is True: - event = salt.utils.event.get_event(self.opts.get('__role'), opts=self.opts, listen=False) - event.fire_event({'key': key, 'creds': creds}, salt.utils.event.tagify(prefix='auth', suffix='creds')) + with salt.utils.event.get_event(self.opts.get('__role'), opts=self.opts, listen=False) as event: + event.fire_event({'key': key, 'creds': creds}, salt.utils.event.tagify(prefix='auth', suffix='creds')) @tornado.gen.coroutine def sign_in(self, timeout=60, safe=True, tries=1, channel=None): From 235ebfa88eb4a7316f34af011a051376689f7856 Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Thu, 11 Apr 2019 01:32:49 +0000 Subject: [PATCH 144/340] Handle event closures explicity in salt.beacons --- salt/beacons/__init__.py | 74 ++++++++++++++++++++-------------------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/salt/beacons/__init__.py b/salt/beacons/__init__.py index d464e247402d..5cc15d348205 100644 --- a/salt/beacons/__init__.py +++ b/salt/beacons/__init__.py @@ -233,9 +233,9 @@ def list_beacons(self, beacons = self._get_beacons(include_pillar, include_opts) # Fire the complete event back along with the list of beacons - evt = salt.utils.event.get_event('minion', opts=self.opts) - evt.fire_event({'complete': True, 'beacons': beacons}, - tag='/salt/minion/minion_beacons_list_complete') + with salt.utils.event.get_event('minion', opts=self.opts) as evt: + evt.fire_event({'complete': True, 'beacons': beacons}, + tag='/salt/minion/minion_beacons_list_complete') return True @@ -247,9 +247,9 @@ def list_available_beacons(self): for _beacon in self.beacons if '.beacon' in _beacon] # Fire the complete event back along with the list of beacons - evt = salt.utils.event.get_event('minion', opts=self.opts) - evt.fire_event({'complete': True, 'beacons': _beacons}, - tag='/salt/minion/minion_beacons_list_available_complete') + with salt.utils.event.get_event('minion', opts=self.opts) as evt: + evt.fire_event({'complete': True, 'beacons': _beacons}, + tag='/salt/minion/minion_beacons_list_available_complete') return True @@ -270,11 +270,11 @@ def validate_beacon(self, name, beacon_data): valid = True # Fire the complete event back along with the list of beacons - evt = salt.utils.event.get_event('minion', opts=self.opts) - evt.fire_event({'complete': True, - 'vcomment': vcomment, - 'valid': valid}, - tag='/salt/minion/minion_beacon_validation_complete') + with salt.utils.event.get_event('minion', opts=self.opts) as evt: + evt.fire_event({'complete': True, + 'vcomment': vcomment, + 'valid': valid}, + tag='/salt/minion/minion_beacon_validation_complete') return True @@ -300,10 +300,10 @@ def add_beacon(self, name, beacon_data): self.opts['beacons'].update(data) # Fire the complete event back along with updated list of beacons - evt = salt.utils.event.get_event('minion', opts=self.opts) - evt.fire_event({'complete': complete, 'comment': comment, - 'beacons': self.opts['beacons']}, - tag='/salt/minion/minion_beacon_add_complete') + with salt.utils.event.get_event('minion', opts=self.opts) as 
evt: + evt.fire_event({'complete': complete, 'comment': comment, + 'beacons': self.opts['beacons']}, + tag='/salt/minion/minion_beacon_add_complete') return True @@ -326,10 +326,10 @@ def modify_beacon(self, name, beacon_data): self.opts['beacons'].update(data) # Fire the complete event back along with updated list of beacons - evt = salt.utils.event.get_event('minion', opts=self.opts) - evt.fire_event({'complete': complete, 'comment': comment, - 'beacons': self.opts['beacons']}, - tag='/salt/minion/minion_beacon_modify_complete') + with salt.utils.event.get_event('minion', opts=self.opts) as evt: + evt.fire_event({'complete': complete, 'comment': comment, + 'beacons': self.opts['beacons']}, + tag='/salt/minion/minion_beacon_modify_complete') return True def delete_beacon(self, name): @@ -350,10 +350,10 @@ def delete_beacon(self, name): complete = True # Fire the complete event back along with updated list of beacons - evt = salt.utils.event.get_event('minion', opts=self.opts) - evt.fire_event({'complete': complete, 'comment': comment, - 'beacons': self.opts['beacons']}, - tag='/salt/minion/minion_beacon_delete_complete') + with salt.utils.event.get_event('minion', opts=self.opts) as evt: + evt.fire_event({'complete': complete, 'comment': comment, + 'beacons': self.opts['beacons']}, + tag='/salt/minion/minion_beacon_delete_complete') return True @@ -365,9 +365,9 @@ def enable_beacons(self): self.opts['beacons']['enabled'] = True # Fire the complete event back along with updated list of beacons - evt = salt.utils.event.get_event('minion', opts=self.opts) - evt.fire_event({'complete': True, 'beacons': self.opts['beacons']}, - tag='/salt/minion/minion_beacons_enabled_complete') + with salt.utils.event.get_event('minion', opts=self.opts) as evt: + evt.fire_event({'complete': True, 'beacons': self.opts['beacons']}, + tag='/salt/minion/minion_beacons_enabled_complete') return True @@ -379,9 +379,9 @@ def disable_beacons(self): self.opts['beacons']['enabled'] = False # Fire the complete event back along with updated list of beacons - evt = salt.utils.event.get_event('minion', opts=self.opts) - evt.fire_event({'complete': True, 'beacons': self.opts['beacons']}, - tag='/salt/minion/minion_beacons_disabled_complete') + with salt.utils.event.get_event('minion', opts=self.opts) as evt: + evt.fire_event({'complete': True, 'beacons': self.opts['beacons']}, + tag='/salt/minion/minion_beacons_disabled_complete') return True @@ -400,10 +400,10 @@ def enable_beacon(self, name): complete = True # Fire the complete event back along with updated list of beacons - evt = salt.utils.event.get_event('minion', opts=self.opts) - evt.fire_event({'complete': complete, 'comment': comment, - 'beacons': self.opts['beacons']}, - tag='/salt/minion/minion_beacon_enabled_complete') + with salt.utils.event.get_event('minion', opts=self.opts) as evt: + evt.fire_event({'complete': complete, 'comment': comment, + 'beacons': self.opts['beacons']}, + tag='/salt/minion/minion_beacon_enabled_complete') return True @@ -422,10 +422,10 @@ def disable_beacon(self, name): complete = True # Fire the complete event back along with updated list of beacons - evt = salt.utils.event.get_event('minion', opts=self.opts) - evt.fire_event({'complete': complete, 'comment': comment, - 'beacons': self.opts['beacons']}, - tag='/salt/minion/minion_beacon_disabled_complete') + with salt.utils.event.get_event('minion', opts=self.opts) as evt: + evt.fire_event({'complete': complete, 'comment': comment, + 'beacons': self.opts['beacons']}, + 
tag='/salt/minion/minion_beacon_disabled_complete') return True From 22e1014300b0a769f82b2da5fddcf21f0ac46004 Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Thu, 11 Apr 2019 01:35:21 +0000 Subject: [PATCH 145/340] Fix missing colon --- salt/client/mixins.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/client/mixins.py b/salt/client/mixins.py index b336ce7f0592..162ae281df1a 100644 --- a/salt/client/mixins.py +++ b/salt/client/mixins.py @@ -327,7 +327,7 @@ def _low(self, fun, low, print_event=True, full_return=False): ), tag, print_func=print_func - ) as namespaced_event + ) as namespaced_event: # TODO: test that they exist # TODO: Other things to inject?? From a4db7295293858feb69f0fbf092d74295da6a206 Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Thu, 11 Apr 2019 01:50:43 +0000 Subject: [PATCH 146/340] Fix wart in salt.client.mixins change --- salt/client/mixins.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/salt/client/mixins.py b/salt/client/mixins.py index 162ae281df1a..d411e4cca3c5 100644 --- a/salt/client/mixins.py +++ b/salt/client/mixins.py @@ -165,13 +165,13 @@ def cmd_sync(self, low, timeout=None, full_return=False): if timeout is None: timeout = self.opts.get('rest_timeout', 300) - with event.get_event(tag=ret_tag, full=True, wait=timeout, auto_reconnect=True) as evt: - if ret is None: - raise salt.exceptions.SaltClientTimeout( - "RunnerClient job '{0}' timed out".format(job['jid']), - jid=job['jid']) + ret = event.get_event(tag=ret_tag, full=True, wait=timeout, auto_reconnect=True) + if ret is None: + raise salt.exceptions.SaltClientTimeout( + "RunnerClient job '{0}' timed out".format(job['jid']), + jid=job['jid']) - return ret if full_return else ret['data']['return'] + return ret if full_return else ret['data']['return'] def cmd(self, fun, arg=None, pub_data=None, kwarg=None, print_event=True, full_return=False): ''' From 6a5741ce2211af282ec95f59e4bac7e7f632d06a Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Thu, 11 Apr 2019 01:52:34 +0000 Subject: [PATCH 147/340] Fix linter wart in salt.utils.cloud --- salt/utils/cloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/utils/cloud.py b/salt/utils/cloud.py index 941b9c09e43b..be1577a046fd 100644 --- a/salt/utils/cloud.py +++ b/salt/utils/cloud.py @@ -1772,7 +1772,7 @@ def fire_event(key, msg, tag, sock_dir, args=None, transport='zeromq'): ''' Fire deploy action ''' - with salt.utils.event.get_event( 'master', sock_dir, transport, listen=False) as event: + with salt.utils.event.get_event('master', sock_dir, transport, listen=False) as event: try: event.fire_event(msg, tag) except ValueError: From f8553868f06b5dc73fa416ee325ed761d11f1f9d Mon Sep 17 00:00:00 2001 From: "Daniel A. 
Wozniak" Date: Thu, 11 Apr 2019 01:56:39 +0000 Subject: [PATCH 148/340] Handle event closures explicity in salt.engines --- salt/engines/http_logstash.py | 40 +++++++++--------- salt/engines/logentries.py | 74 +++++++++++++++++---------------- salt/engines/logstash_engine.py | 38 +++++++++-------- salt/engines/test.py | 33 ++++++++------- 4 files changed, 99 insertions(+), 86 deletions(-) diff --git a/salt/engines/http_logstash.py b/salt/engines/http_logstash.py index 4a92718fdfbd..9d3df3b4c80f 100644 --- a/salt/engines/http_logstash.py +++ b/salt/engines/http_logstash.py @@ -98,22 +98,24 @@ def start(url, funs=None, tags=None): instance = 'master' else: instance = 'minion' - event_bus = salt.utils.event.get_event(instance, - sock_dir=__opts__['sock_dir'], - transport=__opts__['transport'], - opts=__opts__) - while True: - event = event_bus.get_event(full=True) - if event: - publish = True - if isinstance(tags, list) and len(tags) > 0: - found_match = False - for tag in tags: - if fnmatch.fnmatch(event['tag'], tag): - found_match = True - publish = found_match - if funs and 'fun' in event['data']: - if not event['data']['fun'] in funs: - publish = False - if publish: - _logstash(url, event['data']) + with salt.utils.event.get_event( + instance, + sock_dir=__opts__['sock_dir'], + transport=__opts__['transport'], + opts=__opts__, + ) as event_bus: + while True: + event = event_bus.get_event(full=True) + if event: + publish = True + if isinstance(tags, list) and len(tags) > 0: + found_match = False + for tag in tags: + if fnmatch.fnmatch(event['tag'], tag): + found_match = True + publish = found_match + if funs and 'fun' in event['data']: + if not event['data']['fun'] in funs: + publish = False + if publish: + _logstash(url, event['data']) diff --git a/salt/engines/logentries.py b/salt/engines/logentries.py index 7b59ba483482..e5065f64e1fc 100644 --- a/salt/engines/logentries.py +++ b/salt/engines/logentries.py @@ -171,6 +171,22 @@ def open_connection(self): SocketAppender = TLSSocketAppender +def event_bus_context(opts): + if opts.get('id').endswith('_master'): + event_bus = salt.utils.event.get_master_event( + opts, + opts['sock_dir'], + listen=True) + else: + event_bus = salt.utils.event.get_event( + 'minion', + transport=opts['transport'], + opts=opts, + sock_dir=opts['sock_dir'], + listen=True) + return event_bus + + def start(endpoint='data.logentries.com', port=10000, token=None, @@ -178,38 +194,26 @@ def start(endpoint='data.logentries.com', ''' Listen to salt events and forward them to Logentries ''' - if __opts__.get('id').endswith('_master'): - event_bus = salt.utils.event.get_master_event( - __opts__, - __opts__['sock_dir'], - listen=True) - else: - event_bus = salt.utils.event.get_event( - 'minion', - transport=__opts__['transport'], - opts=__opts__, - sock_dir=__opts__['sock_dir'], - listen=True) - log.debug('Logentries engine started') - - try: - val = uuid.UUID(token) - except ValueError: - log.warning('Not a valid logentries token') - - appender = SocketAppender(verbose=False, LE_API=endpoint, LE_PORT=port) - appender.reopen_connection() - - while True: - event = event_bus.get_event() - if event: - # future lint: disable=blacklisted-function - msg = str(' ').join(( - salt.utils.stringutils.to_str(token), - salt.utils.stringutils.to_str(tag), - salt.utils.json.dumps(event) - )) - # future lint: enable=blacklisted-function - appender.put(msg) - - appender.close_connection() + with event_bus_context(__opts__) as event_bus: + log.debug('Logentries engine started') + try: + val = 
uuid.UUID(token) + except ValueError: + log.warning('Not a valid logentries token') + + appender = SocketAppender(verbose=False, LE_API=endpoint, LE_PORT=port) + appender.reopen_connection() + + while True: + event = event_bus.get_event() + if event: + # future lint: disable=blacklisted-function + msg = str(' ').join(( + salt.utils.stringutils.to_str(token), + salt.utils.stringutils.to_str(tag), + salt.utils.json.dumps(event) + )) + # future lint: enable=blacklisted-function + appender.put(msg) + + appender.close_connection() diff --git a/salt/engines/logstash_engine.py b/salt/engines/logstash_engine.py index 28f537f5e43f..c587aa339838 100644 --- a/salt/engines/logstash_engine.py +++ b/salt/engines/logstash_engine.py @@ -45,6 +45,22 @@ def __virtual__(): log = logging.getLogger(__name__) +def event_bus_context(opts): + if opts.get('id').endswith('_master'): + event_bus = salt.utils.event.get_master_event( + opts, + opts['sock_dir'], + listen=True) + else: + event_bus = salt.utils.event.get_event( + 'minion', + transport=opts['transport'], + opts=opts, + sock_dir=opts['sock_dir'], + listen=True) + return event_bus + + def start(host, port=5959, tag='salt/engine/logstash', proto='udp'): ''' Listen to salt events and forward them to logstash @@ -59,21 +75,9 @@ def start(host, port=5959, tag='salt/engine/logstash', proto='udp'): logstash_logger.setLevel(logging.INFO) logstash_logger.addHandler(logstashHandler(host, port, version=1)) - if __opts__.get('id').endswith('_master'): - event_bus = salt.utils.event.get_master_event( - __opts__, - __opts__['sock_dir'], - listen=True) - else: - event_bus = salt.utils.event.get_event( - 'minion', - transport=__opts__['transport'], - opts=__opts__, - sock_dir=__opts__['sock_dir'], - listen=True) + with event_bus_context(__opts__) as event_bus(): log.debug('Logstash engine started') - - while True: - event = event_bus.get_event() - if event: - logstash_logger.info(tag, extra=event) + while True: + event = event_bus.get_event() + if event: + logstash_logger.info(tag, extra=event) diff --git a/salt/engines/test.py b/salt/engines/test.py index a078e403ab1d..0cc5d1f8ada1 100644 --- a/salt/engines/test.py +++ b/salt/engines/test.py @@ -14,26 +14,29 @@ log = logging.getLogger(__name__) -def start(): - ''' - Listen to events and write them to a log file - ''' - if __opts__['__role'] == 'master': +def event_bus_context(opts): + if opts['__role'] == 'master': event_bus = salt.utils.event.get_master_event( - __opts__, - __opts__['sock_dir'], + opts, + opts['sock_dir'], listen=True) else: event_bus = salt.utils.event.get_event( 'minion', - transport=__opts__['transport'], - opts=__opts__, - sock_dir=__opts__['sock_dir'], + transport=opts['transport'], + opts=opts, + sock_dir=opts['sock_dir'], listen=True) log.debug('test engine started') - while True: - event = event_bus.get_event() - jevent = salt.utils.json.dumps(event) - if event: - log.debug(jevent) + +def start(): + ''' + Listen to events and write them to a log file + ''' + with event_bus_context(__opts__) as event: + while True: + event = event_bus.get_event() + jevent = salt.utils.json.dumps(event) + if event: + log.debug(jevent) From 9805f38e1316a85398bb5e1676d498e593f84e47 Mon Sep 17 00:00:00 2001 From: "Daniel A. 
Wozniak" Date: Thu, 11 Apr 2019 02:04:55 +0000 Subject: [PATCH 149/340] Fix wart in IPC merge forward --- salt/transport/ipc.py | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py index c23af2956647..12d58fdf374c 100644 --- a/salt/transport/ipc.py +++ b/salt/transport/ipc.py @@ -772,6 +772,7 @@ def read_sync(self, timeout=None): def close(self): self.service.unsubscribe(self) + self.service.close() def __del__(self): self.close() From ccf9240f36cbdc9cd44b4bc862ca71538ca17310 Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Thu, 11 Apr 2019 02:13:17 +0000 Subject: [PATCH 150/340] Fix wart in event bus context --- salt/engines/test.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/engines/test.py b/salt/engines/test.py index 0cc5d1f8ada1..e91dbe596125 100644 --- a/salt/engines/test.py +++ b/salt/engines/test.py @@ -28,13 +28,14 @@ def event_bus_context(opts): sock_dir=opts['sock_dir'], listen=True) log.debug('test engine started') + return event_bus def start(): ''' Listen to events and write them to a log file ''' - with event_bus_context(__opts__) as event: + with event_bus_context(__opts__) as event_bus: while True: event = event_bus.get_event() jevent = salt.utils.json.dumps(event) From 46b8f88abdcea33fa5ad96aad01557bdc58abbde Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Thu, 11 Apr 2019 02:14:34 +0000 Subject: [PATCH 151/340] Fix linter warning --- salt/client/mixins.py | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/client/mixins.py b/salt/client/mixins.py index d411e4cca3c5..2711b7f31e61 100644 --- a/salt/client/mixins.py +++ b/salt/client/mixins.py @@ -307,7 +307,6 @@ def _low(self, fun, low, print_event=True, full_return=False): 'user': low.get('__user__', 'UNKNOWN'), } - if print_event: print_func = self.print_async_event \ if hasattr(self, 'print_async_event') \ From 1ca3bae00a59afe720a1a9012199a28ebc6c73c0 Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Thu, 11 Apr 2019 02:17:20 +0000 Subject: [PATCH 152/340] Handle event closures explicity in salt.fileserver.svnfs --- salt/fileserver/svnfs.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/fileserver/svnfs.py b/salt/fileserver/svnfs.py index 24226a702f1c..0177d1de8b6e 100644 --- a/salt/fileserver/svnfs.py +++ b/salt/fileserver/svnfs.py @@ -471,13 +471,13 @@ def update(): # if there is a change, fire an event if __opts__.get('fileserver_events', False): - event = salt.utils.event.get_event( + with salt.utils.event.get_event( 'master', __opts__['sock_dir'], __opts__['transport'], opts=__opts__, - listen=False) - event.fire_event(data, tagify(['svnfs', 'update'], prefix='fileserver')) + listen=False) as event: + event.fire_event(data, tagify(['svnfs', 'update'], prefix='fileserver')) try: salt.fileserver.reap_fileserver_cache_dir( os.path.join(__opts__['cachedir'], 'svnfs/hash'), From 71cf825b1c7681bd62a202e70dff04519374b21a Mon Sep 17 00:00:00 2001 From: "Daniel A. 
Wozniak" Date: Thu, 11 Apr 2019 02:21:16 +0000 Subject: [PATCH 153/340] Event not callable --- salt/engines/logstash_engine.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/engines/logstash_engine.py b/salt/engines/logstash_engine.py index c587aa339838..7883bc276cce 100644 --- a/salt/engines/logstash_engine.py +++ b/salt/engines/logstash_engine.py @@ -75,7 +75,7 @@ def start(host, port=5959, tag='salt/engine/logstash', proto='udp'): logstash_logger.setLevel(logging.INFO) logstash_logger.addHandler(logstashHandler(host, port, version=1)) - with event_bus_context(__opts__) as event_bus(): + with event_bus_context(__opts__) as event_bus: log.debug('Logstash engine started') while True: event = event_bus.get_event() From 873f73f07553a534035215f046eefa5c88e007f0 Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Thu, 11 Apr 2019 05:27:24 +0000 Subject: [PATCH 154/340] Fix context manager __exit__ signature --- salt/utils/event.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/utils/event.py b/salt/utils/event.py index a58f8e5f00f4..6dfb7e790a90 100644 --- a/salt/utils/event.py +++ b/salt/utils/event.py @@ -887,7 +887,7 @@ def __del__(self): def __enter__(self): return self - def __exit__(self): + def __exit__(self, *args): self.destroy() @@ -944,7 +944,7 @@ def destroy(self): def __enter__(self): return self - def __exit__(self): + def __exit__(self, *args): self.destroy() From e9d175bc39d548d432c6c7e539976fe4a5784fbb Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Thu, 11 Apr 2019 09:54:31 -0500 Subject: [PATCH 155/340] Fix traceback when invalid source passed to archive.extracted When the source is procedurally generated (e.g. from the result of a remote execution function call), and that source ends up being an empty string or some other invalid value, the call to `file.source_list` will return a path of `None` which will cause a `TypeError` later on when we try to run a urlparse on it. 
--- salt/states/archive.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/salt/states/archive.py b/salt/states/archive.py index 4ca156cd0485..5bae1688643e 100644 --- a/salt/states/archive.py +++ b/salt/states/archive.py @@ -777,6 +777,11 @@ def extracted(name, ret['comment'] = exc.strerror return ret + if not source_match: + ret['result'] = False + ret['comment'] = 'Invalid source "{0}"'.format(source) + return ret + urlparsed_source = _urlparse(source_match) urlparsed_scheme = urlparsed_source.scheme urlparsed_path = os.path.join( From 6e7def46a94c051eaf1e829a26051889136310e9 Mon Sep 17 00:00:00 2001 From: Ch3LL Date: Thu, 11 Apr 2019 12:20:18 -0400 Subject: [PATCH 156/340] remove pchanges for windows file modules --- salt/modules/win_file.py | 25 +++++++++++-------------- salt/states/file.py | 2 +- 2 files changed, 12 insertions(+), 15 deletions(-) diff --git a/salt/modules/win_file.py b/salt/modules/win_file.py index 860e5cb30a09..26ba5b1d9521 100644 --- a/salt/modules/win_file.py +++ b/salt/modules/win_file.py @@ -1640,7 +1640,6 @@ def check_perms(path, if not ret: ret = {'name': path, 'changes': {}, - 'pchanges': {}, 'comment': [], 'result': True} orig_comment = '' @@ -1654,7 +1653,7 @@ def check_perms(path, current_owner = salt.utils.win_dacl.get_owner(obj_name=path) if owner != current_owner: if __opts__['test'] is True: - ret['pchanges']['owner'] = owner + ret['changes']['owner'] = owner else: try: salt.utils.win_dacl.set_owner( @@ -1729,13 +1728,12 @@ def check_perms(path, changes[user]['applies_to'] = applies_to if changes: - ret['pchanges']['deny_perms'] = {} ret['changes']['deny_perms'] = {} for user in changes: user_name = salt.utils.win_dacl.get_name(principal=user) if __opts__['test'] is True: - ret['pchanges']['deny_perms'][user] = changes[user] + ret['changes']['deny_perms'][user] = changes[user] else: # Get applies_to applies_to = None @@ -1851,12 +1849,11 @@ def check_perms(path, changes[user]['applies_to'] = applies_to if changes: - ret['pchanges']['grant_perms'] = {} ret['changes']['grant_perms'] = {} for user in changes: user_name = salt.utils.win_dacl.get_name(principal=user) if __opts__['test'] is True: - ret['pchanges']['grant_perms'][user] = changes[user] + ret['changes']['grant_perms'][user] = changes[user] else: applies_to = None if 'applies_to' not in changes[user]: @@ -1914,7 +1911,7 @@ def check_perms(path, if inheritance is not None: if not inheritance == salt.utils.win_dacl.get_inheritance(obj_name=path): if __opts__['test'] is True: - ret['pchanges']['inheritance'] = inheritance + ret['changes']['inheritance'] = inheritance else: try: salt.utils.win_dacl.set_inheritance( @@ -1939,9 +1936,9 @@ def check_perms(path, if 'grant' in cur_perms[user_name] and \ not cur_perms[user_name]['grant']['inherited']: if __opts__['test'] is True: - if 'remove_perms' not in ret['pchanges']: - ret['pchanges']['remove_perms'] = {} - ret['pchanges']['remove_perms'].update( + if 'remove_perms' not in ret['changes']: + ret['changes']['remove_perms'] = {} + ret['changes']['remove_perms'].update( {user_name: cur_perms[user_name]}) else: if 'remove_perms' not in ret['changes']: @@ -1958,9 +1955,9 @@ def check_perms(path, if 'deny' in cur_perms[user_name] and \ not cur_perms[user_name]['deny']['inherited']: if __opts__['test'] is True: - if 'remove_perms' not in ret['pchanges']: - ret['pchanges']['remove_perms'] = {} - ret['pchanges']['remove_perms'].update( + if 'remove_perms' not in ret['changes']: + ret['changes']['remove_perms'] = {} + 
ret['changes']['remove_perms'].update( {user_name: cur_perms[user_name]}) else: if 'remove_perms' not in ret['changes']: @@ -1983,7 +1980,7 @@ def check_perms(path, ret['comment'] = '\n'.join(ret['comment']) # Set result for test = True - if __opts__['test'] and (ret['changes'] or ret['pchanges']): + if __opts__['test'] and (ret['changes']): ret['result'] = None return ret diff --git a/salt/states/file.py b/salt/states/file.py index fe21b3f0dcad..7fce36ce18f5 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -2600,7 +2600,7 @@ def managed(name, reset=win_perms_reset) except CommandExecutionError as exc: if exc.strerror.startswith('Path not found'): - ret['changes'] = '{0} will be created'.format(name) + ret['changes'] = {'new': '{0} will be created'.format(name)} if isinstance(ret['changes'], tuple): ret['result'], ret['comment'] = ret['changes'] From 4d44229a8f9f92427bdc0a7fe4a1f3d33538040e Mon Sep 17 00:00:00 2001 From: Ch3LL Date: Thu, 11 Apr 2019 12:45:59 -0400 Subject: [PATCH 157/340] Update dict correctly in file state --- salt/states/file.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/states/file.py b/salt/states/file.py index 7fce36ce18f5..ea9ee0ce81dd 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -2600,7 +2600,7 @@ def managed(name, reset=win_perms_reset) except CommandExecutionError as exc: if exc.strerror.startswith('Path not found'): - ret['changes'] = {'new': '{0} will be created'.format(name)} + ret['changes']['newfile'] = '{0} will be created'.format(name) if isinstance(ret['changes'], tuple): ret['result'], ret['comment'] = ret['changes'] From 5c84427688453a580771c8430f6911c840ec6959 Mon Sep 17 00:00:00 2001 From: Ch3LL Date: Thu, 11 Apr 2019 12:56:29 -0400 Subject: [PATCH 158/340] use same newfile message on linux for windows file state --- salt/states/file.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/states/file.py b/salt/states/file.py index ea9ee0ce81dd..c43eb7f11938 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -2600,7 +2600,7 @@ def managed(name, reset=win_perms_reset) except CommandExecutionError as exc: if exc.strerror.startswith('Path not found'): - ret['changes']['newfile'] = '{0} will be created'.format(name) + ret['changes']['newfile'] = name if isinstance(ret['changes'], tuple): ret['result'], ret['comment'] = ret['changes'] From 1e9dd7fd3f1fed843a61a916d95cc61ace48a2f7 Mon Sep 17 00:00:00 2001 From: Matt Phillips Date: Thu, 11 Apr 2019 13:19:20 -0400 Subject: [PATCH 159/340] transport.zmq: fix bug introduced by b7df7e75cf2 this change was introduced to address evidently another bug where somehow self.opts has no master_uri - but in the process it prioritized using master_ip over master_uri even when master_uri _is_ there. 
providing master_uri as a way of specifiying which msater you would like to create a channel for is documented used elsewhere in the codebase in multi-master scenarios (ie, event.send, saltutil.revoke_auth) --- salt/transport/zeromq.py | 10 ++++++++-- tests/unit/transport/test_zeromq.py | 11 +++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/salt/transport/zeromq.py b/salt/transport/zeromq.py index 137737b93d55..3045ae4db1e5 100644 --- a/salt/transport/zeromq.py +++ b/salt/transport/zeromq.py @@ -34,7 +34,7 @@ import salt.transport.server import salt.transport.mixins.auth from salt.ext import six -from salt.exceptions import SaltReqTimeoutError +from salt.exceptions import SaltReqTimeoutError, SaltException from salt._compat import ipaddress from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO, LIBZMQ_VERSION_INFO @@ -260,12 +260,18 @@ def __del__(self): @property def master_uri(self): + if 'master_uri' in self.opts: + return self.opts['master_uri'] + + # if by chance master_uri is not there.. if 'master_ip' in self.opts: return _get_master_uri(self.opts['master_ip'], self.opts['master_port'], source_ip=self.opts.get('source_ip'), source_port=self.opts.get('source_ret_port')) - return self.opts['master_uri'] + + # if we've reached here something is very abnormal + raise SaltException('ReqChannel: missing master_uri/master_ip in self.opts') def _package_load(self, load): return { diff --git a/tests/unit/transport/test_zeromq.py b/tests/unit/transport/test_zeromq.py index 7b96bb90b25d..2ed77c8860c1 100644 --- a/tests/unit/transport/test_zeromq.py +++ b/tests/unit/transport/test_zeromq.py @@ -145,6 +145,17 @@ def _handle_payload(cls, payload): ''' raise tornado.gen.Return((payload, {'fun': 'send_clear'})) + def test_master_uri_override(self): + ''' + ensure master_uri kwarg is respected + ''' + # minion_config should be 127.0.0.1, we want a different uri that still connects + uri = 'tcp://{master_ip}:{master_port}'.format(master_ip='localhost', master_port=self.minion_config['master_port']) + + channel = salt.transport.Channel.factory(self.minion_config, master_uri=uri) + self.assertIn('localhost', channel.master_uri) + del channel + @flaky @skipIf(ON_SUSE, 'Skipping until https://github.com/saltstack/salt/issues/32902 gets fixed') From b682ec5a19c81c7d3c40b6f081eba6ecaa5c1af4 Mon Sep 17 00:00:00 2001 From: Matt Phillips Date: Thu, 11 Apr 2019 13:19:20 -0400 Subject: [PATCH 160/340] transport.zmq: fix bug introduced by b7df7e75cf2 this change was introduced to address evidently another bug where somehow self.opts has no master_uri - but in the process it prioritized using master_ip over master_uri even when master_uri _is_ there. 
providing master_uri as a way of specifiying which msater you would like to create a channel for is documented used elsewhere in the codebase in multi-master scenarios (ie, event.send, saltutil.revoke_auth) --- salt/transport/zeromq.py | 10 ++++++++-- tests/unit/transport/test_zeromq.py | 11 +++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/salt/transport/zeromq.py b/salt/transport/zeromq.py index c0f58495985c..880ad32607ff 100644 --- a/salt/transport/zeromq.py +++ b/salt/transport/zeromq.py @@ -32,7 +32,7 @@ import salt.transport.server import salt.transport.mixins.auth from salt.ext import six -from salt.exceptions import SaltReqTimeoutError +from salt.exceptions import SaltReqTimeoutError, SaltException from salt._compat import ipaddress from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO, LIBZMQ_VERSION_INFO @@ -214,12 +214,18 @@ def __del__(self): @property def master_uri(self): + if 'master_uri' in self.opts: + return self.opts['master_uri'] + + # if by chance master_uri is not there.. if 'master_ip' in self.opts: return _get_master_uri(self.opts['master_ip'], self.opts['master_port'], source_ip=self.opts.get('source_ip'), source_port=self.opts.get('source_ret_port')) - return self.opts['master_uri'] + + # if we've reached here something is very abnormal + raise SaltException('ReqChannel: missing master_uri/master_ip in self.opts') def _package_load(self, load): return { diff --git a/tests/unit/transport/test_zeromq.py b/tests/unit/transport/test_zeromq.py index 7b96bb90b25d..2ed77c8860c1 100644 --- a/tests/unit/transport/test_zeromq.py +++ b/tests/unit/transport/test_zeromq.py @@ -145,6 +145,17 @@ def _handle_payload(cls, payload): ''' raise tornado.gen.Return((payload, {'fun': 'send_clear'})) + def test_master_uri_override(self): + ''' + ensure master_uri kwarg is respected + ''' + # minion_config should be 127.0.0.1, we want a different uri that still connects + uri = 'tcp://{master_ip}:{master_port}'.format(master_ip='localhost', master_port=self.minion_config['master_port']) + + channel = salt.transport.Channel.factory(self.minion_config, master_uri=uri) + self.assertIn('localhost', channel.master_uri) + del channel + @flaky @skipIf(ON_SUSE, 'Skipping until https://github.com/saltstack/salt/issues/32902 gets fixed') From cd5cd7d0658b89abdd17f98ed2de223c7fe10357 Mon Sep 17 00:00:00 2001 From: Pedro Algarvio Date: Thu, 11 Apr 2019 07:53:19 +0100 Subject: [PATCH 161/340] The exit code matters. Prefix any output with ERROR on non 0 exit code because that's what the supervisord state checks for. --- salt/modules/supervisord.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/salt/modules/supervisord.py b/salt/modules/supervisord.py index acb958723212..9eb71418dd1d 100644 --- a/salt/modules/supervisord.py +++ b/salt/modules/supervisord.py @@ -63,10 +63,12 @@ def _ctl_cmd(cmd, name, conf_file, bin_env): def _get_return(ret): - if ret['retcode'] == 0: - return ret['stdout'] - else: - return '' + retmsg = ret['stdout'] + if ret['retcode'] != 0: + # This is a non 0 exit code + if 'ERROR' not in retmsg: + retmsg = 'ERROR: {}'.format(retmsg) + return retmsg def start(name='all', user=None, conf_file=None, bin_env=None): From dc9ace6405f4e06d3e255d9faa2a2f16cfe280c9 Mon Sep 17 00:00:00 2001 From: Pedro Algarvio Date: Thu, 11 Apr 2019 07:53:19 +0100 Subject: [PATCH 162/340] The exit code matters. Prefix any output with ERROR on non 0 exit code because that's what the supervisord state checks for. 
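To make that contract concrete, a small sketch of how a caller such as the supervisord state can rely on the ERROR prefix. The function name and return-dict shape are illustrative only, not Salt's actual API:

    def interpret_ctl_output(output):
        # Mirrors the convention the state checks for: any supervisorctl
        # output carrying the ERROR marker is treated as a failed run.
        if 'ERROR' in output:
            return {'result': False, 'comment': output}
        return {'result': True, 'comment': output}

    # interpret_ctl_output('ERROR: myapp: no such process') -> result False
    # interpret_ctl_output('myapp: started')                -> result True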
--- salt/modules/supervisord.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/salt/modules/supervisord.py b/salt/modules/supervisord.py index acb958723212..9eb71418dd1d 100644 --- a/salt/modules/supervisord.py +++ b/salt/modules/supervisord.py @@ -63,10 +63,12 @@ def _ctl_cmd(cmd, name, conf_file, bin_env): def _get_return(ret): - if ret['retcode'] == 0: - return ret['stdout'] - else: - return '' + retmsg = ret['stdout'] + if ret['retcode'] != 0: + # This is a non 0 exit code + if 'ERROR' not in retmsg: + retmsg = 'ERROR: {}'.format(retmsg) + return retmsg def start(name='all', user=None, conf_file=None, bin_env=None): From 3a3114d819093a0074c5e73ae5dffc3300201f08 Mon Sep 17 00:00:00 2001 From: Daniel Wozniak Date: Thu, 11 Apr 2019 13:43:23 -0700 Subject: [PATCH 163/340] Fix merge conflict wart --- salt/modules/opkg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/opkg.py b/salt/modules/opkg.py index 84355e9264a7..8a3ab24a4af4 100644 --- a/salt/modules/opkg.py +++ b/salt/modules/opkg.py @@ -1179,7 +1179,7 @@ def del_repo(repo, **kwargs): # pylint: disable=unused-argument os.remove(repo_file) except OSError: pass - ret += msg.format(alias, repo_file) + ret += msg.format(repo, repo_file) if refresh: refresh_db() return ret From 7a2f43251092163223e169c3d5eb2b51a70c7236 Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Thu, 11 Apr 2019 21:17:28 +0000 Subject: [PATCH 164/340] Only destroy namespaced_event when it exists --- salt/utils/schedule.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/salt/utils/schedule.py b/salt/utils/schedule.py index 0c1a54344231..ddafbf65edd7 100644 --- a/salt/utils/schedule.py +++ b/salt/utils/schedule.py @@ -805,7 +805,9 @@ def handle_func(self, multiprocessing_enabled, func, data): log.exception('Unhandled exception firing __schedule_return event') finally: event.destroy() - namespaced_event.destroy() + + if self.opts['__role'] == 'master': + namespaced_event.destroy() if not self.standalone: log.debug('schedule.handle_func: Removing %s', proc_fn) From dd002fdad827a6c7254f3c9421a55ba200ea5ff2 Mon Sep 17 00:00:00 2001 From: Daniel Wozniak Date: Thu, 11 Apr 2019 15:38:15 -0700 Subject: [PATCH 165/340] Fix linter --- tests/unit/states/test_host.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/unit/states/test_host.py b/tests/unit/states/test_host.py index 7f5b9c7ce996..645a3d8a75f9 100644 --- a/tests/unit/states/test_host.py +++ b/tests/unit/states/test_host.py @@ -294,7 +294,6 @@ def test_present(self): assert add_host.mock_calls == [], add_host.mock_calls assert rm_host.mock_calls == [], rm_host.mock_calls - def test_host_present_should_return_True_if_test_and_no_changes(self): expected = { 'comment': 'Host {} ({}) already present'.format( From 876dd18e9be281cb85fbe11ec9b6ed81a722c713 Mon Sep 17 00:00:00 2001 From: "Daniel A. 
Wozniak" Date: Fri, 12 Apr 2019 03:58:00 +0000 Subject: [PATCH 166/340] Fix merge wart --- salt/pillar/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/pillar/__init__.py b/salt/pillar/__init__.py index e95e9113ff79..4592ac0c7bbc 100644 --- a/salt/pillar/__init__.py +++ b/salt/pillar/__init__.py @@ -774,7 +774,7 @@ def render_pstate(self, sls, saltenv, mods, defaults=None): matched_pstates = fnmatch.filter( self.avail[saltenv], sub_sls.lstrip('.').replace('/', '.'), - )) + ) except KeyError: errors.extend( ['No matching pillar environment for environment ' From 84ff03ad6efdd5e3ee162ad53f5b5c4f3cb000b2 Mon Sep 17 00:00:00 2001 From: lomeroe Date: Fri, 12 Apr 2019 10:55:11 -0500 Subject: [PATCH 167/340] Correct attempting to use the serializer_name to pull deserializer_opts when deserializer_name is used as the key in the deserializer_options dict. --- salt/states/file.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/states/file.py b/salt/states/file.py index 81fe9e3eb8c4..8e184afd0583 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -6846,7 +6846,7 @@ def serialize(name, try: existing_data = __serializers__[deserializer_name]( fhr, - **deserializer_options.get(serializer_name, {}) + **deserializer_options.get(deserializer_name, {}) ) except (TypeError, DeserializationError) as exc: ret['result'] = False From 9ec54c47c7c1ec842dfd7d708f367c2c68b26499 Mon Sep 17 00:00:00 2001 From: twangboy Date: Fri, 12 Apr 2019 13:44:49 -0600 Subject: [PATCH 168/340] Add TLS support up to TLS 1.2 --- pkg/windows/modules/download-module.psm1 | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/windows/modules/download-module.psm1 b/pkg/windows/modules/download-module.psm1 index 5c5ea9773605..8135b8da7ef5 100644 --- a/pkg/windows/modules/download-module.psm1 +++ b/pkg/windows/modules/download-module.psm1 @@ -1,3 +1,6 @@ +# Powershell supports only TLS 1.0 by default. Add support up to TLS 1.2 +[System.Net.ServicePointManager]::SecurityProtocol = [System.Net.SecurityProtocolType]'Tls,Tls11,Tls12' + Function DownloadFileWithProgress { # Code for this function borrowed from http://poshcode.org/2461 From b394ad566d0d1c141c8a2415f62ca9329d161f68 Mon Sep 17 00:00:00 2001 From: twangboy Date: Fri, 12 Apr 2019 14:47:22 -0600 Subject: [PATCH 169/340] Bring #52191 into 2019.2 branch --- salt/modules/chocolatey.py | 171 +++++++++++++++++-------------------- 1 file changed, 80 insertions(+), 91 deletions(-) diff --git a/salt/modules/chocolatey.py b/salt/modules/chocolatey.py index 3547e8d68bd8..de2575a7661e 100644 --- a/salt/modules/chocolatey.py +++ b/salt/modules/chocolatey.py @@ -9,9 +9,10 @@ # Import python libs import logging -import os.path +import os import re import tempfile +from requests.structures import CaseInsensitiveDict # Import salt libs import salt.utils.data @@ -48,65 +49,72 @@ def __virtual__(): return 'chocolatey' -def _clear_context(context): +def _clear_context(): ''' Clear variables stored in __context__. Run this function when a new version of chocolatey is installed. 
''' for var in (x for x in __context__ if x.startswith('chocolatey.')): - context.pop(var) + __context__.pop(var) -def _yes(context): +def _yes(): ''' Returns ['--yes'] if on v0.9.9.0 or later, otherwise returns an empty list ''' if 'chocolatey._yes' in __context__: - return context['chocolatey._yes'] + return __context__['chocolatey._yes'] if _LooseVersion(chocolatey_version()) >= _LooseVersion('0.9.9'): answer = ['--yes'] else: answer = [] - context['chocolatey._yes'] = answer - return answer + __context__['chocolatey._yes'] = answer + return __context__['chocolatey._yes'] -def _no_progress(context): +def _no_progress(): ''' Returns ['--no-progress'] if on v0.10.4 or later, otherwise returns an empty list ''' if 'chocolatey._no_progress' in __context__: - return context['chocolatey._no_progress'] + return __context__['chocolatey._no_progress'] if _LooseVersion(chocolatey_version()) >= _LooseVersion('0.10.4'): answer = ['--no-progress'] else: log.warning('--no-progress unsupported in choco < 0.10.4') answer = [] - context['chocolatey._no_progress'] = answer - return answer + __context__['chocolatey._no_progress'] = answer + return __context__['chocolatey._no_progress'] -def _find_chocolatey(context, salt): +def _find_chocolatey(): ''' Returns the full path to chocolatey.bat on the host. ''' - if 'chocolatey._path' in context: - return context['chocolatey._path'] - choc_defaults = ['C:\\Chocolatey\\bin\\chocolatey.bat', - 'C:\\ProgramData\\Chocolatey\\bin\\chocolatey.exe', ] - - choc_path = salt['cmd.which']('chocolatey.exe') - if not choc_path: - for choc_dir in choc_defaults: - if salt['cmd.has_exec'](choc_dir): - choc_path = choc_dir - if not choc_path: - err = ('Chocolatey not installed. Use chocolatey.bootstrap to ' - 'install the Chocolatey package manager.') - raise CommandExecutionError(err) - context['chocolatey._path'] = choc_path - return choc_path + # Check context + if 'chocolatey._path' in __context__: + return __context__['chocolatey._path'] + + # Check the path + choc_path = __salt__['cmd.which']('chocolatey.exe') + if choc_path: + __context__['chocolatey._path'] = choc_path + return __context__['chocolatey._path'] + + # Check in common locations + choc_defaults = [ + os.path.join(os.environ.get('ProgramData'), 'Chocolatey', 'bin', 'chocolatey.exe'), + os.path.join(os.environ.get('SystemDrive'), 'Chocolatey', 'bin', 'chocolatey.bat')] + for choc_exe in choc_defaults: + if os.path.isfile(choc_exe): + __context__['chocolatey._path'] = choc_exe + return __context__['chocolatey._path'] + + # Not installed, raise an error + err = ('Chocolatey not installed. 
Use chocolatey.bootstrap to ' + 'install the Chocolatey package manager.') + raise CommandExecutionError(err) def chocolatey_version(): @@ -122,7 +130,7 @@ def chocolatey_version(): if 'chocolatey._version' in __context__: return __context__['chocolatey._version'] - cmd = [_find_chocolatey(__context__, __salt__)] + cmd = [_find_chocolatey()] cmd.append('-v') out = __salt__['cmd.run'](cmd, python_shell=False) __context__['chocolatey._version'] = out @@ -155,7 +163,7 @@ def bootstrap(force=False): ''' # Check if Chocolatey is already present in the path try: - choc_path = _find_chocolatey(__context__, __salt__) + choc_path = _find_chocolatey() except CommandExecutionError: choc_path = None if choc_path and not force: @@ -219,9 +227,8 @@ def bootstrap(force=False): result = __salt__['cmd.run_all'](cmd, python_shell=True) if result['retcode'] != 0: - raise CommandExecutionError( - 'Bootstrapping Chocolatey failed: {0}'.format(result['stderr']) - ) + err = 'Bootstrapping Chocolatey failed: {0}'.format(result['stderr']) + raise CommandExecutionError(err) return result['stdout'] @@ -271,7 +278,7 @@ def list_(narrow=None, salt '*' chocolatey.list salt '*' chocolatey.list all_versions=True ''' - choc_path = _find_chocolatey(__context__, __salt__) + choc_path = _find_chocolatey() cmd = [choc_path, 'list'] if narrow: cmd.append(narrow) @@ -292,11 +299,10 @@ def list_(narrow=None, result = __salt__['cmd.run_all'](cmd, python_shell=False) if result['retcode'] != 0: - raise CommandExecutionError( - 'Running chocolatey failed: {0}'.format(result['stdout']) - ) + err = 'Running chocolatey failed: {0}'.format(result['stdout']) + raise CommandExecutionError(err) - ret = {} + ret = CaseInsensitiveDict({}) pkg_re = re.compile(r'(\S+)\|(\S+)') for line in result['stdout'].split('\n'): if line.startswith("No packages"): @@ -325,14 +331,13 @@ def list_webpi(): salt '*' chocolatey.list_webpi ''' - choc_path = _find_chocolatey(__context__, __salt__) + choc_path = _find_chocolatey() cmd = [choc_path, 'list', '--source', 'webpi'] result = __salt__['cmd.run_all'](cmd, python_shell=False) if result['retcode'] != 0: - raise CommandExecutionError( - 'Running chocolatey failed: {0}'.format(result['stdout']) - ) + err = 'Running chocolatey failed: {0}'.format(result['stdout']) + raise CommandExecutionError(err) return result['stdout'] @@ -351,14 +356,13 @@ def list_windowsfeatures(): salt '*' chocolatey.list_windowsfeatures ''' - choc_path = _find_chocolatey(__context__, __salt__) + choc_path = _find_chocolatey() cmd = [choc_path, 'list', '--source', 'windowsfeatures'] result = __salt__['cmd.run_all'](cmd, python_shell=False) if result['retcode'] != 0: - raise CommandExecutionError( - 'Running chocolatey failed: {0}'.format(result['stdout']) - ) + err = 'Running chocolatey failed: {0}'.format(result['stdout']) + raise CommandExecutionError(err) return result['stdout'] @@ -449,7 +453,7 @@ def install(name, raise SaltInvocationError( 'Cannot use \'force\' in conjunction with \'allow_multiple\'') - choc_path = _find_chocolatey(__context__, __salt__) + choc_path = _find_chocolatey() # chocolatey helpfully only supports a single package argument # CORRECTION: it also supports multiple package names separated by spaces # but any additional arguments apply to ALL packages specified @@ -476,18 +480,17 @@ def install(name, cmd.extend(['--execution-timeout', execution_timeout]) # Salt doesn't need to see the progress - cmd.extend(_no_progress(__context__)) - cmd.extend(_yes(__context__)) + cmd.extend(_no_progress()) + 
cmd.extend(_yes()) result = __salt__['cmd.run_all'](cmd, python_shell=False) if result['retcode'] not in [0, 1641, 3010]: - raise CommandExecutionError( - 'Running chocolatey failed: {0}'.format(result['stdout']) - ) + err = 'Running chocolatey failed: {0}'.format(result['stdout']) + raise CommandExecutionError(err) if name == 'chocolatey': - _clear_context(__context__) + _clear_context() return result['stdout'] @@ -587,25 +590,23 @@ def install_missing(name, version=None, source=None): salt '*' chocolatey.install_missing salt '*' chocolatey.install_missing version= ''' - choc_path = _find_chocolatey(__context__, __salt__) if _LooseVersion(chocolatey_version()) >= _LooseVersion('0.9.8.24'): log.warning('installmissing is deprecated, using install') return install(name, version=version) # chocolatey helpfully only supports a single package argument - cmd = [choc_path, 'installmissing', name] + cmd = [_find_chocolatey(), 'installmissing', name] if version: cmd.extend(['--version', version]) if source: cmd.extend(['--source', source]) # Shouldn't need this as this code should never run on v0.9.9 and newer - cmd.extend(_yes(__context__)) + cmd.extend(_yes()) result = __salt__['cmd.run_all'](cmd, python_shell=False) if result['retcode'] != 0: - raise CommandExecutionError( - 'Running chocolatey failed: {0}'.format(result['stdout']) - ) + err = 'Running chocolatey failed: {0}'.format(result['stdout']) + raise CommandExecutionError(err) return result['stdout'] @@ -723,22 +724,20 @@ def uninstall(name, version=None, uninstall_args=None, override_args=False): salt '*' chocolatey.uninstall version= salt '*' chocolatey.uninstall version= uninstall_args= override_args=True ''' - choc_path = _find_chocolatey(__context__, __salt__) # chocolatey helpfully only supports a single package argument - cmd = [choc_path, 'uninstall', name] + cmd = [_find_chocolatey(), 'uninstall', name] if version: cmd.extend(['--version', version]) if uninstall_args: cmd.extend(['--uninstallarguments', uninstall_args]) if override_args: cmd.extend(['--overridearguments']) - cmd.extend(_yes(__context__)) + cmd.extend(_yes()) result = __salt__['cmd.run_all'](cmd, python_shell=False) if result['retcode'] not in [0, 1605, 1614, 1641]: - raise CommandExecutionError( - 'Running chocolatey failed: {0}'.format(result['stdout']) - ) + err = 'Running chocolatey failed: {0}'.format(result['stdout']) + raise CommandExecutionError(err) return result['stdout'] @@ -805,8 +804,7 @@ def upgrade(name, salt "*" chocolatey.upgrade pre_versions=True ''' # chocolatey helpfully only supports a single package argument - choc_path = _find_chocolatey(__context__, __salt__) - cmd = [choc_path, 'upgrade', name] + cmd = [_find_chocolatey(), 'upgrade', name] if version: cmd.extend(['--version', version]) if source: @@ -825,15 +823,14 @@ def upgrade(name, cmd.extend(['--packageparameters', package_args]) # Salt doesn't need to see the progress - cmd.extend(_no_progress(__context__)) - cmd.extend(_yes(__context__)) + cmd.extend(_no_progress()) + cmd.extend(_yes()) result = __salt__['cmd.run_all'](cmd, python_shell=False) if result['retcode'] not in [0, 1641, 3010]: - raise CommandExecutionError( - 'Running chocolatey failed: {0}'.format(result['stdout']) - ) + err = 'Running chocolatey failed: {0}'.format(result['stdout']) + raise CommandExecutionError(err) return result['stdout'] @@ -861,27 +858,25 @@ def update(name, source=None, pre_versions=False): salt "*" chocolatey.update pre_versions=True ''' # chocolatey helpfully only supports a single package 
argument - choc_path = _find_chocolatey(__context__, __salt__) if _LooseVersion(chocolatey_version()) >= _LooseVersion('0.9.8.24'): log.warning('update is deprecated, using upgrade') return upgrade(name, source=source, pre_versions=pre_versions) - cmd = [choc_path, 'update', name] + cmd = [_find_chocolatey(), 'update', name] if source: cmd.extend(['--source', source]) if salt.utils.data.is_true(pre_versions): cmd.append('--prerelease') # Salt doesn't need to see the progress - cmd.extend(_no_progress(__context__)) - cmd.extend(_yes(__context__)) + cmd.extend(_no_progress()) + cmd.extend(_yes()) result = __salt__['cmd.run_all'](cmd, python_shell=False) if result['retcode'] not in [0, 1641, 3010]: - raise CommandExecutionError( - 'Running chocolatey failed: {0}'.format(result['stdout']) - ) + err = 'Running chocolatey failed: {0}'.format(result['stdout']) + raise CommandExecutionError(err) return result['stdout'] @@ -919,7 +914,6 @@ def version(name, check_remote=False, source=None, pre_versions=False): salt "*" chocolatey.version check_remote=True ''' installed = list_(narrow=name, local_only=True) - installed = {k.lower(): v for k, v in installed.items()} packages = {} lower_name = name.lower() @@ -929,7 +923,6 @@ def version(name, check_remote=False, source=None, pre_versions=False): if check_remote: available = list_(narrow=name, pre_versions=pre_versions, source=source) - available = {k.lower(): v for k, v in available.items()} for pkg in packages: packages[pkg] = {'installed': installed[pkg], @@ -964,8 +957,7 @@ def add_source(name, source_location, username=None, password=None): salt '*' chocolatey.add_source user= password= ''' - choc_path = _find_chocolatey(__context__, __salt__) - cmd = [choc_path, 'sources', 'add', '--name', name, '--source', source_location] + cmd = [_find_chocolatey(), 'sources', 'add', '--name', name, '--source', source_location] if username: cmd.extend(['--user', username]) if password: @@ -973,9 +965,8 @@ def add_source(name, source_location, username=None, password=None): result = __salt__['cmd.run_all'](cmd, python_shell=False) if result['retcode'] != 0: - raise CommandExecutionError( - 'Running chocolatey failed: {0}'.format(result['stdout']) - ) + err = 'Running chocolatey failed: {0}'.format(result['stdout']) + raise CommandExecutionError(err) return result['stdout'] @@ -991,14 +982,12 @@ def _change_source_state(name, state): State in which you want the chocolatey repository. 
''' - choc_path = _find_chocolatey(__context__, __salt__) - cmd = [choc_path, 'source', state, '--name', name] + cmd = [_find_chocolatey(), 'source', state, '--name', name] result = __salt__['cmd.run_all'](cmd, python_shell=False) if result['retcode'] != 0: - raise CommandExecutionError( - 'Running chocolatey failed: {0}'.format(result['stdout']) - ) + err = 'Running chocolatey failed: {0}'.format(result['stdout']) + raise CommandExecutionError(err) return result['stdout'] From c70772c5a0cfd26520708d56a4a2cfbcfddb3504 Mon Sep 17 00:00:00 2001 From: Ch3LL Date: Fri, 12 Apr 2019 19:50:52 -0400 Subject: [PATCH 170/340] Fix linux state tests- add new comment --- tests/integration/modules/test_state.py | 8 ++++---- tests/integration/ssh/test_state.py | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/integration/modules/test_state.py b/tests/integration/modules/test_state.py index e26d4cf0addb..6e431065af55 100644 --- a/tests/integration/modules/test_state.py +++ b/tests/integration/modules/test_state.py @@ -1911,12 +1911,12 @@ def test_state_sls_id_test(self): ''' self._add_runtime_pillar(pillar={'test': True}) testfile = os.path.join(TMP, 'testfile') - comment = 'The file {0} is set to be changed'.format(testfile) + comment = 'The file {0} is set to be changed\nNote: No changes made, actual changes may\nbe different due to other states.'.format(testfile) ret = self.run_function('state.sls', ['core']) for key, val in ret.items(): self.assertEqual(val['comment'], comment) - self.assertEqual(val['changes'], {}) + self.assertEqual(val['changes'], {'newfile': testfile}) def test_state_sls_id_test_state_test_post_run(self): ''' @@ -1948,8 +1948,8 @@ def test_state_sls_id_test_true(self): for key, val in ret.items(): self.assertEqual( val['comment'], - 'The file {0} is set to be changed'.format(file_name)) - self.assertEqual(val['changes'], {}) + 'The file {0} is set to be changed\nNote: No changes made, actual changes may\nbe different due to other states.'.format(file_name)) + self.assertEqual(val['changes'], {'newfile': file_name}) def test_state_sls_id_test_true_post_run(self): ''' diff --git a/tests/integration/ssh/test_state.py b/tests/integration/ssh/test_state.py index 5cc243cc50d1..263ec7347a06 100644 --- a/tests/integration/ssh/test_state.py +++ b/tests/integration/ssh/test_state.py @@ -62,7 +62,7 @@ def test_state_sls_id(self): ret = self.run_function('state.sls_id', ['ssh-file-test', SSH_SLS, 'test=True']) self._check_dict_ret(ret=ret, val='comment', - exp_ret='The file /tmp/test is set to be changed') + exp_ret='The file /tmp/test is set to be changed\nNote: No changes made, actual changes may\nbe different due to other states.') # check state.sls_id without test=True ret = self.run_function('state.sls_id', ['ssh-file-test', SSH_SLS]) From 795a7f67daa07da8bbb3c786b05b079d22a32df6 Mon Sep 17 00:00:00 2001 From: Ch3LL Date: Fri, 12 Apr 2019 21:32:33 -0400 Subject: [PATCH 171/340] Fix windows tests - increase timeout --- tests/integration/client/test_kwarg.py | 5 +++++ tests/integration/client/test_standard.py | 6 +++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/tests/integration/client/test_kwarg.py b/tests/integration/client/test_kwarg.py index bb30ccb63d9e..bc190fa35cae 100644 --- a/tests/integration/client/test_kwarg.py +++ b/tests/integration/client/test_kwarg.py @@ -8,12 +8,16 @@ # Import 3rd-party libs from salt.ext import six +import salt.utils.platform class StdTest(ModuleCase): ''' Test standard client calls ''' + def setUp(self): + 
+        self.TIMEOUT = 600 if salt.utils.platform.is_windows() else 10
+
     def test_cli(self):
         '''
         Test cli function
@@ -84,6 +88,7 @@ def test_kwarg_type(self):
             'minion',
             'test.arg_type',
             ['a', 1],
+            timeout=self.TIMEOUT,
             kwarg={'outer': {'a': terrible_yaml_string},
                    'inner': 'value'}
         )
diff --git a/tests/integration/client/test_standard.py b/tests/integration/client/test_standard.py
index bf108e0442dd..8118036a2edf 100644
--- a/tests/integration/client/test_standard.py
+++ b/tests/integration/client/test_standard.py
@@ -9,12 +9,15 @@
 # Import salt libs
 import salt.utils.files
+import salt.utils.platform
 
 
 class StdTest(ModuleCase):
     '''
     Test standard client calls
     '''
+    def setUp(self):
+        self.TIMEOUT = 600 if salt.utils.platform.is_windows() else 10
 
     def test_cli(self):
         '''
@@ -159,7 +162,8 @@ def test_missing_minion_list(self):
             ret = self.client.cmd(
                 'minion,ghostminion',
                 'test.ping',
-                tgt_type='list'
+                tgt_type='list',
+                timeout=self.TIMEOUT
             )
             self.assertIn('minion', ret)
             self.assertIn('ghostminion', ret)

From e3a031888e4091e1e6a53ecd906b383771c546fb Mon Sep 17 00:00:00 2001
From: "Daniel A. Wozniak"
Date: Sat, 13 Apr 2019 20:50:08 +0000
Subject: [PATCH 172/340] Fix integration.client.test_kwarg test timeouts

---
 tests/integration/client/test_kwarg.py | 26 ++++++++++++++++++--------
 1 file changed, 18 insertions(+), 8 deletions(-)

diff --git a/tests/integration/client/test_kwarg.py b/tests/integration/client/test_kwarg.py
index bb30ccb63d9e..a82d98da0e72 100644
--- a/tests/integration/client/test_kwarg.py
+++ b/tests/integration/client/test_kwarg.py
@@ -10,6 +10,9 @@
 from salt.ext import six
 
 
+TIMEOUT = 600
+
+
 class StdTest(ModuleCase):
     '''
     Test standard client calls
@@ -81,12 +84,15 @@ def test_kwarg_type(self):
         '''
         terrible_yaml_string = 'foo: ""\n# \''
         ret = self.client.cmd_full_return(
-            'minion',
-            'test.arg_type',
-            ['a', 1],
-            kwarg={'outer': {'a': terrible_yaml_string},
-                   'inner': 'value'}
-        )
+            'minion',
+            'test.arg_type',
+            ['a', 1],
+            kwarg={
+                'outer': {'a': terrible_yaml_string},
+                'inner': 'value'
+            },
+            timeout=TIMEOUT,
+        )
         data = ret['minion']['ret']
         self.assertIn(six.text_type.__name__, data['args'][0])
         self.assertIn('int', data['args'][1])
@@ -94,7 +100,9 @@ def test_kwarg_type(self):
         self.assertIn(six.text_type.__name__, data['kwargs']['inner'])
 
     def test_full_return_kwarg(self):
-        ret = self.client.cmd('minion', 'test.ping', full_return=True)
+        ret = self.client.cmd(
+            'minion', 'test.ping', full_return=True, timeout=TIMEOUT,
+        )
         for mid, data in ret.items():
             self.assertIn('retcode', data)
 
@@ -107,7 +115,9 @@ def test_cmd_arg_kwarg_parsing(self):
             ],
             kwarg={
                 'quux': 'Quux',
-            })
+            },
+            timeout=TIMEOUT,
+        )
 
         self.assertEqual(ret['minion'], {
             'args': ['foo'],

From 4d2625a7a07b3064333ca859d4896d5a2cfb0145 Mon Sep 17 00:00:00 2001
From: "Daniel A. Wozniak"
Wozniak" Date: Sat, 13 Apr 2019 21:30:59 +0000 Subject: [PATCH 173/340] Fix windows platform grains Fix unit.grains.test_core.CoreGrainsTestCase.test__windows_platform_data by removing the 'domain' grain since it is not included on any branch --- tests/unit/grains/test_core.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py index 5bd22ce7d1c5..b2f784b14fa7 100644 --- a/tests/unit/grains/test_core.py +++ b/tests/unit/grains/test_core.py @@ -138,7 +138,6 @@ def test__windows_platform_data(self): grains = core._windows_platform_data() keys = ['biosversion', 'osrelease', - 'domain', 'kernelrelease', 'motherboard', 'serialnumber', From ea0520c8e3eedec2a835d06419db6fb10b84538d Mon Sep 17 00:00:00 2001 From: lomeroe Date: Sun, 14 Apr 2019 10:49:50 -0500 Subject: [PATCH 174/340] update test for serialization opts --- tests/integration/states/test_file.py | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/tests/integration/states/test_file.py b/tests/integration/states/test_file.py index fa75d14b6c0b..ac444504a17d 100644 --- a/tests/integration/states/test_file.py +++ b/tests/integration/states/test_file.py @@ -42,6 +42,7 @@ import salt.utils.path import salt.utils.platform import salt.utils.stringutils +import salt.serializers.configparser from salt.utils.versions import LooseVersion as _LooseVersion HAS_PWD = True @@ -1710,16 +1711,16 @@ def test_serializer_deserializer_opts(self, name): ''' Test the serializer_opts and deserializer_opts options ''' - data1 = {'foo': {'bar': 'baz'}} + data1 = {'foo': {'bar': '%(x)s'}} data2 = {'foo': {'abc': 123}} - merged = {'foo': {'bar': 'baz', 'abc': 123}} + merged = {'foo': {'y': 'not_used', 'x': 'baz', 'abc': 123, 'bar': u'baz'}} ret = self.run_state( 'file.serialize', name=name, dataset=data1, - formatter='json', - deserializer_opts=[{'encoding': 'latin-1'}]) + formatter='configparser', + deserializer_opts=[{'defaults': {'y': 'not_used'}}]) ret = ret[next(iter(ret))] assert ret['result'], ret # We should have warned about deserializer_opts being used when @@ -1727,25 +1728,31 @@ def test_serializer_deserializer_opts(self, name): assert 'warnings' in ret # Run with merge_if_exists, as well as serializer and deserializer opts + # deserializer opts will be used for string interpolation of the %(x)s + # that was written to the file with data1 (i.e. bar should become baz) ret = self.run_state( 'file.serialize', name=name, dataset=data2, - formatter='json', + formatter='configparser', merge_if_exists=True, - serializer_opts=[{'indent': 8}], - deserializer_opts=[{'encoding': 'latin-1'}]) + serializer_opts=[{'defaults': {'y': 'not_used'}}], + deserializer_opts=[{'defaults': {'x': 'baz'}}]) ret = ret[next(iter(ret))] assert ret['result'], ret with salt.utils.files.fopen(name) as fp_: - serialized_data = salt.utils.json.load(fp_) + serialized_data = salt.serializers.configparser.deserialize(fp_) # If this test fails, this debug logging will help tell us how the # serialized data differs from what was serialized. 
        log.debug('serialized_data = %r', serialized_data)
        log.debug('merged = %r', merged)
-        assert serialized_data == merged
+        # serializing with a default of 'y' will add y = not_used into foo
+        assert serialized_data['foo']['y'] == merged['foo']['y']
+        # deserializing with default of x = baz will perform interpolation on %(x)s
+        # and bar will then = baz
+        assert serialized_data['foo']['bar'] == merged['foo']['bar']
 
     @with_tempdir()
     def test_replace_issue_18841_omit_backup(self, base_dir):

From 6ca8f4b978ad0f28f114b69c61cde215b5057fdd Mon Sep 17 00:00:00 2001
From: Jeroen Schutrup
Date: Sun, 12 Aug 2018 19:43:22 +0200
Subject: [PATCH 175/340] Try/except undefineFlags() as this operation is not supported on bhyve

(cherry picked from commit 29a44aceb1a73347ac07dd241b4a64a4a38cef6e)
---
 salt/modules/virt.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/salt/modules/virt.py b/salt/modules/virt.py
index 50df89dbd244..8d63b6ff4c8b 100644
--- a/salt/modules/virt.py
+++ b/salt/modules/virt.py
@@ -3189,7 +3189,10 @@ def purge(vm_, dirs=False, removables=None, **kwargs):
             shutil.rmtree(dir_)
     if getattr(libvirt, 'VIR_DOMAIN_UNDEFINE_NVRAM', False):
         # This one is only in 1.2.8+
-        dom.undefineFlags(libvirt.VIR_DOMAIN_UNDEFINE_NVRAM)
+        try:
+            dom.undefineFlags(libvirt.VIR_DOMAIN_UNDEFINE_NVRAM)
+        except Exception:
+            dom.undefine()
     else:
         dom.undefine()
     conn.close()

From 474efa1ce42c6c29c4ffd4ac733faabaab6f0ac0 Mon Sep 17 00:00:00 2001
From: Ch3LL
Date: Mon, 15 Apr 2019 12:14:55 -0400
Subject: [PATCH 176/340] Fix pylint and state test failure

---
 salt/modules/chocolatey.py | 1 -
 salt/states/file.py | 2 +-
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/salt/modules/chocolatey.py b/salt/modules/chocolatey.py
index 6a1dc7e1db0d..9d98a47e8993 100644
--- a/salt/modules/chocolatey.py
+++ b/salt/modules/chocolatey.py
@@ -298,7 +298,6 @@ def list_(narrow=None,
     result = __salt__['cmd.run_all'](cmd, python_shell=False)
-
     # Chocolatey introduced Enhanced Exit Codes starting with version 0.10.12
     # Exit Code 2 means there were no results, but is not a failure
     # This may start to effect other functions in the future as Chocolatey
diff --git a/salt/states/file.py b/salt/states/file.py
index 81fe9e3eb8c4..0a790e281738 100644
--- a/salt/states/file.py
+++ b/salt/states/file.py
@@ -2775,7 +2775,7 @@ def managed(name,
                 reset=win_perms_reset)
         except CommandExecutionError as exc:
             if exc.strerror.startswith('Path not found'):
-                ret['changes'] = {name: 'will be created'}
+                ret['changes']['newfile'] = name
 
     if isinstance(ret['changes'], tuple):
         ret['result'], ret['comment'] = ret['changes']

From 28af7170789c4c1db8e4c4fb6bb11923604fa871 Mon Sep 17 00:00:00 2001
From: "Gareth J. Greenaway"
Date: Mon, 15 Apr 2019 09:22:58 -0700
Subject: [PATCH 177/340] Marking the two tests in test_gce as flaky.

--- tests/integration/cloud/clouds/test_gce.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/integration/cloud/clouds/test_gce.py b/tests/integration/cloud/clouds/test_gce.py index 6a02ab26f278..26ed517eb462 100644 --- a/tests/integration/cloud/clouds/test_gce.py +++ b/tests/integration/cloud/clouds/test_gce.py @@ -14,7 +14,7 @@ # Import Salt Testing Libs from tests.support.case import ShellCase from tests.support.paths import FILES -from tests.support.helpers import expensiveTest, generate_random_name +from tests.support.helpers import expensiveTest, generate_random_name, flaky TIMEOUT = 500 @@ -72,6 +72,7 @@ def setUp(self): .format(provider) ) + @flaky def test_instance(self): ''' Tests creating and deleting an instance on GCE @@ -100,6 +101,7 @@ def test_instance(self): except AssertionError: raise + @flaky def test_instance_extra(self): ''' Tests creating and deleting an instance on GCE From eba45074ce52ced24d0a6efa6f1ae375ac7a3f85 Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 15 Apr 2019 10:42:43 -0600 Subject: [PATCH 178/340] Bring 52170 into 2019.2 Mostly documentation fixes --- salt/modules/win_task.py | 1118 ++++++++++++++++++++++---------------- 1 file changed, 640 insertions(+), 478 deletions(-) diff --git a/salt/modules/win_task.py b/salt/modules/win_task.py index 50450e906fee..d28d02909e3a 100644 --- a/salt/modules/win_task.py +++ b/salt/modules/win_task.py @@ -10,16 +10,16 @@ You can list all tasks, folders, triggers, and actions. ''' # Import Python libs -from __future__ import absolute_import, unicode_literals, print_function +from __future__ import absolute_import, print_function, unicode_literals +from datetime import datetime import logging import time -from datetime import datetime # Import Salt libs import salt.utils.platform import salt.utils.winapi -# Import 3rd-party libraries +# Import 3rd Party Libraries try: import pythoncom import win32com.client @@ -321,18 +321,25 @@ def list_tasks(location='\\'): r''' List all tasks located in a specific location in the task scheduler. - :param str location: A string value representing the folder from which you - want to list tasks. Default is '\\' which is the root for the task - scheduler (C:\Windows\System32\tasks). + Args: + + location (str): + A string value representing the folder from which you want to list + tasks. Default is '\' which is the root for the task scheduler + (C:\Windows\System32\tasks). - :return: Returns a list of tasks. - :rtype: list + Returns: + list: Returns a list of tasks CLI Example: .. code-block:: bash + # List all tasks in the default location salt 'minion-id' task.list_tasks + + # List all tasks in the Microsoft\XblGameSave Directory + salt 'minion-id' task.list_tasks Microsoft\XblGameSave ''' # Create the task service object with salt.utils.winapi.Com(): @@ -354,18 +361,25 @@ def list_folders(location='\\'): r''' List all folders located in a specific location in the task scheduler. - :param str location: A string value representing the folder from which you - want to list tasks. Default is '\\' which is the root for the task - scheduler (C:\Windows\System32\tasks). + Args: + + location (str): + A string value representing the folder from which you want to list + tasks. Default is '\' which is the root for the task scheduler + (C:\Windows\System32\tasks). - :return: Returns a list of folders. - :rtype: list + Returns: + list: Returns a list of folders. CLI Example: .. 
code-block:: bash + # List all folders in the default location salt 'minion-id' task.list_folders + + # List all folders in the Microsoft directory + salt 'minion-id' task.list_folders Microsoft ''' # Create the task service object with salt.utils.winapi.Com(): @@ -387,20 +401,28 @@ def list_triggers(name, location='\\'): r''' List all triggers that pertain to a task in the specified location. - :param str name: The name of the task for which list triggers. + Args: + + name (str): + The name of the task for which list triggers. - :param str location: A string value representing the location of the task - from which to list triggers. Default is '\\' which is the root for the - task scheduler (C:\Windows\System32\tasks). + location (str): A string value representing the location of the task + from which to list triggers. Default is '\' which is the root for + the task scheduler (C:\Windows\System32\tasks). - :return: Returns a list of triggers. - :rtype: list + Returns: + list: Returns a list of triggers. CLI Example: .. code-block:: bash + # List all triggers for a task in the default location salt 'minion-id' task.list_triggers + + # List all triggers for the XblGameSaveTask in the Microsoft\XblGameSave + # location + salt 'minion-id' task.list_triggers XblGameSaveTask Microsoft\XblGameSave ''' # Create the task service object with salt.utils.winapi.Com(): @@ -423,20 +445,29 @@ def list_actions(name, location='\\'): r''' List all actions that pertain to a task in the specified location. - :param str name: The name of the task for which list actions. + Args: - :param str location: A string value representing the location of the task - from which to list actions. Default is '\\' which is the root for the - task scheduler (C:\Windows\System32\tasks). + name (str): + The name of the task for which list actions. - :return: Returns a list of actions. - :rtype: list + location (str): + A string value representing the location of the task from which to + list actions. Default is '\' which is the root for the task + scheduler (C:\Windows\System32\tasks). + + Returns: + list: Returns a list of actions. CLI Example: .. code-block:: bash + # List all actions for a task in the default location salt 'minion-id' task.list_actions + + # List all actions for the XblGameSaveTask in the Microsoft\XblGameSave + # location + salt 'minion-id' task.list_actions XblGameSaveTask Microsoft\XblGameSave ''' # Create the task service object with salt.utils.winapi.Com(): @@ -469,25 +500,30 @@ def create_task(name, - :py:func:`add_action` - :py:func:`add_trigger` - :param str name: The name of the task. This will be displayed in the task - scheduler. + Args: - :param str location: A string value representing the location in which to - create the task. Default is '\\' which is the root for the task - scheduler (C:\Windows\System32\tasks). + name (str): + The name of the task. This will be displayed in the task scheduler. - :param str user_name: The user account under which to run the task. To - specify the 'System' account, use 'System'. The password will be - ignored. + location (str): + A string value representing the location in which to create the + task. Default is '\' which is the root for the task scheduler + (C:\Windows\System32\tasks). - :param str password: The password to use for authentication. This should set - the task to run whether the user is logged in or not, but is currently - not working. + user_name (str): + The user account under which to run the task. 
To specify the + 'System' account, use 'System'. The password will be ignored. - :param bool force: If the task exists, overwrite the existing task. + password (str): + The password to use for authentication. This should set the task to + run whether the user is logged in or not, but is currently not + working. - :return: True if successful, False if unsuccessful - :rtype: bool + force (bool): + If the task exists, overwrite the existing task. + + Returns: + bool: ``True`` if successful, otherwise ``False`` CLI Example: @@ -547,29 +583,35 @@ def create_task_from_xml(name, r''' Create a task based on XML. Source can be a file or a string of XML. - :param str name: The name of the task. This will be displayed in the task - scheduler. + Args: - :param str location: A string value representing the location in which to - create the task. Default is '\\' which is the root for the task - scheduler (C:\Windows\System32\tasks). + name (str): + The name of the task. This will be displayed in the task scheduler. - :param str xml_text: A string of xml representing the task to be created. - This will be overridden by `xml_path` if passed. + location (str): + A string value representing the location in which to create the + task. Default is '\' which is the root for the task scheduler + (C:\Windows\System32\tasks). - :param str xml_path: The path to an XML file on the local system containing - the xml that defines the task. This will override `xml_text` + xml_text (str): + A string of xml representing the task to be created. This will be + overridden by `xml_path` if passed. - :param str user_name: The user account under which to run the task. To - specify the 'System' account, use 'System'. The password will be - ignored. + xml_path (str): + The path to an XML file on the local system containing the xml that + defines the task. This will override `xml_text` - :param str password: The password to use for authentication. This should set - the task to run whether the user is logged in or not, but is currently - not working. + user_name (str): + The user account under which to run the task. To specify the + 'System' account, use 'System'. The password will be ignored. - :return: True if successful, False if unsuccessful - :rtype: bool + password (str): + The password to use for authentication. This should set the task to + run whether the user is logged in or not, but is currently not + working. + + Returns: + bool: ``True`` if successful, otherwise ``False`` CLI Example: @@ -631,7 +673,7 @@ def create_task_from_xml(name, except KeyError: failure_code = 'Unknown Failure: {0}'.format(error) - log.debug('Failed to create task: %s', failure_code) + log.debug('Failed to create task: {0}'.format(failure_code)) # Verify creation if name in list_tasks(location): @@ -644,15 +686,19 @@ def create_folder(name, location='\\'): r''' Create a folder in which to create tasks. - :param str name: The name of the folder. This will be displayed in the task - scheduler. + Args: - :param str location: A string value representing the location in which to - create the folder. Default is '\\' which is the root for the task - scheduler (C:\Windows\System32\tasks). + name (str): + The name of the folder. This will be displayed in the task + scheduler. - :return: True if successful, False if unsuccessful - :rtype: bool + location (str): + A string value representing the location in which to create the + folder. Default is '\' which is the root for the task scheduler + (C:\Windows\System32\tasks). 
+ + Returns: + bool: ``True`` if successful, otherwise ``False`` CLI Example: @@ -714,148 +760,167 @@ def edit_task(name=None, r''' Edit the parameters of a task. Triggers and Actions cannot be edited yet. - :param str name: The name of the task. This will be displayed in the task - scheduler. - - :param str location: A string value representing the location in which to - create the task. Default is '\\' which is the root for the task - scheduler (C:\Windows\System32\tasks). - - :param str user_name: The user account under which to run the task. To - specify the 'System' account, use 'System'. The password will be - ignored. - - :param str password: The password to use for authentication. This should set - the task to run whether the user is logged in or not, but is currently - not working. - - .. note:: - The combination of user_name and password determine how the task runs. - For example, if a username is passed without at password the task will - only run when the user is logged in. If a password is passed as well - the task will run whether the user is logged on or not. If you pass - 'System' as the username the task will run as the system account (the - password parameter is ignored. - - :param str description: A string representing the text that will be - displayed in the description field in the task scheduler. - - :param bool enabled: A boolean value representing whether or not the task is - enabled. - - :param bool hidden: A boolean value representing whether or not the task is - hidden. - - :param bool run_if_idle: Boolean value that indicates that the Task - Scheduler will run the task only if the computer is in an idle state. - - :param str idle_duration: A value that indicates the amount of time that the - computer must be in an idle state before the task is run. Valid values - are: - - - 1 minute - - 5 minutes - - 10 minutes - - 15 minutes - - 30 minutes - - 1 hour - - :param str idle_wait_timeout: A value that indicates the amount of time that - the Task Scheduler will wait for an idle condition to occur. Valid - values are: - - - Do not wait - - 1 minute - - 5 minutes - - 10 minutes - - 15 minutes - - 30 minutes - - 1 hour - - 2 hours - - :param bool idle_stop_on_end: Boolean value that indicates that the Task - Scheduler will terminate the task if the idle condition ends before the - task is completed. - - :param bool idle_restart: Boolean value that indicates whether the task is - restarted when the computer cycles into an idle condition more than - once. - - :param bool ac_only: Boolean value that indicates that the Task Scheduler - will launch the task only while on AC power. - - :param bool stop_if_on_batteries: Boolean value that indicates that the task - will be stopped if the computer begins to run on battery power. - - :param bool wake_to_run: Boolean value that indicates that the Task - Scheduler will wake the computer when it is time to run the task. - - :param bool run_if_network: Boolean value that indicates that the Task - Scheduler will run the task only when a network is available. - - :param guid network_id: GUID value that identifies a network profile. - - :param str network_name: Sets the name of a network profile. The name is - used for display purposes. - - :param bool allow_demand_start: Boolean value that indicates that the task - can be started by using either the Run command or the Context menu. 
- - :param bool start_when_available: Boolean value that indicates that the Task - Scheduler can start the task at any time after its scheduled time has - passed. - - :param restart_every: A value that specifies the interval between task - restart attempts. Valid values are: - - - False (to disable) - - 1 minute - - 5 minutes - - 10 minutes - - 15 minutes - - 30 minutes - - 1 hour - - 2 hours - - :param int restart_count: The number of times the Task Scheduler will - attempt to restart the task. Valid values are integers 1 - 999. - - :param execution_time_limit: The amount of time allowed to complete the - task. Valid values are: - - - False (to disable) - - 1 hour - - 2 hours - - 4 hours - - 8 hours - - 12 hours - - 1 day - - 3 days - - :param bool force_stop: Boolean value that indicates that the task may be - terminated by using TerminateProcess. - - :param delete_after: The amount of time that the Task Scheduler will - wait before deleting the task after it expires. Requires a trigger with - an expiration date. Valid values are: - - - False (to disable) - - Immediately - - 30 days - - 90 days - - 180 days - - 365 days - - :param str multiple_instances: Sets the policy that defines how the Task - Scheduler deals with multiple instances of the task. Valid values are: - - - Parallel - - Queue - - No New Instance - - Stop Existing - - :return: True if successful, False if unsuccessful - :rtype: bool + Args: + + name (str): + The name of the task. This will be displayed in the task scheduler. + + location (str): + A string value representing the location in which to create the + task. Default is '\' which is the root for the task scheduler + (C:\Windows\System32\tasks). + + user_name (str): + The user account under which to run the task. To specify the + 'System' account, use 'System'. The password will be ignored. + + password (str): + The password to use for authentication. This should set the task to + run whether the user is logged in or not, but is currently not + working. + + .. note:: + The combination of user_name and password determine how the + task runs. For example, if a username is passed without at + password the task will only run when the user is logged in. If a + password is passed as well the task will run whether the user is + logged on or not. If you pass 'System' as the username the task + will run as the system account (the password parameter is + ignored). + + description (str): + A string representing the text that will be displayed in the + description field in the task scheduler. + + enabled (bool): + A boolean value representing whether or not the task is enabled. + + hidden (bool): + A boolean value representing whether or not the task is hidden. + + run_if_idle (bool): + Boolean value that indicates that the Task Scheduler will run the + task only if the computer is in an idle state. + + idle_duration (str): + A value that indicates the amount of time that the computer must be + in an idle state before the task is run. Valid values are: + + - 1 minute + - 5 minutes + - 10 minutes + - 15 minutes + - 30 minutes + - 1 hour + + idle_wait_timeout (str): + A value that indicates the amount of time that the Task Scheduler + will wait for an idle condition to occur. Valid values are: + + - Do not wait + - 1 minute + - 5 minutes + - 10 minutes + - 15 minutes + - 30 minutes + - 1 hour + - 2 hours + + idle_stop_on_end (bool): + Boolean value that indicates that the Task Scheduler will terminate + the task if the idle condition ends before the task is completed. 
+ + idle_restart (bool): + Boolean value that indicates whether the task is restarted when the + computer cycles into an idle condition more than once. + + ac_only (bool): + Boolean value that indicates that the Task Scheduler will launch the + task only while on AC power. + + stop_if_on_batteries (bool): + Boolean value that indicates that the task will be stopped if the + computer begins to run on battery power. + + wake_to_run (bool): + Boolean value that indicates that the Task Scheduler will wake the + computer when it is time to run the task. + + run_if_network (bool): + Boolean value that indicates that the Task Scheduler will run the + task only when a network is available. + + network_id (guid): + GUID value that identifies a network profile. + + network_name (str): + Sets the name of a network profile. The name is used for display + purposes. + + allow_demand_start (bool): + Boolean value that indicates that the task can be started by using + either the Run command or the Context menu. + + start_when_available (bool): + Boolean value that indicates that the Task Scheduler can start the + task at any time after its scheduled time has passed. + + restart_every (str): + A value that specifies the interval between task restart attempts. + Valid values are: + + - False (to disable) + - 1 minute + - 5 minutes + - 10 minutes + - 15 minutes + - 30 minutes + - 1 hour + - 2 hours + + restart_count (int): + The number of times the Task Scheduler will attempt to restart the + task. Valid values are integers 1 - 999. + + execution_time_limit (bool, str): + The amount of time allowed to complete the task. Valid values are: + + - False (to disable) + - 1 hour + - 2 hours + - 4 hours + - 8 hours + - 12 hours + - 1 day + - 3 days + + force_stop (bool): + Boolean value that indicates that the task may be terminated by + using TerminateProcess. + + delete_after (bool, str): + The amount of time that the Task Scheduler will wait before deleting + the task after it expires. Requires a trigger with an expiration + date. Valid values are: + + - False (to disable) + - Immediately + - 30 days + - 90 days + - 180 days + - 365 days + + multiple_instances (str): + Sets the policy that defines how the Task Scheduler deals with + multiple instances of the task. Valid values are: + + - Parallel + - Queue + - No New Instance + - Stop Existing + + Returns: + bool: ``True`` if successful, otherwise ``False`` CLI Example: @@ -1027,14 +1092,17 @@ def delete_task(name, location='\\'): r''' Delete a task from the task scheduler. - :param str name: The name of the task to delete. + Args: + name (str): + The name of the task to delete. - :param str location: A string value representing the location of the task. - Default is '\\' which is the root for the task scheduler - (C:\Windows\System32\tasks). + location (str): + A string value representing the location of the task. Default is + '\' which is the root for the task scheduler + (C:\Windows\System32\tasks). - :return: True if successful, False if unsuccessful - :rtype: bool + Returns: + bool: ``True`` if successful, otherwise ``False`` CLI Example: @@ -1067,14 +1135,18 @@ def delete_folder(name, location='\\'): r''' Delete a folder from the task scheduler. - :param str name: The name of the folder to delete. + Args: - :param str location: A string value representing the location of the - folder. Default is '\\' which is the root for the task scheduler - (C:\Windows\System32\tasks). + name (str): + The name of the folder to delete. 
- :return: True if successful, False if unsuccessful - :rtype: bool + location (str): + A string value representing the location of the folder. Default is + '\' which is the root for the task scheduler + (C:\Windows\System32\tasks). + + Returns: + bool: ``True`` if successful, otherwise ``False`` CLI Example: @@ -1108,14 +1180,18 @@ def run(name, location='\\'): r''' Run a scheduled task manually. - :param str name: The name of the task to run. + Args: - :param str location: A string value representing the location of the task. - Default is '\\' which is the root for the task scheduler - (C:\Windows\System32\tasks). + name (str): + The name of the task to run. - :return: True if successful, False if unsuccessful - :rtype: bool + location (str): + A string value representing the location of the task. Default is '\' + which is the root for the task scheduler + (C:\Windows\System32\tasks). + + Returns: + bool: ``True`` if successful, otherwise ``False`` CLI Example: @@ -1147,14 +1223,18 @@ def run_wait(name, location='\\'): r''' Run a scheduled task and return when the task finishes - :param str name: The name of the task to run. + Args: - :param str location: A string value representing the location of the task. - Default is '\\' which is the root for the task scheduler - (C:\Windows\System32\tasks). + name (str): + The name of the task to run. - :return: True if successful, False if unsuccessful - :rtype: bool + location (str): + A string value representing the location of the task. Default is '\' + which is the root for the task scheduler + (C:\Windows\System32\tasks). + + Returns: + bool: ``True`` if successful, otherwise ``False`` CLI Example: @@ -1204,14 +1284,18 @@ def stop(name, location='\\'): r''' Stop a scheduled task. - :param str name: The name of the task to stop. + Args: - :param str location: A string value representing the location of the task. - Default is '\\' which is the root for the task scheduler - (C:\Windows\System32\tasks). + name (str): + The name of the task to stop. - :return: True if successful, False if unsuccessful - :rtype: bool + location (str): + A string value representing the location of the task. Default is '\' + which is the root for the task scheduler + (C:\Windows\System32\tasks). + + Returns: + bool: ``True`` if successful, otherwise ``False`` CLI Example: @@ -1243,21 +1327,24 @@ def status(name, location='\\'): r''' Determine the status of a task. Is it Running, Queued, Ready, etc. - :param str name: The name of the task for which to return the status + Args: - :param str location: A string value representing the location of the task. - Default is '\\' which is the root for the task scheduler - (C:\Windows\System32\tasks). + name (str): + The name of the task for which to return the status - :return: The current status of the task. Will be one of the following: + location (str): + A string value representing the location of the task. Default is '\' + which is the root for the task scheduler + (C:\Windows\System32\tasks). - - Unknown - - Disabled - - Queued - - Ready - - Running + Returns: + str: The current status of the task. Will be one of the following: - :rtype: string + - Unknown + - Disabled + - Queued + - Ready + - Running CLI Example: @@ -1285,14 +1372,18 @@ def info(name, location='\\'): r''' Get the details about a task in the task scheduler. - :param str name: The name of the task for which to return the status + Args: - :param str location: A string value representing the location of the task. 
- Default is '\\' which is the root for the task scheduler - (C:\Windows\System32\tasks). + name (str): + The name of the task for which to return the status + + location (str): + A string value representing the location of the task. Default is '\' + which is the root for the task scheduler + (C:\Windows\System32\tasks). - :return: - :rtype: dict + Returns: + dict: A dictionary containing the task configuration CLI Example: @@ -1423,64 +1514,91 @@ def add_action(name=None, r''' Add an action to a task. - :param str name: The name of the task to which to add the action. + Args: - :param str location: A string value representing the location of the task. - Default is '\\' which is the root for the task scheduler - (C:\Windows\System32\tasks). + name (str): + The name of the task to which to add the action. - :param str action_type: The type of action to add. There are three action - types. Each one requires its own set of Keyword Arguments (kwargs). Valid - values are: + location (str): + A string value representing the location of the task. Default is '\' + which is the root for the task scheduler + (C:\Windows\System32\tasks). - - Execute - - Email - - Message + action_type (str): + The type of action to add. There are three action types. Each one + requires its own set of Keyword Arguments (kwargs). Valid values + are: + + - Execute + - Email + - Message Required arguments for each action_type: **Execute** - Execute a command or an executable - :param str cmd: (required) The command / executable to run. + cmd (str): + (required) The command / executable to run. - :param str arguments: (optional) Arguments to be passed to the command / - executable. To launch a script the first command will need to be the - interpreter for the script. For example, to run a vbscript you would - pass `cscript.exe` in the `cmd` parameter and pass the script in the - `arguments` parameter as follows: + arguments (str): + (optional) Arguments to be passed to the command / executable. To + launch a script the first command will need to be the interpreter + for the script. For example, to run a vbscript you would pass + ``cscript.exe`` in the `cmd` parameter and pass the script in the + ``arguments`` parameter as follows: - - ``cmd='cscript.exe' arguments='c:\scripts\myscript.vbs'`` + - ``cmd='cscript.exe' arguments='c:\scripts\myscript.vbs'`` - Batch files do not need an interpreter and may be passed to the cmd - parameter directly. + Batch files do not need an interpreter and may be passed to the cmd + parameter directly. - :param str start_in: (optional) The current working directory for the - command. + start_in (str): + (optional) The current working directory for the command. **Email** - Send and email. Requires ``server``, ``from``, and ``to`` or ``cc``. - :param str from: The sender - :param str reply_to: Who to reply to - :param str to: The recipient - :param str cc: The CC recipient - :param str bcc: The BCC recipient - :param str subject: The subject of the email - :param str body: The Message Body of the email - :param str server: The server used to send the email - :param list attachments: A list of attachments. These will be the paths to - the files to attach. 
ie: ``attachments="['C:\attachment1.txt', - 'C:\attachment2.txt']"`` + from (str): + The sender + + reply_to (str): + Who to reply to + + to (str): + The recipient + + cc (str): + The CC recipient + + bcc (str): + The BCC recipient + + subject (str): + The subject of the email + + body (str): + The Message Body of the email + + server (str): + The server used to send the email + + attachments (list): + A list of attachments. These will be the paths to the files to + attach. ie: ``attachments="['C:\attachment1.txt', + 'C:\attachment2.txt']"`` **Message** - Display a dialog box. The task must be set to "Run only when user is logged on" in order for the dialog box to display. Both parameters are required. - :param str title: The dialog box title. - :param str message: The dialog box message body + title (str): + The dialog box title. - :return: True if successful, False if unsuccessful - :rtype: bool + message (str): + The dialog box message body + + Returns: + dict: A dictionary containing the task configuration CLI Example: @@ -1591,7 +1709,7 @@ def _clear_actions(name, location='\\'): :param str name: The name of the task from which to clear all actions. :param str location: A string value representing the location of the task. - Default is '\\' which is the root for the task scheduler + Default is '\' which is the root for the task scheduler (C:\Windows\System32\tasks). :return: True if successful, False if unsuccessful @@ -1644,125 +1762,156 @@ def add_trigger(name=None, delay=None, **kwargs): r''' - :param str name: The name of the task to which to add the trigger. - - :param str location: A string value representing the location of the task. - Default is '\\' which is the root for the task scheduler - (C:\Windows\System32\tasks). - - :param str trigger_type: The type of trigger to create. This is defined - when the trigger is created and cannot be changed later. Options are as - follows: - - - Event - - Once - - Daily - - Weekly - - Monthly - - MonthlyDay - - OnIdle - - OnTaskCreation - - OnBoot - - OnLogon - - OnSessionChange - - :param bool trigger_enabled: Boolean value that indicates whether the - trigger is enabled. - - :param str start_date: The date when the trigger is activated. If no value - is passed, the current date will be used. Can be one of the following - formats: - - - %Y-%m-%d - - %m-%d-%y - - %m-%d-%Y - - %m/%d/%y - - %m/%d/%Y - - %Y/%m/%d - - :param str start_time: The time when the trigger is activated. If no value - is passed, midnight will be used. Can be one of the following formats: - - - %I:%M:%S %p - - %I:%M %p - - %H:%M:%S - - %H:%M - - :param str end_date: The date when the trigger is deactivated. The trigger - cannot start the task after it is deactivated. Can be one of the - following formats: - - - %Y-%m-%d - - %m-%d-%y - - %m-%d-%Y - - %m/%d/%y - - %m/%d/%Y - - %Y/%m/%d - - :param str end_time: The time when the trigger is deactivated. If the this - is not passed with ``end_date`` it will be set to midnight. Can be one - of the following formats: - - - %I:%M:%S %p - - %I:%M %p - - %H:%M:%S - - %H:%M - - :param str random_delay: The delay time that is randomly added to the start - time of the trigger. Valid values are: - - - 30 seconds - - 1 minute - - 30 minutes - - 1 hour - - 8 hours - - 1 day - - :param str repeat_interval: The amount of time between each restart of the - task. Valid values are: - - - 5 minutes - - 10 minutes - - 15 minutes - - 30 minutes - - 1 hour - - :param str repeat_duration: How long the pattern is repeated. 
Valid values - are: - - - Indefinitely - - 15 minutes - - 30 minutes - - 1 hour - - 12 hours - - 1 day - - :param bool repeat_stop_at_duration_end: Boolean value that indicates if a - running instance of the task is stopped at the end of the repetition - pattern duration. - - :param str execution_time_limit: The maximum amount of time that the task - launched by the trigger is allowed to run. Valid values are: - - - 30 minutes - - 1 hour - - 2 hours - - 4 hours - - 8 hours - - 12 hours - - 1 day - - 3 days (default) - - :param str delay: The time the trigger waits after its activation to start the task. - Valid values are: - - - 15 seconds - - 30 seconds - - 1 minute - - 30 minutes - - 1 hour - - 8 hours - - 1 day + Add a trigger to a Windows Scheduled task + + Args: + + name (str): + The name of the task to which to add the trigger. + + location (str): + A string value representing the location of the task. Default is + '\' which is the root for the task scheduler + (C:\Windows\System32\tasks). + + trigger_type (str): + The type of trigger to create. This is defined when the trigger is + created and cannot be changed later. Options are as follows: + + - Event + - Once + - Daily + - Weekly + - Monthly + - MonthlyDay + - OnIdle + - OnTaskCreation + - OnBoot + - OnLogon + - OnSessionChange + + trigger_enabled (bool): + Boolean value that indicates whether the trigger is enabled. + + start_date (str): + The date when the trigger is activated. If no value is passed, the + current date will be used. Can be one of the following formats: + + - %Y-%m-%d + - %m-%d-%y + - %m-%d-%Y + - %m/%d/%y + - %m/%d/%Y + - %Y/%m/%d + + start_time (str): + The time when the trigger is activated. If no value is passed, + midnight will be used. Can be one of the following formats: + + - %I:%M:%S %p + - %I:%M %p + - %H:%M:%S + - %H:%M + + end_date (str): + The date when the trigger is deactivated. The trigger cannot start + the task after it is deactivated. Can be one of the following + formats: + + - %Y-%m-%d + - %m-%d-%y + - %m-%d-%Y + - %m/%d/%y + - %m/%d/%Y + - %Y/%m/%d + + end_time (str): + The time when the trigger is deactivated. If the this is not passed + with ``end_date`` it will be set to midnight. Can be one of the + following formats: + + - %I:%M:%S %p + - %I:%M %p + - %H:%M:%S + - %H:%M + + random_delay (str): + The delay time that is randomly added to the start time of the + trigger. Valid values are: + + - 30 seconds + - 1 minute + - 30 minutes + - 1 hour + - 8 hours + - 1 day + + .. note:: + This parameter applies to the following trigger types + + - Once + - Daily + - Weekly + - Monthly + - MonthlyDay + + repeat_interval (str): + The amount of time between each restart of the task. Valid values + are: + + - 5 minutes + - 10 minutes + - 15 minutes + - 30 minutes + - 1 hour + + repeat_duration (str): + How long the pattern is repeated. Valid values are: + + - Indefinitely + - 15 minutes + - 30 minutes + - 1 hour + - 12 hours + - 1 day + + repeat_stop_at_duration_end (bool): + Boolean value that indicates if a running instance of the task is + stopped at the end of the repetition pattern duration. + + execution_time_limit (str): + The maximum amount of time that the task launched by the trigger is + allowed to run. Valid values are: + + - 30 minutes + - 1 hour + - 2 hours + - 4 hours + - 8 hours + - 12 hours + - 1 day + - 3 days (default) + + delay (str): + The time the trigger waits after its activation to start the task. 
+ Valid values are: + + - 15 seconds + - 30 seconds + - 1 minute + - 30 minutes + - 1 hour + - 8 hours + - 1 day + + .. note:: + This parameter applies to the following trigger types: + + - OnLogon + - OnBoot + - Event + - OnTaskCreation + - OnSessionChange **kwargs** @@ -1771,9 +1920,10 @@ def add_trigger(name=None, *Event* - :param str subscription: An event definition in xml format that fires the - trigger. The easiest way to get this would is to create an event in - windows task scheduler and then copy the xml text. + subscription (str): + An event definition in xml format that fires the trigger. The + easiest way to get this would is to create an event in Windows Task + Scheduler and then copy the xml text. *Once* @@ -1781,61 +1931,69 @@ def add_trigger(name=None, *Daily* - :param int days_interval: The interval between days in the schedule. An - interval of 1 produces a daily schedule. An interval of 2 produces an - every-other day schedule. If no interval is specified, 1 is used. Valid - entries are 1 - 999. - + days_interval (int): + The interval between days in the schedule. An interval of 1 produces + a daily schedule. An interval of 2 produces an every-other day + schedule. If no interval is specified, 1 is used. Valid entries are + 1 - 999. *Weekly* - :param int weeks_interval: The interval between weeks in the schedule. - An interval of 1 produces a weekly schedule. An interval of 2 produces - an every-other week schedule. If no interval is specified, 1 is used. - Valid entries are 1 - 52. + weeks_interval (int): + The interval between weeks in the schedule. An interval of 1 + produces a weekly schedule. An interval of 2 produces an every-other + week schedule. If no interval is specified, 1 is used. Valid entries + are 1 - 52. - param list days_of_week: Sets the days of the week on which the task - runs. Should be a list. ie: ['Monday','Wednesday','Friday']. Valid - entries are the names of the days of the week. + days_of_week (list): + Sets the days of the week on which the task runs. Should be a list. + ie: ``['Monday','Wednesday','Friday']``. Valid entries are the names + of the days of the week. *Monthly* - :param list months_of_year: Sets the months of the year during which the - task runs. Should be a list. ie: ['January','July']. Valid entries are - the full names of all the months. + months_of_year (list): + Sets the months of the year during which the task runs. Should be a + list. ie: ``['January','July']``. Valid entries are the full names + of all the months. - :param list days_of_month: Sets the days of the month during which the - task runs. Should be a list. ie: [1, 15, 'Last']. Options are all days - of the month 1 - 31 and the word 'Last' to indicate the last day of the - month. + days_of_month (list): + Sets the days of the month during which the task runs. Should be a + list. ie: ``[1, 15, 'Last']``. Options are all days of the month 1 - + 31 and the word 'Last' to indicate the last day of the month. - :param bool last_day_of_month: Boolean value that indicates that the - task runs on the last day of the month regardless of the actual date of - that day. + last_day_of_month (bool): + Boolean value that indicates that the task runs on the last day of + the month regardless of the actual date of that day. - You can set the task to run on the last day of the month by either - including the word 'Last' in the list of days, or setting the parameter - 'last_day_of_month` equal to True. + .. 
note:: + You can set the task to run on the last day of the month by either + including the word 'Last' in the list of days, or setting the + parameter 'last_day_of_month` equal to True. *MonthlyDay* - :param list months_of_year: Sets the months of the year during which the - task runs. Should be a list. ie: ['January','July']. Valid entries are - the full names of all the months. + months_of_year (list): + Sets the months of the year during which the task runs. Should be a + list. ie: ``['January','July']``. Valid entries are the full names + of all the months. - :param list weeks_of_month: Sets the weeks of the month during which the - task runs. Should be a list. ie: ['First','Third']. Valid options are: + weeks_of_month (list): + Sets the weeks of the month during which the task runs. Should be a + list. ie: ``['First','Third']``. Valid options are: - - First - - Second - - Third - - Fourth + - First + - Second + - Third + - Fourth - :param bool last_week_of_month: Boolean value that indicates that the task - runs on the last week of the month. + last_week_of_month (bool): + Boolean value that indicates that the task runs on the last week of + the month. - :param list days_of_week: Sets the days of the week during which the task - runs. Should be a list. ie: ['Monday','Wednesday','Friday']. Valid - entries are the names of the days of the week. + days_of_week (list): + Sets the days of the week during which the task runs. Should be a + list. ie: ``['Monday','Wednesday','Friday']``. Valid entries are + the names of the days of the week. *OnIdle* No special parameters required. @@ -1851,38 +2009,38 @@ def add_trigger(name=None, *OnSessionChange* - :param str session_user_name: Sets the user for the Terminal Server - session. When a session state change is detected for this user, a task - is started. To detect session status change for any user, do not pass - this parameter. - - :param str state_change: Sets the kind of Terminal Server session change - that would trigger a task launch. Valid options are: + session_user_name (str): + Sets the user for the Terminal Server session. When a session state + change is detected for this user, a task is started. To detect + session status change for any user, do not pass this parameter. - - ConsoleConnect: When you connect to a user session (switch users) - - ConsoleDisconnect: When you disconnect a user session (switch users) - - RemoteConnect: When a user connects via Remote Desktop - - RemoteDisconnect: When a user disconnects via Remote Desktop - - SessionLock: When the workstation is locked - - SessionUnlock: When the workstation is unlocked + state_change (str): + Sets the kind of Terminal Server session change that would trigger a + task launch. Valid options are: - .. note:: + - ConsoleConnect: When you connect to a user session (switch users) + - ConsoleDisconnect: When you disconnect a user session (switch users) + - RemoteConnect: When a user connects via Remote Desktop + - RemoteDisconnect: When a user disconnects via Remote Desktop + - SessionLock: When the workstation is locked + - SessionUnlock: When the workstation is unlocked - Arguments are parsed by the YAML loader and are subject to yaml's - idiosyncrasies. Therefore, time values in some formats (``%H:%M:%S`` and - ``%H:%M``) should to be quoted. See `YAML IDIOSYNCRASIES`_ for more details. + .. note:: - .. 
_`YAML IDIOSYNCRASIES`: https://docs.saltstack.com/en/latest/topics/troubleshooting/yaml_idiosyncrasies.html#time-expressions + Arguments are parsed by the YAML loader and are subject to yaml's + idiosyncrasies. Therefore, time values in some formats (``%H:%M:%S`` and + ``%H:%M``) should to be quoted. See `YAML IDIOSYNCRASIES`_ for more details. + .. _`YAML IDIOSYNCRASIES`: https://docs.saltstack.com/en/latest/topics/troubleshooting/yaml_idiosyncrasies.html#time-expressions - :return: True if successful, False if unsuccessful - :rtype: bool + Returns: + bool: ``True`` if successful, otherwise ``False`` CLI Example: .. code-block:: bash - salt 'minion-id' task.add_trigger trigger_type=Once trigger_enabled=True start_date=2016/12/1 start_time='"12:01"' + salt 'minion-id' task.add_trigger trigger_type=Once trigger_enabled=True start_date=2016/12/1 start_time=12:01 ''' if not trigger_type: return 'Required parameter "trigger_type" not specified' @@ -2171,14 +2329,18 @@ def clear_triggers(name, location='\\'): r''' Remove all triggers from the task. - :param str name: The name of the task from which to clear all triggers. + Args: - :param str location: A string value representing the location of the task. - Default is '\\' which is the root for the task scheduler - (C:\Windows\System32\tasks). + name (str): + The name of the task from which to clear all triggers. - :return: True if successful, False if unsuccessful - :rtype: bool + location (str): + A string value representing the location of the task. Default is '\' + which is the root for the task scheduler + (C:\Windows\System32\tasks). + + Returns: + bool: ``True`` if successful, otherwise ``False`` CLI Example: From b6494950e616f324ce4aee7219874c64ac5ee368 Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 15 Apr 2019 11:16:50 -0600 Subject: [PATCH 179/340] Honor 80 character line limit --- salt/modules/win_task.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/salt/modules/win_task.py b/salt/modules/win_task.py index d28d02909e3a..0e561da72145 100644 --- a/salt/modules/win_task.py +++ b/salt/modules/win_task.py @@ -2027,9 +2027,10 @@ def add_trigger(name=None, .. note:: - Arguments are parsed by the YAML loader and are subject to yaml's - idiosyncrasies. Therefore, time values in some formats (``%H:%M:%S`` and - ``%H:%M``) should to be quoted. See `YAML IDIOSYNCRASIES`_ for more details. + Arguments are parsed by the YAML loader and are subject to + yaml's idiosyncrasies. Therefore, time values in some formats + (``%H:%M:%S`` and ``%H:%M``) should to be quoted. See + `YAML IDIOSYNCRASIES`_ for more details. .. _`YAML IDIOSYNCRASIES`: https://docs.saltstack.com/en/latest/topics/troubleshooting/yaml_idiosyncrasies.html#time-expressions From 3baeedfcb2cbf2dbded34c74add85b725c823fb8 Mon Sep 17 00:00:00 2001 From: matt LLVW Date: Mon, 15 Apr 2019 19:21:37 +0200 Subject: [PATCH 180/340] fix: #51842 --- salt/auth/django.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/auth/django.py b/salt/auth/django.py index 00dc5575baba..1e3c4d102a81 100644 --- a/salt/auth/django.py +++ b/salt/auth/django.py @@ -28,9 +28,9 @@ .. 
code-block:: python class SaltExternalAuthModel(models.Model): - user_fk = models.ForeignKey(auth.User) - minion_matcher = models.CharField() - minion_fn = models.CharField() + user_fk = models.ForeignKey(User, on_delete=models.CASCADE) + minion_or_fn_matcher = models.CharField(max_length=255) + minion_fn = models.CharField(max_length=255) The :conf_master:`external_auth` clause in the master config would then look like this: From e01077bfc27e09eafb5b219dd9406ebe63da99e7 Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 15 Apr 2019 13:02:55 -0600 Subject: [PATCH 181/340] Fix docs issue... maybe... --- salt/modules/win_task.py | 134 +++++++++++++++++++-------------------- 1 file changed, 64 insertions(+), 70 deletions(-) diff --git a/salt/modules/win_task.py b/salt/modules/win_task.py index 0e561da72145..31a509814966 100644 --- a/salt/modules/win_task.py +++ b/salt/modules/win_task.py @@ -325,8 +325,8 @@ def list_tasks(location='\\'): location (str): A string value representing the folder from which you want to list - tasks. Default is '\' which is the root for the task scheduler - (C:\Windows\System32\tasks). + tasks. Default is ``\`` which is the root for the task scheduler + (``C:\Windows\System32\tasks``). Returns: list: Returns a list of tasks @@ -365,8 +365,8 @@ def list_folders(location='\\'): location (str): A string value representing the folder from which you want to list - tasks. Default is '\' which is the root for the task scheduler - (C:\Windows\System32\tasks). + tasks. Default is ``\`` which is the root for the task scheduler + (``C:\Windows\System32\tasks``). Returns: list: Returns a list of folders. @@ -407,8 +407,8 @@ def list_triggers(name, location='\\'): The name of the task for which list triggers. location (str): A string value representing the location of the task - from which to list triggers. Default is '\' which is the root for - the task scheduler (C:\Windows\System32\tasks). + from which to list triggers. Default is ``\`` which is the root for + the task scheduler (``C:\Windows\System32\tasks``). Returns: list: Returns a list of triggers. @@ -452,8 +452,8 @@ def list_actions(name, location='\\'): location (str): A string value representing the location of the task from which to - list actions. Default is '\' which is the root for the task - scheduler (C:\Windows\System32\tasks). + list actions. Default is ``\`` which is the root for the task + scheduler (``C:\Windows\System32\tasks``). Returns: list: Returns a list of actions. @@ -507,8 +507,8 @@ def create_task(name, location (str): A string value representing the location in which to create the - task. Default is '\' which is the root for the task scheduler - (C:\Windows\System32\tasks). + task. Default is ``\`` which is the root for the task scheduler + (``C:\Windows\System32\tasks``). user_name (str): The user account under which to run the task. To specify the @@ -590,8 +590,8 @@ def create_task_from_xml(name, location (str): A string value representing the location in which to create the - task. Default is '\' which is the root for the task scheduler - (C:\Windows\System32\tasks). + task. Default is ``\`` which is the root for the task scheduler + (``C:\Windows\System32\tasks``). xml_text (str): A string of xml representing the task to be created. This will be @@ -694,8 +694,8 @@ def create_folder(name, location='\\'): location (str): A string value representing the location in which to create the - folder. Default is '\' which is the root for the task scheduler - (C:\Windows\System32\tasks). + folder. 
Default is ``\`` which is the root for the task scheduler + (``C:\Windows\System32\tasks``). Returns: bool: ``True`` if successful, otherwise ``False`` @@ -767,8 +767,8 @@ def edit_task(name=None, location (str): A string value representing the location in which to create the - task. Default is '\' which is the root for the task scheduler - (C:\Windows\System32\tasks). + task. Default is ``\`` which is the root for the task scheduler + (``C:\Windows\System32\tasks``). user_name (str): The user account under which to run the task. To specify the @@ -1098,8 +1098,8 @@ def delete_task(name, location='\\'): location (str): A string value representing the location of the task. Default is - '\' which is the root for the task scheduler - (C:\Windows\System32\tasks). + ``\`` which is the root for the task scheduler + (``C:\Windows\System32\tasks``). Returns: bool: ``True`` if successful, otherwise ``False`` @@ -1142,8 +1142,8 @@ def delete_folder(name, location='\\'): location (str): A string value representing the location of the folder. Default is - '\' which is the root for the task scheduler - (C:\Windows\System32\tasks). + ``\`` which is the root for the task scheduler + (``C:\Windows\System32\tasks``). Returns: bool: ``True`` if successful, otherwise ``False`` @@ -1186,9 +1186,9 @@ def run(name, location='\\'): The name of the task to run. location (str): - A string value representing the location of the task. Default is '\' + A string value representing the location of the task. Default is ``\`` which is the root for the task scheduler - (C:\Windows\System32\tasks). + (``C:\Windows\System32\tasks``). Returns: bool: ``True`` if successful, otherwise ``False`` @@ -1229,9 +1229,9 @@ def run_wait(name, location='\\'): The name of the task to run. location (str): - A string value representing the location of the task. Default is '\' - which is the root for the task scheduler - (C:\Windows\System32\tasks). + A string value representing the location of the task. Default is + ``\`` which is the root for the task scheduler + (``C:\Windows\System32\tasks``). Returns: bool: ``True`` if successful, otherwise ``False`` @@ -1290,9 +1290,9 @@ def stop(name, location='\\'): The name of the task to stop. location (str): - A string value representing the location of the task. Default is '\' - which is the root for the task scheduler - (C:\Windows\System32\tasks). + A string value representing the location of the task. Default is + ``\`` which is the root for the task scheduler + (``C:\Windows\System32\tasks``). Returns: bool: ``True`` if successful, otherwise ``False`` @@ -1333,9 +1333,9 @@ def status(name, location='\\'): The name of the task for which to return the status location (str): - A string value representing the location of the task. Default is '\' - which is the root for the task scheduler - (C:\Windows\System32\tasks). + A string value representing the location of the task. Default is + ``\`` which is the root for the task scheduler + (``C:\Windows\System32\tasks``). Returns: str: The current status of the task. Will be one of the following: @@ -1378,9 +1378,9 @@ def info(name, location='\\'): The name of the task for which to return the status location (str): - A string value representing the location of the task. Default is '\' - which is the root for the task scheduler - (C:\Windows\System32\tasks). + A string value representing the location of the task. Default is + ``\`` which is the root for the task scheduler + (``C:\Windows\System32\tasks``). 
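The docstring conversions in this patch all describe the same calling surface for the Windows task module. Purely for orientation, the documented functions can be driven from Salt's Python client along these lines (the minion id ``win-minion`` and the task name ``ScheduledBackup`` are illustrative placeholders, not values taken from the patch):

.. code-block:: python

    # Sketch only: query, start, and poll a scheduled task through
    # salt's LocalClient, mirroring the CLI examples in the docstrings.
    # Assumes a running master and a connected Windows minion.
    import salt.client

    local = salt.client.LocalClient()

    # task.info returns the task configuration as a dict
    task_info = local.cmd('win-minion', 'task.info', ['ScheduledBackup'])

    # task.run starts the task; task.status reports its current state
    local.cmd('win-minion', 'task.run', ['ScheduledBackup'])
    task_state = local.cmd('win-minion', 'task.status', ['ScheduledBackup'])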
Returns: dict: A dictionary containing the task configuration @@ -1520,9 +1520,9 @@ def add_action(name=None, The name of the task to which to add the action. location (str): - A string value representing the location of the task. Default is '\' - which is the root for the task scheduler - (C:\Windows\System32\tasks). + A string value representing the location of the task. Default is + ``\`` which is the root for the task scheduler + (``C:\Windows\System32\tasks``). action_type (str): The type of action to add. There are three action types. Each one @@ -1535,7 +1535,9 @@ def add_action(name=None, Required arguments for each action_type: - **Execute** - Execute a command or an executable + **Execute** + + Execute a command or an executable cmd (str): (required) The command / executable to run. @@ -1555,39 +1557,26 @@ def add_action(name=None, start_in (str): (optional) The current working directory for the command. - **Email** - Send and email. Requires ``server``, ``from``, and ``to`` or - ``cc``. - - from (str): - The sender - - reply_to (str): - Who to reply to - - to (str): - The recipient - - cc (str): - The CC recipient + **Email** - bcc (str): - The BCC recipient - - subject (str): - The subject of the email - - body (str): - The Message Body of the email - - server (str): - The server used to send the email + Send and email. Requires ``server``, ``from``, and ``to`` or ``cc``. + from (str): The sender + reply_to (str): Who to reply to + to (str): The recipient + cc (str): The CC recipient + bcc (str): The BCC recipient + subject (str): The subject of the email + body (str): The Message Body of the email + server (str): The server used to send the email attachments (list): A list of attachments. These will be the paths to the files to attach. ie: ``attachments="['C:\attachment1.txt', 'C:\attachment2.txt']"`` - **Message** - Display a dialog box. The task must be set to "Run only when + **Message** + + Display a dialog box. The task must be set to "Run only when user is logged on" in order for the dialog box to display. Both parameters are required. @@ -1709,8 +1698,8 @@ def _clear_actions(name, location='\\'): :param str name: The name of the task from which to clear all actions. :param str location: A string value representing the location of the task. - Default is '\' which is the root for the task scheduler - (C:\Windows\System32\tasks). + Default is ``\`` which is the root for the task scheduler + (``C:\Windows\System32\tasks``). :return: True if successful, False if unsuccessful :rtype: bool @@ -1771,8 +1760,8 @@ def add_trigger(name=None, location (str): A string value representing the location of the task. Default is - '\' which is the root for the task scheduler - (C:\Windows\System32\tasks). + ``\`` which is the root for the task scheduler + (``C:\Windows\System32\tasks``). trigger_type (str): The type of trigger to create. This is defined when the trigger is @@ -1936,6 +1925,7 @@ def add_trigger(name=None, a daily schedule. An interval of 2 produces an every-other day schedule. If no interval is specified, 1 is used. Valid entries are 1 - 999. + *Weekly* weeks_interval (int): @@ -1996,15 +1986,19 @@ def add_trigger(name=None, the names of the days of the week. *OnIdle* + No special parameters required. *OnTaskCreation* + No special parameters required. *OnBoot* + No special parameters required. *OnLogon* + No special parameters required. *OnSessionChange* @@ -2336,9 +2330,9 @@ def clear_triggers(name, location='\\'): The name of the task from which to clear all triggers. 
location (str): - A string value representing the location of the task. Default is '\' + A string value representing the location of the task. Default is ``\`` which is the root for the task scheduler - (C:\Windows\System32\tasks). + (``C:\Windows\System32\tasks``). Returns: bool: ``True`` if successful, otherwise ``False`` From c691e0d1af8c9f41e32e7a5a63b840f188c010d6 Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 15 Apr 2019 13:26:31 -0600 Subject: [PATCH 182/340] More doc fixes --- salt/modules/win_task.py | 262 +++++++++++++++++++-------------------- 1 file changed, 131 insertions(+), 131 deletions(-) diff --git a/salt/modules/win_task.py b/salt/modules/win_task.py index 31a509814966..289bbe9b24af 100644 --- a/salt/modules/win_task.py +++ b/salt/modules/win_task.py @@ -496,9 +496,9 @@ def create_task(name, Create a new task in the designated location. This function has many keyword arguments that are not listed here. For additional arguments see: - - :py:func:`edit_task` - - :py:func:`add_action` - - :py:func:`add_trigger` + - :py:func:`edit_task` + - :py:func:`add_action` + - :py:func:`add_trigger` Args: @@ -806,25 +806,25 @@ def edit_task(name=None, A value that indicates the amount of time that the computer must be in an idle state before the task is run. Valid values are: - - 1 minute - - 5 minutes - - 10 minutes - - 15 minutes - - 30 minutes - - 1 hour + - 1 minute + - 5 minutes + - 10 minutes + - 15 minutes + - 30 minutes + - 1 hour idle_wait_timeout (str): A value that indicates the amount of time that the Task Scheduler will wait for an idle condition to occur. Valid values are: - - Do not wait - - 1 minute - - 5 minutes - - 10 minutes - - 15 minutes - - 30 minutes - - 1 hour - - 2 hours + - Do not wait + - 1 minute + - 5 minutes + - 10 minutes + - 15 minutes + - 30 minutes + - 1 hour + - 2 hours idle_stop_on_end (bool): Boolean value that indicates that the Task Scheduler will terminate @@ -869,14 +869,14 @@ def edit_task(name=None, A value that specifies the interval between task restart attempts. Valid values are: - - False (to disable) - - 1 minute - - 5 minutes - - 10 minutes - - 15 minutes - - 30 minutes - - 1 hour - - 2 hours + - False (to disable) + - 1 minute + - 5 minutes + - 10 minutes + - 15 minutes + - 30 minutes + - 1 hour + - 2 hours restart_count (int): The number of times the Task Scheduler will attempt to restart the @@ -885,14 +885,14 @@ def edit_task(name=None, execution_time_limit (bool, str): The amount of time allowed to complete the task. Valid values are: - - False (to disable) - - 1 hour - - 2 hours - - 4 hours - - 8 hours - - 12 hours - - 1 day - - 3 days + - False (to disable) + - 1 hour + - 2 hours + - 4 hours + - 8 hours + - 12 hours + - 1 day + - 3 days force_stop (bool): Boolean value that indicates that the task may be terminated by @@ -903,21 +903,21 @@ def edit_task(name=None, the task after it expires. Requires a trigger with an expiration date. Valid values are: - - False (to disable) - - Immediately - - 30 days - - 90 days - - 180 days - - 365 days + - False (to disable) + - Immediately + - 30 days + - 90 days + - 180 days + - 365 days multiple_instances (str): Sets the policy that defines how the Task Scheduler deals with multiple instances of the task. 
Valid values are: - - Parallel - - Queue - - No New Instance - - Stop Existing + - Parallel + - Queue + - No New Instance + - Stop Existing Returns: bool: ``True`` if successful, otherwise ``False`` @@ -1529,9 +1529,9 @@ def add_action(name=None, requires its own set of Keyword Arguments (kwargs). Valid values are: - - Execute - - Email - - Message + - Execute + - Email + - Message Required arguments for each action_type: @@ -1549,7 +1549,7 @@ def add_action(name=None, ``cscript.exe`` in the `cmd` parameter and pass the script in the ``arguments`` parameter as follows: - - ``cmd='cscript.exe' arguments='c:\scripts\myscript.vbs'`` + - ``cmd='cscript.exe' arguments='c:\scripts\myscript.vbs'`` Batch files do not need an interpreter and may be passed to the cmd parameter directly. @@ -1767,17 +1767,17 @@ def add_trigger(name=None, The type of trigger to create. This is defined when the trigger is created and cannot be changed later. Options are as follows: - - Event - - Once - - Daily - - Weekly - - Monthly - - MonthlyDay - - OnIdle - - OnTaskCreation - - OnBoot - - OnLogon - - OnSessionChange + - Event + - Once + - Daily + - Weekly + - Monthly + - MonthlyDay + - OnIdle + - OnTaskCreation + - OnBoot + - OnLogon + - OnSessionChange trigger_enabled (bool): Boolean value that indicates whether the trigger is enabled. @@ -1786,83 +1786,83 @@ def add_trigger(name=None, The date when the trigger is activated. If no value is passed, the current date will be used. Can be one of the following formats: - - %Y-%m-%d - - %m-%d-%y - - %m-%d-%Y - - %m/%d/%y - - %m/%d/%Y - - %Y/%m/%d + - %Y-%m-%d + - %m-%d-%y + - %m-%d-%Y + - %m/%d/%y + - %m/%d/%Y + - %Y/%m/%d start_time (str): The time when the trigger is activated. If no value is passed, midnight will be used. Can be one of the following formats: - - %I:%M:%S %p - - %I:%M %p - - %H:%M:%S - - %H:%M + - %I:%M:%S %p + - %I:%M %p + - %H:%M:%S + - %H:%M end_date (str): The date when the trigger is deactivated. The trigger cannot start the task after it is deactivated. Can be one of the following formats: - - %Y-%m-%d - - %m-%d-%y - - %m-%d-%Y - - %m/%d/%y - - %m/%d/%Y - - %Y/%m/%d + - %Y-%m-%d + - %m-%d-%y + - %m-%d-%Y + - %m/%d/%y + - %m/%d/%Y + - %Y/%m/%d end_time (str): - The time when the trigger is deactivated. If the this is not passed + The time when the trigger is deactivated. If this is not passed with ``end_date`` it will be set to midnight. Can be one of the following formats: - - %I:%M:%S %p - - %I:%M %p - - %H:%M:%S - - %H:%M + - %I:%M:%S %p + - %I:%M %p + - %H:%M:%S + - %H:%M random_delay (str): The delay time that is randomly added to the start time of the trigger. Valid values are: - - 30 seconds - - 1 minute - - 30 minutes - - 1 hour - - 8 hours - - 1 day + - 30 seconds + - 1 minute + - 30 minutes + - 1 hour + - 8 hours + - 1 day .. note:: This parameter applies to the following trigger types - - Once - - Daily - - Weekly - - Monthly - - MonthlyDay + - Once + - Daily + - Weekly + - Monthly + - MonthlyDay repeat_interval (str): The amount of time between each restart of the task. Valid values are: - - 5 minutes - - 10 minutes - - 15 minutes - - 30 minutes - - 1 hour + - 5 minutes + - 10 minutes + - 15 minutes + - 30 minutes + - 1 hour repeat_duration (str): How long the pattern is repeated. 
Valid values are: - - Indefinitely - - 15 minutes - - 30 minutes - - 1 hour - - 12 hours - - 1 day + - Indefinitely + - 15 minutes + - 30 minutes + - 1 hour + - 12 hours + - 1 day repeat_stop_at_duration_end (bool): Boolean value that indicates if a running instance of the task is @@ -1872,35 +1872,35 @@ def add_trigger(name=None, The maximum amount of time that the task launched by the trigger is allowed to run. Valid values are: - - 30 minutes - - 1 hour - - 2 hours - - 4 hours - - 8 hours - - 12 hours - - 1 day - - 3 days (default) + - 30 minutes + - 1 hour + - 2 hours + - 4 hours + - 8 hours + - 12 hours + - 1 day + - 3 days (default) delay (str): The time the trigger waits after its activation to start the task. Valid values are: - - 15 seconds - - 30 seconds - - 1 minute - - 30 minutes - - 1 hour - - 8 hours - - 1 day + - 15 seconds + - 30 seconds + - 1 minute + - 30 minutes + - 1 hour + - 8 hours + - 1 day .. note:: This parameter applies to the following trigger types: - - OnLogon - - OnBoot - - Event - - OnTaskCreation - - OnSessionChange + - OnLogon + - OnBoot + - Event + - OnTaskCreation + - OnSessionChange **kwargs** @@ -1971,10 +1971,10 @@ def add_trigger(name=None, Sets the weeks of the month during which the task runs. Should be a list. ie: ``['First','Third']``. Valid options are: - - First - - Second - - Third - - Fourth + - First + - Second + - Third + - Fourth last_week_of_month (bool): Boolean value that indicates that the task runs on the last week of @@ -2012,12 +2012,12 @@ def add_trigger(name=None, Sets the kind of Terminal Server session change that would trigger a task launch. Valid options are: - - ConsoleConnect: When you connect to a user session (switch users) - - ConsoleDisconnect: When you disconnect a user session (switch users) - - RemoteConnect: When a user connects via Remote Desktop - - RemoteDisconnect: When a user disconnects via Remote Desktop - - SessionLock: When the workstation is locked - - SessionUnlock: When the workstation is unlocked + - ConsoleConnect: When you connect to a user session (switch users) + - ConsoleDisconnect: When you disconnect a user session (switch users) + - RemoteConnect: When a user connects via Remote Desktop + - RemoteDisconnect: When a user disconnects via Remote Desktop + - SessionLock: When the workstation is locked + - SessionUnlock: When the workstation is unlocked .. note:: From 18cfc158958478dec146fde53d1aefedbe2db1be Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 15 Apr 2019 14:01:43 -0600 Subject: [PATCH 183/340] Fix docs... attempt 3 --- salt/modules/win_task.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/modules/win_task.py b/salt/modules/win_task.py index 289bbe9b24af..b546488799ec 100644 --- a/salt/modules/win_task.py +++ b/salt/modules/win_task.py @@ -1804,8 +1804,7 @@ def add_trigger(name=None, end_date (str): The date when the trigger is deactivated. The trigger cannot start - the task after it is deactivated. Can be one of the following - formats: + the task after it is deactivated. 
Can be one of the following formats: - %Y-%m-%d - %m-%d-%y @@ -2251,11 +2250,12 @@ def add_trigger(name=None, trigger.DaysOfMonth = bits_days trigger.RunOnLastDayOfMonth = kwargs.get('last_day_of_month', False) else: - return 'Monthly trigger requires "days_of_month" or "last_day_of_month" parameters' + return 'Monthly trigger requires "days_of_month" or "last_day_of_' \ + 'month" parameters' # Monthly Day Of Week Trigger Parameters elif trigger_types[trigger_type] == TASK_TRIGGER_MONTHLYDOW: - trigger.Id = 'Monthy_DOW_ID1' + trigger.Id = 'Monthly_DOW_ID1' if kwargs.get('months_of_year', False): bits_months = 0 for month in kwargs.get('months_of_year'): From f6f33812d2cf1c391be53d46283818bec03f93c3 Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 15 Apr 2019 14:31:14 -0600 Subject: [PATCH 184/340] Fix docs... attempt 4 --- salt/modules/win_task.py | 102 +++++++++++++++++++++++---------------- 1 file changed, 61 insertions(+), 41 deletions(-) diff --git a/salt/modules/win_task.py b/salt/modules/win_task.py index b546488799ec..335719212a9b 100644 --- a/salt/modules/win_task.py +++ b/salt/modules/win_task.py @@ -155,7 +155,8 @@ 0x41306: 'Task was terminated by the user', 0x8004130F: 'Credentials became corrupted', 0x8004131F: 'An instance of this task is already running', - 0x800704DD: 'The service is not available (Run only when logged in?)', + 0x800704DD: 'The service is not available (Run only when logged ' + 'in?)', 0x800710E0: 'The operator or administrator has refused the request', 0xC000013A: 'The application terminated as a result of CTRL+C', 0xC06D007E: 'Unknown software exception'} @@ -169,7 +170,7 @@ def __virtual__(): if not HAS_DEPENDENCIES: log.warning('Could not load dependencies for %s', __virtualname__) return __virtualname__ - return (False, "Module win_task: module only works on Windows systems") + return False, 'Module win_task: module only works on Windows systems' def _get_date_time_format(dt_string): @@ -302,7 +303,8 @@ def _save_task_definition(name, except pythoncom.com_error as error: hr, msg, exc, arg = error.args # pylint: disable=W0633 - fc = {-2147024773: 'The filename, directory name, or volume label syntax is incorrect', + fc = {-2147024773: 'The filename, directory name, or volume label ' + 'syntax is incorrect', -2147024894: 'The system cannot find the file specified', -2147216615: 'Required element or attribute missing', -2147216616: 'Value incorrectly formatted or out of range', @@ -422,7 +424,7 @@ def list_triggers(name, location='\\'): # List all triggers for the XblGameSaveTask in the Microsoft\XblGameSave # location - salt 'minion-id' task.list_triggers XblGameSaveTask Microsoft\XblGameSave + salt '*' task.list_triggers XblGameSaveTask Microsoft\XblGameSave ''' # Create the task service object with salt.utils.winapi.Com(): @@ -617,7 +619,7 @@ def create_task_from_xml(name, .. code-block:: bash - salt 'minion-id' task.create_task_from_xml xml_path=C:\task.xml + salt '*' task.create_task_from_xml xml_path=C:\task.xml ''' # Check for existing task if name in list_tasks(location): @@ -780,6 +782,7 @@ def edit_task(name=None, working. .. note:: + The combination of user_name and password determine how the task runs. For example, if a username is passed without at password the task will only run when the user is logged in. If a @@ -926,7 +929,7 @@ def edit_task(name=None, .. 
code-block:: bash - salt 'minion-id' task.edit_task description='This task is awesome' + salt '*' task.edit_task description='This task is awesome' ''' # TODO: Add more detailed return for items changed @@ -1047,7 +1050,8 @@ def edit_task(name=None, task_definition.Settings.RestartInterval = '' else: if restart_every in duration: - task_definition.Settings.RestartInterval = _lookup_first(duration, restart_every) + task_definition.Settings.RestartInterval = _lookup_first( + duration, restart_every) else: return 'Invalid value for "restart_every"' if task_definition.Settings.RestartInterval: @@ -1061,7 +1065,8 @@ def edit_task(name=None, task_definition.Settings.ExecutionTimeLimit = 'PT0S' else: if execution_time_limit in duration: - task_definition.Settings.ExecutionTimeLimit = _lookup_first(duration, execution_time_limit) + task_definition.Settings.ExecutionTimeLimit = _lookup_first( + duration, execution_time_limit) else: return 'Invalid value for "execution_time_limit"' if force_stop is not None: @@ -1071,7 +1076,8 @@ def edit_task(name=None, if delete_after is False: task_definition.Settings.DeleteExpiredTaskAfter = '' if delete_after in duration: - task_definition.Settings.DeleteExpiredTaskAfter = _lookup_first(duration, delete_after) + task_definition.Settings.DeleteExpiredTaskAfter = _lookup_first( + duration, delete_after) else: return 'Invalid value for "delete_after"' if multiple_instances is not None: @@ -1186,8 +1192,8 @@ def run(name, location='\\'): The name of the task to run. location (str): - A string value representing the location of the task. Default is ``\`` - which is the root for the task scheduler + A string value representing the location of the task. Default is + ``\`` which is the root for the task scheduler (``C:\Windows\System32\tasks``). 
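A note on the ``_lookup_first(duration, ...)`` calls rewrapped in this hunk: ``edit_task`` accepts human-readable interval strings ('5 minutes', '1 hour', and so on) and converts them into the ISO 8601 duration strings the Task Scheduler stores, which is why the zero-length limit appears above as the literal ``'PT0S'``. The mapping values in the sketch below are assumptions for illustration only, not a copy of the module's actual table:

.. code-block:: python

    # Illustrative friendly-string -> ISO 8601 duration mapping; the
    # real module keeps a fuller table and validates input against it.
    duration = {
        '1 minute': 'PT1M',
        '5 minutes': 'PT5M',
        '1 hour': 'PT1H',
    }

    def lookup_duration(value):
        # Reject anything outside the known set, as edit_task does for
        # arguments such as restart_every and execution_time_limit.
        if value not in duration:
            raise ValueError('Invalid value: {0}'.format(value))
        return duration[value]

    assert lookup_duration('5 minutes') == 'PT5M'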
Returns: @@ -1215,7 +1221,7 @@ def run(name, location='\\'): try: task.Run('') return True - except pythoncom.com_error as error: + except pythoncom.com_error: return False @@ -1319,7 +1325,7 @@ def stop(name, location='\\'): try: task.Stop(0) return True - except pythoncom.com_error as error: + except pythoncom.com_error: return False @@ -1413,39 +1419,43 @@ def info(name, location='\\'): def_set = task.Definition.Settings - settings = {} - settings['allow_demand_start'] = def_set.AllowDemandStart - settings['force_stop'] = def_set.AllowHardTerminate + settings = { + 'allow_demand_start': def_set.AllowDemandStart, + 'force_stop': def_set.AllowHardTerminate} if def_set.DeleteExpiredTaskAfter == '': settings['delete_after'] = False elif def_set.DeleteExpiredTaskAfter == 'PT0S': settings['delete_after'] = 'Immediately' else: - settings['delete_after'] = _reverse_lookup(duration, def_set.DeleteExpiredTaskAfter) + settings['delete_after'] = _reverse_lookup( + duration, def_set.DeleteExpiredTaskAfter) if def_set.ExecutionTimeLimit == '': settings['execution_time_limit'] = False else: - settings['execution_time_limit'] = _reverse_lookup(duration, def_set.ExecutionTimeLimit) + settings['execution_time_limit'] = _reverse_lookup( + duration, def_set.ExecutionTimeLimit) - settings['multiple_instances'] = _reverse_lookup(instances, def_set.MultipleInstances) + settings['multiple_instances'] = _reverse_lookup( + instances, def_set.MultipleInstances) if def_set.RestartInterval == '': settings['restart_interval'] = False else: - settings['restart_interval'] = _reverse_lookup(duration, def_set.RestartInterval) + settings['restart_interval'] = _reverse_lookup( + duration, def_set.RestartInterval) if settings['restart_interval']: settings['restart_count'] = def_set.RestartCount settings['stop_if_on_batteries'] = def_set.StopIfGoingOnBatteries settings['wake_to_run'] = def_set.WakeToRun - conditions = {} - conditions['ac_only'] = def_set.DisallowStartIfOnBatteries - conditions['run_if_idle'] = def_set.RunOnlyIfIdle - conditions['run_if_network'] = def_set.RunOnlyIfNetworkAvailable - conditions['start_when_available'] = def_set.StartWhenAvailable + conditions = { + 'ac_only': def_set.DisallowStartIfOnBatteries, + 'run_if_idle': def_set.RunOnlyIfIdle, + 'run_if_network': def_set.RunOnlyIfNetworkAvailable, + 'start_when_available': def_set.StartWhenAvailable} if conditions['run_if_idle']: idle_set = def_set.IdleSettings @@ -1461,8 +1471,7 @@ def info(name, location='\\'): actions = [] for actionObj in task.Definition.Actions: - action = {} - action['action_type'] = _reverse_lookup(action_types, actionObj.Type) + action = {'action_type': _reverse_lookup(action_types, actionObj.Type)} if actionObj.Path: action['cmd'] = actionObj.Path if actionObj.Arguments: @@ -1473,10 +1482,11 @@ def info(name, location='\\'): triggers = [] for triggerObj in task.Definition.Triggers: - trigger = {} - trigger['trigger_type'] = _reverse_lookup(trigger_types, triggerObj.Type) + trigger = { + 'trigger_type': _reverse_lookup(trigger_types, triggerObj.Type)} if triggerObj.ExecutionTimeLimit: - trigger['execution_time_limit'] = _reverse_lookup(duration, triggerObj.ExecutionTimeLimit) + trigger['execution_time_limit'] = _reverse_lookup( + duration, triggerObj.ExecutionTimeLimit) if triggerObj.StartBoundary: start_date, start_time = triggerObj.StartBoundary.split('T', 1) trigger['start_date'] = start_date @@ -1488,7 +1498,8 @@ def info(name, location='\\'): trigger['enabled'] = triggerObj.Enabled if hasattr(triggerObj, 'RandomDelay'): 
if triggerObj.RandomDelay: - trigger['random_delay'] = _reverse_lookup(duration, triggerObj.RandomDelay) + trigger['random_delay'] = _reverse_lookup( + duration, triggerObj.RandomDelay) else: trigger['random_delay'] = False if hasattr(triggerObj, 'Delay'): @@ -1804,7 +1815,8 @@ def add_trigger(name=None, end_date (str): The date when the trigger is deactivated. The trigger cannot start - the task after it is deactivated. Can be one of the following formats: + the task after it is deactivated. Can be one of the following + formats: - %Y-%m-%d - %m-%d-%y @@ -1835,6 +1847,7 @@ def add_trigger(name=None, - 1 day .. note:: + This parameter applies to the following trigger types - Once @@ -1893,6 +1906,7 @@ def add_trigger(name=None, - 1 day .. note:: + This parameter applies to the following trigger types: - OnLogon @@ -1955,6 +1969,7 @@ def add_trigger(name=None, the month regardless of the actual date of that day. .. note:: + You can set the task to run on the last day of the month by either including the word 'Last' in the list of days, or setting the parameter 'last_day_of_month` equal to True. @@ -2011,8 +2026,10 @@ def add_trigger(name=None, Sets the kind of Terminal Server session change that would trigger a task launch. Valid options are: - - ConsoleConnect: When you connect to a user session (switch users) - - ConsoleDisconnect: When you disconnect a user session (switch users) + - ConsoleConnect: When you connect to a user session (switch + users) + - ConsoleDisconnect: When you disconnect a user session (switch + users) - RemoteConnect: When a user connects via Remote Desktop - RemoteDisconnect: When a user disconnects via Remote Desktop - SessionLock: When the workstation is locked @@ -2129,7 +2146,6 @@ def add_trigger(name=None, tm_obj.strftime('%H:%M:%S')) dt_obj = None - tm_obj = None if end_date: date_format = _get_date_time_format(end_date) if date_format: @@ -2192,10 +2208,12 @@ def add_trigger(name=None, if repeat_interval: trigger.Repetition.Interval = _lookup_first(duration, repeat_interval) if repeat_duration: - trigger.Repetition.Duration = _lookup_first(duration, repeat_duration) + trigger.Repetition.Duration = _lookup_first(duration, + repeat_duration) trigger.Repetition.StopAtDurationEnd = repeat_stop_at_duration_end if execution_time_limit: - trigger.ExecutionTimeLimit = _lookup_first(duration, execution_time_limit) + trigger.ExecutionTimeLimit = _lookup_first(duration, + execution_time_limit) if end_boundary: trigger.EndBoundary = end_boundary trigger.Enabled = trigger_enabled @@ -2271,9 +2289,11 @@ def add_trigger(name=None, for week in kwargs.get('weeks_of_month'): bits_weeks |= weeks[week] trigger.WeeksOfMonth = bits_weeks - trigger.RunOnLastWeekOfMonth = kwargs.get('last_week_of_month', False) + trigger.RunOnLastWeekOfMonth = kwargs.get('last_week_of_month', + False) else: - return 'Monthly DOW trigger requires "weeks_of_month" or "last_week_of_month" parameters' + return 'Monthly DOW trigger requires "weeks_of_month" or "last_' \ + 'week_of_month" parameters' if kwargs.get('days_of_week', False): bits_days = 0 @@ -2330,8 +2350,8 @@ def clear_triggers(name, location='\\'): The name of the task from which to clear all triggers. location (str): - A string value representing the location of the task. Default is ``\`` - which is the root for the task scheduler + A string value representing the location of the task. Default is + ``\`` which is the root for the task scheduler (``C:\Windows\System32\tasks``). 
Returns: From a66716ebc5213c3cf3ccc2c471b722a1e16c9ff9 Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 15 Apr 2019 14:55:12 -0600 Subject: [PATCH 185/340] Fix docs... attempt 5 --- salt/modules/win_task.py | 239 +++++++++++++++++++++------------------ 1 file changed, 127 insertions(+), 112 deletions(-) diff --git a/salt/modules/win_task.py b/salt/modules/win_task.py index 335719212a9b..5116cde4bfa6 100644 --- a/salt/modules/win_task.py +++ b/salt/modules/win_task.py @@ -1548,54 +1548,54 @@ def add_action(name=None, **Execute** - Execute a command or an executable + Execute a command or an executable - cmd (str): - (required) The command / executable to run. + cmd (str): + (required) The command or executable to run. - arguments (str): - (optional) Arguments to be passed to the command / executable. To - launch a script the first command will need to be the interpreter - for the script. For example, to run a vbscript you would pass - ``cscript.exe`` in the `cmd` parameter and pass the script in the - ``arguments`` parameter as follows: + arguments (str): + (optional) Arguments to be passed to the command or executable. + To launch a script the first command will need to be the + interpreter for the script. For example, to run a vbscript you + would pass ``cscript.exe`` in the `cmd` parameter and pass the + script in the ``arguments`` parameter as follows: - - ``cmd='cscript.exe' arguments='c:\scripts\myscript.vbs'`` + - ``cmd='cscript.exe' arguments='c:\scripts\myscript.vbs'`` - Batch files do not need an interpreter and may be passed to the cmd - parameter directly. + Batch files do not need an interpreter and may be passed to the + cmd parameter directly. - start_in (str): - (optional) The current working directory for the command. + start_in (str): + (optional) The current working directory for the command. **Email** - Send and email. Requires ``server``, ``from``, and ``to`` or ``cc``. - - from (str): The sender - reply_to (str): Who to reply to - to (str): The recipient - cc (str): The CC recipient - bcc (str): The BCC recipient - subject (str): The subject of the email - body (str): The Message Body of the email - server (str): The server used to send the email - attachments (list): - A list of attachments. These will be the paths to the files to - attach. ie: ``attachments="['C:\attachment1.txt', - 'C:\attachment2.txt']"`` + Send and email. Requires ``server``, ``from``, and ``to`` or ``cc``. + + from (str): The sender + reply_to (str): Who to reply to + to (str): The recipient + cc (str): The CC recipient + bcc (str): The BCC recipient + subject (str): The subject of the email + body (str): The Message Body of the email + server (str): The server used to send the email + attachments (list): + A list of attachments. These will be the paths to the files to + attach. ie: ``attachments="['C:\attachment1.txt', + 'C:\attachment2.txt']"`` **Message** - Display a dialog box. The task must be set to "Run only when - user is logged on" in order for the dialog box to display. Both parameters - are required. + Display a dialog box. The task must be set to "Run only when user is + logged on" in order for the dialog box to display. Both parameters are + required. - title (str): - The dialog box title. + title (str): + The dialog box title. 
- message (str): - The dialog box message body + message (str): + The dialog box message body Returns: dict: A dictionary containing the task configuration @@ -1922,127 +1922,142 @@ def add_trigger(name=None, *Event* - subscription (str): - An event definition in xml format that fires the trigger. The - easiest way to get this would is to create an event in Windows Task - Scheduler and then copy the xml text. + The trigger will be fired by an event. + + subscription (str): + An event definition in xml format that fires the trigger. The + easiest way to get this would is to create an event in Windows + Task Scheduler and then copy the xml text. *Once* - No special parameters required. + No special parameters required. *Daily* - days_interval (int): - The interval between days in the schedule. An interval of 1 produces - a daily schedule. An interval of 2 produces an every-other day - schedule. If no interval is specified, 1 is used. Valid entries are - 1 - 999. + The task will run daily. + + days_interval (int): + The interval between days in the schedule. An interval of 1 + produces a daily schedule. An interval of 2 produces an + every-other day schedule. If no interval is specified, 1 is + used. Valid entries are 1 - 999. *Weekly* - weeks_interval (int): - The interval between weeks in the schedule. An interval of 1 - produces a weekly schedule. An interval of 2 produces an every-other - week schedule. If no interval is specified, 1 is used. Valid entries - are 1 - 52. + The task will run weekly. - days_of_week (list): - Sets the days of the week on which the task runs. Should be a list. - ie: ``['Monday','Wednesday','Friday']``. Valid entries are the names - of the days of the week. + weeks_interval (int): + The interval between weeks in the schedule. An interval of 1 + produces a weekly schedule. An interval of 2 produces an + every-other week schedule. If no interval is specified, 1 is + used. Valid entries are 1 - 52. + + days_of_week (list): + Sets the days of the week on which the task runs. Should be a + list. ie: ``['Monday','Wednesday','Friday']``. Valid entries are + the names of the days of the week. *Monthly* - months_of_year (list): - Sets the months of the year during which the task runs. Should be a - list. ie: ``['January','July']``. Valid entries are the full names - of all the months. + The task will run monthly. + + months_of_year (list): + Sets the months of the year during which the task runs. Should + be a list. ie: ``['January','July']``. Valid entries are the + full names of all the months. - days_of_month (list): - Sets the days of the month during which the task runs. Should be a - list. ie: ``[1, 15, 'Last']``. Options are all days of the month 1 - - 31 and the word 'Last' to indicate the last day of the month. + days_of_month (list): + Sets the days of the month during which the task runs. Should be + a list. ie: ``[1, 15, 'Last']``. Options are all days of the + month 1 - 31 and the word 'Last' to indicate the last day of the + month. - last_day_of_month (bool): - Boolean value that indicates that the task runs on the last day of - the month regardless of the actual date of that day. + last_day_of_month (bool): + Boolean value that indicates that the task runs on the last day + of the month regardless of the actual date of that day. - .. note:: + .. note:: - You can set the task to run on the last day of the month by either - including the word 'Last' in the list of days, or setting the - parameter 'last_day_of_month` equal to True. 
+ You can set the task to run on the last day of the month by + either including the word 'Last' in the list of days, or + setting the parameter 'last_day_of_month` equal to ``True``. *MonthlyDay* - months_of_year (list): - Sets the months of the year during which the task runs. Should be a - list. ie: ``['January','July']``. Valid entries are the full names - of all the months. + The task will run monthly an the specified day. - weeks_of_month (list): - Sets the weeks of the month during which the task runs. Should be a - list. ie: ``['First','Third']``. Valid options are: + months_of_year (list): + Sets the months of the year during which the task runs. Should + be a list. ie: ``['January','July']``. Valid entries are the + full names of all the months. - - First - - Second - - Third - - Fourth + weeks_of_month (list): + Sets the weeks of the month during which the task runs. Should + be a list. ie: ``['First','Third']``. Valid options are: - last_week_of_month (bool): - Boolean value that indicates that the task runs on the last week of - the month. + - First + - Second + - Third + - Fourth - days_of_week (list): - Sets the days of the week during which the task runs. Should be a - list. ie: ``['Monday','Wednesday','Friday']``. Valid entries are - the names of the days of the week. + last_week_of_month (bool): + Boolean value that indicates that the task runs on the last week + of the month. + + days_of_week (list): + Sets the days of the week during which the task runs. Should be + a list. ie: ``['Monday','Wednesday','Friday']``. Valid entries + are the names of the days of the week. *OnIdle* - No special parameters required. + No special parameters required. *OnTaskCreation* - No special parameters required. + No special parameters required. *OnBoot* - No special parameters required. + No special parameters required. *OnLogon* - No special parameters required. + No special parameters required. *OnSessionChange* - session_user_name (str): - Sets the user for the Terminal Server session. When a session state - change is detected for this user, a task is started. To detect - session status change for any user, do not pass this parameter. + The task will be triggered by a session change. - state_change (str): - Sets the kind of Terminal Server session change that would trigger a - task launch. Valid options are: + session_user_name (str): + Sets the user for the Terminal Server session. When a session + state change is detected for this user, a task is started. To + detect session status change for any user, do not pass this + parameter. - - ConsoleConnect: When you connect to a user session (switch - users) - - ConsoleDisconnect: When you disconnect a user session (switch - users) - - RemoteConnect: When a user connects via Remote Desktop - - RemoteDisconnect: When a user disconnects via Remote Desktop - - SessionLock: When the workstation is locked - - SessionUnlock: When the workstation is unlocked + state_change (str): + Sets the kind of Terminal Server session change that would + trigger a task launch. Valid options are: - .. note:: + - ConsoleConnect: When you connect to a user session (switch + users) + - ConsoleDisconnect: When you disconnect a user session + (switch users) + - RemoteConnect: When a user connects via Remote Desktop + - RemoteDisconnect: When a user disconnects via Remote + Desktop + - SessionLock: When the workstation is locked + - SessionUnlock: When the workstation is unlocked + + .. 
note:: - Arguments are parsed by the YAML loader and are subject to - yaml's idiosyncrasies. Therefore, time values in some formats - (``%H:%M:%S`` and ``%H:%M``) should to be quoted. See - `YAML IDIOSYNCRASIES`_ for more details. + Arguments are parsed by the YAML loader and are subject to + yaml's idiosyncrasies. Therefore, time values in some + formats (``%H:%M:%S`` and ``%H:%M``) should to be quoted. + See `YAML IDIOSYNCRASIES`_ for more details. - .. _`YAML IDIOSYNCRASIES`: https://docs.saltstack.com/en/latest/topics/troubleshooting/yaml_idiosyncrasies.html#time-expressions + .. _`YAML IDIOSYNCRASIES`: https://docs.saltstack.com/en/latest/topics/troubleshooting/yaml_idiosyncrasies.html#time-expressions Returns: bool: ``True`` if successful, otherwise ``False`` From 58f0cd2b86c5a40c197714da7ce7753bf3c4d213 Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 15 Apr 2019 15:04:53 -0600 Subject: [PATCH 186/340] Fix docs... attempt 6 --- salt/modules/win_task.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/salt/modules/win_task.py b/salt/modules/win_task.py index 5116cde4bfa6..1619db3b9c46 100644 --- a/salt/modules/win_task.py +++ b/salt/modules/win_task.py @@ -1573,13 +1573,21 @@ def add_action(name=None, Send and email. Requires ``server``, ``from``, and ``to`` or ``cc``. from (str): The sender + reply_to (str): Who to reply to + to (str): The recipient + cc (str): The CC recipient + bcc (str): The BCC recipient + subject (str): The subject of the email + body (str): The Message Body of the email + server (str): The server used to send the email + attachments (list): A list of attachments. These will be the paths to the files to attach. ie: ``attachments="['C:\attachment1.txt', From 3442202bb413e2f095e5f3150f8c488139d5462c Mon Sep 17 00:00:00 2001 From: Ch3LL Date: Mon, 15 Apr 2019 17:32:57 -0400 Subject: [PATCH 187/340] Increase timeout for test_kwarg tests --- tests/integration/client/test_kwarg.py | 9 +++++++-- tests/integration/modules/test_state.py | 3 ++- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/tests/integration/client/test_kwarg.py b/tests/integration/client/test_kwarg.py index bc190fa35cae..8c129530797d 100644 --- a/tests/integration/client/test_kwarg.py +++ b/tests/integration/client/test_kwarg.py @@ -73,6 +73,7 @@ def test_full_returns(self): 'minion', 'test.arg', ['foo', 'bar', 'baz'], + timeout=self.TIMEOUT, kwarg={'qux': 'quux'} ) data = ret['minion']['ret'] @@ -99,7 +100,9 @@ def test_kwarg_type(self): self.assertIn(six.text_type.__name__, data['kwargs']['inner']) def test_full_return_kwarg(self): - ret = self.client.cmd('minion', 'test.ping', full_return=True) + ret = self.client.cmd( + 'minion', 'test.ping', full_return=True, timeout=self.TIMEOUT, + ) for mid, data in ret.items(): self.assertIn('retcode', data) @@ -112,8 +115,10 @@ def test_cmd_arg_kwarg_parsing(self): ], kwarg={ 'quux': 'Quux', - }) + }, + timeout=self.TIMEOUT, + ) self.assertEqual(ret['minion'], { 'args': ['foo'], 'kwargs': { diff --git a/tests/integration/modules/test_state.py b/tests/integration/modules/test_state.py index 6e431065af55..06c80f9cf523 100644 --- a/tests/integration/modules/test_state.py +++ b/tests/integration/modules/test_state.py @@ -13,7 +13,7 @@ # Import Salt Testing libs from tests.support.case import ModuleCase -from tests.support.helpers import with_tempdir +from tests.support.helpers import with_tempdir, flaky from tests.support.unit import skipIf from tests.support.paths import BASE_FILES, TMP, TMP_PILLAR_TREE from tests.support.mixins import 
SaltReturnAssertsMixin @@ -1762,6 +1762,7 @@ def run_create(self): with salt.utils.files.fopen(testfile, 'a'): pass + @flaky def test_retry_option_eventual_success(self): ''' test a state with the retry option that should return True after at least 4 retry attmempt From a279d458439182dca29cc3e0ab2f0a859eec2b68 Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 15 Apr 2019 16:31:20 -0600 Subject: [PATCH 188/340] Final doc fixes --- salt/modules/win_task.py | 37 +++++++++++++++++++------------------ 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/salt/modules/win_task.py b/salt/modules/win_task.py index 1619db3b9c46..7e39a87c1824 100644 --- a/salt/modules/win_task.py +++ b/salt/modules/win_task.py @@ -365,10 +365,10 @@ def list_folders(location='\\'): Args: - location (str): - A string value representing the folder from which you want to list - tasks. Default is ``\`` which is the root for the task scheduler - (``C:\Windows\System32\tasks``). + location (str): + A string value representing the folder from which you want to list + tasks. Default is ``\`` which is the root for the task scheduler + (``C:\Windows\System32\tasks``). Returns: list: Returns a list of folders. @@ -408,9 +408,10 @@ def list_triggers(name, location='\\'): name (str): The name of the task for which list triggers. - location (str): A string value representing the location of the task - from which to list triggers. Default is ``\`` which is the root for - the task scheduler (``C:\Windows\System32\tasks``). + location (str): + A string value representing the location of the task from which to + list triggers. Default is ``\`` which is the root for the task + scheduler (``C:\Windows\System32\tasks``). Returns: list: Returns a list of triggers. @@ -675,7 +676,7 @@ def create_task_from_xml(name, except KeyError: failure_code = 'Unknown Failure: {0}'.format(error) - log.debug('Failed to create task: {0}'.format(failure_code)) + log.debug('Failed to create task: %s', failure_code) # Verify creation if name in list_tasks(location): @@ -1772,6 +1773,15 @@ def add_trigger(name=None, r''' Add a trigger to a Windows Scheduled task + .. note:: + + Arguments are parsed by the YAML loader and are subject to + yaml's idiosyncrasies. Therefore, time values in some + formats (``%H:%M:%S`` and ``%H:%M``) should to be quoted. + See `YAML IDIOSYNCRASIES`_ for more details. + + .. _`YAML IDIOSYNCRASIES`: https://docs.saltstack.com/en/latest/topics/troubleshooting/yaml_idiosyncrasies.html#time-expressions + Args: name (str): @@ -2058,15 +2068,6 @@ def add_trigger(name=None, - SessionLock: When the workstation is locked - SessionUnlock: When the workstation is unlocked - .. note:: - - Arguments are parsed by the YAML loader and are subject to - yaml's idiosyncrasies. Therefore, time values in some - formats (``%H:%M:%S`` and ``%H:%M``) should to be quoted. - See `YAML IDIOSYNCRASIES`_ for more details. - - .. _`YAML IDIOSYNCRASIES`: https://docs.saltstack.com/en/latest/topics/troubleshooting/yaml_idiosyncrasies.html#time-expressions - Returns: bool: ``True`` if successful, otherwise ``False`` @@ -2074,7 +2075,7 @@ def add_trigger(name=None, .. 
code-block:: bash - salt 'minion-id' task.add_trigger trigger_type=Once trigger_enabled=True start_date=2016/12/1 start_time=12:01 + salt 'minion-id' task.add_trigger trigger_type=Once trigger_enabled=True start_date=2016/12/1 start_time='"12:01"' ''' if not trigger_type: return 'Required parameter "trigger_type" not specified' From deb0b10bb41938c4c5cffb1a16805cc6e6cd37dd Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 15 Apr 2019 16:41:04 -0600 Subject: [PATCH 189/340] Fix some lint --- salt/modules/win_task.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/salt/modules/win_task.py b/salt/modules/win_task.py index 7e39a87c1824..cef1e73b4ebf 100644 --- a/salt/modules/win_task.py +++ b/salt/modules/win_task.py @@ -1960,7 +1960,7 @@ def add_trigger(name=None, produces a daily schedule. An interval of 2 produces an every-other day schedule. If no interval is specified, 1 is used. Valid entries are 1 - 999. - + *Weekly* The task will run weekly. @@ -2029,19 +2029,19 @@ def add_trigger(name=None, are the names of the days of the week. *OnIdle* - + No special parameters required. *OnTaskCreation* - + No special parameters required. *OnBoot* - + No special parameters required. *OnLogon* - + No special parameters required. *OnSessionChange* From 87cf385e645e99dda4260d4f59069cf19bed349c Mon Sep 17 00:00:00 2001 From: Ch3LL Date: Tue, 16 Apr 2019 09:58:47 -0400 Subject: [PATCH 190/340] increase timeout on test_state for windows --- tests/integration/modules/test_state.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/tests/integration/modules/test_state.py b/tests/integration/modules/test_state.py index 06c80f9cf523..cd1939405dd4 100644 --- a/tests/integration/modules/test_state.py +++ b/tests/integration/modules/test_state.py @@ -86,6 +86,7 @@ def _reline(path, ending=DEFAULT_ENDING): _reline(destpath) destpath = os.path.join(BASE_FILES, 'testappend', 'secondif') _reline(destpath) + cls.TIMEOUT = 600 if salt.utils.platform.is_windows() else 10 def test_show_highstate(self): ''' @@ -1478,7 +1479,9 @@ def test_multiple_onfail_requisite(self): https://github.com/saltstack/salt/issues/22370 ''' - state_run = self.run_function('state.sls', mods='requisites.onfail_multiple') + state_run = self.run_function('state.sls', + mods='requisites.onfail_multiple', + timeout=self.TIMEOUT) retcode = state_run['cmd_|-c_|-echo itworked_|-run']['changes']['retcode'] self.assertEqual(retcode, 0) @@ -1678,7 +1681,9 @@ def test_listen_requisite_resolution_names(self): ''' # Only run the state once and keep the return data - state_run = self.run_function('state.sls', mods='requisites.listen_names') + state_run = self.run_function('state.sls', + mods='requisites.listen_names', + timeout=self.TIMEOUT) self.assertIn('test_|-listener_service_|-nginx_|-mod_watch', state_run) self.assertIn('test_|-listener_service_|-crond_|-mod_watch', state_run) From fd19cca2f6c0105996b651e2488b549aeb0a48b0 Mon Sep 17 00:00:00 2001 From: twangboy Date: Tue, 16 Apr 2019 11:10:58 -0600 Subject: [PATCH 191/340] Remove some warts in the docs --- salt/modules/win_task.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/modules/win_task.py b/salt/modules/win_task.py index cef1e73b4ebf..4f7e7a5c5d93 100644 --- a/salt/modules/win_task.py +++ b/salt/modules/win_task.py @@ -598,11 +598,11 @@ def create_task_from_xml(name, xml_text (str): A string of xml representing the task to be created. This will be - overridden by `xml_path` if passed. 
+ overridden by ``xml_path`` if passed. xml_path (str): The path to an XML file on the local system containing the xml that - defines the task. This will override `xml_text` + defines the task. This will override ``xml_text`` user_name (str): The user account under which to run the task. To specify the @@ -1558,7 +1558,7 @@ def add_action(name=None, (optional) Arguments to be passed to the command or executable. To launch a script the first command will need to be the interpreter for the script. For example, to run a vbscript you - would pass ``cscript.exe`` in the `cmd` parameter and pass the + would pass ``cscript.exe`` in the ``cmd`` parameter and pass the script in the ``arguments`` parameter as follows: - ``cmd='cscript.exe' arguments='c:\scripts\myscript.vbs'`` @@ -1999,7 +1999,7 @@ def add_trigger(name=None, You can set the task to run on the last day of the month by either including the word 'Last' in the list of days, or - setting the parameter 'last_day_of_month` equal to ``True``. + setting the parameter 'last_day_of_month' equal to ``True``. *MonthlyDay* From 880375c4fcbcccd89f59e0e370b0c5949c93c4d9 Mon Sep 17 00:00:00 2001 From: David Murphy < dmurphy@saltstack.com> Date: Fri, 12 Apr 2019 17:35:55 -0600 Subject: [PATCH 192/340] Initial updated ssh tests for AIX support --- tests/integration/__init__.py | 5 +++-- tests/integration/doc/test_man.py | 1 + tests/integration/ssh/test_grains.py | 2 ++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index eb246534cef2..af4d4b8bffbe 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -104,15 +104,16 @@ def get_unused_localhost_port(): DARWIN = True if sys.platform.startswith('darwin') else False BSD = True if 'bsd' in sys.platform else False + AIX = True if sys.platform.startswith('aix') else False - if DARWIN and port in _RUNTESTS_PORTS: + if (AIX or DARWIN) and port in _RUNTESTS_PORTS: port = get_unused_localhost_port() usock.close() return port _RUNTESTS_PORTS[port] = usock - if DARWIN or BSD: + if DARWIN or BSD or AIX: usock.close() return port diff --git a/tests/integration/doc/test_man.py b/tests/integration/doc/test_man.py index 6e2bb15ac68c..13773829a4f5 100644 --- a/tests/integration/doc/test_man.py +++ b/tests/integration/doc/test_man.py @@ -17,6 +17,7 @@ @skipIf(salt.utils.platform.is_windows(), 'minion is windows') +@skipIf(salt.utils.platform.is_aix(), 'minion is AIX') class ManTest(ModuleCase): rootdir = os.path.join(TMP, 'mantest') # Map filenames to search strings which should be in the manpage diff --git a/tests/integration/ssh/test_grains.py b/tests/integration/ssh/test_grains.py index 841537b4c5c2..ddfb1a2196b1 100644 --- a/tests/integration/ssh/test_grains.py +++ b/tests/integration/ssh/test_grains.py @@ -24,5 +24,7 @@ def test_grains_items(self): grain = 'Linux' if salt.utils.platform.is_darwin(): grain = 'Darwin' + if salt.utils.platform.is_aix(): + grain = 'AIX' self.assertEqual(ret['kernel'], grain) self.assertTrue(isinstance(ret, dict)) From bb4fa5ad95ca915630e2a9613960a6f30c1ff7b9 Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Tue, 16 Apr 2019 15:49:28 +0300 Subject: [PATCH 193/340] Revert "Minor: Fix typo in docstring" This reverts commit 37aeba314330a5cefdf9ca1d5ce069bc790e692f. 
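One note on the test-suite change in PATCH 192 before the transport reverts that follow: ``get_unused_localhost_port`` probes for a free ephemeral port, and on AIX (as already done for Darwin) it re-rolls any port it has handed out earlier in the run and closes the probe socket immediately, since those platforms may otherwise return a port that is still in use. Reduced to a standalone sketch, with ``seen_ports`` standing in loosely for the helper's real bookkeeping:

.. code-block:: python

    import socket

    seen_ports = set()

    def get_unused_localhost_port():
        # Ask the OS for an ephemeral port, then release the probe
        # socket right away so the suite can bind it again later.
        usock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        usock.bind(('127.0.0.1', 0))
        port = usock.getsockname()[1]
        usock.close()
        # Some platforms (AIX, Darwin) can hand back a port that was
        # already given out during this run; pick again in that case.
        if port in seen_ports:
            return get_unused_localhost_port()
        seen_ports.add(port)
        return port

    print(get_unused_localhost_port())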
--- salt/transport/ipc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py index 5899e08650a9..f90be4a20daa 100644 --- a/salt/transport/ipc.py +++ b/salt/transport/ipc.py @@ -566,7 +566,7 @@ class IPCMessageSubscriberService(IPCClient): of IPCMessageSubscriber instances feeding all of them with data. It closes automatically when there are no more subscribers. - To use this refer to IPCMessageSubscriber documentation. + To use this rever to IPCMessageSubscriber documentation. ''' def __init__(self, socket_path, io_loop=None): super(IPCMessageSubscriberService, self).__init__( From 7768ba22a7dca03cb1e7d2c12a36e3a737a5a6d9 Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Tue, 16 Apr 2019 15:50:50 +0300 Subject: [PATCH 194/340] Revert "Update doc conf with the new import `tornado.queues`" This reverts commit 684bf584f68bef5d1965e81494dfbd00f5c46542. --- doc/conf.py | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/conf.py b/doc/conf.py index 7b2e3b444646..4814536bf60a 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -131,7 +131,6 @@ def inner(fn, *iargs, **ikwargs): # pylint: disable=unused-argument 'tornado.ioloop', 'tornado.iostream', 'tornado.netutil', - 'tornado.queues', 'tornado.simple_httpclient', 'tornado.stack_context', 'tornado.web', From 9cca4022f44c65ffd8c1cd025a0d29ea33b95ca1 Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Tue, 16 Apr 2019 15:51:49 +0300 Subject: [PATCH 195/340] Revert "Support parallel work of multiple IPCMEssageSubscribers in one process" This reverts commit 710ab50624b16012d54485beeff151ff5940846a. --- salt/transport/ipc.py | 281 +++++++++++++++++++----------------------- 1 file changed, 128 insertions(+), 153 deletions(-) diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py index f90be4a20daa..e4b26ac68f1c 100644 --- a/salt/transport/ipc.py +++ b/salt/transport/ipc.py @@ -20,8 +20,7 @@ import tornado.gen import tornado.netutil import tornado.concurrent -import tornado.queues -from tornado.locks import Lock +from tornado.locks import Semaphore from tornado.ioloop import IOLoop, TimeoutError as TornadoTimeoutError from tornado.iostream import IOStream # Import Salt libs @@ -560,121 +559,11 @@ def __del__(self): self.close() -class IPCMessageSubscriberService(IPCClient): - ''' - IPC message subscriber service that is a standalone singleton class starting once for a number - of IPCMessageSubscriber instances feeding all of them with data. It closes automatically when - there are no more subscribers. - - To use this rever to IPCMessageSubscriber documentation. - ''' - def __init__(self, socket_path, io_loop=None): - super(IPCMessageSubscriberService, self).__init__( - socket_path, io_loop=io_loop) - self.saved_data = [] - self._read_in_progress = Lock() - self.handlers = weakref.WeakSet() - self.read_stream_future = None - - def _subscribe(self, handler): - self.handlers.add(handler) - - def unsubscribe(self, handler): - self.handlers.discard(handler) - - def _has_subscribers(self): - return bool(self.handlers) - - def _feed_subscribers(self, data): - for subscriber in self.handlers: - subscriber._feed(data) - - @tornado.gen.coroutine - def _read(self, timeout, callback=None): - try: - yield self._read_in_progress.acquire(timeout=0) - except tornado.gen.TimeoutError: - raise tornado.gen.Return(None) - - log.debug('IPC Subscriber Service is starting reading') - # If timeout is not specified we need to set some here to make the service able to check - # is there any handler waiting for data. 
- if timeout is None: - timeout = 5 - - self.read_stream_future = None - while self._has_subscribers(): - if self.read_stream_future is None: - self.read_stream_future = self.stream.read_bytes(4096, partial=True) - - try: - wire_bytes = yield FutureWithTimeout(self.io_loop, - self.read_stream_future, - timeout) - self.read_stream_future = None - - self.unpacker.feed(wire_bytes) - msgs = [msg['body'] for msg in self.unpacker] - self._feed_subscribers(msgs) - except TornadoTimeoutError: - # Continue checking are there alive waiting handlers - # Keep 'read_stream_future' alive to wait it more in the next loop - continue - except tornado.iostream.StreamClosedError as exc: - log.trace('Subscriber disconnected from IPC %s', self.socket_path) - self._feed_subscribers([None]) - break - except Exception as exc: - log.error('Exception occurred in Subscriber while handling stream: %s', exc) - exc = IPCExceptionProxy(sys.exc_info()) - self._feed_subscribers([exc]) - break - - log.debug('IPC Subscriber Service is stopping due to a lack of subscribers') - self._read_in_progress.release() - raise tornado.gen.Return(None) - - @tornado.gen.coroutine - def read(self, handler, timeout=None): - ''' - Asynchronously read messages and invoke a callback when they are ready. - - :param callback: A callback with the received data - ''' - self._subscribe(handler) - while not self.connected(): - try: - yield self.connect(timeout=5) - except tornado.iostream.StreamClosedError: - log.trace('Subscriber closed stream on IPC %s before connect', self.socket_path) - yield tornado.gen.sleep(1) - except Exception as exc: - log.error('Exception occurred while Subscriber connecting: %s', exc) - yield tornado.gen.sleep(1) - yield self._read(timeout) - - def close(self): - ''' - Routines to handle any cleanup before the instance shuts down. - Sockets and filehandles should be closed explicitly, to prevent - leaks. - ''' - super(IPCMessageSubscriberService, self).close() - if self.read_stream_future is not None and self.read_stream_future.done(): - exc = self.read_stream_future.exception() - if exc and not isinstance(exc, tornado.iostream.StreamClosedError): - log.error("Read future returned exception %r", exc) - - def __del__(self): - if IPCMessageSubscriberService in globals(): - self.close() - - -class IPCMessageSubscriber(object): +class IPCMessageSubscriber(IPCClient): ''' Salt IPC message subscriber - Create or reuse an IPC client to receive messages from IPC publisher + Create an IPC client to receive messages from IPC publisher An example of a very simple IPCMessageSubscriber connecting to an IPCMessagePublisher. This example assumes an already running IPCMessagePublisher. 
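The docstring restored around this point refers to a small example of an IPCMessageSubscriber talking to an already-running IPCMessagePublisher. As a rough, self-contained sketch of that usage with the API as restored by this revert (the socket path is a placeholder and error handling is omitted):

.. code-block:: python

    # Connect once on the IO loop, then block for a single published
    # message; read_sync() requires the loop to be stopped, which is
    # the state run_sync() leaves it in.
    import tornado.ioloop
    from salt.transport.ipc import IPCMessageSubscriber

    io_loop = tornado.ioloop.IOLoop()
    ipc_subscriber = IPCMessageSubscriber('/tmp/ipc_test.ipc', io_loop=io_loop)
    io_loop.run_sync(ipc_subscriber.connect)

    # Wait for some data
    package = ipc_subscriber.read_sync()
    print(package)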
@@ -703,61 +592,147 @@ class IPCMessageSubscriber(object): # Wait for some data package = ipc_subscriber.read_sync() ''' - def __init__(self, socket_path, io_loop=None): - self.service = IPCMessageSubscriberService(socket_path, io_loop) - self.queue = tornado.queues.Queue() - - def connected(self): - return self.service.connected() - - def connect(self, callback=None, timeout=None): - return self.service.connect(callback=callback, timeout=timeout) + def __singleton_init__(self, socket_path, io_loop=None): + super(IPCMessageSubscriber, self).__singleton_init__( + socket_path, io_loop=io_loop) + self._read_sync_future = None + self._read_stream_future = None + self._sync_ioloop_running = False + self.saved_data = [] + self._sync_read_in_progress = Semaphore() @tornado.gen.coroutine - def _feed(self, msgs): - for msg in msgs: - yield self.queue.put(msg) + def _read_sync(self, timeout): + yield self._sync_read_in_progress.acquire() + exc_to_raise = None + ret = None - @tornado.gen.coroutine - def read_async(self, callback, timeout=None): - ''' - Asynchronously read messages and invoke a callback when they are ready. + try: + while True: + if self._read_stream_future is None: + self._read_stream_future = self.stream.read_bytes(4096, partial=True) - :param callback: A callback with the received data - ''' - self.service.read(self) - while True: - try: - if timeout is not None: - deadline = time.time() + timeout + if timeout is None: + wire_bytes = yield self._read_stream_future else: - deadline = None - data = yield self.queue.get(timeout=deadline) - except tornado.gen.TimeoutError: - raise tornado.gen.Return(None) - if data is None: - break - elif isinstance(data, IPCExceptionProxy): - six.reraise(*data.orig_info) - elif callback: - self.service.io_loop.spawn_callback(callback, data) - else: - raise tornado.gen.Return(data) + future_with_timeout = FutureWithTimeout( + self.io_loop, self._read_stream_future, timeout) + wire_bytes = yield future_with_timeout + + self._read_stream_future = None + + # Remove the timeout once we get some data or an exception + # occurs. We will assume that the rest of the data is already + # there or is coming soon if an exception doesn't occur. + timeout = None + + self.unpacker.feed(wire_bytes) + first = True + for framed_msg in self.unpacker: + if first: + ret = framed_msg['body'] + first = False + else: + self.saved_data.append(framed_msg['body']) + if not first: + # We read at least one piece of data + break + except TornadoTimeoutError: + # In the timeout case, just return None. + # Keep 'self._read_stream_future' alive. + ret = None + except tornado.iostream.StreamClosedError as exc: + log.trace('Subscriber disconnected from IPC %s', self.socket_path) + self._read_stream_future = None + exc_to_raise = exc + except Exception as exc: + log.error('Exception occurred in Subscriber while handling stream: %s', exc) + self._read_stream_future = None + exc_to_raise = exc + + if self._sync_ioloop_running: + # Stop the IO Loop so that self.io_loop.start() will return in + # read_sync(). + self.io_loop.spawn_callback(self.io_loop.stop) + + if exc_to_raise is not None: + raise exc_to_raise # pylint: disable=E0702 + self._sync_read_in_progress.release() + raise tornado.gen.Return(ret) def read_sync(self, timeout=None): ''' Read a message from an IPC socket + The socket must already be connected. The associated IO Loop must NOT be running. :param int timeout: Timeout when receiving message :return: message data if successful. None if timed out. 
Will raise an exception for all other error conditions. ''' - return self.service.io_loop.run_sync(lambda: self.read_async(None, timeout)) + if self.saved_data: + return self.saved_data.pop(0) + + self._sync_ioloop_running = True + self._read_sync_future = self._read_sync(timeout) + self.io_loop.start() + self._sync_ioloop_running = False + + ret_future = self._read_sync_future + self._read_sync_future = None + return ret_future.result() + + @tornado.gen.coroutine + def _read_async(self, callback): + while not self.stream.closed(): + try: + self._read_stream_future = self.stream.read_bytes(4096, partial=True) + wire_bytes = yield self._read_stream_future + self._read_stream_future = None + self.unpacker.feed(wire_bytes) + for framed_msg in self.unpacker: + body = framed_msg['body'] + self.io_loop.spawn_callback(callback, body) + except tornado.iostream.StreamClosedError: + log.trace('Subscriber disconnected from IPC %s', self.socket_path) + break + except Exception as exc: + log.error('Exception occurred while Subscriber handling stream: %s', exc) + + @tornado.gen.coroutine + def read_async(self, callback): + ''' + Asynchronously read messages and invoke a callback when they are ready. + + :param callback: A callback with the received data + ''' + while not self.connected(): + try: + yield self.connect(timeout=5) + except tornado.iostream.StreamClosedError: + log.trace('Subscriber closed stream on IPC %s before connect', self.socket_path) + yield tornado.gen.sleep(1) + except Exception as exc: + log.error('Exception occurred while Subscriber connecting: %s', exc) + yield tornado.gen.sleep(1) + yield self._read_async(callback) def close(self): - self.service.unsubscribe(self) - self.service.close() + ''' + Routines to handle any cleanup before the instance shuts down. + Sockets and filehandles should be closed explicitly, to prevent + leaks. 
+ ''' + if not self._closing: + IPCClient.close(self) + # This will prevent this message from showing up: + # '[ERROR ] Future exception was never retrieved: + # StreamClosedError' + if self._read_sync_future is not None and self._read_sync_future.done(): + self._read_sync_future.exception() + if self._read_stream_future is not None and self._read_stream_future.done(): + self._read_stream_future.exception() def __del__(self): - self.close() + if IPCMessageSubscriber in globals(): + self.close() From 82071753ca261d64ea6b60b2c2510e9eef00ab66 Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Tue, 16 Apr 2019 19:23:43 +0300 Subject: [PATCH 196/340] Drop singleton from IPCClient --- salt/transport/ipc.py | 109 +++++++++++++++--------------------------- 1 file changed, 38 insertions(+), 71 deletions(-) diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py index e4b26ac68f1c..145d5c639bae 100644 --- a/salt/transport/ipc.py +++ b/salt/transport/ipc.py @@ -8,9 +8,7 @@ import errno import logging import socket -import weakref import time -import sys # Import 3rd-party libs import msgpack @@ -20,7 +18,7 @@ import tornado.gen import tornado.netutil import tornado.concurrent -from tornado.locks import Semaphore +from tornado.locks import Lock from tornado.ioloop import IOLoop, TimeoutError as TornadoTimeoutError from tornado.iostream import IOStream # Import Salt libs @@ -84,11 +82,6 @@ def _done_callback(self, future): self.set_exception(exc) -class IPCExceptionProxy(object): - def __init__(self, orig_info): - self.orig_info = orig_info - - class IPCServer(object): ''' A Tornado IPC server very similar to Tornado's TCPServer class @@ -592,21 +585,23 @@ class IPCMessageSubscriber(IPCClient): # Wait for some data package = ipc_subscriber.read_sync() ''' - def __singleton_init__(self, socket_path, io_loop=None): - super(IPCMessageSubscriber, self).__singleton_init__( + def __init__(self, socket_path, io_loop=None): + super(IPCMessageSubscriber, self).__init__( socket_path, io_loop=io_loop) - self._read_sync_future = None self._read_stream_future = None - self._sync_ioloop_running = False - self.saved_data = [] - self._sync_read_in_progress = Semaphore() + self._saved_data = [] + self._read_in_progress = Lock() @tornado.gen.coroutine - def _read_sync(self, timeout): - yield self._sync_read_in_progress.acquire() + def _read(self, timeout, callback=None): + try: + yield self._read_in_progress.acquire(timeout=0) + except tornado.gen.TimeoutError: + raise tornado.gen.Return(None) + + log.debug('IPC Subscriber is starting reading') exc_to_raise = None ret = None - try: while True: if self._read_stream_future is None: @@ -615,10 +610,9 @@ def _read_sync(self, timeout): if timeout is None: wire_bytes = yield self._read_stream_future else: - future_with_timeout = FutureWithTimeout( - self.io_loop, self._read_stream_future, timeout) - wire_bytes = yield future_with_timeout - + wire_bytes = yield FutureWithTimeout(self.io_loop, + self._read_stream_future, + timeout) self._read_stream_future = None # Remove the timeout once we get some data or an exception @@ -627,15 +621,17 @@ def _read_sync(self, timeout): timeout = None self.unpacker.feed(wire_bytes) - first = True + first_sync_msg = True for framed_msg in self.unpacker: - if first: + if callback: + self.io_loop.spawn_callback(callback, framed_msg['body']) + elif first_sync_msg: ret = framed_msg['body'] - first = False + first_sync_msg = False else: - self.saved_data.append(framed_msg['body']) - if not first: - # We read at least one piece of data + 
self._saved_data.append(framed_msg['body']) + if not first_sync_msg: + # We read at least one piece of data and we're on sync run break except TornadoTimeoutError: # In the timeout case, just return None. @@ -650,14 +646,9 @@ def _read_sync(self, timeout): self._read_stream_future = None exc_to_raise = exc - if self._sync_ioloop_running: - # Stop the IO Loop so that self.io_loop.start() will return in - # read_sync(). - self.io_loop.spawn_callback(self.io_loop.stop) - if exc_to_raise is not None: raise exc_to_raise # pylint: disable=E0702 - self._sync_read_in_progress.release() + self._read_in_progress.release() raise tornado.gen.Return(ret) def read_sync(self, timeout=None): @@ -670,34 +661,9 @@ def read_sync(self, timeout=None): :return: message data if successful. None if timed out. Will raise an exception for all other error conditions. ''' - if self.saved_data: - return self.saved_data.pop(0) - - self._sync_ioloop_running = True - self._read_sync_future = self._read_sync(timeout) - self.io_loop.start() - self._sync_ioloop_running = False - - ret_future = self._read_sync_future - self._read_sync_future = None - return ret_future.result() - - @tornado.gen.coroutine - def _read_async(self, callback): - while not self.stream.closed(): - try: - self._read_stream_future = self.stream.read_bytes(4096, partial=True) - wire_bytes = yield self._read_stream_future - self._read_stream_future = None - self.unpacker.feed(wire_bytes) - for framed_msg in self.unpacker: - body = framed_msg['body'] - self.io_loop.spawn_callback(callback, body) - except tornado.iostream.StreamClosedError: - log.trace('Subscriber disconnected from IPC %s', self.socket_path) - break - except Exception as exc: - log.error('Exception occurred while Subscriber handling stream: %s', exc) + if self._saved_data: + return self._saved_data.pop(0) + return self.io_loop.run_sync(lambda: self._read(timeout)) @tornado.gen.coroutine def read_async(self, callback): @@ -715,7 +681,7 @@ def read_async(self, callback): except Exception as exc: log.error('Exception occurred while Subscriber connecting: %s', exc) yield tornado.gen.sleep(1) - yield self._read_async(callback) + yield self._read(None, callback) def close(self): ''' @@ -723,15 +689,16 @@ def close(self): Sockets and filehandles should be closed explicitly, to prevent leaks. ''' - if not self._closing: - IPCClient.close(self) - # This will prevent this message from showing up: - # '[ERROR ] Future exception was never retrieved: - # StreamClosedError' - if self._read_sync_future is not None and self._read_sync_future.done(): - self._read_sync_future.exception() - if self._read_stream_future is not None and self._read_stream_future.done(): - self._read_stream_future.exception() + if self._closing: + return + super(IPCMessageSubscriber, self).close() + # This will prevent this message from showing up: + # '[ERROR ] Future exception was never retrieved: + # StreamClosedError' + if self._read_stream_future is not None and self._read_stream_future.done(): + exc = self._read_stream_future.exception() + if exc and not isinstance(exc, tornado.iostream.StreamClosedError): + log.error("Read future returned exception %r", exc) def __del__(self): if IPCMessageSubscriber in globals(): From 09afb6e7f656d6819165c619332e71ef6948c107 Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Wed, 17 Apr 2019 00:47:39 +0300 Subject: [PATCH 197/340] A couple of race conditions fixes and a test update. 
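[Illustrative note, not part of the patch] The race-condition fix below comes down to two things: the read guard acquires its tornado Lock with a tiny positive timeout, presumably because a zero timeout does not reliably fail fast, so a second concurrent reader returns None instead of blocking; and the lock is released before any captured exception is re-raised. A minimal standalone sketch of that guard pattern, assuming only tornado (none of the names below are Salt code):

    import tornado.gen
    import tornado.ioloop
    from tornado.locks import Lock

    _read_in_progress = Lock()

    @tornado.gen.coroutine
    def guarded_read():
        try:
            # Tiny positive timeout: if another read already holds the lock,
            # give up almost immediately instead of queueing behind it.
            yield _read_in_progress.acquire(timeout=0.00000001)
        except tornado.gen.TimeoutError:
            raise tornado.gen.Return(None)
        try:
            data = 'payload'  # stand-in for the real stream read work
        finally:
            # Release before surfacing any error so later reads can proceed.
            _read_in_progress.release()
        raise tornado.gen.Return(data)

    print(tornado.ioloop.IOLoop.current().run_sync(guarded_read))
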
--- salt/transport/ipc.py | 17 +++++++++-------- tests/unit/transport/test_ipc.py | 13 +++++++++++++ 2 files changed, 22 insertions(+), 8 deletions(-) diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py index 145d5c639bae..9c92fe2cb641 100644 --- a/salt/transport/ipc.py +++ b/salt/transport/ipc.py @@ -20,7 +20,7 @@ import tornado.concurrent from tornado.locks import Lock from tornado.ioloop import IOLoop, TimeoutError as TornadoTimeoutError -from tornado.iostream import IOStream +from tornado.iostream import IOStream, StreamClosedError # Import Salt libs import salt.transport.client import salt.transport.frame @@ -176,7 +176,7 @@ def return_message(msg): for framed_msg in unpacker: body = framed_msg['body'] self.io_loop.spawn_callback(self.payload_handler, body, write_callback(stream, framed_msg['head'])) - except tornado.iostream.StreamClosedError: + except StreamClosedError: log.trace('Client disconnected from IPC %s', self.socket_path) break except socket.error as exc: @@ -491,7 +491,7 @@ def start(self): def _write(self, stream, pack): try: yield stream.write(pack) - except tornado.iostream.StreamClosedError: + except StreamClosedError: log.trace('Client disconnected from IPC %s', self.socket_path) self.streams.discard(stream) except Exception as exc: @@ -595,7 +595,7 @@ def __init__(self, socket_path, io_loop=None): @tornado.gen.coroutine def _read(self, timeout, callback=None): try: - yield self._read_in_progress.acquire(timeout=0) + yield self._read_in_progress.acquire(timeout=0.00000001) except tornado.gen.TimeoutError: raise tornado.gen.Return(None) @@ -637,7 +637,7 @@ def _read(self, timeout, callback=None): # In the timeout case, just return None. # Keep 'self._read_stream_future' alive. ret = None - except tornado.iostream.StreamClosedError as exc: + except StreamClosedError as exc: log.trace('Subscriber disconnected from IPC %s', self.socket_path) self._read_stream_future = None exc_to_raise = exc @@ -646,9 +646,10 @@ def _read(self, timeout, callback=None): self._read_stream_future = None exc_to_raise = exc + self._read_in_progress.release() + if exc_to_raise is not None: raise exc_to_raise # pylint: disable=E0702 - self._read_in_progress.release() raise tornado.gen.Return(ret) def read_sync(self, timeout=None): @@ -675,7 +676,7 @@ def read_async(self, callback): while not self.connected(): try: yield self.connect(timeout=5) - except tornado.iostream.StreamClosedError: + except StreamClosedError: log.trace('Subscriber closed stream on IPC %s before connect', self.socket_path) yield tornado.gen.sleep(1) except Exception as exc: @@ -697,7 +698,7 @@ def close(self): # StreamClosedError' if self._read_stream_future is not None and self._read_stream_future.done(): exc = self._read_stream_future.exception() - if exc and not isinstance(exc, tornado.iostream.StreamClosedError): + if exc and not isinstance(exc, StreamClosedError): log.error("Read future returned exception %r", exc) def __del__(self): diff --git a/tests/unit/transport/test_ipc.py b/tests/unit/transport/test_ipc.py index 939c4958318d..2c3191076b85 100644 --- a/tests/unit/transport/test_ipc.py +++ b/tests/unit/transport/test_ipc.py @@ -240,3 +240,16 @@ def handler(raw): self.assertEqual(len(call_cnt), 2) self.assertEqual(call_cnt[0], 'TEST') self.assertEqual(call_cnt[1], 'TEST') + + def test_sync_reading(self): + # To be completely fair let's create 2 clients. 
+ client1 = self.sub_channel + client2 = self._get_sub_channel() + call_cnt = [] + + # Now let both waiting data at once + self.pub_channel.publish('TEST') + ret1 = client1.read_sync() + ret2 = client2.read_sync() + self.assertEqual(ret1, 'TEST') + self.assertEqual(ret2, 'TEST') From 69e9416612a2f6185962e0dd9af9fab67beac60f Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Tue, 16 Apr 2019 15:49:28 +0300 Subject: [PATCH 198/340] Revert "Minor: Fix typo in docstring" This reverts commit 37aeba314330a5cefdf9ca1d5ce069bc790e692f. --- salt/transport/ipc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py index 12d58fdf374c..a853b258f4d3 100644 --- a/salt/transport/ipc.py +++ b/salt/transport/ipc.py @@ -581,7 +581,7 @@ class IPCMessageSubscriberService(IPCClient): of IPCMessageSubscriber instances feeding all of them with data. It closes automatically when there are no more subscribers. - To use this refer to IPCMessageSubscriber documentation. + To use this rever to IPCMessageSubscriber documentation. ''' def __init__(self, socket_path, io_loop=None): super(IPCMessageSubscriberService, self).__init__( From 7f26e764fce51eafee1971274e6c693bc5de87ee Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Tue, 16 Apr 2019 15:50:50 +0300 Subject: [PATCH 199/340] Revert "Update doc conf with the new import `tornado.queues`" This reverts commit 684bf584f68bef5d1965e81494dfbd00f5c46542. --- doc/conf.py | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/conf.py b/doc/conf.py index 1f79530f24e5..0f31c080b472 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -132,7 +132,6 @@ def inner(fn, *iargs, **ikwargs): # pylint: disable=unused-argument 'tornado.ioloop', 'tornado.iostream', 'tornado.netutil', - 'tornado.queues', 'tornado.simple_httpclient', 'tornado.stack_context', 'tornado.web', From 7cf6d549f2233b40ea3d8adf082c9f3970e5c932 Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Tue, 16 Apr 2019 15:51:49 +0300 Subject: [PATCH 200/340] Revert "Support parallel work of multiple IPCMEssageSubscribers in one process" This reverts commit 710ab50624b16012d54485beeff151ff5940846a. --- salt/transport/ipc.py | 281 +++++++++++++++++++----------------------- 1 file changed, 128 insertions(+), 153 deletions(-) diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py index a853b258f4d3..33f5e58e7f1d 100644 --- a/salt/transport/ipc.py +++ b/salt/transport/ipc.py @@ -20,8 +20,7 @@ import tornado.gen import tornado.netutil import tornado.concurrent -import tornado.queues -from tornado.locks import Lock +from tornado.locks import Semaphore from tornado.ioloop import IOLoop, TimeoutError as TornadoTimeoutError from tornado.iostream import IOStream # Import Salt libs @@ -575,121 +574,11 @@ def __del__(self): pass -class IPCMessageSubscriberService(IPCClient): - ''' - IPC message subscriber service that is a standalone singleton class starting once for a number - of IPCMessageSubscriber instances feeding all of them with data. It closes automatically when - there are no more subscribers. - - To use this rever to IPCMessageSubscriber documentation. 
- ''' - def __init__(self, socket_path, io_loop=None): - super(IPCMessageSubscriberService, self).__init__( - socket_path, io_loop=io_loop) - self.saved_data = [] - self._read_in_progress = Lock() - self.handlers = weakref.WeakSet() - self.read_stream_future = None - - def _subscribe(self, handler): - self.handlers.add(handler) - - def unsubscribe(self, handler): - self.handlers.discard(handler) - - def _has_subscribers(self): - return bool(self.handlers) - - def _feed_subscribers(self, data): - for subscriber in self.handlers: - subscriber._feed(data) - - @tornado.gen.coroutine - def _read(self, timeout, callback=None): - try: - yield self._read_in_progress.acquire(timeout=0) - except tornado.gen.TimeoutError: - raise tornado.gen.Return(None) - - log.debug('IPC Subscriber Service is starting reading') - # If timeout is not specified we need to set some here to make the service able to check - # is there any handler waiting for data. - if timeout is None: - timeout = 5 - - self.read_stream_future = None - while self._has_subscribers(): - if self.read_stream_future is None: - self.read_stream_future = self.stream.read_bytes(4096, partial=True) - - try: - wire_bytes = yield FutureWithTimeout(self.io_loop, - self.read_stream_future, - timeout) - self.read_stream_future = None - - self.unpacker.feed(wire_bytes) - msgs = [msg['body'] for msg in self.unpacker] - self._feed_subscribers(msgs) - except TornadoTimeoutError: - # Continue checking are there alive waiting handlers - # Keep 'read_stream_future' alive to wait it more in the next loop - continue - except tornado.iostream.StreamClosedError as exc: - log.trace('Subscriber disconnected from IPC %s', self.socket_path) - self._feed_subscribers([None]) - break - except Exception as exc: - log.error('Exception occurred in Subscriber while handling stream: %s', exc) - exc = IPCExceptionProxy(sys.exc_info()) - self._feed_subscribers([exc]) - break - - log.debug('IPC Subscriber Service is stopping due to a lack of subscribers') - self._read_in_progress.release() - raise tornado.gen.Return(None) - - @tornado.gen.coroutine - def read(self, handler, timeout=None): - ''' - Asynchronously read messages and invoke a callback when they are ready. - - :param callback: A callback with the received data - ''' - self._subscribe(handler) - while not self.connected(): - try: - yield self.connect(timeout=5) - except tornado.iostream.StreamClosedError: - log.trace('Subscriber closed stream on IPC %s before connect', self.socket_path) - yield tornado.gen.sleep(1) - except Exception as exc: - log.error('Exception occurred while Subscriber connecting: %s', exc) - yield tornado.gen.sleep(1) - yield self._read(timeout) - - def close(self): - ''' - Routines to handle any cleanup before the instance shuts down. - Sockets and filehandles should be closed explicitly, to prevent - leaks. 
- ''' - super(IPCMessageSubscriberService, self).close() - if self.read_stream_future is not None and self.read_stream_future.done(): - exc = self.read_stream_future.exception() - if exc and not isinstance(exc, tornado.iostream.StreamClosedError): - log.error("Read future returned exception %r", exc) - - def __del__(self): - if IPCMessageSubscriberService in globals(): - self.close() - - -class IPCMessageSubscriber(object): +class IPCMessageSubscriber(IPCClient): ''' Salt IPC message subscriber - Create or reuse an IPC client to receive messages from IPC publisher + Create an IPC client to receive messages from IPC publisher An example of a very simple IPCMessageSubscriber connecting to an IPCMessagePublisher. This example assumes an already running IPCMessagePublisher. @@ -718,61 +607,147 @@ class IPCMessageSubscriber(object): # Wait for some data package = ipc_subscriber.read_sync() ''' - def __init__(self, socket_path, io_loop=None): - self.service = IPCMessageSubscriberService(socket_path, io_loop) - self.queue = tornado.queues.Queue() - - def connected(self): - return self.service.connected() - - def connect(self, callback=None, timeout=None): - return self.service.connect(callback=callback, timeout=timeout) + def __singleton_init__(self, socket_path, io_loop=None): + super(IPCMessageSubscriber, self).__singleton_init__( + socket_path, io_loop=io_loop) + self._read_sync_future = None + self._read_stream_future = None + self._sync_ioloop_running = False + self.saved_data = [] + self._sync_read_in_progress = Semaphore() @tornado.gen.coroutine - def _feed(self, msgs): - for msg in msgs: - yield self.queue.put(msg) + def _read_sync(self, timeout): + yield self._sync_read_in_progress.acquire() + exc_to_raise = None + ret = None - @tornado.gen.coroutine - def read_async(self, callback, timeout=None): - ''' - Asynchronously read messages and invoke a callback when they are ready. + try: + while True: + if self._read_stream_future is None: + self._read_stream_future = self.stream.read_bytes(4096, partial=True) - :param callback: A callback with the received data - ''' - self.service.read(self) - while True: - try: - if timeout is not None: - deadline = time.time() + timeout + if timeout is None: + wire_bytes = yield self._read_stream_future else: - deadline = None - data = yield self.queue.get(timeout=deadline) - except tornado.gen.TimeoutError: - raise tornado.gen.Return(None) - if data is None: - break - elif isinstance(data, IPCExceptionProxy): - six.reraise(*data.orig_info) - elif callback: - self.service.io_loop.spawn_callback(callback, data) - else: - raise tornado.gen.Return(data) + future_with_timeout = FutureWithTimeout( + self.io_loop, self._read_stream_future, timeout) + wire_bytes = yield future_with_timeout + + self._read_stream_future = None + + # Remove the timeout once we get some data or an exception + # occurs. We will assume that the rest of the data is already + # there or is coming soon if an exception doesn't occur. + timeout = None + + self.unpacker.feed(wire_bytes) + first = True + for framed_msg in self.unpacker: + if first: + ret = framed_msg['body'] + first = False + else: + self.saved_data.append(framed_msg['body']) + if not first: + # We read at least one piece of data + break + except TornadoTimeoutError: + # In the timeout case, just return None. + # Keep 'self._read_stream_future' alive. 
+ ret = None + except tornado.iostream.StreamClosedError as exc: + log.trace('Subscriber disconnected from IPC %s', self.socket_path) + self._read_stream_future = None + exc_to_raise = exc + except Exception as exc: + log.error('Exception occurred in Subscriber while handling stream: %s', exc) + self._read_stream_future = None + exc_to_raise = exc + + if self._sync_ioloop_running: + # Stop the IO Loop so that self.io_loop.start() will return in + # read_sync(). + self.io_loop.spawn_callback(self.io_loop.stop) + + if exc_to_raise is not None: + raise exc_to_raise # pylint: disable=E0702 + self._sync_read_in_progress.release() + raise tornado.gen.Return(ret) def read_sync(self, timeout=None): ''' Read a message from an IPC socket + The socket must already be connected. The associated IO Loop must NOT be running. :param int timeout: Timeout when receiving message :return: message data if successful. None if timed out. Will raise an exception for all other error conditions. ''' - return self.service.io_loop.run_sync(lambda: self.read_async(None, timeout)) + if self.saved_data: + return self.saved_data.pop(0) + + self._sync_ioloop_running = True + self._read_sync_future = self._read_sync(timeout) + self.io_loop.start() + self._sync_ioloop_running = False + + ret_future = self._read_sync_future + self._read_sync_future = None + return ret_future.result() + + @tornado.gen.coroutine + def _read_async(self, callback): + while not self.stream.closed(): + try: + self._read_stream_future = self.stream.read_bytes(4096, partial=True) + wire_bytes = yield self._read_stream_future + self._read_stream_future = None + self.unpacker.feed(wire_bytes) + for framed_msg in self.unpacker: + body = framed_msg['body'] + self.io_loop.spawn_callback(callback, body) + except tornado.iostream.StreamClosedError: + log.trace('Subscriber disconnected from IPC %s', self.socket_path) + break + except Exception as exc: + log.error('Exception occurred while Subscriber handling stream: %s', exc) + + @tornado.gen.coroutine + def read_async(self, callback): + ''' + Asynchronously read messages and invoke a callback when they are ready. + + :param callback: A callback with the received data + ''' + while not self.connected(): + try: + yield self.connect(timeout=5) + except tornado.iostream.StreamClosedError: + log.trace('Subscriber closed stream on IPC %s before connect', self.socket_path) + yield tornado.gen.sleep(1) + except Exception as exc: + log.error('Exception occurred while Subscriber connecting: %s', exc) + yield tornado.gen.sleep(1) + yield self._read_async(callback) def close(self): - self.service.unsubscribe(self) - self.service.close() + ''' + Routines to handle any cleanup before the instance shuts down. + Sockets and filehandles should be closed explicitly, to prevent + leaks. 
+ ''' + if not self._closing: + IPCClient.close(self) + # This will prevent this message from showing up: + # '[ERROR ] Future exception was never retrieved: + # StreamClosedError' + if self._read_sync_future is not None and self._read_sync_future.done(): + self._read_sync_future.exception() + if self._read_stream_future is not None and self._read_stream_future.done(): + self._read_stream_future.exception() def __del__(self): - self.close() + if IPCMessageSubscriber in globals(): + self.close() From 9c85734a3cc504f35188a2376f6c96bd4806222f Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Tue, 16 Apr 2019 19:23:43 +0300 Subject: [PATCH 201/340] Drop singleton from IPCClient --- salt/transport/ipc.py | 109 +++++++++++++++--------------------------- 1 file changed, 38 insertions(+), 71 deletions(-) diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py index 33f5e58e7f1d..fcb882fc29ac 100644 --- a/salt/transport/ipc.py +++ b/salt/transport/ipc.py @@ -8,9 +8,7 @@ import errno import logging import socket -import weakref import time -import sys # Import 3rd-party libs import msgpack @@ -20,7 +18,7 @@ import tornado.gen import tornado.netutil import tornado.concurrent -from tornado.locks import Semaphore +from tornado.locks import Lock from tornado.ioloop import IOLoop, TimeoutError as TornadoTimeoutError from tornado.iostream import IOStream # Import Salt libs @@ -84,11 +82,6 @@ def _done_callback(self, future): self.set_exception(exc) -class IPCExceptionProxy(object): - def __init__(self, orig_info): - self.orig_info = orig_info - - class IPCServer(object): ''' A Tornado IPC server very similar to Tornado's TCPServer class @@ -607,21 +600,23 @@ class IPCMessageSubscriber(IPCClient): # Wait for some data package = ipc_subscriber.read_sync() ''' - def __singleton_init__(self, socket_path, io_loop=None): - super(IPCMessageSubscriber, self).__singleton_init__( + def __init__(self, socket_path, io_loop=None): + super(IPCMessageSubscriber, self).__init__( socket_path, io_loop=io_loop) - self._read_sync_future = None self._read_stream_future = None - self._sync_ioloop_running = False - self.saved_data = [] - self._sync_read_in_progress = Semaphore() + self._saved_data = [] + self._read_in_progress = Lock() @tornado.gen.coroutine - def _read_sync(self, timeout): - yield self._sync_read_in_progress.acquire() + def _read(self, timeout, callback=None): + try: + yield self._read_in_progress.acquire(timeout=0) + except tornado.gen.TimeoutError: + raise tornado.gen.Return(None) + + log.debug('IPC Subscriber is starting reading') exc_to_raise = None ret = None - try: while True: if self._read_stream_future is None: @@ -630,10 +625,9 @@ def _read_sync(self, timeout): if timeout is None: wire_bytes = yield self._read_stream_future else: - future_with_timeout = FutureWithTimeout( - self.io_loop, self._read_stream_future, timeout) - wire_bytes = yield future_with_timeout - + wire_bytes = yield FutureWithTimeout(self.io_loop, + self._read_stream_future, + timeout) self._read_stream_future = None # Remove the timeout once we get some data or an exception @@ -642,15 +636,17 @@ def _read_sync(self, timeout): timeout = None self.unpacker.feed(wire_bytes) - first = True + first_sync_msg = True for framed_msg in self.unpacker: - if first: + if callback: + self.io_loop.spawn_callback(callback, framed_msg['body']) + elif first_sync_msg: ret = framed_msg['body'] - first = False + first_sync_msg = False else: - self.saved_data.append(framed_msg['body']) - if not first: - # We read at least one piece of data + 
self._saved_data.append(framed_msg['body']) + if not first_sync_msg: + # We read at least one piece of data and we're on sync run break except TornadoTimeoutError: # In the timeout case, just return None. @@ -665,14 +661,9 @@ def _read_sync(self, timeout): self._read_stream_future = None exc_to_raise = exc - if self._sync_ioloop_running: - # Stop the IO Loop so that self.io_loop.start() will return in - # read_sync(). - self.io_loop.spawn_callback(self.io_loop.stop) - if exc_to_raise is not None: raise exc_to_raise # pylint: disable=E0702 - self._sync_read_in_progress.release() + self._read_in_progress.release() raise tornado.gen.Return(ret) def read_sync(self, timeout=None): @@ -685,34 +676,9 @@ def read_sync(self, timeout=None): :return: message data if successful. None if timed out. Will raise an exception for all other error conditions. ''' - if self.saved_data: - return self.saved_data.pop(0) - - self._sync_ioloop_running = True - self._read_sync_future = self._read_sync(timeout) - self.io_loop.start() - self._sync_ioloop_running = False - - ret_future = self._read_sync_future - self._read_sync_future = None - return ret_future.result() - - @tornado.gen.coroutine - def _read_async(self, callback): - while not self.stream.closed(): - try: - self._read_stream_future = self.stream.read_bytes(4096, partial=True) - wire_bytes = yield self._read_stream_future - self._read_stream_future = None - self.unpacker.feed(wire_bytes) - for framed_msg in self.unpacker: - body = framed_msg['body'] - self.io_loop.spawn_callback(callback, body) - except tornado.iostream.StreamClosedError: - log.trace('Subscriber disconnected from IPC %s', self.socket_path) - break - except Exception as exc: - log.error('Exception occurred while Subscriber handling stream: %s', exc) + if self._saved_data: + return self._saved_data.pop(0) + return self.io_loop.run_sync(lambda: self._read(timeout)) @tornado.gen.coroutine def read_async(self, callback): @@ -730,7 +696,7 @@ def read_async(self, callback): except Exception as exc: log.error('Exception occurred while Subscriber connecting: %s', exc) yield tornado.gen.sleep(1) - yield self._read_async(callback) + yield self._read(None, callback) def close(self): ''' @@ -738,15 +704,16 @@ def close(self): Sockets and filehandles should be closed explicitly, to prevent leaks. ''' - if not self._closing: - IPCClient.close(self) - # This will prevent this message from showing up: - # '[ERROR ] Future exception was never retrieved: - # StreamClosedError' - if self._read_sync_future is not None and self._read_sync_future.done(): - self._read_sync_future.exception() - if self._read_stream_future is not None and self._read_stream_future.done(): - self._read_stream_future.exception() + if self._closing: + return + super(IPCMessageSubscriber, self).close() + # This will prevent this message from showing up: + # '[ERROR ] Future exception was never retrieved: + # StreamClosedError' + if self._read_stream_future is not None and self._read_stream_future.done(): + exc = self._read_stream_future.exception() + if exc and not isinstance(exc, tornado.iostream.StreamClosedError): + log.error("Read future returned exception %r", exc) def __del__(self): if IPCMessageSubscriber in globals(): From fabbcac8e662402e551f711a138e9d1b0e3eb8e9 Mon Sep 17 00:00:00 2001 From: Dmitry Kuzmenko Date: Wed, 17 Apr 2019 00:47:39 +0300 Subject: [PATCH 202/340] A couple of race conditions fixes and a test update. 
--- salt/transport/ipc.py | 17 +++++++++-------- tests/unit/transport/test_ipc.py | 13 +++++++++++++ 2 files changed, 22 insertions(+), 8 deletions(-) diff --git a/salt/transport/ipc.py b/salt/transport/ipc.py index fcb882fc29ac..18eff89b705f 100644 --- a/salt/transport/ipc.py +++ b/salt/transport/ipc.py @@ -20,7 +20,7 @@ import tornado.concurrent from tornado.locks import Lock from tornado.ioloop import IOLoop, TimeoutError as TornadoTimeoutError -from tornado.iostream import IOStream +from tornado.iostream import IOStream, StreamClosedError # Import Salt libs import salt.transport.client import salt.transport.frame @@ -176,7 +176,7 @@ def return_message(msg): for framed_msg in unpacker: body = framed_msg['body'] self.io_loop.spawn_callback(self.payload_handler, body, write_callback(stream, framed_msg['head'])) - except tornado.iostream.StreamClosedError: + except StreamClosedError: log.trace('Client disconnected from IPC %s', self.socket_path) break except socket.error as exc: @@ -501,7 +501,7 @@ def start(self): def _write(self, stream, pack): try: yield stream.write(pack) - except tornado.iostream.StreamClosedError: + except StreamClosedError: log.trace('Client disconnected from IPC %s', self.socket_path) self.streams.discard(stream) except Exception as exc: @@ -610,7 +610,7 @@ def __init__(self, socket_path, io_loop=None): @tornado.gen.coroutine def _read(self, timeout, callback=None): try: - yield self._read_in_progress.acquire(timeout=0) + yield self._read_in_progress.acquire(timeout=0.00000001) except tornado.gen.TimeoutError: raise tornado.gen.Return(None) @@ -652,7 +652,7 @@ def _read(self, timeout, callback=None): # In the timeout case, just return None. # Keep 'self._read_stream_future' alive. ret = None - except tornado.iostream.StreamClosedError as exc: + except StreamClosedError as exc: log.trace('Subscriber disconnected from IPC %s', self.socket_path) self._read_stream_future = None exc_to_raise = exc @@ -661,9 +661,10 @@ def _read(self, timeout, callback=None): self._read_stream_future = None exc_to_raise = exc + self._read_in_progress.release() + if exc_to_raise is not None: raise exc_to_raise # pylint: disable=E0702 - self._read_in_progress.release() raise tornado.gen.Return(ret) def read_sync(self, timeout=None): @@ -690,7 +691,7 @@ def read_async(self, callback): while not self.connected(): try: yield self.connect(timeout=5) - except tornado.iostream.StreamClosedError: + except StreamClosedError: log.trace('Subscriber closed stream on IPC %s before connect', self.socket_path) yield tornado.gen.sleep(1) except Exception as exc: @@ -712,7 +713,7 @@ def close(self): # StreamClosedError' if self._read_stream_future is not None and self._read_stream_future.done(): exc = self._read_stream_future.exception() - if exc and not isinstance(exc, tornado.iostream.StreamClosedError): + if exc and not isinstance(exc, StreamClosedError): log.error("Read future returned exception %r", exc) def __del__(self): diff --git a/tests/unit/transport/test_ipc.py b/tests/unit/transport/test_ipc.py index 3f5ad99f8a70..d7495b93c70c 100644 --- a/tests/unit/transport/test_ipc.py +++ b/tests/unit/transport/test_ipc.py @@ -253,3 +253,16 @@ def handler(raw): self.assertEqual(len(call_cnt), 2) self.assertEqual(call_cnt[0], 'TEST') self.assertEqual(call_cnt[1], 'TEST') + + def test_sync_reading(self): + # To be completely fair let's create 2 clients. 
+ client1 = self.sub_channel + client2 = self._get_sub_channel() + call_cnt = [] + + # Now let both waiting data at once + self.pub_channel.publish('TEST') + ret1 = client1.read_sync() + ret2 = client2.read_sync() + self.assertEqual(ret1, 'TEST') + self.assertEqual(ret2, 'TEST') From 70787c7c36f5580513e8b8d988c409f9fc08c1b2 Mon Sep 17 00:00:00 2001 From: zer0def Date: Wed, 17 Apr 2019 08:58:02 +0200 Subject: [PATCH 203/340] Address inconsistent usage of `get_xml` function in `virt` execution module. Fixes #52431 --- salt/modules/virt.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/salt/modules/virt.py b/salt/modules/virt.py index 50df89dbd244..4da6d4f4f05a 100644 --- a/salt/modules/virt.py +++ b/salt/modules/virt.py @@ -2435,7 +2435,9 @@ def get_xml(vm_, **kwargs): salt '*' virt.get_xml ''' conn = __get_conn(**kwargs) - xml_desc = _get_domain(conn, vm_).XMLDesc(0) + xml_desc = vm_.XMLDesc(0) if isinstance( + vm_, libvirt.virDomain + ) else _get_domain(conn, vm_).XMLDesc(0) conn.close() return xml_desc From 1d440d3755fe4e855e74840fa3249f8b99b7b18a Mon Sep 17 00:00:00 2001 From: Wayne Werner Date: Wed, 10 Apr 2019 13:57:20 -0500 Subject: [PATCH 204/340] Update CodeClimate settings https://codeclimate.com/github/saltstack/salt/pull/51461 had a matching block with mass of 38. Apparently that's too low because that code was not even the same at all. Bumping it up a bit to avoid false positives. Also disable argument count checks because we really don't need that here. --- .codeclimate.yml | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/.codeclimate.yml b/.codeclimate.yml index 49825b4bca4e..79988699bf44 100644 --- a/.codeclimate.yml +++ b/.codeclimate.yml @@ -1,13 +1,21 @@ -languages: - Ruby: false - JavaScript: false - Python: true - PHP: false +version: "2" -engines: +exclude_patterns: + - "*.js" + - "*.rb" + - "*.php" + +plugins: radon: enabled: true - exclude_paths: + exclude_patterns: - "templates/" config: threshold: "D" + +checks: + argument-count: + enabled: false + similar-code: + config: + threshold: 40 From 4d9c49bca37d3d6f66b738c8cb2e26eccad83164 Mon Sep 17 00:00:00 2001 From: Wayne Werner Date: Wed, 17 Apr 2019 11:22:50 -0400 Subject: [PATCH 205/340] We have long files --- .codeclimate.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.codeclimate.yml b/.codeclimate.yml index 79988699bf44..55491ac4e55e 100644 --- a/.codeclimate.yml +++ b/.codeclimate.yml @@ -16,6 +16,8 @@ plugins: checks: argument-count: enabled: false + file-lines: + enabled: false similar-code: config: threshold: 40 From e09be8425b5c3e3310607d3513c3513b74929985 Mon Sep 17 00:00:00 2001 From: Ch3LL Date: Wed, 17 Apr 2019 15:45:05 -0400 Subject: [PATCH 206/340] Update test_schema to mirror the new ValidationErrors in 3.0.0 --- tests/unit/utils/test_schema.py | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/tests/unit/utils/test_schema.py b/tests/unit/utils/test_schema.py index 677cd0778be6..a07fcbab6bce 100644 --- a/tests/unit/utils/test_schema.py +++ b/tests/unit/utils/test_schema.py @@ -506,7 +506,10 @@ class Requirements(BaseRequirements): {'personal_access_token': 'foo'}, Requirements.serialize() ) - self.assertIn('is not valid under any of the given schemas', excinfo.exception.message) + if JSONSCHEMA_VERSION >= _LooseVersion('3.0.0'): + self.assertIn('\'ssh_key_file\' is a required property', excinfo.exception.message) + else: + self.assertIn('is not valid under any of the given schemas', 
excinfo.exception.message) def test_boolean_config(self): item = schema.BooleanItem(title='Hungry', description='Are you hungry?') @@ -1730,7 +1733,10 @@ class TestConf(schema.Schema): with self.assertRaises(jsonschema.exceptions.ValidationError) as excinfo: jsonschema.validate({'item': {'sides': '4', 'color': 'blue'}}, TestConf.serialize()) - self.assertIn('is not valid under any of the given schemas', excinfo.exception.message) + if JSONSCHEMA_VERSION >= _LooseVersion('3.0.0'): + self.assertIn('\'4\' is not of type \'boolean\'', excinfo.exception.message) + else: + self.assertIn('is not valid under any of the given schemas', excinfo.exception.message) class TestConf(schema.Schema): item = schema.DictItem( @@ -1833,7 +1839,10 @@ class TestConf(schema.Schema): with self.assertRaises(jsonschema.exceptions.ValidationError) as excinfo: jsonschema.validate({'item': ['maybe']}, TestConf.serialize()) - self.assertIn('is not valid under any of the given schemas', excinfo.exception.message) + if JSONSCHEMA_VERSION >= _LooseVersion('3.0.0'): + self.assertIn('\'maybe\' is not one of [\'yes\']', excinfo.exception.message) + else: + self.assertIn('is not valid under any of the given schemas', excinfo.exception.message) with self.assertRaises(jsonschema.exceptions.ValidationError) as excinfo: jsonschema.validate({'item': 2}, TestConf.serialize()) @@ -1885,7 +1894,10 @@ class TestConf(schema.Schema): with self.assertRaises(jsonschema.exceptions.ValidationError) as excinfo: jsonschema.validate({'item': ['maybe']}, TestConf.serialize()) - self.assertIn('is not valid under any of the given schemas', excinfo.exception.message) + if JSONSCHEMA_VERSION >= _LooseVersion('3.0.0'): + self.assertIn('\'maybe\' is not one of [\'yes\']', excinfo.exception.message) + else: + self.assertIn('is not valid under any of the given schemas', excinfo.exception.message) with self.assertRaises(jsonschema.exceptions.ValidationError) as excinfo: jsonschema.validate({'item': 2}, TestConf.serialize()) From 6af1e1ac7117e4330bd840b6f49d8d8657d8af76 Mon Sep 17 00:00:00 2001 From: Wayne Werner Date: Wed, 17 Apr 2019 19:13:09 -0400 Subject: [PATCH 207/340] Add attrs for Ubuntu For some reason an empty attrs shows up in changes. This is will go ahead and add attrs to the expected changes if it exists. --- tests/integration/states/test_cron.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/integration/states/test_cron.py b/tests/integration/states/test_cron.py index 4c2d8d0958d7..ef9501ba3e70 100644 --- a/tests/integration/states/test_cron.py +++ b/tests/integration/states/test_cron.py @@ -70,6 +70,9 @@ def test_46881(self): id_ = 'cron_|-salt://issue-46881/cron_|-salt://issue-46881/cron_|-file' for key in ignored_keys: _expected[key] = ret[id_].get(key) + retchanges = ret[id_].get('changes', {}).get('attrs', None) + if retchanges is not None: + _expected['changes']['attrs'] = retchanges self.assertDictEqual( _expected, ret[id_], From b2698d10b8d404fb1948170f40132cd5be78e557 Mon Sep 17 00:00:00 2001 From: zer0def Date: Thu, 18 Apr 2019 12:45:05 +0200 Subject: [PATCH 208/340] Added unit test for `virt.get_xml()`. 
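[Illustrative note, not part of the patch] The test added below exercises the dispatch introduced by the earlier `get_xml` fix: the function now accepts either a domain name or an already-resolved virDomain object and returns the same XML either way. A stripped-down sketch of that pattern, using a stand-in class instead of libvirt (all names below are illustrative):

    class FakeDomain(object):
        '''Stand-in for libvirt.virDomain; only XMLDesc() matters here.'''
        def __init__(self, xml):
            self._xml = xml

        def XMLDesc(self, flags):
            return self._xml


    _DOMAINS = {'test-vm': FakeDomain('<domain>test-vm</domain>')}


    def get_xml(vm_):
        # Accept either a resolved domain object or a name to look up.
        dom = vm_ if isinstance(vm_, FakeDomain) else _DOMAINS[vm_]
        return dom.XMLDesc(0)


    assert get_xml('test-vm') == get_xml(_DOMAINS['test-vm'])
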
--- tests/unit/modules/test_virt.py | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py index 6546a0467c8b..b6ddd05634c8 100644 --- a/tests/unit/modules/test_virt.py +++ b/tests/unit/modules/test_virt.py @@ -38,6 +38,10 @@ class LibvirtMock(MagicMock): # pylint: disable=too-many-ancestors ''' Libvirt library mock ''' + class virDomain(MagicMock): + ''' + virDomain mock + ''' class libvirtError(Exception): ''' @@ -76,7 +80,7 @@ def set_mock_vm(self, name, xml): Define VM to use in tests ''' self.mock_conn.listDefinedDomains.return_value = [name] # pylint: disable=no-member - mock_domain = MagicMock() + mock_domain = self.mock_libvirt.virDomain() self.mock_conn.lookupByName.return_value = mock_domain # pylint: disable=no-member mock_domain.XMLDesc.return_value = xml # pylint: disable=no-member @@ -1437,6 +1441,23 @@ def test_get_nics(self): self.assertEqual('bridge', nic['type']) self.assertEqual('ac:de:48:b6:8b:59', nic['mac']) + def test_get_xml(self): + ''' + Test virt.get_xml() + ''' + xml = ''' + test-vm + + + + + + + ''' + domain = self.set_mock_vm("test-vm", xml) + self.assertEqual(xml, virt.get_xml('test-vm')) + self.assertEqual(xml, virt.get_xml(domain)) + def test_parse_qemu_img_info(self): ''' Make sure that qemu-img info output is properly parsed From 50a2d14a31ad8de34372d31488ba08915b3c9515 Mon Sep 17 00:00:00 2001 From: Wayne Werner Date: Thu, 18 Apr 2019 08:49:40 -0400 Subject: [PATCH 209/340] Use functools.wraps with decorators Otherwise, we're not fully wrapping the function so expected attributes (at least __opts__ anyway) go missing. Fixes #44639 --- salt/utils/decorators/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/utils/decorators/__init__.py b/salt/utils/decorators/__init__.py index 81d1812833ea..7048724aa910 100644 --- a/salt/utils/decorators/__init__.py +++ b/salt/utils/decorators/__init__.py @@ -344,6 +344,7 @@ def __call__(self, function): ''' _DeprecationDecorator.__call__(self, function) + @wraps(function) def _decorate(*args, **kwargs): ''' Decorator function. @@ -518,6 +519,7 @@ def __call__(self, function): ''' _DeprecationDecorator.__call__(self, function) + @wraps(function) def _decorate(*args, **kwargs): ''' Decorator function. From 6db2beb6c0a2d6b230bfa4465a4a1b793a3aab84 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Thu, 27 Sep 2018 23:12:58 -0500 Subject: [PATCH 210/340] Replace "pchanges" with "changes" to fix onchanges/prereq requisites Since "pchanges" was never supported in the state compiler, and "changes" is what these reqs always used, replacing "pchanges" with "changes" will allow those requisites to work in test mode. 
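[Illustrative note, not part of the patch] In practice this means a state that wants onchanges or prereq to fire in test mode simply reports its predicted changes under 'changes', as the updated writing.rst example further below does. A minimal custom-state sketch of the convention, relying on the __salt__/__opts__ dunders that Salt injects into state modules (the 'example.*' execution functions are hypothetical):

    def configured(name, value):
        '''
        Minimal sketch: predicted changes go under 'changes' (never
        'pchanges'), so onchanges/prereq can see them in test mode.
        '''
        ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''}
        current = __salt__['example.get_value'](name)   # hypothetical helper
        if current == value:
            ret['comment'] = '{0} is already in the desired state'.format(name)
            return ret
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = '{0} would be updated'.format(name)
            ret['changes'] = {'old': current, 'new': value}
            return ret
        __salt__['example.set_value'](name, value)      # hypothetical helper
        ret['comment'] = '{0} was updated'.format(name)
        ret['changes'] = {'old': current, 'new': value}
        return ret
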
Conflicts: - salt/states/file.py - salt/states/linux_acl.py - salt/utils/napalm.py - tests/integration/modules/test_state.py - tests/unit/states/test_file.py --- doc/ref/states/writing.rst | 7 +- salt/states/archive.py | 28 ++--- salt/states/boto_cloudfront.py | 4 +- salt/states/boto_s3.py | 2 +- salt/states/boto_sqs.py | 6 +- salt/states/chocolatey.py | 31 +++-- salt/states/dvs.py | 48 ++++--- salt/states/esxdatacenter.py | 24 ++-- salt/states/esxi.py | 27 ++-- salt/states/file.py | 147 ++++++++++------------ salt/states/glance_image.py | 8 +- salt/states/kernelpkg.py | 3 +- salt/states/keystone_domain.py | 9 +- salt/states/keystone_endpoint.py | 5 +- salt/states/keystone_group.py | 7 +- salt/states/keystone_project.py | 7 +- salt/states/keystone_role.py | 4 +- salt/states/keystone_service.py | 5 +- salt/states/keystone_user.py | 7 +- salt/states/linux_acl.py | 5 +- salt/states/net_napalm_yang.py | 2 - salt/states/neutron_network.py | 7 +- salt/states/neutron_secgroup.py | 7 +- salt/states/neutron_secgroup_rule.py | 6 +- salt/states/neutron_subnet.py | 7 +- salt/states/pbm.py | 53 ++++---- salt/states/snapper.py | 3 +- salt/states/solrcloud.py | 71 ++++------- salt/utils/napalm.py | 7 -- salt/utils/state.py | 4 - tests/integration/states/test_file.py | 2 - tests/unit/states/test_boto_cloudfront.py | 4 +- tests/unit/states/test_boto_sqs.py | 6 +- tests/unit/states/test_esxdatacenter.py | 10 +- tests/unit/states/test_file.py | 91 ++++++-------- tests/unit/states/test_linux_acl.py | 33 ++--- tests/unit/utils/test_state.py | 50 -------- 37 files changed, 290 insertions(+), 457 deletions(-) diff --git a/doc/ref/states/writing.rst b/doc/ref/states/writing.rst index 481bec1fbae0..dce5853eaf62 100644 --- a/doc/ref/states/writing.rst +++ b/doc/ref/states/writing.rst @@ -259,10 +259,6 @@ A State Module must return a dict containing the following keys/values: Prefer to keep line lengths short (use multiple lines as needed), and end with punctuation (e.g. a period) to delimit multiple comments. -The return data can also, include the **pchanges** key, this stands for -`predictive changes`. The **pchanges** key informs the State system what -changes are predicted to occur. - .. note:: States should not return data which cannot be serialized such as frozensets. @@ -448,7 +444,6 @@ Example state module 'changes': {}, 'result': False, 'comment': '', - 'pchanges': {}, } # Start with basic error-checking. Do all the passed parameters make sense @@ -469,7 +464,7 @@ Example state module # in ``test=true`` mode. 
if __opts__['test'] == True: ret['comment'] = 'The state of "{0}" will be changed.'.format(name) - ret['pchanges'] = { + ret['changes'] = { 'old': current_state, 'new': 'Description, diff, whatever of the new state', } diff --git a/salt/states/archive.py b/salt/states/archive.py index 5bae1688643e..4cd5525ab55a 100644 --- a/salt/states/archive.py +++ b/salt/states/archive.py @@ -1436,25 +1436,19 @@ def extracted(name, dir_result = __states__['file.directory'](full_path, user=user, group=group, - recurse=recurse, - test=__opts__['test']) + recurse=recurse) log.debug('file.directory: %s', dir_result) - if __opts__['test']: - if dir_result.get('pchanges'): - ret['changes']['updated ownership'] = True - else: - try: - if dir_result['result']: - if dir_result['changes']: - ret['changes']['updated ownership'] = True - else: - enforce_failed.append(full_path) - except (KeyError, TypeError): - log.warning( - 'Bad state return %s for file.directory state on %s', - dir_result, dirname - ) + if dir_result.get('changes'): + ret['changes']['updated ownership'] = True + try: + if not dir_result['result']: + enforce_failed.append(full_path) + except (KeyError, TypeError): + log.warning( + 'Bad state return %s for file.directory state on %s', + dir_result, dirname + ) for filename in enforce_files + enforce_links: full_path = os.path.join(name, filename) diff --git a/salt/states/boto_cloudfront.py b/salt/states/boto_cloudfront.py index 27c6260e9d91..d29d3df23592 100644 --- a/salt/states/boto_cloudfront.py +++ b/salt/states/boto_cloudfront.py @@ -135,7 +135,7 @@ def present( if __opts__['test']: ret['result'] = None ret['comment'] = 'Distribution {0} set for creation.'.format(name) - ret['pchanges'] = {'old': None, 'new': name} + ret['changes'] = {'old': None, 'new': name} return ret res = __salt__['boto_cloudfront.create_distribution']( @@ -203,7 +203,7 @@ def _yaml_safe_dump(attrs): 'Distribution {0} set for new config:'.format(name), changes_diff, ]) - ret['pchanges'] = {'diff': changes_diff} + ret['changes'] = {'diff': changes_diff} return ret res = __salt__['boto_cloudfront.update_distribution']( diff --git a/salt/states/boto_s3.py b/salt/states/boto_s3.py index a75fe71afa1f..49e77510cf6d 100644 --- a/salt/states/boto_s3.py +++ b/salt/states/boto_s3.py @@ -282,7 +282,7 @@ def _yaml_safe_dump(attrs): ret['result'] = None ret['comment'] = 'S3 object {0} set to be {1}d.'.format(name, action) ret['comment'] += '\nChanges:\n{0}'.format(changes_diff) - ret['pchanges'] = {'diff': changes_diff} + ret['changes'] = {'diff': changes_diff} return ret r = __salt__['boto_s3.upload_file']( diff --git a/salt/states/boto_sqs.py b/salt/states/boto_sqs.py index 9f42dedf09ad..964c6e863ec8 100644 --- a/salt/states/boto_sqs.py +++ b/salt/states/boto_sqs.py @@ -136,7 +136,7 @@ def present( ret['comment'].append( 'SQS queue {0} is set to be created.'.format(name), ) - ret['pchanges'] = {'old': None, 'new': name} + ret['changes'] = {'old': None, 'new': name} return ret r = __salt__['boto_sqs.create']( @@ -225,7 +225,7 @@ def _yaml_safe_dump(attrs): attributes_diff, ) ) - ret['pchanges'] = {'attributes': {'diff': attributes_diff}} + ret['changes'] = {'attributes': {'diff': attributes_diff}} return ret r = __salt__['boto_sqs.set_attributes']( @@ -300,7 +300,7 @@ def absent( if __opts__['test']: ret['result'] = None ret['comment'] = 'SQS queue {0} is set to be removed.'.format(name) - ret['pchanges'] = {'old': name, 'new': None} + ret['changes'] = {'old': name, 'new': None} return ret r = __salt__['boto_sqs.delete']( diff --git 
a/salt/states/chocolatey.py b/salt/states/chocolatey.py index 5f2e6e9842be..021e9ac68b2d 100644 --- a/salt/states/chocolatey.py +++ b/salt/states/chocolatey.py @@ -336,7 +336,6 @@ def upgraded(name, ret = {'name': name, 'result': True, 'changes': {}, - 'pchanges': {}, 'comment': ''} # Get list of currently installed packages @@ -346,12 +345,10 @@ def upgraded(name, # Package not installed if name.lower() not in [package.lower() for package in pre_install.keys()]: if version: - ret['pchanges'] = { - name: 'Version {0} will be installed'.format(version) - } + ret['changes'][name] = 'Version {0} will be installed'.format(version) ret['comment'] = 'Install version {0}'.format(version) else: - ret['pchanges'] = {name: 'Latest version will be installed'} + ret['changes'][name] = 'Latest version will be installed' ret['comment'] = 'Install latest version' # Package installed @@ -378,8 +375,7 @@ def upgraded(name, oper="==", ver2=version): if force: - ret['pchanges'] = { - name: 'Version {0} will be reinstalled'.format(version)} + ret['changes'][name] = 'Version {0} will be reinstalled'.format(version) ret['comment'] = 'Reinstall {0} {1}'.format(full_name, version) else: ret['comment'] = '{0} {1} is already installed'.format( @@ -389,11 +385,9 @@ def upgraded(name, # If installed version is older than new version if salt.utils.versions.compare( ver1=installed_version, oper="<", ver2=version): - ret['pchanges'] = { - name: 'Version {0} will be upgraded to Version {1}'.format( - installed_version, version - ) - } + ret['changes'][name] = 'Version {0} will be upgraded to Version {1}'.format( + installed_version, version + ) ret['comment'] = 'Upgrade {0} {1} to {2}'.format( full_name, installed_version, version ) @@ -409,13 +403,13 @@ def upgraded(name, else: ret['comment'] = 'No version found to install' - # Return if `test=True` - if __opts__['test']: - ret['result'] = None + # Return if there are no changes to be made + if not ret['changes']: return ret - # Return if there are no changes to be made - if not ret['pchanges']: + # Return if running in test mode + if __opts__['test']: + ret['result'] = None return ret # Install the package @@ -439,6 +433,9 @@ def upgraded(name, # Get list of installed packages after 'chocolatey.install' post_install = __salt__['chocolatey.list'](local_only=True) + # Prior to this, ret['changes'] would have contained expected changes, + # replace them with the actual changes now that we have completed the + # installation. 
ret['changes'] = salt.utils.data.compare_dicts(pre_install, post_install) return ret diff --git a/salt/states/dvs.py b/salt/states/dvs.py index 421254a32753..1ff39cde00eb 100644 --- a/salt/states/dvs.py +++ b/salt/states/dvs.py @@ -401,13 +401,11 @@ def dvs_configured(name, dvs): ''.format(dvs_name, datacenter_name)), 'result': True}) else: - ret.update({'comment': '\n'.join(comments)}) - if __opts__['test']: - ret.update({'pchanges': changes, - 'result': None}) - else: - ret.update({'changes': changes, - 'result': True}) + ret.update({ + 'comment': '\n'.join(comments), + 'changes': changes, + 'result': None if __opts__['test'] else True, + }) return ret @@ -512,8 +510,10 @@ def portgroups_configured(name, dvs, portgroups): log.info('Running state {0} on DVS \'{1}\', datacenter ' '\'{2}\''.format(name, dvs, datacenter)) changes_required = False - ret = {'name': name, 'changes': {}, 'result': None, 'comment': None, - 'pchanges': {}} + ret = {'name': name, + 'changes': {}, + 'result': None, + 'comment': None} comments = [] changes = {} changes_required = False @@ -623,13 +623,11 @@ def portgroups_configured(name, dvs, portgroups): 'Nothing to be done.'.format(dvs, datacenter)), 'result': True}) else: - ret.update({'comment': '\n'.join(comments)}) - if __opts__['test']: - ret.update({'pchanges': changes, - 'result': None}) - else: - ret.update({'changes': changes, - 'result': True}) + ret.update({ + 'comment': '\n'.join(comments), + 'changes': changes, + 'result': None if __opts__['test'] else True, + }) return ret @@ -649,8 +647,10 @@ def uplink_portgroup_configured(name, dvs, uplink_portgroup): log.info('Running {0} on DVS \'{1}\', datacenter \'{2}\'' ''.format(name, dvs, datacenter)) changes_required = False - ret = {'name': name, 'changes': {}, 'result': None, 'comment': None, - 'pchanges': {}} + ret = {'name': name, + 'changes': {}, + 'result': None, + 'comment': None} comments = [] changes = {} changes_required = False @@ -708,11 +708,9 @@ def uplink_portgroup_configured(name, dvs, uplink_portgroup): 'Nothing to be done.'.format(dvs, datacenter)), 'result': True}) else: - ret.update({'comment': '\n'.join(comments)}) - if __opts__['test']: - ret.update({'pchanges': changes, - 'result': None}) - else: - ret.update({'changes': changes, - 'result': True}) + ret.update({ + 'comment': '\n'.join(comments), + 'changes': changes, + 'result': None if __opts__['test'] else True, + }) return ret diff --git a/salt/states/esxdatacenter.py b/salt/states/esxdatacenter.py index 09c69750ed6f..ae83b4d37174 100644 --- a/salt/states/esxdatacenter.py +++ b/salt/states/esxdatacenter.py @@ -89,11 +89,11 @@ def datacenter_configured(name): dc_name = name log.info('Running datacenter_configured for datacenter \'{0}\'' ''.format(dc_name)) - ret = {'name': name, 'changes': {}, 'pchanges': {}, - 'result': None, 'comment': 'Default'} + ret = {'name': name, + 'changes': {}, + 'result': None, + 'comment': 'Default'} comments = [] - changes = {} - pchanges = {} si = None try: si = __salt__['vsphere.get_service_instance_via_proxy']() @@ -103,27 +103,19 @@ def datacenter_configured(name): if __opts__['test']: comments.append('State will create ' 'datacenter \'{0}\'.'.format(dc_name)) - log.info(comments[-1]) - pchanges.update({'new': {'name': dc_name}}) else: log.debug('Creating datacenter \'{0}\'. 
'.format(dc_name)) __salt__['vsphere.create_datacenter'](dc_name, si) comments.append('Created datacenter \'{0}\'.'.format(dc_name)) - log.info(comments[-1]) - changes.update({'new': {'name': dc_name}}) + log.info(comments[-1]) + ret['changes'].update({'new': {'name': dc_name}}) else: comments.append('Datacenter \'{0}\' already exists. Nothing to be ' 'done.'.format(dc_name)) log.info(comments[-1]) __salt__['vsphere.disconnect'](si) - if __opts__['test'] and pchanges: - ret_status = None - else: - ret_status = True - ret.update({'result': ret_status, - 'comment': '\n'.join(comments), - 'changes': changes, - 'pchanges': pchanges}) + ret['comment'] = '\n'.join(comments) + ret['result'] = None if __opts__['test'] and ret['changes'] else True return ret except salt.exceptions.CommandExecutionError as exc: log.error('Error: {}'.format(exc)) diff --git a/salt/states/esxi.py b/salt/states/esxi.py index 486d9df53e79..8728224716dd 100644 --- a/salt/states/esxi.py +++ b/salt/states/esxi.py @@ -1070,8 +1070,10 @@ def diskgroups_configured(name, diskgroups, erase_disks=False): else proxy_details['esxi_host'] log.info('Running state {0} for host \'{1}\''.format(name, hostname)) # Variable used to return the result of the invocation - ret = {'name': name, 'result': None, 'changes': {}, - 'pchanges': {}, 'comments': None} + ret = {'name': name, + 'result': None, + 'changes': {}, + 'comments': None} # Signals if errors have been encountered errors = False # Signals if changes are required @@ -1294,12 +1296,8 @@ def diskgroups_configured(name, diskgroups, erase_disks=False): None if __opts__['test'] else # running in test mode False if errors else True) # found errors; defaults to True ret.update({'result': result, - 'comment': '\n'.join(comments)}) - if changes: - if __opts__['test']: - ret['pchanges'] = diskgroup_changes - elif changes: - ret['changes'] = diskgroup_changes + 'comment': '\n'.join(comments), + 'changes': diskgroup_changes}) return ret @@ -1387,8 +1385,10 @@ def host_cache_configured(name, enabled, datastore, swap_size='100%', else proxy_details['esxi_host'] log.trace('hostname = %s', hostname) log.info('Running host_cache_swap_configured for host \'%s\'', hostname) - ret = {'name': hostname, 'comment': 'Default comments', - 'result': None, 'changes': {}, 'pchanges': {}} + ret = {'name': hostname, + 'comment': 'Default comments', + 'result': None, + 'changes': {}} result = None if __opts__['test'] else True # We assume success needs_setting = False comments = [] @@ -1582,11 +1582,8 @@ def host_cache_configured(name, enabled, datastore, swap_size='100%', __salt__['vsphere.disconnect'](si) log.info(comments[-1]) ret.update({'comment': '\n'.join(comments), - 'result': result}) - if __opts__['test']: - ret['pchanges'] = changes - else: - ret['changes'] = changes + 'result': result, + 'changes': changes}) return ret except CommandExecutionError as err: log.error('Error: %s.', err) diff --git a/salt/states/file.py b/salt/states/file.py index 79447b5ecfab..0e1381e39356 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -1031,36 +1031,36 @@ def _symlink_check(name, target, force, user, group, win_owner): ''' Check the symlink function ''' - pchanges = {} + changes = {} if not os.path.exists(name) and not __salt__['file.is_link'](name): - pchanges['new'] = name + changes['new'] = name return None, 'Symlink {0} to {1} is set for creation'.format( name, target - ), pchanges + ), changes if __salt__['file.is_link'](name): if __salt__['file.readlink'](name) != target: - pchanges['change'] = 
name + changes['change'] = name return None, 'Link {0} target is set to be changed to {1}'.format( name, target - ), pchanges + ), changes else: result = True msg = 'The symlink {0} is present'.format(name) if not _check_symlink_ownership(name, user, group, win_owner): result = None - pchanges['ownership'] = '{0}:{1}'.format(*_get_symlink_ownership(name)) + changes['ownership'] = '{0}:{1}'.format(*_get_symlink_ownership(name)) msg += ( ', but the ownership of the symlink would be changed ' 'from {2}:{3} to {0}:{1}' ).format(user, group, *_get_symlink_ownership(name)) - return result, msg, pchanges + return result, msg, changes else: if force: return None, ('The file or directory {0} is set for removal to ' 'make way for a new symlink targeting {1}' - .format(name, target)), pchanges + .format(name, target)), changes return False, ('File or directory exists where the symlink {0} ' - 'should be. Did you mean to use force?'.format(name)), pchanges + 'should be. Did you mean to use force?'.format(name)), changes def _test_owner(kwargs, user=None): @@ -1222,12 +1222,12 @@ def _shortcut_check(name, ''' Check the shortcut function ''' - pchanges = {} + changes = {} if not os.path.exists(name): - pchanges['new'] = name + changes['new'] = name return None, 'Shortcut "{0}" to "{1}" is set for creation'.format( name, target - ), pchanges + ), changes if os.path.isfile(name): with salt.utils.winapi.Com(): @@ -1248,28 +1248,28 @@ def _shortcut_check(name, ) if not all(state_checks): - pchanges['change'] = name + changes['change'] = name return None, 'Shortcut "{0}" target is set to be changed to "{1}"'.format( name, target - ), pchanges + ), changes else: result = True msg = 'The shortcut "{0}" is present'.format(name) if not _check_shortcut_ownership(name, user): result = None - pchanges['ownership'] = '{0}'.format(_get_shortcut_ownership(name)) + changes['ownership'] = '{0}'.format(_get_shortcut_ownership(name)) msg += ( ', but the ownership of the shortcut would be changed ' 'from {1} to {0}' ).format(user, _get_shortcut_ownership(name)) - return result, msg, pchanges + return result, msg, changes else: if force: return None, ('The link or directory "{0}" is set for removal to ' 'make way for a new shortcut targeting "{1}"' - .format(name, target)), pchanges + .format(name, target)), changes return False, ('Link or directory exists where the shortcut "{0}" ' - 'should be. Did you mean to use force?'.format(name)), pchanges + 'should be. Did you mean to use force?'.format(name)), changes def _makedirs(name, @@ -1499,12 +1499,12 @@ def symlink( msg += '.' 
return _error(ret, msg) - presult, pcomment, ret['pchanges'] = _symlink_check(name, - target, - force, - user, - group, - win_owner) + presult, pcomment, pchanges = _symlink_check(name, + target, + force, + user, + group, + win_owner) if not os.path.isdir(os.path.dirname(name)): if makedirs: @@ -1537,6 +1537,7 @@ def symlink( if __opts__['test']: ret['result'] = presult ret['comment'] = pcomment + ret['changes'] = pchanges return ret if __salt__['file.is_link'](name): @@ -1659,7 +1660,6 @@ def absent(name, ret = {'name': name, 'changes': {}, - 'pchanges': {}, 'result': True, 'comment': ''} if not name: @@ -1671,9 +1671,9 @@ def absent(name, if name == '/': return _error(ret, 'Refusing to make "/" absent') if os.path.isfile(name) or os.path.islink(name): - ret['pchanges']['removed'] = name if __opts__['test']: ret['result'] = None + ret['changes']['removed'] = name ret['comment'] = 'File {0} is set for removal'.format(name) return ret try: @@ -1688,9 +1688,9 @@ def absent(name, return _error(ret, '{0}'.format(exc)) elif os.path.isdir(name): - ret['pchanges']['removed'] = name if __opts__['test']: ret['result'] = None + ret['changes']['removed'] = name ret['comment'] = 'Directory {0} is set for removal'.format(name) return ret try: @@ -1849,7 +1849,6 @@ def exists(name, ret = {'name': name, 'changes': {}, - 'pchanges': {}, 'result': True, 'comment': ''} if not name: @@ -1874,7 +1873,6 @@ def missing(name, ret = {'name': name, 'changes': {}, - 'pchanges': {}, 'result': True, 'comment': ''} if not name: @@ -2483,7 +2481,6 @@ def managed(name, name = os.path.expanduser(name) ret = {'changes': {}, - 'pchanges': {}, 'comment': '', 'name': name, 'result': True} @@ -3226,7 +3223,6 @@ def directory(name, name = os.path.expanduser(name) ret = {'name': name, 'changes': {}, - 'pchanges': {}, 'result': True, 'comment': ''} if not name: @@ -3300,19 +3296,19 @@ def directory(name, # Remove whatever is in the way if os.path.isfile(name): if __opts__['test']: - ret['pchanges']['forced'] = 'File was forcibly replaced' + ret['changes']['forced'] = 'File would be forcibly replaced' else: os.remove(name) ret['changes']['forced'] = 'File was forcibly replaced' elif __salt__['file.is_link'](name): if __opts__['test']: - ret['pchanges']['forced'] = 'Symlink was forcibly replaced' + ret['changes']['forced'] = 'Symlink would be forcibly replaced' else: __salt__['file.remove'](name) ret['changes']['forced'] = 'Symlink was forcibly replaced' else: if __opts__['test']: - ret['pchanges']['forced'] = 'Directory was forcibly replaced' + ret['changes']['forced'] = 'Directory would be forcibly replaced' else: __salt__['file.remove'](name) ret['changes']['forced'] = 'Directory was forcibly replaced' @@ -3341,11 +3337,11 @@ def directory(name, require, exclude_pat, max_depth, follow_symlinks) if pchanges: - ret['pchanges'].update(pchanges) + ret['changes'].update(pchanges) # Don't run through the reset of the function if there are no changes to be # made - if not ret['pchanges'] or __opts__['test']: + if __opts__['test'] or not ret['changes']: ret['result'] = presult ret['comment'] = pcomment return ret @@ -3764,7 +3760,6 @@ def recurse(name, ret = { 'name': name, 'changes': {}, - 'pchanges': {}, 'result': True, 'comment': {} # { path: [comment, ...] 
} } @@ -4063,7 +4058,6 @@ def retention_schedule(name, retain, strptime_format=None, timezone=None): name = os.path.expanduser(name) ret = {'name': name, 'changes': {'retained': [], 'deleted': [], 'ignored': []}, - 'pchanges': {'retained': [], 'deleted': [], 'ignored': []}, 'result': True, 'comment': ''} if not name: @@ -4173,7 +4167,7 @@ def get_first_n_at_depth(fwt, depth, n): 'deleted': deletable_files, 'ignored': sorted(list(ignored_files), reverse=True), } - ret['pchanges'] = changes + ret['changes'] = changes # TODO: track and report how much space was / would be reclaimed if __opts__['test']: @@ -4314,7 +4308,6 @@ def line(name, content=None, match=None, mode=None, location=None, name = os.path.expanduser(name) ret = {'name': name, 'changes': {}, - 'pchanges': {}, 'result': True, 'comment': ''} if not name: @@ -4348,14 +4341,13 @@ def line(name, content=None, match=None, mode=None, location=None, before=before, after=after, show_changes=show_changes, backup=backup, quiet=quiet, indent=indent) if changes: - ret['pchanges']['diff'] = changes + ret['changes']['diff'] = changes if __opts__['test']: ret['result'] = None - ret['comment'] = 'Changes would be made:\ndiff:\n{0}'.format(changes) + ret['comment'] = 'Changes would be made' else: ret['result'] = True ret['comment'] = 'Changes were made' - ret['changes'] = {'diff': changes} else: ret['result'] = True ret['comment'] = 'No changes needed to be made' @@ -4505,7 +4497,6 @@ def replace(name, ret = {'name': name, 'changes': {}, - 'pchanges': {}, 'result': True, 'comment': ''} if not name: @@ -4535,14 +4526,13 @@ def replace(name, backslash_literal=backslash_literal) if changes: - ret['pchanges']['diff'] = changes + ret['changes']['diff'] = changes if __opts__['test']: ret['result'] = None - ret['comment'] = 'Changes would have been made:\ndiff:\n{0}'.format(changes) + ret['comment'] = 'Changes would have been made' else: ret['result'] = True ret['comment'] = 'Changes were made' - ret['changes'] = {'diff': changes} else: ret['result'] = True ret['comment'] = 'No changes needed to be made' @@ -4764,7 +4754,6 @@ def blockreplace( ret = {'name': name, 'changes': {}, - 'pchanges': {}, 'result': False, 'comment': ''} if not name: @@ -4837,13 +4826,11 @@ def blockreplace( return ret if changes: - ret['pchanges'] = {'diff': changes} + ret['changes']['diff'] = changes if __opts__['test']: - ret['changes']['diff'] = ret['pchanges']['diff'] ret['result'] = None ret['comment'] = 'Changes would be made' else: - ret['changes']['diff'] = ret['pchanges']['diff'] ret['result'] = True ret['comment'] = 'Changes were made' else: @@ -4894,7 +4881,6 @@ def comment(name, regex, char='#', backup='.bak'): ret = {'name': name, 'changes': {}, - 'pchanges': {}, 'result': False, 'comment': ''} if not name: @@ -4924,8 +4910,8 @@ def comment(name, regex, char='#', backup='.bak'): else: return _error(ret, '{0}: Pattern not found'.format(unanchor_regex)) - ret['pchanges'][name] = 'updated' if __opts__['test']: + ret['changes'][name] = 'updated' ret['comment'] = 'File {0} is set to be updated'.format(name) ret['result'] = None return ret @@ -5004,7 +4990,6 @@ def uncomment(name, regex, char='#', backup='.bak'): ret = {'name': name, 'changes': {}, - 'pchanges': {}, 'result': False, 'comment': ''} if not name: @@ -5031,26 +5016,20 @@ def uncomment(name, regex, char='#', backup='.bak'): else: return _error(ret, '{0}: Pattern not found'.format(regex)) - ret['pchanges'][name] = 'updated' if __opts__['test']: + ret['changes'][name] = 'updated' ret['comment'] = 'File {0} is 
set to be updated'.format(name) ret['result'] = None return ret with salt.utils.files.fopen(name, 'rb') as fp_: - slines = fp_.read() - if six.PY3: - slines = slines.decode(__salt_system_encoding__) - slines = slines.splitlines(True) + slines = salt.utils.data.decode(fp_.readlines()) # Perform the edit __salt__['file.comment_line'](name, regex, char, False, backup) with salt.utils.files.fopen(name, 'rb') as fp_: - nlines = fp_.read() - if six.PY3: - nlines = nlines.decode(__salt_system_encoding__) - nlines = nlines.splitlines(True) + nlines = salt.utils.data.decode(fp_.readlines()) # Check the result ret['result'] = __salt__['file.search']( @@ -5214,10 +5193,9 @@ def append(name, .. versionadded:: 0.9.5 ''' ret = {'name': name, - 'changes': {}, - 'pchanges': {}, - 'result': False, - 'comment': ''} + 'changes': {}, + 'result': False, + 'comment': ''} if not name: return _error(ret, 'Must provide name to file.append') @@ -5252,12 +5230,12 @@ def append(name, except CommandExecutionError as exc: return _error(ret, 'Drive {0} is not mapped'.format(exc.message)) - if salt.utils.platform.is_windows(): - check_res, check_msg, ret['pchanges'] = _check_directory_win(dirname) - else: - check_res, check_msg, ret['pchanges'] = _check_directory(dirname) + check_res, check_msg, check_changes = _check_directory_win(dirname) \ + if salt.utils.platform.is_windows() \ + else _check_directory(dirname) if not check_res: + ret['changes'] = check_changes return _error(ret, check_msg) check_res, check_msg = _check_file(name) @@ -5506,7 +5484,6 @@ def prepend(name, ret = {'name': name, 'changes': {}, - 'pchanges': {}, 'result': False, 'comment': ''} if not name: @@ -5536,11 +5513,12 @@ def prepend(name, except CommandExecutionError as exc: return _error(ret, 'Drive {0} is not mapped'.format(exc.message)) - if salt.utils.platform.is_windows(): - check_res, check_msg, ret['pchanges'] = _check_directory_win(dirname) - else: - check_res, check_msg, ret['pchanges'] = _check_directory(dirname) + check_res, check_msg, check_changes = _check_directory_win(dirname) \ + if salt.utils.platform.is_windows() \ + else _check_directory(dirname) + if not check_res: + ret['changes'] = check_changes return _error(ret, check_msg) check_res, check_msg = _check_file(name) @@ -7399,17 +7377,18 @@ def shortcut( msg += '.' 
return _error(ret, msg) - presult, pcomment, ret['pchanges'] = _shortcut_check(name, - target, - arguments, - working_dir, - description, - icon_location, - force, - user) + presult, pcomment, pchanges = _shortcut_check(name, + target, + arguments, + working_dir, + description, + icon_location, + force, + user) if __opts__['test']: ret['result'] = presult ret['comment'] = pcomment + ret['changes'] = pchanges return ret if not os.path.isdir(os.path.dirname(name)): diff --git a/salt/states/glance_image.py b/salt/states/glance_image.py index aff285a48d71..d9d9e971c3c2 100644 --- a/salt/states/glance_image.py +++ b/salt/states/glance_image.py @@ -52,15 +52,16 @@ def present(name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['glanceng.setup_clouds'](auth) image = __salt__['glanceng.image_get'](name=name) if not image: - if __opts__['test'] is True: + if __opts__['test']: ret['result'] = None ret['changes'] = kwargs - ret['pchanges'] = ret['changes'] ret['comment'] = 'Image {} will be created.'.format(name) return ret @@ -91,10 +92,9 @@ def absent(name, auth=None): image = __salt__['glanceng.image_get'](name=name) if image: - if __opts__['test'] is True: + if __opts__['test']: ret['result'] = None ret['changes'] = {'name': name} - ret['pchanges'] = ret['changes'] ret['comment'] = 'Image {} will be deleted.'.format(name) return ret diff --git a/salt/states/kernelpkg.py b/salt/states/kernelpkg.py index 6d4fd56357c2..7ed558cd388e 100644 --- a/salt/states/kernelpkg.py +++ b/salt/states/kernelpkg.py @@ -144,8 +144,7 @@ def latest_active(name, at_time=None, **kwargs): # pylint: disable=unused-argum if __opts__['test']: ret['result'] = None - ret['changes'] = {} - ret['pchanges'] = {'kernel': { + ret['changes'] = {'kernel': { 'old': active, 'new': latest }} diff --git a/salt/states/keystone_domain.py b/salt/states/keystone_domain.py index 27d98657e700..095a181cc037 100644 --- a/salt/states/keystone_domain.py +++ b/salt/states/keystone_domain.py @@ -56,15 +56,16 @@ def present(name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['keystoneng.setup_clouds'](auth) domain = __salt__['keystoneng.domain_get'](name=name) if not domain: - if __opts__['test'] is True: + if __opts__['test']: ret['result'] = None ret['changes'] = kwargs - ret['pchanges'] = ret['changes'] ret['comment'] = 'Domain {} will be created.'.format(name) return ret @@ -76,10 +77,9 @@ def present(name, auth=None, **kwargs): changes = __salt__['keystoneng.compare_changes'](domain, **kwargs) if changes: - if __opts__['test'] is True: + if __opts__['test']: ret['result'] = None ret['changes'] = changes - ret['pchanges'] = ret['changes'] ret['comment'] = 'Domain {} will be updated.'.format(name) return ret @@ -111,7 +111,6 @@ def absent(name, auth=None): if __opts__['test'] is True: ret['result'] = None ret['changes'] = {'name': name} - ret['pchanges'] = ret['changes'] ret['comment'] = 'Domain {} will be deleted.'.format(name) return ret diff --git a/salt/states/keystone_endpoint.py b/salt/states/keystone_endpoint.py index fb6151519d31..7b19913572a9 100644 --- a/salt/states/keystone_endpoint.py +++ b/salt/states/keystone_endpoint.py @@ -101,6 +101,8 @@ def present(name, service_name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['keystoneng.setup_clouds'](auth) success, val = _, endpoint = _common(ret, name, service_name, kwargs) @@ 
-111,7 +113,6 @@ def present(name, service_name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = kwargs - ret['pchanges'] = ret['changes'] ret['comment'] = 'Endpoint will be created.' return ret @@ -131,7 +132,6 @@ def present(name, service_name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = changes - ret['pchanges'] = ret['changes'] ret['comment'] = 'Endpoint will be updated.' return ret @@ -174,7 +174,6 @@ def absent(name, service_name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = {'id': endpoint.id} - ret['pchanges'] = ret['changes'] ret['comment'] = 'Endpoint will be deleted.' return ret diff --git a/salt/states/keystone_group.py b/salt/states/keystone_group.py index cf636e40d341..cfd4af02c0aa 100644 --- a/salt/states/keystone_group.py +++ b/salt/states/keystone_group.py @@ -73,6 +73,8 @@ def present(name, auth=None, **kwargs): __salt__['keystoneng.setup_cloud'](auth) + kwargs = __utils__['args.clean_kwargs'](**kwargs) + kwargs['name'] = name group = _common(kwargs) @@ -80,7 +82,6 @@ def present(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = kwargs - ret['pchanges'] = ret['changes'] ret['comment'] = 'Group will be created.' return ret @@ -94,7 +95,6 @@ def present(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = changes - ret['pchanges'] = ret['changes'] ret['comment'] = 'Group will be updated.' return ret @@ -120,6 +120,8 @@ def absent(name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['keystoneng.setup_cloud'](auth) kwargs['name'] = name @@ -129,7 +131,6 @@ def absent(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = {'id': group.id} - ret['pchanges'] = ret['changes'] ret['comment'] = 'Group will be deleted.' return ret diff --git a/salt/states/keystone_project.py b/salt/states/keystone_project.py index 94a6cc52acec..bb9327b5db0b 100644 --- a/salt/states/keystone_project.py +++ b/salt/states/keystone_project.py @@ -72,6 +72,8 @@ def present(name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['keystoneng.setup_clouds'](auth) kwargs['name'] = name @@ -81,7 +83,6 @@ def present(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = kwargs - ret['pchanges'] = ret['changes'] ret['comment'] = 'Project will be created.' return ret @@ -95,7 +96,6 @@ def present(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = changes - ret['pchanges'] = ret['changes'] ret['comment'] = 'Project will be updated.' return ret @@ -121,6 +121,8 @@ def absent(name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['keystoneng.setup_clouds'](auth) kwargs['name'] = name @@ -130,7 +132,6 @@ def absent(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = {'id': project.id} - ret['pchanges'] = ret['changes'] ret['comment'] = 'Project will be deleted.' 
return ret diff --git a/salt/states/keystone_role.py b/salt/states/keystone_role.py index 394a51cfb7e3..d90d45f0a2bc 100644 --- a/salt/states/keystone_role.py +++ b/salt/states/keystone_role.py @@ -52,6 +52,8 @@ def present(name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['keystoneng.setup_clouds'](auth) kwargs['name'] = name @@ -61,7 +63,6 @@ def present(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = kwargs - ret['pchanges'] = ret['changes'] ret['comment'] = 'Role will be created.' return ret @@ -95,7 +96,6 @@ def absent(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = {'id': role.id} - ret['pchanges'] = ret['changes'] ret['comment'] = 'Role will be deleted.' return ret diff --git a/salt/states/keystone_service.py b/salt/states/keystone_service.py index ac62b5958469..faca6d623573 100644 --- a/salt/states/keystone_service.py +++ b/salt/states/keystone_service.py @@ -61,6 +61,8 @@ def present(name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['keystoneng.setup_clouds'](auth) service = __salt__['keystoneng.service_get'](name=name) @@ -69,7 +71,6 @@ def present(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = kwargs - ret['pchanges'] = ret['changes'] ret['comment'] = 'Service will be created.' return ret @@ -84,7 +85,6 @@ def present(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = changes - ret['pchanges'] = ret['changes'] ret['comment'] = 'Service will be updated.' return ret @@ -117,7 +117,6 @@ def absent(name, auth=None): if __opts__['test'] is True: ret['result'] = None ret['changes'] = {'id': service.id} - ret['pchanges'] = ret['changes'] ret['comment'] = 'Service will be deleted.' return ret diff --git a/salt/states/keystone_user.py b/salt/states/keystone_user.py index 23f95fd260fa..a1bfd8d85ec1 100644 --- a/salt/states/keystone_user.py +++ b/salt/states/keystone_user.py @@ -83,6 +83,8 @@ def present(name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['keystoneng.setup_clouds'](auth) kwargs['name'] = name @@ -92,7 +94,6 @@ def present(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = kwargs - ret['pchanges'] = ret['changes'] ret['comment'] = 'User will be created.' return ret @@ -106,7 +107,6 @@ def present(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = changes - ret['pchanges'] = ret['changes'] ret['comment'] = 'User will be updated.' return ret @@ -133,6 +133,8 @@ def absent(name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['keystoneng.setup_clouds'](auth) kwargs['name'] = name @@ -142,7 +144,6 @@ def absent(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = {'id': user.id} - ret['pchanges'] = ret['changes'] ret['comment'] = 'User will be deleted.' 
return ret diff --git a/salt/states/linux_acl.py b/salt/states/linux_acl.py index f38adfbab730..2c6cd1275a46 100644 --- a/salt/states/linux_acl.py +++ b/salt/states/linux_acl.py @@ -103,7 +103,6 @@ def present(name, acl_type, acl_name='', perms='', recurse=False, force=False): ret = {'name': name, 'result': True, 'changes': {}, - 'pchanges': {}, 'comment': ''} _octal = {'r': 4, 'w': 2, 'x': 1, '-': 0} @@ -176,7 +175,7 @@ def present(name, acl_type, acl_name='', perms='', recurse=False, force=False): acl_name, new_perms, perms), - 'result': None, 'pchanges': changes}) + 'result': None, 'changes': changes}) return ret try: if force: @@ -199,7 +198,7 @@ def present(name, acl_type, acl_name='', perms='', recurse=False, force=False): if __opts__['test']: ret.update({'comment': 'New permissions will be applied for ' '{0}: {1}'.format(acl_name, perms), - 'result': None, 'pchanges': changes}) + 'result': None, 'changes': changes}) ret['result'] = None return ret diff --git a/salt/states/net_napalm_yang.py b/salt/states/net_napalm_yang.py index fc7a0633ad17..8b9726786f53 100644 --- a/salt/states/net_napalm_yang.py +++ b/salt/states/net_napalm_yang.py @@ -94,8 +94,6 @@ def managed(name, compliance_report: ``False`` Return the compliance report in the comment. - The compliance report structured object can be found however - in the ``pchanges`` field of the output (not displayed on the CLI). .. versionadded:: 2017.7.3 diff --git a/salt/states/neutron_network.py b/salt/states/neutron_network.py index e9f2b8a0d053..191207e8260b 100644 --- a/salt/states/neutron_network.py +++ b/salt/states/neutron_network.py @@ -72,6 +72,8 @@ def present(name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['neutronng.setup_clouds'](auth) kwargs['name'] = name @@ -81,7 +83,6 @@ def present(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = kwargs - ret['pchanges'] = ret['changes'] ret['comment'] = 'Network will be created.' return ret @@ -115,7 +116,6 @@ def present(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = changes - ret['pchanges'] = ret['changes'] ret['comment'] = 'Project will be updated.' return ret @@ -140,6 +140,8 @@ def absent(name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['neutronng.setup_clouds'](auth) kwargs['name'] = name @@ -149,7 +151,6 @@ def absent(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = {'id': network.id} - ret['pchanges'] = ret['changes'] ret['comment'] = 'Network will be deleted.' return ret diff --git a/salt/states/neutron_secgroup.py b/salt/states/neutron_secgroup.py index 7859ac60df76..1a62ecd67112 100644 --- a/salt/states/neutron_secgroup.py +++ b/salt/states/neutron_secgroup.py @@ -74,6 +74,8 @@ def present(name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['neutronng.setup_clouds'](auth) if 'project_name' in kwargs: @@ -95,7 +97,6 @@ def present(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = kwargs - ret['pchanges'] = ret['changes'] ret['comment'] = 'Security Group will be created.' 
return ret @@ -109,7 +110,6 @@ def present(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = changes - ret['pchanges'] = ret['changes'] ret['comment'] = 'Security Group will be updated.' return ret @@ -133,6 +133,8 @@ def absent(name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['neutronng.setup_clouds'](auth) kwargs['project_id'] = __salt__['keystoneng.project_get']( @@ -147,7 +149,6 @@ def absent(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = {'id': secgroup.id} - ret['pchanges'] = ret['changes'] ret['comment'] = 'Security group will be deleted.' return ret diff --git a/salt/states/neutron_secgroup_rule.py b/salt/states/neutron_secgroup_rule.py index 888969e90d5d..ccc6f2f064ff 100644 --- a/salt/states/neutron_secgroup_rule.py +++ b/salt/states/neutron_secgroup_rule.py @@ -77,6 +77,8 @@ def present(name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['neutronng.setup_clouds'](auth) if 'project_name' in kwargs: @@ -112,7 +114,6 @@ def present(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = kwargs - ret['pchanges'] = ret['changes'] ret['comment'] = 'Security Group rule will be created.' return ret @@ -166,10 +167,9 @@ def absent(name, auth=None, **kwargs): rule_exists = True if rule_exists: - if __opts__['test'] is True: + if __opts__['test']: ret['result'] = None ret['changes'] = {'id': kwargs['rule_id']} - ret['pchanges'] = ret['changes'] ret['comment'] = 'Security group rule will be deleted.' return ret diff --git a/salt/states/neutron_subnet.py b/salt/states/neutron_subnet.py index 43e4ab3ccf80..58219019eea3 100644 --- a/salt/states/neutron_subnet.py +++ b/salt/states/neutron_subnet.py @@ -96,16 +96,17 @@ def present(name, auth=None, **kwargs): 'result': True, 'comment': ''} + kwargs = __utils__['args.clean_kwargs'](**kwargs) + __salt__['neutronng.setup_clouds'](auth) kwargs['subnet_name'] = name subnet = __salt__['neutronng.subnet_get'](name=name) if subnet is None: - if __opts__['test'] is True: + if __opts__['test']: ret['result'] = None ret['changes'] = kwargs - ret['pchanges'] = ret['changes'] ret['comment'] = 'Subnet will be created.' return ret @@ -119,7 +120,6 @@ def present(name, auth=None, **kwargs): if __opts__['test'] is True: ret['result'] = None ret['changes'] = changes - ret['pchanges'] = ret['changes'] ret['comment'] = 'Project will be updated.' return ret @@ -160,7 +160,6 @@ def absent(name, auth=None): if __opts__['test'] is True: ret['result'] = None ret['changes'] = {'id': subnet.id} - ret['pchanges'] = ret['changes'] ret['comment'] = 'Project will be deleted.' 
return ret diff --git a/salt/states/pbm.py b/salt/states/pbm.py index 00945fc65cf6..836c95b807da 100644 --- a/salt/states/pbm.py +++ b/salt/states/pbm.py @@ -156,8 +156,10 @@ def default_vsan_policy_configured(name, policy): '\'{1}\''.format(name, vcenter)) log.trace('policy = {0}'.format(policy)) changes_required = False - ret = {'name': name, 'changes': {}, 'result': None, 'comment': None, - 'pchanges': {}} + ret = {'name': name, + 'changes': {}, + 'result': None, + 'comment': None} comments = [] changes = {} changes_required = False @@ -266,13 +268,11 @@ def default_vsan_policy_configured(name, policy): 'Nothing to be done.'.format(vcenter)), 'result': True}) else: - ret.update({'comment': '\n'.join(comments)}) - if __opts__['test']: - ret.update({'pchanges': changes, - 'result': None}) - else: - ret.update({'changes': changes, - 'result': True}) + ret.update({ + 'comment': '\n'.join(comments), + 'changes': changes, + 'result': None if __opts__['test'] else True, + }) return ret @@ -286,8 +286,10 @@ def storage_policies_configured(name, policies): comments = [] changes = [] changes_required = False - ret = {'name': name, 'changes': {}, 'result': None, 'comment': None, - 'pchanges': {}} + ret = {'name': name, + 'changes': {}, + 'result': None, + 'comment': None} log.trace('policies = {0}'.format(policies)) si = None try: @@ -430,13 +432,11 @@ def storage_policies_configured(name, policies): 'Nothing to be done.'.format(vcenter)), 'result': True}) else: - ret.update({'comment': '\n'.join(comments)}) - if __opts__['test']: - ret.update({'pchanges': {'storage_policies': changes}, - 'result': None}) - else: - ret.update({'changes': {'storage_policies': changes}, - 'result': True}) + ret.update({ + 'comment': '\n'.join(comments), + 'changes': {'storage_policies': changes}, + 'result': None if __opts__['test'] else True, + }) return ret @@ -454,8 +454,10 @@ def default_storage_policy_assigned(name, policy, datastore): ''.format(name, policy, datastore)) changes = {} changes_required = False - ret = {'name': name, 'changes': {}, 'result': None, 'comment': None, - 'pchanges': {}} + ret = {'name': name, + 'changes': {}, + 'result': None, + 'comment': None} si = None try: si = __salt__['vsphere.get_service_instance_via_proxy']() @@ -488,14 +490,13 @@ def default_storage_policy_assigned(name, policy, datastore): ret.update({'comment': exc.strerror, 'result': False if not __opts__['test'] else None}) return ret + ret['comment'] = comment if changes_required: - if __opts__['test']: - ret.update({'result': None, - 'pchanges': changes}) - else: - ret.update({'result': True, - 'changes': changes}) + ret.update({ + 'changes': changes, + 'result': None if __opts__['test'] else True, + }) else: ret['result'] = True return ret diff --git a/salt/states/snapper.py b/salt/states/snapper.py index 0b8eea53964f..c49b11416228 100644 --- a/salt/states/snapper.py +++ b/salt/states/snapper.py @@ -199,8 +199,7 @@ def baseline_snapshot(name, number=None, tag=None, include_diff=True, config='ro filename=file).get(file, {})) if __opts__['test'] and status: - ret['pchanges'] = status - ret['changes'] = ret['pchanges'] + ret['changes'] = status ret['comment'] = "{0} files changes are set to be undone".format(len(status.keys())) ret['result'] = None elif __opts__['test'] and not status: diff --git a/salt/states/solrcloud.py b/salt/states/solrcloud.py index 3a00b85715b5..4079be7a6a5e 100644 --- a/salt/states/solrcloud.py +++ b/salt/states/solrcloud.py @@ -34,10 +34,9 @@ def alias(name, collections, **kwargs): 'changes': {}, 
'result': False, 'comment': '', - 'pchanges': {}, } - if __salt__["solrcloud.alias_exists"](name, **kwargs): + if __salt__['solrcloud.alias_exists'](name, **kwargs): alias_content = __salt__['solrcloud.alias_get_collections'](name, **kwargs) diff = set(alias_content).difference(set(collections)) @@ -48,38 +47,31 @@ def alias(name, collections, **kwargs): if __opts__['test']: ret['comment'] = 'The alias "{0}" will be updated.'.format(name) - ret['pchanges'] = { - 'old': ",".join(alias_content), - 'new': ",".join(collections) - } ret['result'] = None else: - __salt__["solrcloud.alias_set_collections"](name, collections, **kwargs) + __salt__['solrcloud.alias_set_collections'](name, collections, **kwargs) ret['comment'] = 'The alias "{0}" has been updated.'.format(name) - ret['changes'] = { - 'old': ",".join(alias_content), - 'new': ",".join(collections) - } - ret['result'] = True + + ret['changes'] = { + 'old': ','.join(alias_content), + 'new': ','.join(collections), + } + else: if __opts__['test']: ret['comment'] = 'The alias "{0}" will be created.'.format(name) - ret['pchanges'] = { - 'old': None, - 'new': ",".join(collections) - } ret['result'] = None else: - __salt__["solrcloud.alias_set_collections"](name, collections, **kwargs) + __salt__['solrcloud.alias_set_collections'](name, collections, **kwargs) ret['comment'] = 'The alias "{0}" has been created.'.format(name) - ret['changes'] = { - 'old': None, - 'new': ",".join(collections) - } - ret['result'] = True + ret['changes'] = { + 'old': None, + 'new': ','.join(collections), + } + return ret @@ -101,7 +93,6 @@ def collection(name, options=None, **kwargs): 'changes': {}, 'result': False, 'comment': '', - 'pchanges': {}, } if options is None: @@ -137,42 +128,32 @@ def collection(name, options=None, **kwargs): if __opts__['test']: ret['comment'] = 'Collection options "{0}" will be changed.'.format(name) - ret['pchanges'] = { - 'old': salt.utils.json.dumps(current_options, sort_keys=True, indent=4, separators=(',', ': ')), - 'new': salt.utils.json.dumps(options, sort_keys=True, indent=4, separators=(',', ': ')) - } ret['result'] = None - - return ret else: - __salt__["solrcloud.collection_set_options"](name, diff, **kwargs) - + __salt__['solrcloud.collection_set_options'](name, diff, **kwargs) ret['comment'] = 'Parameters were updated for collection "{0}".'.format(name) ret['result'] = True - ret['changes'] = { - 'old': salt.utils.json.dumps(current_options, sort_keys=True, indent=4, separators=(',', ': ')), - 'new': salt.utils.json.dumps(options, sort_keys=True, indent=4, separators=(',', ': ')) - } - return ret + ret['changes'] = { + 'old': salt.utils.json.dumps(current_options, sort_keys=True, indent=4, separators=(',', ': ')), + 'new': salt.utils.json.dumps(options, sort_keys=True, indent=4, separators=(',', ': ')) + } + return ret else: + new_changes = salt.utils.json.dumps(options, sort_keys=True, indent=4, separators=(',', ': ')) if __opts__['test']: ret['comment'] = 'The collection "{0}" will be created.'.format(name) - ret['pchanges'] = { - 'old': None, - 'new': str('options=') + new_changes # future lint: disable=blacklisted-function - } ret['result'] = None else: __salt__["solrcloud.collection_create"](name, options, **kwargs) ret['comment'] = 'The collection "{0}" has been created.'.format(name) - ret['changes'] = { - 'old': None, - 'new': str('options=') + new_changes # future lint: disable=blacklisted-function - } - ret['result'] = True + ret['changes'] = { + 'old': None, + 'new': str('options=') + new_changes # future lint: 
disable=blacklisted-function + } + return ret diff --git a/salt/utils/napalm.py b/salt/utils/napalm.py index 73c815970b0d..03be965950cb 100644 --- a/salt/utils/napalm.py +++ b/salt/utils/napalm.py @@ -492,7 +492,6 @@ def default_ret(name): ''' ret = { 'name': name, - 'pchanges': {}, 'changes': {}, 'result': False, 'comment': '' @@ -510,22 +509,16 @@ def loaded_ret(ret, loaded, test, debug, compliance_report=False, opts=None): ''' # Always get the comment changes = {} - pchanges = {} ret['comment'] = loaded['comment'] if 'diff' in loaded: changes['diff'] = loaded['diff'] - pchanges['diff'] = loaded['diff'] if 'commit_id' in loaded: changes['commit_id'] = loaded['commit_id'] - pchanges['commit_id'] = loaded['commit_id'] if 'compliance_report' in loaded: if compliance_report: changes['compliance_report'] = loaded['compliance_report'] - pchanges['compliance_report'] = loaded['compliance_report'] if debug and 'loaded_config' in loaded: changes['loaded_config'] = loaded['loaded_config'] - pchanges['loaded_config'] = loaded['loaded_config'] - ret['pchanges'] = pchanges if changes.get('diff'): ret['comment'] = '{comment_base}\n\nConfiguration diff:\n\n{diff}'.format(comment_base=ret['comment'], diff=changes['diff']) diff --git a/salt/utils/state.py b/salt/utils/state.py index b90f36beaac4..371f393a4aca 100644 --- a/salt/utils/state.py +++ b/salt/utils/state.py @@ -212,10 +212,6 @@ def state_func(name, config, alarm=None): original_return.setdefault('changes', {}) original_return['changes'][subkey] = sub_return['changes'] - if sub_return.get('pchanges'): # pchanges may or may not exist - original_return.setdefault('pchanges', {}) - original_return['pchanges'][subkey] = sub_return['pchanges'] - return original_return diff --git a/tests/integration/states/test_file.py b/tests/integration/states/test_file.py index ac444504a17d..d42bcd363f30 100644 --- a/tests/integration/states/test_file.py +++ b/tests/integration/states/test_file.py @@ -751,7 +751,6 @@ def test_managed_source_hash_indifferent_case(self): source_hash=uppercase_hash ) assert ret[state_name]['result'] is True - assert ret[state_name]['pchanges'] == {} assert ret[state_name]['changes'] == {} # Test uppercase source_hash using test=true @@ -764,7 +763,6 @@ def test_managed_source_hash_indifferent_case(self): test=True ) assert ret[state_name]['result'] is True - assert ret[state_name]['pchanges'] == {} assert ret[state_name]['changes'] == {} finally: diff --git a/tests/unit/states/test_boto_cloudfront.py b/tests/unit/states/test_boto_cloudfront.py index e6179e2de748..25f26d561136 100644 --- a/tests/unit/states/test_boto_cloudfront.py +++ b/tests/unit/states/test_boto_cloudfront.py @@ -91,7 +91,7 @@ def test_present_from_scratch(self): self.base_ret_with({ 'result': None, 'comment': comment, - 'pchanges': {'old': None, 'new': self.name}, + 'changes': {'old': None, 'new': self.name}, }), ) @@ -191,7 +191,7 @@ def test_present_update_config_and_tags(self): self.base_ret_with({ 'result': None, 'comment': '\n'.join([header, diff]), - 'pchanges': {'diff': diff}, + 'changes': {'diff': diff}, }), ) diff --git a/tests/unit/states/test_boto_sqs.py b/tests/unit/states/test_boto_sqs.py index f0b29b044596..2b8e46ac88cb 100644 --- a/tests/unit/states/test_boto_sqs.py +++ b/tests/unit/states/test_boto_sqs.py @@ -74,7 +74,7 @@ def test_present(self): ret.update({ 'result': None, 'comment': comt, - 'pchanges': {'old': None, 'new': 'mysqs'}, + 'changes': {'old': None, 'new': 'mysqs'}, }) self.assertDictEqual(boto_sqs.present(name), ret) diff = 
textwrap.dedent('''\ @@ -101,7 +101,7 @@ def test_present(self): ] ret.update({ 'comment': comt, - 'pchanges': {'attributes': {'diff': diff}}, + 'changes': {'attributes': {'diff': diff}}, }) self.assertDictEqual(boto_sqs.present(name, attributes), ret) @@ -133,6 +133,6 @@ def test_absent(self): ret.update({ 'result': None, 'comment': comt, - 'pchanges': {'old': name, 'new': None}, + 'changes': {'old': name, 'new': None}, }) self.assertDictEqual(boto_sqs.absent(name), ret) diff --git a/tests/unit/states/test_esxdatacenter.py b/tests/unit/states/test_esxdatacenter.py index a55dd0308a43..38d6f9a86b6a 100644 --- a/tests/unit/states/test_esxdatacenter.py +++ b/tests/unit/states/test_esxdatacenter.py @@ -64,7 +64,6 @@ def test_dc_name_different_proxy(self): res = esxdatacenter.datacenter_configured('fake_dc') self.assertDictEqual(res, {'name': 'fake_dc', 'changes': {}, - 'pchanges': {}, 'result': True, 'comment': 'Datacenter \'fake_dc\' already ' 'exists. Nothing to be done.'}) @@ -78,7 +77,6 @@ def test_dc_name_esxdatacenter_proxy(self): res = esxdatacenter.datacenter_configured('fake_dc') self.assertDictEqual(res, {'name': 'fake_dc', 'changes': {}, - 'pchanges': {}, 'result': True, 'comment': 'Datacenter \'proxy_dc\' ' 'already exists. Nothing to be done.'}) @@ -112,7 +110,6 @@ def test_create_datacenter(self): self.assertDictEqual(res, {'name': 'fake_dc', 'changes': {'new': {'name': 'fake_dc'}}, - 'pchanges': {}, 'result': True, 'comment': 'Created datacenter \'fake_dc\'.'}) @@ -124,8 +121,7 @@ def test_create_datacenter_test_mode(self): res = esxdatacenter.datacenter_configured('fake_dc') self.assertDictEqual(res, {'name': 'fake_dc', - 'changes': {}, - 'pchanges': {'new': {'name': 'fake_dc'}}, + 'changes': {'new': {'name': 'fake_dc'}}, 'result': None, 'comment': 'State will create ' 'datacenter \'fake_dc\'.'}) @@ -138,7 +134,6 @@ def test_nothing_to_be_done_test_mode(self): res = esxdatacenter.datacenter_configured('fake_dc') self.assertDictEqual(res, {'name': 'fake_dc', 'changes': {}, - 'pchanges': {}, 'result': True, 'comment': 'Datacenter \'fake_dc\' already ' 'exists. 
Nothing to be done.'}) @@ -154,7 +149,6 @@ def test_state_get_service_instance_raise_command_execution_error(self): self.assertEqual(mock_disconnect.call_count, 0) self.assertDictEqual(res, {'name': 'fake_dc', 'changes': {}, - 'pchanges': {}, 'result': False, 'comment': 'Error'}) @@ -169,7 +163,6 @@ def test_state_raise_command_execution_error_after_si(self): mock_disconnect.assert_called_once_with(self.mock_si) self.assertDictEqual(res, {'name': 'fake_dc', 'changes': {}, - 'pchanges': {}, 'result': False, 'comment': 'Error'}) @@ -182,6 +175,5 @@ def test_state_raise_command_execution_error_test_mode(self): res = esxdatacenter.datacenter_configured('fake_dc') self.assertDictEqual(res, {'name': 'fake_dc', 'changes': {}, - 'pchanges': {}, 'result': None, 'comment': 'Error'}) diff --git a/tests/unit/states/test_file.py b/tests/unit/states/test_file.py index 76d0581b042a..ea5fa3def531 100644 --- a/tests/unit/states/test_file.py +++ b/tests/unit/states/test_file.py @@ -226,7 +226,7 @@ def return_val(kwargs): comt = 'Symlink {0} to {1} is set for creation'.format(name, target) ret = return_val({'comment': comt, 'result': None, - 'pchanges': {'new': name}}) + 'changes': {'new': name}}) self.assertDictEqual(filestate.symlink(name, target, user=user, group=group), ret) @@ -249,7 +249,7 @@ def return_val(kwargs): comt = 'Directory {0} for symlink is not present'.format(test_dir) ret = return_val({'comment': comt, 'result': False, - 'pchanges': {'new': name}}) + 'changes': {}}) self.assertDictEqual(filestate.symlink(name, target, user=user, group=group), ret) @@ -271,7 +271,7 @@ def return_val(kwargs): comt = 'Symlink {0} is present and owned by {1}:{2}'.format(name, user, group) ret = return_val({'comment': comt, 'result': True, - 'pchanges': {}}) + 'changes': {}}) self.assertDictEqual(filestate.symlink(name, target, user=user, group=group), ret) @@ -292,7 +292,7 @@ def return_val(kwargs): '{1} - backup: {2}'.format(name, target, os.path.join(test_dir, 'SALT')) ret.update({'comment': comt, 'result': False, - 'pchanges': {'new': name}}) + 'changes': {}}) self.assertDictEqual( filestate.symlink(name, target, user=user, group=group, backupname='SALT'), @@ -312,7 +312,7 @@ def return_val(kwargs): comt = 'Backupname must be an absolute path or a file name: {0}'.format('tmp/SALT') ret.update({'comment': comt, 'result': False, - 'pchanges': {'new': name}}) + 'changes': {}}) self.assertDictEqual( filestate.symlink(name, target, user=user, group=group, backupname='tmp/SALT'), ret) @@ -331,7 +331,7 @@ def return_val(kwargs): patch('salt.utils.win_functions.get_sid_from_name', return_value='test-sid'): comt = 'File exists where the symlink {0} should be'.format(name) ret = return_val({'comment': comt, - 'pchanges': {'new': name}, + 'changes': {}, 'result': False}) self.assertDictEqual( filestate.symlink(name, target, user=user, group=group), @@ -353,7 +353,7 @@ def return_val(kwargs): comt = 'File exists where the symlink {0} should be'.format(name) ret = return_val({'comment': comt, 'result': False, - 'pchanges': {'new': name}}) + 'changes': {}}) self.assertDictEqual( filestate.symlink(name, target, user=user, group=group), ret) @@ -374,7 +374,7 @@ def return_val(kwargs): comt = 'Directory exists where the symlink {0} should be'.format(name) ret = return_val({'comment': comt, 'result': False, - 'pchanges': {'new': name}}) + 'changes': {}}) self.assertDictEqual( filestate.symlink(name, target, user=user, group=group), ret) @@ -394,7 +394,7 @@ def return_val(kwargs): comt = 'Unable to create new symlink {0} -> 
{1}: '.format(name, target) ret = return_val({'comment': comt, 'result': False, - 'pchanges': {'new': name}}) + 'changes': {}}) self.assertDictEqual( filestate.symlink(name, target, user=user, group=group), ret) @@ -417,7 +417,6 @@ def return_val(kwargs): comt = 'Created new symlink {0} -> {1}'.format(name, target) ret = return_val({'comment': comt, 'result': True, - 'pchanges': {'new': name}, 'changes': {'new': name}}) self.assertDictEqual( filestate.symlink(name, target, user=user, group=group), @@ -443,7 +442,6 @@ def return_val(kwargs): 'ownership to {2}:{3}'.format(name, target, user, group) ret = return_val({'comment': comt, 'result': False, - 'pchanges': {'new': name}, 'changes': {'new': name}}) self.assertDictEqual( filestate.symlink(name, target, user=user, group=group), @@ -459,7 +457,6 @@ def test_absent(self): ret = {'name': name, 'result': False, 'comment': '', - 'pchanges': {}, 'changes': {}} mock_t = MagicMock(return_value=True) @@ -490,17 +487,15 @@ def test_absent(self): ret.update({'comment': comt, 'name': name, 'result': None, - 'pchanges': {'removed': '/fake/file.conf'}}) + 'changes': {'removed': '/fake/file.conf'}}) self.assertDictEqual(filestate.absent(name), ret) - ret.update({'pchanges': {}}) with patch.dict(filestate.__opts__, {'test': False}): with patch.dict(filestate.__salt__, {'file.remove': mock_file}): comt = ('Removed file {0}'.format(name)) ret.update({'comment': comt, 'result': True, - 'changes': {'removed': name}, - 'pchanges': {'removed': name}}) + 'changes': {'removed': name}}) self.assertDictEqual(filestate.absent(name), ret) comt = ('Removed file {0}'.format(name)) @@ -508,7 +503,6 @@ def test_absent(self): 'result': False, 'changes': {}}) self.assertDictEqual(filestate.absent(name), ret) - ret.update({'pchanges': {}}) with patch.object(os.path, 'isfile', mock_f): with patch.object(os.path, 'isdir', mock_t): @@ -516,7 +510,7 @@ def test_absent(self): comt = \ 'Directory {0} is set for removal'.format(name) ret.update({'comment': comt, - 'pchanges': {'removed': name}, + 'changes': {'removed': name}, 'result': None}) self.assertDictEqual(filestate.absent(name), ret) @@ -533,7 +527,6 @@ def test_absent(self): ret.update({'comment': comt, 'result': False, 'changes': {}}) self.assertDictEqual(filestate.absent(name), ret) - ret.update({'pchanges': {}}) with patch.object(os.path, 'isdir', mock_f): with patch.dict(filestate.__opts__, {'test': True}): @@ -552,8 +545,7 @@ def test_exists(self): ret = {'name': name, 'result': False, 'comment': '', - 'changes': {}, - 'pchanges': {}} + 'changes': {}} mock_t = MagicMock(return_value=True) mock_f = MagicMock(return_value=False) @@ -589,7 +581,7 @@ def test_missing(self): mock_f = MagicMock(return_value=False) comt = ('Must provide name to file.missing') - ret.update({'comment': comt, 'name': '', 'pchanges': {}}) + ret.update({'comment': comt, 'name': '', 'changes': {}}) self.assertDictEqual(filestate.missing(''), ret) with patch.object(os.path, 'exists', mock_t): @@ -680,7 +672,7 @@ def test_managed(self): 'file.manage_file': mock_ex, 'cmd.run_all': mock_cmd_fail}): comt = ('Destination file name is required') - ret.update({'comment': comt, 'name': '', 'pchanges': {}}) + ret.update({'comment': comt, 'name': '', 'changes': {}}) self.assertDictEqual(filestate.managed(''), ret) with patch.object(os.path, 'isfile', mock_f): @@ -785,13 +777,12 @@ def test_managed(self): comt = ('check_cmd execution failed') ret.update({'comment': comt, 'result': False, 'skip_watch': True}) - ret.pop('pchanges') 
self.assertDictEqual(filestate.managed (name, user=user, group=group, check_cmd='A'), ret) comt = ('check_cmd execution failed') - ret.update({'comment': True, 'pchanges': {}}) + ret.update({'comment': True, 'changes': {}}) ret.pop('skip_watch', None) self.assertDictEqual(filestate.managed (name, user=user, group=group), @@ -848,7 +839,7 @@ def test_directory(self): ret = {'name': name, 'result': False, 'comment': '', - 'pchanges': {}, + 'changes': {}, 'changes': {}} comt = ('Must provide name to file.directory') @@ -940,12 +931,10 @@ def test_directory(self): else: comt = ('The following files will be changed:\n{0}:' ' directory - new\n'.format(name)) - p_chg = {name: {'directory': 'new'}} ret.update({ 'comment': comt, 'result': None, - 'pchanges': p_chg, - 'changes': {} + 'changes': {name: {'directory': 'new'}} }) self.assertDictEqual(filestate.directory(name, user=user, @@ -956,7 +945,7 @@ def test_directory(self): with patch.object(os.path, 'isdir', mock_f): comt = ('No directory to create {0} in' .format(name)) - ret.update({'comment': comt, 'result': False, 'changes': {}}) + ret.update({'comment': comt, 'result': False}) self.assertDictEqual(filestate.directory (name, user=user, group=group), ret) @@ -975,7 +964,7 @@ def test_directory(self): 'options "ignore_files" and ' '"ignore_dirs" at the same ' 'time.', - 'pchanges': {}}) + 'changes': {}}) with patch.object(os.path, 'isdir', mock_t): self.assertDictEqual(filestate.directory (name, user=user, @@ -1003,7 +992,6 @@ def test_recurse(self): ret = {'name': name, 'result': False, 'comment': '', - 'pchanges': {}, 'changes': {}} comt = ("'mode' is not allowed in 'file.recurse'." @@ -1092,7 +1080,7 @@ def test_replace(self): 'changes': {}} comt = ('Must provide name to file.replace') - ret.update({'comment': comt, 'name': '', 'pchanges': {}}) + ret.update({'comment': comt, 'name': '', 'changes': {}}) self.assertDictEqual(filestate.replace('', pattern, repl), ret) mock_t = MagicMock(return_value=True) @@ -1126,7 +1114,6 @@ def test_blockreplace(self): ret = {'name': name, 'result': False, 'comment': '', - 'pchanges': {}, 'changes': {}} comt = ('Must provide name to file.blockreplace') @@ -1146,8 +1133,7 @@ def test_blockreplace(self): with patch.dict(filestate.__opts__, {'test': True}): comt = ('Changes would be made') ret.update({'comment': comt, 'result': None, - 'changes': {'diff': True}, - 'pchanges': {'diff': True}}) + 'changes': {'diff': True}}) self.assertDictEqual(filestate.blockreplace(name), ret) # 'comment' function tests: 1 @@ -1163,7 +1149,6 @@ def test_comment(self): ret = {'name': name, 'result': False, 'comment': '', - 'pchanges': {}, 'changes': {}} comt = ('Must provide name to file.comment') @@ -1194,14 +1179,15 @@ def test_comment(self): 'file.comment_line': mock_t}): with patch.dict(filestate.__opts__, {'test': True}): comt = ('File {0} is set to be updated'.format(name)) - ret.update({'comment': comt, 'result': None, 'pchanges': {name: 'updated'}}) + ret.update({'comment': comt, 'result': None, 'changes': {name: 'updated'}}) self.assertDictEqual(filestate.comment(name, regex), ret) with patch.dict(filestate.__opts__, {'test': False}): with patch.object(salt.utils.files, 'fopen', MagicMock(mock_open())): comt = ('Commented lines successfully') - ret.update({'comment': comt, 'result': True}) + ret.update({'comment': comt, 'result': True, + 'changes': {}}) self.assertDictEqual(filestate.comment(name, regex), ret) @@ -1216,7 +1202,6 @@ def test_uncomment(self): regex = 'bind 127.0.0.1' ret = {'name': name, - 'pchanges': 
{}, 'result': False, 'comment': '', 'changes': {}} @@ -1249,14 +1234,16 @@ def test_uncomment(self): with patch.dict(filestate.__opts__, {'test': True}): comt = ('File {0} is set to be updated'.format(name)) - ret.update({'comment': comt, 'result': None, 'pchanges': {name: 'updated'}, }) + ret.update({'comment': comt, 'result': None, + 'changes': {name: 'updated'}}) self.assertDictEqual(filestate.uncomment(name, regex), ret) with patch.dict(filestate.__opts__, {'test': False}): with patch.object(salt.utils.files, 'fopen', MagicMock(mock_open())): comt = ('Uncommented lines successfully') - ret.update({'comment': comt, 'result': True}) + ret.update({'comment': comt, 'result': True, + 'changes': {}}) self.assertDictEqual(filestate.uncomment(name, regex), ret) # 'prepend' function tests: 1 @@ -1276,7 +1263,6 @@ def test_prepend(self): ret = {'name': name, 'result': False, 'comment': '', - 'pchanges': {}, 'changes': {}} comt = ('Must provide name to file.prepend') @@ -1299,24 +1285,23 @@ def test_prepend(self): 'file.prepend': mock_t}): comt = ('The following files will be changed:\n/tmp/etc:' ' directory - new\n') - pchanges = {'/tmp/etc': {'directory': 'new'}} + changes = {'/tmp/etc': {'directory': 'new'}} if salt.utils.platform.is_windows(): comt = 'The directory "c:\\tmp\\etc" will be changed' - pchanges = {'c:\\tmp\\etc': {'directory': 'new'}} - ret.update({'comment': comt, 'name': name, 'pchanges': pchanges}) + changes = {'c:\\tmp\\etc': {'directory': 'new'}} + ret.update({'comment': comt, 'name': name, 'changes': changes}) self.assertDictEqual(filestate.prepend(name, makedirs=True), ret) with patch.object(os.path, 'isabs', mock_f): comt = ('Specified file {0} is not an absolute path' .format(name)) - ret.update({'comment': comt, 'pchanges': {}}) + ret.update({'comment': comt, 'changes': {}}) self.assertDictEqual(filestate.prepend(name), ret) with patch.object(os.path, 'isabs', mock_t): with patch.object(os.path, 'exists', mock_t): comt = ("Failed to load template file {0}".format(source)) - ret.pop('pchanges') ret.update({'comment': comt, 'name': source, 'data': []}) self.assertDictEqual(filestate.prepend(name, source=source), ret) @@ -1330,8 +1315,9 @@ def test_prepend(self): change = {'diff': 'Replace binary file'} comt = ('File {0} is set to be updated' .format(name)) - ret.update({'comment': comt, 'result': None, - 'changes': change, 'pchanges': {}}) + ret.update({'comment': comt, + 'result': None, + 'changes': change}) self.assertDictEqual(filestate.prepend (name, text=text), ret) @@ -1849,7 +1835,6 @@ def run_checks(isdir=mock_t, strptime_format=None, test=False): expected_ret = { 'name': fake_name, 'changes': {'retained': [], 'deleted': [], 'ignored': []}, - 'pchanges': {'retained': [], 'deleted': [], 'ignored': []}, 'result': True, 'comment': 'Name provided to file.retention must be a directory', } @@ -1895,8 +1880,7 @@ def run_checks(isdir=mock_t, strptime_format=None, test=False): deleted_files = sorted(list(set(fake_file_list) - retained_files - set(ignored_files)), reverse=True) retained_files = sorted(list(retained_files), reverse=True) - changes = {'retained': retained_files, 'deleted': deleted_files, 'ignored': ignored_files} - expected_ret['pchanges'] = changes + expected_ret['changes'] = {'retained': retained_files, 'deleted': deleted_files, 'ignored': ignored_files} if test: expected_ret['result'] = None expected_ret['comment'] = ('{0} backups would have been removed from {1}.\n' @@ -1904,7 +1888,6 @@ def run_checks(isdir=mock_t, strptime_format=None, test=False): 
else: expected_ret['comment'] = ('{0} backups were removed from {1}.\n' ''.format(len(deleted_files), fake_name)) - expected_ret['changes'] = changes mock_remove.assert_has_calls( [call(os.path.join(fake_name, x)) for x in deleted_files], any_order=True diff --git a/tests/unit/states/test_linux_acl.py b/tests/unit/states/test_linux_acl.py index da9b3fd24fe5..54f359983c2e 100644 --- a/tests/unit/states/test_linux_acl.py +++ b/tests/unit/states/test_linux_acl.py @@ -69,13 +69,12 @@ def test_present(self): ''.format(acl_name, perms)) ret = {'name': name, 'comment': comt, - 'changes': {}, - 'pchanges': {'new': {'acl_name': acl_name, + 'changes': {'new': {'acl_name': acl_name, 'acl_type': acl_type, 'perms': perms}, - 'old': {'acl_name': acl_name, - 'acl_type': acl_type, - 'perms': 'r-x'}}, + 'old': {'acl_name': acl_name, + 'acl_type': acl_type, + 'perms': 'r-x'}}, 'result': None} self.assertDictEqual(linux_acl.present(name, acl_type, acl_name, @@ -92,7 +91,6 @@ def test_present(self): 'old': {'acl_name': acl_name, 'acl_type': acl_type, 'perms': 'r-x'}}, - 'pchanges': {}, 'result': True} self.assertDictEqual(linux_acl.present(name, acl_type, acl_name, perms), @@ -106,7 +104,6 @@ def test_present(self): ret = {'name': name, 'comment': comt, 'changes': {}, - 'pchanges': {}, 'result': False} self.assertDictEqual(linux_acl.present(name, acl_type, acl_name, perms), @@ -118,10 +115,9 @@ def test_present(self): 'for {0}: {1}'.format(acl_name, perms)) ret = {'name': name, 'comment': comt, - 'changes': {}, - 'pchanges': {'new': {'acl_name': acl_name, - 'acl_type': acl_type, - 'perms': perms}}, + 'changes': {'new': {'acl_name': acl_name, + 'acl_type': acl_type, + 'perms': perms}}, 'result': None} self.assertDictEqual(linux_acl.present(name, acl_type, acl_name, perms), @@ -135,7 +131,6 @@ def test_present(self): 'changes': {'new': {'acl_name': acl_name, 'acl_type': acl_type, 'perms': perms}}, - 'pchanges': {}, 'result': True} self.assertDictEqual(linux_acl.present(name, acl_type, acl_name, perms), @@ -149,7 +144,6 @@ def test_present(self): ret = {'name': name, 'comment': comt, 'changes': {}, - 'pchanges': {}, 'result': False} self.assertDictEqual(linux_acl.present(name, acl_type, acl_name, perms), @@ -163,13 +157,12 @@ def test_present(self): ''.format(acl_name, perms)) ret = {'name': name, 'comment': comt, - 'changes': {}, - 'pchanges': {'new': {'acl_name': acl_name, + 'changes': {'new': {'acl_name': acl_name, 'acl_type': acl_type, 'perms': perms}, - 'old': {'acl_name': acl_name, - 'acl_type': acl_type, - 'perms': 'rwx'}}, + 'old': {'acl_name': acl_name, + 'acl_type': acl_type, + 'perms': 'rwx'}}, 'result': None} self.assertDictEqual(linux_acl.present(name, acl_type, acl_name, @@ -183,7 +176,6 @@ def test_present(self): ret = {'name': name, 'comment': comt, 'changes': {}, - 'pchanges': {}, 'result': True} self.assertDictEqual(linux_acl.present(name, acl_type, acl_name, @@ -191,8 +183,7 @@ def test_present(self): # No acl type comt = ('ACL Type does not exist') - ret = {'name': name, 'comment': comt, 'result': False, - 'changes': {}, 'pchanges': {}} + ret = {'name': name, 'comment': comt, 'result': False, 'changes': {}} self.assertDictEqual(linux_acl.present(name, acl_type, acl_name, perms), ret) diff --git a/tests/unit/utils/test_state.py b/tests/unit/utils/test_state.py index d076e7d00436..0f356c59e72a 100644 --- a/tests/unit/utils/test_state.py +++ b/tests/unit/utils/test_state.py @@ -527,56 +527,6 @@ def test_merge_changes(self): 'alarms': secondary_changes, }) - def test_merge_pchanges(self): - 
primary_pchanges = {'old': None, 'new': 'my_resource'} - secondary_pchanges = {'old': None, 'new': ['alarm-1', 'alarm-2']} - - # Neither main nor sub pchanges case - m = copy.deepcopy(self.main_ret) - s = copy.deepcopy(self.sub_ret) - res = salt.utils.state.merge_subreturn(m, s) - self.assertNotIn('pchanges', res) - - # No main pchanges, sub pchanges - m = copy.deepcopy(self.main_ret) - s = copy.deepcopy(self.sub_ret) - s['pchanges'] = copy.deepcopy(secondary_pchanges) - res = salt.utils.state.merge_subreturn(m, s) - self.assertDictEqual(res['pchanges'], { - 'secondary': secondary_pchanges - }) - - # Main pchanges, no sub pchanges - m = copy.deepcopy(self.main_ret) - m['pchanges'] = copy.deepcopy(primary_pchanges) - s = copy.deepcopy(self.sub_ret) - res = salt.utils.state.merge_subreturn(m, s) - self.assertDictEqual(res['pchanges'], primary_pchanges) - - # Both main and sub pchanges, new pchanges don't affect existing ones - m = copy.deepcopy(self.main_ret) - m['pchanges'] = copy.deepcopy(primary_pchanges) - s = copy.deepcopy(self.sub_ret) - s['pchanges'] = copy.deepcopy(secondary_pchanges) - res = salt.utils.state.merge_subreturn(m, s) - self.assertDictEqual(res['pchanges'], { - 'old': None, - 'new': 'my_resource', - 'secondary': secondary_pchanges, - }) - - # The subkey parameter is respected - m = copy.deepcopy(self.main_ret) - m['pchanges'] = copy.deepcopy(primary_pchanges) - s = copy.deepcopy(self.sub_ret) - s['pchanges'] = copy.deepcopy(secondary_pchanges) - res = salt.utils.state.merge_subreturn(m, s, subkey='alarms') - self.assertDictEqual(res['pchanges'], { - 'old': None, - 'new': 'my_resource', - 'alarms': secondary_pchanges, - }) - def test_merge_comments(self): main_comment_1 = 'First primary comment.' main_comment_2 = 'Second primary comment.' 
From 0ba0ddf8fa7f361cdaff0b32113405b4bca56f2f Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Fri, 28 Sep 2018 21:21:30 -0500 Subject: [PATCH 211/340] Add test mode changes to file.touch state --- salt/states/file.py | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/salt/states/file.py b/salt/states/file.py index 0e1381e39356..08afadb82ad3 100644 --- a/salt/states/file.py +++ b/salt/states/file.py @@ -975,16 +975,25 @@ def _check_touch(name, atime, mtime): ''' Check to see if a file needs to be updated or created ''' + ret = { + 'result': None, + 'comment': '', + 'changes': {'new': name}, + } if not os.path.exists(name): - return None, 'File {0} is set to be created'.format(name) - stats = __salt__['file.stats'](name, follow_symlinks=False) - if atime is not None: - if six.text_type(atime) != six.text_type(stats['atime']): - return None, 'Times set to be updated on file {0}'.format(name) - if mtime is not None: - if six.text_type(mtime) != six.text_type(stats['mtime']): - return None, 'Times set to be updated on file {0}'.format(name) - return True, 'File {0} exists and has the correct times'.format(name) + ret['comment'] = 'File {0} is set to be created'.format(name) + else: + stats = __salt__['file.stats'](name, follow_symlinks=False) + if ((atime is not None + and six.text_type(atime) != six.text_type(stats['atime'])) or + (mtime is not None + and six.text_type(mtime) != six.text_type(stats['mtime']))): + ret['comment'] = 'Times set to be updated on file {0}'.format(name) + ret['changes'] = {'touched': name} + else: + ret['result'] = True + ret['comment'] = 'File {0} exists and has the correct times'.format(name) + return ret def _get_symlink_ownership(path): @@ -6107,7 +6116,7 @@ def touch(name, atime=None, mtime=None, makedirs=False): ) if __opts__['test']: - ret['result'], ret['comment'] = _check_touch(name, atime, mtime) + ret.update(_check_touch(name, atime, mtime)) return ret if makedirs: From 982e693ef6b8afbc3152d6f0450060f46b56a292 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Fri, 28 Sep 2018 21:23:13 -0500 Subject: [PATCH 212/340] Add exception logging in flaky decorator --- tests/support/helpers.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/support/helpers.py b/tests/support/helpers.py index 626da6a06922..f9554dcb5b5a 100644 --- a/tests/support/helpers.py +++ b/tests/support/helpers.py @@ -211,7 +211,10 @@ def wrap(cls): if attempt >= attempts -1: raise exc backoff_time = attempt ** 2 - log.info('Found Exception. Waiting %s seconds to retry.', backoff_time) + log.info( + 'Found Exception. Waiting %s seconds to retry.', + backoff_time + ) time.sleep(backoff_time) return cls return wrap From ceb3f4d91fa56a403a2c99432c1ab3412b295a05 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Fri, 28 Sep 2018 21:23:51 -0500 Subject: [PATCH 213/340] Add repack_state_returns to TestCase This allows for a state return dict to be repacked so that the top level keys are the IDs, making it much easier to analyze and craft asserts against the results. 
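For illustration, a minimal sketch of the intended usage; the state tag and
return data below are made-up examples rather than output from a real run:

    # A raw state return is keyed by the state's unique tag
    state_ret = {
        'file_|-foo_|-/etc/foo.conf_|-managed': {
            'result': True,
            'comment': 'File /etc/foo.conf is in the correct state',
            'changes': {},
        },
    }
    # After repacking, the ID declaration ('foo') becomes the key
    repacked = self.repack_state_returns(state_ret)
    assert repacked['foo']['result'] is True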
--- tests/support/unit.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tests/support/unit.py b/tests/support/unit.py index 70cd8b789153..7e862e919f9e 100644 --- a/tests/support/unit.py +++ b/tests/support/unit.py @@ -268,6 +268,19 @@ def assertNotAlmostEquals(self, *args, **kwargs): ) # return _TestCase.assertNotAlmostEquals(self, *args, **kwargs) + def repack_state_returns(self, state_ret): + ''' + Accepts a state return dict and returns it back with the top level key + names rewritten such that the ID declaration is the key instead of the + State's unique tag. For example: 'foo' instead of + 'file_|-foo_|-/etc/foo.conf|-managed' + + This makes it easier to work with state returns when crafting asserts + after running states. + ''' + assert isinstance(state_ret, dict), state_ret + return {x.split('_|-')[1]: y for x, y in six.iteritems(state_ret)} + def failUnlessEqual(self, *args, **kwargs): raise DeprecationWarning( 'The {0}() function is deprecated. Please start using {1}() ' From ed214c4cdb41ebcb02aa1ff661b15e47f42bed23 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Fri, 28 Sep 2018 21:27:43 -0500 Subject: [PATCH 214/340] Make it possible to use prereq with test and saltmod state mods this requisite passes a __prerequired__ kwarg so these need to support **kwargs for that reason. This also changes the comment for salt.function in test mode because it's inaccurate and doesn't make sense. Conflicts: - salt/states/test.py --- salt/states/saltmod.py | 22 +++++++++++----------- salt/states/test.py | 8 ++++---- tests/unit/states/test_saltmod.py | 6 ++---- 3 files changed, 17 insertions(+), 19 deletions(-) diff --git a/salt/states/saltmod.py b/salt/states/saltmod.py index 6ebf03399944..fcc06f69e907 100644 --- a/salt/states/saltmod.py +++ b/salt/states/saltmod.py @@ -427,7 +427,8 @@ def function( kwarg=None, timeout=None, batch=None, - subset=None): + subset=None, + **kwargs): # pylint: disable=unused-argument ''' Execute a single module function on a remote minion via salt or salt-ssh @@ -478,15 +479,15 @@ def function( ''' func_ret = {'name': name, - 'changes': {}, - 'comment': '', - 'result': True} + 'changes': {}, + 'comment': '', + 'result': True} if kwarg is None: kwarg = {} if isinstance(arg, six.string_types): - func_ret['warnings'] = ['Please specify \'arg\' as a list, not a string. ' - 'Modifying in place, but please update SLS file ' - 'to remove this warning.'] + func_ret['warnings'] = [ + 'Please specify \'arg\' as a list of arguments.' + ] arg = arg.split() cmd_kw = {'arg': arg or [], 'kwarg': kwarg, 'ret': ret, 'timeout': timeout} @@ -509,9 +510,8 @@ def function( fun = name if __opts__['test'] is True: - func_ret['comment'] = ( - 'Function {0} will be executed on target {1} as test={2}' - ).format(fun, tgt, six.text_type(False)) + func_ret['comment'] = \ + 'Function {0} would be executed on target {1}'.format(fun, tgt) func_ret['result'] = None return func_ret try: @@ -751,7 +751,7 @@ def runner(name, **kwargs): return ret -def parallel_runners(name, runners): +def parallel_runners(name, runners, **kwargs): # pylint: disable=unused-argument ''' Executes multiple runner modules on the master in parallel. 
diff --git a/salt/states/test.py b/salt/states/test.py index de4c4ac5ddfa..aea09a7a9201 100644 --- a/salt/states/test.py +++ b/salt/states/test.py @@ -67,7 +67,7 @@ def nop(name, **kwargs): return succeed_without_changes(name) -def succeed_without_changes(name, **kwargs): +def succeed_without_changes(name, **kwargs): # pylint: disable=unused-argument ''' Returns successful. @@ -85,7 +85,7 @@ def succeed_without_changes(name, **kwargs): return ret -def fail_without_changes(name, **kwargs): +def fail_without_changes(name, **kwargs): # pylint: disable=unused-argument ''' Returns failure. @@ -108,7 +108,7 @@ def fail_without_changes(name, **kwargs): return ret -def succeed_with_changes(name, **kwargs): +def succeed_with_changes(name, **kwargs): # pylint: disable=unused-argument ''' Returns successful and changes is not empty @@ -141,7 +141,7 @@ def succeed_with_changes(name, **kwargs): return ret -def fail_with_changes(name, **kwargs): +def fail_with_changes(name, **kwargs): # pylint: disable=unused-argument ''' Returns failure and changes is not empty. diff --git a/tests/unit/states/test_saltmod.py b/tests/unit/states/test_saltmod.py index d14edafed73d..2408ead9e136 100644 --- a/tests/unit/states/test_saltmod.py +++ b/tests/unit/states/test_saltmod.py @@ -175,13 +175,11 @@ def test_function(self): name = 'state' tgt = 'larry' - comt = ('Function state will be executed' - ' on target {0} as test=False'.format(tgt)) - ret = {'name': name, 'changes': {}, 'result': None, - 'comment': comt} + 'comment': 'Function state would be executed ' + 'on target {0}'.format(tgt)} with patch.dict(saltmod.__opts__, {'test': True}): self.assertDictEqual(saltmod.function(name, tgt), ret) From 4bc5fd008ed5bca59bdf777ca27d018892fb4696 Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Fri, 28 Sep 2018 21:30:06 -0500 Subject: [PATCH 215/340] Add integration tests for test mode onchanges/prereq Conflicts: - tests/unit/states/test_file.py --- .../files/file/base/onchanges_prereq.sls | 22 ++++ .../files/file/base/orch/req_test.sls | 3 + tests/integration/runners/test_state.py | 116 ++++++++++++++++++ tests/integration/states/test_file.py | 81 ++++++++++++ tests/unit/states/test_file.py | 1 - 5 files changed, 222 insertions(+), 1 deletion(-) create mode 100644 tests/integration/files/file/base/onchanges_prereq.sls create mode 100644 tests/integration/files/file/base/orch/req_test.sls diff --git a/tests/integration/files/file/base/onchanges_prereq.sls b/tests/integration/files/file/base/onchanges_prereq.sls new file mode 100644 index 000000000000..9ab27b71e23e --- /dev/null +++ b/tests/integration/files/file/base/onchanges_prereq.sls @@ -0,0 +1,22 @@ +one: + file.managed: + - name: {{ pillar['file1'] }} + - source: {{ pillar['source'] }} + +# This should run because there were changes +two: + test.succeed_without_changes: + - {{ pillar['req'] }}: + - file: one + +# Run the same state as "one" again, this should not cause changes +three: + file.managed: + - name: {{ pillar['file2'] }} + - source: {{ pillar['source'] }} + +# This should not run because there should be no changes +four: + test.succeed_without_changes: + - {{ pillar['req'] }}: + - file: three diff --git a/tests/integration/files/file/base/orch/req_test.sls b/tests/integration/files/file/base/orch/req_test.sls new file mode 100644 index 000000000000..cb992de82998 --- /dev/null +++ b/tests/integration/files/file/base/orch/req_test.sls @@ -0,0 +1,3 @@ +{{ salt['runtests_helpers.get_salt_temp_dir_for_path']('orch.req_test') }}: + file.managed: + - contents: 
'Hello world!' diff --git a/tests/integration/runners/test_state.py b/tests/integration/runners/test_state.py index 3506a8f7ebe3..db0e5ff5a5f0 100644 --- a/tests/integration/runners/test_state.py +++ b/tests/integration/runners/test_state.py @@ -643,3 +643,119 @@ def test_orchestration_with_pillar_dot_items(self): self.assertTrue(received) del listener signal.alarm(0) + + def test_orchestration_onchanges_and_prereq(self): + ''' + Test to confirm that the parallel state requisite works in orch + we do this by running 10 test.sleep's of 10 seconds, and insure it only takes roughly 10s + ''' + self.write_conf({ + 'fileserver_backend': ['roots'], + 'file_roots': { + 'base': [self.base_env], + }, + }) + + orch_sls = os.path.join(self.base_env, 'orch.sls') + with salt.utils.files.fopen(orch_sls, 'w') as fp_: + fp_.write(textwrap.dedent(''' + manage_a_file: + salt.state: + - tgt: minion + - sls: + - orch.req_test + + do_onchanges: + salt.function: + - tgt: minion + - name: test.ping + - onchanges: + - salt: manage_a_file + + do_prereq: + salt.function: + - tgt: minion + - name: test.ping + - prereq: + - salt: manage_a_file + ''')) + + listener = salt.utils.event.get_event( + 'master', + sock_dir=self.master_opts['sock_dir'], + transport=self.master_opts['transport'], + opts=self.master_opts) + + try: + jid1 = self.run_run_plus( + 'state.orchestrate', + 'orch', + test=True, + __reload_config=True).get('jid') + + # Run for real to create the file + self.run_run_plus( + 'state.orchestrate', + 'orch', + __reload_config=True).get('jid') + + # Run again in test mode. Since there were no changes, the + # requisites should not fire. + jid2 = self.run_run_plus( + 'state.orchestrate', + 'orch', + test=True, + __reload_config=True).get('jid') + finally: + try: + os.remove(os.path.join(TMP, 'orch.req_test')) + except OSError: + pass + + assert jid1 is not None + assert jid2 is not None + + tags = {'salt/run/{0}/ret'.format(x): x for x in (jid1, jid2)} + ret = {} + + signal.signal(signal.SIGALRM, self.alarm_handler) + signal.alarm(self.timeout) + try: + while True: + event = listener.get_event(full=True) + if event is None: + continue + + if event['tag'] in tags: + ret[tags.pop(event['tag'])] = self.repack_state_returns( + event['data']['return']['data']['master'] + ) + if not tags: + # If tags is empty, we've grabbed all the returns we + # wanted, so let's stop listening to the event bus. + break + finally: + del listener + signal.alarm(0) + + for sls_id in ('manage_a_file', 'do_onchanges', 'do_prereq'): + # The first time through, all three states should have a None + # result, while the second time through, they should all have a + # True result. + assert ret[jid1][sls_id]['result'] is None, \ + 'result of {0} ({1}) is not None'.format( + sls_id, + ret[jid1][sls_id]['result']) + assert ret[jid2][sls_id]['result'] is True, \ + 'result of {0} ({1}) is not True'.format( + sls_id, + ret[jid2][sls_id]['result']) + + # The file.managed state should have shown changes in the test mode + # return data. + assert ret[jid1]['manage_a_file']['changes'] + + # After the file was created, running again in test mode should have + # shown no changes. 
+ assert not ret[jid2]['manage_a_file']['changes'], \ + ret[jid2]['manage_a_file']['changes'] diff --git a/tests/integration/states/test_file.py b/tests/integration/states/test_file.py index d42bcd363f30..016b9ccc04c1 100644 --- a/tests/integration/states/test_file.py +++ b/tests/integration/states/test_file.py @@ -816,6 +816,87 @@ def test_managed_keep_source_false_salt(self, name): result = self.run_function('cp.is_cached', [source, saltenv]) assert result == '', 'File is still cached at {0}'.format(result) + @with_tempfile(create=False) + @with_tempfile(create=False) + def test_file_managed_onchanges(self, file1, file2): + ''' + Test file.managed state with onchanges + ''' + pillar = {'file1': file1, + 'file2': file2, + 'source': 'salt://testfile', + 'req': 'onchanges'} + + # Lay down the file used in the below SLS to ensure that when it is + # run, there are no changes. + self.run_state( + 'file.managed', + name=pillar['file2'], + source=pillar['source']) + + ret = self.repack_state_returns( + self.run_function( + 'state.apply', + mods='onchanges_prereq', + pillar=pillar, + test=True, + ) + ) + # The file states should both exit with None + assert ret['one']['result'] is None, ret['one']['result'] + assert ret['three']['result'] is True, ret['three']['result'] + # The first file state should have changes, since a new file was + # created. The other one should not, since we already created that file + # before applying the SLS file. + assert ret['one']['changes'] + assert not ret['three']['changes'], ret['three']['changes'] + # The state watching 'one' should have been run due to changes + assert ret['two']['comment'] == 'Success!', ret['two']['comment'] + # The state watching 'three' should not have been run + assert ret['four']['comment'] == \ + 'State was not run because none of the onchanges reqs changed', \ + ret['four']['comment'] + + @with_tempfile(create=False) + @with_tempfile(create=False) + def test_file_managed_prereq(self, file1, file2): + ''' + Test file.managed state with prereq + ''' + pillar = {'file1': file1, + 'file2': file2, + 'source': 'salt://testfile', + 'req': 'prereq'} + + # Lay down the file used in the below SLS to ensure that when it is + # run, there are no changes. + self.run_state( + 'file.managed', + name=pillar['file2'], + source=pillar['source']) + + ret = self.repack_state_returns( + self.run_function( + 'state.apply', + mods='onchanges_prereq', + pillar=pillar, + test=True, + ) + ) + # The file states should both exit with None + assert ret['one']['result'] is None, ret['one']['result'] + assert ret['three']['result'] is True, ret['three']['result'] + # The first file state should have changes, since a new file was + # created. The other one should not, since we already created that file + # before applying the SLS file. 
+ assert ret['one']['changes'] + assert not ret['three']['changes'], ret['three']['changes'] + # The state watching 'one' should have been run due to changes + assert ret['two']['comment'] == 'Success!', ret['two']['comment'] + # The state watching 'three' should not have been run + assert ret['four']['comment'] == 'No changes detected', \ + ret['four']['comment'] + def test_directory(self): ''' file.directory diff --git a/tests/unit/states/test_file.py b/tests/unit/states/test_file.py index ea5fa3def531..701ad7debf88 100644 --- a/tests/unit/states/test_file.py +++ b/tests/unit/states/test_file.py @@ -839,7 +839,6 @@ def test_directory(self): ret = {'name': name, 'result': False, 'comment': '', - 'changes': {}, 'changes': {}} comt = ('Must provide name to file.directory') From 885d6ff2814390067a8426a3882e095d3e7bf8cb Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Sun, 30 Sep 2018 16:45:25 -0500 Subject: [PATCH 216/340] Update file.touch unit tests to reflect addition of changes in test mode --- tests/unit/states/test_file.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tests/unit/states/test_file.py b/tests/unit/states/test_file.py index 701ad7debf88..2c6cf320fee9 100644 --- a/tests/unit/states/test_file.py +++ b/tests/unit/states/test_file.py @@ -1357,14 +1357,18 @@ def test_touch(self): with patch.object(os.path, 'exists', mock_f): with patch.dict(filestate.__opts__, {'test': True}): comt = ('File {0} is set to be created'.format(name)) - ret.update({'comment': comt, 'result': None}) + ret.update({'comment': comt, + 'result': None, + 'changes': {'new': name}}) self.assertDictEqual(filestate.touch(name), ret) with patch.dict(filestate.__opts__, {'test': False}): with patch.object(os.path, 'isdir', mock_f): comt = ('Directory not present to touch file {0}' .format(name)) - ret.update({'comment': comt, 'result': False}) + ret.update({'comment': comt, + 'result': False, + 'changes': {}}) self.assertDictEqual(filestate.touch(name), ret) with patch.object(os.path, 'isdir', mock_t): From 0f296bb7a7981012707a9a452150365bac70af0d Mon Sep 17 00:00:00 2001 From: Erik Johnson Date: Mon, 1 Oct 2018 12:57:59 -0500 Subject: [PATCH 217/340] Update kernelpkg test to reflect pchanges removal --- tests/unit/states/test_kernelpkg.py | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/tests/unit/states/test_kernelpkg.py b/tests/unit/states/test_kernelpkg.py index f2ba87ecee88..4a81aacbf47c 100644 --- a/tests/unit/states/test_kernelpkg.py +++ b/tests/unit/states/test_kernelpkg.py @@ -114,22 +114,28 @@ def test_latest_active_with_changes(self): Test - latest_active when a new kernel is available ''' reboot = MagicMock(return_value=True) - with patch.dict(kernelpkg.__salt__, {'kernelpkg.needs_reboot': reboot}): - with patch.dict(kernelpkg.__opts__, {'test': False}): - kernelpkg.__salt__['system.reboot'].reset_mock() - ret = kernelpkg.latest_active(name=STATE_NAME) - self.assertEqual(ret['name'], STATE_NAME) - self.assertTrue(ret['result']) - self.assertIsInstance(ret['changes'], dict) - self.assertIsInstance(ret['comment'], six.text_type) - self.assert_called_once(kernelpkg.__salt__['system.reboot']) + latest = MagicMock(return_value=1) + with patch.dict( + kernelpkg.__salt__, {'kernelpkg.needs_reboot': reboot, + 'kernelpkg.latest_installed': latest}), \ + patch.dict(kernelpkg.__opts__, {'test': False}): + kernelpkg.__salt__['system.reboot'].reset_mock() + ret = kernelpkg.latest_active(name=STATE_NAME) + self.assertEqual(ret['name'], 
STATE_NAME) + self.assertTrue(ret['result']) + self.assertIsInstance(ret['changes'], dict) + self.assertIsInstance(ret['comment'], six.text_type) + self.assert_called_once(kernelpkg.__salt__['system.reboot']) with patch.dict(kernelpkg.__opts__, {'test': True}): kernelpkg.__salt__['system.reboot'].reset_mock() ret = kernelpkg.latest_active(name=STATE_NAME) self.assertEqual(ret['name'], STATE_NAME) self.assertIsNone(ret['result']) - self.assertDictEqual(ret['changes'], {}) + self.assertDictEqual( + ret['changes'], + {'kernel': {'new': 1, 'old': 0}} + ) self.assertIsInstance(ret['comment'], six.text_type) kernelpkg.__salt__['system.reboot'].assert_not_called() From fb3b75d9ae6c779ca0c8567bc46296c96cb435a6 Mon Sep 17 00:00:00 2001 From: Ch3LL Date: Mon, 8 Apr 2019 10:43:58 -0400 Subject: [PATCH 218/340] Fix tests for pchanges backport into 2018.3 Conflicts: - tests/unit/states/test_file.py --- tests/unit/states/test_net_napalm_yang.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/unit/states/test_net_napalm_yang.py b/tests/unit/states/test_net_napalm_yang.py index ccb0fa7cf289..40318977bb95 100644 --- a/tests/unit/states/test_net_napalm_yang.py +++ b/tests/unit/states/test_net_napalm_yang.py @@ -30,8 +30,7 @@ def setup_loader_modules(self): def test_managed(self): ret = {'changes': {}, 'comment': 'Loaded.', - 'name': 'test', 'result': False, - 'pchanges': {'compliance_report': {'complies': False}}} + 'name': 'test', 'result': False} parse = MagicMock(return_value='abcdef') temp_file = MagicMock(return_value='') compliance_report = MagicMock(return_value={'complies': False}) @@ -55,8 +54,7 @@ def test_managed(self): def test_configured(self): ret = {'changes': {}, 'comment': 'Loaded.', - 'name': 'test', 'result': False, - 'pchanges': {}} + 'name': 'test', 'result': False} load_config = MagicMock(return_value={'comment': 'Loaded.'}) with patch('salt.utils.files.fopen'): From 6bfa45600c20babf8251f14adbfa8ffc98f47b4b Mon Sep 17 00:00:00 2001 From: Wayne Werner Date: Thu, 18 Apr 2019 11:06:49 -0400 Subject: [PATCH 219/340] Add tests for wraps During testing I also discovered that `_ignores_kwargs` was also missing the @wraps decorator. Now it has it. 
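For context, a minimal standalone sketch of why the @wraps decorator matters
here; the decorator and function names below are illustrative and not part of
Salt:

    from functools import wraps

    def plain(fn):
        def wrapper(*args, **kwargs):
            return fn(*args, **kwargs)
        return wrapper

    def wrapping(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            return fn(*args, **kwargs)
        return wrapper

    def func():
        pass

    # Without @wraps the wrapper's own metadata leaks through
    assert plain(func).__name__ == 'wrapper'
    # With @wraps the original name and module are preserved
    assert wrapping(func).__name__ == 'func'
    assert wrapping(func).__module__ == func.__module__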
--- salt/utils/decorators/__init__.py | 1 + tests/unit/utils/test_decorators.py | 36 +++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/salt/utils/decorators/__init__.py b/salt/utils/decorators/__init__.py index 7048724aa910..81e51488af39 100644 --- a/salt/utils/decorators/__init__.py +++ b/salt/utils/decorators/__init__.py @@ -574,6 +574,7 @@ def ignores_kwargs(*kwarg_names): List of argument names to ignore ''' def _ignores_kwargs(fn): + @wraps(fn) def __ignores_kwargs(*args, **kwargs): kwargs_filtered = kwargs.copy() for name in kwarg_names: diff --git a/tests/unit/utils/test_decorators.py b/tests/unit/utils/test_decorators.py index b4e3a68686a2..7d4d37ca8d7a 100644 --- a/tests/unit/utils/test_decorators.py +++ b/tests/unit/utils/test_decorators.py @@ -352,3 +352,39 @@ def test_with_deprecated_opt_in_use_superseded_and_deprecated_in_pillar(self): depr._curr_version = self._mk_version("Helium")[1] with self.assertRaises(SaltConfigurationError): assert depr(self.new_function)() == self.new_function() + + def test_with_depreciated_should_wrap_function(self): + def func(): pass + + wrapped = decorators.with_deprecated({}, "Beryllium")(func) + assert wrapped.__module__ == func.__module__ + + def test_is_deprecated_should_wrap_function(self): + def func(): pass + + wrapped = decorators.is_deprecated({}, "Beryllium")(func) + assert wrapped.__module__ == func.__module__ + + def test_ensure_unicode_args_should_wrap_function(self): + def func(): pass + + wrapped = decorators.ensure_unicode_args(func) + assert wrapped.__module__ == func.__module__ + + def test_ignores_kwargs_should_wrap_function(self): + def func(): pass + + wrapped = decorators.ignores_kwargs('foo', 'bar')(func) + assert wrapped.__module__ == func.__module__ + + def test_memoize_should_wrap_function(self): + def func(): pass + + wrapped = decorators.memoize(func) + assert wrapped.__module__ == func.__module__ + + def timing_should_wrap_function(self): + def func(): pass + + wrapped = decorators.timing(func) + assert wrapped.__module__ == func.__module__ \ No newline at end of file From f3c7f2775b1cdbcdf227e7edded28d2e2cda61cf Mon Sep 17 00:00:00 2001 From: Ch3LL Date: Thu, 11 Apr 2019 12:20:18 -0400 Subject: [PATCH 220/340] remove pchanges for windows file modules Conflicts: - salt/modules/win_file.py - salt/states/file.py From 93bdd086cfd7a26e5643aeb7575409cce6284610 Mon Sep 17 00:00:00 2001 From: Ch3LL Date: Thu, 11 Apr 2019 12:45:59 -0400 Subject: [PATCH 221/340] Update dict correctly in file state Conflicts: - salt/states/file.py From 81b865f667528a6e8aa5b75d8b209c519c2488d5 Mon Sep 17 00:00:00 2001 From: Ch3LL Date: Thu, 11 Apr 2019 12:56:29 -0400 Subject: [PATCH 222/340] use same newfile message on linux for windows file state From 72a0881a6cbd216b155ef31040fab129bc444028 Mon Sep 17 00:00:00 2001 From: Wayne Werner Date: Thu, 18 Apr 2019 11:21:09 -0400 Subject: [PATCH 223/340] Lint cleanup --- tests/unit/utils/test_decorators.py | 36 ++++++++++------------------- 1 file changed, 12 insertions(+), 24 deletions(-) diff --git a/tests/unit/utils/test_decorators.py b/tests/unit/utils/test_decorators.py index 7d4d37ca8d7a..ceb1167508ff 100644 --- a/tests/unit/utils/test_decorators.py +++ b/tests/unit/utils/test_decorators.py @@ -354,37 +354,25 @@ def test_with_deprecated_opt_in_use_superseded_and_deprecated_in_pillar(self): assert depr(self.new_function)() == self.new_function() def test_with_depreciated_should_wrap_function(self): - def func(): pass - - wrapped = decorators.with_deprecated({}, 
"Beryllium")(func) - assert wrapped.__module__ == func.__module__ + wrapped = decorators.with_deprecated({}, "Beryllium")(self.old_function) + assert wrapped.__module__ == self.old_function.__module__ def test_is_deprecated_should_wrap_function(self): - def func(): pass - - wrapped = decorators.is_deprecated({}, "Beryllium")(func) - assert wrapped.__module__ == func.__module__ + wrapped = decorators.is_deprecated({}, "Beryllium")(self.old_function) + assert wrapped.__module__ == self.old_function.__module__ def test_ensure_unicode_args_should_wrap_function(self): - def func(): pass - - wrapped = decorators.ensure_unicode_args(func) - assert wrapped.__module__ == func.__module__ + wrapped = decorators.ensure_unicode_args(self.old_function) + assert wrapped.__module__ == self.old_function.__module__ def test_ignores_kwargs_should_wrap_function(self): - def func(): pass - - wrapped = decorators.ignores_kwargs('foo', 'bar')(func) - assert wrapped.__module__ == func.__module__ + wrapped = decorators.ignores_kwargs('foo', 'bar')(self.old_function) + assert wrapped.__module__ == self.old_function.__module__ def test_memoize_should_wrap_function(self): - def func(): pass - - wrapped = decorators.memoize(func) - assert wrapped.__module__ == func.__module__ + wrapped = decorators.memoize(self.old_function) + assert wrapped.__module__ == self.old_function.__module__ def timing_should_wrap_function(self): - def func(): pass - - wrapped = decorators.timing(func) - assert wrapped.__module__ == func.__module__ \ No newline at end of file + wrapped = decorators.timing(self.old_function) + assert wrapped.__module__ == self.old_function.__module__ From 00e214ce5e455dba141a3534abe6fe5a0bc109b1 Mon Sep 17 00:00:00 2001 From: Wayne Werner Date: Thu, 4 Apr 2019 17:03:26 -0500 Subject: [PATCH 224/340] Update state.module docs Hopefully this should be easier to grok, as well as more obvious that the documentation contains two styles. --- salt/states/module.py | 178 ++++++++++++++++++++++++++++-------------- 1 file changed, 120 insertions(+), 58 deletions(-) diff --git a/salt/states/module.py b/salt/states/module.py index 9968529ffda9..2d2ae586779d 100644 --- a/salt/states/module.py +++ b/salt/states/module.py @@ -3,89 +3,109 @@ Execution of Salt modules from within states ============================================ +.. note:: + + There are two styles of calling ``module.run``. To use the new style + you must add the following to your ``/etc/salt/minion`` config file: + + .. code-block:: yaml + + use_superseded: + - module.run + With `module.run` these states allow individual execution module calls to be -made via states. To call a single module function use a :mod:`module.run ` -state: +made via states. To call a single module function use a +:mod:`module.run ` state: .. code-block:: yaml + # Old Style mine.send: module.run: - network.interfaces -Note that this example is probably unnecessary to use in practice, since the -``mine_functions`` and ``mine_interval`` config parameters can be used to -schedule updates for the mine (see :ref:`here ` for more info). + # New Style + mine.send: + module.run: + # Note the trailing `:` + - network.interfaces: + + +.. note:: + + The previous example is contrived and probably unnecessary to use in practice, + since the ``mine_functions`` and ``mine_interval`` config parameters + can be used to schedule updates for the mine (see :ref:`here ` + for more info). 
It is sometimes desirable to trigger a function call after a state is executed, for this the :mod:`module.wait ` state can be used: .. code-block:: yaml - fetch_out_of_band: - module.run: - - git.fetch: - - cwd: /path/to/my/repo - - user: myuser - - opts: '--all' - -Another example: + add example to hosts: + file.append: + - name: /etc/hosts + - text: 203.0.113.13 example.com -.. code-block:: yaml + # Old Style + mine.send: + module.wait: + - name: hosts.list + - watch: + - file: add example to hosts + # New Style mine.send: - module.run: - - network.ip_addrs: - - interface: eth0 + module.wait: + # Again, note the trailing `:` + - hosts.list_hosts: + - watch: + - file: add example to hosts -And more complex example: +In the old style, all arguments that the ``module`` state does not consume are +passed through to the execution module function being executed: .. code-block:: yaml - eventsviewer: + show off module.run with args: module.run: - - task.create_task: - - name: events-viewer - - user_name: System - - action_type: Execute - - cmd: 'c:\netops\scripts\events_viewer.bat' - - trigger_type: 'Daily' - - start_date: '2017-1-20' - - start_time: '11:59PM' - -Please note, this is a new behaviour of `module.run` function. - -With the previous `module.run` there are several differences: - -- The need of `name` keyword -- The need of `m_` prefix -- No way to call more than one function at once + - name: test.random_hash + - size: 42 + - hash_type: sha256 -For example: +In the new style, they are simply nested under the module name: .. code-block:: yaml - mine.send: - module.wait: - - name: network.interfaces - - watch: - - file: /etc/network/interfaces + show off module.run with args: + module.run: + # Note the lack of `name: `, and trailing `:` + - test.random_hash: + - size: 42 + - hash_type: sha256 -All arguments that the ``module`` state does not consume are passed through to -the execution module function being executed: +If the module takes ``*args``, they can be passed in as well: .. code-block:: yaml - fetch_out_of_band: + args and kwargs: module.run: - - name: git.fetch - - cwd: /path/to/my/repo - - user: myuser - - opts: '--all' + - test.arg: + - isn't + - this + - fun + - this: that + - salt: stack + + +Legacy (Default) Examples +------------------------- -Due to how the state system works, if a module function accepts an -argument called, ``name``, then ``m_name`` must be used to specify that -argument, to avoid a collision with the ``name`` argument. +If you're using the legacy ``module.run``, due to how the state system works, +if a module function accepts an argument called, ``name``, then ``m_name`` must +be used to specify that argument, to avoid a collision with the ``name`` +argument. Here is a list of keywords hidden by the state system, which must be prefixed with ``m_``: @@ -133,6 +153,15 @@ delvol_on_destroy: 'True' } +Other modules take the keyword arguments using this style: + +.. code-block:: yaml + + mac_enable_ssh: + module.run: + - name: system.set_remote_login + - enable: True + Another example that creates a recurring task that runs a batch file on a Windows system: @@ -151,26 +180,59 @@ start_time: '11:59PM' } -Another option is to use the new version of `module.run`. With which you can call one (or more!) -functions at once the following way: + + +Modern Examples +--------------- + +Here are some other examples using the modern ``module.run``: .. 
code-block:: yaml - call_something: + fetch_out_of_band: module.run: - git.fetch: - cwd: /path/to/my/repo - user: myuser - opts: '--all' -By default this behaviour is not turned on. In order to do so, please add the following -configuration to the minion: +Yet another example: .. code-block:: yaml - use_superseded: - - module.run + mine.send: + module.run: + - network.ip_addrs: + - interface: eth0 +And more complex example: + +.. code-block:: yaml + + eventsviewer: + module.run: + - task.create_task: + - name: events-viewer + - user_name: System + - action_type: Execute + - cmd: 'c:\netops\scripts\events_viewer.bat' + - trigger_type: 'Daily' + - start_date: '2017-1-20' + - start_time: '11:59PM' + +With the modern ``module.run``, you can also run multiple different modules +within the same state: + +.. code-block:: yaml + + run all the things: + module.run: + - test.arg: + - so: cool + - test.version: + - test.true: + - test.fib: + - 4 ''' from __future__ import absolute_import, print_function, unicode_literals From 10a0b0bfe532707d1f1e0f991ab20ef4acc0aebd Mon Sep 17 00:00:00 2001 From: Wayne Werner Date: Fri, 5 Apr 2019 12:20:26 -0500 Subject: [PATCH 225/340] Add note for when old style will be removed --- salt/states/module.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/salt/states/module.py b/salt/states/module.py index 2d2ae586779d..f614ab90e770 100644 --- a/salt/states/module.py +++ b/salt/states/module.py @@ -13,6 +13,11 @@ use_superseded: - module.run +.. note:: + + The old style of ``module.run`` will no longer be available in the Sodium + release. + With `module.run` these states allow individual execution module calls to be made via states. To call a single module function use a :mod:`module.run ` state: From 3cb974b8544d9f22cda3b691d669fc99da91b992 Mon Sep 17 00:00:00 2001 From: Wayne Werner Date: Wed, 17 Apr 2019 10:39:18 -0400 Subject: [PATCH 226/340] Rewrite module.run docs Took into account some feedback, also tested more things. --- salt/states/module.py | 254 ++++++++++++++++++++++++++---------------- 1 file changed, 157 insertions(+), 97 deletions(-) diff --git a/salt/states/module.py b/salt/states/module.py index f614ab90e770..d21e327c4ec1 100644 --- a/salt/states/module.py +++ b/salt/states/module.py @@ -5,71 +5,137 @@ .. note:: - There are two styles of calling ``module.run``. To use the new style - you must add the following to your ``/etc/salt/minion`` config file: + There are two styles of calling ``module.run``. **The legacy style will no + longer be available starting in the Sodium release.** To opt-in early to the + new style you must add the following to your ``/etc/salt/minion`` config + file: .. code-block:: yaml use_superseded: - module.run -.. note:: +With `module.run` these states allow individual execution module calls to be +made via states. Here's a contrived example, to show you how it's done: - The old style of ``module.run`` will no longer be available in the Sodium - release. +.. code-block:: yaml -With `module.run` these states allow individual execution module calls to be -made via states. To call a single module function use a -:mod:`module.run ` state: + # New Style + test.random_hash: + module.run: + - test.random_hash: + - size: 42 + - hash_type: sha256 + + # Legacy Style + test.random_hash: + module.run: + - size: 42 + - hash_type: sha256 + +In the new style, the state ID (``test.random_hash``, in this case) is +irrelevant when using ``module.run``. It could have very well been written: .. 
code-block:: yaml - # Old Style - mine.send: + Generate a random hash: module.run: - - network.interfaces + - test.random_hash: + - size: 42 + - hash_type: sha256 - # New Style - mine.send: +For a simple state like that it's not a big deal, but if the module you're +using has certain parameters, things can get cluttered, fast. Using the +contrived custom module (stuck in ``/srv/salt/_modules/foo.py``, or your +configured file_roots_): + +.. code-block:: python + + def bar(name, names, fun, state, saltenv): + return "Name: {name} Names: {names} Fun: {fun} State: {state} Saltenv: {saltenv}".format(**locals()) + +Your legacy state has to look like this: + +.. code-block:: yaml + + # Legacy style + Unfortunate example: module.run: - # Note the trailing `:` - - network.interfaces: + - name: foo.bar + - m_name: Some name + - m_names: + - Such names + - very wow + - m_state: Arkansas + - m_fun: Such fun + - m_saltenv: Salty +With the new style it's much cleaner: -.. note:: +.. code-block:: yaml - The previous example is contrived and probably unnecessary to use in practice, - since the ``mine_functions`` and ``mine_interval`` config parameters - can be used to schedule updates for the mine (see :ref:`here ` - for more info). + # New style + Better: + module.run: + - foo.bar: + - name: Some name + - names: + - Such names + - very wow + - state: Arkansas + - fun: Such fun + - saltenv: Salty + +The new style also allows multiple modules in one state. For instance, you can +do this: -It is sometimes desirable to trigger a function call after a state is executed, -for this the :mod:`module.wait ` state can be used: +.. code-block:: yaml + + Do many things: + module.run: + - test.random_hash: + - size: 10 + - hash_type: md5 + # Note the `:` at the end + - test.true: + - test.arg: + - this + - has + - args + - and: kwargs + - isn't: that neat? + # Note the `:` at the end, too + - test.version: + - test.fib: + - 4 + +Where in the legacy style you would have had to split your states like this: .. code-block:: yaml - add example to hosts: - file.append: - - name: /etc/hosts - - text: 203.0.113.13 example.com + test.random_hash: + module.run: + - size: 10 + - hash_type: md5 - # Old Style - mine.send: - module.wait: - - name: hosts.list - - watch: - - file: add example to hosts + test.nop: + module.run - # New Style - mine.send: - module.wait: - # Again, note the trailing `:` - - hosts.list_hosts: - - watch: - - file: add example to hosts + test.arg: + module.run: + - args: + - this + - has + - args + - kwargs: + and: kwargs + isn't: that neat? -In the old style, all arguments that the ``module`` state does not consume are -passed through to the execution module function being executed: + test.version: + module.run + +Another difference is that in the legacy style, unconsumed arguments to the +``module`` state were simply passed into the module function being executed: .. code-block:: yaml @@ -79,7 +145,8 @@ - size: 42 - hash_type: sha256 -In the new style, they are simply nested under the module name: +The new style is much more explicit, with the arguments and keyword arguments +being nested under the name of the function: .. code-block:: yaml @@ -90,7 +157,7 @@ - size: 42 - hash_type: sha256 -If the module takes ``*args``, they can be passed in as well: +If the function takes ``*args``, they can be passed in as well: .. code-block:: yaml @@ -103,6 +170,52 @@ - this: that - salt: stack +Modern Examples +--------------- + +Here are some other examples using the modern ``module.run``: + +.. 
code-block:: yaml + + fetch_out_of_band: + module.run: + - git.fetch: + - cwd: /path/to/my/repo + - user: myuser + - opts: '--all' + +A more complex example: + +.. code-block:: yaml + + eventsviewer: + module.run: + - task.create_task: + - name: events-viewer + - user_name: System + - action_type: Execute + - cmd: 'c:\netops\scripts\events_viewer.bat' + - trigger_type: 'Daily' + - start_date: '2017-1-20' + - start_time: '11:59PM' + +It is sometimes desirable to trigger a function call after a state is executed, +for this the :mod:`module.wait ` state can be used: + +.. code-block:: yaml + + add example to hosts: + file.append: + - name: /etc/hosts + - text: 203.0.113.13 example.com + + # New Style + mine.send: + module.wait: + # Again, note the trailing `:` + - hosts.list_hosts: + - watch: + - file: add example to hosts Legacy (Default) Examples ------------------------- @@ -185,59 +298,7 @@ start_time: '11:59PM' } - - -Modern Examples ---------------- - -Here are some other examples using the modern ``module.run``: - -.. code-block:: yaml - - fetch_out_of_band: - module.run: - - git.fetch: - - cwd: /path/to/my/repo - - user: myuser - - opts: '--all' - -Yet another example: - -.. code-block:: yaml - - mine.send: - module.run: - - network.ip_addrs: - - interface: eth0 - -And more complex example: - -.. code-block:: yaml - - eventsviewer: - module.run: - - task.create_task: - - name: events-viewer - - user_name: System - - action_type: Execute - - cmd: 'c:\netops\scripts\events_viewer.bat' - - trigger_type: 'Daily' - - start_date: '2017-1-20' - - start_time: '11:59PM' - -With the modern ``module.run``, you can also run multiple different modules -within the same state: - -.. code-block:: yaml - - run all the things: - module.run: - - test.arg: - - so: cool - - test.version: - - test.true: - - test.fib: - - 4 +.. _file_roots: https://docs.saltstack.com/en/latest/ref/configuration/master.html#file-roots ''' from __future__ import absolute_import, print_function, unicode_literals @@ -549,7 +610,6 @@ def _run(name, **kwargs): nkwargs = {} if aspec.keywords and aspec.keywords in kwargs: nkwargs = kwargs.pop(aspec.keywords) - if not isinstance(nkwargs, dict): msg = "'{0}' must be a dict." 
ret['comment'] = msg.format(aspec.keywords) From 15a887efd2bb2ee4d3441060a8472eb51c50a188 Mon Sep 17 00:00:00 2001 From: Ch3LL Date: Thu, 18 Apr 2019 14:53:09 -0400 Subject: [PATCH 227/340] Fix pylint --- salt/engines/logstash_engine.py | 2 ++ salt/modules/win_file.py | 1 + 2 files changed, 3 insertions(+) diff --git a/salt/engines/logstash_engine.py b/salt/engines/logstash_engine.py index e7286cf55910..8734aee5ba47 100644 --- a/salt/engines/logstash_engine.py +++ b/salt/engines/logstash_engine.py @@ -43,6 +43,7 @@ def __virtual__(): if logstash is not None \ else (False, 'python-logstash not installed') + def event_bus_context(opts): if opts.get('id').endswith('_master'): event_bus = salt.utils.event.get_master_event( @@ -58,6 +59,7 @@ def event_bus_context(opts): listen=True) return event_bus + def start(host, port=5959, tag='salt/engine/logstash', proto='udp'): ''' Listen to salt events and forward them to logstash diff --git a/salt/modules/win_file.py b/salt/modules/win_file.py index e2d465d418ed..5eea2200faa4 100644 --- a/salt/modules/win_file.py +++ b/salt/modules/win_file.py @@ -1647,6 +1647,7 @@ def check_perms(path, inheritance=inheritance, reset=reset) + def set_perms(path, grant_perms=None, deny_perms=None, From 27c79d2a210d1bdecbabec843dcd93febc5379fc Mon Sep 17 00:00:00 2001 From: Pedro Algarvio Date: Wed, 17 Apr 2019 19:07:57 +0100 Subject: [PATCH 228/340] Update integration.modules.test_network.NetworkTest.test_network_ping test address --- tests/integration/modules/test_network.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/modules/test_network.py b/tests/integration/modules/test_network.py index 4d96d909390b..0738fbb97d6d 100644 --- a/tests/integration/modules/test_network.py +++ b/tests/integration/modules/test_network.py @@ -11,7 +11,7 @@ import salt.utils.path import salt.utils.platform -URL = 'repo.saltstack.com' +URL = 'google-public-dns-a.google.com' class NetworkTest(ModuleCase): From 5a0464bdefd2714e9ee8553c313cdbb97987e451 Mon Sep 17 00:00:00 2001 From: twangboy Date: Thu, 18 Apr 2019 14:15:44 -0600 Subject: [PATCH 229/340] Bring 51661 into 2019.2.1 --- salt/utils/gitfs.py | 2 +- tests/integration/pillar/test_git_pillar.py | 181 ++++++++++++++++++++ 2 files changed, 182 insertions(+), 1 deletion(-) diff --git a/salt/utils/gitfs.py b/salt/utils/gitfs.py index d52d1218c6ef..e4f3fe81df81 100644 --- a/salt/utils/gitfs.py +++ b/salt/utils/gitfs.py @@ -3016,7 +3016,7 @@ def checkout(self): if repo.branch == '__env__' and hasattr(repo, 'all_saltenvs'): env = self.opts.get('pillarenv') \ or self.opts.get('saltenv') \ - or self.opts.get('git_pillar_base') + or 'base' elif repo.env: env = repo.env else: diff --git a/tests/integration/pillar/test_git_pillar.py b/tests/integration/pillar/test_git_pillar.py index 5d9a374f6e50..9ade88840bf9 100644 --- a/tests/integration/pillar/test_git_pillar.py +++ b/tests/integration/pillar/test_git_pillar.py @@ -488,6 +488,33 @@ def test_all_saltenvs(self): 'nested_dict': {'dev': True}}} ) + def test_all_saltenvs_base(self): + ''' + Test all_saltenvs parameter with base pillarenv. + ''' + ret = self.get_pillar('''\ + file_ignore_regex: [] + file_ignore_glob: [] + git_pillar_provider: gitpython + cachedir: {cachedir} + extension_modules: {extmods} + ext_pillar: + - git: + - __env__ {url_extra_repo}: + - all_saltenvs: master + - __env__ {url}: + - mountpoint: nowhere + ''') + self.assertEqual( + ret, + {'branch': 'master', + 'motd': 'The force will be with you. 
Always.', + 'mylist': ['master'], + 'mydict': {'master': True, + 'nested_list': ['master'], + 'nested_dict': {'master': True}}} + ) + @destructiveTest @skipIf(NO_MOCK, NO_MOCK_REASON) @@ -1742,6 +1769,103 @@ def test_all_saltenvs(self, grains): ''') self.assertEqual(ret, expected) + @requires_system_grains + def test_all_saltenvs_base(self, grains): + ''' + Test all_saltenvs parameter. + ''' + expected = {'branch': 'master', + 'motd': 'The force will be with you. Always.', + 'mylist': ['master'], + 'mydict': {'master': True, + 'nested_list': ['master'], + 'nested_dict': {'master': True} + } + } + + # Test with passphraseless key and global credential options + ret = self.get_pillar('''\ + file_ignore_regex: [] + file_ignore_glob: [] + git_pillar_provider: pygit2 + git_pillar_pubkey: {pubkey_nopass} + git_pillar_privkey: {privkey_nopass} + cachedir: {cachedir} + extension_modules: {extmods} + ext_pillar: + - git: + - __env__ {url_extra_repo}: + - all_saltenvs: master + - __env__ {url}: + - mountpoint: nowhere + ''') + self.assertEqual(ret, expected) + + # Test with passphraseless key and per-repo credential options + ret = self.get_pillar('''\ + file_ignore_regex: [] + file_ignore_glob: [] + git_pillar_provider: pygit2 + cachedir: {cachedir} + extension_modules: {extmods} + ext_pillar: + - git: + - __env__ {url_extra_repo}: + - all_saltenvs: master + - pubkey: {pubkey_nopass} + - privkey: {privkey_nopass} + - __env__ {url}: + - mountpoint: nowhere + - pubkey: {pubkey_nopass} + - privkey: {privkey_nopass} + ''') + self.assertEqual(ret, expected) + + if grains['os_family'] == 'Debian': + # passphrase-protected currently does not work here + return + + # Test with passphrase-protected key and global credential options + ret = self.get_pillar('''\ + file_ignore_regex: [] + file_ignore_glob: [] + git_pillar_provider: pygit2 + git_pillar_pubkey: {pubkey_withpass} + git_pillar_privkey: {privkey_withpass} + git_pillar_passphrase: {passphrase} + cachedir: {cachedir} + extension_modules: {extmods} + ext_pillar: + - git: + - __env__ {url_extra_repo}: + - all_saltenvs: master + - __env__ {url}: + - mountpoint: nowhere + ''') + self.assertEqual(ret, expected) + + # Test with passphrase-protected key and per-repo credential options + ret = self.get_pillar('''\ + file_ignore_regex: [] + file_ignore_glob: [] + git_pillar_provider: pygit2 + cachedir: {cachedir} + extension_modules: {extmods} + ext_pillar: + - git: + - __env__ {url_extra_repo}: + - all_saltenvs: master + - pubkey: {pubkey_nopass} + - privkey: {privkey_nopass} + - passphrase: {passphrase} + - __env__ {url}: + - mountpoint: nowhere + - pubkey: {pubkey_nopass} + - privkey: {privkey_nopass} + - passphrase: {passphrase} + ''') + self.assertEqual(ret, expected) + @skipIf(NO_MOCK, NO_MOCK_REASON) @skipIf(_windows_or_mac(), 'minion is windows or mac') @@ -2119,6 +2243,33 @@ def test_all_saltenvs(self): 'nested_dict': {'dev': True}}} ) + def test_all_saltenvs_base(self): + ''' + Test all_saltenvs parameter with base pillarenv. + ''' + ret = self.get_pillar('''\ + file_ignore_regex: [] + file_ignore_glob: [] + git_pillar_provider: pygit2 + cachedir: {cachedir} + extension_modules: {extmods} + ext_pillar: + - git: + - __env__ {url_extra_repo}: + - all_saltenvs: master + - __env__ {url}: + - mountpoint: nowhere + ''') + self.assertEqual( + ret, + {'branch': 'master', + 'motd': 'The force will be with you. 
Always.', + 'mylist': ['master'], + 'mydict': {'master': True, + 'nested_list': ['master'], + 'nested_dict': {'master': True}}} + ) + @skipIf(NO_MOCK, NO_MOCK_REASON) @skipIf(_windows_or_mac(), 'minion is windows or mac') @@ -2719,3 +2870,33 @@ def test_all_saltenvs(self): 'nested_list': ['dev'], 'nested_dict': {'dev': True}}} ) + + def test_all_saltenvs_base(self): + ''' + Test all_saltenvs parameter with base pillarenv. + ''' + ret = self.get_pillar('''\ + file_ignore_regex: [] + file_ignore_glob: [] + git_pillar_provider: pygit2 + git_pillar_user: {user} + git_pillar_password: {password} + git_pillar_insecure_auth: True + cachedir: {cachedir} + extension_modules: {extmods} + ext_pillar: + - git: + - __env__ {url_extra_repo}: + - all_saltenvs: master + - __env__ {url}: + - mountpoint: nowhere + ''') + self.assertEqual( + ret, + {'branch': 'master', + 'motd': 'The force will be with you. Always.', + 'mylist': ['master'], + 'mydict': {'master': True, + 'nested_list': ['master'], + 'nested_dict': {'master': True}}} + ) From 105784a0da99f99622101bb6952e6de55a62996d Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Thu, 18 Apr 2019 22:09:13 +0000 Subject: [PATCH 230/340] Remove unused method from pytest engine --- .../files/engines/runtests_engine.py | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) diff --git a/tests/integration/files/engines/runtests_engine.py b/tests/integration/files/engines/runtests_engine.py index 4db9e7280952..7cc20cd3e1c9 100644 --- a/tests/integration/files/engines/runtests_engine.py +++ b/tests/integration/files/engines/runtests_engine.py @@ -56,11 +56,7 @@ def start(self): @gen.coroutine def _start(self): - if self.opts['__role'] == 'minion': - yield self.listen_to_minion_connected_event() - else: - self.io_loop.spawn_callback(self.fire_master_started_event) - + self.io_loop.spawn_callback(self.fire_master_started_event) port = int(self.opts['runtests_conn_check_port']) log.info('Starting Pytest Engine(role=%s) on port %s', self.opts['__role'], port) self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) @@ -92,19 +88,6 @@ def handle_connection(self, connection, address): # This is not macOS !? pass - @gen.coroutine - def listen_to_minion_connected_event(self): - log.info('Listening for minion connected event...') - minion_start_event_match = 'salt/minion/{0}/start'.format(self.opts['id']) - event_bus = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=True) - event_bus.subscribe(minion_start_event_match) - while True: - event = event_bus.get_event(full=True, no_block=True) - if event is not None and event['tag'] == minion_start_event_match: - log.info('Got minion connected event: %s', event) - break - yield gen.sleep(0.25) - @gen.coroutine def fire_master_started_event(self): log.info('Firing salt-master started event...') From ebe2d7238cb01e1444c1a99e75caf78c33522df0 Mon Sep 17 00:00:00 2001 From: "Gareth J. Greenaway" Date: Thu, 18 Apr 2019 16:51:31 -0700 Subject: [PATCH 231/340] Fixing the beacons.reset function. Once the reset has taken place in beacons/__init__.py we need to fire an event back to complete the loop and ensure that everything worked as expected. 
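As a rough sketch of the other side of that loop, a caller on the minion could
wait for the completion tag before trusting the result; the __opts__ reference
and the 30 second timeout below are illustrative only:

    import salt.utils.event

    # Listen for the completion event fired by Beacon.reset()
    event_bus = salt.utils.event.get_event('minion', opts=__opts__, listen=True)
    event_ret = event_bus.get_event(
        tag='/salt/minion/minion_beacon_reset_complete', wait=30)
    reset_ok = bool(event_ret and event_ret.get('complete'))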
--- salt/beacons/__init__.py | 11 +++++++++++ salt/modules/beacons.py | 4 ++-- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/salt/beacons/__init__.py b/salt/beacons/__init__.py index d464e247402d..7b42ca37fd7c 100644 --- a/salt/beacons/__init__.py +++ b/salt/beacons/__init__.py @@ -434,3 +434,14 @@ def reset(self): Reset the beacons to defaults ''' self.opts['beacons'] = {} + + comment = 'Beacon Reset' + complete = True + + # Fire the complete event back along with updated list of beacons + evt = salt.utils.event.get_event('minion', opts=self.opts) + evt.fire_event({'complete': complete, 'comment': comment, + 'beacons': self.opts['beacons']}, + tag='/salt/minion/minion_beacon_reset_complete') + + return True diff --git a/salt/modules/beacons.py b/salt/modules/beacons.py index 3adad6c91d4f..e87353700f06 100644 --- a/salt/modules/beacons.py +++ b/salt/modules/beacons.py @@ -613,7 +613,7 @@ def reset(**kwargs): ret = {'comment': [], 'result': True} - if 'test' in kwargs and kwargs['test']: + if kwargs.get('test'): ret['comment'] = 'Beacons would be reset.' else: try: @@ -628,7 +628,7 @@ def reset(**kwargs): ret['comment'] = 'Beacon configuration reset.' else: ret['result'] = False - ret['comment'] = event_ret['comment'] + ret['comment'] = 'Something went wrong.' return ret except KeyError: # Effectively a no-op, since we can't really return without an event system From 070ae845deb1234114a72d03b877c214cc1c83f6 Mon Sep 17 00:00:00 2001 From: tanlingyun2005 Date: Fri, 19 Apr 2019 16:37:11 +0800 Subject: [PATCH 232/340] fix TypeError: argument of type int is not iterable --- salt/cli/batch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/cli/batch.py b/salt/cli/batch.py index e3a7bf9bcf53..8b55cf114b05 100644 --- a/salt/cli/batch.py +++ b/salt/cli/batch.py @@ -85,7 +85,7 @@ def get_bnum(self): ''' partition = lambda x: float(x) / 100.0 * len(self.minions) try: - if '%' in self.opts['batch']: + if isinstance(self.opts['batch'], str) and '%' in self.opts['batch']: res = partition(float(self.opts['batch'].strip('%'))) if res < 1: return int(math.ceil(res)) From 423f6f96b710566c5ed55e6ed539a8e7c4b49a1b Mon Sep 17 00:00:00 2001 From: "Daniel A. 
Wozniak" Date: Fri, 19 Apr 2019 16:16:01 +0000 Subject: [PATCH 233/340] Fix vent assertion race condition --- tests/integration/reactor/test_reactor.py | 2 +- tests/support/mixins.py | 34 +++++++++++++++++------ 2 files changed, 26 insertions(+), 10 deletions(-) diff --git a/tests/integration/reactor/test_reactor.py b/tests/integration/reactor/test_reactor.py index 665d3cdb43b7..596119d34780 100644 --- a/tests/integration/reactor/test_reactor.py +++ b/tests/integration/reactor/test_reactor.py @@ -35,4 +35,4 @@ def test_ping_reaction(self): e.fire_event({'a': 'b'}, '/test_event') - self.assertMinionEventReceived({'a': 'b'}) + self.assertMinionEventReceived({'a': 'b'}, timeout=30) diff --git a/tests/support/mixins.py b/tests/support/mixins.py index 4132eb2def92..8c6a0131f6b3 100644 --- a/tests/support/mixins.py +++ b/tests/support/mixins.py @@ -660,6 +660,14 @@ def _clean_queue(): sock_dir=a_config.get_config('minion')['sock_dir'], opts=a_config.get_config('minion'), ) + + # Wait for event bus to be connected + while not event.connect_pull(30): + time.sleep(1) + + # Notify parent process that the event bus is connected + q.put('CONNECTED') + while True: try: events = event.get_event(full=False) @@ -682,6 +690,11 @@ def __new__(cls, *args, **kwargs): target=_fetch_events, args=(cls.q,) ) cls.fetch_proc.start() + # Wait for the event bus to be connected + msg = cls.q.get(block=True) + if msg != 'CONNECTED': + # Just in case something very bad happens + raise RuntimeError('Unexpected message in test\'s event queue') return object.__new__(cls) def __exit__(self, *args, **kwargs): @@ -691,19 +704,22 @@ def assertMinionEventFired(self, tag): #TODO raise salt.exceptions.NotImplemented('assertMinionEventFired() not implemented') - def assertMinionEventReceived(self, desired_event): - queue_wait = 5 # 2.5s - while self.q.empty(): - time.sleep(0.5) # Wait for events to be pushed into the queue - queue_wait -= 1 - if queue_wait <= 0: - raise AssertionError('Queue wait timer expired') - while not self.q.empty(): # This is not thread-safe and may be inaccurate - event = self.q.get() + def assertMinionEventReceived(self, desired_event, timeout=5, sleep_time=0.5): + start = time.time() + while True: + try: + event = self.q.get(False) + except Empty: + time.sleep(sleep_time) + if time.time() - start >= timeout: + break + continue if isinstance(event, dict): event.pop('_stamp') if desired_event == event: self.fetch_proc.terminate() return True + if time.time() - start >= timeout: + break self.fetch_proc.terminate() raise AssertionError('Event {0} was not received by minion'.format(desired_event)) From c0010229132483ed0aa9e1abda0565a547dd00bf Mon Sep 17 00:00:00 2001 From: "Daniel A. 
Wozniak" Date: Fri, 19 Apr 2019 16:22:52 +0000 Subject: [PATCH 234/340] Fix linter --- tests/support/mixins.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/support/mixins.py b/tests/support/mixins.py index 8c6a0131f6b3..f43a1d3aa9cd 100644 --- a/tests/support/mixins.py +++ b/tests/support/mixins.py @@ -47,6 +47,7 @@ # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import zip # pylint: disable=import-error,redefined-builtin +from salt.ext.six.moves.queue import Empty # pylint: disable=import-error,no-name-in-module log = logging.getLogger(__name__) From 211915ca90a27544b0230c4ae352754d1af58e71 Mon Sep 17 00:00:00 2001 From: tanlingyun2005 Date: Sat, 20 Apr 2019 02:52:50 +0800 Subject: [PATCH 235/340] fix salt/cli/batch.py TypeError: argument of type int is not iterable --- salt/cli/batch.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/cli/batch.py b/salt/cli/batch.py index 8b55cf114b05..208e980adba3 100644 --- a/salt/cli/batch.py +++ b/salt/cli/batch.py @@ -85,7 +85,8 @@ def get_bnum(self): ''' partition = lambda x: float(x) / 100.0 * len(self.minions) try: - if isinstance(self.opts['batch'], str) and '%' in self.opts['batch']: + if (isinstance(self.opts['batch'], str) or isinstance(self.opts['batch'], unicode)) \ + and '%' in self.opts['batch']: res = partition(float(self.opts['batch'].strip('%'))) if res < 1: return int(math.ceil(res)) From 509c79728c041ffb3e63efee882bd07fa1b76d93 Mon Sep 17 00:00:00 2001 From: tanlingyun2005 Date: Sat, 20 Apr 2019 03:30:44 +0800 Subject: [PATCH 236/340] bug fix salt/cli/batch.py --- salt/cli/batch.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/salt/cli/batch.py b/salt/cli/batch.py index 208e980adba3..02477b553586 100644 --- a/salt/cli/batch.py +++ b/salt/cli/batch.py @@ -8,6 +8,7 @@ import math import time import copy +import sys from datetime import datetime, timedelta # Import salt libs @@ -85,8 +86,11 @@ def get_bnum(self): ''' partition = lambda x: float(x) / 100.0 * len(self.minions) try: - if (isinstance(self.opts['batch'], str) or isinstance(self.opts['batch'], unicode)) \ - and '%' in self.opts['batch']: + if sys.version_info.major == 3: + batch_is_str = isinstance(self.opts['batch'], str) + else: + batch_is_str = isinstance(self.opts['batch'], str) or isinstance(self.opts['batch'], unicode) + if batch_is_str and '%' in self.opts['batch']: res = partition(float(self.opts['batch'].strip('%'))) if res < 1: return int(math.ceil(res)) From 538d5cf1c3fce8c0de72dbfda2d2682d53c2a57e Mon Sep 17 00:00:00 2001 From: Ch3LL Date: Fri, 19 Apr 2019 16:00:39 -0400 Subject: [PATCH 237/340] Remove pchanges from win_dacl --- salt/utils/win_dacl.py | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/salt/utils/win_dacl.py b/salt/utils/win_dacl.py index 8eb69c024a44..8d56f8df01d5 100644 --- a/salt/utils/win_dacl.py +++ b/salt/utils/win_dacl.py @@ -2125,17 +2125,15 @@ def _check_perms(obj_name, obj_type, new_perms, cur_perms, access_mode, ret): changes[user]['applies_to'] = applies_to if changes: - if 'perms' not in ret['pchanges']: - ret['pchanges']['perms'] = {} if 'perms' not in ret['changes']: ret['changes']['perms'] = {} for user in changes: user_name = get_name(principal=user) if __opts__['test'] is True: - if user not in ret['pchanges']['perms']: - ret['pchanges']['perms'][user] = {} - ret['pchanges']['perms'][user][access_mode] = changes[user][access_mode] + if user not in ret['changes']['perms']: + 
ret['changes']['perms'][user] = {} + ret['changes']['perms'][user][access_mode] = changes[user][access_mode] else: # Get applies_to applies_to = None @@ -2291,7 +2289,6 @@ def check_perms(obj_name, if not ret: ret = {'name': obj_name, 'changes': {}, - 'pchanges': {}, 'comment': [], 'result': True} orig_comment = '' @@ -2305,7 +2302,7 @@ def check_perms(obj_name, current_owner = get_owner(obj_name=obj_name, obj_type=obj_type) if owner != current_owner: if __opts__['test'] is True: - ret['pchanges']['owner'] = owner + ret['changes']['owner'] = owner else: try: set_owner(obj_name=obj_name, @@ -2323,7 +2320,7 @@ def check_perms(obj_name, if not inheritance == get_inheritance(obj_name=obj_name, obj_type=obj_type): if __opts__['test'] is True: - ret['pchanges']['inheritance'] = inheritance + ret['changes']['inheritance'] = inheritance else: try: set_inheritance( @@ -2371,9 +2368,9 @@ def check_perms(obj_name, user_name.lower() not in set(k.lower() for k in grant_perms): if 'grant' in cur_perms['Not Inherited'][user_name]: if __opts__['test'] is True: - if 'remove_perms' not in ret['pchanges']: - ret['pchanges']['remove_perms'] = {} - ret['pchanges']['remove_perms'].update( + if 'remove_perms' not in ret['changes']: + ret['changes']['remove_perms'] = {} + ret['changes']['remove_perms'].update( {user_name: cur_perms['Not Inherited'][user_name]}) else: if 'remove_perms' not in ret['changes']: @@ -2390,9 +2387,9 @@ def check_perms(obj_name, user_name.lower() not in set(k.lower() for k in deny_perms): if 'deny' in cur_perms['Not Inherited'][user_name]: if __opts__['test'] is True: - if 'remove_perms' not in ret['pchanges']: - ret['pchanges']['remove_perms'] = {} - ret['pchanges']['remove_perms'].update( + if 'remove_perms' not in ret['changes']: + ret['changes']['remove_perms'] = {} + ret['changes']['remove_perms'].update( {user_name: cur_perms['Not Inherited'][user_name]}) else: if 'remove_perms' not in ret['changes']: @@ -2416,7 +2413,7 @@ def check_perms(obj_name, ret['comment'] = '\n'.join(ret['comment']) # Set result for test = True - if __opts__['test'] and (ret['changes'] or ret['pchanges']): + if __opts__['test'] and (ret['changes']): ret['result'] = None return ret From 72aea243579dac43bdd0d324cb4339f21a89725b Mon Sep 17 00:00:00 2001 From: Ch3LL Date: Fri, 19 Apr 2019 17:22:28 -0400 Subject: [PATCH 238/340] Remove pchanges from win_dacl --- salt/utils/win_dacl.py | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/salt/utils/win_dacl.py b/salt/utils/win_dacl.py index 8eb69c024a44..8d56f8df01d5 100644 --- a/salt/utils/win_dacl.py +++ b/salt/utils/win_dacl.py @@ -2125,17 +2125,15 @@ def _check_perms(obj_name, obj_type, new_perms, cur_perms, access_mode, ret): changes[user]['applies_to'] = applies_to if changes: - if 'perms' not in ret['pchanges']: - ret['pchanges']['perms'] = {} if 'perms' not in ret['changes']: ret['changes']['perms'] = {} for user in changes: user_name = get_name(principal=user) if __opts__['test'] is True: - if user not in ret['pchanges']['perms']: - ret['pchanges']['perms'][user] = {} - ret['pchanges']['perms'][user][access_mode] = changes[user][access_mode] + if user not in ret['changes']['perms']: + ret['changes']['perms'][user] = {} + ret['changes']['perms'][user][access_mode] = changes[user][access_mode] else: # Get applies_to applies_to = None @@ -2291,7 +2289,6 @@ def check_perms(obj_name, if not ret: ret = {'name': obj_name, 'changes': {}, - 'pchanges': {}, 'comment': [], 'result': True} orig_comment = '' @@ -2305,7 
+2302,7 @@ def check_perms(obj_name, current_owner = get_owner(obj_name=obj_name, obj_type=obj_type) if owner != current_owner: if __opts__['test'] is True: - ret['pchanges']['owner'] = owner + ret['changes']['owner'] = owner else: try: set_owner(obj_name=obj_name, @@ -2323,7 +2320,7 @@ def check_perms(obj_name, if not inheritance == get_inheritance(obj_name=obj_name, obj_type=obj_type): if __opts__['test'] is True: - ret['pchanges']['inheritance'] = inheritance + ret['changes']['inheritance'] = inheritance else: try: set_inheritance( @@ -2371,9 +2368,9 @@ def check_perms(obj_name, user_name.lower() not in set(k.lower() for k in grant_perms): if 'grant' in cur_perms['Not Inherited'][user_name]: if __opts__['test'] is True: - if 'remove_perms' not in ret['pchanges']: - ret['pchanges']['remove_perms'] = {} - ret['pchanges']['remove_perms'].update( + if 'remove_perms' not in ret['changes']: + ret['changes']['remove_perms'] = {} + ret['changes']['remove_perms'].update( {user_name: cur_perms['Not Inherited'][user_name]}) else: if 'remove_perms' not in ret['changes']: @@ -2390,9 +2387,9 @@ def check_perms(obj_name, user_name.lower() not in set(k.lower() for k in deny_perms): if 'deny' in cur_perms['Not Inherited'][user_name]: if __opts__['test'] is True: - if 'remove_perms' not in ret['pchanges']: - ret['pchanges']['remove_perms'] = {} - ret['pchanges']['remove_perms'].update( + if 'remove_perms' not in ret['changes']: + ret['changes']['remove_perms'] = {} + ret['changes']['remove_perms'].update( {user_name: cur_perms['Not Inherited'][user_name]}) else: if 'remove_perms' not in ret['changes']: @@ -2416,7 +2413,7 @@ def check_perms(obj_name, ret['comment'] = '\n'.join(ret['comment']) # Set result for test = True - if __opts__['test'] and (ret['changes'] or ret['pchanges']): + if __opts__['test'] and (ret['changes']): ret['result'] = None return ret From c3971a58025597c1ee1c3d40c37082ce9bbef9db Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Sat, 20 Apr 2019 03:16:46 +0000 Subject: [PATCH 239/340] Fix salt.modules.publish and salt.states.x509 tests The peers.conf addition for the x509 state tests broke the salt.modules.publish tests. Move the configuration to the test master's config. Also increase the publish call timeout in the x509 module to make the x509 state tests reliable on all platforms. 
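For reference, a sketch of the peer-publish round trip this change tunes. The
CA minion id and signing arguments are placeholders, and the snippet assumes it
runs inside x509.create_certificate, where __salt__ and the salt.utils.data /
salt.exceptions imports are already available:

.. code-block:: python

    signing_kwargs = {'signing_policy': 'www'}   # placeholder arguments

    certs = __salt__['publish.publish'](
        tgt='ca-minion',                         # must be allowed by the master's peer ACL
        fun='x509.sign_remote_certificate',
        arg=salt.utils.data.decode_dict(signing_kwargs, to_str=True),
        timeout=30,                              # raised here; previously the publish default applied
    )
    if not any(certs):
        raise salt.exceptions.SaltInvocationError(
            'ca_server did not respond; check the peer configuration on the master'
        )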
--- salt/modules/x509.py | 5 ++++- tests/integration/files/conf/master | 3 ++- tests/integration/files/conf/master.d/peers.conf | 3 --- tests/integration/states/test_x509.py | 3 +++ 4 files changed, 9 insertions(+), 5 deletions(-) delete mode 100644 tests/integration/files/conf/master.d/peers.conf diff --git a/salt/modules/x509.py b/salt/modules/x509.py index fdaa41aba44b..92e39a6b885f 100644 --- a/salt/modules/x509.py +++ b/salt/modules/x509.py @@ -1391,10 +1391,13 @@ def create_certificate( for ignore in list(_STATE_INTERNAL_KEYWORDS) + \ ['listen_in', 'preqrequired', '__prerequired__']: kwargs.pop(ignore, None) + # TODO: Make timeout configurable in Neon certs = __salt__['publish.publish']( tgt=ca_server, fun='x509.sign_remote_certificate', - arg=salt.utils.data.decode_dict(kwargs, to_str=True)) + arg=salt.utils.data.decode_dict(kwargs, to_str=True), + timeout=30 + ) if not any(certs): raise salt.exceptions.SaltInvocationError( diff --git a/tests/integration/files/conf/master b/tests/integration/files/conf/master index 68e38f618c37..5c2a9ad8a876 100644 --- a/tests/integration/files/conf/master +++ b/tests/integration/files/conf/master @@ -28,7 +28,8 @@ tcp_master_workers: 64515 peer: '.*': - - 'test.*' + - '(x509|test).*' + #- 'x509.sign_remote_certificate' ext_pillar: - ext_pillar_opts: diff --git a/tests/integration/files/conf/master.d/peers.conf b/tests/integration/files/conf/master.d/peers.conf deleted file mode 100644 index b28b03ddc0b4..000000000000 --- a/tests/integration/files/conf/master.d/peers.conf +++ /dev/null @@ -1,3 +0,0 @@ -peer: - .*: - - x509.sign_remote_certificate diff --git a/tests/integration/states/test_x509.py b/tests/integration/states/test_x509.py index 99709aa434cf..1808d3c19ef2 100644 --- a/tests/integration/states/test_x509.py +++ b/tests/integration/states/test_x509.py @@ -60,6 +60,9 @@ def setUp(self): def tearDown(self): os.remove(os.path.join(TMP_PILLAR_TREE, 'signing_policies.sls')) os.remove(os.path.join(TMP_PILLAR_TREE, 'top.sls')) + certs_path = os.path.join(TMP, 'pki') + if os.path.exists(certs_path): + salt.utils.files.rm_rf(certs_path) self.run_function('saltutil.refresh_pillar') def run_function(self, *args, **kwargs): From 9b6f9f94d0efb6863f88f2e7f9f85b8043727676 Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Sat, 20 Apr 2019 03:57:36 +0000 Subject: [PATCH 240/340] Clean up cruft --- tests/integration/files/conf/master | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/integration/files/conf/master b/tests/integration/files/conf/master index 5c2a9ad8a876..4c23eb33618b 100644 --- a/tests/integration/files/conf/master +++ b/tests/integration/files/conf/master @@ -29,7 +29,6 @@ tcp_master_workers: 64515 peer: '.*': - '(x509|test).*' - #- 'x509.sign_remote_certificate' ext_pillar: - ext_pillar_opts: From 0ea007de6f59c708956778cc479b13d1eb653d1a Mon Sep 17 00:00:00 2001 From: "Daniel A. 
Wozniak" Date: Sun, 21 Apr 2019 22:19:45 +0000 Subject: [PATCH 241/340] Parse chattr version and fix test case - Parse the system's chattr version from the chattr man page - Fix integration.states.test_cron on centos 6 --- salt/modules/file.py | 37 ++++++++++++++++++++++++++++++++- tests/filename_map.yml | 3 +++ tests/unit/modules/test_file.py | 27 +++++++++++++++++++++++- 3 files changed, 65 insertions(+), 2 deletions(-) diff --git a/salt/modules/file.py b/salt/modules/file.py index f85103d18d18..fe079c161c93 100644 --- a/salt/modules/file.py +++ b/salt/modules/file.py @@ -23,6 +23,7 @@ import shutil import stat import string +import subprocess import sys import tempfile import time @@ -61,6 +62,7 @@ import salt.utils.url import salt.utils.user import salt.utils.data +import salt.utils.versions from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError, get_error_message as _get_error_message from salt.utils.files import HASHES, HASHES_REVMAP @@ -159,6 +161,32 @@ def _splitlines_preserving_trailing_newline(str): return lines +def _get_chattr_man(): + ''' + Get the contents of the chattr man page + ''' + return subprocess.check_output(['man', 'chattr']) + + +def _parse_chattr_man(man): + ''' + Parse the contents of a chattr man page to find the E2fsprogs version + ''' + match = re.search('E2fsprogs version [0-9\.]+', man) + if match: + version = match.group().strip('E2fsprogs version ') + else: + version = None + return version + + +def _chattr_version(): + ''' + Return the version of chattr installed + ''' + return _parse_chattr_man(_get_chattr_man()) + + def gid_to_group(gid): ''' Convert the group id to the group name on this system @@ -577,7 +605,14 @@ def lsattr(path): for line in result.splitlines(): if not line.startswith('lsattr: '): vals = line.split(None, 1) - results[vals[1]] = re.findall(r"[aAcCdDeijPsStTu]", vals[0]) + needed_version = salt.utils.versions.LooseVersion('1.41.12') + chattr_version = salt.utils.versions.LooseVersion(_chattr_version()) + # The version of chattr on Centos 6 does not support extended + # attributes. 
+ if chattr_version > needed_version: + results[vals[1]] = re.findall(r"[aAcCdDeijPsStTu]", vals[0]) + else: + results[vals[1]] = re.findall(r"[acdijstuADST]", vals[0]) return results diff --git a/tests/filename_map.yml b/tests/filename_map.yml index 3f4dc01cb370..a3b3b708db20 100644 --- a/tests/filename_map.yml +++ b/tests/filename_map.yml @@ -43,6 +43,9 @@ salt/modules/dockermod.py: - integration.states.test_docker_container - integration.states.test_docker_network +salt/modules/file.py: + - integration.states.test_cron + salt/modules/influxdb08mod.py: - unit.states.test_influxdb08_database - unit.states.test_influxdb08_user diff --git a/tests/unit/modules/test_file.py b/tests/unit/modules/test_file.py index 2c77ebeb4948..217b107fba5e 100644 --- a/tests/unit/modules/test_file.py +++ b/tests/unit/modules/test_file.py @@ -8,7 +8,7 @@ import textwrap # Import Salt Testing libs -from tests.support.helpers import with_tempfile +from tests.support.helpers import with_tempfile, dedent from tests.support.mixins import LoaderModuleMockMixin from tests.support.paths import TMP from tests.support.unit import TestCase, skipIf @@ -2045,3 +2045,28 @@ def test_source_list_for_list_returns_local_file_proto_from_dict(self): ret = filemod.source_list( [{'file://' + self.myfile: ''}], 'filehash', 'base') self.assertEqual(list(ret), ['file://' + self.myfile, 'filehash']) + + +class ChattrVersionTests(TestCase): + CHATTR_MAN = ( + 'AVAILABILITY\n' + 'chattr is part of the e2fsprogs package and is available ' + 'from http://e2fsprogs.sourceforge.net.\n' + 'SEE ALSO\n' + ' lsattr(1), btrfs(5), ext4(5), xfs(5).\n\n' + 'E2fsprogs version 1.43.4 ' + ' ' + 'January 2017 ' + ' ' + ' CHATTR(1)' + ) + + def test_chattr_version(self): + man_out = dedent(self.CHATTR_MAN) + parsed_version = filemod._parse_chattr_man(man_out) + assert parsed_version == '1.43.4', parsed_version + + def test_chattr_version(self): + with patch('subprocess.check_output', return_value=self.CHATTR_MAN): + parsed_version = filemod._chattr_version() + assert parsed_version == '1.43.4', parsed_version From cf88c273536311228fbcf1b1ceca26030a47652e Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Sun, 21 Apr 2019 22:43:15 +0000 Subject: [PATCH 242/340] Fix linter issues --- salt/modules/file.py | 4 ++-- tests/unit/modules/test_file.py | 10 ++++++++-- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/salt/modules/file.py b/salt/modules/file.py index fe079c161c93..555c42f9758d 100644 --- a/salt/modules/file.py +++ b/salt/modules/file.py @@ -172,7 +172,7 @@ def _parse_chattr_man(man): ''' Parse the contents of a chattr man page to find the E2fsprogs version ''' - match = re.search('E2fsprogs version [0-9\.]+', man) + match = re.search(r'E2fsprogs version [0-9\.]+', man) if match: version = match.group().strip('E2fsprogs version ') else: @@ -608,7 +608,7 @@ def lsattr(path): needed_version = salt.utils.versions.LooseVersion('1.41.12') chattr_version = salt.utils.versions.LooseVersion(_chattr_version()) # The version of chattr on Centos 6 does not support extended - # attributes. + # attributes. 
if chattr_version > needed_version: results[vals[1]] = re.findall(r"[aAcCdDeijPsStTu]", vals[0]) else: diff --git a/tests/unit/modules/test_file.py b/tests/unit/modules/test_file.py index 217b107fba5e..465a520ec9a2 100644 --- a/tests/unit/modules/test_file.py +++ b/tests/unit/modules/test_file.py @@ -2061,12 +2061,18 @@ class ChattrVersionTests(TestCase): ' CHATTR(1)' ) - def test_chattr_version(self): + def test__parse_chattr_version(self): + ''' + Validate we can parse the E2fsprogs version from the chattr man page + ''' man_out = dedent(self.CHATTR_MAN) parsed_version = filemod._parse_chattr_man(man_out) assert parsed_version == '1.43.4', parsed_version - def test_chattr_version(self): + def test__chattr_version(self): + ''' + The _chattr_version method works + ''' with patch('subprocess.check_output', return_value=self.CHATTR_MAN): parsed_version = filemod._chattr_version() assert parsed_version == '1.43.4', parsed_version From 169c2d56aad39a41870ce9fd12c311112a190691 Mon Sep 17 00:00:00 2001 From: "Daniel A. Wozniak" Date: Mon, 22 Apr 2019 00:11:13 +0000 Subject: [PATCH 243/340] Chattr version py3 fix --- salt/modules/file.py | 5 ++++- tests/unit/modules/test_file.py | 4 ++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/salt/modules/file.py b/salt/modules/file.py index 555c42f9758d..af31d5a81a1f 100644 --- a/salt/modules/file.py +++ b/salt/modules/file.py @@ -172,7 +172,10 @@ def _parse_chattr_man(man): ''' Parse the contents of a chattr man page to find the E2fsprogs version ''' - match = re.search(r'E2fsprogs version [0-9\.]+', man) + match = re.search( + r'E2fsprogs version [0-9\.]+', + salt.utils.string_utils.to_str(man), + ) if match: version = match.group().strip('E2fsprogs version ') else: diff --git a/tests/unit/modules/test_file.py b/tests/unit/modules/test_file.py index 465a520ec9a2..829520821b30 100644 --- a/tests/unit/modules/test_file.py +++ b/tests/unit/modules/test_file.py @@ -2048,7 +2048,7 @@ def test_source_list_for_list_returns_local_file_proto_from_dict(self): class ChattrVersionTests(TestCase): - CHATTR_MAN = ( + CHATTR_MAN = salt.utils.stringutils.to_bytes(( 'AVAILABILITY\n' 'chattr is part of the e2fsprogs package and is available ' 'from http://e2fsprogs.sourceforge.net.\n' @@ -2059,7 +2059,7 @@ class ChattrVersionTests(TestCase): 'January 2017 ' ' ' ' CHATTR(1)' - ) + )) def test__parse_chattr_version(self): ''' From e69fcc57de4715a60f228e7c3a05aca7291690b4 Mon Sep 17 00:00:00 2001 From: "Daniel A. 
Wozniak" Date: Mon, 22 Apr 2019 00:46:01 +0000 Subject: [PATCH 244/340] Fix typo --- salt/modules/file.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/modules/file.py b/salt/modules/file.py index af31d5a81a1f..01bdc86a6803 100644 --- a/salt/modules/file.py +++ b/salt/modules/file.py @@ -174,7 +174,7 @@ def _parse_chattr_man(man): ''' match = re.search( r'E2fsprogs version [0-9\.]+', - salt.utils.string_utils.to_str(man), + salt.utils.stringutils.to_str(man), ) if match: version = match.group().strip('E2fsprogs version ') From 69d3b25f3a7d75849c5d1aceba6c964d80c2ec61 Mon Sep 17 00:00:00 2001 From: Ch3LL Date: Mon, 22 Apr 2019 16:16:51 -0400 Subject: [PATCH 245/340] Fix vsphere doc warning - add xml code block --- salt/modules/vsphere.py | 80 +++++++++++++++++++++-------------------- 1 file changed, 41 insertions(+), 39 deletions(-) diff --git a/salt/modules/vsphere.py b/salt/modules/vsphere.py index 854a13ddc010..96254e0591ad 100644 --- a/salt/modules/vsphere.py +++ b/salt/modules/vsphere.py @@ -6936,45 +6936,47 @@ def add_host_to_dvs(host, username, password, vmknic_name, vmnic_name, to sniff the SOAP stream from Powershell to our vSphere server and got this snippet out: - - <_this type="HostNetworkSystem">networkSystem-187 - - - edit - vSwitch0 - - 7812 - - - - edit - 73 a4 05 50 b0 d2 7e b9-38 80 5d 24 65 8f da 70 - - - vmnic0 - - - - - remove - - Management Network-1 - - - - edit - vmk0 - - - - 73 a4 05 50 b0 d2 7e b9-38 80 5d 24 65 8f da 70 - dvportgroup-191 - - - - - modify - + .. code-block:: xml + + + <_this type="HostNetworkSystem">networkSystem-187 + + + edit + vSwitch0 + + 7812 + + + + edit + 73 a4 05 50 b0 d2 7e b9-38 80 5d 24 65 8f da 70 + + + vmnic0 + + + + + remove + + Management Network-1 + + + + edit + vmk0 + + + + 73 a4 05 50 b0 d2 7e b9-38 80 5d 24 65 8f da 70 + dvportgroup-191 + + + + + modify + The SOAP API maps closely to PyVmomi, so from there it was (relatively) easy to figure out what Python to write. 
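The pattern applied by the vsphere change above, shown on a trimmed example
(the function body and XML payload are placeholders): literal XML left at the
docstring margin is picked up by the reStructuredText parser and can trigger
doc build warnings, while nesting it under a code-block directive renders it
verbatim.

.. code-block:: python

    def add_host_to_dvs(host, username, password, **kwargs):
        '''
        Add a host to a distributed virtual switch.

        .. code-block:: xml

            <SOAP-ENV:Body>
                <!-- captured SOAP payload, indented under the directive -->
            </SOAP-ENV:Body>
        '''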
From 915c780775736d0044f44b22150854e200a6aa76 Mon Sep 17 00:00:00 2001 From: twangboy Date: Mon, 22 Apr 2019 16:30:32 -0600 Subject: [PATCH 246/340] Fix issues with the win_file tests Remove pchanges Migrate to __utils__['dacl.check_perms'] --- tests/unit/modules/test_win_file.py | 213 +++++++++++----------------- 1 file changed, 79 insertions(+), 134 deletions(-) diff --git a/tests/unit/modules/test_win_file.py b/tests/unit/modules/test_win_file.py index b11fd0cd4870..6081922f5ea8 100644 --- a/tests/unit/modules/test_win_file.py +++ b/tests/unit/modules/test_win_file.py @@ -15,10 +15,10 @@ # Import Salt Libs import salt.modules.win_file as win_file +import salt.utils.win_dacl as win_dacl import salt.modules.temp as temp -from salt.exceptions import CommandExecutionError import salt.utils.platform -import salt.utils.win_dacl +from salt.exceptions import CommandExecutionError @skipIf(NO_MOCK, NO_MOCK_REASON) @@ -33,11 +33,11 @@ class WinFileTestCase(TestCase, LoaderModuleMockMixin): FAKE_PATH = os.sep.join(['path', 'does', 'not', 'exist']) def setup_loader_modules(self): - return {win_file: { - '__utils__': { - 'dacl.set_perms': salt.utils.win_dacl.set_perms + return { + win_file: { + '__utils__': {'dacl.set_perms': win_dacl.set_perms} } - }} + } def test_issue_43328_stats(self): ''' @@ -98,8 +98,12 @@ def setup_loader_modules(self): self.current_user = salt.utils.win_functions.get_current_user(False) return { win_file: { - '__opts__': { - 'test': False}}} + '__utils__': {'dacl.check_perms': win_dacl.check_perms} + }, + win_dacl: { + '__opts__': {'test': False}, + } + } def setUp(self): self.temp_file = tempfile.NamedTemporaryFile(delete=False) @@ -119,12 +123,11 @@ def test_check_perms_set_owner_test_true(self): ''' Test setting the owner of a file with test=True ''' - with patch.dict(win_file.__opts__, {'test': True}): - expected = {'comment': '', - 'changes': {}, - 'pchanges': {'owner': 'Administrators'}, - 'name': self.temp_file.name, - 'result': None} + expected = {'comment': '', + 'changes': {'owner': 'Administrators'}, + 'name': self.temp_file.name, + 'result': None} + with patch.dict(win_dacl.__opts__, {'test': True}): ret = win_file.check_perms(path=self.temp_file.name, owner='Administrators', inheritance=None) @@ -135,7 +138,6 @@ def test_check_perms_set_owner(self): Test setting the owner of a file ''' expected = {'comment': '', - 'pchanges': {}, 'changes': {'owner': 'Administrators'}, 'name': self.temp_file.name, 'result': True} @@ -148,19 +150,14 @@ def test_check_perms_deny_test_true(self): ''' Test setting deny perms on a file with test=True ''' - with patch.dict(win_file.__opts__, {'test': True}): - expected = {'comment': '', - 'pchanges': { - 'deny_perms': { - 'Users': {'perms': 'read_execute'}}}, - 'changes': {'deny_perms': {}}, - 'name': self.temp_file.name, - 'result': None} + expected = {'comment': '', + 'changes': {'perms': {'Users': {'deny': 'read_execute'}}}, + 'name': self.temp_file.name, + 'result': None} + with patch.dict(win_dacl.__opts__, {'test': True}): ret = win_file.check_perms( path=self.temp_file.name, - deny_perms={ - 'Users': { - 'perms': 'read_execute'}}, + deny_perms={'Users': {'perms': 'read_execute'}}, inheritance=None) self.assertDictEqual(expected, ret) @@ -169,36 +166,27 @@ def test_check_perms_deny(self): Test setting deny perms on a file ''' expected = {'comment': '', - 'pchanges': {'deny_perms': {}}, - 'changes': { - 'deny_perms': { - 'Users': {'perms': 'read_execute'}}}, + 'changes': {'perms': {'Users': {'deny': 'read_execute'}}}, 'name': 
self.temp_file.name, 'result': True} - ret = win_file.check_perms(path=self.temp_file.name, - deny_perms={ - 'Users': { - 'perms': 'read_execute'}}, - inheritance=None) + ret = win_file.check_perms( + path=self.temp_file.name, + deny_perms={'Users': {'perms': 'read_execute'}}, + inheritance=None) self.assertDictEqual(expected, ret) def test_check_perms_grant_test_true(self): ''' Test setting grant perms on a file with test=True ''' - with patch.dict(win_file.__opts__, {'test': True}): - expected = {'comment': '', - 'pchanges': { - 'grant_perms': { - 'Users': {'perms': 'read_execute'}}}, - 'changes': {'grant_perms': {}}, - 'name': self.temp_file.name, - 'result': None} + expected = {'comment': '', + 'changes': {'perms': {'Users': {'grant': 'read_execute'}}}, + 'name': self.temp_file.name, + 'result': None} + with patch.dict(win_dacl.__opts__, {'test': True}): ret = win_file.check_perms( path=self.temp_file.name, - grant_perms={ - 'Users': { - 'perms': 'read_execute'}}, + grant_perms={'Users': {'perms': 'read_execute'}}, inheritance=None) self.assertDictEqual(expected, ret) @@ -207,29 +195,24 @@ def test_check_perms_grant(self): Test setting grant perms on a file ''' expected = {'comment': '', - 'pchanges': {'grant_perms': {}}, - 'changes': { - 'grant_perms': { - 'Users': {'perms': 'read_execute'}}}, + 'changes': {'perms': {'Users': {'grant': 'read_execute'}}}, 'name': self.temp_file.name, 'result': True} - ret = win_file.check_perms(path=self.temp_file.name, - grant_perms={ - 'Users': { - 'perms': 'read_execute'}}, - inheritance=None) + ret = win_file.check_perms( + path=self.temp_file.name, + grant_perms={'Users': {'perms': 'read_execute'}}, + inheritance=None) self.assertDictEqual(expected, ret) def test_check_perms_inheritance_false_test_true(self): ''' Test setting inheritance to False with test=True ''' - with patch.dict(win_file.__opts__, {'test': True}): - expected = {'comment': '', - 'pchanges': {'inheritance': False}, - 'changes': {}, - 'name': self.temp_file.name, - 'result': None} + expected = {'comment': '', + 'changes': {'inheritance': False}, + 'name': self.temp_file.name, + 'result': None} + with patch.dict(win_dacl.__opts__, {'test': True}): ret = win_file.check_perms(path=self.temp_file.name, inheritance=False) self.assertDictEqual(expected, ret) @@ -239,7 +222,6 @@ def test_check_perms_inheritance_false(self): Test setting inheritance to False ''' expected = {'comment': '', - 'pchanges': {}, 'changes': {'inheritance': False}, 'name': self.temp_file.name, 'result': True} @@ -252,7 +234,6 @@ def test_check_perms_inheritance_true(self): Test setting inheritance to true when it's already true (default) ''' expected = {'comment': '', - 'pchanges': {}, 'changes': {}, 'name': self.temp_file.name, 'result': True} @@ -272,31 +253,24 @@ def test_check_perms_reset_test_true(self): salt.utils.win_dacl.set_permissions(obj_name=self.temp_file.name, principal='Administrator', permissions='full_control') - - with patch.dict(win_file.__opts__, {'test': True}): - expected = { - 'comment': '', - 'pchanges': { - 'remove_perms': { - 'Administrator': { - 'grant': { - 'applies to': 'Not Inherited (file)', - 'permissions': ['Full control'], - 'inherited': False}}}, - 'grant_perms': { - 'Administrators': {'perms': 'full_control'}, - 'Users': {'perms': 'read_execute'}}}, - 'changes': {'grant_perms': {}}, - 'name': self.temp_file.name, - 'result': None} - ret = win_file.check_perms(path=self.temp_file.name, - grant_perms={ - 'Users': { - 'perms': 'read_execute'}, - 'Administrators': { - 'perms': 
'full_control'}}, - inheritance=False, - reset=True) + expected = {'comment': '', + 'changes': { + 'perms': { + 'Administrators': {'grant': 'full_control'}, + 'Users': {'grant': 'read_execute'}}, + 'remove_perms': { + 'Administrator': { + 'grant': {'applies to': 'Not Inherited (file)', + 'permissions': 'Full control'}}}}, + 'name': self.temp_file.name, + 'result': None} + with patch.dict(win_dacl.__opts__, {'test': True}): + ret = win_file.check_perms( + path=self.temp_file.name, + grant_perms={'Users': {'perms': 'read_execute'}, + 'Administrators': {'perms': 'full_control'}}, + inheritance=False, + reset=True) self.assertDictEqual(expected, ret) def test_check_perms_reset(self): @@ -311,50 +285,21 @@ def test_check_perms_reset(self): salt.utils.win_dacl.set_permissions(obj_name=self.temp_file.name, principal='Administrator', permissions='full_control') - expected = { - 'comment': '', - 'pchanges': {'grant_perms': {}}, - 'changes': { - 'remove_perms': { - 'Administrator': { - 'grant': { - 'applies to': 'Not Inherited (file)', - 'permissions': ['Full control'], - 'inherited': False}}}, - 'grant_perms': { - 'Administrators': {'perms': 'full_control'}, - 'Users': {'perms': 'read_execute'}}}, - 'name': self.temp_file.name, - 'result': True} - ret = win_file.check_perms(path=self.temp_file.name, - grant_perms={ - 'Users': { - 'perms': 'read_execute'}, - 'Administrators': { - 'perms': 'full_control'}}, - inheritance=False, - reset=True) + expected = {'comment': '', + 'changes': { + 'perms': { + 'Administrators': {'grant': 'full_control'}, + 'Users': {'grant': 'read_execute'}}, + 'remove_perms': { + 'Administrator': { + 'grant': {'applies to': 'Not Inherited (file)', + 'permissions': 'Full control'}}}}, + 'name': self.temp_file.name, + 'result': True} + ret = win_file.check_perms( + path=self.temp_file.name, + grant_perms={'Users': {'perms': 'read_execute'}, + 'Administrators': {'perms': 'full_control'}}, + inheritance=False, + reset=True) self.assertDictEqual(expected, ret) - - def test_issue_52002_check_file_remove_symlink(self): - ''' - Make sure that directories including symlinks or symlinks can be removed - ''' - base = temp.dir(prefix='base-') - target = os.path.join(base, 'child 1', 'target\\') - symlink = os.path.join(base, 'child 2', 'link') - try: - # Create environment - self.assertFalse(win_file.directory_exists(target)) - self.assertFalse(win_file.directory_exists(symlink)) - self.assertTrue(win_file.makedirs_(target)) - self.assertTrue(win_file.makedirs_(symlink)) - self.assertTrue(win_file.symlink(target, symlink)) - self.assertTrue(win_file.directory_exists(symlink)) - self.assertTrue(win_file.is_link(symlink)) - # Test removal of directory containing symlink - self.assertTrue(win_file.remove(base)) - self.assertFalse(win_file.directory_exists(base)) - finally: - if os.path.exists(base): - win_file.remove(base) From 8e91ddaa222a75021f50d8d4d4bd189d7939eb60 Mon Sep 17 00:00:00 2001 From: waheedi Date: Tue, 23 Apr 2019 13:02:46 +0200 Subject: [PATCH 247/340] testing new virt methods --- salt/modules/virt.py | 175 +++++++++++++++++++++++++------- tests/unit/modules/test_virt.py | 12 +++ 2 files changed, 149 insertions(+), 38 deletions(-) diff --git a/salt/modules/virt.py b/salt/modules/virt.py index 4da6d4f4f05a..0739a398af7e 100644 --- a/salt/modules/virt.py +++ b/salt/modules/virt.py @@ -323,57 +323,45 @@ def _parse_qemu_img_info(info): def _get_uuid(dom): ''' - Return a uuid from the named vm - - CLI Example: - - .. 
code-block:: bash - - salt '*' virt.get_uuid + Get uuid from a libvirt domain object. ''' - return ElementTree.fromstring(get_xml(dom)).find('uuid').text + uuid = ElementTree.fromstring(dom.XMLDesc(0)).find('uuid').text + + return uuid def _get_on_poweroff(dom): ''' - Return `on_poweroff` setting from the named vm - - CLI Example: - - .. code-block:: bash - - salt '*' virt.get_on_restart + Get on_poweroff from a libvirt domain object. ''' - node = ElementTree.fromstring(get_xml(dom)).find('on_poweroff') + node = ElementTree.fromstring(dom.XMLDesc(0)).find('on_poweroff') + return node.text if node is not None else '' def _get_on_reboot(dom): ''' - Return `on_reboot` setting from the named vm - - CLI Example: - - .. code-block:: bash - - salt '*' virt.get_on_reboot + Get on_reboot from a libvirt domain object. ''' - node = ElementTree.fromstring(get_xml(dom)).find('on_reboot') + node = ElementTree.fromstring(dom.XMLDesc(0)).find('on_reboot') + return node.text if node is not None else '' def _get_on_crash(dom): ''' - Return `on_crash` setting from the named vm + Get on_crash from a libvirt domain object. + ''' + node = ElementTree.fromstring(dom.XMLDesc(0)).find('on_crash') - CLI Example: + return node.text if node is not None else '' - .. code-block:: bash - salt '*' virt.get_on_crash +def _get_macs(dom): ''' - node = ElementTree.fromstring(get_xml(dom)).find('on_crash') - return node.text if node is not None else '' + Get mac addresses (macs) from a libvirt domain object. + ''' + return [node.get('address') for node in dom.XMLDesc(0).findall('devices/interface/mac')] def _get_nics(dom): @@ -2155,8 +2143,11 @@ def get_macs(vm_, **kwargs): salt '*' virt.get_macs ''' - doc = ElementTree.fromstring(get_xml(vm_, **kwargs)) - return [node.get('address') for node in doc.findall('devices/interface/mac')] + conn = __get_conn(**kwargs) + macs = _get_macs(_get_domain(conn, vm_)) + conn.close() + + return macs def get_graphics(vm_, **kwargs): @@ -2412,6 +2403,114 @@ def full_info(**kwargs): conn.close() return info +def get_uuid(vm_, **kwargs): + ''' + Return a uuid from the named vm + + :param vm_: name of the domain + :param connection: libvirt connection URI, overriding defaults + + .. versionadded:: 2019.2.0 + :param username: username to connect with, overriding defaults + + .. versionadded:: 2019.2.0 + :param password: password to connect with, overriding defaults + + .. versionadded:: 2019.2.0 + + CLI Example: + + .. code-block:: bash + + salt '*' virt.get_uuid + ''' + conn = __get_conn(**kwargs) + uuid = _get_uuid(_get_domain(conn, vm_)) + + return uuid + + +def get_on_poweroff(vm_, **kwargs): + ''' + Return a on_poweroff from the named vm + + :param vm_: name of the domain + :param connection: libvirt connection URI, overriding defaults + + .. versionadded:: 2019.2.0 + :param username: username to connect with, overriding defaults + + .. versionadded:: 2019.2.0 + :param password: password to connect with, overriding defaults + + .. versionadded:: 2019.2.0 + + + CLI Example: + + .. code-block:: bash + + salt '*' virt.get_on_poweroff + ''' + conn = __get_conn(**kwargs) + on_poweroff = _get_on_poweroff(_get_domain(conn, vm_)) + + return on_poweroff + + +def get_on_reboot(vm_, **kwargs): + ''' + Return a on_reboot from the named vm + + :param vm_: name of the domain + :param connection: libvirt connection URI, overriding defaults + + .. versionadded:: 2019.2.0 + :param username: username to connect with, overriding defaults + + .. 
versionadded:: 2019.2.0 + :param password: password to connect with, overriding defaults + + .. versionadded:: 2019.2.0 + + CLI Example: + + .. code-block:: bash + + salt '*' virt.get_on_reboot + ''' + conn = __get_conn(**kwargs) + on_reboot = _get_on_reboot(_get_domain(conn, vm_)) + + return on_reboot + + +def get_on_crash(vm_, **kwargs): + ''' + Return a on_crash from the named vm + + :param vm_: name of the domain + :param connection: libvirt connection URI, overriding defaults + + .. versionadded:: 2019.2.0 + :param username: username to connect with, overriding defaults + + .. versionadded:: 2019.2.0 + :param password: password to connect with, overriding defaults + + .. versionadded:: 2019.2.0 + + CLI Example: + + .. code-block:: bash + + salt '*' virt.get_on_crash + ''' + conn = __get_conn(**kwargs) + on_crash = _get_on_crash(_get_domain(conn, vm_)) + + return on_crash + def get_xml(vm_, **kwargs): ''' @@ -3505,14 +3604,14 @@ def vm_diskstats(vm_=None, **kwargs): .. code-block:: bash - salt '*' virt.vm_blockstats + salt '*' virt.vm_diskstats ''' - def get_disk_devs(dom): + def _get_disk_devs(dom): ''' - Extract the disk devices names from the domain XML definition + Get the disk devices names from a libvirt domain object. ''' - doc = ElementTree.fromstring(get_xml(dom, **kwargs)) - return [target.get('dev') for target in doc.findall('devices/disk/target')] + return [target.get('dev') for target in dom.XMLDesc(0).findall('devices/disk/target')] + def _info(dom): ''' @@ -3520,7 +3619,7 @@ def _info(dom): ''' # Do not use get_disks, since it uses qemu-img and is very slow # and unsuitable for any sort of real time statistics - disks = get_disk_devs(dom) + disks = _get_disk_devs(dom) ret = {'rd_req': 0, 'rd_bytes': 0, 'wr_req': 0, diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py index b6ddd05634c8..f89848977539 100644 --- a/tests/unit/modules/test_virt.py +++ b/tests/unit/modules/test_virt.py @@ -2663,3 +2663,15 @@ def test_pool_list_volumes(self): self.mock_conn.storagePoolLookupByName.return_value = mock_pool # pylint: enable=no-member self.assertEqual(names, virt.pool_list_volumes('default')) + + def test_get_uuid(self): + ''' + Test virt.get_uuid() + ''' + mock = MagicMock(return_value={}) + with patch.dict(virt.__salt__, {'config.get': mock}): # pylint: disable=no-member + ret = virt.get_uuid() + self.assertTrue(len(ret) == 1) + uuid = ret[0] + print uuid + From d4abddd3ed7d8988692312cf7244d8088250a8c3 Mon Sep 17 00:00:00 2001 From: dextertan Date: Tue, 23 Apr 2019 19:07:15 +0800 Subject: [PATCH 248/340] fix TypeError: argument of type int is not iterable --- salt/cli/batch.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/salt/cli/batch.py b/salt/cli/batch.py index 02477b553586..6c92769ed0d0 100644 --- a/salt/cli/batch.py +++ b/salt/cli/batch.py @@ -8,7 +8,6 @@ import math import time import copy -import sys from datetime import datetime, timedelta # Import salt libs @@ -86,11 +85,7 @@ def get_bnum(self): ''' partition = lambda x: float(x) / 100.0 * len(self.minions) try: - if sys.version_info.major == 3: - batch_is_str = isinstance(self.opts['batch'], str) - else: - batch_is_str = isinstance(self.opts['batch'], str) or isinstance(self.opts['batch'], unicode) - if batch_is_str and '%' in self.opts['batch']: + if isinstance(self.opts['batch'], six.string_types) and '%' in self.opts['batch']: res = partition(float(self.opts['batch'].strip('%'))) if res < 1: return int(math.ceil(res)) From 4781020d2d61e8e71ae54910e10fb6fc2ab22a52 
Mon Sep 17 00:00:00 2001 From: waheedi Date: Tue, 23 Apr 2019 13:26:11 +0200 Subject: [PATCH 249/340] test_Get_uuid --- tests/unit/modules/test_virt.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py index f89848977539..8d833d6bc101 100644 --- a/tests/unit/modules/test_virt.py +++ b/tests/unit/modules/test_virt.py @@ -2668,10 +2668,14 @@ def test_get_uuid(self): ''' Test virt.get_uuid() ''' - mock = MagicMock(return_value={}) - with patch.dict(virt.__salt__, {'config.get': mock}): # pylint: disable=no-member - ret = virt.get_uuid() - self.assertTrue(len(ret) == 1) - uuid = ret[0] - print uuid + root_dir = os.path.join(salt.syspaths.ROOT_DIR, 'srv', 'salt-images') + xml = ''' + + minion-1 + e6e3f990-8997-4a5e-8cb7-ea835eae4bbe + + ''' + + domain = self.set_mock_vm("test-vm-info", xml) + self.assertEqual("e6e3f990-8997-4a5e-8cb7-ea835eae4bbe", virt.get_uuid('test-vm-info')) From 2d4f0c7d86ff4465d8ab8852ec03d7dfe3a39b0a Mon Sep 17 00:00:00 2001 From: waheedi Date: Tue, 23 Apr 2019 13:47:30 +0200 Subject: [PATCH 250/340] added tests for vm_info --- tests/unit/modules/test_virt.py | 196 +++++++++++++++++++++++++++++--- 1 file changed, 182 insertions(+), 14 deletions(-) diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py index 8d833d6bc101..af106fe57e12 100644 --- a/tests/unit/modules/test_virt.py +++ b/tests/unit/modules/test_virt.py @@ -1398,6 +1398,188 @@ def test_mixed_dict_and_list_as_profile_objects(self): re.match('^([0-9A-F]{2}[:-]){5}([0-9A-F]{2})$', interface_attrs['mac'], re.I)) + def test_vm_info(self): + ''' + Test virt.vm_info(vm_name) + ''' + xml = ''' + + minion-1 + e6e3f990-8997-4a5e-8cb7-ea835eae4bbe + + + + + + 819200 + 819200 + 1 + + hvm + + + + + + + + + + + + + destroy + restart + destroy + + + + + + /usr/bin/qemu-system-x86_64 + + + + +
+            <!-- remaining lines of the domain XML fixture are not recoverable
+                 here: the markup was stripped when this patch series was
+                 captured -->